diff --git "a/4091.jsonl" "b/4091.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4091.jsonl"
@@ -0,0 +1,806 @@
+{"seq_id":"326814853","text":"# coding:utf-8\n\n# This script is used for clean apk \n# by lanxiong@richinfo.cn \n# 2015.12.03\n\n\nimport os\nimport time\nimport ora\nimport logging\n\n\nstarttime = time.time()\nlogfile = 'cleanapk.'+time.strftime(\"%Y%m%d%H%M%S\", time.localtime())+'.log'\nlogging.basicConfig(filename='/home/middle/script/py/'+logfile,level=logging.INFO,format='%(message)s')\n\napkfileset = set()\napkoraset = set()\n\ndef scandir(dir):\n\tos.chdir(dir)\n\tfor i in os.listdir(dir):\n\t\tif os.path.isfile(i):\n\t\t\tapkfileset.add(os.path.abspath(i).replace('/data/pcapk.mmarket.com',''))\n\t\telse:\n\t\t\tscandir(os.path.abspath(i))\n\t\t\tos.chdir(os.pardir)\n\napkoraset = ora.get_apkset('mmportquery','iK7f1kiLkjf9','10.101.12.190','1521','mmcxdb',ora.sql)\nscandir('/data/pcapk.mmarket.com/MMAPK/')\ndelset = apkfileset - apkoraset\n\n# \n# i = \t\t /MMAPK_del/MMAPK/300000076723/2701596/229200taobao_android_4.2.2.apk\n# apkfullname = MMAPK_del/MMAPK/300000076723/2701596/229200taobao_android_4.2.2.apk\n# dirname = MMAPK_del/MMAPK/300000076723/2701596\n\n# clean MMAPK_del\nos.system('rm -rf /data/pcapk.mmarket.com/MMAPK_del/MMAPK/*')\natime_bigthanninemonth = 0\nfor i in delset:\n\tstatinfo=os.stat('/data/pcapk.mmarket.com'+i)\n\tif starttime - statinfo.st_atime > 7776000:\n\t\tapkfullname = i.replace('/MMAPK/','MMAPK/')\n\t\tdirname_cmd = \"\"\"dirname %s\"\"\"%(apkfullname)\n\t\tdirname = os.popen(dirname_cmd).read()\n\t\tmkdir_cmd = \"\"\"mkdir -p /data/pcapk.mmarket.com/MMAPK_del/%s\"\"\"%(dirname)\n\t\tmv_cmd = \"\"\"mv /data/pcapk.mmarket.com/%s /data/pcapk.mmarket.com/MMAPK_del/%s\"\"\"%(apkfullname,dirname)\n\t\tlogging.info(mkdir_cmd)\n\t\tlogging.info(mv_cmd)\n\t\tos.system(mkdir_cmd)\n\t\tos.system(mv_cmd)\n\t\tatime_bigthanninemonth += 1\nendtime = time.time()\n\nlogging.info('apkfileset length = '+str(len(apkfileset)))\nlogging.info('apkoraset length = '+str(len(apkoraset)))\nlogging.info('delset length = '+str(len(delset)))\nlogging.info('del atime >90day = '+str(atime_bigthanninemonth)+'rows')\nlogging.info('used time: '+str(endtime-starttime)+'s')\n","sub_path":"python/cleanapk.py","file_name":"cleanapk.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"204725959","text":"# encoding: utf-8\n\"\"\"\ntest_issue57.py\n\nCreated by Graham Higgins on 2021-11-14.\nCopyright (c) 2021 Graham Higgins. 
All rights reserved.\n\"\"\"\n\nimport os\nimport re\nfrom test.data import TEST_DATA_DIR\n\nimport pytest\nfrom rdflib import RDF, XSD, Namespace, logger\n\nfrom FuXi.Horn.RIFCore import ENT, RIF_NS, RIFCoreParser\n\nBI_NS = Namespace(\"http://www.w3.org/2007/rif-builtin-predicate#\")\nEX_NS = Namespace(\"http://example.org/example#\")\n\nnsBindings = {\n \"rif\": RIF_NS,\n \"xs\": XSD,\n \"ent\": ENT,\n \"rdf\": RDF,\n \"bi\": BI_NS,\n \"ex\": EX_NS,\n}\n\n\ndef test_issue_057():\n url = \"file://\" + os.path.join(TEST_DATA_DIR, \"infinite_existential.rif\")\n logger.info(f\"RIF TEST {url}\")\n parser = RIFCoreParser(url, nsBindings=nsBindings)\n\n assert str(sorted(list(parser.getRuleset()))) == (\n (\n \"[[], [Forall ?x ( Exists ?y ( And( (?x ?y) \"\n \"(?y) ) ) :- (?x) )]]\"\n )\n )\n","sub_path":"test/test_issues/test_issue_057.py","file_name":"test_issue_057.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"477399491","text":"class RF:\n \"\"\"docstring for RF\"\"\"\n def read_tag(self):\n import RPi.GPIO as GPIO\n import sys\n\n sys.path.append('/home/pi/Documents/objetos/MFRC522-python')\n from mfrc522 import SimpleMFRC522\n\n reader = SimpleMFRC522()\n\n print(\"Hold tag near the reader\")\n\n try:\n t_id, text= reader.read_no_block()\n return t_id, text\n\n finally:\n GPIO.cleanup()\n \nif __name__ == '__main__': \n rf = RF()\n rf.readTag()\n","sub_path":"Dia_05/RF.py","file_name":"RF.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508912165","text":"'''database functions'''\n\n# pylint: disable=E1101, E1601, W0612\n\nimport numpy as np\n\nsql_path = 'c:\\\\Users\\\\adam.sohonyai\\\\Documents\\\\GitHub\\\\poker_model\\\\sql\\\\version2\\\\'\n\ndef sql_delete_all(poker_db, table):\n '''delete all rows from table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n delete_sql_file = open(sql_path + 'delete_all.sql').read()\n delete_sql = eval(f'f\"\"\"{delete_sql_file}\"\"\"')\n poker_cursor.execute(delete_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_select_tables(poker_db):\n '''query name of all tables from poker_version2 database'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_tables.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n tables = [[*table] for table in zip(*poker_result)]\n # poker_db.close()\n\n return tables[0]\n\ndef sql_games_max_id(poker_db):\n '''query max id from poker_version2.games table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_games_max_id.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n # poker_db.close()\n\n return poker_result[0][0]\n\ndef sql_decision_points_max_id(poker_db):\n '''query max id from poker_version2.decision_points table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n 
select_sql_file = open(sql_path + 'select_decision_points_max_id.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n # poker_db.close()\n\n return poker_result[0][0]\n\ndef sql_possible_moves_max_id(poker_db, decision_point_id):\n '''query max id from poker_version2.possible_moves table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_possible_moves_max_id.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n # poker_db.close()\n\n return poker_result[0][0]\n\ndef sql_stack_result(poker_db, position):\n '''calculate result of game for given position'''\n\n index = sql_games_max_id(poker_db=poker_db)\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_stack_result.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n # poker_db.close()\n \n return poker_result[0][0]\n\ndef sql_possible_moves_features(poker_db, decision_point_id, action):\n '''select current values of poker_version2.possible_moves.counter and\n poker_version2.possible_moves.total_profit'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_possible_moves_features.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n # poker_db.close()\n\n return poker_result[0][0], poker_result[0][1]\n\ndef sql_insert_games(poker_db, index, player_num, small_blind_amount, ante_amount, uuid, name, stack, position, \\\n card1, card2, hand_db_format, flop1, flop2, flop3, turn, river, final_stack):\n '''insert rows into poker_version2.games table'''\n\n if position == 1:\n position_name = 'SB'\n elif position == 2:\n position_name = 'BB'\n elif position == 3:\n position_name = 'UTG'\n elif position == 4:\n position_name = 'MIDDLE'\n elif position == 5:\n position_name = 'TAIL'\n else:\n position_name = 'DEALER'\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n insert_sql_file = open(sql_path + 'insert_games.sql').read()\n insert_sql = eval(f'f\"\"\"{insert_sql_file}\"\"\"')\n poker_cursor.execute(insert_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_insert_history(poker_db, phase, nr, step, uuid, position, stack, pot, flop1, flop2, flop3, turn, river, \\\n action, amount, new_stack, new_pot):\n '''insert rows into poker_version2.games table'''\n\n game_id = sql_games_max_id(poker_db=poker_db)\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n insert_sql_file = open(sql_path + 'insert_history.sql').read()\n insert_sql = eval(f'f\"\"\"{insert_sql_file}\"\"\"')\n poker_cursor.execute(insert_sql)\n poker_cursor.execute('COMMIT')\n\n sql_update_games_board(poker_db=poker_db, index=game_id, flop1=flop1, flop2=flop2, flop3=flop3, \\\n turn=turn, river=river)\n # poker_db.close()\n\n return None\n\ndef 
sql_insert_decision_points(poker_db, phase, nr, position, hand_db_format, stack, pot, \\\n flop1, flop2, flop3, turn, river, history):\n '''insert rows into poker_version2.decision_points table'''\n\n index = sql_decision_points_max_id(poker_db=poker_db) + 1\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n insert_sql_file = open(sql_path + 'insert_decision_points.sql').read()\n insert_sql = eval(f'f\"\"\"{insert_sql_file}\"\"\"')\n poker_cursor.execute(insert_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_insert_possible_moves(poker_db, action, bet_amount, counter=1, total_profit=1, expected_value=1):\n '''insert rows into poker_version2.possible_moves table'''\n\n decision_point_id = sql_decision_points_max_id(poker_db=poker_db)\n index = sql_possible_moves_max_id(poker_db=poker_db, decision_point_id=decision_point_id) + 1\n \n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n insert_sql_file = open(sql_path + 'insert_possible_moves.sql').read()\n insert_sql = eval(f'f\"\"\"{insert_sql_file}\"\"\"')\n poker_cursor.execute(insert_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef decision_point_based_action(poker_db, phase, nr, step, position, stack, pot, \\\n flop1, flop2, flop3, turn, river, valid_actions):\n '''decision point calculations'''\n\n game_id = sql_games_max_id(poker_db)\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n select_sql_file = open(sql_path + 'select_hand.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n\n hand_db_format = poker_result[0][0]\n\n select_sql_file = open(sql_path + 'select_decision_points.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n select_sql = select_sql.replace('s.*', 'REPLACE(REPLACE(REPLACE(' +\\\n ' CONCAT(\\'{\\',' +\\\n ' GROUP_CONCAT(s.jobj SEPARATOR \\',\\\\' + 'n\\'),' +\\\n ' \\'}\\'), \\'[\\\"\\', \\'[\\'), \\'\\\"]\\', \\']\\'), \\'\\\\\\\\\\', \\'\\') as jobj')\n poker_cursor.execute(select_sql)\n poker_result = poker_cursor.fetchall()\n\n if poker_result[0][0] != -1:\n decision_point_id = poker_result[0][0]\n\n select_sql_file = open(sql_path + 'select_possible_moves.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n poker_cursor.execute(select_sql)\n possible_actions_list = poker_cursor.fetchall()\n \n POSSIBLE_ACTIONS_LIST = [[*elem] for elem in zip(*possible_actions_list)]\n ID = POSSIBLE_ACTIONS_LIST[0]\n ACTION = POSSIBLE_ACTIONS_LIST[1]\n BET_AMOUNT = POSSIBLE_ACTIONS_LIST[2]\n EV = POSSIBLE_ACTIONS_LIST[3]\n if min(EV) <= 0:\n EV = [elem + abs(min(EV)) + 1 for elem in EV]\n\n EV = [elem / sum(EV) for elem in EV]\n final_action_id = np.random.choice(ID, p=EV)\n final_action = ACTION[final_action_id]\n final_action_amount = BET_AMOUNT[final_action_id]\n\n decision = {'position': position, 'decision_point_id': decision_point_id, 'action': final_action}\n\n return final_action, final_action_amount, decision\n else:\n select_sql_file = open(sql_path + 'select_decision_points_history.sql').read()\n select_sql = eval(f'f\"\"\"{select_sql_file}\"\"\"')\n select_sql = select_sql.replace('s.*', 'REPLACE(REPLACE(REPLACE(' +\\\n ' CONCAT(\\'{\\',' +\\\n ' GROUP_CONCAT(s.jobj SEPARATOR 
\\',\\\\' + 'n\\'),' +\\\n ' \\'}\\'), \\'[\\\"\\', \\'[\\'), \\'\\\"]\\', \\']\\'), \\'\\\\\\\\\\', \\'\\') as jobj')\n poker_cursor.execute(select_sql)\n history = poker_cursor.fetchall()\n \n sql_insert_decision_points(poker_db=poker_db, phase=phase, nr=nr, position=position, \\\n hand_db_format=hand_db_format, stack=stack, pot=pot, flop1=flop1, flop2=flop2, flop3=flop3, \\\n turn=turn, river=river, history=history[0][0])\n \n for action in valid_actions:\n if action['action'] != 'raise':\n sql_insert_possible_moves(poker_db, action=action['action'], bet_amount=action['amount'])\n else:\n # amount = np.random.randint(action['amount']['min'], action['amount']['max'] + 1)\n sql_insert_possible_moves(poker_db, action=action['action'], bet_amount=action['amount']['min'])\n\n # poker_db.close()\n\n return decision_point_based_action(poker_db=poker_db, phase=phase, nr=nr, step=step, position=position, \\\n pot=pot, stack=stack, flop1=flop1, flop2=flop2, flop3=flop3, turn=turn, river=river, \\\n valid_actions=valid_actions)\n\ndef sql_update_games_cards(poker_db, index, uuid, card1, card2, hand_db_format):\n '''update card info in poker_version2.games table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n update_sql_file = open(sql_path + 'update_games_cards.sql').read()\n update_sql = eval(f'f\"\"\"{update_sql_file}\"\"\"')\n poker_cursor.execute(update_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_update_games_board(poker_db, index, flop1, flop2, flop3, turn, river):\n '''update card info in poker_version2.games table'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n update_sql_file = open(sql_path + 'update_games_board.sql').read()\n update_sql = eval(f'f\"\"\"{update_sql_file}\"\"\"')\n poker_cursor.execute(update_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_update_games_final_stack(poker_db, index, uuid, final_stack):\n '''update final stack info in poker_version2.games table at end of game'''\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n update_sql_file = open(sql_path + 'update_games_final_stack.sql').read()\n update_sql = eval(f'f\"\"\"{update_sql_file}\"\"\"')\n poker_cursor.execute(update_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef sql_update_possible_moves(poker_db, position, decision_point_id, action):\n '''update columns in poker_version2.possible_moves table'''\n\n result = sql_stack_result(poker_db=poker_db, position=position)\n counter, total_profit = sql_possible_moves_features(poker_db=poker_db, \\\n decision_point_id=decision_point_id, action=action)\n\n if counter == 1 and total_profit == 1:\n total_profit = result\n else:\n counter = counter + 1\n total_profit = total_profit + result\n \n expected_value = total_profit / counter\n\n # poker_db = mysql.connector.connect(user='root', host='127.0.0.1', database='poker_version2')\n poker_cursor = poker_db.cursor()\n update_sql_file = open(sql_path + 'update_possible_moves.sql').read()\n update_sql = eval(f'f\"\"\"{update_sql_file}\"\"\"')\n poker_cursor.execute(update_sql)\n poker_cursor.execute('COMMIT')\n # poker_db.close()\n\n return None\n\ndef community_cards_eval(board):\n '''function to evaluate community cards'''\n\n if board == []:\n 
final_board = ['', '', '', '', '']\n elif board != [] and len(board) == 3:\n board.sort()\n for i in range(2):\n board.append('')\n final_board = board\n elif board != [] and len(board) == 4:\n final_board = board[:3]\n final_board.sort()\n final_board.append(board[3])\n final_board.append('')\n elif board != [] and len(board) == 5:\n final_board = board[:3]\n final_board.sort()\n for i in range(3, 5):\n final_board.append(board[i])\n else:\n pass\n\n return final_board\n\ndef valid_actions_check(actions, phase, position, stack, small_blind_amount):\n '''function to check and return truly valid actions'''\n\n fold_actions = fold_check(actions=actions, phase=phase, position=position, \\\n small_blind_amount=small_blind_amount)\n call_actions = call_check(actions=fold_actions, stack=stack)\n final_actions = raise_check(actions=call_actions, stack=stack)\n\n return final_actions\n\ndef fold_check(actions, phase, position, small_blind_amount):\n '''check fold action'''\n\n remove_fold_flag = 0\n for action in actions:\n\n if action['action'] == 'call' and (action['amount'] == 0 or (action['amount'] == small_blind_amount * 2 \\\n and phase == 'preflop' and position == 2)):\n remove_fold_flag = 1\n \n fold_actions = []\n for action in actions:\n\n if action['action'] == 'fold' and remove_fold_flag == 1:\n pass\n else:\n fold_actions.append(action)\n\n return fold_actions\n\ndef call_check(actions, stack):\n '''check fold action'''\n\n for action in actions:\n\n if action['action'] == 'call' and action['amount'] > stack:\n action['amount'] = stack\n\n call_actions = actions\n\n return call_actions\n\ndef raise_check(actions, stack):\n '''check fold action'''\n\n for action in actions:\n\n if action['action'] == 'raise' and action['amount']['min'] == -1 and action['amount']['max'] == -1:\n action['amount']['min'] = stack\n action['amount']['max'] = stack\n \n raise_actions = actions\n\n return raise_actions\n\ndef summarize(poker_db, final_stacks, summary):\n '''summarize the result of round'''\n\n index = sql_games_max_id(poker_db)\n\n for final_stack in final_stacks:\n\n sql_update_games_final_stack(poker_db=poker_db, index=index, uuid=final_stack['uuid'], \\\n final_stack=final_stack['stack'])\n \n for decision in summary:\n\n sql_update_possible_moves(poker_db=poker_db, position=decision['position'], \\\n decision_point_id=decision['decision_point_id'], action=decision['action'])\n\n return None\n","sub_path":"py/version2/funcs_db.py","file_name":"funcs_db.py","file_ext":"py","file_size_in_byte":15640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518050809","text":"import math\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\nimport os\n\n# a pulse forcing function. The width and height are the\n# time to hold the pulse and the pulse amplitude. 
The delay\n# is the time to start applying the pulse.\nclass Pulse:\n\n def __init__(self,width, height, delay=0.0):\n self.delay = delay\n self.pulse_width = width\n self.height = height\n\n # given a time, compute the value of the pulse.\n def compute(self,time):\n delta = time - self.delay\n if delta >= 0 and delta < self.pulse_width:\n return self.height\n else:\n return 0.0\n\n def copy(self):\n return Pulse(self.pulse_width, self.height, self.delay)\n\n\n# a forcing function that always returns zero.\nclass Zero:\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef compute(self,time):\n\t\treturn 0.0;\n\n\n# A 3 cell puf.\nclass ODEPuf3:\n\n # creates a new PUF with a set of K parameters.\n def __init__(self, ks):\n assert(len(self.var_names) == self.number_diffeqs)\n\n self.ks = {}\n # set all of the k parameters, make sure that\n # there is a k parameter for every variable in the system\n for vn in self.var_names:\n assert(vn in ks)\n self.ks[vn] = ks[vn]\n\n\n # instantiate the state of the system.\n self._state = {}\n for vn in self.var_names:\n self._state[vn] = 0.0\n\n # instantiate forcing function for each variable.\n # each variable is initially given a forcing function that always\n # returns zero (no challenge is applied)\n self._forcing_functions = {};\n for vn in self.var_names:\n self._forcing_functions[vn] = Zero()\n\n\n def state(self):\n return dict(self._state)\n\n def set_state(self,state):\n for variable in self.var_names:\n self._state[variable] = state[variable]\n\n @property\n def number_diffeqs(self):\n return self.number_cells*2;\n\n @property\n def number_cells(self):\n return 3;\n\n @property\n def var_names(self):\n return [\"x0\",\"v0\",\"x1\",\"v1\",\"x2\",\"v2\"]\n\n def set_to_zero(self):\n for variable in self.var_names:\n self._state[variable] = 0.0\n\n def _derivative(self,t,state):\n\t\t#FIXME, return array of derivatives given the current state of the system.\n # x_0\n ddts = dict(map(lambda variable: (variable,0.0), self.var_names))\n ddts[\"x0\"] = self.ks[\"x0\"]*state[\"x0\"] + self._forcing_functions[\"x0\"].compute(t)\n #FIXME: return actual correct derivatives\n return ddts\n\n # converts a map (or dictionary) of variable values to an array of values.\n # the order of values in the array is determined by the `var_names` property\n def _to_array(self,value_dict):\n return list(map(lambda name: value_dict[name],self.var_names))\n\n # converts a list of variable values to a map (or dictionary) of values. The\n # dictionary maps variable names to values. 
The order of values in the list\n # should follow the order of variables defined in `var_names`\n def _from_array(self,value_list):\n return dict(zip(self.var_names, value_list))\n\n # simulates the system, using the state variables in `self._state` as the initial condition\n # The simulation runs for `sim_time` units of time and returns a dictionary of computed time\n # and value samples\n def simulate(self, sim_time, samples_per_step=100):\n def dt_func(t,vs):\n result = self._derivative(t, self._from_array(vs))\n return self._to_array(result)\n\n def ext_func(t):\n return list(map(lambda name: self._forcing_functions[name].compute(t), \\\n self.var_names))\n\n # compute the time between samples and the total number of samples\n dt = 1.0/samples_per_step\n n = math.ceil(sim_time/dt)\n\n # setup differential equation solver\n solver = ode(dt_func).set_integrator('zvode', method='bdf')\n\n # set initial condition\n x0 = self._to_array(self.state())\n solver.set_initial_value(x0)\n\n # record the state at each sample step. The solver may compute the step\n # at intermediate times to get a good simulation.\n info = {\"time\":[], \"values\":[], \"forcing_inputs\":[]}\n while solver.successful() and solver.t < sim_time:\n info[\"time\"].append(solver.t)\n info[\"values\"].append(solver.y)\n info[\"forcing_inputs\"].append(ext_func(solver.t))\n solver.integrate(solver.t + dt)\n\n\n return info\n\n\n # saves the variable trajectories from the simulation to a series of files in plots/\n # The `prefix` argument sets the prefix for each plot. The suffix of the plot\n # is the state variable the plot shows.\n def save_plots(self,prefix,info):\n directory = \"plots\"\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n # translate the data in `info` to a format that can be easily plotted\n times = info[\"time\"]\n variables = {}\n forcing_inputs = {}\n for idx,variable in enumerate(self.var_names):\n variables[variable] = []\n\n for vect in info[\"values\"]:\n variables[variable].append(vect[idx])\n\n # set up plots for any non-zero forcing functions.\n if not isinstance(self._forcing_functions[variable], Zero):\n forcing_inputs[variable] = []\n for vect in info[\"forcing_inputs\"]:\n forcing_inputs[variable].append(vect[idx])\n\n\n # plot each variable's trajectory and save it to a file\n for name,values in forcing_inputs.items():\n plt.plot(times,values)\n plt.savefig(\"%s/%s_INP_%s.png\" % (directory,prefix,name))\n plt.clf()\n\n\n # plot each variable's trajectory and save it to a file\n for name,values in variables.items():\n plt.plot(times,values)\n plt.savefig(\"%s/%s_%s.png\" % (directory,prefix,name))\n plt.clf()\n\n\n def write_challenge(self,pulses):\n # make sure the inputs make sense\n if not (all(map(lambda p: isinstance(p, Pulse), pulses.values()))):\n raise Exception(\"the challenge must be a list of Pulse objects\")\n\n # update forcing function array and set delays so that we align signals\n for variable in self.var_names:\n # if the variable has a pulse associated with it, align all the\n # leading edges of the pulses (they all start at time 0)\n # you need to copy each pulse before doing that otherwise changing\n # the delay changes the input argument\n if variable in pulses:\n self._forcing_functions[variable] = pulses[variable].copy()\n self._forcing_functions[variable].delay = 0\n else:\n # any variables that do not have pulses are assigned\n # a forcing function which always returns zero (equivalent to\n # no forcing function)\n self._forcing_functions[variable] = 
Zero()\n\n # set the state variable values to zero\n self.set_to_zero();\n\n # simulate for 10 simulation time units\n #FIXME.. how do i simulate\n info = self.simulate(10);\n\n # set the current state of the state variabes in the system\n # to the last value of each state variable in the simulation\n self.set_state(self._from_array(info[\"values\"][-1]))\n return self.state,info\n\n def get_response(self):\n #backup original forcing functions\n challenge = dict(self._forcing_functions)\n\n # set every forcing function to zero.\n # we do this because we stopped applying inputs!\n for variable in self.var_names:\n self._forcing_functions[variable] = Zero()\n\n #FIXME.. how do i get a response\n # simulate for ten simulation time units and return nothing \n info = self.simulate(10);\n\n # restore original forcing functions\n self._forcing_functions = challenge\n return None,info\n\n\ndef test():\n print(\"making 3-cell puf....\")\n # instantiate the puf with a K parameter for each state variable.\n # the constructor will check to make sure every variable has a k parameter.\n puf3 = ODEPuf3({\"x0\":0.1,\"x1\": 0.2, \"x2\": 0.3, \"v0\": 1.0, \"v1\": 0.24, \"v2\":0.01})\n\n # instantiate state variables to zero\n puf3.set_to_zero()\n\n # write a challenge to the puf\n # each puf is labelled with the variable it is provided to\n state,info = puf3.write_challenge({ \\\n \"x0\":Pulse(width=1.0,height=1.0), \\\n \"v0\":Pulse(width=1.0,height=2.0), \\\n \"x1\":Pulse(width=1.0,height=3.0), \\\n \"v1\":Pulse(width=2.0,height=1.0), \\\n \"x2\":Pulse(width=3.0,height=1.0), \\\n \"v2\":Pulse(width=0.5,height=1.0)})\n\n # save the variable trajectories for the challenge phase\n # all plots are written to the \"plots/\" directory\n puf3.save_plots(\"challenge\",info);\n\n # collect the response to the challenge\n response,info = puf3.get_response()\n\n # save the variable trajectories for the response phase\n # all plots are written to the \"plots/\" directory\n puf3.save_plots(\"response\",info);\n\ntest()\n","sub_path":"puf.py","file_name":"puf.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604568431","text":"from joblib import Parallel, delayed\nimport tensorflow as tf\nimport numpy as np\n\n\ndef closest_cell(mat):\n \"\"\"\n Takes a binary tensor M of shape (n, m) and for each position (i,j) computes\n min_{k,l s.th. M[k,l] = 1} \\\\(i,j) - (k,l)\\\\\n :param mat: binary tensor of shape (n,m)\n :return: a tensor of shape (n,m) as defined above\n \"\"\"\n n, m = mat.shape\n g = [[] for _ in range(n * m)]\n\n queue = []\n dist = np.asarray([[np.inf for _ in range(m)] for _ in range(n)])\n visit = [False for _ in range(n * m)]\n\n # building the graph. 
each pixel is a node connected to its neighbours.\n for i in range(n):\n for j in range(m):\n if i < n - 1:\n g[i * m + j].append((i + 1) * m + j)\n g[(i + 1) * m + j].append(i * m + j)\n if j < m - 1:\n g[i * m + j].append(i * m + j + 1)\n g[i * m + j + 1].append(i * m + j)\n\n # add all the positive pixels to the BFS Queue\n if mat[i, j]:\n dist[i, j] = 0\n visit[i * m + j] = True\n queue.append(i * m + j)\n\n # Performing BFS starting at the positive pixels\n while queue:\n tmp = queue.pop(0)\n tmp_i = tmp // m\n tmp_j = tmp % m\n # print(tmp)\n for nbr in g[tmp]:\n # print(\"nbr:\", nbr)\n if not visit[nbr]:\n nbr_i = nbr // m\n nbr_j = nbr % m\n dist[nbr_i, nbr_j] = min(dist[nbr_i, nbr_j], dist[tmp_i, tmp_j] + 1)\n queue.append(nbr)\n visit[nbr] = True\n\n return dist\n\n\ndef sdf(images):\n \"\"\"\n Takes a binary tensor of shape (batch_size, h, w) and computes the signed distance function\n based on it.\n\n Def.\n ----\n Let p be a pixel in image and let E denote the edge of the street (positive region of the binary mask.) Then,\n { - d(p, E) if I[p] = 1, i.e. if the pixel belongs to the street\n sdf(p) := {\n { d(p, E) otherwise,\n\n where d(p, E) = min_{q \\in E} ||p - q||\n\n :param images: tensor\n :return: a tensor of shape (batch_size, h, w) as defined above.\n \"\"\"\n images = tf.cast(images, tf.float32)\n edges = tf.image.sobel_edges(images[:, :, :, None])\n edges = tf.sqrt(edges[:, :, :, 0, 0] ** 2 + edges[:, :, :, 0, 1] ** 2)\n edges /= tf.sqrt(32.)\n\n # edges mask, true iff pixel is an edge\n edges = edges > 0.5\n\n # cc = Parallel(n_jobs=10)(delayed(closest_cell)(edges[i].numpy()) for i in range(len(edges)))\n cc = [closest_cell(edge.numpy()) for edge in edges]\n cc = tf.constant(cc)\n\n return tf.where(images == 1., -cc, cc)\n","sub_path":"preproc/sdf.py","file_name":"sdf.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"39735208","text":"# -*- coding:utf-8 -*-\n# Copyright (c) 2012 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport json\nimport sys\nimport time\nimport uuid\n\nfrom werkzeug import exceptions as exc\nfrom werkzeug import routing\nfrom werkzeug import serving\nfrom werkzeug import wrappers\n\n\ndef main():\n argp = argparse.ArgumentParser()\n argp.add_argument('bind_port', type=int,\n help='Port number to bind to')\n argp.add_argument('--bind-address', default='127.0.0.1',\n help='Address to bind to')\n argp.add_argument('--debug', default=False, action='store_true',\n help='Enable debugging')\n\n app_args = argp.parse_args()\n\n app = _WSGIApplication()\n\n serving.run_simple(\n app_args.bind_address, app_args.bind_port, app,\n use_debugger=app_args.debug, use_reloader=app_args.debug)\n\n\nclass _CatcherStorage(object):\n def __init__(self, stale_tout=3600):\n self.stale_tout = stale_tout\n self.data = list()\n\n def add(self, payload):\n self.data.append({\n 'time': time.time(),\n 
'idnr': str(uuid.uuid1()),\n 'payload': payload})\n\n def lookup(self, since=None):\n sidx = self._lookup_slice(since=since)[0]\n if sidx:\n sidx += 1 # skip \"last\" entity\n return self.data[sidx:]\n\n def delete(self, since=None, till=None):\n if not self.data:\n return\n sidx, eidx = self._lookup_slice(since, till)\n self.data[sidx:eidx] = []\n\n def delete_entity(self, idnr):\n for idx, entity in enumerate(self.data):\n if entity['idnr'] != idnr:\n continue\n break\n else:\n raise ValueError('Entity not found')\n\n self.data.pop(idx)\n\n def _lookup_slice(self, since=None, till=None):\n sidx = 0\n eidx = None\n if since:\n for idx, entity in enumerate(self.data):\n if entity['idnr'] != since:\n continue\n sidx = idx\n break\n\n if till:\n for idx in xrange(len(self.data) - 1, sidx - 1, -1):\n entity = self.data[idx]\n if entity['idnr'] != till:\n continue\n eidx = idx + 1\n break\n return sidx, eidx\n\n def _remove_staled(self):\n stale_line = time.time()\n stale_line -= min(stale_line, self.stale_tout)\n\n for idx, entity in enumerate(self.data):\n if entity['time'] < stale_line:\n continue\n break\n else:\n idx = 0\n\n self.data[:idx] = []\n\n\nclass _HandlerBase(object):\n def __init__(self, request, path_args):\n self.request = request\n self.path_args = path_args\n\n def __call__(self):\n raise NotImplementedError\n\n\nclass _NotifierCatcher(_HandlerBase):\n def __call__(self):\n storage.add(self._fetch_payload())\n return {'op': True}\n\n def _fetch_payload(self):\n if self.request.content_type == 'application/json':\n return self._payload_from_json()\n return self._payload_from_form()\n\n def _payload_from_json(self):\n try:\n payload = json.loads(self.request.data)\n except (ValueError, TypeError) as e:\n raise exc.BadRequest('Invalid payload: {}'.format(e))\n return payload\n\n def _payload_from_form(self):\n payload = dict(self.request.form)\n\n # FIXME: ugly fix of incorrect data transfer from ORD-API\n if len(payload) != 1:\n return payload\n\n key = payload.keys()[0]\n value = payload[key]\n if value != ['']:\n return payload\n\n try:\n payload = json.loads(key)\n except (TypeError, ValueError):\n pass\n return payload\n\n\nclass _NotificationsBase(_HandlerBase):\n pass\n\n\nclass _NotificationsList(_NotificationsBase):\n def __call__(self):\n last = self.request.args.get('last')\n payload = storage.lookup(since=last)\n return {\n 'notifications': payload}\n\n\nclass _NotificationsDelete(_NotificationsBase):\n def __call__(self):\n since = self.request.args.get('start')\n till = self.request.args.get('end')\n storage.delete(since, till)\n return {'op': True}\n\n\nclass _NotificationsEntityDelete(_NotificationsBase):\n def __call__(self):\n try:\n storage.delete_entity(self.path_args['idnr'])\n except ValueError:\n raise exc.NotFound\n return {'op': True}\n\n\nclass _WSGIApplication(object):\n url_map = routing.Map([\n routing.Rule('/ord-target', endpoint='target', methods=['post']),\n routing.Rule('/api/notifications', methods=['get'],\n endpoint='api_notify:list'),\n routing.Rule('/api/notifications', methods=['delete'],\n endpoint='api_notify:remove'),\n routing.Rule('/api/notifications/', methods=['delete'],\n endpoint='api_notify-entity:remove')])\n\n endpoint_map = {\n 'target': _NotifierCatcher,\n 'api_notify:list': _NotificationsList,\n 'api_notify:remove': _NotificationsDelete,\n 'api_notify-entity:remove': _NotificationsEntityDelete}\n\n def dispatch_request(self, request):\n adapter = self.url_map.bind_to_environ(request.environ)\n try:\n endpoint, args = 
adapter.match()\n endpoint = self.endpoint_map[endpoint]\n\n view = endpoint(request, args)\n payload = view()\n payload = json.dumps(payload)\n\n response = wrappers.Response(payload, mimetype='application/json')\n except exc.HTTPException as e:\n return e\n return response\n\n def __call__(self, environ, start_response):\n return self.wsgi_app(environ, start_response)\n\n def wsgi_app(self, environ, start_response):\n request = wrappers.Request(environ)\n response = self.dispatch_request(request)\n return response(environ, start_response)\n\n\nstorage = _CatcherStorage()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"ord/cmd/fake_rds_listener.py","file_name":"fake_rds_listener.py","file_ext":"py","file_size_in_byte":6712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"259520146","text":"import time\nimport logging\nfrom tinydb import TinyDB, Query\n\ndb = TinyDB('db.json')\nTask = Query()\ndef initialize():\n\tres = db.all()\n\tif(bool(res) == False):\n\t\tdb.insert({'currenttask': '', 'name' : 'meta'})\n\t\tlogging.info(\"Database initialized\")\n\ndef starttask(tname):\n\tres = db.search(Task.name == tname)\n\tif(bool(res) == False):\n\t\tdb.insert({'name' : tname, 'starttime': time.time(), 'endtime' : ''})\n\telse:\n\t\tdb.update({'starttime': time.time()}, Task.name == tname )\n\n\tlogging.info(\"Task started.\")\n\tdb.update({'currenttask': tname}, Task.name == 'meta')\n\tlogging.info(\"Current Task updated.\")\n\tprint(\"You've started working on %s \" % tname)\n\ndef getstatus():\n\tres = db.search(Task.name == 'meta')\n\tnm = res[0]['currenttask']\n\tcur = db.search(Task.name == nm)\n\tcurStartTime = cur[0]['starttime']\n\n\tlogging.info(\"Status returned.\")\n\tprint('You\\'ve been working on',nm,'for',int((time.time()-curStartTime)/60),\"minute(s)\")\n\ndef endtask():\n\tres = db.search(Task.name == 'meta')\n\tnm = res[0]['currenttask']\n\tcur = db.search(Task.name == nm)\n\tcurStartTime = cur[0]['starttime']\n\n\tdb.update({'endtime': time.time()},Task.name == nm)\n\tlogging.info(\"Task ended.\")\n\tprint('Task',nm,'ended. 
You worked on',nm,'for',int((time.time()-curStartTime)/60),'minute(s).')\n\ndef note(notetext):\n\tprint('Note added to notes file/current task.')\n\tlogging.info(\"Note added.\")\n\n","sub_path":"dbaccess.py","file_name":"dbaccess.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"24601473","text":"from MC import MarkovChain\nfrom bitarray import bitarray\nfrom cryptography.fernet import Fernet\n\n\nimport sys\nsys.setrecursionlimit(150000)\n\nwith open (\"../books/3001.txt\", \"r\", encoding=\"utf-8\") as myfile:\n data = myfile.readlines()\n\nm = MarkovChain()\n\nfor i in data:\n m.learn(i)\n\nlength = 10000\nm.babble(length)\ndef Encrypt(data):\n key = Fernet.generate_key()\n f = Fernet(key)\n ciphertext = f.encrypt(data)\n return(key, ciphertext)\n\ndef Decrypt(key_ciphertext):\n f = Fernet(key_ciphertext[0])\n decrypttext = f.decrypt(key_ciphertext[1])\n return(decrypttext)\n\ndef Generator(Markov):\n return(Markov.babble(100000))\ndef Generator_small(Markov):\n return(Markov.babble(1000))\n\n# time calculate\nimport matplotlib.pyplot as plt\nimport random\nrandom.seed(4*7)\nimport time\nimport functools\ntimesaver = []\n# Set a probability p\np = 1\n\nstart_time = time.time()\ndata = Generator(m)\ndata = bytes(data, encoding = \"utf-8\")\ndata_small = Generator_small(m)\ndata_small = bytes(data_small, encoding = \"utf-8\")\nprint(\"generating time:\",time.time()-start_time)\nfor j in range(0,5000,1000):\n dataset = []\n #start_time = time.time()\n for i in range(j):\n if random.uniform(0, 1)[^/]+)$')\n .applymap(lambda title: title.split('-'))\n .applymap(lambda title_word_list: ' '.join(title_word_list)))\n \n \n df.loc[missing_titles_mask, 'title']=missing_titles.loc[:,'missing_titles']\n return df\n\ndef _generate_uids_for_rows(df):\n logger.info('Generating uids for rows.')\n uids=(df.apply(lambda row: hashlib.md5(bytes(row['url'].encode('utf-8'))), axis=1)\n .apply(lambda hash_object: hash_object.hexdigest()))\n df['uid'] = uids\n df.set_index('uid',inplace=True)\n \n return df\n\ndef _cleaning_body(df):\n logger.info('Cleaning text body.')\n df['body']=df['body'].apply(lambda body: body.replace('\\n', '')).apply(lambda body: body.replace('\\r', ''))\n return df\n\ndef _tokenize_column(df, column_name):\n stop_words=set(stopwords.words('spanish'))\n return (df\n .dropna()\n .apply(lambda row: nltk.word_tokenize(row[column_name]), axis=1)\n .apply(lambda tokens: list(filter(lambda token: token.isalpha(), tokens)))\n .apply(lambda tokens: list(map(lambda token: token.lower(), tokens)))\n .apply(lambda word_list: list(filter(lambda word: word not in stop_words, word_list)))\n .apply(lambda valid_word_list: len(valid_word_list)))\n\n\ndef _tokenize_titles(df):\n df['n_tokens_title']=_tokenize_column(df, 'title')\n return df\n\ndef _tokenize_body(df):\n df['n_tokens_body']=_tokenize_column(df, 'body')\n return df\n\ndef _remove_duplicates_entries(df, column_name):\n logger.info('Removing duplicates entries')\n df.drop_duplicates(subset=[column_name], keep='first', inplace=True)\n return df\n\ndef _drop_rows_with_missing_values(df):\n logger.info('Dropping rows with missing values')\n return df.dropna()\n\ndef _save_data(df, filename):\n clean_filename=f'clean_{filename}'\n logger.info(f'Saving data at location: {clean_filename}')\n df.to_csv(clean_filename)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('filename',\n help='The path to the dirty 
data',\n type=str)\n\n args = parser.parse_args()\n\n df = main(args.filename)\n print(df)","sub_path":"web_scraper/transform/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548746620","text":"\"\"\"\n:mod:`data_extractor`\n=====================\nCombine **XPath**, **CSS Selector** and **JSONPath** for Web data extracting.\n\"\"\"\n__version__ = \"0.3.2\"\n\n# Local Folder\nfrom .abc import AbstractExtractor, ComplexExtractorMeta, SimpleExtractorBase\nfrom .exceptions import ExprError, ExtractError\nfrom .item import Field, Item\nfrom .json import JSONExtractor\nfrom .lxml import (\n AttrCSSExtractor,\n CSSExtractor,\n Element,\n TextCSSExtractor,\n XPathExtractor,\n)\nfrom .utils import LazyStr, sentinel\n\n\n__all__ = (\n \"AbstractExtractor\",\n \"AttrCSSExtractor\",\n \"CSSExtractor\",\n \"ComplexExtractorMeta\",\n \"Element\",\n \"ExprError\",\n \"ExtractError\",\n \"Field\",\n \"Item\",\n \"JSONExtractor\",\n \"LazyStr\",\n \"SimpleExtractorBase\",\n \"TextCSSExtractor\",\n \"XPathExtractor\",\n \"sentinel\",\n)\n","sub_path":"data_extractor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"338315805","text":"import math\n\nclass BTree:\n def __init__(self, root = None):\n self.root = root\n def print(self):\n self.print_node(self.root)\n def height(self):\n return self._height(1, self.root)\n def _height(self, level, node):\n if node == None:\n return level - 1\n return max(self._height(level + 1, node.left),self._height(level+1, node.right))\n def print_node(self, node):\n if node == None:\n return\n print(str(node.val))\n self.print_node(node.left)\n self.print_node(node.right)\n\nclass Node:\n def __init__(self, val, left = None, right = None):\n self.val = val\n self.left = left\n self.right = right\n\ndef minimal_tree(values):\n num_values = len(values)\n if num_values == 0:\n return None\n if num_values == 1:\n return Node(values[0])\n\n min_height = int(math.ceil(math.log2(num_values + 1)))\n mid = 2 ** (min_height - 1) - 1\n left = minimal_tree(values[:mid])\n right = minimal_tree(values[mid+1:])\n return Node(values[mid], left, right)\n \ntree = BTree()\nprint(tree.height())\nprint(\"\")\n\nfor i in [2 ** x - 1 for x in range(5)]:\n print(\"x = %d\" % (i))\n tree.root = minimal_tree(range(i))\n print(tree.height())\n print(\"\")\n tree.print()\n print(\"\")\nfor i in [2 ** x for x in range(5)]:\n print(\"x = %d\" % (i))\n tree.root = minimal_tree(range(i))\n print(tree.height())\n print(\"\")\n tree.print()\n print(\"\")","sub_path":"graphs/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"259229338","text":"#Kivy import:\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom kivy.properties import StringProperty\nfrom kivy.properties import ListProperty\nfrom kivy.properties import ReferenceListProperty\nfrom kivy.properties import NumericProperty\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.bubble import Bubble\nfrom kivy.uix.button import Button\nfrom kivy.uix.popup import Popup\nfrom kivy.core.window import Window\n\n#Python import:\nimport os.path\nimport 
subprocess\n\n#own classes import:\nimport imgSearch\n\n\nclass My_Manager(ScreenManager):\n\n\t#Screens:\n\tmain_screen_op = ObjectProperty(None)\n\tbrowse_screen_op = ObjectProperty(None)\n\t\n\t#Filepath textInput variable:\n\tfilepath = StringProperty(\"\")\n\t\n\t#Image list:\n\timage_list = ListProperty(None)\n\t\n\t#Mouse Position:\n\t#mouse_pos_test = 0.7\n\t#mouse_pos_x = ObjectProperty(\"float\")\n\tmouse_pos_x = NumericProperty(0.0)\n\tmouse_pos_y = NumericProperty(0.0)\n\tmouse_pos_rlp = ReferenceListProperty(mouse_pos_x, mouse_pos_y)\n\t\n\t#Switch to browse_screen:\n\tdef btn_browse_pressed (self, *args):\n\t\tself.main_screen_op.manager.current = \"browse_screen_name\"\n\t\t\n\t#Switch to main_screen:\n\tdef btn_back_pressed (self, *args):\n\t\tself.main_screen_op.manager.current = \"main_screen_name\"\n\t\t\n\t#When OK-button is pressed or textInput is validated:\n\tdef new_path(self, path, screen_id):\n\t\t#Update filepath StringProperty:\n\t\tself.filepath = path\n\t\t#Browse_screen list gets new path:\n\t\tif screen_id == \"browse_screen_id\":\n\t\t\tself.ids[\"folder_view_id\"].path = self.filepath\n\t\t#Main_screen list gets filled:\n\t\tif screen_id == \"main_screen_id\":\n\t\t\tlist_images = imgSearch.ListImages(self.filepath, self.ids[\"subfolders_id\"].active)\n\t\t\tself.image_list = list_images.images\n\t\t\t\n\t\t\t\n\t#Click on image list gets mouse position:\n\tdef left_click(self, sel_image):\n\t\tx = Window.mouse_pos[0]/Window.width\n\t\ty = Window.mouse_pos[1]/Window.height\n\t\tself.mouse_pos_x = x\n\t\tself.mouse_pos_y = y\n\t\t\n\t\tif sel_image != []:\n\t\t\tPopupBubble(pos_hint={\"x\":self.mouse_pos_x, \"y\":self.mouse_pos_y}).open()\n\n\t\nclass MainScreen(Screen):\n\tpass\n\t\nclass BrowseScreen(Screen):\n\tpass\n\t\nclass PopupBubble(Popup):\n\t#Open explorer and mark file:\t\t\n\tdef mark_file (self, sel_image):\n\t\timage_path = None\n\t\t\n\t\ttry:\n\t\t\timage_path = sel_image[0].text\n\t\texcept: \n\t\t\timage_path = None\n\t\t\t\n\t\tif image_path != None:\n\t\t\tprint (image_path)\n\t\t\tsubprocess.Popen(r'explorer /select,\"{}\"'.format(image_path))\n\t\n\t\n\t\n\t\n\nclass My_App(App):\n\tdef build(self):\n\t\tBuilder.load_file(\"interface.kv\")\n\t\tmy_manager = My_Manager()\n\t\treturn my_manager\n\t\t\n\t\t\nMy_App().run()","sub_path":"Python/Eksempler Kivy/Uferdig søk i bildemetadata/Saves/18 popup bubble dukker opp på riktig sted når man trykker på bildelista/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116128077","text":"#!/usr/bin/env python3\n\n\"\"\"\nWorker module fetching ISO 3166 from github to add facts for:\n country -memberOf-> subRegion\n subRegion -memberOf-> region\n\nIf --act-baseurl and --userid is specified, add the facts to the platform.\nIf not, print facts to stdout.\n\"\"\"\n\n\nimport argparse\nimport traceback\nfrom logging import error, warning\nfrom typing import Dict, List\n\nimport act\nimport worker\nfrom act.helpers import handle_fact\n\n\ndef parseargs() -> argparse.Namespace:\n \"\"\" Parse arguments \"\"\"\n parser = worker.parseargs('Country/region enrichment')\n parser.add_argument('--country-region-url', dest='country_region_url',\n default=\"https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.json\",\n help=\"Country region URL in json format\")\n\n return parser.parse_args()\n\n\ndef process(actapi: act.Act, country_list: 
List[Dict[str, str]]) -> None:\n \"\"\"\n Loop over all ISO-3166 countries and construct facts for\n county -memberOf-> subRegion and subRegion -memberOf-> region.\n \"\"\"\n\n for c_map in country_list:\n country_name = c_map[\"name\"]\n sub_region = c_map[\"sub-region\"]\n region = c_map[\"region\"]\n\n if country_name and sub_region:\n handle_fact(\n actapi.fact(\"memberOf\")\n .source(\"country\", country_name)\n .destination(\"subRegion\", sub_region)\n )\n else:\n warning(\"Missing name or sub-region: {}\".format(c_map))\n\n if sub_region and region:\n handle_fact(\n actapi.fact(\"memberOf\")\n .source(\"subRegion\", sub_region)\n .destination(\"region\", region)\n )\n else:\n warning(\"Missing sub-region or region: {}\".format(c_map))\n\n\nif __name__ == '__main__':\n ARGS = parseargs()\n\n try:\n process(\n act.Act(ARGS.act_baseurl, ARGS.user_id, ARGS.loglevel, ARGS.logfile, \"country-region\"),\n worker.fetch_json(ARGS.country_region_url, ARGS.proxy_string, ARGS.timeout)\n )\n except Exception:\n error(\"Unhandled exception: {}\".format(traceback.format_exc()))\n raise\n","sub_path":"country_regions.py","file_name":"country_regions.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"509909777","text":"# coding:utf-8\n\nimport tensorflow as tf\nfrom tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq\n\n\ndef get_model():\n \"\"\"构造模型\n \"\"\"\n\n learning_rate = tf.Variable(float(init_learning_rate), trainable=False, dtype=tf.float32)\n learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)\n\n encoder_inputs = []\n decoder_inputs = []\n target_weights = []\n for i in range(input_seq_len):\n encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name=\"encoder{0}\".format(i)))\n for i in range(output_seq_len + 1):\n decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name=\"decoder{0}\".format(i)))\n for i in range(output_seq_len):\n target_weights.append(tf.placeholder(tf.float32, shape=[None], name=\"weight{0}\".format(i)))\n\n # decoder_inputs左移一个时序作为targets\n targets = [decoder_inputs[i + 1] for i in range(output_seq_len)]\n\n # cell = tf.contrib.rnn.BasicLSTMCell(size)\n dropout = 1\n num_layers = 3\n cell = tf.contrib.rnn.BasicLSTMCell(size)\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)\n cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers) # 纵向上有两个LSTM\n\n # 这里输出的状态我们不需要\n outputs, _ = seq2seq.embedding_attention_seq2seq(\n encoder_inputs,\n decoder_inputs[:output_seq_len],\n cell,\n num_encoder_symbols=num_encoder_symbols,\n num_decoder_symbols=num_decoder_symbols,\n embedding_size=size,\n output_projection=None,\n feed_previous=feed_previous,\n dtype=tf.float32)\n\n # 计算加权交叉熵损失\n loss = seq2seq.sequence_loss(outputs, targets, target_weights)\n # 梯度下降优化器\n opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n # 优化目标:让loss最小化\n # update = opt.apply_gradients(opt.compute_gradients(loss))\n # 模型持久化\n saver = tf.train.Saver(tf.global_variables())\n\n return encoder_inputs, decoder_inputs, target_weights, outputs, loss, opt, saver, learning_rate_decay_op, learning_rate","sub_path":"model/Seq2Seq.py","file_name":"Seq2Seq.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"304201649","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss\nfrom 
..bases.model_base import TrainModel\n\nclass TextCNN(TrainModel):\n def __init__(self, config):\n super(TextCNN, self).__init__(config)\n\n self.kernel_sizes = config.kernel_sizes\n self.convs = torch.nn.ModuleList()\n for kernel_size in self.kernel_sizes:\n self.convs.append(torch.nn.Conv1d(config.embedding_size,\n config.num_kernels,\n kernel_size,\n padding=kernel_size - 1))\n\n self.top_k = config.top_k_max_pooling\n hidden_size = len(self.kernel_sizes) * config.num_kernels * self.top_k\n self.linear = torch.nn.Linear(hidden_size, config.num_labels)\n self.dropout = torch.nn.Dropout(p=config.hidden_layer_dropout)\n self.token_embedding = nn.Embedding(config.vocab_size, config.embedding_size)\n self.init_weights()\n\n def forward(self, input_ids,labels=None):\n embedding = self.token_embedding(input_ids)\n embedding = embedding.transpose(1, 2)\n pooled_outputs = []\n for i, conv in enumerate(self.convs):\n convolution = F.relu(conv(embedding))\n pooled = torch.topk(convolution, self.top_k)[0].view(\n convolution.size(0), -1)\n pooled_outputs.append(pooled)\n doc_embedding = torch.cat(pooled_outputs, 1)\n logits = self.dropout(self.linear(doc_embedding))\n outputs = (logits,)\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n return outputs # (loss), logits, (hidden_states), (attentions)\n","sub_path":"torchblocks/models/nn/textcnn.py","file_name":"textcnn.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"608489091","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nimport pandas as pd\r\nfrom pymongo import MongoClient\r\n\r\n\r\nclass MongoCtrler(object):\r\n # dsn = 'mongodb://localhost:27017/'\r\n # db = 'strategy_group'\r\n dsn = None\r\n db = None\r\n conn = None\r\n\r\n def __init__(self, db, dsn):\r\n if isinstance(db, str):\r\n self.db = db\r\n if isinstance(dsn, str):\r\n self.dsn = dsn\r\n self.connect()\r\n\r\n def connect(self):\r\n self.conn = MongoClient(self.dsn)\r\n\r\n def disconnect(self):\r\n self.conn.close()\r\n\r\n def get_result(self, code, tbn):\r\n \"\"\"\r\n 在mongodb中获取result\r\n\r\n :param code: str,\r\n :param tbn: str,\r\n :return res: None/dict,\r\n \"\"\"\r\n db = self.conn[self.db]\r\n res = db[tbn].find_one({'_id': code})\r\n return res\r\n\r\n\r\n","sub_path":"refpoints_UI/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"293552458","text":"#!/usr/bin/python\n# import modules\nimport datetime\n# import ConfigParser, io\nimport gc\nimport getopt\nimport importlib\nimport math\nimport os\nimport random\nimport sys\nimport textwrap\nimport time\nfrom random import shuffle\nfrom subprocess import call\nfrom modules.configuration import bcolors\nfrom modules import colorutils, configuration\nfrom modules.imagesprite import ImageSprite\nfrom PIL import Image, ImageChops, ImageDraw, ImageFilter, ImageFont, ImagePalette\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\nxPos = 320\nyPos = 0\ncolorModeDirectional = False\ncolorModes = [\"colorWheel\", \"random\", \"colorRGB\"]\nglitchRate = 0.1\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef 
drawVLine():\n\tglobal xPos, yPos\n\tif random.random() > 0.998:\n\t\tpass\n\t\t# xPos = int(random.uniform(0,config.screenWidth))\n\t\t# yPos = 0 #int(random.uniform(0,config.screenHeight))\n\tr = 0\n\tg = 0\n\tb = 0\n\tif random.random() > 0.0:\n\t\tconfig.renderDraw.rectangle(\n\t\t\t(xPos, yPos, xPos, config.screenHeight / 2 - 1), fill=(r, g, b)\n\t\t)\n\t\tconfig.renderDraw.rectangle(\n\t\t\t(xPos + 1, config.screenHeight / 2, xPos + 1, config.screenHeight),\n\t\t\tfill=(r, g, b),\n\t\t)\n\txPos -= 1\n\tif xPos < 0:\n\t\txPos = config.screenWidth\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef redrawBackGround():\n\tconfig.renderDraw.rectangle(\n\t\t(0, 0, config.screenWidth, config.screenHeight), fill=(0, 0, 0)\n\t)\n\t# config.draw.rectangle((0,0,config.screenWidth, config.screenHeight), fill = (255,0,0))\n\t# if(random.random() > .99) : gc.collect()\n\t# if(random.random() > .97) : config.renderImageFull = Image.new(\"RGBA\", (config.screenWidth, config.screenHeight))\n\treturn True\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef main(run=True):\n\tglobal config, workConfig, blocks, simulBlocks, colorModeDirectional, glitchRate\n\tgc.enable()\n\n\tprint(\"Present Loaded\")\n\n\ttry:\n\t\tconfig.vOffset = int(workConfig.get(\"images\", \"vOffset\"))\n\t\tconfig.speed = float(workConfig.get(\"images\", \"redrawSpeed\"))\n\t\tconfig.displayRows = int(workConfig.get(\"images\", \"displayRows\"))\n\t\tconfig.displayCols = int(workConfig.get(\"images\", \"displayCols\"))\n\t\tconfig.unitCount = int(workConfig.get(\"images\", \"unitCount\"))\n\t\tconfig.scalingFactor = float(workConfig.get(\"images\", \"scalingFactor\"))\n\t\tconfig.speedFactor = float(workConfig.get(\"images\", \"speedFactor\"))\n\t\tconfig.useJitter = workConfig.getboolean(\"images\", \"useJitter\")\n\t\tconfig.glitchRate = float(workConfig.get(\"images\", \"glitchRate\"))\n\t\tconfig.jitterRange = float(workConfig.get(\"images\", \"jitterRange\"))\n\t\tconfig.glitchResetRate = float(workConfig.get(\"images\", \"glitchResetRate\"))\n\t\tconfig.glitchModeRate = float(workConfig.get(\"images\", \"glitchModeRate\"))\n\t\tconfig.imageGlitchSize = int(workConfig.get(\"images\", \"imageGlitchSize\"))\n\t\tconfig.colorChage = float(workConfig.get(\"images\", \"colorChage\"))\n\t\tconfig.colorBGChage = float(workConfig.get(\"images\", \"colorBGChage\"))\n\t\tconfig.useBlink = workConfig.getboolean(\"images\", \"useBlink\")\n\t\tconfig.noTrails = workConfig.getboolean(\"images\", \"noTrails\")\n\t\tconfig.imageList = (workConfig.get(\"images\", \"imageList\")).split(\",\")\n\t\tconfig.colorMode = workConfig.get(\"images\", \"colorMode\")\n\t\tconfig.randomColorMode = workConfig.getboolean(\"images\", \"randomColorMode\")\n\n\t\tconfig.clrBlkWidth = int(workConfig.get(\"filter\", \"clrBlkWidth\"))\n\t\tconfig.clrBlkHeight = int(workConfig.get(\"filter\", \"clrBlkHeight\"))\n\t\tconfig.overlayxPosOrig = int(workConfig.get(\"filter\", \"overlayxPos\"))\n\t\tconfig.overlayyPosOrig = int(workConfig.get(\"filter\", \"overlayyPos\"))\n\t\tconfig.overlayxPos = int(workConfig.get(\"filter\", \"overlayxPos\"))\n\t\tconfig.overlayyPos = int(workConfig.get(\"filter\", \"overlayyPos\"))\n\t\tconfig.overlayChangeProb = float(workConfig.get(\"filter\", \"overlayChangeProb\"))\n\t\tconfig.overlayChangePosProb = 
float(\n\t\t\tworkConfig.get(\"filter\", \"overlayChangePosProb\")\n\t\t)\n\t\tconfig.colorOverlay = (255, 0, 255)\n\n\texcept Exception as e:\n\t\tprint(str(e))\n\n\t# for attr, value in config.__dict__.iteritems():print (attr, value)\n\tblocks = []\n\t# for i in range (0,simulBlocks) : makeBlock()\n\n\tpath = config.path + \"assets/imgs/\"\n\timageList = config.imageList\n\tglitchRate = config.glitchRate\n\n\tfor i in range(0, config.unitCount):\n\t\tdx = 0\n\t\timgLoader = ImageSprite(config)\n\t\timgLoader.debug = True\n\t\timgLoader.action = \"pan\"\n\t\timgLoader.xOffset = 0\n\t\timgLoader.yOffsetFactor = 0\n\t\timgLoader.endX = config.screenWidth\n\t\timgLoader.endY = config.screenHeight + 32\n\n\t\timgLoader.scalingFactor = config.scalingFactor\n\t\timgLoader.useJitter = config.useJitter\n\t\timgLoader.useBlink = config.useBlink\n\t\timgLoader.brightnessFactor = config.brightness * random.random()\n\t\timgLoader.config = config\n\t\timgLoader.colorMode = (\n\t\t\tconfig.colorMode\n\t\t) # \"colorRGB\" #colorWheel #random #colorRGB\n\t\timgLoader.colorModeDirectional = colorModeDirectional\n\t\tif i == 2:\n\t\t\tdx = 0\n\t\t# def make(self, img=\"\", setvX = 0, setvY = 0, processImage = True, resizeImage = True, randomizeDirection = True, randomizeColor = True):\n\t\timgLoader.make(path + imageList[i], dx, 0, True, False, False, True)\n\t\timgLoader.yOffsetChange = False\n\t\timgLoader.yOffset = 0\n\t\t# if(i == 1) : imgLoader.yOffset = 80\n\t\timgLoader.jitterRange = config.jitterRange\n\t\tblocks.append(imgLoader)\n\n\tprint(\"Running Work...\")\n\tif run:\n\t\trunWork()\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef runWork():\n\tglobal blocks, config\n\t# gc.enable()\n\tprint(\"running work.\")\n\twhile True:\n\t\titerate()\n\t\ttime.sleep(config.speed)\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef dance():\n\tglobal blocks\n\n\t# Jitter a/o glitch\n\t# everything is sideways ... 
width is height etc\n\t#\n\t# \"apparentHeight\" = width\n\t# ------------------\n\t# | |\n\t# | | \"apparentWidth\" == height\n\t# | |\n\t# ------------------\n\t#\n\t#\n\n\tif len(blocks) > 1:\n\t\tapparentWidth = blocks[1].image.size[1]\n\t\tapparentHeight = blocks[1].image.size[0]\n\t\tdy = int(random.uniform(-10, 10))\n\t\tdx = int(random.uniform(1, apparentWidth - 2))\n\t\tdx = 0\n\n\t\t# really doing \"vertical\" or y-axis glitching\n\t\t# block height is uniform but width is variable\n\n\t\tsectionWidth = int(random.uniform(2, apparentHeight - dx))\n\t\tsectionHeight = apparentWidth\n\n\t\t# 95% of the time they dance together as mirrors\n\t\tif random.random() < 0.97:\n\t\t\tcp1 = blocks[1].image.crop((dx, 0, dx + sectionWidth, sectionHeight))\n\t\t\tconfig.renderImageFull.paste(\n\t\t\t\tcp1, (int(blocks[1].x + dx), int(blocks[1].y + dy)), cp1\n\t\t\t)\n\n\t\tif len(blocks) >= 3:\n\t\t\tif random.random() < 0.97:\n\t\t\t\tcp2 = blocks[2].image.crop((dx, 0, dx + sectionWidth, sectionHeight))\n\t\t\t\tconfig.renderImageFull.paste(\n\t\t\t\t\tcp2, (int(blocks[2].x + dx), int(blocks[2].y - dy)), cp2\n\t\t\t\t)\n\n\t\t\"\"\" \n\t\t# Not sure if this is a useful variation\n\t\tif(random.random() < .25) :\n\t\t\tclr = colorutils.randomColor(random.uniform(.1,1))\n\t\t\tblocks[0].colorize(clr, True)\n\t\t\t\n\t\tif(random.random() < .1) :\n\t\t\tclr = colorutils.randomColor(random.uniform(.1,1))\n\t\t\tblocks[1].colorize(clr, True)\n\t\t\tclr = colorutils.randomColor(random.uniform(.1,1))\n\t\t\tblocks[2].colorize(clr, True)\n\t\t\"\"\"\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef glitchBox(img, r1=-10, r2=10):\n\tapparentWidth = img.size[1]\n\tapparentHeight = img.size[0]\n\tdy = int(random.uniform(r1, r2))\n\tdx = int(random.uniform(1, config.imageGlitchSize))\n\tdx = 0\n\n\t# really doing \"vertical\" or y-axis glitching\n\t# block height is uniform but width is variable\n\n\tsectionWidth = int(random.uniform(2, apparentHeight - dx))\n\tsectionHeight = apparentWidth\n\n\t# 95% of the time they dance together as mirrors\n\tif random.random() < 0.97:\n\t\tcp1 = img.crop((dx, 0, dx + sectionWidth, sectionHeight))\n\t\timg.paste(cp1, (int(0 + dx), int(0 + dy)))\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef iterate(n=0):\n\tglobal config, blocks, colorModeDirectional, colorModes\n\tglobal glitchRate\n\n\t# Clear the background and redraw all planes\n\t# if(config.noTrails) : redrawBackGround()\n\tcolorBGChaged = False\n\tcolorChaged = False\n\n\tif random.random() > 0.9 and config.randomColorMode == True:\n\t\tindex = int(random.uniform(0, 3))\n\t\tconfig.colorMode = colorModes[index]\n\n\tif random.random() < config.glitchRate:\n\t\tdY = random.uniform(-config.jitterRange, config.jitterRange)\n\telse:\n\t\tdY = 0\n\n\tfor n in range(0, len(blocks)):\n\t\tblock = blocks[n]\n\t\tblock.colorMode = config.colorMode\n\t\tblock.colorModeDirectional = colorModeDirectional\n\t\tblock.update()\n\n\t\t### Change the color of the figures\n\t\tif random.random() < config.colorChage and n > 0:\n\t\t\tclr = colorutils.randomColor(random.uniform(0.1, 1))\n\t\t\tblock.colorize(clr, True)\n\t\t\tcolorBGChaged = True\n\t\t\tfor i in range(0, 10):\n\t\t\t\tblock.glitchBox()\n\t\t\t# config.renderImageFull.paste( block.image, (int(block.x), int(block.y)), block.image )\n\n\t\t### Change the color of the 
Background -- in configs, BG is first\n\t\tif random.random() < config.colorBGChage and n == 0:\n\t\t\tclr = colorutils.randomColor(random.uniform(0.4, 1))\n\t\t\tblock.colorize(clr, True)\n\t\t\tcolorBGChaged = True\n\t\t\tfor i in range(0, 10):\n\t\t\t\tblock.glitchBox()\n\t\t\t# config.renderImageFull.paste( block.image, (int(block.x), int(block.y)), block.image )\n\n\t\tif random.random() < 0.51:\n\t\t\tif n == 1:\n\t\t\t\tblock.glitchBox(-2, 3)\n\t\t\tif n == 2:\n\t\t\t\tblock.glitchBox(-3, 2)\n\n\t\tconfig.renderImageFull.paste(\n\t\t\tblock.image, (int(block.x), int(block.y)), block.image\n\t\t)\n\n\tif random.random() < config.glitchResetRate:\n\t\tglitchRate = config.glitchRate\n\n\tif random.random() < config.glitchModeRate:\n\t\tglitchRate = 0.5\n\n\tif random.random() < 0.1 and colorBGChaged == True:\n\t\tfor i in range(0, 100):\n\t\t\tglitchBox(config.renderImageFull, -2, 2)\n\n\tif random.random() < glitchRate:\n\t\tdance()\n\n\t# Render the final full image\n\t# config.image = config.renderImageFull\n\n\tif random.random() < config.overlayChangeProb:\n\t\tconfig.colorOverlay = colorutils.getRandomRGB()\n\t\tconfig.colOverlay.colorTransitionSetup()\n\t\t# config.colorOverlay = colorutils.getRandomColorWheel()\n\tif random.random() < config.overlayChangePosProb:\n\t\tconfig.overlayyPos = 100\n\tif random.random() < config.overlayChangePosProb:\n\t\tconfig.overlayxPos = config.overlayxPosOrig\n\t\tconfig.overlayyPos = config.overlayyPosOrig\n\tcolorize(config.colorOverlay)\n\tconfig.render(\n\t\tconfig.renderImageFull,\n\t\t0,\n\t\t0,\n\t\tconfig.screenWidth,\n\t\tconfig.screenHeight,\n\t\tFalse,\n\t\tFalse,\n\t\tTrue,\n\t)\n\n\t# cleanup the list\n\t# blocks[:] = [block for block in blocks if block.setForRemoval!=True]\n\tconfig.updateCanvas()\n\n\tif len(blocks) == 0:\n\t\texit()\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef colorize(clr=(250, 0, 250), recolorize=False):\n\n\t# Colorize via overlay etc\n\tw = config.renderImageFull.size[0]\n\th = config.renderImageFull.size[1]\n\tclrBlock = Image.new(config.renderImageFull.mode, (w, h))\n\tclrBlockDraw = ImageDraw.Draw(clrBlock)\n\n\t# Color overlay on b/w PNG sprite\n\tclrBlockDraw.rectangle((0, 0, w, h), fill=(255, 255, 255))\n\tclrBlockDraw.rectangle(\n\t\t(\n\t\t\tconfig.overlayxPos,\n\t\t\tconfig.overlayyPos,\n\t\t\tconfig.clrBlkWidth + config.overlayxPos,\n\t\t\tconfig.clrBlkHeight + config.overlayyPos,\n\t\t),\n\t\tfill=clr,\n\t)\n\t\"\"\"\n\n\t\tptA = (config.overlayxPos + 10, config.overlayyPos + 20)\n\t\tptB = (config.overlayxPos + config.clrBlkWidth , config.overlayyPos)\n\t\tptC = (config.overlayxPos + config.clrBlkWidth , config.overlayyPos + config.clrBlkHeight + 20)\n\t\tptD = (config.overlayxPos, config.clrBlkHeight + config.overlayyPos)\n\t\tclrBlockDraw.polygon([ptA,ptB,ptC,ptD], fill=clr)\n\t\t\"\"\"\n\t# config.renderImageFull.paste(clrBlock, (0,0))\n\n\ttry:\n\t\tconfig.renderImageFull = ImageChops.multiply(clrBlock, config.renderImageFull)\n\t\t# imgTemp = imgTemp.convert(config.renderImageFull.mode)\n\t\t# print(imgTemp.mode, clrBlock.mode, config.renderImageFull.mode)\n\t\t# config.renderImageFull.paste(imgTemp,(0,0,w,h))\n\n\texcept Exception as e:\n\t\tprint(e, clrBlock.mode, config.renderImageFull.mode)\n\t\tpass\n\n\ndef callBack():\n\tglobal config\n\tpass\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" 
\"\"\n","sub_path":"pieces/singletons/present.py","file_name":"present.py","file_ext":"py","file_size_in_byte":12024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"315124063","text":"import pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import PolynomialFeatures, scale\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn import model_selection\nimport pickle\nimport numpy as np\nimport datetime\n\n\ndef load_price(df, split_percentage):\n df = shuffle(df)\n\n price_x = df[['PriceUpdatedDate']].values\n price_y = df['Price'].values\n\n price_x = scale(price_x)\n\n split_point = int(len(price_x) * split_percentage)\n price_x_train = price_x[:split_point]\n price_y_train = price_y[:split_point]\n price_x_test = price_x[split_point:]\n price_y_test = price_y[split_point:]\n\n return price_x_train, price_y_train, price_x_test, price_y_test\n\n \n\ndef normalize_data(df):\n df['PriceUpdatedDate'] = df['PriceUpdatedDate'].apply(extract_date) # format date to be ordinal date\n\n df = df.query('FuelCode == \"E10\"')\n df = df.query('ServiceStationName ==\"Metro Fuel Marrickville\"')\n\n df1 = df[['PriceUpdatedDate', 'Price']]\n print(df1)\n df1 = df1.sort_values(by=['PriceUpdatedDate']) \n return df1\n\ndef extract_date(x):\n x = str(x).split(' ') # gets just the date\n x = x[0]\n date = x.split('/')\n date_date = datetime.date(int(date[2]), int(date[1]), int(date[0])) #date(year, month, day)\n return date_date.toordinal()\n \nif __name__ == \"__main__\":\n #df = pd.read_excel(\"fuel_data/service-station-price-history-june-2017.xlsx\")\n df = pd.read_excel('fuel_data/price_history_checks_oct2019.xlsx', skiprows=2)\n df = df.dropna()\n print(df)\n #df = pd.read_excel(\"fuel_data_may-september_2017.xlsx\")\n #df = pd.read_excel(\"marrickville_4_months.xlsx\")\n #df = pd.read_excel(\"marrickville.xlsx\")\n df = normalize_data(df)\n #df = df[['PriceUpdatedDate', 'Price']]\n\n price_x_train, price_y_train, price_x_test, price_y_test = load_price(df, split_percentage=0.7)\n \n linear_model = LinearRegression()\n knn_model = KNeighborsRegressor(n_neighbors=2)\n poly3_model = make_pipeline(PolynomialFeatures(3), Ridge())\n poly4_model = make_pipeline(PolynomialFeatures(4), Ridge())\n gbr_model = GradientBoostingRegressor(loss='ls', max_depth=6)\n decision_tree_model = DecisionTreeRegressor(random_state=0)\n\n linear_model.fit(price_x_train, price_y_train)\n #knn_model.fit(price_x_train, price_y_train)\n poly3_model.fit(price_x_train, price_y_train)\n poly4_model.fit(price_x_train, price_y_train)\n gbr_model.fit(price_x_train, price_y_train)\n decision_tree_model.fit(price_x_train, price_y_train)\n\n\n #filename = 'model.sav'\n #pickle.dump(linear_model, open(filename, 'wb'))\n\n y_pred = linear_model.predict(price_x_test)\n y_pred1 = poly3_model.predict(price_x_test)\n y_pred2 = poly4_model.predict(price_x_test)\n y_pred3 = gbr_model.predict(price_x_test)\n y_pred4 = decision_tree_model.predict(price_x_test)\n\n #print(price_x_test)\n #print(type(price_x_test))\n #print(type(price_y_test))\n #print(price_y_test)\n\n #y_pred2 = knn_model.predict(price_x_test)\n\n #for i in range(len(price_y_test)):\n # print(\"Linear 
Expected: \", price_y_test[i], \"Linear Predicted:\", y_pred[i])\n\n #for i in range(len(price_y_test)):\n # print(\"KNN Expected: \", price_y_test[i], \"KNN Predicted:\", y_pred2[i])\n \n linearConfidence = linear_model.score(price_x_test, price_y_test)\n #knnConfidence = knn_model.score(price_x_test, price_y_test)\n poly3Confidence = poly3_model.score(price_x_test, price_y_test)\n poly4Confidence = poly4_model.score(price_x_test, price_y_test)\n gbrConfidence = gbr_model.score(price_x_test, price_y_test)\n dtrConfidence = decision_tree_model.score(price_x_test, price_y_test)\n\n\n print(\"Linear confidence is: \", linearConfidence)\n #print(\"KNN confidence is: \", knnConfidence)\n print(\"Polynomial 3 confidence is: \", poly3Confidence)\n print(\"Polynomial 4 confidence is: \", poly4Confidence)\n print(\"Gradient Boosting confidence is: \", gbrConfidence)\n print(\"Decision Tree confidence is: \", dtrConfidence)\n print(\"\")\n print(\"Linear mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred))\n print(\"Polynomial 3 mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred1))\n print(\"Polynomial 4 mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred2))\n print(\"Gradient Boosting Regressor mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred3))\n print(\"Decision Tree Regressor mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred4))\n print(\"\")\n print(\"Linear R^2 score: \", r2_score(price_y_test, y_pred))\n print(\"Polynomial 3 R^2 score: \", r2_score(price_y_test, y_pred1))\n print(\"Polynomial 4 R^2 score: \", r2_score(price_y_test, y_pred2))\n print(\"Gradient Boosting Regressor R^2 score: \", r2_score(price_y_test, y_pred3))\n print(\"Decision Tree Regressor R^2 score: \", r2_score(price_y_test, y_pred4))\n\n #print(\"KNN Mean squared error: %.2f\" % mean_squared_error(price_y_test, y_pred2))\n","sub_path":"fuel_model/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
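# --- Added usage sketch (not part of the dataset record above; all names here are illustrative) ---
# The record leaves model persistence commented out, and its scale() call is fit on the
# whole date column, so a single new date cannot be transformed consistently. This is a
# minimal, self-contained sketch of the same idea with a reusable fitted scaler; the toy
# prices are made up.
import datetime
import pickle

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

days = np.array([[datetime.date(2019, 10, d).toordinal()] for d in range(1, 11)])
prices = np.array([140.0 + 0.5 * d for d in range(10)])  # toy E10 prices

scaler = StandardScaler().fit(days)              # unlike scale(), this can transform new data
model = LinearRegression().fit(scaler.transform(days), prices)

blob = pickle.dumps(model)                       # the equivalent of the commented-out dump
restored = pickle.loads(blob)

new_day = [[datetime.date(2019, 10, 15).toordinal()]]
print(restored.predict(scaler.transform(new_day)))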
+{"seq_id":"59624431","text":"# A variable holds \"a single value\"\r\n# A list holds multiple values + multiple data types\r\n# [ element1, element2, element3, ...]\r\n#\r\n# scores = []\r\n# scores = [100, 30, 45, 19]\r\n# print(scores[:2])\r\n\r\n# lst = [10, 20, 30, 40 ,50]\r\n# print(len(lst))\r\n\r\n# 1. Use a loop to print only the values that are 25 or greater\r\n#\r\nlst = [10, 20, 30, 40, 50]\r\n\r\n\r\n# 2. Use a loop to print only the multiples of 3\r\n# lst = [10, 20, 30, 40 ,50]\r\n#\r\n# result = []\r\n# for i in lst:\r\n# result.append(i * 3)\r\n#\r\n# print(result)\r\n\r\n\r\n\r\n# 3. Use a loop to print the total sum\r\n# sum = 0\r\n# lst = [10, 20, 30, 40 ,50]\r\n# for i in range(lst[0]):\r\n# sum = sum + i\r\n# print(sum)\r\n# 4. Read an index number and print the value at that index, e.g. input 2 => prints 30\r\n\r\n# 5. Read a value and print its index, e.g. input 20 => prints 1\r\n# 6. Print the count of even numbers and the count of positive numbers in the list below\r\nlst = [10, 30, 13, 5, -1]\r\n","sub_path":"python/0710/basic/StringEx09.py","file_name":"StringEx09.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
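# --- Added solution sketch for the exercise record above (one possible answer set) ---
lst = [10, 20, 30, 40, 50]

# 1. values that are 25 or greater
for i in lst:
    if i >= 25:
        print(i)

# 3. total sum (the commented attempt in the record iterates range(lst[0]) instead of lst)
total = 0
for i in lst:
    total += i
print(total)

# 6. count of even numbers and count of positive numbers
lst2 = [10, 30, 13, 5, -1]
even = sum(1 for i in lst2 if i % 2 == 0)
positive = sum(1 for i in lst2 if i > 0)
print(even, positive)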
+{"seq_id":"354974888","text":"#Creating a fully-connected classifier using clean,\n#uncorrupted MNIST data on ten digits\n\n#Loss minimizes to 0.00028285 over 100 epochs using cross entropy and adam\n#Achieves accuracy of 98.00% on testing data\n\nimport numpy as np\nfrom keras.models import Sequential\nimport keras\nfrom keras.datasets import mnist\nfrom keras.layers import Dense\n\n\n#Import dataset and normalize to [0,1]\n(data_train, labels_train), (data_test, labels_test) = mnist.load_data()\ndata_train = data_train/255.0\ndata_test = data_test/255.0\n\n#Flatten dataset (New shape for training and testing set is (60000,784) and (10000, 784))\ndata_train = data_train.reshape((len(data_train), np.prod(data_train.shape[1:])))\ndata_test = data_test.reshape((len(data_test), np.prod(data_test.shape[1:])))\n\n#Create labels as one-hot vectors\nlabels_train = keras.utils.np_utils.to_categorical(labels_train, num_classes=10)\nlabels_test = keras.utils.np_utils.to_categorical(labels_test, num_classes=10)\n\n\n#Create the model\ndef fc_model():\n\n model = Sequential()\n model.add(Dense(100, activation=\"relu\", use_bias=True, kernel_initializer=\"normal\", input_dim=784))\n model.add(Dense(100, activation=\"relu\", kernel_initializer=\"normal\"))\n model.add(Dense(10, activation=\"softmax\", kernel_initializer=\"normal\"))\n return model\n\n\n\nmodel = fc_model()\n\n\n#Compile model using cross entropy as loss and adam as optimizer\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n#Train the model\nmodel.fit(data_train, labels_train, validation_data=(data_test, labels_test), epochs=100, batch_size=200, shuffle=True)\n\n#Save the model\n#model.save('fc-100-100-10.h5')\n\n#Evaluate classifier\nscores = model.evaluate(data_test, labels_test)\n\n#Print accuracy\nprint (\"Accuracy: %.2f%%\" %(scores[1]*100))\n\n","sub_path":"Semi-white_Box_Attack_(FGM)/FC_classifer.py","file_name":"FC_classifer.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
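# --- Added inference sketch (illustrative; assumes the classifier script above has just run,
# so `model`, `data_test` and `labels_test` are in scope) ---
# Single images can be classified by taking the argmax of the softmax output.
import numpy as np

probs = model.predict(data_test[:5])             # shape (5, 10): one softmax row per image
pred_digits = np.argmax(probs, axis=1)
true_digits = np.argmax(labels_test[:5], axis=1) # undo the one-hot encoding
print("predicted:", pred_digits, "actual:", true_digits)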
+{"seq_id":"560177471","text":"# Copyright 2021 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport types\n\n\nclass BaseRepr:\n \"\"\"Base representation from which all other representation objects inherit.\n Primarily implements automatic serialization into YAML/YAML-like string formats,\n along with defining other universally used properties.\n\n Args:\n name:\n Name of the internal object described by this representation.\n \"\"\"\n\n def __init__(self, name: str):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n def serialize(self) -> str:\n \"\"\"Universal function used to serialize this representation.\n\n Returns:\n A string containing the serialized version of this representation. Example\n output:\n\n 'VariableRepr:\n name: variable\n vtype: tensor\n dtype: float'\n \"\"\"\n out = self.__class__.__name__ + '\\n'\n for name in dir(self):\n value = self.__getattribute__(name)\n if name[0] != '_' and not isinstance(value, (types.FunctionType, types.MethodType)):\n value = value.__class__.__name__\n out += name + ': ' + str(value) + '\\n'\n return out\n","sub_path":"towhee/base_repr.py","file_name":"base_repr.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
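# --- Added usage sketch (hypothetical subclass; not part of the towhee record above) ---
# serialize() walks dir(self) and, per the code, replaces each public non-callable
# attribute's value with its *type name* (value.__class__.__name__).
class VariableRepr(BaseRepr):
    def __init__(self, name, vtype):
        super().__init__(name)
        self._vtype = vtype

    @property
    def vtype(self):
        return self._vtype

print(VariableRepr('variable', 'tensor').serialize())
# Expected shape of the output (type names, not the stored values):
# VariableRepr
# name: str
# vtype: str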
+{"seq_id":"534411226","text":"#%%\nimport wrf as w\nimport xarray as xr\nfrom netCDF4 import Dataset\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport cartopy.crs as ccrs\nimport cmaps\nimport os \nimport sys\nsys.path.append('/home/zzhzhao/code/python/wrf-test-10')\nfrom zMap import set_grid, add_NamCo\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef load_wrfdata(data_dir):\n wrf_files = [f for f in os.listdir(data_dir) if f[9]=='2']\n wrflist = [Dataset(os.path.join(data_dir, wrf_file)) for wrf_file in wrf_files]\n\n rainc = w.getvar(wrflist, 'RAINC', timeidx=w.ALL_TIMES, method='cat')\n rainnc = w.getvar(wrflist, 'RAINNC', timeidx=w.ALL_TIMES, method='cat')\n total_rain = rainc + rainnc\n\n prec = total_rain.diff('Time', 1)#.sel(Time=pd.date_range('2017-06-01 3:00:00', '2017-06-8 00:00:00', freq='3H'))\n # prec = total_rain.isel(Time=-1)\n lats, lons = w.latlon_coords(prec)\n time = total_rain.Time.to_index() \n\n return prec, lats, lons, time \n\n\n\n#%%\nif __name__ == '__main__':\n data_dir1 = '/home/zzhzhao/Model/wrfout/test-10'\n data_dir2 = '/home/zzhzhao/Model/wrfout/test-10-removelake'\n prec1, lats, lons, time = load_wrfdata(data_dir1)\n prec2, lats, lons, time = load_wrfdata(data_dir2) \n\n lat_range = (28, 34)\n lon_range = (86, 94)\n\n ### CMFD data\n file_path = '/home/zzhzhao/code/python/wrf-test-10/data/prec_CMFD_201706.nc'\n cmfd = xr.open_dataset(file_path)['prec'].sel(lat=slice(lat_range[0],lat_range[1]), lon=slice(lon_range[0],lon_range[1])) * 3\n lat, lon =cmfd.lat, cmfd.lon\n\n ### Accumulated precipitation\n # prec_sum = prec.sel(Time=second_period).sum(dim='Time')\n cmfd_sum = cmfd.sum(dim='time')\n prec1_sum = prec1.sum(dim='Time')\n prec2_sum = prec2.sum(dim='Time')\n\n\n#%%\n ### Accumulated precipitation distribution\n proj = ccrs.PlateCarree()\n # crange = np.arange(0, 200+10, 10)\n labels = ['WRF', 'WRF-nolake', 'CMFD', 'Difference']\n fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9,11), subplot_kw={'projection':proj})\n fig.subplots_adjust(hspace=0.2, wspace=0.15)\n for i in range(2):\n for j in range(2):\n set_grid(axes[i, j], lat=[30, 31.5], lon=[90, 91.5], span=.5)\n add_NamCo(axes[i, j])\n for j, prec_sum in enumerate([prec1_sum, prec2_sum]):\n c = axes[0][j].pcolor(lons, lats, prec_sum, vmin=0, vmax=250, cmap=cmaps.WhiteBlueGreenYellowRed, transform=proj)\n axes[0][j].set_title(labels[j], fontsize=14, weight='bold')\n axes[1][0].pcolor(lon, lat, cmfd_sum, vmin=0, vmax=250, cmap=cmaps.WhiteBlueGreenYellowRed, transform=proj)\n axes[1][0].set_title(labels[2], fontsize=14, weight='bold')\n \n c2 = axes[1][1].pcolor(lons, lats, prec1_sum-prec2_sum, vmin=-60, vmax=60, cmap='RdBu', transform=proj)\n axes[1][1].set_title(labels[3], fontsize=14, weight='bold')\n cb = fig.colorbar(c, ax=axes, orientation='horizontal', pad=0.05, shrink=0.9, aspect=35)\n cb.set_label('Precipitation / mm', fontsize=14)\n \n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n axins = inset_axes(axes[1][1],\n width=\"5%\", # width = 5% of parent_bbox width\n height=\"100%\", # height : 100%\n loc=6,\n bbox_to_anchor=(1.05, 0., 1, 1),\n bbox_transform=axes[1][1].transAxes,\n borderpad=0,\n )\n \n cb2 = fig.colorbar(c2, cax=axins)#, orientation='vertical', shrink=0.6, aspect=25)\n # cb2.set_label('Precipitation / mm', fontsize=14)\n # axes[0][1].set_visible(False)\n\n fig.savefig('/home/zzhzhao/code/python/wrf-test-10/fig/prec.jpg', dpi=300)\n\n","sub_path":"python/wrf-test-10/prec_sum.py","file_name":"prec_sum.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"186014536","text":"#\n# @lc app=leetcode.cn id=36 lang=python\n#\n# [36] Valid Sudoku\n#\n\n# @lc code=start\nfrom collections import defaultdict\n\n\nclass Solution(object):\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n row = [defaultdict(int) for _ in range(9)]\n col = [defaultdict(int) for _ in range(9)]\n box = [defaultdict(int) for _ in range(9)]\n for i in range(9):\n for j in range(9):\n index = i // 3 * 3 + j // 3\n if board[i][j] != '.':\n row[i][int(board[i][j])] += 1\n col[j][int(board[i][j])] += 1\n box[index][int(board[i][j])] += 1\n if row[i][int(board[i][j])] > 1 or col[j][int(board[i][j])] > 1 or box[index][int(board[i][j])] > 1:\n return False\n return True\n\n\n# @lc code=end\n\n","sub_path":"Week_07/36.有效的数独.py","file_name":"36.有效的数独.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
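# --- Added usage example for the validator above (board is the classic LeetCode #36 sample) ---
board = [
    ["5","3",".",".","7",".",".",".","."],
    ["6",".",".","1","9","5",".",".","."],
    [".","9","8",".",".",".",".","6","."],
    ["8",".",".",".","6",".",".",".","3"],
    ["4",".",".","8",".","3",".",".","1"],
    ["7",".",".",".","2",".",".",".","6"],
    [".","6",".",".",".",".","2","8","."],
    [".",".",".","4","1","9",".",".","5"],
    [".",".",".",".","8",".",".","7","9"],
]
print(Solution().isValidSudoku(board))  # True: no digit repeats in any row, column, or 3x3 box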
+{"seq_id":"322222150","text":"# VDLS - 12/06/18 - Exercises Chapter 6 Python Crash Course\r\n# 6.1 - Person\r\nperson_1 = {\r\n 'first': 'daniel',\r\n 'last': 'aragon',\r\n 'age': 25,\r\n 'city': 'toluca',\r\n }\r\n# 6.2 Favorite Numbers\r\nfavorite_N = {\r\n 'daniel': 1,\r\n 'ana': 2,\r\n 'eliezer': 3,\r\n 'camila': 4,\r\n 'mafer': 5,\r\n }\r\n# 6.3/6.4 Glossary\r\nglossary = {\r\n 'lambda_func': 'can take any number of arguments, but only one expression',\r\n 'augmented_op': '+=,-=,*=,/=',\r\n 'dictionary': 'stores objects and lists with custom keys',\r\n 'boolean': 'true or false',\r\n }\r\n# Looping Through All Key-Value Pairs 1\r\nfor key in glossary:\r\n print(key + ' can be defined as ' + glossary[key] + '.\n')\r\n# Looping Through All Key-Value Pairs 2\r\nfor k, v in glossary.items():\r\n print(k + ' can be defined as ' + v)\r\n# Looping Through All the Keys in a Dictionary\r\nfor item in glossary.keys():\r\n print(item.title())\r\n# 6.5 Rivers\r\nrivers = {\r\n 'atoyac': 'oaxaca',\r\n 'usumacinta': 'chiapas',\r\n 'lerma': 'mexico',\r\n }\r\nfor n, c in rivers.items():\r\n print('The river ' + n.title() + ' is located in the state of ' + c.title())\r\n# 6.6 Polling\r\nfavorite_languages = {\r\n 'jen': 'python',\r\n 'sarah': 'c',\r\n 'edward': 'ruby',\r\n 'phil': 'python',\r\n 'fer': 'html',\r\n 'caro': 'javascript',\r\n }\r\npoll_users = ['fer','jen','david','robert','caro','phil','edzard']\r\nfor user in poll_users:\r\n if user in favorite_languages.keys():\r\n print('Thank you ' + user.title() + ' for taking the poll.')\r\n else:\r\n print(user.title() + ' please take the poll.')\r\n# 6.7 People\r\nperson_2 = {\r\n 'first': 'mariana',\r\n 'last': 'villacaña',\r\n 'age': 25,\r\n 'city': 'mexico',\r\n }\r\nperson_3 = {\r\n 'first': 'david',\r\n 'last': 'liceaga',\r\n 'age': 25,\r\n 'city': 'leon'\r\n }\r\npeople = [person_1, person_2, person_3]\r\nfor person in people:\r\n full_name = person['first'].title() + ' ' + person['last'].title()\r\n age_loc = ' lives in ' + person['city'].title() + \\\r\n ' and is ' + str(person['age']) + ' years old.'\r\n print(full_name + age_loc)\r\n\r\n# Exercises 6.8-6.12 are about the same topic as 6.7 only different dictionaries.","sub_path":"PCC/Ex_C6_PCC.py","file_name":"Ex_C6_PCC.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"371294911","text":"from django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render\n\n\nclass Http403Middleware(object):\n def process_exception(self, request, exception):\n if isinstance(exception, PermissionDenied):\n return render(request,\n \"403.html\", \n context={\"message\": str(exception)},\n status=403,\n )\n","sub_path":"HVZ/HVZ/main/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
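# --- Added configuration sketch (assumed settings.py snippet; the dotted path is a guess
# from the record's sub_path, and the record uses the pre-Django-1.10 process_exception
# hook, so it would be registered via MIDDLEWARE_CLASSES) ---
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'HVZ.main.middleware.Http403Middleware',  # renders 403.html on PermissionDenied
)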
+{"seq_id":"290689151","text":"import pygame,sys\r\nfrom pygame.locals import *\r\npygame.init()\r\n\r\n# Create and name the window\r\nwin=pygame.display.set_mode((1137,640))\r\npygame.display.set_caption(\"Learn to program\")\r\n\r\n# images and assets\r\nwalkRight = [pygame.image.load('img/playerGreen_walk1.png'), pygame.image.load('img/playerGreen_walk2.png'), pygame.image.load('img/playerGreen_walk3.png'), pygame.image.load('img/playerGreen_walk4.png')]\r\nwalkLeft = [pygame.image.load('img/playerGreen_stand.png'), pygame.image.load('img/playerGreen_walk2.png'), pygame.image.load('img/playerGreen_walk3.png'), pygame.image.load('img/playerGreen_walk4.png')]\r\nbg = pygame.image.load('img/backgroundMenu.png')\r\nchar = pygame.image.load('img/playerGreen_up1.png')\r\n\r\n# fonts\r\nmiFuente = pygame.font.Font(\"tipografia/Pacifico-Regular.ttf\",60)\r\ntexto= pygame.font.Font(\"tipografia/VarelaRound-Regular.ttf\",20)\r\nmiTexto= miFuente.render(\"Welcome\",0,(200,60,80))\r\ntextoFelicitaciones= texto.render(\"Hi there, learn to program...\",0,(200,60,80))\r\ntextoFelicitaciones1= texto.render(\"We will show you some programs,\",0,(200,60,80))\r\ntextoFelicitaciones2= texto.render(\" so let yourself be surprised!\",0,(200,60,80))\r\n\r\n\r\n\r\n\r\n# clock variable\r\nclock=pygame.time.Clock()\r\n\r\n# Classes for the sprites\r\nclass jugador (object):\r\n def __init__(self , x,y,width,height):\r\n self.x=70\r\n self.y=475\r\n self.width=width\r\n self.height=height\r\n self.vel=10\r\n self.isJump=False\r\n self.jumpCount=10\r\n self.left=False\r\n self.right=False\r\n self.walkCount=0\r\n self.screenWidth=1137\r\n self.hitbox = (self.x , self.y+4, 70,70)\r\n self.rect=(180,403, 100,10)\r\n \r\n\r\n\r\n # Animation handling\r\n def draw(self, win):\r\n if self.walkCount + 8 >= 16:\r\n self.walkCount = 0\r\n\r\n if self.left:\r\n win.blit(walkLeft[self.walkCount//4], (self.x,self.y))\r\n self.walkCount += 1\r\n elif self.right:\r\n win.blit(walkRight[self.walkCount//4], (self.x,self.y))\r\n self.walkCount +=1\r\n else:\r\n win.blit(char, (self.x,self.y))\r\n pygame.display.update()\r\n self.hitbox = (self.x , self.y, 40 ,60) \r\n pygame.draw.rect(win, (255,239,189), self.hitbox,2)\r\n \r\n\r\n# Platform class number 1 \r\nclass pisoOne(object):\r\n def __init__(self , x,y,width,height):\r\n self.x=180\r\n self.y=403\r\n self.width=width\r\n self.height=height\r\n self.hitbox = (self.x , self.y, 100,10)\r\n\r\n def draw(self,win):\r\n self.hitbox = (self.x , self.y, 100,10)\r\n pygame.draw.rect(win, (44,84,96,0), self.hitbox,2)\r\n \r\n# Platform class number 2 \r\nclass pisoTwo(object):\r\n def __init__(self , x,y,width,height):\r\n self.x=450\r\n self.y=403\r\n self.width=width\r\n self.height=height\r\n self.hitbox = (self.x , self.y, 100,10)\r\n\r\n def draw(self,win):\r\n self.hitbox = (self.x , self.y, 100,10)\r\n pygame.draw.rect(win, (44,84,96,0), self.hitbox,2) \r\n\r\n\r\n# Platform class number 3\r\nclass pisoThree(object):\r\n def __init__(self , x,y,width,height):\r\n self.x=705\r\n self.y=403\r\n self.width=width\r\n self.height=height\r\n self.hitbox = (self.x , self.y, 100,10)\r\n\r\n def draw(self,win):\r\n self.hitbox = (self.x , self.y, 100,10)\r\n pygame.draw.rect(win, (44,84,96,0), self.hitbox,2) \r\n\r\n\r\n\r\n# Method that redraws the screen every frame\r\ndef reDrawFondo():\r\n win.blit(bg,(0,0))\r\n player.draw(win)\r\n win.blit(miTexto,(40,5))\r\n win.blit(textoFelicitaciones,(650,150))\r\n win.blit(textoFelicitaciones1,(650,180))\r\n win.blit(textoFelicitaciones2,(650,210))\r\n pygame.display.update() \r\n\r\n \r\ndef programaBeca():\r\n print (\"MY PROJECT IS TO WRITE A PROGRAM THAT TELLS US WHO EARNED A SCHOLARSHIP\")\r\n alumno=input(\"Enter the student's name:\")\r\n ciencias=float(input(\"Enter the science grade:\"))\r\n sociales=float(input(\"Enter the social studies grade:\"))\r\n español=float(input(\"Enter the Spanish grade:\"))\r\n ingles=float(input(\"Enter the English grade:\"))\r\n matematicas=float(input(\"Enter the math grade:\"))\r\n religion=float(input(\"Enter the religion grade:\"))\r\n fisica=float(input(\"Enter the physics grade:\"))\r\n etica=float(input(\"Enter the ethics grade:\"))\r\n tecnologia=float(input(\"Enter the technology grade:\"))\r\n filosofia=float(input(\"Enter the philosophy grade:\"))\r\n emprendimiento=float(input(\"Enter the entrepreneurship grade:\"))\r\n materias=ciencias+sociales+español+ingles+matematicas+religion+fisica+etica+tecnologia+filosofia+emprendimiento\r\n promedio=(materias/11)\r\n print (\"MY AVERAGE IS:\")\r\n print (promedio)\r\n # conditional logic for the SCHOLARSHIP\r\n if (promedio>=4.5):\r\n print (\"CONGRATULATIONS, YOU WON THE FULL SCHOLARSHIP\")\r\n elif (promedio<4.5) and (promedio>=4.0):\r\n print (\"CONGRATULATIONS, YOU WON A HALF SCHOLARSHIP \")\r\n else:\r\n print (\"YOU PAY FULL TUITION\")\r\n\r\n pass\r\n\r\n\r\ndef adivinanza():\r\n # Random number guessing game\r\n import random\r\n\r\n intentos = 0\r\n\r\n print (\"WELCOME TO... GUESS THE NUMBER\\n\")\r\n print(\"The game is to guess the number I am thinking of; if you do, you win\")\r\n nombre =input(\"Tell me, what is your name?\\n\")\r\n\r\n x = random.randint (1, 20)\r\n\r\n print (\"All right \" + nombre + \", welcome to the game\" )\r\n\r\n while intentos < 8:\r\n intentos = intentos + 1\r\n \r\n numero = float(input(\"Tell me a number from 1 to 20\\n\"))\r\n numero = int (numero)\r\n if numero < x:\r\n print (\" Hmmm, too low\")\r\n if numero > x:\r\n print (\"Whoa whoa, easy, too high\")\r\n if numero == x:\r\n break\r\n\r\n if numero == x:\r\n print (\"Correct!! \"+nombre )\r\n print ( \" you got it in %d tries\" % (intentos))\r\n print (\"Thanks for playing, next time it will not be so easy\")\r\n \r\n if numero != x:\r\n print (\"Oops! Sorry, you are out of chances\")\r\n print (\"Maybe next time, see you!\")\r\n\r\n\r\n \r\n'''\r\nMain loop that keeps the window alive & handles events such as\r\nclosing the window, the arrow keys, etc.\r\n'''\r\n\r\n# Instantiate the objects of the corresponding classes for the collision checks\r\nplayer = jugador(70,415,100,10)\r\nplayer.left=player.x\r\nplayer.top=player.y\r\nfloorOne=pisoOne(180,403, 100,10)\r\nfloorTwo=pisoTwo(450,403, 100,10)\r\nfloorThree=pisoThree(750,403, 100,10)\r\nrun=True\r\nwhile run:\r\n\r\n # First platform collision check\r\n \r\n oldx=player.left\r\n oldy=player.top\r\n if player.hitbox[1] < floorOne.hitbox[1] + floorOne.hitbox[3] and player.hitbox[1] + player.hitbox[3] > floorOne.hitbox[1]:\r\n if player.hitbox[0] + player.hitbox[2] > floorOne.hitbox[0] and player.hitbox[0] < floorOne.hitbox[0] +floorOne.hitbox[2]:\r\n programaBeca() \r\n \r\n\r\n # Second platform collision check\r\n oldx=player.left\r\n posy=player.top\r\n if player.hitbox[1] < floorTwo.hitbox[1] + floorTwo.hitbox[3] and player.hitbox[1] + player.hitbox[3] > floorTwo.hitbox[1]:\r\n if player.hitbox[0] + player.hitbox[2] > floorTwo.hitbox[0] and player.hitbox[0] < floorTwo.hitbox[0] +floorTwo.hitbox[2]:\r\n adivinanza()\r\n \r\n \r\n\r\n # Third platform collision check\r\n oldx=player.left\r\n oldy=player.top\r\n if player.hitbox[1] < floorThree.hitbox[1] + floorThree.hitbox[3] and player.hitbox[1] + player.hitbox[3] > floorThree.hitbox[1]:\r\n if player.hitbox[0] + player.hitbox[2] > floorThree.hitbox[0] and player.hitbox[0] < floorThree.hitbox[0] +floorThree.hitbox[2]:\r\n print(\"GO FOR IT! AND KEEP LEARNING\") \r\n\r\n \r\n\r\n\r\n # tick using the clock variable\r\n clock.tick(16)\r\n \r\n '''\r\n Events are handled with a \"FOR\" loop and validated with \"IF\" checks\r\n '''\r\n \r\n for evento in pygame.event.get():\r\n # Window close event\r\n if evento.type== QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n # Keyboard arrow events\r\n key = pygame.key.get_pressed()\r\n\r\n # Left arrow key\r\n if key[pygame.K_LEFT] and player.x>player.vel:\r\n player.x -=player.vel\r\n player.left=True\r\n player.right=False\r\n # Right arrow key (this branch was garbled in the source; reconstructed from the standard pygame movement/jump pattern)\r\n elif key[pygame.K_RIGHT] and player.x<player.screenWidth - player.width:\r\n player.x +=player.vel\r\n player.right=True\r\n player.left=False\r\n else:\r\n player.left=False\r\n player.right=False\r\n player.walkCount=0\r\n\r\n if not player.isJump:\r\n if key[pygame.K_UP]:\r\n player.isJump=True\r\n else:\r\n if player.jumpCount >=-10:\r\n neg=1\r\n if player.jumpCount < 0:\r\n neg =-1\r\n player.y-=(player.jumpCount ** 2) *0.5*neg\r\n player.jumpCount -=1\r\n\r\n else:\r\n player.isJump=False\r\n player.jumpCount =10\r\n \r\n \r\n reDrawFondo()\r\n\r\n \r\n \r\n\r\n \r\n","sub_path":"Game_ Coder Dojo/OOP_juego.py","file_name":"OOP_juego.py","file_ext":"py","file_size_in_byte":10101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"421246698","text":"#!usr/bin/env python\n'''\nTransformation functions: cv2.warpAffine, cv2.warpPerspective\ncv2.warpAffine takes a 2*3 transformation matrix\ncv2.warpPerspective takes a 3*3 transformation matrix\nTo build the rotation matrix, OpenCV provides the function cv2.getRotationMatrix2D.\nThe example below rotates the image by 30 degrees with a 0.6 scale factor.\n'''\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('G:\\\\CODE\\\\opencv\\\\me.jpg')\nrow,col = img.shape[0:2]\n\n# The first argument is the rotation center, the second the rotation angle,\n# and the third the scale factor applied after rotation.\n# Setting the center, scale factor and window size avoids clipping after rotation.\nM = cv2.getRotationMatrix2D((col/2,row/2),30,0.6)\n\n# The third argument is the size of the output image\ndst = cv2.warpAffine(img,M,(2*col,2*row))\nwhile True:\n cv2.imshow('img',dst)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncv2.destroyAllWindows()","sub_path":"Image Processing/Transformations/Rotation.py","file_name":"Rotation.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"276367799","text":"#! /usr/bin/env python\n\nimport os.path\nimport sys\n\nsys.path.append(\"../scripts\")\nfrom petmatrix import SparseMatrixHeader\n\nfrom subprocess import run\n\n\n\nout = '.'\n\n\ndef configure(ctx):\n pass\n\n\nfrom waflib.Task import Task\n\n\ndef run_and_check(cmd):\n print(\"running \" + ' '.join(cmd))\n info = run(cmd)\n if info.returncode != 0:\n sys.exit()\n\n\nn_emissions = 1000000\n\n\nclass gen_description(Task):\n def run(self):\n run_and_check([\"../2d_barrel_describe_scanner\", \"--big-barrel\", \"-o\", \"big_barrel\"])\n\n\nclass gen_matrix(Task):\n def run(self):\n run_and_check([\"../2d_barrel_matrix\", \"-c\", \"m_big_ref.cfg\",\n \"--detector-file\", \"big_barrel_dets.txt\",\n \"--detector-file-sym\", \"big_barrel_syms.txt\",\n \"-e\", \"%d\" % (n_emissions,), \"-o\", \"m_big\",\n \"-v\"])\n\n\nclass to_full(Task):\n def run(self):\n run_and_check([\"../2d_barrel_matrix\", \"-c\", \"m_big.cfg\",\n \"--detector-file\", \"big_barrel_dets.txt\",\n \"--detector-file-sym\", \"big_barrel_syms.txt\",\n \"-o\", \"f_big\", \"-f\", \"m_big\"])\n\nclass gen_phantom(Task):\n def run(self):\n n_phantom_emissions = 100000000\n run_and_check([\"../3d_hybrid_phantom\", \"-c\", \"f_big.cfg\", \"-o\", \"p_sphere.txt\",\n \"-e\", \"%d\" % (n_phantom_emissions,), \"s_sphere.json\", \"-v\"])\n\n\n\n# # Alternatively prepare phantom with GATE\n\n\nclass reconstruct(Task):\n def run(self):\n run_and_check([\"../3d_hybrid_reconstruction\", \"-c\", \"f_big.cfg\", \"--system\", \"f_big\", \"-o\", \"r_big\",\n \"-i\", \"10\", \"-v\", \"p_sphere.txt\"])\n\n# # Reconstruct\n# if recalculate:\n# run_and_check([\"../3d_hybrid_reconstruction\", \"-c\", \"m_big.cfg\", \"--system\", \"f_big\", \"-o\", \"r_big\",\n# \"-i\", \"10\", \"-v\", \"p_sphere.txt\"])\n\n\n\ndef build(ctx): \n desc = gen_description(env=ctx.env)\n desc.set_inputs(ctx.path.find_resource('../2d_barrel_describe_scanner'))\n desc.set_outputs([ctx.path.find_or_declare(\"big_barrel_dets.txt\"),\n ctx.path.find_or_declare(\"big_barrel_syms.txt\")])\n ctx.add_to_group(desc)\n\n mat 
= gen_matrix(env=ctx.env)\n print(desc.outputs)\n mat.set_inputs(\n [ctx.path.find_resource('m_big_ref.cfg')] + desc.outputs\n )\n mat.set_outputs([ctx.path.find_or_declare('m_big.cfg'),\n ctx.path.find_or_declare('m_big')])\n ctx.add_to_group(mat)\n\n to_f = to_full(env=ctx.env)\n to_f.set_inputs(mat.outputs)\n to_f.set_outputs([ctx.path.find_or_declare('f_big.cfg'), ctx.path.find_or_declare('f_big')])\n ctx.add_to_group(to_f)\n\n phantom = gen_phantom(env=ctx.env)\n phantom.set_inputs(\n [\n to_f.outputs[0],\n ctx.path.find_resource('s_sphere.json')\n ]\n )\n phantom.set_outputs([ctx.path.find_or_declare(\"p_sphere.txt\")])\n ctx.add_to_group(phantom)\n\n rec = reconstruct(env=ctx.env)\n rec.set_inputs(phantom.outputs+[ctx.path.find_or_declare('f_big.cfg')])\n rec.set_outputs(ctx.path.find_or_declare('r_big'));\n ctx.add_to_group(rec)\n\n\n\n\n","sub_path":"testbed/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69309674","text":"import argparse\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom rgbmatrix import RGBMatrix, RGBMatrixOptions\nimport time\n\nparser = argparse.ArgumentParser(description=\"RGB LED matrix Example\")\nparser.add_argument(\"--video\", type=str, required = True, help=\"video file name\")\nparser.add_argument(\"--horizontal\", type=int, default = 1, help=\"horizontal count\")\nparser.add_argument(\"--vertical\", type=int, default = 1, help=\"vertical count\")\nargs = parser.parse_args()\n\nFPS = 30.0\nSLEEP = 1.0 / FPS\n# Configuration for the matrix\noptions = RGBMatrixOptions()\noptions.cols = 64\noptions.rows = 32\noptions.chain_length = args.horizontal * args.vertical\noptions.parallel = 1\noptions.brightness = 80\noptions.pwm_bits = 11\noptions.gpio_slowdown = 1.0\noptions.show_refresh_rate = 1\noptions.hardware_mapping = 'regular' # If you have an Adafruit HAT: 'adafruit-hat'\n#options.hardware_mapping = 'adafruit-hat' # If you have an Adafruit HAT: 'adafruit-hat'\noptions.pwm_dither_bits = 0\n\nmatrix = RGBMatrix(options = options)\n\ncanvas_w = args.horizontal * options.cols\ncanvas_h = args.vertical * options.rows\n\nprint('Matrix H:%d W:%d'%(matrix.height, matrix.width))\nprint('Image size H:%d W:%d'%(canvas_h, canvas_w))\n\ncap = cv2.VideoCapture(args.video)\n\ndouble_buffer = matrix.CreateFrameCanvas()\n\nwhile cap.isOpened():\n imgs = []\n start = time.time()\n for i in range(2):\n ret, im = cap.read()\n if(ret == False):\n break\n im = cv2.resize(im, (canvas_w, canvas_h))\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n imgs.append(im)\n\n if(ret == False):\n break\n\n im_pils = []\n for img in imgs:\n for x in range (args.vertical):\n i = img[options.rows * x: options.rows * (x + 1), 0:canvas_w]\n h, w, c = i.shape\n # print('split image H:%d W:%d'%(h, w))\n if ((args.vertical - x) % 2) == 0: #-> flip\n i = cv2.flip(i, 0) #vertical\n i = cv2.flip(i, 1) #horizontal\n if(x == 0):\n final = i\n else:\n final = cv2.hconcat([final, i]) #stack horizontally\n\n h, w, c = final.shape\n # print('final image H:%d W:%d'%(h, w))\n im_pil = Image.fromarray(final)\n im_pils.append(im_pil)\n # matrix.Clear()\n #matrix.SetImage(im_pil, 0)\n\n double_buffer.SetImage(im_pils[0])\n double_buffer.SetImage(im_pils[1], canvas_w)\n double_buffer = matrix.SwapOnVSync(double_buffer)\n\n elapsed = time.time() - start\n #print('elapsed:%f'%(elapsed))\n time.sleep(max([0, SLEEP - 
elapsed]))\n","sub_path":"led/raspberryPi/nbym_video.py","file_name":"nbym_video.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224906930","text":"import os\nimport os.path\n\nimport torch.utils.data as data\nfrom PIL import Image\n\n\ndef make_dataset(root, is_train):\n if is_train:\n\n img_txt = open(os.path.join(root, 'train_category.txt'))\n\n img_name = []\n img_category = []\n\n for img_list in img_txt:\n x = img_list.split()\n img_name.append([os.path.join(root, x[0]), (os.path.join(root, x[1]))])\n img_category.append(x[2])\n\n img_txt.close()\n\n\n\n return img_name, img_category\n\n\n else:\n\n img_txt = open(os.path.join(root, 'val_category.txt'))\n\n img_name = []\n img_category = []\n\n for img_list in img_txt:\n x = img_list.split()\n img_name.append([os.path.join(root, x[0]), (os.path.join(root, x[1]))])\n img_category.append(x[2])\n\n img_txt.close()\n\n return img_name, img_category\n\n\n\nclass ImageFolder(data.Dataset):\n def __init__(self, root, joint_transform=None, transform=None, target_transform=None, is_train=True, batch_size=4):\n self.root = root\n self.imgs, self.imgs_category = make_dataset(root, is_train)\n self.joint_transform = joint_transform\n self.transform = transform\n self.target_transform = target_transform\n self.batch_size = batch_size\n\n def __getitem__(self, index):\n img_path, gt_path = self.imgs[index % len(self.imgs)]\n img = Image.open(img_path).convert('RGB')\n target = Image.open(gt_path)\n if self.joint_transform is not None:\n img, target = self.joint_transform(img, target)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n img_category = self.imgs_category[index % len(self.imgs)]\n\n\n return img, target, img_category\n\n def __len__(self):\n return len(self.imgs) + self.batch_size - (len(self.imgs) % self.batch_size)\n\n\n","sub_path":"dataset_category.py","file_name":"dataset_category.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490717894","text":"import pathlib\nimport logging\n\nLOG = logging.getLogger(__name__)\n\nLOG_PATH = pathlib.Path('./logs').expanduser()\n\n\ndef action(bot, update):\n LOG.debug('Save message to log')\n\n if update.message.chat_id is None:\n return\n\n chat_name = update.message.chat.title\n username = update.message.from_user.first_name\n date = update.message.date\n\n log_dir = LOG_PATH / chat_name / str(date.year) / str(date.month) / str(date.day)\n\n if not log_dir.is_dir():\n log_dir.mkdir(parents=True)\n\n log_path = log_dir / 'logs.log'\n\n with open(log_path, 'a') as log_file:\n log_file.write('[%s] [%s] %s\\n' % (date.strftime('%H:%M:%S'), username, update.message.text))\n","sub_path":"actions/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"262538180","text":"import torch \nimport numpy as np\nimport clip\n\nfrom tqdm import tqdm\n\ndef zeroshot_classifier(model, classnames):\n with torch.no_grad():\n zeroshot_weights = []\n for classname in tqdm(classnames):\n texts = [classname] #format with class\n texts = clip.tokenize(texts).cuda() #tokenize\n class_embeddings = model.encode_text(texts) #embed with text encoder\n class_embeddings /= class_embeddings.norm(dim=-1, 
keepdim=True)\n class_embedding = class_embeddings.mean(dim=0)\n class_embedding /= class_embedding.norm()\n zeroshot_weights.append(class_embedding)\n zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda()\n return zeroshot_weights\n\ndef get_topk_labels(output, target, id2label_dict, topk=5):\n pred = np.argpartition(output.cpu().numpy()[0], -topk)[-topk:]\n predicted_labels = list(map(lambda x: id2label_dict[str(x)], pred))\n ground_truth = id2label_dict[str(target.cpu().numpy().item())]\n return predicted_labels, ground_truth\n\ndef is_wrongly_labelled(output, target, topk=1):\n pred = output.topk(topk, 1, True, True)[1].t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n is_correct = correct.cpu().numpy().item()\n return not is_correct\n\ndef logits_to_pred(output, topk=1):\n pred = output.topk(topk, 1, True, True)[1].t()\n return pred.cpu().numpy()[0].item()","sub_path":"code/ExperimentModules/zero_shot.py","file_name":"zero_shot.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163029193","text":"# vi: ts=4 sw=4 sts=4 expandtab\n'''\n:mod:`ophyd.utils.epics_pvs` - EPICS-related utilities\n======================================================\n\n.. module:: ophyd.utils.epics_pvs\n :synopsis:\n'''\n\nfrom __future__ import print_function\nimport ctypes\nimport threading\nimport Queue as queue\nimport warnings\n\nimport epics\n\nfrom . import errors\nfrom .decorators import cached_retval\n\n__all__ = ['split_record_field',\n 'strip_field',\n 'record_field',\n 'check_alarm',\n 'MonitorDispatcher',\n 'get_pv_form',\n ]\n\n\ndef split_record_field(pv):\n '''Splits a pv into (record, field)\n\n Parameters\n ----------\n pv : str\n the pv to split\n\n Returns\n -------\n record : str\n field : str\n '''\n if '.' in pv:\n record, field = pv.rsplit('.', 1)\n else:\n record, field = pv, ''\n\n return record, field\n\n\ndef strip_field(pv):\n '''Strip off the field from a record'''\n return split_record_field(pv)[0]\n\n\ndef record_field(record, field):\n '''Given a record and a field, combine them into\n a pv of the form: record.FIELD\n '''\n record = strip_field(record)\n return '%s.%s' % (record, field.upper())\n\n\ndef check_alarm(base_pv, stat_field='STAT', severity_field='SEVR',\n reason_field=None, reason_pv=None,\n min_severity=errors.MinorAlarmError.severity):\n \"\"\"Raise an exception if an alarm is set\n\n Raises\n ------\n AlarmError (MinorAlarmError, MajorAlarmError)\n \"\"\"\n stat_pv = '%s.%s' % (base_pv, stat_field)\n severity_pv = '%s.%s' % (base_pv, severity_field)\n if reason_field is not None:\n reason_pv = '%s.%s' % (base_pv, reason_field)\n reason = None\n\n severity = epics.caget(severity_pv)\n\n if severity >= min_severity:\n try:\n error_class = errors.get_alarm_error(severity)\n except KeyError:\n pass\n else:\n severity = epics.caget(severity_pv, as_string=True)\n alarm = epics.caget(stat_pv, as_string=True)\n if reason_pv is not None:\n reason = epics.caget(reason_pv, as_string=True)\n\n message = 'Alarm status %s [severity %s]' % (alarm, severity)\n if reason is not None:\n message = '%s: %s' % (message, reason)\n\n raise error_class(message)\n\n return True\n\n\nclass MonitorDispatcher(epics.ca.CAThread):\n '''A monitor dispatcher which works with pyepics\n\n The monitor dispatcher works around having callbacks from libca threads.\n Using epics CA calls (caget, caput, etc.) 
from those callbacks is not\n possible without this dispatcher workaround.\n\n ... note:: Without `all_contexts` set, only the callbacks that are run with\n the same context as the the main thread are affected.\n\n ... note:: Ensure that you call epics.ca.use_initial_context() at startup in\n the main thread\n\n Parameters\n ----------\n all_contexts : bool, optional\n re-route _all_ callbacks from _any_ context to the dispatcher callback\n thread\n timeout : float, optional\n callback_logger : logging.Logger, optional\n A logger to notify about failed callbacks\n\n Attributes\n ----------\n main_context : ctypes long\n The main CA context\n callback_logger : logging.Logger\n A logger to notify about failed callbacks\n queue : Queue\n The event queue\n '''\n\n # TODO this needs to be setup by the session manager.\n def __init__(self, all_contexts=False, timeout=0.1,\n callback_logger=None):\n epics.ca.CAThread.__init__(self, name='monitor_dispatcher')\n\n self.daemon = True\n self.queue = queue.Queue()\n\n # The dispatcher thread will stop if this event is set\n self._stop_event = threading.Event()\n self.main_context = epics.ca.current_context()\n self.callback_logger = callback_logger\n\n self._all_contexts = bool(all_contexts)\n self._timeout = timeout\n\n self.start()\n\n def run(self):\n '''The dispatcher itself'''\n self._setup_pyepics(True)\n\n while not self._stop_event.is_set():\n try:\n callback, args, kwargs = self.queue.get(True, self._timeout)\n except queue.Empty:\n pass\n else:\n try:\n callback(*args, **kwargs)\n except Exception as ex:\n if self.callback_logger is not None:\n self.callback_logger.error(ex, exc_info=ex)\n\n self._setup_pyepics(False)\n epics.ca.detach_context()\n\n def stop(self):\n '''Stop the dispatcher thread and re-enable normal callbacks'''\n self._stop_event.set()\n\n def _setup_pyepics(self, enable):\n # Re-route monitor events to our new handler\n if enable:\n fcn = self._monitor_event\n else:\n fcn = epics.ca._onMonitorEvent\n\n epics.ca._CB_EVENT = ctypes.CFUNCTYPE(None, epics.dbr.event_handler_args)(fcn)\n\n def _monitor_event(self, args):\n if self._all_contexts or self.main_context == epics.ca.current_context():\n if callable(args.usr):\n if not hasattr(args.usr, '_disp_tag') or args.usr._disp_tag is not self:\n args.usr = lambda orig_cb=args.usr, **kwargs: \\\n self.queue.put((orig_cb, [], kwargs))\n args.usr._disp_tag = self\n\n return epics.ca._onMonitorEvent(args)\n\n\ndef waveform_to_string(value, type_=str, delim=''):\n '''Convert a waveform that represents a string into an actual Python string\n\n Parameters\n ----------\n value\n The value to convert\n type_ : type, optional\n Python type to convert to\n delim : str, optional\n delimiter to use when joining string\n '''\n try:\n value = delim.join(chr(c) for c in value)\n except TypeError:\n value = type_(value)\n\n try:\n value = value[:value.index('\\0')]\n except (IndexError, ValueError):\n pass\n\n return value\n\n\n@cached_retval\ndef get_pv_form():\n '''Get the PV form that should be used for pyepics\n\n Due to a bug in certain versions of PyEpics, form='time' cannot be used\n with some large arrays.\n\n native: gives time.time() timestamps from this machine\n time: gives timestamps from the PVs themselves\n\n Returns\n -------\n {'native', 'time'}\n '''\n\n def _naive_parse_version(version):\n try:\n version = version.lower()\n\n # Strip off the release-candidate version number (best-effort)\n if 'rc' in version:\n version = version[:version.index('rc')]\n\n version_tuple = 
tuple(int(v) for v in version.split('.'))\n except:\n return None\n\n return version_tuple\n\n try:\n from pkg_resources import parse_version\n except ImportError:\n parse_version = _naive_parse_version\n\n version = parse_version(epics.__version__)\n\n if version is None:\n warnings.warn('Unrecognized PyEpics version; using local timestamps',\n ImportWarning)\n return 'native'\n\n elif version <= parse_version('3.2.3'):\n warnings.warn('PyEpics versions <= 3.2.3 will use local timestamps (version: %s)' %\n epics.__version__,\n ImportWarning)\n return 'native'\n else:\n return 'time'\n\n\ndef records_from_db(fn):\n '''Naively parse db/template files looking for record names\n\n Returns\n -------\n records : list\n [(record type, record name), ...]\n '''\n\n ret = []\n for line in open(fn, 'rt').readlines():\n line = line.strip()\n\n if line.startswith('#'):\n continue\n\n if not (line.startswith('record') or line.startswith('grecord')):\n continue\n\n if '(' not in line:\n continue\n\n line = line[line.index('(') + 1:]\n if ',' not in line:\n continue\n\n rtype, record = line.split(',', 1)\n rtype = rtype.strip()\n record = record.strip()\n\n if record.startswith('\"'):\n # Surrounded by quotes, easy to parse\n record = record[1:]\n record = record[:record.index('\"')]\n else:\n # No quotes, and macros may contain parentheses\n # Find the first non-matching parenthesis and\n # that should denote the end of the record name\n #\n # $(P)$(R)Record)\n # ^\n\n in_paren = 0\n for i, c in enumerate(record):\n if c == '(':\n in_paren += 1\n elif c == ')':\n in_paren -= 1\n\n if in_paren < 0:\n record = record[:i]\n break\n\n ret.append((rtype, record))\n\n return ret\n","sub_path":"ophyd/utils/epics_pvs.py","file_name":"epics_pvs.py","file_ext":"py","file_size_in_byte":8927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"456611587","text":"from django.db import models\nfrom core import models as core_models\nfrom users.models import User\n\n\nclass Weapon(models.Model):\n\n \"\"\" Weapon model definition\"\"\"\n\n SCISSORS = 0\n ROCK = 1\n PAPER = 2\n NAME_CHOICES = (\n (SCISSORS, \"가위\"),\n (ROCK, \"바위\"),\n (PAPER, \"보\"),\n )\n\n name = models.IntegerField(choices=NAME_CHOICES)\n\n def __str__(self):\n if self.name == 0:\n return \"가위\"\n elif self.name == 1:\n return \"바위\"\n else:\n return \"보\"\n\n\nclass Challenge(core_models.TimeStampedModel):\n\n \"\"\"challenge model definition\"\"\"\n\n attacker = models.ForeignKey(\n User, related_name=\"attacker\", on_delete=models.SET_NULL, null=True\n )\n defender = models.ForeignKey(\n User, related_name=\"defender\", on_delete=models.SET_NULL, null=True\n )\n attacker_choice = models.ForeignKey(\n Weapon, related_name=\"attacker_choice\", on_delete=models.SET_NULL, null=True\n )\n defender_choice = models.ForeignKey(\n Weapon,\n related_name=\"defender_choice\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n winner = models.ForeignKey(\n User, related_name=\"winner\", on_delete=models.SET_NULL, null=True, blank=True\n )\n\n def __str__(self):\n return f\"{self.id} - {self.attacker} vs {self.defender}\"\n\n def get_winner(self):\n if self.attacker_choice and self.defender_choice:\n delta = self.attacker_choice.name - self.defender_choice.name\n if delta == 1 or delta == -2:\n return self.attacker\n elif delta == 0:\n return \"무승부\"\n else:\n return self.defender\n else:\n return \"경기중\"\n\n get_winner.short_description = 
\"winner\"\n","sub_path":"games/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"140632329","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom transformers import AlbertTokenizer, BertTokenizer\n\n\ndef print_from_tokenizer(title, path, sentences, tokenizer_class):\n print('Loading %s tokenizer from %s...' % (title, path))\n tokenizer = tokenizer_class.from_pretrained(path, do_lower_case=False)\n for sentence in sentences:\n print(tokenizer.tokenize(sentence))\n\n\nsentence1 = 'Qual time da NFL representou a AFC no Super Bowl 50?'\nsentence2 = 'A quem a Virgem Maria supostamente apareceu em 1858 em Lourdes, França?'\nsentences = [sentence1, sentence2]\n\nprint_from_tokenizer(title='Bert', path='bert-base-multilingual-cased', tokenizer_class=BertTokenizer,\n sentences=sentences)\n# print_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_unigram_32k',\n# tokenizer_class=AlbertTokenizer, sentences=sentences)\n# print_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_bpe_32k',\n# tokenizer_class=AlbertTokenizer, sentences=sentences)\n# print_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/brwac_wiki_eduardo',\n# tokenizer_class=AlbertTokenizer, sentences=sentences)\n# print_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_guillou_15k',\n# tokenizer_class=AlbertTokenizer, sentences=sentences)\n# print_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_unigram_30k',\n# tokenizer_class=AlbertTokenizer, sentences=sentences)\nprint_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_unigram_uncased_30k',\n tokenizer_class=AlbertTokenizer, sentences=sentences)\nprint_from_tokenizer(title='Albert', path='/media/discoD/models/sentencepiece/model_albert_base_en',\n tokenizer_class=AlbertTokenizer, sentences=[\n 'Yucaipa owned Dominick\\'s before selling the chain to Safeway in 1998 for $2.5 billion.',\n 'Yucaipa bought Dominick\\'s in 1995 for $693 million and sold it to Safeway for $1.8 billion in 1998.'])\n","sub_path":"sentencepiece_test.py","file_name":"sentencepiece_test.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"143225473","text":"import pandas as pd\nfrom optimalflow.autoPipe import autoPipe\nfrom optimalflow.funcPP import PPtools\nfrom optimalflow.autoPP import dynaPreprocessing\nfrom optimalflow.autoFS import dynaFS_clf,dynaFS_reg\nfrom optimalflow.autoCV import evaluate_model,dynaClassifier,dynaRegressor,fastClassifier,fastRegressor\nfrom optimalflow.utilis_func import pipeline_splitting_rule,reset_parameters,update_parameters\n\nimport json\nimport os\n\n###------------------------ Estimators Searching Space Settings --------------------------------------\njson_path_s = os.path.join(os.path.dirname(\"./\"), 'settings.json')\nwith open(json_path_s, encoding='utf-8') as data_file:\n para_data = json.load(data_file)\ndata_file.close()\n\nreset_flag = para_data['confirm_reset']\n\ncustom_space = {\n \"cls_mlp\":para_data['space_set']['cls']['mlp'],\n \"cls_lr\":para_data['space_set']['cls']['lgr'],\n \"cls_svm\":para_data['space_set']['cls']['svm'],\n \"cls_ada\":para_data['space_set']['cls']['ada'],\n \"cls_xgb\":para_data['space_set']['cls']['xgb'],\n 
\"cls_rgcv\":para_data['space_set']['cls']['rgcv'],\n \"cls_rf\":para_data['space_set']['cls']['rf'],\n \"cls_gb\":para_data['space_set']['cls']['gb'],\n \"cls_lsvc\":para_data['space_set']['cls']['lsvc'],\n \"cls_hgboost\":para_data['space_set']['cls']['hgboost'],\n \"cls_sgd\":para_data['space_set']['cls']['sgd'],\n \"reg_lr\":para_data['space_set']['reg']['lr'],\n \"reg_svm\":para_data['space_set']['reg']['svm'],\n \"reg_mlp\":para_data['space_set']['reg']['mlp'],\n \"reg_ada\":para_data['space_set']['reg']['ada'],\n \"reg_rf\":para_data['space_set']['reg']['rf'],\n \"reg_gb\":para_data['space_set']['reg']['gb'],\n \"reg_xgb\":para_data['space_set']['reg']['xgb'],\n \"reg_tree\":para_data['space_set']['reg']['tree'],\n \"reg_hgboost\":para_data['space_set']['reg']['hgboost'],\n \"reg_rgcv\":para_data['space_set']['reg']['rgcv'],\n \"reg_cvlasso\":para_data['space_set']['reg']['cvlasso'],\n \"reg_huber\":para_data['space_set']['reg']['huber'],\n \"reg_sgd\":para_data['space_set']['reg']['sgd'],\n \"reg_knn\":para_data['space_set']['reg']['knn']\n}\n\n\ntry:\n if(reset_flag == \"reset_default\"):\n reset_parameters()\n if(reset_flag == \"reset_settings\"):\n json_s = os.path.join(os.path.dirname(\"./\"), 'reset_settings.json')\n with open(json_s,'r') as d_file:\n para = json.load(d_file)\n json_s = os.path.join(os.path.dirname(\"./\"), 'settings.json')\n w_file = open(json_s, \"w\",encoding='utf-8')\n w_file. truncate(0)\n json.dump(para, w_file)\n w_file.close()\n if(reset_flag == \"no_confirm\"): \n reset_parameters()\n for i in custom_space.keys():\n if custom_space[i]!={}:\n model_type, algo_name=i.split('_')\n update_parameters(mode = model_type,estimator_name=algo_name,**custom_space[i])\nexcept:\n print(\"Failed to Set Up the Searching Space, will Use the Default Settings!\")\n\n\n###------------------------ Settings PCTE Parameters--------------------------------------\n\njson_path = os.path.join(os.path.dirname(\"./\"), 'webapp.json')\nwith open(json_path, encoding='utf-8') as data_file:\n para_data = json.load(data_file)\ndata_file.close()\n\n# Custom settings for the autoPP module\ncustom_pp = {}\ncustom_pp['encode_band'] = [int(para_data['autoPP']['encode_band'])]\ncustom_pp['scaler'] = para_data['autoPP']['scaler']\ncustom_pp['low_encode'] = para_data['autoPP']['low_encode']\ncustom_pp['high_encode'] = para_data['autoPP']['high_encode']\nwinsor_list = []\nfor i in para_data['autoPP']['winsorizer']:\n winsor_list.append((float(i),float(i)))\ncustom_pp['winsorizer'] = winsor_list\ncustom_pp['sparsity'] = [float(para_data['autoPP']['sparsity'])]\ncustom_pp['cols'] = [int(para_data['autoPP']['cols'])]\n\n# Custom settings for the autoFS module\ncustom_fs = {\n \"feature_num\":int(para_data['autoFS']['feature_num']),\n \"model_type_fs\":para_data['autoFS']['model_type_fs'],\n \"algo_fs\":para_data['autoFS']['algo_fs']\n}\n\n# Custom settings for the autoCV module\ncustom_cv = {\n \"model_type_cv\":para_data['autoCV']['model_type_cv'],\n \"method_cv\":para_data['autoCV']['method_cv'],\n \"algo_cv\":para_data['autoCV']['algo_cv']\n}\n\n\n# Custom settings for input dataset\ncustom_input = {\n \"filename\": para_data['filename'],\n \"label_col\": para_data['label_col']\n}\n\n###------------------------ Run a PCTE Workflow--------------------------------------\ndf = pd.read_csv('./input/' + custom_input['filename'])\n\nif custom_fs['model_type_fs'] == \"cls\" and custom_cv['model_type_cv'] == \"cls\":\n if custom_cv['method_cv'] == 'fastClassifier':\n # Create Pipeline Cluster 
Traversal Experiments by autoPipe\n pipe = autoPipe(\n [(\"autoPP\",dynaPreprocessing(custom_parameters = custom_pp, label_col = custom_input['label_col'], model_type = \"cls\")),\n (\"datasets_splitting\",pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13)),\n (\"autoFS\",dynaFS_clf(custom_selectors = custom_fs['algo_fs'],fs_num = custom_fs['feature_num'], random_state=13, cv = 5, in_pipeline = True, input_from_file = False)),\n (\"autoCV\",fastClassifier(custom_estimators = custom_cv['algo_cv'],random_state = 13,cv_num = 5,in_pipeline = True, input_from_file = False)),\n (\"model_evaluate\",evaluate_model(model_type = \"cls\"))])\n elif custom_cv['method_cv'] == 'dynaClassifier':\n # Create Pipeline Cluster Traversal Experiments by autoPipe\n pipe = autoPipe(\n [(\"autoPP\",dynaPreprocessing(custom_parameters = custom_pp, label_col = custom_input['label_col'], model_type = \"cls\")),\n (\"datasets_splitting\",pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13)),\n (\"autoFS\",dynaFS_clf(custom_selectors = custom_fs['algo_fs'],fs_num = custom_fs['feature_num'], random_state=13, cv = 5, in_pipeline = True, input_from_file = False)),\n (\"autoCV\",dynaClassifier(custom_estimators = custom_cv['algo_cv'],random_state = 13,cv_num = 5,in_pipeline = True, input_from_file = False)),\n (\"model_evaluate\",evaluate_model(model_type = \"cls\"))])\nelif custom_fs['model_type_fs'] == \"reg\" and custom_cv['model_type_cv'] == \"reg\":\n if custom_cv['method_cv'] == 'fastRegressor':\n # Create Pipeline Cluster Traversal Experiments by autoPipe\n pipe = autoPipe(\n [(\"autoPP\",dynaPreprocessing(custom_parameters = custom_pp, label_col = custom_input['label_col'], model_type = \"reg\")),\n (\"datasets_splitting\",pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13)),\n (\"autoFS\",dynaFS_reg(custom_selectors = custom_fs['algo_fs'],fs_num = custom_fs['feature_num'], random_state=13, cv = 5, in_pipeline = True, input_from_file = False)),\n (\"autoCV\",fastRegressor(custom_estimators = custom_cv['algo_cv'],random_state = 13,cv_num = 5,in_pipeline = True, input_from_file = False)),\n (\"model_evaluate\",evaluate_model(model_type = \"reg\"))])\n elif custom_cv['method_cv'] == 'dynaRegressor':\n # Create Pipeline Cluster Traversal Experiments by autoPipe\n pipe = autoPipe(\n [(\"autoPP\",dynaPreprocessing(custom_parameters = custom_pp, label_col = custom_input['label_col'], model_type = \"reg\")),\n (\"datasets_splitting\",pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13)),\n (\"autoFS\",dynaFS_reg(custom_selectors = custom_fs['algo_fs'],fs_num = custom_fs['feature_num'], random_state=13, cv = 5, in_pipeline = True, input_from_file = False)),\n (\"autoCV\",dynaRegressor(custom_estimators = custom_cv['algo_cv'],random_state = 13,cv_num = 5,in_pipeline = True, input_from_file = False)),\n (\"model_evaluate\",evaluate_model(model_type = \"reg\"))])\n\nDICT_PREPROCESSING,DICT_FEATURE_SELECTION,DICT_MODELS_EVALUATION,DICT_DATA,dyna_report= pipe.fit(df)\n\n###------------------------ Save Outputs --------------------------------------\nimport pickle\ndef save_obj(obj, name ):\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)\n# Save the outputs as pickles for further analysis and 
visualization\nsave_obj(DICT_PREPROCESSING,\"dict_preprocess\")\nsave_obj(DICT_DATA,\"dict_data\")\nsave_obj(DICT_MODELS_EVALUATION,\"dict_models_evaluate\")\nsave_obj(dyna_report,\"dyna_report\")\n\n###------------------------ Load PCTE Outputs --------------------------------------\n# Load the outputs from pickles\nDICT_PREP = load_obj(\"dict_preprocess\")\ndyna_report = load_obj(\"dyna_report\")\nDICT_DATA = load_obj(\"dict_data\")\n\n###------------------------ PCTE Outputs Visualization --------------------------------------\nimport shutil\ndef move(src, dest):\n shutil.move(src, dest)\n\ntry:\n from optimalflow.autoViz import autoViz\n viz = autoViz(preprocess_dict=DICT_PREP,report=dyna_report)\n viz.clf_model_retrieval(metrics='accuracy')\n move('./Pipeline Cluster Retrieval Diagram.html','./templates/diagram.html')\n viz = autoViz(report = dyna_report)\n viz.clf_table_report()\n move('./Pipeline Cluster Model Evaluation Report.html','./templates/report.html')\nexcept Exception:\n try:\n viz = autoViz(report = dyna_report)\n viz.reg_table_report()\n move('./static/img/no-cls-output.html','./templates/diagram.html')\n move('./Pipeline Cluster Model Evaluation Report.html','./templates/report.html')\n except Exception:\n print('No Visualization Outputs found!')\n\nprint(\"PCTE Workflow's Done. More details of results are in LogsViewer & Visualization Page. Thanks for using OptimalFlow!! --Tony Dong\")\n\ninput(\"\\n\\nPress the enter key to exit.\")","sub_path":"optimalflow/webapp/webapp_script.py","file_name":"webapp_script.py","file_ext":"py","file_size_in_byte":9763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"259701647","text":"import os\nimport subprocess\nimport sys\n# set path tool\nthreads = \"20\"\nMAX_FRAG_LEN = \"2000\"\nindexname = \"GRCm38\"\n# the multimapping flag\nmultimap = \"4\"\n# folder with Mus musculus (house mouse) genome assembly GRCm38 (mm10)\nGRCm38indexpath = \"/home/spuccio/AnnotationBowtie2/Mus_musculus/GRCm38.p6\"\n# bowtie2-build path\nbowtiebuild2path = \"/home/spuccio/miniconda3/envs/chipseq_env/bin/bowtie2-build\"\n# bowtie2 path\nbowtie2path = \"/home/spuccio/miniconda3/envs/chipseq_env/bin/bowtie2\"\n# path folder\nprojectdir = \"/mnt/datadisk2/spuccio/SP011_Integration_ChipSeqGSE98264_RnaSeqSP010/\"\n# path out file sam\nmappingout = \"\".join([projectdir, \"bowtie2_mapping_GSE98264/\"])\n# input file path\nraw_data_dir = \"\".join([projectdir, \"raw_data_GSE98264/\"])\n# Path\nGRCm38fasta = \"\".join([GRCm38indexpath, \"/GRCm38.primary_assembly.fa\"])\nraw_fastq = {\"Treg_Irf4_r1\": [\"SRR5483021_1.fastq\", \"SRR5483021_2.fastq\"],\n \"Treg_Irf4_r2\": [\"SRR5483022_1.fastq\", \"SRR5483022_2.fastq\"]}\nraw_fastq2 = {\"Treg_Irf4_r3\": \"SRR5483020_1.fastq\"}\n\n\ndef createdir(dirpath):\n \"\"\"\n Make the directory if it does not already exist\n :param dirpath: string with path and directory name\n :return:\n \"\"\"\n if not os.path.exists(dirpath):\n os.mkdir(dirpath)\n print(\" \".join([\"Directory\", dirpath.split(\"/\")[-1], \"Created\"]))\n else:\n print(\" \".join([\"Directory\", dirpath.split(\"/\")[-1], \"already exists\"]))\n\n\ndef checkindex(indexpath, fastafile, indexname):\n \"\"\"\n Check if the genome index already exists and create a new one if needed\n :param indexpath: Output folder\n :param fastafile: genome fasta file\n :param indexname: index name\n :return:\n \"\"\"\n for i in range(1,5):\n if os.path.isfile(\"\".join([indexpath, \"/\", indexname, \".\", str(i), \".bt2\"])):\n print(\"Genome index %s.%d.bt2 already exists.\" % (indexname,i))\n else:\n try:\n os.chdir(GRCm38indexpath)\n subprocess.check_call(\" \".join([bowtiebuild2path, \"--threads\", threads, fastafile, indexname]),\n shell=True)\n except subprocess.CalledProcessError:\n print(\"ERROR.Index build with bowtie2-build failed. Stop execution.\")\n sys.exit(1)\n else:\n print(\"Index %d OK\" % i)\n return indexname\n\n\ndef bowtie2mappingpairedend(indexname, fastqname, pathoutput,samname):\n \"\"\"\n Bowtie2 mapping Paired-end mode\n :param indexname: path with index name\n :param fastqname: fastq file\n :param samname: output SAM name\n :param pathoutput: path folder output\n :return:\n \"\"\"\n try:\n subprocess.check_call(\" \".join([bowtie2path, \"-p\", threads, \"-q\", \"--local\", \"-k\", multimap,\n \"-x\", indexname, \"-X\", MAX_FRAG_LEN,\n \"-1\", \"\".join([raw_data_dir, fastqname[0]]),\n \"-2\", \"\".join([raw_data_dir, fastqname[1]]),\n \"-S\", \"\".join([pathoutput, samname,\".sam\"])]),\n shell=True)\n except subprocess.CalledProcessError:\n print(\"ERROR.Mapping of %s with bowtie2 failed. Stop execution.\" % fastqname)\n sys.exit(1)\n else:\n print(\"Mapping of %s with bowtie2 complete.\" % fastqname)\n\n\ndef bowtie2mappingsinglend(indexname, fastqname, pathoutput,samname):\n \"\"\"\n Bowtie2 mapping Single-end mode\n :param indexname: path with index name\n :param fastqname: fastq file\n :param samname: output SAM name\n :param pathoutput: path folder output\n :return:\n \"\"\"\n try:\n subprocess.check_call(\" \".join([bowtie2path, \"-p\", threads, \"-q\", \"--local\", \"-k\", multimap,\n \"-x\", indexname,\n \"\".join([raw_data_dir, fastqname]),\n \"-S\", \"\".join([pathoutput, samname,\".sam\"])]),\n shell=True)\n except subprocess.CalledProcessError:\n print(\"ERROR.Mapping of %s with bowtie2 failed. Stop execution.\" % fastqname)\n sys.exit(1)\n else:\n print(\"Mapping of %s with bowtie2 complete.\" % fastqname)\n\n\nif __name__ == \"__main__\":\n index = checkindex(GRCm38indexpath, GRCm38fasta, indexname)\n createdir(mappingout)\n os.chdir(raw_data_dir)\n for key, value in raw_fastq2.items():\n bowtie2mappingsinglend(\"\".join([GRCm38indexpath, \"/\", index]), value, mappingout, key)\n for key, value in raw_fastq.items():\n bowtie2mappingpairedend(\"\".join([GRCm38indexpath, \"/\", index]), value, mappingout, key)\n","sub_path":"Figure3/Mapping_Bowtie2_ChipIrf4.py","file_name":"Mapping_Bowtie2_ChipIrf4.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"285724362","text":"import time\nimport pandas as pd\nimport numpy as np\n# add a new comment in master branch\n# first change\n# second change\n# first change\n# second change\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n \n city= str(input('Hi, which city do you want to explore: chicago, new york city or washington? ').lower())\n while city not in ['chicago','new york city','washington']:\n print (\"Your choice is invalid. \")\n city=str(input(\"Please choose one of the following chicago, new york city or washington :\").lower())\n \n \n # TO DO: get user input for month (all, january, february, ... , june)\n month= str(input('choose a month (all,january,february, ... , june) :').lower())\n while month not in ['all','january','february','march','april','may', 'june']:\n print (\"Your choice is invalid. \")\n month=str(input(\"Please choose one of the following months (all, january, february, ... , june): \").lower())\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day= str(input('choose a day (all, monday, tuesday, ... , sunday)').lower())\n while day not in ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']:\n print('your choice is invalid. ')\n day = str(input('Please choose one of the following days (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday) : ').lower())\n\n\n print('-'*40)\n return city, month, day\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n # apply the month and day filters promised in the docstring\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n df = df[df['month'] == months.index(month) + 1]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\nCalculating The Most Frequent Times of Travel...\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n print('Most common Month:', common_month)\n\n\n # TO DO: display the most common day of week\n common_day = df['day_of_week'].mode()[0]\n print('Most common day of week is: ',common_day)\n # TO DO: display the most common start hour\n common_start_hour = df['hour'].mode()[0]\n print('Most common Start Hour is: ', common_start_hour)\n\n print(\"\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\nCalculating The Most Popular Stations and Trip...\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_Start_Station= df['Start Station'].mode()[0]\n print('Most common Start Station is : ',common_Start_Station)\n # TO DO: display most commonly used end station\n common_End_Station= df['End Station'].mode()[0]\n print('Most common End Station is : ',common_End_Station)\n\n # TO DO: display most frequent combination of start station and end station trip\n common_combi_station = df.groupby(['Start Station', 'End Station']).size().idxmax()\n print(\"The most commonly used start station and end station : {}, {}\"\\\n .format(common_combi_station[0], common_combi_station[1]))\n\n print(\"\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\nCalculating Trip Duration...\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel = df['Trip Duration'].sum()\n print(\"Total travel time :\", total_travel, 'seconds')\n # TO DO: display mean travel time\n mean_travel = df['Trip Duration'].mean()\n print(\"Mean travel time :\", mean_travel,'seconds')\n\n print(\"\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\nCalculating User Stats...\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_counts = df['User Type'].value_counts()\n print(\"Counts of user types:\n\",user_counts)\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n gender_counts = df['Gender'].value_counts()\n print(\"Counts of gender:\n\",gender_counts)\n else:\n print('Gender: Oh :( there is no Gender Data to display ')\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest_by=df['Birth Year'].min()\n most_recent_by=df['Birth Year'].max()\n most_common_by = df['Birth Year'].mode()[0]\n print('earliest Birth Year is {} \n, most recent Birth Year is {} \n, and most common year of birth is {} \n '\\\n .format (earliest_by,most_recent_by,most_common_by))\n else:\n print('Birth Year: Oh :( there is no Birth Year Data to display ')\n print(\"\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n n=5\n raw_data= input('would you like to display raw data? enter yes or stop :').lower()\n while raw_data == 'yes':\n print(df.head(n))\n n+=5\n raw_data= input('would you like to display more raw data? enter yes or stop :').lower()\n \n \n restart = input('\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n \n \n \nif __name__ == \"__main__\":\n\tmain()","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216377466","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom decimal import Decimal\nimport django.utils.timezone\nimport jsonfield.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BitcoinReceiver',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('active', models.BooleanField(default=False)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=9)),\n ('amount_received', models.DecimalField(decimal_places=2, max_digits=9, default=Decimal('0'))),\n ('bitcoin_amount', models.PositiveIntegerField()),\n ('bitcoin_amount_received', models.PositiveIntegerField(default=0)),\n ('bitcoin_uri', models.TextField(blank=True)),\n ('currency', models.CharField(max_length=10, default='usd')),\n ('description', models.TextField(blank=True)),\n ('email', models.TextField(blank=True)),\n ('filled', models.BooleanField(default=False)),\n ('inbound_address', models.TextField(blank=True)),\n ('payment', models.TextField(blank=True)),\n ('refund_address', models.TextField(blank=True)),\n ('uncaptured_funds', models.BooleanField(default=False)),\n ('used_for_payment', models.BooleanField(default=False)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Card',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('name', models.TextField(blank=True)),\n ('address_line_1', models.TextField(blank=True)),\n ('address_line_1_check', models.CharField(max_length=15)),\n ('address_line_2', models.TextField(blank=True)),\n ('address_city', models.TextField(blank=True)),\n ('address_state', models.TextField(blank=True)),\n ('address_country', models.TextField(blank=True)),\n ('address_zip', models.TextField(blank=True)),\n ('address_zip_check', models.CharField(max_length=15)),\n ('brand', models.TextField(blank=True)),\n ('country', models.CharField(max_length=2)),\n ('cvc_check', models.CharField(max_length=15)),\n ('dynamic_last4', models.CharField(blank=True, max_length=4)),\n ('tokenization_method', models.CharField(blank=True, max_length=15)),\n ('exp_month', models.IntegerField()),\n ('exp_year', models.IntegerField()),\n ('funding', models.CharField(max_length=15)),\n ('last4', models.CharField(blank=True, max_length=4)),\n ('fingerprint', models.TextField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Charge',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('source', 
models.CharField(max_length=100)),\n ('currency', models.CharField(max_length=10, default='usd')),\n ('amount', models.DecimalField(null=True, decimal_places=2, max_digits=9)),\n ('amount_refunded', models.DecimalField(null=True, decimal_places=2, max_digits=9)),\n ('description', models.TextField(blank=True)),\n ('paid', models.NullBooleanField()),\n ('disputed', models.NullBooleanField()),\n ('refunded', models.NullBooleanField()),\n ('captured', models.NullBooleanField()),\n ('receipt_sent', models.BooleanField(default=False)),\n ('charge_created', models.DateTimeField(null=True, blank=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('account_balance', models.DecimalField(null=True, decimal_places=2, max_digits=9)),\n ('currency', models.CharField(blank=True, max_length=10, default='usd')),\n ('delinquent', models.BooleanField(default=False)),\n ('default_source', models.TextField(blank=True)),\n ('date_purged', models.DateTimeField(null=True, editable=False)),\n ('user', models.OneToOneField(null=True, to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('kind', models.CharField(max_length=250)),\n ('livemode', models.BooleanField(default=False)),\n ('webhook_message', jsonfield.fields.JSONField()),\n ('validated_message', jsonfield.fields.JSONField(null=True)),\n ('valid', models.NullBooleanField()),\n ('processed', models.BooleanField(default=False)),\n ('request', models.CharField(blank=True, max_length=100)),\n ('pending_webhooks', models.PositiveIntegerField(default=0)),\n ('api_version', models.CharField(blank=True, max_length=100)),\n ('customer', models.ForeignKey(null=True, to='pinax_stripe.Customer', on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='EventProcessingException',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('data', models.TextField()),\n ('message', models.CharField(max_length=500)),\n ('traceback', models.TextField()),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('event', models.ForeignKey(null=True, to='pinax_stripe.Event', on_delete=models.CASCADE)),\n ],\n ),\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('amount_due', models.DecimalField(decimal_places=2, max_digits=9)),\n ('attempted', models.NullBooleanField()),\n ('attempt_count', models.PositiveIntegerField(null=True)),\n ('statement_descriptor', models.TextField(blank=True)),\n ('currency', models.CharField(max_length=10, default='usd')),\n ('closed', models.BooleanField(default=False)),\n ('description', models.TextField(blank=True)),\n ('paid', 
models.BooleanField(default=False)),\n ('receipt_number', models.TextField(blank=True)),\n ('period_end', models.DateTimeField()),\n ('period_start', models.DateTimeField()),\n ('subtotal', models.DecimalField(decimal_places=2, max_digits=9)),\n ('total', models.DecimalField(decimal_places=2, max_digits=9)),\n ('date', models.DateTimeField()),\n ('webhooks_delivered_at', models.DateTimeField(null=True)),\n ('charge', models.ForeignKey(null=True, related_name='invoices', to='pinax_stripe.Charge', on_delete=models.CASCADE)),\n ('customer', models.ForeignKey(related_name='invoices', to='pinax_stripe.Customer', on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='InvoiceItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=9)),\n ('currency', models.CharField(max_length=10, default='usd')),\n ('kind', models.CharField(blank=True, max_length=25)),\n ('period_start', models.DateTimeField()),\n ('period_end', models.DateTimeField()),\n ('proration', models.BooleanField(default=False)),\n ('line_type', models.CharField(max_length=50)),\n ('description', models.CharField(blank=True, max_length=200)),\n ('quantity', models.IntegerField(null=True)),\n ('invoice', models.ForeignKey(related_name='items', to='pinax_stripe.Invoice', on_delete=models.CASCADE)),\n ],\n ),\n migrations.CreateModel(\n name='Plan',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=9)),\n ('currency', models.CharField(max_length=15)),\n ('interval', models.CharField(max_length=15)),\n ('interval_count', models.IntegerField()),\n ('name', models.CharField(max_length=150)),\n ('statement_descriptor', models.TextField(blank=True)),\n ('trial_period_days', models.IntegerField(null=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('application_fee_percent', models.DecimalField(null=True, decimal_places=2, max_digits=3, default=None)),\n ('cancel_at_period_end', models.BooleanField(default=False)),\n ('canceled_at', models.DateTimeField(null=True, blank=True)),\n ('current_period_end', models.DateTimeField(null=True, blank=True)),\n ('current_period_start', models.DateTimeField(null=True, blank=True)),\n ('ended_at', models.DateTimeField(null=True, blank=True)),\n ('quantity', models.IntegerField()),\n ('start', models.DateTimeField()),\n ('status', models.CharField(max_length=25)),\n ('trial_end', models.DateTimeField(null=True, blank=True)),\n ('trial_start', models.DateTimeField(null=True, blank=True)),\n ('customer', models.ForeignKey(to='pinax_stripe.Customer', on_delete=models.CASCADE)),\n ('plan', models.ForeignKey(to='pinax_stripe.Plan', on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Transfer',\n 
fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('stripe_id', models.CharField(unique=True, max_length=255)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=9)),\n ('currency', models.CharField(max_length=25, default='usd')),\n ('status', models.CharField(max_length=25)),\n ('date', models.DateTimeField()),\n ('description', models.TextField(null=True, blank=True)),\n ('event', models.ForeignKey(related_name='transfers', to='pinax_stripe.Event', on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='TransferChargeFee',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=9)),\n ('currency', models.CharField(max_length=10, default='usd')),\n ('application', models.TextField(null=True, blank=True)),\n ('description', models.TextField(null=True, blank=True)),\n ('kind', models.CharField(max_length=150)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now)),\n ('transfer', models.ForeignKey(related_name='charge_fee_details', to='pinax_stripe.Transfer', on_delete=models.CASCADE)),\n ],\n ),\n migrations.AddField(\n model_name='invoiceitem',\n name='plan',\n field=models.ForeignKey(null=True, to='pinax_stripe.Plan', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='invoiceitem',\n name='subscription',\n field=models.ForeignKey(null=True, to='pinax_stripe.Subscription', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='invoice',\n name='subscription',\n field=models.ForeignKey(null=True, to='pinax_stripe.Subscription', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='charge',\n name='customer',\n field=models.ForeignKey(related_name='charges', to='pinax_stripe.Customer', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='charge',\n name='invoice',\n field=models.ForeignKey(null=True, related_name='charges', to='pinax_stripe.Invoice', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='card',\n name='customer',\n field=models.ForeignKey(to='pinax_stripe.Customer', on_delete=models.CASCADE),\n ),\n migrations.AddField(\n model_name='bitcoinreceiver',\n name='customer',\n field=models.ForeignKey(to='pinax_stripe.Customer', on_delete=models.CASCADE),\n ),\n ]\n","sub_path":"pinax/stripe/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":16151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"378891393","text":"#!/usr/bin/python3\n# This script prints the titles of the first 10 hot posts\n# for a given subreddit\n\nimport requests\n\n\ndef top_ten(subreddit):\n '''Prints the titles of the first 10 hot posts on r/`subreddit`'''\n\n endpoint = 'https://reddit.com/r/{}.json'.format(subreddit)\n\n # In order to avoid a \"Too Many Request\" error we need to\n # include a User-Agent of something besides Python's default\n headers = {'User-Agent': 'x'}\n\n r = requests.get(endpoint, headers=headers).json()\n try:\n hot_posts = r.get('data').get('children')\n except AttributeError:\n print('None')\n return\n for post in hot_posts[:10]:\n 
print(post.get('data').get('title'))\n","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"121281594","text":"#!/usr/bin/python\n\n\nimport gtk\nimport controller\n\n\nclass View:\n\n def on_close_clicked(self, button, e):\n gtk.main_quit()\n\n\n def __init__(self):\n self.store = gtk.ListStore(int, str)\n self.ctrl = controller.Controller()\n\n self.builder = gtk.Builder()\n self.builder.add_from_file(\"window.glade\")\n self.builder.connect_signals(self)\n\n self.lv = self.builder.get_object(\"listview\")\n self.lv.set_model(self.store)\n self.lv.set_rules_hint(True)\n self.createColumns()\n\n self.searchEntry = self.builder.get_object(\"search_entry\")\n self.spinner = self.builder.get_object(\"spinner\")\n self.text = self.builder.get_object(\"loading_text\")\n\n window = self.builder.get_object(\"window1\")\n window.show_all()\n\n\n def on_downloadBtn_clicked(self, b):\n lv_selection = self.lv.get_selection()\n lv_selection.set_mode(gtk.SELECTION_SINGLE)\n (model, iter) = lv_selection.get_selected()\n self.ctrl.download(model.get_value(iter,0))\n\n\n def on_entryBtn_clicked(self, b):\n self.text.set_text(\"Searching...\")\n self.spinner.start()\n self.songs = self.ctrl.setQuery(self.searchEntry.get_text())\n self.addToLV()\n self.text.set_text(\"Done\")\n\n\n def createColumns(self):\n rendererText = gtk.CellRendererText()\n column1 = gtk.TreeViewColumn(\"Number\", rendererText, text=0)\n column1.set_resizable(True)\n column1.set_sort_column_id(0)\n column = gtk.TreeViewColumn(\"Title\", rendererText, text=1)\n column.set_resizable(True)\n column.set_sort_column_id(1)\n self.lv.append_column(column1)\n self.lv.append_column(column)\n\n\n def addToLV(self):\n i = 1\n self.store.clear()\n for a in self.songs.entry:\n self.store.append([i,a.media.title.text])\n i += 1\n\n\nif __name__ == '__main__':\n View()\n gtk.main()\n","sub_path":"ytDownloader/gui/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"352519951","text":"from keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras import regularizers\nfrom keras import backend as K\n\ndef ae(encoding_layer_dim, input_shape, X, X_interpolated, X_test, X_test_interpolated):\n # this is the size of our encoded representations\n encoding_dim = encoding_layer_dim\n # this is our input placeholder\n input_img = Input(shape=(input_shape,))\n # \"encoded\" is the encoded representation of the input\n h1 = Dense(encoding_dim*2, activation='relu')(input_img)\n encoded = Dense(encoding_dim, activation='linear',\n activity_regularizer=regularizers.l2(0.00001), name='encoded')(h1)\n # \"decoded\" is the lossy reconstruction of the input\n decoded_h2 = Dense(encoding_dim*2, activation='relu')(encoded)\n decoded = Dense(input_shape, activation='sigmoid')(decoded_h2)\n # this model maps an input to its reconstruction\n autoencoder = Model(input_img, decoded)\n # this model maps an input to its encoded representation\n encoder = Model(input_img, encoded)\n \n def custom_loss(classInstance, decoded):\n mse_loss = K.mean(K.square(decoded - classInstance), axis=-1)\n W = K.variable(value=autoencoder.get_layer('encoded').get_weights()[0])\n # K.sum keeps this backend-agnostic; plain tensors have no .sum() method\n intra_spread_loss = K.mean(K.sqrt(K.sum(K.square(K.mean(W, axis=0) - W), axis=1)), axis=-1)\n return K.mean(mse_loss + 
intra_spread_loss)\n \n autoencoder.compile(loss=custom_loss, optimizer='adadelta', metrics=['accuracy'])\n \n autoencoder.fit(X_interpolated, X_interpolated, \n batch_size=input_shape, \n epochs=100,\n shuffle=True,\n validation_data=(X_test_interpolated, X_test_interpolated))\n \n return autoencoder, encoder\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import NearestNeighbors\n\nsc = StandardScaler()\n\nusers = [1,2,3,4,5,6]\nactivities = [\"Jogging\", \"Running\", \"Walking down-stairs\", \"Walking up-stairs\", \"Walking\"]\nfeatures = [\"featuresFilt\"]\n\nNEIGHBOURS_AMOUNT = 10 #the amount of needed neighbours\nLAMBDA = 0.5\n\ndef KNN_Interpolation(featureSpace):\n space_width = featureSpace.shape[1]\n final_space = pd.DataFrame(columns=range(space_width))\n nbrs = NearestNeighbors(n_neighbors=NEIGHBOURS_AMOUNT).fit(featureSpace)\n distances, indices = nbrs.kneighbors(featureSpace)\n \n for idx, val in enumerate(featureSpace):\n for i in range(NEIGHBOURS_AMOUNT):\n needed_vector = indices[idx][i]\n interpolated_vector = (featureSpace[needed_vector]-val)*0.5 + val\n interpolated_vector = interpolated_vector.reshape((1, space_width))\n final_space = np.r_[final_space, interpolated_vector]\n \n final_space = final_space.astype(float)\n return final_space\n\nfor feature in features:\n\n for act in activities:\n \n for us in users:\n totalData = pd.read_csv('../../../myTrainingData/' + feature + '_' + act + '#' + str(us) + '.csv');\n totalData.drop([\"user\"], axis=1, inplace=True)\n totalData = sc.fit_transform(np.asarray(totalData, dtype= np.float32));\n \n statisticalData = np.concatenate((totalData[:,0:12], totalData[:,18:27]), axis=1)\n timeData = totalData[:,27:36]\n fftData = totalData[:, 12:18]\n waveletData = totalData[:, 36:57]\n\n x_train_stat, x_test_stat = train_test_split(statisticalData, test_size=0.2)\n \n x_train_stat_interpolated = KNN_Interpolation(x_train_stat)\n x_test_stat_interpolated = KNN_Interpolation(x_test_stat)\n \n x_train_time, x_test_time = train_test_split(timeData, test_size=0.2)\n \n x_train_time_interpolated = KNN_Interpolation(x_train_time)\n x_test_time_interpolated = KNN_Interpolation(x_test_time)\n \n x_train_fft, x_test_fft = train_test_split(fftData, test_size=0.2)\n \n x_train_fft_interpolated = KNN_Interpolation(x_train_fft)\n x_test_fft_interpolated = KNN_Interpolation(x_test_fft)\n \n x_train_wavelet, x_test_wavelet = train_test_split(waveletData, test_size=0.2)\n \n x_train_wavelet_interpolated = KNN_Interpolation(x_train_wavelet)\n x_test_wavelet_interpolated = KNN_Interpolation(x_test_wavelet)\n \n autoencoder_stat, encoder_stat = ae(10, 21, x_train_stat, x_train_stat_interpolated, x_test_stat, x_test_stat_interpolated);\n\n autoencoder_time, encoder_time = ae(4, 9, x_train_time, x_train_time_interpolated, x_test_time, x_test_time_interpolated);\n\n autoencoder_fft, encoder_fft = ae(3, 6, x_train_fft, x_train_fft_interpolated, x_test_fft, x_test_fft_interpolated);\n\n autoencoder_wavelet, encoder_wavelet = ae(10, 21, x_train_wavelet, x_train_wavelet_interpolated, x_test_wavelet, x_test_wavelet_interpolated);\n \n encoded_stats = encoder_stat.predict(statisticalData)\n encoded_time = encoder_time.predict(timeData)\n encoded_fft = encoder_fft.predict(fftData)\n encoded_wavelet = encoder_wavelet.predict(waveletData)\n\n concat_encoded = np.concatenate((encoded_stats, encoded_time, encoded_fft, encoded_wavelet), 
axis=1)\n\n x_train_fused, x_test_fused = train_test_split(concat_encoded, test_size=0.2)\n \n x_train_fused_interpolated = KNN_Interpolation(x_train_fused)\n x_test_fused_interpolated = KNN_Interpolation(x_test_fused)\n\n autoencoder_fused, encoder_fused = ae(16, 57, x_train_fused, x_train_fused_interpolated, x_test_fused, x_test_fused_interpolated);\n\n encoded_fused = encoder_fused.predict(concat_encoded)\n np.savetxt(\"./resultsFusedInterpolated5AE/AEResult_\" + feature + \"_\" + act + '#' + str(us) +\".csv\", encoded_fused, delimiter=',')","sub_path":"NewCostFunction/Sparse/augmentation/fusedDeep5AEsWithInterpolation.py","file_name":"fusedDeep5AEsWithInterpolation.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591191616","text":"import numpy as np\nimport random\n\n\ndef rand_mat(cm, type):\n\n if type=='links shuffle' or type=='links_shuffle':\n cm_rand = links_shuffle(cm)\n\n elif type=='weights shuffle' or type=='weights_shuffle':\n cm_rand = weights_shuffle(cm)\n\n return cm_rand\n\n\ndef links_shuffle(cm):\n cm1 = np.copy(cm)\n shape = np.shape(cm1)\n tril = np.ravel_multi_index(np.tril_indices(len(cm1), k=0), np.shape(cm1)) #k = diagonal offset\n indices = np.copy(tril)\n random.shuffle(indices)\n cm_rand = np.zeros(np.shape(cm1))\n cm_rand = cm_rand.reshape(-1)\n cm1 = cm1.reshape(-1)\n cm_rand[tril] = cm1[indices]\n cm_rand = cm_rand.reshape(shape)\n cm_rand = np.tril(cm_rand) + np.triu(cm_rand.T, 1)\n\n return cm_rand\n\n\ndef weights_shuffle(cm):\n cm1 = np.copy(cm)\n shape = np.shape(cm1)\n tril = np.ravel_multi_index(np.tril_indices(len(cm1), k=0), np.shape(cm1)) #k = diagonal offset\n cm1 = cm1.reshape(-1)\n tril_vals = cm1[tril]\n tril_edge_locs = tril_vals>0\n indices = np.copy(tril[ tril_edge_locs]) #choose only existing edges\n random.shuffle(indices)\n cm_rand = np.zeros(np.shape(cm1))\n cm_rand[tril[ tril_edge_locs]] = cm1[indices]\n cm_rand = cm_rand.reshape(shape)\n cm_rand = np.tril(cm_rand) + np.triu(cm_rand.T, 1)\n\n return cm_rand\n\n\ndef make_n_rand_mat(cm,n,type):\n rand_cm = np.zeros((cm.shape[0],cm.shape[1],n))\n for i in range(0,n):\n rand_cm_i = rand_mat(cm, type)\n rand_cm[:,:,i] = rand_cm_i\n\n return rand_cm","sub_path":"network_analysis/norm_cm_by_random_mat.py","file_name":"norm_cm_by_random_mat.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"41663992","text":"import sqlite3\n\ndef create_table():\n conn = sqlite3.connect(\"sqliteclass.db\")\n c = conn.cursor()\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS contacts (\n first text,\n last text,\n number text\n )\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef add_one(first, last , number):\n conn = sqlite3.connect(\"sqliteclass.db\")\n c = conn.cursor()\n\n c.execute(f\"SELECT * FROM contacts WHERE first = '{first}' AND last='{last}' AND number='{number}'\")\n result = c.fetchall()\n\n if len(result) == 0 and first != \"\" and last !=\"\" and number !=\"\":\n c.execute(\"INSERT INTO contacts VALUES (?, ?, ?)\", (first, last, number))\n\n conn.commit()\n conn.close()\n\ndef show_all():\n conn = sqlite3.connect(\"sqliteclass.db\")\n c = conn.cursor() \n c.execute(\"SELECT rowid, * FROM contacts\")\n result = c.fetchall()\n conn.commit()\n conn.close()\n return result\n\n\ndef delete(id):\n conn = sqlite3.connect(\"sqliteclass.db\")\n c = conn.cursor() \n c.execute(f\"DELETE FROM contacts WHERE 
rowid = {id}\")\n conn.commit()\n conn.close()\n\n\ndef update(id, first=\"\", last=\"\", number=\"\"):\n conn = sqlite3.connect(\"sqliteclass.db\")\n c = conn.cursor() \n\n if first != \"\":\n c.execute(f\"UPDATE contacts SET first='{first}' WHERE rowid = {id}\")\n \n if last != \"\":\n c.execute(f\"UPDATE contacts SET last='{last}' WHERE rowid = {id}\")\n \n if number != \"\":\n c.execute(f\"UPDATE contacts SET number='{number}' WHERE rowid = {id}\")\n conn.commit()\n conn.close()\n\n\ncreate_table()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37557769","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 12 13:50:57 2017\n\n@author: owen\n\"\"\"\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \n# http://www.cnblogs.com/grandyang/p/7590156.html\n#import collections\n#class Solution(object):\n# def findSecondMinimumValue(self, root):\n# \"\"\"\n# :type root: TreeNode\n# :rtype: int\n# \"\"\"\n# # level traverse\n# minVal=root.val\n# secMin=float('inf')\n# dq=collections.deque([root])\n# while dq:\n# curr=dq.popleft()\n# if curr.val>minVal and secMin>curr.val: # Notice! root is a special binary tree\n# secMin=curr.val\n# if curr.left:\n# dq.append(curr.left)\n# if curr.right:\n# dq.append(curr.right)\n# \n# return -1 if minVal==secMin or secMin==float('inf') else secMin\n \nclass Solution:\n def findSecondMinimumValue(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n res = [float('inf')]\n def traverse(node):\n if not node:\n return\n if root.val < node.val < res[0]:\n res[0] = node.val\n traverse(node.left)\n traverse(node.right)\n \n traverse(root)\n return -1 if res[0] == float('inf') else res[0]\n \nif __name__==\"__main__\":\n root=TreeNode(2)\n root.left=TreeNode(2)\n root.right=TreeNode(5)\n root.right.left=TreeNode(5)\n root.right.right=TreeNode(7)\n print(Solution().findSecondMinimumValue(root))","sub_path":"671. Second Minimum Node In a Binary Tree.py","file_name":"671. 
Second Minimum Node In a Binary Tree.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645432049","text":"import tensorflow as tf\n\nfrom avod.core.feature_extractors import img_feature_extractor\nfrom avod.core.feature_extractor_layers.resblock import resblock\n\nslim = tf.contrib.slim\n\n\nclass ImgResNetPyr(img_feature_extractor.ImgFeatureExtractor):\n \"\"\"Contains modified ResNet model definition to extract features from\n RGB image input using pyramid features.\n \"\"\"\n\n def resnet_arg_scope(self, weight_decay=0.0005):\n \"\"\"Defines the resnet arg scope.\n\n Args:\n weight_decay: The l2 regularization coefficient.\n\n Returns:\n An arg_scope.\n \"\"\"\n with slim.arg_scope([slim.conv2d, slim.fully_connected, resblock, slim.conv2d_transpose],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(\n weight_decay),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d, resblock], padding='SAME') as arg_sc:\n return arg_sc\n\n def build(self,\n inputs,\n input_pixel_size,\n is_training,\n scope='img_resnet_pyr'):\n \"\"\" Modified ResNet for Img feature extraction with pyramid features\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n input_pixel_size: size of the input (H x W)\n is_training: True for training, False for validation/testing.\n scope: Optional scope for the variables.\n\n Returns:\n The last op containing the log predictions and end_points dict.\n \"\"\"\n resnet_config = self.config\n\n with slim.arg_scope(self.resnet_arg_scope(\n weight_decay=resnet_config.l2_weight_decay)):\n with tf.variable_scope(scope, 'img_resnet_pyr', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, resblock],\n outputs_collections=end_points_collection):\n # Encoder\n resblock1 = slim.repeat(inputs,\n resnet_config.resnet_conv1[0],\n slim.conv2d,\n resnet_config.resnet_conv1[1],\n [resnet_config.resnet_conv1[2], resnet_config.resnet_conv1[2]],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='resblock1')\n\n # pool1 = slim.max_pool2d(conv1, [2, 2], scope='pool1')\n resblock2 = slim.repeat(resblock1,\n resnet_config.resnet_conv2[0],\n resblock,\n resnet_config.resnet_conv2[1],\n [resnet_config.resnet_conv2[2], resnet_config.resnet_conv2[2]],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='resblock2')\n\n resblock3 = slim.repeat(resblock2,\n resnet_config.resnet_conv3[0],\n resblock,\n resnet_config.resnet_conv3[1],\n [resnet_config.resnet_conv3[2], resnet_config.resnet_conv3[2]],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='resblock3')\n\n resblock4 = slim.repeat(resblock3,\n resnet_config.resnet_conv4[0],\n resblock,\n resnet_config.resnet_conv4[1],\n [resnet_config.resnet_conv4[2], resnet_config.resnet_conv4[2]],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='resblock4')\n\n # Decoder (upsample and fuse features)\n upconv3 = slim.conv2d_transpose(\n resblock4,\n resnet_config.resnet_conv3[1],\n [3, 3],\n stride=2,\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='upconv3')\n\n concat3 = tf.concat(\n (resblock3, upconv3), axis=3, name='concat3')\n 
pyramid_fusion3 = slim.conv2d(\n concat3,\n resnet_config.resnet_conv2[1],\n [3, 3],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='pyramid_fusion3')\n\n upconv2 = slim.conv2d_transpose(\n pyramid_fusion3,\n resnet_config.resnet_conv2[1],\n [3, 3],\n stride=2,\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='upconv2')\n\n concat2 = tf.concat(\n (resblock2, upconv2), axis=3, name='concat2')\n pyramid_fusion_2 = slim.conv2d(\n concat2,\n resnet_config.resnet_conv1[1],\n [3, 3],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='pyramid_fusion2')\n\n upconv1 = slim.conv2d_transpose(\n pyramid_fusion_2,\n resnet_config.resnet_conv1[1],\n [3, 3],\n stride=2,\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='upconv1')\n\n concat1 = tf.concat(\n (resblock1, upconv1), axis=3, name='concat1')\n pyramid_fusion1 = slim.conv2d(\n concat1,\n resnet_config.resnet_conv1[1],\n [3, 3],\n normalizer_fn=slim.batch_norm,\n normalizer_params={\n 'is_training': is_training},\n scope='pyramid_fusion1')\n\n # Slice off padded area\n sliced = pyramid_fusion1[:, 4:]\n\n feature_maps_out = sliced\n\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(\n end_points_collection)\n\n return feature_maps_out, end_points\n","sub_path":"avod/core/feature_extractors/img_resnet_pyramid.py","file_name":"img_resnet_pyramid.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347740867","text":"m=int(input())\r\nn=[int(input()) for i in range(int(m))]\r\n\r\nn.sort(reverse=True)\r\nyaki1 = 0\r\nyaki2 = 0\r\n\r\nfor l in range(m):\r\n if(yaki1 <= yaki2) :\r\n yaki1 += int(n[l])\r\n else:\r\n yaki2 += int(n[l])\r\n\r\nif yaki1 >= yaki2:\r\n print(yaki1)\r\nelse:\r\n print(yaki2)","sub_path":"ABC_A_oniku.py","file_name":"ABC_A_oniku.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"94100799","text":"\ndef remove_comments(line:str):\n for index,character in enumerate(line):\n if character == \";\" and not is_in_quote(line,index):\n line = line[:index]\n break\n return line\n\ndef is_in_quote(text:str,index:int):\n return text[:index].count(\"\\\"\") % 2 == 1\n\n# TODO remove\ndef remove_trailing_spaces(line:str):\n return line\n\n# TODO remove\ndef collapse_brackets(text:str):\n return text\n\ndef is_integer(n):\n try:\n int(n)\n return True\n except ValueError:\n return False\n\ndef parse_bind(bind:str):\n periods = {\n \"s\": 1,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 86400,\n \"w\": 604800\n }\n\n seconds = 0\n\n parts = []\n buffer = \"\"\n\n for char in bind:\n if is_integer(char):\n buffer += char\n else:\n parts.append(buffer + char)\n buffer = \"\"\n\n for part in parts:\n period = part[-1]\n\n amount = int(part.replace(period,\"\"))\n\n seconds += amount * periods[period.lower()]\n\n return seconds\n\n# TODO unit test\ndef default_ttl(text:str):\n lines = text.splitlines()\n for line in lines:\n if \"$TTL\" in line:\n ttl_str = line.split(\" \")[1]\n try:\n ttl = int(ttl_str)\n return int(ttl)\n except ValueError:\n # the value could be BIND format, attempt to parse\n # https://www.zytrax.com/books//dns/apa/time.html\n ttl = parse_bind(ttl_str)\n return ttl\n\n return None\n\n# TODO write test 
case\ndef default_origin(text:str):\n lines = text.splitlines()\n for line in lines:\n if \"$ORIGIN\" in line:\n origin = line.split(\" \")[1]\n return origin\n return None\n\n# TODO unit test\n# TODO refactor\ndef find_soa_lines(text:str):\n\n lines = text.splitlines()\n\n soa_start_line = 0\n\n soa_end_line = 0\n\n find_bracket = False\n\n for line_number in range(0,len(lines)-1):\n line = lines[line_number]\n if \"SOA\" in line.upper():\n soa_start_line = line_number\n if \"(\" in line:\n find_bracket = True\n else:\n soa_end_line = soa_start_line\n break\n\n\n if \")\" in line and find_bracket is True:\n soa_end_line = line_number\n break\n\n\n return range(soa_start_line,soa_end_line + 1)\n\n# TODO unit test\ndef parted_soa(text:str):\n\n # flatten\n text = text.replace(\"\\n\",\"\")\n\n # part out the soa\n parts = text.split()\n\n # remove multiple spaces, and replace them with a single space\n parts = list(\n filter(\n lambda x : \")\" not in x and \"(\" not in x,\n parts\n )\n )\n\n return parts\n","sub_path":"zonefile_parser/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552945079","text":"# Copyright 2013 Open Cloud Consortium\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n''' Functions for getting the clouds '''\n\nimport memcache\n\nfrom tukey_middleware import utils\nfrom tukey_middleware import local_settings\nfrom tukey_middleware.cloud_driver.login_keypairs import LoginKeypairsDriver\nfrom tukey_middleware.cloud_driver.login_keypairs import AllKeypairsDriver\nfrom tukey_middleware.cloud_driver.osdc_novacluster import OsdcNovacluster\nfrom tukey_middleware.auth.base import TukeyAuthException\nfrom tukey_middleware.auth.token_store import TokenStore\n\n\ndef create_login_driver(cloud_name, driver_instance, login_url):\n ''' create login node ssh key driver from cloud'''\n db_connection_string = local_settings.vm_ip_auth[\"auth_db_str\"]\n fingerprint = local_settings.GPG_FINGERPRINT\n gpg_home_dir = local_settings.GPG_HOME\n gpg_passphrase = local_settings.GPG_PASSPHRASE\n gpg_host_pubkey_filename = \"%s/%s.pub\" % (local_settings.GPG_PUBKEY_DIR,\n cloud_name)\n host = login_url\n host_passphrase = \"\"\n\n return LoginKeypairsDriver(driver_instance.cloud, driver_instance.cloud_id,\n db_connection_string, fingerprint, gpg_home_dir,\n gpg_host_pubkey_filename, gpg_passphrase, host_passphrase, host,\n driver_instance.auth.username())\n\n\nclass CloudRegistry(object):\n ''' Using local_settings.py, the auth_token passed in externally create\n cloud driver objects'''\n\n DEFAULT_AUTH = \"tukey_middleware.auth.keystone_proxy.KeystoneProxy\"\n DEFAULT_AUTH_PARAMS = {\n \"memcache_client\": {\n \"class\": \"memcache.Client\",\n \"params\": [[\"localhost:11211\"], 0]\n },\n \"eucarc_path\": local_settings.EUCARC_BASE + \"/users/%s/%s/.euca/eucarc\"\n }\n\n def __init__(self, settings=None):\n if settings is None:\n settings = {}\n self.settings 
= settings\n        self.logger = utils.get_logger()\n        self.client_format = None\n\n    def _initialize_cloud(self, cloud, name, auth_token):\n        ''' Cloud entering is a dictionary and returned is the initialized\n        cloud driver '''\n\n        if \"auth_driver\" in cloud:\n            auth_path = cloud[\"auth_driver\"]\n        else:\n            auth_path = self.DEFAULT_AUTH\n\n        self.logger.debug(\"getting auth class from %s\", auth_path)\n\n        if \"auth_driver_parameters\" in cloud:\n            params = cloud[\"auth_driver_parameters\"]\n        else:\n            params = self.DEFAULT_AUTH_PARAMS\n\n        if auth_path is not None:\n            auth_class = utils.get_class(auth_path).handle_parameters(params)\n            auth_instance = auth_class(name, auth_token)\n        else:\n            auth_instance = None\n\n        if \"driver\" in cloud:\n            driver_path = cloud[\"driver\"]\n        elif \"access\" in cloud:\n            has_volume = False\n            has_object = False\n\n            for service in cloud[\"access\"][\"serviceCatalog\"]:\n                if service[\"type\"] == \"volume\":\n                    has_volume = True\n                if service[\"type\"] == \"object-store\":\n                    has_object = True\n\n            if has_object and has_volume:\n                #TODO: replace with volume + object class\n                driver_path = (\"tukey_middleware.cloud_driver\"\n                    \".openstack_volumes.OpenStackVolumeDriver\")\n\n            elif has_object:\n                #TODO: replace with object class\n                driver_path = (\"tukey_middleware.cloud_driver\"\n                    \".openstack.OpenStackDriver\")\n\n            elif has_volume:\n                driver_path = (\"tukey_middleware.cloud_driver\"\n                    \".openstack_volumes.OpenStackVolumeDriver\")\n\n            else:\n                driver_path = (\"tukey_middleware.cloud_driver\"\n                    \".openstack.OpenStackDriver\")\n        else:\n            driver_path = (\"tukey_middleware.cloud_driver\"\n                \".osdc_euca.OsdcEucaDriver\")\n\n        self.logger.debug(\"getting cloud driver class from %s\",\n            driver_path)\n\n        driver_class = utils.get_class(driver_path)\n        # compare the dotted path string; the OpenStack driver additionally\n        # accepts the client format\n        if driver_path == (\"tukey_middleware.cloud_driver.openstack.\"\n                \"OpenStackDriver\"):\n            driver_instance = driver_class(auth_instance,\n                client_format=self.client_format)\n        else:\n            driver_instance = driver_class(auth_instance)\n\n\n        self.logger.debug(\"cloud: %s auth SUCCESS %s\", cloud,\n            auth_token)\n\n        #TODO: for faster selection of ec2/eucalyptus cloud have a special id\n        # that we can look at to instantly dismiss or accept requests\n        #driver_instance.cloud_id = cloud[\"id\"]\n\n        driver_instance.cloud = cloud.get(\"cloud\", name)\n        driver_instance.cloud_id = name\n\n        return driver_instance\n\n    def build_login_driver_by_name(self, cloud_name, token_info, auth_token):\n        cloud_name = cloud_name[len(\"login\"):]\n        cloud_info = token_info[cloud_name]\n        base_driver = self._initialize_cloud(cloud_info, cloud_name,\n            auth_token)\n        return create_login_driver(cloud_name, base_driver,\n            token_info[\"login\" + cloud_name])\n\n    def get_cloud_by_id(self, cloud_name, auth_token):\n        try:\n            toks = TokenStore(memcache.Client(['127.0.0.1:11211']))\n            token_info = toks.get(str(auth_token))\n\n            if cloud_name.startswith(\"cluster\"):\n                cloud_name = cloud_name[len(\"cluster\"):]\n                cloud_info = token_info[cloud_name]\n                base_driver = self._initialize_cloud(cloud_info, cloud_name,\n                    auth_token)\n                return OsdcNovacluster(base_driver)\n\n            elif cloud_name.startswith(\"login\"):\n                return self.build_login_driver_by_name(cloud_name, token_info,\n                    auth_token)\n\n            elif cloud_name == \"all\":\n                # This will probably only ever be used for generating pubkeys\n                # for all clouds\n                # For now we assume that is the case\n                drivers = []\n                for key, value in token_info.items():\n                    try:\n                        if key != \"__tukey_internal\":\n                            if key.startswith(\"login\"):\n                                
drivers.append(self.build_login_driver_by_name(\n key, token_info, auth_token))\n elif value.get(\"instance_keypairs\", False):\n drivers.append(self._initialize_cloud(value,\n key, auth_token))\n except TukeyAuthException:\n continue\n\n return AllKeypairsDriver(drivers)\n\n else:\n cloud_info = token_info[cloud_name]\n except Exception as exc:\n self.logger.info(\"Accessing cloud %s without auth_token %s\",\n cloud_name, exc.message)\n cloud_info = self.settings[cloud_name]\n\n return self._initialize_cloud(cloud_info, cloud_name, auth_token)\n\n def all_clouds(self, auth_token, client_format=None):\n ''' Return list of cloud_driver objects settings is a dictionary of\n cloud names and their drivers and parameters '''\n self.client_format = client_format\n clouds = []\n\n toks = TokenStore(memcache.Client(['127.0.0.1:11211']))\n token_info = toks.get(str(auth_token))\n\n for name, cloud in [(n, c) for n, c in token_info.items()\n if n != '__tukey_internal' and not n.startswith(\"login\")]:\n try:\n driver_instance = self._initialize_cloud(cloud, name,\n auth_token)\n except TukeyAuthException:\n continue\n\n clouds.append(driver_instance)\n clouds.append(create_login_driver(name, driver_instance,\n token_info[\"login\" + name]))\n\n return clouds\n","sub_path":"Tukey/tukey_middleware/tukey_middleware/cloud_driver/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"358551417","text":"import onnx\nimport unittest\nimport torchvision\nimport numpy as np\nfrom onnxruntime_customops.utils import trace_for_onnx, op_from_model\nfrom onnxruntime_customops import eager_op, hook_model_op, PyOp, mytorch as torch\n\n\nclass TestTorchE2E(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.mobilenet = torchvision.models.mobilenet_v2(pretrained=True)\n cls.argmax_input = None\n\n @staticmethod\n def on_hook(*x):\n TestTorchE2E.argmax_input = x[0]\n return x\n\n def test_imagenet_postprocess(self):\n mb_core_path = \"mobilev2.onnx\"\n mb_full_path = \"mobilev2_full.onnx\"\n dummy_input = torch.randn(10, 3, 224, 224)\n np_input = dummy_input.numpy()\n torch.onnx.export(self.mobilenet, dummy_input, mb_core_path, opset_version=11)\n mbnet2 = op_from_model(mb_core_path)\n\n with trace_for_onnx(dummy_input, names=['b10_input']) as tc_sess:\n scores = mbnet2(*tc_sess.get_inputs())\n probabilities = torch.softmax(scores, dim=1)\n batch_top1 = probabilities.argmax(dim=1)\n\n np_argmax = probabilities.numpy() # for the result comparison\n np_output = batch_top1.numpy()\n\n tc_sess.save_as_onnx(mb_full_path, batch_top1)\n\n hkdmdl = hook_model_op(onnx.load_model(mb_full_path), 'argmax', self.on_hook, [PyOp.dt_float])\n mbnet2_full = eager_op.EagerOp.from_model(hkdmdl)\n batch_top1_2 = mbnet2_full(np_input)\n np.testing.assert_allclose(np_argmax, self.argmax_input, rtol=1e-5)\n np.testing.assert_array_equal(batch_top1_2, np_output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_mytorch.py","file_name":"test_mytorch.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"341140669","text":"import shelve\nfrom scapy.all import *\nfrom sklearn.feature_extraction import FeatureHasher\n\n\"Purpose1\"\n\n\ndef gen_extract(f):\n \"\"\"\n Use a file name as input,filter the lines with TCP layer\n :param f:\n :return:\n \"\"\"\n file = rdpcap(f)\n lines = 
range(len(file))\n    raw_pkt = {}\n    for i in lines:\n        if TCP in file[i]:\n            raw_pkt[i + 1] = file[i]\n    return raw_pkt\n\n\ndef feature_ext(rawpkt, ld):\n    \"\"\"\n    Extract features as training data: takes the raw packet dict and a list of keys, returns a four-feature row per packet\n    :param rawpkt: raw packet features\n    :param ld: a list of sample lines\n    :return:\n    \"\"\"\n    sample = {}\n    for i in ld:\n        # index the packet directly instead of building statements for exec\n        pkt = rawpkt[i]\n        sample[i] = [pkt[IP].src, pkt[TCP].sport, pkt[IP].dst, pkt[TCP].dport]\n    return sample\n\n\ndef translate(fld):\n    \"\"\"\n    Use sklearn to translate each four-feature row into a matrix\n    :param fld: a dict mapping index to the four-feature rows from feature_ext()\n    :return:\n    \"\"\"\n    hasher = FeatureHasher(n_features=10, input_type='string')\n    result = {}\n    for i in fld:\n        # stringify non-string fields once; mutating via .index() can hit the wrong slot on duplicate values\n        fld[i] = [k if isinstance(k, str) else str(k) for k in fld[i]]\n        tmp = ((hasher.fit_transform(fld[i])).toarray()).reshape(1, 40)\n        result[i] = tmp\n    return result\n\nfilename = raw_input(\"Input the raw pcap data: \")\nraw_features = gen_extract(filename)\ntmp_features = feature_ext(raw_features, [x for x in raw_features])\nfinal_features = translate(tmp_features)\n\nprint('Test data has been made')\nname = raw_input(\"Please use a new file to save it: \")\nsave = shelve.open(name)\nsave['res'] = final_features\nsave.close()","sub_path":"product/newtools/expmaker.py","file_name":"expmaker.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"420589981","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom deepy import *\nimport theano.tensor as T\n\nclass AggregationLayer(NeuralLayer):\n    \"\"\"\n    Aggregation layer.\n    \"\"\"\n\n    def __init__(self, size, activation='relu', init=None, layers=3):\n        super(AggregationLayer, self).__init__(\"aggregation\")\n        self.size = size\n        self.activation = activation\n        self.init = init\n        self.layers = layers\n\n    def prepare(self):\n        self.output_dim = self.size\n        self._act = build_activation(self.activation)\n        self._inner_layers = [Dense(self.size, self.activation, init=self.init).initialize(self.input_dim)]\n        for _ in range(self.layers - 1):\n            self._inner_layers.append(Dense(self.size, self.activation, init=self.init).initialize(self.size))\n        self.register_inner_layers(*self._inner_layers)\n\n        self._chain2 = Chain(self.input_dim).stack(\n            Dense(self.size, self.activation, init=self.init),\n            Dense(self.layers, 'linear', init=self.init),\n            Softmax()\n        )\n\n        self.register_inner_layers(self._chain2)\n        self._dropout = Dropout(0.1)\n\n    def _output(self, x, test=False):\n        seq = []\n        v = x\n        for layer in self._inner_layers:\n            v = layer.compute_flexible_tensor(v, test)\n            v = self._dropout.compute_flexible_tensor(v, test)\n            seq.append(v.dimshuffle(0, \"x\", 1))\n\n        seq_v = T.concatenate(seq, axis=1)\n\n        eva = self._chain2.compute_flexible_tensor(x, test)\n\n        result = seq_v * eva.dimshuffle((0, 1, \"x\"))\n        result = result.sum(axis=1)\n        return result\n\n    def compute_tensor(self, x):\n        return self._output(x, False)\n\n    def compute_test_tesnor(self, x):\n        return self._output(x, 
True)\n\n\n\n","sub_path":"experiments/aggregation_networks/aggregation_layer.py","file_name":"aggregation_layer.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"62627828","text":"__author__ = 'mdarmadi@ucsd.edu, A11410141, hdharmaw@ucsd.edu, A91413023, vcchandr@ucsd.edu, A12496582'\nimport sys\nfrom Queue import LifoQueue\nimport time\nclosedList = {} # closedList is going to be a dictionary showing the parent of number\n\ndef isPrime(n):\n    if n == 0 or n == 1:\n        return False\n    if n == 2:\n        return True\n    if n % 2 == 0:\n        return False\n    for x in range(3, int(n**0.5)+1, 2):\n        if n % x == 0:\n            return False\n    return True\n\ndef getPossibleActions(currentPrime):\n    listOfPrimes = []\n\n    currentStr = str(currentPrime)\n    currentList = list(currentStr)\n    length = len(currentList) # this is the number of digits\n\n    # check every possible combination\n    for i in range(0, length):\n        curChar = currentList[i]\n        for j in range(0,10): # possible digit replacement\n            j = str(j)\n\n            if(j == curChar): # to avoid repetition\n                continue\n\n            if (j == '0' and i == 0): # to avoid producing leading 0\n                continue\n\n            currentList[i] = j # replace the digit\n            newStr = ''.join(currentList)\n            newInt = int(newStr) # might not need to convert to int if dictionary uses str for key\n\n            # check if new integer is prime and not in closedList already\n            if (isPrime(newInt)):\n                if (not str(newInt) in closedList):\n                    listOfPrimes.append(newInt)\n\n            currentList[i] = curChar # return currentList to original char\n\n    return listOfPrimes\n\ndef getPath(startingPrime, finalPrime):\n    # print(type(startingPrime))\n    # print(\"starting Prime: \" + str(startingPrime))\n    # print(type(finalPrime))\n    # print(\"final Prime: \" + str(finalPrime))\n\n\t# your code here\n\t#depth limit is 5\t\n\t#declare stack\n    closedList.clear()\n    stack = LifoQueue()\n\t\n\t#push into the stack\n    stack.put((startingPrime , 0))\n\n    outputString = \"\"\n\t\n\t#while stack is not empty \n    while(not stack.empty()):\n\t\t#pop a from stack\n        a = stack.get()\n\n\t\t#if a.currentPrime == finalPrime\n        if(a[0] == finalPrime):\n            break\n\n\t\t#else if a.depth >= 5\n        elif(a[1] >= 5):\n            continue\n\t\t\n\t\t#find all neighbors of currentPrime\n        neighbor = getPossibleActions(a[0])\n\t\t\n        for i in range(0,len(neighbor)):\n\t\t\t#set the parent of the neighbor to currentPrime\n            closedList[str(neighbor[i])] = a[0]\n\t\t\t#push each neighbor onto the stack\n            stack.put((neighbor[i],a[1] + 1))\n\t\n    #if(currentPrime != finalPrime)\n    if(a[0] != finalPrime):\n        #unsolvable\n        outputString = 'UNSOLVABLE'\n\n    else:\n        current = a[0]\n        outputString = \"\"\n        outputString = str(current) + \" \" + outputString\n        while(current != startingPrime):\n            current = closedList[str(current)]\n            outputString = str(current) + \" \" + outputString\n# \t\toutputString = startingPrime + \" \" + outputString\n\n#    file = open('output.txt','w')\n#    print >> file,outputString\n#    file.close()\n    sys.stdout.write(outputString + \"\\n\")\n    return \n\ndef main():\n    for line in sys.stdin.readlines():\n        #line = sys.stdin.readline()\n        primes = str(line).split()\n        first = list(primes[0])\n        second = list(primes[1])\n\n        t0 = time.time()\n        getPath(int(primes[0]), int(primes[1]))\n\n        t1 = time.time()\n        #print t1 - t0\nif __name__ == 
'__main__':\n\tmain()","sub_path":"assignment1/assignment1_p2.py","file_name":"assignment1_p2.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650280853","text":"from datetime import datetime, timedelta\n\nfrom numpy import long\n\n\nclass SettingsSnippets:\n\n def __init__(self, service):\n self.service = service\n\n def update_signature(self):\n gmail_service = self.service\n # [START updateSignature]\n primary_alias = None\n aliases = gmail_service.users().settings().sendAs(). \\\n list(userId='me').execute()\n for alias in aliases.get('sendAs'):\n if alias.get('isPrimary'):\n primary_alias = alias\n break\n\n sendAsConfiguration = {\n 'signature': 'I heart cats'\n }\n result = gmail_service.users().settings().sendAs(). \\\n patch(userId='me',\n sendAsEmail=primary_alias.get('sendAsEmail'),\n body=sendAsConfiguration).execute()\n print('Updated signature for: %s' % result.get('displayName'))\n # [END updateSignature]\n return result.get('signature')\n\n def create_filter(self, real_label_id):\n gmail_service = self.service\n # [START createFilter]\n label_id = 'Label_14' # ID of user label to add\n # [START_EXCLUDE silent]\n label_id = real_label_id\n # [END_EXCLUDE]\n filter = {\n 'criteria': {\n 'from': 'cat-enthusiasts@example.com'\n },\n 'action': {\n 'addLabelIds': [label_id],\n 'removeLabelIds': ['INBOX']\n }\n }\n result = gmail_service.users().settings().filters(). \\\n create(userId='me', body=filter).execute()\n print('Created filter: %s' % result.get('id'))\n # [END createFilter]\n return result.get('id')\n\n def enable_forwarding(self, real_forwarding_address):\n gmail_service = self.service\n # [START enableForwarding]\n address = {\n 'forwardingEmail': 'user2@example.com'\n }\n # [START_EXCLUDE silent]\n address = {\n 'forwardingEmail': real_forwarding_address\n }\n # [END_EXCLUDE]\n result = gmail_service.users().settings().forwardingAddresses(). \\\n create(userId='me', body=address).execute()\n if result.get('verificationStatus') == 'accepted':\n body = {\n 'emailAddress': result.get('forwardingEmail'),\n 'enabled': True,\n 'disposition': 'trash'\n }\n result = gmail_service.users().settings(). \\\n updateAutoForwarding(userId='me', body=body).execute()\n # [START_EXCLUDE silent]\n return result\n # [END_EXCLUDE]\n\n # [END enableForwarding]\n return None\n\n def enable_auto_reply(self):\n gmail_service = self.service\n # [START enableAutoReply]\n epoch = datetime.utcfromtimestamp(0)\n now = datetime.now()\n start_time = (now - epoch).total_seconds() * 1000\n end_time = (now + timedelta(days=7) - epoch).total_seconds() * 1000\n vacation_settings = {\n 'enableAutoReply': True,\n 'responseBodyHtml': \"I'm on vacation and will reply when I'm \"\n \"back in the office. Thanks!\",\n 'restrictToDomain': True,\n 'startTime': long(start_time),\n 'endTime': long(end_time)\n }\n response = gmail_service.users().settings(). \\\n updateVacation(userId='me', body=vacation_settings).execute()\n # [END enableAutoReply]\n return response\n","sub_path":"gmail/snippet/settings_snippets.py","file_name":"settings_snippets.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"351035050","text":"'''Moon Lander Game 1\n PyGame version of a moon landing game. 
1D with physics.\n'''\nimport sys, pygame\nfrom math import sqrt, pi\n\npygame.init()\n\n# Size of window to use\nscr_size = 1000, 850\nscr_height = scr_size[1] - 50\n\n# Define some colour RGB tuples\nblack = 0, 0, 0\nwhite = 255, 255, 255\ngrey = 128, 128, 128\ngreen = 128, 255, 128\nred = 255, 64, 64\n\n# Set up screen, define & fill background\nscreen = pygame.display.set_mode(scr_size)\nbackground = pygame.Surface((1000,800))\nbackground = background.convert()\nbackground.fill(black)\n\nmoon = pygame.Surface((1000,50))\nmoon = moon.convert()\nmoon.fill(black)\n\ndef draw_moon():\n #pygame.draw.arc(moon,white,[0,0,1000,100], pi/4, 3*pi/4,4)\n #pygame.draw.ellipse(moon,white,[0,0,1000,400],200)\n #pygame.draw.circle(moon,white,(500,500), 500,4)\n pygame.draw.rect(moon, grey, (0, 0, 1000, 50))\n screen.blit(moon,(0,800))\n pygame.display.flip()\n\ndraw_moon()\n\n# Print lines of text to the graphics area (LHS)\ndef print_pg(text, text_y, text_size=24, colour=green):\n font = pygame.font.Font(None, text_size)\n for text_line in text.split(\"\\n\"):\n line = font.render(text_line, 1, colour)\n pygame.draw.rect(background, black, (0, text_y - 12, 250, text_size))\n textpos = line.get_rect(left = 20, centery = text_y)\n background.blit(line, textpos)\n text_y += text_size\n\n# Display the lander at the correct height and refresh the screen\ndef display_lander(lander, height):\n pygame.draw.rect(background, black, (251, 0, 750, scr_height))\n background.blit(lander, (500 - lander_scales[zoom][0] / 2,\n scr_height * (1.0 - height / screen_scales[zoom])))\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n# Display status informatio on the screen\ndef set_status(time, height, throttle, thrust, speed, fuel_supply, accn):\n # Calculate the time to zero height using the quadratic formula\n sq_term = speed**2 + 2.0 * height * accn\n if (sq_term > 0.0):\n land_time = (-speed + sqrt(sq_term)) / accn\n land_vel = speed + land_time * accn\n else:\n land_time = 9999.0\n land_vel = 9999.0\n\n turnover_ht = height + 0.5 * speed**2 / accn\n print_pg((\"Time = {:.1f} s\\nHeight = {:.0f} m\\nThrottle = {:.0f} %\\nThrust = {:.0f} N\\n\" +\n \"Descent speed = {:.2f} m/s\\nAccelleration = {:.2f} m/s/s\\nFuel = {:.1f} kg\\n\" +\n \"Land in {:.1f} s\\nLanding speed = {:.2f} m/s\\nT/over height = {:.0f} m\").\n format(time, height, throttle * 100.0, thrust, speed, accn, fuel_supply, land_time,\n land_vel, turnover_ht), 12)\n \n\n# Lunar module picture\nlander_init = pygame.image.load(\"Apollo_LunarModule.png\")\n\n# Different scales for the background and lander.\n# Background scale factors ~4 between levels, lander scale factors ~1.6 - only realistic on final scale\nlander_scales = ((56, 44), (89, 70), (142, 112), (227, 179), (364, 286), (582, 457))\nscreen_scales = (16000, 3800, 900, 216, 51.5, 12.25, 0)\n\n# Physics and lander constants\ngravity_0 = 1.62 # Moon's gravity at surface\nLM_mass = 15.2e3 # Launch mass of LM (15.2 tonnes)\nDPS_thrust = 45.04e3 # Descent propulsion system full power thrust (N)\nburn_rate = 7.5 # Fuel use at 100% thrust, kg/s\nfuel_supply = 900 # Fuel available for descent\nthrottle_min = 0.1\nthrottle_max = 0.6 # DPS engine can be throttled between 10% & 60% of full thrust\npericynthion = 15.0e3 # Lowest point in Lunar orbit - descend from here\nmax_impact_speed = 10 # Gives ~ 3g over 1.7m\ntime_step = 0.1 # Simulation time step (s)\n\n# Initial conditions\nheight = pericynthion\nspeed = 0.0\nthrottle = 0.0\nthrust = 0.0\ntime = 0.0\naccn = gravity_0\n\n\n# Loop until landed 
or crashed, zooming in as we go\nfor zoom in range(6):\n # Scale lander as we zoom in\n lander = pygame.transform.scale(lander_init, lander_scales[zoom])\n# print (lander_scales[zoom])\n# print (screen_scales[zoom])\n lander_rect = lander.get_rect()\n\n # When lander reaches ~ bottom 1/4 of the screen zoom in\n while (height > screen_scales[zoom + 1]):\n # Current time, height, thrust & speed\n set_status(time, height, throttle, thrust * DPS_thrust, speed, fuel_supply, accn)\n display_lander(lander, height)\n \n # throttle must be off, 10% - 60% or 100%\n for event in pygame.event.get():\n \n # determine if X was clicked, or Ctrl+W or Alt+F4 was used\n if event.type == pygame.QUIT:\n break\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n \n # determine if a key was pressed\n if event.key == pygame.K_b:\n throttle = 1.0\n elif event.key == pygame.K_KP0:\n throttle = 0.0\n elif event.key == pygame.K_KP1:\n throttle = 0.1\n elif event.key == pygame.K_KP2:\n throttle = 0.2\n elif event.key == pygame.K_KP3:\n throttle = 0.3\n elif event.key == pygame.K_KP4:\n throttle = 0.4\n elif event.key == pygame.K_KP5:\n throttle = 0.5\n elif event.key == pygame.K_KP6:\n throttle = 0.6\n elif event.key == pygame.K_KP8:\n throttle = 1.0\n elif event.key == pygame.K_KP9:\n throttle = 1.0\n elif event.key == pygame.K_KP_PLUS:\n throttle += 0.01\n elif event.key == pygame.K_KP_MINUS:\n throttle -= 0.01\n \n if (throttle < 0.05):\n throttle = 0.0\n elif (throttle > 0.8):\n throttle = 1.0\n else:\n throttle = min(max(throttle, throttle_min), throttle_max)\n\n print (throttle)\n \n thrust_time = 0.1\n time += time_step\n #while (thrust_time < 0):\n # thrust_time = float(input(\"Thrust time (s)? \"))\n \n for ii in range(int(thrust_time // time_step)):\n # Estimate increase of thrust due to ground effect (reflected gasses) [+50% at 20m]\n ground_effect = (40.0 + height) / (20.0 + height)\n # Check to see if fuel will run out during the time step\n thrust = min(throttle, fuel_supply / (burn_rate * time_step)) * ground_effect\n # Convert thrust to acceleration and subtract from gravity\n accn = (gravity_0 - thrust * DPS_thrust / LM_mass)\n speed += accn * time_step\n height -= speed * time_step\n # Subtract fuel used, from supply and module mass\n fuel_used = thrust * burn_rate * time_step\n fuel_supply -= fuel_used\n LM_mass -= fuel_used\n\n if (height <= screen_scales[zoom + 1]):\n break\n\n # Wait for thrust_time seconds\n pygame.time.delay(int(thrust_time * 250))\n \n# Final status and result message\nset_status(time, height, throttle, thrust * DPS_thrust, speed, fuel_supply, accn)\n\nif abs(speed) < max_impact_speed:\n print_pg(\"Landed!\", 360, 84, white)\nelse:\n print_pg(\"Oops!!\", 360, 84, red)\n\ndisplay_lander(lander, height)\n\n# Wait for 5s before clearing display\npygame.time.delay(5000)\n","sub_path":"Moon_Lander_Master.py","file_name":"Moon_Lander_Master.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"506134993","text":"#!/usr/bin/env python3\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\n\r\ndef zeroing_edge(array,POI):\r\n array_h = array.shape[0]\r\n array_w = array.shape[1]\r\n mask = np.zeros((array_h, array_w))\r\n mask[POI:array_h-POI, POI:array_w-POI] = 1\r\n array[mask == 0] = 0\r\n return array\r\n\r\n\r\ndef remove_outliers(array):\r\n array_eval = array[array!=0]\r\n median = np.median(array_eval)\r\n std = np.std(array_eval)\r\n 
distance_from_median = abs(array-median)\r\n    max_dev = 1\r\n    not_outliers = distance_from_median < max_dev * std\r\n    # remove outliers\r\n    clean_array = np.copy(array)\r\n    clean_array[not_outliers == 0] = 0\r\n    return clean_array\r\n\r\ndef is_perpendicular(depth_frame):\r\n    w = 50\r\n    x1 = int(depth_frame.shape[1] / 5)\r\n    y1 = int(depth_frame.shape[0]/ 5)\r\n    x2 = 4 * x1\r\n    y2 = 4 * y1\r\n\r\n    d1 = np.mean(depth_frame[y1:y1 + w, x1:x1 + w])\r\n    d2 = np.mean(depth_frame[y1:y1 + w, x2 - w:x2])\r\n    d3 = np.mean(depth_frame[y2 - w:y2, x1:x1 + w])\r\n    d4 = np.mean(depth_frame[y2 - w:y2, x2 - w:x2])  # bottom-right corner\r\n\r\n    max_val = np.max([d1, d2, d3, d4])\r\n    min_val = np.min([d1, d2, d3, d4])\r\n    diff = np.absolute(max_val - min_val)\r\n\r\n    # print('d1: ', d1)\r\n    # print('d2: ', d2)\r\n    # print('d3: ', d3)\r\n    # print('d4: ', d4)\r\n    print('diff: ', diff)\r\n\r\n    if diff <= 50:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndepth = Image.open(\"image2-depth.png\")\r\ndepth = np.array(depth)\r\nimage = cv2.imread(\"image2.png\")\r\n\r\nx = 200\r\ny = 102\r\nh = 192\r\nw = 138\r\n\r\n# mask\r\nmask = np.zeros((image.shape[0], image.shape[1]))\r\nmask[y:y+h, x:x+w] = 1\r\ninv_mask = 1-mask\r\n\r\n# calculate base\r\nbase = np.copy(depth)\r\nbase = zeroing_edge(base, 5)\r\ncheck = remove_outliers(base)\r\ncheck[mask == 1] = 0\r\nz0 = np.median(check[check!=0])\r\nprint(z0)\r\n\r\nif is_perpendicular(check):\r\n    # calculate zn\r\n    item = np.copy(depth)\r\n    item[mask == 0] = 0\r\n    dz = item[item!=0]-z0\r\n    dz = abs(dz[dz<0]) #mm\r\n    print(np.mean(dz))\r\n    print(np.sum(dz))\r\n\r\ncrop = image\r\ncrop = zeroing_edge(crop,30)\r\n#crop[mask == 0] = 0\r\n\r\ncv2.imshow(\"black\", mask)\r\ncv2.imshow(\"white\", inv_mask)\r\ncv2.imshow(\"window\", crop)\r\ncv2.imshow(\"cropped\",image[y:y+h, x:x+w])\r\n\r\ncv2.waitKey(0)","sub_path":"test-array.py","file_name":"test-array.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"148245298","text":"from collections import defaultdict\n\ntotal = 0\ndic2 = defaultdict(int)\ndic3 = defaultdict(set)\n\ndef solve(inp):\n    dic = defaultdict(set)\n    for line in open(inp):\n        line = line.strip().split(\")\")\n        dic[line[0]].add(line[1])\n    traverse(dic, dic[\"COM\"], 1)\n    print(\"Part 1:\", total)\n    traverse2(dic, dic[\"COM\"], 1, {\"COM\"})\n    ft = True\n    mini = (\"\", {})\n    for k, v in dic3.items():\n        if ft and \"SAN\" in v and \"YOU\" in v:\n            mini = (k, v)\n            ft = False\n        elif \"SAN\" in v and \"YOU\" in v:\n            if len(v) < len(mini[1]):\n                mini = (k, v)\n    san = bfs(mini[0], \"SAN\", dic)\n    you = bfs(mini[0], \"YOU\", dic)\n    print(\"Part 2:\", san + you - 2)\n\n\ndef bfs(start, end, dic):\n    visited = defaultdict(int)\n    queue = []\n    visited[start] = 1\n    queue.append((start, 0))\n    while len(queue) != 0:\n        current = queue[0]\n        queue = queue[1:]\n        if current[0] == end:\n            return visited[end]\n        for e in dic[current[0]]:\n            if visited[e] == 0:\n                queue += [(e, current[1] + 1)]\n                visited[e] = current[1] + 1\n\ndef traverse(dic, curr, count):\n    global total\n    for o in curr:\n        dic2[o] += count\n        total += count\n        if o not in dic.keys():\n            pass\n        else:\n            traverse(dic, dic[o], count + 1)\n\ndef traverse2(dic, curr, count, sets):\n    global total\n    for o in curr:\n        for s in sets:\n            dic3[s].add(o)\n        dic2[o] += count\n        total += count\n        if o not in dic.keys():\n            pass\n        else:\n            temp = sets.copy()\n            temp.add(o)\n            traverse2(dic, dic[o], count + 1, temp)\n\nif __name__ == '__main__':\n    inp = \"input.txt\"\n    
solve(inp)","sub_path":"2019/Day6/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233111354","text":"import pygame\nimport sys\nfrom pygame import *\nfrom model.entity.alive.Fire import Fire\nfrom model.entity.alive.enemies.Bowser import Bowser\nfrom model.entity.alive.enemies.Monster import Monster\nfrom model.entity.alive.enemies.BlueFlower import BlueFlower\nfrom model.entity.alive.enemies.Slub import Slub\nfrom model.entity.alive.heroes.Hero import Hero\nfrom model.entity.dead.BonusBlock import BonusBlock\nfrom model.entity.dead.Flower import Flower\nfrom model.entity.dead.Mushroom import Mushroom\nfrom model.entity.dead.Bonus import Bonus\nfrom model.entity.dead.Exit import Exit\nfrom model.entity.dead.SimpleBlock import SimpleBlock\n\n\nclass LogicHero:\n\n @staticmethod\n def check_press_key(heroes, entities, monsters):\n for e in pygame.event.get():\n for hero in heroes.lst:\n if e.type == QUIT:\n sys.exit()\n if e.type == KEYDOWN:\n if e.key == hero.key_fire and hero.fire_ability:\n LogicHero.create_fire(hero, entities, monsters)\n if e.key == hero.key_left:\n hero.left = True\n hero.side = True\n if e.key == hero.key_right:\n hero.right = True\n hero.side = False\n if e.key == hero.key_up:\n hero.up = True\n if e.key == K_ESCAPE:\n return True\n if e.type == KEYUP:\n if e.key == hero.key_right:\n hero.right = False\n if e.key == hero.key_left:\n hero.left = False\n if e.key == hero.key_up:\n hero.up = False\n\n @staticmethod\n def update_heroes(heroes, blocks, entities, monsters, RESULT):\n if not heroes.not_exist():\n\n for block in blocks:\n if isinstance(block, BonusBlock):\n if block.change_ability:\n block.change_image()\n if isinstance(block, Bonus):\n if block.amount_images_appearing != 0:\n block.appear_bonus()\n else:\n block.exist_bonus()\n\n for hero in heroes.lst:\n hero.update()\n\n hero.rect.y += hero.yvel\n if hero.rect.y >= 640:\n hero.lifes -= 1\n heroes.killed(hero, entities)\n heroes.move_to_start()\n break\n\n status_hero = LogicHero.contact_with_blocks(heroes, hero, 0, hero.yvel, blocks, entities, monsters)\n if status_hero == True:\n RESULT[0] = True\n return\n\n hero.rect.x += hero.xvel\n status_hero = LogicHero.contact_with_blocks(heroes, hero, hero.xvel, 0, blocks, entities, monsters)\n\n if status_hero == True:\n RESULT[0] = True\n return\n # print(time.time())\n\n @staticmethod\n def contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters):\n if not heroes.not_exist():\n\n for block in blocks:\n hero.update_bonus()\n if sprite.collide_rect(hero, block):\n if xvel > 0:\n hero.rect.right = block.rect.left\n if isinstance(block, Monster) and block.alive:\n hero.lifes -= 1\n heroes.killed(hero, entities)\n return False\n elif isinstance(block, Bonus):\n LogicHero.contact_bonus(hero, block, blocks, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n elif xvel < 0:\n hero.rect.left = block.rect.right\n if isinstance(block, Monster) and block.alive:\n hero.lifes -= 1\n heroes.killed(hero, entities)\n return False\n elif isinstance(block, Bonus):\n LogicHero.contact_bonus(hero, block, blocks, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n if yvel > 0:\n hero.rect.bottom = block.rect.top\n hero.on_ground = True\n hero.yvel = 0\n if (isinstance(block, Bowser) or isinstance(block, BlueFlower)) and block.alive:\n 
hero.lifes -= 1\n heroes.killed(hero, entities)\n return False\n elif isinstance(block, Slub) and block.alive:\n block.alive = False\n block.lifes -=hero.power\n block.killed(monsters, entities, blocks)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n elif isinstance(block, Bonus):\n LogicHero.contact_bonus(hero, block, blocks, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n elif yvel < 0:\n hero.rect.top = block.rect.bottom\n hero.yvel = 0\n if isinstance(block, Monster) and block.alive:\n hero.lifes -= 1\n heroes.killed(hero, entities)\n return False\n elif isinstance(block, SimpleBlock):\n block.lifes -=hero.power\n block.killed(blocks, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n elif isinstance(block, BonusBlock) and block.activity:\n LogicHero.contact_bonus_blocks(blocks,block, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n LogicHero.contact_bonus(hero, block, blocks, entities)\n LogicHero.contact_with_blocks(heroes, hero, xvel, yvel, blocks, entities, monsters)\n return\n if isinstance(block, Exit):\n time.wait(200)\n return True\n return False\n\n @staticmethod\n def contact_bonus(hero, block, blocks, entities):\n if isinstance(block, Bonus):\n if isinstance(block, Mushroom):\n hero.time_mushroom_activity += block.time_activity\n hero.fire_ability = True\n if isinstance(block, Flower):\n hero.get_super_jump(block.flower_value)\n hero.time_flower_activity += block.time_activity\n hero.flower_ability = True\n entities.remove(block)\n blocks.remove(block)\n\n @staticmethod\n def contact_bonus_blocks(blocks, block, entities):\n if isinstance(block, BonusBlock):\n block.make_simple()\n bonus = None\n if block.type_bonus == 1:\n bonus = Flower(x=block.x + 3, y=block.y - 59, width=26, height=27,\n start_image='data/flower_1.png',\n images_appearing=['data/flower_27.png', 'data/flower_25.png', 'data/flower_23.png',\n 'data/flower_21.png', 'data/flower_19.png', 'data/flower_17.png',\n 'data/flower_15.png', 'data/flower_13.png', 'data/flower_11.png',\n 'data/flower_9.png', 'data/flower_7.png', 'data/flower_5.png',\n 'data/flower_3.png', 'data/flower_1.png'],\n images_existing=['data/flower_exist_day_1.png', 'data/flower_exist_day_2.png',\n 'data/flower_exist_day_3.png', 'data/flower_exist_day_4.png'],\n koef=30, time_activity=200000, flower_value=1.2, change_ability=True)\n if block.type_bonus == 2:\n bonus = Mushroom(x=block.x + 3, y=block.y - 59, width=26, height=27,\n start_image='data/mushroom_1.png',\n images_appearing=['data/mushroom_27.png', 'data/mushroom_25.png', 'data/mushroom_23.png',\n 'data/mushroom_21.png', 'data/mushroom_19.png', 'data/mushroom_17.png',\n 'data/mushroom_15.png', 'data/mushroom_13.png', 'data/mushroom_11.png',\n 'data/mushroom_9.png', 'data/mushroom_7.png', 'data/mushroom_5.png',\n 'data/mushroom_3.png', 'data/mushroom_1.png'],\n images_existing=['data/mushroom_exist_day_1.png',\n 'data/mushroom_exist_day_2.png'],\n koef=30, time_activity=200000, change_ability=True)\n if bonus:\n blocks.append(bonus)\n entities.add(bonus)\n\n @staticmethod\n def create_fire(hero, entities, monsters):\n if isinstance(hero, Hero):\n if hero.side:\n fireball = Fire(x=hero.rect.x-10, y=hero.rect.y+(hero.height//2), width=7, height=6,\n image='data/fireball.png', side=hero.side, power=1, xvel=0, yvel=0, gravity=1,\n move_speed=5, left=True, right=False, 
up=False, on_ground=False, max_way=96, alive=True)\n else:\n fireball = Fire(x=hero.rect.x+hero.width, y=hero.rect.y + (hero.height // 2), width=7, height=6,\n image='data/fireball.png', side=hero.side, power=1, xvel=0, yvel=0, gravity=1,\n move_speed=5, left=True, right=False, up=False, on_ground=False, max_way=96, alive=True)\n entities.add(fireball)\n monsters.add(fireball)\n","sub_path":"model/logic/LogicHero.py","file_name":"LogicHero.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"338701886","text":"import numpy as np\nimport cv2\nimport imageio\nfrom types import SimpleNamespace\nfrom PIL import ImageFont, ImageDraw, Image \n\ndef add_sepia(img, k=1):\n matrix = [[0.272 - 0.349 * (1 - k), 0.534 - 0.534 *\n (1 - k), 0.131 + 0.869 * (1 - k)],\n [0.393 + 0.607 * (1 - k), 0.769 - 0.769 *\n (1 - k), 0.189 - 0.189 * (1 - k)],\n [0.349 - 0.349 * (1 - k), 0.686 + 0.314 *\n (1 - k), 0.168 - 0.168 * (1 - k)]]\n\n filt = cv2.transform(img, np.matrix(matrix))\n filt[np.where(filt > 255)] = 255\n return filt\n\n\ndef draw_text_scaled_to_rect(img, target_text, target_rect, font_name, thickness, color):\n \n # Use a truetype font \n font = ImageFont.truetype(font_name, 10)\n ascent, descent = font.getmetrics()\n (width, baseline), (offset_x, offset_y) = font.font.getsize(target_text) \n\n text_height = ascent + descent\n text_width = width\n target_rect_x = target_rect[0]\n target_rect_y = target_rect[1]\n target_rect_width = target_rect[2]\n target_rect_height = target_rect[3]\n scale_x = float(target_rect_width) / float(text_width)\n scale_y = float(target_rect_height) / float(text_height)\n scale = min(scale_x, scale_y)\n margin_x = 0 if scale == scale_x else int(target_rect_width *\n (scale_x - scale) / scale_x*0.5)\n margin_y = 0 if scale == scale_y else int(target_rect_height *\n (scale_y - scale) / scale_y*0.5)\n \n pil_im = Image.fromarray(img) \n draw = ImageDraw.Draw(pil_im)\n font = ImageFont.truetype(font_name, int(10*scale))\n # Draw the text \n draw.text((target_rect_x + margin_x, target_rect_y +\n target_rect_height - margin_y), target_text,fill='black', font=font) \n result_img = np.array(pil_im)\n return result_img\n\n\ndef make_gif(params_paths, params_text, params_transform, scale_factor=3, scenario=2, show_result=False):\n ns_paths = SimpleNamespace(**params_paths)\n ns_text = SimpleNamespace(**params_text)\n ns_transforms = SimpleNamespace(**params_transform)\n\n print(ns_paths.templates_folder)\n if scenario == 1:\n template_name = 'template4.jpg'\n position = [230, 430]\n desired_size = [580, 380]\n elif scenario == 2:\n template_name = 'template3_cropped.jpg'\n position = [425, 165]\n desired_size = [470, 420]\n text_box_params = {'ltp': [236, 33],\n 'rbp': [421, 754],\n 'color': [216, 226, 234]}\n\n position = [int(x/scale_factor) for x in position]\n desired_size = [int(x/scale_factor) for x in desired_size]\n text_box_params['ltp'] = [int(x/scale_factor)\n for x in text_box_params['ltp']]\n text_box_params['rbp'] = [int(x/scale_factor)\n for x in text_box_params['rbp']]\n if show_result:\n cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL)\n cv2.namedWindow(\"template\", cv2.WINDOW_NORMAL)\n\n # Load an color image in grayscale\n template = cv2.imread('{}{}'.format(ns_paths.templates_folder,\n template_name), cv2.IMREAD_COLOR)\n\n template = cv2.resize(template, None, fx=1/scale_factor, fy=1/scale_factor)\n\n # Add text on template\n text_box_ltp = text_box_params['ltp']\n 
text_box_rbp = text_box_params['rbp']\n    template[text_box_ltp[0]:text_box_rbp[0],\n             text_box_ltp[1]:text_box_rbp[1], :] = text_box_params['color']\n\n    target_rect_line1 = [text_box_ltp[1], text_box_ltp[0]-int((text_box_rbp[0]-text_box_ltp[0])/2),\n                         text_box_rbp[1]-text_box_ltp[1],\n                         int((text_box_rbp[0]-text_box_ltp[0])/2)]\n    target_rect_line2 = [text_box_ltp[1], text_box_ltp[0],\n                         text_box_rbp[1]-text_box_ltp[1],\n                         int((text_box_rbp[0]-text_box_ltp[0])/2)]\n\n    template = draw_text_scaled_to_rect(template, ns_text.headline_text, target_rect_line1,\n                                        ns_paths.fonts_folder + ns_text.font, ns_text.thickness_line_1, ns_text.color)\n    template = draw_text_scaled_to_rect(template, ns_text.sub_headline_text, target_rect_line2,\n                                        ns_paths.fonts_folder + ns_text.font, ns_text.thickness_line_2, ns_text.color)\n    if show_result:\n        cv2.imshow('template', template)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n    # print(template.shape)\n    cap = cv2.VideoCapture('{}{}'.format(ns_paths.animations_folder,\n                                         ns_paths.animation_name))\n\n    # Check if camera opened successfully\n    if (cap.isOpened() == False):\n        print(\"Error opening video stream or file\")\n    else:\n        anim_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n        anim_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n    t_rows, t_cols, t_channels = template.shape\n    # Read until video is completed\n    with imageio.get_writer('{}{}'.format(ns_paths.gifs_folder,\n                                          ns_paths.gif_name), mode='I', fps=25) as writer:\n        angle_curr = ns_transforms.angle_start\n        scale_curr = ns_transforms.scale_start\n        angle_direction = 1\n        scale_direction = 1\n        while(cap.isOpened()):\n            ret, frame = cap.read()\n            if ret == True:\n                angle_curr %= 360\n                anim_frame = cv2.resize(\n                    frame, tuple(desired_size))\n                if ns_transforms.sepia:\n                    anim_frame = add_sepia(\n                        anim_frame, ns_transforms.sepia_scale)\n                anim_shape = anim_frame.shape\n                anim_h, anim_w = anim_shape[1], anim_shape[0]\n                template[position[0]:position[0]+anim_w,\n                         position[1]: position[1]+anim_h] = anim_frame\n\n                output = cv2.cvtColor(template, cv2.COLOR_BGR2RGB)\n                if ns_transforms.rotate or ns_transforms.scale or ns_transforms.skew:\n\n                    if angle_curr < ns_transforms.angle_stop:\n                        angle_curr += (angle_direction * ns_transforms.angle_step)\n                    elif ns_transforms.angle_reverse:\n                        angle_direction *= -1\n                        angle_curr += (angle_direction * ns_transforms.angle_step)\n                    if scale_curr < ns_transforms.scale_stop:\n                        scale_curr += (scale_direction * ns_transforms.scale_step)\n                    elif ns_transforms.scale_reverse:\n                        scale_direction *= -1\n                        scale_curr += (scale_direction * ns_transforms.scale_step)\n                    angle_t = angle_curr if ns_transforms.rotate else 0\n                    scale_t = scale_curr if ns_transforms.scale else 1\n                    R = cv2.getRotationMatrix2D(\n                        (int(t_cols/2), int(t_rows/2)\n                         ), angle_t,\n                        scale_t)\n                    if ns_transforms.skew:\n                        pts1 = np.float32([[0,0],[0,t_cols],[t_rows,0],[t_rows,t_cols]])\n                        # skew offset follows the current sweep angle\n                        pts2 = np.float32([[0,0],[0,t_cols-angle_curr],[t_rows,0],[t_rows,t_cols]])\n                        # M = cv2.getPerspectiveTransform(pts1,pts2)\n                        # R = np.matrix(M) * np.matrix(R)\n                    output = cv2.warpAffine(output, R, (t_cols, t_rows))\n                if show_result:\n                    cv2.imshow('output', output)\n                writer.append_data(output)\n\n                # Press Q to exit\n                if cv2.waitKey(25) & 0xFF == ord('q'):\n                    break\n            else:\n                break\n    cap.release()\n\n\ndef main():\n    # Image transformation\n    params_transform = {\n        'rotate': True,\n        'scale': True,\n        'skew': False,\n        'sepia': False,\n        'sepia_scale': 0.4,\n        'angle_start': 0,\n        'angle_stop': 35,\n        'angle_step': 0.2,\n        'angle_reverse': True,\n        'scale_start': 0.7,\n        'scale_stop': 1.2,\n        'scale_step': 
0.005,\n 'scale_reverse': True\n }\n\n # Paths\n params_paths = {\n 'templates_folder': 'templates/',\n 'animations_folder': 'animations/',\n 'gifs_folder': 'results/gifs/',\n 'animation_name': 'vlad_2.mp4',\n 'gif_name': 'newspaper_vlad2.gif',\n 'fonts_folder': 'fonts/'\n }\n\n # Text lines params\n params_text = {\n 'thickness_line_1': 2,\n 'thickness_line_2': -1,\n 'color': (0, 0, 0),\n 'font': 'Mugglenews.ttf',\n 'headline_text': 'SENSATION!',\n 'sub_headline_text': 'EVIL PANDA MADE CRAZY THING AGAIN!'\n }\n\n # Select one of the scenarios\n scenario = 2\n scale_factor = 3\n show_result = True\n\n make_gif(params_paths, params_text, params_transform,\n scale_factor=scale_factor, scenario=scenario, show_result=show_result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"add_video_in_template.py","file_name":"add_video_in_template.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"532396326","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom io import BytesIO\nimport base64\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom datetime import datetime\nfrom pytz import timezone\nimport time\nfrom django.shortcuts import render\n\n\ndef historical(request):\n '''\n Below is the scrapper of historical data which contains brief market information for the past year\n '''\n\n result = ''\n if 'company' in request.GET:\n stock_abbr = request.GET.get('company')\n driver = webdriver.Chrome('c:\\\\Program Files\\\\chromedriver.exe')\n driver.get('https://finance.yahoo.com/quote/' + stock_abbr + '/history?p=' + stock_abbr)\n\n for i in range(0, 3):\n driver.execute_script(\"window.scrollBy(0,5000)\")\n time.sleep(2)\n\n html_page = driver.page_source\n driver.quit()\n\n historical_soup = BeautifulSoup(html_page, 'html.parser')\n historical_table = historical_soup.find(class_='W(100%) M(0)')\n dates_past_year = historical_table.find_all(class_='Py(10px) Ta(start) Pend(10px)')\n stock_price_volume = historical_table.find_all(class_='Py(10px) Pstart(10px)')\n\n dates_list = [date.get_text() for date in dates_past_year]\n stock_list = [stock.get_text() for stock in stock_price_volume]\n\n # Generates a table with dates as keys and stock price info as values stored in lists\n hist_data_table = {}\n num_list_insert = 0\n tmp_lst = []\n for i in range(len(stock_list)):\n if i % 6 == 0:\n tmp_val = stock_list[i].replace(',', '')\n tmp_dic = float(tmp_val)\n tmp_lst.append(tmp_dic)\n elif i % 6 == 1:\n tmp_val = stock_list[i].replace(',', '')\n tmp_dic = float(tmp_val)\n tmp_lst.append(tmp_dic)\n elif i % 6 == 2:\n tmp_val = stock_list[i].replace(',', '')\n tmp_dic = float(tmp_val)\n tmp_lst.append(tmp_dic)\n elif i % 6 == 3:\n tmp_val = stock_list[i].replace(',', '')\n tmp_dic = float(tmp_val)\n tmp_lst.append(tmp_dic)\n elif i % 6 == 4:\n tmp_val = stock_list[i].replace(',', '')\n tmp_dic = float(tmp_val)\n tmp_lst.append(tmp_dic)\n i += 1\n hist_data_table[dates_list[num_list_insert]] = tmp_lst\n num_list_insert += 1\n tmp_lst = []\n\n hist_data_frame = pd.DataFrame.from_dict(hist_data_table)\n hist_data_frame = hist_data_frame.astype(float)\n hist_data_frame.index = ['Open', 'High', 'Low', 'Close', 'Adj Close']\n hist_data_frame = hist_data_frame.T\n hist_data_frame = hist_data_frame.iloc[::-1]\n hist_data_frame.plot(grid='True')\n result = hist_data_frame.to_html()\n\n hist_data_frame['5-Day Moving Average'] = hist_data_frame.iloc[:, 
4].rolling(window=5).mean()\n hist_data_frame['10-Day Moving Average'] = hist_data_frame.iloc[:, 4].rolling(window=10).mean()\n hist_data_frame['20-Day Moving Average'] = hist_data_frame.iloc[:, 4].rolling(window=20).mean()\n hist_data_frame['50-Day Moving Average'] = hist_data_frame.iloc[:, 4].rolling(window=50).mean()\n hist_data_frame['100-Day Moving Average'] = hist_data_frame.iloc[:, 4].rolling(window=100).mean()\n print(hist_data_frame)\n\n closingPrices = hist_data_frame.iloc[:, 4]\n\n gainPrices = []\n lossPrices = []\n j = 0\n while j < len(closingPrices):\n if j == 0:\n gainPrices.append(0)\n lossPrices.append(0)\n else:\n if (closingPrices[j] - closingPrices[j - 1]) > 0:\n gainPrices.append(closingPrices[j] - closingPrices[j - 1])\n lossPrices.append(0)\n else:\n gainPrices.append(0)\n lossPrices.append(closingPrices[j] - closingPrices[j - 1])\n j += 1\n\n gainsLosses = pd.DataFrame({\n 'Daily Gains': gainPrices,\n 'Daily Losses': lossPrices})\n gainsLosses.index = hist_data_frame.index\n gainsLosses = gainsLosses.astype(float)\n gainsLosses.plot(grid='True')\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n gainsLossesImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/gainsLosses.png')\n gainsLosses['gainsAvg'] = gainsLosses.iloc[:, 0].rolling(window=14).mean()\n gainsLosses['lossesAvg'] = gainsLosses.iloc[:, 1].rolling(window=14).mean().abs()\n gainsLosses['RS'] = gainsLosses['gainsAvg'] / gainsLosses['lossesAvg']\n gainsLosses['RSI'] = 100 - (100 / (1 + gainsLosses['RS']))\n RSI_graph = gainsLosses[['RSI']]\n\n ax = RSI_graph.plot(grid='True')\n ax.axhline(y=70, color='r', linestyle='--', lw=2)\n ax.axhline(y=30, color='g', linestyle='--', lw=2)\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n rsiGraphImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n # plt.savefig('../../static/graphImages/RSI_graph.png')\n # print(gainsLosses)\n\n five_day = hist_data_frame[['Adj Close', '5-Day Moving Average']]\n ten_day = hist_data_frame[['Adj Close', '10-Day Moving Average']]\n twenty_day = hist_data_frame[['Adj Close', '20-Day Moving Average']]\n fifty_day = hist_data_frame[['Adj Close', '50-Day Moving Average']]\n hundred_day = hist_data_frame[['Adj Close', '100-Day Moving Average']]\n short_long = hist_data_frame[['Adj Close', '20-Day Moving Average', '50-Day Moving Average', '100-Day Moving Average']]\n moving_averages = hist_data_frame[\n ['5-Day Moving Average', '10-Day Moving Average', '20-Day Moving Average', '50-Day Moving Average',\n '100-Day Moving Average']]\n five_day.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n fiveDayImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/five_day.png')\n ten_day.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n tenDayImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/ten_day.png')\n twenty_day.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n twentyDayImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/twenty_day.png')\n fifty_day.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n fiftyDayImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n 
buf.close()\n\n #plt.savefig('../../static/graphImages/fifty_day.png')\n hundred_day.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n hundredDayImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/hundred_day.png')\n short_long.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n shortLongImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/short_long.png')\n moving_averages.plot()\n\n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=300)\n movingAveragesImage = base64.b64encode(buf.getvalue()).decode('utf-8').replace('\\n', '')\n buf.close()\n\n #plt.savefig('../../static/graphImages/moving_averages.png')\n # plt.show()\n\n return render(request, 'main/historical.html', {\n 'result': result,\n 'gainsLossesImage': gainsLossesImage,\n 'rsiGraphImage': rsiGraphImage,\n 'fiveDayImage': fiveDayImage,\n 'tenDayImage': tenDayImage,\n 'twentyDayImage': twentyDayImage,\n 'fiftyDayImage': fiftyDayImage,\n 'hundredDayImage': hundredDayImage,\n 'shortLongImage': shortLongImage,\n 'movingAveragesImage': movingAveragesImage\n })\n else:\n return render(request, 'main/historical.html')\n","sub_path":"WebScraping/main/backend/historical_data.py","file_name":"historical_data.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215165191","text":"# Django native imports\nfrom django import forms\nfrom django.contrib import admin\nfrom django.forms import (ModelForm, ValidationError, CharField)\nfrom django.utils.translation import ugettext_lazy as _\n\n# Import from our apps\nfrom server.models import ServerHardware\n\n#we need to add form validation here\nclass ServerHardwareForm(ModelForm):\n class Meta:\n model = ServerHardware\n exclude = ['created_by']\n\n def __init__(self, *args, **kwargs):\n super(ServerHardwareForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if isinstance(visible.field, forms.BooleanField):\n visible.field.widget.attrs['class'] = 'icheckbox_square-green'\n elif visible.name == \"decomissioned_date\":\n visible.field.widget.attrs['class'] = 'datetime-input form-control'\n else:\n visible.field.widget.attrs['class'] = 'form-control'","sub_path":"server/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"75910989","text":"def main():\r\n import sys\r\n read = sys.stdin.read\r\n numv, *indata = map(int, read().split())\r\n offset = 0\r\n LCALIM = 18 # max height < 2**18\r\n\r\n rootnode = 0\r\n parentlist = [None] * numv\r\n distlist = [-1] * numv\r\n e = [[] for _ in range(numv)]\r\n dt = []\r\n for i in range(LCALIM):\r\n l = [-1] * numv\r\n dt.append(l)\r\n\r\n # make edge\r\n for i in range(numv):\r\n k = indata[offset]\r\n offset += 1\r\n for j in indata[offset:offset + k]:\r\n e[i].append(j)\r\n e[j].append(i)\r\n offset += k\r\n\r\n # calc depth and parent node\r\n from collections import deque\r\n q = deque([])\r\n q.append( (rootnode, -1, 0) )\r\n while len(q) != 0:\r\n node, parent, d = q.popleft()\r\n parentlist[node] = parent\r\n distlist[node] = d\r\n for nextnode in e[node]:\r\n if parentlist[nextnode] is not None: continue # parent\r\n q.append( (nextnode, node, d + 1) )\r\n\r\n # doubling calc\r\n for i in 
range(numv):\r\n dt[0][i] = parentlist[i]\r\n for i in range(1,LCALIM):\r\n for curnode in range(numv):\r\n p1 = dt[i-1][curnode]\r\n p2 = dt[i-1][p1] if p1 != -1 else -1\r\n dt[i][curnode] = p2\r\n\r\n def ancestor(node, n):\r\n i = 0\r\n cur = node\r\n while n != 0:\r\n x = 2 ** i\r\n if (n & x) != 0: # this bit is 1\r\n n ^= x # this bit is off\r\n cur = dt[i][cur]\r\n i += 1\r\n return cur\r\n\r\n def lca(nodeu, nodev):\r\n if nodeu == nodev: return nodeu\r\n tu = nodeu\r\n tv = nodev\r\n for k in range(LCALIM-1, -1, -1):\r\n mu = ancestor(tu, 2**k)\r\n mv = ancestor(tv, 2**k)\r\n if mu != mv:\r\n tu = mu\r\n tv = mv\r\n\r\n #assert ancestor(tu, 1) == ancestor(tv, 1)\r\n return ancestor(tu, 1)\r\n\r\n q = indata[offset]\r\n offset += 1\r\n for _ in range(q):\r\n u, v = indata[offset:offset + 2]\r\n offset += 2\r\n # u < v\r\n if distlist[u] > distlist[v]: u, v = v, u\r\n d = distlist[v] - distlist[u]\r\n v = ancestor(v, d)\r\n print(lca(u, v))\r\n\r\nmain()","sub_path":"atcoder/lib/graph/lcaOld.py","file_name":"lcaOld.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"115357838","text":"import csv\nfrom scripts.ids.identify_bcd import *\nfrom scripts.ids.identify_plasmid import *\nfrom scripts.getters.get_wrike_link import *\n\n\ndef put_one_task_into_table(task_dir):\n with open('/home/catr1ne55/dp_tasks_upd.csv', 'a') as csvfile:\n # fieldnames = ['idtask', 'BCD_idBCD', 'plasmid_idplasmid', 'what_is_the_rawdata', 'wrike', 'comment']\n # writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # writer.writeheader()\n # writer.writerow({'name': ref, 'type': str(type)})\n raw_dir = os.path.join(task_dir, \"rawdata\")\n idtask = None\n files = os.listdir(raw_dir)\n BCD_idBCD = None\n if files != []:\n BCD_idBCD = identify_bcd(task_dir)\n plasmid_idplasmid = None\n plasmids = identify_plasmid(task_dir)\n what_is_the_rawdata = 'non-identified'\n wrike = get_wrike_link(task_dir)\n comment = 'no comment'\n if plasmids == []:\n plasmid_idplasmid = 0\n csv.writer(csvfile).writerow(\n [idtask, BCD_idBCD, plasmid_idplasmid, what_is_the_rawdata, wrike, comment, raw_dir])\n else:\n for p in plasmids:\n if p == 'pLL':\n plasmid_idplasmid = 1\n if p == 'pGEM':\n plasmid_idplasmid = 2\n if p == 'pET22':\n plasmid_idplasmid = 3\n if p == 'pBL':\n plasmid_idplasmid = 4\n if p == 'pEE-HC':\n plasmid_idplasmid = 5\n if p == 'pEE-KC':\n plasmid_idplasmid = 6\n if p == 'pEE-FC':\n plasmid_idplasmid = 7\n if p == 'pEE-LC':\n plasmid_idplasmid = 8\n if p == 'pCSK':\n plasmid_idplasmid = 9\n if p == 'pEE-22':\n plasmid_idplasmid = 10\n if p == 'pH5':\n plasmid_idplasmid = 11\n if p == 'pH6':\n plasmid_idplasmid = 12\n if p == 'pSCK':\n plasmid_idplasmid = 13\n # with open('/home/catr1ne55/dp-storage/plasmid.csv', 'a') as plasmid_file:\n # plasmid_idplasmid = None\n # csv.writer(plasmid_file).writerow([raw_dir, BCD_idBCD, identify_plasmid(directory)])\n # plasmid_file.close()\n # print(\"Input what type of data it is:\")\n # what_is_the_rawdata = 'non-identified' #input()\n # wrike = get_wrike_link(directory)\n # print(\"Input comment for this task:\")\n # comment = 'no comment' #input()\n csv.writer(csvfile).writerow([idtask, BCD_idBCD, plasmid_idplasmid, what_is_the_rawdata, wrike, comment, raw_dir])\n csvfile.close()\n\n\ndef put_all(storage):\n bcds = os.listdir(storage)\n for bcd in bcds:\n if bcd.startswith(\"BCD\"):\n project = os.path.join(storage, bcd)\n tasks = os.listdir(project)\n for t in tasks:\n 
task = os.path.join(project, t)\n                put_one_task_into_table(task)\n    print(\"Tasks were put into table\")\n\n\nput_all(\"/opt/dp-storage/\")","sub_path":"tables_filllers/put_task.py","file_name":"put_task.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"276365568","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom wemake_python_styleguide.visitors.wrong_name import (\n    BAD_MODULE_METADATA_VARIABLES,\n    WrongModuleMetadataViolation,\n    WrongModuleMetadataVisitor,\n)\n\nmodule_test = \"\"\"\n{0} = 'Nikita'\n\"\"\"\n\nnested_test = \"\"\"\nclass ORM:\n    {0} = None\n\"\"\"\n\n\n@pytest.mark.parametrize('bad_name', BAD_MODULE_METADATA_VARIABLES)\n@pytest.mark.parametrize('code', [\n    module_test,\n])\ndef test_wrong_metadata(\n    assert_errors, parse_ast_tree, bad_name, code,\n):\n    \"\"\"Testing that metadata can not have blacklisted names.\"\"\"\n    tree = parse_ast_tree(code.format(bad_name))\n\n    visiter = WrongModuleMetadataVisitor()\n    visiter.visit(tree)\n\n    assert_errors(visiter, [WrongModuleMetadataViolation])\n\n\n@pytest.mark.parametrize('correct_name', ['correct_name', 'xy', '_value'])\n@pytest.mark.parametrize('code', [\n    module_test,\n    nested_test,\n])\ndef test_correct_metadata(\n    assert_errors, parse_ast_tree, code, correct_name,\n):\n    \"\"\"Testing that metadata can have normal names.\"\"\"\n    tree = parse_ast_tree(code.format(correct_name))\n\n    visiter = WrongModuleMetadataVisitor()\n    visiter.visit(tree)\n\n    assert_errors(visiter, [])\n","sub_path":"tests/test_visitors/test_wrong_name/test_module_metadata.py","file_name":"test_module_metadata.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"579252271","text":"import keras\nfrom keras import layers\nimport numpy as np\n\nlatent_dim = 32\nheight = 32\nwidth = 32\nchannels = 3\n\ngenerator_input = keras.Input(shape=(latent_dim,))\n\n# Transform the input into a 16x16, 128-channel feature map\nx = layers.Dense(128 * 16 * 16)(generator_input)\nx = layers.LeakyReLU()(x)\nx = layers.Reshape((16, 16, 128))(x)\n\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\n# Upsample to 32x32\nx = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\n# Produce a 32x32 feature map with `channels` channels (the shape of a CIFAR10 image)\nx = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)\n# Instantiate the generator model, mapping an input of shape (latent_dim,) to an image of shape (32, 32, 3)\ngenerator = keras.models.Model(generator_input, x)\ngenerator.summary()\n\n# The GAN discriminator network\ndiscriminator_input = layers.Input(shape=(height, width, channels))\nx = layers.Conv2D(128, 3)(discriminator_input)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Flatten()(x)\n\nx = layers.Dropout(0.4)(x)\n\nx = layers.Dense(1, activation='sigmoid')(x)  # Classification layer\n\n# Instantiate the discriminator model, turning a (32, 32, 3) input into a binary classification decision (real/fake)\ndiscriminator = keras.models.Model(discriminator_input, x)\ndiscriminator.summary()\n\ndiscriminator_optimizer = keras.optimizers.RMSprop(\n    lr=0.0008,\n    clipvalue=1.0,  # Use gradient clipping in the optimizer (limits the range of gradient values)\n    decay=1e-8,  # Use learning-rate decay to stabilize training\n)\n\ndiscriminator.compile(optimizer=discriminator_optimizer,\n                      
loss='binary_crossentropy')\n\ndiscriminator.trainable = False # 将判别器权重设置为不可训练(仅应用于gan模型)\n\ngan_input = keras.Input(shape=(latent_dim,))\ngan_output = discriminator(generator(gan_input))\ngan = keras.models.Model(gan_input, gan_output)\ngan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)\ngan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')\n\nimport os\nfrom keras.preprocessing import image\n\n(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data() # cifar数据集\nx_train = x_train[y_train.flatten() == 6] # 选择青蛙的图像\nx_train = x_train.reshape((x_train.shape[0],) + (height, width, channels)).astype('float32') / 255.\n\niterations = 1000\nbatch_size = 2\nsave_dir = 'frog_dir'\n\nstart = 0\n\nfor step in range(iterations):\n    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))\n    generated_images = generator.predict(random_latent_vectors) # 点-->虚假图像\n\n    stop = start + batch_size\n\n    # 混淆真实图像和虚假图像\n    real_images = x_train[start:stop]\n    combined_images = np.concatenate([generated_images,\n                                      real_images])\n    labels = np.concatenate([np.ones((batch_size, 1)),\n                             np.zeros((batch_size, 1))])\n    labels += 0.05 * np.random.random(labels.shape) # 向标签中添加噪声\n\n    # 训练判别器\n    d_loss = discriminator.train_on_batch(combined_images, labels)\n\n    # 在潜在空间中采样随机点\n    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))\n    # 合并标签,全都是“真实图像”(这是在撒谎)\n    misleading_targets = np.zeros((batch_size, 1))\n\n    # 通过gan模型来训练生成器(此时冻结判别器模型)\n    a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)\n\n    start += batch_size\n    if start > len(x_train) - batch_size:\n        start = 0\n    if step % 2 == 0:\n        gan.save_weights('gan.h5')\n\n        print('discriminator loss:', d_loss)\n        print('adversarial loss:', a_loss)\n\n        img = image.array_to_img(generated_images[0] * 255., scale=False)\n        img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png'))\n\n        img = image.array_to_img(real_images[0] * 255., scale=False)\n        img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png'))","sub_path":"VieML/ML/gan.py","file_name":"gan.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"305724090","text":"import matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rc('axes', edgecolor=[0.8, 0.8, 0.8])\nmatplotlib.rcParams['text.color'] = [0.8, 0.8, 0.8]\nmatplotlib.rcParams['axes.labelcolor'] = [0.8, 0.8, 0.8]\nmatplotlib.rcParams['axes.labelcolor'] = [0.8, 0.8, 0.8]\nmatplotlib.rcParams['xtick.color'] = [0.8, 0.8, 0.8]\nmatplotlib.rcParams['ytick.color'] = [0.8, 0.8, 0.8]\nparams = {'legend.fontsize': 16,\n          'legend.handlelength': 2}\nplt.rcParams.update(params)\n\nimport numpy as np\nimport math\nimport os\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom termcolor import colored\n\nfrom Utils.maths import line_smoother\nfrom Utils.Messaging import send_email_attachments\nfrom Utils.loadsave_funcs import load_yaml\nfrom Utils.decorators import clock\nfrom Plotting.Plotting_utils import *\nfrom Processing.Processing_utils import parallelizer\n\nfrom Config import send_messages, plotting_options\n\n\nclass Plotter():\n    \"\"\" Plots all the variables as a result of tracking+processing a single trial \"\"\"\n    def __init__(self, session):\n        print(colored('\\n Plotting single trials summaries', 'green'))\n\n        plt.ion()\n        if not session is None:\n            # Get a number of useful flags\n            plotting_settings = load_yaml(plotting_options['cfg'])\n            self.exploration_maxmin = 
plotting_settings['exploration max length']\n self.plot_pose = plotting_settings['plot pose']\n self.save_figs = plotting_settings['save figs']\n\n self.session = session\n\n # Define stuff used for extracting data\n self.sel_trial = 0\n self.prestim_frames = 5\n self.poststim_frames = 180\n\n # Define stuff used for plotting\n self.colors = {\n 'Rear': [0.4, 0.8, 0.4],\n 'Lear': [0.4, 0.8, 0.4],\n 'snout': [0.5, 0.6, 0.5],\n 'neck': [0.6, 0.4, 0.4],\n 'body': [0.8, 0.3, 0.3],\n 'tail': [0.4, 0.4, 0.8],\n }\n\n # Get trials data and start main() which takes care of plotting each trial\n self.get_trials_metadata()\n self.main()\n\n########################################################################################################################\n\n def get_trials_metadata(self):\n \"\"\" get metadata about all trials in the session being processed \"\"\"\n tracking = self.session.Tracking\n trials_names = tracking.keys()\n self.trials = {}\n for trial in trials_names:\n self.trials[trial] = tracking[trial]\n\n if 'exploration' not in trial.lower() and 'session' not in trial.lower():\n # Get processing metadta\n try:\n self.processing_metadata = tracking[trial].metadata['Processing info']\n if self.processing_metadata['velocity unit'] == 'all':\n self.processing_metadata['velocity unit'] = 'blpersec'\n except:\n pass\n\n def setup_figure(self):\n \"\"\" Create figure and set up axes for plotting \"\"\"\n self.f = plt.figure(figsize=(35,15), facecolor=[0.1, 0.1, 0.1])\n grid = (6, 9)\n\n # 2d tracking data for the trial plotted\n self.twod_track = plt.subplot2grid(grid, (0, 1), rowspan=3, colspan=2)\n self.twod_track.set(title='Tracking relative to shelter',\n facecolor=[0.2, 0.2, 0.2], xlim=[300, -300], ylim=[-500, 100], xlabel='px', ylabel='px')\n\n # STD tracking 1D\n self.std = plt.subplot2grid(grid, (0, 3), rowspan=1, colspan=2)\n self.std.set(title='STD - X-Y displacement', facecolor=[0.2, 0.2, 0.2], xlabel='frames', ylabel='px')\n\n # DLC tracking 1D\n self.dlc = plt.subplot2grid(grid, (2, 3), rowspan=1, colspan=2, sharex=self.std)\n self.dlc.set(title='STD - X-Y displacement', facecolor=[0.2, 0.2, 0.2], xlabel='frames', ylabel='px')\n\n # STD tracking velocity\n self.std_vel_plot = plt.subplot2grid(grid, (1, 3), rowspan=1, colspan=2, sharex=self.std)\n self.std_vel_plot.set(title='DLC - Velocity [{}]'.format(self.processing_metadata['velocity unit']),\n facecolor=[0.2, 0.2, 0.2], xlabel='frames',\n ylabel=' [{}]'.format(self.processing_metadata['velocity unit']))\n\n # DLC tracking velocity\n self.dlc_vel_plot = plt.subplot2grid(grid, (3, 3), rowspan=1, colspan=2, sharex=self.std)\n self.dlc_vel_plot.set(title='DLC - Velocity [{}]'.format(self.processing_metadata['velocity unit']),\n facecolor=[0.2, 0.2, 0.2], xlabel='frames',\n ylabel=' [{}]'.format(self.processing_metadata['velocity unit']))\n\n # show tracking 2d overimposed on background\n self.tracking_on_maze = plt.subplot2grid(grid, (0, 0), rowspan=1, colspan=1)\n self.tracking_on_maze.set(title='Tracking on maze', facecolor=[0.2, 0.2, 0.2], xlim=[0, 600], ylim=[600, 0],\n xlabel='frames', ylabel='px')\n\n # Plot the angle of the body and head during the trial\n self.absolute_angle_plot = plt.subplot2grid(grid, (0, 5), rowspan=2, colspan=2, projection='polar')\n self.absolute_angle_plot.set(title='Orientation (body green)', theta_zero_location='N', facecolor=[0.2, 0.2, 0.2],\n theta_direction=-1, xlabel='frames', ylabel='deg')\n\n # DLC pose reconstruction\n self.pose = plt.subplot2grid(grid, (4, 0), rowspan=2, 
colspan=9)\n self.pose.set(title='Pose reconstruction', facecolor=[0.2, 0.2, 0.2], ylim=[635, -150], xlabel='frames')\n\n # Plot the pose in 2d space at stim onset\n self.pose_space = plt.subplot2grid(grid, (2, 0), rowspan=1, colspan=1)\n self.pose_space.set(title='Pose at stim', facecolor=[0.2, 0.2, 0.2], xlim=[150, 450], ylim=[650, 350])\n\n # Plot a heatmap of either exploration or whole session tracking data\n self.exploration_plot = plt.subplot2grid(grid, (1, 0), rowspan=1, colspan=1)\n self.exploration_plot.set(title='Eploration', facecolor=[0.2, 0.2, 0.2], xlim=[0, 700], ylim=[600, 0])\n\n # Plot a bunch of variables to try and get at reaction time\n self.react_time_plot = plt.subplot2grid(grid, (3, 0), rowspan=1, colspan=3)\n self.react_time_plot.set(title='Reaction Time', facecolor=[0.2, 0.2, 0.2], xlabel='frames', ylabel='-')\n\n # plot the head angle - body angle\n self.head_rel_angle = plt.subplot2grid(grid, (2, 5), rowspan=2, colspan=2, projection='polar')\n self.head_rel_angle.set(title='Head Relative Angle', theta_zero_location='N', facecolor=[0.2, 0.2, 0.2],\n theta_direction=-1)\n # Plot head and body angular velocity\n self.ang_vel_plot = plt.subplot2grid(grid, (0, 7), rowspan=1, colspan=2)\n self.ang_vel_plot.set(title='Angular velocity', facecolor=[0.2, 0.2, 0.2], ylim=[-50, 50],\n xlabel='frames', ylabel='deg/sec')\n\n self.f.tight_layout()\n\n########################################################################################################################\n\n def get_tr_data_to_plot(self, trial):\n \"\"\" get tracking data for the trial being plotted \"\"\"\n self.stim = int(len(trial.std_tracking) / 2)\n self.wnd = 600\n\n stim = self.stim\n wnd = self.wnd\n\n # STD\n self.std_x_adj = trial.std_tracking['adjusted x'].values[self.stim - wnd:stim + wnd]\n self.std_y_adj = trial.std_tracking['adjusted y'].values[stim - wnd:stim + wnd]\n self.std_x = trial.std_tracking['x'].values[stim - wnd:stim + wnd]\n self.std_y = trial.std_tracking['y'].values[stim - wnd:stim + wnd]\n\n if self.processing_metadata['velocity unit'] in trial.std_tracking.keys():\n self.std_vel = trial.std_tracking['Velocity_{}'.format(self.\n processing_metadata['velocity unit'])].values[stim - wnd:stim + wnd]\n else:\n self.std_vel = trial.std_tracking['Velocity'].values[stim - wnd:stim + wnd]\n\n # DLC\n for bp in trial.dlc_tracking['Posture'].keys():\n if bp == 'body':\n self.dlc_x_adj = trial.dlc_tracking['Posture'][bp]['adjusted x'].values[stim - wnd:stim + wnd]\n self.dlc_y_adj = trial.dlc_tracking['Posture'][bp]['adjusted y'].values[stim - wnd:stim + wnd]\n self.dlc_x = trial.dlc_tracking['Posture'][bp]['x'].values[stim - wnd:stim + wnd]\n self.dlc_y = trial.dlc_tracking['Posture'][bp]['y'].values[stim - wnd:stim + wnd]\n\n if self.processing_metadata['velocity unit'] in trial.dlc_tracking['Posture'][bp].keys():\n self.dlc_vel = trial.dlc_tracking['Posture'][bp]['Velocity_{}'.format(\n self.processing_metadata['velocity unit'])].values[stim - wnd:stim + wnd]\n else:\n self.dlc_vel = trial.dlc_tracking['Posture'][bp]['Velocity'].values[stim - wnd:stim + wnd]\n\n self.dlc_ori = trial.dlc_tracking['Posture'][bp]['Orientation'].values[stim - wnd:stim + wnd]\n self.dlc_head_ori = trial.dlc_tracking['Posture'][bp]['Head angle'].values[stim - wnd:stim + wnd]\n self.dlc_bodylength = trial.dlc_tracking['Posture'][bp]['Body length'].values[stim - wnd:stim + wnd]\n\n self.dlc_head_ang_vel = trial.dlc_tracking['Posture'][bp]['Head ang vel'].values[stim - wnd:stim + wnd]\n self.dlc_body_ang_vel = 
trial.dlc_tracking['Posture'][bp]['Body ang vel'].values[stim - wnd:stim + wnd]\n self.dlc_head_ang_acc = trial.dlc_tracking['Posture'][bp]['Head ang vel'].values[stim - wnd:stim + wnd]\n break\n\n avgbdlength = trial.metadata['avg body length']\n self.dlc_bodylength = np.array([x/avgbdlength for x in self.dlc_bodylength])\n\n # Exploration\n fps = self.session.Metadata.videodata[0]['Frame rate'][0]\n exploration_maxfr = int(self.exploration_maxmin*60*fps)\n\n try:\n expl_len = int(len(self.session.Tracking['Exploration']))\n if expl_len>exploration_maxfr:\n self.exp_heatmap = True\n self.exploration = self.session.Tracking['Exploration'][expl_len-exploration_maxfr:]\n else:\n self.exp_heatmap = False\n self.exploration = self.session.Tracking['Exploration']\n except:\n self.exp_heatmap = True\n self.exploration = self.session.Tracking['Whole Session']\n\n def get_dlc_pose(self, trial, stim):\n \"\"\" get tracking data to reconstruct \"\"\"\n frames = np.linspace(stim-self.prestim_frames, stim+self.poststim_frames,\n self.prestim_frames+self.poststim_frames+1)\n\n poses = {}\n for frame in frames:\n pose = {}\n for bp in trial.dlc_tracking['Posture'].keys():\n pose[bp] = trial.dlc_tracking['Posture'][bp].loc[int(frame)]\n if bp == 'body':\n pose['zero'] = trial.dlc_tracking['Posture'][bp].loc[int(frame)]['x']\n poses[str(frame)] = pose\n return poses\n\n def get_outcome(self, x, y, window, ax):\n pre = x[0:window-1]\n post = x[window:-1]\n\n self.post_y = y[window:-1]\n self.post_vel = self.dlc_vel[window:-1]\n self.post_ori = self.dlc_ori[window:-1]\n self.post_bl = self.dlc_bodylength[window:-1]\n\n self.mean_pre_xvel, self.sdev_pre_xacc = np.mean(np.diff(x[0:window - 31])), np.std(np.diff(x[0:window - 31]))\n self.mean_pre_yvel, self.sdev_pre_yacc = np.mean(np.diff(y[0:window - 31])), np.std(np.diff(y[0:window - 31]))\n self.mean_pre_vel, self.sdev_pre_vel = np.mean(self.dlc_vel[0:window-31]), np.std(self.dlc_vel[0:window-31])\n self.mean_pre_bl, self.sdev_pre_bl = np.mean(self.dlc_bodylength[0:window-31]),np.std(self.dlc_bodylength[0:window-31])\n\n # Get frame at which the mouse is the most distant from midline, ang get the X position at that frame\n pre_peak = pre[np.where(np.abs(pre)==np.max(np.abs(pre)))]\n post_peak = post[np.where(np.abs(post)==np.max(np.abs(post)))]\n\n # Get position and orientation at time of stimulus\n x_stim, y_stim, ori_stim, vel_stim, bodylenfth_stim = self.dlc_x_adj[window], self.dlc_y_adj[window],\\\n self.dlc_ori[window],\\\n self.dlc_vel[window], self.dlc_bodylength[window]\n\n # Get REACTION TIME\n # Get point of max Y distance from shelet\n self.y_diff = np.diff(self.post_y)\n self.x_diff = np.diff(post)\n\n try:\n self.at_shelter = np.where(self.post_y>0)[0][0]\n except:\n self.at_shelter = len(self.post_y)\n\n # Adjust ax limits and mark time in which mouse reached shelter\n self.head_rel_angle.set(ylim=[0, self.at_shelter])\n self.absolute_angle_plot.set(ylim=[0, self.at_shelter])\n self.std.axvline(self.at_shelter+window, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n self.std_vel_plot.axvline(self.at_shelter+window, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n self.dlc.axvline(self.at_shelter+window, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n self.dlc_vel_plot.axvline(self.at_shelter+window, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n self.react_time_plot.axvline(self.at_shelter, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n self.twod_track.plot(self.dlc_x_adj[self.wnd+self.at_shelter], 
self.dlc_y_adj[self.wnd+self.at_shelter],\n 'o', color=[0.8, 0.2, 0.8], markersize=20, alpha=0.75, label='At shelter')\n self.ang_vel_plot.set(xlim=[window-100, window+self.at_shelter+5])\n self.ang_vel_plot.axvline(self.at_shelter+window, color=[0.8, 0.2, 0.8], linewidth=1, label=None)\n\n\n # Show the results\n text_x, text_y, text_bg_col = -280, 75, [0.1, 0.1, 0.1]\n\n if pre_peak<0:\n ax.text(-text_x, text_y, 'Origin RIGHT', bbox={'facecolor':text_bg_col, 'alpha':0.5, 'pad':10})\n else:\n ax.text(-text_x, text_y, 'Origin LEFT', bbox={'facecolor':text_bg_col, 'alpha':0.5, 'pad':10})\n\n if post_peak<0:\n ax.text(-text_x, text_y-50, 'Escape RIGHT', bbox={'facecolor':text_bg_col, 'alpha':0.5, 'pad':10})\n else:\n ax.text(-text_x, text_y-50, 'Escape LEFT', bbox={'facecolor':text_bg_col, 'alpha':0.5, 'pad':10})\n\n ax.text(-text_x, text_y - 100, 'Stim X: {}'.format(round(x_stim, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n ax.text(-text_x, text_y - 150, 'Stim Y: {}'.format(round(y_stim, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n ax.text(-text_x, text_y - 200, 'Stim Ori: {}'.format(round(360+ori_stim, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n ax.text(-text_x, text_y - 250, 'Stim Vel: {}'.format(round(vel_stim, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n ax.text(-text_x, text_y - 300, 'Stim BL: {}'.format(round(bodylenfth_stim, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n ax.text(-text_x, text_y - 350, 'At shelt: {}'.format(round(self.at_shelter, 2)),\n bbox={'facecolor': text_bg_col, 'alpha': 0.5, 'pad': 10})\n\n########################################################################################################################\n\n def plot_skeleton_time(self, poses, ax):\n x = np.linspace(1, 101 * (len(poses.keys()) / 2), len(poses.keys()) + 1)\n for idx, (fr, pose) in enumerate(sorted(poses.items())):\n fr = x[idx]\n # Mark the frame\n if idx == self.prestim_frames-1:\n ax.axvline(fr, color='r', linewidth=3)\n # Plot pose over maze edges at react time\n self.plot_skeleton_single_pose(pose, self.pose_space)\n self.plot_skeleton_lines(self.pose_space, pose, self.colors, False)\n\n maze_outline = self.session.Metadata.videodata[0]['Maze Edges']\n self.pose_space.imshow(maze_outline, cmap='gray')\n\n elif not (idx+self.prestim_frames+1)%10:\n ax.axvline(fr, color=[0.4, 0.4, 0.4], linewidth=2)\n ax.text(fr-20, 600, '{}'.format(idx-self.prestim_frames+1),\n bbox={'facecolor': [0.1, 0.1, 0.1], 'alpha': 0.5, 'pad': 10})\n\n elif (idx-self.prestim_frames) == self.at_shelter:\n ax.axvline(fr, color=[0.8, 0.2, 0.8], linewidth=3, label=None)\n\n else:\n ax.axvline(fr, color=[0.6, 0.6, 0.6], linewidth=0.25)\n\n # Plot the skeleton\n self.plot_skeleton_lines(ax, pose, self.colors, fr)\n\n # Plot the location of the bodyparts\n self.plot_skeleton_single_pose(pose, ax, shift=fr)\n return x\n\n def plot_skeleton_space(self, poses, ax):\n for idx, (fr, pose) in enumerate(sorted(poses.items())):\n if idx num_trials:\n break\n\n plt.show()\n","sub_path":"Plotting/Single_trial_summary.py","file_name":"Single_trial_summary.py","file_ext":"py","file_size_in_byte":30213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"523304253","text":"# Write a program to calculate the credit card balance after one year if a person only pays the minimum monthly payment required by the credit card company each month.\n\nbalance = 
484\nannualInterestRate = 0.2\nmonthlyPaymentRate = 0.04\nmonthlyInterestRate = annualInterestRate / 12\n\nmonth = 12\nwhile month > 0:\n minimum_payment = round(balance * monthlyPaymentRate, 2)\n unpaid_balance = round(balance - minimum_payment, 2)\n interest = round(unpaid_balance * monthlyInterestRate, 2)\n balance = round(unpaid_balance + interest, 2)\n print(\"remaining balance is\", balance)\n month -= 1\n\nprint(\"Remaining balance:\", balance)\n\n# Write a program to calculate the fixed monthly payment in order to pay off the balance.\nbalance = 999999\nmonth = 12\nannualInterestRate = 0.18\nmonthlyInterestRate = annualInterestRate / 12\n\nmonthlyPayment = 0\nrunning_flag = True\n\nwhile running_flag:\n new_balance = balance\n month = 12\n while month > 0:\n unpaid_balance = round(new_balance - monthlyPayment, 2)\n interest = round(unpaid_balance * monthlyInterestRate, 2)\n new_balance = round(unpaid_balance + interest, 2)\n month -= 1\n if new_balance >= 0:\n monthlyPayment += 10\n else:\n print(\"Lowest Payment:\", monthlyPayment)\n running_flag = False\n \n# Write a program to calculate the fixed monthly payment in order to pay off the balance.\nbalance = 999999\nmonth = 12\nannualInterestRate = 0.18\nmonthlyInterestRate = annualInterestRate / 12\n\nrunning_flag = True\n\nlow = balance / 12\nhigh = balance * (1 + monthlyInterestRate)**12 / 12\nmonthlyPayment = (high + low) / 2\n\nwhile running_flag:\n month = 12\n new_balance = balance\n while month > 0:\n unpaid_balance = round(new_balance - monthlyPayment, 2)\n interest = round(unpaid_balance * monthlyInterestRate, 2)\n new_balance = round(unpaid_balance + interest, 2)\n month -= 1\n if new_balance > 5:\n low = monthlyPayment\n elif new_balance < 0:\n high = monthlyPayment\n else:\n monthlyPayment = monthlyPayment + 0.1\n print(\"monthly payment is\", round(monthlyPayment, 2))\n running_flag = False\n monthlyPayment = (low + high) / 2\n\n \n","sub_path":"ProgrammingCourses/MIT6001X/week2/week2_problem_set.py","file_name":"week2_problem_set.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"481161821","text":"from functools import wraps\n\nfrom django.core.cache import cache\n\nfrom yummy import conf\n\n\ndef recached_method_to_mem(func):\n @wraps(func)\n def wrapped_func(self, recache=False):\n attr_name = '_%s' % func.__name__\n if not hasattr(self, attr_name) or recache:\n res = func(self, recache=recache)\n setattr(self, attr_name, res)\n return getattr(self, attr_name)\n\n return wrapped_func\n\n\ndef cache_method_with_obj(func):\n @wraps(func)\n def new_func(self, obj):\n key = self.__class__.cache_manager_key(func.__name__, obj)\n res = cache.get(key)\n if res is None:\n res = list(func(self, obj))\n cache.set(key, res, timeout=conf.CACHE_TIMEOUT)\n return res\n\n return new_func\n\n\ndef add_cached_methods(cls):\n\n for method in cls.CACHE_METHODS:\n cached_method = '%s_cached' % method\n setattr(cls, cached_method, cache_method_with_obj(getattr(cls, method)))\n return cls\n","sub_path":"yummy/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"580093806","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-fat/egg/schevo/fieldspec.py\n# Compiled at: 
2007-03-21 14:34:41\n\"\"\"Fieldspec-related code.\n\nFor copyright, license, and warranty, see bottom of file.\n\"\"\"\nimport sys\nfrom schevo.lib import optimize\nfrom schevo.constant import UNASSIGNED\nfrom schevo.label import label_from_name\nfrom schevo.lib.odict import odict\n\nclass FieldMap(odict):\n \"\"\"Field Mapping based on Ordered Dictionary.\"\"\"\n __module__ = __name__\n __slots__ = [\n '_keys']\n\n def update_values(self, other):\n \"\"\"Update field values based on field values in other FieldMap.\"\"\"\n for (name, field) in other.iteritems():\n if name in self:\n f = self[name]\n if f.fget is None and not f.readonly:\n f.set(field.get())\n\n return\n\n def value_map(self):\n d = odict()\n for (name, field) in self.items():\n value = field._value\n d[name] = value\n\n return d\n\n\nclass FieldSpecMap(odict):\n \"\"\"Field spec mapping based on Ordered Dictionary.\"\"\"\n __module__ = __name__\n __slots__ = [\n '_keys']\n\n def __call__(self, *filters):\n \"\"\"Return FieldSpecMap instance based on self, filtered by optional\n callable objects specified in `filters`.\"\"\"\n new_fields = self.iteritems()\n for filt in filters:\n new_fields = [ (key, field) for (key, field) in new_fields if filt(field) ]\n\n return FieldSpecMap(new_fields)\n\n def field_map(self, instance=None, values={}):\n \"\"\"Return a FieldMap based on field specifications.\"\"\"\n pairs = [ (name, FieldClass(instance=instance, value=values.get(name, UNASSIGNED))) for (name, FieldClass) in self.iteritems() ]\n return FieldMap(pairs)\n\n\ndef field_spec_from_class(cls, class_dict, slots=False):\n field_spec = FieldSpecMap()\n if cls._field_spec:\n for (name, BaseFieldClass) in cls._field_spec.iteritems():\n field_spec[name] = new_field_class(BaseFieldClass, slots)\n\n specs = []\n for (name, field_def) in class_dict.items():\n if isinstance(field_def, FieldDefinition):\n field_def.name = name\n BaseFieldClass = field_def.FieldClass\n NewClass = new_field_class(BaseFieldClass, slots)\n NewClass._name = name\n if not NewClass.label:\n NewClass.label = label_from_name(name)\n specs.append((field_def.counter, name, NewClass))\n if isinstance(getattr(cls, name, None), FieldDefinition):\n delattr(cls, name)\n\n specs.sort()\n specs = [ s[1:] for s in specs ]\n field_spec.update(FieldSpecMap(specs))\n return field_spec\n\n\ndef new_field_class(BaseFieldClass, slots):\n \"\"\"Return a new field class subclassed from BaseFieldClass.\"\"\"\n if slots:\n\n class NewClass(BaseFieldClass):\n __module__ = __name__\n\n else:\n\n class NoSlotsField(BaseFieldClass):\n __module__ = __name__\n\n NewClass = NoSlotsField\n NewClass.readonly = BaseFieldClass.readonly\n NewClass.__name__ = BaseFieldClass.__name__\n return NewClass\n\n\nclass FieldDefinition(object):\n \"\"\"A definition of a field attached to something.\n\n The order of FieldDefinition instance creation is kept for the\n purposes of creating ordered dictionaries of fields, etc.\n \"\"\"\n __module__ = __name__\n __do_not_optimize__ = True\n BaseFieldClass = None\n _counter = 0\n\n def __init__(self, *args, **kw):\n self.name = None\n BaseFieldClass = self.BaseFieldClass\n\n class _Field(BaseFieldClass):\n __module__ = __name__\n\n _Field.BaseFieldClass = BaseFieldClass\n _Field._init_kw(kw)\n _Field._init_args(args)\n _Field._init_final()\n _Field.__name__ = BaseFieldClass.__name__\n self.FieldClass = _Field\n self.counter = FieldDefinition._counter\n FieldDefinition._counter += 1\n return\n\n def __call__(self, fn):\n \"\"\"For use as a decorator.\"\"\"\n 
self.FieldClass.fget = (\n fn,)\n return self\n\n def field(self, name, instance=None, value=None):\n\n class NoSlotsField(self.FieldClass):\n __module__ = __name__\n\n NoSlotsField.__name__ = self.FieldClass.__name__\n NewClass = NoSlotsField\n NewClass._name = name\n if not NewClass.label:\n NewClass.label = label_from_name(name)\n f = NewClass(instance, value)\n return f\n\n\noptimize.bind_all(sys.modules[__name__])","sub_path":"pycfiles/Schevo-3.0-py2.4-macosx-10.3-fat/fieldspec.py","file_name":"fieldspec.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181295808","text":"#\n# [169] Majority Element\n#\n# https://leetcode.com/problems/majority-element/description/\n#\n# algorithms\n# Easy (49.01%)\n# Total Accepted: 283.1K\n# Total Submissions: 577.6K\n# Testcase Example: '[3,2,3]'\n#\n# Given an array of size n, find the majority element. The majority element is\n# the element that appears more than ⌊ n/2 ⌋ times.\n# \n# You may assume that the array is non-empty and the majority element always\n# exist in the array.\n# \n# Example 1:\n# \n# \n# Input: [3,2,3]\n# Output: 3\n# \n# Example 2:\n# \n# \n# Input: [2,2,1,1,1,2,2]\n# Output: 2\n# \n# \n#\nclass Solution:\n def majorityElement(self, num):\n count = 0\n key = num[0] \n m = {key: 1}\n for x in num[1:]:\n if m.has_key(x):\n m[x] += 1\n if m[x] > m[key]: key = x;\n else: m[x] = 1\n\n return key","sub_path":"169.majority-element.py","file_name":"169.majority-element.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"41807645","text":"#! python3\n# coding: utf-8\n\nimport math\n\n#logistic map with r\nlmwr = lambda r:lambda x:r * x * (1 - x)\n\nm1wa = lambda a:lambda x:a - x * x\n\ndef itn(f, n, x = .5):\n for i in range(n):\n x = f(x)\n yield x\n\ndef r3in2(f, n = 3, x = .5, bg = 3, mi = 2000, ma = 1000000):\n ov = 0\n c = 0\n idle = True\n ds = [0]\n us = [0]\n for i, v in enumerate(itn(f, ma, x)):\n if v <= ov:\n if c == bg:\n if not idle:\n if us[-1] >= ds[-1]:\n us.pop()\n us.append(i)\n idle = True\n else:\n if idle:\n if i - us[-1] > mi:\n ds.append(i)\n idle = False\n if len(ds) > n:\n break\n c = 0\n ov = v\n c += 1\n return ds, us\n\ndef i3in2(*args):\n ds, us = r3in2(*args)\n return int(ds[-1] / (len(ds) - 1))\n\nfrom matplotlib import pyplot as plt\ndef plot(itr, st = 0, sc = 2):\n ys = []\n y1s = []\n y2s = []\n ycs = []\n xs = []\n ov = 0\n t = 0\n c = [0]\n for i, v in enumerate(itr):\n if v <= ov:\n t += 1\n c = [0]\n ov = v\n c[0] += 1\n ys.append(v)\n y1s.append(i/(t+1.))\n y2s.append(i)\n ycs.append(c)\n xs.append(t)\n plt.cla()\n plt.plot(xs[st:], ys[st:], '.')\n #plt.plot(xs[st:], [i - 2 if i > 2 else 0 for i in y1s[st:]], '.')\n #plt.plot(xs[st:], [i / float(len(y2s) - st) for i in y2s[st:]], 'g.')\n plt.plot(xs[st:], [i[0] / float(sc) for i in ycs[st:]], '-')\n plt.show()\n\ndef plot2(r, n = 5, st = 0, sta = None, f=lmwr, **kargs):\n lm = f(r)\n plt.cla()\n rs = []\n for i in range(5):\n x0 = .1 + .1 * i\n ys = []\n xs = []\n du = r3in2(lm, n, x0, **kargs)\n rs.append(du)\n for d, u in zip(*du):\n ys.append(i + 1)\n xs.append(d)\n ys.append(i + .5)\n xs.append(d)\n ys.append(i + .5)\n xs.append(u)\n ys.append(i + 1)\n xs.append(u)\n plt.plot(xs, ys, '-')\n plt.xlim(xmin = st)\n if not sta is None:\n plt.xlim(xmax = sta)\n plt.show()\n return rs\n\ndef avg(rs):\n lr = []\n for i in range(len(rs[0])):\n s = 0\n for 
j in range(len(rs)):\n s += rs[j][i]\n lr.append(s / len(rs))\n rs.append(lr)\n for r in rs:\n r.append(sum(r) / len(r))\n return [[int(i) for i in r] for r in rs]\n\ndef avgs(dus):\n rds = []\n rus = []\n rcs = []\n for du in dus:\n ds = []\n us = []\n cs = []\n od = ou = oc = None\n for d, u in zip(*du):\n c = (d + u) / 2\n if not od is None:\n ds.append(d - od)\n us.append(u - ou)\n cs.append(c - oc)\n od = d\n ou = u\n oc = c\n rds.append(ds)\n rus.append(us)\n rcs.append(cs)\n return avg(rds), avg(rus), avg(rcs)\n\ndef plot3(ritr, n = 5, x0 = .5, f = lmwr):\n plt.cla()\n xs = []\n ys = []\n for r in ritr:\n lm = f(r)\n xs.append(r)\n ys.append(i3in2(lm, n, x0))\n plt.plot(xs, ys, '-')\n plt.show()\n return zip(xs, ys)\n\nif __name__ == '__main__':\n from pprint import pprint as ppr\n period_3 = 1 + math.sqrt(8)\n lm = lmwr(period_3 - .000005)\n #plot(itn(lm, 6000))\n #plot(itn(lm, 10000), 8000)\n #plot(itn(lm, 80000), 70000)\n #plot(itn(lmwr(period_3+.02838), 80000), 70000)\n def plm(r, n = 6000, st = 0):\n plot(itn(lmwr(r), n), st)\n def pm1(a, n = 6000, st = 0):\n plot(itn(m1wa(a), n), st, 1)\n def test1():\n vs = [\n 3.6786 - period_3,\n -.0001,\n -.000005,\n .02838,\n .03,\n 3.8568007 - period_3,\n #3.678573510698, n=1e5\n ]\n vas = [\n 3.6785736,\n #3.678573510428,\n 3.73817232,\n 3.7447104,\n 3.774133381,\n 3.7748561,\n 3.80074011,\n 3.80101786,\n period_3 - 1e-7,\n 3.85680068\n ]\n #for v in vs:\n #plm(period_3 + v, 6000)\n for v in vas:\n plm(v, 6000)\n if v < period_3:\n print(t2(v), v)\n def test2():\n e = 1e-9\n dus = plot2(period_3 - e, 7)\n for i in range(5):\n x0 = .1 + .1 * i\n #print(i+1, *r3in2(lmwr(period_3 - .000000001), 10, x0))\n print(i+1, *dus[i])\n ppr(avgs(dus))\n def test3():\n e = 1e-9\n n = 70\n return [(period_3 - r, t) for r, t in plot3([period_3 - (i + 1) * e for i in range(n)])]\n def testc():\n c = 1.28383\n n = 3\n plot2(period_3 - c * 1e-10, n * 10 + 1, mi = 3000, ma = int(1e7), st = n * 1e6 - 3e4, sta = n * 1e6 + 1e4)\n #c2 = 1.35\n #c2 = 4 - math.sqrt(7)\n c2 = 1.348682528863713\n t2wc = lambda c: lambda r: math.sqrt(c / (period_3 - r))\n t2 = t2wc(c2)\n rft2wc = lambda c: lambda t: period_3 - c / t ** 2\n rft2 = rft2wc(c2)\n def testm1():\n vas = [\n 1.75 - 1e-7,\n ]\n for v in vas:\n pm1(v, 6000)\n print(v)\n def testm2():\n e = 1e-9\n dus = plot2(1.75 - e, 7, f = m1wa)\n for i in range(5):\n x0 = .1 + .1 * i\n print(i+1, *dus[i])\n ppr(avgs(dus))\n def testm3():\n e = 1e-9\n n = 70\n return [(1.75 - r, t) for r, t in plot3([1.75 - (i + 1) * e for i in range(n)], f = m1wa)]\n cm2 = 1.8\n ppr(testm3())\n","sub_path":"others/t19021701_logisticmap_time.py","file_name":"t19021701_logisticmap_time.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352238683","text":"# -*- coding: utf-8 -*-\nimport MySQLdb \n\nfrom contextlib import closing\n\nfrom main import app\n\ndef connect_db():\n return MySQLdb.connect(host='localhost', user='root', passwd='qaz123', db='users', port=3306)\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().execute(f.read())\n db.commit()\n\ndef exec_db():\n with closing(connect_db()) as db:\n a=1469116800\n for i in range(0,48*30):\n b=a+1800\n db.cursor().execute(\"\"\"insert into subscribe_calendar(start, end, title, type) values(\"{0}\", \"{1}\", \"-\", \"0\")\"\"\".format(a,b))\n a=b\n db.commit()\n\ndef insert_db():\n with closing(connect_db()) as db:\n 
db.cursor().execute(\"\"\" insert into product_type (type ,description) values(1,\"证件照\") \"\"\")\n        db.cursor().execute(\"\"\" insert into product_type (type ,description) values(2,\"轻写真\") \"\"\")\n        db.cursor().execute(\"\"\" insert into product_type (type ,description) values(3,\"Happy Face 系列\") \"\"\")\n        db.commit()\n\n\ndef event():\n    sql=\"\"\"\nset global event_scheduler=1;\ncreate event myevent on schedule\nevery 1 day starts '2016-07-19 00:00:00'\ndo call subscribe()\n\"\"\"\n    with closing(connect_db()) as db:\n        db.cursor().execute(sql)\n        db.commit()\n\ndef proc():\n    sql=\"\"\"\ncreate procedure subscribe()\nbegin\ndeclare i int;\nset i=0;\nwhile i<48 do\n    insert into subscribe_calendar(start, end, title, type) values(\n    unix_timestamp(now())+3600*24*29+1800*i, unix_timestamp(now())+3600*24*29+1800*(i+1), \"\", \"0\");\n    set i=i+1;\nend while;\nend;\n\"\"\"\n    with closing(connect_db()) as db:\n        db.cursor().execute(sql)\n        db.commit()\n#delete from subscribe_calendar where SYSDATE()>FROM_UNIXTIME(end);\n#select * from mysql.event\n#drop event myevent\n#show create procedure subscribe\n#drop procedure subscribe\n#insert into product_type (type ,description) values(1,\"证件照\");\n#insert into user_info (name, phone , password ,email) values (\"admin\", 0 ,123,\"admin@qq.com\");\n#init_db()\nexec_db()\n#insert_db()\nproc()\n#event()\n","sub_path":"app/initdb.py","file_name":"initdb.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"608089548","text":"# coding:utf8\nfrom typing import List\nclass Solution:\n    def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:\n        return self.calculateMinimumHP_v1(dungeon)\n    def calculateMinimumHP_v1(self, dungeon: List[List[int]]) -> int:\n        '''\n        dp[i][j] 表示在第[i, j]位置的要用的最低血量\n        dp的初始化为(m + 1) * (n + 1)\n        推导如下:\n        -2 -3 +3 7 5 2 inf \n        -5 -10 +1 -> 6 11 5 inf\n        +10 +30 -5 1 1 6 1\n        inf inf 1 #\n        自底向上\n        状态转移方程:\n        计算, dp[i+1][j] - dungeon[i][j] 与 dp[i][j+1]-dungeon[i][j]\n        1.如果两个数中有一个为负数则hp = 1可以存活\n        2.如果两个数都是正数, 选最小值\n        dp[i][j] = max(min(dp[i][j + 1], dp[i + 1][j]) - dungeon[i][j], 1)\n\n        '''\n        if not dungeon:\n            return 0\n        rows, cols = len(dungeon), len(dungeon[0])\n        dp = [[float('inf')] * (cols + 1) for _ in range(rows + 1)]\n\n        dp[rows][cols - 1] = 1\n        dp[rows - 1][cols] = 1\n\n        for i in range(rows - 1, -1, -1):\n            for j in range(cols - 1, -1 ,-1):\n                dp[i][j] = max(min(dp[i][j+1], dp[i+1][j]) - dungeon[i][j], 1)\n        return dp[0][0]\n\n","sub_path":"leetcode_everyday/pastqing_174.py","file_name":"pastqing_174.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"193309805","text":"from scapy.layers.inet import IP, UDP, TCP\nfrom scapy.layers.l2 import Ether\n\nfrom typing import List, Tuple\nfrom scapy.all import *\nfrom time import sleep\nfrom argparse import ArgumentParser\nfrom copy import deepcopy\nimport json\n\n\ndef generate_ip(id_):\n\tid_ = int(id_) + 1\n\tif 1 <= id_ <= 254:\n\t\treturn \"10.0.0.\" + str(id_)\n\tif 255 <= id_ <= 255 * 254 + 253:\n\t\treturn \"10.0.\" + str(id_ // 254) + \".\" + str(id_ % 254)\n\traise Exception(\"Cannot support id address given a too large id\")\n\n\ndef generate_mac(id_):\n\tid_ = int(id_) + 1\n\n\tdef base_16(num):\n\t\tres = []\n\t\tnum = int(num)\n\t\tif num == 0:\n\t\t\treturn \"0\"\n\t\twhile num > 0:\n\t\t\tleft = num % 16\n\t\t\tres.append(left if left < 10 else chr(ord('a') + (left - 10)))\n\t\t\tnum //= 
16\n\t\tres.reverse()\n\t\treturn \"\".join(map(str, res))\n\n\traw_str = base_16(id_)\n\tif len(raw_str) > 12:\n\t\traise Exception(\"Invalid id\")\n\t# reverse\n\traw_str = raw_str[::-1]\n\tto_complete = 12 - len(raw_str)\n\twhile to_complete > 0:\n\t\traw_str += \"0\"\n\t\tto_complete -= 1\n\tmac_addr = \":\".join([raw_str[i:i + 2] for i in range(0, len(raw_str), 2)])\n\tmac_addr = mac_addr[::-1]\n\treturn mac_addr\n\n\n# def start_new_thread_and_run()\ndef send_msg(ip, port, msg):\n\tprint(\"send msg\")\n\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\t\ts.connect((ip, int(port)))\n\t\ts.sendall(bytes(json.dumps(msg), \"ascii\"))\n\t\tprint(\"json :\",json.dumps(msg))\n\t\ts.close()\n\n\ndef process_stats(ip, port, specifier: Tuple, pkt_size: List, idt: List):\n\tmsg = {\"specifier\": specifier, \"stats\": []}\n\tmsg[\"stats\"].append(min(pkt_size))\n\tmsg[\"stats\"].append(max(pkt_size))\n\tmsg[\"stats\"].append(sum(pkt_size) / len(pkt_size))\n\t# todo std var\n\tmsg[\"stats\"].append(0)\n\n\tmsg[\"stats\"].append(min(idt))\n\tmsg[\"stats\"].append(max(idt))\n\tmsg[\"stats\"].append(sum(idt) / len(idt))\n\tmsg[\"stats\"].append(0)\n\tsend_msg(ip, port, msg)\n\n\nclass Gen:\n\tdef __init__(self, pkts_dir, self_id, dst_ids, win_size=5):\n\t\t# self.fn = pkts_fn\n\t\tself.pkts_dir = pkts_dir\n\t\tself.ip = generate_ip(self_id)\n\t\tself.id = self_id\n\t\tprint(\"self ip {}\".format(self.ip))\n\t\tself.dst_ips = [generate_ip(i) for i in dst_ids]\n\t\tself.mac = generate_mac(self_id)\n\t\tself.dst_macs = [generate_mac(i) for i in dst_ids]\n\t\tself.win_size = win_size\n\n\t\t# flow_stats[(specifier)]=={pkt_size:[],idt:[]}\n\t\tself.flow_stats = defaultdict(lambda: {\"pkt_size\": [], \"idt\": []})\n\t\tself.sent_record = set()\n\n\t\tself.specifier_to_flow_id = {}\n\n\tdef reset(self):\n\t\tself.flow_stats = defaultdict(lambda: {\"pkt_size\": [], \"idt\": []})\n\t\tself.sent_record = set()\n\t\tself.specifier_to_flow_id = {}\n\n\tdef __call__(self):\n\t\tpkt_files = list(os.listdir(self.pkts_dir))\n\t\tpkt_files = list(filter(lambda x: \".pkts\" in x, pkt_files))\n\t\tif len(pkt_files) == 0:\n\t\t\tprint(\"error: no pkt files\")\n\t\t\treturn\n\t\tpkt_file_idx = 0\n\n\t\t# 每个文件对应的端口段长度\n\t\tport_seg = (65535 - 1500) // len(pkt_files)\n\n\t\twhile True:\n\t\t\tprint(\"new loop\")\n\t\t\tself.reset()\n\t\t\tfn = pkt_files[pkt_file_idx]\n\t\t\tfp = open(os.path.join(self.pkts_dir, fn), \"r\")\n\t\t\tpkts = fp.readlines()\n\t\t\tfp.close()\n\n\t\t\tn_dsts = len(self.dst_macs)\n\t\t\tone_byte = b'\\xff'\n\t\t\ts = conf.L3socket(iface='h{}-eth0'.format(self.id))\n\t\t\treport_record = self.sent_record\n\n\t\t\tfor pkt_line in pkts:\n\t\t\t\tto_sleep, size, proto, flow_id, ts_diff_in_flow = pkt_line.split(\" \")\n\t\t\t\tsize = int(size)\n\n\t\t\t\tflow_id = int(flow_id)\n\t\t\t\t# todo sleep\n\t\t\t\tto_sleep = float(to_sleep)\n\t\t\t\tts_diff_in_flow = float(ts_diff_in_flow)\n\n\t\t\t\tdst_ip = self.dst_ips[flow_id % n_dsts]\n\n\t\t\t\tsrc_port = 1500 + (pkt_file_idx * port_seg) + flow_id % port_seg\n\t\t\t\tdst_port = src_port\n\t\t\t\tif proto == \"TCP\":\n\t\t\t\t\tl4 = TCP(sport=src_port, dport=dst_port) / (one_byte * size)\n\t\t\t\telse:\n\t\t\t\t\tl4 = UDP(sport=src_port, dport=dst_port) / (one_byte * size)\n\n\t\t\t\tpkt = IP(dst=dst_ip) / l4\n\t\t\t\tspecifier = (src_port, dst_port, self.ip, dst_ip, proto)\n\n\t\t\t\tif flow_id not in report_record:\n\t\t\t\t\t# 统计信息\n\t\t\t\t\tself.flow_stats[specifier][\"pkt_size\"].append(size)\n\t\t\t\t\tif ts_diff_in_flow >= 
0:\n\t\t\t\t\t\tself.flow_stats[specifier][\"idt\"].append(ts_diff_in_flow)\n\t\t\t\t\tif len(self.flow_stats[specifier][\"pkt_size\"]) == self.win_size:\n\t\t\t\t\t\treport_record.add(flow_id)\n\t\t\t\t\t\tthread = threading.Thread(target=process_stats,\n\t\t\t\t\t\t args=(\"192.168.64.1\",\n\t\t\t\t\t\t \"1026\",\n\t\t\t\t\t\t deepcopy(specifier),\n\t\t\t\t\t\t deepcopy(\n\t\t\t\t\t\t\t self.flow_stats[specifier][\"pkt_size\"]),\n\t\t\t\t\t\t deepcopy(\n\t\t\t\t\t\t\t self.flow_stats[specifier][\"idt\"])))\n\t\t\t\t\t\tdel self.flow_stats[specifier]\n\t\t\t\t\t\tthread.start()\n\n\t\t\t\ts.send(pkt)\n\t\t\t\tprint(\"sent\")\n\t\t\tpkt_file_idx = (pkt_file_idx + 1) % len(pkt_files)\n\n\nif __name__ == '__main__':\n\tpkt_dir = \"/home/ubuntu/temp/pkts\"\n\tparser = ArgumentParser()\n\tparser.add_argument(\"--id\", required=True, help=\"self id\")\n\tparser.add_argument(\"--dst_id\", required=True, help=\"destination id file\")\n\tparser.add_argument(\"--pkts_dir\", required=True, default=pkt_dir, help=\"pkts dir\")\n\targs = parser.parse_args()\n\twith open(args.dst_id, \"r\") as fp:\n\t\tdst_ids = fp.readlines()\n\t\tfp.close()\n\tgenerator = Gen(args.pkts_dir, args.id, dst_ids)\n\tgenerator()\n","sub_path":"traffic/scapy_generator.py","file_name":"scapy_generator.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402086581","text":"\"\"\"\nWrite a program that swaps the keys and values of a dictionary.\n\nInput:\n{\n \"a\" : 1,\n \"b\" : 1,\n \"c\" : 2,\n \"d\" : 3\n}\n\nOutput:\n{\n 1: [\"a\", \"b\"],\n 2: [\"c\"],\n 3: [\"d\"]\n}\n\nET: 15 minutes\n\"\"\"\ninput_dict = {'a': 1, 'b': 1, 'c': 2, 'd': 3}\noutput_dict = {key: value for value, key in input_dict.items()}\nprint(output_dict)\n","sub_path":"CompletedPY/pratice-exercises/Section1-Exercises/ex20.py","file_name":"ex20.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"206962393","text":"#!/usr/bin/env python3.7\n''' Basic scatter plot How to Modify Axis Ticks and Positions and Labels '''\n# https://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n\n\nmpl.rcParams.update(\n {'font.size': 18,\n 'font.family': 'STIXGeneral',\n 'mathtext.fontset': 'stix'\n }\n)\n\ndef plot_sine_cosine_wave(style='ggplot'):\n plt.style.use(style)\n plt.figure(figsize=(7, 4), dpi=120)\n X = np.linspace(0,2*np.pi, 1000)\n plt.plot(X, np.sin(X)); plt.plot(X, np.cos(X))\n plt.xticks(\n ticks=np.arange(0, 440/57.2985, 90/57.2985),\n labels=[\n r'$0$',\n r'$\\frac{\\pi}{2}$',\n r'$\\pi$',\n r'$\\frac{3\\pi}{2}$',\n r'$2\\pi$'\n ]\n )\n plt.gca().set(\n ylim=(-1.25, 1.25),\n xlim=(-0.5, 7)\n )\n plt.title(style, fontsize=18)\n plt.show()\n\n\nplot_sine_cosine_wave('seaborn-notebook')\nplot_sine_cosine_wave()\nplot_sine_cosine_wave('bmh')\n","sub_path":"matplot/basic_scatter_plot1.4.py","file_name":"basic_scatter_plot1.4.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"32867627","text":"import itertools as it\n\nclass Solution:\n def insert(self, intervals, newInterval):\n def is_overlap(n1, n2):\n if n2[0] < n1[0]:\n n1, n2 = n2, n1\n return n1[0] <= n2[0] <= n1[1]\n\n s, e = newInterval\n\n right_side = it.takewhile(lambda x: x[1] < s, 
intervals)\n        left_side = it.dropwhile(lambda x: x[0] <= e, intervals)\n        middle = list(filter(lambda x: is_overlap(x, newInterval), intervals))\n        if middle:\n            s, e = min(middle[0][0], s), max(middle[-1][1], e)\n        else:\n            s, e = newInterval\n        return list(it.chain(right_side, [[s, e]], left_side))\n\n\nif __name__ == '__main__':\n    intervals = [[1,3],[6,9]]\n    newInterval = [2,5]\n    print(Solution().insert(intervals, newInterval))\n","sub_path":"leetcode/57_insert_interval_1.py","file_name":"57_insert_interval_1.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"50936884","text":"#Nome: Denys Souza da Costa\n#Matricula: 21453458\n#Lab de Codificação 04\n#Exercicio 9\n#Data: 03/08/2016\n\nfrom math import*\n\nx = int(input(\"No. de aproximacoes: \"))\ncont = 0\nh = sqrt(2)\npot = 2\n\nwhile(cont < x):\n\tp = pot * h\n\th = sqrt(2 - 2 * sqrt(1 - (h / 2) ** 2))\n\tpot = 2 * pot\n\tcont = cont + 1\nprint(round(p , 8))","sub_path":"exs/1516-1140.py","file_name":"1516-1140.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"238287286","text":"\nimport json\n\nfrom threeza.web_utils import get_url\n\ndef author_has_approved_pipe_page(algo_name,name):\n    \"\"\" Does author approve of a page that invokes their algorithm ? \"\"\"\n    assert \"/\" in algo_name, \"Expecting full algorithm name including username (e.g. threezatests/double)\"\n    details_url = \"https://algorithmia.com/webapi/algorithms/\"+algo_name\n    try:\n        details = json.loads(get_url(details_url))\n    except Exception as e:\n        return False\n\n    if \"summary\" in details[\"algorithm\"]:\n        if name in details[\"algorithm\"][\"summary\"]:\n            return True\n    return False\n","sub_path":"threeza/mores.py","file_name":"mores.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"96790164","text":"# -*- coding: utf-8 -*-\n# 完成时间:2017/8/10\n# 功能描述:通过从给定的链接开始进行多线程爬取词条标题和简介,并从简介中提取链接进行自动多网页爬取\nimport sys\nimport re\nimport MySQLdb\nimport requests\nimport time\nfrom time import ctime\nfrom bs4 import BeautifulSoup\nfrom threading import Thread, Lock\n\n\n# 中文编码设置\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass Spider:\n    def __init__(self, urls):\n        self.conn = MySQLdb.connect('localhost', 'root', 'yyj', 'bdbk', charset='utf8')\n        self.cursor = self.conn.cursor()\n        self.insert_sql = \"INSERT INTO lemma(title, content, url) VALUES('%s', '%s', '%s')\"\n        self.base_url = 'https://baike.baidu.com'\n        self.urls = urls\n        # 待爬取的url列表\n        self.crawl_url = urls\n        # 线程列表\n        self.threads = []\n        # 已爬取的url列表\n        self.has_crawl_url = []\n\n    # 清理数据\n    def clear_data(self, data):\n        data = re.sub('[\\n\\t]', '', data)\n        data = re.sub('\\[\\d+\\]', '', data)\n        data = data.strip()\n        return data\n\n    # 发送请求并解析html内容提取数据\n    def parser(self, url, thread_name, lock):\n        resp = requests.get(url)\n        # 设置网页内容编码格式为utf-8\n        resp.encoding = 'utf-8'\n        html = BeautifulSoup(resp.content, 'lxml')\n        try:\n            # 获取词条标题\n            title = html.find('dd', {'class': 'lemmaWgt-lemmaTitle-title'}).find_all('h1')[0].text\n            title = self.clear_data(title)\n            # 获取词条简介\n            content = html.find('div', {'class': 'lemma-summary'}).text\n            content = self.clear_data(content)\n            # 获取词条简介中的其他链接\n            new_urls = html.find('div', {'class': 'lemma-summary'}).find_all('a')\n        except AttributeError:\n            print('没有找到指定的标签')\n            return None\n        else:\n            lock.acquire()\n            print('%s gets %s' % 
(thread_name, url))\n print('[%s]%s %s' % (ctime(), title, content))\n print('')\n self.cursor.execute(self.insert_sql % (title, content, url))\n self.conn.commit()\n lock.release()\n return new_urls\n\n def crawl(self, thread_id, lock):\n thread_name = '%s-%s' % ('Thread', thread_id)\n # 当队列有新请求就取出一个请求进行爬取\n while self.crawl_url:\n url = self.crawl_url.pop()\n # 把新请求传给parser函数进行发送并获取从parser返回的新请求列表\n new_urls = self.parser(url, thread_name, lock)\n # 如果爬取失败则将该新请求加入待爬取url列表以便下次重新爬取\n if new_urls is None:\n self.crawl_url.append(url)\n continue\n # 爬取成功则将该新请求加入已爬取url列表中\n self.has_crawl_url.append(url)\n # 对新请求列表进行去重\n for new_url in new_urls:\n if new_url.has_attr('href'):\n new_url = '%s%s' % (self.base_url, new_url['href'])\n if new_url not in self.has_crawl_url:\n if new_url not in self.crawl_url:\n self.crawl_url.append(new_url)\n # 延迟2s发送请求\n time.sleep(2)\n\n def start(self):\n # 实例同步锁对象\n lock = Lock()\n # 创建5个线程实例并添加到线程列表中\n for i in range(0, 5):\n thread = Thread(target=self.crawl, args=(i, lock))\n self.threads.append(thread)\n # 启动线程\n for i in range(0, len(self.threads)):\n self.threads[i].start()\n # 等待所有线程执行完\n for i in range(0, len(self.threads)):\n self.threads[i].join()\n self.cursor.close()\n self.conn.close()\n\n\ndef main():\n # 初始化待爬取的url\n urls = ['https://baike.baidu.com/item/%E4%BA%8C%E6%AC%A1%E5%85%83/85064',\n 'https://baike.baidu.com/item/Python/407313',\n 'https://baike.baidu.com/item/Java/85979',\n 'https://baike.baidu.com/item/C++/99272'\n ]\n spider = Spider(urls)\n spider.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"crawl_BDBK.py","file_name":"crawl_BDBK.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616239134","text":"#!/usr/bin/python\n# encoding:utf-8\n\"\"\"\n@author: lance\n@version: 1.0.0\n@license: Apache Licence\n@file: LogisticRegressionModel.py\n@time: 2021/7/19 13:09\n\"\"\"\nimport torch\nx_data = torch.Tensor([[1.0], [2.0], [3.0]])\ny_data = torch.Tensor([[0], [0], [1]])\n\nclass LogisticRessionModel(torch.nn.Module):\n def __init__(self):\n super(LogisticRessionModel, self).__init__()\n self.linear = torch.nn.Linear(1, 1)\n\n def forward(self, x):\n y_pred = torch.sigmoid(self.linear(x))\n return y_pred\nmodel = LogisticRessionModel()\ncriterion = torch.nn.BCELoss()\n# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\nfor epoch in range(1000):\n y_pred = model(x_data)\n loss = criterion(y_pred, y_data)\n print(epoch, loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\nx_test = torch.Tensor([[4.0]])\ny_test = model(x_test)\nprint('y_pred = ', y_test.data)\n\nif __name__ == '__main__':\n pass","sub_path":"PyTorch/逻辑斯蒂回归/LogisticRegressionModel.py","file_name":"LogisticRegressionModel.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"629046591","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import get_object_or_404, render, redirect,render_to_response\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.template import RequestContext,loader,Context\nfrom HuoDong.models import HuoDong\nfrom HuoDong.forms import HuoDongForm\n\ndef huo(request):\n t = loader.get_template(\"huo.html\") \n c = Context({})\n return HttpResponse(t.render(c))\n\ndef ListAllHuo(request):\n stuff = HuoDong.objects.all()\n return 
render(request, 'huodong/ListAll.html', {'stuff': stuff})\n\ndef CreateItHuo(request):\n if request.method == 'POST':\n form = HuoDongForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('HuoDong.views.ListAllHuo')\n else:\n form = HuoDongForm()\n return render(request, 'huodong/CreateIt.html', {'form': form})\n\ndef EditItHuo(request, thing_id):\n thing = get_object_or_404(HuoDong, pk=thing_id)\n if request.method == 'POST':\n form = HuoDongForm(request.POST, instance=thing)\n if form.is_valid():\n form.save()\n return redirect('ShowDetailHuo', thing_id)\n else:\n form = HuoDongForm(instance=thing)\n return render(request, 'huodong/EditIt.html', {'form': form})\n\ndef ShowDetailHuo(request, thing_id):\n thing = get_object_or_404(HuoDong, pk=thing_id)\n return render(request, 'huodong/ShowDetail.html', {'thing': thing})\n\ndef DeleteItHuo(request, thing_id):\n thing = get_object_or_404(HuoDong, pk=thing_id)\n if request.method == 'POST':\n thing.delete()\n return redirect('HuoDong.views.ListAllHuo')\n return render(request, 'huodong/DeleteIt.html', {'thing': thing}) ","sub_path":"GuanLiFive/HuoDong/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"105618725","text":"################################\n### Yvonne DeSousa 107177186 ###\n### CSE 307 Spring ###\n### HW #1 ###\n################################\n\n### IMPORTS ###\nimport sys\t # gets input from the command line\nfrom collections import Counter # counting elements, nice and simple\n\n### FUNCTIONS ###\ndef stringOveride(mixed_list):\n string_list = []\n for m in mixed_list:\n string_list.append(str(m))\n return string_list\n\n### PROGRAM START ###\nrow_list = [] # holds txt data, stored as list of rows\nblank_row_test = 0 # checks to see if the row is valid\n\n## GET THE TXT FILE ##\n\n# Is there a txt file name given? #\nfile_name = sys.argv[-1] # this is the name of the python program\nif sys.argv[0]==file_name : # is the value the .py file?\n print ('No file name given!')\n print ('Quitting now!')\n sys.exit(0) # no additional arguments given, [QUIT]\n\n# Name given. Can we open it? #\ntry:\n file = open(file_name, 'r') # let's try to open it now!\nexcept IOError:\n print ('File \\\"', file_name, '\\\" invalid!')\n print ('Quitting now!')\n sys.exit(0) # open failed, [QUIT]\n\n# We now have a valid txt file! #\n\n### PROCESS INTO DATA MATRIX ###\nfor line in file:\n line = line.split('\\t') # tokenize by tab\n line = [l.rstrip().format(l) for l in line] # remove new lines\n for l in line:\n blank_row_test += len(l) # testing for blank row\n if(blank_row_test > 0): # does any token have a length?\n row_list.append(line) # if so, add the row!\n blank_row_test = 0 # reset for next row testing\n \nfile.close() # close the file!\n# We now have all non-empty rows stored in row_list #\n\n### OUTPUT NUMBER OF ROWS ###\nprint ('Number of rows:',len(row_list))\n\n## TEST FOR VALID COLUMNS #\nfor row in row_list:\n if len(row) != len(row_list[0]): # remember, all col MUST BE THE SAME\n print('Cannot determine number of columns')\n sys.exit(0) # discrepancy found, [QUIT]\n\n# The data in row_list is 100% OK! 
#\n \n### OUTPUT NUMBER OF COLUMNS ###\nprint ('Number of columns:',len(row_list[0]))\n\n\n### GET COLUMNS & THEIR STATS ###\nfor i in range(0, len(row_list[0])): # for the number of cols that exist\n print ('Column', i,'\\b:', row_list[0][i]) # print the 'header' value with col counter\n temp_list = [] # this will hold data in each col\n string_count = 0 # test for int/float mixed with naughty rogue strings\n for j in range(1, len(row_list)): # for the rest of the values in each col\n try:\n int(j) # is value an int? cast as int when appending...\n temp_list.append(int(row_list[j][i]))\n except ValueError: # nope, not an int!\n try:\n float(j) # is value a float? cast as float when appending...\n temp_list.append(float(row_list[j][i]))\n except ValueError: # nope, neither int no float! just add as string!\n temp_list.append(row_list[j][i])\n string_count+=1\n if(string_count!=0):\n temp_list = stringOveride(temp_list) # let's re-add, just in case of mixed input!\n\n # Now we have a temp_list of a col's values #\n count = Counter(temp_list) # count all occurances of each element in temp_list\n unique_set = set(temp_list) # gather all of the unique \n unique_set = sorted(unique_set)\n for u in unique_set:\n print(count[u], u) # print each element of unique_list with the num of occurances\n\n###################\n### END OF FILE ###\n###################\n\n","sub_path":"HW1/a1main.py","file_name":"a1main.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119672149","text":"from .behavior import Behavior\nfrom entities.collider import Collider\nfrom util import world_to_screen, rescale_vector\nimport config\nimport constants\n\n\nclass Interactive(Behavior):\n \"\"\"Invokes a callback when Mario intersects this entity's collider\"\"\"\n def __init__(self, level, entity, hitbox_offset, hitbox_size, on_hit, on_not_hit=None):\n super().__init__()\n\n assert level is not None\n assert entity is not None\n\n # note to self: hitbox offset should be unscaled (so it's based on the disk-size sprites, not the rescaled ones)\n\n self.level = level\n self.entity = entity\n self.hitbox = Collider.from_entity(entity, level.collider_manager, constants.Mario)\n self.hitbox.rect.size = rescale_vector(hitbox_size)\n self.hitbox_offset = rescale_vector(hitbox_offset)\n self.hitbox.position = entity.position + self.hitbox_offset\n self.on_hit = on_hit\n self.on_not_hit = on_not_hit\n\n def update(self, dt):\n if self.on_hit is None:\n return\n\n collisions = self.hitbox.move(self.entity.position + self.hitbox_offset, True)\n\n for c in collisions:\n self.on_hit(c)\n\n if not collisions and self.on_not_hit:\n self.on_not_hit()\n\n def draw(self, screen, view_rect):\n if config.debug_hitboxes:\n r = self.hitbox.rect.copy()\n self.hitbox.position = self.entity.position + self.hitbox_offset\n r.topleft = world_to_screen(self.hitbox.position, view_rect)\n r = screen.get_rect().clip(r)\n screen.fill((0, 255, 0), r)\n\n def destroy(self):\n pass\n","sub_path":"entities/characters/behaviors/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"227959443","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 5/9/17 8:37 PM\n# @Author : xiaowa\n\nimport os\n\nAPP_HOME = os.path.split(os.path.realpath(__file__))[0]\nPROJECT_HOME = os.path.dirname(APP_HOME)\n\nAI_HOME = 
os.environ.get(\"AI_HOME\", PROJECT_HOME)\nLOG_HOME = os.path.join(AI_HOME, \"logs\")\n","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"332637198","text":"import time\n\nimport sparse_operation_kit as sok\nimport horovod.tensorflow as hvd\nimport tensorflow as tf\nimport nvtx\n\n\ndef evaluate(model, dataset, thresholds):\n auc = tf.keras.metrics.AUC(\n num_thresholds=thresholds, curve=\"ROC\", summation_method=\"interpolation\", from_logits=True\n )\n\n @tf.function\n def _step(samples, labels):\n probs = model(samples, training=False)\n auc.update_state(labels, probs)\n\n for idx, (samples, labels) in enumerate(dataset):\n _step(samples, labels)\n\n auc.true_positives.assign(\n hvd.allreduce(auc.true_positives, name=\"true_positives\", op=hvd.mpi_ops.Sum)\n )\n auc.true_negatives.assign(\n hvd.allreduce(auc.true_negatives, name=\"true_negatives\", op=hvd.mpi_ops.Sum)\n )\n auc.false_positives.assign(\n hvd.allreduce(auc.false_positives, name=\"false_positives\", op=hvd.mpi_ops.Sum)\n )\n auc.false_negatives.assign(\n hvd.allreduce(auc.false_negatives, name=\"false_negatives\", op=hvd.mpi_ops.Sum)\n )\n\n return auc.result().numpy()\n\n\ndef evaluate_wilcoxon(model, dataset):\n @tf.function\n def _step(samples, labels):\n probs = model(samples, training=False)\n return tf.concat([probs, labels], axis=1)\n\n results = []\n for idx, (samples, labels) in enumerate(dataset):\n result = _step(samples, labels)\n results.append(result)\n results = tf.concat(results, axis=0)\n\n results = hvd.allgather(results, name=\"wilcoxon_AUC\")\n\n sort_order = tf.argsort(results[:, 0])\n sorted_label = tf.gather(results[:, 1], sort_order)\n rank = tf.cast(tf.range(1, sorted_label.shape[0] + 1), tf.float32)\n num_true = tf.reduce_sum(sorted_label)\n num_false = sorted_label.shape[0] - num_true\n auc = (tf.reduce_sum(rank * sorted_label) - (num_true * (num_true + 1) / 2)) / (\n num_true * num_false\n )\n return auc.numpy()\n\n\nclass LearningRateScheduler:\n \"\"\"\n LR Scheduler combining Polynomial Decay with Warmup at the beginning.\n TF-based cond operations necessary for performance in graph mode.\n \"\"\"\n\n def __init__(self, optimizers, base_lr, warmup_steps, decay_start_step, decay_steps):\n self.optimizers = optimizers\n self.warmup_steps = tf.constant(warmup_steps, dtype=tf.int32)\n self.decay_start_step = tf.constant(decay_start_step, dtype=tf.int32)\n self.decay_steps = tf.constant(decay_steps)\n self.decay_end_step = decay_start_step + decay_steps\n self.poly_power = 2\n self.base_lr = base_lr\n with tf.device(\"/CPU:0\"):\n self.step = tf.Variable(0)\n\n @tf.function\n def __call__(self):\n with tf.device(\"/CPU:0\"):\n # used for the warmup stage\n warmup_step = tf.cast(1 / self.warmup_steps, tf.float32)\n lr_factor_warmup = 1 - tf.cast(self.warmup_steps - self.step, tf.float32) * warmup_step\n lr_factor_warmup = tf.cast(lr_factor_warmup, tf.float32)\n\n # used for the constant stage\n lr_factor_constant = tf.cast(1.0, tf.float32)\n\n # used for the decay stage\n lr_factor_decay = (self.decay_end_step - self.step) / self.decay_steps\n lr_factor_decay = tf.math.pow(lr_factor_decay, self.poly_power)\n lr_factor_decay = tf.cast(lr_factor_decay, tf.float32)\n\n poly_schedule = tf.cond(\n self.step < self.decay_start_step,\n lambda: lr_factor_constant,\n lambda: lr_factor_decay,\n )\n\n lr_factor = tf.cond(\n self.step < self.warmup_steps, lambda: 
lr_factor_warmup, lambda: poly_schedule\n )\n\n lr = self.base_lr * lr_factor\n for optimizer in self.optimizers:\n optimizer.lr.assign(lr)\n\n self.step.assign(self.step + 1)\n\n\ndef scale_grad(grad, factor):\n if isinstance(grad, tf.IndexedSlices):\n # sparse gradient\n grad._values = grad._values * factor\n return grad\n else:\n # dense gradient\n return grad * factor\n\n\nclass Trainer:\n def __init__(\n self,\n model,\n dataset,\n test_dataset,\n auc_thresholds,\n base_lr,\n warmup_steps,\n decay_start_step,\n decay_steps,\n amp,\n ):\n base_lr = float(base_lr)\n\n self._model = model\n # self._embedding_vars, self._dense_vars = \\\n # sok.split_embedding_variable_from_others(self._model.trainable_variables)\n self._dataset = dataset\n self._test_dataset = test_dataset\n self._auc_thresholds = auc_thresholds\n self._amp = amp\n\n self._loss_fn = tf.losses.BinaryCrossentropy(from_logits=True)\n\n self._dense_optimizer = tf.keras.optimizers.SGD(base_lr)\n self._embedding_optimizer = tf.keras.optimizers.SGD(base_lr)\n if self._amp:\n self._embedding_optimizer = tf.keras.mixed_precision.LossScaleOptimizer(\n self._embedding_optimizer, initial_scale=1024, dynamic=False\n )\n self._lr_scheduler = LearningRateScheduler(\n [self._dense_optimizer, self._embedding_optimizer],\n base_lr,\n warmup_steps,\n decay_start_step,\n decay_steps,\n )\n\n @tf.function\n def _step(self, samples, labels, first_batch):\n self._lr_scheduler()\n\n with tf.GradientTape() as tape:\n probs = self._model(samples, training=True)\n loss = self._loss_fn(labels, probs)\n if self._amp:\n loss = self._embedding_optimizer.get_scaled_loss(loss)\n\n embedding_vars, dense_vars = sok.split_embedding_variable_from_others(\n self._model.trainable_variables\n )\n embedding_grads, dense_grads = tape.gradient(loss, [embedding_vars, dense_vars])\n if self._amp:\n embedding_grads = self._embedding_optimizer.get_unscaled_gradients(embedding_grads)\n dense_grads = self._embedding_optimizer.get_unscaled_gradients(dense_grads)\n\n # embedding_grads = [scale_grad(g, hvd.size()) for g in embedding_grads]\n\n with sok.OptimizerScope(embedding_vars):\n self._embedding_optimizer.apply_gradients(\n zip(embedding_grads, embedding_vars), experimental_aggregate_gradients=False\n )\n\n # with tf.control_dependencies(embedding_grads):\n dense_grads = [\n hvd.allreduce(grad, op=hvd.Average, compression=hvd.compression.NoneCompressor)\n for grad in dense_grads\n ]\n self._dense_optimizer.apply_gradients(\n zip(dense_grads, dense_vars), experimental_aggregate_gradients=False\n )\n\n if first_batch:\n hvd.broadcast_variables(dense_vars, root_rank=0)\n hvd.broadcast_variables(self._dense_optimizer.variables(), root_rank=0)\n\n return loss\n\n def train(self, interval=1000, eval_interval=3793, eval_in_last=False, early_stop=-1, epochs=1):\n eval_time = 0\n iter_time = time.time()\n total_time = time.time()\n throughputs = []\n for epoch in range(epochs):\n early_stop_flag = False\n for i, (samples, labels) in enumerate(self._dataset):\n idx = epoch * len(self._dataset) + i\n\n # rng = nvtx.start_range(message='Iteration_'+str(idx), color='blue')\n loss = self._step(samples, labels, idx == 0)\n # nvtx.end_range(rng)\n\n if idx == 0:\n print(\n \"Iteration 0 finished. 
The following log will be printed every %d iterations.\"\n % interval\n )\n\n if (idx % interval == 0) and (idx > 0):\n t = time.time() - iter_time\n throughput = interval * self._dataset._batch_size * hvd.size() / t\n print(\n \"Iteration:%d\\tloss:%.6f\\ttime:%.2fs\\tthroughput:%.2fM\"\n % (idx, loss, t, throughput / 1000000)\n )\n throughputs.append(throughput)\n iter_time = time.time()\n\n if (eval_interval is not None) and (idx % eval_interval == 0) and (idx > 0):\n t = time.time()\n auc = evaluate(self._model, self._test_dataset, self._auc_thresholds)\n t = time.time() - t\n eval_time += t\n iter_time += t\n print(\n \"Evaluate in %dth iteration, test time: %.2fs, AUC: %.6f.\" % (idx, t, auc)\n )\n if auc > 0.8025:\n early_stop_flag = True\n break\n\n if early_stop > 0 and (idx + 1) >= early_stop:\n early_stop_flag = True\n break\n\n if early_stop_flag:\n break\n\n if eval_in_last:\n t = time.time()\n auc = evaluate(self._model, self._test_dataset, self._auc_thresholds)\n t = time.time() - t\n eval_time += t\n print(\"Evaluate in the end, test time: %.2fs, AUC: %.6f.\" % (t, auc))\n\n total_time = time.time() - total_time\n training_time = total_time - eval_time\n avg_training_time = training_time / (idx + 1)\n print(\"total time: %.2fs, in %d iterations\" % (total_time, (idx + 1)))\n if len(throughputs[1:]) == 0:\n average_throughput = 0\n average_time_per_iter = 0\n else:\n average_throughput = sum(throughputs[1:]) / len(throughputs[1:])\n average_time_per_iter = (\n self._dataset._batch_size * hvd.size() / average_throughput * 1000\n )\n print(\n \"only training time: %.2fs, average: %.2fms/iter, average throughput: %.2fM(%.2fms/iter)\"\n % (\n training_time,\n avg_training_time * 1000,\n average_throughput / 1000000,\n average_time_per_iter,\n )\n )\n print(\"only evaluate time: %.2fs\" % (eval_time))\n","sub_path":"sparse_operation_kit/documents/tutorials/DLRM_Benchmark/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"528738695","text":"import sqlalchemy as sa\n\nfrom sqlalchemy_utils.observer import observes\nfrom tests import TestCase\n\n\nclass TestObservesForColumn(TestCase):\n dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'\n\n def create_models(self):\n class Product(self.Base):\n __tablename__ = 'product'\n id = sa.Column(sa.Integer, primary_key=True)\n price = sa.Column(sa.Integer)\n\n @observes('price')\n def product_price_observer(self, price):\n self.price = price * 2\n\n self.Product = Product\n\n def test_simple_insert(self):\n product = self.Product(price=100)\n self.session.add(product)\n self.session.flush()\n assert product.price == 200\n","sub_path":"tests/observes/test_column_property.py","file_name":"test_column_property.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"529608025","text":"'''\nCan you create a program to solve a word jumble? (More info here.)\nThe program should accept a string as input, and then return a list of words\nthat can be created using the submitted letters. For example, on the input\n\"dog\", the program should return a set of words including \"god\", \"do\", and \"go\".\n\nPlease implement the program in Python but refrain from using any helper\nmodules or imports (e.g. itertools). 
In order to verify your words, just\ndownload an English word list (here are a few).\n\nThen upload your program to GitHub or Gist, and send it back!\n\nAssumptions I am following:\n- Can NOT use any imported libraries/modules\n- Can use built-in python data structures (lists, dicts, tuples) and functions (strip())\n- Allow input string to contain any of the 256 extended ASCII chart characters\n- Whitespace at the beginning and end of the input string will be ignored\n- Whitespace that's not at the beginning or end of the input string (between the \n  other characters) will not be ignored and will be used towards character limits \n  when comparing with the reference list of words\nCreated on Mar 10, 2014\n@author: Prahalika\n'''\n\n'''\nPrint out the list of words that can be made from the input string\nand are legitimate words, as compared to the reference list of words\n'''\ndef print_output(output_list):\n    for word in output_list:\n        print(word)\n\n'''\nIterate through the list of words in the reference list.\nFor each word, iterate through each letter and compare with the dict \ncreated in parse_input_word(). If the letter in the reference word\ndoesn't appear in the input string dict or if the number of times\nthat character appears in the reference word exceeds the number of \ntimes that character appears in the input string, that reference word\ncannot be created from the input string. At that point, move on to\nchecking the next word.\nIf all of the letters in the reference word appear with appropriate \ncounts in the input string, the reference word is a valid creation\nusing the input string letters. Save that word to an output list. \n'''\ndef check_words(input_dict, file_path):\n    output_list = []\n    try:\n        word_list = open(file_path)\n    except IOError as err:\n        print('File error: ' + str(err))\n        return list()\n    \n    for word in word_list:\n        clean_word = word.strip()\n        word_dict = {}\n        let_cnt = 0\n        \n        for let in clean_word:\n            if let not in input_dict:\n                break\n            \n            if let not in word_dict:\n                word_dict[let] = 1\n            else:\n                word_dict[let] += 1\n                if word_dict[let] > input_dict[let]:\n                    break\n            let_cnt += 1\n        \n        if clean_word and let_cnt == len(clean_word): # skip blank lines so they are not counted as matches\n            output_list.append(clean_word)\n    \n    return output_list\n\n'''\nGo through the input string and determine how many of each character\nare present. Save that information to a dict where the key is the character\nand the value is the count of appearance of that character. 
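For example, the input \"dog\" yields {'d': 1, 'o': 1, 'g': 1}.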
\n'''\ndef parse_input_word(input_word):\n    input_dict = {}\n    for let in input_word:\n        if let not in input_dict:\n            input_dict[let] = 1\n        else:\n            input_dict[let] += 1\n    return input_dict\n\n'''\nAsk the user to input the string and path to the reference list\nand save that information\n'''\ndef get_input():\n    input_word = input(\"Enter input word: \")\n    file_path = input(\"Enter path to reference list: \")\n    return (input_word, file_path)\n\n'''\nStart the program here.\nRetrieve the input from the user\nParse the inputted string to create a character histogram\nRead in the reference list of words and compare with the \ncharacter histogram\nOutput the list of valid words\n'''\ndef main():\n    (input_word, file_path) = get_input()\n    input_dict = parse_input_word(input_word)\n    output_list = check_words(input_dict, file_path)\n    print_output(output_list)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"CodingJumble.py","file_name":"CodingJumble.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"506041009","text":"import csv\nimport pandas as pd\nfrom textblob import TextBlob\n\ninputFile = \"biden200820_230001.csv\"\nStage1File = \"biden200820_230001leaned.csv\"\nStage2File = \"biden200820_230001SentimentApplied.csv\"\n\ndf = pd.read_csv(inputFile,engine='python')\n\n#Deletes all columns other than the tweet column, as it's the 10th column. REF https://www.geeksforgeeks.org/how-to-drop-one-or-multiple-columns-in-pandas-dataframe/\ndf.drop(df.columns[[0, 1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]], axis = 1, inplace = True)\n\n#changes all text to lower case. REF https://medium.com/@koshut.takatsuji/twitter-sentiment-analysis-with-full-code-and-explanation-naive-bayes-a380b38f036b\ndf['tweet'] = df['tweet'].apply(lambda x: x.lower())\n\n#Removing special chars from the dataframe. regex=False is needed so that '.', '(' and ')' are treated as literal characters rather than regex metacharacters. REF https://stackoverflow.com/questions/51778480/remove-certain-string-from-entire-column-in-pandas-dataframe\ndf['tweet'] = df['tweet'].str.replace('@', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('.', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace(',', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('#', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('&', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('!', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('%', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace('(', '', regex=False)\ndf['tweet'] = df['tweet'].str.replace(')', '', regex=False)\n\n#Removing line breaks. REF https://stackoverflow.com/questions/44227748/removing-newlines-from-messy-strings-in-pandas-dataframe-cells\ndf = df.replace('\\n','', regex=True)\n\n\n\n#Export cleaned dataframe to CSV.\ndf.to_csv(Stage1File)\n\n\nwith open(Stage1File, 'r') as csvfile: #opening the input file.\n    rows = csv.reader(csvfile)\n    f = open(Stage2File, \"w\") #opening the output file. REF https://stackoverflow.com/questions/25115140/python-only-last-line-is-saved-to-file. 
Important to open this outside of the loop.\n for row in rows:\n sentence = row[2] #This picks the column.\n blob = TextBlob(sentence)\n sentimentrow = blob.sentiment.polarity #Sentiment Polarity Analysis\n subjectivityrow = blob.sentiment.subjectivity #Subjectivity Polarity Analysis\n f.write(sentence + \",\" + str(sentimentrow) + \",\" + str(subjectivityrow) + \"\\n\") #REF https://stackoverflow.com/questions/25115140/python-only-last-line-is-saved-to-file\n f.close() #REF https://stackoverflow.com/questions/25115140/python-only-last-line-is-saved-to-file. Important to close this outside of the loop.\n\n\n#Now to add a header to the CSV REF https://stackoverflow.com/questions/28162358/append-a-header-for-csv-file\nwith open(Stage2File,newline='') as header:\n r = csv.reader(header)\n data = [line for line in r]\nwith open(Stage2File,'w',newline='') as header:\n w = csv.writer(header)\n w.writerow(['Sentence','Polarity','Subjectivity'])\n w.writerows(data)\n\n#Below we are stats from the CSV` REF https://stackoverflow.com/questions/50165953/python-dataframes-describing-a-single-column\ndf3 = pd.read_csv(Stage2File,engine='python') #Converting a csv to a panda dataframe. Need to use engine=python as per https://www.shanelynn.ie/pandas-csv-error-error-tokenizing-data-c-error-eof-inside-string-starting-at-line/\ndescribePolarity = df3['Polarity'].describe() #Computing the common statistics of the Polarity column in the dataframe/\nprint(describePolarity)\nprint(df3)\n","sub_path":"opinions.py","file_name":"opinions.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"500655628","text":"\"\"\"\nGiven a list of integers, write a function that returns the largest\nsum of non-adjacent numbers. 
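(Here \"non-adjacent\" means that no two of the chosen numbers may sit next to each other in the list.) 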
Numbers can be 0 or negative.\n\nFor example, [2, 4, 6, 2, 5] should return 13, since we pick 2, 6, and 5.\n[5, 1, 1, 5] should return 10, since we pick 5 and 5.\n\nFollow-up: Can you do this in O(N) time and constant space?\n\"\"\"\n\n# O(2^n)\ndef largest_non_adjacent(nums):\n if not nums:\n return 0\n # pick current or not\n return max(largest_non_adjacent(nums[1:]), # pick\n nums[0] + largest_non_adjacent(nums[2:]) # dont pick\n )\n\ndef largest_non_adjacent_v2(nums):\n if len(nums) <= 2:\n return max(0, max(nums))\n \n cache = [0 for n in nums]\n cache[0] = max(0, nums[0])\n cache[1] = max(cache[0], nums[1])\n\n for i in range(2, len(nums)):\n num = nums[i]\n cache[i] = max(cache[i-1], num + cache[i-2])\n return cache[-1]\n\ndef largest_non_adjacent_v3(nums):\n if len(nums) <= 2:\n return max(0, max(nums))\n \n max_excluding_last = max(0, nums[0])\n max_including_last = max(max_excluding_last, nums[1])\n\n for num in nums[2:]:\n prev_max_including_last = max_including_last\n\n max_including_last = max(max_including_last, max_excluding_last + num)\n max_excluding_last = prev_max_including_last\n\n return max(max_including_last, max_excluding_last)\n\nX, y = [2, 4, 6, 2, 5], 13\nassert largest_non_adjacent(X) == y\nassert largest_non_adjacent_v2(X) == y\nassert largest_non_adjacent_v3(X) == y","sub_path":"daily_coding_problem/05/05-largest-sum-of-non-adjacent.py","file_name":"05-largest-sum-of-non-adjacent.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"155460488","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 18 09:47:07 2019\n\n@author: student\n\"\"\"\n\nimport numpy as np\nimport os\n\ndt = 0.001 # controller time step [s]\nexp_duration_sin = 4.0 #sine reference duration\nexp_duration = 5.0 #simulation duration\n\nSLOW_FACTOR = 1#to slow down simulation\n\nframe_name = 'ee_link' # name of the frame to control (end-effector)\n\n#PD controller\n## Matrix of gains\nkp = np.eye(6)*300 # proportional gains \nkd = np.eye(6)*30 # derivative gains (critical damping)\n\n## PARAMETERS OF REFERENCE SINUSOIDAL TRAJECTORY (1, 2, 3, 4, 5, 6 joint)\namp = np.array([ 0.0, 0.2, 0.0, 0.0, 0.4, 0.0]) # amplitude\nphi = np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) # phase\nfreq = np.array([ 0.0, 1.0, 0.0, 0.0, 1.5, 0.0]) # frequency\n\n## bigger inertia variation on joint 2\n#amp = np.array([ 0.0, 0.4, 0.8, 0.0, 0.4, 0.0]) # amplitude\n#phi = np.array([ 0.0, 0.0, 3.14, 0.0, 0.0, 0.0]) # phase\n#freq = np.array([ 0.0, 1.0, 1.0, 0.0, 1.5, 0.0]) # frequency\n\n\n# Initial configuration / velocity / Acceleration\nq0 = np.array([ 0.0, -0.3, 0.5, -1.57, -1.57, 0.5]) \nqd0 = np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\nqdd0 = np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) \n\n\n#EXERCISE 1.4: high gains\n#Kp = np.eye(6)*600\n#kd = np.eye(6)*30\n\n# EXERCISE 2.4: Add external force at T =2.0s\n# Value of linear external force\nextForce = np.array([0.0, 0.0, 50.0])\n# FLAGS\nEXTERNAL_FORCE = False\n\n# EXERCISE 2.7: Add unilateral compliant contact\nn = np.array([0.0,0.0,1.0]) # contact normal\np0 = np.array([0.0,0.0,0.0]) # contact position \nK_env = np.eye(3)*10000 # contact stiffness \nD_env = np.eye(3)*1000 # contact damping\nmu =1.0","sub_path":"ex_2_conf.py","file_name":"ex_2_conf.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"192078004","text":"class Solution(object):\n \"\"\"\n nums: list of 
integers\n return: every subset of nums\n idea: backtrack\n Note: nums is described as set, so no duplicates in nums\n \"\"\"\n def subsets(self, nums):\n nums.sort()\n ans, path = [], []\n self.backtrack(ans, path, nums, 0)\n return ans\n def backtrack(self, ans, path, nums, pos):\n ans.append([x for x in path])\n if pos >= len(nums): return\n for i in range(pos, len(nums)):\n path.append(nums[i])\n self.backtrack(ans, path, nums, i + 1)\n path.pop()\n","sub_path":"078_Subsets.py","file_name":"078_Subsets.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"547247938","text":"import os\r\n\r\nwith open(\"version\", \"r+\", encoding='utf-8') as versionfile:\r\n print(\"Reading version file...\")\r\n verinfo = versionfile.read().split()\r\n versionfile.seek(0)\r\n print(verinfo)\r\n count = 0\r\n for i in verinfo:\r\n verinfo[count] = int(i)\r\n count = count + 1\r\n print(\"Calculating new version...\")\r\n with open(\"PROGDETAILS.py\", \"w\", encoding='utf-8') as programfile:\r\n if(verinfo[2] == 9):\r\n verinfo[2] = 0\r\n if(verinfo[1] == 9):\r\n verinfo[1] = 0\r\n verinfo[0] = verinfo[0] + 1\r\n else:\r\n verinfo[1] = verinfo[1] + 1\r\n else:\r\n verinfo[2] = verinfo[2] + 1\r\n print(verinfo)\r\n print(\"Writing...\")\r\n versionfile.write(str(verinfo[0]) + \" \" + str(verinfo[1]) + \" \" + str(verinfo[2]))\r\n versionfile.truncate()\r\n programfile.seek(0)\r\n programfile.write(\"class Program:\\n def __init__(self):\\n self.version = \\\"\" + str(verinfo[0]) + \".\" + str(verinfo[1]) + \".\" + str(verinfo[2]) + \"\\\"\")\r\n programfile.truncate()\r\n\r\nos.system(\"pause\")\r\nexec(open(\"OREGON.py\", encoding='utf-8').read())\r\nos.system(\"pause\")\r\n","sub_path":"UpdateVersionInfo.py","file_name":"UpdateVersionInfo.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"621734976","text":"'''\n@author Manuel Hiche \n'''\n\nimport asyncio\nimport struct\nimport sys\n\nclass ClientProtocol(asyncio.Protocol):\n def __init__(self, loop, filepath):\n self.loop = loop\n self.connected = False\n self.file = open(filepath, \"rb\")\n self.size = self.file.seek(0, 2)\n self.file.seek(0, 0)\n self.counter = 0\n print(self.size)\n\n def connection_made(self, transport):\n self.connected = True\n self.transport = transport\n\n self.transport.write(struct.pack(\"i\", self.size))\n\n def data_received(self, data):\n loop = asyncio.get_event_loop()\n loop.call_soon(self.send)\n\n def send(self): \n bytes = self.file.read(256)\n if bytes:\n self.transport.write(bytes) \n self.counter += 1\n print(\"Wrote\",self.counter,\"/\",int(self.size/256),\"pages\")\n if self.counter == int(self.size/256):\n print(\"done\")\n self.transport.close()\n else:\n print(\"error reading page from file\")\n self.transport.close()\n\n def connection_lost(self, exc):\n self.loop.stop()\n\nif len(sys.argv) != 2:\n print(\"usage: python3 flash.py \")\n exit(1)\nfilepath = sys.argv[1]\n\nloop = asyncio.get_event_loop()\n \ncoro = loop.create_connection(lambda: ClientProtocol(loop, filepath),'192.168.1.50', 1025)\nloop.run_until_complete(coro)\nloop.run_forever()\n\nloop.close()\n","sub_path":"flash.py","file_name":"flash.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"25719998","text":"def factorial(n):\n product = 1\n for i in 
range(1,n+1):\n        product *= i\n    return product\n\ndef digit_sum(n):\n    string_n = str(n)\n    d_sum = 0\n    for char in string_n:\n        d_sum += int(char)\n    return d_sum\n\nprint(digit_sum(factorial(100)))","sub_path":"ProjectEuler/problem_20.py","file_name":"problem_20.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"196316160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 23 12:23:25 2018\n\n@author: Stefan Draghici\n\"\"\"\n\n# list\nlist_comp_1=[i for i in range(200)]\nprint(list_comp_1)\n\nlist_a=[1,2,3,4]\nlist_b=[5,6,7,8]\nresult_list=[]\n\n# set\nset_a={1,2,3,4}\nset_b={5,6,7,8}\nresult_set={(x, y) for x in set_a for y in set_b}\nprint(result_set)\nresult_set={x+y for x in set_a for y in set_b}\nprint(result_set)\n\n# dict\ndict_1={'dada':125, 'rtre':7856, 'ewrtret':5314}\ndict_2={'eewrtete':1251, 'dsstrtyr':8936, 'dttyrut':8934}\nmerged_dict={k:v for i in (dict_1, dict_2) for k,v in i.items()}\nprint(merged_dict)","sub_path":"Python Pros/comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"268643433","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup # used instead of regular expressions to extract the relevant tags from the page source\nimport random\nimport requests # used to fetch the page's HTML source\nimport socket # used for exception handling\nimport http.client # used for exception handling\nfrom xml.dom.minidom import Document\nfrom concurrent.futures import ThreadPoolExecutor,wait, ALL_COMPLETED\nfrom queue import Queue\nfrom threading import Event\nimport logging,logging.config\n\ndef get_data_from_url(url):\n    \"\"\"Fetch a page's HTML while masquerading as a browser.\"\"\"\n    header = {\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n        'Accept-Encoding': 'gzip, deflate, sdch',\n        'Accept-Language': 'zh-CN,zh;q=0.8',\n        'Connection': 'keep-alive',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235',\n    }\n\n    # set a timeout; randomising it helps keep the site from flagging us as a crawler\n    logger = logging.getLogger('weather')\n    timeout = random.choice(range(10, 20))\n    try:\n        rep = requests.get(url, headers=header, timeout=timeout)\n        rep.encoding = \"utf-8\"\n    except socket.timeout as e:\n        logger.error(\"socket.timeout:%s\",e)\n        return None\n    except socket.error as e:\n        logger.error(\"socket.error:%s\", e)\n        return None\n    except http.client.BadStatusLine as e:\n        logger.error(\"http.client.BadStatusLine:%s\", e)\n        return None\n    except http.client.IncompleteRead as e:\n        logger.error(\"http.client.IncompleteRead:%s\", e)\n        return None\n    return rep.text\n\nclass RequestCityCodeThreadPool:\n    pro_url_prefix = 'http://www.weather.com.cn'\n    prov_beijing_url = 'http://www.weather.com.cn/textFC/beijing.shtml'\n\n    def __init__(self):\n        self.thread_executor = ThreadPoolExecutor(10)\n        self.logger = logging.getLogger('weather')\n\n    def get_prov_data(self):\n        '''\n        Fetch the list of provinces.\n        :return: OK--province list; Failed--None\n        '''\n        prov_data = get_data_from_url(self.prov_beijing_url)\n        if prov_data is None:\n            self.logger.error(\"Can not get province data\")\n            return None\n\n        bs = BeautifulSoup(prov_data, features='lxml') # create the BeautifulSoup object\n        body = bs.body # grab the body section\n        div_data = body.find(\"div\", {\"class\": \"lqcontentBoxheader\"})\n        a_data = div_data.select(\"a[href]\")\n        prov_url_list=[]\n        for i in range(len(a_data)):\n            prov_url_list.append(self.pro_url_prefix+a_data[i].get('href'))\n\n        self.logger.debug(\"Province 
data:%s\",prov_url_list)\n return prov_url_list\n\n def requst_thread(self,complete_callback):\n '''\n 获取每个省份的城市的天气代码\n :param complete_callback:\n :return:\n '''\n prov_url_list = self.get_prov_data()\n for i in range (len(prov_url_list)):\n task = self.thread_executor.submit(get_data_from_url,(prov_url_list[i]))\n task.add_done_callback(complete_callback)\n\nclass ProcessDataThreadPool(object):\n prov_list = ['北京', '上海', '天津', '重庆', '黑龙江', '吉林', '辽宁', '内蒙古', '河北', '山西', '陕西', '山东', '新疆', '西藏', '青海', '甘肃', '宁夏',\n '河南', '江苏', '湖北', '浙江', '安徽', '福建', '江西', '湖南', '贵州', '四川', '广东', '云南', '广西', '海南', '香港', '澳门', '台湾']\n prov_code_index = '10101'\n def __init__(self,save_xml_name):\n self.thread_pool = ThreadPoolExecutor(max_workers = 34)\n self.req_result_queue = Queue()\n self.save_xml_name = save_xml_name\n self.read_event = Event()\n self.logger = logging.getLogger('weather')\n\n def enqueue_data(self,request_result):\n self.req_result_queue.put(request_result)\n self.read_event.set()\n\n def process_data(self,save_obj):\n '''\n 从HTML文件中获取每个省份的城市天气代码\n :param save_obj:\n :return:\n '''\n while True:\n self.read_event.wait()\n html_content = self.req_result_queue.get()\n bs = BeautifulSoup(html_content, features='lxml') # 创建BeautifulSoup对象\n body = bs.body # 获取body部分\n div_hanml_data = body.find(\"div\", {\"class\": \"hanml\"})\n a_data = div_hanml_data.select(\".conMidtab3 a[href]\")\n\n city_code_dict = {}\n for i in range(len(a_data)):\n if a_data[i].string != \"详情\":\n city_code_dict[a_data[i].get('href').split('/')[-1].split('.')[0]] = a_data[i].string\n\n self.logger.debug(\"weather city code:%s\",city_code_dict)\n save_obj.send(city_code_dict)\n self.read_event.clear()\n break\n def process_thread_start(self):\n save_obj = self.save_citycode_file()\n save_obj.send(None)\n all_task = []\n for i in range(34):\n all_task.append(self.thread_pool.submit(self.process_data,(save_obj)))\n wait(all_task, return_when=ALL_COMPLETED)\n\n def save_citycode_file(self):\n '''\n 把获取的城市天气代码按照特定格式保存在xml文件中\n :return:\n '''\n prov_dict={}\n for m in range(len(self.prov_list)):\n prov_dict[str(int(self.prov_code_index) + m)] = self.prov_list[m]\n count_num = 0\n doc = Document()\n country = doc.createElement(\"China\")\n doc.appendChild(country)\n\n with open(self.save_xml_name, 'w', encoding='utf-8') as f:\n while True:\n citycode_data = yield\n if (citycode_data == None):\n continue\n citycode_list = list(citycode_data.items())\n province = doc.createElement('province')\n tmp_citycode = ''\n for j in range(len(citycode_list)):\n citycode = citycode_list[j][0]\n if (j == 0):\n tmp_citycode = citycode_list[j][0]\n province.setAttribute('id', citycode[:-4])\n province.setAttribute('province_name', prov_dict[citycode[:-4]])\n country.appendChild(province)\n city = doc.createElement('city')\n city.setAttribute('weather_id', citycode)\n city.setAttribute('city_name', citycode_list[j][1])\n province.appendChild(city)\n continue\n\n if prov_dict[citycode[:-4]] == \"海南\":\n city = doc.createElement('city')\n city.setAttribute('weather_id', citycode)\n city.setAttribute('city_name', citycode_list[j][1])\n province.appendChild(city)\n continue\n\n if (tmp_citycode[:-2] < citycode[:-2]):\n tmp_citycode = citycode\n city = doc.createElement('city')\n city.setAttribute('weather_id', citycode)\n city.setAttribute('city_name', citycode_list[j][1])\n province.appendChild(city)\n continue\n county = doc.createElement('county')\n county.setAttribute('weather_id', citycode)\n county.setAttribute('county_name', 
citycode_list[j][1])\n                    city.appendChild(county)\n                count_num = count_num + 1\n                if(count_num == 33):\n                    f.write(doc.toprettyxml(indent='\\t', newl=\"\\n\"))\n                    self.logger.debug(\"write citycode xml file OK.\")\n        if count_num != 33:\n            self.logger.error(\"write citycode xml file Failed.\")\n\nclass GetWeatherCityCode(object):\n    def __init__(self,save_xml_name):\n        self.req_thread_pool = RequestCityCodeThreadPool()\n        self.process_thread_pool = ProcessDataThreadPool(save_xml_name)\n\n    def req_completion_callback(self,result_future):\n        data = result_future.result()\n        self.process_thread_pool.enqueue_data(data)\n\n    def start_runner(self):\n        self.req_thread_pool.requst_thread(self.req_completion_callback)\n        self.process_thread_pool.process_thread_start()\n\nif __name__==\"__main__\":\n    pass\n\n\n\n\n","sub_path":"weather_query_project/weather_query/get_weather_citycode.py","file_name":"get_weather_citycode.py","file_ext":"py","file_size_in_byte":8482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"435005756","text":"\"\"\"\r\nCompute which day of the year a given date falls on\r\n\"\"\"\r\n\r\n\r\ndef is_leap_year(year):\r\n    \"\"\"\r\n    Check whether the given year is a leap year\r\n    :param year: the year\r\n    :return: True for a leap year, False for a common year\r\n    \"\"\"\r\n    return year % 4 == 0 and year % 100 != 0 or year % 400 == 0\r\n\r\n\r\ndef which_day(year, month, date):\r\n    \"\"\"\r\n    Compute which day of the year the given date is\r\n    :param year: year\r\n    :param month: month\r\n    :param date: day\r\n    :return: the day-of-year number\r\n    \"\"\"\r\n    days_of_month = [\r\n        [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\r\n        [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n    ][is_leap_year(year)]\r\n    total = 0\r\n    for index in range(month - 1):\r\n        total += days_of_month[index]\r\n    return total + date\r\n\r\n\r\ndef main():\r\n    print(which_day(1980, 11, 28))\r\n    print(which_day(1981, 12, 31))\r\n    print(which_day(2018, 1, 1))\r\n    print(which_day(2016, 3, 1))\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"ls6/demo12.py","file_name":"demo12.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"61666274","text":"\"\"\"\n Copyright (c) 2021 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n      http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n# transformation code from\n# https://github.com/clovaai/deep-text-recognition-benchmark/blob/68a80fe97943a111ff1efaf52a63ad8f0f1c0e5d/modules/transformation.py\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass TPS_SpatialTransformerNetwork(nn.Module):\n    \"\"\" Rectification Network of RARE, namely TPS based STN \"\"\"\n\n    def __init__(self, fiducial_num, input_size, output_size, i_channel_num=1):\n        \"\"\" Based on RARE TPS\n        input:\n            batch_I: Batch Input Image [batch_size x i_channel_num x I_height x I_width]\n            input_size : (height, width) of the input image I\n            output_size : (height, width) of the rectified image I_r\n            i_channel_num : the number of channels of the input image I\n        output:\n            batch_I_r: rectified image [batch_size 
x i_channel_num x I_r_height x I_r_width]\n \"\"\"\n super().__init__()\n self.fiducial_num = fiducial_num\n self.input_size = input_size\n self.output_size = output_size # = (I_r_height, I_r_width)\n self.i_channel_num = i_channel_num\n self.LocalizationNetwork = LocalizationNetwork(self.fiducial_num, self.i_channel_num)\n self.GridGenerator = GridGenerator(self.fiducial_num, self.output_size)\n\n def forward(self, batch_I):\n batch_C_prime = self.LocalizationNetwork(batch_I) # batch_size x K x 2\n build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime) # batch_size x n (= I_r_width x I_r_height) x 2\n build_P_prime_reshape = build_P_prime.reshape(\n [build_P_prime.size(0), self.output_size[0], self.output_size[1], 2]) # batch x grid_h x grid_w x 2\n\n if torch.__version__ > \"1.2.0\":\n if not torch.onnx.is_in_onnx_export():\n batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border', align_corners=True)\n else:\n # workwround for export to onnx\n # see here for details: https://github.com/open-mmlab/mmcv/pull/953/\n n, c, h, w = batch_I.shape\n gn, gh, gw, _ = build_P_prime_reshape.shape\n assert n == gn\n\n x = build_P_prime_reshape[:, :, :, 0]\n y = build_P_prime_reshape[:, :, :, 1]\n\n x = ((x + 1) / 2) * (w - 1)\n y = ((y + 1) / 2) * (h - 1)\n\n x = x.view(n, -1)\n y = y.view(n, -1)\n\n x0 = torch.floor(x).long()\n y0 = torch.floor(y).long()\n x1 = x0 + 1\n y1 = y0 + 1\n\n wa = ((x1 - x) * (y1 - y)).unsqueeze(1)\n wb = ((x1 - x) * (y - y0)).unsqueeze(1)\n wc = ((x - x0) * (y1 - y)).unsqueeze(1)\n wd = ((x - x0) * (y - y0)).unsqueeze(1)\n\n # Apply default for grid_sample function zero padding\n im_padded = F.pad(batch_I, pad=[1, 1, 1, 1], mode='replicate')\n padded_h = h + 2\n padded_w = w + 2\n # save points positions after padding\n x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1\n\n # Clip coordinates to padded image size\n x0 = torch.where(x0 < 0, torch.tensor(0), x0)\n x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0)\n x1 = torch.where(x1 < 0, torch.tensor(0), x1)\n x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1)\n y0 = torch.where(y0 < 0, torch.tensor(0), y0)\n y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0)\n y1 = torch.where(y1 < 0, torch.tensor(0), y1)\n y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1)\n\n im_padded = im_padded.view(n, c, -1)\n\n x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)\n x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)\n x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)\n x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)\n\n Ia = torch.gather(im_padded, 2, x0_y0)\n Ib = torch.gather(im_padded, 2, x0_y1)\n Ic = torch.gather(im_padded, 2, x1_y0)\n Id = torch.gather(im_padded, 2, x1_y1)\n\n batch_I_r = (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw)\n\n else:\n batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border')\n\n return batch_I_r\n\n\nclass LocalizationNetwork(nn.Module):\n \"\"\" Localization Network of RARE, which predicts C' (K x 2) from I (I_width x I_height) \"\"\"\n\n def __init__(self, fiducial_num, i_channel_num):\n super().__init__()\n self.fiducial_num = fiducial_num\n self.i_channel_num = i_channel_num\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=self.i_channel_num, out_channels=64, kernel_size=3, stride=1, padding=1,\n bias=False), nn.BatchNorm2d(64), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 64 x I_height/2 x I_width/2\n 
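# each Conv-BN-ReLU block below doubles the channel count while MaxPool2d halves H and W; the final AdaptiveAvgPool2d collapses features to batch_size x 512\n            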
nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 128 x I_height/4 x I_width/4\n nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(True),\n nn.MaxPool2d(2, 2), # batch_size x 256 x I_height/8 x I_width/8\n nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(True),\n nn.AdaptiveAvgPool2d(1) # batch_size x 512\n )\n\n self.localization_fc1 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(True))\n self.localization_fc2 = nn.Linear(256, self.fiducial_num * 2)\n\n # Init fc2 in LocalizationNetwork\n self.localization_fc2.weight.data.fill_(0)\n # see RARE paper Fig. 6 (a)\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(fiducial_num / 2))\n ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(fiducial_num / 2))\n ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(fiducial_num / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n self.localization_fc2.bias.data = torch.from_numpy(initial_bias).float().view(-1)\n\n def forward(self, batch_I):\n \"\"\"\n input: batch_I : Batch Input Image [batch_size x i_channel_num x I_height x I_width]\n output: batch_C_prime : Predicted coordinates of fiducial points\n for input batch [batch_size x fiducial_num x 2]\n \"\"\"\n batch_size = batch_I.size(0)\n features = self.conv(batch_I).view(batch_size, -1)\n batch_C_prime = self.localization_fc2(self.localization_fc1(features)).view(batch_size, self.fiducial_num, 2)\n return batch_C_prime\n\n\nclass GridGenerator(nn.Module):\n \"\"\" Grid Generator of RARE, which produces P_prime by multipling T with P \"\"\"\n\n def __init__(self, fiducial_num, output_size):\n \"\"\" Generate P_hat and inv_delta_C for later \"\"\"\n super().__init__()\n self.eps = 1e-6\n self.I_r_height, self.I_r_width = output_size\n self.fiducial_num = fiducial_num\n self.C = self._build_C(self.fiducial_num) # fiducial_num x 2\n self.P = self._build_P(self.I_r_width, self.I_r_height)\n # for multi-gpu, you need register buffer\n self.register_buffer(\"inv_delta_C\", torch.tensor(self._build_inv_delta_C(\n self.fiducial_num, self.C)).float()) # fiducial_num+3 x fiducial_num+3\n self.register_buffer(\"P_hat\", torch.tensor(self._build_P_hat(\n self.fiducial_num, self.C, self.P)).float()) # n x fiducial_num+3\n # for fine-tuning with different image width, you may use below instead of self.register_buffer\n # fiducial_num+3 x fiducial_num+3\n # self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.fiducial_num, self.C)).float().cuda()\n # n x fiducial_num+3\n # self.P_hat = torch.tensor(self._build_P_hat(self.fiducial_num, self.C, self.P)).float().cuda()\n\n def _build_C(self, fiducial_num):\n \"\"\" Return coordinates of fiducial points in I_r; C \"\"\"\n ctrl_pts_x = np.linspace(-1.0, 1.0, int(fiducial_num / 2))\n ctrl_pts_y_top = -1 * np.ones(int(fiducial_num / 2))\n ctrl_pts_y_bottom = np.ones(int(fiducial_num / 2))\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)\n return C # fiducial_num x 2\n\n def _build_inv_delta_C(self, fiducial_num, C):\n \"\"\" Return inv_delta_C which is needed to calculate T \"\"\"\n hat_C = np.zeros((fiducial_num, fiducial_num), dtype=float) # fiducial_num x fiducial_num\n for i in range(0, fiducial_num):\n for j in 
range(i, fiducial_num):\n r = np.linalg.norm(C[i] - C[j])\n hat_C[i, j] = r\n hat_C[j, i] = r\n np.fill_diagonal(hat_C, 1)\n hat_C = (hat_C ** 2) * np.log(hat_C)\n # print(C.shape, hat_C.shape)\n delta_C = np.concatenate( # fiducial_num+3 x fiducial_num+3\n [\n np.concatenate([np.ones((fiducial_num, 1)), C, hat_C], axis=1), # fiducial_num x fiducial_num+3\n np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1), # 2 x fiducial_num+3\n np.concatenate([np.zeros((1, 3)), np.ones((1, fiducial_num))], axis=1) # 1 x fiducial_num+3\n ],\n axis=0\n )\n inv_delta_C = np.linalg.inv(delta_C)\n return inv_delta_C # fiducial_num+3 x fiducial_num+3\n\n def _build_P(self, I_r_width, I_r_height):\n I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width # self.I_r_width\n I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height # self.I_r_height\n P = np.stack( # self.I_r_width x self.I_r_height x 2\n np.meshgrid(I_r_grid_x, I_r_grid_y),\n axis=2\n )\n return P.reshape([-1, 2]) # n (= self.I_r_width x self.I_r_height) x 2\n\n def _build_P_hat(self, fiducial_num, C, P):\n n = P.shape[0] # n (= self.I_r_width x self.I_r_height)\n P_tile = np.tile(np.expand_dims(P, axis=1), (1, fiducial_num, 1)) # n x 2 -> n x 1 x 2 -> n x fiducial_num x 2\n C_tile = np.expand_dims(C, axis=0) # 1 x fiducial_num x 2\n P_diff = P_tile - C_tile # n x fiducial_num x 2\n rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False) # n x fiducial_num\n rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps)) # n x fiducial_num\n P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)\n return P_hat # n x fiducial_num+3\n\n def build_P_prime(self, batch_C_prime):\n \"\"\" Generate Grid from batch_C_prime [batch_size x fiducial_num x 2] \"\"\"\n batch_size = batch_C_prime.size(0)\n batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)\n batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)\n batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(\n batch_size, 3, 2).float().to(batch_C_prime.device)), dim=1) # batch_size x fiducial_num+3 x 2\n batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros) # batch_size x fiducial_num+3 x 2\n batch_P_prime = torch.bmm(batch_P_hat, batch_T) # batch_size x n x 2\n return batch_P_prime # batch_size x n x 2\n","sub_path":"misc/pytorch_toolkit/text_recognition/text_recognition/models/transformation/tps.py","file_name":"tps.py","file_ext":"py","file_size_in_byte":12340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415442792","text":"from io import IOBase\nfrom math import log\nfrom operator import and_\nfrom struct import calcsize, unpack\nfrom warnings import warn\nimport os\nimport re\nfrom functools import reduce\n\ntry:\n # noinspection PyUnresolvedReferences, PyUnboundLocalVariable\n basestring\nexcept NameError:\n # noinspection PyShadowingBuiltins\n basestring = str\n\n\nclass FlowData(object):\n \"\"\"\n Object representing a Flow Cytometry Standard (FCS) file\n \"\"\"\n def __init__(self, filename):\n \"\"\"\n filename: an FCS filename\n \"\"\"\n if isinstance(filename, basestring):\n try:\n self._fh = open(str(filename), 'rb')\n except IOError:\n raise\n elif isinstance(filename, IOBase):\n self._fh = filename\n else:\n raise TypeError(\n \"Filename must be a file path or a file handle \" +\n \"(either 'file' type or io.IOBase\")\n\n self.cur_offset = 0\n\n # parse headers\n self.header = self.__parse_header(self.cur_offset)\n\n # parse text\n self.text = 
self.__parse_text(\n self.cur_offset,\n self.header['text_start'],\n self.header['text_stop'])\n\n self.channel_count = int(self.text['par'])\n self.event_count = int(self.text['tot'])\n\n # parse analysis\n try:\n a_start = self.text['beginanalysis']\n except KeyError:\n a_start = self.header['analysis_start']\n try:\n a_stop = self.text['endanalysis']\n except KeyError:\n a_stop = self.header['analysis_end']\n self.analysis = self.__parse_analysis(self.cur_offset, a_start, a_stop)\n\n # parse data\n try:\n d_start = int(self.text['begindata'])\n except KeyError:\n d_start = self.header['data_start']\n try:\n d_stop = int(self.text['enddata'])\n except KeyError:\n d_stop = self.header['data_end']\n\n # account for LMD reporting wrong values for size of the data segment\n lmd = self.__fix_lmd(\n self.cur_offset,\n self.header['text_start'],\n self.header['text_stop'])\n d_stop += lmd\n self.events = self.__parse_data(\n self.cur_offset,\n d_start,\n d_stop,\n self.text)\n\n try:\n unused_path, self.name = os.path.split(self._fh.name)\n except (AttributeError, TypeError):\n self.name = 'InMemoryFile'\n\n self._fh.close()\n\n def __read_bytes(self, offset, start, stop):\n \"\"\"Read in bytes from start to stop inclusive.\"\"\"\n\n self._fh.seek(offset + start)\n\n return self._fh.read(stop - start + 1)\n\n def __parse_header(self, offset):\n \"\"\"\n Parse the FlowData FCS file at the offset (supporting multiple\n data segments in a file\n \"\"\"\n header = dict()\n header['version'] = float(self.__read_bytes(offset, 3, 5))\n header['text_start'] = int(self.__read_bytes(offset, 10, 17))\n header['text_stop'] = int(self.__read_bytes(offset, 18, 25))\n header['data_start'] = int(self.__read_bytes(offset, 26, 33))\n header['data_end'] = int(self.__read_bytes(offset, 34, 41))\n try:\n header['analysis_start'] = int(self.__read_bytes(offset, 42, 49))\n except ValueError:\n header['analysis_start'] = -1\n try:\n header['analysis_end'] = int(self.__read_bytes(offset, 50, 57))\n except ValueError:\n header['analysis_end'] = -1\n\n return header\n\n def __fix_lmd(self, offset, start, stop):\n \"\"\"\n Handle LMD counting differently then most other FCS data\n \"\"\"\n text = self.__read_bytes(offset, start, stop)\n if text[0] == text[-1]:\n return 0\n else:\n return -1\n\n def __parse_text(self, offset, start, stop):\n \"\"\"return parsed text segment of FCS file\"\"\"\n text = self.__read_bytes(offset, start, stop)\n try:\n # try UTF-8 first\n text = text.decode()\n except UnicodeDecodeError:\n # next best guess is Latin-1, if not that either, we throw the exception\n text = text.decode(\"ISO-8859-1\")\n return self.__parse_pairs(text)\n\n def __parse_analysis(self, offset, start, stop):\n \"\"\"return parsed analysis segment of FCS file\"\"\"\n if start == stop:\n return {}\n else:\n text = self.__read_bytes(offset, start, stop)\n return self.__parse_pairs(text)\n\n def __parse_data(self, offset, start, stop, text):\n \"\"\"return array of data segment of FCS file\"\"\"\n data_type = text['datatype']\n mode = text['mode']\n if mode == 'c' or mode == 'u':\n raise NotImplementedError(\n \"FCS data stored as type \\'%s\\' is unsupported\" % mode\n )\n\n if text['byteord'] == '1,2,3,4' or text['byteord'] == '1,2':\n order = '<'\n elif text['byteord'] == '4,3,2,1' or text['byteord'] == '2,1':\n order = '>'\n else:\n warn(\n \"unsupported byte order %s , using default @\" % text['byteord'])\n order = '@'\n # from here on out we assume mode l (list)\n\n bit_width = []\n data_range = []\n for i in range(1, 
int(text['par']) + 1):\n bit_width.append(int(text['p%db' % i]))\n try:\n data_range.append(int(text['p%dr' % i]))\n except ValueError:\n # we found an FCS file where one channel was using\n # exp notation for the int\n data_range.append(int(float(text['p%dr' % i])))\n\n if data_type.lower() == 'i':\n data = self.__parse_int_data(\n offset,\n start,\n stop,\n bit_width,\n data_range,\n order)\n elif data_type.lower() == 'f' or data_type.lower() == 'd':\n data = self.__parse_float_data(\n offset,\n start,\n stop,\n data_type.lower(),\n order)\n else: # ascii\n data = self.__parse_ascii_data(\n offset,\n start,\n stop,\n data_type,\n order)\n return data\n\n def __parse_int_data(self, offset, start, stop, bit_width, d_range, order):\n \"\"\"Parse out and return integer list data from FCS file\"\"\"\n\n if reduce(and_, [item in [8, 16, 32] for item in bit_width]):\n if len(set(bit_width)) == 1: # uniform size for all parameters\n # calculate how much data to read in.\n num_items = (stop - start + 1) / calcsize(\n self.__format_integer(bit_width[0]))\n\n # unpack to a list\n tmp = unpack(\n '%s%d%s' %\n (\n order, num_items,\n self.__format_integer(bit_width[0])\n ),\n self.__read_bytes(offset, start, stop)\n )\n\n # parameter sizes are different\n # e.g. 8, 8, 16,8, 32 ... do one at a time\n else:\n log2 = self.__log_factory(2)\n unused_bit_widths = map(int, map(log2, d_range))\n tmp = []\n cur = start\n while cur < stop:\n for i, cur_width in enumerate(bit_width):\n bit_mask = self.__mask_integer(\n cur_width,\n unused_bit_widths[i])\n n_bytes = cur_width / 8\n bin_string = self.__read_bytes(\n offset, cur,\n cur + n_bytes - 1)\n cur += n_bytes\n val = bit_mask & unpack(\n '%s%s' %\n (\n order,\n self.__format_integer(cur_width)\n ),\n bin_string)[0]\n tmp.append(val)\n else: # non standard bit width... 
Does this happen?\n warn('Non-standard bit width for data segments')\n return None\n return tmp\n\n def __parse_float_data(self, offset, start, stop, data_type, order):\n \"\"\"Parse out and return float list data from FCS file\"\"\"\n num_items = (stop - start + 1) / calcsize(data_type)\n\n tmp = unpack('%s%d%s' % (order, num_items, data_type),\n self.__read_bytes(offset, start, stop))\n return tmp\n\n def __parse_ascii_data(self, offset, start, stop, data_type, order):\n \"\"\"Parse out ascii encoded data from FCS file\"\"\"\n num_items = (stop - start + 1) / calcsize(data_type)\n\n tmp = unpack('%s%d%s' % (order, num_items, data_type),\n self.__read_bytes(offset, start, stop))\n return tmp\n\n @staticmethod\n def __parse_pairs(text):\n \"\"\"return key/value pairs from a delimited string\"\"\"\n delimiter = text[0]\n\n if delimiter != text[-1]:\n warn(\"text in segment does not start and end with delimiter\")\n\n if delimiter == r'|':\n delimiter = '\\|'\n elif delimiter == r'\\a'[0]: # test for delimiter being \\\n delimiter = '\\\\\\\\' # regex will require it to be \\\\\n\n tmp = text[1:-1].replace('$', '')\n # match the delimited character unless it's doubled\n regex = re.compile('(?<=[^%s])%s(?!%s)' % (\n delimiter, delimiter, delimiter))\n tmp = regex.split(tmp)\n return dict(\n zip(\n [x.lower().replace(\n delimiter + delimiter, delimiter) for x in tmp[::2]],\n [x.replace(delimiter + delimiter, delimiter) for x in tmp[1::2]]\n )\n )\n\n @staticmethod\n def __format_integer(b):\n \"\"\"return binary format of an integer\"\"\"\n if b == 8:\n return 'B'\n elif b == 16:\n return 'H'\n elif b == 32:\n return 'I'\n else:\n print(\"Cannot handle integers of bit size %d\" % b)\n return None\n\n @staticmethod\n def __mask_integer(b, ub):\n \"\"\"return bit mask of an integer and a bit witdh\"\"\"\n if b == 8:\n return 0xFF >> (b - ub)\n elif b == 16:\n return 0xFFFF >> (b - ub)\n elif b == 32:\n return 0xFFFFFFFF >> (b - ub)\n else:\n print(\"Cannot handle integers of bit size %d\" % b)\n return None\n\n @staticmethod\n def __log_factory(base):\n \"\"\"factory for various bases or the log function\"\"\"\n def f(x):\n return log(x, base)\n\n return f\n\n @property\n def channels(self):\n \"\"\"\n Returns a dictionary of channels, with key as channel number\n and value is a dictionary of the PnN and PnS text\n \"\"\"\n channels = dict()\n regex_pnn = re.compile(\"^p(\\d+)n$\", re.IGNORECASE)\n\n for i in self.text.keys():\n match = regex_pnn.match(i)\n if not match:\n continue\n\n channel_num = match.groups()[0]\n channels[channel_num] = dict()\n\n channels[channel_num]['PnN'] = self.text[match.group()]\n\n # now check for PnS field, which is optional so may not exist\n regex_pns = re.compile(\"^p%ss$\" % channel_num, re.IGNORECASE)\n for j in self.text.keys():\n match = regex_pns.match(j)\n if match:\n channels[channel_num]['PnS'] = self.text[match.group()]\n\n return channels\n\n def write_fcs(self, filename, extra=None):\n def text_size(text_dict, text_delimiter):\n result = text_delimiter\n for idx in text_dict:\n result += '$%s%s%s%s' % (\n idx,\n text_delimiter,\n text_dict[idx],\n text_delimiter)\n return len(result), result\n\n # magic FCS defined positions\n header_text_start = (10, 17)\n header_text_end = (18, 25)\n header_data_start = (26, 33)\n header_data_end = (34, 41)\n header_analysis_start = (42, 49)\n header_analysis_end = (50, 5)\n\n fh = open(filename, 'wb')\n fh.write('FCS3.1')\n fh.write(' ' * 53)\n\n # Write TEXT Segment\n text_start = 256 # arbitrarily start at byte 
256.\n delimiter = '/' # use / as our delimiter.\n\n # Write spaces until the start of the txt segment\n fh.seek(58)\n fh.write(' ' * (text_start - fh.tell()))\n\n n_channels = int(self.text['par'])\n n_points = len(self.events)\n data_size = 4 * n_channels * n_points # 4 bytes to hold float\n\n text = dict()\n text['BEGINANALYSIS'] = '0'\n text['BEGINDATA'] = '0'\n text['BEGINSTEXT'] = '0'\n text['BYTEORD'] = '1,2,3,4' # little endian\n text['DATATYPE'] = 'F' # only do float data\n text['ENDANALYSIS'] = '0'\n text['ENDDATA'] = '0'\n text['ENDSTEXT'] = '0'\n text['MODE'] = 'L' # only do list mode data\n text['NEXTDATA'] = '0'\n text['PAR'] = str(n_channels)\n text['TOT'] = str(n_points)\n for i in range(n_channels):\n text['P%dB' % (i + 1)] = '32' # float requires 32 bits\n text['P%dE' % (i + 1)] = '0,0'\n text['P%dR' % (i + 1)] = str(max(self.events))\n text['P%dN' % (i + 1)] = str(i)\n\n if extra is not None:\n for i in extra:\n tmp = i.strip()\n if tmp.lower() not in text and tmp.upper() not in text:\n val = extra[i].replace(delimiter, delimiter + delimiter)\n text[i] = val\n\n i = 1\n size, _ = text_size(text, delimiter)\n prop_size = text_start + ((size % 256) + i) * 256\n text['BEGINDATA'] = prop_size\n text['ENDDATA'] = prop_size + data_size\n data_start = prop_size\n data_end = prop_size + data_size - 1\n size, text_segment = text_size(text, delimiter)\n text_end = text_start + size - 1\n\n fh.write(text_segment)\n fh.write(' ' * (data_start - fh.tell()))\n fh.write(str(float(i) for i in self.events))\n\n fh.seek(header_text_start[0])\n fh.write(str(text_start))\n fh.seek(header_text_end[0])\n fh.write(str(text_end))\n\n fh.seek(header_data_start[0])\n if len(str(data_end)) < (header_data_end[1] - header_data_end[0]):\n fh.write(str(data_start))\n fh.seek(header_data_end[0])\n fh.write(str(data_end))\n else:\n fh.write(str(0))\n fh.seek(header_data_end[0])\n fh.write(str(0))\n\n fh.seek(header_analysis_start[0])\n fh.write(str(0))\n fh.seek(header_analysis_end[0])\n fh.write(str(0))\n\n fh.close()\n\n def __unicode__(self):\n return self.name\n\n def __repr__(self):\n return self.name\n","sub_path":"flowio/flowdata.py","file_name":"flowdata.py","file_ext":"py","file_size_in_byte":15289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"267137407","text":"#!/usr/bin/env python2\nimport unittest\n\nfrom redis_wrap import get_redis, get_list, get_hash, get_set\n\n\nclass TestBase(unittest.TestCase):\n def setUp(self):\n get_redis().delete(u'bears')\n get_redis().delete(u'villains')\n get_redis().delete(u'fishes')\n\nclass TestList(TestBase):\n def test_list(self):\n bears = get_list(u'bears')\n\n bears.append(u'grizzly')\n self.assertEqual(len(bears), 1)\n\n self.assertTrue(all(bear == u'grizzly' for bear in bears))\n\n try:\n self.assertIn(u'grizzly', bears)\n except AttributeError: # needs to be changed\n self.assertTrue(u'grizzly' in bears)\n\n bears.extend([u'white bear', u'pedo bear'])\n self.assertEqual(len(bears), 3)\n bears.insert(0,u'cartesian bear')\n\n for expected,actual in zip(bears,\n [u'cartesian bear',u'grizzly',u'white bear', u'pedo bear']):\n self.assertEqual(expected,actual)\n self.assertEqual(bears.pop(),u'pedo bear')\n self.assertEqual(bears.pop(0),u'cartesian bear')\n\n bears[1] = u'polar bear'\n self.assertEqual(bears[1],u'polar bear')\n\n bears.remove(u'grizzly')\n try:\n self.assertNotIn(u'grizzly', bears)\n except AttributeError: # needs to be changed\n self.assertTrue(u'grizzly' not in bears)\n\n\nclass 
TestHash(TestBase):\n def test_hash(self):\n villains = get_hash(u'villains')\n try:\n self.assertNotIn(u'riddler', villains)\n except AttributeError:\n self.assertTrue(u'riddler' not in villains)\n\n villains[u'riddler'] = 'Edward Nigma'\n villains[u'Magneto'] = 'Max Eisenhardt'\n try:\n self.assertIn(u'riddler', villains)\n except AttributeError:\n self.assertTrue(u'riddler' in villains)\n\n self.assertEqual(villains.get(u'riddler'), u'Edward Nigma')\n\n self.assertEqual(len(villains.keys()), 2)\n for expected,actual_key in zip([u'Magneto',u'riddler'],\n sorted(villains.keys())):\n self.assertEqual(expected,actual_key)\n for expected,actual_value in zip([u'Edward Nigma',u'Max Eisenhardt'],\n sorted(villains.values())):\n self.assertEqual(expected,actual_value)\n\n\n villains.update({'Green Goblin':'Norman Osborn','riddler':'E. Nigma'})\n self.assertEqual(villains['Green Goblin'],'Norman Osborn')\n self.assertEqual(villains['riddler'],'E. Nigma')\n\n del villains[u'riddler']\n self.assertEqual(len(villains.keys()), 2)\n try:\n self.assertNotIn(u'riddler', villains)\n except AttributeError:\n self.assertTrue(u'riddler' not in villains)\n\n\nclass TestSet(TestBase):\n def test_set(self):\n fishes = get_set(u'fishes')\n try:\n self.assertNotIn(u'nemo', fishes)\n except AttributeError:\n self.assertTrue(u'nemo' not in fishes)\n\n fishes.add(u'nemo')\n try:\n self.assertIn(u'nemo', fishes)\n except AttributeError:\n self.assertTrue(u'nemo' in fishes)\n\n self.assertTrue(all(fish == u'nemo' for fish in fishes))\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265595147","text":"# AoC day 12\n# puzzle 1\n\n# read input\nwith open('challenge-12-12-2020.txt') as file:\n entries = [(x[0], int(x[1:]) if x[0] not in (\"S\", \"W\") else -int(x[1:])) for x in file.readlines()]\n\n# our ship\nship = {\n \"pos\": [0, 0],\n \"direction\": \"E\"\n}\n\n\ndef move_east_west(ship, value):\n ship[\"pos\"][1] += value\n\n\ndef move_north_south(ship, value):\n ship[\"pos\"][0] += value\n\n\ndef rotate_right(ship, value):\n rotations = {\n \"N\": \"E\",\n \"E\": \"S\",\n \"S\": \"W\",\n \"W\": \"N\"\n }\n while value > 0:\n ship[\"direction\"] = rotations[ship[\"direction\"]]\n value -= 90\n\n\ndef rotate_left(ship, value):\n rotations = {\n \"E\": \"N\",\n \"S\": \"E\",\n \"W\": \"S\",\n \"N\": \"W\"\n }\n while value > 0:\n ship[\"direction\"] = rotations[ship[\"direction\"]]\n value -= 90\n\n\ndef move_forward(ship, value):\n if ship[\"direction\"] == \"E\":\n ship[\"pos\"][1] += value\n elif ship[\"direction\"] == \"W\":\n ship[\"pos\"][1] -= value\n elif ship[\"direction\"] == \"N\":\n ship[\"pos\"][0] += value\n else:\n ship[\"pos\"][0] -= value\n\n\nmovements = {\n \"N\": move_north_south,\n \"S\": move_north_south,\n \"E\": move_east_west,\n \"W\": move_east_west,\n \"L\": rotate_left,\n \"R\": rotate_right,\n \"F\": move_forward\n}\n\n# move the ship\nfor entry in entries:\n movements[entry[0]](ship, entry[1])\n\n# print resulting manhattan distance\nprint(sum([abs(x) for x in ship[\"pos\"]]))\n\n# our ship\nship = {\n \"pos\": [0, 0],\n \"waypoint\": [1, 10]\n}\n\n\ndef move_east_west_waypoint(ship, value):\n ship[\"waypoint\"][1] += value\n\n\ndef move_north_south_waypoint(ship, value):\n ship[\"waypoint\"][0] += value\n\n\ndef rotate_right_waypoint(ship, value):\n while value > 0:\n vec = 
ship[\"waypoint\"]\n ship[\"waypoint\"] = [-vec[1], vec[0]]\n value -= 90\n\n\ndef rotate_left_waypoint(ship, value):\n while value > 0:\n vec = ship[\"waypoint\"]\n ship[\"waypoint\"] = [vec[1], -vec[0]]\n value -= 90\n\n\ndef move_forward_waypoint(ship, value):\n ship[\"pos\"][0] += value * ship[\"waypoint\"][0]\n ship[\"pos\"][1] += value * ship[\"waypoint\"][1]\n\n\nmovements = {\n \"N\": move_north_south_waypoint,\n \"S\": move_north_south_waypoint,\n \"E\": move_east_west_waypoint,\n \"W\": move_east_west_waypoint,\n \"L\": rotate_left_waypoint,\n \"R\": rotate_right_waypoint,\n \"F\": move_forward_waypoint\n}\n\n# move the ship\nfor entry in entries:\n movements[entry[0]](ship, entry[1])\n\n# print resulting manhattan distance\nprint(sum([abs(x) for x in ship[\"pos\"]]))\n","sub_path":"challenge-cheated-12-12-2020.py","file_name":"challenge-cheated-12-12-2020.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"113620073","text":"\"\"\"empty message\n\nRevision ID: 718326402a65\nRevises: \nCreate Date: 2019-11-01 17:55:27.050348\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '718326402a65'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password_hash', sa.String(\n length=128), nullable=True),\n sa.Column('balance', sa.Integer(), nullable=True),\n sa.Column('about_me', sa.String(\n length=140), nullable=True),\n sa.Column('last_seen', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_username'), 'user',\n ['username'], unique=True)\n op.create_table('resource',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.String(length=140), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_resource_timestamp'),\n 'resource', ['timestamp'], unique=False)\n op.create_table('buys',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('resource_id', sa.Integer(), nullable=True),\n sa.Column('consumer_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['consumer_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(\n ['resource_id'], ['resource.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('transfers',\n sa.Column('transfer_id', sa.Integer(), nullable=True),\n sa.Column('transfee_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['transfee_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['transfer_id'], ['buys.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('transfers')\n    op.drop_table('buys')\n    op.drop_index(op.f('ix_resource_timestamp'), table_name='resource')\n    op.drop_table('resource')\n    op.drop_index(op.f('ix_user_username'), table_name='user')\n    op.drop_index(op.f('ix_user_email'), table_name='user')\n    op.drop_table('user')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/718326402a65_.py","file_name":"718326402a65_.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"42287045","text":"#!/usr/bin/env python\n\nimport sqlite3\nfrom nistbeacon import NistBeacon\n\n\ndef iterate_over_records():\n    db_file_name = 'nist.db'\n\n    # Setup the database\n    db = sqlite3.connect(db_file_name)\n    cursor = db.cursor()\n    cursor.execute(\n        'CREATE TABLE IF NOT EXISTS \"beacon_values\" ('\n        'timestamp INTEGER,'\n        'output_value TEXT,'\n        'seed_value TEXT,'\n        'previous_output_value TEXT,'\n        'status_code TEXT,'\n        'version TEXT,'\n        'signature_value TEXT,'\n        'frequency INTEGER,'\n        'PRIMARY KEY(timestamp)'\n        ')'\n    )\n    db.commit()\n    db.close()\n\n    # IF rows\n    db = sqlite3.connect(db_file_name)\n    cursor = db.cursor()\n    cursor.execute(\n        'SELECT timestamp FROM beacon_values ORDER BY timestamp DESC LIMIT 1;'\n    )\n    result = cursor.fetchone()\n    db.close()\n\n    if result:\n        # Go to the next record from the last timestamp in the database\n        record = NistBeacon.get_next(result[0])\n    else:\n        # We begin at record zero\n        record = NistBeacon.get_first_record()\n\n    # Open the database\n    db = sqlite3.connect(db_file_name)\n    cursor = db.cursor()\n\n    while True:\n        if record is None:\n            print(\"Got 'None' back for a record. Stopping.\")\n            break\n\n        print(\n            \"{0} - {1} - {2}\".format(\n                record.timestamp,\n                record.valid_signature,\n                record.output_value,\n            )\n        )\n\n        # Insert this record\n        cursor.execute(\n            'INSERT INTO beacon_values('\n            'timestamp, output_value, seed_value, previous_output_value, '\n            'status_code, version, signature_value, frequency'\n            ')'\n            'VALUES('\n            ':timestamp, :output_value, :seed_value, :previous_output_value,'\n            ':status_code, :version, :signature_value, :frequency'\n            ')',\n            {\n                'timestamp': record.timestamp,\n                'output_value': record.output_value,\n                'seed_value': record.seed_value,\n                'previous_output_value': record.previous_output_value,\n                'status_code': record.status_code,\n                'version': record.version,\n                'signature_value': record.signature_value,\n                'frequency': record.frequency,\n            }\n        )\n        db.commit()\n\n        # Advance to the next record\n        record = NistBeacon.get_next(record.timestamp)\n\n    # Close out\n    db.close()\n\nif __name__ == '__main__':\n    iterate_over_records()\n","sub_path":"sync_database.py","file_name":"sync_database.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224545874","text":"import argparse\nimport re\nimport time\n\nimport docker\nfrom pushover import init, Client\n\ndef service_list_to_str(services_list):\n    msg = \"\"\n    for service in services_list:\n        msg = f\"{service.name}\\n{msg}\"\n    return msg\n\ndef monitor_swarm_pushover(docker_client, white_pattern_list):\n    # look up each whitelisted service by name and keep the ones with no running task\n    services = [docker_client.services.get(service_name) for service_name in white_pattern_list]\n    not_running_services = [service for service in services if len(service.tasks(filters={'desired-state': 'running'})) == 0]\n    err_msg = \"\"\n    if len(not_running_services) != 0:\n        err_msg = \"Detected Stopped Services: \\n%s\\n%s\" % 
(service_list_to_str(not_running_services), err_msg)\n\n    if err_msg == \"\":\n        return \"OK\", \"OK: detected no stopped services\"\n    else:\n        return \"ERROR\", err_msg\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--token', required=True, help=\"Pushover Token.\", type=str)\n    parser.add_argument('--app_key', required=True, help=\"Pushover Application key.\", type=str)\n    parser.add_argument('--whitelist', default='', required=False,\n                        help=\"Skip checking certain containers. A list of regexps separated by commas.\", type=str)\n    parser.add_argument('--check_interval', default='300', required=False, help=\"Periodical check. In seconds.\",\n                        type=int)\n    parser.add_argument('--msg_prefix', default='', required=False, help=\"Pushover message prefix.\", type=str)\n    l = parser.parse_args()\n    check_interval = l.check_interval\n    white_pattern_list = l.whitelist.split(',')\n\n    if white_pattern_list == ['']:\n        white_pattern_list = []\n\n    pushover_token = l.token\n    pushover_key = l.app_key\n    msg_prefix = l.msg_prefix\n\n    if pushover_token == '':\n        print(\"Warning: Please provide a valid pushover token.\")\n    if pushover_key == '':\n        print(\"Warning: Please provide a valid pushover application key.\")\n\n    # pass the token variable itself, not the literal string \"pushover_token\"\n    pushover_client = Client(pushover_key, api_token=pushover_token)\n    docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock')\n\n    has_send_error_alert = False\n    while True:\n        # hand over the docker client object, not the socket path string\n        (status, err_msg) = monitor_swarm_pushover(docker_client, white_pattern_list)\n        if msg_prefix != \"\":\n            err_msg = \"%s\\n%s\" % (msg_prefix, err_msg)\n        print(\"%s: %s\" % (status, err_msg))\n        if status == \"OK\":\n            if has_send_error_alert is True:\n                pushover_client.send_message(err_msg, title=\"SwarmAlert\")\n            has_send_error_alert = False\n        else:\n            if has_send_error_alert is False:\n                pushover_client.send_message(err_msg, title=\"SwarmAlert\")\n            \n            # avoid sending alerts over and over again\n            has_send_error_alert = True\n        time.sleep(check_interval)\n# File : swarm-alert.py ends\n\n","sub_path":"swarm-alert.py","file_name":"swarm-alert.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"219103393","text":"from decimal import Context\nfrom django.contrib.auth import login, authenticate,logout\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom .tokens import account_activation_token\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom .forms import SignUpForm,RegisterCases\nfrom .tokens import account_activation_token\nfrom .models import Cases,Profile\n\n\n# Create your views here.\n\ndef index(request):\n    \n    #context={\"page_sec\":\"about\" ,\"title\":\"Quicksight\"}\n    #User.objects.all().delete()\n    #Profile.objects.all().delete()\n    #Cases.objects.all().delete()\n    \n    \n    return render(request, r\"LegalTech\\\index.html\")\n\ndef login_form(request):\n    \n    #context={\"page_sec\":\"about\",\"title\":\"Quicksight\"}\n\n    if request.POST:\n        print(\"getting id\")\n        
username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect('inner-page')\n #return render_to_response('login.html', context_instance=RequestContext(request))\n \n return render(request, r\"LegalTech\\login-form.html\")\n\n \n#@login_required(login_url='index')\ndef inner_page(request):\n \n #context={\"page_sec\":\"print(user.is_active)\n \n return render(request, r\"LegalTech\\inner-page.html\")\n\n\n\ndef click_me(request):\n \n return redirect(r'index')\n\ndef activation_sent_view(request):\n return render(request, r'LegalTech\\activation_sent.html')\n\n\ndef activate(request, uidb64, token):\n try:\n \n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n # checking if the user exists, if the token is valid.\n if user is not None and account_activation_token.check_token(user, token):\n # if valid set active true \n user.is_active = True\n # set signup_confirmation true\n user.profile.signup_confirmation = True\n user.save()\n login(request, user)\n context={'object':'user','data':user.profile.first_name}\n return redirect('inner_page')\n else:\n return render(request, r'LegalTech\\activation_invalid.html')\n\ndef signup_view(request):\n print(\"list of users \",User.objects.all()) \n #print(\"deleting all users\",User.objects.all().delete())\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n \n if form.is_valid():\n user = form.save()\n \n user.refresh_from_db()\n \n \n user.profile.first_name = form.cleaned_data.get('first_name')\n user.profile.last_name = form.cleaned_data.get('last_name')\n user.profile.email = form.cleaned_data.get('email')\n #print(user)\n # user can't login until link confirmed\n user.is_active = False\n\n print(\"user.profile.last_name\",user.profile.last_name)\n user.save()\n #user.refresh_from_db()\n current_site = get_current_site(request)\n subject = 'Please Activate Your Account'\n # load a template like get_template() \n # and calls its render() method immediately.\n message = render_to_string(r'LegalTech\\activation_request.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n # method will generate a hash value with user related data\n 'token': account_activation_token.make_token(user),\n })\n print(\"Message content\",message)\n \n user.email_user(subject, message)\n print(\"email sent\")\n context={'status':'email_sent','success':'Activation link sent! 
Please check your console or mail.'}\n \n\n return redirect(r'inner_page')\n #return HttpResponseRedirect(reverse(\"inner-page\"))\n #return render(request, r'LegalTech\\inner-page.html',context)\n else:\n form = SignUpForm()\n return render(request, r'LegalTech\\inner-page.html', {'form': form})\n\n@login_required(login_url='index')\ndef register_case(request):\n context ={}\n if request.method == 'POST':\n \n # create object of form\n form = RegisterCases(request.POST)\n #print(form)\n # check if form data is valid\n if form.is_valid():\n # save the form data to model\n case=form.save()\n case.refresh_from_db()\n \n \n case.user_name = Profile.objects.get(user=request.user)\n case.payee_name = form.cleaned_data.get('payee_name')\n case.payee_address = form.cleaned_data.get('payee_address')\n case.payer_name = form.cleaned_data.get('payer_name')\n case.payer_address = form.cleaned_data.get('payer_address')\n\n \n case.save()\n\n \n\n return redirect(r'view_cases')\n else:\n \n form = RegisterCases()\n \n \n \n return render(request, r\"LegalTech\\register_case.html\", {'form': form})\n \n \n\n@login_required(login_url='index')\ndef view_cases(request):\n user_case=Profile.objects.get(user=request.user)\n #print(user_case.id)\n\n \n cases_list=Cases.objects.filter(user_name_id=user_case.id)\n form_data=''\n for c in cases_list:\n form_data+=\"\"+c.payer_name+\"\"+c.payer_address+\"\"+c.payee_name+\"\"+c.payee_address+\"\"\"\n Download PDFStatus\"\"\"\n \n \n context={'data':form_data}\n return render(request, r\"LegalTech\\view_cases.html\", context)\n\ndef logout_session(request):\n logout(request)\n\n return redirect(r'index')","sub_path":"TechLawgy/LegalTech/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"298949548","text":"import requests\n\nfrom urllib.parse import quote_plus\n\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.http import (\n HttpResponse,\n HttpResponseRedirect,\n Http404,\n HttpResponseForbidden\n)\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import RedirectView\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\n\nfrom .forms import PostForm\nfrom .models import Post\n\n\ndef create_post(request):\n template_name = 'create_post.html'\n\n if not request.user.is_staff and not request.user.is_superuser:\n return HttpResponseForbidden()\n\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, _('Your post create successfully.'))\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.error(request, form.errors)\n\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n\n\ndef post_detail(request, slug=None):\n \"\"\"\n :param request:\n :param slug:\n :return: HttpResponseForbidden, HttpResponse\n\n Displays the details of a post. This takes the slug to display and renders \n in the details.html template. It checks to see if the\n post is draft or the publish date is before current date first. 
If either \n    are true, it only allows staff user or super user to\n    view that post. Otherwise it returns a HttpResponseForbidden to the user\n    \"\"\"\n    template_name = 'detail.html'\n\n    url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&APPID=4b24f18ddf4653d31f83890f4f73aa39'\n    city = 'London'\n    # decode the response body as JSON\n    respon = requests.get(url.format(city)).json()\n    temp_ = respon['main']['temp']\n    celsius = format((temp_ - 32) * 5.0 / 9.0, '.3f')\n\n    weather_api = {\n        'city': city,\n        'temperature': celsius,\n        'description': respon['weather'][0]['description'],\n        'icon': respon['weather'][0]['icon'],\n    }\n\n    instance = get_object_or_404(Post, slug=slug)\n    if instance.draft or instance.publish > timezone.now().date():\n        # forbid only users who are neither staff nor superuser; return the response, don't raise the class\n        if not request.user.is_staff and not request.user.is_superuser:\n            return HttpResponseForbidden()\n\n    share_string = quote_plus(instance.description)\n\n    context = {\n        'title': instance.title,\n        'description': instance.description,\n        'codes': instance.codes,\n        'target': instance.target,\n        'reference': instance.reference,\n        'picture': instance.image,\n        'instance': instance,\n        'share_string': share_string,\n\n    }\n    return render(request, template_name, context)\n\n\nclass PostHitToggle(RedirectView):\n\n    def get_redirect_url(self, *args, **kwargs):\n        slug = self.kwargs.get('slug')\n        obj = get_object_or_404(Post, slug=slug)\n        link = obj.get_absolute_url()\n        user = self.request.user\n        if user.is_authenticated():\n            if user in obj.hits.all():\n                obj.hits.remove(user)\n            else:\n                obj.hits.add(user)\n        return link\n\n\nclass PostHitAPIToggle(APIView):\n    \"\"\"\n    View to list all users in the system.\n\n    * Requires token authentication.\n    * Only admin users are able to access this view.\n    \"\"\"\n    authentication_classes = (authentication.SessionAuthentication,)\n    permission_classes = (permissions.IsAuthenticated,)\n\n    def get(self, request, slug=None, format=None):\n        \"\"\"\n        Return a list of all users.\n        \"\"\"\n        # slug = self.kwargs.get('slug')\n        obj = get_object_or_404(Post, slug=slug)\n        link = obj.get_absolute_url()\n        user = self.request.user\n        updated = False\n        hited = False\n\n        if user.is_authenticated():\n            if user in obj.hits.all():\n                hited = False\n                obj.hits.remove(user)\n            else:\n                hited = True\n                obj.hits.add(user)\n            updated = True\n        data = {\n            'updated': updated,\n            'hited': hited,\n        }\n        return Response(data)\n\n\ndef post_list(request):\n    template_name = 'posts.html'\n\n    url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&APPID=4b24f18ddf4653d31f83890f4f73aa39'\n    city = 'London'\n    # decode the response body as JSON\n    respon = requests.get(url.format(city)).json()\n    temp_ = respon['main']['temp']\n    celsius = format((temp_ - 32) * 5.0 / 9.0, '.3f')\n\n    weather_api = {\n        'city': city,\n        'temperature': celsius,\n        'description': respon['weather'][0]['description'],\n        'icon': respon['weather'][0]['icon'],\n    }\n\n    today = timezone.now().date()\n\n    if request.user.is_staff or request.user.is_superuser:\n        queryset = Post.objects.all().order_by('-date_created')\n    else:\n        queryset = Post.objects.all().order_by('-publish')\n    # else:\n    #     queryset = Post.objects.active().order_by('-date_created')\n    queryset_list = Post.objects.all().order_by('-publish')\n    query = request.GET.get('q')\n\n    if query:\n        queryset_list = queryset_list.filter(\n            Q(title__icontains=query) |\n            Q(content__icontains=query) |\n            Q(user__first_name__icontains=query) |\n            Q(user__last_name__icontains=query) |\n            Q(user__reference__icontains=query) |\n            Q(user__target__icontains=query)\n        
).distinct()\n ''' TODO Make the list of blog posts dynamic. Reading from a value in the settings.py or\n a parameter in the DB\n '''\n paginator = Paginator(queryset, 3)\n page = request.GET.get('page')\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n 'posts': queryset,\n 'today': today,\n 'weather_api': weather_api,\n }\n return render(request, template_name, context)\n\n\ndef post_update(request, slug=None):\n \"\"\"\n :param request:\n :param slug:\n :return: HttpResponse, HttpResponseForbidden, Http404, HttpRedirect (302)\n Update a post. This allows a super user or staff user to update a post \n which is already on the system. It handles HTTP POST/GET\n to do this. The GET method will populate the form and return to the \n browser. The POST method will store changes into the DB.\n \"\"\"\n template_name = 'update.html'\n\n instance = get_object_or_404(Post, slug=slug)\n if not request.user.is_staff and not request.user.is_superuser:\n return HttpResponseForbidden()\n\n # First check if it's a post. If it is we need to pull values from our\n # form and save them.\n form = PostForm(request.POST or None,\n request.FILES or None, instance=instance)\n if request.method == \"POST\":\n\n if form.is_valid():\n instance = form.save(commit=False)\n # This returns a post object that is persisted to the database.\n instance.save()\n messages.success(request, _('The page successfully updated'))\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.success(request, form.errors)\n\n context = {\n 'title': instance.title,\n 'instance': instance,\n 'form': form,\n\n }\n return render(request, template_name, context)\n\n\ndef post_delete(request, slug=None):\n \"\"\"\n :param request:\n :param slug:\n :return: HttpResponseForbidden, Http404, or HttpResponse\n\n Delete a post. This allows a super user or staff user to delete a post \n which is already on the system. It handles HTTP POST/GET\n to do this. The GET or POST method will delete the blog post if it exists. \n Otherwise it will return a Http404. 
If the user is not\n a super user or staff user it will return a HttpResponseForbidden.\n \"\"\"\n if not request.user.is_staff and not request.user.is_superuser:\n return HttpResponseForbidden()\n instance = get_object_or_404(Post, slug=slug)\n instance.delete()\n messages.success(request, _('The post has been deleted'))\n return redirect('blogs:list')\n","sub_path":"dilmac/blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263925438","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\"\"\"\r\nimport torch\r\nfrom utils import create_propagator_matrix , uniform\r\nfrom torch_sparse import spmm\r\n\r\n\r\nclass DenseFullyConnected(torch.nn.Module):\r\n \r\n \"\"\"\r\n Approximate PageRank Network\r\n \r\n Parameters\r\n ----------\r\n in_channels: Number of input channels.\r\n out_channels: Number of output channels.\r\n density: Feature matrix.\r\n \r\n \"\"\"\r\n def __init__(self, in_channels, out_channels):\r\n super(DenseFullyConnected, self).__init__()\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.define_parameters()\r\n self.init_parameters()\r\n\r\n def define_parameters(self):\r\n \r\n \"\"\"\r\n Weights matrices\r\n \"\"\"\r\n \r\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\r\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))\r\n\r\n def init_parameters(self):\r\n \r\n \"\"\"\r\n Wieghts Initialization \r\n \"\"\"\r\n \r\n torch.nn.init.xavier_uniform_(self.weight_matrix)\r\n uniform(self.out_channels, self.bias)\r\n\r\n def forward(self, features):\r\n \r\n \"\"\"\r\n Forward Pass\r\n \r\n Parameters\r\n ----------\r\n features: Feature matrix\r\n \r\n Return \r\n ----------\r\n filtered_features: Convolved features\r\n \"\"\"\r\n \r\n filtered_features = torch.mm(features, self.weight_matrix)\r\n filtered_features = filtered_features + self.bias\r\n return filtered_features\r\n\r\nclass SparseFullyConnected(torch.nn.Module):\r\n \r\n \"\"\"\r\n Approximate PageRank Network\r\n \r\n Parameters\r\n ----------\r\n in_channels: Number of input channels.\r\n out_channels: Number of output channels.\r\n density: Feature matrix.\r\n \r\n \"\"\"\r\n \r\n def __init__(self, in_channels, out_channels):\r\n super(SparseFullyConnected, self).__init__()\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.define_parameters()\r\n self.init_parameters()\r\n\r\n def define_parameters(self):\r\n \r\n \"\"\"\r\n Weights matrices\r\n \"\"\"\r\n \r\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\r\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))\r\n\r\n def init_parameters(self):\r\n \r\n \"\"\"\r\n Wieghts Initialization \r\n \"\"\"\r\n \r\n torch.nn.init.xavier_uniform_(self.weight_matrix)\r\n uniform(self.out_channels, self.bias)\r\n\r\n def forward(self, feature_indices, feature_values):\r\n \r\n \"\"\"\r\n Forward Pass\r\n \r\n Parameters\r\n ----------\r\n features: Feature matrix\r\n \r\n Return \r\n ----------\r\n filtered_features: Convolved features\r\n \"\"\"\r\n \r\n number_of_nodes = torch.max(feature_indices[0]).item()+1\r\n number_of_features = torch.max(feature_indices[1]).item()+1\r\n filtered_features = spmm(index = feature_indices,\r\n value = feature_values,\r\n m = number_of_nodes,\r\n n = number_of_features,\r\n matrix = self.weight_matrix)\r\n filtered_features = 
filtered_features + self.bias\r\n return filtered_features\r\n\r\nclass APPNPModel(torch.nn.Module):\r\n \r\n \"\"\"\r\n APPNP Model\r\n \r\n Parameters\r\n ----------\r\n number_of_labels: Number of Target labels\r\n number_of_features : Number of features\r\n graph: Networkx graph\r\n device: Device type\r\n model: Model\r\n layers\r\n dropout: Dropout parameter\r\n iteration: Number of iterations\r\n alpha \r\n \"\"\"\r\n \r\n def __init__(self, number_of_labels, number_of_features, graph, device,model,layers,dropout,iterations,alpha):\r\n super(APPNPModel, self).__init__()\r\n self.number_of_labels = number_of_labels\r\n self.number_of_features = number_of_features\r\n self.graph = graph\r\n self.device = device\r\n self.model = model\r\n self.layers = layers\r\n self.dropout = dropout\r\n self.iterations = iterations\r\n self.alpha = alpha\r\n self.setup_layers()\r\n self.setup_propagator()\r\n\r\n def setup_layers(self):\r\n \r\n \"\"\"\r\n Layers creation\r\n \"\"\"\r\n \r\n self.layer_1 = SparseFullyConnected(self.number_of_features, self.layers[0])\r\n self.layer_2 = DenseFullyConnected(self.layers[1], self.number_of_labels)\r\n\r\n def setup_propagator(self):\r\n \"\"\"\r\n Propagation matrix creation\r\n \"\"\"\r\n \r\n self.propagator = create_propagator_matrix(self.graph, self.alpha, self.model)\r\n if self.model == \"exact\":\r\n self.propagator = self.propagator.to(self.device)\r\n else:\r\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\r\n self.edge_weights = self.propagator[\"values\"].to(self.device)\r\n\r\n def forward(self, feature_indices, feature_values):\r\n \r\n \"\"\"\r\n Forward propagation pass\r\n \r\n Parameters\r\n ----------\r\n feature_indices: Feature indices for feature matrix.\r\n feature_values: Values in the feature matrix.\r\n \r\n Return\r\n ----------\r\n self.predictions: Predicted class label log softmaxes\r\n \"\"\"\r\n \r\n feature_values = torch.nn.functional.dropout(feature_values,\r\n p=self.dropout,\r\n training=self.training)\r\n\r\n latent_features_1 = self.layer_1(feature_indices, feature_values)\r\n\r\n latent_features_1 = torch.nn.functional.relu(latent_features_1)\r\n\r\n latent_features_1 = torch.nn.functional.dropout(latent_features_1,\r\n p=self.dropout,\r\n training=self.training)\r\n\r\n latent_features_2 = self.layer_2(latent_features_1)\r\n if self.model == \"exact\":\r\n self.predictions = torch.nn.functional.dropout(self.propagator,\r\n p=self.dropout,\r\n training=self.training)\r\n\r\n self.predictions = torch.mm(self.predictions, latent_features_2)\r\n else:\r\n localized_predictions = latent_features_2\r\n edge_weights = torch.nn.functional.dropout(self.edge_weights,\r\n p=self.dropout,\r\n training=self.training)\r\n\r\n for iteration in range(self.iterations):\r\n\r\n new_features = spmm(index=self.edge_indices,\r\n value=edge_weights,\r\n n=localized_predictions.shape[0],\r\n m=localized_predictions.shape[0],\r\n matrix=localized_predictions)\r\n\r\n localized_predictions = (1-self.alpha)*new_features\r\n localized_predictions = localized_predictions + self.alpha*latent_features_2\r\n self.predictions = localized_predictions\r\n self.predictions = torch.nn.functional.log_softmax(self.predictions, dim=1)\r\n return self.predictions\r\n","sub_path":"src/appnpmodel.py","file_name":"appnpmodel.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163613519","text":"#!/usr/bin/env python\r\n\r\nimport 
re\r\nimport sys\r\nimport serial\r\nimport socket\r\nimport threading\r\nimport time\r\nimport subprocess\r\nfrom ws2812b import ws2812\r\n\r\nhost = \"\"\r\nport = 5011\r\n\r\nws = ws2812()\r\n\r\nsport = serial.Serial(\"/dev/ttyS0\", baudrate=19200, timeout=3.0)\r\n\r\nif sport.is_open:\r\n    sport.close()\r\n\r\ndef playvoice(rcvVar):\r\n    playStr = str(rcvVar + 300) + \".mp3\" \r\n    print(playStr)\r\n    subprocess.call([\"/usr/bin/mpg123\", playStr])\r\n\r\ndef rbCtrl(th_socket, addr):\r\n    rcvData = th_socket.recv(1024)\r\n    tmpList = re.findall('..', rcvData.decode()) # split the hex string into 2-character chunks\r\n    rcvList = [ int(x, 16) for x in tmpList ] # convert each chunk to a byte value\r\n    # [ 0xff, 0xff, 0xdc, 0x04, 0x00, 0x00, 0x00, 0x04, 0x01, 0x00 ]\r\n    # ext, ext, sysid, write, arm, wing, eye, rcnt, led, voice\r\n    #print(rcvList)\r\n    rtnstr = \"lnc\" # default reply; avoids an unbound name when no LED command applies\r\n    if rcvList[3] == 4:\r\n        if rcvList[8] == 0:\r\n            rtnstr = \"lnc\" # led not change\r\n        else:\r\n            if rcvList[8] == 1:\r\n                rtnstr = \"Rlo\" # RED led on\r\n                ws.pixels.fill((255, 0, 0))\r\n            elif rcvList[8] == 2:\r\n                rtnstr = \"Glo\" # Green led on\r\n                ws.pixels.fill((0, 255, 0))\r\n            elif rcvList[8] == 3:\r\n                rtnstr = \"Blo\" # Blue led on\r\n                ws.pixels.fill((0, 0, 255))\r\n            elif rcvList[8] == 4:\r\n                rtnstr = \"Rbw\" # Rainbow led on\r\n                ws.rainbow_cycle(0.001)\r\n            elif rcvList[8] == 5:\r\n                rtnstr = \"Alo\" # All led off\r\n                ws.pixels.fill((0, 0, 0))\r\n            else:\r\n                pass\r\n            ws.pixels.show()\r\n        \r\n        if rcvList[9] == 0:\r\n            pass\r\n        else:\r\n            tvoice = threading.Thread(target=playvoice, args=(rcvList[9], ))\r\n            tvoice.start()\r\n        \r\n        \r\n        sndList = rcvList[2:8]\r\n        if sndList[2] != 0 or sndList[3] != 0 or sndList[4] != 0: \r\n            sport.open()\r\n            sport.write(sndList)\r\n            received_data = sport.read() #read serial port\r\n            time.sleep(0.03)\r\n            data_left = sport.inWaiting() #check for remaining bytes\r\n            received_data += sport.read(data_left)\r\n            #print(\"rcvdata {}\".format(received_data.decode('utf-8')))\r\n            sport.close()\r\n    \r\n    th_socket.sendall(bytes(rtnstr.encode(\"utf-8\")))\r\n    th_socket.close()\r\n\r\ndef socket_accept():\r\n    global server_socket\r\n\r\n    svr_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    svr_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n    svr_socket.bind((host, port))\r\n\r\n    svr_socket.listen(5)\r\n\r\n    while True:\r\n        try:\r\n            cli_socket, addr = svr_socket.accept()\r\n        except KeyboardInterrupt:\r\n            svr_socket.close()\r\n            #print(\"Keyboard interrupt\")\r\n            break # stop accepting once the server socket has been closed\r\n\r\n        t = threading.Thread(target=rbCtrl, args=(cli_socket, addr))\r\n        t.daemon = True\r\n        t.start()\r\n\r\nif __name__ == '__main__':\r\n    socket_accept()","sub_path":"urHct/UR/camRobot/rbctrl/rbcmgsvr_b1.py","file_name":"rbcmgsvr_b1.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"337751946","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/21 11:08\n# @Author : 潘师傅\n# @File : Client_casc.py\n\n\"\"\"Customer-related test cases\"\"\"\nfrom XFP.PubilcAPI.XfpApi import *\n\n\nclass ClientTestCase(unittest.TestCase):\n    \"\"\"幸福派 (Xingfupai) - customer list\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(ClientTestCase, self).__init__(*args, **kwargs)\n        self.XfpRequest = XfpApi()\n        self.XmfpEXT = GlobalMap()\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Log in to 幸福派, executed only once\n        Log in to 幸福派 and fetch the user ID\"\"\"\n        cls.do_request = XfpApi()\n        cls.XfpRequest = cls.do_request\n        cls.XfpRequest.Login()\n        cls.XfpRequest.GetUserData()\n        # cls.XfpRequest.ClueSave()\n\n    def test_AddNewClue(self):\n        \"\"\"Add a new lead\"\"\"\n        
self.XfpRequest.ClueSave(clueNickName=self.XfpRequest.RandomText(textArr=surname))\n        # look the new lead up in the search list\n        self.XfpRequest.ClueList(keyWord=(self.XmfpEXT.get('CluePhone')))\n\n    def test_FollowClue(self):\n        \"\"\"Follow up on a lead\"\"\"\n        self.XfpRequest.ClueFollowSave()\n        self.XfpRequest.ClueFollowList()\n        self.assertEqual('python-线索跟进', self.XmfpEXT.get('followContent'))\n\n    def test_ClientEntering(self):\n        \"\"\"Convert a valid lead into a registered customer\"\"\"\n        self.XfpRequest.ClientEntering(callName=self.XfpRequest.RandomText(textArr=surname))\n        # query the customer list to check that the record exists\n        self.XfpRequest.ClientList(keyWord=self.XmfpEXT.get('cluePhone'))\n\n    def test_ClientFollow(self):\n        \"\"\"Customer follow-up\"\"\"\n        self.XfpRequest.ClientFolloow()\n\n\n    def test(self):\n        \"\"\"Record a closed deal\"\"\"\n        self.XfpRequest.ClientList() # customer list\n        self.XfpRequest.GetMatchingAreaHouse() # read the matching housing projects\n        self.XfpRequest.GetLabelList(labelNo='CJX', labelName='认筹') # query the deal-stage labels: 认筹-认购-草签-网签-\n        self.XfpRequest.TransactionSave()\n\n\n    def test01(self):\n        # self.XfpRequest.Login(userName='13005776276', password='123456789')\n        # self.XfpRequest.Login(userName='13192227370', password='666666666')\n        # self.XfpRequest.Login(userName='17520485656', password='12345678')\n        # self.XfpRequest.Login(userName='18811112222')\n        # self.XfpRequest.Login(userName='13726224607')\n        # self.XfpRequest.Login(password='12345678')\n        self.XfpRequest.Login()\n        a = 1\n        while a < 2:\n            time.sleep(0.5)\n            self.XfpRequest.ClueSave(cluePhone='1' + str(int(time.time())),\n                                     clueNickName=self.XfpRequest.RandomText(textArr=surname))\n            time.sleep(2)\n            # self.XfpRequest.ClueFollowList()\n            # self.assertEqual('python-线索跟进', self.XmfpEXT.get('followContent'))\n            self.XfpRequest.ClientEntering(callName=self.XfpRequest.RandomText(textArr=surname))\n            print('1' + str(int(time.time())))\n            a = a + 1\n\n\n\n\n    def test_0001(self):\n        \"\"\"\"\"\"\n        dealId = ['13397009933','13631202084','13727028094','15626949989',\n                  '18529631118','13005799895','13226038886','13005699892',\n                  '18575608892','18620578787','13232235558','13112399892',\n                  '13232291118','13232207778','13246393336','13226029998']\n        dealIds = dealId\n        z = 0\n        while z < len(dealId):\n            try:\n                self.XfpRequest.Login(userName=dealId[z], password='12345678')\n                time.sleep(0.2)\n                self.XfpRequest.GetUserData()\n                # if r.status_code == 200 and 1 == globals()['r.text']['resultCode']:\n                #     try:\n                #         r = requests.post(url=\n                #                           'http://api.xfj100.com/api/mobile/projectAdminService/repairFlowing',\n                #                           data={'agentToken': 'f690a96e-118d-4597-825e-b472beecc17b',\n                #                                 'dealId': dealId[z]})\n                #         time.sleep(0.2)\n                #         globals()['r.text'] = json.loads(r.text)\n                #         if r.status_code == 200 and 1 == globals()['r.text']['resultCode']:\n                #             dealIds.remove((dealId[z]))\n                #             print(dealIds)\n                #             z = z - 1\n                #     except Exception as e:\n                #         pass\n                z = z + 1\n            except Exception as e:\n                pass\n                print(e)\n                continue\n        print(dealId)\n\n\n\n\n","sub_path":"XFP/XfpApi/test_casc/Client_casc.py","file_name":"Client_casc.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"102914075","text":"f = open('teste.txt', 'r') #r, r+, a, w\r\nprint(f.name)\r\nprint(f.mode)\r\n\r\nf.close()\r\n\r\n#======================================#\r\n\r\nwith open('teste.txt', 'r') as f:\r\n\tf_read = f.read()\r\n\tprint(f_read)\r\n\tf_readlines = f.readlines()\r\n\tprint(f_readlines)\r\n\tf_readline = f.readline()\r\n\tprint(f_readline)\r\n\r\n\tfor line in f:\r\n\t\tprint(line, end='')\r\n\r\n\tf.seek(0)\r\n\r\n\tf_read = f.read(100)\r\n\tprint(f_read)\r\n\r\n\ttamanho = 
10\r\n\r\n\tf_read = f.read(tamanho)\r\n\tprint(f.tell())\r\n\r\n\twhile len(f_read) > 0:\r\n\t\tprint(f_read, end='')\r\n\t\tf_read = f.read(tamanho)\r\n\r\n#======================================#\r\n\r\nwith open('teste2.txt', 'w') as f:\r\n\tf.write('Testando')\r\n\r\n#======================================#\r\n\r\nwith open('teste.txt', 'r') as rf:\r\n\twith open('teste_copia.txt', 'w') as wf:\r\n\t\tfor line in rf:\r\n\t\t\twf.write(line)\r\n\r\n#======================================#\r\n\r\nwith open('teste.jpg', 'rb') as rf:\r\n\twith open('teste_copia.jpg', 'wb') as wf:\r\n\t\tfor line in rf:\r\n\t\t\twf.write(line)\r\n\r\n#======================================#\r\n\r\nwith open('teste.jpg', 'rb') as rf:\r\n\twith open('teste_copia.jpg', 'wb') as wf:\r\n\t\tbloco = 4056\r\n\t\trf_bloco = rf.read(bloco)\r\n\t\twhile len(rf_bloco) > 0:\r\n\t\t\twf.write(rf_bloco)\r\n\t\t\trf_bloco = rf.read(bloco)\r\n\r\n#======================================#","sub_path":"prog11.py","file_name":"prog11.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615480074","text":"import re, csv, string\nfrom bs4 import BeautifulSoup\n\nimport requests\n\n# GET THE LIST OF THE CATEGORIES RESULTS\n\nUPPER = string.ascii_uppercase[2:]\nfor upper in UPPER:\n listsCat = requests.get(\"http://www.localsearch.com.au/Categories/List_\"+upper).content\n listsoup = BeautifulSoup(listsCat, 'html.parser')\n\n urls_Cat = []\n print(\"processing categories...\")\n for h in listsoup.find_all(\"ul\", {\"class\": \"list-unstyled\"}):\n li = h.find_all('li')\n for number1 in li:\n a = number1.findChildren().pop()\n urls_Cat.append(\"http://www.localsearch.com.au/Categories/\" + a['href'])\n\n\n # print(urls_Cat)\n with open('categories_'+upper+'.csv', 'a') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL, lineterminator=\"\\n\")\n spamwriter.writerow(urls_Cat)\n\n\n\n # GET THE LOCATION LIST FOR ALL THE CATEGORIES\n urls_Loc = []\n for count, l in enumerate(urls_Cat):\n listsLoc = requests.get(l).content\n locsoup = BeautifulSoup(listsLoc, 'html.parser')\n\n for rows in locsoup.find_all(\"div\", {\"class\": \"list-of-markets\"}):\n for division in rows.find_all('div'):\n for unlist in division('ul'):\n for regions in unlist('li'):\n koko = regions.findChildren('a').pop()\n urls_Loc.append(\"http://www.localsearch.com.au/\" + koko['href'])\n\n\n with open('DataLinks_'+upper+'.csv', 'a') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL, lineterminator=\"\\n\")\n spamwriter.writerow(urls_Loc)\n\n # GET THE PAGE WITH FINAL RESULTS\n for kount, iop in enumerate(urls_Loc):\n print(\"Address which is scraped \" + urls_Loc[kount])\n page = requests.get(urls_Loc[kount]).content\n soup = BeautifulSoup(page, 'html.parser')\n\n with open('eggs'+upper+'.csv', 'a') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL, lineterminator=\"\\n\")\n\n titledata = []\n addressdata = []\n phonedata = []\n emaildata = []\n websitedata = []\n\n kopok = soup.find_all(class_=\"business\")\n for comp in kopok:\n try:\n ytdn = comp.find_all(\"span\", {\"itemprop\": \"telephone\"})\n except IndexError:\n ytdn = \"null\"\n\n phonenum = []\n for itemm in ytdn:\n phonenum.append(itemm.text)\n phonedata.append(phonenum)\n\n ji = soup.find_all(class_=\"quick-links\") # to get email and website into 
the tree\n for whole in ji:\n child = whole.findChildren()\n\n for quicknum, kkkk in enumerate(child):\n try:\n jio1 = kkkk.find_all(\"link\", {\"itemprop\": \"email\"}).pop()['href']\n except IndexError:\n jio1 = 'null'\n try:\n jio2 = kkkk.find_all(\"link\", {\"itemprop\": \"url\"}).pop()['href']\n except IndexError:\n jio2 = 'null'\n emaildata.append(jio1)\n websitedata.append(jio2)\n\n for title in soup.find_all(\"h3\", {\"itemprop\": \"name\"}):\n titledata.append(title.text)\n\n for address in soup.find_all(\"address\", {\"itemprop\": \"location\"}):\n addressdata.append(address.text)\n\n\n\n # for email in soup.find_all(\"link\", {\"itemprop\": \"email\"}):\n # fhfdf = email['href'][7:]\n # emaildata.append(fhfdf)\n #\n # for website in soup.find_all(\"link\", {\"itemprop\": \"url\"}):\n # print(website['href'])\n # print(website.parent.parent)\n # websitedata.append(website['href'])\n\n\n # child = whole.findChildren()\n # numberchild = len(child)\n # print(\"------=====\" + str(numberchild) + \"=====-----------\")\n # print(child)\n # for website, kkkk in enumerate(child):\n # print(kkkk)\n #\n # # check for empty strings, if any\n # try:\n # url = kkkk.findChildren()\n # except IndexError:\n # url = 'null'\n #\n # if url == 'null':\n # print(\"null\")\n # else:\n # print(url)\n\n for ikor in range(0, min(len(titledata), len(addressdata), len(emaildata), len(websitedata))):\n spamwriter.writerow([titledata[ikor],\n iop,\n addressdata[ikor],\n phonedata[ikor],\n emaildata[ikor],\n websitedata[ikor]])\n\n\n\n #\n # print(\"------------------------------------\")\n #\n # for open_time in soup.find_all(class_=\"qihours\"):\n # print(open_time.text)\n\n '''\n\n for more_tags in soup.find_all(\"span\"):\n print(more_tags.text)\n\n '''\n","sub_path":"heil-modified.py","file_name":"heil-modified.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"446841567","text":"from core import sender\nfrom core import utils\n\n\nclass IPOsint():\n \"\"\"docstring for IPOsint\"\"\"\n\n def __init__(self, options):\n self.options = options\n # setting stuff depend on search engine\n # asn:123\n self.query = options.get('query', None)\n # target should be example.com\n self.target = options.get('target', None)\n self.output = self.options['output']\n utils.print_banner(\"Starting IPOsint module\")\n # really do something\n self.initial()\n\n def initial(self):\n company = utils.get_tld(self.target)\n self.asnlookup(company)\n self.get_asn()\n self.parse_query()\n\n def parse_query(self):\n if not self.query:\n return None\n if '|' in self.query:\n name = self.query.split(\"|\")[0]\n value = self.query.split(\"|\")[1]\n if 'asn' in name:\n ips = self.get_asn_ip(value)\n utils.just_write(self.options['output'], \"\\n\".join(ips))\n utils.just_cleanup(self.options['output'])\n\n def asnlookup(self, company):\n utils.print_banner(f\"Starting scraping {company} from asnlookup.com\")\n url = f'http://asnlookup.com/api/lookup?org={company}'\n r = sender.send_get(self.options, url, None)\n data = r.json()\n if not data:\n utils.print_bad('No IP found')\n else:\n content = \"\\n\".join(data)\n print(content)\n utils.just_write(self.options['output'], content)\n utils.just_cleanup(self.options['output'])\n\n def get_asn(self):\n ip_target = utils.resolve_input(self.target)\n if not ip_target:\n return False\n utils.print_banner(f\"Starting scraping detail ASN of {ip_target}\")\n\n utils.print_info(f'Get ASN from IP: 
{ip_target}')\n url = f'https://ipinfo.io/{ip_target}/json'\n r = sender.send_get(self.options, url, None)\n org_info = r.json().get('org')\n asn = utils.get_asn(org_info)\n if asn:\n utils.print_info(f\"Detect target running on {asn}\")\n ips = self.get_asn_ip(asn)\n utils.just_write(self.options['output'], \"\\n\".join(ips))\n utils.just_cleanup(self.options['output'])\n else:\n return False\n\n def get_asn_ip(self, asn):\n asn_num = utils.get_asn_num(asn)\n url = 'https://mxtoolbox.com/Public/Lookup.aspx/DoLookup2'\n data = {\"inputText\": f\"asn:{asn_num}\", \"resultIndex\": 1}\n r = sender.send_post(\n self.options, url, data, is_json=True)\n content = r.text\n ips = utils.grep_the_IP(content, verbose=True)\n return ips\n","sub_path":"modules/iposint.py","file_name":"iposint.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"374382152","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 3 09:54:08 2018\n\n@author: Prabz\n\"\"\"\nimport datetime\nimport importlib\nimport pandas as pd\n\nimport read_Stock_Data as rd\nimportlib.reload(rd) \n\ndef check_null():\n df_nse50 = rd.get_list('nse_Top50', 'Symbol') #Extract list of Top Nse50 Stcoks\n list_nse50 = df_nse50.index.tolist() #Array of Stcok symbols\n\n end_date = datetime.date(2018,12,31) #date.today()\n \n df_Stock = rd.get_data(list_nse50[0:51],end_date,3000) #Extract Data from a given date to past x no of traded days\n df_Stock = df_Stock.dropna(how='all') \n \n null_count = df_Stock.isnull().sum(axis=0)\n print(null_count)\n\ndef sort_file():\n df_nse50 = rd.get_list('nse_Top50', 'Symbol') #Extract list of Top Nse50 Stcoks\n list_nse50 = df_nse50.index.tolist()\n\n #Extract Data from a given date to past x no of traded days\n for symbol in list_nse50[0:51]:\n df_temp = pd.read_csv(rd.list_to_path(symbol), index_col='Date',\n parse_dates=True,dayfirst=True, na_values=['nan']) #Extract date , series and price\n df_temp = df_temp.reset_index().drop_duplicates(subset='Date', keep='last').set_index('Date')\n df_temp.sort_index(ascending=True, inplace=True) #Remove any blank records\n df_temp.to_csv(\"/Volumes/2/PyD/nsedb/{}.csv\".format(str(symbol))) \n \ndef test_run():\n check_null()\n #sort_file()\n\nif __name__ == \"__main__\":\n test_run()","sub_path":"Validate_data.py","file_name":"Validate_data.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595131741","text":"from __future__ import division\r\nimport requests\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import join, isfile\r\nfrom PIL import Image, ImageChops\r\nimport math\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\nimport string\r\n\r\npart = 0\r\nlist_chars = [f for f in listdir('data/chars') if isfile(join('data/chars', f)) and 'jpg' in f]\r\n\r\n\r\ndef rand_string(N=6):\r\n\treturn ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))\r\n\r\ndef get_data():\r\n\turl = \"https://chuyencuadev.com/captcha\"\r\n\tfor i in range (1, 1000):\r\n\t\tfilename = '{0:04}.jpg'.format(i)\r\n\t\tprint(filename)\r\n\t\twith open(filename, 'wb') as f:\r\n\t\t\tresponse = requests.get(url)\r\n\t\t\tif response.ok: f.write(response.content)\r\n\r\ndef reduce_noise(filename):\r\n\timg = cv2.imread(filename)\r\n\tdst = cv2.fastNlMeansDenoisingColored(img,None,50,50,7,21)\r\n\tcv2.imwrite(filename, 
dst)\r\n\timg = Image.open(filename).convert('L')\r\n\timg = img.point(lambda x: 0 if x<128 else 255, '1')\r\n\timg.save(filename)\r\n\r\ndef crop(filename, outpath):\r\n\tglobal part\r\n\timg = Image.open(filename)\r\n\tp = img.convert('P')\r\n\tw, h = p.size\r\n\r\n\tletters = []\r\n\tstart, end = -1, -1\r\n\tfound = False\r\n\tfor i in range(w):\r\n\t\tin_letter = False\r\n\t\tfor j in range(h):\r\n\t\t\tif p.getpixel((i,j)) == 0:\r\n\t\t\t\tin_letter = True\r\n\t\t\t\tbreak\r\n\t\tif not found and in_letter:\r\n\t\t\tfound = True\r\n\t\t\tstart = i\r\n\t\tif found and not in_letter and i-start > 25:\r\n\t\t\tfound = False\r\n\t\t\tend = i\r\n\t\t\tletters.append([start, end])\r\n\torigin = filename.split('/')[-1].split('.')[0]\r\n\tfor [l,r] in letters:\r\n\t\tif r-l < 40:\r\n\t\t\tbbox = (l, 0, r, h)\r\n\t\t\tcrop = img.crop(bbox)\r\n\t\t\tcrop = crop.resize((30,60))\r\n\t\t\tcrop.save(outpath + '{0:04}_{1}.jpg'.format(part, origin))\r\n\t\t\tpart += 1\r\n\t\t\t\r\ndef adjust(path, filename):\r\n\timg = Image.open(join(path, filename))\r\n\tp = img.convert('P')\r\n\tw, h = p.size\r\n\tstart, end = -1, -1\r\n\tfound = False\r\n\tfor j in range(h):\r\n\t\tin_letter = False\r\n\t\tfor i in range(w):\r\n\t\t\tif p.getpixel((i,j)) == 0:\r\n\t\t\t\tin_letter = True\r\n\t\t\t\tbreak\r\n\t\tif not found and in_letter:\r\n\t\t\tfound = True\r\n\t\t\tstart = j\r\n\t\tif found and not in_letter and j-start > 35:\r\n\t\t\tfound = False\r\n\t\t\tend = j\r\n\tbbox = (0, start, w, end)\r\n\tcrop = img.crop(bbox)\r\n\tcrop = crop.resize((30,36))\r\n\tcrop.save(join(path, filename))\r\n\r\ndef rename(path, filename, letter):\r\n\tos.rename(join(path,filename), join(path, letter+'-' + rand_string() + '.jpg'))\r\n\t\t\t\r\ndef detect_char(path, filename):\r\n\tclass Fit:\r\n\t\tletter = None\r\n\t\tdifference = 0\r\n\tbest = Fit()\r\n\t_img = Image.open(join(path, filename))\r\n\tfor img_name in list_chars:\r\n\t\tcurrent = Fit()\r\n\t\timg = Image.open(join('data/chars', img_name))\r\n\t\tcurrent.letter = img_name.split('-')[0]\r\n\t\tdifference = ImageChops.difference(_img, img)\r\n\t\tfor x in range(difference.size[0]):\r\n\t\t\tfor y in range(difference.size[1]):\r\n\t\t\t\tcurrent.difference += difference.getpixel((x, y))/255.\r\n\t\tif not best.letter or best.difference > current.difference:\r\n\t\t\tbest = current\r\n\tif best.letter == filename.split('-')[0]: return\r\n\tprint(filename, best.letter)\r\n\trename(path, filename, best.letter)\r\n\r\ndef adjust_folder(path):\r\n\tfor f in listdir(path):\r\n\t\tif isfile(join(path, f)) and 'jpg' in f:\r\n\t\t\tadjust(path, f)\r\ndef detect_folder(path):\r\n\tfor f in listdir(path):\r\n\t\tif isfile(join(path, f)) and 'jpg' in f:\r\n\t\t\tdetect_char(path, f)\r\n\r\nif __name__=='__main__':\r\n\t# for i in range(1, 800):\r\n\t# \tfilename = 'data/train/{0:04}.jpg'.format(i)\r\n\t# \tprint(filename)\r\n\t# \tcrop(filename, 'data/train/sliced/')\r\n\t# for i in range(800, 1000):\r\n\t# \tfilename = 'data/test/{0:04}.jpg'.format(i)\r\n\t# \tprint(filename)\r\n\t# \tcrop(filename, 'data/test/sliced/')\r\n\t# adjust_folder('data/chars/')\r\n\t# adjust_folder('data/train/sliced')\r\n\t# adjust_folder('data/test/sliced')\r\n\t# detect_folder('data/train/sliced')\r\n\treduce_noise('1.jpg')\r\n\tcrop('1.jpg', 'viblo/')\r\n\tadjust('viblo/', '0000_1.jpg')\r\n\tadjust('viblo/', '0001_1.jpg')\r\n\tadjust('viblo/', '0002_1.jpg')\r\n\tadjust('viblo/', '0003_1.jpg')\r\n\tadjust('viblo/', '0004_1.jpg')\r\n\tadjust('viblo/', 
'0005_1.jpg')\r\n\tpass","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"368915115","text":"from sensor_msgs.msg import CompressedImage\nimport cv2 \nimport numpy as np\nimport rospy\n\n\ndef compressed_img_message(image_np: np.ndarray):\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n retval, encoded = cv2.imencode('.jpg', image_np)\n if not retval:\n raise RuntimeError(\"Unable to encode image, {}\".format(image_np))\n msg.data = np.array(encoded).tostring()\n return msg\n\ndef np_from_compressed_ros_msg(ros_data) -> np.ndarray:\n np_arr = np.fromstring(ros_data.data, np.uint8)\n return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)","sub_path":"catkin_ws/src/lane_follow/src/img_messages.py","file_name":"img_messages.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"357292341","text":"# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sample Generate GPT2\"\"\"\nopen_old_pronounce=1\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport argparse\nimport time\nfrom datetime import datetime\nfrom arguments import get_args\nfrom utils import Timers\nfrom pretrain_gpt2 import initialize_distributed\nfrom pretrain_gpt2 import set_random_seed\nfrom pretrain_gpt2 import get_train_val_test_data\nfrom pretrain_gpt2 import get_masks_and_position_ids\nfrom utils import load_checkpoint, get_checkpoint_iteration\nfrom data_utils import make_tokenizer\nfrom configure_data import configure_data\nimport mpu\nimport deepspeed\nimport copy\nfrom fp16 import FP16_Module\nfrom model import GPT2Model\nfrom model import DistributedDataParallel as DDP\nfrom utils import print_rank_0\nfrom pretrain_gpt2 import get_model\nfrom pypinyin import pinyin,FINALS, FINALS_TONE,TONE3\nimport jsonlines\ndef setup_model(args):\n \"\"\"Setup model and optimizer.\"\"\"\n\n model = get_model(args)\n\n # if args.deepspeed:\n # print_rank_0(\"DeepSpeed is enabled.\")\n #\n # model, _, _, _ = deepspeed.initialize(\n # model=model,\n # model_parameters=model.parameters(),\n # args=args,\n # mpu=mpu,\n # dist_init_required=False\n # )\n if args.load is not None:\n if args.deepspeed:\n iteration, release, success = get_checkpoint_iteration(args)\n print(iteration)\n path = os.path.join(args.load, str(iteration), \"mp_rank_00_model_states.pt\")\n checkpoint = torch.load(path)\n model.load_state_dict(checkpoint[\"module\"])\n else:\n _ = load_checkpoint(\n model, None, None, args, load_optimizer_states=False)\n # if args.deepspeed:\n # model = model.module\n\n return model\n\n\ndef get_batch(context_tokens, device, args):\n tokens = context_tokens\n tokens = tokens.view(args.batch_size, -1).contiguous()\n tokens = 
tokens.to(device)\n\n # Get the masks and postition ids.\n attention_mask, loss_mask, position_ids = get_masks_and_position_ids(\n tokens,\n args.eod_token,\n reset_position_ids=False,\n reset_attention_mask=False,\n transformer_xl=args.transformer_xl,\n mem_length=args.mem_length)\n\n return tokens, attention_mask, position_ids\n\ndef generate_score(model, tokenizer, args, device, input_str, eval_str):\n #penalty on same word\n penalty=0\n for i in eval_str:\n if i in input_str[:-7]:\n penalty+=1\n context_count = 0\n model.eval()\n with torch.no_grad():\n context_tokens = tokenizer.EncodeAsIds(input_str).tokenization\n eval_tokens = tokenizer.EncodeAsIds(eval_str).tokenization\n if len(context_tokens)==0:\n context_tokens = eval_tokens[0:1]\n eval_tokens = eval_tokens[1:]\n context_length = len(context_tokens)\n eval_length = len(eval_tokens)\n if context_length >= args.seq_length:\n return \"输入过长。\"\n\n # terminate_runs_tensor = torch.cuda.LongTensor([terminate_runs])\n # pad_id = tokenizer.get_command('pad').Id\n # if context_length < args.out_seq_length:\n # context_tokens.extend([pad_id] * (args.out_seq_length - context_length))\n\n context_tokens_tensor = torch.cuda.LongTensor(context_tokens)\n eval_tokens_tensor = torch.cuda.LongTensor([eval_tokens])\n context_length_tensor = torch.cuda.LongTensor([context_length])\n eval_length_tensor = torch.cuda.LongTensor([eval_length])\n # context_length = context_length_tensor[0].item()\n tokens, attention_mask, position_ids = get_batch(context_tokens_tensor, device, args)\n # print(context_tokens)\n start_time = time.time()\n\n counter, mems = 0, []\n org_context_length = context_length\n sumlognum = 0\n while counter < eval_length:\n if counter == 0:\n logits, *mems = model(tokens, position_ids, attention_mask, *mems)\n logits = logits[:, -1]\n else:\n index = org_context_length + counter\n logits, *mems = model(tokens[:, index - 1: index], tokens.new_ones((1, 1)) * (index - 1),\n tokens.new_ones(1, 1, 1, args.mem_length + 1, device=tokens.device,\n dtype=torch.float), *mems)\n logits = logits[:, 0]\n # logits = logits[:, -1]\n #logits /= args.temperature\n # logits = top_k_logits(logits, top_k=args.top_k, top_p=args.top_p)\n log_probs = F.softmax(logits, dim=-1)\n log_num = torch.log(log_probs).data\n # print(log_num)\n sumlognum += log_num[0, eval_tokens[counter]]\n # print(log_probs)\n # prev = torch.multinomial(log_probs, num_samples=1)[0]\n # print(tokens,eval_tokens_tensor[counter:counter+1])\n tokens = torch.cat((tokens, eval_tokens_tensor[:, counter:counter + 1]), dim=1)\n # print(tokens,sumlognum)\n context_length += 1\n counter += 1\n\n # trim_decode_tokens = decode_tokens[:decode_tokens.find(\"<|endoftext|>\")]\n sumlognum = sumlognum\n del logits\n del mems\n torch.cuda.empty_cache()\n return sumlognum-2.5*(penalty**2.5)\n \ndef top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n # This function has been mostly taken from huggingface conversational ai code at\n # https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313\n\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n \n if top_p > 0.0:\n #convert to 1D\n logits=logits.view(logits.size()[1]).contiguous()\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, 
dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n #going back to 2D\n logits=logits.view(1, -1).contiguous()\n\t\n return logits\ndef checklength(s):\n w=s.replace('。',',').replace(',',',').replace('?',',').replace('?',',').replace('<','').replace(' ','').replace('>','').replace('《','').replace('》','').replace('\\\"','').replace('“','').replace('”','').replace(\"‘\",'').replace('‘','').replace('、','').replace('-','').replace('.','').replace('!',',').replace('!',',')\n return len(w)\n\ndef generate_token_tensor(str,tokenizer):\n with torch.no_grad():\n context_tokens = tokenizer.EncodeAsIds(str).tokenization\n context_tokens_tensor = torch.cuda.LongTensor(context_tokens)\n return context_tokens_tensor\n\nrus=set(['八','搭','塌','邋','插','察','杀','煞','夹','俠','瞎','辖','狹','匣','黠','鸭','押','压','刷','刮','滑','猾','挖','蜇','舌','鸽','割','胳','搁','瞌','喝','合','盒','盍','曷','貉','涸','劾','核','钵','剝','泼','摸','脱','托','捋','撮','缩','豁','活','切','噎','汁','织','隻','掷','湿','虱','失','十','什','拾','实','食','蝕','识','石','劈','霹','滴','踢','剔','屐','积','激','击','漆','吸','息','媳','昔','席','锡','檄','觋','揖','一','壹','扑','匍','仆','弗','紱','拂','福','蝠','幅','辐','服','伏','茯','督','突','秃','俗','出','蜀','窟','哭','忽','惚','斛','鹄','屋','屈','诎','曲','戌','拍','塞','摘','拆','黑','勺','芍','嚼','粥','妯','熟','白','柏','伯','薄','剥','摸','粥','轴','舳','妯','熟','角','削','学'])\nss=set(['de','te','le','ze','ce','se','fa','fo','dei','zei','gei','hei','sei','bie','pie','mie','die','tie','nie','lie','kuo','zhuo','chuo','shuo','ruo'])\ndef checkpz(st,wd):\n #入声字判断\n \n #轻声按失败算。\n if not(st[-1] in ['1','2','3','4']):\n return 0\n \n if open_old_pronounce==1:\n if wd in rus:\n return 2\n if wd in ['嗟','瘸','靴','爹']:\n return 1\n if st[0:-2] in ss:\n return 2\n \n \n if (st[-1]==2 and st[0] in ['b','d','g','j','z']):\n return 2\n if st[-4:-2]=='ue':\n return 2\n \n if st[-1] in ['1','2']:\n return 1\n \n return 2\n \n \ndef checkrhy(sentence,last,imp,req=0):\n if len(sentence)==0:\n return 0\n while sentence[-1] in [',','。',',','?','?','!','!']:\n sentence=sentence[:-1]\n while last[-1] in [',','。',',','?','?','!','!']:\n last=last[:-1]\n l1=pinyin(sentence,style=TONE3)\n l2=pinyin(last,style=TONE3)\n #print(l1,l2)\n disobey=0\n if len(l1)!=len(sentence):\n return -1000\n for i in range(len(sentence)):\n if (imax_length and not(sentence[-1] in endnote)) or len(sentence)==0) or len(sentence)>max_length+1:\n return 1\n if (sentence[-1] in endnote)and ((len(sentence)<=min_length) or (len(sentence)==7)):\n return 1\n \n if (sentence[-1] in endnote)and (sentence[:-1] in original_context):\n return 1\n last=getlastsentence(original_context)\n \n \n mdisobey=0\n illegal_notes=[' ',':','《','》','‘','“','-','——','⁇','[','【','】',']','.','、','(','(',')',')','·']\n if '。' in endnote:\n illegal_notes.extend([',',','])\n else:\n illegal_notes.append('。')\n for i in range(10):\n illegal_notes.append(str(i))\n for i in range(64,123):\n illegal_notes.append(chr(i))\n for note in illegal_notes:\n if note in sentence:\n return 1\n if min_length==max_length:\n imp=1\n if (',' in last) or(',' in last):\n imp=2\n \n if ('。') in last:\n last2=get2sentencebefore(original_context)\n rt=checkrhy(sentence,last2,imp,req=1)\n else:\n 
rt=checkrhy(sentence,last,imp)\n if rt<-3:\n return 1\n \n \n \n for i in range(len(sentence)):\n # if sentence[i]==\"柯\":\n # print(sentence[i],last[i],sentence[i]==last[i])\n if min_length==max_length:\n if (i').tokenization\n context_length = len(context_tokens)\n if context_length>=args.seq_length:\n return 0,\"输入过长。\"\n \n\n context_tokens_tensor = torch.cuda.LongTensor(context_tokens)\n eo_token_tensor=torch.cuda.LongTensor(eo_tokens)\n context_length_tensor = torch.cuda.LongTensor([context_length])\n context_length = context_length_tensor[0].item()\n #tokens, attention_mask, position_ids = get_batch(context_tokens_tensor, device, args)\n\n start_time = time.time()\n\n counter, mems = 0, []\n org_context_length = context_length\n beam_size=10\n beam_candidate=12\n beam_max=2\n max_headings=6\n final_storage=[]\n final_storage_score=[]\n step=9\n overall_score=[]\n past_beam_id=[]\n #print(counter,beam_tokens,beam_score)\n if length is None:\n beam_sentences=generate_sentence(model,tokenizer,args,device,context_tokens_tensor,[],num_candidates=beam_size*5)\n if length==5:\n beam_sentences=generate_sentence(model,tokenizer,args,device,context_tokens_tensor,[],num_candidates=beam_size*5,max_length=6)\n if length==7:\n beam_sentences=generate_sentence(model,tokenizer,args,device,context_tokens_tensor,[],num_candidates=beam_size*5,min_length=6)\n for w in range(len(beam_sentences)):\n if '<|end' in beam_sentences[w][0]:\n continue\n input='”'+beam_sentences[w][0]+'”此句出自'\n output_str='古诗《'+title+'》'\n score1=generate_score(model,tokenizer,args,device,input,output_str)\n '''\n input='”'+beam_sentences[w][0]+'”此句作者为'\n output_str=aus\n score2=generate_score(model,tokenizer,args,device,input,output_str)\n '''\n ss=-beam_sentences[w][1]/len(beam_sentences[w][0])-6.5\n iscore=score1-0.75*(np.abs(ss)+ss)\n beam_sentences[w][1]=iscore\n print(beam_sentences[w][0],beam_sentences[w][1])\n overall_score.append(iscore)\n past_beam_id.append(w)\n \n gy=np.argsort(overall_score)\n k=0\n sumbeam=np.zeros(100)\n \n gym=[]\n num=0\n while (num-1000) and (i>8):\n del beam_sentences\n del beam_new_sentences\n torch.cuda.empty_cache()\n return final_storage,final_storage_score\n beam_new_sentences=[]\n \n endnote=[',',',','?','?']\n if i%2==0:\n endnote=['。','?','?','!','!']\n overall_score=[]\n past_beam_id=[]\n size=beam_size\n if len(gym)7:\n ini_score-=0.2\n if i>11:\n ini_score-=0.4\n \n if ini_score=15:\n final_storage.append(copy.deepcopy(current_sentence[input_len:]))\n sc=beam_sentences[id][1]/(i+1)\n sc-=2\n final_storage_score.append(sc)\n print(current_sentence,final_storage_score[-1])\n continue\n '''\n #print(token_tensor)\n gen=generate_sentence(model,tokenizer,args,device,token_tensor,mems,num_candidates=beam_candidate,endnote=endnote,min_length=len_sentence,max_length=len_sentence)\n for jj in gen:\n if '<|end' in jj[0]:\n if (i%2==1 and i>=3):\n final_storage.append(copy.deepcopy(current_sentence[input_len:]))\n sc=beam_sentences[id][1]/(i+1) #prioritize short poems\n sc=sc.item()\n if (i==5 or i==9 or i==13):\n sc-=1.5\n if (i==15):\n sc-=0.6\n if (i==11):\n sc-=0.4\n if (i==3):\n sc+=0.2\n if sc>best_score:\n best_score=sc\n best_pos=len(final_storage)-1\n sc=np.abs(sc)\n final_storage_score.append(sc)\n print(current_sentence,final_storage_score[-1])\n \n continue\n st=jj[0]\n # experiment shows that this is better universal,\n if (i%2==0):\n st=getlastsentence(beam_sentences[id][0])+jj[0]\n else:\n 
st=get2sentencebefore(beam_sentences[id][0])+','+getlastsentence(beam_sentences[id][0])+jj[0]\n input='”'+st+'”此句出自'\n \n output_str='古诗《'+title+'》'\n \n score1=generate_score(model,tokenizer,args,device,input,output_str)\n '''\n input='”'+st+'”此句作者为'\n output_str=aus\n score2=generate_score(model,tokenizer,args,device,input,output_str)\n '''\n factor=1\n \n ss=-jj[1]/len(jj[0])-6.5\n iscore=score1-0.75*(np.abs(ss)+ss)\n if i>=1:\n imp=1\n if i%2==0:\n imp+=1.5\n scorem=check2com(jj[0],beam_sentences[id][0],imp)\n \n iscore+=scorem\n \n \n jj[0]=beam_sentences[id][0]+jj[0]\n jj[1]=iscore+ini_score\n #print(i,beam_sentences[id][0],jj[1])\n #print(i,jj[0],jj[1]/(i+2))\n beam_new_sentences.append(jj)\n overall_score.append(jj[1])\n past_beam_id.append(w)\n del beam_sentences\n torch.cuda.empty_cache()\n beam_sentences=beam_new_sentences\n gy=np.argsort(overall_score)\n sumbeam=np.zeros(100)\n sumheading={}\n k=0\n gym=[]\n num=0\n while (num padded vocab (size: {}) with {} dummy '\n 'tokens (new size: {})'.format(\n before, after - before, after))\n\n args.tokenizer_num_tokens = after\n args.tokenizer_num_type_tokens = tokenizer.num_type_tokens\n args.eod_token = tokenizer.get_command('eos').Id\n\n # after = tokenizer.num_tokens\n # while after % mpu.get_model_parallel_world_size() != 0:\n # after += 1\n\n args.vocab_size = after\n print(\"prepare tokenizer done\", flush=True)\n\n return tokenizer\n\ndef set_args():\n args=get_args()\n print(args.gpu)\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\n #set up\n #print(args)\n args.deepspeed=True\n args.num_nodes=1\n args.num_gpus=1\n args.model_parallel_size=1\n args.deepspeed_config=\"script_dir/ds_config.json\"\n args.num_layers=32\n args.hidden_size=2560\n args.load=\"../ckp/checkpoint/new\"\n args.num_attention_heads=32\n args.max_position_embeddings=1024\n args.tokenizer_type=\"ChineseSPTokenizer\"\n args.cache_dir=\"cache\"\n args.fp16=True\n args.out_seq_length=180\n args.seq_length=200\n args.mem_length=256\n args.transformer_xl=True\n args.temperature=1\n args.top_k=0\n args.top_p=0\n \n return args\ndef prepare_model():\n \"\"\"Main training program.\"\"\"\n\n #print('Generate Samples')\n\n # Disable CuDNN.\n torch.backends.cudnn.enabled = False\n\n # Timer.\n timers = Timers()\n\n # Arguments.\n args = set_args()\n #print(args)\n args.mem_length = args.seq_length + args.mem_length - 1\n \n\n # Pytorch distributed.\n initialize_distributed(args)\n\n # Random seeds for reproducability.\n args.seed=random.randint(0,1000000)\n set_random_seed(args.seed)\n\n #get the tokenizer\n tokenizer = prepare_tokenizer(args)\n\n # Model, optimizer, and learning rate.\n model = setup_model(args)\n #args.load=\"../ckp/txl-2.8b11-20-15-10\"\n #model2=setup_model(args)\n #setting default batch size to 1\n args.batch_size = 1\n\n #generate samples\n return model,tokenizer,args\n\ndef generate_strs(tups):\n model,tokenizer,args=prepare_model()\n output=[]\n for tup in tups:\n #str=generate_token_tensor(str,tokenizer)\n \n output_string,output_scores=generate_string(model,tokenizer, args, torch.cuda.current_device(),tup[0],tup[1],desc=tup[2])\n list_poems=0\n \n ranklist=np.argsort(output_scores)\n best_score=output_scores[ranklist[0]]\n text_dir=\"poems_save/\"\n already=[]\n with jsonlines.open(text_dir+tup[0]+tup[1]+'.jsonl', mode='w') as writer:\n for i in range(len(ranklist)):\n j=ranklist[i]\n if output_scores[j]0:\n #print(sp)\n author=\"唐 李白\"\n title=sp[0]\n lt.append([author,title])\n qts.close()\n \n model,tokenizer,args=prepare_model()\n while 
True:\n id=random.randint(0,len(lt)-1)\n #author,title,num_wd,num_st=lt[id]\n author,title=lt[id]\n lists=os.listdir(text_dir)\n lts=title+author+'.jsonl'\n if (lts in lists):\n continue\n #str=generate_token_tensor(str,tokenizer)\n #output_string,output_scores=generate_string(model, tokenizer, args, torch.cuda.current_device(),title,author,length=num_wd)\n output_string,output_scores=generate_string(model, tokenizer, args, torch.cuda.current_device(),title,author)\n new_output_string=[]\n new_output_score=[]\n for i in range(len(output_string)):\n st=output_string[i].replace('。',',').replace(',',',').replace('?',',').replace('?',',').replace('!',',').replace('!',',')\n st=st.split(',')\n #print(st,num_st)\n #if len(st)-1==num_st:\n new_output_string.append(output_string[i])\n new_output_score.append(output_scores[i])\n if len(new_output_string)==0:\n del output_string\n del output_scores\n continue\n list_poems=0\n \n ranklist=np.argsort(new_output_score)\n best_score=new_output_score[ranklist[0]]\n \n already=[]\n \n with jsonlines.open(text_dir+title+author+'.jsonl', mode='w') as writer:\n for i in range(len(ranklist)):\n j=ranklist[i]\n if new_output_score[j] le and pp > r:\n return pp\n if pp < r:\n return find_peak(sl[m:])\n if pp > r:\n return find_peak(sl[:m])\n","sub_path":"0x10-python-network_0/6-peak.py","file_name":"6-peak.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"183156661","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom numpy import linalg as LA\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\nfrom primitives import Ray, HitRecord, AABox, Wall, Cylinder, PolyWall\n\n# A scene describes a world in which an agent can move around. It can be built\n# using primitives (objects).\nclass Scene:\n def __init__(self):\n self.objects = []\n\n # add objects to the scene\n def addObject(self, obj):\n self.objects.append(obj)\n\n # trace a ray through the scene. this is useful to check if a certain\n # movement would lie inside or outside the scene. for instance, if the hit\n # distance towards an object is less than the movement distance, the agent\n # would run into a wall.\n def trace(self, ray):\n # default return value\n hr = HitRecord(-1, np.array([np.inf, np.inf, np.inf]))\n\n min_dist = np.inf\n i = 0\n for o in self.objects:\n dist, local_hr = o.intersect(ray)\n if dist >= 0.0 and dist <= min_dist:\n min_dist = dist\n hr.id = i\n hr.hit = local_hr.hit\n i = i + 1\n\n return min_dist, hr\n\n\n # this method checks if a certain movement from A to B is valid or not\n # within the scene\n def isValidMove(self, A, B):\n # get the direction of the movement ray\n D = B - A\n dist = LA.norm(D)\n\n # trace a ray to get the hit record towards scene objects\n ray = Ray(A, D / dist)\n d, hr = self.trace(ray)\n\n # we can move if we don't hit a wall\n return dist < d\n\n\n# Definition of a square scene. 
The scene itself is simple, therefore we change\n# the implementation of the isValidMove and just check if the target location is\n# within the box\nclass Square(Scene):\n def __init__(self):\n super(Square, self).__init__()\n\n # a square world consists of an AABox in which the agent moves\n self.aabox = AABox(np.array([-1.0, -1.0, 0.0]),\n np.array([ 1.0, 1.0, 0.0]))\n\n self.addObject(self.aabox)\n\n def isInside(self, A):\n return not ((A[0] < self.aabox.X0[0]) or (A[0] > self.aabox.X1[0]) or\n (A[1] < self.aabox.X0[1]) or (A[1] > self.aabox.X1[1]))\n\n\n def isValidMove(self, A, B):\n return self.isInside(B)\n\n\n def getScenePatch(self, **kwargs):\n verts = [\n (-1.0, -1.0),\n ( 1.0, -1.0),\n ( 1.0, 1.0),\n (-1.0, 1.0),\n ( 0.0, 0.0)]\n codes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.CLOSEPOLY]\n\n scenepath = Path(verts, codes)\n return patches.PathPatch(scenepath, **kwargs)\n\n\n\n# The circular scene is also simple to test, therefore again the isValidMove\n# function is overridden\nclass Circular(Scene):\n def __init__(self):\n super(Circular, self).__init__()\n\n self.cyl = Cylinder(np.array([0.0, 0.0, 0.0]),\n np.array([0.0, 0.0, 1.0]),\n 1.0)\n\n self.addObject(self.cyl)\n\n\n def isInside(self, A):\n return (A[0]**2 + A[1]**2) <= self.cyl.r**2\n\n\n def isValidMove(self, A, B):\n return self.isInside(B)\n\n\n def getScenePatch(self, **kwargs):\n return patches.Circle((0.0, 0.0), 1.0, **kwargs)\n\n\n\nclass TMaze(Scene):\n def __init__(self):\n super(TMaze, self).__init__()\n\n # the polywall is constructed by all points of the T in\n # counter-clockwise direction\n self.polywall = PolyWall([\n [-0.33, -1.00, 0.0],\n [ 0.33, -1.00, 0.0],\n [ 0.33, 0.33, 0.0],\n [ 1.00, 0.33, 0.0],\n [ 1.00, 1.00, 0.0],\n [-1.00, 1.00, 0.0],\n [-1.00, 0.33, 0.0],\n [-0.33, 0.33, 0.0]\n ])\n\n self.addObject(self.polywall)\n\n def getScenePatch(self, **kwargs):\n verts = [\n (-0.33, -1.00),\n ( 0.33, -1.00),\n ( 0.33, 0.33),\n ( 1.00, 0.33),\n ( 1.00, 1.00),\n (-1.00, 1.00),\n (-1.00, 0.33),\n (-0.33, 0.33),\n ( 0.00, 0.00)]\n codes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.CLOSEPOLY]\n\n scenepath = Path(verts, codes)\n return patches.PathPatch(scenepath, **kwargs)\n\n\n\n\nclass Triangular(Scene):\n def __init__(self):\n super(Triangular, self).__init__()\n\n\n self.addObject(\n PolyWall([\n [-1.0, 0.0, 0.0],\n [ 1.0, -1.0, 0.0],\n [ 1.0, 1.0, 0.0]\n ])\n )\n\n\n def getScenePatch(self, **kwargs):\n verts = [\n (-1.0, 0.0),\n ( 1.0, -1.0),\n ( 1.0, 1.0),\n ( 0.0, 0.0)]\n codes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.CLOSEPOLY]\n\n scenepath = Path(verts, codes)\n return patches.PathPatch(scenepath, **kwargs)\n\n","sub_path":"scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645961862","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom tencent.items import TencentItem\n\nclass TencentcareersSpider(scrapy.Spider):\n name = 'tencentCareers'\n allowd_domains = ['tencent.com']\n url='http://hr.tencent.com/position.php?&start='\n \n offset=0\n start_urls = [url+str(offset)+'#a']\n\n def parse(self, response):\n for each in response.xpath(\"//tr[@class='even'] | //tr[@class='odd']\"):\n item=TencentItem()\n #store the data into dict item\n #extract will transform the data into unicode string\n 
item['positionname']=each.xpath('./td[1]/a/text()').extract()[0]\n item['positionlink']=each.xpath('./td[1]/a/@href').extract()[0]\n item['positionType']=each.xpath('./td[2]/text()').extract()[0]\n item['peopleNum']=each.xpath('./td[3]/text()').extract()[0]\n item['workLocation']=each.xpath('./td[4]/text()').extract()[0]\n item['publishTime']=each.xpath('./td[5]/text()').extract()[0]\n \n #item will be sent to pipelines\n yield item\n\n # after handling one page, launch another request\n if self.offset<3000:\n self.offset+=10\n else:\n # raising a plain string is a TypeError in Python 3; just stop paging\n return\n # request will be sent to scheduler\n yield scrapy.Request(self.url+str(self.offset)+'#a',callback=self.parse)\n \n \n","sub_path":"tencent/tencent/spiders/tencentCareers.py","file_name":"tencentCareers.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76356021","text":"from setuptools import setup\n\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(name='gitlab-webhook-telegram',\n version='1.1',\n description='A simple bot reacting to gitlab webhook',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='http://github.com/nanoy42/gitlab-webhook-telegram',\n author='Yoann `Nanoy` Pietri',\n author_email='me@nanoy.fr',\n license='GNU General Public License v3.0',\n packages=['gwt'],\n zip_safe=False,\n install_requires=['docopt', 'python-telegram-bot'],\n scripts=['bin/gwt'],\n include_package_data=True,\n)\n","sub_path":"pypi_install_script/gitlab-webhook-telegram-1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"57989326","text":"'''Trains a simple convnet on the MNIST dataset.\n\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nfrom __future__ import print_function\nimport keras\n# from keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nfrom PIL import Image\nimport numpy as np\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport random\n'''My data loader\n'''\nx_train_all = []\ny_train_all = []\n\nlabeled_data_path = \"training_data_7000\"\nlabeled_data_path = \"training_data_visible_28x28\"\nlabeled_data_path = \"testing_data_7000_visible\"\nlabeled_data_path = \"training_data_7000_0_visible\"\nlabeled_data_path = \"testing_data_7000_visible\"\nlabeled_data_path = \"testing_data_9_bit_9000_visible\"\ndata_volume=7000\ndata_volume=9000\n# data_volume=256\n\nfor i in range(data_volume):\n im_path=labeled_data_path+\"/number_\"+str(i)+\".png\"\n im = Image.open(im_path)\n y_lable=im.getpixel((im.size[0]-1,im.size[0]-1))[3]\n# print im.mode\n im=im.convert(\"RGB\")\n# print im.mode\n im_array=np.asarray(im)\n \n x_train_all.append(im_array)\n y_train_all.append(y_lable)\n\nprint(len(x_train_all))\nprint(len(y_train_all))\nx_train_all=np.array(x_train_all)\ny_train_all=np.array(y_train_all)\n\n'''Shuffle operation\ntrain_all=[[x,y] for x,y in 
zip(x_train_all,y_train_all)]\nrandom.shuffle(train_all)\n\nx_train_all=[xy[0] for xy in train_all]\ny_train_all=[xy[1] for xy in train_all]\n\nx_train_all=np.array(x_train_all)\ny_train_all=np.array(y_train_all)\n'''\nprint(\"Done load\")\nprint(x_train_all.shape)\nprint(y_train_all.shape)\n'''My data loader end\n'''\n\nbatch_size = 128\nnum_classes = 10\nepochs = 12\nepochs = 26\nepochs = 50\n# epochs = 1\n\n'''Loss-print loader\n'''\nclass LossHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = {'batch':[], 'epoch':[]}\n self.accuracy = {'batch':[], 'epoch':[]}\n self.val_loss = {'batch':[], 'epoch':[]}\n self.val_acc = {'batch':[], 'epoch':[]}\n\n def on_batch_end(self, batch, logs={}):\n self.losses['batch'].append(logs.get('loss'))\n self.accuracy['batch'].append(logs.get('acc'))\n self.val_loss['batch'].append(logs.get('val_loss'))\n self.val_acc['batch'].append(logs.get('val_acc'))\n\n def on_epoch_end(self, batch, logs={}):\n self.losses['epoch'].append(logs.get('loss'))\n self.accuracy['epoch'].append(logs.get('acc'))\n self.val_loss['epoch'].append(logs.get('val_loss'))\n self.val_acc['epoch'].append(logs.get('val_acc'))\n\n def loss_plot(self, loss_type):\n iters = range(len(self.losses[loss_type]))\n plt.figure()\n # acc\n plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')\n # loss\n plt.plot(iters, self.losses[loss_type], 'g', label='train loss')\n if loss_type == 'epoch':\n # val_acc\n plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')\n # val_loss\n plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')\n plt.grid(True)\n plt.xlabel(loss_type)\n plt.ylabel('acc-loss')\n plt.legend(loc=\"upper right\")\n # save before show(), otherwise the saved figure may be blank\n plt.savefig(\"loss_curve-50.png\")\n plt.show()\n \n\n'''Loss-print loader end\n'''\n\n\nprint(\"Hello start\")\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, split between train and test sets\n'''\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n'''\nx_train = x_train_all[900:9000]\nx_test = x_train_all[0:900]\ny_train = y_train_all[900:9000]\ny_test =y_train_all[0:900]\n\n'''\nx_test = x_train_all[0:7000]\ny_test =y_train_all[0:7000]\n'''\n# x_train,x_test=np.array(x_train),np.array(x_test)\n# y_train,y_test=np.array(y_train),np.array(y_test)\n\n\nprint(len(x_train))\nprint(len(y_train))\nprint(len(x_test))\nprint(len(y_test))\n# print(x_train[0])\n# print(x_test[0])\n# print(y_train[0:20])\n# print(y_test[0:20])\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\n\n\nif K.image_data_format() == 'channels_first':\n# x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n# x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\n input_shape = (3, img_rows, img_cols)\nelse:\n# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n# input_shape = (img_rows, img_cols, 1)\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)\n input_shape = (img_rows, img_cols, 3)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary 
class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\n\nprint(\"start construct model\")\n\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n# Print model\n# \n# model.summary()\n# Print model\n\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\nprint(\"compiled model\")\n\nhistory = LossHistory()\n'''\nprint(\"start fit model\")\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n'''\n# model.load_weights(\"model_img_crypto-50.h5\")\n# model.load_weights(\"model_W_img_crypto_7000_v2.h5\")\nmodel.load_weights(\"model_W_img_crypto_7000_0.h5\")\n# model=load_model.load_weights(\"model_img_crypto-50.h5\",)\n\n# x_train = x_train_all[0:6000]\n# y_train = y_train_all[0:6000]\n\n# x_test = x_train_all[5000:7000]\n# y_test =y_train_all[5000:7000]\nscore = model.evaluate(x_train, y_train, verbose=0)\n# print(\"y_test=\",y_test[0])\n\n\nprint(\"Start test check\")\n\ntest_error_count=0\ntrain_error_count=0\nbit_len=9\nfor i in range(900//bit_len):\n# x=[x_test[i]]\n x=x_test[bit_len*i:bit_len*(i+1)]\n x=np.array(x)\n# print(\"x_shape=\",x.shape)\n# print(x)\n# print(x.shape)\n# x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\n y=model.predict(x)\n# print(\"y=\",y)\n y= np.argmax(y,axis=1)\n# print(\"y=\",y)\n# z=y_test[i]\n z=y_test[bit_len*i:bit_len*(i+1)]\n# print(\"z=\",z)\n z= np.argmax(z,axis=1)\n# print(\"z=\",z)\n# print(y!=z)\n if (y!=z).any():\n cmp_table=(y==z)\n# cmp_table[cmp_table==True]=\"T\"\n cmp_table=[item if (item ==False) else \"T\" for item in cmp_table]\n\n# true_table=np.array([True]*20)\n\n# print(i,\"=================={\",y,z,cmp_table,\"}\")\n print(i,\"=================={\",y,z,\"}\")\n print(\"{\",y-z,\"}\")\n print(\"{\",cmp_table,\"}\")\n# print(\"{\",true_table^cmp_table,\"}\")\n\n test_error_count=test_error_count+1\n# print(i,\"======{\",y,z,\"}\")\n# print(i,\"===============================================\")\n\nprint(\"Done test check\")\n\nprint(\"Start train check\")\nfor i in range(((9000-900)//bit_len)):\n# x=[x_test[i]]\n x=x_train[bit_len*i:bit_len*(i+1)]\n x=np.array(x)\n# print(\"x_shape=\",x.shape)\n\n# print(x)\n# print(x.shape)\n# x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\n y=model.predict(x)\n# print(\"y=\",y)\n\n y= np.argmax(y,axis=1)\n# print(\"y=\",y)\n# z=y_test[i]\n z=y_train[bit_len*i:bit_len*(i+1)]\n# print(\"z=\",z)\n z= np.argmax(z,axis=1)\n# print(\"z=\",z)\n# print(y!=z)\n if (y!=z).any():\n cmp_table=(y==z)\n cmp_table=[item if (item ==False) else \"T\" for item in cmp_table]\n# true_table=np.array([True]*20)\n# cmp_table=y-z\n# print(i,\"=================={\",y,z,cmp_table,\"}\")\n print(i,\"=================={\",y,z,\"}\")\n print(\"{\",y-z,\"}\")\n print(\"{\",cmp_table,\"}\")\n train_error_count=train_error_count+1\n# print(i,\"======{\",y,z,\"}\")\n# print(i,\"===============================================\")\n\n\n'''\nfor i in range(6000):\n x=[x_train[i]]\n x=np.array(x)\n# print(x)\n# print(x.shape)\n# x_test = x_test.reshape(x_test.shape[0], 3, 
img_rows, img_cols)\n y=model.predict(x)\n y= np.argmax(y)\n z=y_train[i]\n z= np.argmax(z)\n if y!=z:\n print(i,\"=================={\",y,z,\"}\")\n train_error_count=train_error_count+1\n print(i,\"======{\",y,z,\"}\")\n'''\nprint(\"Done train check\")\n\n# model.save(\"\")\n\n# model.save_weights(\"model_img_crypto-50.h5\")\nprint(\"Saved model to disk\")\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nprint(\"train_set_error====\",train_error_count,\"\\%9000-900\")\nprint(\"test_set_error====\",test_error_count,\"\\$900\")\n# history.loss_plot('epoch')\n\n# model.summary()\n\nprint(\"over\")","sub_path":"cnn-modify_image_crypto_test_9_bit_AAA.py","file_name":"cnn-modify_image_crypto_test_9_bit_AAA.py","file_ext":"py","file_size_in_byte":9565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"596136689","text":"\"\"\"empty message\n\nRevision ID: c3a8cf85c845\nRevises: 8aee0ac135b1\nCreate Date: 2021-08-31 15:13:02.045705\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c3a8cf85c845'\ndown_revision = '8aee0ac135b1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('candela', 'portada',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('candela', 'portada',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c3a8cf85c845_.py","file_name":"c3a8cf85c845_.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634893208","text":"import sys\n\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom PyQt5.QtGui import QIcon\n\nfrom ui_Widget import Ui_Widget\n\nclass QmyWidget(QWidget): \n def __init__(self, parent=None):\n super().__init__(parent) # call the parent constructor to create the window\n self.ui=Ui_Widget() # create the UI object\n self.ui.setupUi(self) # build the UI\n \n## ==========slot functions auto-connected by connectSlotsByName()==================== \n def on_btnIniItems_clicked(self): ##“Initialize list” button\n # variant that sets icons\n icon=QIcon(\":/icons/images/aim.ico\")\n self.ui.comboBox.clear() # clear the list\n provinces=[\"山东\",\"河北\",\"河南\",\"湖北\",\"湖南\",\"广东\"] # list data\n for i in range(len(provinces)):\n self.ui.comboBox.addItem(icon,provinces[i])\n\n## # variant without icons\n## self.ui.comboBox.clear() # clear the list\n## provinces=[\"山东\",\"河北\",\"河南\",\"湖北\",\"湖南\",\"广东\"] # list data\n## self.ui.comboBox.addItems(provinces) # add the whole list at once, but icons cannot be attached\n\n\n def on_btnClearItems_clicked(self): ##“Clear list” button\n self.ui.comboBox.clear()\n\n @pyqtSlot(bool) ##“Editable” CheckBox\n def on_chkBoxEditable_clicked(self,checked): \n self.ui.comboBox.setEditable(checked)\n\n @pyqtSlot(str) ## current-item change of the simple ComboBox\n def on_comboBox_currentIndexChanged(self,curText):\n self.ui.lineEdit.setText(curText)\n\n def on_btnIni2_clicked(self): ## initialize comboBox2 with user data\n icon=QIcon(\":/icons/images/unit.ico\")\n self.ui.comboBox2.clear()\n cities={\"北京\":10, \"上海\":21, \"天津\":22,\n \"徐州\":516, \"福州\":591, \"青岛\":532} # dict data\n for k in cities:\n self.ui.comboBox2.addItem(icon,k,cities[k])\n\n @pyqtSlot(str) ## current-item change\n def on_comboBox2_currentIndexChanged(self,curText): \n self.ui.lineEdit.setText(curText)\n zone=self.ui.comboBox2.currentData() # read the associated data\n if (zone != None): # this check is required because the value may be None\n 
self.ui.lineEdit.setText(curText+\":区号=%d\"%zone)\n \n## =========custom slot functions=================================== \n\n \n## ===========window test program ================================ \nif __name__ == \"__main__\": \n app = QApplication(sys.argv) \n form=QmyWidget() \n form.show()\n sys.exit(app.exec_())\n","sub_path":"pyqt/DemoFullCode-PythonQt/chap03Widgets/Demo3_6ComboBox/myWidget.py","file_name":"myWidget.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"317291397","text":"# 621. Task Scheduler\n# Medium\n\n# Given a char array representing tasks CPU need to do. It contains capital letters A to Z where different letters represent different tasks. Tasks could be done without original order. Each task could be done in one interval. For each interval, CPU could finish one task or just be idle.\n\n# However, there is a non-negative cooling interval n that means between two same tasks, there must be at least n intervals that CPU are doing different tasks or just be idle.\n\n# You need to return the least number of intervals the CPU will take to finish all the given tasks.\n\n\n# Example:\n\n# Input: tasks = [\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], n = 2\n# Output: 8\n# Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.\n\n\ndef leastInterval(tasks, n):\n import collections\n import heapq\n n += 1\n count = collections.Counter(tasks)\n heap = [-v for v in count.values()]\n heapq.heapify(heap)\n res = 0\n while heap:\n arr = []\n cnt = 0\n for _ in range(n):\n if heap:\n item = heapq.heappop(heap)\n cnt += 1\n if item < -1:\n arr.append(item + 1)\n for item in arr:\n heapq.heappush(heap, item)\n res += heap and n or cnt # == if heap then n else cnt\n return res\n\n\ntasks = [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"]\nn = 2\nprint(leastInterval(tasks, n))\n","sub_path":"heap/task_scheduler.py","file_name":"task_scheduler.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"152674746","text":"import logging\n\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n# \"\"\"\n# HasRowsOperator is used in the subdag which moves\n# the data from S3 to staging tables.\n# This operator is used for data quality checks\n# \"\"\"\nclass HasRowsOperator(BaseOperator):\n# '''\n# Constructor HasRowsOperator\n# Parameters:\n# redshift_conn_id (string): the name of the connection\n# table (string): the name of the table\n# '''\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"\",\n table=\"\",\n *args, **kwargs):\n\n super(HasRowsOperator, self).__init__(*args, **kwargs)\n self.table = table\n self.redshift_conn_id = redshift_conn_id\n\n def execute(self, context):\n # connect to Redshift with the PostgresHook\n self.log.info('HasRowsOperator')\n redshift_hook = PostgresHook(self.redshift_conn_id)\n records = redshift_hook.get_records(f\"SELECT COUNT(*) FROM {self.table}\")\n# \"\"\"check if the staging tables were created \"\"\"\n if len(records) < 1 or len(records[0]) < 1:\n raise ValueError(f\"Data quality check failed. {self.table} returned no results\")\n num_records = records[0][0]\n# \"\"\"check if any records were loaded and raise an error if none exist\"\"\"\n if num_records < 1:\n raise ValueError(f\"Data quality check failed. 
{self.table} contained 0 rows\")\n # logging the number of records loaded in the staging table\n logging.info(f\"Data quality on table {self.table} check passed with {records[0][0]} records\")","sub_path":"plugins/operators/has_rows.py","file_name":"has_rows.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"280188580","text":"import argparse\nimport numpy as np\nimport pandas as pd\nimport os\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\nimport utils\nfrom sklearn.model_selection import train_test_split\nfrom tabulate import tabulate\n\nNON_ML_FEATURE_COLUMNS = ['Gain', 'Symbol', 'Date']\nDEFAULT_TRAIN_ITER = 1\n\n\nclass ML(object):\n\n def __init__(self, data_files, model=None, train_iter=DEFAULT_TRAIN_ITER):\n data_files = data_files\n self.model = model\n self.train_iter = train_iter\n self.root_dir = os.path.dirname(os.path.realpath(__file__))\n self.df = pd.concat([pd.read_csv(data_file) for data_file in data_files])\n self.X, self.T, self.y = [], [], []\n for _, row in self.df.iterrows():\n x_value = [row[col] for col in utils.ML_TECH_FEATURES]\n t_value = [[row[col]] for col in utils.ML_TIME_FEATURES]\n gain = row['Gain']\n if gain >= 0.01:\n y_value = [1, 0, 0]\n elif gain <= -0.01:\n y_value = [0, 1, 0]\n else:\n y_value = [0, 0, 1]\n self.X.append(x_value)\n self.T.append(t_value)\n self.y.append(y_value)\n self.X = np.array(self.X)\n self.T = np.array(self.T)\n self.y = np.array(self.y, dtype=np.float32)\n self.w = 0.3 + np.arange(len(self.df)) / len(self.df) * 0.7\n split_result = train_test_split(self.X, self.T, self.y, self.w, test_size=0.1, random_state=0)\n self.X_train, self.X_test = split_result[0:2]\n self.T_train, self.T_test = split_result[2:4]\n self.y_train, self.y_test = split_result[4:6]\n self.w_train, self.w_test = split_result[6:8]\n\n @staticmethod\n def loss_function(c_layer):\n def _loss(y_true, y_pred):\n if not tf.is_tensor(y_pred):\n y_pred = tf.constant(y_pred)\n return K.mean(c_layer * K.square(y_pred - y_true) + 0.8 * (1 - c_layer), axis=-1)\n\n return _loss\n\n @staticmethod\n def create_model():\n x_input = keras.layers.Input(shape=(len(utils.ML_TECH_FEATURES, )), name='x_input')\n x = keras.layers.Dense(50, activation='relu', name='x_dense_1')(x_input)\n x = keras.layers.Dense(20, activation='relu', name='x_dense_2')(x)\n x = keras.layers.Dense(10, activation='relu', name='x_dense_3')(x)\n x = keras.layers.Dropout(0.2, name='x_dropout')(x)\n\n t_input = keras.layers.Input(shape=(len(utils.ML_TIME_FEATURES), 1), name='t_input')\n t = keras.layers.Conv1D(4, kernel_size=3, activation='relu', use_bias=False, name='t_conv_1')(t_input)\n t = keras.layers.Conv1D(8, kernel_size=3, activation='relu', use_bias=False, name='t_conv_2')(t)\n t = keras.layers.MaxPool1D(pool_size=2, name='t_pool_1')(t)\n t = keras.layers.Conv1D(8, kernel_size=3, activation='relu', use_bias=False, name='t_conv_3')(t)\n t = keras.layers.Conv1D(16, kernel_size=3, activation='relu', use_bias=False, name='t_conv_4')(t)\n t = keras.layers.MaxPool1D(pool_size=2, name='t_pool_2')(t)\n t = keras.layers.Conv1D(32, kernel_size=3, activation='relu', use_bias=False, name='t_conv_5')(t)\n t = keras.layers.Conv1D(64, kernel_size=3, activation='relu', use_bias=False, name='t_conv_6')(t)\n t = keras.layers.MaxPool1D(pool_size=2, name='t_pool_3')(t)\n t = keras.layers.Flatten(name='t_flatten')(t)\n t = 
keras.layers.Dropout(0.3, name='t_dropout')(t)\n\n info = keras.layers.concatenate([x, t])\n\n r = keras.layers.Dense(3, activation='softmax', name='classification',\n kernel_regularizer=keras.regularizers.l2(0.1))(info)\n\n model = keras.Model(inputs=[x_input, t_input], outputs=r)\n\n model.compile(optimizer='adam', loss='mse')\n model.summary()\n return model\n\n def fit_model(self, model):\n early_stopping = keras.callbacks.EarlyStopping(\n monitor='val_loss', patience=5, restore_best_weights=True)\n model.fit([self.X_train, self.T_train], self.y_train, batch_size=512, epochs=1000,\n sample_weight=self.w_train,\n validation_data=([self.X_test, self.T_test], self.y_test, self.w_test),\n callbacks=[early_stopping])\n\n def evaluate(self, model):\n y_pred = model.predict([self.X, self.T])\n y_true = self.y\n precision, recall, accuracy = get_accuracy(y_true, y_pred)\n\n c_matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n for yi, pi in zip(y_true, y_pred):\n c_true = np.argmax(yi)\n c_pred = np.argmax(pi)\n c_matrix[c_true][c_pred] += 1\n pos_true = np.sum(c_matrix[0])\n neg_true = np.sum(c_matrix[2])\n pos_pred = c_matrix[0][0] + c_matrix[1][0] + c_matrix[2][0]\n baseline = pos_true / (pos_true + neg_true + 1E-7)\n print(utils.get_header('Examples'))\n example_count = 30\n examples = []\n for i in range(example_count):\n if np.argmax(y_true[i]) == np.argmax(y_pred[i]):\n correct = 'Y'\n elif np.argmax(y_true[i]) == 1:\n correct = 'I'\n else:\n correct = 'N'\n examples.append([y_true[i], y_pred[i], correct])\n print(tabulate(examples, tablefmt='grid', headers=['Truth', 'Prediction', 'Correct']))\n print(utils.get_header('Classification Matrix'))\n matrix = [['', 'Prediction Gain', 'Prediction Flat', 'Prediction Loss'],\n ['Truth Gain'] + c_matrix[0],\n ['Truth Flat'] + c_matrix[1],\n ['Truth Loss'] + c_matrix[2]]\n print(tabulate(matrix, tablefmt='grid'))\n print(utils.get_header('Model Stats'))\n output = [['Precision', '%.2f%%' % (precision * 100,)],\n ['Recall', '%.2f%%' % (recall * 100,)],\n ['Accuracy', '%.2f%%' % (accuracy * 100,)],\n ['Baseline Precision', '%.2f%%' % (baseline * 100,)],\n ['Positive Count', pos_pred]]\n print(tabulate(output, tablefmt='grid'))\n #plot(y_true, y_pred)\n return precision\n\n def train(self):\n model = self.create_model()\n self.fit_model(model)\n precision = self.evaluate(model)\n model_name = 'model_p%d.hdf5' % (int(precision * 1E6),)\n model.save(os.path.join(self.root_dir, utils.MODELS_DIR, model_name))\n\n def load(self):\n model = keras.models.load_model(\n os.path.join(self.root_dir, utils.MODELS_DIR, self.model))\n model.summary()\n self.evaluate(model)\n\n\ndef plot(y_true, y_pred, c_pred, c_boundary):\n points = {}\n y_min = np.percentile(y_pred, 10)\n y_max = np.percentile(y_pred, 90)\n granularity = (y_max - y_min) / 10\n for pi, ci, yi in zip(y_pred, c_pred, y_true):\n if ci < c_boundary:\n continue\n p = (int(pi / granularity), int(yi / 0.1))\n points[p] = points.get(p, 0) + 1\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(17, 17))\n max_size = max(points.values())\n for point, size in points.items():\n ax1.plot([point[0] * granularity], [point[1] * 0.1], 'o', markersize=size / max_size * 12, c='C0')\n ax1.grid('--')\n ax1.set_xlim((y_min-0.1, y_max+0.1))\n ax1.set_ylim((-1.5, 1.5))\n ax1.set_xlabel('Prediction')\n ax1.set_ylabel('Truth')\n ax1.set_title('Scatter Plot')\n\n ax2.hist(c_pred, bins=20)\n ax2.set_xlabel('Confidence')\n ax2.set_ylabel('Count')\n ax2.set_title('Confidence Distribution')\n\n ax3.hist(y_pred, bins=20)\n 
ax3.set_xlabel('Prediction')\n ax3.set_ylabel('Count')\n ax3.set_title('Prediction Distribution')\n\n ax4.hist(y_true, bins=20)\n ax4.set_xlabel('Label')\n ax4.set_ylabel('Count')\n ax4.set_title('Truth Distribution')\n plt.show()\n\n\ndef get_accuracy(y_true, y_pred):\n tp, tn, fp, fn = 0, 0, 0, 0\n for pi, yi in zip(y_pred, y_true):\n yc = np.argmax(yi)\n pc = np.argmax(pi)\n if pc == 0:\n if yc == 0:\n tp += 1\n elif yc == 2:\n fp += 1\n elif pc == 2:\n if yc == 0:\n fn += 1\n elif yc == 2:\n tn += 1\n precision = tp / (tp + fp + 1E-7)\n recall = tp / (tp + fn + 1E-7)\n accuracy = (tp + tn) / (tp + fp + fn + tn + 1E-7)\n return precision, recall, accuracy\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Stock trading ML model.')\n parser.add_argument('--model', default=None,\n help='Model name to load')\n parser.add_argument('--data_files', required=True, nargs='+',\n help='Data to train on.')\n parser.add_argument('--train_iter', type=int, default=DEFAULT_TRAIN_ITER,\n help='Iterations in training.')\n args = parser.parse_args()\n ml = ML(args.data_files, args.model, args.train_iter)\n print(args.data_files)\n if args.model:\n ml.load()\n else:\n ml.train()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"600054044","text":"import toml\n\nall_simulations = [\"dust\", \"synchrotron\", \"ame\"]\nmodel = \"1\"\n\nfor nside in [512, 4096]:\n small_scale = \"s\" if nside > 512 else \"\"\n for content in all_simulations:\n config = {\n \"tag\": content,\n \"pysm_components\": dict(\n pysm_components_string=\"SO_\" + content[0] + model + small_scale,\n pysm_output_reference_frame=\"C\",\n ),\n }\n with open(\"{}_{}.toml\".format(content, nside), \"w\") as f:\n toml.dump(config, f)\n","sub_path":"201904_highres_foregrounds_variable_spectral_index/prepare_cfg.py","file_name":"prepare_cfg.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"612271246","text":"import numpy as np\nfrom numpy import linalg as LA\nimport pickle\nimport random\nfrom collections import Counter\nimport re\n\ndef turn_data_into_separate(name):\n with open('/Users/yuanzhepang/Desktop/Computer Science/Research/data/style-classify/v1.0/'+str(name)) as f:\n train = f.readlines()\n\n y = []\n X = [[] for i in range(4)]\n for i in range(len(train)):\n tmp = re.split('[\\t]',train[i])\n train[i] = tmp[0]\n y.append(tmp[1])\n \n # four possibilities for y\n # email - [1,0,0,0]; newsgroups - [0,1,0,0]; reviews - [0,0,1,0]; weblogs - [0,0,0,1]\n for i in range(len(y)):\n if y[i][0] == 'e':\n X[0].append(train[i])\n elif y[i][0] == 'n':\n X[1].append(train[i])\n elif y[i][0] == 'r':\n X[2].append(train[i])\n elif y[i][0] == 'w':\n X[3].append(train[i]) # check existence of other cases?\n \n for i in range(4):\n with open('/Users/yuanzhepang/Desktop/Computer Science/Research/data/style-classify/v1.0/'+name+'.'+str(i),'w') as f:\n for l in X[i]:\n f.write(l+'\\n')\n\ndef build_vocab(data, path, min_occur=3):\n word2id = {'':0, '':1, '':2, '':3}\n id2word = ['', '', '', '']\n\n words = [word for sent in data for word in sent]\n cnt = Counter(words)\n for word in cnt:\n if cnt[word] >= min_occur:\n word2id[word] = len(word2id)\n id2word.append(word)\n vocab_size = len(word2id)\n with open(path, 'wb') as f:\n pickle.dump((vocab_size, word2id, 
id2word), f, pickle.HIGHEST_PROTOCOL)\n\nclass Vocabulary(object):\n def __init__(self, vocab_file, emb_file='', dim_emb=0):\n with open(vocab_file, 'rb') as f:\n self.size, self.word2id, self.id2word = pickle.load(f)\n self.dim_emb = dim_emb\n self.embedding = np.random.random_sample(\n (self.size, self.dim_emb)) - 0.5\n\n if emb_file:\n print('Loading word vectors from', emb_file)\n with open(emb_file) as f:\n for line in f:\n parts = line.split()\n word = parts[0]\n vec = np.array([float(x) for x in parts[1:]])\n if word in self.word2id:\n self.embedding[self.word2id[word]] = vec\n\n # for i in range(self.size):\n # self.embedding[i] /= LA.norm(self.embedding[i])\n\n\n\n '''\n embd = np.ones((self.vocab_size, self.embedding_size), dtype=np.float32)\n with open(pretrain_embedding_path, \"r\") as fin:\n for line in fin.readlines():\n row = line.strip().split(\" \")\n if row[0] in dictionary:\n embd[dictionary[row[0]]] = row[1:]\n '''\n\n","sub_path":"vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"14538964","text":"import numpy as np\nimport torch\nimport pdb\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport itertools\n\nnum_data = 50000\nnum_classes = 1000\n\n# Temps = [0.001, 0.01, 0.1, 5, 10, 50, 100, 500, 1000, 5000, 10000]\n# Temps = [1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]\nTemps = np.arange(1.01, 1.2 ,0.02)\n\nmodels = ['vgg19_bn', 'resnet152', 'densenet161', 'densenet121', 'densenet201', 'resnet101', 'densenet169', 'resnet50']\n\ntarget = torch.load('Ensemble/imagenet/vgg19_bn/Targets of vgg19_bn.pt')\n\ndata = {}\nfor m in models:\n data[m] = torch.load('Ensemble/imagenet/{}/Logit Outputs of {}.pt'.format(m, m))\n\ndef logit_ensemble(models, data, target):\n output = torch.zeros(num_data, num_classes).cuda()\n for m in models:\n output += data[m]\n\n target_exp = target.view(-1, 1).expand(-1, num_classes).cuda()\n _, pred = output.topk(num_classes, 1, True, True)\n correct = pred.data.eq(target_exp).t()\n correct_1 = torch.sum(correct[:1])\n correct_5 = torch.sum(correct[:5])\n\n V = torch.Tensor([range(1, num_classes+1)]).t().cuda()\n gesNum = V * correct.float()\n zero_map = gesNum == 0\n zero_map = zero_map.float() * 999\n # pdb.set_trace()\n gesNum = gesNum + zero_map\n gesNum, _ = torch.min(gesNum,0)\n\n # pdb.set_trace()\n AverGesNum = torch.mean(gesNum)\n\n if AverGesNum > 50:\n pdb.set_trace()\n\n return correct_1 / len(target), correct_5 / len(target), AverGesNum\n\ndef temperature_ensemble(models, data, target, T):\n softmax = nn.Softmax().cuda()\n output = Variable(torch.zeros(num_data, num_classes).cuda())\n for m in models:\n output += softmax(Variable(data[m])/T)\n # pdb.set_trace()\n\n target_exp = target.view(-1, 1).expand(-1, num_classes).cuda()\n _, pred = output.topk(num_classes, 1, True, True)\n correct = pred.data.eq(target_exp).t()\n correct_1 = torch.sum(correct[:1])\n correct_5 = torch.sum(correct[:5])\n\n V = torch.Tensor([range(1, num_classes+1)]).t().cuda()\n gesNum = V * correct.float()\n zero_map = gesNum == 0\n zero_map = zero_map.float() * 999\n # pdb.set_trace()\n gesNum = gesNum + zero_map\n gesNum, _ = torch.min(gesNum,0)\n\n # pdb.set_trace()\n AverGesNum = torch.mean(gesNum)\n\n # if AverGesNum > 50:\n # pdb.set_trace()\n\n return correct_1 / len(target), correct_5 / len(target), AverGesNum\n\n\ndef geometric_ensemble(models, data, target):\n softmax = nn.Softmax().cuda()\n output = 
Variable(torch.ones(num_data, num_classes).cuda())\n for m in models:\n output *= softmax(Variable(data[m]))\n\n target = target.view(-1, 1).expand(-1, 5).cuda()\n _, pred = output.topk(5, 1, True, True)\n correct = pred.data.eq(target).t()\n correct_1 = torch.sum(correct[:1])\n correct_5 = torch.sum(correct[:5])\n\n return correct_1 / len(target), correct_5 / len(target)\n\n\n \n\n\nResult = {}\ncompare_top1 = {}\ncompare_top5 = {}\nfor T in Temps:\n # print(T)\n compare_top1[T] = {}\n compare_top5[T] = {}\n compare_top1[T]['better'], compare_top1[T]['worse'], compare_top1[T]['equal'], compare_top1[T]['improve'], compare_top1[T]['gesNum'] = 0, 0, 0, [], (-1,-1)\n compare_top1[T]['gNumBetter'], compare_top1[T]['gNumWorse'], compare_top1[T]['gNumEqual'] = 0, 0, 0\n compare_top5[T]['better'], compare_top5[T]['worse'], compare_top5[T]['equal'], compare_top5[T]['improve'] = 0, 0, 0, []\n ground_gesNum = []\n gesNum = []\n ## average improvement\n for r in range(2, len(models)+1):\n for submodels in itertools.combinations(models, r):\n submodels = list(submodels)\n A1, A5, Anum = temperature_ensemble(submodels, data, target, 1)\n C1, C5, Cnum = temperature_ensemble(submodels, data, target, T)\n compare_top1[T]['improve'].append(C1 - A1)\n compare_top5[T]['improve'].append(C5 - A5)\n ground_gesNum.append(Anum)\n gesNum.append(Cnum)\n print('T = {}: ({},{})'.format(T, Anum, Cnum))\n \n if C1 > A1:\n compare_top1[T]['better'] += 1\n elif C1 < A1:\n compare_top1[T]['worse'] += 1\n elif C1 == A1:\n compare_top1[T]['equal'] += 1\n if C5 > A5:\n compare_top5[T]['better'] += 1\n elif C5 < A5:\n compare_top5[T]['worse'] += 1\n elif C5 == A5:\n compare_top5[T]['equal'] += 1\n if Cnum < Anum:\n compare_top1[T]['gNumBetter'] += 1\n elif Cnum > Anum:\n compare_top1[T]['gNumWorse'] += 1\n elif Cnum == Anum:\n compare_top1[T]['gNumEqual'] += 1\n compare_top1[T]['improve'] = sum(compare_top1[T]['improve']) / len(compare_top1[T]['improve'])\n compare_top5[T]['improve'] = sum(compare_top5[T]['improve']) / len(compare_top5[T]['improve'])\n compare_top1[T]['accBetterRate'] = compare_top1[T]['better'] / (compare_top1[T]['better']+compare_top1[T]['equal']+compare_top1[T]['worse'])\n compare_top5[T]['accBetterRate'] = compare_top5[T]['better'] / (compare_top5[T]['better']+compare_top5[T]['equal']+compare_top5[T]['worse'])\n compare_top1[T]['numBetterRate'] = compare_top1[T]['gNumBetter'] / (compare_top1[T]['gNumBetter']+compare_top1[T]['gNumEqual']+compare_top1[T]['gNumWorse'])\n ground_gesNum = np.mean(ground_gesNum)#sum(ground_gesNum) / len(ground_gesNum)\n gesNum = np.mean(gesNum)#sum(gesNum) / len(gesNum)\n compare_top1[T]['gesNum'] = (ground_gesNum, gesNum)\n # pdb.set_trace()\nResult['top1'] = compare_top1\nResult['top5'] = compare_top5\n\ntorch.save(Result, 'Ensemble/ImageNet_Result.pt')\n\n\n","sub_path":"imagenet_ens.py","file_name":"imagenet_ens.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"114187416","text":"# Step Settings\nlambda_limit = 750\nload_wait = 8\nrandom_wait = 20\n\nmapper = {'human': 'GRCh38',\n 'mouse': 'GRCm38',\n 'fruit-fly': 'dm6',\n 'chicken': 'galGal5',\n 'zebrafish': 'GRCz11'}\n\npairs_mapper = {\"GRCh38\": \"hg38\",\n \"GRCm38\": \"mm10\",\n \"dm6\": 'dm6',\n \"galGal5\": \"galGal5\",\n \"GRCz11\": \"danRer11\"}\n\n\ndef step_settings(step_name, my_organism, attribution, overwrite=None):\n \"\"\"Return a setting dict for given step, and modify variables in\n output files; genome 
assembly, file_type, desc, contributing lab.\n overwrite is a dictionary, if given will overwrite keys in resulting template\n overwrite = {'config': {\"a\": \"b\"},\n 'parameters': {'c': \"d\"}\n }\n \"\"\"\n genome = \"\"\n genome = mapper.get(my_organism)\n pairs_assembly = pairs_mapper.get(genome)\n\n out_n = \"This is an output file of the Hi-C processing pipeline\"\n int_n = \"This is an intermediate file in the HiC processing pipeline\"\n # int_n_rep = \"This is an intermediate file in the Repliseq processing pipeline\"\n\n wf_dict = [\n {\n 'app_name': 'md5',\n 'workflow_uuid': 'c77a117b-9a58-477e-aaa5-291a109a99f6',\n \"config\": {\n \"ebs_size\": 10,\n \"instance_type\": 't3.small',\n 'EBS_optimized': True\n }\n },\n {\n 'app_name': 'fastqc',\n 'workflow_uuid': '49e96b51-ed6c-4418-a693-d0e9f79adfa5',\n \"config\": {\n \"ebs_size\": 10,\n \"instance_type\": 't3.small',\n 'EBS_optimized': True\n }\n },\n {\n 'app_name': 'pairsqc-single',\n 'workflow_uuid': 'b8c533e0-f8c0-4510-b4a1-ac35158e27c3',\n \"config\": {\"instance_type\": 't3.small'}\n },\n {\n 'app_name': 'bwa-mem',\n 'workflow_uuid': '3feedadc-50f9-4bb4-919b-09a8b731d0cc',\n 'parameters': {\"nThreads\": 16},\n 'custom_pf_fields': {\n 'out_bam': {\n 'genome_assembly': genome,\n 'file_type': 'intermediate file',\n 'description': int_n}\n }\n },\n {\n 'app_name': 'hi-c-processing-bam',\n 'workflow_uuid': '023bfb3e-9a8b-42b9-a9d4-216079526f68',\n 'parameters': {\"nthreads_merge\": 16, \"nthreads_parse_sort\": 16},\n 'custom_pf_fields': {\n 'annotated_bam': {\n 'genome_assembly': genome,\n 'file_type': 'alignments',\n 'description': out_n},\n 'filtered_pairs': {\n 'genome_assembly': genome,\n 'file_type': 'contact list-replicate',\n 'description': out_n}\n }\n },\n {\n 'app_name': 'hi-c-processing-pairs',\n 'workflow_uuid': '4dn-dcic-lab:wf-hi-c-processing-pairs-0.2.7',\n 'parameters': {\"nthreads\": 4,\n \"maxmem\": \"32g\",\n \"max_split_cooler\": 10,\n \"no_balance\": False\n },\n 'custom_pf_fields': {\n 'hic': {\n 'genome_assembly': genome,\n 'file_type': 'contact matrix',\n 'description': out_n},\n 'mcool': {\n 'genome_assembly': genome,\n 'file_type': 'contact matrix',\n 'description': out_n},\n 'merged_pairs': {\n 'genome_assembly': genome,\n 'file_type': 'contact list-combined',\n 'description': out_n}\n }\n },\n {\n 'app_name': 'imargi-processing-fastq',\n 'workflow_uuid': '7eedaaa8-4c2e-4c71-9d9a-04f05ab1becf',\n 'config': {'mem': 8, 'cpu': 4, 'ebs_size': '12x', 'EBS_optimized': 'true'},\n 'parameters': {\"nThreads\": 4},\n 'custom_pf_fields': {\n 'out_bam': {\n 'genome_assembly': genome,\n 'file_type': 'alignments',\n 'description': \"This is an alignment file for fastq pairs from the MARGI processing pipeline\"}\n }\n },\n {\n 'app_name': 'imargi-processing-bam',\n 'workflow_uuid': '4918e659-6e6c-444f-93c4-276c0d753537',\n 'config': {'mem': 8, 'cpu': 8, 'ebs_size': '10x', 'EBS_optimized': 'true'},\n 'parameters': {\"nthreads\": 8, \"assembly\": pairs_assembly},\n 'custom_pf_fields': {\n 'out_qc': {\n 'genome_assembly': genome,\n 'file_type': 'QC',\n 'description': 'This is an output file of the MARGI processing pipeline'},\n 'out_pairs': {\n 'genome_assembly': genome,\n 'file_type': 'contact list-replicate',\n 'description': 'This is an output file of the MARGI processing pipeline'}\n }\n },\n {\n 'app_name': 'imargi-processing-pairs',\n 'workflow_uuid': 'd3e33c23-7442-4f43-8601-337d2f04980a',\n 'config': {'mem': 8, 'cpu': 4, 'ebs_size': '10x', 'EBS_optimized': 'true'},\n 'custom_pf_fields': {\n 'out_mcool': {\n 
'genome_assembly': genome,\n 'file_type': 'contact matrix',\n 'description': 'This is an output file of the MARGI processing pipeline'},\n 'merged_pairs': {\n 'genome_assembly': genome,\n 'file_type': 'contact list-combined',\n 'description': 'This is an output file of the MARGI processing pipeline'}\n }\n },\n {\n 'app_name': 'repliseq-parta',\n 'workflow_uuid': '4dn-dcic-lab:wf-repliseq-parta-v16',\n \"parameters\": {\"nthreads\": 4, \"memperthread\": \"2G\"},\n 'custom_pf_fields': {\n 'filtered_sorted_deduped_bam': {\n 'genome_assembly': genome,\n 'file_type': 'alignments',\n 'description': 'This is an output file of the RepliSeq processing pipeline'},\n 'count_bg': {\n 'genome_assembly': genome,\n 'file_type': 'counts',\n 'description': 'read counts, unfiltered, unnormalized'}\n }\n },\n {\n \"app_name\": \"bedtobeddb\",\n \"workflow_uuid\": \"91049eef-d434-4e16-a1ad-06de73f079dc\",\n \"config\": {'mem': 4, 'cpu': 2, \"ebs_size\": 10},\n \"overwrite_input_extra\": True\n },\n {\n \"app_name\": \"bedtomultivec\",\n \"workflow_uuid\": \"a52b9b9d-1654-4967-883f-4d2adee77bc7\",\n 'config': {'mem': 4, 'cpu': 2, 'EBS_optimized': 'false'},\n \"overwrite_input_extra\": True\n },\n {\n \"app_name\": \"bedGraphToBigWig\",\n \"workflow_uuid\": \"68d412a1-b78e-4101-b353-2f3da6272529\",\n \"config\": {'mem': 4, 'cpu': 2, \"ebs_size\": 30},\n \"overwrite_input_extra\": True\n },\n {\n \"app_name\": \"merge-fastq\",\n \"workflow_uuid\": \"e20ef13d-64d8-4d10-94b1-ed45e7d6a7c2\",\n \"parameters\": {},\n 'custom_pf_fields': {\n 'merged_fastq': {\n 'genome_assembly': genome,\n 'file_type': 'reads-combined',\n 'description': 'Merged fastq file'\n }\n }\n },\n {\n \"app_name\": \"encode-chipseq-aln-chip\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-chipseq-aln-chip\",\n \"parameters\": {},\n \"config\": {},\n 'custom_pf_fields': {\n 'chip.first_ta': {\n 'genome_assembly': genome,\n 'file_type': 'read positions',\n 'description': 'Positions of aligned reads in bed format, one line per read mate, for control experiment, from ENCODE ChIP-Seq Pipeline'\n },\n 'chip.first_ta_xcor': {\n 'genome_assembly': genome,\n 'file_type': 'intermediate file',\n 'description': 'Counts file used only for QC'\n }\n }\n },\n {\n \"app_name\": \"encode-chipseq-aln-ctl\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-chipseq-aln-ctl\",\n \"parameters\": {},\n \"config\": {},\n 'custom_pf_fields': {\n 'chip.first_ta_ctl': {\n 'genome_assembly': genome,\n 'file_type': 'read positions',\n 'description': 'Positions of aligned reads in bed format, one line per read mate, for control experiment, from ENCODE ChIP-Seq Pipeline',\n 'disable_wfr_inputs': True}\n }\n },\n {\n \"app_name\": \"encode-chipseq-postaln\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-chipseq-postaln\",\n \"parameters\": {},\n \"config\": {},\n 'custom_pf_fields': {\n 'chip.optimal_peak': {\n 'genome_assembly': genome,\n 'file_type': 'peaks',\n 'description': 'Peak calls from ENCODE ChIP-Seq Pipeline'},\n 'chip.conservative_peak': {\n 'genome_assembly': genome,\n 'file_type': 'conservative peaks',\n 'description': 'Conservative peak calls from ENCODE ChIP-Seq Pipeline'},\n 'chip.sig_fc': {\n 'genome_assembly': genome,\n 'file_type': 'signal fold change',\n 'description': 'ChIP-seq signal fold change over input control'}\n }\n },\n {\n \"app_name\": \"encode-atacseq-aln\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-atacseq-aln\",\n \"parameters\": {},\n \"config\": {},\n 'custom_pf_fields': {\n 'atac.first_ta': {\n 'genome_assembly': genome,\n 'file_type': 
'read positions',\n 'description': 'Positions of aligned reads in bed format, one line per read mate, from ENCODE ATAC-Seq Pipeline'}\n }\n },\n {\n \"app_name\": \"encode-atacseq-postaln\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-atacseq-postaln\",\n \"parameters\": {},\n \"config\": {},\n 'custom_pf_fields': {\n 'atac.optimal_peak': {\n 'genome_assembly': genome,\n 'file_type': 'peaks',\n 'description': 'Peak calls from ENCODE ATAC-Seq Pipeline'},\n 'atac.conservative_peak': {\n 'genome_assembly': genome,\n 'file_type': 'conservative peaks',\n 'description': 'Conservative peak calls from ENCODE ATAC-Seq Pipeline'},\n 'atac.sig_fc': {\n 'genome_assembly': genome,\n 'file_type': 'signal fold change',\n 'description': 'ATAC-seq signal fold change'}\n }\n },\n {\n \"app_name\": \"mergebed\",\n \"workflow_uuid\": \"2b10e472-065e-43ed-992c-fccad6417b65\",\n \"parameters\": {\"sortv\": \"0\"},\n 'custom_pf_fields': {\n 'merged_bed': {\n 'genome_assembly': genome,\n 'file_type': 'read positions',\n 'description': 'Merged file, positions of aligned reads in bed format, one line per read mate'}\n }\n },\n {\n \"app_name\": \"insulation-scores-and-boundaries-caller\",\n \"workflow_uuid\": \"dc9efc2d-baa5-4304-b72b-14610d8d5fc4\",\n \"parameters\": {\"binsize\": -1, \"windowsize\": 100000},\n \"config\": {'mem': 32},\n 'custom_pf_fields': {\n 'bwfile': {\n 'genome_assembly': genome,\n 'file_type': 'insulation score-diamond',\n 'description': 'Diamond insulation scores calls on Hi-C contact matrices'},\n 'bedfile': {\n 'genome_assembly': genome,\n 'file_type': 'boundaries',\n 'description': 'Boundaries calls on Hi-C contact matrices'}\n }\n },\n {\n \"app_name\": \"compartments-caller\",\n \"workflow_uuid\": \"d07fa5d4-8721-403e-89b5-e8f323ac9ece\",\n \"parameters\": {\"binsize\": 250000, \"contact_type\": \"cis\"},\n \"config\": {'mem': 4, 'cpu': 1, 'ebs_size': '1.1x', 'EBS_optimized': 'false'},\n 'custom_pf_fields': {\n 'bwfile': {\n 'genome_assembly': genome,\n 'file_type': 'compartments',\n 'description': 'Compartments signals on Hi-C contact matrices'}\n },\n },\n {\n \"app_name\": \"rna-strandedness\",\n \"workflow_uuid\": \"af97597e-877a-40b7-b211-98ec0cfb17b4\",\n 'config': {'mem': 2, 'cpu': 2, \"instance_type\": \"t3.small\", 'ebs_size': '1.1x', 'EBS_optimized': 'false'}\n },\n # RNA SEQ\n {\n \"app_name\": \"encode-rnaseq-stranded\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-rnaseq-stranded\",\n \"parameters\": {\n 'rna.strandedness': 'stranded',\n 'rna.strandedness_direction': '',\n 'rna.endedness': ''\n },\n 'custom_pf_fields': {\n 'rna.outbam': {\n 'genome_assembly': genome,\n 'file_type': 'read positions',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.plusbw': {\n 'genome_assembly': genome,\n 'file_type': 'read counts (plus)',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.minusbw': {\n 'genome_assembly': genome,\n 'file_type': 'read counts (minus)',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.gene_expression': {\n 'genome_assembly': genome,\n 'file_type': 'gene expression',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.isoform_expression': {\n 'genome_assembly': genome,\n 'file_type': 'isoform expression',\n 'description': 'Output file from RNA seq pipeline'\n }\n }\n },\n {\n \"app_name\": \"encode-rnaseq-unstranded\",\n \"workflow_uuid\": \"4dn-dcic-lab:wf-encode-rnaseq-unstranded\",\n \"parameters\": {\n 'rna.strandedness': 'unstranded',\n 'rna.strandedness_direction': 'unstranded',\n 
'rna.endedness': 'paired'\n },\n 'custom_pf_fields': {\n 'rna.outbam': {\n 'genome_assembly': genome,\n 'file_type': 'read positions',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.outbw': {\n 'genome_assembly': genome,\n 'file_type': 'read counts',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.gene_expression': {\n 'genome_assembly': genome,\n 'file_type': 'gene expression',\n 'description': 'Output file from RNA seq pipeline'\n },\n 'rna.isoform_expression': {\n 'genome_assembly': genome,\n 'file_type': 'isoform expression',\n 'description': 'Output file from RNA seq pipeline'\n }\n }\n },\n {\n \"app_name\": \"bamqc\",\n \"workflow_uuid\": \"42683ab1-59bf-4ec5-a973-030053a134f1\",\n \"overwrite_input_extra\": False,\n \"config\": {\"ebs_size\": 10}\n },\n {\n \"app_name\": \"fastq-first-line\",\n \"workflow_uuid\": \"93a1a931-d55d-4623-adfb-0fa735daf6ae\",\n \"overwrite_input_extra\": False,\n 'config': {'mem': 2, 'cpu': 2, \"instance_type\": \"t3.small\"}\n },\n {\n \"app_name\": \"re_checker_workflow\",\n \"workflow_uuid\": \"8479d16e-667a-41e9-8ace-391128f50dc5\",\n \"parameters\": {},\n \"config\": {\"mem\": 4,\n \"ebs_size\": 10,\n \"instance_type\": \"t3.medium\"\n }\n },\n {\n \"app_name\": \"mad_qc_workflow\",\n \"workflow_uuid\": \"4dba38f0-af7a-4432-88e4-ca804dea64f8\",\n \"parameters\": {},\n \"config\": {\"ebs_size\": 10, \"instance_type\": \"t3.medium\"}\n },\n {\n \"app_name\": \"mcoolQC\",\n \"workflow_uuid\": \"0bf9f47a-dec1-4324-9b41-fa183880a7db\",\n \"overwrite_input_extra\": False,\n \"config\": {\"ebs_size\": 10, \"instance_type\": \"c5ad.2xlarge\"}\n },\n # temp\n {\n \"app_name\": \"\",\n \"workflow_uuid\": \"\",\n \"parameters\": {},\n 'custom_pf_fields': {\n '': {\n 'genome_assembly': genome,\n 'file_type': '',\n 'description': ''}\n }\n }]\n\n template = [i for i in wf_dict if i['app_name'] == step_name][0]\n\n update_config = {\n \"ebs_type\": \"gp2\",\n \"spot_instance\": False,\n \"ebs_iops\": \"\",\n \"log_bucket\": \"tibanna-output\",\n \"key_name\": \"4dn-encode\",\n \"public_postrun_json\": True,\n \"behavior_on_capacity_limit\": \"retry_without_spot\"\n }\n if template.get('config'):\n temp_conf = template['config']\n for a_key in update_config:\n if a_key not in temp_conf:\n temp_conf[a_key] = update_config[a_key]\n else:\n template['config'] = update_config\n\n if not template.get('parameters'):\n template['parameters'] = {}\n template['common_fields'] = attribution\n if overwrite:\n for a_key in overwrite:\n for a_spec in overwrite[a_key]:\n # if the key value is a dictionary, set default and use update\n if isinstance(overwrite[a_key][a_spec], dict):\n template[a_key].setdefault(a_spec, {}).update(overwrite[a_key][a_spec])\n # if it is string array bool, set the value\n else:\n template[a_key][a_spec] = overwrite[a_key][a_spec]\n return template\n","sub_path":"chalicelib_fourfront/checks/helpers/wfrset_utils.py","file_name":"wfrset_utils.py","file_ext":"py","file_size_in_byte":17372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631906620","text":"# Copyright (C) 2017 Boston College\r\n# http://www.bostoncollege.edu\r\n#\r\n# BC Proprietary Information\r\n#\r\n# US Government retains Unlimited Rights\r\n# Non-Government Users – restricted usage as defined through\r\n# licensing with STR or via arrangement with Government.\r\n#\r\n# In no event shall the initial developers or copyright holders be\r\n# liable for any damages whatsoever, including - but not 
restricted\r\n# to - lost revenue or profits or other direct, indirect, special,\r\n# incidental or consequential damages, even if they have been\r\n# advised of the possibility of such damages, except to the extent\r\n# invariable law, if any, provides otherwise.\r\n#\r\n# The Software is provided AS IS with NO\r\n# WARRANTY OF ANY KIND, INCLUDING THE WARRANTY OF DESIGN,\r\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.\r\n\r\n## @package IonoModelEngine.DataControl.Stations\r\n# STATIC Ionosonde station locations; This should not change\r\n\r\nimport unittest\r\nimport Shared.Utils.HfgeoLogger as Logger\r\n\r\nlogger = Logger.getLogger()\r\n\r\n## Class for information of stations\r\n#\r\nclass Station:\r\n\r\n\t\t__slots__ = (\"name\", \"longitude\", \"latitude\")\r\n\r\n\t\tdef __init__(self, name, longitude, latitude):\r\n\t\t\t\t## Name of the station\r\n\t\t\t\tself.name = name\r\n\t\t\t\t## Longitude in degrees\r\n\t\t\t\tself.longitude = longitude\r\n\t\t\t\t## Latitude in degrees\r\n\t\t\t\tself.latitude = latitude\r\n\r\n## Get the station object by URSI name\r\n#\r\n# @param ursi_name - string - name of the station\r\n#\r\n# @retval station - Station - a station object\r\n#\r\ndef getStation(ursi_name):\r\n\t\tif ursi_name == 'BC840':\r\n\t\t\t\treturn Station('BC840', 254.7, 40.0)\r\n\t\telif ursi_name == 'AU930':\r\n\t\t\t\treturn Station('AU930', 262.3, 30.4)\r\n\t\telif ursi_name == 'EG931':\r\n\t\t\t\treturn Station('EG931', 273.5, 30.5)\r\n\t\telif ursi_name == 'WP937':\r\n\t\t\t\treturn Station('WP937', 284.5, 37.9)\r\n\t\telif ursi_name == 'PA836':\r\n\t\t\t\treturn Station('PA836', 239.5, 34.8)\r\n\t\telif ursi_name == 'MHJ45':\r\n\t\t\t\treturn Station('MHJ45', 288.5, 42.6)\r\n\t\telif ursi_name == 'IF843':\r\n\t\t\t\treturn Station('IF843', 247.3, 43.8)\r\n\t\telif ursi_name == 'AL945':\r\n\t\t\t\treturn Station('AL945', 276.44, 45.07)\r\n\r\n## Get the station latlon as a tuple\r\n#\r\n# @param ursi_name - string - name of the station\r\n#\r\n# @retval latlon - (number, number) - latlon of the station\r\n#\r\ndef getStationLatLon(ursi_name):\r\n\t\tif ursi_name == 'BC840':\r\n\t\t\t\treturn (40.0, 254.7)\r\n\t\telif ursi_name == 'AU930':\r\n\t\t\t\treturn (30.4, 262.3)\r\n\t\telif ursi_name == 'EG931':\r\n\t\t\t\treturn (30.5, 273.5)\r\n\t\telif ursi_name == 'WP937':\r\n\t\t\t\treturn (37.9, 284.5)\r\n\t\telif ursi_name == 'PA836':\r\n\t\t\t\treturn (34.8, 239.5)\r\n\t\telif ursi_name == 'MHJ45':\r\n\t\t\t return (42.6, 288.5)\r\n\t\telif ursi_name == 'IF843':\r\n\t\t\t return (43.8, 247.3)\r\n\t\telif ursi_name == 'AL945':\r\n\t\t\t\treturn (45.07, 276.44)\r\n\t\telse:\r\n\t\t\t\treturn (None, None)\r\n\r\nclass UnitTest_Stations(unittest.TestCase):\r\n\r\n def test(self):\r\n# stations = Station('MHJ',23,134)\r\n (lat,lon) = getStationLatLon('BC840')\r\n self.assertEqual(lat,40.0)\r\n self.assertEqual(lon,254.7)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n logger.setLevel('INFO')\r\n unittest.main()\r\n","sub_path":"call-04/Send_RIPE/IonoModelEngine/DataControl/Stations.py","file_name":"Stations.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"99694071","text":"#-*-coding:utf-8 -*-\r\n\"\"\"\r\n 447. 
Number of Boomerangs\r\n Directed by user zhongch4g\r\n current system date 2017/4/25\r\n\"\"\"\r\nimport itertools\r\nimport collections\r\nfrom itertools import combinations\r\nclass Solution(object):\r\n def numberOfBoomerangs(self, points):\r\n \"\"\"\r\n :type points: List[List[int]]\r\n :rtype: int\r\n \"\"\"\r\n def distance(vector1,vector2):\r\n d=0;\r\n for a,b in zip(vector1,vector2):\r\n d+=(a-b)**2;\r\n return d\r\n\r\n # get all distinct subsequences of length 3\r\n listOfBoomerangs = list(itertools.combinations(points, 3))\r\n countBoomerangs = 0\r\n for pair1 in listOfBoomerangs:\r\n # check whether pair1 can form an isosceles triangle\r\n if len(set([distance(pair1[0], pair1[1]), distance(pair1[0], pair1[2]), distance(pair1[1], pair1[2])])) != 3:\r\n continue\r\n\r\n newlist = list(itertools.permutations(pair1, 3))\r\n # print newlist, \" ---- \"\r\n for pair in newlist:\r\n # print pair[0][0] - pair[1][0], pair[0][0] - pair[2][0], pair[0][1] - pair[1][1], pair[0][1] - pair[2][1]\r\n # print abs(pair[0][0] - pair[1][0]), abs(pair[0][0] - pair[2][0])\r\n # if abs(pair[0][0] - pair[1][0]) == abs(pair[0][0] - pair[2][0]) and abs(pair[0][1] - pair[1][1]) == abs(pair[0][1] - pair[2][1]):\r\n\r\n if distance(pair[0], pair[1]) == distance(pair[0], pair[2]):\r\n countBoomerangs += 1\r\n return countBoomerangs\r\n\r\n # clear version\r\n # \"For every point, there are k points with distance d, so there are k*(k-1) pairwise with distance d.\"!!!\r\n def numberOfBoomrangs1(self, points):\r\n res = 0\r\n for p in points:\r\n cmap = {}\r\n for q in points:\r\n f = p[0] - q[0]\r\n s = p[1] - q[1]\r\n cmap[f * f + s * s] = 1 + cmap.get(f * f + s * s, 0)\r\n print(cmap)\r\n for k in cmap:\r\n res += cmap[k] * (cmap[k] - 1)\r\n return res\r\n\r\n# print(list(combinations([[0,0], [1,0], [2,0], [3, 0]], 3)))\r\ninstance = Solution()\r\nprint (instance.numberOfBoomrangs1([[0,0],[2,0],[-2,0],[0,2],[0,-2]])) # expect 20\r\n\r\n# l = [1, 2, 3]\r\n# print list(combinations_with_replacement(l, 3))\r\n# print list(product(l, repeat = 2))\r\n# print list(itertools.permutations(l, 3))","sub_path":"LeetCode/447. 
Number of Boomerangs.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410093500","text":"import random\n\nfrom medicines.models import Medicine, Pharmacy, PharmacyMedicine\n\n\ndef insert_pharmacy_medicine():\n medicines = Medicine.objects.all()\n pharmacies = Pharmacy.objects.all()\n pharmacy_len = len(pharmacies)\n pharmacy_medicine_instances = []\n for medicine in medicines:\n price = random.randint(1, 11) * 10000\n for i in range(1, random.randint(1, pharmacy_len)):\n pharmacy = random.choice(pharmacies)\n price = price + random.randint(1, 9) * 500\n pharmacy_medicine = PharmacyMedicine(medicine=medicine,\n pharmacy=pharmacy,\n price=price)\n pharmacy_medicine_instances.append(pharmacy_medicine)\n\n PharmacyMedicine.objects.bulk_create(pharmacy_medicine_instances)\n print(\"Successfully added\")\n\n\ndef run():\n insert_pharmacy_medicine()\n","sub_path":"src/medicines/scripts/load_pharmacy_medicine.py","file_name":"load_pharmacy_medicine.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"95159761","text":"import numpy as np\n\nclass chemkin:\n def __init__(self):\n self.r = 8.314\n\n def constant(self, k):\n \"\"\"Returns the constant reaction rate coefficient.\n\n INPUTS\n =======\n k: int or float, the constant reaction rate coefficient\n\n RETURNS\n ========\n k: float, the constant reaction rate coefficient\n\n EXAMPLES\n =========\n >>> constant(10.0)\n 10.0\n \"\"\"\n try:\n k = float(k)\n except ValueError:\n return \"Error: unable to convert k to float!\"\n return k\n\n def arrhenius(self, a, e, t):\n \"\"\"Returns the Arrhenius reaction rate coefficient.\n\n INPUTS\n =======\n a: int or float, Arrhenius prefactor, A, is strictly positive\n e: int or float, the activation energy for the reaction\n t: int or float, the temperature T, must be positive (assuming a Kelvin scale)\n\n RETURNS\n ========\n k: float, the Arrhenius reaction rate coefficient\n\n EXAMPLES\n =========\n >>> arrhenius(10,10,10)\n 8.8667297841210573\n \"\"\"\n try:\n a = float(a)\n e = float(e)\n t = float(t)\n except TypeError:\n return \"Error: unable to convert all parameters to float!\"\n if a <= 0:\n raise ValueError(\"Arrhenius prefactor a is non-positive!\")\n if t <= 0:\n raise ValueError(\"Temperature t is non-positive!\")\n return a*np.exp(-e/(self.r*t))\n\n def modified(self, a, b, e, t):\n \"\"\"Returns the modified Arrhenius reaction rate coefficient.\n\n INPUTS\n =======\n a: int or float, Arrhenius prefactor, A, is strictly positive\n b: int or float, fitted rate constant\n e: int or float, the activation energy for the reaction\n t: int or float, the temperature T, must be positive (assuming a Kelvin scale)\n\n RETURNS\n ========\n k: float, the modified Arrhenius reaction rate coefficient\n\n EXAMPLES\n =========\n >>> modified(10**7,0.5,10**3,10**2)\n 30035490.889639609\n \"\"\"\n try:\n a = float(a)\n b = float(b)\n e = float(e)\n t = float(t)\n except TypeError:\n return \"Error: unable to convert all parameters to float!\"\n if a <= 0:\n raise ValueError(\"Arrhenius prefactor a is non-positive!\")\n if isinstance(b, complex):\n raise ValueError(\"b is complex number!\")\n if t <= 0:\n raise ValueError(\"Temperature t is non-positive!\")\n return a*(t**b)*np.exp(-e/(self.r*t))\n\n def reaction_rates(self, rates, T):\n \"\"\"Calculates reaction rates for a list of reactions.\n \n INPUTS\n 
=======\n rates: dictionary containing reaction rate type and all needed parameters\n T: int or float, environment temperature\n\n RETURNS\n =======\n k: list of floats, has length m where m is the number of reactions\n\n EXAMPLES\n >>> reaction_rates([{'type': 'Arrhenius', 'A': 35200000000.0, 'E': 71400.0}, {'type': 'modifiedArrhenius', 'A': 0.0506, 'E': 26300.0, 'b': 2.7}, {'type': 'Constant', 'k': 1000.0}], 1500)\n [114837571.22536749, 2310555.9199959813, 1000.0]\n \"\"\"\n\n k = []\n for reaction in rates:\n if reaction['type'] == 'Arrhenius':\n k.append(self.arrhenius(reaction['A'], reaction['E'], T))\n elif reaction['type'] == 'modifiedArrhenius':\n k.append(self.modified(reaction['A'], reaction['b'], reaction['E'], T))\n elif reaction['type'] == 'Constant':\n k.append(self.constant(reaction['k']))\n return k\n\n def progress_u(self, k, x, v):\n \"\"\"Returns the progress rate of a single reaction.\n\n INPUTS\n =======\n k: int or float, the reaction rate coefficient\n x: list of concentration of each species\n v: list of Stoichiometric coefficients of reactants\n\n RETURNS\n ========\n progress rate: float, the progress rate of a reaction\n\n EXAMPLES\n =========\n >>> progress_u(10,[1,2,3],[2,1,0])\n 20\n \"\"\"\n if len(x) != len(v):\n raise ValueError(\"Length of x and v does not match!\")\n n = len(x)\n p = 1\n for i in range(n):\n p = p * (x[i]**v[i])\n return k*p\n\n def progress(self, k, x, v1):\n \"\"\"Returns the progress rate of a system of reactions.\n\n INPUTS\n =======\n k: list of reaction rate coefficients for each reaction\n x: list of concentration of each species\n v1: matrix (list of list) of Stoichiometric coefficients of reactants of each reaction\n\n RETURNS\n ========\n progress rate: list of progress rate of each reaction\n\n EXAMPLES\n =========\n >>> progress([10,10],[1,2,1],[[1,2,0],[2,0,2]])\n [40, 10]\n \"\"\"\n m = len(v1)\n if m != len(k):\n raise ValueError(\"Number of k does not match number of reactions!\")\n n = len(x)\n w = []\n for i in range(m):\n if len(v1[i]) != n:\n raise ValueError(\"Error in dimension of v values!\")\n w.append(self.progress_u(k[i],x,v1[i]))\n return w\n\n def reaction(self, k, x, v1, v2):\n \"\"\"Returns the reaction rate of a system of reactions for each species.\n\n INPUTS\n =======\n k: list of reaction rate coefficients for each reaction\n x: list of concentration of each species\n v1: matrix (list of list) of Stoichiometric coefficients of reactants of each reaction\n v2: matrix (list of list) of Stoichiometric coefficients of products of each reaction\n\n RETURNS\n ========\n reaction rate: list of rate of consumption or formation of species\n\n EXAMPLES\n =========\n >>> reaction([10,10],[1,2,1],[[1,2,0],[0,0,2]],[[0,0,1],[1,2,0]])\n [-30, -60, 20]\n \"\"\"\n w = self.progress(k,x,v1)\n f = []\n m = len(w)\n if m != len(k):\n raise ValueError(\"Number of k does not match number of reactions!\")\n n = len(x)\n for i in range(n):\n f.append(0)\n for j in range(m):\n if len(v1[j]) != n or len(v2[j]) != n:\n raise ValueError(\"Error in dimension of v values!\")\n f[i] = f[i] + ((v2[j][i]-v1[j][i])*w[j])\n return f\n\n\n def __repr__(self):\n class_name = type(self).__name__\n return \"{0} has params {1}\".format(class_name, self.__dict__)\n","sub_path":"chemkin.py","file_name":"chemkin.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"21033482","text":"import unittest\n\nfrom Products.Five import zcml\nfrom Products.Five import 
fiveconfigure\nfrom Products.PloneTestCase import PloneTestCase as ptc\nfrom Products.PloneTestCase.layer import PloneSite\n\nptc.setupPloneSite(products=['collective.js.fullcalendar'])\n\nfrom Products.CMFCore.utils import getToolByName\n\nimport collective.js.fullcalendar\n\n\nclass TestCase(ptc.PloneTestCase):\n class layer(PloneSite):\n @classmethod\n def setUp(cls):\n fiveconfigure.debug_mode = True\n zcml.load_config('configure.zcml',\n collective.js.fullcalendar)\n fiveconfigure.debug_mode = False\n\n @classmethod\n def tearDown(cls):\n pass\n \n def afterSetUp(self):\n self.js_res_basepath = \"++resource++collective.js.fullcalendar/\"\n self.css_res_basepath = \"++resource++collective.js.fullcalendar/\"\n self.js_files = ['fullcalendar.gcal.js',\n 'fullcalendar.min.js',\n ]\n self.css_files = ['fullcalendar.css',\n ]\n\n def test_portal_js(self):\n p_js = getToolByName(self.portal,'portal_javascripts')\n for js_name in self.js_files:\n self.failUnless(self.js_res_basepath + js_name in p_js.getResourceIds(),\n \"%s not found in portal_javascripts\" % js_name)\n \n def test_portal_css(self):\n p_css = getToolByName(self.portal,'portal_css')\n for css_name in self.css_files:\n self.failUnless(self.css_res_basepath + css_name in p_css.getResourceIds(),\n \"%s not found in portal_css\" % css_name)\n \n def test_js_resources(self):\n for js_name in self.js_files:\n try:\n self.portal.restrictedTraverse(self.js_res_basepath + js_name)\n except AttributeError:\n self.fail('%s resource not found' % js_name)\n \n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestCase))\n return suite\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"buildout-cache/eggs/collective.js.fullcalendar-1.6.4-py2.7.egg/collective/js/fullcalendar/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458912031","text":"import time\nimport cgi\nimport os\n\nfrom copy import deepcopy\n\nfrom flask import g, request, redirect\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.sql.expression import desc, or_, and_\nfrom webhelpers.paginate import Page, PageURL\n\nfrom flask import jsonify\n\nfrom traffgroup.core.X.xflask import Route, Controller, capp\nfrom traffgroup.core.X.mako import render_template\nfrom traffgroup.core.model.project import Project\nfrom traffgroup.core.model.exit_pages import ExitPages\nfrom traffgroup.x.data import XDC\nfrom traffgroup.x.security import CProtect\nfrom traffgroup.core.model.design import Design\nfrom traffgroup.core.model.site import Site\nfrom traffgroup.x.util.func import try_int\nfrom traffgroup.x.util.subrates import (set_globals,\n get_available_subrates,\n get_selected_subrates_from_req,\n get_all_pseudo_subrates,\n get_recommended_subrates)\nfrom traffgroup.core.model.category import Category\nfrom traffgroup.core.model.billing.operator import Operator\nfrom traffgroup.core.model.billing.subrate import SubRate\nfrom traffgroup.core.model.billing.contentprovider import ContentProvider\nfrom traffgroup.core.model.meta import Session\nfrom traffgroup.partners.lib.security import HasProfile\nfrom traffgroup.core.model.partners.partner import Partner\nfrom traffgroup.core.model.security.account import Account\n\n\nGALERY_DIR = '/usr/local/jenkins/jobs/mf-templates/workspace/partnership/static/thumbs/sites/galery/'\n\nPER_PAGE = 
10\n\n\n@CProtect(HasProfile())\n@Controller('/members/project')\nclass ProjectController():\n @Route('/')\n def pr_list(self):\n g.state = request.values.get('state', '')\n if g.state == 'hidden':\n state = Project.State.HIDDEN\n else:\n state = Project.State.ACTIVE\n\n g.page_num = (abs(try_int(request.values.get('page'))) or 1) - 1\n g.partner = Partner.One(id=XDC.main.uid)\n\n g.projects = Project.Filter(Project.account == XDC.main.uid, state=state)\n g.cat_filter = try_int(request.cookies.get('main_sites_list_cat_filter', 0))\n if g.cat_filter:\n sites = Site.Filter(Site.category == g.cat_filter)\n designs = Design.Filter(Design.site.in_([s.id for s in sites]))\n g.projects = g.projects.filter(Project.design.in_([d.id for d in designs]))\n else:\n sites = Site.All()\n designs = Design.All()\n projects_num = g.projects.count()\n\n while g.page_num * PER_PAGE > projects_num and not g.page_num == 0:\n g.page_num -= 1\n\n g.projects = g.projects.order_by(desc(Project.ts_spawn)) \\\n .limit(PER_PAGE) \\\n .offset(g.page_num * PER_PAGE).all()\n\n g.designs = dict([(d.id, d) for d in designs])\n g.sites = dict([(s.id, s) for s in sites])\n g.categories = dict([(c.id, c) for c in Category.All()])\n g.operators = dict([(o.id, o) for o in Operator.Filter(Operator.state == Operator.State.ACTIVE).all()])\n g.subrates = dict([(s.id, s) for s in SubRate.All()])\n g.contentproviders = dict([(c.id, c) for c in ContentProvider.All()])\n\n params = {}\n\n g.paginator = Page(range(projects_num),\n items_per_page=PER_PAGE,\n page=g.page_num + 1,\n url=PageURL('/members/project', params))\n\n return render_template('members/project/list.mako')\n\n @Route('/delete/')\n def delete_project(self, prj_id):\n project = Project.One(id=prj_id)\n Session.remove(project)\n Session.flush()\n Session.commit()\n\n return redirect(\"/members/project/\")\n\n @Route('/new')\n @Route('/edit//main')\n def create_new(self, prj_id=None):\n g.partner = Partner.Get(XDC.main.uid)\n try:\n if prj_id:\n g.project = Project.One(id=prj_id, account=XDC.main.uid)\n else:\n g.project = Project()\n except:\n return redirect('/members/project/')\n\n\n allowed_categories = g.partner.categories or [-1]\n capp.logger.info(\"Allowed cats for parther [%s] : %s\", g.partner.id, allowed_categories)\n\n g.sites = Site.Filter(Site.state == Site.State.ACTIVE) \\\n .filter(Site.category.in_(allowed_categories)) \\\n .order_by(desc(Site.id)) \\\n .all()\n\n capp.logger.info(\"Sites: %s\", g.sites)\n\n designs_q = Session.query(Design) \\\n .filter(Design.site.in_([s.id for s in g.sites]))\n\n g.account = Account.One(id=XDC.main.uid)\n if g.account.type in [Account.Type.DEVELOPER]:\n designs_q = designs_q.filter(Design.state.in_([Design.State.ACTIVE,\n Design.State.UNDER_CONSTRUCTION]))\n else:\n designs_q = (designs_q\n .filter(Design.state == Design.State.ACTIVE)\n .filter(or_(Design.allowed_partners.op(\"&&\")([XDC.main.uid]),\n Design.allowed_pgroups.op(\"&&\")([g.partner.group]),\n and_(Design.allowed_partners == [],\n Design.allowed_pgroups == []))))\n\n capp.logger.info('DESIGN QUERY: [%s]', designs_q)\n\n designs_db = designs_q.order_by(desc(Design.id)).all()\n g.categories = dict([(c.id, c) for c in Category.All(state=Category.State.ACTIVE)])\n\n g.designs_all = dict([(d.id, d) for d in designs_db])\n g.designs = {}\n\n for site in g.sites:\n g.designs[site.id] = [d for d in designs_db if d.site == site.id and d]\n\n return render_template('members/project/new.site.mako')\n\n @Route('/new_rates')\n @Route('/edit//rates')\n def 
create_rates(self, prj_id=None):\n try:\n if prj_id:\n g.project = Project.One(id=prj_id, account=XDC.main.uid)\n else:\n g.project = Project()\n except:\n return redirect('/members/project/')\n\n g.design = (Design\n .Filter(Design.id == int(request.args['design']))\n .filter(Design.state.in_([Design.State.ACTIVE, Design.State.UNDER_CONSTRUCTION]))\n .one())\n g.site = Site.Get(g.design.site)\n g.categories = Category.All(state=Category.State.ACTIVE)\n\n set_globals()\n g.subrates, g.pseudo = get_available_subrates()\n g.recommended_subrates = get_recommended_subrates()\n\n return render_template('members/project/new.rates.mako')\n\n @Route('/new_extra')\n @Route('/edit//extra')\n def create_extra(self, prj_id=None):\n try:\n if prj_id:\n g.project = Project.One(id=prj_id, account=XDC.main.uid)\n else:\n g.project = Project()\n except NoResultFound:\n return redirect('/members/project/')\n\n g.design = (Design\n .Filter(id=int(request.args['design']))\n .filter(Design.state.in_([Design.State.ACTIVE, Design.State.UNDER_CONSTRUCTION]))\n .one())\n\n g.site = Site.Get(g.design.site)\n g.categories = Category.All(state=Category.State.ACTIVE)\n\n set_globals()\n g.subrates, g.pseudo_subrates = get_selected_subrates_from_req()\n\n # Select available combinations for pseudo rates\n g.ps_types = get_all_pseudo_subrates(g.pseudo_subrates)\n\n return render_template('members/project/new.extra.mako')\n\n @Route('/new_finish')\n @Route('/edit//finish')\n def create_finish(self, prj_id=None):\n try:\n if prj_id:\n project = Project.One(id=prj_id, account=XDC.main.uid)\n else:\n project = Project()\n except:\n return redirect('/members/project/')\n\n\n g.design = Design.Filter(Design.id == int(request.args['design'])).filter(Design.state.in_([Design.State.ACTIVE,\n Design.State.UNDER_CONSTRUCTION])) \\\n .one()\n g.site = Site.One(id=g.design.site) # need for key in dict\n\n ps_combo = map(int, request.values[\"ps_combo\"].split())\n pseudo_rates = map(int, request.values[\"pseudo_subrates\"].split())\n\n project.name = cgi.escape(request.values[\"name\"])\n project.design = g.design.id\n project.account = XDC.main.uid\n project.sub_rates = map(int, request.values[\"subrates\"].split())\n\n for i in range(len(project.sub_rates)):\n if not project.sub_rates[i]:\n project.sub_rates[i] = ps_combo[pseudo_rates[i]] # install pseudo sub rates\n\n if \"fallback_designs\" in request.args:\n project.fallback_designs = map(int, request.args.getList(\"fallback_designs\"))\n else:\n project.fallback_designs = []\n\n if not prj_id:\n project.ts_spawn = int(time.time())\n\n project.type = Project.Type.REGULAR #@ReservedAssignment\n project.state = Project.State.ACTIVE\n project.flags = Project.Flag.SHOW_EXIT_PAGE\n\n check_age = request.args.get('age')\n opts = deepcopy(project.options) if project.options else dict()\n for scheme in range(3):\n if scheme not in opts:\n opts[scheme] = {}\n\n opts[scheme].update(check18=try_int(check_age))\n\n project.options = opts\n\n if not prj_id:\n Session.add(project)\n\n Session.flush()\n Session.commit()\n\n return redirect(\"/members/project/\")\n\n @Route('/toggle/')\n def toggle(self, prj_id=None):\n if not prj_id:\n return redirect('/members/project')\n\n project = Project.One(id=prj_id)\n if project.state == Project.State.HIDDEN:\n project.state = Project.State.ACTIVE\n elif project.state == Project.State.ACTIVE:\n project.state = Project.State.HIDDEN\n try:\n exit_projects = ExitPages.Filter(ExitPages.account == XDC.main.uid).one()\n epr = []\n\n for i in 
exit_projects.projects:\n epr.append(i)\n\n if project.id in epr:\n epr.remove(project.id)\n\n exit_projects.projects = epr\n\n except Exception as e:\n capp.logger.info(\"Query: %s\", e.message)\n else:\n pass\n\n Session.flush()\n Session.commit()\n return redirect('/members/project')\n\n @Route('/get_thumbs', methods=['POST'])\n def list_dir(self):\n prj_id = request.values.get('prj_id', None)\n if not prj_id:\n return jsonify(error=1)\n\n dir_path = GALERY_DIR + str(prj_id)\n fl_list = []\n\n if os.path.isdir(dir_path):\n fl_list = os.listdir(dir_path)\n else:\n return jsonify(error=2)\n\n if len(fl_list) > 0:\n return jsonify(error=0, fl=fl_list, p_id=prj_id)\n else:\n return jsonify(error=3)\n\n\n @Route('/exit_page')\n def exit_page_render(self):\n #g.projects = Project.Filter(account = XDC.main.uid, state = Project.State.ACTIVE)\\\n # .order_by(desc(Project.ts_spawn)).all()\n ADULT = 1\n ENT = 2\n q = (Session.query(Project)\n .filter(Account.id == Project.account)\n .filter(Design.id == Project.design)\n .filter(Site.id == Design.site)\n .filter(Category.id == Site.category))\n\n capp.logger.info(\"Query: %s\", q)\n q = q.filter(Account.id == XDC.main.uid)\n q = q.filter(Project.state == Project.State.ACTIVE)\n q = q.filter(Category.id == ADULT)\n q = q.order_by(desc(Project.ts_spawn))\n g.projects = q.all()\n\n g.designs = Design.All()\n g.partner = Partner.One(id=XDC.main.uid)\n\n try:\n g.page = ExitPages.One(account=XDC.main.uid)\n except:\n g.page = ExitPages()\n\n return render_template('members/project/exit_page.mako')\n\n\n @Route('/toggle_exit_page', methods=['POST'])\n def toggle_exit_page(self):\n try:\n page = ExitPages.One(account=XDC.main.uid)\n\n except Exception as e:\n page = ExitPages()\n\n page.state = try_int(request.values.get('toggle_exit'))\n\n if not page.id:\n page.account = XDC.main.uid\n Session.add(page)\n\n Session.flush()\n Session.commit()\n\n return jsonify(state=page.state)\n\n\n @Route('/save_exit_page', methods=['POST'])\n def save_exit_page(self):\n try:\n projects = []\n page = ExitPages.One(account=XDC.main.uid)\n num_projects = try_int(request.values.get('num_projects'))\n\n for i in range(num_projects):\n req_id = try_int(request.values.get(str(i)))\n if req_id != 0:\n projects.append(req_id)\n\n page.projects = projects\n\n Session.flush()\n Session.commit()\n\n return jsonify(error=0)\n except Exception as e:\n return jsonify(error=10, message=e.message)\n","sub_path":"traffgroup/partners/controllers/members/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540060690","text":"import PyPDF2\r\nimport os\r\n\r\n# create a PdfFileMerger object\r\n# add files with the append() method\r\n# write out with the write() method\r\n# close with close()\r\n\r\ndef pdf_merge(root_path, merge_list, delete_file=True):\r\n for item in merge_list:\r\n merger = PyPDF2.PdfFileMerger()\r\n for pdf in merge_list[item]:\r\n merger.append(pdf)\r\n if delete_file == True:\r\n os.remove(pdf)\r\n merger.write(root_path + item + '.pdf')\r\n merger.close()\r\n","sub_path":"unite_pdf.py","file_name":"unite_pdf.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617196104","text":"import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport threading\nimport random\nimport sys\nimport time\nimport re\nimport json\nimport os\nimport base64\nimport cloudscraper\nimport 
string\nfrom urllib3.exceptions import HTTPError\nimport csv\nimport tls as client\nfrom requests_toolbelt import MultipartEncoder\n\nfrom utils.captcha import captcha\nfrom utils.logger import logger\nfrom utils.webhook import Webhook\nfrom utils.log import log\nfrom utils.functions import (loadSettings, loadProfile, loadProxy, createId, loadCookie, loadToken, sendNotification, injection,storeCookies, updateConsoleTitle, scraper)\nimport utils.config as config\n\n_SITE_ = 'TITOLO'\nSITE = 'Titolo'\nclass TITOLO:\n def success(self,message):\n logger.success(SITE,self.taskID,message)\n def error(self,message):\n logger.error(SITE,self.taskID,message)\n def prepare(self,message):\n logger.prepare(SITE,self.taskID,message)\n def warning(self,message):\n logger.warning(SITE,self.taskID,message)\n def info(self,message):\n logger.info(SITE,self.taskID,message)\n def secondary(self,message):\n logger.secondary(SITE,self.taskID,message)\n def alert(self,message):\n logger.alert(SITE,self.taskID,message)\n\n\n def task_checker(self):\n originalTask = self.task\n while True:\n with open('./{}/tasks.csv'.format(_SITE_.lower()),'r') as csvFile:\n csv_reader = csv.DictReader(csvFile)\n row = [row for idx, row in enumerate(csv_reader) if idx in (self.rowNumber,self.rowNumber)]\n self.task = row[0]\n try:\n self.task['ACCOUNT EMAIL'] = originalTask['ACCOUNT EMAIL']\n self.task['ACCOUNT PASSWORD'] = originalTask['ACCOUNT PASSWORD']\n except:\n pass\n csvFile.close()\n\n time.sleep(2)\n\n def __init__(self, task, taskName, rowNumber):\n self.task = task\n self.taskID = taskName\n self.rowNumber = rowNumber\n\n if self.rowNumber != 'qt': \n threading.Thread(target=self.task_checker,daemon=True).start()\n\n try:\n # self.session = client.Session(browser=client.Fingerprint.CHROME_83)\n self.session = scraper()\n except Exception as e:\n self.error(f'error => {e}')\n self.__init__(task,taskName,rowNumber)\n\n self.baseSite = 'https://www.titoloshop.com'\n\n try:\n split = self.task[\"PRODUCT\"].split(\"titoloshop.\")[1]\n self.reg = 'eu_en'\n except:\n self.reg = 'ch_en'\n\n\n self.webhookData = {\n \"site\":SITE,\n \"product\":\"n/a\",\n \"size\":\"n/a\",\n \"image\":\"https://i.imgur.com/VqWvzDN.png\",\n \"price\":\"0\",\n \"profile\":self.task['PROFILE'],\n \"speed\":0,\n \"url\":\"https://venetiacli.io\",\n \"paymentMethod\":\"n/a\",\n \"proxy\":\"n/a\",\n \"product_url\":self.task['PRODUCT']\n }\n\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n\n self.profile = loadProfile(self.task[\"PROFILE\"])\n if self.profile == None:\n self.error(\"Profile Not found. 
Exiting...\")\n time.sleep(10)\n sys.exit()\n\n self.tasks()\n \n def tasks(self):\n\n self.monitor()\n self.addToCart()\n self.method()\n self.getShippingMethod()\n self.shipping()\n self.paymentMethod()\n\n if self.task['PAYMENT'].strip().lower() == \"visa\" or self.task['PAYMENT'].strip().lower() == \"mastercard\" or self.task['PAYMENT'].strip().lower() == \"card\":\n self.placeOrder_cc()\n else:\n self.placeOrder_pp()\n\n self.sendToDiscord()\n\n def monitor(self):\n while True:\n self.prepare(\"Getting Product...\")\n\n try:\n response = self.session.get(self.task[\"PRODUCT\"])\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n\n if response.status_code == 200:\n self.start = time.time()\n\n self.warning(\"Retrieved Product\")\n\n try:\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n self.webhookData['product'] = str(soup.find(\"meta\",{\"property\":\"og:image:alt\"})[\"content\"])\n self.webhookData['image'] = str(soup.find(\"meta\",{\"property\":\"og:image\"})[\"content\"])\n self.webhookData['price'] = str(soup.find(\"span\",{\"class\":\"price\"}).text)\n\n self.atcUrl = soup.find(\"form\", {\"id\": \"product_addtocart_form\"})[\"action\"].replace(',',',,')\n self.formKey = soup.find(\"input\", {\"name\": \"form_key\"})[\"value\"]\n self.productId = soup.find(\"input\", {\"name\": \"product_id\"})[\"value\"]\n self.attributeId = response.text.split('{\"attributes\":{\"')[1].split('\"')[0]\n sizeSelect = soup.find(\"div\",{\"id\":\"tab-size_eu\"})\n\n cookie_obj = requests.cookies.create_cookie(domain='.www.titoloshop.com', name='form_key', value=self.formKey)\n self.session.cookies.set_cookie(cookie_obj)\n\n allSizes = []\n sizes = []\n for s in sizeSelect:\n try:\n allSizes.append('{}:{}:{}'.format(s['option-label'],s[\"data-option-label\"], s['data-option-id']))\n sizes.append(s['option-label'])\n except:\n pass\n\n if len(sizes) == 0:\n self.error(\"No sizes available\")\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n\n if self.task[\"SIZE\"].strip().lower() != \"random\":\n if self.task[\"SIZE\"] not in sizes:\n self.error(\"Size not available\")\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n else:\n for size in allSizes:\n if size.split(':')[0].strip().lower() == self.task[\"SIZE\"].strip().lower():\n self.size = size.split(':')[0]\n self.sizeValue = size.split(\":\")[1]\n self.optionId = size.split(':')[2]\n \n self.warning(f\"Found Size => {self.size}\")\n \n else:\n selected = random.choice(allSizes)\n self.size = selected.split(\":\")[0]\n self.sizeValue = selected.split(\":\")[1]\n self.optionId = selected.split(\":\")[2]\n \n self.warning(f\"Found Size => {self.size}\")\n\n\n except Exception as e:\n log.info(e)\n self.error(\"Failed to parse product data (maybe OOS)\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n self.webhookData['size'] = self.size\n return\n \n else:\n self.error(f\"Failed to get product [{str(response.status_code)}]. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n def addToCart(self):\n while True:\n self.prepare(\"Adding to cart...\")\n \n boundary = ''.join(random.choices(string.ascii_uppercase + string.digits + string.ascii_lowercase, k=16))\n payload = {\n 'product': self.productId,\n 'selected_configurable_option': '',\n 'related_product': '',\n 'item': self.productId,\n 'form_key': self.formKey,\n f'super_attribute[{self.attributeId}]': self.optionId,\n 'qty': '1',\n f'formatted_size_value[{self.attributeId}]': self.sizeValue\n }\n payload_encoded = MultipartEncoder(payload, boundary=f'----WebKitFormBoundary{boundary}')\n \n\n try:\n response = self.session.post(self.atcUrl, data=payload_encoded.to_string(), headers={\n 'accept-language': 'en-US,en;q=0.9',\n 'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary}',\n 'referer': self.task[\"PRODUCT\"],\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'x-requested-with': 'XMLHttpRequest',\n 'accept':'application/json, text/javascript, */*; q=0.01'\n })\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n\n try:\n response_data = response.json()\n except Exception as e:\n log.info(e)\n self.error(\"Failed to cart [failed to parse response]. Retrying...\")\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n\n if response.status_code == 200 and response_data == []:\n self.success(\"Added to cart!\")\n updateConsoleTitle(True,False,SITE)\n return\n \n else:\n self.error(f\"Failed to cart [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n def method(self):\n while True:\n self.prepare(\"Getting basket ID\")\n \n try:\n response = self.session.get(f'{self.baseSite}/{self.reg}/checkout/',headers={\n 'referer': self.task['PRODUCT'],\n 'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response.status_code == 200:\n\n try:\n self.sessionId = response.text.split('\"quoteData\":{\"entity_id\":\"')[1].split('\"')[0]\n except Exception as e:\n log.info(e)\n self.error(\"Failed to get basket ID [failed to parse response]. Retrying...\")\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n\n self.warning(\"Got basket ID\")\n return\n else:\n self.error(f\"Failed to get basket ID [{str(response.status_code)}]. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n def getShippingMethod(self):\n while True:\n self.prepare(\"Getting shipping method\")\n\n try:\n payload = {\n \"address\": {\n \"street\": [\"{} {}\".format(self.profile[\"addressOne\"], self.profile[\"addressTwo\"]), self.profile[\"house\"]],\n \"city\": self.profile[\"city\"],\n \"country_id\": self.profile[\"countryCode\"],\n \"postcode\": self.profile[\"zip\"],\n \"firstname\": self.profile[\"firstName\"],\n \"lastname\": self.profile[\"lastName\"],\n \"telephone\": self.profile[\"phone\"]\n }\n }\n except Exception:\n self.error(f\"Failed to get shipping method [Failed to construct payload]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response = self.session.post(f'{self.baseSite}/{self.reg}/rest/{self.reg}/V1/guest-carts/{self.sessionId}/estimate-shipping-methods',\n json=payload,headers={\n \"accept\": \"*/*\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"content-type\": \"application/json\",\n \"referrer\": f\"{self.baseSite}/{self.reg}\",\n \"x-requested-with\": \"XMLHttpRequest\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response.status_code == 200:\n try:\n responseJson = response.json()\n except Exception as e:\n log.info(e)\n self.error(\"Failed to get shipping method [failed to parse response]. Retrying...\")\n time.sleep(int(self.task[\"DELAY\"]))\n continue\n\n if len(responseJson) > 0:\n self.shippingMethod = responseJson[0]\n self.warning(\"Got shipping method\")\n return\n else:\n self.error(f\"Failed to get shipping method [empty response]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n else:\n self.error(f\"Failed to get shipping method [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n def shipping(self):\n while True:\n self.prepare(\"Submitting shipping...\")\n\n try:\n payload = {\n \"addressInformation\": {\n \"shipping_address\": {\n \"countryId\": self.profile[\"countryCode\"],\n \"street\": [\"{} {}\".format(self.profile[\"addressOne\"], self.profile[\"addressTwo\"]), self.profile[\"house\"]],\n \"telephone\": self.profile[\"phone\"],\n \"postcode\": self.profile[\"zip\"],\n \"city\": self.profile[\"city\"],\n \"firstname\": self.profile[\"firstName\"],\n \"lastname\": self.profile[\"lastName\"]\n },\n \"billing_address\": {\n \"countryId\": self.profile[\"countryCode\"],\n \"street\": [\"{} {}\".format(self.profile[\"addressOne\"], self.profile[\"addressTwo\"]), self.profile[\"house\"]],\n \"telephone\": self.profile[\"phone\"],\n \"postcode\": self.profile[\"zip\"],\n \"city\": self.profile[\"city\"],\n \"firstname\": self.profile[\"firstName\"],\n \"lastname\": self.profile[\"lastName\"],\n \"saveInAddressBook\": None\n },\n \"shipping_method_code\": self.shippingMethod['method_code'],\n \"shipping_carrier_code\": self.shippingMethod['carrier_code'],\n \"extension_attributes\": {}\n }\n }\n except Exception as e:\n self.error(f\"Failed to construct shipping form ({e}). 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response = self.session.post(f'{self.baseSite}/{self.reg}/rest/{self.reg}/V1/guest-carts/{self.sessionId}/shipping-information',\n json=payload, headers={\n \"accept\": \"*/*\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"content-type\": \"application/json\",\n \"referrer\": f\"{self.baseSite}/{self.reg}\",\n \"x-requested-with\": \"XMLHttpRequest\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n \n if response.status_code == 200:\n self.warning(\"Successfully set shipping\")\n return\n else:\n self.error(f\"Failed to set shipping [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n def paymentMethod(self):\n while True:\n self.prepare(\"Setting payment method...\")\n\n\n\n try:\n paymentM = \"datatranscw_paypal\"\n if self.task['PAYMENT'].strip().lower() == \"visa\" or self.task['PAYMENT'].strip().lower() == \"mastercard\" or self.task['PAYMENT'].strip().lower() == \"card\": paymentM = \"datatranscw_creditcard\"\n\n payload = {\n \"cartId\": self.sessionId,\n \"billingAddress\": {\n \"countryId\": self.profile[\"countryCode\"],\n \"street\": [\"{} {}\".format(self.profile[\"addressOne\"], self.profile[\"addressTwo\"]), self.profile[\"house\"]],\n \"telephone\": self.profile[\"phone\"],\n \"postcode\": self.profile[\"zip\"],\n \"city\": self.profile[\"city\"],\n \"firstname\": self.profile[\"firstName\"],\n \"lastname\": self.profile[\"lastName\"],\n \"saveInAddressBook\": None\n },\n \"paymentMethod\": {\n \"method\": paymentM,\n \"po_number\": None,\n \"additional_data\": {}\n },\n \"email\": self.profile['email']\n }\n except Exception as e:\n self.error(f\"Failed to construct payment form ({e}). Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response = self.session.post(f'{self.baseSite}/{self.reg}/rest/{self.reg}/V1/guest-carts/{self.sessionId}/payment-information',\n json=payload, headers={\n \"accept\": \"*/*\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"content-type\": \"application/json\",\n \"referrer\": f\"{self.baseSite}/{self.reg}\",\n \"x-requested-with\": \"XMLHttpRequest\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response.status_code == 200:\n\n try:\n self.orderId = response.json()\n except Exception:\n self.error(f'Failed to set payment method [failed to parse response]. Retrying...')\n time.sleep(int(self.task[\"DELAY\"]))\n self.payment_method()\n\n self.warning(\"Set payment method\")\n \n return\n else:\n self.error(f\"Failed to set payment method [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n def placeOrder_pp(self):\n while True:\n self.prepare(\"Getting paypal checkout...\")\n\n try:\n payload = {\n 'orderId': self.orderId\n }\n except Exception:\n self.error(f\"Failed to construct checkout form. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response = self.session.post(f'{self.baseSite}/{self.reg}/rest/{self.reg}/V1/guest-carts/{self.sessionId}/datatranscw/checkout/authorize',\n json=payload,headers={\n \"accept\": \"*/*\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"content-type\": \"application/json\",\n \"referrer\": f\"{self.baseSite}/{self.reg}\",\n \"x-requested-with\": \"XMLHttpRequest\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response.status_code == 200:\n try:\n responseJson = response.json()\n params = {\n \"uppModuleName\": \"Customweb Magento\",\n \"uppModuleVersion\": responseJson['java_script_callback_function'].split('\\\"data-upp-module-version\\\", \\\"')[1].split('\\\")')[0],\n \"merchantId\": responseJson['java_script_callback_function'].split('\\\"data-merchant-id\\\", \\\"')[1].split('\\\")')[0],\n \"amount\": responseJson['java_script_callback_function'].split('\\\"data-amount\\\", \\\"')[1].split('\\\")')[0],\n \"currency\": responseJson['java_script_callback_function'].split('\\\"data-currency\\\", \\\"')[1].split('\\\")')[0],\n \"refno\": responseJson['java_script_callback_function'].split('\\\"data-refno\\\", \\\"')[1].split('\\\")')[0],\n \"successUrl\": responseJson['java_script_callback_function'].split('\\\"data-success-url\\\", \\\"')[1].split('\\\")')[0],\n \"errorUrl\": responseJson['java_script_callback_function'].split('\\\"data-error-url\\\", \\\"')[1].split('\\\")')[0],\n \"cancelUrl\": responseJson['java_script_callback_function'].split('\\\"data-cancel-url\\\", \\\"')[1].split('\\\")')[0],\n \"uppReturnMaskedCC\": responseJson['java_script_callback_function'].split('\\\"data-upp-return-masked-c-c\\\", \\\"')[1].split('\\\")')[0],\n \"language\": responseJson['java_script_callback_function'].split('\\\"data-language\\\", \\\"')[1].split('\\\")')[0],\n \"reqtype\": responseJson['java_script_callback_function'].split('\\\"data-reqtype\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerName\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-name\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerFirstName\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-first-name\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerLastName\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-last-name\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerStreet\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-street\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerCity\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-city\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerCountry\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-country\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerZipCode\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-zip-code\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerEmail\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-email\\\", \\\"')[1].split('\\\")')[0],\n \"uppCustomerDetails\": responseJson['java_script_callback_function'].split('\\\"data-upp-customer-details\\\", 
\\\"')[1].split('\\\")')[0],\n \"paymentmethod\": responseJson['java_script_callback_function'].split('\\\"data-paymentmethod\\\", \\\"')[1].split('\\\")')[0],\n \"L_AMT0\": responseJson['java_script_callback_function'].split('\\\"data--l_-a-m-t0\\\", \\\"')[1].split('\\\")')[0],\n \"L_TAXAMT0\": responseJson['java_script_callback_function'].split('\\\"data--l_-t-a-x-a-m-t0\\\", \\\"')[1].split('\\\")')[0],\n \"L_NAME0\": responseJson['java_script_callback_function'].split('\\\"data--l_-n-a-m-e0\\\", \\\"')[1].split('\\\")')[0],\n \"L_Number0\": responseJson['java_script_callback_function'].split('\\\"data--l_-number0\\\", \\\"')[1].split('\\\")')[0],\n \"L_Desc0\": responseJson['java_script_callback_function'].split('\\\"data--l_-desc0\\\", \\\"')[1].split('\\\")')[0],\n \"SHIPPINGAMT\": responseJson['java_script_callback_function'].split('\\\"data--s-h-i-p-p-i-n-g-a-m-t\\\", \\\"')[1].split('\\\")')[0],\n \"ITEMAMT\": responseJson['java_script_callback_function'].split('\\\"data--i-t-e-m-a-m-t\\\", \\\"')[1].split('\\\")')[0],\n \"TAXAMT\": responseJson['java_script_callback_function'].split('\\\"data--t-a-x-a-m-t\\\", \\\"')[1].split('\\\")')[0],\n \"cwDataTransId\": responseJson['java_script_callback_function'].split('\\\"data-cw-data-trans-id\\\", \\\"')[1].split('\\\")')[0],\n \"theme\": responseJson['java_script_callback_function'].split('\\\"data-theme\\\", \\\"')[1].split('\\\")')[0],\n \"sign\": responseJson['java_script_callback_function'].split('\\\"data-sign\\\", \\\"')[1].split('\\\")')[0],\n \"version\": \"2.0.0\"\n }\n except Exception:\n self.error(f\"Failed to get paypal checkout [failed to parse response]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response2 = self.session.get('https://pay.datatrans.com/upp/jsp/upStart.jsp',params=params,headers={\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response2.status_code == 200:\n try:\n trx = response2.text.split('name=\"datatransTrxId\" value=\"')[1].split('\"')[0]\n payload = {\n \"datatransTrxId\": trx,\n \"hiddenFrame\": False,\n \"uppScreenWidth\": 999,\n \"iframed\": \"\",\n \"browserUserAgent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36\",\n \"browserJavaEnabled\": False,\n \"browserLanguage\": \"en-US\",\n \"browserColorDepth\": 24,\n \"browserScreenHeight\": 1440,\n \"browserScreenWidth\": 2560,\n \"browserTZ\": 0\n }\n except Exception:\n self.error(f\"Failed to get paypal checkout [failed to parse response]. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n try:\n response3 = self.session.post('https://pay.datatrans.com/upp/jsp/upStart_1.jsp',data=payload,headers={\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Referer\":response2.url,\n \"Host\": \"pay.datatrans.com\",\n \"Origin\": \"https://pay.datatrans.com\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response3.status_code == 200:\n\n try:\n ec = response3.text.split(\"name='token' value='\")[1].split(\"'\")[0]\n ppurl = f'https://www.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={ec}&useraction=commit'\n except Exception:\n self.error(f\"Failed to get paypal checkout [failed to parse response]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n self.end = time.time() - self.start\n self.webhookData['speed'] = self.end\n\n self.success(\"Got paypal checkout!\")\n updateConsoleTitle(False,True,SITE)\n\n self.webhookData['url'] = storeCookies(\n ppurl,self.session,\n self.webhookData['product'],\n self.webhookData['image'],\n self.webhookData['price'],\n False\n )\n return\n \n else:\n self.error(f\"Failed to get paypal checkout [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n \n else:\n self.error(f\"Failed to get paypal checkout [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n \n def placeOrder_cc(self):\n while True:\n self.prepare(\"Getting card checkout...\")\n\n try:\n val = \"\"\n if self.task['PAYMENT'].strip().lower() == \"visa\": val = \"VIS\"\n else: val = \"ECA\"\n\n payload = {\n 'orderId': self.orderId,\n \"formValues\":[{\"key\":\"pmethod\",\"value\":val}]\n }\n except Exception:\n self.error(f\"Failed to construct checkout form. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response = self.session.post(f'{self.baseSite}/{self.reg}/rest/{self.reg}/V1/guest-carts/{self.sessionId}/datatranscw/checkout/authorize',\n json=payload,headers={\n \"accept\": \"*/*\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"content-type\": \"application/json\",\n \"referrer\": f\"{self.baseSite}/{self.reg}\",\n \"x-requested-with\": \"XMLHttpRequest\"\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response.status_code == 200:\n\n try:\n responseJson = response.json()\n params = {\n \"uppModuleName\": responseJson['hidden_form_fields'][0]['value'],\n \"uppModuleVersion\": responseJson['hidden_form_fields'][1]['value'],\n \"merchantId\": responseJson['hidden_form_fields'][2]['value'],\n \"amount\": responseJson['hidden_form_fields'][3]['value'],\n \"currency\": responseJson['hidden_form_fields'][4]['value'],\n \"refno\": responseJson['hidden_form_fields'][5]['value'],\n \"successUrl\": responseJson['hidden_form_fields'][6]['value'],\n \"errorUrl\": responseJson['hidden_form_fields'][7]['value'],\n \"cancelUrl\": responseJson['hidden_form_fields'][8]['value'],\n \"uppReturnMaskedCC\": responseJson['hidden_form_fields'][9]['value'],\n \"language\": responseJson['hidden_form_fields'][10]['value'],\n \"reqtype\": responseJson['hidden_form_fields'][11]['value'],\n \"uppCustomerName\": responseJson['hidden_form_fields'][12]['value'],\n \"uppCustomerFirstName\": responseJson['hidden_form_fields'][13]['value'],\n \"uppCustomerLastName\": responseJson['hidden_form_fields'][14]['value'],\n \"uppCustomerStreet\": responseJson['hidden_form_fields'][15]['value'],\n \"uppCustomerCity\": responseJson['hidden_form_fields'][16]['value'],\n \"uppCustomerCountry\": responseJson['hidden_form_fields'][17]['value'],\n \"uppCustomerZipCode\": responseJson['hidden_form_fields'][18]['value'],\n \"uppCustomerEmail\": responseJson['hidden_form_fields'][19]['value'],\n \"uppCustomerDetails\": responseJson['hidden_form_fields'][20]['value'],\n \"paymentmethod\": responseJson['hidden_form_fields'][21]['value'],\n \"cwDataTransId\": responseJson['hidden_form_fields'][22]['value'],\n \"theme\":responseJson['hidden_form_fields'][23]['value'],\n \"sign\": responseJson['hidden_form_fields'][24]['value'],\n \"version\": \"2.0.0\"\n }\n except Exception:\n self.error(f\"Failed to get card checkout [failed to parse response]. 
Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n try:\n response2 = self.session.get('https://pay.datatrans.com/upp/jsp/upStart.jsp',params=params,headers={\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n })\n\n except (Exception, ConnectionError, ConnectionRefusedError, requests.exceptions.RequestException) as e:\n log.info(e)\n self.error(f\"error: {str(e)}\")\n time.sleep(int(self.task[\"DELAY\"]))\n self.session.proxies = loadProxy(self.task[\"PROXIES\"],self.taskID,SITE)\n continue\n\n if response2.status_code == 200:\n\n self.end = time.time() - self.start\n self.webhookData['speed'] = self.end\n\n self.success(\"Got card checkout!\")\n updateConsoleTitle(False,True,SITE)\n\n self.webhookData['url'] = storeCookies(\n response2.url,self.session,\n self.webhookData['product'],\n self.webhookData['image'],\n self.webhookData['price'],\n False\n )\n return\n \n else:\n self.error(f\"Failed to get card checkout [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n\n \n else:\n self.error(f\"Failed to get card checkout [{str(response.status_code)}]. Retrying...\")\n time.sleep(int(self.task['DELAY']))\n continue\n \n def sendToDiscord(self):\n while True:\n \n self.webhookData['proxy'] = self.session.proxies\n\n sendNotification(SITE,self.webhookData['product'])\n\n try:\n Webhook.success(\n webhook=loadSettings()[\"webhook\"],\n site=SITE,\n url=self.webhookData['url'],\n image=self.webhookData['image'],\n title=self.webhookData['product'],\n size=self.webhookData['size'],\n price=self.webhookData['price'],\n paymentMethod=self.task['PAYMENT'].strip().title(),\n product=self.webhookData['product_url'],\n profile=self.task[\"PROFILE\"],\n proxy=self.webhookData['proxy'],\n speed=self.webhookData['speed']\n )\n self.secondary(\"Sent to discord!\")\n while True:\n pass\n except:\n self.alert(\"Failed to send webhook. 
Checkout here ==> {}\".format(self.webhookData['url']))\n while True:\n pass","sub_path":"venetia-build/sites/titolo.py","file_name":"titolo.py","file_ext":"py","file_size_in_byte":39348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"195477860","text":"#!/usr/bin/env python\n\nimport rospy\nimport rosservice\nimport sys\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nimport numpy as np\nimport tf\nfrom gpd_ros import msg\n\n\nclass gpd():\n def __init__(self):\n self.moveit_setup()\n rospy.Subscriber(\"/detect_grasps/clustered_grasps\", \n\t\t\t msg.GraspConfigList, \n\t\t self.cb, \n\t\t queue_size=10)\n self.flag = True\n print(\"End-Effector Link: \", self.group.get_end_effector_link())\n self.listener = tf.TransformListener()\n print(\"Init Complete\")\n \n def tf_calc(self):\n # Put tf/autolab stuff here\n (tr_cam_base, rot_cam_base) = self.listener.lookupTransform('/panda_link0',\n '/color',\n rospy.Time(0))\n \n # transform from camera to robot base\n pose_cam_robot = geometry_msgs.msg.Pose()\n pose_cam_robot.position.x = tr_cam_base[0]\n pose_cam_robot.position.y = tr_cam_base[1]\n pose_cam_robot.position.z = tr_cam_base[2]\n pose_cam_robot.orientation.x = rot_cam_base[0]\n pose_cam_robot.orientation.y = rot_cam_base[1]\n pose_cam_robot.orientation.z = rot_cam_base[2]\n pose_cam_robot.orientation.w = rot_cam_base[3]\n \n # transform from approach grasp frame to camera\n pose_approach_cam = geometry_msgs.msg.Pose()\n pose_approach_cam.position.x = self.position[0]\n pose_approach_cam.position.y = self.position[1]\n pose_approach_cam.position.z = self.position[2]\n pose_approach_cam.orientation.x = self.quat[0]\n pose_approach_cam.orientation.y = self.quat[1]\n pose_approach_cam.orientation.z = self.quat[2]\n pose_approach_cam.orientation.w = self.quat[3]\n \n # transform from grasp frame to camera\n pose_grasp_cam = geometry_msgs.msg.Pose()\n pose_grasp_cam.position.x = self.grasp_pos[0]\n pose_grasp_cam.position.y = self.grasp_pos[1]\n pose_grasp_cam.position.z = self.grasp_pos[2]\n pose_grasp_cam.orientation.x = self.quat[0]\n pose_grasp_cam.orientation.y = self.quat[1]\n pose_grasp_cam.orientation.z = self.quat[2]\n pose_grasp_cam.orientation.w = self.quat[3]\n \n # transform from end-effector link8 to grasp frame\n pose_eef_grasp = geometry_msgs.msg.Pose()\n pose_eef_grasp.position.x = self.position[0]\n pose_eef_grasp.position.y = self.position[1]\n pose_eef_grasp.position.z = self.position[2]\n pose_eef_grasp.orientation.x = self.quat[0]\n pose_eef_grasp.orientation.y = self.quat[1]\n pose_eef_grasp.orientation.z = self.quat[2]\n pose_eef_grasp.orientation.w = self.quat[3]\n\n def cb(self, grasp_msg):\n if self.flag == True:\n pos = grasp_msg.grasps[0].position\n app = grasp_msg.grasps[0].approach\n binorm = grasp_msg.grasps[0].binormal\n axis = grasp_msg.grasps[0].axis\n self.width = np.float(grasp_msg.grasps[0].width.data)\n offset = 0.05\n self.position = np.array([pos.x - offset, pos.y , (pos.z -0.060)])\n self.grasp_pos = np.array([pos.x - offset, pos.y , pos.z])\n self.orientation = np.array([[app.x,binorm.x,axis.x,0],\n\t\t\t\t\t [app.y,binorm.y,axis.y,0],\n\t\t\t\t\t [app.z,binorm.z,axis.z,0],\n [0 ,0 ,0 ,1]])\n \n pre_orientation = self.orientation\n post_orientation = self.orientation\n \n pre_orientation[:,3] = [self.position[0], self.position[1], self.position[2], 1]\n post_orientation[:,3] = [self.grasp_pos[0], self.grasp_pos[1], self.grasp_pos[2], 1]\n \n 
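# NOTE: a best-effort reading of the constants below - each offset is a homogeneous\n        # [approach, binormal, axis, 1] vector (presumably metres), and np.inner(offset, pose_matrix)\n        # equals pose_matrix @ offset, so the adjusted positions sit -0.016 along the binormal and\n        # 0.18 (pre-grasp) or 0.08 (grasp) along the hand axis, relative to the grasp position\n        # expressed in the camera frame.\n        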
self.pos_offset_pre_grasp = [0, -0.016, 0.18, 1]\n self.pos_offset_grasp = [0, -0.016, 0.08, 1]\n \n # NEW POSITION VALUES\n self.adjusted_pre_grasp_pos = np.inner(self.pos_offset_pre_grasp, pre_orientation)\n # print(self.adjusted_pre_grasp_pos)\n self.adjusted_pre_grasp_pos = self.adjusted_pre_grasp_pos[:3]\n self.adjusted_grasp_pos = np.inner(self.pos_offset_grasp, post_orientation)\n self.adjusted_grasp_pos = self.adjusted_grasp_pos[:3]\n \n quat_offset = tf.transformations.quaternion_from_euler(45, 90, 0)\n \n # KEEP SAME ORIENTATION VALUES\n # self.quat = tf.transformations.quaternion_from_matrix(self.orientation)\n \n quat_adjust = tf.transformations.quaternion_from_matrix(self.orientation)\n self.quat = tf.transformations.quaternion_multiply(quat_adjust, quat_offset)\n print(\"Pos\", self.position)\n # print(\"Adjusted pre grasp pos\", self.adjusted_pre_grasp_pos)\n # print(\"Adjusted grasp pos\", self.adjusted_grasp_pos)\n print(\"Quat\", self.quat)\n self.flag = False\n\n def move_robot(self, goal_pose):\n print(\"Moving...\")\n self.group.set_pose_target(goal_pose)\n self.group.go(wait=True)\n self.group.stop()\n self.group.clear_pose_targets()\n \n def close_gripper(self):\n self.group_h.set_joint_value_target([0.0, 0.0])\n self.group_h.go(wait=True)\n self.group_h.stop()\n self.group_h.clear_pose_targets()\n\n def open_gripper(self):\n w = self.width/2.0\n goal = min(0.04, w+0.02)\n self.group_h.set_joint_value_target([goal, goal])\n self.group_h.go(wait=True)\n self.group_h.stop()\n self.group_h.clear_pose_targets()\n\n def moveit_setup(self): \n moveit_commander.roscpp_initialize(sys.argv)\n robot = moveit_commander.RobotCommander()\n scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(\"panda_arm\")\n self.group_h = moveit_commander.MoveGroupCommander(\"hand\")\n self.group.set_end_effector_link(\"panda_hand\")\n display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',\n moveit_msgs.msg.DisplayTrajectory, queue_size=20)\n planning_frame = self.group.get_planning_frame() \n\n def execute(self):\n print(\"Opening Gripper\")\n self.open_gripper()\n \n print(\"Moving Robot to Pre Grasp\")\n pre_grasp = geometry_msgs.msg.PoseStamped()\n pre_grasp.header.frame_id = 'color'\n pre_grasp.pose.position.x = self.grasp_pos[0]\n pre_grasp.pose.position.y = self.grasp_pos[1]\n pre_grasp.pose.position.z = self.grasp_pos[2]\n pre_grasp.pose.orientation.x = self.quat[0]\n pre_grasp.pose.orientation.y = self.quat[1]\n pre_grasp.pose.orientation.z = self.quat[2]\n pre_grasp.pose.orientation.w = self.quat[3]\n \n pre_grasp1 = geometry_msgs.msg.PoseStamped()\n pre_grasp1 = self.listener.transformPose('/panda_link0', pre_grasp)\n pre_grasp_adjust = self.listener.transformPose('/panda_link0', pre_grasp)\n pre_grasp_adjust.pose.position.x += self.pos_offset_pre_grasp[0]\n pre_grasp_adjust.pose.position.y += self.pos_offset_pre_grasp[1]\n pre_grasp_adjust.pose.position.z += self.pos_offset_pre_grasp[2]\n \n # print(\"Pre-Grasp:\", pre_grasp)\n # print(\"Pre-Grasp1:\", pre_grasp1)\n # print(\"Pre-Grasp Adjusted:\", pre_grasp_adjust)\n\n self.move_robot(pre_grasp_adjust)\n \n print(\"Moving Robot to Grasp\")\n grasp = geometry_msgs.msg.PoseStamped()\n grasp.header.frame_id = 'color'\n grasp.pose.position.x = self.grasp_pos[0]\n grasp.pose.position.y = self.grasp_pos[1]\n grasp.pose.position.z = self.grasp_pos[2]\n grasp.pose.orientation.x = self.quat[0]\n grasp.pose.orientation.y = self.quat[1]\n grasp.pose.orientation.z = 
self.quat[2]\n grasp.pose.orientation.w = self.quat[3]\n \n grasp1 = geometry_msgs.msg.PoseStamped()\n grasp1 = self.listener.transformPose('/panda_link0', grasp)\n grasp_adjust = self.listener.transformPose('/panda_link0', grasp)\n grasp_adjust.pose.position.x += self.pos_offset_grasp[0]\n grasp_adjust.pose.position.y += self.pos_offset_grasp[1]\n grasp_adjust.pose.position.z += self.pos_offset_grasp[2]\n\n self.move_robot(grasp_adjust)\n \n print(\"Closing Gripper\")\n self.close_gripper()\n \n print(\"Returning to Ready Position\")\n self.ready_joint_pose()\n \n def ready_joint_pose(self):\n joint1 = [0.000,0.200,-0.000,-1.5963787922313974,-0.000,1.9,0.785]\n joint_goal = self.group.get_current_joint_values()\n joint_goal[0] = joint1[0]\n joint_goal[1] = joint1[1]\n joint_goal[2] = joint1[2]\n joint_goal[3] = joint1[3]\n joint_goal[4] = joint1[4]\n joint_goal[5] = joint1[5]\n joint_goal[6] = joint1[6]\n self.group.go(joint_goal, wait=True)\n self.group.stop()\n self.group.clear_pose_targets()\n \n self.flag = True\n\n\ndef main(args):\n rospy.init_node('gpd_ros_execution')\n gp = gpd()\n gp.ready_joint_pose() \n rospy.sleep(5)\n while True:\n gp.execute()\n rospy.sleep(5)\n a = raw_input(\"Would you like to execute the grasp program again? (y/n)\")\n if a == 'Y' or a == 'y':\n continue\n else:\n break\n \n rospy.spin()\n\nif __name__ == '__main__':\n main(sys.argv)","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"33474716","text":"import sys\nimport re\nimport math\ndef lcm(nums):\n ceiling=1\n for a in nums:\n ceiling*=a\n big=max(nums)\n res=big\n while resa:\n return 0\n else:\n if int(math.pow(2,k))*int(math.pow(3,m))==a:\n return 1\n else:\n return judge(a,k+1,m)+judge(a,k,m+1)+judge(a,k+1,m+1)\ns=sys.stdin.read()\ndigits=re.findall(r\"\\d+\",s)\nnums= [int(e) for e in digits ]\nn=nums[0] \ndel(nums[0])\nnums=list(set(nums))\nif len(nums)>10:\n print(\"No\")\nelse:\n t=lcm(nums)\n times=[0]*len(nums)\n for i in range(len(nums)):\n times[i]=t//nums[i]\n isvalid=[0]*len(nums)\n for i in range(len(nums)):\n isvalid[i]=judge(times[i],0,0)\n istrue=1\n for j in isvalid:\n if j==0:\n istrue=0\n break\n if istrue==1:\n print(\"Yes\")\n else:\n print(\"No\")","sub_path":"Code/CodeRecords/2799/60753/287928.py","file_name":"287928.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518552326","text":"#!/bin/python3\n\n\ndef timeConversion(s):\n \"\"\"\n Given a time in 12-hour AM/PM format, convert it to military (24-hour) time.\n Note: Midnight is 12:00:00AM on a 12-hour clock, and 00:00:00 on a 24-hour clock. Noon is 12:00:00PM on a 12-hour clock, and 12:00:00 on a 24-hour clock. 
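For example, '07:05:45PM' converts to '19:05:45', and '12:00:00AM' converts to '00:00:00'.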
\n    """\n    s_ = s.split(':')\n    hr = s_[0]\n    mn = s_[1]\n    sc = ''.join([x for x in s_[-1]][:2])\n    form = ''.join([x for x in s_[-1]][2:])\n    if form == 'PM' and not int(hr) == 12:\n        hr = str(int(hr) + 12)\n    if form == 'AM' and int(hr) == 12:\n        hr = '00'\n\n    return ':'.join([hr, mn, sc])\n\n\nif __name__ == '__main__':\n    input1 = '12:40:22PM'\n    input2 = '12:40:22AM'\n    print(timeConversion(input1))\n    print(timeConversion(input2))\n","sub_path":"algorithms/time_conversion.py","file_name":"time_conversion.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"398456165","text":"__license__ = 'MIT License '\n__author__ = 'Lucas Theis '\n__docformat__ = 'epytext'\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom publications.models import Type, Publication\nfrom string import capwords, replace, split\n\ndef person(request, name):\n\tauthor = capwords(replace(name, '+', ' '))\n\tauthor = author.replace(' Von ', ' von ').replace(' Van ', ' van ')\n\tauthor = author.replace(' Der ', ' der ')\n\n\t# take care of dashes\n\toff = author.find('-')\n\twhile off > 0:\n\t\toff += 1\n\t\tif off <= len(author):\n\t\t\tauthor = author[:off] + author[off].upper() + author[off + 1:]\n\t\toff = author.find('-', off)\n\n\t# split into forename, middlenames and surname\n\tnames = split(replace(name, ' ', '+'), '+')\n\n\t# find publications of this author\n\tpublications = []\n\ttypes = Type.objects.all()\n\ttypes_dict = {}\n\n\tfor t in types:\n\t\ttypes_dict[t] = []\n\n\tif len(names) > 1:\n\t\tfor publication in Publication.objects.filter(authors__icontains=names[-1]):\n\t\t\tif names[0][0].lower() + '. ' + names[-1].lower() in publication.authors_list_simple:\n\t\t\t\tpublications.append(publication)\n\t\t\t\ttypes_dict[publication.type].append(publication)\n\n\telif len(names) > 0:\n\t\tfor publication in Publication.objects.filter(authors__icontains=names[-1]):\n\t\t\tif names[-1].lower() in publication.authors_list_simple:\n\t\t\t\tpublications.append(publication)\n\t\t\t\ttypes_dict[publication.type].append(publication)\n\n\t# remove empty types\n\tfor t in types:\n\t\tif not types_dict[t]:\n\t\t\ttypes = types.exclude(pk=t.pk)\n\n\t# attach publications to types\n\tfor t in types:\n\t\tt.publications = types_dict[t]\n\n\tif 'ascii' in request.GET:\n\t\treturn render_to_response('publications/publications.txt', {\n\t\t\t\t'publications': publications\n\t\t\t}, context_instance=RequestContext(request), mimetype='text/plain; charset=UTF-8')\n\n\telif 'bibtex' in request.GET:\n\t\treturn render_to_response('publications/publications.bib', {\n\t\t\t\t'publications': publications\n\t\t\t}, context_instance=RequestContext(request), mimetype='text/x-bibtex; charset=UTF-8')\n\n\telif 'rss' in request.GET:\n\t\treturn render_to_response('publications/publications.rss', {\n\t\t\t\t'url': 'http://' + request.META['HTTP_HOST'] + request.path,\n\t\t\t\t'author': author,\n\t\t\t\t'publications': publications\n\t\t\t}, context_instance=RequestContext(request), mimetype='application/rss+xml; charset=UTF-8')\n\n\telse:\n\t\treturn render_to_response('publications/years.html', {\n\t\t\t\t'publications': publications,\n\t\t\t\t'types': types,\n\t\t\t\t'author': author\n\t\t\t}, 
context_instance=RequestContext(request))\n","sub_path":"cell/publications/views/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"428389264","text":"import numpy as np\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nimport time\n\n\ndef plot_decision_boundary(model, axis):\n x0, x1 = np.meshgrid(\n np.linspace(axis[0], axis[1], int((axis[1] - axis[0]) * 5000)).reshape(-1, 1),\n np.linspace(axis[2], axis[3], int((axis[3] - axis[2]) * 5000)).reshape(-1, 1),\n )\n x_new = np.c_[x0.ravel(), x1.ravel()]\n\n y_predict = model.predict(x_new)\n zz = y_predict.reshape(x0.shape)\n\n from matplotlib.colors import ListedColormap\n custom_cmap = ListedColormap(['#90CAF9', '#FFF59D', '#EF9A9A'])\n\n plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)\n\n\ndef savemap(model, axis):\n x0, x1, x2 = np.meshgrid(\n np.linspace(axis[0], axis[1], int((axis[1] - axis[0]) * 5000)).reshape(-1, 1),\n np.linspace(axis[2], axis[3], int((axis[3] - axis[2]) * 5000)).reshape(-1, 1),\n np.linspace(axis[4], axis[5], int((axis[5] - axis[4]) * 5000)).reshape(-1, 1)\n )\n print(x0.shape)\n print(x1.shape)\n print(x2.shape)\n x_new = np.c_[x0.ravel(), x1.ravel(), x2.ravel()]\n y_predict = model.predict(x_new)\n np.savetxt('map.txt', y_predict, fmt=\"%d\", delimiter=\" \")\n\n\nif __name__ == '__main__':\n data = np.genfromtxt('training data.txt', delimiter=',')\n x = data[:, 1:]\n y = data[:, 0].astype(int)\n # scaler = StandardScaler()\n # x_std = scaler.fit_transform(x)\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.3)\n print(x_test)\n\n start = time.time()\n svc = SVC(kernel='rbf', class_weight='balanced', )\n c_range = np.logspace(-5, 15, 11, base=2)\n gamma_range = np.logspace(-9, 3, 13, base=2)\n param_grid = [{'kernel': ['rbf'], 'C': c_range, 'gamma': gamma_range}]\n grid = GridSearchCV(svc, param_grid, cv=3, n_jobs=-1)\n clf = grid.fit(x_train, y_train)\n end = time.time()\n print('Running time: %s seconds' % (end - start))\n print(grid.predict(x_test))\n score = grid.score(x_test, y_test)\n print(score)\n # savemap(grid, [0.065, 0.090, 0.009, 0.013, 0.05, 0.1])\n '''\n plot_decision_boundary(clf, axis=[0.009, 0.013, 0.05, 0.1])\n plt.scatter(x[y==1, 0], x[y==1, 1], color=\"red\", label='1st')\n plt.scatter(x[y==30, 0], x[y==30, 1], color=\"blue\", label='30th')\n plt.scatter(x[y==50, 0], x[y==50, 1], color=\"yellow\", label='50th')\n plt.xlabel('R1 (Rsei)')\n plt.ylabel('R2 (Rct)')\n plt.legend()\n plt.show()\n '''\n","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"4133623","text":"import time\nimport serial\nimport requests\nfrom pantilt import *\nimport math\n'''\n programmed by Jongwook Si\n Sensor: TF MINI-PLUS\n\n'''\n\nrequestHeader = bytearray([0x5A, 0x05, 0x05, 0x06, 0x6A])\nresponseHeader = bytearray([0x5A, 0x05, 0x05, 0x06, 0x6A])\nDataHeader = bytearray([0x59, 0x59])\nrequestLength = 100\nlidarBaudrate = 115200\nlidarPort = \"/dev/ttyUSB1\"\n\ndef modify_header(rx_data):\n loc_index = rx_data.find(responseHeader)\n \n if rx_data[loc_index:loc_index+len(responseHeader)] == responseHeader:\n return loc_index\n\ndef calDistance(high, low): \n calD = (low<<8) & 0xff00 | high & 0x00ff\n \n return calD * 0.1\n\ndef 
checkHeader(inputHeader):\n if responseHeader == inputHeader:\n return True\n\n else:\n return False\n\ndef checkDataHeader(inputHeader):\n if DataHeader == inputHeader:\n return True\n\n else:\n return False\n\ndef lidarstart():\n ser = serial.Serial(lidarPort, baudrate = lidarBaudrate, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE, bytesize = serial.EIGHTBITS, timeout=1) \n ser.write(requestHeader)\n rx_data = ser.read(requestLength)\n \n start_index = modify_header(rx_data)\n rx_data = rx_data[start_index:]\n rx_data = rx_data[:14]\n\n if checkHeader(rx_data[:5]):\n if not checkDataHeader(rx_data[5:7]): \n print(\"Invalid Data Header\")\n return -1\n \n\n for i in range(len(rx_data)):\n #print(hex(rx_data[i]), end=\" \")\n if i == 4 :\n print()\n\n rx_data = rx_data[5:]\n\n distance = calDistance(rx_data[2], rx_data[3])\n print()\n \n\n print()\n\n else:\n if len(rx_data) == 0:\n print(\"Empty Data\")\n return -1\n else:\n print(\"Invalid Response Header\")\n return -1\n ser.close()\n return distance\n \n\ndef scan(deg):\n move(deg)\n distance = lidarstart()\n #fix = 8\n #map_d = (distance**2 - fix**2)**0.5 +3\n #print(\"deg:{} {:.1f} cm\".format(deg-10,map_d))\n print(\"Distance {:.1f} cm\".format(distance))\n #f = open(\"distance.txt\", 'w')\n #f.write(str(distance))\n #f.close()\n return round(distance, 1) \n\n \n\n ","sub_path":"tfminiplus.py","file_name":"tfminiplus.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"222300554","text":"#!/usr/bin/env python3\nfrom __future__ import unicode_literals, print_function\n\nimport base64\nimport collections\nimport json\nimport numbers\n\nimport six\n\n\ndef i(e): # identity\n return e\n\n\ndef sorted_dict(input_dict):\n return sorted(list(input_dict), key=lambda d: json.dumps(d, sort_keys=True))\n\n\ndef not_found_default(e):\n raise Exception('Type of %s not found' % repr(e))\n\n\ndef walk_json(e, dict_fct=i, list_fct=i, num_fct=i, str_fct=i, bool_fct=i, null_fct=i, not_found=not_found_default):\n \"\"\"\n Go throught a json and call each function accordingly of the element type\n for each element, the value returned is used for the json output\n This doesn't change the input json, but re-create a new json object.\n (calling it without any function return a copy of a json for example)\n The calling is deep-first.\n ex : ['a', {'b':3}] will call :\n - str_fct('a')\n - num_fct(3)\n - dict_fct({'b':3})\n - list_fct(['a', {'b':3}])\n and if every function is set to return None\n ex : ['a', {'b':3}] will call :\n - str_fct('a')\n - num_fct(3)\n - dict_fct({'b':None})\n - list_fct([None, None])\n :param e:\n :param dict_fct:\n :param list_fct:\n :param num_fct:\n :param str_fct:\n :param bool_fct:\n :param null_fct:\n :param not_found:\n :return:\n \"\"\"\n if e is None:\n return null_fct(e)\n if isinstance(e, six.string_types):\n return str_fct(e)\n if isinstance(e, numbers.Number):\n return num_fct(e)\n if isinstance(e, bool):\n return bool_fct(e)\n\n param = { # only create it when needed\n 'dict_fct': dict_fct, 'list_fct': list_fct, 'num_fct': num_fct,\n 'str_fct': str_fct, 'bool_fct': bool_fct, 'null_fct': num_fct,\n 'not_found': not_found,\n }\n\n if isinstance(e, collections.Mapping):\n return dict_fct({k: walk_json(v, **param) for k, v in e.items()})\n if isinstance(e, collections.Iterable):\n return list_fct([walk_json(v, **param) for v in e])\n return not_found(e)\n\n\n# Used for the json_default as \"every element under 
this one\"\n\ndef json_default(j, value, *path):\n \"\"\"\n Put a default in place of a json\n :param j: the json value\n :param value: the value to set as default\n :param path: the path leading to the value\n \"\"\"\n head, rest = path[0], path[1:]\n is_star = head == '*'\n if not rest:\n if not is_star:\n j[head] = j.get(head, value)\n else:\n assert False # how to manage that one ?\n else:\n if not is_star:\n json_default(j[head], value, *rest)\n else:\n # works for list now\n for e in j:\n json_default(e, value, *rest)\n\n\n# def base64_decode(input_str):\n# return base64.encodebytes(input_str.encode('UTF-8')).decode('UTF-8')\n\ndef base64_decode(input_str):\n return base64.decodebytes(input_str.encode('UTF-8')).decode('UTF-8')\n","sub_path":"aws_stack_diff_tool/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606688647","text":"import datetime\nimport sublime_plugin\nclass AddInfoCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n self.view.run_command(\"insert_snippet\",\n {\n \"contents\": \"/**\"\"\\n\"\n \" * @Title: title\"\"\\n\"\n \" * @Description: Description\"\"\\n\"\n \" * @author: ly\"\"\\n\"\n \" * @aateTime: \" \"%s\" %datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") +\"\\n\"\n \" * @param: \"\"\\n\"\n \" * @return \"\"\\n\"\n \" */\"\n }\n )","sub_path":"Packages/User/addInfo.py","file_name":"addInfo.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"337446106","text":"class Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: a list of 1 length strings (List[str])\n :rtype: nothing\n \"\"\"\n # Op1 O(n) runtime, O(1) space:\n self.reverse(s, 0, len(s))\n i = 0\n for j in range(len(s) + 1):\n if j == len(s) or s[j] == ' ':\n self.reverse(s, i, j)\n i = j + 1\n print(s)\n\n def reverse(self, s, begin, end):\n for i in range((end - begin) // 2):\n s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]\n\n def reverseWords2(self, s):\n # using built-in library function\n s.reverse()\n i = 0\n for j in range(len(s) + 1):\n if j == len(s) or s[j] == ' ':\n s[i:j] = s[i:j][::-1]\n i = j + 1\n print(s)\n\n\ntest = Solution()\nprint(test.reverseWords(['h', 'i', ' ', 'h', 'o', 'w', ' ', 'a', 'r', 'e', ' ', 'u']))\nprint(test.reverseWords2(['h', 'i', ' ', 'h', 'o', 'w', ' ', 'a', 'r', 'e', ' ', 'u']))\n","sub_path":"python/186 Reverse Words in a String II.py","file_name":"186 Reverse Words in a String II.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"98416761","text":"\"\"\"\n贪心法,维护一个最大收益和最小价格,然后遍历,遇到更小的价格,更新最小价格,如果当前价格减去最小价格大于最大收益,更新最大收益\n\"\"\"\n\nclass Solution:\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if len(prices) == 0 or len(prices) == 1:\n return 0\n max_profit = 0\n record_min = prices[0]\n for i in prices:\n if i <= record_min:\n record_min = i\n continue\n if i - record_min > max_profit:\n max_profit = i - record_min\n return max_profit\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.maxProfit([7,6,4,3,1]))","sub_path":"101-200/121.py","file_name":"121.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"399228556","text":"'''\nA class for storing 
the message history.\n\n'''\n\nfrom collections import deque\nfrom itertools import islice\nfrom typing import Dict\n\n\nclass subHistory:\n def __init__(self, _maxSize):\n self.msg_queue = deque()\n self.max_size = _maxSize\n\n def push_message(self, _text):\n if len(self.msg_queue) == self.max_size:\n self.msg_queue.pop()\n\n self.msg_queue.appendleft(_text)\n\n def get_message(self, _num=1):\n '''\n A method for get history\n :param _num: the number of history need to get\n :return: an iterable\n '''\n # If the buffer is too small\n if _num > self.max_size:\n print(\"The history buffer is not large enough!\\n\")\n return\n\n # If the messages are not enough\n if _num > len(self.msg_queue):\n print(\"There is not enough messages!\\n\")\n return\n\n return islice(self.msg_queue, 0, _num)\n\n\nclass HistoryQueue:\n def __init__(self, _maxSize):\n self.topic_history: Dict[str, subHistory] = dict()\n self.max_size = _maxSize\n\n def push_history(self, _topic, _message):\n\n if not(_topic in self.topic_history):\n self.topic_history[_topic] = subHistory(self.max_size)\n\n self.topic_history[_topic].push_message(_message)\n\n def get_history(self, _topic, _history_num):\n if not(_topic in self.topic_history):\n print(\"This topic does not exist! \\n\")\n return\n else:\n if self.topic_history[_topic].get_message(_history_num):\n return list(self.topic_history[_topic].get_message(_history_num))\n else:\n print(\"There is no history for this topic!\\n\")\n return None\n\n def get_all_history(self):\n result = list()\n for key in self.topic_history.keys():\n localHis = \",\".join(list(self.topic_history[key].msg_queue))\n result.append(key + \"#\" + localHis)\n\n return '&'.join(result)\n\n def input_all_history(self, history_str):\n if history_str is None:\n print(\"The history input is empty\")\n return\n\n # Clean the dictionary\n self.topic_history.clear()\n\n historyList = history_str.split('&')\n for item in historyList:\n localHis = item.split('#')\n topic = localHis[0]\n self.topic_history[topic] = subHistory(self.max_size)\n\n content = localHis[1].split(',')\n content.reverse()\n for element in content:\n self.topic_history[topic].push_message(element)\n\n\nif __name__ == \"__main__\":\n test = HistoryQueue(5)\n\n test.push_history('1', '2')\n test.push_history('2', 'abc')\n test.push_history('3', '122323')\n test.push_history('3', 'qwere')\n print(list(test.get_history('1', 1)))\n test.get_history('1', 3)\n test.get_history('2', 1)\n print(test.get_history('3', 2))\n\n mystr = test.get_all_history()\n print(mystr)\n test.input_all_history(mystr)\n\n print(list(test.get_history('1', 1)))\n test.get_history('1', 3)\n test.get_history('2', 1)\n print(test.get_history('3', 2))\n","sub_path":"Assignment3/HistoryQueue.py","file_name":"HistoryQueue.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"55638908","text":"from animation import Animation\n\nimport numpy as np\nimport time\n\nclass SpiralOutFast(Animation):\n layout_type = \"Layout\"\n\n def __init__(self, layout, \n r_slowness=1.2, \n g_slowness=1.7, \n b_slowness=2, \n overall_slowness=0.5\n ):\n super(SpiralOutFast, self).__init__()\n self.layout = layout\n\n self.add_param(\"r_slowness\", r_slowness, 0, 3)\n self.add_param(\"g_slowness\", g_slowness, 0, 3)\n self.add_param(\"b_slowness\", b_slowness, 0, 3)\n self.add_param(\"overall_slowness\", overall_slowness, 0, 10)\n\n self.t = np.array([0,0], dtype=np.float16)\n self.r = 
np.array([0,0], dtype=np.float16)\n self.g = np.array([0,0], dtype=np.float16)\n self.b = np.array([0,0], dtype=np.float16)\n\n self.buff_len = 1500\n\n self.start_time = np.float(time.time())\n self.previous_time = np.float16(time.time())\n self.domain = np.linspace(0,1,len(self.layout.pixels))\n\n def update(self):\n overall_slowness = self.params[\"overall_slowness\"].value\n r_slowness = overall_slowness*self.params[\"r_slowness\"].value\n g_slowness = overall_slowness*self.params[\"g_slowness\"].value\n b_slowness = overall_slowness*self.params[\"b_slowness\"].value\n\n current_time = np.float16((time.time() - self.start_time))\n dt = current_time - self.previous_time\n self.previous_time = current_time\n\n self.t = np.append(self.t, current_time)\n \n self.r = np.append(self.r, np.mean([self.fft[0], self.fft[1]], dtype=np.float16))\n self.g = np.append(self.g, np.mean([self.fft[2], self.fft[3]], dtype=np.float16))\n self.b = np.append(self.b, np.mean([self.fft[4], self.fft[5], self.fft[6]], dtype=np.float16))\n\n if len(self.t) > self.buff_len:\n self.t = self.t[1:]\n self.r = self.r[1:]\n self.g = self.g[1:]\n self.b = self.b[1:]\n\n domain_r = np.linspace(current_time, current_time - r_slowness, len(self.pixels)) \n domain_g = np.linspace(current_time, current_time - g_slowness, len(self.pixels)) \n domain_b = np.linspace(current_time, current_time - b_slowness, len(self.pixels))\n\n r = np.interp(domain_r, self.t, self.r)\n g = np.interp(domain_r, self.t, self.g)\n b = np.interp(domain_r, self.t, self.b)\n\n for i in range(len(self.pixels)):\n self.layout.pixels[i].color = (r[i], g[i], b[i])\n","sub_path":"core/animations/spiral_out_fast.py","file_name":"spiral_out_fast.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551301130","text":"##############################################################################\n#\n# Copyright (c) 2009 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"XDG base directory helpers\n\n$Id$\n\"\"\"\nimport os\n\nXDG_DATA_HOME = os.environ.get(\n 'XDG_DATA_HOME', os.path.join(os.environ.get('HOME', '/'), '.local', 'share'))\nXDG_DATA_DIRS = [XDG_DATA_HOME] + \\\n [dir for dir in\n os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')\n if dir]\n\ndef iterDataPaths(*resource):\n \"\"\"Iterate over all ``data`` paths as defined by XDG basedir standard\"\"\"\n\n resource = os.path.join(*resource)\n for data_dir in XDG_DATA_DIRS:\n path = os.path.join(data_dir, resource)\n if os.path.exists(path):\n yield path\n","sub_path":"z3c.sharedmimeinfo/trunk/src/z3c/sharedmimeinfo/basedir.py","file_name":"basedir.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142777637","text":"import requests\nimport datetime\nimport services\n\n#colours\ngreen = '\\033[92m'\ncyan = '\\033[95m'\nbold = '\\033[1m'\nunderline = '\\033[4m'\nend = '\\033[0m'\nred = '\\033[91m'\n\n#header\nprint(f\"{green}{bold}\\t\\t{underline}[Megumin?!]{end}\")\n\nprint()\nprint(f\"{bold}fixed by{end}\", end=\"\")\nprint(f\"{green}{bold} >> {end}\", end = \"\")\nprint(f\"{cyan}{bold}sy{end}\")\n\n# Logo\nprint('''\n\n▒█▀▄▀█ ░█▀▀█ ▒█▀▀█ ▒█▀▀▀ ▒█▄░▒█ ▀▀█▀▀ ░█▀▀█ ▒█▀▀▀█ ▒█▀▄▀█ ▒█▀▀█ \n▒█▒█▒█ ▒█▄▄█ ▒█░▄▄ ▒█▀▀▀ ▒█▒█▒█ ░▒█░░ ▒█▄▄█ ░▀▀▀▄▄ ▒█▒█▒█ ▒█▀▀▄ \n▒█░░▒█ ▒█░▒█ ▒█▄▄█ ▒█▄▄▄ ▒█░░▀█ ░▒█░░ ▒█░▒█ ▒█▄▄▄█ ▒█░░▒█ ▒█▄▄█\n\n''')\n\n#inputs\nprint('only rus\\nnumber\\nexample: 9996667711')\ninput_number = input(green + bold + '>> ' + end)\nprint('how???')\nsms = int(input(green + bold + '>> ' + end))\n\n\n\ndef parse_number(number):\n\tmsg = f\"[*]check number - {green}{bold}OK{end}\"\n\tif int(len(number)) in (10, 11, 12):\n\t\tif number[0] == \"8\":\n\t\t\tnumber = number[1:]\n\t\t\tprint(msg)\n\t\telif number[:2] == \"+7\":\n\t\t\tnumber = number[2:]\n\t\t\tprint(msg)\n\t\telif int(len(number)) == 10 and number[0] == 9:\n\t\t\tprint(msg)\n\telse:\n\t\tprint(f\"[*]check number - {red}{bold}failed number!{end}\\nThis bomber is intended only for Russia and if the number you entered belongs to another country then alas this bomber is not suitable for you!\")\n\t\tquit()\n\treturn number\nnumber = parse_number(input_number)\n\n\n\nservices.attack(number, sms)\n","sub_path":"magentasmb.py","file_name":"magentasmb.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"138799494","text":"import os\nimport re\nfrom flask import *\nimport mistletoe\nimport bleach\nimport time\nfrom .db_prep import c, db\nimport json\nimport requests\n\n#Sanitization object used throughout module\ntags=bleach.sanitizer.ALLOWED_TAGS+['p', 'h1','h2','h3','h4','h5','h6','hr','br','table','tr','th','td','del','thead','tbody','tfoot','pre','div','span','img', 'sup','sub']\nattrs=bleach.sanitizer.ALLOWED_ATTRIBUTES\nattrs['*']=[\"class\",\"style\", \"title\"]\nattrs['img']=[\"height\",\"width\",\"alt\",\"src\"]\nstyles=['white-space',\"border\",\"border-radius\",\"text-align\",\"align\", 
\"float\",\"margin\",\"padding\"]\nCleaner=bleach.sanitizer.Cleaner(tags=tags, attributes=attrs, styles=styles)\n\n#SQL timestamp to readable time string\ndef time_string(timestamp):\n if timestamp is None:\n return None\n try:\n t=str(timestamp).split('.')[0]\n t=time.strptime(t,\"%Y-%m-%d %H:%M:%S\")\n t=time.strftime(\"%d %B %Y at %H:%M:%S\",t)\n except:\n return None\n return t\n\nclass User():\n\n def __init__(self, name=\"\", uid=0, token=None, make=False):\n\n if not(name or uid or token):\n raise ValueError(\"One of name or uid must be provided\")\n\n #check database\n #sanitize name and token\n if name:\n name=re.search(\"^[A-Za-z0-9_-]+\", name).group(0)\n \n if token:\n c.execute(\"EXECUTE GetUserByToken(%s)\", (token,)) \n elif name:\n c.execute(\"EXECUTE GetUserByName(%s)\", (name,))\n elif uid:\n c.execute(\"EXECUTE GetUserById(%s)\", (uid,))\n\n result=c.fetchone()\n\n if result is None:\n if make and name:\n c.execute(\"EXECUTE MakeUser(%s)\", (name,))\n db.commit()\n result=c.fetchone()\n else:\n raise KeyError(\"User not found\")\n\n self.id=int(result[0])\n self.name=result[1]\n self.created=result[2]\n self.google_analytics=result[3]\n self.banned=bool(result[4])\n self.admin=bool(result[5])\n self.agreed=bool(result[6])\n self.patreon=result[8]\n self.over18=result[9]\n self.patreon_id=int(result[10])\n self.patreon_token=result[11]\n self.patreon_campaign_id=int(result[12])\n self.patreon_refresh_token=result[13]\n \n self.url=\"/u/{}\".format(self.name)\n self.created_date=time_string(self.created).split(\" at \")[0]\n\n def set_patreon(self, name, pid, token, refresh, cid):\n c.execute(\"EXECUTE SetPatreon(%s, %s, %s, %s, %s, %s)\", (self.id, pid, name, token, refresh, cid))\n db.commit()\n\n def update_patreon_token(self):\n\n url=\"https://www.patreon.com/api/oauth2/token\"\n params={\"grant_type\":\"refresh_token\",\n \"refresh_token\":self.patreon_refresh_token,\n \"client_id\":os.environ.get(\"patreon_id\"),\n \"client_secret\":os.environ.get(\"patreon_secret\")\n }\n\n x=requests.post(url, params=params)\n j=x.json()\n self.patreon_token=j['access_token']\n self.patreon_refresh_token=j['refresh_token']\n\n c.execute(\"EXECUTE SetPatreonTokens(%s,%s,%s)\", (self.id, self.patreon_token, self.patreon_refresh_token))\n db.commit()\n \n\n def set_google(self, tracking_id):\n\n if tracking_id:\n c.execute(\"EXECUTE SetGoogle(%s, %s)\", (self.id, tracking_id))\n else:\n c.execute(\"EXECUTE SetGoogle(%s, %s)\", (self.id, \"\"))\n db.commit()\n \n def tos_agree(self):\n c.execute(\"UPDATE Users SET agreed='true' WHERE id=%s\",(self.id,))\n self.agreed=True\n db.commit()\n\t\n def update_token(self, token):\n c.execute(\"EXECUTE UpdateToken(%s,%s)\", (self.id, token))\n db.commit()\n\n def render_userpage(self, v=None):\n\n return render_template('userpage.html', u=self, stories=self.stories(), v=v)\n\n def stories(self):\n \n c.execute(\"EXECUTE GetStoriesByAuthorId(%s)\", (self.id,))\n \n for l in c.fetchall():\n yield Story(result=l)\n\n def books(self):\n\n c.execute(\"EXECUTE GetBooksByAuthorId(%s)\", (self.id,))\n for l in c.fetchall():\n yield Book(result=l)\n\n def ban(self):\n\n c.execute(\"EXECUTE BanUser(%s,%s)\", (self.id, True))\n db.commit()\n\n def unban(self):\n\n c.execute(\"EXECUTE BanUser(%s,%s)\", (self.id, False))\n db.commit()\n\n def set_over18(self, over18=False):\n c.execute(\"EXECUTE SetOver18(%s, %s)\", (self.id, over18))\n \n def json(self):\n \n output = self.__dict__\n \n if not self.banned:\n stories=[]\n books=[]\n for s in 
self.stories():\n if not s.banned and not s.deleted:\n stories.append(s.id)\n for b in self.books():\n if not b.banned and not b.deleted:\n books.append(b.id)\n output['stories']=stories\n output['books']=books\n \n output.pop(\"patreon_token\")\n output.pop(\"patreon_id\")\n output.pop(\"patreon_refresh_token\")\n output.pop(\"agreed\")\n output.pop(\"google_analytics\")\n output.pop(\"over18\")\n \n return output\n\nclass Story():\n\n def __init__(self, sid=0, result=None, load_author=False):\n\n if result is None:\n #sanitize id\n sid=re.search(\"^[0-9]+\", str(sid)).group(0)\n\n #check database\n c.execute(\"EXECUTE GetStoryById(%s)\", (sid,))\n result=c.fetchone()\n\n if result is None:\n raise KeyError('story with that id does not exist')\n\n\n self.id=int(result[0])\n self.created=result[1]\n self.pre=result[2]\n self.story=result[3]\n self.post=result[4]\n self.banned=bool(result[5])\n self.title=result[6]\n self.author_id=int(result[7])\n self.deleted=bool(result[8])\n self._pre_raw=result[9]\n self._story_raw=result[10]\n self._post_raw=result[11]\n self.book_id=int(result[12])\n self.nsfw=bool(result[13])\n self.patreon_threshold=int(result[14])\n self.edited=result[15]\n self.distinguished=bool(result[16])\n self.reddit=result[17]\n self.subreddit=result[18]\n \n self.url=\"/s/{}\".format(self.id)\n self.created_date=time_string(self.created)\n self.edited_date=time_string(self.edited)\n\n if load_author:\n self.author=User(uid=self.author_id)\n else:\n self.author=None\n\n def set_reddit(self, reddit_id, subreddit):\n\n c.execute(\"EXECUTE SetReddit(%s,%s,%s)\",(self.id, reddit_id, subreddit))\n \n def json(self):\n \n output=self.__dict__\n \n output.pop(\"_pre_raw\")\n output.pop(\"_story_raw\")\n output.pop(\"_post_raw\")\n output.pop(\"author\")\n \n if self.banned or self.deleted or self.patreon_threshold:\n output.pop(\"pre\")\n output.pop(\"story\")\n output.pop(\"post\")\n \n return output\n \n def set_nsfw(self, nsfw=False):\n c.execute(\"EXECUTE SetNSFW(%s, %s)\", (self.id, nsfw))\n db.commit()\n self.nsfw=nsfw\n\n def set_patreon_threshold(self, cents):\n c.execute(\"EXECUTE SetPatreonThreshold(%s,%s)\", (self.id, cents))\n db.commit()\n self.patreon_threshold=cents\n\n def book(self):\n\n if self.book_id==0:\n return None\n \n return Book(bid=self.book_id) \n\n def next(self):\n\n if self.book_id==0:\n return None\n\n c.execute(\"SELECT * FROM Stories WHERE book_id=%s AND id>%s ORDER BY id ASC LIMIT 1\", (self.book_id, self.id))\n result=c.fetchone()\n if result is None:\n return None\n\n return Story(result=result)\n\n def previous(self):\n\n if self.book_id==0:\n return None\n\n c.execute(\"SELECT * FROM Stories WHERE book_id=%s AND id<%s ORDER BY id DESC LIMIT 1\", (self.book_id, self.id))\n result=c.fetchone()\n if result is None:\n return None\n\n return Story(result=result)\n \n def process(self):\n \n #render markdown\n self.pre=mistletoe.markdown(self._pre_raw)\n self.story=mistletoe.markdown(self._story_raw)\n self.post=mistletoe.markdown(self._post_raw)\n\n #sanitize html\n self.title=Cleaner.clean(self.title)\n self.pre=Cleaner.clean(self.pre)\n self.story=Cleaner.clean(self.story)\n self.post=Cleaner.clean(self.post)\n\n def save(self):\n\n if self.id!=-1:\n raise Exception(\"This story seems to already exist. 
Use `edit()` instead.\")\n\n self.process()\n c.execute(\"EXECUTE MakeStory(%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (self.author_id, self.title, self.pre, self.story, self.post, self._pre_raw, self._story_raw, self._post_raw, self.book_id))\n data=c.fetchone()\n db.commit()\n s=Story(result=data)\n return s\n \n def edit(self, title, pre, story, post):\n\t\t\n if self.id==-1:\n raise KeyError(\"This story does not yet exist. Use `save()` instead.\")\n\n if title==self.title and pre==self._pre_raw and story==self._story_raw and post == self._post_raw:\n return\n\t\n self.title=title\n\t\n self._pre_raw=pre\n self._story_raw=story\n self._post_raw=post\n self.process()\n \n c.execute(\"EXECUTE EditStory(%s,%s,%s,%s,%s,%s,%s,%s)\", (self.id, self.pre, self.story, self.post, self._pre_raw, self._story_raw, self._post_raw, self.title))\n db.commit()\n\n def set_book(self, bid):\n\n if bid==0:\n c.execute(\"UPDATE Stories SET book_id=0 WHERE id=%s\", (self.id,))\n db.commit()\n return\n\n b=Book(bid=bid, load_author=True)\n if b.author_id!=self.author_id and not(bid==4 and self.author.admin):\n abort(403)\n\n c.execute(\"UPDATE Stories SET book_id=%s WHERE id=%s\", (bid, self.id))\n db.commit()\n\n def render_storypage(self, over18=False, v=None):\n\n cent_string=str(self.patreon_threshold).rjust(3,'0')\n d=str(self.patreon_threshold)[0:-2]\n c=str(self.patreon_threshold)[-2:]\n pledge_valid=True\n\n if self.patreon_threshold and self.author.patreon_campaign_id:\n if not v:\n pledge_cents=0\n elif not v.patreon_token:\n pledge_cents=0\n else:\n # Hit Patreon API to determine pledge cents\n header={\"Authorization\":\"Bearer {}\".format(self.author.patreon_token)}\n params={\"include\":\"campaign,user\",\n \"fields[member]\":\"currently_entitled_amount_cents,last_charge_status\",\n \"page[count]\":2000}\n url=\"https://www.patreon.com/api/oauth2/v2/campaigns/{}/members\".format(self.author.patreon_campaign_id)\n x=requests.get(url, headers=header, params=params)\n if x.status_code!=200:\n #if token has expired, refresh it and try again\n self.author.update_patreon_token()\n header={\"Authorization\":\"Bearer {}\".format(self.author.patreon_token)}\n x=requests.get(url, headers=header, params=params)\n \n j=x.json()\n for entry in j['data']:\n if entry['relationships']['user']['data']['id']!=str(v.patreon_id):\n continue\n pledge_cents=entry['attributes']['currently_entitled_amount_cents']\n if entry['attributes']['last_charge_status'] not in [\"Paid\",None]:\n pledge_valid=False\n break\n else:\n pledge_cents=0\n \n else:\n pledge_cents=0\n\n return render_template('storypage.html', v=v, d=d, c=c, pledge_cents=pledge_cents, pledge_valid=pledge_valid, over18=over18, s=self)\n\n def ban(self):\n\n c.execute(\"EXECUTE BanStory(%s, %s)\", (self.id, True))\n db.commit()\n\n def unban(self):\n\n c.execute(\"EXECUTE BanStory(%s, %s)\", (self.id, False))\n db.commit()\n\n def delete(self):\n c.execute(\"EXECUTE DeleteStory(%s, %s)\", (self.id, True))\n db.commit()\n\n def undelete(self):\n\n c.execute(\"EXECUTE DeleteStory(%s, %s)\", (self.id, False))\n db.commit()\n\nclass Listing():\n\n def __init__(self, kind=\"new\"):\n self.kind=kind\n \n if kind=='new':\n c.execute(\"SELECT * FROM Stories WHERE banned='false' AND deleted='false' AND book_id<>4 ORDER BY id DESC LIMIT 15\")\n elif kind=='news':\n c.execute(\"SELECT * FROM Stories WHERE banned='false' AND deleted='false' AND book_id=4 ORDER BY id DESC LIMIT 5\")\n self.raw=c.fetchall()\n\n def __iter__(self):\n\n \n for entry in self.raw:\n yield 
Story(result=entry, load_author=True)\n \nclass Book():\n\n def __init__(self, bid=0, result=None, load_author=None):\n\n if result is None:\n #sanitize id\n sid=re.search(\"^[0-9]+\", str(bid)).group(0)\n\n #check database\n c.execute(\"EXECUTE GetBookById(%s)\", (bid,))\n result=c.fetchone()\n\n if result is None:\n raise KeyError('book with that id does not exist')\n \n self.id=int(result[0])\n self.title=result[1]\n self.author_id=result[2]\n self.description=result[3]\n self._description_raw=result[4]\n self.created=result[5]\n self.banned=result[6]\n self.deleted=result[7]\n self.edited=result[8]\n self.distinguished=bool(result[9])\n\n self.created_date=time_string(self.created)\n self.edited_date=time_string(self.edited)\n self.url=\"/b/{}\".format(str(self.id))\n\n if load_author:\n self.author=User(uid=self.author_id)\n else:\n self.author=None\n\n def json(self):\n \n output=self.__dict__\n \n output.pop(\"_description_raw\")\n output.pop(\"author\")\n \n if self.banned or self.deleted:\n output.pop(self.description)\n else:\n stories=[]\n for s in self.stories():\n if not s.banned and not s.deleted:\n stories.append(s.id)\n output['stories']=stories\n \n return output\n\n def save(self):\n \n self.title=Cleaner.clean(self.title)\n self.description=Cleaner.clean(mistletoe.markdown(self._description_raw))\n\n c.execute(\"EXECUTE MakeBook(%s,%s,%s,%s)\",(self.title, self.author_id, self.description, self._description_raw))\n data=c.fetchone()\n db.commit()\n b=Book(result=data)\n return b\n\n def edit(self, title, description):\n\n if title==self.title and description==self._description_raw:\n return\n\n self.title=Cleaner.clean(title)\n self._description_raw=description\n self.description=Cleaner.clean(mistletoe.markdown(self._description_raw))\n\n c.execute(\"EXECUTE EditBook(%s, %s, %s, %s)\", (self.title, self.description, self._description_raw, self.id))\n db.commit()\n \n\n def stories(self):\n\n c.execute(\"EXECUTE GetStoriesByBook(%s)\",(self.id,))\n for entry in c.fetchall():\n yield Story(result=entry)\n\n def render_bookpage(self, v=None):\n \n return render_template('bookpage.html', b=self, v=v)\n\n def ban(self):\n\n c.execute(\"EXECUTE BanBook(%s, 'true')\",(self.id,))\n db.commit()\n\n def unban(self):\n\n c.execute(\"EXECUTE BanBook(%s, 'false')\",(self.id,))\n db.commit()\n\n def delete(self):\n\n c.execute(\"EXECUTE DeleteBook(%s, 'true')\",(self.id,))\n db.commit()\n\n def undelete(self):\n\n c.execute(\"EXECUTE DeleteBook(%s, 'false')\",(self.id,))\n db.commit()\n","sub_path":"logbook/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":16029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533169962","text":"'''This script loads pre-trained word embeddings (GloVe embeddings) into a\nfrozen Keras Embedding layer, and uses it to train a text classification model\non the 20 Newsgroup dataset.\n(classification of newsgroup messages into 20 different categories).\n\nGloVe embedding data can be found at:\n http://nlp.stanford.edu/data/glove.6B.zip\n(source page: http://nlp.stanford.edu/projects/glove/)\n\n20 Newsgroup data can be found at:\n http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.html\n\nBlog:\n - https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html\n - https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/\n\nRun:\n $ wget http://nlp.stanford.edu/data/glove.6B.zip\n $ zip glove.6B.zip\n\n $ wget 
http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.tar.gz\n $ tar xvzf news20.tar.gz\n\n $ BASE_DIR=$HOME/code/dataset python pretrained_word_embedding.py\n'''\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras.layers import Dense, Input, GlobalMaxPooling1D\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding\nfrom keras.models import Model\nfrom keras.initializers import Constant\n\n\nBASE_DIR = os.environ['BASE_DIR']\nGLOVE_DIR = os.path.join(BASE_DIR, 'glove.6B')\nTEXT_DATA_DIR = os.path.join(BASE_DIR, '20_newsgroup')\nMAX_SEQUENCE_LENGTH = 1000\nMAX_NUM_WORDS = 20000\nEMBEDDING_DIM = 100\nVALIDATION_SPLIT = 0.2\n\n# first, build index mapping words in the embeddings set\n# to their embedding vector\n\nprint('Indexing word vectors.')\n\nembeddings_index = {}\nwith open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n# there's a total of 40k words, and each embedding vector\n# is 100 dimension (i.e. *.100d.txt)\nprint('Found %s word vectors.' % len(embeddings_index))\nprint('Dimension of each vector %s.' % len(embeddings_index['the']))\nprint('Dimension of each vector %s.' % len(embeddings_index['example']))\nprint('Dimension of each vector %s.' % len(embeddings_index['word']))\n\n# second, prepare text samples and their labels\nprint('Processing text dataset')\n\ntexts = [] # list of text samples\nlabels_index = {} # dictionary mapping label name to numeric id\nlabels = [] # list of label ids\nfor name in sorted(os.listdir(TEXT_DATA_DIR)):\n path = os.path.join(TEXT_DATA_DIR, name)\n if os.path.isdir(path):\n label_id = len(labels_index)\n labels_index[name] = label_id\n for fname in sorted(os.listdir(path)):\n if fname.isdigit():\n fpath = os.path.join(path, fname)\n args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}\n with open(fpath, **args) as f:\n t = f.read()\n i = t.find('\\n\\n') # skip header\n if 0 < i:\n t = t[i:]\n texts.append(t)\n labels.append(label_id)\n\nprint('Found %s texts and %s labels.' % (len(texts), len(labels)))\nprint('Label names:', list(labels_index.keys()))\n\nprint(\"Number of raw texts %s,\" % len(texts),\n \"Length of 1st text %s,\" % len(texts[0]),\n \"Length of 2nd text %s, ...\" % len(texts[1]))\n\n# finally, vectorize the text samples into a 2D integer tensor\ntokenizer = Tokenizer(num_words=MAX_NUM_WORDS)\ntokenizer.fit_on_texts(texts)\nsequences = tokenizer.texts_to_sequences(texts)\n\n# text is character-level and sequence is word-level, or token-level\nprint(\"Number of raw sequences %s,\" % len(sequences),\n \"Length of 1st sequence %s,\" % len(sequences[0]),\n \"Length of 2nd sequence %s, ...\" % len(sequences[1]))\n\n# word_index: A dictionary of words and their uniquely assigned integers\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\n\n# ensure that all sequences in a list have the same length.\ndata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\nlabels = to_categorical(np.asarray(labels))\nprint('Shape of data tensor:', data.shape) # (len(sequences), maxlen)\nprint('Shape of label tensor:', labels.shape) # (len(sequences), len(categories))\n\n# split the data into a training set and a validation set\nindices = np.arange(data.shape[0])\nnp.random.shuffle(indices)\ndata = data[indices]\nlabels = labels[indices]\nnum_validation_samples = int(VALIDATION_SPLIT * data.shape[0])\n\nx_train = data[:-num_validation_samples]\ny_train = labels[:-num_validation_samples]\nx_val = data[-num_validation_samples:]\ny_val = labels[-num_validation_samples:]\n\nprint('Preparing embedding matrix.')\n\n# prepare embedding matrix\nnum_words = min(MAX_NUM_WORDS, len(word_index)) + 1\nembedding_matrix = np.zeros((num_words, EMBEDDING_DIM))\nfor word, i in word_index.items():\n if i > MAX_NUM_WORDS:\n continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n# load pre-trained word embeddings into an Embedding layer\n# note that we set trainable = False so as to keep the embeddings fixed\nembedding_layer = Embedding(num_words,\n EMBEDDING_DIM,\n embeddings_initializer=Constant(embedding_matrix),\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False)\n\nprint('Training model.')\n\n# An Embedding layer should be fed sequences of integers, i.e. a 2D input of\n# shape (samples, indices). These input sequences should be padded so that\n# they all have the same length in a batch of input data (although an Embedding\n# layer is capable of processing sequence of heterogenous length, if you don't\n# pass an explicit input_length argument to the layer).\n\n# All that the Embedding layer does is to map the integer inputs to the vectors\n# found at the corresponding index in the embedding matrix, i.e. the sequence\n# [1, 2] would be converted to [embeddings[1], embeddings[2]]. 
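In other words, the layer acts as a lookup\n# table: token id i selects row i of the embedding matrix built above. 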
This means that\n# the output of the Embedding layer will be a 3D tensor of shape (samples,\n# sequence_length, embedding_dim).\n\n# train a 1D convnet with global maxpooling\nsequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\nembedded_sequences = embedding_layer(sequence_input)\nx = Conv1D(128, 5, activation='relu')(embedded_sequences)\nx = MaxPooling1D(5)(x)\nx = Conv1D(128, 5, activation='relu')(x)\nx = MaxPooling1D(5)(x)\nx = Conv1D(128, 5, activation='relu')(x)\nx = GlobalMaxPooling1D()(x)\nx = Dense(128, activation='relu')(x)\npreds = Dense(len(labels_index), activation='softmax')(x)\n\nmodel = Model(sequence_input, preds)\nmodel.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['acc'])\n\nmodel.fit(x_train, y_train,\n batch_size=128,\n epochs=10,\n validation_data=(x_val, y_val))\n","sub_path":"ml-system/frameworks/keras/experiments/pretrained_word_embedding.py","file_name":"pretrained_word_embedding.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"594208985","text":"import sys\n\n\ndef comb(k,N):\n if k == N:\n print(chosen)\n return \n for i in range(k+1,N):\n comb\n\n \n\n\nsys.stdin = open('input3.txt','r')\nN,M = map(int,input().split())\nm = [list(map(int,input().split())) for _ in range(N)]\n\nr = []\nfor y in range(N):\n for x in range(N):\n if m[y][x] != 0:\n r.append([y,x])\nn = len(r)\nused = [0]*n\nchosen = []\nprint(used)\nprint(r)\n\n","sub_path":"9월/0902/치킨 배달.py","file_name":"치킨 배달.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"43264570","text":"from typing import List\n\nfrom torch import Tensor\nfrom torch import nn\n\n__all__ = [\"_ResBlock\", \"_MelResNet\", \"_Stretch2d\", \"_UpsampleNetwork\"]\n\n\nclass _ResBlock(nn.Module):\n r\"\"\"ResNet block based on \"Deep Residual Learning for Image Recognition\"\n\n The paper link is https://arxiv.org/pdf/1512.03385.pdf.\n\n Args:\n n_freq: the number of bins in a spectrogram (default=128)\n\n Examples\n >>> resblock = _ResBlock()\n >>> input = torch.rand(10, 128, 512) # a random spectrogram\n >>> output = resblock(input) # shape: (10, 128, 512)\n \"\"\"\n\n def __init__(self, n_freq: int = 128) -> None:\n super().__init__()\n\n self.resblock_model = nn.Sequential(\n nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),\n nn.BatchNorm1d(n_freq),\n nn.ReLU(inplace=True),\n nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),\n nn.BatchNorm1d(n_freq)\n )\n\n def forward(self, specgram: Tensor) -> Tensor:\n r\"\"\"Pass the input through the _ResBlock layer.\n Args:\n specgram (Tensor): the input sequence to the _ResBlock layer (n_batch, n_freq, n_time).\n\n Return:\n Tensor shape: (n_batch, n_freq, n_time)\n \"\"\"\n\n return self.resblock_model(specgram) + specgram\n\n\nclass _MelResNet(nn.Module):\n r\"\"\"MelResNet layer uses a stack of ResBlocks on spectrogram.\n\n Args:\n n_res_block: the number of ResBlock in stack (default=10)\n n_freq: the number of bins in a spectrogram (default=128)\n n_hidden: the number of hidden dimensions (default=128)\n n_output: the number of output dimensions (default=128)\n kernel_size: the number of kernel size in the first Conv1d layer (default=5)\n\n Examples\n >>> melresnet = _MelResNet()\n >>> input = torch.rand(10, 128, 512) # a random spectrogram\n >>> output = melresnet(input) # shape: 
(10, 128, 508)\n \"\"\"\n\n def __init__(self,\n n_res_block: int = 10,\n n_freq: int = 128,\n n_hidden: int = 128,\n n_output: int = 128,\n kernel_size: int = 5) -> None:\n super().__init__()\n\n ResBlocks = [_ResBlock(n_hidden) for _ in range(n_res_block)]\n\n self.melresnet_model = nn.Sequential(\n nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),\n nn.BatchNorm1d(n_hidden),\n nn.ReLU(inplace=True),\n *ResBlocks,\n nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)\n )\n\n def forward(self, specgram: Tensor) -> Tensor:\n r\"\"\"Pass the input through the _MelResNet layer.\n Args:\n specgram (Tensor): the input sequence to the _MelResNet layer (n_batch, n_freq, n_time).\n\n Return:\n Tensor shape: (n_batch, n_output, n_time - kernel_size + 1)\n \"\"\"\n\n return self.melresnet_model(specgram)\n\n\nclass _Stretch2d(nn.Module):\n r\"\"\"Upscale the frequency and time dimensions of a spectrogram.\n\n Args:\n time_scale: the scale factor in time dimension\n freq_scale: the scale factor in frequency dimension\n\n Examples\n >>> stretch2d = _Stretch2d(time_scale=10, freq_scale=5)\n\n >>> input = torch.rand(10, 100, 512) # a random spectrogram\n >>> output = stretch2d(input) # shape: (10, 500, 5120)\n \"\"\"\n\n def __init__(self,\n time_scale: int,\n freq_scale: int) -> None:\n super().__init__()\n\n self.freq_scale = freq_scale\n self.time_scale = time_scale\n\n def forward(self, specgram: Tensor) -> Tensor:\n r\"\"\"Pass the input through the _Stretch2d layer.\n\n Args:\n specgram (Tensor): the input sequence to the _Stretch2d layer (..., n_freq, n_time).\n\n Return:\n Tensor shape: (..., n_freq * freq_scale, n_time * time_scale)\n \"\"\"\n\n return specgram.repeat_interleave(self.freq_scale, -2).repeat_interleave(self.time_scale, -1)\n\n\nclass _UpsampleNetwork(nn.Module):\n r\"\"\"Upscale the dimensions of a spectrogram.\n\n Args:\n upsample_scales: the list of upsample scales\n n_res_block: the number of ResBlock in stack (default=10)\n n_freq: the number of bins in a spectrogram (default=128)\n n_hidden: the number of hidden dimensions (default=128)\n n_output: the number of output dimensions (default=128)\n kernel_size: the number of kernel size in the first Conv1d layer (default=5)\n\n Examples\n >>> upsamplenetwork = _UpsampleNetwork(upsample_scales=[4, 4, 16])\n >>> input = torch.rand(10, 128, 10) # a random spectrogram\n >>> output = upsamplenetwork(input) # shape: (10, 1536, 128), (10, 1536, 128)\n \"\"\"\n\n def __init__(self,\n upsample_scales: List[int],\n n_res_block: int = 10,\n n_freq: int = 128,\n n_hidden: int = 128,\n n_output: int = 128,\n kernel_size: int = 5) -> None:\n super().__init__()\n\n total_scale = 1\n for upsample_scale in upsample_scales:\n total_scale *= upsample_scale\n\n self.indent = (kernel_size - 1) // 2 * total_scale\n self.resnet = _MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)\n self.resnet_stretch = _Stretch2d(total_scale, 1)\n\n up_layers = []\n for scale in upsample_scales:\n stretch = _Stretch2d(scale, 1)\n conv = nn.Conv2d(in_channels=1,\n out_channels=1,\n kernel_size=(1, scale * 2 + 1),\n padding=(0, scale),\n bias=False)\n conv.weight.data.fill_(1. 
/ (scale * 2 + 1))\n up_layers.append(stretch)\n up_layers.append(conv)\n self.upsample_layers = nn.Sequential(*up_layers)\n\n def forward(self, specgram: Tensor) -> Tensor:\n r\"\"\"Pass the input through the _UpsampleNetwork layer.\n\n Args:\n specgram (Tensor): the input sequence to the _UpsampleNetwork layer (n_batch, n_freq, n_time)\n\n Return:\n Tensor shape: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale),\n (n_batch, n_output, (n_time - kernel_size + 1) * total_scale)\n where total_scale is the product of all elements in upsample_scales.\n \"\"\"\n\n resnet_output = self.resnet(specgram).unsqueeze(1)\n resnet_output = self.resnet_stretch(resnet_output)\n resnet_output = resnet_output.squeeze(1)\n\n specgram = specgram.unsqueeze(1)\n upsampling_output = self.upsample_layers(specgram)\n upsampling_output = upsampling_output.squeeze(1)[:, :, self.indent:-self.indent]\n\n return upsampling_output, resnet_output\n","sub_path":"torchaudio/models/_wavernn.py","file_name":"_wavernn.py","file_ext":"py","file_size_in_byte":6929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348683747","text":"# -*- coding: utf-8 -*-\n'''\ncreate at 2018.01.07\n@author scutpaul\n'''\n\n__author__ = 'scutpaul'\n\nclass Student(object):\n#object为继承的类\n def __init__(self,name,score):\n self.name = name\n self.score = score\n# __init__的第一个参数必须是self,表示示例本身,可以把各种属性绑定到self,\n# 创建实例时必须传入与__init__的形参(self除外)相匹配的参数\n def print_score(self):\n print('%s : %s'%(self.name,self.score))\n# 第一个参数为self,调用函数的话使用过实例调用的\n def get_grade(self):\n if self.score >= 90:\n return 'A'\n elif self.score >60:\n return 'C'\n else:\n return 'F'\n \n \nif __name__ == '__main__':\n chen = Student('chen',95)\n chen.print_score()\n print(chen)\n print(chen.get_grade())\n bart = Student('Bart Simpson', 59)\n bart.age = 18\n print(bart.age)\n \n'''\n和静态语言不同,Python允许对实例变量绑定任何数据,\n对于两个实例变量,虽然它们都是同一个类的不同实例,但拥有的变量名称都可能不同\n'''\n\n","sub_path":"python/8_class_instance.py","file_name":"8_class_instance.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"12754549","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pyp\nfrom scipy.integrate import odeint\n\nfrom BestLaunch import bestLaunch\n\nC = 0.75 # https://www.grc.nasa.gov/WWW/K-12/rocket/shaped.html\nrho = 1.225 \ng = 9.81\n# Rho and g technically vary with altitude; too much difficult math to account for in this project\nA = .001\nm = 2\n# set for a model rocket. 
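# A quick sanity check on the drag constants above: with quadratic drag
# F_d = C*rho*A*v**2/2, drag balances gravity at the terminal speed
# v_t = sqrt(2*m*g/(C*rho*A)). Minimal standalone sketch (the helper name
# `terminal_velocity` is mine, not part of this script):
#
#     import numpy as np
#
#     def terminal_velocity(C=0.75, rho=1.225, A=0.001, m=2.0, g=9.81):
#         """Terminal speed for the rocket parameters above (~207 m/s)."""
#         return np.sqrt(2.0 * m * g / (C * rho * A))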
a guesstimate\n\ndef path(y0=0, v0=200):\n \n weatherData = \"CompSciWeatherData.xlsx\"\n T = pd.read_excel(weatherData)\n\n data = bestLaunch()\n data = data.reset_index(inplace = False)\n bestLoc = data['Facility Name'].iloc[0]\n\n for i in range(0,12):\n if T.loc[i,'Facility'] == bestLoc:\n windspeed = T.loc[i, 'Max Wind Speed Avg (mph)']\n \n x0 = 0\n #y0 already defined\n vx0 = windspeed\n vy0 = v0\n \n xvals = np.array([x0,vx0])\n yvals = np.array([y0,vy0])\n \n tf = int(((vy0)+np.sqrt((vy0)**2-2*g*y0))/g) # made into int so can be used as index \n t = np.linspace(0, tf, 100)\n \n solvex = odeint(modelx, xvals, t)\n solvey = odeint(modely, yvals, t)\n \n xgraph = solvex[:,0]\n ygraph = solvey[:,0]\n \n #print(xgraph)\n \n # trajectory\n pyp.plot(xgraph, ygraph)\n pyp.title('Path of Rocket')\n pyp.xlabel('Distance from Launch Point, relative to the ground (m)')\n pyp.ylabel('Altitude of Rocket (m)')\n pyp.ylim(0,None)\n pyp.show()\n \n \n pyp.plot(t, ygraph)\n pyp.title('Altitude vs Time')\n pyp.xlabel('Time (s)')\n pyp.ylabel('Altitude of Rocket (m)')\n pyp.ylim(0,None)\n pyp.show()\n \n return[]\n\n# Used as a guide: https://apmonitor.com/pdc/index.php/Main/SolveDifferentialEquations\ndef modelx(xvals, t):\n \n xp = xvals[1]\n xpp = -C*rho*A*(xp)**2/(2*m)\n \n return np.array([xp, xpp])\n\ndef modely(yvals, t):\n \n yp = yvals[1]\n ypp = -g - C*rho*A*(yp)**2/(2*m)\n \n return np.array([yp, ypp])","sub_path":"Final/rocketModel.py","file_name":"rocketModel.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"360678879","text":"from flask import Flask, send_from_directory, render_template\nfrom contextlib import closing\nimport boto3\nimport os\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/vai/')\ndef hello_world(text):\n client = boto3.client(\n 'polly',\n region_name=os.environ.get('AWS_REGION'),\n aws_access_key_id=os.environ.get('AWS_KEY_ID'),\n aws_secret_access_key=os.environ.get('AWS_SECRET_KEY')\n )\n\n response = client.synthesize_speech(\n OutputFormat='mp3',\n Text=text,\n VoiceId='Ricardo'\n )\n\n if \"AudioStream\" in response:\n with closing(response[\"AudioStream\"]) as stream:\n data = stream.read()\n fo = open(\"result.mp3\", \"wb\")\n fo.write(data)\n fo.close()\n\n return send_from_directory(directory='.', filename='result.mp3')\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host=\"0.0.0.0\", port=port)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"67850690","text":"import speech_recognition as sr\r\nimport os\r\nimport playsound\r\nimport random\r\nimport pyowm\r\nimport datetime\r\nimport webbrowser\r\nimport requests\r\nfrom time import ctime\r\n\r\nfrom gtts import gTTS\r\n\r\nr = sr.Recognizer()\r\n\r\ndef voice_audio(ask=False):\r\n with sr.Microphone() as source:\r\n if ask:\r\n response(ask)\r\n audio = r.listen(source)\r\n voice_text = ''\r\n try:\r\n voice_text = r.recognize_google(audio)\r\n print(voice_text)\r\n except:\r\n response('Sorry could not recognize your voice')\r\n\r\n return voice_text\r\n\r\ndef response(audio_string):\r\n text_to_speech = gTTS(text=audio_string, lang='en')\r\n r = random.randint(1, 1000000000000000000)\r\n audio_file = 'audio-' + str(r) + '.mp3'\r\n 
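    # The random-filename dance above can collide or leak files if playback
    # raises. A hedged alternative (a sketch using the same gTTS object and the
    # standard-library tempfile module, not what this script actually does):
    #
    #     import tempfile
    #     with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as tmp:
    #         audio_file = tmp.name
    #     text_to_speech.save(audio_file)
    #     try:
    #         playsound.playsound(audio_file)
    #     finally:
    #         os.remove(audio_file)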
text_to_speech.save(audio_file)\r\n playsound.playsound(audio_file)\r\n print(audio_string)\r\n os.remove(audio_file)\r\n\r\n\r\ndef weather_temperature():\r\n\r\n weather_key = pyowm.OWM('194d64227dc06cce1c198b75f3e22d9c')\r\n observation = weather_key.weather_at_place('Boston, US')\r\n weather = observation.get_weather()\r\n temperature = weather.get_temperature('fahrenheit')['temp']\r\n return temperature\r\n\r\ndef alexa (voice_text):\r\n\r\n if 'your name' in voice_text:\r\n response('My name is Alexa')\r\n\r\n if 'temperature' in voice_text or 'weather' in voice_text:\r\n response('The weather is ' + str(weather_temperature()))\r\n\r\n if 'what time is it' in voice_text or 'what time' in voice_text or \"what's the time\" in voice_text:\r\n response(just_time())\r\n\r\n if 'date' in voice_text:\r\n response(just_date())\r\n\r\n if 'open browser' in voice_text:\r\n search = voice_audio('What website would you like to visit?')\r\n url = 'https://' + search\r\n webbrowser.get().open(url)\r\n response('I found this on the web')\r\n\r\n if 'search' in voice_text:\r\n search = voice_audio('What do you want to search for?')\r\n url = 'https://google.com/search?q=' + search\r\n webbrowser.get().open(url)\r\n response('I found this on the web')\r\n\r\n if 'tell me a joke' in voice_text:\r\n response(joke())\r\n\r\n if 'exit' in voice_text:\r\n exit()\r\n\r\ndef just_time ():\r\n x = datetime.datetime.now()\r\n time_now = (x.strftime(\"%I:%M:%p\"))\r\n return time_now\r\n\r\ndef just_date ():\r\n x = datetime.datetime.now()\r\n today_date = (x.strftime(\"%a, %b %d, %Y\"))\r\n return today_date\r\n\r\ndef joke ():\r\n r = requests.get('https://geek-jokes.sameerkumar.website/api?format=json')\r\n return r.text\r\n\r\nresponse('what can I help you with?')\r\n\r\n\r\nwhile True:\r\n voice_text = voice_audio()\r\n alexa(voice_text)\r\n","sub_path":"Voice Assistant.py","file_name":"Voice Assistant.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"126142365","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\n\nfrom datetime import datetime\nfrom .models import Bi, TimeTable, DimenTable, MeasureTable, Colselect, Rowselect, Measureselect, FileSelect, Graphselect, Subgraph, Graphmeasure, Selectsub\nfrom .model_2 import Model\nfrom .create import Create\nimport csv, os, cgi\nfrom os.path import basename\nimport json\nimport itertools \nimport pandas as pd\n\nm = Model()\ndef show(request):\n #Bi.objects.all().delete()\n FileSelect.objects.all().delete()\n Colselect.objects.all().delete()\n Rowselect.objects.all().delete()\n Measureselect.objects.all().delete()\n Graphselect.objects.all().delete()\n Selectsub.objects.all().delete()\n Graphmeasure.objects.all().delete()\n Subgraph.objects.all().delete()\n with open('bi/static/css/data.json', 'w') as f:\n f.write(\"\")\n with open('bi/static/css/table.html', 'w') as f:\n f.write(\"\")\n return render(request, 'bi/home.html', {'all_list':Bi.objects.order_by('file_name')})\n\ndef graph(request):\n #Bi.objects.all().delete()\n return render(request, 'bi/graph.html', {'all_time':TimeTable.objects.all(),\n 'all_dim':DimenTable.objects.all(),\n 'all_measure':MeasureTable.objects.all(), \n 'all_select':Graphselect.objects.all(),\n 'sub_select':Selectsub.objects.all(),\n 'measure':Graphmeasure.objects.all(),\n 'all_sub':Subgraph.objects.all()})\n\ndef get_subgraph(request):\n 
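    # This view rebuilds the Subgraph table from the distinct values of the
    # dimension passed in `data` and echoes them back as JSON. A sketch of the
    # same response using Django's JsonResponse instead of json.dumps
    # (safe=False is required when the top-level object is a list, not a dict):
    #
    #     from django.http import JsonResponse
    #     return JsonResponse(array, safe=False)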
Subgraph.objects.all().delete()\n data = request.GET.get('data')\n for i in FileSelect.objects.all().values_list('file_name', flat=True):\n m.select_file(i)\n if data in m.time:\n data = data + ':Year'\n array = [] \n for i in m.get_data(data):\n record = Subgraph(sub_graph = i)\n record.save()\n array.append(i)\n return HttpResponse(json.dumps(array))\n\ndef subgraph_select(request):\n Selectsub.objects.all().delete()\n data = request.GET.getlist('data[]')\n for i in data:\n record = Selectsub(sub_select = i)\n record.save()\n return HttpResponseRedirect(reverse('bi:graph'))\ndef graph_select(request):\n data = request.GET.getlist('data[]')\n if data[0] != \" \":\n Graphselect.objects.all().delete()\n record = Graphselect(graph = data[0])\n record.save()\n if data[1] != \" \":\n Graphmeasure.objects.all().delete()\n record = Graphmeasure(measure_graph = data[1])\n record.save()\n showGraph()\n return HttpResponseRedirect(reverse('bi:graph'))\n\ndef showGraph():\n data = []\n df = pd.DataFrame({})\n dimen = \"\"\n for i in FileSelect.objects.all().values_list('file_name', flat=True):\n m.select_file(i)\n for i in Graphselect.objects.all().values_list('graph', flat=True):\n dimen = i\n if i in m.time:\n col = [i+':Year']\n else:\n col = [i]\n for i in Graphmeasure.objects.all().values_list('measure_graph', flat=True):\n row = ['Measurement']\n r = [[i]]\n c = [[]]\n for i in Selectsub.objects.all().values_list('sub_select', flat=True):\n if dimen in m.time:\n i = int(i)\n c[0].append(i)\n df['label'] = c[0]\n val = []\n if len(row)>0:\n m.keepC.append(m.df)\n for a in range(len(col)):\n m.keepC.append(None)\n for a in range(len(row)):\n m.keepR.append(None)\n sendC = list(itertools.product(*c))\n sendR = list(itertools.product(*r))\n for j in range(len(sendC)):\n m.get_col(col, sendC[j], m.keepC[0])\n for i in range(len(sendR)):\n val.append(m.get_row(row, sendR[i], m.keepR[0], 0))\n df['value'] = val\n out = df.to_json(orient = 'records')\n with open('bi/static/css/data.json', 'w') as f:\n f.write(out)\n \n\ndef delete_subgraph(request):\n record = Selectsub.objects.get(pk = int(request.GET.get('data')))\n record.delete()\n return HttpResponseRedirect(reverse('bi:graph'))\n\ndef import_file(request):\n if request.method == 'POST':\n try:\n if(str(request.FILES['file']).endswith('.xlsx')):\n filename = str(request.FILES['file'])\n filename = os.path.splitext(filename)[0]\n handle_uploaded_file(request.FILES['file'], filename)\n starschema(filename)\n record = Bi(file_name = filename)\n record.save()\n else:\n raise\n except:\n return render(request, 'bi/import.html',{'error_message':'Please Select xlsx file!!!'})\n return HttpResponseRedirect(reverse('bi:show'))\n\ndef pivot_page(request):\n return render(request, 'bi/pivot.html',\n {'all_time':TimeTable.objects.all(),\n 'all_dim':DimenTable.objects.all(),\n 'all_measure':MeasureTable.objects.all(), \n 'all_col':Colselect.objects.all(),\n 'measure':Measureselect.objects.all(),\n 'all_row':Rowselect.objects.all()})\n\ndef pivot_table(request, filename):\n '''FileSelect.objects.all().delete()\n Colselect.objects.all().delete()\n Rowselect.objects.all().delete()\n Measureselect.objects.all().delete()'''\n m.select_file(filename)\n record = FileSelect(file_name = filename)\n record.save()\n TimeTable.objects.all().delete()\n DimenTable.objects.all().delete()\n MeasureTable.objects.all().delete()\n for i in m.time:\n record = TimeTable(time = i)\n record.save()\n for i in m.dim:\n record = DimenTable(dimension = i)\n record.save()\n for i in 
m.measure:\n record = MeasureTable(measure = i)\n record.save()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\n\ndef col_select(request):\n data = request.GET.getlist('data[]')\n for i in data:\n record = Colselect(column = i)\n record.save()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\ndef row_select(request):\n data = request.GET.getlist('data[]')\n for i in data:\n record = Rowselect(row = i)\n record.save()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\ndef measure_select(request):\n data = request.GET.get('data')\n if data != \" \":\n Measureselect.objects.all().delete()\n record = Measureselect(measure = data)\n record.save()\n test()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\n\ndef test():\n col = []\n row = []\n rows = []\n c = []\n r = []\n value = []\n measure = \"\"\n for i in FileSelect.objects.all().values_list('file_name', flat=True):\n filename = i\n m.select_file(filename)\n for i in Colselect.objects.all().values_list('column', flat=True):\n if i in m.time:\n i = i+\":Year\"\n col.append(i)\n for i in Rowselect.objects.all().values_list('row', flat=True):\n if i in m.time:\n i = i+\":Year\"\n row.append(i)\n rows.append(i)\n for i in Measureselect.objects.all().values_list('measure', flat=True):\n measure = i\n if measure != \"\":\n row.append(\"Measurement\")\n value.append(measure)\n for i in range(len(col)):\n c.append([])\n for j in m.get_data(col[i]):\n c[i].append(j)\n for i in range(len(row)):\n r.append([])\n if row[i] == 'Measurement':\n r[i].append(measure)\n else:\n for j in m.get_data(row[i]):\n r[i].append(j)\n #print(row)\n df = pd.DataFrame({})\n get_col = []\n get_row = []\n get_value = []\n columns = []\n if len(value) > 0:\n m.keepC.append(m.df)\n for a in range(len(col)):\n get_col.append([])\n m.keepC.append(None)\n for a in range(len(row)):\n get_row.append([])\n m.keepR.append(None)\n sendC = list(itertools.product(*c))\n sendR = list(itertools.product(*r))\n for j in range(len(sendC)):\n m.get_col(col, sendC[j], m.keepC[0])\n for i in range(len(sendR)):\n val = m.get_row(row, sendR[i], m.keepR[0], 0)\n for k in range(len(col)):\n get_col[k].append(sendC[j][k])\n for l in range(len(row)):\n get_row[l].append(sendR[i][l])\n get_value.append(val)\n columns.append(\"Value\")\n for i in range(len(col)):\n df[col[i]] = get_col[i]\n for i in range(len(row)):\n df[row[i]] = get_row[i]\n df['Value'] = get_value\n df['Column'] = columns\n if len(col) > 0:\n colu = col\n else:\n colu = ['Column']\n if len(row) > 1:\n index = rows\n else:\n index = ['Measurement']\n table = pd.pivot_table(df, values = 'Value',\n index = index, columns = colu)\n with open(\"bi/static/css/table.html\", 'w') as fo:\n fo.write(table.to_html())\n \n\ndef delete_col(request):\n record = Colselect.objects.get(pk = int(request.GET.get('data')))\n record.delete()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\ndef delete_row(request):\n record = Rowselect.objects.get(pk = int(request.GET.get('data')))\n record.delete()\n return HttpResponseRedirect(reverse('bi:pivot_page'))\n\ndef handle_uploaded_file(file, filename):\n if not os.path.exists('upload/'):\n os.mkdir('upload/')\n \n with open('upload/' + filename+\".xlsx\", 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n\ndef starschema(filename):\n c = Create()\n c.import_file('./upload/'+filename+\".xlsx\")\n c.convert_data()\n c.manage_file()\n 
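    # For reference: the pivot builder test() above enumerates every
    # combination of the selected column/row values with itertools.product.
    # Minimal standalone illustration of that pattern (hypothetical data):
    #
    #     import itertools
    #     c = [['2019', '2020'], ['North', 'South']]
    #     list(itertools.product(*c))
    #     # [('2019', 'North'), ('2019', 'South'),
    #     #  ('2020', 'North'), ('2020', 'South')]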
c.set_file()\n\n","sub_path":"bi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"183727705","text":"import torch.nn as nn\nimport torch\nimport onmt\nfrom onmt.encoders.encoder import EncoderBase\nfrom onmt.encoders.transformer import TransformerEncoder, TransformerEncoderLayer\n# from onmt.utils.misc import aeq\nfrom onmt.modules.position_ffn import PositionwiseFeedForward\n\n\nclass SimpleContextTransformerEncoder(EncoderBase):\n\n\n def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, \n selected_ctx=0, fields=None):\n super(SimpleContextTransformerEncoder, self).__init__()\n self.selected_ctx = selected_ctx\n self.fields = fields\n\n\n self.num_layers = num_layers\n self.embeddings = embeddings\n self.layer_norm_shared = onmt.modules.LayerNorm(d_model)\n self.layer_norm_ctx = onmt.modules.LayerNorm(d_model)\n self.layer_norm_src_final = onmt.modules.LayerNorm(d_model)\n self.layer_norm_ctx_final = onmt.modules.LayerNorm(d_model)\n\n self.shared_layers = nn.ModuleList(\n [TransformerEncoderLayer(d_model, heads, d_ff, dropout)\n for _ in range(num_layers - 1)])\n\n self.extra_ctx_layer = TransformerEncoderLayer(\n d_model, heads, d_ff, dropout)\n self.ctx_src_self_attn = onmt.modules.MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n self.ctx_src_layer_norm = onmt.modules.LayerNorm(d_model)\n\n self.src_self_attn = onmt.modules.MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n self.src_layer_norm = onmt.modules.LayerNorm(d_model)\n\n # TODO dim\n self.gate = nn.Linear(d_model * 2, 1)\n self.gate_sigmoid = nn.Sigmoid()\n\n self.final_feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.final_layer_norm = onmt.modules.LayerNorm(d_model)\n\n def partial_encode(self, input, input_lengths):\n emb = self.embeddings(input)\n out = emb.transpose(0, 1).contiguous()\n words = input[:, :, 0].transpose(0, 1)\n w_batch, w_len = words.size()\n padding_idx = self.embeddings.word_padding_idx\n\n mask = words.data.eq(padding_idx).unsqueeze(1).expand(w_batch, w_len, w_len)\n for i in range(len(self.shared_layers)):\n out = self.shared_layers[i](out, mask)\n return out, mask, emb\n\n def forward(self, src, src_lengths, ctx_0, ctx_0_lengths, ctx_1, ctx_1_lengths):\n # TODO refactor/clean up\n print(ctx_0); quit()\n # run src through n-1 layers of shared stack\n out_src, src_mask, src_emb = self.partial_encode(src, src_lengths)\n out_src = self.layer_norm_shared(out_src)\n\n # run ctx through n-1 layers of shared stack\n if self.selected_ctx == 0:\n out_ctx, ctx_mask, _ = self.partial_encode(ctx_0, ctx_0_lengths)\n elif self.selected_ctx == 1:\n out_ctx, ctx_mask, _ = self.partial_encode(ctx_1, ctx_1_lengths)\n out_ctx = self.layer_norm_shared(out_ctx)\n\n # finish off source: final self attn, norm + add\n final_src, _ = self.src_self_attn(out_src, out_src, out_src, mask=src_mask)\n final_src = self.layer_norm_src_final(final_src) + out_src\n\n # finish off ctx: extra layer, use src to attend over ctx\n out_ctx = self.extra_ctx_layer(out_ctx, ctx_mask)\n\n words = ctx_0 if self.selected_ctx == 0 else ctx_1\n words = words[:, :, 0].transpose(0, 1)\n w_batch, w_len = words.size()\n [_, src_len, _] = src_mask.size()\n padding_idx = self.embeddings.word_padding_idx\n mask = words.data.eq(padding_idx).unsqueeze(1).expand(w_batch, src_len, w_len)\n final_ctx, _ = self.ctx_src_self_attn(out_ctx, out_ctx, final_src, mask=mask) # TODO 
-- check masking\n final_ctx = self.layer_norm_ctx_final(final_ctx) + out_src\n\n # gate the ctx + src stuff\n g = self.gate_sigmoid(self.gate(torch.cat((final_ctx, final_src), 2)))\n gated_encoding = g * final_ctx + (1 - g) * final_src\n\n # final feedfowrward, layer norm + add\n output = self.final_feed_forward(gated_encoding)\n output = self.final_layer_norm(output) + gated_encoding\n\n\n # attn_input = torch.cat((out_ctx, out_src), 1)\n # attn_mask_words = torch.cat(\n # (src[:, :, 0].transpose(0, 1), ctx_1[:, :, 0].transpose(0, 1)),\n # 1)\n # w_batch, w_len = attn_mask_words.size()\n # attn_mask = attn_mask_words.data.eq(self.embeddings.word_padding_idx).unsqueeze(1).expand(\n # w_batch, w_len, w_len)\n # final_ctx, _ = self.ctx_src_self_attn(attn_input, attn_input, attn_input, mask=attn_mask)\n # final_ctx = self.ctx_src_layer_norm(final_ctx) + attn_input\n \n # final_ctx = final_ctx[:, ctx_len:, :] # only take src-enriched vecs\n\n # gated sum\n\n return src_emb, output.transpose(0, 1).contiguous()\n \n \n \n \n \n \n","sub_path":"onmt/encoders/simple_context_transformer.py","file_name":"simple_context_transformer.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"350556192","text":"import os\n\nimport torch\n\nfrom src.connectx.constraints import ConstraintType\nfrom src.connectx.environment import ConnectXGymEnv, convert_state_to_image\nfrom src.connectx.evaluate import record_matches, show_recordings\nfrom src.connectx.opponents import interactive_player\nfrom src.connectx.policy import CNNPolicy\n\n#############################\n# Play with a trained model #\n#############################\n\n\ndef main():\n # Use the interactive player to play against the computer, otherwise 'random' or 'negamax' or declare your own\n # function as in src.connectx.opponents.py\n opponent = interactive_player\n play_as_first_player = str(input('Do you want your trained agent to play as first (y/n)?')).lower() == 'y'\n\n num_matches = int(input('How many match do you want to play?'))\n\n # When the interactive player play as the 1st player some steps must actions must be skipped at the beginning\n if not play_as_first_player and opponent is interactive_player:\n print('Creation of environment ... Press any button')\n env = ConnectXGymEnv(opponent, play_as_first_player)\n\n if not play_as_first_player and opponent is interactive_player:\n print('Initialize policy ... 
Press any button')\n\n # Policy\n init_screen = convert_state_to_image(env.reset())\n screen_shape = (init_screen.shape[1], init_screen.shape[2], init_screen.shape[3])\n agent = CNNPolicy(env.action_space.n,\n screen_shape)\n\n constraint_type = int(input('Insert number of the agent you want to play:\\n'\n f'0 - DQN\\n'\n f'{ConstraintType.LOGIC_PURE.value} - {ConstraintType.LOGIC_PURE.name}\\n'\n f'{ConstraintType.LOGIC_TRAIN.value} - {ConstraintType.LOGIC_TRAIN.name}\\n'\n f'{ConstraintType.SBR.value} - {ConstraintType.SBR.name}\\n'\n f'{ConstraintType.SPE.value} - {ConstraintType.SPE.name}\\n'\n f'{ConstraintType.CDQN.value} - {ConstraintType.CDQN.name}\\n'\n f'6 - curriculum\\n'))\n\n device = 'cpu'\n base_path = os.path.abspath(os.path.dirname(__file__))\n if constraint_type == 6:\n weight_path = base_path + '/models/curriculum.pt'\n c_type = ConstraintType(5)\n else:\n weight_path = base_path + '/models/' + (\n ConstraintType(constraint_type).name if 6 > constraint_type > 0 else 'dqn').lower() + '.pt'\n c_type = ConstraintType(constraint_type) if constraint_type > 0 else None\n\n agent.load_state_dict(torch.load(weight_path, map_location=torch.device(device)))\n\n config = {'columns': 7,\n 'rows': 6,\n 'inarow': 4,\n 'c_type': c_type}\n\n state_recording, action_recording, results_recording = record_matches(env,\n agent,\n config,\n play_as_first_player=play_as_first_player,\n num_matches=num_matches,\n render_env=True,\n keep_player_colour=True)\n\n for i, (sr, ar) in enumerate(zip(state_recording, action_recording)):\n print(f'Play recording {i + 1} (ended as {results_recording[-1]})')\n show_recordings(sr, ar)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/test/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413474674","text":"from django.urls import path, include\nfrom communities.views.communities.views import Communities, TrendingCommunities, CommunityNameCheck, \\\n FavoriteCommunities, SearchCommunities, JoinedCommunities, AdministratedCommunities, ModeratedCommunities, \\\n SearchJoinedCommunities\nfrom communities.views.community.administrators.views import CommunityAdministratorItem, \\\n CommunityAdministrators, SearchCommunityAdministrators\nfrom communities.views.community.banned_users.views import BanUser, UnbanUser, CommunityBannedUsers, \\\n SearchCommunityBannedUsers\nfrom communities.views.community.members.views import CommunityMembers, JoinCommunity, \\\n LeaveCommunity, InviteCommunityMember, SearchCommunityMembers, UninviteCommunityMember\nfrom communities.views.community.moderators.views import CommunityModeratorItem, CommunityModerators, \\\n SearchCommunityModerators\nfrom communities.views.community.posts.views import CommunityPosts, ClosedCommunityPosts\nfrom communities.views.community.views import CommunityItem, CommunityAvatar, CommunityCover, FavoriteCommunity\nfrom moderation.views.moderated_objects.views import CommunityModeratedObjects\nfrom moderation.views.report.views import ReportCommunity\n\n\n\ncommunity_administrator_patterns = [\n path('', CommunityAdministratorItem.as_view(), name='community-administrator'),\n]\n\ncommunity_administrators_patterns = [\n path('', CommunityAdministrators.as_view(), name='community-administrators'),\n path('search/', SearchCommunityAdministrators.as_view(), name='search-community-administrators'),\n path('/', 
include(community_administrator_patterns)),\n]\n\ncommunity_moderator_patterns = [\n path('', CommunityModeratorItem.as_view(), name='community-moderator'),\n]\n\ncommunity_moderators_patterns = [\n path('', CommunityModerators.as_view(), name='community-moderators'),\n path('search/', SearchCommunityModerators.as_view(), name='search-community-moderators'),\n path('/', include(community_moderator_patterns)),\n]\n\ncommunity_members_patterns = [\n path('', CommunityMembers.as_view(), name='community-members'),\n path('search/', SearchCommunityMembers.as_view(), name='search-community-members'),\n path('join/', JoinCommunity.as_view(), name='community-join'),\n path('leave/', LeaveCommunity.as_view(), name='community-leave'),\n path('invite/', InviteCommunityMember.as_view(), name='community-invite'),\n path('uninvite/', UninviteCommunityMember.as_view(), name='community-uninvite'),\n]\n\ncommunity_posts_patterns = [\n path('', CommunityPosts.as_view(), name='community-posts'),\n path('closed/', ClosedCommunityPosts.as_view(), name='closed-community-posts'),\n]\n\ncommunity_banned_users_patterns = [\n path('', CommunityBannedUsers.as_view(), name='community-banned-users'),\n path('search/', SearchCommunityBannedUsers.as_view(), name='search-community-banned-users'),\n path('ban/', BanUser.as_view(), name='community-ban-user'),\n path('unban/', UnbanUser.as_view(), name='community-unban-user'),\n]\n\ncommunity_moderated_objects_patterns = [\n path('', CommunityModeratedObjects.as_view(), name='community-moderated-objects')\n]\n\ncommunity_patterns = [\n path('', CommunityItem.as_view(), name='community'),\n path('avatar/', CommunityAvatar.as_view(), name='community-avatar'),\n path('cover/', CommunityCover.as_view(), name='community-cover'),\n path('favorite/', FavoriteCommunity.as_view(), name='favorite-community'),\n path('members/', include(community_members_patterns)),\n path('posts/', include(community_posts_patterns)),\n path('banned-users/', include(community_banned_users_patterns)),\n path('administrators/', include(community_administrators_patterns)),\n path('moderators/', include(community_moderators_patterns)),\n path('moderated-objects/', include(community_moderated_objects_patterns)),\n path('report/', ReportCommunity.as_view(), name='report-community'),\n]\n\ncommunities_patterns = [\n path('', Communities.as_view(), name='communities'),\n path('trending/', TrendingCommunities.as_view(), name='trending-communities'),\n path('joined/', JoinedCommunities.as_view(), name='joined-communities'),\n path('joined/search/', SearchJoinedCommunities.as_view(), name='search-joined-communities'),\n path('favorites/', FavoriteCommunities.as_view(), name='favorite-communities'),\n path('administrated/', AdministratedCommunities.as_view(), name='administrated-communities'),\n path('moderated/', ModeratedCommunities.as_view(), name='moderated-communities'),\n path('name-check/', CommunityNameCheck.as_view(), name='community-name-check'),\n path('search/', SearchCommunities.as_view(), name='search-communities'),\n path('/', include(community_patterns)),\n]\n","sub_path":"communities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415166017","text":"''' A certain grade of steel is graded according to the following\nconditions:\n(i) Hardness must be greater than 50\n(ii) Carbon content must be less than 0.7\n(iii) Tensile strength must be greater than 5600\nThe grades 
are as follows:\nGrade is 10 if all three conditions are met\nGrade is 9 if conditions (i) and (ii) are met\nGrade is 8 if conditions (ii) and (iii) are met\nGrade is 7 if conditions (i) and (iii) are met\nGrade is 6 if only one condition is met\nGrade is 5 if none of the conditions are met\nWrite a program, which will require the user to give values of\nhardness, carbon content and tensile strength of the steel\nunder consideration and output the grade of the steel. '''\n\nhardness = int(input(\" Enter Hardness \"))\ncarbon_content = float(input(\" Enter Carbon Content \"))\ntensile_str = int(input(\" Enter Tensile Strength \"))\nif hardness>50 and carbon_content<0.7 and tensile_str>5600:\n\tprint(\" grade=10 \")\nelif hardness>50 and carbon_content<0.7:\n\tprint(\" grade=9 \")\nelif carbon_content<0.7 and tensile_str>5600:\n\tprint(\" grade=8 \")\nelif hardness>50 and tensile_str>5600:\n\tprint(\" grade=7 \")\nelif hardness>50 or carbon_content<0.7 or tensile_str>5600:\n\tprint(\" grade=6 \")\nelse:\n\tprint(\" grade=5 \")","sub_path":"4SteelGrade.py","file_name":"4SteelGrade.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"294702680","text":"def matchWord(word, answer) : \n if word == answer : \n msg = '맞습니다!' \n else : \n msg = '틀렸습니다!' \n return msg \n\neng_dict = {'orange':'오렌지', 'cookie':'과자', 'mother':'어머니', 'brother':'형제', 'python':'파이썬'} \n\nfor key in eng_dict : \n string = input(eng_dict[key] + '에 맞는 영어 단어는? ') \n result = matchWord(string, key) \n print(result)","sub_path":"python/활용자료/예제/07/Q7-4.py","file_name":"Q7-4.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"30784564","text":"import os\r\nimport scipy as sp\r\nimport numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.cross_validation import train_test_split, KFold\r\nfrom sklearn.metrics import (accuracy_score, roc_auc_score,\r\n confusion_matrix, recall_score, precision_score)\r\n# Constants.\r\nSEED = 42\r\nTARGET = 'open_account_flg'\r\nWD = '~/Documents/supernova/prog/dsa/choice_credit/'\r\n\r\n# Input.\r\ntrain = pd.read_csv(WD + 'data/credit_train.csv',\r\n sep=';', decimal=',', encoding='cp1251')\r\ntest = pd.read_csv(WD + 'data/credit_test.csv',\r\n sep=';', decimal=',', encoding='cp1251')\r\ntest[TARGET] = np.nan\r\nall_data = pd.concat([train, test]).drop('client_id', axis=1)\r\n\r\n# Preprocessing & Features Engineering.\r\ndef na_replace(data):\r\n data = data.copy()\r\n for col in data.columns.drop(TARGET):\r\n if any(pd.isnull(data[col])):\r\n print(col + ': ' + np.str(pd.isnull(data[col]).sum()) + ' NAs')\r\n idx = pd.isnull(data[col])\r\n if data[col].dtype != 'O':\r\n data.loc[idx, col] = data[col].mode()[0]\r\n else:\r\n data.loc[idx, col] = data[col].value_counts()[0]\r\n return data\r\n\r\nall_data = na_replace(data=all_data)\r\n\r\nall_data['nf_01'] = all_data['monthly_income'] / all_data['credit_sum']\r\nall_data['nf_02'] = all_data['credit_sum'] / (all_data['credit_count'] + 1)\r\nall_data['nf_03'] = np.int16(pd.cut(all_data['age'],\r\n [18, 25, 30, 35, 40, 45, 50, 55, 60, 65, 71],\r\n labels=False))\r\nall_data['nf_04'] = np.int16(pd.cut(all_data['credit_sum'],\r\n [2000, 5000] +\r\n np.arange(10000, 100000, 10000).tolist() +\r\n [150000, 200000],\r\n 
labels=False))\r\nall_data['nf_05'] = np.int16(pd.cut(all_data['monthly_income'],\r\n np.arange(0, 110000, 10000).tolist() +\r\n [150000, 200000, 1000000],\r\n labels=False))\r\nall_data['nf_06'] = all_data['gender'] + '_' + all_data['marital_status']\r\nall_data['nf_07'] = all_data['education'] + '_' + all_data['job_position']\r\nall_data['nf_08'] = all_data['nf_06'] + '_' + all_data['nf_07'] + '_' + \\\r\n all_data['overdue_credit_count'].astype('str') + '_' + \\\r\n all_data['credit_count'].astype('str')\r\nall_data['nf_09'] = all_data['credit_count'].astype('str') + '_' + \\\r\n all_data['tariff_id']\r\nall_data['nf_10'] = all_data['credit_count'].astype('str') + '_' + \\\r\n all_data['tariff_id'] + '_' + all_data['job_position']\r\nall_data['nf_11'] = all_data['credit_count'].astype('str') + '_' + \\\r\n all_data['credit_month'].astype('str')\r\nall_data['nf_12'] = np.log(all_data['credit_sum']) * \\\r\n np.log(all_data['monthly_income'] + 1)\r\nall_data['nf_13'] = np.int16(pd.cut(all_data['score_shk'],\r\n np.arange(0.0, 0.9, 0.05),\r\n labels=False))\r\nall_data['nf_14'] = np.log(all_data['credit_sum']) * \\\r\n np.log(all_data['score_shk'] + 1)\r\nall_data['nf_15'] = np.log(all_data['credit_sum']) + \\\r\n np.log(all_data['score_shk'] + 1)\r\nall_data['nf_16'] = all_data['overdue_credit_count'].astype('str') + '_' + \\\r\n all_data['credit_count'].astype('str') + '_' + \\\r\n all_data['nf_04'].astype('str') + '_' + \\\r\n all_data['nf_13'].astype('str')\r\nall_data['nf_17'] = all_data['nf_04'].astype('str') + '_' + \\\r\n all_data['nf_13'].astype('str')\r\nall_data['nf_18'] = all_data['gender'] + '_' + all_data['nf_03'].astype('str')\r\nall_data['nf_19'] = all_data['education'] + '_' + all_data['nf_03'].astype('str')\r\nall_data['nf_20'] = all_data['job_position'] + '_' + all_data['nf_03'].astype('str')\r\nall_data['nf_21'] = all_data['tariff_id'] + '_' + all_data['nf_03'].astype('str')\r\nall_data['nf_22'] = all_data['gender'] + '_' + all_data['nf_04'].astype('str')\r\nall_data['nf_23'] = all_data['education'] + '_' + all_data['nf_04'].astype('str')\r\nall_data['nf_24'] = all_data['job_position'] + '_' + all_data['nf_04'].astype('str')\r\nall_data['nf_25'] = all_data['tariff_id'] + '_' + all_data['nf_04'].astype('str')\r\nall_data['nf_26'] = all_data['gender'] + '_' + all_data['nf_05'].astype('str')\r\nall_data['nf_27'] = all_data['education'] + '_' + all_data['nf_05'].astype('str')\r\nall_data['nf_28'] = all_data['job_position'] + '_' + all_data['nf_05'].astype('str')\r\nall_data['nf_29'] = all_data['tariff_id'] + '_' + all_data['nf_05'].astype('str')\r\nall_data['nf_30'] = all_data['gender'] + '_' + all_data['credit_month'].astype('str')\r\nall_data['nf_31'] = all_data['education'] + '_' + all_data['credit_month'].astype('str')\r\nall_data['nf_32'] = all_data['job_position'] + '_' + all_data['credit_month'].astype('str')\r\nall_data['nf_33'] = all_data['tariff_id'] + '_' + all_data['credit_month'].astype('str')\r\nall_data['nf_34'] = all_data['nf_18'] + '_' + all_data['nf_22'] + '_' + \\\r\n all_data['nf_26'] + '_' + all_data['nf_30']\r\nall_data['nf_35'] = all_data['nf_19'] + '_' + all_data['nf_23'] + '_' + \\\r\n all_data['nf_27'] + '_' + all_data['nf_31']\r\nall_data['nf_36'] = all_data['nf_20'] + '_' + all_data['nf_24'] + '_' + \\\r\n all_data['nf_28'] + '_' + all_data['nf_32']\r\nall_data['nf_37'] = all_data['nf_21'] + '_' + all_data['nf_25'] + '_' + \\\r\n all_data['nf_29'] + '_' + all_data['nf_33']\r\nall_data['nf_38'] = all_data['credit_sum'] + 
all_data['monthly_income']\r\nall_data['nf_39'] = all_data['credit_sum'] * all_data['monthly_income']\r\nall_data['nf_40'] = all_data['credit_sum'] / (all_data['credit_count'] + 1)\r\nall_data['nf_41'] = all_data['credit_sum'] / (all_data['credit_month'] + 1)\r\nall_data['nf_42'] = all_data['monthly_income'] / all_data['nf_41']\r\nall_data['nf_42'] = all_data['monthly_income'] * all_data['score_shk']\r\n\r\ndef col_cleaning (data, col):\r\n x = data[col].copy().str.lower()\r\n x[pd.isnull(x)] = 'ND'\r\n x = x.str.replace('республика', '')\r\n x = x.str.replace('респ', '')\r\n x = x.str.replace('респ.', '')\r\n x = x.str.replace('край.', '')\r\n x = x.str.replace('край', '')\r\n x = x.str.replace('область', '')\r\n x = x.str.replace('обл', '')\r\n x = x.str.replace('обл.', '')\r\n x = x.str.replace('.', '')\r\n x = x.str.replace('-', '')\r\n x = x.str.replace('.', '')\r\n x = x.str.replace(' ', '')\r\n x[x.str.contains('моск')] = 'москва'\r\n x[x.str.contains('санк')] = 'санкт-петербург'\r\n x[x.str.contains('брянс')] = 'брянская'\r\n x[x.str.contains('забайкал')] = 'забайкальский'\r\n x[x.str.contains('краснодарск')] = 'краснодарский'\r\n x[x.str.contains('красноярск')] = 'красноярский'\r\n x[x.str.contains('приморск')] = 'приморский'\r\n x[x.str.contains('пермск')] = 'пермский'\r\n x[x.str.contains('ставропольс')] = 'ставропольский'\r\n x[x.str.contains('хабаровск')] = 'хабаровский'\r\n x[x.str.contains('ямало')] = 'ямало-ненецкий'\r\n x[x.str.contains('ненец')] = 'ямало-ненецкий'\r\n x[x.str.contains('ханты')] = 'ханты-мансийский'\r\n x[x.str.contains('югра')] = 'ханты-мансийский'\r\n x[x.str.contains('чукотс')] = 'чукотский'\r\n x[x.str.contains('приволожс')] = 'приволжский'\r\n x[x.str.contains('приволжс')] = 'приволжский'\r\n x[x.str.contains('волгоград')] = 'волгоградская'\r\n x[x.str.contains('вологодс')] = 'вологодская'\r\n x[x.str.contains('волгородс')] = 'волгоградская'\r\n x[x.str.contains('астраханс')] = 'астраханская'\r\n x[x.str.contains('архангельск')] = 'архангельск'\r\n x[x.str.contains('тюменс')] = 'тюменская'\r\n x[x.str.contains('калужс')] = 'калужская'\r\n x[x.str.contains('челябинс')] = 'челябинская'\r\n x[x.str.contains('кировс')] = 'кировская'\r\n x[x.str.contains('тульск')] = 'тульская'\r\n x[x.str.contains('оренбург')] = 'оренбургская'\r\n x[x.str.contains('свердловс')] = 'свердловская'\r\n x[x.str.contains('ростовс')] = 'ростовская'\r\n x[x.str.contains('кемеровс')] = 'кемеровская'\r\n x[x.str.contains('самарс')] = 'самарская'\r\n x[x.str.contains('ленинградс')] = 'ленинградская'\r\n x[x.str.contains('иркутская')] = 'иркутская'\r\n x[x.str.contains('нижегородска')] = 'нижегородская'\r\n x[x.str.contains('тверск')] = 'тверская'\r\n x[x.str.contains('воронежск')] = 'воронежская'\r\n x[x.str.contains('владимирская')] = 'владимирская'\r\n x[x.str.contains('курск')] = 'курская'\r\n x[x.str.contains('курганска')] = 'курганская'\r\n x[x.str.contains('мурманска')] = 'мурманская'\r\n x[x.str.contains('саратовска')] = 'саратовская'\r\n x[x.str.contains('новосибирск')] = 'новосибирская'\r\n x[x.str.contains('саха')] = 'саха'\r\n x[x.str.contains('якутия')] = 'саха'\r\n x[x.str.contains('дагес')] = 'дагестан'\r\n x[x.str.contains('чечен')] = 'чечня'\r\n x[x.str.contains('хакас')] = 'хакасия'\r\n x[x.str.contains('башкортостан')] = 'башкортостан'\r\n x[x.str.contains('татар')] = 'татарстан'\r\n x[x.str.contains('коми')] = 'коми'\r\n x[x.str.contains('бурят')] = 'бурятия'\r\n x[x.str.contains('кабардин')] = 'кабардино балкарская'\r\n x[x.str.contains('карача')] = 'карачаево 
черкесская'\r\n x[x.str.contains('чуваш')] = 'чувашская'\r\n x[x.str.contains('осетия')] = 'осетия'\r\n x[x.str.contains('алания')] = 'осетия'\r\n x[x.str.contains('еврей')] = 'еврейская'\r\n x[x.str.contains('камча')] = 'камчатский'\r\n x[x.str.contains('орлов')] = 'орловская'\r\n x[x.str.contains('орёл')] = 'орловская'\r\n x[x.str.contains('мытищ')] = 'москва'\r\n x[x.str.contains('74')] = 'челябинская'\r\n x[x.str.contains('98')] = 'санкт-петербург'\r\n x[x.str.contains('алтай')] = 'алтайский'\r\n x[x.str.contains('россия')] = 'ND'\r\n x[x.str.contains('эвенкий')] = 'ND'\r\n x[x.str.contains('гусь')] = 'ND'\r\n x[x.str.contains('дальн')] = 'ND'\r\n return x\r\n\r\nall_data['living_region_preproc'] = col_cleaning(all_data, 'living_region')\r\n\r\ndef target_features(data, col_groups, target, alpha = 100):\r\n data = data[[col_groups, target]].copy()\r\n data['target_var'] = np.nan\r\n for col in data[col_groups].unique():\r\n goods = (data.loc[data[col_groups] == col, target] == 1).sum()\r\n bads = (data.loc[data[col_groups] == col, target] == 0).sum()\r\n idx = pd.notnull(data[target])\r\n gr_nrows = data.loc[data.loc[idx, col_groups] == col, target].shape[0]\r\n gr_mean = goods / (goods + bads)\r\n global_mean = data[target].mean()\r\n sm_likelihood = ((gr_mean * gr_nrows) + (global_mean * alpha)) / \\\r\n (gr_nrows + alpha)\r\n data.loc[data[col_groups] == col, 'target_var'] = sm_likelihood\r\n return data['target_var']\r\n\r\nall_data['trgt_01'] = target_features(all_data, 'living_region_preproc', TARGET)\r\nall_data['trgt_02'] = target_features(all_data, 'tariff_id', TARGET)\r\nall_data['trgt_03'] = target_features(all_data, 'nf_03', TARGET)\r\nall_data['trgt_04'] = target_features(all_data, 'nf_04', TARGET)\r\nall_data['trgt_05'] = target_features(all_data, 'nf_05', TARGET)\r\nall_data['trgt_06'] = target_features(all_data, 'nf_06', TARGET)\r\nall_data['trgt_07'] = target_features(all_data, 'nf_07', TARGET)\r\nall_data['trgt_08'] = target_features(all_data, 'nf_13', TARGET)\r\n\r\n# Other features.\r\nregions = pd.read_csv(WD + 'data/regions.csv', sep=';', decimal=',', encoding='cp1251', nrows=80)\r\nall_data = pd.merge(all_data, regions, how='left', left_on='living_region_preproc', right_on='reg')\r\nall_data = na_replace(data=all_data)\r\nall_data = all_data.drop('reg', 1)\r\n\r\n# Save\r\nall_data.to_csv('all_data.csv', index=False)\r\nall_data = pd.read_csv(WD + 'data/all_data.csv', sep=',', decimal='.', encoding='cp1251')\r\n\r\n# LE\r\ndef label_encoder(data):\r\n data = data.copy()\r\n for col in data:\r\n if data[col].dtype == 'O':\r\n print(col + ' encode')\r\n x = np.array(data[col], dtype = 'str')\r\n x = LabelEncoder().fit(x).transform(x)\r\n data[col] = np.int16(x)\r\n return data\r\n\r\nall_data = label_encoder(all_data)\r\n\r\n# OHE\r\nall_data = pd.get_dummies(all_data, columns=['gender'])\r\nall_data = pd.get_dummies(all_data, columns=['marital_status'])\r\nall_data = pd.get_dummies(all_data, columns=['job_position'])\r\nall_data = pd.get_dummies(all_data, columns=['credit_month'])\r\nall_data = pd.get_dummies(all_data, columns=['tariff_id'])\r\nall_data = pd.get_dummies(all_data, columns=['education'])\r\nall_data = pd.get_dummies(all_data, columns=['nf_03'])\r\nall_data = pd.get_dummies(all_data, columns=['nf_04'])\r\nall_data = pd.get_dummies(all_data, columns=['nf_05'])\r\nall_data = pd.get_dummies(all_data, columns=['living_region_preproc'])\r\n\r\n#\r\ndef log_scale(data):\r\n data = data.copy()\r\n for col in data.columns.drop(TARGET):\r\n if True:\r\n 
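            # Note: the `if True:` guard above makes the else branch dead code,
            # so every non-target column is log-transformed. numpy's log1p
            # computes the same log(x + 1) in one vectorized call -- a compact
            # equivalent sketch over the same columns:
            #
            #     cols = data.columns.drop(TARGET)
            #     data[cols] = np.log1p(data[cols])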
print(col)\r\n data[col] = np.log(data[col] + 1)\r\n else:\r\n next\r\n return data\r\n\r\nall_data = log_scale(all_data)\r\n\r\n# Learning. del all_data\r\ntest = all_data.loc[pd.isnull(all_data[TARGET])].drop(TARGET, axis=1)\r\nX = all_data[pd.notnull(all_data[TARGET])].drop(TARGET, axis=1)\r\ny = all_data.loc[pd.notnull(all_data[TARGET]), TARGET]\r\nX_train, X_valid = train_test_split(X, train_size=0.7, random_state=SEED)\r\ny_train, y_valid = train_test_split(y, train_size=0.7, random_state=SEED)\r\n\r\nmodel_xgb = xgb.XGBClassifier(n_estimators=320,\r\n max_depth=12,\r\n learning_rate=0.03,\r\n subsample=0.9,\r\n colsample_bytree=1,\r\n min_child_weight=2.5,\r\n gamma=5,\r\n reg_lambda=15,\r\n seed=SEED)\r\nmodel_xgb.fit(X_train, y_train, eval_metric='auc',\r\n verbose=1, eval_set=[(X_valid, y_valid), (X_train, y_train)])\r\nmodel_xgb.fit(X, y, eval_metric='auc',\r\n verbose=1, eval_set=[(X, y)])\r\n\r\n# Features Importance.\r\n# importance = sorted(model_xgb.get_fscore().items(), key=operator.itemgetter(1))\r\n\r\n# Model perfromance.\r\ndef perfromance(model, X_valid, y_valid, alpha):\r\n y_pred_prb = model.predict_proba(X_valid)[:,1]\r\n y_pred_bin = np.int64(y_pred_prb > alpha)\r\n accuracy = accuracy_score(y_valid, y_pred_bin) * 100\r\n roc = roc_auc_score(y_valid, y_pred_prb) * 100\r\n mat = confusion_matrix(y_valid, y_pred_bin)\r\n print('Accuracy: %.2f%%' % (accuracy))\r\n print('ROC-AUC : %.2f%%' % (roc))\r\n print('Precision : %.2f%%' % (precision_score(y_valid, y_pred_bin)))\r\n print('Recall : %.2f%%' % (recall_score(y_valid, y_pred_bin)))\r\n print(mat)\r\n return [mat, accuracy, roc, y_pred_prb, y_pred_bin]\r\n\r\nprerf = perfromance(model_xgb, X_valid, y_valid, 0.2)\r\n\r\n# Predict and Output.\r\nid = pd.read_csv(WD + 'data/credit_test.csv',\r\n sep=';', decimal=',', encoding='cp1251')['client_id']\r\nxgb_predict = model_xgb.predict_proba(test, ntree_limit=194)[:, 1]\r\nxgb_predict = pd.DataFrame({'_ID_': id,\r\n '_VAL_': xgb_predict})\r\nxgb_predict.to_csv('predict_' + 'xgb_' + '61' + '.csv', index=False)\r\n\r\n# Ensemble: Mean Predict.\r\nfile = ['predict_xgb_' + str(i + 1) + '.csv' for i in range(60)]\r\npredict_table = pd.DataFrame()\r\nfor file in os.listdir(wd):\r\n if 'xgb' in file:\r\n predict_table[file.split('.')[0][8:]] = pd.read_csv(wd + file).iloc[:, 1]\r\n\r\nensemble_mean = pd.DataFrame({'amean': predict_table.apply(np.mean, axis=1),\r\n 'gmean': predict_table.apply(sp.stats.gmean,\r\n axis=1),\r\n 'hmean': predict_table.apply(sp.stats.hmean,\r\n axis=1)})\r\n\r\ndef aghm(a, b, c = None, verbose = False):\r\n if c == None:\r\n an, bn = a, b\r\n while abs(an - bn) > 1.e-9:\r\n an, bn = np.mean([an, bn]), sp.stats.gmean([an, bn])\r\n if verbose:\r\n agm, am, gm = an, np.mean([a, b]), sp.stats.gmean([a, b])\r\n print('AGM {:.9f} \\nAM {:.9f} \\nGM {:.9f}'.format(agm, am, gm))\r\n return an\r\n else:\r\n an, bn, cn = a, b, c\r\n while abs((an - bn) + (an - cn) + (bn - cn)) > 1.e-9:\r\n an, bn, cn = np.mean([an, bn, cn]), \\\r\n sp.stats.gmean([an, bn, cn]), \\\r\n sp.stats.hmean([an, bn, cn])\r\n if verbose:\r\n aghm, am, gm, hm = an, np.mean([a, b, c]), \\\r\n sp.stats.gmean([a, b, c]), \\\r\n sp.stats.hmean([a, b, c])\r\n print('AGHM {:.9f} \\\r\n \\nAM {:.9f} \\\r\n \\nGM {:.9f} \\\r\n \\nHM {:.9f}'.format(aghm, am, gm, hm))\r\n return an\r\n\r\nfor row in range(ensemble_mean.shape[0]):\r\n ensemble_mean.loc[row, 'aghmean'] = aghm(a=ensemble_mean.loc[row, 'amean'],\r\n b=ensemble_mean.loc[row, 'gmean'],\r\n c=ensemble_mean.loc[row, 'hmean'])\r\n if row 
% 1000 == 0: print(row)\r\n\r\npredict_ensemble_amean = pd.DataFrame({'_ID_': id,\r\n '_VAL_': ensemble_mean['amean']})\r\npredict_ensemble_hmean = pd.DataFrame({'_ID_': id,\r\n '_VAL_': ensemble_mean['gmean']})\r\npredict_ensemble_gmean = pd.DataFrame({'_ID_': id,\r\n '_VAL_': ensemble_mean['hmean']})\r\npredict_ensemble_aghmean = pd.DataFrame({'_ID_': id,\r\n '_VAL_': ensemble_mean['aghmean']})\r\n\r\npredict_ensemble_amean.to_csv('predict_ensemble_amean' + '.csv', index=False)\r\npredict_ensemble_hmean.to_csv('predict_ensemble_hmean' + '.csv', index=False)\r\npredict_ensemble_gmean.to_csv('predict_ensemble_gmean' + '.csv', index=False)\r\npredict_ensemble_aghmean.to_csv('predict_ensemble_aghmean' + '.csv', index=False)\r\n\r\n# Ensemble: Stacking.\r\n# Обучить на метапризнаках\r\n# Обучить на трейн+прогноз на тест и дать повторный прогноз на тест\r\nall_data = pd.read_csv(WD + 'data/all_data.csv', sep=',', decimal='.', encoding='cp1251')\r\n\r\ntest = all_data[pd.isnull(all_data[TARGET])].drop(TARGET, axis=1)\r\nX = all_data[pd.notnull(all_data[TARGET])].drop(TARGET, axis=1)\r\ny = all_data.loc[pd.notnull(all_data[TARGET]), TARGET]\r\nX_train, X_valid = train_test_split(X, train_size=0.7, random_state=SEED)\r\ny_train, y_valid = train_test_split(y, train_size=0.7, random_state=SEED)\r\n\r\nmodels = [\r\n xgb.XGBClassifier(n_estimators=50,\r\n max_depth=12,\r\n learning_rate=0.15,\r\n subsample=0.9,\r\n colsample_bytree=1,\r\n min_child_weight=2.5,\r\n gamma=5,\r\n reg_lambda=15,\r\n seed=SEED),\r\n xgb.XGBClassifier(n_estimators=220,\r\n max_depth=10,\r\n learning_rate=0.05,\r\n subsample=0.85,\r\n colsample_bytree=85,\r\n min_child_weight=2.5,\r\n gamma=5,\r\n reg_lambda=15,\r\n seed=SEED),\r\n xgb.XGBClassifier(n_estimators=300,\r\n max_depth=12,\r\n learning_rate=0.03,\r\n subsample=0.9,\r\n colsample_bytree=1,\r\n min_child_weight=2.5,\r\n gamma=5,\r\n reg_lambda=15,\r\n seed=SEED)\r\n]\r\n\r\nclass EnsembleStacking(object):\r\n def __init__(self, base_models, n_folds=1, fit_baase_models=None):\r\n self.base_models = base_models\r\n self.n_folds = n_folds\r\n self.fit_baase_models = fit_baase_models\r\n\r\n def fit_base_models(self, X_train, y_train, X_test, y_test=None):\r\n X_train = np.array(X_train)\r\n y_train = np.array(y_train)\r\n X_test = np.array(X_test)\r\n if y_test is not None:\r\n y_test = np.array(y_test)\r\n self.fit_baase_models = []\r\n\r\n if self.n_folds != 1:\r\n folds = list(KFold(len(y_train), n_folds=self.n_folds, random_state=SEED))\r\n scores = np.zeros((len(self.base_models), self.n_folds))\r\n\r\n S_train = np.zeros((X_train.shape[0], len(self.base_models)))\r\n S_test = np.zeros((X_test.shape[0], len(self.base_models)))\r\n\r\n for i, clf in enumerate(self.base_models):\r\n print('Model', i)\r\n if self.n_folds == 1:\r\n if y_test is not None:\r\n if isinstance(clf, xgb.sklearn.XGBClassifier):\r\n clf.fit(X_train, y_train, eval_metric='auc',\r\n verbose=5, eval_set=[(X_test, y_test)])\r\n else:\r\n clf.fit(X_train_cv, y_train_cv)\r\n self.fit_baase_models.append(clf)\r\n else:\r\n if isinstance(clf, xgb.sklearn.XGBClassifier):\r\n clf.fit(X_train, y_train,\r\n eval_metric='auc', eval_set=[(X_train, y_train)])\r\n else:\r\n clf.fit(X_train_cv, y_train_cv)\r\n self.fit_baase_models.append(clf)\r\n\r\n S_train[:, i] = clf.predict_proba(X_train)[:, 1]\r\n S_test[:, i] = clf.predict_proba(X_test)[:, 1]\r\n\r\n if y_test is not None:\r\n scores[i, 0] = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])\r\n print(scores[i, 0])\r\n\r\n 
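                        # Out-of-fold bookkeeping: S_train collects each base
                        # model's held-out predictions fold by fold, while
                        # S_test_i keeps one test-set prediction column per
                        # fold; on the last fold they are averaged into a
                        # single meta-feature column via sp.mean(S_test_i, 1).
                        # E.g. with 3 base models and 5 folds:
                        #     S_train.shape  == (n_train, 3)
                        #     S_test_i.shape == (n_test, 5)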
pd.DataFrame(S_train[:, i]).to_csv(str(i) + '_model' + '_train' + '.csv',\r\n                                                       index=False)\r\n                    pd.DataFrame(S_test[:, i]).to_csv(str(i) + '_model' + '_test' + '.csv',\r\n                                                      index=False)\r\n            else:\r\n                S_test_i = np.zeros((X_test.shape[0], len(folds)))\r\n                for j, (train_idx, test_idx) in enumerate(folds):\r\n                    print('Model', i, 'Fold', j)\r\n                    X_train_cv = X_train[train_idx]\r\n                    y_train_cv = y_train[train_idx]\r\n                    X_holdout_cv = X_train[test_idx]\r\n                    y_holdout_cv = y_train[test_idx]\r\n                    if isinstance(clf, xgb.sklearn.XGBClassifier):\r\n                        clf.fit(X_train_cv, y_train_cv, eval_metric='auc',\r\n                                verbose=5, eval_set=[(X_holdout_cv, y_holdout_cv)])\r\n                    else:\r\n                        clf.fit(X_train_cv, y_train_cv)\r\n                    self.fitted_base_models.append(clf)\r\n                    S_train[test_idx, i] = clf.predict_proba(X_holdout_cv)[:, 1]\r\n                    S_test_i[:, j] = clf.predict_proba(X_test)[:, 1]\r\n                    if y_test is not None:\r\n                        scores[i, j] = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])\r\n                        print(scores[i, j])\r\n                    if j == (self.n_folds - 1):\r\n                        pd.DataFrame(S_train[:, i]).to_csv(str(i) + '_model' + '_train' + '.csv',\r\n                                                           index=False)\r\n                        pd.DataFrame(sp.mean(S_test_i, 1)).to_csv(str(i) + '_model' + '_test' + '.csv',\r\n                                                                  index=False)\r\n                        S_test[:, i] = sp.mean(S_test_i, 1)\r\n\r\n        self.S_train = S_train\r\n        self.S_test = S_test\r\n\r\n        if y_test is not None:\r\n            print('\\n')\r\n            for model in range(len(self.base_models)):\r\n                print('Model {}: {:.4f}'.format(model, scores.mean(1)[model] * 100))\r\n\r\n    def predict_stacker(self, stacker, y_train, y_test=None, S_data_path=None, models=None):\r\n        if S_data_path is not None:\r\n            self.S_train = np.zeros((X.shape[0], models))\r\n            self.S_test = np.zeros((test.shape[0], models))\r\n            n_models = 0\r\n\r\n            for file in os.listdir(S_data_path):\r\n                if 'test' in file:\r\n                    self.S_test[:, n_models] = pd.read_csv(S_data_path + file).iloc[:, 0].values\r\n                    n_models += 1\r\n            n_models = 0\r\n\r\n            for file in os.listdir(S_data_path):\r\n                if 'train' in file:\r\n                    self.S_train[:, n_models] = pd.read_csv(S_data_path + file).iloc[:, 0].values\r\n                    n_models += 1\r\n\r\n        self.stacker = stacker\r\n        self.stacker.fit(self.S_train, y_train)\r\n        y_pred = self.stacker.predict_proba(self.S_test)[:, 1]\r\n\r\n        if y_test is not None:\r\n            print('\\n')\r\n            print('AUC: %.4f%%' % (roc_auc_score(y_test, y_pred) * 100))\r\n        return y_pred\r\n\r\nS_data = 'ds_applications/customers_choice/data/S_data/'\r\nstacker = LogisticRegression(C=0.001)\r\nstacking = EnsembleStacking(models)\r\n\r\nstacking.fit_base_models(X_train, y_train, X_valid, y_valid)\r\nstacking.fit_base_models(X, y, test)\r\n\r\nstack_y_pred = stacking.predict_stacker(stacker, y_train, y_test = y_valid)\r\nstack_y_pred = stacking.predict_stacker(stacker, y)\r\nstack_y_pred = stacking.predict_stacker(stacker, y, S_data_path = S_data, models = 22)\r\n\r\nid = pd.read_csv(WD + 'data/credit_test.csv',\r\n                 sep=';', decimal=',', encoding='cp1251')['client_id']\r\nstacking_predict = pd.DataFrame({'_ID_': id, '_VAL_': stack_y_pred})\r\nstacking_predict.to_csv('stacking_predict_2' + '.csv', index=False)\r\n\r\n# with open('models.p', 'wb') as file:\r\n#     pickle.dump(models, file)\r\n# with open('models.p', 'rb') as file:\r\n#     models = pickle.load(file)\r\n","sub_path":"competitions/boosters-tinkoff-credit/src/choice_credit.py","file_name":"choice_credit.py","file_ext":"py","file_size_in_byte":27215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260451244","text":"import pandas as pd\n\n\ndata = 
pd.read_csv('./property_id1.csv')\nproperty_id_list = list(set(data['propertyId']))\nproperty_id_list = [x.replace('\"','') for x in property_id_list]\n\n# print(len(property_id_list))\n# https://mapi-ng.rdc.moveaws.com/api/v1/properties/1564789514?client_id=rdc_mobile_native%2C9.3.7%2Candroid\nnew_data = pd.DataFrame()\nurl_sample = 'https://mapi-ng.rdc.moveaws.com/api/v1/properties?offset=0&limit=200&county=New+York&state_code=NY&sort=relevance&schema=mapsearch&client_id=rdc_mobile_native%2C9.4.2%2Candroid'\nnew_data['detail_api'] = ['https://mapi-ng.rdc.moveaws.com/api/v1/properties/'+str(int(x))+'?client_id=rdc_mobile_native%2C9.3.7%2Candroid' for x in property_id_list]\nnew_data = new_data.drop_duplicates()\nprint(new_data)\nnew_data.to_csv('./realtor_app_detail_page_search_criteria.csv', index=False)","sub_path":"tools/analysis_realtor_app_list_page_json_data/analysis_property_id/analysis_property_id.py","file_name":"analysis_property_id.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"62846032","text":"import socket\nimport threading\n\nHEADER=1024\nPORT=8900\nSERVER= 'localhost'\nADDR=(SERVER,PORT)\nFORMAT='utf-8'\nDISCONNECT_MESSAGE=\"!Disconnect\"\ngreet=\"Welcome!\"\n\nserver=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\ndef handle_client(conn,addr):\n    print(f\"[New Connection] {addr} connected.\")\n    conn.send(greet.encode(FORMAT))\n    connected=True\n    while connected:\n        msg_length=conn.recv(HEADER).decode(FORMAT)\n        if msg_length:\n            msg_length=int(msg_length)\n            msg=conn.recv(msg_length).decode(FORMAT)\n            if msg==DISCONNECT_MESSAGE:\n                connected=False\n            print(f\"[{addr}] {msg}\")\n    conn.close()\n    print(f\"connection closed for {addr}\")\n\n\ndef start():\n    print(\"[STARTING]......\")\n    server.listen()\n    print(f\"Server Listening on {SERVER}:{PORT}\")\n    while True:\n        conn, addr=server.accept()\n        thread=threading.Thread(target=handle_client,args=(conn,addr))\n        thread.start()\n        print(f\"Active Connections: {threading.active_count()-1}\")\n\nstart()\n","sub_path":"3 Full Duplex TCP/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392981285","text":"from unittest.mock import patch, mock_open\nfrom util.filehelper import (\n    file_to_array,\n    get_number_list_from_file,\n    get_string_lists_from_file,\n)\n\n\nclass TestUtil:\n    def test_file_to_array_opens_the_correct_file(self):\n        path = \"./puzzles/01/puzzle.txt\"\n        with patch(\"builtins.open\", mock_open(read_data=\"8\\n9\\n10\")) as mock_file:\n            file_to_array(path)\n            assert open(path).read() == \"8\\n9\\n10\"\n            mock_file.assert_called_with(path)\n            mock_file.reset_mock()\n\n    def test_file_to_array_transforms_file_content_to_array(self):\n        path = \"./puzzles/01/puzzle2.txt\"\n        with patch(\"builtins.open\", mock_open(read_data=\"8\\n9\\n10\")) as mock_file:\n            expected = [8, 9, 10]\n            result = file_to_array(path)\n            assert len(expected) == len(result)\n            assert sorted(expected) == sorted(result)\n            mock_file.reset_mock()\n\n    def test_get_number_list_from_file_transforms_file_content_to_array(self):\n        path = \"./puzzles/02/puzzle.txt\"\n        with patch(\"builtins.open\", mock_open(read_data=\"8,9,10\")) as mock_file:\n            expected = [8, 9, 10]\n            result = get_number_list_from_file(path)\n            assert len(expected) == len(result)\n            assert sorted(expected) == sorted(result)\n            mock_file.reset_mock()\n\n    def 
test_get_string_lists_from_file_transforms_file_content_to_array(self):\n path = \"./puzzles/02/puzzle.txt\"\n with patch(\"builtins.open\", mock_open(read_data=\"a,b,c\\nd,e,f\")) as mock_file:\n expected = [[\"a\", \"b\", \"c\"], [\"d\", \"e\", \"f\"]]\n result = get_string_lists_from_file(path)\n assert len(expected) == len(result)\n assert sorted(expected) == sorted(result)\n mock_file.reset_mock()\n\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516523859","text":"def minDistance(word1, word2) -> int:\r\n n, m = len(word1), len(word2)\r\n dp = [[0]*(m+1) for _ in range(n+1)]\r\n \r\n for i in range(m+1):\r\n dp[0][i] = i\r\n for i in range(n+1):\r\n dp[i][0] = i\r\n \r\n for i in range(1, n+1):\r\n for j in range(1, m+1):\r\n if word1[i-1] == word2[j-1]:\r\n dp[i][j] = dp[i-1][j-1]\r\n else:\r\n insert = dp[i-1][j] + 1\r\n delete = dp[i][j - 1] + 1\r\n replace = dp[i-1][j-1] + 1\r\n \r\n dp[i][j] = min(insert, delete, replace)\r\n\r\n return dp[n][m]\r\n\r\nword1 = \"zoologicoarchaeologist\"\r\nword2 = \"zoogeologist\"\r\n\r\nprint(minDistance(word1, word2))","sub_path":"Day_25/5_edit_distance.py","file_name":"5_edit_distance.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311745680","text":"# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nfrom contextlib import contextmanager\nfrom textwrap import dedent\n\nfrom pants.util.dirutil import safe_open\nfrom pants_test.pants_run_integration_test import PantsRunIntegrationTest\n\n\nclass PrepCommandIntegrationTest(PantsRunIntegrationTest):\n\n _SENTINELS = {\n 'test': 'running-prep-in-goal-test.txt',\n 'compile': 'running-prep-in-goal-compile.txt',\n 'binary': 'running-prep-in-goal-binary.txt'\n }\n\n @classmethod\n def _emit_targets(cls, workdir):\n prep_command_path = os.path.join(workdir, 'src/java/org/pantsbuild/prepcommand')\n with safe_open(os.path.join(prep_command_path, 'BUILD'), 'w') as fp:\n for name, touch_target in cls._SENTINELS.items():\n fp.write(dedent(\"\"\"\n prep_command(\n name='{name}',\n goals=['{goal}'],\n prep_executable='touch',\n prep_args=['{tmpdir}/{touch_target}'],\n )\n \"\"\".format(name=name, goal=name, tmpdir=workdir, touch_target=touch_target)))\n return ['{}:{}'.format(prep_command_path, name) for name in cls._SENTINELS]\n\n @classmethod\n def _goal_ran(cls, basedir, goal):\n return os.path.exists(os.path.join(basedir, cls._SENTINELS[goal]))\n\n def _assert_goal_ran(self, basedir, goal):\n self.assertTrue(self._goal_ran(basedir, goal))\n\n def _assert_goal_did_not_run(self, basedir, goal):\n self.assertFalse(self._goal_ran(basedir, goal))\n\n @contextmanager\n def _execute_pants(self, goal):\n with self.temporary_workdir() as workdir:\n prep_commands_specs = self._emit_targets(workdir)\n # Make sure the emitted BUILD under .pants.d is not ignored.\n config = {\n 'GLOBAL': {\n 'build_ignore': [],\n 'pants_ignore': []\n }\n }\n pants_run = self.run_pants_with_workdir([goal] + prep_commands_specs, workdir, config=config)\n self.assert_success(pants_run)\n yield workdir\n\n def 
test_prep_command_in_compile(self):\n    with self._execute_pants('compile') as workdir:\n      self._assert_goal_ran(workdir, 'compile')\n      self._assert_goal_did_not_run(workdir, 'test')\n      self._assert_goal_did_not_run(workdir, 'binary')\n\n  def test_prep_command_in_test(self):\n    with self._execute_pants('test') as workdir:\n      self._assert_goal_ran(workdir, 'compile')\n      self._assert_goal_ran(workdir, 'test')\n      self._assert_goal_did_not_run(workdir, 'binary')\n\n  def test_prep_command_in_binary(self):\n    with self._execute_pants('binary') as workdir:\n      self._assert_goal_ran(workdir, 'compile')\n      self._assert_goal_ran(workdir, 'binary')\n      self._assert_goal_did_not_run(workdir, 'test')\n","sub_path":"tests/python/pants_test/core_tasks/test_prep_command_integration.py","file_name":"test_prep_command_integration.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263213050","text":"import gdsMill\nimport tech\nfrom contact import contact\nimport math\nimport debug\nimport grid\nfrom pin_layout import pin_layout\nfrom vector import vector\nfrom vector3d import vector3d \nfrom globals import OPTS\nfrom router import router\n\nclass supply_router(router):\n    \"\"\"\n    A router class to read an obstruction map from a gds and\n    routes a grid to connect the supply on the two layers.\n    \"\"\"\n\n    def __init__(self, gds_name=None, module=None):\n        \"\"\"Use the gds file for the blockages with the top module topName and\n        layers for the layers to route on\n        \"\"\"\n        router.__init__(self, gds_name, module)\n        \n        self.pins = {}\n\n    \n    def clear_pins(self):\n        \"\"\"\n        Convert the routed path to blockages.\n        Keep the other blockages unchanged.\n        \"\"\"\n        self.pins = {}\n        self.rg.reinit()\n        \n\n    def route(self, cell, layers, vdd_name=\"vdd\", gnd_name=\"gnd\"):\n        \"\"\" \n        Route the vdd and gnd supply nets and return\n        the simplified rectilinear paths. \n        \"\"\"\n        debug.info(1,\"Running supply router on {0} and {1}...\".format(vdd_name, gnd_name))\n        self.cell = cell\n        self.pins[vdd_name] = []\n        self.pins[gnd_name] = []\n\n        # Clear the pins if we have previously routed\n        if (hasattr(self,'rg')):\n            self.clear_pins()\n        else:\n            # Set up layers and track sizes\n            self.set_layers(layers)\n            # Create a routing grid over the entire area\n            # FIXME: This could be created only over the routing region,\n            # but this is simplest for now.\n            self.create_routing_grid()\n            # This will get all shapes as blockages\n            self.find_blockages()\n\n        # Get the pin shapes\n        self.get_pin(vdd_name)\n        self.get_pin(gnd_name)\n        \n        # Now add the blockages (all shapes except the src/tgt pins)\n        self.add_blockages()\n        # Add blockages from previous routes\n        self.add_path_blockages()        \n\n        # source pin will be a specific layout pin\n        # target pin will be the rails only\n        \n        # returns the path in tracks\n        # (path,cost) = self.rg.route(detour_scale)\n        # if path:\n        #     debug.info(1,\"Found path: cost={0} \".format(cost))\n        #     debug.info(2,str(path))\n        #     self.add_route(path)\n        #     return True\n        # else:\n        #     self.write_debug_gds()\n        #     # clean up so we can try a reroute\n        #     self.clear_pins()\n        \n        self.write_debug_gds()\n        return False\n\n    \n    def add_route(self,path):\n        \"\"\" \n        Add the current wire route to the given design instance.\n        \"\"\"\n        debug.info(3,\"Set path: \" + str(path))\n\n        # Keep track of path for future blockages\n        self.paths.append(path)\n        \n        # This is marked for debug\n        self.rg.add_path(path)\n\n        # For debugging... 
if the path failed to route.\n        if path is None:\n            self.write_debug_gds()\n\n        # First, simplify the path\n        #debug.info(1,str(self.path))        \n        contracted_path = self.contract_path(path)\n        debug.info(1,str(contracted_path))\n\n        # convert the path back to absolute units from tracks\n        abs_path = map(self.convert_point_to_units,contracted_path)\n        debug.info(1,str(abs_path))\n        self.cell.add_route(self.layers,abs_path)\n\n    \n    def create_routing_grid(self):\n        \"\"\" \n        Create a sparse routing grid with A* expansion functions.\n        \"\"\"\n        # We will add a halo around the boundary\n        # of this many tracks\n        size = self.ur - self.ll\n        debug.info(1,\"Size: {0} x {1}\".format(size.x,size.y))\n\n        import supply_grid\n        self.rg = supply_grid.supply_grid()\n\n\n    ##########################\n    # Gridded supply route functions\n    ##########################\n    def create_grid(self, ll, ur):\n        \"\"\" Create alternating vdd/gnd lines horizontally and vertically \"\"\"\n        \n        self.create_horizontal_grid()\n        self.create_vertical_grid()\n\n\n    def create_horizontal_grid(self):\n        \"\"\" Create alternating vdd/gnd lines horizontally \"\"\"\n        \n        pass\n\n    def create_vertical_grid(self):\n        \"\"\" Create alternating vdd/gnd lines vertically \"\"\"\n        pass\n    \n","sub_path":"compiler/router/supply_router.py","file_name":"supply_router.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37140846","text":"# coding: utf-8\n\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow.contrib.keras as kr\nimport sys\n\nsys.path.append(\"..\")\nfrom ss2ft.cnn_model2 import TCNNConfig, TextCNN\nfrom preprocess import preprocess\nfrom fileop import getlines\n\ntry:\n    bool(type(unicode))\nexcept NameError:\n    unicode = str\n\n# (The paths below must be absolute; with relative paths, running from the command line works, but calls from Java fail with all kinds of errors)\ncorpuspath = 'D:/Documents/GitHub/ECNet-J2EE/pythonSrc/wsfx/NN/ss2ft/model_store/source.txt'\nvocab_dir = 'D:/Documents/GitHub/ECNet-J2EE/pythonSrc/wsfx/NN/ss2ft/model_store/vocab.txt'\n\nsave_path = 'D:/Documents/GitHub/ECNet-J2EE/pythonSrc/wsfx/NN/ss2ft/model_store/checkpoints/textcnn/best_validation'  # path where the best validation model is saved\ntensorboard_dir = 'D:/Documents/GitHub/ECNet-J2EE/pythonSrc/wsfx/NN/ss2ft/model_store/tensorboard/textcnn'\nstoplist1 = getlines('D:/Documents/GitHub/ECNet-J2EE/pythonSrc/data/stopwords.txt')\nstoplist2 = getlines('D:/Documents/GitHub/ECNet-J2EE/pythonSrc/data/num_20words.txt')\n\np = preprocess(corpuspath, vocab_dir)\nvocab = p.getvocab()\n\nclass CnnModel:\n    def __init__(self):\n        self.config = TCNNConfig()\n        self.model = TextCNN(self.config)\n\n        self.session = tf.Session()\n        self.session.run(tf.global_variables_initializer())\n        saver = tf.train.Saver()\n        saver.restore(sess=self.session, save_path=save_path)  # load the saved model\n\n    def predict(self, input1, input2):\n        # models trained under either Python 2 or Python 3 can be run in either environment\n        ssls = p.precessinput(input1,stoplist1,stoplist2)  # preprocessed facts\n        ftls = p.precessinput(input2,stoplist1,stoplist2)\n        data_1 = [p.getwordid(vocab, word, 0) for word in ssls]\n        data_2 = [p.getwordid(vocab, word, 0) for word in ftls]\n\n        feed_dict = {\n            self.model.input_x_1: kr.preprocessing.sequence.pad_sequences([data_1], self.config.seq_length_1),\n            self.model.input_x_2: kr.preprocessing.sequence.pad_sequences([data_2], self.config.seq_length_2),\n            self.model.keep_prob: 1.0\n        }\n\n        y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)\n        return 
y_pred_cls[0]\n\n\n","sub_path":"afterend/ecnet-master/pythonSrc/wsfx/NN/ss2ft/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6003000","text":"# \n# Each material window class for image analysis reference\n# \n\n# This class is a custom linkedlist object (see self.nextMaterialName and self.previousMaterialName below)\nclass materialWindow:\n # This is the constructor\n def __init__(self, name, parent, windowInstance, TkImage, position, zoomScale, scaleColor=\"#FFFFFF\", lineArr=None, nextMaterialName=None, previousMaterialName=None, isActive=False, canvas=None, imageArr=None, tool=\"Move\", brightness=0, contrast=float(1), gamma=float(1), madeChangeBeforeSaving=True):\n # This variable is for determining if we made an edit to this material before saving\n # Type: Boolean\n self.madeChangeBeforeSaving = madeChangeBeforeSaving\n\n # This variable is for holding the TkImage object that tkinter uses for rendering\n # Type: TkImage\n self.TkImage = TkImage\n\n # This variable is for holding the material image as an array for editing and saving purposes\n # Type: (n x m) Array\n self.imageArr = imageArr\n\n # The is the name of the material\n # Type: String\n self.name = name\n\n # This is the position of the material with respect to the canvas (for moving purposes)\n # Type: (1 x 2) Array\n self.position = position\n\n # This is the variable to determine how zoomed into the material we are\n # Type: Double\n self.zoomScale = zoomScale\n\n # This is the list of points that we have drawn on the material\n # Type: (n x 2) Array\n self.lineArr = lineArr\n self.linePoints = []\n\n # This is the variable to determine what material is the parent of the stack\n # Type: String (N.B. should be a valid material name in the windowArr global variable list)\n self.parent = parent\n\n # This is the variable to hold the tkinter canvas for reference later\n # Type: Tkinter.canvas()\n self.canvas = canvas\n\n # This is the html color code for the scale color if the user decides to change it\n # Type: String (N.B. should be a valid html color code)\n self.scaleColor = scaleColor\n\n # This determines what tool is currently active for this material\n # Type: String (N.B. should be a valid tool)\n self.tool = tool\n\n # The following variables determine aspects of the material\n # Type: Double\n self.brightness = brightness\n self.contrast = contrast\n self.gamma = gamma\n\n # The following variable holds the tkinter toplevel() window object for reference later\n # Type: Tkinter.toplevel()\n self.windowInstance = windowInstance\n\n # The following variable holds the name of the next material in the linkedlist for reference later\n # Type: String (N.B. should be a valid material name in the windowArr global variable list)\n self.nextMaterialName = nextMaterialName\n \n # The following variable holds the name of the previous material in the linkedlist for reference later\n # Type: String (N.B. 
should be a valid material name in the windowArr global variable list)\n self.previousMaterialName = previousMaterialName\n\n # The following variable determines if the current material is active or not to determine if we should be making changes to it\n # Type: Boolean\n self.isActive = isActive\n# \n# End of material window class for image analysis reference\n# ","sub_path":"materialWindow.py","file_name":"materialWindow.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221606113","text":"import subprocess\nimport time\nimport os\nimport psutil\nfrom optparse import OptionParser\nimport time\nimport re\n\ndef parseInput():\n parser = OptionParser()\n parser.add_option(\"-e\", \"--dataset\", dest=\"dataset\",\n help=\"[REQUIRED] input edge list to parse\")\n\n parser.add_option(\"-t\", \"--threads\", dest=\"threads\",\n help=\"[REQUIRED] number of threads to run with\")\n\n parser.add_option(\"-r\", \"--runs\", dest=\"runs\",\n help=\"[REQUIRED] number of runs for each app\")\n\n parser.add_option(\"-p\", \"--poll\", dest=\"poll\",\n help=\"[REQUIRED] time slice for polling\")\n\n (opts, args) = parser.parse_args()\n\n missing_options = []\n for option in parser.option_list:\n if re.match(r'^\\[REQUIRED\\]', option.help) and eval('opts.' + option.dest) == None:\n missing_options.extend(option._long_opts)\n if len(missing_options) > 0:\n parser.error('Missing REQUIRED parameters: ' + str(missing_options))\n \n return opts\n\ndef main():\n options = parseInput();\n\n systems = ['SpaceGraph']\n apps = ['triangle_counting']\n\n #only way (i could get to work) to control threading in these systems\n os.environ[\"GRAPHLAB_THREADS_PER_WORKER\"]=options.threads\n os.environ[\"OMP_NUM_THREADS\"]=options.threads\n\n memfile=open(\"/afs/cs.stanford.edu/u/caberger/pgx-release-0.8.1/server.config\" , 'w+')\n memfile.write(\"{\\\"num_workers_analysis\\\": \"+options.threads+\"}\")\n\n system_dictionary = { \\\n 'SpaceGraph': { \\\n 'dir' : '/afs/cs.stanford.edu/u/caberger/SpaceGraph', \\\n 'triangle_counting':{ \\\n 'args':[\"/afs/cs.stanford.edu/u/caberger/SpaceGraph/bin/undirected_triangle_counting\",options.dataset + \"/bin/udata.bin\",options.threads,\"hybrid\"] \\\n } \\\n }, \\\n 'GraphLab': { \\\n 'dir':'/afs/cs.stanford.edu/u/caberger/graphlab/', \\\n 'triangle_counting':{ \\\n 'args':[\"/afs/cs.stanford.edu/u/caberger/graphlab/release/toolkits/graph_analytics/undirected_triangle_count\",\"--graph=\"+options.dataset + \"/glab_undirected/data.txt\",\"--format=snap\",\"--ncpus=\"+options.threads] \\\n } \\\n }, \\\n 'pgx': { \\\n 'dir' : '/afs/cs.stanford.edu/u/caberger/pgx-release-0.8.1', \\\n 'triangle_counting':{ \\\n 'args':[\"java\",\"-cp\",\"/afs/cs.stanford.edu/u/caberger/pgx-release-0.8.1/lib/*:/afs/cs.stanford.edu/u/caberger/pgx-release-0.8.1/third-party/*:/afs/cs.stanford.edu/u/caberger/pgx-release-0.8.1/classes\",\"demos.UndirectedTriangleCounting\",options.dataset+\"/pgx/sample.edge.json\"] \\\n } \\\n }, \\\n 'GraphChi': { \\\n 'dir': '/afs/cs.stanford.edu/u/caberger/graphchi-cpp', \\\n 'triangle_counting':{ \\\n 'args':[\"/afs/cs.stanford.edu/u/caberger/graphchi-cpp/bin/example_apps/trianglecounting\",\"file\",options.dataset+\"/edgelist/data.txt\",\"filetype\",\"edgelist\",\"execthreads\",options.threads,\"loadthreads\",options.threads,\"niothreads\",options.threads,\"--nshards=2\"] \\\n } \\\n }, \\\n 'snap': { \\\n 'dir': '/afs/cs.stanford.edu/u/caberger/snapr', \\\n 
'triangle_counting':{ \\\n        'args':[\"/afs/cs.stanford.edu/u/caberger/snapr/examples/tcount/centrality\",\"-i:\"+options.dataset+\"/edgelist/data.txt\",options.threads] \\\n      } \\\n    }, \\\n\n  } \\\n\n  outpath = \"/dfs/scratch0/caberger/run_outputs/\" + options.dataset.split('/').pop()\n  if not os.path.exists(outpath):\n    os.system('mkdir ' + outpath)\n  \n  for system in systems:\n    os.chdir(system_dictionary[system]['dir'])\n    for app in apps:\n      new_path = outpath + '/' + app;\n\n      if not os.path.exists(new_path):\n        os.system('mkdir ' + new_path)\n      new_path += '/' + system\n      if not os.path.exists(new_path):\n        os.system('mkdir ' + new_path)\n      new_path += '/' + time.strftime(\"%m-%d\")\n      if not os.path.exists(new_path):\n        os.system('mkdir ' + new_path)\n\n      new_path += '/' + options.threads + \"t_\" + time.strftime(\"%H-%M-%S\")\n      os.system('mkdir ' + new_path)\n\n      memfile=open(new_path + \"/avg_mem_usage.csv\" , 'w+')\n      memlists = []\n      for i in range(0,int(options.runs)):\n        stdfile=open(new_path + \"/stdout_\"+str(i)+\".txt\" , 'w+')\n        SLICE_IN_SECONDS = float(options.poll)\n        p = psutil.Popen(system_dictionary[system][app]['args'],stdout=stdfile)\n        memlists_i = 0\n        while p.poll() is None:\n          comm='''\n          if memlists_i >= len(memlists):\n            memlists.append(p.memory_info().rss)\n          else:\n            memlists[memlists_i] += p.memory_info().rss\n          memlists_i += 1\n          '''\n          time.sleep(SLICE_IN_SECONDS)\n\n        comm = '''\n        for mem in memlists:\n          memfile.write(str(float(mem)/float(options.runs)) + ',')\n        '''\n\nif __name__ == \"__main__\":\n  main()","sub_path":"benchmarking/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"79548732","text":"# Hint: You may not need all of these. 
Remove the unused functions.\nfrom hashtables import (HashTable,\n                        hash_table_insert,\n                        hash_table_retrieve)\n\n\nclass Ticket:\n    def __init__(self, source, destination):\n        self.source = source\n        self.destination = destination\n\n\ndef reconstruct_trip(tickets, length):\n    hashtable = HashTable(length)\n    route = [None] * length\n\n    \"\"\"\n    YOUR CODE HERE\n    \"\"\"\n    # Populate the hash table using a for loop: source is the key, destination is the value\n    for ticket in tickets:\n        hash_table_insert(hashtable, ticket.source, ticket.destination)\n    # Declare an array\n    answer_array = []\n    # Place the first value in the array\n    answer_array.append(hash_table_retrieve(hashtable, \"NONE\"))\n\n    for count in range(1, length):\n        answer_array.insert(count, hash_table_retrieve(hashtable, answer_array[-1]))\n    # Return array\n    return answer_array\n","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"58032128","text":"#\n# Copyright 2018 The Wallaroo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n\n\nimport argparse\nimport json\nimport time\nimport struct\n\nimport wallaroo\n\n\ndef application_setup(args):\n    parser = argparse.ArgumentParser(\"Window Detector\")\n    parser.add_argument(\"--window-type\", default=\"tumbling\",\n                        choices=[\"tumbling\", \"sliding\", \"counting\"])\n    parser.add_argument(\"--window-delay\", type=int, default=0,\n                        help=(\"Window delay \"\n                              \"size in milliseconds. (Default: 0)\"))\n    parser.add_argument(\"--window-size\", type=int, default=50,\n                        help=(\"Window size in \"\n                              \"milliseconds or units. (Default: 50)\"))\n    parser.add_argument(\"--window-slide\", type=int, default=25,\n                        help=(\"Window slide size, in milliseconds. 
\"\n \"(Default: 25)\"))\n parser.add_argument(\"--gen-source\", action='store_true',\n help=\"Use an internal source for resilience tests\")\n parser.add_argument(\"--partitions\", type=int, default=40,\n help=\"Number of partitions for use with internal source\")\n pargs, _ = parser.parse_known_args(args)\n\n if pargs.gen_source:\n print(\"Using internal source generator\")\n source = wallaroo.GenSourceConfig(MultiPartitionGenerator(pargs.partitions))\n else:\n print(\"Using TCP Source\")\n in_host, in_port = wallaroo.tcp_parse_input_addrs(args)[0]\n source = wallaroo.TCPSourceConfig(in_host, in_port, decoder)\n\n p = wallaroo.source(\"{} window\".format(pargs.window_type), source)\n p = p.key_by(extract_key)\n p = p.to(trace_id)\n p = p.key_by(extract_key)\n\n # Programmatically construct the window type and arguments\n if pargs.window_type == 'counting':\n print(\"Using window size: {} units\".format(pargs.window_size))\n window = wallaroo.count_windows(pargs.window_size)\n else:\n print(\"Using window size: {} ms\".format(pargs.window_size))\n window = wallaroo.range_windows(wallaroo.milliseconds(pargs.window_size))\n if pargs.window_delay:\n print(\"Using window_delay: {} ms\".format(pargs.window_delay))\n window = window.with_delay(wallaroo.milliseconds(pargs.window_delay))\n if pargs.window_type == 'sliding':\n print(\"Using window_slide: {} ms\".format(pargs.window_slide))\n window = window.with_slide(wallaroo.milliseconds(pargs.window_slide))\n # add the window to the topology\n p = p.to(window.over(Collect))\n\n p = p.to(split_accumulated)\n\n out_host, out_port = wallaroo.tcp_parse_output_addrs(args)[0]\n p = p.to_sink(wallaroo.TCPSinkConfig(out_host, out_port, encoder))\n return wallaroo.build_application(\"Tumbling Time Window Detector\", p)\n\n\nclass MultiPartitionGenerator(object):\n \"\"\"\n An internal message generator for use in resilience tests\n \"\"\"\n def __init__(self, partitions=1):\n self.partitions = partitions\n\n def initial_value(self):\n return self.format_message(0,1)\n\n def apply(self, v):\n last_key = int(v.key)\n last_value = v.value\n if (last_key + 1) == self.partitions:\n next_value = last_value + 1\n else:\n next_value = last_value\n next_key = (last_key + 1) % self.partitions\n\n return self.format_message(next_key, next_value)\n\n def format_message(self, key, val):\n m = Message(\"{}\".format(key), val)\n return m\n\n\n@wallaroo.key_extractor\ndef extract_key(msg):\n return msg.key.split(\".\")[0]\n\n\nclass Message(object):\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\n def __str__(self):\n return \"({},{})\".format(self.key, self.value)\n\n def __repr__(self):\n return str(self)\n\n\n@wallaroo.computation(name=\"TraceID\")\ndef trace_id(msg):\n print(\"trace_id({})\".format(msg))\n return Message(msg.key + \".TraceID\", msg.value)\n\n\nclass Collect(wallaroo.Aggregation):\n def initial_accumulator(self):\n return []\n\n def update(self, msg, accumulator):\n print(\"!@ Collect.update: append '\", msg.key, \"':\", str(msg.value), \"... 
appended to \", accumulator)\n        # tag data key, then add it to accumulator\n        accumulator.append(Message(msg.key + \".Collect\", msg.value))\n\n    def combine(self, accumulator1, accumulator2):\n        new_acc = accumulator1 + accumulator2\n        print(\"!@ Collect.combine:\", accumulator1, \" + \", accumulator2, \" == \", new_acc)\n        # return accumulator1 + accumulator2\n        return new_acc\n\n    def output(self, key, accumulator):\n        keys = set(m.key for m in accumulator)\n        values = tuple(m.value for m in accumulator)\n        ts = time.time()\n        print(\"Collect.output\", ts, key, [str(m) for m in accumulator])\n        assert(len(keys) <= 1)\n        try:\n            assert(keys.pop().split(\".\")[0] == key)\n        except KeyError:  # key set is empty because accumulator is empty\n            return None\n        return (key, values, ts)\n\n\n@wallaroo.computation_multi(name=\"Split Accumulated\")\ndef split_accumulated(data):\n    key, values, ts = data\n    return [(key, v, ts) for v in values]\n\n\n@wallaroo.decoder(header_length=4, length_fmt=\">I\")\ndef decoder(bs):\n    # Expecting a 64-bit unsigned int in big endian followed by a string\n    val, key = struct.unpack(\">Q\", bs[:8])[0], bs[8:]\n    key = key.decode(\"utf-8\")  # python3 compat in downstream string concat\n    print(\"decoder\", key, val, time.time())\n    return Message(key, val)\n\n\n@wallaroo.encoder\ndef encoder(msg):\n    print(\"encoder\", time.time(), msg)\n    s = json.dumps({'key': msg[0], 'value': msg[1], 'ts': msg[2]}).encode()\n    return struct.pack(\">I{}s\".format(len(s)), len(s), s)\n","sub_path":"testing/correctness/apps/window_detector/window_detector.py","file_name":"window_detector.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238620209","text":"from preprocess import Sentence2Vec, MeshHeading\nimport pickle\nimport parsers.datapath_parser as dp\nimport os\n\nembedding_dir = dp.get_sent_embedding_model_dir()\n\nclass SentenceCorpus(object):\n    def __init__(self,dataset):\n        self.mesh_headings = MeshHeading(dataset)\n        self.documents = []\n        self.dataset = dataset\n\n    def save(self):\n        self.mesh_headings.save(os.path.join(embedding_dir, \"y_size_{}\".format(self.dataset.size)))\n        article_dataset = [article[\"pmid\"] for article in self.dataset]\n        with open(os.path.join(embedding_dir, \"x_size_{}.pmid\".format(self.dataset.size)), 'wb') as pfn:\n            pickle.dump(article_dataset, pfn)\n","sub_path":"Corpora/SentenceCorpus.py","file_name":"SentenceCorpus.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552141778","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('office', '0005_auto_20150926_1838'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='syndicate',\n            name='president',\n            field=models.CharField(max_length=150, verbose_name=b'Presidente'),\n        ),\n    ]\n","sub_path":"apps/office/migrations/0006_auto_20151026_1151.py","file_name":"0006_auto_20151026_1151.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69711263","text":"import os\nimport subprocess\n# import shlex\n# from threading import Timer\n# from subprocess import Popen, PIPE, call, STDOUT, check_output\nimport logging\nimport configparser\nlogger = 
logging.getLogger()\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\n\nFROM_PATH = config.get(\"source_path\", \"exe_path\")\nDEST_PATH = config.get(\"source_path\", \"asm_path\")\n\n\ndef exe2asm(sha):\n    input_filename = '{}.danger'.format(sha)\n    output_filename = '{}.txt'.format(sha)\n    \n    log = open(DEST_PATH + output_filename, 'w', encoding=\"utf-8\")\n    log.flush()\n\n    cmd = ['objdump', '-Slx', '{}{}'.format(FROM_PATH, input_filename)]\n    # proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    proc = subprocess.Popen(cmd, stdout=log, stderr=log)\n    try:\n        outs, errs = proc.communicate(timeout=30)\n    except subprocess.TimeoutExpired as e:\n        print(e)\n        logger.info('Timeout! Your file may be too large.')\n        proc.kill()\n        # outs, errs = proc.communicate()\n    #print('Output: ' + outs.decode('utf-8'))\n    #print('Error: ' + errs.decode('utf-8'))\n    #print('code: ' + str(proc.returncode))\n\n\nif __name__ == '__main__':\n    \n    sha = '4a1ddeffe993aa2332d1b56590fa98969739dde19ed3cca51d2a7421ae62914e'\n    exe2asm(sha)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"malware_classification_workflow/app/exe_2_asm.py","file_name":"exe_2_asm.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562260984","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport urllib2,re\n\ndef get_ip_address(ip):\n    if ip is None:\n        return u'unknown'\n    url = 'http://www.ip138.com/ips138.asp?ip=%s' % ip\n    data = urllib2.urlopen(url).read()\n    data = unicode(data, 'gbk')\n    data = data.encode('utf-8')\n    p = re.compile('\"ul1\"')\n    m = p.search(data)\n    start, end = m.span()\n    listart = data.find('
<li>', start) + 4\n    liend = data.find('
</li>', start)\n    addr = data[listart:liend].decode('utf-8')\n    addr = addr[addr.find(u':') + 1:]\n    return addr\n","sub_path":"get_ip_address.py","file_name":"get_ip_address.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168238493","text":"from django.contrib import admin\nfrom .models import CustomWorkType, CustomSizes, CustomersFiles\n\n\nclass CustomWorkTypeAdmin(admin.ModelAdmin):\n    list_display = (\n        \"work_type\",\n        \"price_type\",\n        \"created_at\",\n        \"updated_at\",\n    )\n    ordering = (\"-updated_at\",)\n\n\nclass CustomSizesAdmin(admin.ModelAdmin):\n    list_display = (\n        \"size\",\n        \"price_size\",\n        \"updated_at\",\n    )\n    ordering = (\"-updated_at\",)\n\n\nclass CustomersFilesAdmin(admin.ModelAdmin):\n    list_display = (\n        \"id\",\n        \"customer_file\",\n        \"customer_file_url\",\n    )\n\n\nadmin.site.register(CustomWorkType, CustomWorkTypeAdmin)\nadmin.site.register(CustomSizes, CustomSizesAdmin)\nadmin.site.register(CustomersFiles, CustomersFilesAdmin)\n","sub_path":"custom/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"406711421","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(1,11)\ny = [-7.4,-2.3,3,7.6,12,17.9,22.5,27.6,32.1,37.4]\nplt.plot(x, y, 'o-')\nplt.xlabel('Voltage')\nplt.ylabel('Current')\nplt.title('Plotted Results')\nplt.show()\n\n","sub_path":"Scripts/numpy/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"226926756","text":"import numpy as np\nfrom ast import literal_eval\n\n\ndata_dir = 'Data/a0=0.1_r0=0_P0=0_a2=1/'\n\nL_list = np.arange(5.0,6.21,0.2)\nL_list = [round(L,1) for L in L_list]\n\nA1_root_list = []\niso_root_list = []\ndiff_list = []\nfor L in L_list:\n    A1_file = data_dir+'A1_roots_L='+str(L)+'.dat'\n    iso_file = data_dir+'iso_roots_L='+str(L)+'.dat'\n\n    with open(A1_file,'r') as f_A1:\n        A1_root = literal_eval(f_A1.read())[0]\n        A1_root_list.append(A1_root)\n    \n    with open(iso_file,'r') as f_iso:\n        iso_root = literal_eval(f_iso.read())[0]\n        iso_root_list.append(iso_root)\n\n    diff_list.append(iso_root-A1_root)\n\n","sub_path":"plot_maker.py","file_name":"plot_maker.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"155918850","text":"# coding:utf-8\n\"\"\"\n    Time : 2021/2/14 3:25 PM\n    Author : vincent\n    FileName: prescriptionServices\n    Software: PyCharm\n    Last Modified by: vincent\n    Last Modified time: 2021/2/14 3:25 PM\n\"\"\"\nimport logging\nimport urllib\n\nfrom suds.client import Client\nimport time\n\nfrom requires.handle import md5_creat_password, get_config, Envelope, response_base64_decode, request_base64_encode\nlogger = logging.getLogger('log')\n\n\ndef upload_prescription(order, pres, pres_details, company):\n    request_sb = build_upload_prescription_req(order, pres, pres_details, company)\n    url = get_config('interface', 'orderUrl')\n    logger.info(\"Calling smart pharmacy interface saveOrderInfo Begin, request url: %s, request body: %s\" % (url, request_sb))\n    # use suds to call the webservice interface to save the order\n    request_body = request_base64_encode(request_sb)\n    logger.info('request_body:%s' % request_body)\n    try:\n        client = Client(url)\n    except urllib.error.URLError as e:\n        return {'message': 'Interface error!' 
+ str(e)}\n    response = client.service.saveOrderInfo(request_body)\n    logger.info(\"Calling smart pharmacy interface saveOrderInfo End, response: %s\", response_base64_decode(response))\n    decode_response = response_base64_decode(response)\n    xml = Envelope(decode_response)\n    if xml.get_context(\"resultCode\") == '0':\n        order_id = xml.get_context(\"orderid\")\n        description = xml.get_context('description')\n        return {'orderId': order_id, 'message': description}\n    elif xml.get_context(\"resultCode\") == '22':\n        return {'specialCode': '22', 'message': xml.get_context('description')}\n    else:\n        reason = xml.get_context('description')\n        return {'message': reason}\n\n\ndef build_upload_prescription_req(order, pres, pres_details, company):\n    \"\"\"\n    Assemble the request message\n    :param company:\n    :param pres_details:\n    :param order:\n    :param pres:\n    :return:\n    \"\"\"\n    key = int(time.time() * 1000)\n    sign = md5_creat_password(\"saveOrderInfo\" + str(key) + md5_creat_password(company.company_password))\n    content = \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\" + company.company_code + \"\"\n    content += \"\" + str(key) + \"\"\n    content += \"\" + sign + \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\" + order.order_time + \"\"\n    content += \"\" + order.treat_card + \"\"\n    content += \"\" + order.reg_num + \"\"\n    content += \"\" + order.provinces + \",\" + order.city + \",\" + order.zone + \",\" + order.addr_str + \"\"\n    content += \"\" + order.consignee + \"\"\n    content += \"\" + order.con_tel + \"\"\n    content += \"\" + (order.send_goods_time if order.send_goods_time else '') + \"\"\n    content += \"\" + str(order.is_hos_addr) + \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\" + pres.user_name + \"\"\n    content += \"\" + pres.doctor + \"\"\n    content += \"\" + str(pres.age) + \"\"\n    content += \"\" + str(pres.gender) + \"\"\n    content += \"\" + pres.tel + \"\"\n    content += \"\" + str(pres.is_suffering) + \"\"\n    content += \"\" + str(pres.amount) + \"\"\n    content += \"\" + str(pres.suffering_num) + \"\"\n    content += \"\" + str(pres.ji_fried) + \"\"\n    content += \"\" + str(pres.per_pack_num) + \"\"\n    content += \"\" + (str(pres.per_pack_dose) if pres.per_pack_dose else '200') + \"\"\n    content += \"\" + str(pres.prescri_type) + \"\"\n    content += \"\" + pres.prescri_id + \"\"\n    content += \"\" + pres.special_instru + \"\"\n    content += \"\" + str(pres.is_within) + \"\"\n    content += \"\" + (str(pres.bed_num) if pres.bed_num else '') + \"\"\n    content += \"\" + (pres.hos_depart if pres.hos_depart else '') + \"\"\n    content += \"\" + (pres.hospital_num if pres.hospital_num else '') + \"\"\n    content += \"\" + (pres.disease_code if pres.disease_code else '') + \"\"\n    content += \"\" + (pres.prescript_remark if pres.prescript_remark else '') + \"\"\n    content += \"\" + (pres.medication_methods if pres.medication_methods else '') + \\\n               \"\"\n    content += \"\" + \\\n               (pres.medication_instruction if pres.medication_instruction else '') + \"\"\n    content += \"\" + str(pres.is_hos) + \"\"\n    content += \"\"\n    for pres_det in pres_details:\n        content += \"\"\n        content += \"\" + pres_det.medicines + \"\"\n        content += \"\" + pres_det.dose + \"\"\n        content += \"\" + pres_det.unit + \"\"\n        content += \"\" + pres_det.drugs_num + \"\"\n        content += \"\" + (str(pres_det.unit_price) if pres_det.unit_price else '') + \"\"\n        content += \"\" + (pres_det.remark if pres_det.remark else '') + \"\"\n        content += \"\" + (pres_det.m_usage if pres_det.m_usage else '') + \"\"\n        content += \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\"\n    
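# the envelope assembled above is authenticated by its header fields: the\n    # company code, a millisecond timestamp key, and the digest\n    # sign = md5(\"saveOrderInfo\" + key + md5(password)), which the receiving\n    # side presumably recomputes and compares before accepting the order\n    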
content += \"\"\n    return content\n\n\ndef cancel(op_name, reason, order_id, company):\n    \"\"\"\n    Cancel an order\n    :param op_name:\n    :param reason:\n    :param order_id:\n    :param company:\n    :return:\n    \"\"\"\n    request_sb = build_cancel_order_rq(op_name, reason, order_id, company)\n    url = get_config('interface', 'orderUrl')\n    logger.info(\"Calling smart pharmacy interface cancelOrder Begin, request url: %s, request body: %s\" % (url, request_sb))\n    # use suds to call the webservice interface to cancel the order\n    request_body = request_base64_encode(request_sb)\n    logger.info('request_body:%s' % request_body)\n    try:\n        client = Client(url)\n    except urllib.error.URLError as e:\n        return {'message': 'Interface error!' + str(e)}\n    response = client.service.cancelOrder(request_body)\n    logger.info(\"Calling smart pharmacy interface cancelOrder End, response: %s\", response_base64_decode(response))\n    decode_response = response_base64_decode(response)\n    xml = Envelope(decode_response)\n    try:\n        if xml.get_context(\"status\") == 'success':\n            return {'status': 'success', 'message': 'Success'}\n        elif xml.get_context('status') == 'fail':\n            message = xml.get_context('message')\n            return {'status': 'fail', 'message': message}\n    except IndexError:\n        reason = xml.get_context('description')\n        return {'message': reason}\n\n\ndef build_cancel_order_rq(op_name, reason, order_id, company):\n    \"\"\"\n    Build the cancel-order message\n    :param op_name: operator cancelling the order\n    :param reason: reason for the cancellation\n    :param order_id: target order id to cancel (smart pharmacy order id)\n    :param company: organization the order belongs to\n    :return:\n    \"\"\"\n    key = int(time.time() * 1000)\n    sign = md5_creat_password(\"cancelOrder\" + str(key) + md5_creat_password(company.company_password))\n    content = \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\" + company.company_code + \"\"\n    content += \"\" + str(key) + \"\"\n    content += \"\" + sign + \"\"\n    content += \"\"\n    content += \"\"\n    content += \"\" + op_name + \"\"\n    content += \"\" + reason + \"\"\n    content += \"\" + order_id + \"\"\n    content += \"\"\n    content += \"\"\n    return content\n\n","sub_path":"pharmacyClient/requires/prescriptionServices.py","file_name":"prescriptionServices.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116612120","text":"from unittest import TestCase\n\nfrom battle.businesslogic.Deck import Deck\nfrom battle.businesslogic.Player import Player\nfrom battle.businesslogic.PlayerFactory import PlayerFactory\nfrom battle.businesslogic.recorder.SimplifiedPlayer import SimplifiedPlayer\nfrom battle.businesslogic.tests.Creator import Creator\n\n\nclass SimplifiedPlayerTestCase(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.creator = Creator()\n        cls.profiles = cls.creator.get_user_profile_models()\n        cls.player = PlayerFactory.get_instance().create(cls.profiles[0])\n\n    def test_creation(self):\n        new_object = SimplifiedPlayer(self.player)\n\n        self.assertEqual(new_object.player_id, self.player.id)\n        self.assertEqual(new_object.stats.hp, self.player.get_hp())\n        for i in range(len(new_object.card_ids)):\n            self.assertEqual(new_object.card_ids[i], self.player.deck.cards_queue[i].card_model.id)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.creator.perform_deletion()\n","sub_path":"WMIAdventure/backend/WMIAdventure_backend/battle/businesslogic/tests/recorder/tests_SimplifiedPlayer.py","file_name":"tests_SimplifiedPlayer.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124515334","text":"import numpy as np\nimport pyasdf\nfrom obspy import read\nfrom glob import glob\nimport os, 
sys\nfrom obspy.clients.fdsn.client import Client\nfrom obspy.core import UTCDateTime\n\n######################################################################\n# This script is used in python2.7 to read in .pkl files (made in 2.x)\n# and write out the data as .h5 files (asdf, compatible with 3.x)\n# Once it's been run, hopefully won't be needed again\n# (unless something changes in how things get stored in .h5 files)\n######################################################################\n\n# list event IDs by scanning data dir for quakeML files\nidir = '../Data/pkl_xml/'\nodir = '../Data/'\n\nevents = [i.split('/')[3][6:-4] for i in glob(idir + 'event*.xml')]\ncomps = ['H','R','T','Z']\n\n# get stationXML data from IRIS (same for all of the record sections)\nstarttime = UTCDateTime('2011-01-01')\nendtime = UTCDateTime('2013-12-31')\n\nicli = Client('IRIS')\n\ninventory = icli.get_stations(network='ZA', station='B*',\n\t\t\t\t starttime=starttime, endtime=endtime)\n\n# loop over events, channels:\n#\tread in pkl\n#\tcreate new asdf file\n#\tadd waveforms\n#\tadd quakeml\nfor evt in events:\n\tfor c in comps:\n\t\tst = read(idir+'reksek_event_'+evt+'_component_'+c+'.pkl')\n\t\t# fix locations so not None (is this needed?) (maybe)\n\t\tfor i in st:\n\t\t\ti.stats.location = i.stats.station\n\n\t\tds = pyasdf.ASDFDataSet(odir+'reksek_'+evt+'_'+c+'.h5',\n\t\t\t\t\t compression='gzip-3')\n\n\t\tds.add_quakeml(idir+'event_'+evt+'.xml')\n\t\tevtid = ds.events[0].resource_id.resource_id\n\n\t\t# waveforms are added WITH EVENT IDS so they can be associated\n\t\t# correctly in station gathers etc.\n\n\t\tds.add_waveforms(st,tag='all',event_id=evt)\n\n\t\tds.add_stationxml(inventory)\n\n\n","sub_path":"old/convert_pkl_h5.py","file_name":"convert_pkl_h5.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632371560","text":"import gurobipy as gurobi\nimport numpy as np\nfrom copy import deepcopy as copy\nimport matplotlib.pyplot as plt\n\n\n# success !\n# need test for multi-area-multi-time\n# TODO: why don't the ub and lb apply to the voltage_square ????\n# TODO: p2 always be zero, don't know why?\n\n# first i change the generator cost\n# second i change the load_conference\n\ndef print_info(info):\n    print(info.this_voltage_square)\n    print(info.that_voltage_square)\n    print(info.power_flow)\n    print(info.react_flow)\n    print(info.this_node_pressure)\n    print(info.that_node_pressure)\n    print(info.gas_flow_in)\n    print(info.gas_flow_out)\n\n\n# TODO: the original code inside get_dual may be wrong\nclass connection_line_info:\n    def __init__(self):\n        self.this_voltage_square = 0.0\n        self.that_voltage_square = 0.0\n        self.power_flow = 0.0  # out -> to the connected node\n        self.react_flow = 0.0  # out -> to the connected node\n        self.this_node_pressure = 0.0\n        self.that_node_pressure = 0.0\n        self.gas_flow_in = 0.0\n        self.gas_flow_out = 0.0\n\n\n# branch flow model ! success\nT = 4\npower_price = [[5] * 5] * 5\ng_link = 1\ng_connection = [\n    [1],\n    [0]\n]\nplayer_num = 2\ng_tao = 100\nPUNISH = 30 * 3\nOUTER_LOOP = 700\nOUTER_LOOP = [800] * 30\nPCCP_COUNT = 20\n# TODO: one_line_0_with_1 one_line_0_with_2\n# [ [ [info_0 info_1 ... info_T], [info_0 info_1 ... info_T], ]                             <===== area_0\n#       one_line_1_with_0 one_line_1_with_2 one_line_1_with_3\n# [ [info_0 info_1 ... info_T], [info_0 info_1 ... info_T], [info_0 info_1 ... info_T],]    <===== area_1\n#       one_line_2_with_0 one_line_2_with_1\n# [ [info_0 info_1 ... 
info_T],] ] <===== area_2\n# g_info[ 0 ] [ 0 ] [ 0 ]\n# area_0 0_with_i time_0 ====> connection_line_info\ng_info = [\n [\n [\n connection_line_info()\n for ____time in range(T)\n ] for ____line in range(len(g_connection[____area]))\n ] for ____area in range(len(g_connection))\n]\n# TODO: g_lam format:\n# [ [line1 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],\n# [line2 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],\n# [line3 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],\n# ...\n# [linen [<> <> <> <>] | [<> <> <> <>] |...T...]]\n# g_lam [0] [0] = [x, x, x, x , x, x, x, x]\n# 0-line 0-time this_v that_v power_f_in react_f_in this_pressure that_pressure flow_IN flow_OUT\ng_lam = [\n [\n [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]\n for _____time in range(T)\n ] for ______line in range(g_link)\n]\n# g_lam_index format\n# [ area1[line, line, line ...],\n# area2[line, line, line ...],\n# ...]\n# g_lam_index [0]\n# all index for 0-area\ng_lam_index = [\n [0],\n [0]\n]\ng_gas_price_aux = [1, -1, 1]\nG_K = 2\nabcd = []\n\n\nclass PowerNet:\n def __init__(self, system_info, node_info, line_info, gas_node_info, gas_line_info):\n self.index = system_info['index']\n self.T = system_info['T']\n\n # ------------- generator of non-gas ----------------------\n self.gen_num = node_info['gen_num'] # add virtual node at last as the connected node\n self.gen_index = node_info['gen_index']\n self.gen_power_min = node_info['gen_power_min']\n self.gen_power_max = node_info['gen_power_max']\n self.gen_react_min = node_info['gen_react_min']\n self.gen_react_max = node_info['gen_react_max']\n self.gen_cost_a = node_info['gen_cost_a']\n self.gen_cost_b = node_info['gen_cost_b']\n self.gen_cost_c = node_info['gen_cost_c']\n # ---------------- power bus node ---------------------------\n self.bus_num = node_info['bus_num'] # add virtual node at last\n self.bus_voltage_min = node_info['bus_voltage_min']\n self.bus_voltage_max = node_info['bus_voltage_max']\n # ----------------- power load -------------------------------\n self.load_num = node_info['load_num']\n self.load_index = node_info['load_index']\n self.load_power_min = node_info['load_power_min']\n self.load_power_max = node_info['load_power_max']\n self.load_react_min = node_info['load_react_min']\n self.load_react_max = node_info['load_react_max']\n # --------------- power connection info -----------------------\n self.bus_num_outside = node_info['bus_num_outside']\n self.connection_area = system_info['connection_area']\n self.connection_index = node_info['connection_index']\n # ------------------- power line info -------------------------\n self.line_num = line_info['line_num']\n self.line_current_capacity = line_info['line_current_capacity']\n self.line_start_point = line_info['line_start_point']\n self.line_end_point = line_info['line_end_point']\n self.line_resistance = line_info['line_resistance']\n self.line_reactance = line_info['line_reactance']\n # ------------------ gas node info -------------------------\n self.gas_node_num = gas_node_info['gas_node_num']\n self.node_pressure_min = gas_node_info['node_pressure_min']\n self.node_pressure_max = gas_node_info['node_pressure_max']\n # ------------------ gas well info -------------------------\n self.well_num = gas_node_info['gas_well_num']\n self.well_index = gas_node_info['well_index'] # [0,0,4,5]\n self.well_output_min = gas_node_info['well_output_min']\n self.well_output_max = gas_node_info['well_output_max']\n # ------------------ gas load 
info -------------------------\n self.gas_load_index = gas_node_info['load_index']\n self.gas_load_min = gas_node_info['gas_load_min']\n self.gas_load_max = gas_node_info['gas_load_max']\n self.gas_load_num = gas_node_info['gas_load_num']\n # ----------------- gas generator --------------------------\n self.gen_gas_num = gas_node_info['gen_gas_num']\n self.gen_gas_index = gas_node_info['gen_gas_index']\n self.gen_gas_index_power = gas_node_info['gen_gas_index_power']\n self.gen_gas_min = gas_node_info['gen_gas_min']\n self.gen_gas_max = gas_node_info['gen_gas_max']\n self.gen_gas_efficiency = gas_node_info['gen_gas_efficiency']\n # ----------------- gas line info -------------------------\n self.weymouth = gas_line_info['weymouth'] # for easy, it should contain all line(include active line)\n self.gas_line_num = gas_line_info['gas_line_num']\n self.gas_line_start_point = gas_line_info['gas_line_start_point'] # gas flow out\n self.gas_line_end_point = gas_line_info['gas_line_end_point'] # gas flow in\n self.gas_line_pack_coefficient = gas_line_info['gas_line_pack_coefficient']\n self.gas_line_pack_initial = gas_line_info['gas_line_pack_initial']\n self.gas_line_active = gas_line_info['gas_line_active']\n self.gas_flow_in_max = gas_line_info['gas_flow_in_max']\n self.gas_flow_out_max = gas_line_info['gas_flow_out_max']\n # ------------------- gas compressor info ------------------\n self.compressor_num = gas_line_info['compressor_num']\n self.compressor_start_point = gas_line_info['compressor_start_point']\n self.compressor_end_point = gas_line_info['compressor_end_point']\n self.compressor_coefficient = gas_line_info['compressor_coefficient']\n self.compressor_max_flow = gas_line_info['compressor_max_flow']\n self.compressor_energy_consumption = gas_line_info['compressor_energy_consumption']\n # ----------------------------------------gas information end\n # ------------------model------------------------------------\n self.model = gurobi.Model()\n self.basic_objective = None\n self.addition_objective = None\n self.objective = None\n self.constrain_update = []\n self.objs = []\n self.lams = []\n self.dual = []\n self.dual_addition = 0\n self.norm_addition = 0\n # -------------------- power system var -------------------\n self.power_gen = None\n self.react_gen = None\n self.voltage_square = None\n self.line_current_square = None\n self.line_power_flow = None\n self.line_react_flow = None\n self.power_load = None\n self.react_load = None\n # -------------------- gas system var ----------------------\n self.node_pressure = None\n self.well_output = None\n self.gas_load = None\n self.gen_gas_power = None\n self.gas_flow_in = None\n self.gas_flow_out = None\n self.linepack = None\n self.compressor_out = None\n self.compressor_in = None\n self.gas_source = None\n self.pccp = None\n # ------------------------ old info -------------------------\n self.info = [\n [\n connection_line_info()\n for _ in range(self.T)\n ] for __ in range(len(self.connection_area))\n ]\n # TODO: self.info [0] [0]\n # self.index_with_i at time_0\n self.old_value = [\n [\n connection_line_info()\n for _ in range(self.T)\n ] for __ in range(len(self.connection_area))\n ]\n # TODO: self.old_value [0] [0]\n # self.index_with_i at time_0\n self.gas_flow_in_old = [\n [\n 0.2 for _ in range(self.T)\n ] for __ in range(self.gas_line_num)\n ]\n self.gas_flow_out_old = [\n [\n 0.2 for _ in range(self.T)\n ] for __ in range(self.gas_line_num)\n ]\n self.node_pressure_old = [\n [\n 0.2 for _ in range(self.T)\n ] for __ in 
range(self.gas_node_num)\n ]\n\n # ---------- power system ---------------------------------------\n def power_gen_connected_with(self, node):\n result = np.where(np.array(self.gen_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.power_gen[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def react_gen_connected_with(self, node):\n result = np.where(np.array(self.gen_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.react_gen[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def load_power_connected_with(self, node):\n result = np.where(np.array(self.load_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.power_load[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def load_react_connected_with(self, node):\n result = np.where(np.array(self.load_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.react_load[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n # power flow in/out of the node\n def power_flow_in_connected_with(self, node):\n result = np.where(np.array(self.line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_power_flow[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def power_flow_out_connected_with(self, node):\n result = np.where(np.array(self.line_start_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_power_flow[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def raect_flow_in_connected_with(self, node):\n result = np.where(np.array(self.line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_react_flow[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def react_flow_out_connected_with(self, node):\n result = np.where(np.array(self.line_start_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_react_flow[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def current_in_connected_with(self, node):\n result = np.where(np.array(self.line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_current_square[i, time])\n result_list.append(per)\n return np.array(result_list)\n\n def resistance_in_connected_with(self, node):\n result = np.where(np.array(self.line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_resistance[i])\n 
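# note: line resistance is constant in time; it is deliberately tiled across\n # the T periods so it lines up elementwise with the time-indexed current\n # variables when the nodal balance constraints are assembled\n 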
result_list.append(per)\n return np.array(result_list)\n\n def reactance_in_connected_with(self, node):\n result = np.where(np.array(self.line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per = []\n for time in range(self.T):\n per.append(self.line_reactance[i])\n result_list.append(per)\n return np.array(result_list)\n\n # ---------- gas system -----------------------------------------\n def well_connected_with(self, node):\n result = np.where(np.array(self.well_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_well = []\n for time in range(self.T):\n per_well.append(self.well_output[i, time])\n result_list.append(per_well)\n return np.array(result_list)\n\n def load_connected_with(self, node):\n result = np.where(np.array(self.gas_load_index) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_load = []\n for time in range(self.T):\n per_load.append(self.gas_load[i, time])\n result_list.append(per_load)\n return np.array(result_list)\n\n def p2g_connected_with(self, node):\n return np.array([[0] * self.T])\n\n def gas_flow_out_connected_with(self, node):\n result = np.where(np.array(self.gas_line_end_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_out = []\n for time in range(self.T):\n per_out.append(self.gas_flow_out[i, time])\n result_list.append(per_out)\n return np.array(result_list)\n\n def gas_flow_in_connected_with(self, node):\n result = np.where(np.array(self.gas_line_start_point) == node)\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_in = []\n for time in range(self.T):\n per_in.append(self.gas_flow_in[i, time])\n result_list.append(per_in)\n return np.array(result_list)\n\n def gen_connected_with(self, node): # list of expression\n result = np.where(np.array(self.gen_gas_index) == node) # this node is gas node\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_gen = []\n for time in range(self.T):\n per_gen.append(self.gen_gas_power[i, time] / self.gen_gas_efficiency[i]) # change to gas\n result_list.append(per_gen)\n return np.array(result_list)\n\n def gas_to_power_connected_with(self, node):\n result = np.where(np.array(self.gen_gas_index_power) == node) # this node is power node\n if result[0].size == 0:\n return np.array([[0] * self.T])\n result_list = []\n for i in result[0]:\n per_gen = []\n for time in range(self.T):\n per_gen.append(self.gen_gas_power[i, time]) # just power\n result_list.append(per_gen)\n return np.array(result_list)\n\n # ----------- auxiliary key function ----------------------------\n def get_dual(self, this_info, that_info, start_point):\n # this or that of two areas is same!\n # 这里 我们 认为 that_info 始终遵循 一个 全局的 方向 即 0 的 this that 与 1 的 this that 始终 是 相同 的\n # 但是 this_info 却把 内部的节点 作为 this, 外部 作为 that\n if start_point != 0:\n diff1 = this_info.this_voltage_square - that_info.this_voltage_square\n diff2 = -1 * this_info.this_voltage_square + that_info.this_voltage_square\n diff3 = this_info.that_voltage_square - that_info.that_voltage_square\n diff4 = -1 * this_info.that_voltage_square + that_info.that_voltage_square\n else:\n diff1 = this_info.that_voltage_square - that_info.this_voltage_square\n diff2 = -1 * this_info.that_voltage_square + 
that_info.this_voltage_square\n diff3 = this_info.this_voltage_square - that_info.that_voltage_square\n diff4 = -1 * this_info.this_voltage_square + that_info.that_voltage_square\n\n if start_point != 0: # this is start point\n diff5 = this_info.power_flow - that_info.power_flow\n diff6 = -1 * this_info.power_flow + that_info.power_flow\n diff7 = this_info.react_flow - that_info.react_flow\n diff8 = -1 * this_info.react_flow + that_info.react_flow\n else:\n diff5 = -1 * this_info.power_flow - that_info.power_flow\n diff6 = 1 * this_info.power_flow + that_info.power_flow\n diff7 = -1 * this_info.react_flow - that_info.react_flow\n diff8 = 1 * this_info.react_flow + that_info.react_flow\n\n #\n if start_point != 0:\n diff9 = this_info.this_node_pressure - that_info.this_node_pressure\n diff10 = -1 * this_info.this_node_pressure + that_info.this_node_pressure\n diff11 = this_info.that_node_pressure - that_info.that_node_pressure\n diff12 = -1 * this_info.that_node_pressure + that_info.that_node_pressure\n else:\n diff9 = this_info.that_node_pressure - that_info.this_node_pressure\n diff10 = -1 * this_info.that_node_pressure + that_info.this_node_pressure\n diff11 = this_info.this_node_pressure - that_info.that_node_pressure\n diff12 = -1 * this_info.this_node_pressure + that_info.that_node_pressure\n\n diff13 = this_info.gas_flow_in - that_info.gas_flow_in\n diff14 = -1 * this_info.gas_flow_in + that_info.gas_flow_in\n diff15 = this_info.gas_flow_out - that_info.gas_flow_out\n diff16 = -1 * this_info.gas_flow_out + that_info.gas_flow_out\n\n return [diff1, diff2, diff3, diff4, diff5, diff6, diff7, diff8,\n diff9, diff10, diff11, diff12, diff13, diff14, diff15, diff16]\n\n def get_sub(self, this_info, this_info_old, start_point):\n # this_info_old 应该遵守全局 的 顺序\n diff = 0\n if start_point != 0: # this is start point\n diff = diff + \\\n (this_info.this_voltage_square - this_info_old.this_voltage_square) * \\\n (this_info.this_voltage_square - this_info_old.this_voltage_square) + \\\n (this_info.that_voltage_square - this_info_old.that_voltage_square) * \\\n (this_info.that_voltage_square - this_info_old.that_voltage_square) + \\\n (this_info.power_flow - this_info_old.power_flow) * \\\n (this_info.power_flow - this_info_old.power_flow) + \\\n (this_info.react_flow - this_info_old.react_flow) * \\\n (this_info.react_flow - this_info_old.react_flow) + \\\n (this_info.this_node_pressure - this_info_old.this_node_pressure) * \\\n (this_info.this_node_pressure - this_info_old.this_node_pressure) + \\\n (this_info.that_node_pressure - this_info_old.that_node_pressure) * \\\n (this_info.that_node_pressure - this_info_old.that_node_pressure) + \\\n (this_info.gas_flow_in - this_info_old.gas_flow_in) * \\\n (this_info.gas_flow_in - this_info_old.gas_flow_in) + \\\n (this_info.gas_flow_out - this_info_old.gas_flow_out) * \\\n (this_info.gas_flow_out - this_info_old.gas_flow_out)\n else:\n diff = diff + \\\n (this_info.that_voltage_square - this_info_old.this_voltage_square) * \\\n (this_info.that_voltage_square - this_info_old.this_voltage_square) + \\\n (this_info.this_voltage_square - this_info_old.that_voltage_square) * \\\n (this_info.this_voltage_square - this_info_old.that_voltage_square) + \\\n (-1 * this_info.power_flow - this_info_old.power_flow) * \\\n (-1 * this_info.power_flow - this_info_old.power_flow) + \\\n (-1 * this_info.react_flow - this_info_old.react_flow) * \\\n (-1 * this_info.react_flow - this_info_old.react_flow) + \\\n (this_info.that_node_pressure - 
this_info_old.this_node_pressure) * \\\n (this_info.that_node_pressure - this_info_old.this_node_pressure) + \\\n (this_info.this_node_pressure - this_info_old.that_node_pressure) * \\\n (this_info.this_node_pressure - this_info_old.that_node_pressure) + \\\n (this_info.gas_flow_in - this_info_old.gas_flow_in) * \\\n (this_info.gas_flow_in - this_info_old.gas_flow_in) + \\\n (this_info.gas_flow_out - this_info_old.gas_flow_out) * \\\n (this_info.gas_flow_out - this_info_old.gas_flow_out)\n return diff\n\n def get_lam(self, index, start_point):\n lam = g_lam[index]\n lam_T = []\n for i in range(self.T):\n lam_copy = lam[i].copy()\n lam_t = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n if start_point != 0: # this is start_point\n lam_t = lam_copy\n else:\n lam_t[0] = lam_copy[1]\n lam_t[1] = lam_copy[0]\n lam_t[2] = lam_copy[3]\n lam_t[3] = lam_copy[2]\n lam_t[4] = lam_copy[5]\n lam_t[5] = lam_copy[4]\n lam_t[6] = lam_copy[7]\n lam_t[7] = lam_copy[6]\n lam_t[8] = lam_copy[9]\n lam_t[9] = lam_copy[8]\n lam_t[10] = lam_copy[11]\n lam_t[11] = lam_copy[10]\n lam_t[12] = lam_copy[13]\n lam_t[13] = lam_copy[12]\n lam_t[14] = lam_copy[15]\n lam_t[15] = lam_copy[14]\n lam_T.extend(lam_t)\n return lam_T\n\n def build_model(self):\n # add var\n self.power_gen = self.model.addVars(\n self.gen_num, self.T,\n lb=[[self.gen_power_min[i]] * self.T for i in range(self.gen_num)],\n ub=[[self.gen_power_max[i]] * self.T for i in range(self.gen_num)],\n name='power_gene')\n self.react_gen = self.model.addVars(\n self.gen_num, self.T,\n lb=[[self.gen_react_min[i]] * self.T for i in range(self.gen_num)],\n ub=[[self.gen_react_max[i]] * self.T for i in range(self.gen_num)],\n name='reactive_gene')\n self.power_load = self.model.addVars(\n self.load_num, self.T,\n lb= self.load_power_min, #[[self.load_power_min[i]] * self.T for i in range(self.load_num)],\n ub= self.load_power_max, #[[self.load_power_max[i]] * self.T for i in range(self.load_num)],\n name='power_load')\n self.react_load = self.model.addVars(\n self.load_num, self.T,\n lb=self.load_react_min, #[[self.load_react_min[i]] * self.T for i in range(self.load_num)],\n ub=self.load_react_max, #[[self.load_react_max[i]] * self.T for i in range(self.load_num)],\n name='react_load')\n self.voltage_square = self.model.addVars(\n self.bus_num, self.T,\n lb=[[self.bus_voltage_min[i] * self.bus_voltage_min[i]] * self.T\n for i in range(self.bus_num)],\n ub=[[self.bus_voltage_max[i] * self.bus_voltage_max[i]] * self.T\n for i in range(self.bus_num)],\n name='bus_voltage_square')\n self.line_current_square = self.model.addVars(\n self.line_num, self.T,\n ub=[[self.line_current_capacity[i] * self.line_current_capacity[i]] * self.T\n for i in range(self.line_num)],\n name='line_current_square')\n self.line_power_flow = self.model.addVars(\n self.line_num, self.T,\n lb=-10, ub=10, # TODO: key error, core error\n name='line_power_flow')\n self.line_react_flow = self.model.addVars(\n self.line_num, self.T,\n lb=-10, ub=10,\n name='line_react_flow')\n self.well_output = self.model.addVars(self.well_num, self.T,\n lb=[[self.well_output_min[i]] * self.T for i in range(self.well_num)],\n ub=[[self.well_output_max[i]] * self.T for i in range(self.well_num)],\n name='gas_well_outputs')\n self.node_pressure = self.model.addVars(self.gas_node_num, self.T,\n lb=[[self.node_pressure_min[i]] * self.T for i in\n range(self.gas_node_num)],\n ub=[[self.node_pressure_max[i]] * self.T for i in\n range(self.gas_node_num)],\n name='node_pressure')\n self.gas_flow_in = 
self.model.addVars(self.gas_line_num, self.T,\n ub=[[self.gas_flow_in_max[i]] * self.T for i in range(self.gas_line_num)],\n lb=[[-1 * self.gas_flow_in_max[i]] * self.T for i in\n range(self.gas_line_num)],\n name='gas_flow_in')\n self.gas_flow_out = self.model.addVars(self.gas_line_num, self.T,\n ub=[[self.gas_flow_out_max[i]] * self.T for i in\n range(self.gas_line_num)],\n lb=[[-1 * self.gas_flow_out_max[i]] * self.T for i in\n range(self.gas_line_num)],\n name='gas_flow_out')\n self.gas_load = self.model.addVars(self.gas_load_num, self.T,\n lb=self.gas_load_min, ub=self.gas_load_max,\n name='gas_load')\n self.gen_gas_power = self.model.addVars(self.gen_gas_num, self.T,\n lb=[[self.gen_gas_min[i]] * self.T for i in range(self.gen_gas_num)],\n ub=[[self.gen_gas_max[i]] * self.T for i in range(self.gen_gas_num)],\n name='gen_gas_power')\n self.linepack = self.model.addVars(self.gas_line_num, self.T, name='gas_linepack')\n self.pccp = self.model.addVars(self.gas_line_num, self.T,\n lb=0, name='pccp')\n self.model.update()\n # ----------- construct the info structure --------------------------------\n for i in range(len(self.connection_area)):\n line_T = []\n line_start = self.line_num - len(self.connection_area) # 5-2 = 3 3 4\n bus_start = self.bus_num - len(self.connection_area)\n gas_node_start = self.gas_node_num - len(self.connection_area)\n gas_line_start = self.gas_line_num - len(self.connection_area)\n this_index = self.connection_index[i]\n this_index_gas = self.connection_index[i] # TODO: we assume gas and power have the same connection index\n for time in range(self.T):\n line_t = connection_line_info()\n line_t.power_flow = self.line_power_flow[i + line_start, time]\n line_t.react_flow = self.line_react_flow[i + line_start, time]\n line_t.this_voltage_square = self.voltage_square[this_index, time]\n line_t.that_voltage_square = self.voltage_square[i + bus_start, time]\n line_t.this_node_pressure = self.node_pressure[this_index_gas, time]\n line_t.that_node_pressure = self.node_pressure[i + gas_node_start, time]\n line_t.gas_flow_in = self.gas_flow_in[i + gas_line_start, time]\n line_t.gas_flow_out = self.gas_flow_out[i + gas_line_start, time]\n line_T.append(line_t)\n self.info[i] = line_T\n\n # ----------- node power balance -----------------\n for node in range(self.bus_num):\n Power = self.power_gen_connected_with(node)\n React = self.react_gen_connected_with(node)\n Power_Load = self.load_power_connected_with(node)\n React_Load = self.load_react_connected_with(node)\n Power_In = self.power_flow_in_connected_with(node)\n Power_Out = self.power_flow_out_connected_with(node)\n React_In = self.raect_flow_in_connected_with(node)\n React_Out = self.react_flow_out_connected_with(node)\n Current_In = self.current_in_connected_with(node)\n resistance = self.resistance_in_connected_with(node)\n reactance = self.reactance_in_connected_with(node)\n G2P = self.gas_to_power_connected_with(node)\n for time in range(self.T):\n self.model.addConstr(\n lhs=sum(Power[:, time]) + sum(G2P[:, time]) +\n sum(Power_In[:, time] - resistance[:, time] * Current_In[:, time]),\n rhs=sum(Power_Load[:, time]) + sum(Power_Out[:, time]),\n sense=gurobi.GRB.EQUAL,\n name='power_balance')\n self.model.addConstr(\n lhs=sum(React[:, time]) +\n sum(React_In[:, time] - reactance[:, time] * Current_In[:, time]),\n rhs=sum(React_Load[:, time]) + sum(React_Out[:, time]),\n sense=gurobi.GRB.EQUAL,\n name='react_balance')\n\n # ----------- line voltage drop ------------------\n for i in range(self.line_num):\n 
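# Branch-flow (BFM / DistFlow) formulation: for a line from start node i to\n # end node j with impedance r + jx, the squared voltage magnitudes satisfy\n # v_j - v_i = (r^2 + x^2) * l - 2 * (r * P + x * Q),\n # where l is the squared current; the quadratic coupling P^2 + Q^2 = l * v_i\n # is relaxed below to P^2 + Q^2 <= l * v_i, a second-order cone constraint\n # that the solver can handle\n 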
start_point = self.line_start_point[i]\n end_point = self.line_end_point[i]\n resistance = self.line_resistance[i]\n reactance = self.line_reactance[i]\n impedance_square = reactance * reactance + resistance * resistance\n for time in range(self.T):\n self.model.addConstr(\n lhs=self.voltage_square[end_point, time] -\n self.voltage_square[start_point, time],\n rhs=impedance_square * self.line_current_square[i, time] -\n 2 * (resistance * self.line_power_flow[i, time] +\n reactance * self.line_react_flow[i, time]),\n sense=gurobi.GRB.EQUAL,\n name='voltage_drop')\n self.model.addConstr(\n lhs=self.line_power_flow[i, time] * self.line_power_flow[i, time] +\n self.line_react_flow[i, time] * self.line_react_flow[i, time],\n rhs=self.line_current_square[i, time] * self.voltage_square[start_point, time],\n sense=gurobi.GRB.LESS_EQUAL,\n # sense=gurobi.GRB.EQUAL,\n name='flow_relax')\n\n # ----------- gas node balance ------------------\n for node in range(self.gas_node_num):\n # for all passive and active # use numpy !!!! return [[]] format\n Well = self.well_connected_with(node) # 节点node对应的well变量\n Load = self.load_connected_with(node)\n Gen = self.gen_connected_with(node) # this change Power to Gas # considered efficiency !!!!!!!\n P2G = self.p2g_connected_with(node) # this is just gas\n Line_Out = self.gas_flow_out_connected_with(node)\n Line_In = self.gas_flow_in_connected_with(node)\n for time in range(self.T):\n self.model.addConstr(\n lhs=sum(Well[:, time]) + sum(P2G[:, time]) + sum(Line_Out[:, time]), # source\n rhs=sum(Gen[:, time]) + sum(Load[:, time]) + sum(Line_In[:, time]), # load\n sense=gurobi.GRB.EQUAL,\n name='gas_nodal_balance_node')\n\n # ----------- line pack passive ------------------\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n start_point = self.gas_line_start_point[line]\n end_point = self.gas_line_end_point[line]\n linepack_coefficient = self.gas_line_pack_coefficient[line]\n for time in range(self.T):\n self.model.addConstr(\n lhs=self.linepack[line, time],\n rhs=linepack_coefficient *\n (self.node_pressure[start_point, time] + self.node_pressure[end_point, time]),\n sense=gurobi.GRB.EQUAL,\n name='linePack')\n\n # ----------- passive Pack-T ---------------------\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n for time in range(self.T):\n if time == 0:\n self.model.addConstr(\n lhs=self.linepack[line, 0] - self.linepack[line, self.T - 1],\n rhs=self.gas_flow_in[line, 0] - self.gas_flow_out[line, 0],\n sense=gurobi.GRB.EQUAL,\n name='linepack_with_time_' + str(time) + '_line' + str(line))\n else:\n self.model.addConstr(\n lhs=self.linepack[line, time] - self.linepack[line, time - 1],\n rhs=self.gas_flow_in[line, time] - self.gas_flow_out[line, time],\n sense=gurobi.GRB.EQUAL,\n name='linepack_with_time_' + str(time) + '_line' + str(line))\n\n # ----------- Pack Less Init ---------------------\n linepack_sum = 0 # ? 
passive or active\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n linepack_sum = linepack_sum + self.linepack[line, self.T - 1]\n self.model.addConstr(linepack_sum <= self.gas_line_pack_initial)\n\n # -------- active pressure-increase ---------------------\n # ---------active gas-consume ---------------------------\n for line in range(self.gas_line_num):\n if line in self.gas_line_active:\n thisIndex = self.gas_line_active.index(line)\n compressor_coeff = self.compressor_coefficient[thisIndex]\n start_point = self.gas_line_start_point[line]\n end_point = self.gas_line_end_point[line]\n max_flow = self.compressor_max_flow[thisIndex]\n energy_consumption = 1 - self.compressor_energy_consumption[thisIndex]\n for time in range(self.T):\n # self.model.addConstr(self.gas_flow_in[line, time] <= max_flow)\n self.model.addConstr(self.node_pressure[end_point, time] <=\n compressor_coeff * self.node_pressure[start_point, time])\n # add flow quantities for gas compressors\n self.model.addConstr(self.gas_flow_out[line, time] ==\n energy_consumption * self.gas_flow_in[line, time])\n\n # ------------- weymouth passive ------------------------\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n start_point = self.gas_line_start_point[line]\n end_point = self.gas_line_end_point[line]\n weymouth = self.weymouth[line]\n for time in range(self.T):\n self.model.addConstr(\n lhs=((self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2) *\n ((self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2),\n rhs=weymouth * (self.node_pressure[start_point, time] *\n self.node_pressure[start_point, time] -\n self.node_pressure[end_point, time] *\n self.node_pressure[end_point, time]),\n sense=gurobi.GRB.LESS_EQUAL,\n name='weymouth')\n\n self.constrain_update.append(\n self.model.addConstr(\n lhs=weymouth * self.node_pressure[start_point, time] *\n self.node_pressure[start_point, time] - (\n (self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *\n (self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2 -\n (self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *\n (self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 4 -\n weymouth * self.node_pressure[end_point, time] *\n self.node_pressure[end_point, time] +\n 2 * weymouth * self.node_pressure[end_point, time] *\n self.node_pressure_old[end_point][time]\n ),\n rhs=self.pccp[line, time],\n sense=gurobi.GRB.LESS_EQUAL,\n name='pccp_less'\n )\n )\n # ------------- gas system end --------------------------\n\n # ------------- construct object ------------------------\n first_line = self.line_num - len(self.connection_area)\n self.objs = []\n for gen in range(self.gen_num - len(self.connection_area)):\n per = 0\n for time in range(self.T):\n per = per + \\\n self.power_gen[gen, time] * self.gen_cost_a[gen] + \\\n self.power_gen[gen, time] * self.power_gen[gen, time] * self.gen_cost_b[gen] + \\\n self.gen_cost_c[gen]\n self.objs.append(per)\n for load in range(self.load_num - len(self.connection_area)):\n per = 0\n for time in range(self.T):\n load_ref = (self.load_power_max[load][time] + self.load_power_min[load][time]) / 2\n per = per + \\\n 0.1 * (self.power_load[load, time] - load_ref) * \\\n (self.power_load[load, time] - load_ref)\n self.objs.append(per)\n for line in range(len(self.connection_area)):\n # for every area\n connect_to = self.connection_area[line]\n per_area = 0\n for time in range(self.T):\n per_area = 
per_area + self.line_power_flow[first_line + line, time] * \\\n power_price[self.index][connect_to]\n self.objs.append(per_area)\n line_num = self.gas_line_num - len(self.connection_area)\n for conn in range(len(self.connection_area)):\n for time in range(T):\n self.objs.append(3 * # gas_buy_price 购气成本\n self.gas_flow_in[line_num + conn, time] * g_gas_price_aux[self.index])\n objective = sum(self.objs)\n self.basic_objective = objective\n\n def update_model(self, tao):\n global g_info\n self.lams = []\n # obtain the lam of this player\n for i, index in enumerate(g_lam_index[self.index]):\n connect_to = self.connection_area[i]\n is_start_point = 0\n if connect_to > self.index:\n is_start_point = 1\n self.lams.extend(self.get_lam(index, is_start_point))\n\n # construct the dual object\n self.dual = [] # [ ---time---, ---time--- ]\n for i in range(len(self.connection_area)):\n for time in range(self.T):\n connect_to = self.connection_area[i]\n line_index = g_connection[connect_to].index(self.index)\n that_info = g_info[connect_to][line_index][time]\n this_info = self.info[i][time]\n is_start_point = 0\n if connect_to > self.index:\n is_start_point = 1\n self.dual.extend(self.get_dual(this_info, that_info, is_start_point))\n self.dual_addition = sum([PUNISH * a * b for a, b in zip(self.dual, self.lams)])\n\n # construct the norm object\n self.norm_addition = 0\n for i in range(len(self.connection_area)):\n connect_to = self.connection_area[i]\n is_start_point = 0\n if connect_to > self.index:\n is_start_point = 1\n for time in range(self.T):\n self.norm_addition = self.norm_addition + \\\n self.get_sub(self.info[i][time], self.old_value[i][time], is_start_point)\n self.addition_objective = self.dual_addition + tao / 2 * self.norm_addition\n self.objective = self.basic_objective + self.addition_objective\n\n def optimize(self):\n self.model.Params.OutputFlag = 0\n self.model.setObjective(self.objective + gurobi.quicksum(self.pccp) * G_K)\n self.model.optimize()\n\n for line in range(self.gas_line_num):\n for time in range(self.T):\n self.gas_flow_in_old[line][time] = self.gas_flow_in[line, time].getAttr('X')\n\n for line in range(self.gas_line_num):\n for time in range(self.T):\n self.gas_flow_out_old[line][time] = self.gas_flow_out[line, time].getAttr('X')\n\n for node in range(self.gas_node_num):\n for time in range(self.T):\n self.node_pressure_old[node][time] = self.node_pressure[node, time].getAttr('X')\n\n for i in range(len(self.connection_area)):\n for time in range(self.T):\n this_index = self.connection_index[i]\n connect_to = self.connection_area[i]\n this_index_gas = self.connection_index[i] # we assume gas and power have the same index\n is_start_point = 0\n if connect_to > self.index:\n is_start_point = 1\n line = connection_line_info()\n line_start = self.line_num - len(self.connection_area)\n bus_start = self.bus_num - len(self.connection_area)\n gas_node_start = self.gas_node_num - len(self.connection_area)\n gas_line_start = self.gas_line_num - len(self.connection_area)\n # ---------- update power flow --------------\n if is_start_point != 0: # this is start point\n line.power_flow = self.line_power_flow[i + line_start, time].getAttr('X')\n line.react_flow = self.line_react_flow[i + line_start, time].getAttr('X')\n else:\n line.power_flow = self.line_power_flow[i + line_start, time].getAttr('X') * (-1)\n line.react_flow = self.line_react_flow[i + line_start, time].getAttr('X') * (-1)\n # -------- update voltage ------------\n if is_start_point != 0: # this is start point\n 
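# orientation convention: the lower-indexed area owns the global start point\n # of each tie-line, so when this area is the end point the published values\n # are flipped (flows negated above, this/that roles swapped below) so that\n # g_info always follows the global direction\n 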
line.this_voltage_square = self.voltage_square[this_index, time].getAttr('X')\n line.that_voltage_square = self.voltage_square[i + bus_start, time].getAttr('X')\n else:\n line.this_voltage_square = self.voltage_square[i + bus_start, time].getAttr('X')\n line.that_voltage_square = self.voltage_square[this_index, time].getAttr('X')\n # ------- update pressure -----------\n if is_start_point != 0: # this is start point\n line.this_node_pressure = self.node_pressure[this_index_gas, time].getAttr('X')\n line.that_node_pressure = self.node_pressure[i + gas_node_start, time].getAttr('X')\n else:\n line.this_node_pressure = self.node_pressure[i + gas_node_start, time].getAttr('X')\n line.that_node_pressure = self.node_pressure[this_index_gas, time].getAttr('X')\n # -------- update gas flow -----------\n line.gas_flow_in = self.gas_flow_in[i + gas_line_start, time].getAttr('X')\n line.gas_flow_out = self.gas_flow_out[i + gas_line_start, time].getAttr('X')\n # u p d a t e g _ i n f o\n g_info[self.index][i][time] = line\n\n def set_old_value(self, old): # old_value should be consisted with the g_info\n for area in range(len(self.connection_area)):\n for time in range(self.T):\n self.old_value[area][time].this_voltage_square = old[area][time].this_voltage_square\n self.old_value[area][time].that_voltage_square = old[area][time].that_voltage_square\n self.old_value[area][time].power_flow = old[area][time].power_flow\n self.old_value[area][time].react_flow = old[area][time].react_flow\n self.old_value[area][time].this_node_pressure = old[area][time].this_node_pressure\n self.old_value[area][time].that_node_pressure = old[area][time].that_node_pressure\n self.old_value[area][time].gas_flow_in = old[area][time].gas_flow_in\n self.old_value[area][time].gas_flow_out = old[area][time].gas_flow_out\n\n def cal_gap(self):\n result = []\n for line in range(self.gas_line_num):\n for time in range(self.T):\n self.gas_flow_in_old[line][time] = self.gas_flow_in[line, time].getAttr('X')\n for line in range(self.gas_line_num):\n for time in range(self.T):\n self.gas_flow_out_old[line][time] = self.gas_flow_out[line, time].getAttr('X')\n for node in range(self.gas_node_num):\n for time in range(self.T):\n self.node_pressure_old[node][time] = self.node_pressure[node, time].getAttr('X')\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n start_point = self.gas_line_start_point[line]\n end_point = self.gas_line_end_point[line]\n weymouth = self.weymouth[line]\n for time in range(self.T):\n lhs = ((self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 2) * \\\n ((self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 2)\n rhs = weymouth * (self.node_pressure_old[start_point][time] *\n self.node_pressure_old[start_point][time] -\n self.node_pressure_old[end_point][time] *\n self.node_pressure_old[end_point][time])\n result.append(abs(lhs - rhs) / abs(lhs))\n return max(result)\n\n def update_outer_model(self):\n self.model.remove(self.constrain_update)\n self.constrain_update = []\n # weymouth for passive line\n for line in range(self.gas_line_num):\n if line not in self.gas_line_active:\n start_point = self.gas_line_start_point[line]\n end_point = self.gas_line_end_point[line]\n weymouth = self.weymouth[line]\n for time in range(self.T):\n self.constrain_update.append(\n self.model.addConstr(\n lhs=weymouth * self.node_pressure[start_point, time] *\n self.node_pressure[start_point, time] - (\n (self.gas_flow_in_old[line][time] + 
self.gas_flow_out_old[line][time]) *\n (self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2 -\n (self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *\n (self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 4 -\n weymouth * self.node_pressure[end_point, time] *\n self.node_pressure[end_point, time] +\n 2 * weymouth * self.node_pressure[end_point, time] *\n self.node_pressure_old[end_point][time]\n ),\n rhs=self.pccp[line, time],\n sense=gurobi.GRB.LESS_EQUAL,\n name='weymouth_relax'\n ))\n pccp_value = []\n for i in range(self.gas_line_num):\n pccp_value.append(self.pccp[i, 0].getAttr('X'))\n if abs(max(pccp_value)) < 0.005:\n print('a_a_a_a_a_a_a_a_a_a_a_a_a_a_a_a_a_amazing_______________________')\n\n\nclass PlayerN1:\n def __init__(self):\n self.gx = []\n self.old_value = [0] * g_link * T * 16\n self.dual_express = 0\n self.norm_express = 0\n self.objective = 0\n\n def sub(self, lhs, rhs):\n gx = []\n gx.append(lhs.this_voltage_square - rhs.this_voltage_square)\n gx.append(-1 * lhs.this_voltage_square + rhs.this_voltage_square)\n gx.append(lhs.that_voltage_square - rhs.that_voltage_square)\n gx.append(-1 * lhs.that_voltage_square + rhs.that_voltage_square)\n gx.append(lhs.power_flow - rhs.power_flow)\n gx.append(-1 * lhs.power_flow + rhs.power_flow)\n gx.append(lhs.react_flow - rhs.react_flow)\n gx.append(-1 * lhs.react_flow + rhs.react_flow)\n gx.append(lhs.this_node_pressure - rhs.this_node_pressure)\n gx.append(-1 * lhs.this_node_pressure + rhs.this_node_pressure)\n gx.append(lhs.that_node_pressure - rhs.that_node_pressure)\n gx.append(-1 * lhs.that_node_pressure + rhs.that_node_pressure)\n gx.append(lhs.gas_flow_in - rhs.gas_flow_in)\n gx.append(-1 * lhs.gas_flow_in + rhs.gas_flow_in)\n gx.append(lhs.gas_flow_out - rhs.gas_flow_out)\n gx.append(-1 * lhs.gas_flow_out + rhs.gas_flow_out)\n return gx\n\n def optimize(self, tao):\n model = gurobi.Model()\n\n self.dual_express = 0\n self.norm_express = 0\n self.objective = 0\n self.gx = []\n\n for i in range(len(g_connection)):\n for connect_to in g_connection[i]:\n if i < connect_to:\n for time in range(T):\n lhs = g_info[i][g_connection[i].index(connect_to)][time]\n rhs = g_info[connect_to][g_connection[connect_to].index(i)][time]\n self.gx.extend(self.sub(lhs, rhs))\n\n duals = model.addVars(len(self.gx))\n\n self.dual_express = gurobi.quicksum(\n 1 * duals[i] * self.gx[i] for i in range(len(self.gx))\n )\n\n self.norm_express = gurobi.quicksum(\n (duals[i] - self.old_value[i]) * (duals[i] - self.old_value[i])\n for i in range(len(self.gx)))\n\n self.objective = -1 * self.dual_express + tao / 2 * self.norm_express\n model.setObjective(self.objective)\n model.Params.OutputFlag = 0\n model.optimize()\n dual_value = []\n pos = 0\n for line in range(g_link):\n lam_T = []\n for time in range(T):\n lam_t = []\n for _m_m_ in range(16):\n lam_t.append(duals[pos].getAttr('X'))\n pos = pos + 1\n lam_T.append(lam_t)\n dual_value.append(lam_T)\n return copy(dual_value)\n\n def set_old_value(self, old_value):\n self.old_value = copy(old_value)\n\n\ndef getPowerNet():\n # -------------- p l a y e r 0 ----------------\n player_index = 0\n system_info_0 = {\n 'index': player_index,\n 'T': T,\n 'connection_area': g_connection[player_index]\n }\n node_info_0 = {\n 'gen_num': 3 + 1, # the outside node as node-12 connected with index-3 with line-12\n 'gen_index': [0, 0, 5, 12],\n 'gen_power_min': [0, 0, 0, 0], # 0\n 'gen_power_max': [0.3, 0.3, 0.4, 10], # 1.2\n 'gen_react_min': [0, 0, 0, 0], # 0\n 
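# the fourth generator entry is the virtual unit at the appended boundary\n # bus (node 12) standing in for power imported over the tie-line, hence the\n # deliberately loose 0-10 bounds\n 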
'gen_react_max': [0.3, 0.3, 0.4, 10], # 1.2\n 'gen_cost_a': [0.1, 0.0013, 0.09, 0.5],\n 'gen_cost_b': [0.01, 0.0001, 0.01, 0],\n 'gen_cost_c': [0.1, 0.1, 0.1, 0],\n 'bus_num': 12 + 1,\n 'bus_voltage_min': [0.8 * 1] * (12 + 1),\n 'bus_voltage_max': [1.2 * 1] * (12 + 1),\n 'load_num': 8 + 1,\n 'load_index': [2, 3, 4, 7, 8, 9, 10, 11, 12],\n 'load_power_min': np.array(\n [[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],\n [0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],\n [0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],\n [0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],\n [0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],\n [0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],\n [0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],\n [0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T].tolist(), # 1.3\n 'load_power_max': np.array(\n [[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],\n [0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],\n [0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T].tolist(), # 1.7\n 'load_react_min': np.array(\n [[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],\n [0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],\n [0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],\n [0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],\n [0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],\n [0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],\n [0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],\n [0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T].tolist(),\n 'load_react_max': np.array(\n [[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],\n [0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],\n [0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T].tolist(), # 1.7\n 'bus_num_outside': 1,\n 'connection_index': [1], # the outer area power/gas connect with this index\n }\n line_info_0 = {\n 'line_num': 11 + 1,\n 'line_current_capacity': [10] * (11 + 1),\n 'line_start_point': [0, 1, 0, 3, 0, 5, 8, 5, 6, 5, 6, 12],\n 'line_end_point': [1, 2, 3, 4, 5, 8, 9, 6, 7, 10, 11, 1],\n 'line_resistance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist(),\n 'line_reactance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist()\n }\n gas_node_info_0 = {\n 'gas_node_num': 3 + 1,\n 'node_pressure_min': [0] * (3 + 1),\n 'node_pressure_max': [20] * (3 + 1),\n 'gas_well_num': 0 + 1,\n 'well_index': [3],\n 
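# note: with gas_node_num = 3 + 1, node 3 is the appended boundary node, so\n # this well effectively models gas delivered from the neighbouring area\n # rather than a physical source inside area 0\n 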
'well_output_min': [0],\n 'well_output_max': [1],\n 'gas_load_num': 2,\n 'load_index': [0, 2],\n 'gas_load_min': [[0.1] * T, [0.1] * T],\n 'gas_load_max': [[0.2] * T, [0.2] * T],\n 'gen_gas_num': 1,\n 'gen_gas_index': [2], # the gas generator index in the gas system\n 'gen_gas_index_power': [3], # the gas generator index in the power system\n 'gen_gas_min': [0], # this is power\n 'gen_gas_max': [1.5], # this is power\n 'gen_gas_efficiency': [10], # 0.05 gas => 0.5 power\n }\n gas_line_info_0 = {\n 'weymouth': [3] * (2 + 1),\n 'gas_line_num': 2 + 1,\n 'gas_line_start_point': [1, 1, 3], # gas flow out\n 'gas_line_end_point': [0, 2, 1], # gas flow in\n 'gas_line_pack_coefficient': [1] * (2 + 1),\n 'gas_line_pack_initial': 2,\n 'gas_flow_in_max': [50] * (2 + 1), # unused\n 'gas_flow_out_max': [50] * (2 + 1), # unused\n 'gas_line_active': [],\n 'compressor_num': 0,\n 'compressor_start_point': [],\n 'compressor_end_point': [],\n 'compressor_coefficient': [],\n 'compressor_max_flow': [],\n 'compressor_energy_consumption': [],\n }\n player_index = player_index + 1\n # -------------- p l a y e r 1 ----------------\n system_info_1 = {\n 'index': player_index,\n 'T': T,\n 'connection_area': g_connection[player_index]\n }\n node_info_1 = {\n 'gen_num': 3 + 1, # the outside node as node-12 connected with index-3 with line-12\n 'gen_index': [0, 0, 5, 12],\n 'gen_power_min': [0.5, 0.4, 0.6, 0], # 0 - 3\n 'gen_power_max': [1, 0.8, 1.2, 10],\n 'gen_react_min': [0.5, 0.4, 0.6, 0], # 0 - 3\n 'gen_react_max': [1, 0.8, 1.2, 10],\n 'gen_cost_a': [0.1, 0.13, 0.09, 0.5],\n 'gen_cost_b': [0.01, 0.01, 0.01, 0],\n 'gen_cost_c': [0.1, 0.1, 0.1, 0],\n 'bus_num': 12 + 1,\n 'bus_voltage_min': [0.8 * 1] * (12 + 1),\n 'bus_voltage_max': [1.2 * 1] * (12 + 1),\n 'load_num': 8 + 1,\n 'load_index': [2, 3, 4, 7, 8, 9, 10, 11, 12],\n 'load_power_min': (np.array(\n [[0.10, 0.10, 0.10, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],\n [0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],\n [0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],\n [0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],\n [0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],\n [0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],\n [0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],\n [0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T] * 2).tolist(), # 1.3\n 'load_power_max': (np.array(\n [[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],\n [0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],\n [0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T] * 2).tolist(), # 1.7\n 'load_react_min': (np.array(\n [[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],\n [0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],\n [0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],\n [0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],\n [0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],\n [0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],\n [0.20, 0.20, 0.18, 0.20, 0.22, 
0.22, 0.22, 0.20, 0.20, 0.20],\n [0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T] * 2).tolist(),\n 'load_react_max': (np.array(\n [[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],\n [0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],\n [0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],\n [0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T] * 2).tolist(),\n 'bus_num_outside': 1,\n 'connection_index': [1], # the outer area power/gas connect with this index\n }\n line_info_1 = {\n 'line_num': 11 + 1,\n 'line_current_capacity': [10] * (11 + 1),\n 'line_start_point': [0, 1, 0, 3, 0, 5, 8, 5, 6, 5, 6, 12],\n 'line_end_point': [1, 2, 3, 4, 5, 8, 9, 6, 7, 10, 11, 1],\n 'line_resistance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist(),\n 'line_reactance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist()\n }\n gas_node_info_1 = {\n 'gas_node_num': 3 + 1,\n 'node_pressure_min': [0] * (3 + 1),\n 'node_pressure_max': [20] * (3 + 1),\n 'gas_well_num': 1,\n 'well_index': [0],\n 'well_output_min': [0],\n 'well_output_max': [1.5],\n 'gas_load_num': 1 + 1,\n 'load_index': [2, 3],\n 'gas_load_min': [[0.1] * T, [0] * T],\n 'gas_load_max': [[0.2] * T, [1] * T],\n 'gen_gas_num': 1,\n 'gen_gas_index': [2], # the gas generator index in the gas system\n 'gen_gas_index_power': [2], # the gas generator index in the power system\n 'gen_gas_min': [0], # this is power\n 'gen_gas_max': [1.5],\n 'gen_gas_efficiency': [10],\n }\n gas_line_info_1 = {\n 'weymouth': [3] * (2 + 1),\n 'gas_line_num': 2 + 1,\n 'gas_line_start_point': [0, 1, 1], # gas flow out\n 'gas_line_end_point': [1, 2, 3], # gas flow in\n 'gas_line_pack_coefficient': [1] * (2 + 1),\n 'gas_line_pack_initial': 2,\n 'gas_flow_in_max': [5] * (2 + 1), # unused\n 'gas_flow_out_max': [5] * (2 + 1), # unused\n 'gas_line_active': [],\n 'compressor_num': 0,\n 'compressor_start_point': [],\n 'compressor_end_point': [],\n 'compressor_coefficient': [],\n 'compressor_max_flow': [],\n 'compressor_energy_consumption': [],\n }\n player_index = player_index + 1\n\n p1 = PowerNet(system_info_0, node_info_0, line_info_0, gas_node_info_0, gas_line_info_0)\n p2 = PowerNet(system_info_1, node_info_1, line_info_1, gas_node_info_1, gas_line_info_1)\n return [p1, p2, PlayerN1()]\n\n\ndef update_old_value():\n for i, player in enumerate(g_players):\n player.set_old_value(copy(g_info[i]))\n temp_lam = []\n for line in range(g_link):\n for time in range(T):\n for index in range(16):\n temp_lam.append(g_lam[line][time][index])\n g_playerN1.set_old_value(copy(temp_lam))\n\n\ndef sub_info(a, b):\n return (a.this_voltage_square - b.this_voltage_square) * (a.this_voltage_square - b.this_voltage_square) + \\\n (a.that_voltage_square - b.that_voltage_square) * (a.that_voltage_square - b.that_voltage_square) + \\\n (a.power_flow - b.power_flow) * (a.power_flow - b.power_flow) + \\\n (a.react_flow - b.react_flow) * (a.react_flow - b.react_flow) + \\\n (a.this_node_pressure - b.this_node_pressure) * (a.this_node_pressure - b.this_node_pressure) + \\\n (a.that_node_pressure - 
b.that_node_pressure) * (a.that_node_pressure - b.that_node_pressure) + \\\n (a.gas_flow_in - b.gas_flow_in) * (a.gas_flow_in - b.gas_flow_in) + \\\n (a.gas_flow_out - b.gas_flow_out) * (a.gas_flow_out - b.gas_flow_out)\n\n\ndef sub_norm(a, b):\n total = 0\n for i in range(len(g_connection)):\n for j in range(len(g_connection[i])):\n for k in range(T):\n total += sub_info(a[i][j][k], b[i][j][k])\n return total\n\n\ndef calculate_NE():\n global g_lam\n count_best_response = 0\n old_info = 0\n while count_best_response < 30:\n old_info = copy(g_info)\n for i, player in enumerate(g_players):\n # get the data for the player i\n player.update_model(g_tao) # fill in x_i and lam_i\n player.optimize()\n # update the lam_dual variable\n g_lam = copy(g_playerN1.optimize(g_tao))\n # update the response\n if sub_norm(old_info, copy(g_info)) < 0.0001:\n print(count_best_response + 1)\n break\n count_best_response = count_best_response + 1\n\n\ndef calculate_GNE(iijj):\n outer_loop_count = 0\n global result_plt\n global result_plt1\n global result_plt2\n result_plt = []\n result_plt1 = []\n result_plt2 = []\n global gap1\n global gap2\n gap1 = []\n gap2 = []\n while outer_loop_count < OUTER_LOOP[iijj]:\n print(outer_loop_count)\n # give xn, lam_n, calculate the equilibrium\n calculate_NE()\n # we now have a new NE, so set it as the new reference value\n update_old_value()\n outer_loop_count = outer_loop_count + 1\n result_plt.append(g_info[0][0][0].power_flow)\n result_plt1.append(g_info[1][0][0].power_flow)\n result_plt2.append(g_info[0][0][0].power_flow - g_info[1][0][0].power_flow)\n gap1.append(p1.cal_gap())\n gap2.append(p2.cal_gap())\n plt.plot(result_plt, label='0->1')\n plt.plot(result_plt1, '-r', label='1->0')\n plt.plot(result_plt2, '-g', label='diff')\n plt.legend(loc='best')\n plt.savefig('diff' + str(iijj) + '.svg')\n plt.cla()\n plt.plot(gap1)\n plt.plot(gap2)\n plt.savefig('gap' + str(iijj) + '.svg')\n plt.cla()\n\n\ndef start():\n global g_info\n global result_plt\n global result_plt1\n global result_plt2\n global gap1\n global gap2\n result_plt = []\n result_plt1 = []\n result_plt2 = []\n gap1 = []\n gap2 = []\n outer_loop_count = 2\n for player in g_players:\n player.build_model()\n while outer_loop_count < OUTER_LOOP:\n print(outer_loop_count)\n calculate_NE()\n update_old_value()\n outer_loop_count = outer_loop_count + 1\n result_plt.append(g_info[0][0][0].power_flow)\n result_plt1.append(g_info[1][0][0].power_flow)\n result_plt2.append(g_info[0][0][0].power_flow - g_info[1][0][0].power_flow)\n g_info = [[[connection_line_info() for ____time in range(T)]\n for ____line in range(len(g_connection[____area]))]\n for ____area in range(len(g_connection))]\n plt.plot(result_plt, label='0->1')\n plt.plot(result_plt1, '-r', label='1->0')\n plt.plot(result_plt2, '-g', label='diff')\n plt.legend(loc='best')\n plt.show()\n\n\ndef calculate_pccp():\n global abcd\n abcd = []\n global G_K\n G_K = 1.4\n pccp_loop = 0\n while pccp_loop < PCCP_COUNT:\n pccp_loop = pccp_loop + 1\n G_K = G_K * 1.4\n calculate_GNE(pccp_loop)\n for player in g_players:\n player.update_outer_model()\n print('player gap : ' + str(player.cal_gap()))\n abcd.append([p1.cal_gap(), p2.cal_gap()])\n plt.plot(abcd)\n plt.show()\n\n\nif __name__ == '__main__':\n result_plt = []\n result_plt1 = []\n result_plt2 = []\n gap1 = []\n gap2 = []\n all_players = getPowerNet()\n g_players = all_players[:player_num]\n g_playerN1 = all_players[player_num]\n [p1, p2] = g_players\n pn = g_playerN1\n for player_g in g_players:\n player_g.build_model()\n calculate_pccp()\n pycharm_debug 
= 2\n","sub_path":"summary/5 电气 耦合网 电网用 BFM 模型/BFM/f/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":72535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305278635","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n\"\"\"Helper components.\"\"\"\n\nfrom typing import NamedTuple\n\n\ndef retrieve_best_run(\n project_id: str, job_id: str\n) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),\n ('max_iter', int)]):\n \"\"\"Retrieves the parameters of the best Hypertune run.\"\"\"\n\n from googleapiclient import discovery\n from googleapiclient import errors\n\n ml = discovery.build('ml', 'v1')\n\n job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)\n request = ml.projects().jobs().get(name=job_name)\n\n try:\n response = request.execute()\n except errors.HttpError as err:\n print(err)\n except:\n print('Unexpected error')\n\n print(response)\n\n best_trial = response['trainingOutput']['trials'][0]\n\n metric_value = best_trial['finalMetric']['objectiveValue']\n alpha = float(best_trial['hyperparameters']['alpha'])\n max_iter = int(best_trial['hyperparameters']['max_iter'])\n\n return (metric_value, alpha, max_iter)\n\n\ndef evaluate_model(\n dataset_path: str, model_path: str, metric_name: str\n) -> NamedTuple('Outputs', [('metric_name', str), ('metric_value', float),\n ('mlpipeline_metrics', 'Metrics')]):\n \"\"\"Evaluates a trained sklearn model.\"\"\"\n #import joblib\n import pickle\n import json\n import pandas as pd\n import subprocess\n import sys\n\n from sklearn.metrics import accuracy_score, recall_score\n\n df_test = pd.read_csv(dataset_path)\n\n X_test = df_test.drop('Cover_Type', axis=1)\n y_test = df_test['Cover_Type']\n\n # Copy the model from GCS\n model_filename = 'model.pkl'\n gcs_model_filepath = '{}/{}'.format(model_path, model_filename)\n print(gcs_model_filepath)\n subprocess.check_call(['gsutil', 'cp', gcs_model_filepath, model_filename],\n stderr=sys.stdout)\n\n with open(model_filename, 'rb') as model_file:\n model = pickle.load(model_file)\n\n y_hat = model.predict(X_test)\n\n if metric_name == 'accuracy':\n metric_value = accuracy_score(y_test, y_hat)\n elif metric_name == 'recall':\n metric_value = recall_score(y_test, y_hat)\n else:\n metric_name = 'N/A'\n metric_value = 0\n\n # Export the metric\n metrics = {\n 'metrics': [{\n 'name': metric_name,\n 'numberValue': float(metric_value)\n }]\n }\n\n return (metric_name, metric_value, json.dumps(metrics))\n","sub_path":"immersion/kubeflow_pipelines/cicd/labs/pipeline/helper_components.py","file_name":"helper_components.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146871193","text":"#!/usr/bin/python3\n\nimport argparse\nimport pexpect\nimport sys\nimport random\n\nN_SIMPLE = 5\nN_THOROUGH = 100\nTIMEOUT = 5\nRETRIES_MAX = 5\nREPEAT = 5\n\n\ndef test_seed(seed, repeat=1, retry=0):\n if retry == RETRIES_MAX:\n 
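# give up once RETRIES_MAX consecutive runs produced no output at all, and\n # exit non-zero so the caller treats the run as inconclusive\n 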
print(\"Maximum retries reached, still no output received. \"\n \"Test inconclusive.\")\n sys.exit(1)\n\n print(\"Testing seed %d...\" % seed)\n # QEMU takes much less time to start, so for testing multiple seeds it\n # is more convenient to use it instead of OVPsim.\n child = pexpect.spawn('./launch', ['-t', '-S', 'qemu', 'test=all',\n 'seed=%d' % seed, 'repeat=%d' % repeat])\n index = child.expect_exact(\n ['[TEST PASSED]', '[TEST FAILED]', pexpect.EOF, pexpect.TIMEOUT],\n timeout=TIMEOUT)\n if index == 0:\n child.terminate(True)\n return\n elif index == 1:\n print(\"Test failure reported!\")\n message = child.buffer.decode(\"ascii\")\n try:\n while len(message) < 20000:\n message += child.read_nonblocking(timeout=1).decode(\"ascii\")\n except pexpect.exceptions.TIMEOUT:\n pass\n print(message)\n sys.exit(1)\n elif index == 2:\n print(\"EOF reached without success report. This may indicate \"\n \"a problem with the testing framework or QEMU. \"\n \"Retrying (%d)...\" % (retry + 1))\n test_seed(seed, repeat, retry + 1)\n elif index == 3:\n print(\"Timeout reached.\")\n message = child.buffer.decode(\"utf-8\")\n child.terminate(True)\n print(message)\n if len(message) < 100:\n print(\"It looks like kernel did not even start within the time \"\n \"limit. Retrying (%d)...\" % (retry + 1))\n test_seed(seed, repeat, retry + 1)\n else:\n print(\"No test result reported within timeout. Unable to verify \"\n \"test success. Seed was: %d, repeat: %d\" % (seed, repeat))\n sys.exit(1)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Automatically performs kernel tests.')\n parser.add_argument('--thorough', action='store_true',\n help='Generate much more test seeds.'\n 'Testing will take much more time.')\n\n try:\n args = parser.parse_args()\n except SystemExit:\n sys.exit(0)\n\n n = N_SIMPLE\n if args.thorough:\n n = N_THOROUGH\n\n # Run tests in alphabetic order\n test_seed(0)\n # Run tests using n random seeds\n for i in range(0, n):\n seed = random.randint(0, 2**32)\n test_seed(seed, REPEAT)\n\n print(\"Tests successful!\")\n sys.exit(0)\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"409592573","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 10 18:08:31 2017\n\n@author: yajun\n\"\"\"\n\n#import fr_data_set\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets.financial_filled_report import read_data_sets\n\nmnist = read_data_sets('mnist/MNIST_data', one_hot=True)\n\n#mnist = read_data_sets('MNIST_data', one_hot=True) # download and load the mnist-format data\nlog_dir = 'mnist/logs/ratings_filled_summaries'\n \n#import matplotlib.pyplot as plt \n#for img0 in mnist.train.images[5:9]: \n## img0 = mnist.train.images[0] \n# img0 = img0.reshape(32,40)\n# \n# fig = plt.figure() \n# # first subplot, default settings \n# ax = fig.add_subplot(221) \n# ax.imshow(img0)\n# \n# img1 = img0*255\n# # second subplot, rescaled image \n# ax = fig.add_subplot(222) \n# ax.imshow(img1)\n# plt.show() \n \n\ndef variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean',mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n \n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n \nsess = tf.InteractiveSession()\n\nwith 
tf.name_scope('input'):\n x = tf.placeholder(tf.float32, shape = [None, 1280]) # placeholder for the input data\n y_ = tf.placeholder(tf.float32, shape = [None, 6]) \n\nW = tf.Variable(tf.zeros([1280, 6]))\nb = tf.Variable(tf.zeros([6]))\n\n\n#sess.run(tf.global_variables_initializer())\ny = tf.matmul(x,W) + b\n\n#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits=y))\n# Define four helper functions: initialize the weights W, initialize the bias b, build a convolution layer, and build a pooling layer.\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev = 0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape= shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding = 'SAME')\n\nW_conv1 = weight_variable([5,5,1,32])\n'''\nThe convolution will compute 32 features for each 5x5 patch. Its weight tensor will have a shape of [5,5,1,32].\nThe first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels.\nNext we build the network: two convolution layers (each with activation and pooling), one fully connected layer, a dropout layer and a softmax layer.\n'''\nb_conv1 = bias_variable([32])\n# we will also have a bias vector with a component for each output channel.\n\nwith tf.name_scope(\"input_reshape\"):\n x_image = tf.reshape(x, [-1,32,40,1]) # reshape the input so it can be fed into the network\n tf.summary.image('input', x_image, 6)\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # first convolution layer 32*40\nh_pool1 = max_pool_2x2(h_conv1) # first pooling layer 16*20\n\n# The second layer will have 64 features for each 5x5 patch \nW_conv2 = weight_variable([5,5,32,64])\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # second convolution layer\nh_pool2 = max_pool_2x2(h_conv2) # second pooling layer 8*10\n\nW_fc1 = weight_variable([8*10*64, 1024])\nb_fc1 = bias_variable([1024])\nh_pool2_flat = tf.reshape(h_pool2, [-1, 8*10*64]) # flatten into a vector\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # first fully connected layer\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # dropout layer\n\n# Readout Layer\nW_fc2 = weight_variable([1024, 6])\nb_fc2 = bias_variable([6])\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n# Train and Evaluate the Model\nwith tf.name_scope(\"cross_entropy\"):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y_conv))\n tf.summary.scalar('cross_entropy',cross_entropy)\nwith tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n \nwith tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.summary.scalar('accuracy',accuracy)\n\nmerged = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter(log_dir+'/train', sess.graph)\ntest_writer = tf.summary.FileWriter(log_dir+'/test')\n#sess.run(tf.initialize_all_variables())\ntf.global_variables_initializer().run()\n\ndef feed_dict(train):\n if train:\n xs,ys = mnist.train.next_batch(50)\n k = 0.5\n else:\n xs,ys = mnist.test.images, mnist.test.labels\n k = 1.0\n return {x:xs, y_:ys, keep_prob: k}\n\nsaver = tf.train.Saver()\n\nfor i in range(4000):\n if i%50 ==0:\n summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else:\n if i%100 == 99:\n 
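# on every 100th step, record a full trace of the run (compute time and\n # memory) so the profile shows up in TensorBoard, and checkpoint the model\n 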
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n            run_metadata = tf.RunMetadata()\n            summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True), options=run_options, run_metadata=run_metadata)\n            train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n            train_writer.add_summary(summary, i)\n            saver.save(sess, log_dir + '/model.ckpt', i)\n            print('Adding run metadata for', i)\n        else:\n            summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))\n            train_writer.add_summary(summary, i)\ntrain_writer.close()\ntest_writer.close()\n#\n#for i in range(20000):\n#    batch = mnist.train.next_batch(50)\n#\n#    train_step.run(feed_dict={x:batch[0], y_:batch[1], keep_prob:0.5})\n#    if i%100 == 0:\n#        train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_:batch[1], keep_prob: 1.0})\n#        print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n#\n#print(\"test accuracy %g\"%accuracy.eval(feed_dict={x:mnist.test.images, y_:mnist.test.labels, keep_prob:1.0}))\n#\n","sub_path":"tensorflow/board_rating_filled_mnist.py","file_name":"board_rating_filled_mnist.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"550334289","text":"word_to_remove = []\n\n# words that mark a line for removal, one per line in the \"remove\" file\nwith open(\"remove\", \"r\") as f:\n    for l in f.readlines():\n        word_to_remove.append(l.rstrip())\n\n\ndef check_word(l):\n    \"\"\"Return True if the line contains none of the words to remove.\"\"\"\n    for word in word_to_remove:\n        if word in l:\n            return False\n    return True\n\n\nwith open(\"tasks\") as f:\n    lines = [line.strip() for line in f if line.strip()]\n\nfinal_lines = []\n\nfor l in lines:\n    if check_word(l):\n        final_lines.append(l + \"\\n\")\n\nwith open(\"taskss\", \"w\") as fw:\n    fw.writelines(final_lines)\n","sub_path":"text/remove_lines_in_file.py","file_name":"remove_lines_in_file.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"507517791","text":"import json\nfrom unittest.mock import patch\n\nfrom behave import given, then, when\nimport load\nfrom books.models import Book\n\nURL = '/books/'\n\n\n@given(u': PUT请求时,books数据是否存在为{put_status}')\ndef step_book_data_is_not_existing(context, put_status):\n    \"\"\"\n    Record whether the books data is supposed to exist already.\n    \"\"\"\n    if put_status == 'True':\n        context.exist = True\n    else:\n        context.exist = False\n\n\n@given(u': PUT请求books数据存在时,books数据为{put_books_data}')\ndef step_if_book_data_is_existing_data_is(context, put_books_data):\n    if context.exist:\n        data = load.load_json('features/data/put/' + put_books_data)\n        context.data = data\n\n\n@when(u': PUT请求时修改book数据为{put_book}')\ndef step_add_a_book_data(context, put_book):\n    \"\"\"\n    Send the PUT request, mocking the ORM lookup when the record exists.\n    \"\"\"\n    load_data = load.load_json('features/data/put/' + put_book)\n    book_json = json.loads(load_data)\n    put_url = URL + book_json['name']\n    if context.exist:\n        with patch('books.models.Book.objects.get') as mock_get:\n            book = Book()\n            book.name = book_json['name']\n            book.author = book_json['author']\n            book.price = book_json['price']\n            mock_get.return_value = book\n            context.response = context.test.client.put(path=put_url, data=load_data, content_type='application/json')\n    else:\n        context.response = context.test.client.put(path=put_url, data=load_data, content_type='application/json')\n\n\n@then(u': PUT请求时修改book返回结果状态码为{put_code}')\ndef step_add_result(context, put_code):\n    \"\"\"\n    Verify the response status code.\n    \"\"\"\n    context.test.assertEqual(int(put_code), 
context.response.status_code)\n","sub_path":"django/django-behave/dbehave/features/steps/books_put.py","file_name":"books_put.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"7360469","text":"# Given a non-empty array of digits representing a non-negative integer, plus one to the integer.\r\n#\r\n# The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.\r\n#\r\n# You may assume the integer does not contain any leading zero, except the number 0 itself.\r\n#\r\n# Example 1:\r\n#\r\n# Input: [1,2,3]\r\n# Output: [1,2,4]\r\n# Explanation: The array represents the integer 123.\r\n# Example 2:\r\n#\r\n# Input: [4,3,2,1]\r\n# Output: [4,3,2,2]\r\n# Explanation: The array represents the integer 4321.\r\n\r\nclass Solution:\r\n    def plusOne(self, digits: 'List[int]') -> 'List[int]':\r\n        # join the digits into a number, add one, then split back into digits\r\n        s = \"\"\r\n        for i in digits:\r\n            s += str(i)\r\n        num = int(s)\r\n        num += 1\r\n        num = str(num)\r\n        fin = []\r\n        for j in num:\r\n            fin.append(int(j))\r\n        return fin\r\n\r\ns = Solution()\r\nl = [6,1,4,5,3,9,0,1,9,5,1,8,6,7,0,5,5,4,3]\r\nprint(s.plusOne(l))\r\n\r\n","sub_path":"66_Plus One.py","file_name":"66_Plus One.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"27846764","text":"\"\"\"\nReset tasks that are not active (in a pilot).\n\nCheck all the tasks that are processing, and compare with the\ntasks in pilots. Reset the difference.\n\nInitial delay: rand(5 minutes)\nPeriodic delay: 10 minutes\n\"\"\"\n\nimport logging\nimport random\nimport time\n\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('non_active_tasks')\n\ndef non_active_tasks(module):\n    \"\"\"\n    Initial entrypoint.\n\n    Args:\n        module (:py:class:`iceprod.server.modules.schedule`): schedule module\n    \"\"\"\n    # initial delay\n    IOLoop.current().call_later(random.randint(60, 60*5), run, module.rest_client)\n\nasync def run(rest_client, debug=False):\n    \"\"\"\n    Actual runtime / loop.\n\n    Args:\n        rest_client (:py:class:`iceprod.core.rest_client.Client`): rest client\n        debug (bool): debug flag to propagate exceptions\n    \"\"\"\n    start_time = time.time()\n\n    try:\n        datasets = await rest_client.request('GET', '/dataset_summaries/status')\n        dataset_ids = []\n        if 'processing' in datasets:\n            dataset_ids.extend(datasets['processing'])\n        if 'truncated' in datasets:\n            dataset_ids.extend(datasets['truncated'])\n        pilots = await rest_client.request('GET', '/pilots')\n        task_ids_in_pilots = set()\n        for p in pilots.values():\n            if 'tasks' in p and p['tasks']:\n                task_ids_in_pilots.update(p['tasks'])\n        for dataset_id in dataset_ids:\n            try:\n                tasks = await rest_client.request('GET', '/datasets/{}/task_summaries/status'.format(dataset_id))\n                if 'processing' in tasks:\n                    reset_tasks = set(tasks['processing']) - task_ids_in_pilots\n                    if reset_tasks:\n                        logger.info('dataset %s reset tasks: %s', dataset_id, reset_tasks)\n                        args = {'status': 'reset'}\n                        for t in reset_tasks:\n                            await rest_client.request('PUT', '/datasets/{}/tasks/{}/status'.format(dataset_id, t), args)\n            except Exception:\n                logger.error('error resetting non-active tasks in dataset %s', dataset_id, exc_info=True)\n                if debug:\n                    raise\n    except Exception:\n        logger.error('error resetting non-active tasks', exc_info=True)\n        if debug:\n            raise\n\n    # run again after a 10 minute delay, minus the time this run took\n    stop_time = time.time()\n    delay = max(60*10 - (stop_time-start_time), 
60)\n    IOLoop.current().call_later(delay, run, rest_client)\n","sub_path":"iceprod/server/scheduled_tasks/non_active_tasks.py","file_name":"non_active_tasks.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"363583366","text":"import datetime\nimport locale\n\nimport requests\nfrom bs4 import BeautifulSoup as bs4\nfrom fake_useragent import UserAgent\n\n\nUSER_AGENT = UserAgent()\nURL = 'https://sinoptik.com.ru/%D0%BF%D0%BE%D0%B3%D0%BE%D0%B4%D0%B0-' \\\n      '%D0%BA%D1%80%D0%B0%D1%81%D0%BD%D0%BE%D0%B4%D0%B0%D1%80/10-%D0%B4%D0%BD%D0%B5%D0%B9'\nNOW = datetime.datetime.now()\n\nlocale.setlocale(locale.LC_TIME, 'ru_RU.UTF-8')\narchive_data = {}\n\n\nclass WeatherMaker:\n    def __init__(self):\n        self.data = []\n\n    def weather_parser(self):\n        \"\"\"Parse the weather for the next 10 days and store each day as a dict.\"\"\"\n        response = requests.get(URL, headers={'User-Agent': USER_AGENT.ie})\n        if response.status_code == 200:\n            soup = bs4(response.text, 'lxml')\n            items = soup.findAll(class_='weather__content_tab')\n            for item in items:\n                day = item.find(class_=\"weather__content_tab-date day_red\").get_text(strip=True)\n                month = item.find(class_=\"weather__content_tab-month\").get_text(strip=True)\n                weekday = item.find(class_=\"weather__content_tab-day\").get_text(strip=True)\n                temp_max = item.find(\"div\", {\"class\": \"weather__content_tab-temperature\"}).get_text(strip=True)[13:-1]\n                temp_min = item.find(\"div\", {\"class\": \"weather__content_tab-temperature\"}).get_text(strip=True)[4:7]\n                clouds = item.find(class_=\"show-tooltip\").get_text(strip=True)\n\n                date_str = f'{day} {month} {NOW.year}'\n                date_datetime = datetime.datetime.strptime(date_str, '%d %B %Y')\n\n                self.data.append(\n                    {\n                        'date': date_datetime.date(),\n                        'temperature': {'max': temp_max, 'min': temp_min},\n                        'clouds': clouds,\n                        'weekday': weekday,\n                    }\n                )\n        return self.data\n","sub_path":"scripts/weather_maker.py","file_name":"weather_maker.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"350307837","text":"# -*- coding: utf-8 -*-\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass PollRouter(object):\n\n    def route_for_task(self, task, args=None, kwargs=None):\n        logger.warning(task)\n        if task == \"polls.tasks.add\":\n            return {\n                \"queue\": \"add\"\n            }\n        return None\n","sub_path":"python_ni_app/polls/celery_routers.py","file_name":"celery_routers.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"578513926","text":"\"\"\"\nFunctional tests of Hub URLs.\n\nThese are pretty much \"smoke tests\" with the main aim being to catch\nsevere regressions. 
Although they serve this purpose quite well, prefer\nwriting unit tests over relying on these tests, particularly\nfor test driven development.\n\"\"\"\nimport typing\nimport re\nfrom django.urls import reverse\n\nfrom general.testing import AnonTestCase, AdaTestCase, BobTestCase\n\n\n# Shorthand functions for creating regexes to match response HTML against\n\n\ndef title(t):\n    \"\"\"Create a regex for the <title> tag.\"\"\"\n    return \"<title>{} : Stencila\".format(t)\n\n\ndef link(href):\n    \"\"\"Create a regex for an <a> tag.\"\"\"\n    return '<a([^>]*?)href=\"{}\"'.format(href)\n\n\n# Shorthand sets of expectations for certain pages\n\nsignin = [200, title(\"Sign in\")]\n\nCheckType = typing.Union[int, str]\nCheckTypeList = typing.List[CheckType]\n\n\n# Define a check\n# Each check is for a path and defines the expected\n# response for each user\n# Expectations can be an integer response code,\n# a string regex pattern, or a list of either of those\nclass Check(typing.NamedTuple):\n    path: str\n    anon: typing.Optional[typing.Union[CheckTypeList, CheckType]] = None\n    ada: typing.Optional[typing.Union[CheckTypeList, CheckType]] = None\n    bob: typing.Optional[typing.Union[CheckTypeList, CheckType]] = None\n\n\n# Skip a check\n# Change `check` to `skip`\n# instead of having to comment out multiple lines\ndef skip(path, *args, **kwargs):\n    print(\"Skipping {}\".format(path))\n    return None\n\n\n# fmt: off\nchecks = [\n    Check(\n        \"/\",\n        anon=title(\"Open\"),\n        ada=title(\"Dashboard\")\n    ),\n    Check(\n        \"/me\",\n        anon=signin,\n        ada=title(\"User settings\")\n    ),\n    Check(\n        \"/me/dashboard\",\n        anon=signin,\n        ada=title(\"Dashboard\")\n    ),\n    Check(\n        \"/me/password/change/\",\n        anon=signin,\n        ada=title(\"Password Change\")\n    ),\n    Check(\n        \"/me/email/\",\n        anon=signin,\n        ada=title(\"Manage e-mail addresses\")\n    ),\n    Check(\n        \"/me/social/connections/\",\n        anon=signin,\n        ada=title(\"\")\n    ),\n    Check(\n        \"/me/avatar/change/\",\n        anon=signin,\n        ada=title(\"\")\n    ),\n    Check(\n        \"/me/username/\",\n        anon=signin,\n        ada=title(\"Change Username : Stencila\")\n    ),\n    Check(\n        \"/accounts\",\n        anon=signin,\n        ada=[title(\"Account : Teams\"), link(reverse(\"account_create\"))],\n    ),\n    Check(\n        \"/ada-personal-account\",\n        anon=signin,\n        ada=title(\"Account ada-personal-account\")\n    ),\n    Check(\n        \"/ada-personal-account/members\",\n        anon=signin,\n        ada=title(\"Account ada-personal-account : Members\"),\n    ),\n    Check(\n        \"/ada-personal-account/teams\",\n        anon=signin,\n        ada=title(\"Account ada-personal-account : Teams\")\n    ),\n    Check(\n        \"/ada-personal-account/settings\",\n        anon=signin,\n        ada=title(\"Account ada-personal-account : Settings\")\n    ),\n    Check(\n        \"/ada-personal-account/subscriptions\",\n        anon=signin,\n        ada=title(\"Account ada-personal-account : Subscriptions\"),\n    ),\n    Check(\n        \"/ada-personal-account/subscriptions/add\",\n        anon=signin,\n        ada=\"plan\"\n    ),\n    Check(\n        \"/projects\",\n        anon=title(\"Projects\"),\n        ada=[title(\"Projects\"), link(reverse(\"project_create\"))],\n    ),\n    Check(\n        \"/ada-personal-account/ada-public-project\",\n        # Default view is files\n        anon=title(\"Project ada-public-project : Files\"),\n        ada=title(\"Project ada-public-project : Files\"),\n        bob=title(\"Project ada-public-project : Files\")\n    ),\n    Check(\n        \"/ada-personal-account/ada-private-project\",\n        anon=403,\n        # Default view is files\n        ada=title(\"Project ada-private-project : Files\"),\n        bob=403\n    ),\n    Check(\n        \"/ada-personal-account/ada-private-project/files\",\n        anon=403,\n        ada=title(\"Project ada-private-project : Files\"),\n        bob=403\n    ),\n    
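# The remaining private-project views must likewise 403 for anonymous\n    # and non-collaborator (bob) users; to temporarily disable one of these\n    # checks, swap its constructor for `skip`, e.g. `skip(\"/some/path\", ...)`.\n    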
Check(\n \"/ada-personal-account/ada-private-project/snapshots\",\n anon=403,\n ada=title(\"Project ada-private-project : Snapshots\"),\n bob=403\n ),\n Check(\n \"/ada-personal-account/ada-private-project/jobs\",\n anon=403,\n ada=title(\"Project ada-private-project : Jobs\"),\n bob=403\n ),\n Check(\n \"/ada-personal-account/ada-private-project/sharing\",\n anon=403,\n ada=title(\"Project ada-private-project : Sharing\"),\n bob=403\n ),\n Check(\n \"/ada-personal-account/ada-private-project/settings\",\n anon=403,\n ada=title(\"Project ada-private-project : Settings\"),\n bob=403\n ),\n # API endpoints\n Check(\n \"/api\",\n anon=title(\"Stencila Hub API\"),\n ada=title(\"Stencila Hub API\")\n ),\n Check(\n \"/api/schema\",\n anon=200,\n ada=200\n ),\n]\n\n\n# fmt: on\n\n\n# The following turns warnings into errors to help debug where those\n# are being generated.\n# For finer grained control see https://docs.pytest.org/en/latest/warnings.html\n# pytestmark = pytest.mark.filterwarnings(\"error\")\n\n\nclass UrlsMixin:\n \"\"\"Test URL response status codes.\"\"\"\n\n def test_urls(self):\n for check in checks:\n if not isinstance(check, Check):\n continue\n\n expects = getattr(check, self.username)\n if expects is None:\n continue\n\n response = self.client.get(check.path, follow=True)\n content = response.content.decode(\"utf-8\")\n for expect in expects if isinstance(expects, list) else [expects]:\n if isinstance(expect, int):\n assert response.status_code == expect, check.path\n elif isinstance(expect, str):\n self.assertIsNotNone(\n re.search(expect, content),\n 'Could not find regex \"{}\" in content of \"{}\"'.format(\n expect, check.path\n ),\n )\n else:\n raise Exception(\n \"Unhandled expectation type: {}\".format(type(expect))\n )\n\n\nclass TestUrlsAnon(AnonTestCase, UrlsMixin):\n username = \"anon\"\n\n\nclass TestUrlsAda(AdaTestCase, UrlsMixin):\n username = \"ada\"\n\n\nclass TestUrlsBob(BobTestCase, UrlsMixin):\n username = \"bob\"\n","sub_path":"director/urls_tests.py","file_name":"urls_tests.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"295263991","text":"#作业预约作废\n#from globalpkg.global_var import work_appoint_id\nfrom tools import tool\nfrom globalpkg.global_var import tsi\nfrom globalpkg.global_var import workticketid\nfrom globalpkg.global_var import worktaskid\nfrom globalpkg.global_var import jsaid\nfrom globalpkg.global_var import safeclarid\nfrom globalpkg.global_var import sql_query_work_appointid\nfrom tools.gethost import host\nfrom tools.gethost import pro\n\ncase = '作业预约作废'\nprojectname = pro()\nhost = host(projectname)\nprint(host)\n#times\nstarttime = tool.starttime\nendtime = tool.endtime\nnow = tool.now\n#mendtime = tool.mendtime\n#作业预约名称\nname = tool.ran_name_with_str()\nprint(name)\n#用例信息变量定义\ntestsuit3 = []\ncaseinfo = {}\ncaseinfo['id'] = 1\ncaseinfo['name'] = ''\ncaseinfo['result'] = \"\"\ncaseinfo['url'] = ''\ncaseinfo['data'] = ''\ncaseinfo['sign'] =''\ncaseinfo['flag'] = ''\ncaseinfo['isactive'] = ''\n#work_appoint_id_plus1= work_appoint_id+1\n\n#作业预约创建使用ID\nwork_appoint_id_plus1 = sql_query_work_appointid+1\n#work_appoint_id_plus1 = work_appoint_id_plus1\ncount =0\n#用例信息\ncaseinfo['id'] = 1\ncaseinfo['name'] = '作业预约'\ncaseinfo['isactive'] = 
1\n#拼写预约URL\nurl2='http://%s/hse/HSE_WORK_APPOINT/cardSave?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.3707947936681053&contentType=json&ajax=true&tid=1'%(host,work_appoint_id_plus1,work_appoint_id_plus1)\ncaseinfo['url'] = url2\n#作业许可大票数据\ndata = {\n\t\"tableName\": \"hse_work_appoint\",\n\t\"iscontractor\": \"0\",\n\t\"workunitname_no\": \"\",\n\t\"territorialunitid\": 2000000003339,\n\t\"worktaskid_no\": 0,\n\t\"isreport\": \"0\",\n\t\"territorialunitname\": \"运行一部\",\n\t\"territorialunitcode\": \"CS8082020\",\n\t\"wf_audit_state\": \"6\",\n\t\"status\": \"draft\",\n\t\"dataStatus\": 0,\n\t\"ver\": 1,\n\t\"created_by\": \"\",\n\t\"created_dt\": now,\n\t\"updated_by\": \"\",\n\t\"updated_dt\": now,\n\t\"df\": 0,\n\t\"tenantid\": 1,\n\t\"ts\": \"\",\n\t\"isspecialcondition\": \"\",\n\t\"specialenvironment\": \"ALLNOT\",\n\t\"task_worktype_code\": \"QT\",\n\t\"task_worktype_name\": \"其他\",\n\t\"cywlqfyxzz\": \"0\",\n\t\"isdzdh\": \"0\",\n\t\"projecttype\": \"rcjx\",\n\t\"isupgradedh\": \"0\",\n\t\"persistent_type\": \"newoperation\",\n\t\"issjtssxzy\": \"0\",\n\t\"worklevel_dh\": \"\",\n\t\"worklevel_sx\": \"\",\n\t\"worklevel_gc\": \"\",\n\t\"worklevel_dz\": \"\",\n\t\"worklevel_gx\": \"\",\n\t\"sourcetype\": \"\",\n\t\"territorialdeviceid\": 2000000003454,\n\t\"territorialdevicename\": \"制氢装置\",\n\t\"work_position_id\": 2000000002019,\n\t\"work_position_name\": \"制氢北区\",\n\t\"worksite\": \"作业地点123\",\n\t\"workunit\": 1688712,\n\t\"workunitname\": \"长庆石化分公司\",\n\t\"workname\": name,\n\t\"workcontent\": \"作业内容123\",\n\t\"worktypename\": \"作业许可证\",\n\t\"worktype\": \"xkz\",\n\t\"appointstarttime\": starttime,\n\t\"appointendtime\": endtime,\n\t\"material_medium\": \"物料介质123\",\n\t\"risksmeasures\": \"重点防控的风险123\"\n}\ncaseinfo['data'] =data\ntestsuit3.append(caseinfo.copy())\n\n#送交用例信息\n\ncaseinfo['id'] = 2\ncaseinfo['name'] = '作业预约送交'\n#送交接口地址\nurl3='http://%s/hse/HSE_WORK_APPOINT/wfSend?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.30092471197648707&contentType=json&ajax=true&tid=1'%(host,work_appoint_id_plus1,work_appoint_id_plus1)\ncaseinfo['url'] = url3\nformdata2={\n\t\"opinion\": \"申请审批\",\n\t\"nodeStr\": \"2000000009070\",\n\t\"2000000009070\": \"测试用户\",\n\t\"2000000009070_id\": 1000\n}\ncaseinfo['data'] =formdata2\nprint(caseinfo['id'] )\ntestsuit3.append(caseinfo.copy())\n\n#作业预约审批用例信息\ncaseinfo['id'] = 3\ncaseinfo['name'] = '作业预约审批'\n#审批接口地址\nurl4='http://%s/hse/HSE_WORK_APPOINT/wfFinish?parentEntityId=&parentFuncCode=&topEntityId=+&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.027850408425730055&contentType=json&ajax=true&tid=1'%(host,work_appoint_id_plus1)\ncaseinfo['url'] = url4\n#参数\nformdata ={\n\t\"opinion\": \"同意\",\n\t\"cC\": \"1000\",\n\t\"cCName\": \"测试用户\",\n\t\"nickName\": \"用户\",\n\t\"is_normal_finish\": \"true\",\n\t\"nodeStr\": \"\"\n}\ncaseinfo['data'] =formdata\ntestsuit3.append(caseinfo.copy())\n\n\n#作业预约作废\n#caseid = 5\ncasename = '作业预约作废'\ncount =count+1\ncaseid = count\ncaseinfo['id'] = 4\ncaseinfo['name'] = casename\n#审批接口地址\n#url4='http://host/hse/HSE_WORK_APPOINT/wfFinish?parentEntityId=&parentFuncCode=&topEntityId=+&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.027850408425730055&contentType=json&ajax=true&tid=1'%(work_appoint_id_plus1)\nurl4 = 
'http://%s/hse/HSE_WORK_APPOINT/wfInvalid?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.9786549083065863&contentType=json&ajax=true&tid=1'%(host,work_appoint_id_plus1,work_appoint_id_plus1)\n#参数\nformdata = {\n\t\"tableName\": \"hse_work_appoint\",\n\t\"task_worktype_code\": \"QT\",\n\t\"equt_name\": \"\",\n\t\"territorialdeviceid\": 2000000003454,\n\t\"created_by_name_nick\": \"用户\",\n\t\"worktaskid_no\": 0,\n\t\"cywlqfyxzz\": \"0\",\n\t\"specialenvironment\": \"ALLNOT\",\n\t\"isreport\": \"0\",\n\t\"created_by_name\": \"测试用户\",\n\t\"worklevel_dh\": \"\",\n\t\"sourcecode\": \"\",\n\t\"iscontainplayday\": 0,\n\t\"worktype_name\": \"作业许可证\",\n\t\"sourcefunc\": \"\",\n\t\"equipmentcode\": \"\",\n\t\"territorialdevicename\": \"制氢装置\",\n\t\"sourcetype\": \"\",\n\t\"worktypename\": \"作业许可证\",\n\t\"sourceid\": \"\",\n\t\"worklevel_gx\": \"\",\n\t\"serviceplanid\": \"\",\n\t\"task_worktype_name\": \"其他\",\n\t\"standardmaintenance\": \"\",\n\t\"worklevel_sx\": \"\",\n\t\"material_medium\": \"物料介质123\",\n\t\"risksmeasures\": \"重点防控的风险123\",\n\t\"issjtssxzy\": \"0\",\n\t\"isupgradedh\": \"0\",\n\t\"isdzdh\": \"0\",\n\t\"worklevel_gc\": \"\",\n\t\"persistent_type\": \"newoperation\",\n\t\"territorialunitcode\": \"CS8082020\",\n\t\"worklevel_dz\": \"\",\n\t\"dataStatus\": 0,\n\t\"ver\": 1,\n\t\"created_by\": 1000,\n\t\"created_dt\": now,\n\t\"updated_by\": 1000,\n\t\"updated_dt\": now,\n\t\"df\": 0,\n\t\"tenantid\": 1,\n\t\"ts\": \"\",\n\t\"work_appoint_id\": work_appoint_id_plus1,\n\t\"code\": \"\",\n\t\"iscontractor\": \"0\",\n\t\"workunit\": 1688712,\n\t\"workunitname\": \"长庆石化分公司\",\n\t\"workunitname_no\": \"长庆石化分公司\",\n\t\"workcontent\": \"作业内容123\",\n\t\"workname\": name,\n\t\"territorialunitid\": 2000000003339,\n\t\"territorialunitname\": \"运行一部\",\n\t\"work_position_id\": 2000000002019,\n\t\"appointstarttime\": starttime,\n\t\"appointendtime\": endtime,\n\t\"work_position_name\": \"制氢北区\",\n\t\"status\": \"approval\",\n\t\"constructionscheme\": \"\",\n\t\"wf_current_user\": \"1000\",\n\t\"wf_audit_state\": \"2\",\n\t\"wf_create_user\": 1000,\n\t\"wf_type\": \"2\",\n\t\"wf_instance\": 2000000010669,\n\t\"wf_current_nodeid\": \"2000000009070\",\n\t\"wf_audit_time\": now,\n\t\"worktype\": \"xkz\",\n\t\"worksite\": \"作业地点123\",\n\t\"equipmentnumber\": \"\",\n\t\"projecttype\": \"rcjx\",\n\t\"isspecialcondition\": \"\",\n\t\"specialcondition\": \"\"\n}\ncaseinfo['url'] = url4\ncaseinfo['data'] =formdata\ntestsuit3.append(caseinfo.copy())\n","sub_path":"backup/case3.py","file_name":"case3.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514000551","text":"from opengever.activity import notification_center\nfrom opengever.base.browser.resolveoguid import ResolveOGUIDView\nfrom opengever.base.exceptions import InvalidOguidIntIdPart\nfrom opengever.ogds.models.service import ogds_service\nfrom plone import api\nfrom zExceptions import NotFound\nfrom zExceptions import Unauthorized\n\n\nclass ResolveNotificationView(ResolveOGUIDView):\n\n key_to_strip = 'notification_id'\n\n def __call__(self):\n notification_id = self.request.get('notification_id', '')\n center = notification_center()\n self.notification = center.get_notification(notification_id)\n\n if not self.notification:\n raise NotFound('Invalid notification_id ({}) is given'.format(\n self.request.get('notification')))\n\n if self.notification.belongs_to(api.user.get_current()):\n 
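# Only the owner of the notification may consume its unread state; any\n            # other authenticated user is still redirected, but nothing is marked read.\n            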
self.notification.mark_as_read()\n\n self.redirect()\n\n def redirect(self):\n \"\"\"Redirect to the affected resource.\n\n If the resource is stored in an other admin_unit than the current one,\n it redirects to the resolve_oguid view on this admin_unit.\n\n If there is no resource, it must be an external resource and we\n redirect to the external_resource_url.\n \"\"\"\n if self.notification.activity.resource is None:\n external_url = self.notification.activity.external_resource_url\n return self.request.RESPONSE.redirect(external_url)\n\n oguid = self.notification.activity.resource.oguid\n\n if oguid.is_on_current_admin_unit:\n try:\n resource = oguid.resolve_object()\n if resource is None:\n raise Unauthorized()\n url = resource.absolute_url()\n except InvalidOguidIntIdPart:\n raise NotFound('Requested object has been deleted')\n\n else:\n admin_unit = ogds_service().fetch_admin_unit(oguid.admin_unit_id)\n url = ResolveOGUIDView.url_for(oguid, admin_unit)\n\n return self.request.RESPONSE.redirect(self.preserve_query_string(url))\n","sub_path":"opengever/activity/browser/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"27840622","text":"# Python Implementation of quicksort algorithm #\n#This is not a good implementation since we are making too many temporary arrays #\n\n\ndef quicksort(arr):\n same = []\n more = []\n less = []\n if len(arr) > 1:\n start = 0\n end = len(arr) - 1\n pivot = arr[int(start + (end - start)/2)]\n \n for i in range(0,len(arr)):\n if arr[i] == pivot:\n same.append(arr[i])\n elif arr[i] > pivot:\n more.append(arr[i])\n elif arr[i] < pivot:\n less.append(arr[i])\n \n final = quicksort(less) + same + quicksort(more)\n \n return final\n else:\n return arr\n \n\n\n\narr = list(map(int,input().split(\" \")))\n#print (\"The input array is:\",arr)\n\nfinal = quicksort(arr)\n \nprint (\"The final sorted array:\",final)","sub_path":"Sorting/QuickSort.-alt.py","file_name":"QuickSort.-alt.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"417213979","text":"from bottle import request, route, run, template, static_file, default_app, response\nfrom bson import json_util\nfrom pymongo import MongoClient\nimport json\nimport os\nimport sendgrid\n\n\n\n# NOTE:\n# Bottle is weird about templates. By default, it expects them to be in a\n# directory adjacent to this file called `views`.\n@route('/', method='GET')\ndef index():\n '''The main route used in the application. We are going to provide all the\n questions within one page. 
The logic to render a new page is handled by the\n    UI.\n    '''\n    return template('index')\n\n\n@route('/submit', method='POST')\ndef submit():\n    coll = get_collection()\n\n    result = None\n\n    # grab the first (and only) key of the POST body, which holds the JSON payload\n    for key in request.POST.keys():\n        if not result:\n            result = key\n            break\n\n    data = json.loads(result)\n    children = data.get('children', [])\n    govt_program = data.get('govt_program', '')\n    household_num = data.get('household_num', '')\n    child_income = data.get('child_income', '')\n    adult_income = data.get('adultincome', '')\n    ssn = data.get('ssn', '')\n    contact_info = data.get('contact_info', '')\n    user_email = data.get('email', '')\n    # data is a dictionary of parameters\n\n    pushed_data = coll.insert_one({\"children\": children, \"govt_program\": govt_program, \"household_num\": household_num, \"child_income\": child_income, \"adult_income\": adult_income, \"ssn\": ssn, \"contact_info\": contact_info, \"user_email\": user_email})\n\n    post_id = str(pushed_data.inserted_id)\n    print(post_id)\n    send_email(user_email, post_id)\n\n    response.content_type = 'application/json'\n\n    return json.dumps({\"id\": post_id})\n\n\n@route('/static/<filename>', name='static')\ndef server_static(filename):\n    return static_file(filename, root='static')\n\n@route('/submissions', method='GET')\ndef submissions():\n    coll = get_collection()\n\n    cursor = coll.find()\n\n    results = [document for document in cursor]\n\n    response.content_type = 'application/json'\n\n    return json.dumps({'count': len(results),\n                       'results': results}, default=json_util.default)\n\ndef get_collection():\n    mongo_user = os.getenv('mongo_user', '')\n    mongo_password = os.getenv('mongo_password', '')\n    mongo_url = os.getenv('mongo_url', '')\n    mongo_db = os.getenv('mongo_db', '')\n\n    url = 'mongodb://{}:{}@{}/{}'.format(mongo_user, mongo_password, mongo_url, mongo_db)\n\n    client = MongoClient(url)\n    db = client.lunch_data\n    return db.lunch_ux\n\n\ndef send_email(user_email=None, post_id=None):\n    if not post_id or not user_email:\n        return None\n\n    SENDGRID_API_KEY = os.getenv('SENDGRID_API_KEY', '')\n    sg = sendgrid.SendGridClient(SENDGRID_API_KEY)\n\n    message = sendgrid.Mail()\n    message.add_to(user_email)\n    message.set_subject('Thank you for registering! [National School Lunch Program]')\n    message.set_html('''\n    Thank you for submitting your registration for your child(ren) for the National School Lunch Program.
    \n Your confirmation number is {}
    \n Please save this message for your records.
    \n    For questions, please call (866) 632-9992.\n    '''.format(post_id))\n    message.set_from('Lunch UX Team Eileen ')\n    status, msg = sg.send(message)\n\n\napplication = default_app()\n#run(host='localhost', port=8080, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402124065","text":"\nimport plex\n\nclass RunError(Exception):\n\tpass\n\nclass ParseError(Exception):\n\tpass\n\nclass MyParser:\n\n\tdef __init__(self):\n\t\tletter = plex.Range('azAZ')\n\t\tdigit = plex.Range('09')\n\t\tdigit1 = plex.Range('01')\n\t\tandop = plex.Str('and')\n\t\torop = plex.Str('or')\n\t\txorop = plex.Str('xor')\n\t\tname = letter + plex.Rep(letter|digit)\n\t\tspace = plex.Any(' \\n\\t')\n\n\t\tKeyword = plex.Str('print','PRINT')\n\t\tbinary = plex.Rep(digit) + plex.Str('.') + plex.Rep1(digit)\n\t\tequals = plex.Str('=')\n\t\tpar = plex.Str('(')\n\t\tparr = plex.Str(')')\n\n\t\tself.vL = {}\n\n\t\tself.lexicon = plex.Lexicon([\n\t\t\t(Keyword, 'PRINT_TOKEN'),\n\t\t\t(andop, plex.TEXT),\n\t\t\t(orop, plex.TEXT),\n\t\t\t(xorop, plex.TEXT),\n\t\t\t(name, 'ID_TOKEN'),\n\t\t\t(binary, 'BINARY'),\n\t\t\t(equals, '='),\n\t\t\t(par, '('),\n\t\t\t(parr, ')'),\n\t\t\t(space, plex.IGNORE)\n\t\t])\n\n\tdef createScanner(self, fp):\n\t\tself.scanner = plex.Scanner(self.lexicon, fp)\n\t\tself.la, self.text = self.next_token()\n\n\tdef next_token(self):\n\t\treturn self.scanner.read()\n\n\tdef match(self, token):\n\t\tif self.la == token:\n\t\t\tself.la, self.text = self.next_token()\n\t\telse:\n\t\t\traise ParseError('Expected ' + str(token))\n\n\tdef parse(self, fp):\n\t\tself.createScanner(fp)\n\t#\twhile self.la:\n\t#\t\tprint(self.la, self.text)\n\t#\t\tself.la, self.text = self.next_token()\n\t\tself.stmt_list()\n\n\tdef stmt_list(self):\n\t\tif self.la == 'ID_TOKEN' or self.la == 'PRINT_TOKEN':\n\t\t\tself.stmt()\n\t\t\tself.stmt_list()\n\t\telif self.la == None:\n\t\t\treturn\n\t\telse:\n\t\t\traise ParseError(\"Expected id or print\")\n\n\tdef stmt(self):\n\t\tif self.la == 'ID_TOKEN':\n\t\t\tvarname = self.text\n\t\t\tself.match('ID_TOKEN')\n\t\t\tself.match('=')\n\t\t\te = self.expr()\n\t\t\tself.vL[varname] = e\n\t\telif self.la == 'PRINT_TOKEN':\n\t\t\tself.match('PRINT_TOKEN')\n\t\t\tself.expr()\n\t\telse:\n\t\t\traise ParseError(\"Expected id or print\")\n\n\tdef expr(self):\n\t\tif self.la == '(' or self.la == 'ID_TOKEN' or self.la == 'BINARY':\n\t\t\tself.term()\n\t\t\tself.term_tail()\n\t\telif self.la == ')' or self.la == 'ID_TOKEN' or self.la == None or self.la == 'PRINT_TOKEN':\n\t\t\treturn self.term()\n\t\telse:\n\t\t\traise ParseError(\"Expected par, id or a binary number\")\n\n\tdef term_tail(self):\n\t\tif self.la == 'xor':\n\t\t\tself.match('xor')\n\t\t\tself.term()\n\t\t\tself.term_tail()\n\t\telif self.la == ')' or self.la == 'ID_TOKEN' or self.la == 'PRINT_TOKEN' or self.la == None:\n\t\t\treturn\n\t\telse:\n\t\t\traise ParseError(\"Expected an operation\")\n\n\tdef term(self):\n\t\tif self.la == '(' or self.la == 'ID_TOKEN' or self.la == 'BINARY':\n\t\t\tself.factor()\n\t\t\tself.factor_tail()\n\t\telse:\n\t\t\traise ParseError(\"Expected par, id or a binary number\")\n\n\tdef factor_tail(self):\n\t\tif self.la == 'or':\n\t\t\tself.match('or')\n\t\t\tself.factor()\n\t\t\tself.factor_tail()\n\t\telif self.la == ')' or self.la == 'xor' or self.la == 'ID_TOKEN' or 
self.la == 'PRINT_TOKEN' or self.la == None:\n\t\t\treturn\n\t\telse:\n\t\t\traise ParseError(\"Expected an operation\")\n\n\tdef factor(self):\n\t\tif self.la == '(' or self.la == 'ID_TOKEN' or self.la == 'BINARY':\n\t\t\tself.atom()\n\t\t\tself.atom_tail()\n\t\telse:\n\t\t\traise ParseError(\"Expected par, id or a binary number\")\n\n\tdef atom_tail(self):\n\t\tif self.la == 'and':\n\t\t\tself.match('and')\n\t\t\tself.atom()\n\t\t\tself.atom_tail()\n\t\telif self.la == ')' or self.la == 'or' or self.la == 'xor' or self.la == 'ID_TOKEN' or self.la == 'PRINT_TOKEN' or self.la == None:\n\t\t\treturn\n\t\telse:\n\t\t\traise ParseError('Expected an operation')\n\n\tdef atom(self):\n\t\tif self.la == '(':\n\t\t\tself.match('(')\n\t\t\te = self.expr()\n\t\t\tself.match(')')\n\t\t\treturn e\n\t\telif self.la == 'ID_TOKEN':\n\t\t\tvarname = self.text\n\t\t\tself.match('ID_TOKEN')\n\t\t\tif varname in self.vL:\n\t\t\t\treturn self.vL[varname]\n\t\t\traise RunError(\"no value\")\n\t\telif self.la == 'BINARY':\n\t\t\tbinary = self.text\n\t\t\tself.match('BINARY')\n\t\t\treturn binary\n\t\telse:\n\t\t\traise ParseError('Expected par, id or a binary number')\n\nparser = MyParser()\n\nwith open('test.txt', 'r') as fp:\n\tparser.parse(fp)\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"213381363","text":"# -*- coding: utf8 -*-\nfrom flask import Flask, request, render_template\ntry:\n    import simplejson as json\nexcept ImportError:\n    import json\n\nimport db\nimport Vk\nimport Apk\nimport Blockchain\nimport Datahackkostroma\nimport yandex_data\nimport gis2\n\napp = Flask(__name__)\n\nwith open(\"log.txt\", \"w\") as f:\n    f.write(\"Server started...\\n\")\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n    return \"Hello! 
And welcome to the best world...\"\n\n\n# DATAHACKKOSTROMA\n@app.route('/datahackkostroma', methods=['GET', 'POST'])\ndef datahackkostroma():\n    if request.method == 'POST':\n        cities = request.form['cities']\n        phrases = request.form['phrases']\n\n        if cities == \"\" or phrases == \"\":\n            return render_template('sorry.html')\n\n        cities = cities.split(';')\n        phrases = phrases.split(';')\n\n        with open(\"log.txt\", \"a\") as f:\n            f.write(\"New request: yandex-search\" + \"\\n\" + \"cities: \" + str(cities) + \"\\t\" + \"phrases: \" + str(phrases) + \"\\n\")\n        yandex_dat = yandex_data.super_run(phrases=phrases, cities=cities)\n\n        gis2_data = gis2.get_all_count(phrases)\n        data = {\"yandex_data\": yandex_dat, \"2gis_data\": gis2_data}\n\n        with open(\"log.txt\", \"a\") as f:\n            f.write(\"\\n\" + \"Request answer: \" + str(data) + \"\\n\" + \"###\" * 50 + \"\\n\")\n\n        dataj = json.dumps(data)\n\n        return render_template('datahackkostroma.html', result_yandex=dataj)\n\n    return render_template('datahackkostroma.html')\n\n\n@app.route('/datahackkostroma_MTS_tracing', methods=['GET', 'POST'])\ndef datahackkostroma_MTS_tracing():\n    if request.method == 'POST':\n        results = Datahackkostroma.render()\n        return render_template('datahackkostroma_mts.html', result_MTS=results)\n\n    return render_template('datahackkostroma_mts.html')\n\n\n# QR_code_test_bot\n@app.route('/telebot/QR_code_test_bot', methods=['GET', 'POST'])\ndef QR_code_test_bot():\n    with open(\"telebot_log.txt\", \"w\") as f:\n        f.write(\"Received message = {0}\".format(str(request)))\n    return 'ok'\n\n# BLOCKCHAIN\n@app.route('/blockchain', methods=['GET', 'POST'])\ndef blockchain_index():\n    if request.method == 'POST':\n        lender = request.form['lender']\n        amount = request.form['amount']\n        borrower = request.form['borrower']\n\n        Blockchain.write_block(name=lender, amount=amount, to_whom=borrower, hash='')\n    return render_template('blockchain_index.html')\n\n@app.route('/blockchain/checking', methods=['GET'])\ndef blockchain_check():\n    results = Blockchain.check_integrity()\n    return render_template('blockchain_index.html', results=results)\n\n# ARPIT\n@app.route('/arpit', methods=['GET'])\ndef arpit():\n    return render_template('arpit.html')\n\n# GAME\n@app.route('/game', methods=['GET', 'POST'])\ndef game():\n    if request.method == 'POST':\n        with open(\"game_log.txt\", \"a\") as f:\n            f.write(\"request.form: {0}\\n\".format(request.form))\n    return render_template('game.html')\n\n# Settings route\n@app.route('/chat/<hash>/<request>', methods=['GET'])\ndef get(hash, request):\n    with open(\"log.txt\", \"a\") as f:\n        f.write(\"Get request from channel: {0}\\n\".format(hash))\n    with open(\"log.txt\", \"a\") as f:\n        f.write(\"request: \" + str(request) + \"\\n\")\n\n    channel_id = None\n    type_id = None\n    try:\n        channel_id, type_id = db.get_channel_by_binding(hash)\n    except Exception as ex:\n        with open(\"log.txt\", \"a\") as f:\n            f.write(\"Error fetching channel info for the GET request: {}\\n\".format(ex))\n        with open(\"log.txt\", \"a\") as f:\n            f.write(\"answer = {}\\n\".format('Database error') + \"___\"*50 + \"\\n\")\n        return 'Database error'\n    if not channel_id or not type_id:\n        with open(\"log.txt\", \"a\") as f:\n            f.write(\"answer = {}\\n\".format('No channel info') + \"___\"*50 + \"\\n\")\n        return 'No channel info'\n\n    with open(\"log.txt\", \"a\") as f:\n        f.write(\"channel_id = {}\\ntype_id = {}\\n\".format(channel_id, type_id))\n\n    answer = get_something(channel_id, type_id, request)\n    with open(\"log.txt\", \"a\") as f:\n        f.write(\"answer = 
{}\\n\".format(answer) + \"___\"*50 + \"\\n\")\n return answer\n\n# Admission messages\n@app.route('/chat/', methods=['POST'])\ndef parse(hash):\n with open(\"log.txt\", \"a\") as f:\n f.write(\"Posted by channel: {0}\\n\".format(hash))\n with open(\"log.txt\", \"a\") as f:\n f.write(\"Data: \" + str(request.data) + \"\\n\")\n\n channel_id = None\n type_id = None\n try:\n channel_id, type_id = db.get_channel_by_binding(hash)\n except Exception as ex:\n with open(\"log.txt\", \"a\") as f:\n f.write(\"Ошибка получения информации о канале из POST запроса: {}\\n\".format(ex))\n\n with open(\"log.txt\", \"a\") as f:\n f.write(\"channel_id = {}\\ntype_id = {}\\n\".format(channel_id, type_id))\n if not channel_id or not type_id:\n with open(\"log.txt\", \"a\") as f:\n f.write(\"answer = {}\\n\".format(\"Unknown channel\") + \"___\"*50 + \"\\n\")\n return \"Unknown channel\"\n\n # Распаковка\n try:\n data = json.loads(request.data.decode())\n except:\n with open(\"log.txt\", \"a\") as f:\n f.write(\"answer = {}\\n\".format(\"Wrong request\") + \"___\"*50 + \"\\n\")\n return \"Wrong request\"\n\n answer = parse_message(type_id, channel_id, data)\n with open(\"log.txt\", \"a\") as f:\n f.write(\"answer = {}\\n\".format(answer) + \"___\"*50 + \"\\n\")\n return answer\n\ndef get_something(channel_id, type_id, request):\n if type_id == 1:\n answer = Vk.Vk.get_something(channel_id, request)\n return answer\n\n if type_id == 2:\n answer = Apk.Apk.get_something(channel_id, request)\n return answer\n else:\n return \"This type of channel is not working now.\"\n\n\ndef parse_message(type_id, channel_id, data):\n if type_id == 1:\n answer = Vk.Vk.parse_message(channel_id, data)\n return answer\n\n if type_id == 2:\n answer = Apk.Apk.parse_message(channel_id, data)\n return answer\n else:\n return \"This type of channel is not working now\"","sub_path":"chatter/serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"610940099","text":"# Copyright (c) 2015 Ansible, Inc.\n# All Rights Reserved.\n\n# Python\nimport urllib\n\n# Six\nimport six\n\n# Django\nfrom django.contrib.auth import login, logout\nfrom django.shortcuts import redirect\nfrom django.utils.timezone import now\n\n# Python Social Auth\nfrom social.exceptions import SocialAuthBaseException\nfrom social.utils import social_logger\nfrom social.apps.django_app.middleware import SocialAuthExceptionMiddleware\n\n# Ansible Tower\nfrom awx.main.models import AuthToken\n\n\nclass SocialAuthMiddleware(SocialAuthExceptionMiddleware):\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n if request.path.startswith('/sso/login/'):\n request.session['social_auth_last_backend'] = callback_kwargs['backend']\n\n def process_request(self, request):\n token_key = request.COOKIES.get('token', '')\n token_key = urllib.quote(urllib.unquote(token_key).strip('\"'))\n\n if not hasattr(request, 'successful_authenticator'):\n request.successful_authenticator = None\n\n if not request.path.startswith('/sso/') and 'migrations_notran' not in request.path:\n\n # If token isn't present but we still have a user logged in via Django\n # sessions, log them out.\n if not token_key and request.user and request.user.is_authenticated():\n logout(request)\n\n # If a token is present, make sure it matches a valid one in the\n # database, and log the user via Django session if necessary.\n # Otherwise, log the user out via Django sessions.\n 
elif token_key:\n\n try:\n auth_token = AuthToken.objects.filter(key=token_key, expires__gt=now())[0]\n except IndexError:\n auth_token = None\n\n if not auth_token and request.user and request.user.is_authenticated():\n logout(request)\n elif auth_token and request.user.is_anonymous is False and request.user != auth_token.user:\n logout(request)\n auth_token.user.backend = ''\n login(request, auth_token.user)\n auth_token.refresh()\n\n if auth_token and request.user and request.user.is_authenticated():\n request.session.pop('social_auth_error', None)\n request.session.pop('social_auth_last_backend', None)\n\n def process_exception(self, request, exception):\n strategy = getattr(request, 'social_strategy', None)\n if strategy is None or self.raise_exception(request, exception):\n return\n\n if isinstance(exception, SocialAuthBaseException) or request.path.startswith('/sso/'):\n backend = getattr(request, 'backend', None)\n backend_name = getattr(backend, 'name', 'unknown-backend')\n\n message = self.get_message(request, exception)\n if request.session.get('social_auth_last_backend') != backend_name:\n backend_name = request.session.get('social_auth_last_backend')\n message = request.GET.get('error_description', message)\n\n full_backend_name = backend_name\n try:\n idp_name = strategy.request_data()['RelayState']\n full_backend_name = '%s:%s' % (backend_name, idp_name)\n except KeyError:\n pass\n\n social_logger.error(message)\n\n url = self.get_redirect_uri(request, exception)\n request.session['social_auth_error'] = (full_backend_name, message)\n return redirect(url)\n\n def get_message(self, request, exception):\n msg = six.text_type(exception)\n if msg and msg[-1] not in '.?!':\n msg = msg + '.'\n return msg\n\n def get_redirect_uri(self, request, exception):\n strategy = getattr(request, 'social_strategy', None)\n return strategy.session_get('next', '') or strategy.setting('LOGIN_ERROR_URL')\n","sub_path":"awx/sso/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458754568","text":"from scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom onecloud.items import AlexaItem\n\n\nclass AlexaSpider(CrawlSpider):\n name = \"alexa\"\n allowed_domains = [\"alexa.com\"]\n start_urls = (\n 'http://www.alexa.com/topsites',\n )\n\n rules = (\n # Rule(LinkExtractor(allow=('/topsites/global[\\;\\d]*'),\n # restrict_xpaths=('//a[@class=\"next\"]')),\n # callback='parse_items',\n # follow=True),)\n Rule(LinkExtractor(allow=('/topsites/global[\\;\\d]*'),\n ),\n callback='parse_items',\n follow=True),)\n\n def parse_start_url(self, response):\n self.parse_items(response)\n\n def parse_items(self, response):\n rows = Selector(response).xpath('//ul/li[@class=\"site-listing\"]')\n for row in rows:\n item = AlexaItem()\n item['rank_place'] = ''.join(row.xpath('div[@class=\"count\"]/text()').extract()).strip()\n item['web_site'] = ''.join(\n row.xpath('div[@class=\"desc-container\"]/p[@class=\"desc-paragraph\"]/a/text()').extract()).strip()\n item['link'] = ''.join(\n row.xpath('div[@class=\"desc-container\"]/p[@class=\"desc-paragraph\"]/a/@href').extract()).strip()\n item['description'] = ''.join(\n row.xpath('div[@class=\"desc-container\"]/div[@class=\"description\"]/text()').extract()).replace('\\n',\n '').strip()\n yield 
item\n","sub_path":"python-work/onecloud/onecloud/spiders/alexa_spider.py","file_name":"alexa_spider.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"202214332","text":"import numpy as np\nimport json\nimport os\n\ndef sigmoid(x, derivative=False):\n return x*(1.0-x) if derivative else 1.0/(1.0+np.exp(-x))\n #return np.greater(x,0).astype(int) if derivative else np.maximum(x,0,x)\n\nclass NeuralNet:\n\n def __init__(self, base_in, base_out, input_size, hidden_size, output_size, saved_weight1=None, saved_weight2=None, saved_weight3=None):\n\n self.input = base_in\n self.y = base_out\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n\n if saved_weight1 is None:\n self.w1 = np.random.uniform(-0.5, 0.5, (input_size,hidden_size))\n else:\n self.w1 = saved_weight1\n\n if saved_weight2 is None:\n self.w2 = np.random.uniform(-0.5, 0.5, (hidden_size,hidden_size))\n else:\n self.w2 = saved_weight2\n\n if saved_weight3 is None:\n self.w3 = np.random.uniform(-0.5, 0.5, (hidden_size,output_size))\n else:\n self.w3 = saved_weight3\n\n def feedforward(self, input=None):\n input_array = []\n\n if input is None:\n input_array = np.array(self.input, ndmin=2)\n else:\n print(\"recebi input\")\n print(input)\n input_array = np.array(input, ndmin=2)\n\n self.layer1 = sigmoid(np.dot(input_array, self.w1))\n self.layer2 = sigmoid(np.dot(self.layer1, self.w2))\n self.output = sigmoid(np.dot(self.layer2, self.w3))\n \n def backpropagation(self):\n # application of the chain rule to find derivative of the loss function with respect to weights2 and weights1\n d_weights3 = np.dot(self.layer2.T, (2*(self.y - self.output) * sigmoid(self.output, True)))\n d_weights2 = np.dot(self.layer1.T, (np.dot(2*(self.y - self.output) * sigmoid(self.output, True), self.w3.T) * sigmoid(self.layer2, True)))\n d_weights1 = np.dot(self.input.T, (np.dot(np.dot(2*(self.y - self.output) * sigmoid(self.output, True), self.w3.T), self.w2.T) * sigmoid(self.layer1, True)))\n\n # update the weights with the derivative (slope) of the loss function\n self.w1 += d_weights1*0.1\n self.w2 += d_weights2*0.1\n self.w3 += d_weights3*0.1\n \n def get_output(self, input):\n self.feedforward(input)\n return self.output\n\n \nif __name__ == \"__main__\":\n # X = np.array([[0,0,1],\n # [0,1,1],\n # [1,0,1],\n # [1,1,1]])\n # y = np.array([[0],[1],[1],[0]])\n # nn = NeuralNet(X,y,3,8,1)\n\n # for i in range(5000):\n # nn.feedforward()\n # nn.backpropagation()\n\n # print(nn.output)\n data = []\n\n path = os.getcwd() + '/base_weuler'\n for filename in os.listdir(path):\n with open(path + '/' + filename) as f:\n data = data + json.load(f)\n \n input = []\n out = []\n\n for d in data:\n input.append(d[0])\n out.append(d[1])\n\n input = np.array(input, ndmin=2)\n out = np.array(out, ndmin=2)\n\n #xor_in = np.array([[0,0], [0,1], [1,0], [1,1]])\n #xor_out = np.array([[0], [1], [1], [0]])\n \n neural_net = NeuralNet(input, out, 4, 16, 4)\n\n for i in range(5000):\n neural_net.feedforward()\n #print(neural_net.output)\n # diff = np.sum((neural_net.output - neural_net.y)**2)/len(neural_net.input)\n # print(diff)\n neural_net.backpropagation()\n\n # for k in neural_net.output:\n # print(k)\n\n w1 = neural_net.w1.tolist()\n w2 = neural_net.w2.tolist()\n w3 = neural_net.w3.tolist()\n\n with open('w1', 'w') as out:\n json.dump(w1, out)\n\n with open('w2', 'w') as out:\n json.dump(w2, out)\n \n with open('w3', 'w') as 
out:\n json.dump(w3, out)\n","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309229227","text":"import csv\nimport itertools\nfrom collections import namedtuple\n\nfrom django.core.management import BaseCommand, CommandError\n\nfrom corehq.apps.change_feed import data_sources, topics\nfrom corehq.apps.change_feed.producer import producer\nfrom corehq.apps.hqadmin.management.commands.stale_data_in_es import DataRow, HEADER_ROW, get_csv_args\nfrom corehq.form_processor.utils import should_use_sql_backend\nfrom couchforms.models import XFormInstance\nfrom dimagi.utils.chunked import chunked\n\nfrom casexml.apps.case.models import CommCareCase\nfrom corehq.util.couch import bulk_get_revs\nfrom corehq.apps.hqcase.management.commands.backfill_couch_forms_and_cases import (\n create_case_change_meta,\n create_form_change_meta,\n publish_change,\n)\nfrom pillowtop.feed.interface import ChangeMeta\n\n\nDocumentRecord = namedtuple('DocumentRecord', ['doc_id', 'doc_type', 'doc_subtype', 'domain'])\n\n\nCASE_DOC_TYPES = {'CommCareCase'}\nFORM_DOC_TYPES = {'XFormInstance', 'XFormArchived'}\n\nALL_DOC_TYPES = set.union(CASE_DOC_TYPES, FORM_DOC_TYPES)\n\n\nclass Command(BaseCommand):\n \"\"\"\n Republish doc changes. Meant to be used in conjunction with stale_data_in_es command\n\n $ ./manage.py republish_doc_changes changes.tsv\n \"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument('stale_data_in_es_file')\n parser.add_argument('--delimiter', default='\\t', choices=('\\t', ','))\n\n def handle(self, stale_data_in_es_file, delimiter, *args, **options):\n data_rows = _get_data_rows(stale_data_in_es_file, delimiter=delimiter)\n document_records = _get_document_records(data_rows)\n form_records = []\n case_records = []\n for record in document_records:\n if record.doc_type in CASE_DOC_TYPES:\n case_records.append(record)\n elif record.doc_type in FORM_DOC_TYPES:\n form_records.append(record)\n else:\n assert False, f'Bad doc type {record.doc_type} should have been caught already below.'\n _publish_cases(case_records)\n _publish_forms(form_records)\n\n\ndef _get_data_rows(stale_data_in_es_file, delimiter):\n with open(stale_data_in_es_file, 'r') as f:\n csv_reader = csv.reader(f, **get_csv_args(delimiter))\n for csv_row in csv_reader:\n data_row = DataRow(*csv_row)\n # Skip the header row anywhere in the file.\n # The \"anywhere in the file\" part is useful\n # if you cat multiple stale_data_in_es_file files together.\n if data_row != HEADER_ROW:\n yield data_row\n\n\ndef _get_document_records(data_rows):\n for data_row in data_rows:\n doc_id, doc_type, doc_subtype, domain = \\\n data_row.doc_id, data_row.doc_type, data_row.doc_subtype, data_row.domain\n if doc_type not in ALL_DOC_TYPES:\n raise CommandError(f\"Found bad doc type {doc_type}. 
\"\n \"Did you use the right command to create the data?\")\n yield DocumentRecord(doc_id, doc_type, doc_subtype, domain)\n\n\ndef _publish_cases(case_records):\n for domain, records in itertools.groupby(case_records, lambda r: r.domain):\n if should_use_sql_backend(domain):\n _publish_cases_for_sql(domain, list(records))\n else:\n _publish_cases_for_couch(domain, list(records))\n\n\ndef _publish_forms(form_records):\n for domain, records in itertools.groupby(form_records, lambda r: r.domain):\n if should_use_sql_backend(domain):\n _publish_forms_for_sql(domain, records)\n else:\n _publish_forms_for_couch(domain, records)\n\n\ndef _publish_cases_for_couch(domain, case_records):\n _publish_docs_for_couch(CommCareCase, create_case_change_meta, domain, case_records)\n\n\ndef _publish_cases_for_sql(domain, case_records):\n for record in case_records:\n producer.send_change(\n topics.CASE_SQL,\n _change_meta_for_sql_case(domain, record.doc_id, record.doc_subtype)\n )\n\n\ndef _change_meta_for_sql_case(domain, case_id, case_type):\n doc_type, = CASE_DOC_TYPES\n return ChangeMeta(\n document_id=case_id,\n data_source_type=data_sources.SOURCE_SQL,\n data_source_name=data_sources.CASE_SQL,\n document_type=doc_type,\n document_subtype=case_type,\n domain=domain,\n is_deletion=False,\n )\n\n\ndef _publish_forms_for_sql(domain, form_records):\n for record in form_records:\n producer.send_change(\n topics.FORM_SQL,\n _change_meta_for_sql_form_record(domain, record)\n )\n\n\ndef _publish_forms_for_couch(domain, form_records):\n _publish_docs_for_couch(XFormInstance, create_form_change_meta, domain, form_records)\n\n\ndef _change_meta_for_sql_form_record(domain, form_record):\n return ChangeMeta(\n document_id=form_record.doc_id,\n data_source_type=data_sources.SOURCE_SQL,\n data_source_name=data_sources.FORM_SQL,\n document_type=form_record.doc_type,\n document_subtype=form_record.doc_subtype,\n domain=domain,\n is_deletion=False,\n )\n\n\ndef _publish_docs_for_couch(doc_cls, get_meta, domain, records):\n doc_ids = [r.doc_id for r in records]\n for ids in chunked(doc_ids, 500):\n doc_id_rev_list = bulk_get_revs(doc_cls.get_db(), ids)\n for doc_id, doc_rev in doc_id_rev_list:\n publish_change(\n get_meta(domain, doc_id, doc_rev)\n )\n","sub_path":"corehq/apps/hqadmin/management/commands/republish_doc_changes.py","file_name":"republish_doc_changes.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235169995","text":"from engine.IO.dialogo import Monologo, Dialogo, Discurso\r\nfrom engine.UI.circularmenus import DialogCircularMenu\r\nfrom engine.misc import ReversibleDict\r\nfrom ._movil import Movil\r\n\r\n\r\nclass Parlante(Movil):\r\n interlocutor = None # para que el mob sepa con quién está hablando, si lo está\r\n hablante = True\r\n hablando = False\r\n\r\n def hablar(self, sprite):\r\n locutores = [self, sprite]\r\n self.interlocutor = sprite\r\n sprite.interlocutor = self\r\n for loc in locutores:\r\n loc.hablando = True\r\n loc.detener_movimiento()\r\n opuesta = ReversibleDict(arriba='abajo', derecha='izquierda')\r\n sprite.cambiar_direccion(opuesta[self.direccion])\r\n\r\n def dialogar(self, sprite):\r\n \"\"\"\r\n :type sprite: engine.mobs.Mob\r\n \"\"\"\r\n if sprite.hablante:\r\n self.hablar(sprite)\r\n locutores = self, sprite\r\n file = Discurso.is_possible(*locutores)\r\n if file is not None:\r\n dialogo = Dialogo(file, *locutores)\r\n menu = DialogCircularMenu(*locutores)\r\n 
dialogo.frontend.set_menu(menu)\r\n\r\n        else:\r\n            Monologo(sprite, self)\r\n","sub_path":"engine/mobs/CompoMob/_parlante.py","file_name":"_parlante.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"121914716","text":"from mapping_dict_list import *\nimport re\nimport pandas as pd\nimport numpy as np\nimport requests\n\n\n\ndef map_customer_nearest_branch(pincode, csv_writer, csv_writer_error):\n    if is_customer_in_served_state(pincode):\n        if cust_pincode_to_branch_pincode_map_dict[pincode] == 0:\n            if str(pincode) not in unique_all_fsfb_branch_codes[0]:\n                state_code = get_pincode_first_two_digits(pincode)\n                state_all_branch_pincode = get_all_state_branch_pincodes(state_code)\n                closest_branch_pincode = get_short_distance_branch_pincode(pincode, state_all_branch_pincode, csv_writer, csv_writer_error)\n                cust_pincode_to_branch_pincode_map_dict[pincode] = closest_branch_pincode\n            else:\n                cust_pincode_to_branch_pincode_map_dict[pincode] = pincode\n    else:\n        cust_pincode_to_branch_pincode_map_dict[pincode] = 560035\n\n\ndef is_customer_in_served_state(pincode):\n    return (get_pincode_first_two_digits(pincode) in all_states_served_pincode)\n\n\ndef get_pincode_first_two_digits(pincode):\n    return int(pincode/10000)\n\n\ndef get_all_state_branch_pincodes(state_code):\n    state_first_two_digits_all = get_state_all_first_two_digits(state_code)\n    return get_branch_codes(state_first_two_digits_all)\n\n\ndef get_state_all_first_two_digits(state_code):\n    for idx, state in enumerate(all_states):\n        if state_code in state:\n            return all_states[idx]\n\n\ndef get_branch_codes(state_first_two_digits_all):\n    all_state_branch_codes = []\n    for first_two_digit in state_first_two_digits_all:\n        lst = all_branch_pincodes_for_first_two_digits[first_two_digit]\n        if(len(lst) > 0):\n            for value in lst:\n                all_state_branch_codes.append(value)\n    return all_state_branch_codes\n\ndef produce_unique_fsfb_branch_pincodes(series_of_branch_pincodes):\n    unique_all_fsfb_branch_codes.append(list(np.unique(series_of_branch_pincodes)))\n\n\ndef produce_list_of_all_state_branch_pincodes(list_of_unique_branch_pincodes, list_of_pincodes_first_two_digits):\n    for pin in list_of_pincodes_first_two_digits:\n        list_pin = list(pd.Series(list_of_unique_branch_pincodes).apply(lambda x: 0 if re.match(str(pin),x)==None else int(x)))\n        all_branch_pincodes_for_first_two_digits[pin] = list(list(filter(lambda list_pin: list_pin != 0, list_pin)))\n\n# #TODO remove the function below.\n# def get_short_distance_branch_pincode(orgin_pincode, destination_list_of_pincode,csv_writer):\n#     number_of_dest = len(destination_list_of_pincode)\n#     num_of_hits.append(number_of_dest)\n#     return 560035\n\ndef get_short_distance_branch_pincode(origin_pincode, destination_list_of_pincode, csv_writer, csv_writer_error):\n    distance_matrix = []\n    closest_destination = 0\n\n    for dest in destination_list_of_pincode:\n        print(\"origin_pincode: {} destination: {}\".format(origin_pincode, dest))\n        url = \"https://maps.googleapis.com/maps/api/distancematrix/json?units=metric\"\n\n        try:\n            r = requests.get(url+'&origins='+str(origin_pincode)+'&destinations='+str(dest)+'&key=AIzaSyAwmWLmzibtkntESWyZYckRrw4kUQhqvvw&region=IN')\n            distance = float((r.json()['rows'][0])['elements'][0]['distance']['text'].replace('km', '').replace(',', '').strip())\n            print(\"the distance between the origin and pincode is: {}\".format(distance))\n            duration = (r.json()['rows'][0])['elements'][0]['duration']['text']\n            
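# The Distance Matrix reply nests one element per origin/destination pair;\n            # 'distance' -> 'text' is human readable (e.g. \"12.3 km\"), which is why the\n            # km suffix and thousands separators are stripped above. Both status codes\n            # are captured next for the CSV audit trail.\n            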
out_status = r.json()['status']\n element_status = (r.json()['rows'][0])['elements'][0]['status']\n distance_matrix.append((origin_pincode, dest, out_status, element_status, distance, duration))\n csv_writer.writerow([origin_pincode, dest, out_status, element_status, distance, duration])\n except Exception as e:\n csv_writer_error.writerow([origin_pincode, dest])\n print(\"The error is \",e)\n\n # TODO remove the following break from the code.\n # break\n\n try:\n df = pd.DataFrame(distance_matrix, columns=['origin', 'destination', 'out_status', 'element_status', 'distance', 'duration'])\n closest_destination = df.loc[df['distance'] == min(df['distance']),'destination']\n closest_destination = closest_destination[closest_destination.index[0]]\n return closest_destination\n except Exception as e:\n print(\"The error is \", e)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"mapping_functions.py","file_name":"mapping_functions.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110343292","text":"import pygame\r\nfrom batoocolors import *\r\n\r\nPATH = './drawing/'\r\nboard_topleft = (0, 70)\r\nboard_tile_offset = 50\r\ntileSize = 55\r\n\r\nclass Stone(pygame.sprite.Sprite):\r\n\r\n def __init__(self, color, grid, stone_type = 'normal'):\r\n\r\n # tileSize = 55 px\r\n self.color = color\r\n if type(color) == str:\r\n if color == \"B\":\r\n self.color = BLACK\r\n else:\r\n self.color = WHITE\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n '''\r\n offset = 5\r\n radius = tileSize / 2 - offset\r\n self.image = pygame.Surface((2 * radius, 2 * radius))\r\n self.image.fill(WHITE)\r\n '''\r\n\r\n if stone_type == 'normal': #1\r\n if self.color == WHITE:\r\n self.image = pygame.image.load(PATH + 'white.png')\r\n else:\r\n self.image = pygame.image.load(PATH + 'black.png')\r\n elif stone_type == 'hidden': #2\r\n if self.color == WHITE:\r\n self.image = pygame.image.load(PATH + 'white_hidden.png')\r\n else:\r\n self.image = pygame.image.load(PATH + 'black_hidden.png')\r\n elif stone_type == 'base': #3\r\n if self.color == WHITE:\r\n self.image = pygame.image.load(PATH + 'white_base.png')\r\n else:\r\n self.image = pygame.image.load(PATH + 'black_base.png')\r\n\r\n '''\r\n if self.color != WHITE:\r\n self.image.set_colorkey(WHITE)\r\n pygame.draw.circle(self.image, self.color, (radius, radius), radius)\r\n else:\r\n pygame.draw.circle(self.image, BLACK, (radius, radius), radius, 1)\r\n self.radius = radius\r\n '''\r\n self.rect = self.image.get_rect()\r\n self.grid = grid\r\n\r\n def draw(self, screen):\r\n\r\n x = board_topleft[0] + (self.grid[0]-1)*tileSize + board_tile_offset\r\n y = board_topleft[1] + (self.grid[1]-1)*tileSize + board_tile_offset\r\n self.rect.center = (x, y)\r\n screen.blit(self.image, self.rect)\r\n\r\n\r\nclass Board(pygame.sprite.Sprite):\r\n\r\n def __init__(self, size=(11, 11)):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.size = size # (11, 11)\r\n '''\r\n offset = 0 # needs to display the edge line in self.image\r\n self.image = pygame.Surface(((size[0] - 1) * tileSize + offset, (size[1] - 1) * tileSize + offset))\r\n self.image.fill(WHITE)\r\n '''\r\n\r\n self.image = pygame.image.load(PATH + 'board.png')\r\n self.rect = self.image.get_rect()\r\n '''\r\n for col in range(size[0]):\r\n start_pos = col*tileSize, 0\r\n end_pos = col*tileSize, (size[1]-1)*tileSize\r\n pygame.draw.line(self.image, BLACK, start_pos, end_pos, 1)\r\n\r\n for row in range(size[1]):\r\n start_pos = 0, 
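# The closest-branch selection in mapping_functions.py above filters the
# DataFrame on min(df['distance']) and then unwraps the first index by hand.
# pandas' idxmin() does the same selection in one step (sketch with a made-up
# two-row distance matrix):
import pandas as pd

df = pd.DataFrame(
    [(560001, 560035, 'OK', 'OK', 12.4, '25 mins'),
     (560001, 560095, 'OK', 'OK', 7.1, '14 mins')],
    columns=['origin', 'destination', 'out_status', 'element_status',
             'distance', 'duration'])
closest = df.loc[df['distance'].idxmin(), 'destination']
assert closest == 560095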
row*tileSize\r\n end_pos = (size[0]-1)*tileSize, row*tileSize\r\n pygame.draw.line(self.image, BLACK, start_pos, end_pos, 1)\r\n '''\r\n\r\n def draw(self, screen):\r\n\r\n screen.blit(self.image, self.rect)\r\n\r\n def project(self, map):\r\n\r\n self.image = pygame.image.load(PATH + 'board.png')\r\n offset = 40 # 50 - 10\r\n for p in map.plus:\r\n center = ( offset + (p[0]-1)*tileSize, offset + (p[1]-1)*tileSize )\r\n plus = pygame.image.load(PATH + 'plus.png')\r\n self.image.blit(plus, center)\r\n for m in map.minus:\r\n center = ( offset + (m[0]-1)*tileSize, offset + (m[1]-1)*tileSize )\r\n minus = pygame.image.load(PATH + 'minus.png')\r\n self.image.blit(minus, center)\r\n\r\n self.plus = map.plus\r\n self.minus = map.minus\r\n\r\nclass Cursor_Marking(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.size = tileSize / 2\r\n\r\n offset = 2 # wants to make it a bit smaller\r\n\r\n #self.image = pygame.Surface((self.size - offset, self.size - offset))\r\n #self.image.fill(BLACK)\r\n\r\n self.image = pygame.image.load(PATH + 'cursor.png')\r\n self.rect = self.image.get_rect()\r\n\r\n def draw(self, screen, pos):\r\n\r\n x = board_topleft[0] + (pos[0]-1)*tileSize + board_tile_offset\r\n y = board_topleft[1] + (pos[1]-1)*tileSize + board_tile_offset\r\n self.rect.center = (x, y)\r\n screen.blit(self.image, self.rect)\r\n\r\nclass Scoreboard(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(PATH + 'scoreboard.png')\r\n self.rect = self.image.get_rect()\r\n\r\n def draw(self, screen):\r\n\r\n screen.blit(self.image, self.rect)\r\n\r\n","sub_path":"references/batoo/batoo/batoosprites.py","file_name":"batoosprites.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168676292","text":"from auto_reply.package import *\n\ndef card_open(fromUser):\n values = pubCardUsers.objects.filter(wx_open_id=fromUser)\n if values.exists():\n value = values[0]\n if value.state == True and value.time_list == None:\n return '请在5分钟内回复你要提醒的时间,如:09:00,18:00(注:多个时间之间用逗号隔开哦,最多可设置4个时间)'\n elif value.state == False and value.time_list == None:\n value.state = True\n value.save()\n return '请在5分钟内回复你要提醒的时间,如:09:00,18:00(注:多个时间之间用逗号隔开哦,最多可设置4个时间)'\n elif value.time_list != None:\n value.state = True\n value.save()\n if value.bind_name == None:\n pwd = get_bind_name(fromUser, 0)\n return '您当前已设置提醒时间为({}),但还未绑定微信/QQ,绑定后即可收到打卡提醒哦\\n\\n{}'.format(value.time_list, pwd)\n else:\n pwd = get_bind_name(fromUser, 0)\n return '您当前设置的提醒时间为({}),如需更改请在5分钟内回复你要提醒的新时间哦\\n\\n{}'.format(value.time_list, pwd)\n else:\n return '您当前设置的提醒时间为({}),如需更改请在5分钟内回复你要提醒的新时间哦,如:09:00,18:00(注:多个时间之间用逗号隔开哦,最多可设置4个时间)'.format(value.time_list)\n else:\n pub = pubCardUsers()\n pub.wx_open_id = fromUser\n pub.save()\n return '请在5分钟内回复你要提醒的时间,如:09:00,18:00(注:多个时间之间用逗号隔开哦,最多可设置4个时间)'\n\ndef card_set_time(text, fromUser):\n text = text.replace(' ', '')\n text = text.replace(':', ':')\n if ',' in text and ':' in text:\n text = text.replace(',', ',')\n text = text.replace(':', ':')\n if ',' in text:\n text = text.replace(',', ',')\n if ':' in text:\n text = text.replace(':', ':')\n if ',' in text:\n tx_time_list = []\n tx_times = text.split(',')\n if len(tx_times) < 5:\n for tx in tx_times:\n if ':' in tx:\n tx_list = tx.split(':')\n if len(tx_list) == 2 and len(tx_list[0]) < 3 and len(tx_list[1]) < 3:\n txs = is_time(tx)\n if len(txs) 
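# Stone.draw (earlier in batoosprites.py) and Cursor_Marking.draw above repeat
# the same grid-to-pixel arithmetic. Factoring it into one helper keeps the
# two in sync (sketch; the constants mirror the module-level values defined at
# the top of that file):
board_topleft = (0, 70)
board_tile_offset = 50
tileSize = 55

def grid_to_pixel(col, row):
    """Map a 1-based board coordinate to the pixel centre of its tile."""
    x = board_topleft[0] + (col - 1) * tileSize + board_tile_offset
    y = board_topleft[1] + (row - 1) * tileSize + board_tile_offset
    return x, y

assert grid_to_pixel(1, 1) == (50, 120)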
== 5:\n tx_time_list.append(txs)\n else:\n return txs\n else:\n return '时间格式不正确,请修改后重新发送'\n else:\n return '时间格式不正确,请修改后重新发送'\n if len(tx_times) == len(tx_time_list):\n tx_time_list = ','.join(tx_time_list)\n values = pubCardUsers.objects.filter(wx_open_id=fromUser)\n value = values[0]\n value.time_list = tx_time_list\n value.time_num = len(tx_times)\n value.state = True\n value.save()\n pwd = get_bind_name(fromUser, 1)\n if '你已绑定' in pwd:\n return '恭喜你,时间设置成功,到点您会收到打卡信息哦\\n\\n{}'.format(pwd)\n return '恭喜你,时间设置成功,但还未绑定微信/QQ,绑定后即可收到打卡提醒哦\\n\\n{}'.format(pwd)\n else:\n return '最多只能设置4个时间哦'\n else:\n if ':' in text:\n text_list = text.split(':')\n if len(text_list) == 2 and len(text_list[0]) < 3 and len(text_list[1]) < 3:\n txs = is_time(text)\n if len(txs) == 5:\n values = pubCardUsers.objects.filter(wx_open_id=fromUser)\n value = values[0]\n value.time_list = txs\n value.time_num = 1\n value.state = True\n value.save()\n pwd = get_bind_name(fromUser, 1)\n if '你已绑定' in pwd:\n return '恭喜你,时间设置成功,到点您会收到打卡信息哦\\n\\n{}'.format(pwd)\n return '恭喜你,时间设置成功,但还未绑定微信/QQ,绑定后即可收到打卡提醒哦\\n\\n{}'.format(pwd)\n else:\n return txs\n else:\n return '时间格式不正确,请修改后重新发送'\n else:\n return '时间格式不正确,请修改后重新发送'\n\ndef is_time(tx_time):\n hours = None\n min = None\n nums = tx_time.split(':')\n if nums[0].isdigit():\n if len(nums[0]) == 1:\n hours = '0{}'.format(nums[0])\n elif len(nums[0]) == 2 and int(nums[0]) < 24:\n hours = nums[0]\n else:\n return '小时格式不正确,请修改后重新发送'\n else:\n return '小时格式不正确,请修改后重新发送'\n if nums[1].isdigit():\n if len(nums[1]) == 1:\n min = '0{}'.format(nums[1])\n elif len(nums[1]) == 2 and int(nums[1]) < 60:\n min = nums[1]\n else:\n return '分钟格式不正确,请修改后重新发送'\n else:\n return '分钟格式不正确,请修改后重新发送'\n if hours != None and min != None:\n return '{}:{}'.format(hours, min)","sub_path":"public/auto_reply/package/card_remind.py","file_name":"card_remind.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359744795","text":"import urllib.request\nimport re\nimport time\n\n\ndef save_waterinfo(name,sex, lv,tiebaages,ties,fans,isvip):\n waterinfo_file=open(\"/home/hsx/py/tiebawaters/water_info.dat\",\"a\")\n waterinfos=\"%s\\t\"%name+\"%s\\t\"%sex+\"%s\\t\"%lv+\"%s\\t\"%tiebaages+\"%s\\t\"%ties+\"%s\\t\"%fans+\"%s\\t\\n\"%isvip\n #print(\"%s\\t\"%name+\"%s\\t\"%sex+\"%s\\t\"%lv+\"%s\\t\"%tiebaages+\"%s\\t\"%ties+\"%s\\t\"%fans+\"%s\\t\"%isvip+\"\\n\")\n try:\n waterinfo_file.write(waterinfos)\n finally:\n waterinfo_file.close()\n\ndef save_errorwaterinfo(watername):\n errorwaterinfo_file = open(\"/home/hsx/py/tiebawaters/water_info.dat\", \"a\")\n waterinfos = \"访问 %s\"%watername+\" 的主页出错\\n\"\n try:\n errorwaterinfo_file.write(waterinfos)\n finally:\n errorwaterinfo_file.close()\n\ndef water_info(watername,waterlv):\n waterurls=0\n re_watersex = r'(?<=\\\"sex\\\":\\\").+?(?=\\\",\\\"tb_age\\\")'\n re_isvip=r'(?<=\\\"tb_vip\\\":).+?(?=,\\\"followed)'\n re_watertbage = r'(?<=\\\"tb_age\\\":).+?(?=,\\\"post_num\\\")'\n re_ties = r'(?<=\\\"post_num\\\":).+?(?=,\\\"honor\\\")'\n re_fans=r'(?<=\\\"followed_count\\\":).+?(?=\\}\\})'\n tiebahead=\"http://tieba.baidu.com/home/get/panel?ie=utf-8&un=\"\n while waterurls\\\"头像\\\")'\n re_watername=r'(?<=class=\\\"user_name\" title=\\\").+?(?=\\\">)'\n re_waterlv=r'(?<=bawu-info-).+?(?=\">)'\n re_waterhome=r'(?<=
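# card_remind.py above normalises full-width punctuation and then validates
# hours and minutes digit by digit in is_time(). datetime.strptime already
# enforces the HH:MM contract, so the whole check collapses to the sketch
# below (same zero-padding behaviour as is_time; the 4-time cap stays with
# the caller):
from datetime import datetime

def normalize_times(text):
    text = text.replace(' ', '').replace('，', ',').replace('：', ':')
    times = []
    for piece in text.split(','):
        try:
            times.append(datetime.strptime(piece, '%H:%M').strftime('%H:%M'))
        except ValueError:
            return None  # caller reports the format error
    return times

assert normalize_times('9:5,18:00') == ['09:05', '18:00']
assert normalize_times('25:00') is None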
    共).+?(?=页<)'\n str_totalpages = re.findall(re_totalpages, sourcecode, re.I | re.M | re.S)\n totalpages=int(str_totalpages[0])\n while i<459:\n tiebaurl=pzhutieba+str(i)\n tieba_waterinfo(tiebaurl)\n i+=1\n print (totalpages)\n end_time=time.time()\n use_time=end_time-start_time\n print(\"下载完成,耗时:%f秒\"%use_time)\n","sub_path":"pzhutieba/pzhutieba_spider.py","file_name":"pzhutieba_spider.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"73838845","text":"import random\nimport sys\nimport time\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import*\nfrom 안수현컴퍼니2 import game\nfrom 안수현컴퍼니3 import game2\nfrom 안수현컴퍼니4 import game3\n\n\n#프로그램 시작부분\n\nclass Start(QWidget) :\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n easyButton = QPushButton('EASY',self)\n easyButton.clicked.connect(self.easyClicked)\n\n normalButton = QPushButton(\"NORMAL\",self)\n normalButton.clicked.connect(self.normalClicked)\n hardButton = QPushButton(\"HARD\",self)\n hardButton.clicked.connect(self.hardClicked)\n\n hbox = QHBoxLayout()\n hbox.addWidget(easyButton)\n hbox.addWidget(normalButton)\n hbox.addWidget(hardButton)\n self.setLayout(hbox)\n self.setWindowTitle(\"CARD GAME\")\n self.setGeometry(300, 300 ,500, 500)\n\n def easyClicked(self):\n self.hide()\n self.userDialog = QDialog(self)\n self.userDialog.ui = game()\n self.userDialog.ui.show()\n\n def normalClicked(self):\n self.hide()\n self.userDialog = QDialog(self)\n self.userDialog.ui = game2()\n self.userDialog.ui.show()\n\n def hardClicked(self):\n self.hide()\n self.userDialog = QDialog(self)\n self.userDialog.ui = game3()\n self.userDialog.ui.show()\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n start = Start()\n start.show()\n app.exec_()\n\n","sub_path":"안수현컴퍼니.py","file_name":"안수현컴퍼니.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"513958683","text":"# -*- coding = utf-8 -*-\nimport json\nimport requests\n\nurl = 'https://api.thinkpage.cn/v3/weather/now.json?'\n\ndef fetchWeather(location):\n preload = {'key': 'pxwlfvypg6plfzkj', 'location':location}\n result = requests.get(url, params=preload, timeout=2)\n return result.text \n\ndef sortData(location,historyList):\n\n weather_data = json.loads(fetchWeather(location))\n if 'status' in weather_data :\n print ('No result. 
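# pzhutieba_spider.py above pulls fields out of Baidu's responses with
# lookbehind/lookahead regex pairs. The extraction style in miniature (the
# payload below is a made-up sample, not real Tieba output):
import re

payload = '{"sex":"female","tb_age":3.1,"post_num":124}'
assert re.findall(r'(?<="sex":").+?(?=","tb_age")', payload) == ['female']
assert re.findall(r'(?<="tb_age":).+?(?=,"post_num")', payload) == ['3.1']
# For well-formed JSON, json.loads is the sturdier choice:
import json
assert json.loads(payload)['sex'] == 'female'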
please type again!')\n else:\n city_name = weather_data['results'][0]['location']['name']\n city_weather = weather_data['results'][0]['now']['text']\n city_temperature = weather_data['results'][0]['now']['temperature']\n city_updatetime = weather_data['results'][0]['last_update']\n\n item = city_name + ' ' + city_weather + ' ' + city_temperature + '°C'+' '+'更新時間: ' + city_updatetime\n historyList.append(item)\n print (item)\n\n return historyList\n\ndef main():\n historyList = []\n\n while True:\n\n key = input('请输入想查询的城市:')\n\n if key == 'quit':\n print ('\\n查詢歷史')\n for i in set(historyList):\n print (i)\n print ('感謝使用')\n quit()\n elif key == 'help':\n print('\\n输入城市名,查询当前天气\\n'\n '输入help,查看帮助\\n'\n '输入history,查看查询历史\\n'\n '输入quit,退出应用\\n')\n elif key == 'history':\n for i in historyList:\n print (i)\n print ('\\n')\n else:\n historyList = sortData(key, historyList)\n print('\\n')\n\n \n print('\\n')\n\nif __name__ == '__main__':\n main()\n \n\n\n \n","sub_path":"Chap3/project/API_test.py","file_name":"API_test.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439231959","text":"# use the dfs searching algorithm\r\nfrom itertools import permutations\r\nimport time\r\n\r\nstart = time.time()\r\nseq_generator = lambda f, n, N: [str(f(k)) for k in range(n, N+1)]\r\n\r\nT = lambda n: n*(n+1)/2\r\nS = lambda n: n**2\r\nP = lambda n: n*(3*n-1)/2\r\nHEX = lambda n: n*(2*n-1)\r\nHEP = lambda n: n*(5*n-3)/2\r\nO = lambda n: n*(3*n-2)\r\nSeq1 = seq_generator(T, 45, 140)\r\nSeq2 = seq_generator(S, 32, 99)\r\nSeq3 = seq_generator(P, 26, 81)\r\nSeq4 = seq_generator(HEX, 23, 70)\r\nSeq5 = seq_generator(HEP, 21, 63)\r\nSeq6 = seq_generator(O, 19, 58)\r\nSEQ = [[], Seq1, Seq2, Seq3, Seq4, Seq5, Seq6]\r\n\r\ndef dfs(Seq, times, l, order):\r\n if times > 4:\r\n return\r\n base = SEQ[order[times + 1]]\r\n for each in Seq:\r\n temp = [x for x in base if each[2:] == x[0:2] and l.count(x) == 0]\r\n if temp == []:\r\n continue\r\n l[times] = each\r\n if times == 4 and l[0][0:2] == temp[0][2:]:\r\n l[-1] = temp[0]\r\n print(l, sum([int(k) for k in l]))\r\n print(order)\r\n end = time.time()\r\n print(end-start)\r\n exit()\r\n dfs(temp, times+1, l, order)\r\n l[times]='0'\r\n\r\ndef main():\r\n base = [1,2,3,4,5,6]\r\n per_base = permutations(base, len(base))\r\n for each in per_base:\r\n dfs(Seq=SEQ[each[0]], times=0, l=['0']*6, order=each)\r\nmain()","sub_path":"question61.py","file_name":"question61.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"25441712","text":"\n#-------------------------------------------------------------------------------\n\n# TASK 0 (example)\n\n# EXAMPLE: this function is implemented for you, to show \n# what a function definition looks like, and how the\n# 'student' added four lines to complete the definition.\n\ndef is_even(n):\n\t# at the end, we'll return whatever current value\n\t# that's in ans as our return value. Somewhere in \n\t# this function, you should re-assign it to be\n\t# either True or to False.\n\tans = None\n\t\n\t# make decisions with if-else structures to determine\n\t# whether n is even (divisible by two) or not. Then,\n\t# set ans to equal True or equal False as your answer.\n\t\n\t# YOUR CODE GOES HERE. 
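# question61.py above relies on the closed forms for the polygonal numbers,
# e.g. T(n)=n(n+1)/2 and P(n)=n(3n-1)/2. A quick sanity check of two of the
# generators before trusting the DFS built over them:
T = lambda n: n * (n + 1) // 2
P = lambda n: n * (3 * n - 1) // 2
assert [T(n) for n in range(1, 6)] == [1, 3, 6, 10, 15]
assert [P(n) for n in range(1, 6)] == [1, 5, 12, 22, 35]
# The search then chains numbers whose last two digits match the next
# number's first two digits (each[2:] == x[0:2]) and closes the cycle after
# six links.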
(Since it's an example, we've\n\t# already written \"your code\" - four lines).\n\t\n\tif n % 2 == 0 :\n\t\tans = True\n\telse:\n\t\tans = False\n\t\n\t# make this the last line of your function definition\n\treturn ans\n\n#-------------------------------------------------------------------------------\n\n# TASK 1\n\n# given a non-negative integer, this function returns a\n# string (it does not print!) matching the letter grade\n# for our class (check the syllabus).\ndef letter_grade(score):\n\t# starting value for variable ans. Change it before\n\t# the end of the function.\n\tans = \"\"\n\t\n\t# YOUR CODE GOES HERE. Figure out what string you want\n\t# to assign to the ans variable, and assign it.\n\t# Checking the value of score compared to the letter grades\t\n\tif (score >= 98):\n\t\tans = \"A+\"\n\t# Because it already passed the first if statement, you don't need to\n\t# check the upper range as well\t\n\telif (score >= 92):\n\t\tans = \"A\"\n\telif (score >= 90):\n\t\tans = \"A-\"\n\telif (score >= 88):\n\t\tans = \"B+\"\n\telif (score >= 82):\n\t\tans = \"B\"\n\telif (score >= 80):\n\t\tans = \"B-\"\n\telif (score >= 78):\n\t\tans = \"C+\"\n\telif (score >= 72):\n\t\tans = \"C\"\n\telif (score >= 70):\n\t\tans = \"C-\"\n\telif (score >= 60):\n\t\tans = \"D\"\n\t# Now that we know it can't be anything else, it must be an \"F\"\t\n\telse:\n\t\tans = \"F\"\t\n\t\n\t\n\t# leave this as the last line of your function.\n\treturn ans\n\n#-------------------------------------------------------------------------------\n\n# TASK 2\n\n# without calling the max(), min(), or any sorting functionality,\n# this function determines the two largest values of the three \n# and returns their sum. The integers might be negative. When\n# there's a tie between two numbers, it doesn't actually matter\n# which one you choose.\n\ndef sum2biggest(a, b, c):\n\t# starting value for variable ans. Replace it with the\n\t# actual answer integer before reaching the return stmt.\n\tans = None\n\tfirst = None\n\tsecond = None\n\t\n\t# find the sum of the two largest values. 
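# letter_grade above is a long elif ladder over score cut-offs. bisect over
# the same boundaries gives an equivalent two-line lookup (sketch; the
# cut-offs are copied straight from the ladder):
import bisect

CUTS = [60, 70, 72, 78, 80, 82, 88, 90, 92, 98]
GRADES = ['F', 'D', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+']

def letter_grade_alt(score):
    return GRADES[bisect.bisect_right(CUTS, score)]

assert letter_grade_alt(92) == 'A'
assert letter_grade_alt(59) == 'F'
assert letter_grade_alt(98) == 'A+'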
Re-assign the\n\t# answer to the ans variable.\n\t# YOUR CODE GOES HERE\n\t# Checking to see if \"a\" is the largest and sets it as such\t\n\tif (a >= b and a >= c):\t\t\n\t\tfirst = a\n\t\t# If a is the largest, we need to find the second largest\t\t\n\t\tif (b >= c):\n\t\t\tsecond = b\n\t\telse:\n\t\t\tsecond = c\n\t# Same thing we did with a, just for b instead\t\n\telif (b >= a and b >= c):\n\t\tfirst = b\n\t\tif (a >= c):\n\t\t\tsecond = a\n\t\telse:\n\t\t\tsecond = c\n\t# Same thing we did with a and b, just for c instead\n\telif (c >= a and c >= b):\n\t\tfirst = c\n\t\tif (a >= b):\n\t\t\tsecond = a\n\t\telse:\n\t\t\tsecond = b\n\t# Adding the two largest together and setting the value of it to \"ans\"\n\tans = first + second\n\n\t# leave this as the last line of your function.\n\treturn ans\n\n#-------------------------------------------------------------------------------\n\n\t\n","sub_path":"Lab2/smurph21_205_L2.py","file_name":"smurph21_205_L2.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"472439047","text":"\"\"\"\nCreated on Apr 17, 2014\n\n@author: Thavanathan\n\"\"\"\n\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom tastypie.api import Api\nfrom names.api import NameResource, MaleNameResource, FemaleNameResource, SearchNameResource, RandomNameResource\n\nv1_api = Api(api_name='v1')\n\nv1_api.register(NameResource())\nv1_api.register(MaleNameResource())\nv1_api.register(FemaleNameResource())\nv1_api.register(SearchNameResource())\nv1_api.register(RandomNameResource())\n\nurlpatterns = patterns('',\n url(r'^$', include('names.urls')),\n url(r'^api/', include(v1_api.urls)),\n url(r'^admin/', include(admin.site.urls)),\n)","sub_path":"names_api/names_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"401732520","text":"from itertools import groupby\n\n\ndef compress(number):\n result = []\n for key, group in groupby(str(number)):\n result.append((key, str(len(list(group)))))\n result = [''.join(item) for item in result]\n return '_'.join(result)\n\ndef decompress(number):\n result = [tuple(item) for item in number.split(\"_\")]\n result = [i * int(j) for i,j in result]\n return int(\"\".join(result))\n\nprint(decompress('12_12'))","sub_path":"Exercise015.py","file_name":"Exercise015.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"107192346","text":"from django.conf.urls import url\n\nfrom . 
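# A note on decompress() in Exercise015.py above: tuple(item) assumes each
# token is exactly two characters, so a run of ten or more identical digits
# (compress emits e.g. '110' for ten 1s) raises "too many values to unpack".
# Splitting the digit from the count explicitly handles any run length:
def decompress_fixed(number):
    return int(''.join(tok[0] * int(tok[1:]) for tok in number.split('_')))

assert decompress_fixed('12_12') == 1111      # same answer as the original
assert decompress_fixed('110') == 1111111111  # the case that broke before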
import views\n\napp_name = \"convos\"\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^convos/$', views.get_convos, name='get_convos'),\n url(r'^convos/(?P[A-z0-9]+)$', views.convo_detail, name='convo_detail'),\n]\n","sub_path":"twitter_convos/convos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"531882114","text":"#!/usr/bin/env python\n\nimport os\nfrom glob import glob\nimport json\nimport codecs\nimport pandas\nimport datetime\n\nhere = os.path.dirname(os.path.abspath(__file__))\nfolder = os.path.basename(here)\nlatest = '%s/latest' % here\nyear = datetime.datetime.today().year\n\noutput_data = os.path.join(here, 'data-latest.tsv')\noutput_year = os.path.join(here, 'data-%s.tsv' % year)\n\n# Don't continue if we don't have latest folder\nif not os.path.exists(latest):\n print('%s does not have parsed data.' % folder)\n sys.exit(0)\n\n# Don't continue if we don't have results.json\nresults_json = os.path.join(latest, 'records.json')\nif not os.path.exists(results_json):\n print('%s does not have results.json' % folder)\n sys.exit(1)\n\nwith open(results_json, 'r') as filey:\n results = json.loads(filey.read())\n\ncolumns = ['charge_code', \n 'price', \n 'description', \n 'hospital_id', \n 'filename', \n 'charge_type']\n\ndf = pandas.DataFrame(columns=columns)\n\n# First parse standard charges (doesn't have DRG header)\nfor result in results:\n filename = os.path.join(latest, result['filename'])\n if not os.path.exists(filename):\n print('%s is not found in latest folder.' % filename)\n continue\n\n if os.stat(filename).st_size == 0:\n print('%s is empty, skipping.' % filename)\n continue\n\n charge_type = 'standard'\n\n print(\"Parsing %s\" % filename)\n\n if filename.endswith('json'):\n\n with codecs.open(filename, \"r\", encoding='utf-8-sig', errors='ignore') as filey:\n content = json.loads(filey.read())\n\n charge_types = {'DRG': 'drg', \n 'IP': 'inpatient', \n 'OP': 'outpatient', \n 'RX': 'pharmacy', \n 'SUP': 'supply'}\n\n for row in content['CDM']:\n hospital = result[\"hospital_id\"]\n if 'HOSPITAL_NAME' in row:\n hospital = row['HOSPITAL_NAME']\n description_key = 'DESCRIPTION'\n if description_key not in row:\n description_key = 'DESCRIPION'\n charge_type = charge_types[row['SERVICE_SETTING']]\n idx = df.shape[0] + 1\n entry = [row['CDM'], # charge code\n row['CHARGE'], # price\n row[description_key], # description\n hospital, # hospital_id\n result['filename'],\n charge_type] \n df.loc[idx,:] = entry\n\n\n# Remove empty rows\ndf = df.dropna(how='all')\n\n# Save data!\nprint(df.shape)\ndf.to_csv(output_data, sep='\\t', index=False)\ndf.to_csv(output_year, sep='\\t', index=False)\n","sub_path":"data/st.-luke’s-hospital-(san-francisco)/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"441156825","text":"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\n\nimport numpyro.distributions as dist\n\nfrom funsor.distribution import ( # noqa: F401\n Bernoulli,\n FUNSOR_DIST_NAMES,\n LogNormal,\n backenddist_to_funsor,\n eager_beta,\n eager_binomial,\n eager_categorical_funsor,\n eager_categorical_tensor,\n eager_delta_funsor_funsor,\n eager_delta_funsor_variable,\n eager_delta_tensor,\n eager_delta_variable_variable,\n eager_multinomial,\n eager_mvn,\n 
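# parse.py above grows the result frame one row at a time with
# df.loc[idx, :] = entry, which re-allocates on every insert. Collecting rows
# in a plain list and building the DataFrame once at the end is the usual
# idiom (sketch; the sample entry is illustrative):
import pandas as pd

columns = ['charge_code', 'price', 'description', 'hospital_id',
           'filename', 'charge_type']
rows = [['123', '45.00', 'example charge', 'hosp-1', 'records.json', 'drg']]
df = pd.DataFrame(rows, columns=columns)
assert df.shape == (1, 6)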
eager_normal,\n indepdist_to_funsor,\n make_dist,\n maskeddist_to_funsor,\n mvndist_to_funsor,\n transformeddist_to_funsor,\n)\nfrom funsor.domains import reals\nfrom funsor.tensor import Tensor, dummy_numeric_array\nfrom funsor.terms import Funsor, Variable, eager, to_funsor\n\n\n################################################################################\n# Distribution Wrappers\n################################################################################\n\n\nclass _NumPyroWrapper_Binomial(dist.BinomialProbs):\n pass\n\n\nclass _NumPyroWrapper_Categorical(dist.CategoricalProbs):\n # this fix is not available in NumPyro 0.2.4\n @property\n def support(self):\n return dist.constraints.integer_interval(0, self.probs.shape[-1] - 1)\n\n\nclass _NumPyroWrapper_Multinomial(dist.MultinomialProbs):\n pass\n\n\nclass _NumPyroWrapper_NonreparameterizedBeta(dist.Beta):\n has_rsample = False\n\n\nclass _NumPyroWrapper_NonreparameterizedDirichlet(dist.Dirichlet):\n has_rsample = False\n\n\nclass _NumPyroWrapper_NonreparameterizedGamma(dist.Gamma):\n has_rsample = False\n\n\nclass _NumPyroWrapper_NonreparameterizedNormal(dist.Normal):\n has_rsample = False\n\n\ndef _get_numpyro_dist(dist_name):\n if dist_name in ['Binomial', 'Categorical', 'Multinomial'] or dist_name.startswith('Nonreparameterized'):\n return globals().get('_NumPyroWrapper_' + dist_name)\n else:\n return getattr(dist, dist_name, None)\n\n\nNUMPYRO_DIST_NAMES = FUNSOR_DIST_NAMES\n_HAS_RSAMPLE_DISTS = ['Beta', 'Dirichlet', 'Gamma', 'Normal', 'MultivariateNormal']\n\n\nfor dist_name, param_names in NUMPYRO_DIST_NAMES:\n numpyro_dist = _get_numpyro_dist(dist_name)\n if numpyro_dist is not None:\n # resolve numpyro distributions do not have `has_rsample` attributes\n has_rsample = getattr(numpyro_dist, 'has_rsample',\n not getattr(numpyro_dist, \"is_discrete\", dist_name not in _HAS_RSAMPLE_DISTS))\n if has_rsample:\n numpyro_dist.has_rsample = True\n numpyro_dist.rsample = numpyro_dist.sample\n locals()[dist_name] = make_dist(numpyro_dist, param_names)\n\n# Delta has to be treated specially because of its weird shape inference semantics\nDelta._infer_value_domain = classmethod(lambda cls, **kwargs: kwargs['v']) # noqa: F821\n\n\n# Multinomial and related dists have dependent bint dtypes, so we just make them 'real'\n# See issue: https://github.com/pyro-ppl/funsor/issues/322\n@functools.lru_cache(maxsize=5000)\ndef _multinomial_infer_value_domain(cls, **kwargs):\n instance = cls.dist_class(**{k: dummy_numeric_array(domain) for k, domain in kwargs.items()}, validate_args=False)\n return reals(*instance.event_shape)\n\n\nBinomial._infer_value_domain = classmethod(_multinomial_infer_value_domain) # noqa: F821\nMultinomial._infer_value_domain = classmethod(_multinomial_infer_value_domain) # noqa: F821\n\n\n###############################################\n# Converting PyTorch Distributions to funsors\n###############################################\n\nto_funsor.register(dist.Distribution)(backenddist_to_funsor)\nto_funsor.register(dist.Independent)(indepdist_to_funsor)\nif hasattr(dist, \"MaskedDistribution\"):\n to_funsor.register(dist.MaskedDistribution)(maskeddist_to_funsor)\nto_funsor.register(dist.TransformedDistribution)(transformeddist_to_funsor)\nto_funsor.register(dist.MultivariateNormal)(mvndist_to_funsor)\n\n\n@to_funsor.register(dist.BinomialProbs)\n@to_funsor.register(dist.BinomialLogits)\ndef categorical_to_funsor(numpyro_dist, output=None, dim_to_name=None):\n new_pyro_dist = 
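# Three converters in this module are all spelled categorical_to_funsor even
# though they wrap Binomial, Categorical and Multinomial. Because
# @to_funsor.register fires at definition time, the dispatch table still ends
# up correct -- only the module-level name gets shadowed. The pattern in
# miniature with functools.singledispatch:
import functools

@functools.singledispatch
def describe(value):
    return repr(value)

@describe.register(int)
def convert(value):   # name reused, as in the module above
    return f'int:{value}'

@describe.register(float)
def convert(value):   # shadows the previous convert; registry unaffected
    return f'float:{value}'

assert describe(1) == 'int:1'
assert describe(1.5) == 'float:1.5'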
_NumPyroWrapper_Binomial(probs=numpyro_dist.probs)\n return backenddist_to_funsor(new_pyro_dist, output, dim_to_name)\n\n\n@to_funsor.register(dist.CategoricalProbs)\n# XXX: in Pyro backend, we always convert pyro.distributions.Categorical\n# to funsor.torch.distributions.Categorical\n@to_funsor.register(dist.CategoricalLogits)\ndef categorical_to_funsor(numpyro_dist, output=None, dim_to_name=None):\n new_pyro_dist = _NumPyroWrapper_Categorical(probs=numpyro_dist.probs)\n return backenddist_to_funsor(new_pyro_dist, output, dim_to_name)\n\n\n@to_funsor.register(dist.MultinomialProbs)\n@to_funsor.register(dist.MultinomialLogits)\ndef categorical_to_funsor(numpyro_dist, output=None, dim_to_name=None):\n new_pyro_dist = _NumPyroWrapper_Multinomial(probs=numpyro_dist.probs)\n return backenddist_to_funsor(new_pyro_dist, output, dim_to_name)\n\n\neager.register(Beta, Funsor, Funsor, Funsor)(eager_beta) # noqa: F821)\neager.register(Binomial, Funsor, Funsor, Funsor)(eager_binomial) # noqa: F821\neager.register(Multinomial, Tensor, Tensor, Tensor)(eager_multinomial) # noqa: F821)\neager.register(Categorical, Funsor, Tensor)(eager_categorical_funsor) # noqa: F821)\neager.register(Categorical, Tensor, Variable)(eager_categorical_tensor) # noqa: F821)\neager.register(Delta, Tensor, Tensor, Tensor)(eager_delta_tensor) # noqa: F821\neager.register(Delta, Funsor, Funsor, Variable)(eager_delta_funsor_variable) # noqa: F821\neager.register(Delta, Variable, Funsor, Variable)(eager_delta_funsor_variable) # noqa: F821\neager.register(Delta, Variable, Funsor, Funsor)(eager_delta_funsor_funsor) # noqa: F821\neager.register(Delta, Variable, Variable, Variable)(eager_delta_variable_variable) # noqa: F821\neager.register(Normal, Funsor, Tensor, Funsor)(eager_normal) # noqa: F821\neager.register(MultivariateNormal, Funsor, Tensor, Funsor)(eager_mvn) # noqa: F821\n\n__all__ = list(x[0] for x in FUNSOR_DIST_NAMES if _get_numpyro_dist(x[0]) is not None)\n","sub_path":"funsor/jax/distributions.py","file_name":"distributions.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218411996","text":"import usaepay\nfrom usaepay import run_call\n\n\ndef get(data={}):\n\t\"\"\"Calls /invoices\n\tGet a list of invoices\n\n\tIf invoice_key is included:\n\tRetreive details of a invoice in database\n\n\tArgs:\n\t\tdata (dict) contents:\n\t\t\tlimit (str) optional\n\t\t\toffset (str) optional\n\n\tReturns:\n\t\tDictionary InvoiceResponse\n\n\tReturns:\n\t\tDictionary InvoiceList\n\t\"\"\"\n\tparams={}\n\tpath='/invoices'\n\tif 'invoice_key' in data:\n\t\tpath = path + '/' + data['invoice_key']\n\tif 'limit' in data:\n\t\tparams['limit']=data['limit']\n\tif 'offset' in data:\n\t\tparams['offset']=data['offset']\n\n\treturn run_call('get',path,data,params)\n\ndef post(data={}):\n\t\"\"\"Calls /invoices\n\tGenerate a credit card token\n\n\tArgs:\n\t\tdata (dict) InvoiceRequest\n\n\tReturns:\n\t\tDictionary InvoiceResponse\n\t\"\"\"\n\tparams={}\n\tpath='/invoices'\n\treturn run_call('post',path,data,params)\n\ndef delete(data={}):\n\t\"\"\"Calls /invoices/{invoice_key}\n\tDelete a invoice from the database\n\n\tArgs:\n\t\tdata (dict) contents:\n\t\t\tinvoice_key (str) required\n\n\tReturns:\n\t\tDictionary Status\n\t\"\"\"\n\tparams={}\n\tif not 'invoice_key' in data:\n\t\traise Exception('invoice_key required for invoices.delete()')\n\n\tpath='/invoices'+ '/' + data['invoice_key']\n\treturn run_call('delete',path,data,params)\n\ndef 
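# invoices.get/post/delete above all build the resource path the same way:
# append '/' + invoice_key when the caller supplies one. The rule, pulled out
# and checked on its own:
def build_path(base, data):
    return base + '/' + data['invoice_key'] if 'invoice_key' in data else base

assert build_path('/invoices', {}) == '/invoices'
assert build_path('/invoices', {'invoice_key': 'abc'}) == '/invoices/abc'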
put(data={}):\n\t\"\"\"Calls /invoices/{invoice_key}\n\tUpdate a invoice within the database\n\n\tArgs:\n\t\tdata (dict) contents:\n\t\t\tinvoice_key (str) required\n\t\t\tAlso can contain all fields from InvoiceRequest\n\n\tReturns:\n\t\tDictionary InvoiceResponse\n\t\"\"\"\n\tparams={}\n\tif not 'invoice_key' in data:\n\t\traise Exception('invoice_key required for invoices.put()')\n\n\tpath='/invoices'+ '/' + data['invoice_key']\n\treturn run_call('put',path,data,params)\n","sub_path":"invoices/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543414684","text":"from keras.utils import plot_model\nimport sys\nimport os\nsys.path.append(os.pardir)\ncurrent_dir = os.curdir\nos.chdir(os.pardir)\nfrom keras.models import load_model\nmodel_name = 'getCommentCount.h5'\nif os.path.exists(model_name):\n print(\"Loading model\")\n model = load_model(model_name)\n plot_model(model, to_file='Model/{0}.png'.format(model_name.replace(\".h5\",\"\")))\n print(\"saved model\")\nelse:\n print(\"Does not exist\")\n\n\n","sub_path":"Notebooks/Model/Graph_Model.py","file_name":"Graph_Model.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466258036","text":"\"\"\"\nRegression ensemble model\n-------------------------\n\"\"\"\nfrom sklearn.linear_model import LinearRegression\nfrom typing import Optional, List\n\nfrom darts.timeseries import TimeSeries\nfrom darts.models import EnsembleModel, StandardRegressionModel\nfrom darts.models.forecasting_model import ForecastingModel\nfrom darts.logging import get_logger, raise_if\n\nlogger = get_logger(__name__)\n\n\nclass RegressionEnsembleModel(EnsembleModel):\n def __init__(self,\n forecasting_models: List[ForecastingModel],\n regression_train_n_points: int,\n regression_model=LinearRegression(n_jobs=-1, fit_intercept=False)):\n \"\"\"\n Class for ensemble models using a regression model for ensembling individual models' predictions\n The provided regression model must implement fit() and predict() methods\n (e.g. scikit-learn regression models)\n\n Parameters\n ----------\n forecasting_models\n List of forecasting models whose predictions to ensemble\n regression_train_n_points\n The number of points to use to train the regression model\n regression_model\n Any regression model with predict() and fit() methods (e.g. 
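# Graph_Model.py above calls keras.utils.plot_model, which additionally needs
# pydot and the Graphviz binaries installed. A guarded wrapper turns the
# missing-dependency failure into a readable message (sketch; the exact
# exception raised for a missing Graphviz binary varies by version, so both
# ImportError and OSError are caught here):
def plot_if_possible(model, out_png):
    try:
        from keras.utils import plot_model
        plot_model(model, to_file=out_png)
    except (ImportError, OSError) as exc:
        print('skipping model plot ({}); install pydot and graphviz'.format(exc))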
from scikit-learn)\n Default: `sklearn.linear_model.LinearRegression(n_jobs=-1, fit_intercept=False)`\n \"\"\"\n super().__init__(forecasting_models)\n\n # wrap provided regression_model in a StandardRegressionModel (if not already the case)\n if isinstance(regression_model, StandardRegressionModel):\n # raise exception if train_n_points value is ambiguous\n model_train_n_points = regression_model.train_n_points\n raise_if(model_train_n_points is not None and regression_train_n_points != model_train_n_points,\n \"Provided StandardRegressionModel.train_n_points parameter doesn't match specified\"\n \" regression_train_n_points parameter.\",\n logger)\n\n # if it was None, set regression_model.train_n_points to regression_train_n_points\n regression_model.train_n_points = regression_train_n_points\n else:\n regression_model = StandardRegressionModel(regression_train_n_points, regression_model)\n\n self.regression_model = regression_model\n\n def fit(self, series: TimeSeries) -> None:\n super().fit(series)\n\n # spare train_n_points points to serve as regression target\n raise_if(len(self.training_series) <= self.regression_model.train_n_points,\n \"regression_train_n_points parameter too big (must be smaller or equal\" +\n \" to the number of points in training_series)\",\n logger)\n forecast_training = self.training_series[:-self.regression_model.train_n_points]\n regression_target = self.training_series[-self.regression_model.train_n_points:]\n\n # fit the forecasting models\n for model in self.models:\n model.fit(forecast_training)\n\n # predict train_n_points points for each model\n predictions = []\n for model in self.models:\n predictions.append(model.predict(self.regression_model.train_n_points))\n\n # train the regression model on the individual models' predictions\n self.regression_model.fit(train_features=predictions, train_target=regression_target)\n\n # prepare the forecasting models for further predicting by fitting\n # them with the entire data\n\n # Some models (incl. 
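# fit() above reserves the last regression_train_n_points of the training
# series as the regression target and fits the base forecasters on the rest.
# The index arithmetic, checked on a plain list stand-in for a TimeSeries:
series = list(range(10))
train_n_points = 3
forecast_training = series[:-train_n_points]
regression_target = series[-train_n_points:]
assert forecast_training == [0, 1, 2, 3, 4, 5, 6]
assert regression_target == [7, 8, 9]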
Neural-Network based models) may need to be 'reset'\n # to allow being retrained from scratch\n self.models = [model.untrained_model() if hasattr(model, 'untrained_model') else model\n for model in self.models]\n\n # fit the forecasting models\n for model in self.models:\n model.fit(self.training_series)\n\n def ensemble(self, predictions: List[TimeSeries]) -> TimeSeries:\n return self.regression_model.predict(predictions)\n","sub_path":"darts/models/regression_ensemble_model.py","file_name":"regression_ensemble_model.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"497770095","text":"'''\nWrite a program to take String if the length of String is odd print X pattern otherwise print INVALID.\n\nInput Format:\nTake a String as input from stdin.\n\nOutput Format:\nprint the desired Pattern or INVALID.\n\nExample Input:\nedyst\n\nOutput:\n\ne t\n d s \n y \n d s \ne t\n'''\nword = input()\n\nif len(word) % 2 == 0:\n print(\"INVALID\")\nelse:\n n = len(word)\n for i in range(n):\n for j in range(n):\n if(i == j) or (j == n-i-1):\n print(word[j],end='')\n else:\n print(' ',end='')\n print()\n\n","sub_path":"Patterns/Xpattern.py","file_name":"Xpattern.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130902577","text":"\r\n#Q= (q0,q1,q2,q3) -> estados\r\n#E= (abbaabb)-> entrada finita\r\n#R= (a,b,x)->alfabeto\r\n#Q0= (q0)-> estado inicial\r\n#x=(x)->simbolo do vazio\r\n#F= (q3)->estado final\r\n\r\n#Dicionario dinamico\r\n#d[\"\"+chave] = valor\r\n#Onde \"d\" é o dicionário, \"chave\" é a chave do elemento (\"primeiro\", \"segundo\" e \"terceiro\") e \"valor\" é o valor do elemento (\"janeiro\", \"fevereiro\" e \"março\").\r\n#print(c1.__dict__) printar os dados do do objeto em formato de dicionario\r\n\r\nimport os\r\n\r\nPontos = []\r\nwhile 1<2:\r\n\tos.system(\"cls\")\r\n\tprint(\"Bem vindo a maquina de turing\")\r\n\topcao = int(input(\"1) Criar uma maquina de turing \\n2) Executar a maquina de turing \\n3)Sair do programa\"))\r\n\tif opcao == 1: #CRIANDO MAQUINA DE TURING\r\n\t\tEstados={}\r\n\t\tprint(\"Criando maquina de turing\")\r\n\t\tNestados=int(input(\"Quantos estados devera haver ?\"))\r\n\t\ti=0\r\n\t\tfor i in range(i,Nestados,1):\r\n\t\t\tos.system(\"cls\")\r\n\t\t\tlista=[]\r\n\t\t\tNome= input(\"Digite o nome Ex: q1,q2,q3,q4\\n\")\r\n\t\t\tPontos.append(Nome)\r\n\t\t\tI= int(input(\"É um estado inicial ? 1)sim 2)nao\"))\r\n\t\t\tif I == 1:\r\n\t\t\t\tlista.append(\"Inicial\")\r\n\t\t\tv = int(input(\"possui simbolo do vazio no estado? (1)sim (2)nao\"))\r\n\t\t\tif v == 1:\r\n\t\t\t\tlista.append(input(\"Qual é o simbolo do vazio ?\"))\r\n\t\t\t\tlista.append(input(\"Digite o estado que o vazio ira levar ex: q1,q2,q3\"))\r\n\t\t\tletras= int(input(\"Quantas letras havera ?\"))\r\n\t\t\tj=0\r\n\t\t\tfor j in range(j,letras,1):\r\n\t\t\t\tlista.append(input(\"Digite a letra\"))\r\n\t\t\t\tlista.append(input(\"Digite o estado que a letra vai levar ex: q1,q2,q3\"))\r\n\t\t\tF= int(input(\"É um estado final ? 
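# Xpattern.py above prints word[j] exactly where j == i or j == n-1-i.
# Building the figure as a list of rows makes the condition easy to test:
def x_pattern(word):
    n = len(word)
    return [''.join(word[j] if j in (i, n - 1 - i) else ' '
                    for j in range(n))
            for i in range(n)]

rows = x_pattern('edyst')
assert rows[0] == 'e   t'
assert rows[2] == '  y  '
assert rows[4] == 'e   t'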
1)sim 2)nao\"))\r\n\t\t\tif F == 1:\r\n\t\t\t\tlista.append(\"Final\")\r\n\r\n\t\t\tEstados[\"\"+Nome]=lista\r\n\r\n\t\tprint(\"Estados Criados\")\r\n\t\tprint(Estados)\r\n\r\n\t\t\r\n\r\n\t\t\r\n\t\tseila=input(\"Digite qualquer tecla para continuar\")\r\n\r\n\telif opcao == 2: # EXECUTANDO MAQUINA DE TURING\r\n\t\ti=0\r\n\t\tprint(\"Executando maquina de turing\")\r\n\t\tentrada = []\r\n\t\tpalavra = input(\"Digite a palavra que ira ser processada\")\r\n\t\tfor i in range(i,len(palavra)):\r\n\t\t\tentrada.append(palavra[i])\r\n\r\n\t\tj=0\r\n\t\tfor j in range(j,len(entrada)):\r\n\t\t\tletra = entrada[j]\r\n\t\t\tos.system(\"cls\")\r\n\t\t\t\r\n\t\t\tif j == 0:\t\r\n\t\t\t\tk=0\r\n\t\t\t\tfor k in range(k,len(Pontos)):\r\n\t\t\t\t\tini= Estados[\"\"+Pontos[k]].count(\"Inicial\")\r\n\t\t\t\t\tif ini == 1:\r\n\t\t\t\t\t\tBusca= Pontos[k]\r\n\t\t\tprint(letra+\"--> leitura da fita na posição atual\")\r\n\t\t\tprint(Busca+\" -->Estado Inicial ou acessado no momento\")\r\n\t\t\tvalida = Estados[\"\"+Busca].count(letra)\r\n\t\t\t\r\n\t\t\tif valida >=1 :\r\n\t\t\t\tpos = Estados[\"\"+Busca].index(letra)\r\n\t\t\t\tprint(pos)\r\n\t\t\t\tBusca= Estados[\"\"+Busca][pos+1]\r\n\t\t\t\tprint(Busca +\" --> O proximo estado a ser buscado\")\r\n\t\t\t\tseila=input(\"proximo passo-->\")\r\n\t\t\telse:\r\n\t\t\t\tseila=input(\"A maquina de Turing nao aceita a palavra\")\r\n\t\t\t\tbreak\r\n\r\n\t\tletra=\"X\"\r\n\t\tvalida = Estados[\"\"+Busca].count(letra)\r\n\t\tif valida >=1 :\r\n\t\t\tpos = Estados[\"\"+Busca].index(letra)\r\n\t\t\tprint(pos)\r\n\t\t\tBusca= Estados[\"\"+Busca][pos+1]\r\n\t\t\tprint(Busca +\" --> O proximo estado a ser buscado\")\r\n\t\t\tseila=input(\"proximo passo-->\")\r\n\t\t\tfinal = Estados[\"\"+Busca].count(\"Final\")\r\n\t\t\tif final >= 1:\r\n\t\t\t\tseila= input(\"A palavra foi aceita com sucesso na maquina de Turing\\nAperte qualquer tecla para voltar ao menu\")\r\n\t\t\telse:\r\n\t\t\t\tseila= input(\"A maquina de Turing nao aceitou a palavra\")\t\r\n\t\telse:\r\n\t\t\tfinal = Estados[\"\"+Busca].count(\"Final\")\r\n\t\t\tif final >= 1:\r\n\t\t\t\tinput(\"A palavra foi aceita com sucesso na maquina de Turing\\nAperte qualquer tecla para voltar ao menu\")\r\n\t\t\telse:\r\n\t\t\t\tseila= input(\"A palavra nao foi aceita na maquina de turing\")\r\n\r\n\r\n\r\n\t\tseila=input(\"Digite qualquer tecla para continuar\")\r\n\r\n\telif opcao ==3:\r\n\t\tseila=input(\"Finalizando a maquina de Turing, aperte qualquer tecla para sair\")\r\n\t\tbreak;\r\n\telse:\r\n\t\tseila=input(\"Opção selecionada nao é valida, aperte qualquer tecla para voltar ao menu\")","sub_path":"MAQUINA DE TURING.py","file_name":"MAQUINA DE TURING.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"640105766","text":"# !usr/bin/env python\n# -*- coding:utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ndef print2file(buf, outFile):\n outfd = open(outFile, 'a')\n outfd.write(buf + '\\n')\n outfd.close()\n\ndef load_data():\n import pandas as pd\n data=pd.read_csv(\"/home/deermini/PycharmProjects/medical-analysis/GAN_for_medical_3/medical_data_analysis_201851/data/medical_data2.csv\")\n return data.values\n\ndef bn(x, is_training=True):\n return tf.contrib.layers.batch_norm(x,decay=0.9,updates_collections=None,epsilon=1e-5,\n scale=True,is_training=is_training)\n\ndef generator(inputX,reuse=False,is_training=True):\n Activation=tf.nn.relu\n 
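# The menu-driven "maquina de Turing" above ultimately walks a transition
# table one input symbol at a time; as written (one move per symbol, halting
# on a missing entry) it behaves like a finite automaton. The kernel of that
# simulation, with the 'Inicial'/'Final' flags made explicit parameters:
def accepts(word, transitions, start, finals):
    state = start
    for symbol in word:
        if (state, symbol) not in transitions:
            return False  # "A maquina de Turing nao aceita a palavra"
        state = transitions[(state, symbol)]
    return state in finals

trans = {('q0', 'a'): 'q1', ('q1', 'b'): 'q0'}
assert accepts('ab', trans, 'q0', {'q0'})
assert not accepts('aa', trans, 'q0', {'q0'})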
generatorDims=[256,512,677]\n tempDim=128\n tempVec=inputX\n with tf.variable_scope(\"generator\",reuse=reuse):\n for i,genDim in enumerate(generatorDims[:-1]):\n W=tf.get_variable(\"W_\"+str(i),shape=[tempDim, genDim],initializer=tf.contrib.layers.xavier_initializer())\n h=tf.matmul(tempVec,W)\n h2=bn(h,is_training=is_training)\n h3=Activation(h2)\n tempDim=genDim\n tempVec=h3\n W=tf.get_variable(\"W_\"+str(i+1),shape=[tempDim,generatorDims[-1]],initializer=tf.contrib.layers.xavier_initializer())\n h=bn(tf.matmul(tempVec,W),True)\n output=tf.nn.sigmoid(h)\n return h,output\n\ndef discriminator(inputX,reuse=None):\n Activation = tf.nn.relu\n discriminatorDims= [512,128,1]\n tempDim = 677\n tempVec = inputX\n with tf.variable_scope(\"discriminator\",reuse=reuse):\n for i,discDim in enumerate(discriminatorDims[:-1]):\n W=tf.get_variable(\"W_\"+str(i),shape=[tempDim,discDim],initializer=tf.contrib.layers.xavier_initializer())\n h=tf.matmul(tempVec,W)\n h2=Activation(h)\n tempVec = h2\n tempDim = discDim\n W=tf.get_variable(\"W_\"+str(i+1),shape=[tempDim,discriminatorDims[-1]],initializer=tf.contrib.layers.xavier_initializer())\n h=tf.matmul(tempVec,W)\n output=tf.nn.sigmoid(tf.matmul(tempVec,W))\n return h,output\n\ndef buildnet(modelPath=\"\"):\n inputDim=677\n Z=128;batch_size = 64\n inputX=tf.placeholder(dtype=tf.float32,shape=[None, inputDim])\n inputZ=tf.placeholder(dtype=tf.float32, shape=[None, Z])\n\n D_real_logit,D_real=discriminator(inputX,reuse=False)\n gene_logit,geneX=generator(inputZ)\n D_fake_logit,D_fake=discriminator(geneX,reuse=True)\n\n # GAN_loss\n # d_loss = -tf.reduce_mean(tf.log(D_real)) - tf.reduce_mean(tf.log(1-D_fake))\n # g_loss = -tf.reduce_mean(D_fake*tf.log(geneX))\n\n #WGAN_loss,\n d_loss=-tf.reduce_mean(D_real)+tf.reduce_mean(D_fake)\n g_loss=-tf.reduce_mean(D_fake)\n\n \"\"\" Gradient Penalty \"\"\"\n lambd=5\n alpha = tf.random_uniform(shape=[batch_size,inputDim], minval=0., maxval=1.)\n differences = geneX- inputX # This is different from MAGAN\n interpolates = inputX + (alpha * differences)\n\n D_inter,_= discriminator(interpolates,reuse=True)\n gradients = tf.gradients(D_inter, [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n gradient_penalty = tf.reduce_mean((slopes-1.)** 2)\n d_loss += lambd*gradient_penalty\n\n \"\"\" Training \"\"\"\n # divide trainable variables into a group for D and a group for G\n t_vars=tf.trainable_variables()\n d_vars=[var for var in t_vars if \"discriminator\" in var.name]\n g_vars=[var for var in t_vars if \"generator\" in var.name]\n\n optimize_d = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5).minimize(d_loss, var_list=d_vars)\n optimize_g=tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5).minimize(g_loss,var_list=g_vars)\n\n initOp = tf.global_variables_initializer()\n saver = tf.train.Saver(max_to_keep=0)\n if not os.path.exists(\"result\"):\n os.makedirs(\"result\")\n logFile = \"result/result_wgan.txt\"\n\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.4\n with tf.Session(config=config) as sess:\n if modelPath==\"\":\n sess.run(initOp)\n else:\n saver.restore(sess,modelPath)\n\n data=load_data()\n nbatchs=int((data.shape[0]/batch_size))\n\n for epoch in range(501):\n np.random.shuffle(data)\n for i in range(nbatchs):\n start=i*batch_size;end=(i+1)*batch_size\n batchX=data[start:end]\n\n \"updata disc\"\n batch_z = np.random.uniform(-1, 1, [64, 128])\n 
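# The "Gradient Penalty" block above follows WGAN-GP: sample points on the
# segment between the real and generated batches, then penalise
# (||grad D(x_hat)|| - 1)^2 with weight lambd. The interpolation step shown
# with numpy, using the same (batch, 677) shapes as the model:
import numpy as np

real = np.zeros((64, 677))
fake = np.ones((64, 677))
alpha = np.random.uniform(0., 1., size=(64, 677))
interpolates = real + alpha * (fake - real)
assert interpolates.shape == (64, 677)
assert ((interpolates >= 0.0) & (interpolates <= 1.0)).all()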
discloss,_=sess.run([d_loss,optimize_d],feed_dict={inputX:batchX,inputZ:batch_z})\n\n \"updata gene\"\n batch_z = np.random.uniform(-1, 1, [64, 128])\n geneloss,_=sess.run([g_loss,optimize_g],feed_dict={inputX:batchX,inputZ:batch_z})\n buf=\"Epoch:%3d , d_loss:%5f, g_loss:%5f\"%(epoch,discloss,geneloss)\n print(buf)\n print2file(buf, logFile)\n if epoch>=400 and epoch%20==0:\n savePath=saver.save(sess, \"checkpoint_wgan/save_net.ckpt\", global_step=epoch)\n\ndef generateData(modelPath=None,nsamples=18000,batchsize=100,outFile='data/gene_wdata.npy'):\n Z=128\n inputZ = tf.placeholder('float', [None, Z])\n _,geneX = generator(inputZ)\n np.random.seed(1234)\n saver = tf.train.Saver()\n outputVec = []\n with tf.Session() as sess:\n saver.restore(sess,modelPath)\n print('generating')\n nBatches=int(np.ceil(float(nsamples)/ float(batchsize)))\n for i in range(nBatches):\n randomX = np.random.normal(size=(batchsize, Z))\n output=sess.run(geneX,feed_dict={inputZ:randomX})\n outputVec.extend(output)\n outputMat = np.array(outputVec)\n np.save(outFile, outputMat)\n print(outputMat.shape)\n\n \"\"\" translate float into int\"\"\"\n print(\"outputMat.shape:\", outputMat.shape)\n data_train = []\n for li in outputMat:\n raw_data = []\n for lj in li:\n if lj >= 0.5:\n raw_data.append(1)\n else:\n raw_data.append(0)\n data_train.append(raw_data)\n print(np.array(data_train))\n outputMat = np.array(data_train)\n # index主要是为了去除全零的行因为生成的数据中有可能产生全零的不符合要求的数据\n index1 = list(set(np.where(outputMat[:, :234])[0]))\n index2 = list(set(np.where(outputMat[:, 234:])[0]))\n index = [li for li in index1 if li in index2]\n print(\"after translate outputMat.shape:\", outputMat[index].shape)\n np.save(outFile, outputMat[index])\n\n\n#训练网络\nbuildnet()\n#生成数据\n# generateData(modelPath='checkpoint_wgan/save_net.ckpt-460',\n# outFile = 'data/gene_wgan480_data_toint.npy',\n# nsamples = 22000)\n","sub_path":"GAN_for_medical3/MEDGAN/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238234327","text":"from models.item import Item\nfrom models.loanable import Loanable\n\n\nclass Book(Item, Loanable):\n def __init__(self, title=None, author=None, num_pages=None, publisher=None, year_published=None, language=None,\n isbn_10=None, isbn_13=None, **kwargs):\n super().__init__(**kwargs)\n self.title = title\n self.author = author\n self.num_pages = num_pages\n self.publisher = publisher\n self.year_published = year_published\n self.language = language\n self.isbn_10 = isbn_10\n self.isbn_13 = isbn_13\n self.object_class = 'Book'\n","sub_path":"models/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"245898656","text":"import io\nimport sys\n\n\ndef create_graph(input_data):\n # a vertice is a number\n # an edge is a couple of numbers, a token. 
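# generateData above rounds the generator output to {0,1} with a nested
# Python loop; numpy does the same in one vectorised comparison:
import numpy as np

output = np.array([[0.2, 0.7], [0.51, 0.49]])
binary = (output >= 0.5).astype(int)
assert binary.tolist() == [[0, 1], [1, 0]]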
Each line of the input is an edge\n edges = list()\n edges_per_vertex = dict()\n for edge_index, line in enumerate(input_data.readlines()):\n edge = line.rstrip('\\n')\n vertex1, vertex2 = (int(vertex) for vertex in edge.split('/'))\n edges.append((vertex1, vertex2))\n if vertex1 not in edges_per_vertex:\n edges_per_vertex[vertex1] = set()\n if vertex2 not in edges_per_vertex:\n edges_per_vertex[vertex2] = set()\n edges_per_vertex[vertex1].add(edge_index)\n edges_per_vertex[vertex2].add(edge_index)\n\n return edges, edges_per_vertex\n\n\ndef _find_path(edges_per_vertex, branches):\n current_vertex, edges_to_process, current_path = branches.pop(0)\n while not edges_to_process.issubset(current_path):\n next_edge = edges_to_process.pop()\n if len(edges_to_process) > 0:\n branches.append((current_vertex, edges_to_process, current_path.copy()))\n current_path.add(next_edge)\n candidate_vertex = list(edges[next_edge])\n candidate_vertex.remove(current_vertex)\n current_vertex = candidate_vertex.pop()\n edges_to_process = edges_per_vertex[current_vertex] - current_path\n return current_path\n\n\ndef find_strongest_path(edges, edges_per_vertex):\n max_strength = 0\n max_path = None\n current_path = set()\n current_vertex = 0\n branches = [(current_vertex, edges_per_vertex[current_vertex], current_path)]\n loop = 0\n while len(branches):\n path = _find_path(edges_per_vertex, branches)\n strength_path = _calculate_path_strength(edges, path)\n if strength_path > max_strength:\n max_strength = strength_path\n max_path = path\n\n return max_path, max_strength\n\n\ndef find_longest_path(edges, edges_per_vertex):\n max_length = 0\n max_strength = 0\n max_path = None\n current_path = set()\n current_vertex = 0\n branches = [(current_vertex, edges_per_vertex[current_vertex], current_path)]\n loop = 0\n while len(branches):\n path = _find_path(edges_per_vertex, branches)\n length_path = len(path)\n # print(length_path)\n strength_path = _calculate_path_strength(edges, path)\n if (\n length_path == max_length and strength_path > max_strength\n or length_path > max_length\n ):\n max_strength = strength_path\n max_length = length_path\n max_path = path\n\n return max_path, max_length, max_strength\n\n\ndef _calculate_path_strength(edges, path):\n return sum(sum(edges[edge]) for edge in path)\n\n\ntest_data = \"\"\"\\\n0/2\n2/2\n2/3\n3/4\n3/5\n0/1\n10/1\n9/10\n\"\"\"\nedges, edges_per_vertex = create_graph(io.StringIO(test_data))\npath, strength = find_strongest_path(edges, edges_per_vertex)\nassert strength == 31\n\n\nedges, edges_per_vertex = create_graph(sys.stdin)\npath, length, strength = find_longest_path(edges, edges_per_vertex)\nprint(strength)\n","sub_path":"2017/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"66195645","text":"from cmath import exp,sin,cos,pi\nimport math\nfrom primitives import *\nfrom config import *\n\n\ndef B(a,r,step=16):\n\treturn PPath('%s+%s*(cos(t)+sin(t)*1j)'%(a,r),0,2*pi,2*pi/step)\ndef areit(a,r,t0,tn,step=16):\n\treturn PPath('%s+%s*(cos(t)+sin(t)*1j)'%(a,r),t0,tn,2*pi/step)\ndef ray(a,lowrad,hirad,angle,step=16):\n\treturn PPath('%s + (cos(%s)+sin(%s)*1j)*(%s*(1-t)+%s*t)'%(a,angle,angle,lowrad,hirad),0,1,1.0/step)\ndef frange(low,hi,step):\n\treturn [low + step*n for n in range((hi-low)/step+1)]\ndef image(func,path):\n\tpoints = []\n\tfor point in path.points:\n\t\tz = point\n\t\ttry:\n\t\t\tw = eval(func)\n\t\texcept:\n\t\t\t#w = 1e10 + 
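# day24.py above scores a bridge as the sum of both port numbers on every
# component. In the sample data the strongest bridge is 0/1 -- 10/1 -- 9/10,
# which matches the strength of 31 asserted at the bottom of that file:
strongest = [(0, 1), (10, 1), (9, 10)]
assert sum(sum(edge) for edge in strongest) == 31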
1j\n\t\t\tbreak\n\t\tpoints.append(w)\n\treturn Path(points)\ndef spherical_image(path):\n\tpoints = []\n\tfor point in path.points:\n\t\txyz = project_to_sphere(point)\n\t\tpoints.append(xyz)\n\treturn Path(points)\ndef project_to_sphere(z):\n\tz = complex(z)\n\tnormsquaredplusone = z*z.conjugate() + 1\n\tx = 2*z.real/normsquaredplusone\n\ty = 2*z.imag/normsquaredplusone\n\tz = 1 - 2/normsquaredplusone\n\treturn (x.real,y.real,z.real)\n\nclass ComplexMap:\n\t\"\"\" take a domain for a function and do neat things with it \"\"\"\n\tdef __init__(self,domain,func):\n\t\tself.domain = domain\n\t\tself.func = func\n\tdef draw(self,plane):\n\t\tfor region in self.domain.regions:\n\t\t\tfor n,family in enumerate(region.families):\n\t\t\t\tfor path in family:\n\t\t\t\t\tplane.drawpath(image(self.func,path),region.colors[n])\n\tdef lift_to_sphere(self,camera):\n\t\tpaths = []\n\t\tfor region in self.domain.regions:\n\t\t\tfor n,family in enumerate(region.families):\n\t\t\t\tfor path in family:\n\t\t\t\t\tspaceimage = spherical_image(path)\n\t\t\t\t\tpaths.append(spaceimage)\n\t\t\t\t\t\nclass Path:\n\tdef __init__(self,points):\n\t\tself.points = points\nclass PPath(Path):\n\t\"\"\"parametrized path in t\"\"\"\n\tdef __init__(self,expr,a,b,step):\n\t\tPath.__init__(self,[])\n\t\tself.a = a\n\t\tself.b = b\n\t\tself.step = step\n\t\tself.expr = expr\n\t\tself.evaluate()\n\tdef __repr__(self):\n\t\treturn '\\gamma(t) = %s'%self.expr\n\tdef evaluate(self,step=None):\n\t\tif not step:\n\t\t\tstep = self.step\n\t\tpoints = []\n\t\tt = self.a\n\t\tepsilon = .0001\n\t\twhile 1:\n\t\t\tpoints.append(eval(self.expr))\n\t\t\tif t+self.step > self.b + epsilon:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tt += self.step\n\t\tself.points = points\n\nclass Domain:\n\tdef __init__(self,regions):\n\t\tself.regions = regions\n\t\tself.colors = [metacolors['family%i'%(index+1)] for index in range(2)]\n\t\tself.families = [None,None]\nclass Region(Domain):\n\tdef __init__(self):\n\t\tDomain.__init__(self,[self])\nclass AnnSlitRegion(Region):\n\tdef __init__(self,center,radius1,radius2,angle1,angle2):\n\t\tRegion.__init__(self)\n\t\tself.center = center\n\t\tself.radius1 = radius1\n\t\tself.radius2 = radius2\n\t\tself.angle1 = angle1\n\t\tself.angle2 = angle2\n\t\tself.divisions = [circleres,rayres]\n\t\tself.resolutions = [numcircles,numrays]\n\t\tself.update()\n\tdef update(self):\n\t\tself.make_circles()\n\t\tself.make_rays()\n\tdef __repr__(self):\n\t\treturn 'slitAnn(%s;%s,%s)'%(self.center,self.radius1,self.radius2)\n\tdef make_circles(self):\n\t\tindex = 0\n\t\tself.families[index] = []\n\t\t\n\t\tfor radius in frange(self.radius1,self.radius2,(self.radius2-self.radius1)/float(self.divisions[index])):\n\t\t\tpath = areit(self.center,radius,self.angle1,self.angle2,self.resolutions[index])\n\t\t\tself.families[0].append(path)\n\tdef make_rays(self):\n\t\tindex = 1\n\t\tself.families[index] = []\n\t\tfor angle in frange(self.angle1,self.angle2,(self.angle2-self.angle1)/float(self.divisions[index])):\n\t\t\tpath = ray(self.center,self.radius1,self.radius2,angle,self.resolutions[index])\n\t\t\tself.families[index].append(path)\n\t\t\nclass AnnRegion(AnnSlitRegion):\n\tdef __init__(self,center,radius1,radius2):\n\t\tAnnSlitRegion.__init__(self,center,radius1,radius2,0,2*pi)\n\tdef __repr__(self):\n\t\treturn 'Ann(%s;%s,%s)'%(self.center,self.radius1,self.radius2)\nclass BallRegion(AnnRegion):\n\tdef __init__(self,center,radius):\n\t\tAnnRegion.__init__(self,center,0,radius)\nclass RectRegion(Region):\n\tdef 
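# project_to_sphere above is the inverse stereographic projection onto the
# unit sphere: z = 0 lands on the south pole and large |z| approaches the
# north pole. Two spot checks against a copy of the formula:
def project_to_sphere(z):
    z = complex(z)
    d = (z * z.conjugate()).real + 1
    return (2 * z.real / d, 2 * z.imag / d, 1 - 2 / d)

assert project_to_sphere(0) == (0.0, 0.0, -1.0)
x, y, zc = project_to_sphere(1 + 1j)
assert abs(x * x + y * y + zc * zc - 1.0) < 1e-12  # image lies on the sphere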
__init__(self,botleft,width,height):\n\t\tRegion.__init__(self)\n\t\tself.botleft = botleft\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.divisions = [50,50]\n\t\tself.resolutions = [50,50]\n\t\tself.update()\n\tdef update(self):\n\t\tself.make_horiz()\n\t\tself.make_vert()\n\tdef make_horiz(self,spacing=1):\n\t\tindex = 0\n\t\tself.families[index] = []\n\t\tfor val in frange(0,self.height,self.height/float(self.divisions[index])):\n\t\t\tpath = ray(self.botleft+val*1j,0,self.width,0,self.resolutions[index])\n\t\t\tself.families[index].append(path)\n\tdef make_vert(self,spacing=1):\n\t\tindex = 1\n\t\tself.families[index] = []\n\t\tfor val in frange(0,self.width,self.width/float(self.divisions[index])):\n\t\t\tpath = ray(self.botleft+val,0,self.height,pi/2,self.resolutions[index])\n\t\t\tself.families[index].append(path)\n\t\t\n","sub_path":"oldprogs/resume/3dengine/primitives_complex.py","file_name":"primitives_complex.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"115994988","text":"from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom client.validators import parental_consent_validator, minimum_age_validator, phone_number_validator, \\\n unique_id_number_validator, id_number_validator\nfrom client.models import Volunteer, ParentalConsent, City, Language, HelpRequest, Area, VolunteerFreeze\n\n\nclass ParentalConsentSerializer(serializers.ModelSerializer):\n # Currently, nested serializer field required=False doesn't work.\n # Link to bug in DRF: https://github.com/encode/django-rest-framework/issues/2719\n # Declaring these fields as required=False is the workaround so ParentalConsent won't be required.\n parent_name = serializers.CharField(required=False)\n parent_id = serializers.CharField(required=False)\n\n class Meta:\n model = ParentalConsent\n fields = ['parent_name', 'parent_id']\n\n\nclass RegistrationSerializer(serializers.ModelSerializer):\n date_of_birth = serializers.DateField(required=True)\n gender = serializers.ChoiceField(required=True, choices=Volunteer.GENDERS)\n city = serializers.PrimaryKeyRelatedField(queryset=City.objects.all())\n languages = serializers.PrimaryKeyRelatedField(queryset=Language.objects.all(), many=True, required=True)\n wanted_assignments = serializers.MultipleChoiceField(choices=Volunteer.WANTED_ASSIGNMENTS)\n email = serializers.EmailField(required=True)\n parental_consent = ParentalConsentSerializer()\n\n class Meta:\n model = Volunteer\n fields = ['first_name', 'last_name', 'tz_number', 'date_of_birth', 'gender', 'city', 'address', 'moving_way',\n 'week_assignments_capacity', 'wanted_assignments', 'phone_number', 'email', 'parental_consent',\n 'languages']\n\n def validate_date_of_birth(self, date_of_birth):\n minimum_age_validator(date_of_birth)\n return date_of_birth\n\n def validate_phone_number(self, phone_number):\n phone_number_validator(phone_number)\n return phone_number\n\n def validate_tz_number(self, tz_number):\n id_number_validator(tz_number)\n unique_id_number_validator(tz_number)\n return tz_number\n\n def validate_languages(self, languages):\n if not languages:\n raise ValidationError('No languages specified.')\n return languages\n\n def validate(self, data):\n parental_consent_validator(data)\n return data\n\n def create(self, validated_data):\n parental_consent_data = validated_data.pop('parental_consent')\n languages = [Language.objects.get(name=name) for name in 
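# frange (used by make_horiz/make_vert above) passes (hi-low)/step+1 straight
# to range(), which relied on Python 2 accepting float arguments; on Python 3
# the float quotient makes range() raise TypeError. A Py3-safe variant:
def frange(low, hi, step):
    n = int(round((hi - low) / step))
    return [low + step * k for k in range(n + 1)]

assert frange(0.0, 1.0, 0.25) == [0.0, 0.25, 0.5, 0.75, 1.0]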
validated_data.pop('languages')]\n volunteer = Volunteer.objects.create(**validated_data)\n\n volunteer.languages.set(languages)\n ParentalConsent.objects.create(volunteer=volunteer, **parental_consent_data)\n return volunteer\n\n\nclass CitySerializer(serializers.ModelSerializer):\n region = serializers.PrimaryKeyRelatedField(queryset=Area.objects.all())\n\n class Meta:\n model = City\n fields = ['name', 'region']\n\n\nclass ShortCitySerializer(serializers.ModelSerializer):\n class Meta:\n model = City\n fields = ['name']\n\n\nclass VolunteerSerializer(serializers.ModelSerializer):\n gender = serializers.CharField(source='get_gender_display')\n moving_way = serializers.CharField(source='get_moving_way_display')\n wanted_assignments = serializers.ListField(source='get_wanted_assignments_list')\n city = CitySerializer()\n num_helprequests = serializers.IntegerField() # annotated in queryset\n\n class Meta:\n model = Volunteer\n fields = ['id', 'first_name', 'last_name', 'tz_number', 'phone_number', 'date_of_birth', 'age',\n 'gender', 'city', 'address', 'organization', 'moving_way', 'week_assignments_capacity',\n 'wanted_assignments', 'email', 'email_verified', 'score', 'created_date', 'num_helprequests',\n 'languages', 'location_latitude', 'location_longitude']\n\n\nclass VolunteerFreezeSerializer(serializers.ModelSerializer):\n expiration_date = serializers.DateField(required=True)\n\n class Meta:\n model = VolunteerFreeze\n fields = ['volunteer', 'expiration_date']\n\n\nclass ShortVolunteerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Volunteer\n fields = ['id', 'full_name']\n\n\nclass MatchingVolunteerSerializer(serializers.ModelSerializer):\n moving_way = serializers.CharField(source='get_moving_way_display')\n\n class Meta:\n model = Volunteer\n fields = ['id', 'full_name', 'city', 'address', 'phone_number', 'email', 'location_latitude',\n 'location_longitude', 'moving_way']\n\n\nclass MapHelpRequestSerializer(serializers.ModelSerializer):\n status = serializers.CharField(source='get_status_display')\n\n class Meta:\n model = HelpRequest\n fields = ['id', 'full_name', 'location_latitude', 'location_longitude', 'status', 'helping_volunteer']\n\n\nclass CreateHelpRequestSerializer(serializers.ModelSerializer):\n city = serializers.PrimaryKeyRelatedField(queryset=City.objects.all())\n\n class Meta:\n model = HelpRequest\n fields = ['full_name', 'phone_number', 'city', 'address', 'notes', 'type', 'type_text', 'request_reason']\n\n def validate_phone_number(self, phone_number):\n phone_number_validator(phone_number)\n return phone_number\n\n\nclass HelpRequestSerializer(serializers.ModelSerializer):\n city = CitySerializer()\n type = serializers.CharField(source='get_type_display')\n request_reason = serializers.CharField(source='get_request_reason_display')\n status = serializers.CharField(source='get_status_display')\n helping_volunteer = ShortVolunteerSerializer()\n\n class Meta:\n model = HelpRequest\n fields = ['id', 'full_name', 'phone_number', 'city', 'address', 'notes', 'type', 'type_text',\n 'request_reason', 'status', 'status_updater', 'helping_volunteer', 'created_date']\n\n\nclass UpdateHelpRequestSerializer(serializers.ModelSerializer):\n status = serializers.CharField(source='get_status_display')\n\n class Meta:\n model = HelpRequest\n fields = ['notes', 'helping_volunteer', 'status', 'type_text']\n\n\nclass AreaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Area\n fields = ['name']\n\n\nclass 
LanguageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Language\n fields = ['name']\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"253103451","text":"# coding=utf-8\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\ndriver=webdriver.Chrome()\ndriver.set_page_load_timeout(10)\n#try:\ndriver.get(\"http://music.163.com/#/song?id=409060868\")\ndriver.switch_to.frame(\"contentFrame\")\n'''except selenium.common.exceptions.TimeoutException:\n print(\"time out of 10 s\")\n driver.execute_script('window.stop()')'''\nt=0 #用于的判定入口\nwhile True:\n print(t)\n time.sleep(0.7)\n soup = BeautifulSoup(driver.page_source, 'lxml')\n contents = soup.find_all('div', class_='cnt f-brk')\n for content in contents:\n print(content.get_text())\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n# time.sleep(0.5)\n t += 1\n if t == 383:\n break\n driver.find_element_by_link_text('下一页').click()\n# driver.find_element_by_link_text","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235810100","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: a root of binary tree\n @return: return a integer\n \"\"\"\n\n def diameterOfBinaryTree(self, root):\n # write your code here\n def t(node):\n nonlocal res\n if not node: return 0\n left, right = t(node.left), t(node.right)\n res = max(res, left + right)\n return max(left, right) + 1\n\n res = 0\n t(root)\n return res\n","sub_path":"lintcode/1181-diameter-of-binary-tree.py","file_name":"1181-diameter-of-binary-tree.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"550265235","text":"\nimport re\nimport random\nimport numpy\n\n\ndef create_vocab_list(dataset):\n \"\"\" create a list of all uniques words from dataset\n\n Arguments:\n dataset {2d matrix} -- each row is a document that contains multiple words.\n The dataset most likely have multiple rows.\n\n Returns:\n [list] -- a list of all unique words in dataset\n \"\"\"\n vocab_set = set([])\n for document in dataset:\n vocab_set = vocab_set | set(document)\n\n return list(vocab_set)\n\n\ndef document_to_vector(vocab_list, document):\n \"\"\" generate a 0, 1 vector the same size as vocab_list. the vector is a\n representation of the document.\n\n Arguments:\n vocab_list {list} -- a list of all unique words.\n Can be generated using create_vocab_list()\n document {list} -- a list of words.\n\n Returns:\n [list] -- a list of the same size as vocab_list, each value will be either\n 1 or 0. 1 means the corresponding word in vocab_list exist in document,\n 0 means the corresponding word in vocab_list does not exist in document.\n \"\"\"\n retvec = [0] * len(vocab_list)\n\n for word in document:\n if word in vocab_list:\n retvec[vocab_list.index(word)] = 1\n else:\n print(\"the word: %s is not in vocabulary !\" % word)\n\n return retvec\n\ndef train_naive_bayes(train_matrix, category):\n \"\"\"[summary]\n\n Arguments:\n train_matrix {2d matrix} -- each row is a training sample. 
Each training\n sample is a vector of the same size as vocab_list, with value either\n 0 or 1. 1 means the corresponding word exist while 0 means not exist.\n\n category {list} -- a list whose size equals number of rows in train_matrix,\n with value either 0 or 1. 1 means the sample belongs to category-1 and\n 0 means the sample falls into category-0.\n\n Returns:\n category0_word_probability {list} a list of the same size as vocab_list. the\n value in the list is the probability of the corresponding word's appearance\n in all category0 documents.\n\n category1_word_probability {list} a list of the same size as vocab_list. the\n value in the list is the probability of the corresponding word's appearance\n in all category1 documents.\n\n category1_probability {float} the number of category1 documents divided by total\n number of documents.\n\n \"\"\"\n num_samples = len(train_matrix)\n num_words = len(train_matrix[0])\n\n # sum(labels) is count of ones\n category1_probability = sum(category) / float(num_samples)\n\n p0num = numpy.ones(num_words)\n p1num = numpy.ones(num_words)\n\n p0denom = 2.0\n p1denom = 2.0\n\n for i in range(num_samples):\n if category[i] == 1:\n p1num += train_matrix[i]\n p1denom += sum(train_matrix[i])\n else:\n p0num += train_matrix[i]\n p0denom += sum(train_matrix[i])\n\n category1_word_probability = numpy.log(p1num / p1denom)\n category0_word_probability = numpy.log(p0num / p0denom)\n\n return category0_word_probability, category1_word_probability, category1_probability\n\n\ndef classify_naive_bayes(data, p0vec, p1vec, pclass1):\n p1 = numpy.sum(data * p1vec) + numpy.log(pclass1)\n p0 = numpy.sum(data * p0vec) + numpy.log(1.0 - pclass1)\n\n if p1 > p0:\n return 1\n\n return 0\n\n\n\ndef load_dataset():\n dataset = [\n ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n\n category = [0, 1, 0, 1, 0, 1]\n\n return dataset, category\n\n\ndef naive_bayes_test():\n\n dataset, category = load_dataset()\n vocab_list = create_vocab_list(dataset)\n\n print(vocab_list)\n\n train_matrix = []\n\n for document in dataset:\n train_matrix.append(document_to_vector(vocab_list, document))\n\n p0v, p1v, pab = train_naive_bayes(train_matrix, category)\n\n test_data = [\"stupid\", \"garbage\"]\n\n test_vec = numpy.array(document_to_vector(vocab_list, test_data))\n\n result = classify_naive_bayes(test_vec, p0v, p1v, pab)\n print(result)\n\n\ndef parse_text(filename):\n text = open(filename).read()\n words_list = re.split(r'\\W+', text)\n return [word.lower() for word in words_list if len(word) > 2]\n\n\ndef split_dataset(total_count):\n\n training_set = list(range(total_count))\n test_set = []\n\n # create test set\n for i in range(10):\n random_index = int(random.uniform(0, len(training_set)))\n test_set.append(training_set[random_index])\n del(training_set[random_index])\n\n return training_set, test_set\n\n\ndef spam_test():\n\n document_list = []\n category_list = []\n\n for i in range(1, 26):\n document = parse_text(\"book-ml-action/dataset/email/spam/%d.txt\" % i)\n document_list.append(document)\n category_list.append(1)\n\n document = parse_text(\"book-ml-action/dataset/email/ham/%d.txt\" % i)\n document_list.append(document)\n category_list.append(0)\n\n 
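# note: split_dataset draws the 10-document test set with random.uniform and\n    # no fixed seed, so the error rate printed below varies from run to run;\n    # calling random.seed(0) before split_dataset() would make it repeatable.\n    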
vocab_list = create_vocab_list(document_list)\n\n training_set, test_set = split_dataset(len(document_list))\n\n train_matrix = []\n train_category = []\n\n for idx in training_set:\n train_matrix.append(document_to_vector(vocab_list, document_list[idx]))\n train_category.append(category_list[idx])\n\n p0vec, p1vec, pspam = train_naive_bayes(numpy.array(train_matrix), numpy.array(train_category))\n\n error_count = 0\n for idx in test_set:\n vec = document_to_vector(vocab_list, document_list[idx])\n if classify_naive_bayes(numpy.array(vec), p0vec, p1vec, pspam) != category_list[idx]:\n error_count += 1\n\n print(\"the error rate is: \", float(error_count) / len(test_set))\n\n\nspam_test()","sub_path":"book-ml-action/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"325986647","text":"# Copyright 2015 OpenStack Foundation.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport mock\nfrom networking_plumgrid.neutron.tests.unit.cli import test_cli20\nfrom networking_plumgrid.neutronclient.phyattachmentpoint import (\n physical_attachment_point as phyattp)\nfrom neutronclient import shell\nimport sys\n\n\nclass CLITestV20ExtensionPhyAttPJSON(test_cli20.CLITestV20Base):\n def setUp(self):\n # need to mock before super because extensions loaded on instantiation\n self._mock_extension_loading()\n super(CLITestV20ExtensionPhyAttPJSON, self).setUp(plurals={'tags':\n 'tag'})\n\n def _mock_extension_loading(self):\n ext_pkg = 'neutronclient.common.extension'\n contrib = mock.patch(ext_pkg + '._discover_via_entry_points').start()\n contrib.return_value = [(\"physical_attachment_point\", phyattp)]\n return contrib\n\n def test_ext_cmd_loaded(self):\n \"\"\"Tests physical attachment point commands loaded.\"\"\"\n shell.NeutronShell('2.0')\n ext_cmd = {'physical-attachment-point-list':\n phyattp.PhysicalAttachmentPointList,\n 'physical-attachment-point-create':\n phyattp.PhysicalAttachmentPointCreate,\n 'physical-attachment-point-update':\n phyattp.PhysicalAttachmentPointUpdate,\n 'physical-attachment-point-delete':\n phyattp.PhysicalAttachmentPointDelete,\n 'physical-attachment-point-show':\n phyattp.PhysicalAttachmentPointShow}\n self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])\n\n def _create_physical_attachment_point(self, args, name, lacp, hash_mode,\n interface):\n resource = 'physical_attachment_point'\n cmd = phyattp.PhysicalAttachmentPointCreate(test_cli20.MyApp(\n sys.stdout), None)\n position_names = ['name', 'lacp', 'hash_mode', 'interfaces']\n position_values = [name, lacp, hash_mode, interface]\n self._test_create_resource(resource, cmd, name, 'myid', args,\n position_names, position_values)\n\n def _update_physical_attachment_point(self, args, values):\n resource = 'physical_attachment_point'\n cmd = phyattp.PhysicalAttachmentPointUpdate(test_cli20.MyApp(\n sys.stdout), None)\n self._test_update_resource(resource, cmd, 
'myid',\n args, values)\n\n def test_create_physical_attachment_point(self):\n \"\"\"Test Create physical attachment point.\"\"\"\n\n name = 'phyattpoint1'\n lacp = 'True'\n hash_mode = 'L3'\n args = [name, '--lacp', lacp, '--hash_mode', hash_mode,\n '--interface', 'hostname=u1,interface_name=i1']\n interface = [{\"hostname\": \"u1\", \"interface\": \"i1\"}]\n self._create_physical_attachment_point(args, name, lacp,\n hash_mode, interface)\n\n def test_create_physical_attachment_point_with_multiple_interfaces(self):\n \"\"\"Test Create physical attachment point for multiple interfaces.\"\"\"\n\n name = 'phyattpoint1'\n lacp = 'True'\n hash_mode = 'L3'\n args = [name, '--lacp', lacp, '--hash_mode', hash_mode, '--interface',\n 'hostname=u1,interface_name=i1', '--interface',\n 'hostname=u2,interface_name=i2']\n interface = [{\"hostname\": \"u1\", \"interface\": \"i1\"},\n {\"hostname\": \"u2\", \"interface\": \"i2\"}]\n self._create_physical_attachment_point(args, name, lacp,\n hash_mode, interface)\n\n def test_create_physical_attachment_point_lacp_off(self):\n \"\"\"Test Create physical attachment point.\"\"\"\n\n name = 'phyattpoint1'\n lacp = 'False'\n hash_mode = 'L2'\n args = [name, '--lacp', lacp, '--hash_mode', hash_mode,\n '--interface', 'hostname=u1,interface_name=i1']\n interface = [{\"hostname\": \"u1\", \"interface\": \"i1\"}]\n self._create_physical_attachment_point(args, name, lacp,\n hash_mode, interface)\n\n def test_list_physical_attachment_points(self):\n \"\"\"Test List physical attachment points.\"\"\"\n\n resources = \"physical_attachment_points\"\n cmd = phyattp.PhysicalAttachmentPointList(test_cli20.MyApp(\n sys.stdout), None)\n self._test_list_resources(resources, cmd, True)\n\n def test_list_physical_attachment_points_pagination(self):\n #FIXME(fawadkhaliq)\n self.skipTest(\"Pagination does not work right now\")\n #resources = 'physical_attachment_points'\n #cmd = phyattp.PhysicalAttachmentPointList(test_cli20.MyApp(\n # sys.stdout), None)\n #self._test_list_resources_with_pagination(resources, cmd)\n\n def test_list_physical_attachment_points_sort(self):\n \"\"\"list physical attachment points: --sort-key name\n --sort-key id --sort-key asc --sort-key desc\n \"\"\"\n resources = 'physical_attachment_points'\n cmd = phyattp.PhysicalAttachmentPointList(test_cli20.MyApp(\n sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])\n\n def test_list_physical_attachment_points_limit(self):\n \"\"\"list physical attachment points: -P.\"\"\"\n resources = 'physical_attachment_points'\n cmd = phyattp.PhysicalAttachmentPointList(test_cli20.MyApp(\n sys.stdout), None)\n self._test_list_resources(resources, cmd, page_size=1000)\n\n def test_delete_physical_attachment_point(self):\n \"\"\"Test Delete physical attachment points.\"\"\"\n\n resource = 'physical_attachment_point'\n cmd = phyattp.PhysicalAttachmentPointDelete(test_cli20.MyApp(\n sys.stdout), None)\n my_id = 'my-id'\n args = [my_id]\n self._test_delete_resource(resource, cmd, my_id, args)\n\n def test_show_physical_attachment_point(self):\n \"\"\"\n Test Show physical attachment point: --fields id\n --fields name myid.\n \"\"\"\n\n resource = 'physical_attachment_point'\n cmd = phyattp.PhysicalAttachmentPointShow(test_cli20.MyApp(\n sys.stdout), None)\n args = ['--fields', 'id', '--fields', 'name', self.test_id]\n self._test_show_resource(resource, cmd, self.test_id, args,\n ['id', 'name'])\n\n def test_update_physical_attachment_point(self):\n 
\"\"\"Test Update physical attachment point hash mode.\"\"\"\n\n args = ['myid', '--lacp', 'False']\n values = {'lacp': 'False'}\n self._update_physical_attachment_point(args, values)\n\n def test_update_physical_attachment_point_name(self):\n \"\"\"Test Update physical attachment point name.\"\"\"\n\n args = ['myid', '--name', 'myname']\n values = {'name': 'myname'}\n self._update_physical_attachment_point(args, values)\n\n def test_update_physical_attachment_point_hash_mode(self):\n \"\"\"Test Update physical attachment point hash mode.\"\"\"\n\n args = ['myid', '--name', 'myname', '--hash_mode', 'L2']\n values = {'name': 'myname', 'hash_mode': 'L2'}\n self._update_physical_attachment_point(args, values)\n","sub_path":"networking_plumgrid/neutron/tests/unit/cli/phyattachpoint/test_cli20_pap_ext.py","file_name":"test_cli20_pap_ext.py","file_ext":"py","file_size_in_byte":8323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"380108093","text":"# -*- coding: utf-8 -*-\n#\nimport numpy\nimport pytest\nimport sympy\n\nimport quadpy\n\nfrom helpers import check_degree\n\n\ndef _integrate_exact(k, pyra):\n def f(x):\n return x[0] ** int(k[0]) * x[1] ** int(k[1]) * x[2] ** int(k[2])\n\n # map the reference hexahedron [-1,1]^3 to the pyramid\n xi = sympy.DeferredVector(\"xi\")\n pxi = (\n +pyra[0] * (1 - xi[0]) * (1 - xi[1]) * (1 - xi[2]) / 8\n + pyra[1] * (1 + xi[0]) * (1 - xi[1]) * (1 - xi[2]) / 8\n + pyra[2] * (1 + xi[0]) * (1 + xi[1]) * (1 - xi[2]) / 8\n + pyra[3] * (1 - xi[0]) * (1 + xi[1]) * (1 - xi[2]) / 8\n + pyra[4] * (1 + xi[2]) / 2\n )\n\n pxi = [sympy.expand(pxi[0]), sympy.expand(pxi[1]), sympy.expand(pxi[2])]\n # determinant of the transformation matrix\n J = sympy.Matrix(\n [\n [\n sympy.diff(pxi[0], xi[0]),\n sympy.diff(pxi[0], xi[1]),\n sympy.diff(pxi[0], xi[2]),\n ],\n [\n sympy.diff(pxi[1], xi[0]),\n sympy.diff(pxi[1], xi[1]),\n sympy.diff(pxi[1], xi[2]),\n ],\n [\n sympy.diff(pxi[2], xi[0]),\n sympy.diff(pxi[2], xi[1]),\n sympy.diff(pxi[2], xi[2]),\n ],\n ]\n )\n det_J = sympy.det(J)\n # we cannot use abs(), see .\n # abs_det_J = sympy.Piecewise((det_J, det_J >= 0), (-det_J, det_J < 0))\n # This is quite the leap of faith, but sympy will cowardly bail out\n # otherwise.\n abs_det_J = det_J\n\n exact = sympy.integrate(\n sympy.integrate(\n sympy.integrate(abs_det_J * f(pxi), (xi[2], -1, 1)), (xi[1], -1, +1)\n ),\n (xi[0], -1, +1),\n )\n\n return float(exact)\n\n\n@pytest.mark.parametrize(\"scheme\", [quadpy.pyramid.Felippa(k) for k in range(1, 10)])\ndef test_scheme(scheme):\n assert scheme.points.dtype in [numpy.float64, numpy.int64], scheme.name\n assert scheme.weights.dtype in [numpy.float64, numpy.int64], scheme.name\n\n # Test integration until we get to a polynomial degree `d` that can no\n # longer be integrated exactly. 
The scheme's degree is `d-1`.\n    pyra = numpy.array(\n        [[-1, -1, -1], [+1, -1, -1], [+1, +1, -1], [-1, +1, -1], [0, 0, 1]]\n    )\n    degree = check_degree(\n        lambda poly: quadpy.pyramid.integrate(poly, pyra, scheme),\n        lambda k: _integrate_exact(k, pyra),\n        3,\n        scheme.degree + 1,\n    )\n    assert degree == scheme.degree\n    return\n\n\n@pytest.mark.parametrize(\"scheme\", [quadpy.pyramid.Felippa(5)])\ndef test_show(scheme):\n    quadpy.pyramid.show(scheme)\n    return\n\n\nif __name__ == \"__main__\":\n    scheme_ = quadpy.pyramid.Felippa(3)\n    test_scheme(scheme_)\n    # test_show(scheme_)\n    # quadpy.pyramid.show(scheme_, backend='vtk')\n","sub_path":"test/test_pyramid.py","file_name":"test_pyramid.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558548120","text":"#code by eyyadh#\nimport os\nimport re\n\nfrom pyrogram import emoji, Client\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery, ForceReply\nfrom pyrogram import filters\nfrom mega.database.files import MegaFiles\nfrom mega.database.users import MegaUsers\nfrom mega.helpers.downloader import Downloader\nfrom mega.helpers.media_info import MediaInfo\n\n\n@Client.on_message(filters.document)\nasync def document(bot, update):\n    await bot.send_message(\n        chat_id=update.chat.id,\n        text=\"hi\",\n        reply_markup=InlineKeyboardMarkup(\n            [\n                [InlineKeyboardButton(text=f\"{emoji.PENCIL} Rename\",\n                  # carry the message id so the callback handler below can parse it back out\n                  callback_data=f\"rename_{update.message_id}\")\n                ]\n            ]\n        ),\n        reply_to_message_id=update.message_id\n    )\n\n\n# on_callback_query takes a filter object, not the CallbackQuery type;\n# register for all callback queries here\n@Client.on_callback_query()\nasync def callback_rename_handler(c: Client, cb: CallbackQuery):\n    await cb.answer()\n\n    params = cb.data.split('_')  # a CallbackQuery carries its payload in .data\n    cb_message_id = int(params[1]) if len(params) > 1 else None\n\n    await cb.message.reply_text(\n        f\"RENAME_{cb_message_id}:\\n\"\n        f\"Send me the new name of the file as a reply to this message.\",\n        reply_markup=ForceReply(True)\n    )\n\n\n@Client.on_message(filters.reply & filters.private, group=1)\nasync def reply_message_handler(c: Client, m: Message):\n    func_message_obj = str(m.reply_to_message.text).splitlines()[0].split(\"_\")\n    if len(func_message_obj) > 1:\n        func = func_message_obj[0]\n        org_message_id = int(str(func_message_obj[1]).replace(\":\", \"\"))\n        org_message = await c.get_messages(m.chat.id, org_message_id)\n        if func == \"RENAME\":\n            new_file_name = m.text\n\n            ack_message = await m.reply_text(\n                \"About to start downloading the file to Local.\"\n            )\n","sub_path":"mega/telegram/plugins/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"102958753","text":"import csv\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#name=raw_input('Enter file name:')\n\nwith open(\"/Users/tejas/Documents/CurrentProjects/transcr/test2.Pitch\", 'r') as file:\n    f=file.readlines()\n\nfreqlist=[]\nfor line in f:\n\tif 'frequency' in line:\n\t\tfreqlist.append(line)\n\ndata=[]\nfor i in range(len(freqlist)):\n    data.append(freqlist[i][28:])\n\nfor i in range(len(data)):\n    data[i]=data[i].strip()\n\ndata = list(map(float, data))  # materialize: a map object cannot be sliced in Python 3\ndata = data[0::2]\ndatanp = np.asarray(data)\n\npd = pandas.DataFrame(data)\npd.to_csv(\"output2.csv\", index=False)\n\n\n# with open(\"output.csv\", \"wb\") as fi:\n#     writer = csv.writer(fi, delimiter=',')\n# 
writer.writerows(data[i])","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616179033","text":"#Yahtzee.py\n#\n#This game is Yahtzee\n\nimport random\nfrom statistics import mode\nglobal status\nstatus = 0\n\ndef Roll(): # returns a random number between 1 and 6\n return random.randint(1, 6)\n\ndef Yahtzee(dice, status): # counts bonus Yahtzees scored in markScorecard\n if dice[0] == dice[1] == dice[2] == dice[3] == dice[4]:\n print(\"YAHTZEE!\")\n if status == -1:\n print(\"Womp womp..\")\n else: status += 1\n return status\n\ndef FirstRoll(): # rolls first 5 dice to begin turn, returns list of 5 integers\n global status\n\n rolled_dice = [] # place holder for dice\n for n in range(5):\n rolled_dice.append(Roll())\n rolled_dice.sort() # sorts dice in numerical order\n print(\"\\nYour first roll: \" + str(rolled_dice))\n status = Yahtzee(rolled_dice, status)\n print()\n return rolled_dice\n\ndef RollAgain(): # determines if player wants to roll again, then asks which dice to roll again\n reroll = 'not yet'\n while reroll == 'not yet': # ensures a correct response has been received\n ans = input(\"Would you like to roll again?(y or n) \").lower()\n if ans == \"n\":\n reroll = False\n new_dice = ['done_rolling'] # used to bypass NextRoll\n elif ans == \"y\": # asks which dice to replace, then subtracts 1 to convert to zero based lists\n print(\"\\nWhich dice would you like to roll again?\")\n print(\"(Enter the corresponding numbers separated by\")\n reroll_dice = input(\" spaces. 1 to 5 with 1 being the far left number)\\n\")\n\n # Determine if the input was the correct format and handle errors\n valid = False # used to exit the while loop when correct format is received\n while valid == False:\n try: # handles non number input error\n new_dice = list(map(int, reroll_dice.split())) # converts string to list of integers\n except:\n reroll_dice = input(\n \"Enter number(s) 1 to 5 with 1 being the far left number and a space between each)\\n\")\n else: # input is numbers, now determine if they are between 1 and 5\n try:\n new_dice.sort() # sorts dice to handle too large or small number error\n if (new_dice[0]>=1 and new_dice[-1]<=5): #ensures replacement locations are 1 to 5\n valid = True\n else: # provides additional instruction if incorrect number was entered\n reroll_dice = input(\n \"Enter number(s) 1 to 5 with 1 being the far left number and a space between each)\\n\")\n except:\n reroll_dice = input(\n \"Enter number(s) 1 to 5 with 1 being the far left number and a space between each)\\n\")\n\n new_dice[:] = [x - 1 for x in new_dice] # subtracts 1 from each item in list to convert to zero based list\n reroll = True\n else: print('Answer \"y\" or \"n\"')\n return new_dice\n\ndef NextRoll(rolled_dice, replace, roll): # rolls and replaces dices selected in RollAgain, then resorts numerically\n global status\n\n print()\n for d in replace:\n rolled_dice[d] = Roll()\n rolled_dice.sort()\n if roll == 2:\n print(\"Your second roll: \" + str(rolled_dice))\n else:\n print(\"Your thrid roll: \" + str(rolled_dice))\n status = Yahtzee(rolled_dice, status)\n print()\n return rolled_dice\n\ndef OneTurn(): # one players turn, rolls dice up to 3 times\n x = FirstRoll()\n y = RollAgain()\n if y != ['done_rolling']:\n z = NextRoll(x, y, 2)\n y = RollAgain()\n if y != ['done_rolling']:\n return NextRoll(x, y, 3)\n else: return z\n else: return x\n\ndef 
PrintScorecard():\n print()\n print(\" --------------------------------------------------------------------\")\n print(\"| UPPER SECTION HOW TO SCORE Player 1 Player 2|\")\n print(\"| 1 Ones Count and Add Only Ones %s %s |\" % (r1[1], r1[3]))\n print(\"| 2 Twos Count and Add Only Twos %s %s |\" % (r2[1], r2[3]))\n print(\"| 3 Threes Count and Add Only Threes %s %s |\" % (r3[1], r3[3]))\n print(\"| 4 Fours Count and Add Only Fours %s %s |\" % (r4[1], r4[3]))\n print(\"| 5 Fives Count and Add Only Fives %s %s |\" % (r5[1], r5[3]))\n print(\"| 6 Sixes Count and Add Only Sixes %s %s |\" % (r6[1], r6[3]))\n print(\"| TOTAL SCORE ------------------------> %s %s |\" % (r16[1], r16[3]))\n print(\"| BONUS (if score > 63) SCORE 35 %s %s |\" % (r17[1], r17[3]))\n print(\"| TOTAL of Upper Section ----------------> %s %s |\" % (r18[1], r18[3]))\n print(\"| LOWER SECTION ------------------------------------------------|\")\n print(\"| 7 3 of a kind Add Total Of All Dice %s %s |\" % (r7[1], r7[3]))\n print(\"| 8 4 of a kind Add Total Of All Dice %s %s |\" % (r8[1], r8[3]))\n print(\"| 9 Full House SCORE 25 %s %s |\" % (r9[1], r9[3]))\n print(\"|10 Sm Straight (seq of 4) SCORE 30 %s %s |\" % (r10[1], r10[3]))\n print(\"|11 Lg Straight (seq of 5) SCORE 40 %s %s |\" % (r11[1], r11[3]))\n print(\"|12 YAHTZEE (5 of a kind) SCORE 50 %s %s |\" % (r12[1], r12[3]))\n print(\"|13 Chance Score Total Of All 5 Dice %s %s |\" % (r13[1], r13[3]))\n print(\"| YAHTZEE X for each bonus %s %s |\" % (r14[1], r14[3]))\n print(\"| BONUS Score 100 per X %s %s |\" % (r15[1], r15[3]))\n print(\"| TOTAL of Lower Section-------------------> %s %s |\" % (r19[1], r19[3]))\n print(\"| TOTAL of Upper Section ------------------> %s %s |\" % (r18[1], r18[3]))\n print(\"| GRAND TOTAL -----------------------------> %s %s |\" % (r20[1], r20[3]))\n print(\" --------------------------------------------------------------------\")\n\n# [P1 score(int), P1 score(str), P2 score(int), P2 score(str)]\nr1 = [0, \" \", 0, \" \"]\nr2 = [0, \" \", 0, \" \"]\nr3 = [0, \" \", 0, \" \"]\nr4 = [0, \" \", 0, \" \"]\nr5 = [0, \" \", 0, \" \",]\nr6 = [0, \" \", 0, \" \",]\nr7 = [0, \" \", 0, \" \",]\nr8 = [0, \" \", 0, \" \",]\nr9 = [0, \" \", 0, \" \",]\nr10 = [0, \" \", 0, \" \",]\nr11 = [0, \" \", 0, \" \",]\nr12 = [0, \" \", 0, \" \",]\nr13 = [0, \" \", 0, \" \",]\nr14 = [\"\", \" \", \"\", \" \",]\nr15 = [0, \" \", 0, \" \",]\nr16 = [0, \" \", 0, \" \",]\nr17 = [0, \" \", 0, \" \",]\nr18 = [0, \" \", 0, \" \",]\nr19 = [0, \" \", 0, \" \",]\nr20 = [0, \" \", 0, \" \",]\n\n#determines if player 1 has already scored the category TODO player 2 scoring determination\np1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\ndef markScorecard(dice):\n global status\n\n print()\n print()\n PrintScorecard()\n print()\n print(\"You rolled: \" + str(dice))\n check = False\n while check == False:\n try:\n row = int(input(\"\\nWhich row are you scoring? 
\"))\n except:\n print(\"Enter a number between 1 and 13, with no decimal places.\")\n else:\n if (row > 0 and row < 14):\n if p1[row - 1] == 0:\n check = True\n p1[row - 1] = 1\n else: print(\"You already scored row %i\" % row)\n else: print(\"Enter a value between 1 and 13\")\n\n\n sum = 0 # Used to score various options\n\n # Scoring Selections\n\n # Select 1 - ones\n if row == 1:\n for d in dice:\n if d == row:\n sum += d\n r1[0] = sum\n r1[1] = str(r1[0]).rjust(3, \" \")\n\n # Select 2 - twos\n if row == 2:\n for d in dice:\n if d == row:\n sum += d\n r2[0] = sum\n r2[1] = str(r2[0]).rjust(3, \" \")\n\n # Select 3 - threes\n if row == 3:\n for d in dice:\n if d == row:\n sum += d\n r3[0] = sum\n r3[1] = str(r3[0]).rjust(3, \" \")\n\n # Select 4 - fours\n if row == 4:\n for d in dice:\n if d == row:\n sum += d\n r4[0] = sum\n r4[1] = str(r4[0]).rjust(3, \" \")\n\n # Select 5 - fives\n if row == 5:\n for d in dice:\n if d == row:\n sum += d\n r5[0] = sum\n r5[1] = str(r5[0]).rjust(3, \" \")\n\n # Select 6 - sixes\n if row == 6:\n for d in dice:\n if d == row:\n sum += d\n r6[0] = sum\n r6[1] = str(r6[0]).rjust(3, \" \")\n\n # Select 7 - Three of a kind\n if row == 7:\n for d in dice:\n if d == mode(dice):\n sum += d\n if sum/mode(dice) >= 3:\n r7[0] = sum\n r7[1] = str(r7[0]).rjust(3, \" \")\n\n # Select 8 - Four of a kind\n if row == 8:\n for d in dice:\n if d == mode(dice):\n sum += d\n if sum/mode(dice) >= 4:\n r8[0] = sum\n r8[1] = str(r8[0]).rjust(3, \" \")\n\n # Select 9 - Full House\n if row == 9:\n if ((dice[0] == dice[1] == dice[2] and dice[3] == dice[4]) or (dice[0] == dice[1] and dice[2] == dice[3] == dice[4])):\n r9[0] = 25\n else: r9[0] = 0\n r9[1] = str(r9[0]).rjust(3, \" \")\n\n # Select 10 - Small Straight (4 in a row)\n if row == 10:\n counter = 0\n for n in range(4):\n if (dice[n] == dice[n+1]-1):\n counter += 1\n if counter >= 3: r10[0] = 30\n else: r10[0] = 0\n r10[1] = str(r10[0]).rjust(3, \" \")\n\n if row == 11:\n if(dice[0] == (dice[1]-1) == (dice[2]-2) == (dice[3]-3) == (dice[4]-4)):\n r11[0] = 40\n else: r11[0] = 0\n r11[1] = str(r11[0]).rjust(3, \" \")\n\n # Select 12 - Yahtzee!\n if row == 12:\n if dice[0] == dice[1] == dice[2] == dice[3] == dice[4]:\n r12[0] = 50\n status = 1\n else:\n r12[0] = 0\n status = -1\n r12[1] = str(r12[0]).rjust(3, \" \")\n\n # Select 13 - Chance\n if row == 13:\n for d in dice:\n sum += d\n r13[0] = sum\n r13[1] = str(r13[0]).rjust(3, \" \")\n\ndef updateScorecard():\n #updates sums and upper section bonus for player 1\n r16[0] = r1[0] + r2[0] + r3[0] + r4[0] + r5[0] + r6[0]\n r16[1] = str(r16[0]).rjust(3, \" \")\n if r16[0] > 63:\n r17[0] = 35\n r17[1] = str(r17[0]).rjust(3, \" \")\n r18[0] = r16[0] + r17[0]\n r18[1] = str(r18[0]).rjust(3, \" \")\n if status == 2:\n r14[1] = \" X \"\n r15[0] = 100\n r15[1] = \"100\"\n elif status == 3:\n r14[1] = \" XX\"\n r15[0] = 200\n r15[1] = \"200\"\n elif status == 4:\n r14[1] = \"XXX\"\n r15[0] = 300\n r15[1] = \"300\"\n r19[0] = r7[0] + r8[0] + r9[0] + r10[0] + r11[0] + r12[0] + r13[0] + r15[0]\n r19[1] = str(r19[0]).rjust(3, \" \")\n r20[0] = r18[0] + r19[0]\n r20[1] = str(r20[0]).rjust(3, \" \")\n #TODO update sums for player 2\n\n PrintScorecard()\n\nprint(\"We're playing 1 player for now..\")\nPrintScorecard()\nfor s in range(13):\n markScorecard(OneTurn())\n updateScorecard()\nprint(\"Game Over\")\n\n# TODO Need to add option for 2nd player.\n# TODO Need to print out score comparison and winner when second player added.\n# TODO Combine RollAgain and NextRoll, think enter dice to 
reroll or enter 'n' to stop rolling\n\n# Changed input at roll again to 'y' and 'n' instead of 'yes' and 'no'\n# Added description of first, second, and third rolls.","sub_path":"Yahtzee.py","file_name":"Yahtzee.py","file_ext":"py","file_size_in_byte":11758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"610618642","text":"#write a program that prints out all the elements of the list that are less than the number inputted by the user\n\nlist1 = [1,2,3,4,5,6,7,8,9]\nlist2 = []\n\nnumber = int(input('Enter a number: '))\n\nfor element in list1:\n\tif element < number:\n\t\tlist2.append(element)\n\nprint(list2)","sub_path":"practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"477038967","text":"########################################################################\nfrom datetime import datetime\nclass BaseData(object):\n def __init__(self):\n \"\"\"Constructor\"\"\"\n pass\n\n \nclass NewsData(BaseData):\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(NewsData, self).__init__()\n self.title = '' \n self.published = datetime.now()\n self.content = '' \n self.tags = {} \n\n\nclass YoutubeVideo(BaseData):\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(YoutubeVideo, self).__init__()\n self.link = ''\n self.videoTitle = ''\n self.videoUploader = ''\n self.videoChannelUrl = ''\n self.videoUploadDate = ''\n self.videoThumbnail = ''\n self.videoDescription = ''\n self.videoDuration = ''\n self.videoViewCount = ''\n self.videoLikeCount = ''\n self.videoLikeCount = ''\n self.videoDislikeCount = ''\n self.videoAverageRating = ''\n self.createDate = datetime.now()\n self.isDownload = False\n self.downloadDate = datetime.now()\n self.downloadFile = ''","sub_path":"runWebsite/schedulerTask/newsSpider/Object.py","file_name":"Object.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"199660708","text":"from django.shortcuts import render\nfrom pyzipcode import ZipCodeDatabase\n\n\ndef index(request):\n return render(request, 'zip_codes/index.html')\n\n\ndef get_coordinates(request):\n zcdb = ZipCodeDatabase()\n try:\n zipcode = zcdb[request.POST.get('zip_code')]\n coordinates = {\n 'zip_code': zipcode.zip,\n 'longitude': zipcode.longitude,\n 'latitude': zipcode.latitude\n }\n context = {'coordinates': coordinates}\n except Exception as e:\n context = {'error': e}\n\n return render(request, 'zip_codes/coordinates.html', context)\n","sub_path":"zip_codes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"568446899","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAdd .rtf version to all books kept in Calibre_\nwhich have only .doc.\n\n.. _Calibre: http://calibre-ebook.com\n\nPurpose\n=======\n\nCalibre does not handle .doc files natively, but it handles .rtf.\nSo, to make book format conversion possible, this script updates\nall .doc books with .rtf alternative.\n\nOpenOffice (and pyuno libraries provided by it) are used in the\nprocess.\n\nPrerequisities\n==============\n\nCalibre must be installed and it's programs present in PATH.\n\nPython2.6 must be installed.\n\nootools library () must\nbe installed. 
Simplest method to install it:\n\n easy_install ootools\n\n(on Ubuntu `sudo easy_install ootools`)\n\nUsage\n=====\n\nJust open some terminal and run\n\n calibre_convert_docs_to_rtf\n\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport os.path\nfrom tempfile import NamedTemporaryFile\nfrom collections import namedtuple\n\nfrom mekk.calibre.calibre_util import \\\n find_calibre_books, add_format_to_calibre\n\nfrom mekk.calibre.openoffice import doc2rtf_converter\n\n############################################################\n# Operations\n############################################################\n\nDocItem = namedtuple('DocItem', 'id uuid title file')\n\n\ndef locate_docs_without_rtf():\n \"\"\"\n Locates all files which have .doc but do not have .rtf version\n\n Routine yields objects with fields:\n id,\n uuid,\n title\n file (.doc file name)\n \"\"\"\n for book in find_calibre_books(search=\"format:doc and not format:rtf\"):\n files = [f\n for f in book.files\n if f.lower().endswith('.doc')]\n if files:\n yield DocItem(id=book.id,\n uuid=book.uuid,\n title=book.title,\n file=files[0])\n\n\ndef make_rtf_for(item):\n \"\"\"\n Call doc2rtf converter to create .rtf file for given item.\n Item is a standard object (item.id is a calibre id, item.file is .doc)\n \"\"\"\n rtf_file = NamedTemporaryFile(suffix=\".rtf\", delete=False)\n rtf_name = rtf_file.name\n print(\"Creating RTF for book %s (%s) in %s\" % (\n item.id, item.title, rtf_name))\n try:\n rtf_file.close()\n doc2rtf_converter.convert(item.file, rtf_name)\n add_format_to_calibre(item.id, rtf_name)\n finally:\n os.remove(rtf_name)\n\n############################################################\n# Main\n############################################################\n\n\ndef run():\n \"\"\"\n Run calibre_convert_docs_to_rtf script\n \"\"\"\n for doc_file in locate_docs_without_rtf():\n make_rtf_for(doc_file)\n","sub_path":"src/mekk/calibre/scripts/convert_docs_to_rtf.py","file_name":"convert_docs_to_rtf.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"275453902","text":"# !/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport time\n\nclass BeautifulSoupPic():\n def __init__(self): ##初始化\n self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}\n #self.web_url = 'https://www.douban.com/photos/album/127879998'\n #win下目录\n # self.folder_path = 'D:\\BeautifulPicture'\n #mac下目录\n self.folder_path = '/Users/willdonner/DevsTest'\n def request(self, url): ##返回网页的请求\n r = requests.get(\n url=url,\n headers=self.headers)\n return r\n\n def mkdir(self, path): ##这个函数创建文件夹\n path = path.strip()\n isExists = os.path.exists(path)\n if not isExists:\n print('创建名字叫做', path, '的文件夹')\n os.makedirs(path)\n print('创建成功!')\n else:\n print(path, '文件夹已经存在了,不再创建')\n\n def save_img(self, url, name):\n print('保存准备中...')\n sleep_download_time = 0.5\n time.sleep(sleep_download_time)\n img = self.request(url)\n file_name = name + '.jpg'\n print('开始保存图片')\n f = open(file_name, 'ab')\n f.write(img.content)\n print(file_name + '保存成功')\n f.close()\n \n def geturl(self, url):\n r = self.request(url)\n print('开始获取所有img标签')\n all_a = BeautifulSoup(r.text, 'lxml').find('div', class_='photolst clearfix').find_all('img')\n print('开始创建文件夹')\n self.mkdir(self.folder_path)\n print('开始切换文件夹')\n 
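# chdir into folder_path so the bare file names opened in save_img are\n        # written there; building absolute paths with os.path.join would avoid\n        # changing the process-wide working directory.\n        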
os.chdir(self.folder_path)\n for a in all_a:\n reimg_url = a['src']\n first_pos1 = reimg_url[0:37]\n first_pos = first_pos1+'l'\n last_pos = reimg_url[38:]\n first_pos += last_pos\n img_name = first_pos[-15:-4]\n self.save_img(first_pos, img_name)\n def get_pic(self):\n #print('请输入你想下载的网址')\n downlode_url = 'https://www.douban.com/photos/album/127879998/'\n print(downlode_url)\n r = self.request(downlode_url)\n print('开始网页get请求')\n print('获取页数')\n print(r.text)\n all_message = BeautifulSoup(r.text, 'lxml').find('div', class_='paginator')\n #for a_page in all_message:\n page = int(all_message.find('span', class_='thispage').get('data-total-page'))\n print(page)\n for page in range(0, page*18, 18):\n if page == 0:\n url = downlode_url\n self.geturl(url)\n else:\n url = downlode_url+'/?start='+str(page)\n self.geturl(url)\nbeauty = BeautifulSoupPic() #创建类的实例\nbeauty.get_pic()\nprint('下载完成!')","sub_path":"downpic.py","file_name":"downpic.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"531462352","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\nfrom stdnum import ec\n\nLIST_TYPE_DOCUMENTATION = [\n ('0', 'RUC'),\n ('1', 'Cédula'),\n ('2', 'Pasaporte'),\n]\n\n\nclass Carrier(models.Model):\n _name = 'stock.carrier'\n _description = _(\"Transportista\")\n\n @api.constrains('documentation_number')\n def _check_documentation_number(self):\n \"\"\"\n Validar dígitos de documento y verificar qué no exista empresa.\n Se utiliza la clase ec de la librería stdnum.\n :return:\n \"\"\"\n if self.documentation_number and len(self.documentation_number) not in [10, 13]:\n raise ValidationError(_(\"Debe ingresar 10 o 13 dígitos según tipo de documento.\"))\n if self.type_documentation == '0':\n ec.ruc.is_valid(self.documentation_number)\n if self.type_documentation == '1':\n ec.ci.is_valid(self.documentation_number)\n\n @api.multi\n def name_get(self):\n \"\"\"\n Cambiamos nombre a mostrar de registros\n :return list:\n \"\"\"\n res = []\n for data in self:\n display_name = '{0} [{1}]'.format(data.name, data.transport_plate)\n res.append((data.id, display_name))\n return res\n\n name = fields.Char('Nombre', required=True, index=True)\n type_documentation = fields.Selection(LIST_TYPE_DOCUMENTATION, string='Tipo de identificación', required=True)\n documentation_number = fields.Char('Nº Identificación',\n copy=False,\n size=13,\n required=True,\n help='Identificación o registro único de contribuyente (RUC).')\n transport_plate = fields.Char('Placa de transporte', required=True)\n\n _sql_constraints = [\n (\n 'transport_plate_unique', 'unique(transport_plate)',\n _(\"Ya existe placa registrada!\")),\n ]\n","sub_path":"eliterp_stock_referral_guide/models/stock_carrier.py","file_name":"stock_carrier.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"394903697","text":"from echidna.util import root_help\nfrom ROOT import TH1D, TH2D\n\n\ndef plot_projection(spectra, dimension, graphical=True):\n \"\"\" Plot the spectra as projected onto the dimension.\n For example dimension == 0 will plot the spectra as projected onto the\n energy dimension.\n\n Args:\n spectra (:class:`echidna.core.spectra`): The spectra to plot.\n dimension (int): The dimension to project the spectra onto.\n graphical (bool): Shows plot and waits for user input when true.\n\n 
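Example (illustrative, assuming a filled Spectra instance named spectra):\n        hist = plot_projection(spectra, 0, graphical=False)  # energy axis\n\n    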
Returns:\n (:class:`ROOT.TH1D`): plot.\n \"\"\"\n if dimension == 0:\n plot = TH1D(\"Energy\", \";Energy[MeV];Count per bin\",\n int(spectra._energy_bins),\n spectra._energy_low,\n spectra._energy_high)\n elif dimension == 1:\n plot = TH1D(\"Radial\", \";Radius[mm];Count per bin\",\n int(spectra._radial_bins),\n spectra._radial_low,\n spectra._radial_high)\n elif dimension == 2:\n plot = TH1D(\"Time\", \";Time[yr];Count per bin\",\n int(spectra._time_bins),\n spectra._time_low,\n spectra._time_high)\n data = spectra.project(dimension)\n for index, datum in enumerate(data):\n plot.SetBinContent(index, datum)\n if graphical:\n plot.Draw()\n raw_input(\"Return to quit\")\n return plot\n\n\ndef plot_surface(spectra, dimension, graphical=True):\n \"\"\" Plot the spectra with the dimension projected out.\n For example dimension == 0 will plot the spectra as projected onto the\n radial and time dimensions i.e. not energy.\n\n Args:\n spectra (:class:`echidna.core.spectra`): The spectra to plot.\n dimension (int): The dimension to project out.\n graphical (bool): Shows plot and waits for user input when true.\n\n Returns:\n (:class:`ROOT.TH2D`): plot.\n \"\"\"\n if dimension == 0:\n plot = TH2D(\"EnergyRadial\", \";Energy[MeV];Radius[mm];Count per bin\",\n int(spectra._energy_bins),\n spectra._energy_low, spectra._energy_high,\n int(spectra._radial_bins),\n spectra._radial_low, spectra._radial_high)\n data = spectra.surface(2)\n elif dimension == 1:\n plot = TH2D(\"TimeEnergy\", \";Time[yr];Energy[MeV];Count per bin\",\n int(spectra._time_bins),\n spectra._time_low, spectra._time_high,\n int(spectra._energy_bins),\n spectra._energy_low, spectra._energy_high)\n data = spectra.surface(1)\n elif dimension == 2:\n plot = TH2D(\"TimeRadial\", \";Time[yr];Radius[mm];Count per bin\",\n int(spectra._time_bins),\n spectra._time_low, spectra._time_high,\n int(spectra._radial_bins),\n spectra._radial_low, spectra._radial_high)\n data = spectra.surface(0)\n for index_x, data_x in enumerate(data):\n for index_y, datum in enumerate(data_x):\n plot.SetBinContent(index_x, index_y, datum)\n if graphical:\n plot.Draw(\"COLZ\")\n raw_input(\"Return to quit\")\n return plot\n\ndef spectral_plot(spectra_dict, dimension=0, show_plot=False, **kwargs):\n \"\"\" Produce spectral plot.\n\n For a given signal, produce a plot showing the signal and relevant\n backgrounds. Backgrounds are automatically summed to create the\n spectrum \"Summed background\" and all spectra passed in\n :obj:`spectra_dict` will be summed to produce the \"Sum\" spectra\n\n Args:\n spectra_dict (dict): Dictionary containing each spectrum you wish\n to plot, and the relevant parameters required to plot them.\n dimension (int, optional): The dimension or axis along which the\n spectra should be plotted. Default is energy axis.\n\n Example:\n\n An example :obj:`spectra_dict` is as follows::\n\n {Te130_0n2b._name: {'spectra': Te130_0n2b, 'label': 'signal',\n 'style': {'color': ROOT.kBlue}, 'type': 'signal'},\n Te130_2n2b._name: {'spectra': Te130_2n2b, 'label': r'$2\\\\nu2\\\\beta',\n 'style': {'color': ROOT.kRed}, 'type': 'background'},\n B8_Solar._name: {'spectra': B8_Solar, 'label': 'solar',\n 'style': {'color': ROOT.kGreen}, 'type': 'background'}}\n\n .. 
note::\n\n Keyword arguments include:\n\n * log_y (*bool*): Use log scale on y-axis.\n * limit (:class:`spectra.Spectra`): Include a spectrum showing\n a current or target limit.\n\n Returns:\n :class:`ROOT.TCanvas`: Canvas containing spectral plot.\n \"\"\"\n first_spectra = True\n if dimension == 0:\n for value in spectra_dict.values():\n spectra = value.get(\"spectra\")\n if first_spectra:\n energy_low = spectra._energy_low\n energy_high = spectra._energy_high\n energy_bins = spectra._energy_bins\n width = spectra._energy_width\n shape = (energy_bins) # Shape for summed arrays\n first_spectra = False\n else:\n if spectra._energy_low != energy_low:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect energy lower limit\")\n if spectra._energy_high != energy_high:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect energy upper limit\")\n if spectra._energy_bins != energy_bins:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect energy upper limit\")\n summed_background = ROOT.TH1F(\"summed_background\",\"; Energy (MeV); Counts\", spectra._energy_bins, spectra._energy_low, spectra._energy_high)\n summed_total = ROOT.TH1F(\"summed_total\",\"; Energy (MeV); Counts\", spectra._energy_bins, spectra._energy_low, spectra._energy_high)\n elif dimension == 1:\n for value in spectra_dict.values:\n spectra = value.get(\"spectra\")\n if first_spectra:\n radial_low = spectra._radial_low\n radial_high = spectra._radial_high\n radial_bins = spectra._radial_bins\n width = spectra._radial_width\n shape = (radial_bins)\n first_spectra = False\n else:\n if spectra._radial_low != radial_low:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time lower limit\")\n if spectra._radial_high != radial_high:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time upper limit\")\n if spectra._radial_bins != radial_bins:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time upper limit\")\n summed_background = ROOT.TH1F(\"summed_background\",\"; Radius (mm); Counts\", spectra._radial_bins, spectra._radial_low, spectra._radial_high)\n summed_total = ROOT.TH1F(\"summed_total\",\"; Radius (mm); Counts\", spectra._radial_bins, spectra._radial_low, spectra._radial_high)\n elif dimension == 2:\n for value in spectra_dict.values:\n spectra = value.get(\"spectra\")\n if first_spectra:\n time_low = spectra._time_low\n time_high = spectra._time_high\n time_bins = spectra._time_bins\n width = spectra._time_width\n shape = (time_bins)\n first_spectra = False\n else:\n if spectra._time_low != time_low:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time lower limit\")\n if spectra._time_high != time_high:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time upper limit\")\n if spectra._time_bins != time_bins:\n raise AssertionError(\"Spectra \" + spectra._name + \" has \"\n \"incorrect time upper limit\")\n summed_background = ROOT.TH1F(\"summed_background\",\"; Time (Yr); Counts\", spectra._time_bins, spectra._time_low, spectra._time_high)\n summed_total = ROOT.TH1F(\"summed_total\",\"; Time (Yr); Counts\", spectra._time_bins, spectra._time_low, spectra._time_high)\n summed_total = numpy.zeros(shape=shape)\n can = ROOT.TCanvas()\n leg = ROOT.TLegend(0.7,0.7,0.9,0.9)\n summed_background.SetLineStyle(7)\n summed_background.SetLineColor(ROOT.kRed)\n summed_total.SetLineStyle(7)\n 
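# dashed styling marks the two summed histograms so they stand apart from\n    # the individual component spectra drawn afterwards.\n    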
summed_total.SetLineColor(ROOT.kBlack)\n leg.AddEntry(summed_total, \"Background + Signal\", \"l\")\n leg.AddEntry(summed_background, \"Background\", \"l\")\n hists = []\n if kwargs.get(\"log_y\") is True:\n can.SetLogy()\n for value in spectra_dict.values():\n spectra = value.get(\"spectra\")\n hist = spectra.project(dimension, graphical=False)\n hist.SetLineColor(value.get(\"style\")[\"color\"])\n leg.AddEntry(hist, value.get(\"label\"),'l')\n hists.append(hist)\n if value.get(\"type\") is \"background\":\n summed_background.Add(hist)\n else:\n summed_total.Add(hist)\n summed_total.Draw()\n summed_background.Draw(\"same\")\n for hist in hists:\n hist.Draw(\"same\")\n\n # Plot limit\n if kwargs.get(\"limit\") is not None:\n limit = kwargs.get(\"limit\")\n hist = limit.project(dimension, graphical=False)\n hist.SetLineColor(ROOT.kGray)\n leg.AddEntry(hist, \"Kamland-Zen Limit\", \"l\")\n leg.Draw(\"same\")\n return can\n\n\ndef plot_chi_squared_per_bin(calculator, x_bins, x_low, x_high,\n x_title=None, graphical=False):\n \"\"\" Produces a histogram of chi-squared per bin.\n\n Args:\n calculator (:class:`echidna.limit.chi_squared.ChiSquared`): Calculator\n containing the chi-squared values to plot.\n x_bins (int): Number of bins.\n x_low (float): Lower edge of first bin to plot.\n x_high (float): Upper edge of last bin to plot.\n x_title (string, optional): X Axis title.\n graphical (bool): Plots hist to screen if true.\n\n Returns:\n :class:`ROOT.TH1D`: Histogram of chi-squared per bin.\n \"\"\"\n if x_title:\n hist_title = \"; \"+x_title+\"; #chi^{2}\"\n else:\n hist_title = \"; Energy (MeV); #chi^{2}\"\n hist = ROOT.TH1F(\"chi_sq_per_bin\", hist_title, x_bins, x_low, x_high)\n bin = 1 # 0 is underflow\n for chi_sq in calculator.get_chi_squared_per_bin():\n hist.SetBinContent(bin, chi_sq)\n bin += 1\n if graphical:\n hist.Draw()\n raw_input(\"RET to quit\")\n return hist\n","sub_path":"echidna/output/plot_root.py","file_name":"plot_root.py","file_ext":"py","file_size_in_byte":10973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"156545468","text":"# coding: utf-8\nfrom devito import TimeFunction, memoized_meth\nfrom examples.seismic.tti.operators import ForwardOperator\nfrom examples.seismic import Receiver\n\n\nclass AnisotropicWaveSolver(object):\n \"\"\"\n Solver object that provides operators for seismic inversion problems\n and encapsulates the time and space discretization for a given problem\n setup.\n\n :param model: Physical model with domain parameters\n :param source: Sparse point symbol providing the injected wave\n :param receiver: Sparse point symbol describing an array of receivers\n :param time_order: Order of the time-stepping scheme (default: 2)\n :param space_order: Order of the spatial stencil discretisation (default: 4)\n\n Note: space_order must always be greater than time_order\n \"\"\"\n def __init__(self, model, source, receiver, space_order=2, **kwargs):\n self.model = model\n self.source = source\n self.receiver = receiver\n\n self.space_order = space_order\n self.dt = self.model.critical_dt\n\n # Cache compiler options\n self._kwargs = kwargs\n\n @memoized_meth\n def op_fwd(self, kernel='shifted', save=False):\n \"\"\"Cached operator for forward runs with buffered wavefield\"\"\"\n return ForwardOperator(self.model, save=save, source=self.source,\n receiver=self.receiver,\n space_order=self.space_order,\n kernel=kernel, **self._kwargs)\n\n def forward(self, src=None, rec=None, u=None, v=None, m=None,\n 
epsilon=None, delta=None, theta=None, phi=None,\n save=False, kernel='centered', **kwargs):\n \"\"\"\n Forward modelling function that creates the necessary\n data objects for running a forward modelling operator.\n\n :param src: Symbol with time series data for the injected source term\n :param rec: Symbol to store interpolated receiver data\n :param u: (Optional) Symbol to store the computed wavefield\n :param m: (Optional) Symbol for the time-constant square slowness\n :param save: Option to store the entire (unrolled) wavefield\n :param kernel: type of discretization, centered or shifted\n\n :returns: Receiver, wavefield and performance summary\n \"\"\"\n\n # Space order needs to be halved in the shifted case to have an\n # overall space_order discretization\n self.space_order = self.space_order // 2 if kernel == 'shifted' \\\n else self.space_order\n # Source term is read-only, so re-use the default\n if src is None:\n src = self.source\n # Create a new receiver object to store the result\n if rec is None:\n rec = Receiver(name='rec', grid=self.model.grid,\n time_range=self.receiver.time_range,\n coordinates=self.receiver.coordinates.data)\n\n # Create the forward wavefield if not provided\n if u is None:\n u = TimeFunction(name='u', grid=self.model.grid,\n save=self.source.nt if save else None,\n time_order=2, space_order=self.space_order)\n # Create the forward wavefield if not provided\n if v is None:\n v = TimeFunction(name='v', grid=self.model.grid,\n save=self.source.nt if save else None,\n time_order=2, space_order=self.space_order)\n # Pick m from model unless explicitly provided\n if m is None:\n m = m or self.model.m\n if epsilon is None:\n epsilon = epsilon or self.model.epsilon\n if delta is None:\n delta = delta or self.model.delta\n if theta is None:\n theta = theta or self.model.theta\n if phi is None:\n phi = phi or self.model.phi\n\n # Execute operator and return wavefield and receiver data\n op = self.op_fwd(kernel, save)\n\n if len(m.shape) == 2:\n summary = op.apply(src=src, rec=rec, u=u, v=v, m=m, epsilon=epsilon,\n delta=delta, theta=theta,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n else:\n summary = op.apply(src=src, rec=rec, u=u, v=v, m=m, epsilon=epsilon,\n delta=delta, theta=theta, phi=phi,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n return rec, u, v, summary\n","sub_path":"devito/examples/seismic/tti/wavesolver.py","file_name":"wavesolver.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"203340018","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\n# world.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\nclass Stack():\n def __init__(self):\n self.stack = []\n def push(self, value):\n self.stack.append(value)\n def pop(self):\n if self.size() > 0:\n return self.stack.pop()\n else:\n return None\n def size(self):\n return len(self.stack)\n\nclass Graph():\n def 
__init__(self, player):\n self.player = player\n # establish a stack of traversed rooms\n self.traversed = Stack()\n self.directions = {'n': 's', 's': 'n', 'e': 'w', 'w': 'e'}\n self.rooms = {}\n self.final_path = []\n\n def traverse(self):\n # for each room, starting at the starting room (player.current_room.id), we need to index the neighboring rooms and add the directions traveled to reach them, plus the backtracking after reaching a room with no neighbors\n \n # the rooms neighboring the starting room are indexed\n self.rooms[self.player.current_room.id] = {}\n for direction in self.player.current_room.get_exits():\n # currently, no neighboring room is known\n self.rooms[self.player.current_room.id][direction] = 'unknown'\n \n # initiate a traversal loop with while True\n while True:\n # keep track of rooms that have never been visited, then visit them\n never_visited = []\n \n for room in self.rooms[self.player.current_room.id]:\n if self.rooms[self.player.current_room.id][room] == 'unknown':\n # append all unvisited rooms to the never_visited list\n never_visited.append(room)\n \n if len(never_visited) > 0:\n new_direction = never_visited.pop()\n # mark the chosen exit as visited\n self.rooms[self.player.current_room.id][new_direction] = True\n # push the reverse direction to the stack so we can backtrack later\n self.traversed.push(self.directions[new_direction])\n # travel in the direction\n self.player.travel(new_direction)\n # append the travelled direction to the final path\n self.final_path.append(new_direction)\n \n # if the current_room isn't indexed, index it\n if self.player.current_room.id not in self.rooms:\n self.rooms[self.player.current_room.id] = {}\n \n for direction in self.player.current_room.get_exits():\n self.rooms[self.player.current_room.id][direction] = 'unknown'\n # the exit leading back to the previous room is already explored\n self.rooms[self.player.current_room.id][self.directions[new_direction]] = True\n \n else:\n if self.traversed.size() > 0:\n new_direction = self.traversed.pop()\n self.player.travel(new_direction)\n self.final_path.append(new_direction)\n else:\n print(self.final_path)\n return self.final_path\n\n\n\nplayer = Player(world.starting_room)\ngraph = Graph(player)\ntraversal_path = graph.traverse()\n\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")","sub_path":"projects/adventure/traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"548184145","text":"# -*- coding: utf-8 -*-\r\n\r\nimport unittest\r\nimport os\r\nimport sys\r\nimport argparse\r\nimport imp\r\nfrom datetime import datetime\r\n\r\nGLOBAL = os.path.dirname(os.path.realpath(__file__))\r\nCONF = GLOBAL + \"\\\\conf\"\r\nPKG = GLOBAL + \"\\\\pkg\"\r\nAPP = GLOBAL + \"\\\\app\"\r\nTEST = GLOBAL + \"\\\\tests\"\r\nDATA = GLOBAL + \"\\\\data\"\r\n\r\ndef add_sys_path():\r\n\t# Windows - sys.path.append(\";\".join([GLOBAL, CONF, PKG]))\r\n\tsys.path.insert(1, GLOBAL)\r\n\tsys.path.insert(1, CONF)\r\n\tsys.path.insert(1, PKG)\r\n\tsys.path.insert(1, TEST)\r\n\r\ndef arg_parser():\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument(\"--pkg\")\r\n\targs = parser.parse_args()\r\n\tpkg = args.pkg if args.pkg else None\r\n\treturn pkg\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\ttry:\r\n\t\t# configure which test cases you would like to run\r\n\t\t# from tests.pkg_channel_tasks import *\r\n\t\tadd_sys_path()\r\n\t\tfrom tests.pkg_user import *\r\n\t\tunittest.main()\r\n\texcept:\r\n\t\traise","sub_path":"data-analysis-system/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"186107207","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 28 16:07:22 2019\n\n@author: xie\n\"\"\"\n\ngood = int(input())\nlow = int(input())\nif abs(good-low) <= 10:\n print('2000')\nelif good-low > 10:\n money = 2000+15*(good-10)-10*low\n if money <= 3000:\n print(money)\n else:\n print('3000')\nelif low-good >10:\n money = 2000+15*good-20*(low-10)\n if money >= 1000:\n print(money)\n else:\n print('1000')","sub_path":"第二次上机/小明的补习班.py","file_name":"小明的补习班.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"67608517","text":"import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt_extended import JWTManager\n\nfrom db import db\nfrom resources.item import Item, ItemList\nfrom resources.user import UserRegister, User, UserLogin, TokenRefresh, UserLogout\nfrom resources.store import StoreList, Store\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') #specify database ':///'\n\"\"\"\nNote: previously when using sqlite3 directly, we created the connection manually; now it is a one-time setting instead\n\"\"\"\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False #turns off flask built-in sqlalchemy tracker not the imported one\napp.config['PROPAGATE_EXCEPTIONS'] = True\napp.config['JWT_BLACKLIST_ENABLED'] = True\napp.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access','refresh'] #enable blacklist for access and refresh tokens\napp.secret_key = 'jose' #usually configured locally in production\nBLACKLIST = set() #in-memory store of revoked token identifiers (jti)\napi = Api(app)\n\n\n@app.before_first_request\ndef create_tables(): #create data.db\n db.create_all() #tables are automatically created by sqlalchemy\n\n\n#the login and user session functions\njwt = JWTManager(app) # creates endpoint /auth; returns a jwt token after username and password are verified\n\n@jwt.token_in_blacklist_loader\ndef check_if_token_in_blacklist(decrypted_token):\n return decrypted_token['jti'] in BLACKLIST\n\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\napi.add_resource(User, '/user/<int:user_id>')\napi.add_resource(UserLogin, '/login')\napi.add_resource(TokenRefresh, '/refresh')\n\n\nif __name__ == \"__main__\":\n from db import db\n db.init_app(app) #link flask with SQLAlchemy\n 
app.run(port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"439648970","text":"import base64\nfrom re import findall\nfrom sys import exc_info\nfrom nonebot.adapters.cqhttp import MessageSegment\nimport httpx\nfrom httpx import AsyncClient\nfrom nonebot import logger\nfrom nonebot import get_driver\n\nproxies = (\n \"http://127.0.0.1:7890\"\n if not get_driver().config.setu_porxy\n else get_driver().config.setu_porxy\n)\n\nsave = get_driver().config.setu_save\nif save == \"webdav\":\n from .save_to_WebDAV import *\nelse:\n from .save_to_Local import *\n\n\nasync def get_setu(keyword=\"\", r18=False) -> list:\n \"\"\"Fetch an image and return its pieces\n\n Args:\n keyword (str, optional): search keyword. Defaults to \"\".\n r18 (bool, optional): whether r18 images are allowed. Defaults to False.\n\n Returns:\n list[0]: base64-encoded image, or \"Error:\"\n list[1]: image info, or error details\n list[2]: True if an image was fetched, otherwise False\n list[3]: the image url (present only on success)\n \"\"\"\n async with AsyncClient() as client:\n req_url = \"https://api.lolicon.app/setu/v2\"\n params = {\"keyword\": keyword, \"r18\": 1 if r18 else 0, \"size\": \"regular\"}\n try:\n res = await client.get(req_url, params=params, timeout=120)\n logger.info(res.json())\n except httpx.HTTPError as e:\n logger.warning(e)\n return \"Error:\", f\"API error: {e}\", False\n try:\n setu_title = res.json()[\"data\"][0][\"title\"]\n setu_url = res.json()[\"data\"][0][\"urls\"][\"regular\"]\n content = await downPic(setu_url)\n setu_pid = res.json()[\"data\"][0][\"pid\"]\n setu_author = res.json()[\"data\"][0][\"author\"]\n p = res.json()[\"data\"][0][\"p\"]\n\n b64 = convert_b64(content)\n\n # save the image\n save_img(content, pid=setu_pid, p=p, r18=r18)\n\n if isinstance(b64, str):\n pic = \"[CQ:image,file=base64://\" + b64 + \"]\"\n data = (\n \"Title: \"\n + setu_title\n + \"\\npid: \"\n + str(setu_pid)\n + \"\\nArtist: \"\n + setu_author\n )\n return pic, data, True, setu_url\n except httpx.ProxyError as e:\n logger.warning(e)\n return \"Error:\", f\"Proxy error: {e}\", False\n except IndexError as e:\n logger.warning(e)\n return \"Error:\", f\"No image matching {keyword} was found in the gallery.\", False\n except:\n logger.warning(f\"{exc_info()[0]}, {exc_info()[1]}\")\n return \"Error:\", f\"{exc_info()[0]} {exc_info()[1]}.\", False\n\n\nasync def downPic(url):\n async with AsyncClient(proxies=proxies) as client:\n headers = {\n \"Referer\": \"https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\",\n }\n resp = await client.get(url=url, headers=headers, timeout=120)\n if resp.status_code == 200:\n logger.success(\"Image fetched successfully\")\n return resp.content\n else:\n logger.error(f\"Failed to fetch image: {resp.status_code}\")\n return resp.status_code\n\n\ndef convert_b64(content) -> str:\n ba = str(base64.b64encode(content))\n pic = findall(r\"\\'([^\\\"]*)\\'\", ba)[0].replace(\"'\", \"\")\n return pic\n","sub_path":"nonebot_plugin_setu_now/get_Data.py","file_name":"get_Data.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"476050152","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom luckycommon.area import handler as area_handler\n\nfrom luckycommon.utils.api import token_required\nfrom luckycommon.utils.decorator import response_wrapper\nfrom luckycommon.utils.exceptions import ParamError\n\nfrom 
django.views.decorators.http import require_GET\n\n_LOGGER = logging.getLogger('lucky')\n\n\n@require_GET\n@response_wrapper\n@token_required\ndef get_regions(request):\n try:\n parent_id = long(request.GET.get('area_id'))\n except:\n raise ParamError('area id invalid')\n parent_id = str(parent_id)\n if len(parent_id) > 9:\n raise ParamError('area id invalid')\n area_list = area_handler.get_list_by_parent(parent_id)\n _LOGGER.info('%s requested %s area data', request.user_id, len(area_list))\n data = {\n 'regions': area_list\n }\n return data\n","sub_path":"luckyapi/views/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"425552682","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('drchrono', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='appointment',\n name='id',\n ),\n migrations.RemoveField(\n model_name='patient',\n name='id',\n ),\n migrations.AddField(\n model_name='appointment',\n name='appointment_id',\n field=models.IntegerField(default=0, serialize=False, primary_key=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='patient',\n name='patient_id',\n field=models.IntegerField(default=None, serialize=False, primary_key=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"drchrono/migrations/0002_auto_20171004_1034.py","file_name":"0002_auto_20171004_1034.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359281686","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.cache import never_cache\nfrom django.template.loader import get_template\nfrom django.views.decorators.csrf import csrf_protect\nfrom progreNews.extractorapiwrapper import AYLIEN\nfrom progreNews.newsapiwrapper import NEWSAPI\nfrom progreNews.getimagewrapper import GETIMAGE\nimport json\nfrom .models import *\n\n# Create your views here.\n\n\ndef base(request):\n return render(request, 'progreNews/base.html')\n\n\ndef news_feed(request):\n return render(request, 'progreNews/news_feed.html')\n\n\ndef annotated_news(request):\n return render(request, 'progreNews/annotated_news.html')\n\n\ndef offline(request):\n return render(request, 'progreNews/offline.html')\n\n\ndef page_404(request):\n return render(request, 'progreNews/404.html')\n\n\n@never_cache\ndef sw_js(request, js):\n template = get_template('service-worker.js')\n html = template.render()\n return HttpResponse(html, content_type=\"application/x-javascript\")\n\n\n@never_cache\ndef idb_js(request, js):\n template = get_template('idb.js')\n html = template.render()\n return HttpResponse(html, content_type=\"application/x-javascript\")\n\n\n@never_cache\ndef annotator_edited_js(request, js):\n template = get_template('annotator_edited.js')\n html = template.render()\n return HttpResponse(html, content_type=\"application/x-javascript\")\n\n\n@csrf_protect\ndef extract(request):\n news_url = request.GET[\"news_url\"]\n order = request.GET[\"order\"]\n items = AYLIEN.extract(news_url)\n items = {'items': items, 'order': order}\n if items == \"No response\":\n return HttpResponse(\"No response\")\n else:\n return HttpResponse(json.dumps(items), 
content_type=\"application/json\")\n\n\n@csrf_protect\ndef latest(request):\n news_source = request.GET[\"news_source\"]\n count = request.GET[\"count\"]\n items = NEWSAPI.get_latest(news_source)\n items = {'items': items, 'count': count}\n if items == \"No response\":\n return HttpResponse(\"No response\")\n else:\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n\n\n@csrf_protect\ndef get_image(request):\n image_source_url = request.GET[\"image_url\"]\n item = GETIMAGE.get_image_response(image_source_url)\n if item == \"No response\":\n return HttpResponse(\"No response\")\n else:\n return HttpResponse(item)\n\n\n@csrf_protect\ndef post_news(request):\n news_url = request.GET[\"news_url\"]\n news_article = request.GET[\"news_article\"]\n try:\n existing_annotated_news = AnnotatedNews.objects.get(url__exact=news_url)\n existing_annotated_news_id = existing_annotated_news.id\n items = []\n items.append(existing_annotated_news_id)\n items.append(\"News already exists in the database.\")\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n except:\n new_annotated_news = AnnotatedNews.objects.create(url=news_url, article=news_article)\n new_annotated_news.save()\n new_annotated_news_id = new_annotated_news.id\n items = []\n items.append(new_annotated_news_id)\n items.append(\"Success.\")\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n\n\n@csrf_protect\ndef post_anno(request):\n anno_body = request.GET[\"anno_body\"]\n new_annotation = Annotation.objects.create(body=anno_body)\n new_annotation.save()\n new_annotation_id = new_annotation.id\n items = []\n items.append(new_annotation_id)\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n\n\n@csrf_protect\ndef post_outbox(request):\n news_article = request.GET[\"news_article\"]\n news_url = request.GET[\"news_url\"]\n anno_body = request.GET[\"anno_body\"]\n news_source = request.GET[\"news_source\"]\n news_category = request.GET[\"news_category\"]\n news_description = request.GET[\"news_description\"]\n news_published = request.GET[\"news_published\"]\n news_author = request.GET[\"news_author\"]\n news_title = request.GET[\"news_title\"]\n anno_motivation = request.GET[\"anno_motivation\"]\n anno_created = request.GET[\"anno_created\"]\n anno_target_type = request.GET[\"anno_target_type\"]\n anno_target_format = request.GET[\"anno_target_format\"]\n anno_target_selector_type = request.GET[\"anno_target_selector_type\"]\n anno_target_selector_conformsTo = request.GET[\"anno_target_selector_conformsTo\"]\n anno_target_selector_value = request.GET[\"anno_target_selector_value\"]\n\n try:\n existing_annotated_news = AnnotatedNews.objects.get(url__exact=news_url)\n existing_annotated_news_id = existing_annotated_news.id\n new_annotation = Annotation.objects.create(body=anno_body)\n new_annotation.save()\n new_annotation_id = new_annotation.id\n items = []\n items.append(\"news exist\")\n items.append(existing_annotated_news_id)\n items.append(news_title)\n items.append(news_author)\n items.append(news_published)\n items.append(news_description)\n items.append(news_category)\n items.append(news_source)\n items.append(news_article)\n items.append(new_annotation_id)\n items.append(anno_created)\n items.append(anno_motivation)\n items.append(anno_body)\n items.append(anno_target_type)\n items.append(anno_target_format)\n items.append(anno_target_selector_type)\n items.append(anno_target_selector_conformsTo)\n 
items.append(anno_target_selector_value)\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n except:\n new_annotated_news = AnnotatedNews.objects.create(url=news_url, article=news_article)\n new_annotated_news.save()\n new_annotated_news_id = new_annotated_news.id\n new_annotation = Annotation.objects.create(body=anno_body)\n new_annotation.save()\n new_annotation_id = new_annotation.id\n items = []\n items.append(\"no news exist\")\n items.append(new_annotated_news_id)\n items.append(news_title)\n items.append(news_author)\n items.append(news_published)\n items.append(news_description)\n items.append(news_category)\n items.append(news_source)\n items.append(news_article)\n items.append(new_annotation_id)\n items.append(anno_created)\n items.append(anno_motivation)\n items.append(anno_body)\n items.append(anno_target_type)\n items.append(anno_target_format)\n items.append(anno_target_selector_type)\n items.append(anno_target_selector_conformsTo)\n items.append(anno_target_selector_value)\n return HttpResponse(json.dumps(items), content_type=\"application/json\")\n\n\ndef get_news(request, news_id):\n desired_news = AnnotatedNews.objects.get(pk=news_id)\n return HttpResponse(desired_news.article)\n\n\ndef get_anno(request, anno_id):\n desired_annotation = Annotation.objects.get(pk=anno_id)\n return HttpResponse(desired_annotation.body, content_type=\"text/plain\")\n\n\n","sub_path":"progreNews_Project/progreNews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169715493","text":"#import re\n#test=\"hi12.i mhi\\\\n find,thank you.and u, how about u?\"\n# m=re.findall('hi', test)\n# if m:\n# \tprint(m)\n# \tprint(type(m))\n# else:\n# \tprint('find nothing')\n\t\nfrom datetime import date\ntod = date.today()\npas = date(2017, 10, 16)\nsp = tod - pas\nprint(sp.days)","sub_path":"meetu.py","file_name":"meetu.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630578561","text":"import os\nfrom flask import Flask, g, flash, redirect, url_for\nfrom resources.users import users_api\nfrom resources.stories import stories_api\nfrom resources.memberships import memberships_api \nfrom resources.storyqueues import storyqueues_api \nfrom resources.bookmarks import bookmarks_api \nfrom resources.contents import content_api \nfrom resources.comments import comment_api \nfrom resources.votes import votes_api \nimport models\nfrom flask_cors import CORS\nfrom flask_login import LoginManager, current_user\nif not 'ON_HEROKU' in os.environ:\n import config \n\napp = Flask(__name__)\n\n#session key for cookies!\napp.secret_key = 'LKSDFLKVNKNKSCNDMKLDMV SDLKMVNLKSD VSD'\n\n#setup app login\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n@login_manager.user_loader\ndef load_user(userid):\n try:\n return models.User.get(models.User.id == userid)\n except models.DoesNotExist:\n return None\n\nCORS(users_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(stories_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(memberships_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(storyqueues_api, origins=[\"http://localhost:3000\", 
\"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(bookmarks_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(content_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(comment_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\nCORS(votes_api, origins=[\"http://localhost:3000\", \"https://crowdtales.herokuapp.com\"], supports_credentials=True)\napp.register_blueprint(users_api, url_prefix='/api/v1')\napp.register_blueprint(stories_api, url_prefix='/api/v1')\napp.register_blueprint(memberships_api, url_prefix='/api/v1')\napp.register_blueprint(storyqueues_api, url_prefix='/api/v1')\napp.register_blueprint(bookmarks_api, url_prefix='/api/v1')\napp.register_blueprint(content_api, url_prefix='/api/v1')\napp.register_blueprint(comment_api, url_prefix='/api/v1')\napp.register_blueprint(votes_api, url_prefix='/api/v1')\n\n@app.route('/')\ndef hello_world():\n return 'hello world'\n\nif 'ON_HEROKU' in os.environ:\n models.initialize()\nelse:\n if __name__ == '__main__':\n models.initialize()\n app.run(debug=config.DEBUG, port=config.PORT)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"608091644","text":"# ------------------------------------------------------------\n# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.\n#\n# Licensed under the BSD 2-Clause License.\n# You should have received a copy of the BSD 2-Clause License\n# along with the software. If not, See,\n#\n# \n#\n# ------------------------------------------------------------\n\nfrom dragon.ops import MPIBroadcast, MPIGather\n\nfrom ..layer import Layer\n\n\nclass MPIBroadcastLayer(Layer):\n \"\"\"The implementation of ``MPIBroadcastLayer``.\n\n Parameters\n ----------\n root : int\n The world rank of root. Refer `MPIParameter.root`_.\n\n \"\"\"\n def __init__(self, LayerParameter):\n super(MPIBroadcastLayer, self).__init__(LayerParameter)\n param = LayerParameter.mpi_param\n self._param = {'root': param.root}\n\n def Setup(self, bottom):\n super(MPIBroadcastLayer, self).Setup(bottom)\n input = bottom[0] if isinstance(bottom, list) else bottom\n return MPIBroadcast(input, **self._param)\n\n\nclass MPIGatherLayer(Layer):\n \"\"\"The implementation of ``MPIGatherLayer``.\n\n Parameters\n ----------\n root : int\n The world rank of root. Refer `MPIParameter.root`_.\n\n \"\"\"\n def __init__(self, LayerParameter):\n super(MPIGatherLayer, self).__init__(LayerParameter)\n param = LayerParameter.mpi_param\n self._param = {'root': param.root}\n\n def Setup(self, bottom):\n super(MPIGatherLayer, self).Setup(bottom)\n input = bottom[0] if isinstance(bottom, list) else bottom\n return MPIGather(input, nout=len(self._top), **self._param)","sub_path":"Dragon/python/dragon/vm/caffe/layers/mpi.py","file_name":"mpi.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"53040247","text":"#!/usr/bin/python2\ndef main():\n \"\"\"\n graph-stats.py\n\n Python script, usage:\n python2 graph-stats.py [File [MaxNodes]]\n\n Loads the dictionary stored in [File] (optional command line input argument)\n and creates the graph that relates the users by `following'/`not following'\n status. 
The resulting graph's visual representation and the node degree\n distribution ('degree(node) vs node') are saved as image files.\n\n Requirements: python2.7+ (stdlib) matplotlib networkx\n \"\"\"\n\n import networkx as nx\n import matplotlib.pyplot as plt\n import numpy as np\n import sys\n import pickle\n\n File=\"followsConexions_15012017.bin\"\n MaxNodes=sys.maxint\n\n if len(sys.argv) > 1:\n File=sys.argv[1]\n if len(sys.argv) > 2:\n MaxNodes=int(sys.argv[2])\n with open(File,\"rb\") as f:\n d=pickle.load(f)\n\n G=nx.Graph()\n\n print(str(len(list(d.keys())))+\" people.\") #people in total.\n keys=set([int(i) for i in d])\n i=0\n for key1 in d:\n print(str(i)+\"th user.\")\n if i>MaxNodes: break\n if (key1!='480706558') and ((int(key1) in d['480706558']['ids']) or (480706558 in d[key1]['ids'])): continue\n G.add_node(int(key1))\n for key2 in d[key1]['ids']:\n if (key2 in keys):\n G.add_edge(int(key1),key2)\n i+=1\n print(str(len(G.nodes()))+\" nodes.\")\n print(str(len(G.edges()))+\" edges.\")\n print('')\n\n i=0\n nodelistRed = []\n nodelistBlue = []\n nodelistGreen = []\n Gclust = nx.closeness_centrality(G)\n print(\"done clustering\")\n for val in Gclust:\n if(Gclust[val] < 0.01): nodelistRed.append(val)\n elif(Gclust[val] < 0.3): nodelistGreen.append(val)\n else: nodelistBlue.append(val)\n i+=1\n print(\"done colouring\")\n pos=nx.spring_layout(G)\n nx.draw_networkx_nodes(G,pos,nodelistRed, node_color='r', node_size=15, alpha=0.8)\n nx.draw_networkx_nodes(G,pos,nodelistGreen, node_color='g', node_size=15, alpha=0.8)\n nx.draw_networkx_nodes(G,pos,nodelistBlue, node_color='b', node_size=15, alpha=0.8)\n # edges\n nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)\n #nx.draw(G,node_size=15)\n print(\"Saving picture\")\n plt.rc('text', usetex=True) \n plt.title(r\"Twitter follower-followed relation graph plot\") \n plt.savefig(\"test-graph-plot-\"+File.split('.')[0]+\".eps\")\n plt.savefig(\"test-graph-plot-\"+File.split('.')[0]+\".png\",dpi=160)\n\n plt.close()\n\n id=0\n dmax=0\n for node in G:\n if G.degree(node)>dmax:\n dmax=G.degree(node)\n id=node\n # report the most connected user\n print(id)\n print(d[str(id)])\n\n degrees=[G.degree(node) for node in G]\n degrees.sort()\n plt.plot(degrees,'bo')\n plt.title(r\"Link number distribution of a Twitter follower-followed network\") \n plt.xlabel(r\"User number id normalized\") \n plt.ylabel(r\"Number of links in/out the user (node)\") \n plt.savefig(\"degree-distrib-\"+File.split('.')[0]+\".eps\")\n plt.savefig(\"degree-distrib-\"+File.split('.')[0]+\".png\",dpi=160)\n\n print(\"done :-)\")\n\n return 0\n\nif __name__==\"__main__\":\n main()\n","sub_path":"graph-stats.py","file_name":"graph-stats.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"492897722","text":"import threading as T\n\ndef add(na,nb):\n s=na+nb\n print(s)\ndef mult(na,nb):\n m=na*nb\n print(m)\n\nif __name__=='__main__':\n t1=T.Thread(target=add,args=(10,20))\n t2=T.Thread(target=mult,args=(10,20))\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n print('EOD')\n","sub_path":"thread/simple-thread.py","file_name":"simple-thread.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"643428685","text":"\n\nimport sys \nprint(sys.argv)\n\n# How do we read a file in Python?\nwith open(\"print8.ls8\") as file:\n for line in file:\n split_line = line.split('#')\n command = split_line[0].strip()\n\n print(line)\n print(split_line)\n print(command)\n\n\n\n\n 
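\n# --- illustrative sketch (an assumption, not recovered code): the run() notes\n# below call self.ram_read(), so a minimal pair of RAM helper methods in the\n# spirit of the LS-8 comments in this file could look like this:\ndef ram_read(self, address):\n # fetch and return the value stored at the given RAM address\n return self.ram[address]\n\ndef ram_write(self, address, value):\n # store the given value at the given RAM address\n self.ram[address] = value\n\n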
# implement the core of run \n # This is the workhorse function of the entire processor\n # It needs to read the memory address that's stored in register PC, \n # and store that result in IR, the Instruction Register.\n def run(self):\n while self.running: # while we are running \n # Instruction Reader = the place counter in Ram pointing at instruction \n ir = self.ram_read(self.pc) # read memory address and store in IR Instruction Register\n pc_flag = (ir & 0b00010000) >> 4 # shift right, checking for 1 in spot \n reg_num1 = self.ram[self.pc +1] # Using ram_read(), read the bytes at PC+1 and PC+2 from RAM into variables operand_a and operand_b\n reg_num2 = self.ram[self.pc + 2]\n self.branch_table[ir](reg_num1, reg_num2) # Run instruction \n if pc_flag == 0:\n move = int((ir & 0b11000000) >>6) # shift right 6 places checking for 1 spot\n self.pc += move + 1 # After running code for any particular instruction, the PC needs to be updated\n # to point to the next instruction for the next iteration of the loop in run() ","sub_path":"ls8/in_out.py","file_name":"in_out.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"532859819","text":"# Acquisition from Jacques Specx\n\nfrom cromulent.model import factory, TimeSpan, Acquisition, Person, \\\n\tBeginningOfExistence, EndOfExistence, Place, LinguisticObject\nfrom cromulent.vocab import Painting, add_art_setter, Description\n\nadd_art_setter()\n\nacq = Acquisition()\nobj = Painting(\"http://www.getty.edu/art/collection/objects/882/rembrandt-harmensz-van-rijn-the-abduction-of-europa-dutch-1632/\", label=\"The Abduction of Europa\", art=1)\n\n\np = Person(label=\"Jacques Specx\")\nbirth = BeginningOfExistence(label=\"Birth\")\ntob = TimeSpan(label=\"1585\")\ntob.begin_of_the_begin = \"1585-01-01\"\ntob.end_of_the_end = \"1585-12-31\"\npob = Place(label=\"Amsterdam\")\npob.part_of = Place(label=\"The Netherlands\")\nbirth.timespan = tob \nbirth.took_place_at = pob\ndeath = EndOfExistence(label=\"Death\")\ntod = TimeSpan(label=\"1652\")\ntod.begin_of_the_begin = \"1652-01-01\"\ntod.end_of_the_end = \"1652-12-31\"\npod = Place(label=\"Amsterdam\")\npod.part_of = Place(label=\"The Netherlands\")\ndeath.timespan = tod \ndeath.took_place_at = pod\np.brought_into_existence_by = birth\np.taken_out_of_existence_by = death\n\n# Date of Acquisition\ndate = TimeSpan()\ndate.begin_of_the_begin = \"1652-01-01T00:00:00Z\"\ndate.end_of_the_end = \"1652-12-31T23:59:59Z\"\n\n# Description and Source\ndes = Description(value=\"- 1652: Jacques Specx, 1585 - 1652 (Amsterdam, The Netherlands)\")\nsrc = LinguisticObject(label=\"Source\", value=\"In death inv. 
of 1652\")\n\nacq.transferred_title_of = obj\nacq.timespan = date\nacq.transferred_title_from = p\nacq.referred_to_by = des\nacq.referred_to_by = src\n\n\nprint(factory.toString(acq, compact=False)) ","sub_path":"ex_europa/acq2.py","file_name":"acq2.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"491934694","text":"#!/usr/bin/env python3\n\nfrom systemd import journal\n\ndef main():\n j = journal.Reader()\n j.this_boot()\n j.log_level(journal.LOG_INFO)\n j.add_match(_SYSTEMD_UNIT=\"pirlnode.service\")\n\n # Look for these key words or phrases\n onetime_keyitems = ['UDP listener up', 'RLPx listener up', 'IPC endpoint opened']\n importing_blocks = 'Imported new chain segment'\n importing_blocks_check = 0\n sending_proof = ' masternode sending proof of activity for block'\n sending_proof_check = 0\n for entry in j:\n for item in onetime_keyitems:\n if item in entry['MESSAGE']:\n print(\"This is good --> {}\".format(entry['MESSAGE']))\n\n if importing_blocks in entry['MESSAGE'] and importing_blocks_check == 0:\n importing_blocks_check = 1\n print(\"Importing blocks is good --> {}\".format(entry['MESSAGE']))\n\n if sending_proof in entry['MESSAGE'] and sending_proof_check == 0:\n sending_proof_check = 1\n print(\"Sending proof is good --> {}\".format(entry['MESSAGE']))\n\nif __name__ == \"__main__\":\n main()","sub_path":"log_parser/pirlnode_log_parser.py","file_name":"pirlnode_log_parser.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88242006","text":"# -*- coding: utf-8 -*-\n# Copyright 2019 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\n\"\"\"Deps analysis service.\"\"\"\n\nfrom __future__ import print_function\n\nimport fileinput\nimport os\n\nfrom chromite.lib import constants\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import git\nfrom chromite.lib import osutils\nfrom chromite.lib import portage_util\nfrom chromite.scripts import cros_extract_deps\n\n\ndef NormalizeSourcePaths(source_paths):\n \"\"\"Return the \"normalized\" form of a list of source paths.\n\n Normalizing includes:\n * Sorting the source paths in alphabetical order.\n * Remove paths that are sub-path of others in the source paths.\n * Ensure all the directory path strings are ended with the trailing '/'.\n \"\"\"\n for i, path in enumerate(source_paths):\n assert os.path.isabs(path), 'path %s is not an aboslute path' % path\n source_paths[i] = os.path.normpath(path)\n\n source_paths.sort()\n\n results = []\n\n for i, path in enumerate(source_paths):\n is_subpath_of_other = False\n for j, other in enumerate(source_paths):\n if j != i and osutils.IsSubPath(path, other):\n is_subpath_of_other = True\n if not is_subpath_of_other:\n if os.path.isdir(path) and not path.endswith('/'):\n path += '/'\n results.append(path)\n\n return results\n\n\ndef GenerateSourcePathMapping(packages, board):\n \"\"\"Returns a map from each package to the source paths it depends on.\n\n A source path is considered dependency of a package if modifying files in that\n path might change the content of the resulting package.\n\n Notes:\n 1) This method errs on the side of returning unneeded dependent paths.\n i.e: for a given package X, some of its dependency source paths may\n contain files which doesn't affect the content of 
X.\n\n On the other hand, any missing dependency source path for package X is\n considered a bug.\n 2) This only outputs the direct dependency source paths for a given package\n and does not include the dependency source paths of dependency\n packages.\n e.g: if package A depends on B (DEPEND=B), then the results of computing\n dependency source paths of A don't include dependency source paths\n of B.\n\n Args:\n packages: The list of package CPV names (str)\n board (str): The name of the board if packages are dependencies of the board. If\n the packages are board agnostic, then this should be None.\n\n Returns:\n Map from each package to the source path (relative to the repo checkout\n root, i.e: ~/trunk/ in your cros_sdk) it depends on.\n For each source path which is a directory, the string is ended with a\n trailing '/'.\n \"\"\"\n\n results = {}\n\n packages_to_ebuild_paths = portage_util.FindEbuildsForPackages(\n packages, sysroot=cros_build_lib.GetSysroot(board),\n error_code_ok=False)\n\n # Source paths which are the directory of ebuild files.\n for package, ebuild_path in packages_to_ebuild_paths.iteritems():\n results[package] = [ebuild_path]\n\n # Source paths which are cros workon source paths.\n buildroot = os.path.join(constants.CHROOT_SOURCE_ROOT, 'src')\n manifest = git.ManifestCheckout.Cached(buildroot)\n for package, ebuild_path in packages_to_ebuild_paths.iteritems():\n is_workon, _, is_blacklisted, _ = portage_util.EBuild.Classify(ebuild_path)\n if (not is_workon or\n # A blacklisted ebuild is pinned to a specific git sha1, so changes in\n # that repo don't matter to the ebuild.\n is_blacklisted):\n continue\n ebuild = portage_util.EBuild(ebuild_path)\n workon_subtrees = ebuild.GetSourceInfo(buildroot, manifest).subtrees\n for path in workon_subtrees:\n results[package].append(path)\n\n # Source paths which are the eclasses which ebuilds inherit from.\n # For now, we just include the whole eclass directory.\n # TODO(crbug.com/917174): for each package, expand the analysis to output\n # only the path to eclass files which the package depends on.\n _ECLASS_DIRS = [os.path.join(constants.CHROOT_SOURCE_ROOT,\n constants.CHROMIUMOS_OVERLAY_DIR, 'eclass')]\n for package, ebuild_path in packages_to_ebuild_paths.iteritems():\n use_inherit = False\n for line in fileinput.input(ebuild_path):\n if line.startswith('inherit '):\n use_inherit = True\n if use_inherit:\n results[package].extend(_ECLASS_DIRS)\n\n # Source paths which are the overlay directories for the given board\n # (packages are board specific).\n if board:\n overlay_directories = portage_util.FindOverlays(\n overlay_type='both', board=board)\n for package in results:\n results[package].extend(overlay_directories)\n\n for p in results:\n results[p] = NormalizeSourcePaths(results[p])\n\n return results\n\n\ndef GetBuildDependency(board):\n \"\"\"Return the build dependency and package -> source path map for |board|.\n\n Args:\n board (str): The name of the board whose artifacts are being created.\n\n Returns:\n JSON build dependencies report for the given board which includes:\n - Package level deps graph from portage\n - Map from each package to the source path\n (relative to the repo checkout root, i.e: ~/trunk/ in your cros_sdk) it\n depends on\n \"\"\"\n results = {}\n results['target_board'] = board\n results['package_deps'] = {}\n results['source_path_mapping'] = {}\n\n board_specific_packages = ['virtual/target-os', 'virtual/target-os-dev',\n 'virtual/target-os-test']\n # Since we don’t have a clear mapping from 
autotests to git repos\n # and/or portage packages, we assume every board runs all autotests.\n board_specific_packages += ['chromeos-base/autotest-all']\n\n non_board_specific_packages = ['virtual/target-sdk', 'chromeos-base/chromite']\n\n board_specific_deps = cros_extract_deps.ExtractDeps(\n sysroot=cros_build_lib.GetSysroot(board),\n package_list=board_specific_packages)\n\n non_board_specific_deps = cros_extract_deps.ExtractDeps(\n sysroot=cros_build_lib.GetSysroot(None),\n package_list=non_board_specific_packages)\n\n results['package_deps'].update(board_specific_deps)\n results['package_deps'].update(non_board_specific_deps)\n\n results['source_path_mapping'].update(\n GenerateSourcePathMapping(board_specific_deps.keys(), board))\n\n results['source_path_mapping'].update(\n GenerateSourcePathMapping(non_board_specific_deps.keys(), board=None))\n\n return results\n","sub_path":"src/third_party/chromite/service/dependency.py","file_name":"dependency.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"401440989","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport json\nfrom saveDictionary import saveDictionary\n\ndnlHref = 'http://downloads.sourceforge.net/xdxf/comn_sdict05_eng_rus_full.tar.bz2'\n\ndef translateWords(filename='dict.xdxf'):\n\tif not os.path.isfile(filename):\n\t\tprint('file ' + filename + ' not found!')\n\t\tprint('Please download and extract dictionary by url: ' + dnlHref)\n\t\treturn\n\twith open('dict-en.json', 'r') as f:\n\t\tenDict = json.load(f)\n\twith open('dict-ru.json', 'r') as f:\n\t\truDict = json.load(f)\n\n\ttrStart = -1\n\ttrsStr = ''\n\twith open(filename, 'r') as xdxf:\n\t\ten = False\n\t\tfor line in xdxf:\n\t\t\tif line[:7] == '<ar><k>': # an xdxf article starts with '<ar><k>'\n\t\t\t\tif en:\n\t\t\t\t\ttrs = split(trsStr, 1)\n\t\t\t\t\tfor tr in trs:\n\t\t\t\t\t\tif not (tr in ruDict):\n\t\t\t\t\t\t\truDict[tr] = {'text': tr}\n\t\t\t\t\tif not('translations' in en):\n\t\t\t\t\t\ten['translations'] = []\n\t\t\t\t\ten['translations'] += trs\n\t\t\t\t\tif(len(en['translations']) > 10):\n\t\t\t\t\t\ten['translations'] = en['translations'][0:10]\n\t\t\t\ten = False\n\t\t\t\ttrsStr = ''\n\t\t\t\ttext = trimmer(line)\n\t\t\t\tif text:\n\t\t\t\t\tif text in enDict:\n\t\t\t\t\t\ten = enDict[text]\n\t\t\t\t\telse:\n\t\t\t\t\t\ten = enDict[text] = {'text': text}\n\t\t\telif en:\n\t\t\t\t# strip <tr>...</tr> transcription spans\n\t\t\t\ttrStart = line.find('<tr>')\n\t\t\t\tif trStart != -1:\n\t\t\t\t\ttrEnd = line.find('</tr>')\n\t\t\t\t\tif trEnd == -1:\n\t\t\t\t\t\tline = line[0:trStart]\n\t\t\t\t\t\ttrStart = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tif trStart == 0:\n\t\t\t\t\t\t\tline = line[trEnd + 5:]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tline = line[0:trStart] + line[trEnd+5:]\n\t\t\t\t\t\ttrStart = -1\n\t\t\t\ttrsStr += line\n\tsaveDictionary(enDict, 'dict-en.json')\n\tsaveDictionary(ruDict, 'dict-ru.json')\n\ndef trimmer(s, lang=False):\n\tif not s:\n\t\treturn\n\t#s = re.replace('\\([^\\)]\\)', s) # means the translation only applies to a special case\n\ts = re.sub('\\[[^\\]+]\\]', '', s)\n\ts = re.sub('<[^>]+>', '', s)\n\ts = s.replace('"', '').replace('́', '')\n\tif lang:\n\t\ts = re.sub(' [A-Za-z].*', '', s)\n\ts = s.strip().lower()\n\tif len(s) < 2:\n\t\treturn\n\tif lang:\n\t\tif re.search('[^А-Яа-яЁё]', s):\n\t\t\treturn\n\telse:\n\t\tif re.search('[^A-Za-z]', s):\n\t\t\treturn\n\treturn s\n\ndef split(s, lang=False):\n\ts = re.split('\\n\\n', s)[0]\n\tarr = re.split('[\\SА-Яа-я]+[\\.\\)] |[\\.;\\d:] |\\n', s, 50)[0:49]\n\ti = 0\n\tfor 
p in arr:\n\t\tif re.search('^ *[А-Яа-яЁё]+, [А-Яа-яЁё]+', p):\n\t\t\tpoz = p.find(', ')\n\t\t\tarr.append(p[poz+2:])\n\t\t\tarr[i] = p[0:poz]\n\t\ti += 1\n\tret = []\n\tfor part in arr:\n\t\tpart = trimmer(part, lang)\n\t\tif part:\n\t\t\tret.append(part)\n\treturn ret\n\nif __name__ == '__main__':\n\timport sys\n\ttranslateWords(*sys.argv[1:])","sub_path":"parsDict/parsXdxf.py","file_name":"parsXdxf.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"95885188","text":"from django.db.models import Sum\nfrom rest_framework import serializers\nimport math\n\nfrom inventory.models import Inventory\nfrom settings.models import DollarExchangeRate, TransactionCoefficient\nfrom .models import Category, Brand, Product, Image\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n url = serializers.CharField(source='fileurl')\n class Meta:\n model = Image\n fields = ('url',)\n\n\nclass BrandSerializer(serializers.ModelSerializer):\n class Meta:\n model = Brand\n fields = [\n 'id',\n 'name',\n 'slug',\n 'logo',\n 'active',\n ]\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = [\n 'id',\n 'parent',\n 'name',\n 'name_singular',\n 'short_description',\n 'slug',\n 'logo',\n ]\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n brand = BrandSerializer()\n category = CategorySerializer()\n price_uzs = serializers.SerializerMethodField()\n old_price_uzs = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = [\n 'id',\n 'upc',\n 'category',\n 'brand',\n 'name',\n 'slug',\n 'model',\n 'description',\n 'old_price',\n 'old_price_uzs',\n 'price',\n 'price_uzs',\n 'viewed',\n 'thumbnail',\n 'awaiting',\n 'vat',\n 'created',\n 'updated',\n ]\n\n def get_price_uzs(self, obj):\n return int(math.ceil(obj.price * DollarExchangeRate.objects.filter().first().exchange_rate / TransactionCoefficient.objects.filter().first().coefficient)/10000)*10000\n\n def get_old_price_uzs(self, obj):\n return int(math.ceil(obj.old_price * DollarExchangeRate.objects.filter().first().exchange_rate / TransactionCoefficient.objects.filter().first().coefficient)/10000)*10000\n\n def get_inventory_count(self, obj):\n return Inventory.objects.filter(laptop_id__exact=obj).values('laptop').annotate(quantity=Sum('quantity'))","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"300550955","text":"import matplotlib.pyplot as plt\n\nfrom random_walk import RandomWalk\n\n# Make a randomw walk, and plot the points.\nrw = RandomWalk()\nrw.fill_walk()\nplt.figure(figsize=(20, 12))\n#plt.scatter(rw.x_values, rw.y_values, s=1)\n#now try with plot\nplt.plot(rw.x_values, rw.y_values, linewidth=1)\n\nplt.show()\n\n","sub_path":"rw_visual.py","file_name":"rw_visual.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411249244","text":"from Jawa import Template\nfrom ROOT import TFile, TTree, TCut, TPaveText, TLine\n\nfrom top_config import *\n\nzmumu_mc2016.add_iptune()\nqq2ttbar_mc2016.add_iptune()\ngg2ttbar_mc2016.add_iptune()\n\nzmumu_label = TPaveText(0.66, 0.5, 0.9, 0.6, 'NDC')\nzmumu_label.SetFillStyle(0)\nzmumu_label.SetBorderSize(0)\nzmumu_label.AddText(\"Z#rightarrow#mu#mu\")\n\nttbar_label = TPaveText(0.66, 
0.5, 0.9, 0.6, 'NDC')\nttbar_label.SetFillStyle(0)\nttbar_label.SetBorderSize(0)\nttbar_label.AddText(\"t#bar{t}#rightarrow#mue\")\n\n\nzmumu_fid = TCut(\"muminus_ETA > 2 && muminus_ETA < 4.5 && muminus_PT > 20000 && muplus_ETA > 2 && muplus_ETA < 4.5 && muplus_PT > 20000 && boson_M > 60000 && boson_M < 120000\")\nttbar_fid = TCut(\"mu_ETA > 2 && mu_ETA < 4.5 && mu_PT > 20000 && e_ETA > 2 && e_ETA < 4.5 && e_PT > 20000\")\n\n\ntt_qq_evts = qq2ttbar_mc2016.MU.Get('TotEvts').GetVal() + qq2ttbar_mc2016.MD.Get('TotEvts').GetVal()\ntt_qq_xsec = 9.966e-8*1e9\ntt_qq_acc = 10000.0/106462.0\ntt_qq_scale = tt_qq_xsec * tt_qq_acc /tt_qq_evts\n\ntt_gg_evts = gg2ttbar_mc2016.MU.Get('TotEvts').GetVal() + gg2ttbar_mc2016.MD.Get('TotEvts').GetVal()\ntt_gg_xsec = 5.826e-7*1e9\ntt_gg_acc = 10000.0/156045.0\ntt_gg_scale = tt_gg_xsec * tt_gg_acc /tt_gg_evts\n\ndata = Template(\"data\")\ndata.SetSelCut(zmumu_fid)\ndata.AddTrees(zmumuj_2016.trees())\ndata.ApplyCut()\ndata.AddVar(\"mum_ipubs\", \"muminus_ipubs_d\", 100, 0, 0.1)\ndata.AddVar(\"mup_ipubs\", \"muplus_ipubs_d\", 100, 0, 0.1)\ndata.FillVars()\n\nmc = Template(\"mc\")\nmc.SetSelCut(zmumu_fid)\nmc.AddTrees(zmumu_mc2016.trees())\nmc.ApplyCut()\nmc.AddVar(\"mum_ipubs\", \"muminus_ipubs_d\", 100, 0, 0.1)\nmc.AddVar(\"mup_ipubs\", \"muplus_ipubs_d\", 100, 0, 0.1)\nmc.AddVar(\"mum_ipubs_tune\", \"muminus_ipubs_d_tune\", 100, 0, 0.1)\nmc.AddVar(\"mup_ipubs_tune\", \"muplus_ipubs_d_tune\", 100, 0, 0.1)\nmc.FillVars()\n\nttbar_mc = Template(\"ttbar_mc\")\nttbar_mc.SetSelCut(ttbar_fid)\nttbar_mc.AddTrees(gg2ttbar_mc2016.trees(), tt_gg_scale)\nttbar_mc.AddTrees(qq2ttbar_mc2016.trees(), tt_qq_scale)\nttbar_mc.ApplyCut()\nttbar_mc.AddVar(\"mu_ipubs\" , \"mu_ipubs_d\", 100, 0, 0.1)\nttbar_mc.AddVar(\"e_ipubs\" , \"e_ipubs_d\", 100, 0, 0.1)\nttbar_mc.AddVar(\"mu_ipubs_tune\", \"mu_ipubs_d_tune\", 100, 0, 0.1)\nttbar_mc.AddVar(\"e_ipubs_tune\" , \"e_ipubs_d_tune\", 100, 0, 0.1)\nttbar_mc.FillVars()\n\n'''\nmwt_fwd = MWTemplate(\"top_eft_fwd\")\nmwt_fwd.AddTree(f.Get(\"topTuple\"))\nmwt_fwd.ApplyCut()\nmwt_fwd.AddVar(\"top_rap\", \"abs(top_rap)\", 100, 0, 5)\nmwt_fwd.AddWeight(\"central\", \"w\")\nfor i in range(24):\n mwt_fwd.AddWeight(\"rwgt_\"+str(i+1), \"rwgt_\"+str(i+1))\nmwt_fwd.FillVars()\nmwt_fwd.SaveToFile()\n'''\n\nfrom Style import *\nSetLHCbStyle()\n\nfrom PlotTools import *\nzmumu_mc = mc.GetVar('mum_ipubs').GetHist().Clone('zmumu_mc')\nzmumu_mc.Add(mc.GetVar('mup_ipubs').GetHist())\nzmumu_mc_tune = mc.GetVar('mum_ipubs_tune').GetHist().Clone('zmumu_mc_tune')\nzmumu_mc_tune.Add(mc.GetVar('mup_ipubs_tune').GetHist())\nzmumu_data = data.GetVar('mum_ipubs').GetHist().Clone('zmumu_data')\nzmumu_data.Add(data.GetVar('mup_ipubs').GetHist())\n\np = Plot([ zmumu_mc, zmumu_mc_tune, zmumu_data ])\nfor pp in p.plots: pp.UseCurrentStyle()\np.setProp('forcestyle', True)\np.setProp('filename', 'zmumu_tunedip.pdf')\np.setProp('location', '/user2/sfarry/workspaces/top/figures')\np.setProp('colors', [2, 4, 1])\np.setProp('drawOpts', ['hist' for i in range(12)])\np.setProp('fillstyles', 0)\np.setProp('markerstyles', [0, 0, 20])\np.setProp('drawOpts', ['h', 'h', 'e1'])\n#p.setProp('toCompare', { 1 : [2] })\np.setProp('ycomplims', [0.8, 1.15])\np.setProp('ylabel', '[A.U.]')\np.setProp('xlabel', 'ip [mm]')\np.setProp('normalised', True)\np.setProp('labels', ['MC2016', 'MC2016(tuned)', 'Data'])\np.setProp('extraObjs', [zmumu_label])\np.drawROOT()\n\n\np = Plot([ ttbar_mc.GetVar('mu_ipubs').GetHist(), ttbar_mc.GetVar('mu_ipubs_tune').GetHist() ])\nfor pp in p.plots: 
pp.UseCurrentStyle()\np.setProp('forcestyle', True)\np.setProp('filename', 'ttbar_tunedip_mu.pdf')\np.setProp('location', '/user2/sfarry/workspaces/top/figures')\np.setProp('colors', [2, 4, 1])\np.setProp('drawOpts', ['hist' for i in range(12)])\np.setProp('fillstyles', 0)\np.setProp('markerstyles', [0, 0, 20])\np.setProp('drawOpts', ['h', 'h', 'e1'])\n#p.setProp('toCompare', { 1 : [2] })\np.setProp('ycomplims', [0.8, 1.15])\np.setProp('ylabel', '[A.U.]')\np.setProp('xlabel', 'muon ip [mm]')\np.setProp('normalised', True)\np.setProp('labels', ['MC2016', 'MC2016(tuned)', 'Data'])\np.setProp('extraObjs', [ttbar_label])\np.drawROOT()\n\n\np = Plot([ ttbar_mc.GetVar('e_ipubs').GetHist(), ttbar_mc.GetVar('e_ipubs_tune').GetHist() ])\nfor pp in p.plots: pp.UseCurrentStyle()\np.setProp('forcestyle', True)\np.setProp('filename', 'ttbar_tunedip_e.pdf')\np.setProp('location', '/user2/sfarry/workspaces/top/figures')\np.setProp('colors', [2, 4, 1])\np.setProp('drawOpts', ['hist' for i in range(12)])\np.setProp('fillstyles', 0)\np.setProp('markerstyles', [0, 0, 20])\np.setProp('drawOpts', ['h', 'h', 'e1'])\n#p.setProp('toCompare', { 1 : [2] })\np.setProp('ycomplims', [0.8, 1.15])\np.setProp('ylabel', '[A.U.]')\np.setProp('xlabel', 'electron ip [mm]')\np.setProp('normalised', True)\np.setProp('labels', ['MC2016', 'MC2016(tuned)', 'Data'])\np.setProp('extraObjs', [ttbar_label])\np.drawROOT()\n","sub_path":"top/python/plot_zmumu_tunedip.py","file_name":"plot_zmumu_tunedip.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224498511","text":"################################################################################\n# MIT License\n# \n# Copyright (c) 2019\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to conditions.\n#\n# Author: Deep Learning Course | Fall 2019\n# Date Created: 2019-09-06\n################################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\nfrom datetime import datetime\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\nimport sys\nsys.path.append(\".\")\nsys.path.append(\"..\")\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom part1.dataset import PalindromeDataset\nfrom part1.vanilla_rnn import VanillaRNN\nfrom part1.lstm import LSTM\n\n# You may want to look into tensorboard for logging\n# from torch.utils.tensorboard import SummaryWriter\n\n################################################################################\n\ndef train(config):\n assert config.model_type in ('RNN', 'LSTM')\n\n # Initialize the device which to run the model on\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n def acc(predictions, targets):\n accuracy = (predictions.argmax(dim=1) == targets).float().mean().item()\n return accuracy\n\n # Initialize the dataset and data loader (note the +1\n dataset = PalindromeDataset(config.input_length + 1)\n data_loader = DataLoader(dataset, config.batch_size, 
num_workers=1)\n\n # Setup the loss and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n lstm = LSTM(config.input_length, config.input_dim, config.num_hidden, config.num_classes)\n rnn = VanillaRNN(config.input_length, config.input_dim, config.num_hidden, config.num_classes, device)\n\n optimizer_lstm = torch.optim.RMSprop(lstm.parameters(), lr=config.learning_rate)\n optimizer_rnn = torch.optim.RMSprop(rnn.parameters(), lr=config.learning_rate)\n\n for step, (batch_inputs, batch_targets) in enumerate(data_loader):\n\n # Only for time measurement of step through network\n print(\"step\",step)\n # Initialize the model that we are going to use\n lstm_out = lstm.forward(batch_inputs)\n\n optimizer_lstm.zero_grad()\n loss_lstm = criterion(lstm_out, batch_targets)\n loss_lstm.backward()\n optimizer_lstm.step()\n\n\n rnn_out = rnn.forward(batch_inputs)\n\n optimizer_rnn.zero_grad()\n loss_rnn = criterion(rnn_out, batch_targets)\n loss_rnn.backward()\n optimizer_rnn.step()\n\n lstm_norms = []\n for h in lstm.all_h:\n lstm_norms.append(h.grad.norm().item())\n\n rnn_norms = []\n for h in rnn.all_h:\n rnn_norms.append(h.grad.norm().item())\n\n sequence = list(range(1, config.input_length + 1))\n plt.figure(figsize=(15, 6))\n plt.plot(sequence, rnn_norms, label=\"rnn\")\n plt.plot(sequence, lstm_norms, label=\"lstm\")\n plt.legend()\n plt.xlabel(\"sequence value\")\n plt.ylabel(\"gradient norm\")\n\n plt.show()\n\n break\n\n print('Done training.')\n\nif __name__ == \"__main__\":\n\n # Parse training configuration\n parser = argparse.ArgumentParser()\n\n # Model params\n parser.add_argument('--model_type', type=str, default=\"RNN\", help=\"Model type, should be 'RNN' or 'LSTM'\")\n parser.add_argument('--input_length', type=int, default=100, help='Length of an input sequence')\n parser.add_argument('--input_dim', type=int, default=1, help='Dimensionality of input sequence')\n parser.add_argument('--num_classes', type=int, default=10, help='Dimensionality of output sequence')\n parser.add_argument('--num_hidden', type=int, default=128, help='Number of hidden units in the model')\n parser.add_argument('--batch_size', type=int, default=128, help='Number of examples to process in a batch')\n parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')\n parser.add_argument('--train_steps', type=int, default=200, help='Number of training steps')\n parser.add_argument('--max_norm', type=float, default=10.0)\n parser.add_argument('--device', type=str, default=\"cuda:0\", help=\"Training device 'cpu' or 'cuda:0'\")\n\n config = parser.parse_args()\n\n # Train the model\n train(config)","sub_path":"assignment_2/part1/grads_over_time.py","file_name":"grads_over_time.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"598090292","text":"import asyncio\n\nimport aioredis\nfrom uvicorn.config import Config\n\nfrom noobit.server import settings\nfrom noobit.server.main_server import Server\n\nserver = None\n\nasync def launch_server():\n # cant just call main_server.run\n # main_server.run(\"noobit.server.main_app:app\", host=\"localhost\", port=8000, reload=False)\n app = \"noobit.server.main_app:app\"\n config = Config(app, host=\"localhost\", port=8000, reload=False)\n config.backlog = 2048\n\n global server\n server = Server(config=config)\n server.aioredis_pool = await aioredis.create_redis_pool(('localhost', 6379))\n\n await server.serve()\n\n\nasync def get_ohlc():\n await 
asyncio.sleep(3)\n session = settings.SESSION\n\n global server\n\n try:\n ohlc = await session.get(\"http://localhost:8000/json/public/ohlc/kraken?symbol=XBT-USD&timeframe=60\")\n except:\n pass\n finally:\n server.should_exit = True\n await server.shutdown_server()\n assert ohlc.status_code == 200\n\nasync def main():\n results = await asyncio.gather(\n launch_server(),\n get_ohlc()\n )\n return results\n\ndef test_main_server():\n\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","sub_path":"tests/server/test_main_server.py","file_name":"test_main_server.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97196599","text":"import subprocess\nimport os\nimport asyncio\nimport discord\nimport threading\nimport torch\nimport io\nimport aiohttp\nimport time\n\nfrom tts.jtalkCore import libjt_initialize, g2p\nfrom tts.mecab import Mecab_initialize, MecabFeatures, Mecab_analysis\nfrom tts.text2mecab import text2mecab\n\ndef get_normalization_factor(max_abs_value, normalize):\n if not normalize and max_abs_value > 1:\n raise ValueError('Audio data must be between -1 and 1 when normalize=False.')\n return max_abs_value if normalize else 1\n\ndef validate_and_normalize_with_numpy(data, normalize):\n import numpy as np\n\n data = np.array(data, dtype=float)\n if len(data.shape) == 1:\n nchan = 1\n elif len(data.shape) == 2:\n # In wave files,channels are interleaved. E.g.,\n # \"L1R1L2R2...\" for stereo. See\n # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx\n # for channel ordering\n nchan = data.shape[0]\n data = data.T.ravel()\n else:\n raise ValueError('Array audio input must be a 1D or 2D array')\n \n max_abs_value = np.max(np.abs(data))\n normalization_factor = get_normalization_factor(max_abs_value, normalize)\n scaled = np.int16(data / normalization_factor * 32767).tolist()\n return scaled, nchan\n\ndef validate_and_normalize_without_numpy(data, normalize):\n try:\n max_abs_value = float(max([abs(x) for x in data]))\n except TypeError:\n raise TypeError('Only lists of mono audio are '\n 'supported if numpy is not installed')\n\n normalization_factor = get_normalization_factor(max_abs_value, normalize)\n scaled = [int(x / normalization_factor * 32767) for x in data]\n nchan = 1\n return scaled, nchan\n\ndef make_wav(data, rate, normalize):\n \"\"\" Transform a numpy array to a PCM bytestring \"\"\"\n import struct\n from io import BytesIO\n import wave\n\n try:\n scaled, nchan = validate_and_normalize_with_numpy(data, normalize)\n except ImportError:\n scaled, nchan = validate_and_normalize_without_numpy(data, normalize)\n\n fp = BytesIO()\n waveobj = wave.open(fp,mode='wb')\n waveobj.setnchannels(nchan)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(2)\n waveobj.setcomptype('NONE','NONE')\n waveobj.writeframes(b''.join([struct.pack('\"]]\n elif c not in self.char_to_id.keys():\n idseq += [self.char_to_id[\"\"]]\n else:\n idseq += [self.char_to_id[c]]\n idseq += [self.idim - 1] # \n return torch.LongTensor(idseq).view(-1).to(self.device)\n \n def do_talk(self, text, output):\n with torch.no_grad():\n x = self.frontend(text)\n c, _, _ = self.model.inference(x, self.inference_args)\n z = torch.randn(1, 1, c.size(0) * self.config[\"hop_size\"]).to(self.device)\n c = torch.nn.ReplicationPad1d(self.config[\"generator_params\"][\"aux_context_window\"])(c.unsqueeze(0).transpose(2, 1))\n y = self.vocoder(z, c).view(-1)\n data = 
\nclass Esp:\n    # __init__ omitted: it loads the ESPnet TTS model, the ParallelWaveGAN\n    # vocoder and its config (downloaded by Jtalk.download_model below), and\n    # builds the char_to_id table used by frontend().\n\n    def frontend(self, text):\n        # Map each character to its id; whitespace and unknown characters fall\n        # back to the <space> and <unk> tokens.\n        idseq = []\n        for c in text:\n            if c.isspace():\n                idseq += [self.char_to_id[\"<space>\"]]\n            elif c not in self.char_to_id.keys():\n                idseq += [self.char_to_id[\"<unk>\"]]\n            else:\n                idseq += [self.char_to_id[c]]\n        idseq += [self.idim - 1]  # <eos>\n        return torch.LongTensor(idseq).view(-1).to(self.device)\n    \n    def do_talk(self, text, output):\n        with torch.no_grad():\n            x = self.frontend(text)\n            c, _, _ = self.model.inference(x, self.inference_args)\n            z = torch.randn(1, 1, c.size(0) * self.config[\"hop_size\"]).to(self.device)\n            c = torch.nn.ReplicationPad1d(self.config[\"generator_params\"][\"aux_context_window\"])(c.unsqueeze(0).transpose(2, 1))\n            y = self.vocoder(z, c).view(-1)\n            data = make_wav(y.view(-1).cpu().numpy(), self.config[\"sampling_rate\"], True)\n        with open(output, mode='wb') as fout:\n            fout.write(data)\n    \n    async def talk(self, text, output):\n        thread = threading.Thread(target=self.do_talk, args=(text, output, ))\n        thread.daemon = True  # Thread.setDaemon() is deprecated\n        thread.start()\n        thread.join()\n
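\n# How Esp is used elsewhere in this file (sketch; the text and output path\n# are illustrative):\n#   esp = Esp()\n#   await esp.talk(\"hello\", \"./wav/hello.wav\")\n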
\nclass Jtalk:\n    __lock = threading.Lock()\n    __esp_lock = threading.Lock()\n    loop = None\n    voice_dict = dict()\n    ch_dict = dict()\n    esp = None\n    __downloading = False\n\n    def clear(self):\n        self.voice_dict.clear()\n        self.ch_dict.clear()\n\n    async def connect(self, author):\n        await self.disconnect(author.id)\n        if author.voice is None:\n            return None\n        voice = None\n        if author.id in self.voice_dict:\n            voice = self.ch_dict[self.voice_dict[author.id]]\n        else:\n            self.voice_dict[author.id] = author.voice.channel.id\n            if author.voice.channel.id in self.ch_dict:\n                voice = self.ch_dict[author.voice.channel.id]\n            else:\n                voice = await author.voice.channel.connect()\n                self.ch_dict[author.voice.channel.id] = voice\n        return voice\n    \n    async def disconnect(self, author_id):\n        if author_id in self.voice_dict:\n            voice_id = self.voice_dict[author_id]\n            del self.voice_dict[author_id]\n            for v in self.voice_dict.values():\n                if v == voice_id:\n                    return\n            voice = self.ch_dict[voice_id]\n            await voice.disconnect(force=True)\n            del self.ch_dict[voice_id]\n    \n    def talk_ai(self, t, author):\n        if author.id in self.voice_dict:\n            voice = self.ch_dict[self.voice_dict[author.id]]\n            output = './wav/' + str(author.id) + '.wav'\n            asyncio.ensure_future(self.taco2_wavegan(t, voice, output), loop=self.loop)\n\n    async def download(self, url, save_path):\n        start = time.time()\n        chunk_size = 10\n\n        async with aiohttp.ClientSession() as session:\n            async with session.get(url, timeout=3600) as resp:\n                print('start: {}: {}'.format(resp.status, url))\n\n                # stream the response body to disk chunk by chunk\n                with open(save_path, 'wb') as fd:\n                    while True:\n                        chunk = await resp.content.read(chunk_size)\n                        if not chunk:\n                            break\n                        fd.write(chunk)\n\n        elapsed = round(time.time() - start)\n        print('end: {}: {}s'.format(save_path, elapsed))\n    \n    async def download_model(self):\n        url1 = 'https://github.com/laguna-loire/discordpy-startup/blob/feature/datetime/tts/data/model.last1.avg.best?raw=true'\n        await self.download(url1, './tts/data/' + os.environ['AI_MODEL'])\n        url2 = 'https://github.com/laguna-loire/discordpy-startup/blob/feature/datetime/tts/data/checkpoint-400000steps.pkl?raw=true'\n        await self.download(url2, './tts/data/' + os.environ['AI_VOCODER'])\n        url3 = 'https://github.com/laguna-loire/discordpy-startup/blob/feature/datetime/tts/data/vocoder_config.yaml?raw=true'\n        await self.download(url3, './tts/data/' + os.environ['AI_VOCODER_CONF'])\n        self.esp = Esp()\n    \n    def pre_download_model(self):\n        if self.esp is None:\n            self.__esp_lock.acquire()\n            if self.__downloading:\n                self.__esp_lock.release()\n            else:\n                self.__downloading = True\n                self.__esp_lock.release()\n                asyncio.ensure_future(self.download_model(), loop=self.loop)\n            return False\n        return True\n    \n    async def taco2_wavegan(self, t, voice, output):\n        if not self.pre_download_model():\n            asyncio.ensure_future(self.jtalk(t, output, voice, 0), loop=self.loop)\n            return\n        await self.esp.talk(t, output)\n        source = discord.FFmpegPCMAudio(output)\n        await self.play(voice, source)\n        os.remove(output)\n\n    def talk(self, t, author, htsvoice=0):\n        if author.id in self.voice_dict:\n            voice = self.ch_dict[self.voice_dict[author.id]]\n            wav = './wav/' + str(author.id) + '.wav'\n            asyncio.ensure_future(self.jtalk(t, wav, voice, htsvoice), loop=self.loop)\n\n    async def jtalk(self, t, output, voice, htsvoice):\n        open_jtalk = [os.environ['JTALK']]\n        dic = ['-x', os.environ['JTALK_DIC']]\n        voice_opt = ['-m', './voice/' + str(htsvoice) + '.htsvoice']  # do not shadow the htsvoice argument\n        speed = ['-r', '1.0']\n        outwav = ['-ow', output]\n        cmd = open_jtalk + dic + voice_opt + speed + outwav\n        c = subprocess.Popen(cmd, stdin=subprocess.PIPE)\n        c.stdin.write(t.encode(os.environ['JTALK_ENCODE']))\n        c.stdin.close()\n        c.wait()\n\n        source = discord.FFmpegPCMAudio(output)\n        await self.play(voice, source)\n        os.remove(output)\n    \n    async def play(self, voice, source):\n        self.__lock.acquire()\n        voice.play(source, after=lambda e: self.__lock.release())\n        count = 0\n        while voice.is_playing():\n            await asyncio.sleep(0.1)\n            count += 1\n            if count > 600:\n                break","sub_path":"jtalk.py","file_name":"jtalk.py","file_ext":"py","file_size_in_byte":10744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487612061","text":"from typing import Union\n\nfrom tps.modules.processor import Processor\nfrom tps.utils import load_dict, prob2bool\nfrom tps.symbols import punctuation, space, accent\nfrom tps.types import Charset\n\n\"\"\"\nIf you need to extend the Emphasizer functionality with\nlanguage-specific rules, just add a new descendant class.\n\"\"\"\n\nclass Emphasizer(Processor):\n    def __init__(self, charset: Union[Charset, str], dict_source: Union[str, tuple, list, dict]=None,\n                 prefer_user: bool=True):\n        \"\"\"\n        Base emphasizer with common functionality for all languages.\n\n        :param dict_source: Union[str, tuple, list, dict]\n            Source of the dictionary that contains stress pairs such as {'hello': 'hell+o'}.\n            Options:\n                * str - path to file.\n                    The file extension must explicitly show its format in case of json and yaml files.\n                    In other cases, the user must set the format himself (see below).\n                * tuple, list - (path, format)\n                    path - path to the dictionary file\n                    format - format of the dictionary file (see the tps.utils.load_dict function)\n                * dict - just a dict\n        :param prefer_user: bool\n            If True, words with stress tokens set by the user will be passed as-is\n        \"\"\"\n        super().__init__(charset)\n\n        fmt = None\n        if isinstance(dict_source, (tuple, list)):\n            dict_source, fmt = dict_source\n\n        self.entries = load_dict(dict_source, fmt)\n        self.prefer_user = prefer_user\n\n\n    def apply(self, string: str, **kwargs) -> str:\n        \"\"\"\n        Splits the passed string into tokens and converts each one to its stressed form if it is present in the dictionary.\n        Keep in mind that tokenization is simple here, so it is better to pass a normalized string.\n\n        :param string: str\n            Your text.\n        :param kwargs:\n            * mask_stress: Union[bool, float]\n                Whether to mask each token.\n                If float, then the masking probability will be computed for each token independently.\n        :return: str\n        \"\"\"\n        mask = kwargs.get(\"mask_stress\", False)\n\n        tokens = self.split_to_tokens(string, self._punct_re)\n\n        for idx, token in enumerate(tokens):\n            if token in punctuation + space:\n                continue\n            token = self._apply_to_token(token, mask)\n            tokens[idx] = token\n\n        return self.join_tokens(tokens)\n\n\n    def _apply_to_token(self, token, mask):\n        if prob2bool(mask):\n            return token.replace(accent, \"\")\n\n        stress_exists = token.find(accent) != -1\n        if stress_exists and self.prefer_user:\n            return token\n\n        token = self.entries.get(token, token)\n\n        return 
token","sub_path":"tps/modules/emphasizer/rule_based.py","file_name":"rule_based.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"45374362","text":"# -*- coding: utf-8 -*-\n'''\nCreated on September 1, 2017\n\n@author: dabrunhosa\n'''\nfrom abc import ABC\n\nfrom Analytics.Solutions.TransientBase import TransientBase\nfrom Analytics.Solutions.Validations.YDomain.Base import YDomain\nfrom Utilities.DataEntry import Options\n'''\nProblem being solved is: [0,1]U[1,2]U[1,3] X [0,1]\n This is an example used by Jemmy in a Y Domain.\n Although it does not look like it, all the \n segments have the same length. \n BC:\n du/dx(0) = du/dx(3) = du/dx(2) = 0\n du/dx(1) - du/dx(1) - du/dx(1) = 0 - derivative continuity\n u(1) = u(1) = u(1) - variable continuity\n\n Initial Conditions:\n u(0,x) = cos(pi*x)\n'''\n\n\nclass YDomainTransientBase(TransientBase, YDomain, ABC):\n\n ########################################\n ### Constructor ###\n ########################################\n\n def __init__(self, options=Options(), **kw):\n\n # Define the default options\n default_options = Options(name=\"Y Domain for Transient Solution\",\n numSegments=None,bifurcationPoints=None,\n sElementsE=None,sElementsLw=None,sElementsUp=None,\n domainDef=[\"domainE\",\"domainLw\",\"domainUp\"],\n sElementDef=[\"sElementsE\",\"sElementsLw\",\"sElementsUp\"],\n description=\"This is a base solution for the Y Domain Problem)\")\n\n # Merge the default options and the user generated options\n whole_options = default_options << options\n\n super(YDomainTransientBase, self).__init__(whole_options, **kw)","sub_path":"Analytics/Solutions/Validations/YDomain/TransientBase.py","file_name":"TransientBase.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588489340","text":"# This file provides functionality related to crafting - recipes, items, item categories, etc.\n\nfrom enum import unique, auto, Enum\n\nimport marshmallow\nfrom marshmallow import fields as mf\nfrom marshmallow_enum import EnumField\n\nfrom typing import Dict, Iterable, List, Optional, Set\n\nfrom . 
import defs\n\n\n@unique\nclass Essence(Enum):\n \"\"\"Represents a general category of an item/entity.\"\"\"\n\n # Raw materials\n ROCKS = \"rocks\"\n GOLD = \"gold\"\n MEAT = \"meat\"\n VEGY = \"vegy\"\n LOGS = \"log\"\n STICKS = \"sticks\"\n\n # Clothing\n HAT = \"hat\"\n COAT = \"coat\"\n GLOVES = \"gloves\"\n SHOES = \"shoes\"\n BELT = \"belt\"\n BOTTOM_WEAR = \"bottom_wear\"\n UPPER_WEAR = \"upper_wear\"\n BAG = \"bag\"\n\n # Other\n PLANT = \"plant\"\n HERO = \"hero\"\n TOOL = \"tool\"\n\n # Default category\n VOID = \"void\"\n\n def get_description(self) -> str:\n \"\"\"Returns a description of the essence to be used in GUI labels.\"\"\"\n\n if self in _ESSENCE_DESCRIPTIONS:\n return _ESSENCE_DESCRIPTIONS[self]\n else:\n return \"[Unknown]\"\n\n def get_image_name(self) -> str:\n return self.value\n\n\n_ESSENCE_DESCRIPTIONS: Dict[Essence, str] = {\n Essence.ROCKS: \"Rocks\",\n Essence.GOLD: \"Gold\",\n Essence.LOGS: \"Logs\",\n Essence.STICKS: \"Sticks\",\n Essence.HAT: \"Hat\",\n Essence.COAT: \"Coat\",\n Essence.GLOVES: \"Gloves\",\n Essence.SHOES: \"Shoes\",\n Essence.BELT: \"Belt\",\n Essence.BOTTOM_WEAR: \"Bottom Wear\",\n Essence.UPPER_WEAR: \"Upper Wear\",\n Essence.BAG: \"Bag\",\n Essence.PLANT: \"Plant\",\n Essence.HERO: \"Hero\",\n Essence.TOOL: \"Tool\",\n}\n\n\n@unique\nclass Material(Enum):\n \"\"\"Represents a category of a recipe ingredient. Only entities with `Essence` matching the\n `Material` can be used the given recipe ingredient.\"\"\"\n\n FABRIC = \"fabric\"\n GADGET = \"gadget\"\n LEATHER = \"leather\"\n MEAT = \"meat\"\n MINERAL = \"mineral\"\n ORNAMENT = \"ornament\"\n WATER = \"water\"\n WOOD = \"wood\"\n\n def get_description(self) -> str:\n if self in _MATERIAL_DESCRIPTIONS:\n return _MATERIAL_DESCRIPTIONS[self]\n else:\n return \"[Unknown]\"\n\n\n_MATERIAL_DESCRIPTIONS: Dict[Material, str] = {\n Material.FABRIC: \"Fabric\",\n Material.GADGET: \"Gadget\",\n Material.LEATHER: \"Leather\",\n Material.MEAT: \"Meat\",\n Material.MINERAL: \"Mineral\",\n Material.ORNAMENT: \"Ornament\",\n Material.WATER: \"Water\",\n Material.WOOD: \"Wood\",\n}\n\n\nclass Match:\n \"\"\"Matches `Material` with `Essence`.\"\"\"\n\n def __init__(self, material: Material, essence: Essence) -> None:\n self.material = material\n self.essence = essence\n\n def __repr__(self) -> str:\n return f\"Match({self.material.get_description()}, {self.essence.get_description()})\"\n\n def __eq__(self, other) -> bool:\n return self.material == other.material and self.essence == other.essence\n\n def __hash__(self) -> int:\n return hash((self.material, self.essence))\n\n\n_MATCHES = {\n Match(Material.MINERAL, Essence.ROCKS),\n Match(Material.MINERAL, Essence.GOLD),\n Match(Material.WOOD, Essence.LOGS),\n}\n\n\nclass Item:\n \"\"\"Represents an item that can be used as an ingredient in a recipe.\"\"\"\n\n class Schema(marshmallow.Schema):\n actor_id = mf.Integer()\n essence = EnumField(Essence)\n quantity = mf.Integer()\n\n @marshmallow.post_load\n def make(self, data, **kwargs):\n return Item(**data)\n\n def __init__(self, actor_id: defs.ActorId, essence: Essence, quantity: int) -> None:\n self.actor_id = actor_id\n self.essence = essence\n self.quantity = quantity\n\n def __repr__(self) -> str:\n return (\n f\"Item(id={self.actor_id}, \"\n f\"essence={self.essence.get_description()}, quantity={self.quantity})\"\n )\n\n def __eq__(self, other) -> bool:\n return (\n isinstance(other, Item)\n and self.actor_id == other.actor_id\n and self.essence == other.essence\n and self.quantity == 
other.quantity\n )\n\n def __hash__(self) -> int:\n return hash((self.actor_id, self.essence, self.quantity))\n\n\nclass Ingredient:\n \"\"\"Represents an ingredient in a recipe.\"\"\"\n\n def __init__(self, material: Material, value: int, optional: bool = False) -> None:\n self.material = material\n self.value = value\n self.optional = optional\n\n def get_description(self) -> str:\n \"\"\"Returns the description of the ingredients material.\"\"\"\n\n return self.material.get_description()\n\n def match_essence(self, essence: Essence) -> bool:\n \"\"\"Checks if an item with the given essence can be used as this recipe ingredient.\"\"\"\n\n return Match(self.material, essence) in _MATCHES\n\n def filter_items(self, items: Iterable[Item]) -> Set[Item]:\n \"\"\"Filters the passed iterable leaving only such items that can be used as this recipe\n ingredient.\"\"\"\n\n result: Set[Item] = set()\n for item in items:\n if self.match_essence(item.essence):\n result.add(item)\n return result\n\n def __repr__(self) -> str:\n optional = \"optional\" if self.optional else \"required\"\n return f\"Ingredient({self.material.get_description()}, {self.value}, {optional})\"\n\n\nclass Assembly:\n \"\"\"Represents a set of items that may be used in the corresponding recipe.\"\"\"\n\n class Schema(marshmallow.Schema):\n recipe_codename = mf.Str()\n sources = mf.List(mf.List(mf.Nested(Item.Schema)))\n\n @marshmallow.post_load\n def make(self, data, **kwargs):\n return Assembly(**data)\n\n def __init__(self, recipe_codename: str, sources: List[List[Item]]) -> None:\n self.recipe_codename = recipe_codename\n self.sources = sources\n\n def find_item(self, actor_id: defs.ActorId, index: Optional[int]) -> Optional[Item]:\n \"\"\"Checks if the entity with given ID is part of this assembly. 
If `index` is passed, only\n        items corresponding to that ingredient will be checked.\"\"\"\n\n        if index is not None and (index < 0 or len(self.sources) <= index):\n            return None\n\n        all_sources = self.sources if index is None else [self.sources[index]]\n        for ingredient_sources in all_sources:\n            for item in ingredient_sources:\n                if item.actor_id == actor_id:\n                    return item\n        return None\n\n    def update_item(self, index: int, template: Item, change: int) -> bool:\n        \"\"\"\n        Updates (adds, removes or changes the quantity of) an item in this assembly.\n        Returns\n         * True - if the operation is allowed and was performed correctly\n         * False - if the operation is not allowed or failed\n        \"\"\"\n\n        if index < 0 or len(self.sources) <= index:\n            return False\n\n        item = self.find_item(template.actor_id, index)\n        if item is not None:\n            if (-1 * item.quantity) == change:\n                self.sources[index].remove(item)\n                return True\n            elif (-1 * item.quantity) < change:\n                item.quantity += change\n                return True\n            else:\n                return False\n\n        else:\n            if 0 < change:\n                self.sources[index].append(Item(template.actor_id, template.essence, change))\n                return True\n            else:\n                return False\n\n    def filter_items(self, items: Iterable[Item]) -> Set[Item]:\n        \"\"\"Given an iterable of `Item`s, reduces their quantity (removing them if needed) by the quantity of\n        corresponding entities contained in this assembly.\"\"\"\n\n        items = list(items)\n        for sources in self.sources:\n            for source in sources:\n                for i in range(len(items)):\n                    if items[i].actor_id == source.actor_id:\n                        items[i].quantity -= source.quantity\n                        break\n        return set(filter(lambda e: e.quantity > 0, items))\n\n    def __repr__(self) -> str:\n        return f\"Assembly({self.recipe_codename}, {self.sources})\"\n\n    def __eq__(self, other) -> bool:\n        return (\n            isinstance(other, Assembly)\n            and self.recipe_codename == other.recipe_codename\n            and [set(src) for src in self.sources] == [set(src) for src in other.sources]\n        )\n\n\nclass Recipe:\n    \"\"\"Represents a recipe to craft items in the game.\"\"\"\n\n    def __init__(self, codename: str, description: str, ingredients: List[Ingredient]) -> None:\n        self._codename = codename\n        self._description = description\n        self._ingredients = ingredients\n\n    def get_codename(self) -> str:\n        return self._codename\n\n    def get_description(self) -> str:\n        return self._description\n\n    def get_ingredients(self) -> List[Ingredient]:\n        return list(self._ingredients)\n\n    def make_assembly(self) -> Assembly:\n        \"\"\"Returns an empty `Assembly` corresponding to this recipe.\"\"\"\n\n        return Assembly(self._codename, [list() for _ in self._ingredients])\n\n    def validate_assembly(self, assembly: Assembly) -> bool:\n        \"\"\"Checks if the passed `Assembly` satisfies the recipe's requirements.\"\"\"\n\n        if len(self.get_ingredients()) != len(assembly.sources):\n            return False\n\n        for ingredient, sources in zip(self.get_ingredients(), assembly.sources):\n            for source in sources:\n                if not ingredient.match_essence(source.essence):\n                    return False\n\n            total_quantity = sum(source.quantity for source in sources)\n            if total_quantity != ingredient.value:\n                return False\n\n        return True\n
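\n\n# Minimal end-to-end sketch of the classes above (a hypothetical recipe with\n# illustrative ids and quantities):\n#   recipe = Recipe(\"axe\", \"A simple axe\", [Ingredient(Material.WOOD, 2), Ingredient(Material.MINERAL, 1)])\n#   assembly = recipe.make_assembly()\n#   assembly.update_item(0, Item(1, Essence.LOGS, 2), 2)\n#   assembly.update_item(1, Item(2, Essence.ROCKS, 1), 1)\n#   assert recipe.validate_assembly(assembly)\n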
","sub_path":"python/edgin_around_api/craft.py","file_name":"craft.py","file_ext":"py","file_size_in_byte":9709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"452024197","text":"import sys\nimport os\nimport time\nsys.path.append(\"your/path/to/shared/libraries\")\nsys.path.append(\"your/path/to/xstream/xproto/packages\")\nsys.setdlopenflags(os.RTLD_LAZY)\nimport xstream # noqa\nimport vision_type as vt # noqa\n\n# define a simple workflow\nbbox_method = xstream.Method(\"BBoxFilter\").inputs([\"in_bbox\"])\n\n\ndef my_workflow(in_bbox):\n    bbox_filtered_A = bbox_method(\n        in_bbox, outputs=[\"bbox_filtered_A\"],\n        config_file=\"configs/pytest_configs/a_filter.json\")\n    bbox_filtered_B = bbox_method(\n        in_bbox, outputs=[\"bbox_filtered_B\"],\n        config_file=\"configs/pytest_configs/b_filter.json\")\n\n    return bbox_filtered_A, bbox_filtered_B\n\n\njson = xstream.serialize(my_workflow)\nprint(json)\n\n# create a session object\nsession = xstream.Session(my_workflow)\n\n\ndef nodecb(bbox):\n    print(\"===========node cb start==========\")\n    print(bbox.state)\n    vt.bbox_dump(bbox)\n    print(\"===========node cb end==========\")\n\n\ndef flowcb(bbox1, bbox2):\n    print(\"===========flow cb start==========\")\n    print(bbox1.state)\n    print(bbox2.state)\n    vt.bbox_dump(bbox1)\n    vt.bbox_dump(bbox2)\n    print(\"===========flow cb end==========\")\n\n\nsession.callback(nodecb, \"BBoxFilter_2\")\nsession.callback(nodecb, \"BBoxFilter_3\")\nsession.callback(flowcb)\n\nfor idx in range(1000):\n    session.forward(in_bbox=vt.bbox(0, 20, idx, 50))\n    time.sleep(0.01)\n\nprint(\"all complete!\")\n\ntime.sleep(1)\nsession.close()\ntime.sleep(2)\n","sub_path":"source/common/xstream/python_api/package/tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"351858744","text":"import tkinter as tk\nimport python_mini_project.movie.movieService as ms\nimport python_mini_project.movie.UI.MovieListPage as sp\nimport python_mini_project.booking.UI.BookingPage as bp\nimport python_mini_project.staticString as ss\nimport os\n\nclass PageTwo(tk.Frame):\n    def __init__(self, master):\n        self.service = ms.MovieService()\n        tk.Frame.__init__(self, master)\n        tk.Frame.configure(self)\n\n\n        tk.Label(self, text = self.service.getMovieInfo(\"도굴\"), font=('Helvetica', 10, \"bold\")).place(x = 240, y = 40)\n        self.backButton()\n        self.bookingButton()\n\n        tk.Button(self, text=\"종료\", command=lambda: master.quit()).place(x = 870, y = 30)\n\n\n    def bookingButton(self):\n        b = tk.Button(self, text = \"예매하기\")\n        b.bind(\"<Button-1>\", self.bookingButtonClicked)\n        b.place(x = 480, y = 500)\n\n    def bookingButtonClicked(self, event):\n        # os.chdir(\"../\")\n        # os.chdir(\"../\")\n        print(\"예매하기 : \", os.getcwd())\n        ss.screen = \"도굴\"\n        self.master.switch_frame(bp.bookingPage)\n\n    def backButton(self):\n        b1 = tk.Button(self, text=\"뒤로가기\" )\n        b1.bind(\"<Button-1>\", self.backButtonClicked)\n        b1.place(x = 800, y = 30)\n\n    def backButtonClicked(self, event):\n        # os.chdir(\"../\")  # move to the movie directory\n        print(\"PageOne : \", os.getcwd())\n        print(\"눌렀음\")\n        self.master.switch_frame(sp.StartPage)\n","sub_path":"python_mini_project/movie/UI/MovieTwo.py","file_name":"MovieTwo.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"24686037","text":"# -*- encoding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 8\n_modified_time = 1375514262.042973\n_enable_loop = True\n_template_filename = '/python projects/garlic/gsite/gsite/static/templates/prototypes.mak'\n_template_uri = '/python 
projects/garlic/gsite/gsite/static/templates/prototypes.mak'\n_source_encoding = 'ascii'\n_exports = []\n\n\ndef render_body(context,args,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(args=args,pageargs=pageargs)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer('\\r\\nbody {\\r\\n font-family: ')\n # SOURCE LINE 3\n __M_writer(str(args.fonts.family))\n __M_writer(';\\r\\n font-size: 14px;\\r\\n line-height: 1.428571429;\\r\\n color: #333333;\\r\\n background-color: #ffffff;\\r\\n}\\r\\n\\r\\ninput,\\r\\nbutton,\\r\\nselect,\\r\\ntextarea {\\r\\n font-family: inherit;\\r\\n font-size: inherit;\\r\\n line-height: inherit;\\r\\n}\\r\\n\\r\\na {\\r\\n color: #428bca;\\r\\n text-decoration: none;\\r\\n}\\r\\n\\r\\na:hover,\\r\\na:focus {\\r\\n color: #2a6496;\\r\\n text-decoration: underline;\\r\\n}\\r\\n\\r\\na:focus {\\r\\n outline: thin dotted #333;\\r\\n outline: 5px auto -webkit-focus-ring-color;\\r\\n outline-offset: -2px;\\r\\n}\\r\\n\\r\\nimg {\\r\\n vertical-align: middle;\\r\\n}\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n","sub_path":"_temp_/python projects/garlic/gsite/gsite/static/templates/prototypes.mak.py","file_name":"prototypes.mak.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527911155","text":"from attr import attrs, attrib\nfrom aiohttp.web import Request as WebRequest\nfrom aioalice.utils import ensure_cls\nfrom . import AliceObject, Meta, Session, \\\n Card, Request, Response, AliceResponse\n\n\n@attrs\nclass AliceRequest(AliceObject):\n \"\"\"AliceRequest is a request from Alice API\"\"\"\n original_request = attrib(type=WebRequest)\n meta = attrib(convert=ensure_cls(Meta))\n request = attrib(convert=ensure_cls(Request))\n session = attrib(convert=ensure_cls(Session))\n version = attrib(type=str)\n\n def _response(self, response):\n return AliceResponse(\n response=response,\n session=self.session.base,\n version=self.version\n )\n\n def response(self, responose_or_text, **kwargs):\n \"\"\"\n Generate response\n\n :param responose_or_text: Response or Response's text:\n if responose_or_text is not an instance of Response,\n it is passed to the Response initialisator with kwargs.\n Otherwise it is used as a Response\n\n :param kwargs: tts, card, buttons, end_session for Response\n NOTE: if you want to pass card, concider using one of\n these methods: response_big_image, response_items_list\n :return: AliceResponse\n \"\"\"\n if not isinstance(responose_or_text, Response):\n responose_or_text = Response(responose_or_text, **kwargs)\n return self._response(responose_or_text)\n\n def response_big_image(self, text, image_id, title, description, button=None, **kwargs):\n \"\"\"\n Generate response with Big Image card\n\n :param text: Response's text\n :param image_id: Image's id for BigImage Card\n :param title: Image's title for BigImage Card\n :param description: Image's description for BigImage Card\n :param button: Image's button for BigImage Card\n :param kwargs: tts, buttons, end_session for Response\n :return: AliceResponse\n \"\"\"\n return self._response(\n Response(\n text, **kwargs,\n card=Card.big_image(image_id, title, description, button),\n )\n )\n\n def response_items_list(self, text, header, items, footer=None, **kwargs):\n \"\"\"\n Generate response with Items List card\n\n :param text: Response's text\n :param header: Card's header\n :param items: Card's items 
- list of `Image` objects\n        :param footer: Card's footer\n        :param kwargs: tts, buttons, end_session for Response\n        :return: AliceResponse\n        \"\"\"\n        return self._response(\n            Response(\n                text, **kwargs,\n                card=Card.items_list(header, items, footer)\n            )\n        )\n","sub_path":"aioalice/types/alice_request.py","file_name":"alice_request.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163831236","text":"#-*-coding:utf-8-*-\nimport numpy as np\nfrom keras import backend as K\nfrom keras.engine import Input, Model\nfrom keras.layers import Conv3D, MaxPooling3D, Conv2D, MaxPooling2D, Flatten, Dense, UpSampling3D, Activation, BatchNormalization, PReLU, Deconvolution3D, Lambda, Embedding\nfrom keras.layers import Dropout,add,Reshape, GlobalAveragePooling2D,Multiply,Lambda,Add,Average,GlobalAveragePooling3D,GlobalMaxPooling3D,Permute,Subtract\nfrom keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\n\n# K.set_image_data_format(\"channels_first\")\n\ntry:\n    from keras.engine import merge\nexcept ImportError:\n    from keras.layers.merge import concatenate\n\ndef center_loss(x):\n    # numerator = K.sum(K.square(x[0] - x[1][:, 0, :]), 1, keepdims = True)\n    numerator = K.sum(K.square(x[0] - x[1][:, 0, :]), 1)\n\n    return numerator\n\ndef fuse(input):\n    data = input[0]\n    weight = input[1]\n    weight_expand1 = K.expand_dims(weight, -2)\n    weight_expand2 = K.expand_dims(weight_expand1, -2)\n    weight_expand3 = K.expand_dims(weight_expand2, -2)\n\n    weight_repeat1 = K.repeat_elements(weight_expand3, data.shape[1], axis=1)\n    weight_repeat2 = K.repeat_elements(weight_repeat1, data.shape[2], axis=2)\n    weight_repeat3 = K.repeat_elements(weight_repeat2, data.shape[3], axis=3)\n\n    fused_multiply = data*weight_repeat3\n    fused_sum = K.sum(fused_multiply,4)\n    fused_sum = K.sum(fused_sum, 1)\n    fused_sum = K.expand_dims(fused_sum, -1)\n\n    return fused_sum\n\ndef feature_weighting(input):\n    data = input[0]\n    weight = input[1]\n    weight_expand1 = K.expand_dims(weight,-1)\n    weight_expand2 = K.expand_dims(weight_expand1,-1)\n    weight_repeat1 = K.repeat_elements(weight_expand2,data.shape[1],axis=1)\n    weight_repeat2 = K.repeat_elements(weight_repeat1, data.shape[2], axis=2)\n\n    weighted_feat = data*weight_repeat2\n    # weighted_feat = K.squeeze(weighted_feat,axis=3)\n\n    return weighted_feat\n\n# loss function 2: consistency loss\ndef rcosine_loss(input):\n    dense_feat = input[0]\n    backbone_dense_feat = input[1]\n\n    square_dense_feat = dense_feat*dense_feat\n    square_backbone_dense_feat = backbone_dense_feat * backbone_dense_feat\n\n    sumsquare_dense_feat = K.sum(square_dense_feat, axis=1)\n    sumsquare_backbone_dense_feat = K.sum(square_backbone_dense_feat, axis=1)\n\n    numerator = dense_feat*backbone_dense_feat\n    numerator = K.sum(numerator,axis=1)\n\n    denominator = sumsquare_dense_feat*sumsquare_backbone_dense_feat\n    denominator = denominator**0.5\n\n    similarity = numerator/denominator\n    return 1 - similarity\n\n# softmax fusion function: merge the two softmax outputs into a single weighted softmax\ndef softmax_fuse(input):\n    softmax_3Doutput = input[0]\n    softmax_2Doutput = input[1]\n\n    softmax_fuse_output = 0.95*softmax_2Doutput + 0.05*softmax_3Doutput\n    return softmax_fuse_output\n\n\ndef unet_model_3d(first_input_shape, nb_classes):\n\n    channel_first_polar_input = Input(first_input_shape)\n    first_input = Permute([2, 3, 4, 1])(channel_first_polar_input)\n\n    conv_layer2 = Conv3D(8, (5, 5, 5), padding='same', activation='relu')(first_input)\n    conv_layer3 = Conv3D(8, 
(3, 3, 3), padding='same', activation='relu')(conv_layer2)\n\n # attention branch1\n attention1_permute = Permute([4, 2, 3, 1])(first_input)\n attention1_gpooling = GlobalAveragePooling3D()(attention1_permute)\n attention1_dense = Dense(units = 15, activation='relu')(attention1_gpooling)\n branch1 = Lambda(fuse)([attention1_permute, attention1_dense])\n\n # attention branch2\n attention2_permute = Permute([4, 2, 3, 1])(conv_layer2)\n attention2_gpooling = GlobalAveragePooling3D()(attention2_permute)\n attention2_dense = Dense(units = 15, activation='relu')(attention2_gpooling)\n branch2 = Lambda(fuse)([attention2_permute, attention2_dense])\n\n # attention branch3\n attention3_permute = Permute([4, 2, 3, 1])(conv_layer3)\n attention3_gpooling = GlobalAveragePooling3D()(attention3_permute)\n attention3_dense = Dense(units = 15, activation='relu')(attention3_gpooling)\n branch3 = Lambda(fuse)([attention3_permute, attention3_dense])\n\n # branch1_weighting\n branch_gpooling1 = MaxPooling2D(strides=4)(branch1)\n branch_flatten1 = Flatten()(branch_gpooling1)\n branch_dense1 = Dense(units = 1, activation='relu')(branch_flatten1)\n weighted_branch1 = Lambda(feature_weighting)([branch1, branch_dense1])\n\n # branch2_weighting\n branch_gpooling2 = MaxPooling2D(strides=4)(branch2)\n branch_flatten2 = Flatten()(branch_gpooling2)\n branch_dense2 = Dense(units = 1, activation='relu')(branch_flatten2)\n weighted_branch2 = Lambda(feature_weighting)([branch2, branch_dense2])\n\n # branch3_weighting\n branch_gpooling3 = MaxPooling2D(strides=4)(branch3)\n branch_flatten3 = Flatten()(branch_gpooling3)\n branch_dense3 = Dense(units = 1, activation='relu')(branch_flatten3)\n weighted_branch3 = Lambda(feature_weighting)([branch3, branch_dense3])\n\n # fuse_output\n fused_feat = Add()([weighted_branch1, weighted_branch2, weighted_branch3])\n\n conv_fused_feat = Conv2D(8, (3, 3), padding='same', activation='relu',strides=4)(fused_feat)\n # res_block1\n res_block1_conv1 = Conv2D(8, (3, 3), padding='same', activation='relu')(conv_fused_feat)\n res_block1_bnorm1 = BatchNormalization()(res_block1_conv1)\n res_block1_conv2 = Conv2D(8, (3, 3), padding='same', activation='relu')(res_block1_bnorm1)\n res_block1_output = Add()([res_block1_conv2,conv_fused_feat])\n res_block1_bnorm2 = BatchNormalization()(res_block1_output)\n res_block1_pooling = MaxPooling2D()(res_block1_bnorm2)\n # res_block2\n res_block2_conv1 = Conv2D(8, (3, 3), padding='same', activation='relu')(res_block1_pooling)\n res_block2_bnorm1 = BatchNormalization()(res_block2_conv1)\n res_block2_conv2 = Conv2D(8, (3, 3), padding='same', activation='relu')(res_block2_bnorm1)\n res_block2_output = Add()([res_block2_conv2,res_block1_pooling])\n res_block2_bnorm2 = BatchNormalization()(res_block2_output)\n res_block2_pooling = MaxPooling2D()(res_block2_bnorm2)\n # res_block3\n res_block3_conv1 = Conv2D(8, (3, 3), padding='same', activation='relu')(res_block2_pooling)\n res_block3_bnorm1 = BatchNormalization()(res_block3_conv1)\n res_block3_conv2 = Conv2D(8, (3, 3), padding='same', activation='relu')(res_block3_bnorm1)\n res_block3_output = Add()([res_block3_conv2,res_block2_pooling])\n res_block3_bnorm2 = BatchNormalization()(res_block3_output)\n res_block3_pooling = MaxPooling2D()(res_block3_bnorm2)\n\n # flatten\n flatten_feat = Flatten()(res_block3_pooling)\n dense_feat = Dense(units = 256, activation='linear')(flatten_feat)\n\n base_model = ResNet50(weights='imagenet',include_top=False,input_shape=[128,128,3])\n second_input = base_model.input\n 
resnet50_activation_98_output = base_model.output\n resnet50_gpooling = GlobalAveragePooling2D()(resnet50_activation_98_output)\n backbone_dense_feat = Dense(256, activation='relu')(resnet50_gpooling)\n\n # concat_feat\n concat_layer = concatenate([dense_feat, backbone_dense_feat],axis = 1)\n\n # polar loss\n input_target = Input(shape=(1,))\n centers = Embedding(nb_classes, 512)(input_target)\n l2_loss = Lambda(center_loss, name='l2_loss')([concat_layer, centers])\n # similarity loss\n nonsimilarity_loss = Lambda(rcosine_loss, name='consistence_loss')([dense_feat, backbone_dense_feat])\n # consistence loss\n consistence_loss = Multiply()([nonsimilarity_loss, l2_loss])\n\n # output1\n softmax_output1 = Dense(units = nb_classes, activation = 'softmax',name = 'softmax1')(dense_feat)\n # output2\n softmax_output2 = Dense(units = nb_classes, activation = 'softmax',name = 'softmax2')(backbone_dense_feat)\n # average softmax_output1 and softmax_output2\n\n # softmax_output = Average(name = 'softmax')([softmax_output1, softmax_output2])\n softmax_output = Lambda(softmax_fuse, name='softmax')([softmax_output1, softmax_output2])\n\n train_model = Model(inputs=[input_target,channel_first_polar_input,second_input],\n outputs=[softmax_output, consistence_loss])\n test_model = Model(inputs=[channel_first_polar_input,second_input],\n outputs=[softmax_output])\n\n # return model_train, model_test\n return train_model, test_model\n\n\n","sub_path":"algae_fuse_resnet50.py","file_name":"algae_fuse_resnet50.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466399610","text":"from app import app, db\nfrom models import SoccerGame, RequestType, Request\nfrom flask import request, render_template, redirect, url_for\nfrom auth import check_user, template\n\nimport json\nimport sms\nfrom datetime import datetime\n\n\n@app.route(\"/referee\", methods=[\"GET\"])\ndef referee():\n if not check_user():\n return redirect(url_for(\"login\"))\n if request.method == \"GET\":\n return show_all_unfinished_games()\n\n\n@app.route(\"/referee//0\", methods=[\"GET\"])\ndef referee_game(id):\n if not check_user():\n return redirect(url_for(\"login\"))\n if request.method == \"GET\":\n return show_game(int(id), False)\n\n\n@app.route(\"/referee//1\", methods=[\"GET\"])\ndef referee_game_2(id):\n if not check_user():\n return redirect(url_for(\"login\"))\n if request.method == \"GET\":\n return show_game(int(id), True)\n\n\n@app.route(\"/referee//end\", methods=[\"GET\", \"POST\"])\ndef referee_game_end(id):\n if not check_user():\n return redirect(url_for(\"login\"))\n if request.method == \"GET\":\n game = SoccerGame.query.filter_by(id=id).one()\n return render_template(\"referee_end.html\", auth=template(), game=game)\n else:\n game = SoccerGame.query.filter_by(id=id).one()\n game.winner_agrees = (request.form.get(\n \"winner_agrees\", False) == \"true\")\n game.loser_agrees = (request.form.get(\"loser_agrees\", False) == \"true\")\n game.game_finished = True\n game.ref_id = check_user().username\n db.session.commit()\n return redirect(\"referee\")\n\n\n@app.route(\"/referee//state\", methods=[\"GET\"])\ndef update_game_state(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n return json.dumps({\n \"home_goals\": game.home_goals,\n \"away_goals\": game.away_goals,\n \"timer_start\": str(game.timer_start).replace(\" \", \"T\"),\n 
\"first_half_finished\": game.first_half_finished,\n \"half_time_finished\": game.half_time_finished,\n \"second_half_finished\": game.second_half_finished,\n \"game_ended\": game.game_finished,\n \"home_damaged_1\": str(game.home_damaged_1).replace(\" \", \"T\"),\n \"home_damaged_2\": str(game.home_damaged_2).replace(\" \", \"T\"),\n \"away_damaged_1\": str(game.away_damaged_1).replace(\" \", \"T\"),\n \"away_damaged_2\": str(game.away_damaged_2).replace(\" \", \"T\"),\n \"test\": str(datetime.now()).replace(\" \", \"T\")\n })\n\n\n@app.route(\"/referee//toggle_clock\", methods=[\"GET\"])\ndef update_game_clock(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.timer_start is None:\n game.timer_start = datetime.now()\n else:\n game.timer_start = None\n db.session.commit()\n return json.dumps({\"success\": \"toggle_clock\"})\n\n\n@app.route(\"/referee//next_state\", methods=[\"GET\"])\ndef next_game_state(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if not game.first_half_finished:\n game.first_half_finished = True\n elif not game.half_time_finished:\n game.half_time_finished = True\n else:\n game.second_half_finished = True\n game.timer_start = None\n game.home_damaged_1 = None\n game.home_damaged_2 = None\n game.away_damaged_1 = None\n game.away_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"next_state\"})\n\n\n@app.route(\"/referee//prev_state\", methods=[\"GET\"])\ndef prev_game_state(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.second_half_finished:\n game.second_half_finished = False\n elif game.half_time_finished:\n game.half_time_finished = False\n else:\n game.first_half_finished = False\n game.timer_start = None\n game.home_damaged_1 = None\n game.home_damaged_2 = None\n game.away_damaged_1 = None\n game.away_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"prev_state\"})\n\n\n@app.route(\"/referee//home_goal\", methods=[\"GET\"])\ndef score_home_goal(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n game.home_goals += 1\n game.home_damaged_1 = None\n game.home_damaged_2 = None\n game.away_damaged_1 = None\n game.away_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"home_goal\"})\n\n\n@app.route(\"/referee//away_goal\", methods=[\"GET\"])\ndef score_away_goal(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n game.away_goals += 1\n game.home_damaged_1 = None\n game.home_damaged_2 = None\n game.away_damaged_1 = None\n game.away_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"away_goal\"})\n\n\n@app.route(\"/referee//home_goal_cancel\", methods=[\"GET\"])\ndef score_home_goal_cancel(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.home_goals > 0:\n game.home_goals -= 1\n db.session.commit()\n return json.dumps({\"success\": \"home_goal_cancel\"})\n\n\n@app.route(\"/referee//away_goal_cancel\", methods=[\"GET\"])\ndef score_away_goal_cancel(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if 
game.away_goals > 0:\n game.away_goals -= 1\n db.session.commit()\n return json.dumps({\"success\": \"away_goal_cancel\"})\n\n\n@app.route(\"/referee//damage_home_1\", methods=[\"GET\"])\ndef damage_home_1(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.home_damaged_1 is None:\n game.home_damaged_1 = datetime.now()\n else:\n game.home_damaged_1 = None\n db.session.commit()\n return json.dumps({\"success\": \"damage_home_1\"})\n\n\n@app.route(\"/referee//damage_home_2\", methods=[\"GET\"])\ndef damage_home_2(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.home_damaged_2 is None:\n game.home_damaged_2 = datetime.now()\n else:\n game.home_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"damage_home_2\"})\n\n\n@app.route(\"/referee//damage_away_1\", methods=[\"GET\"])\ndef damage_away_1(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.away_damaged_1 is None:\n game.away_damaged_1 = datetime.now()\n else:\n game.away_damaged_1 = None\n db.session.commit()\n return json.dumps({\"success\": \"damage_away_1\"})\n\n\n@app.route(\"/referee//damage_away_2\", methods=[\"GET\"])\ndef damage_away_2(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n if game.away_damaged_2 is None:\n game.away_damaged_2 = datetime.now()\n else:\n game.away_damaged_2 = None\n db.session.commit()\n return json.dumps({\"success\": \"damage_away_2\"})\n\n\n@app.route(\"/referee//reset\", methods=[\"GET\"])\ndef reset_game(id):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n game = SoccerGame.query.filter_by(id=id).one()\n game.timer_start = None\n game.home_damaged_1 = None\n game.home_damaged_2 = None\n game.away_damaged_1 = None\n game.away_damaged_2 = None\n game.first_half_finished = False\n game.second_half_finished = False\n game.half_time_finished = False\n game.home_goals = 0\n game.away_goals = 0\n db.session.commit()\n return json.dumps({\"success\": \"reset_game\"})\n\n\n@app.route(\"/referee//request/\", methods=[\"GET\"])\ndef send_request(id, rtype):\n if not check_user():\n return json.dumps({\"error\": \"login_fail\"})\n\n req = Request()\n req.request_type_id = int(rtype)\n req.user_id = check_user().username\n req.game_id = int(id)\n\n db.session.add(req)\n db.session.commit()\n if req.request_type.send_text:\n sms.send(req.request_type.user.phone,\n \"REQUEST: {type} at {league} {field} by {user} ({time})\"\n .format(**{\n \"type\": req.request_type.name,\n \"league\": req.game.league.name,\n \"field\": req.game.field,\n \"user\": req.user.username,\n \"time\": req.received.strftime(\"%X\"),\n }))\n return json.dumps({\"success\": \"send_request\"})\n\n\ndef show_game(id, switch):\n game = SoccerGame.query.filter_by(id=id).one()\n request_types = RequestType.query.order_by(\n RequestType.priority.desc(), RequestType.name.asc()).all()\n return render_template(\"referee.html\", game=game, auth=template(),\n switch_side=switch, request_types=request_types)\n\n\ndef show_all_unfinished_games():\n games = SoccerGame.query.filter_by(game_finished=False)\\\n .filter(SoccerGame.home_team.has(is_system=False) &\n SoccerGame.away_team.has(is_system=False))\\\n .order_by(SoccerGame.scheduled_time.asc(), 
SoccerGame.round.asc(),\n                  SoccerGame.field.asc()).all()\n    return render_template(\"referee_games.html\", games=games, auth=template())\n","sub_path":"referee.py","file_name":"referee.py","file_ext":"py","file_size_in_byte":9629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"482268646","text":"'''\nCreated on Apr 1, 2013\n\n@author: root\n'''\nimport os\nimport commands\nimport re\nimport logging\n\nfrom error import AuditError\nauditError = AuditError()\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n    \ndef runlevel():\n    try:\n        fd = open(\"/etc/inittab\", 'r')\n    except IOError:\n        fd = open(\"/etc/init/rc-sysinit.conf\", 'r')\n    data = fd.read()\n    fd.close()\n    return data\n\ndef runlevelToJson(stats):\n    lines = stats.splitlines()\n    runlevel = {}\n    runlevel[\"current\"] = commands.getoutput(\"runlevel\").split()[1]\n    for line in lines:\n        m = re.search(r'id:(?P<default>\d):initdefault:', line.strip())\n        if m:\n            runlevel[\"default\"] = m.groupdict()[\"default\"]\n    return runlevel\n\ndef main(path, C=None, name='runlevel'):\n    auditError.init(name, path)\n    data = runlevel()\n    dataJson = open(os.path.join(path, name +'.json'), 'w')\n    dataRaw = open(os.path.join(path, name +'.txt'), 'w')\n    try:\n        dataRaw.write(data)\n        json.dump(runlevelToJson(data), dataJson, indent=4)\n    finally:\n        dataRaw.close()\n        dataJson.close()\n\nif __name__ == \"__main__\":\n    main('.')\n    \n","sub_path":"modules/runlevel/runlevel.py","file_name":"runlevel.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"11413876","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom django.utils import timezone\n\n# Models\nfrom Personal.models import Personas\nfrom Equipo.models import Equipo\nfrom PersonalEquipo.models import PersonalEquipo, PersonalEquipoHistoria\n\n# Forms\nfrom PersonalEquipo.forms import RelacionForm\n\nclass Asignar(View):\n    template_name = 'base_crear.html'\n    def context_data(self, form=None, title='Asignar Equipo'):\n        # build a fresh form per call; a form instance used as a default\n        # argument would be created once and shared across requests\n        if form is None:\n            form = RelacionForm()\n        return {\n            'title' : title,\n            'form' : form\n        }\n\n\n    def get(self, request, pk):\n        context = self.context_data()\n        return render(request, self.template_name, context)\n\n    def post(self, request, pk):\n        persona = get_object_or_404(Personas, id = pk)\n        if 'btn-cancelar' in request.POST:\n            return redirect('PersonaViewDetail', pk)\n        form = RelacionForm(request.POST)\n\n        if form.is_valid():\n            r = form.save()\n            r.persona = persona\n            r.fecha_inicio = timezone.now().date()\n            r.save()\n            return redirect('PersonaViewDetail', pk)\n        context = self.context_data(form = form)\n        return render(request, self.template_name, context)\n\nclass Devolver(View):\n    def post(self, request, pk):\n        instance = get_object_or_404(PersonalEquipo, id = pk)\n\n        hw = ''\n        sw = ''\n        for hardware in instance.equipo.hardware.all():\n            hw += str(hardware) + ' '\n        for software in instance.equipo.software.all():\n            sw += str(software) + ' '\n\n        historial = PersonalEquipoHistoria()\n        historial.fecha_entrega = timezone.now()\n        historial.persona = instance.persona\n        historial.equipo = instance.equipo\n        historial.fecha_inicio = instance.fecha_inicio\n        historial.fecha_termino = instance.fecha_termino\n        historial.hw = hw\n        historial.sw = sw\n        historial.save()\n\n        instance.equipo.estado = True\n        instance.equipo.save()\n        instance.delete()\n        return redirect('PersonaViewDetail', instance.persona.id)\n\nclass 
Historial(View):\n template_name = 'Historial/historial_persona.html'\n\n def get(self, request, pk):\n persona = get_object_or_404(Personas, id = pk )\n context = {\n 'equipos' : persona.personalequipohistoria_set.all().order_by('-fecha_entrega'),\n 'title' : 'Historial de {0}'.format(persona)\n }\n return render(request, self.template_name, context )\n\nclass HistorialEquipo(View):\n template_name = 'Historial/historial_equipo.html'\n def get(self, request, pk):\n equipo = get_object_or_404(Equipo, id = pk )\n context = {\n 'equipos' : equipo.personalequipohistoria_set.all().order_by('-fecha_entrega'),\n 'title' : 'Historial de {0}'.format(equipo)\n }\n return render(request, self.template_name, context )\n","sub_path":"src/PersonalEquipo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103262202","text":"from datetime import datetime\nfrom multiprocessing import cpu_count\nfrom multiprocessing.pool import Pool\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom sdnn import Model, fully_connected, input_data\nfrom sdnn.utils import get_data, train_test_split, vector2onehot\n\n\ndef xor_problem():\n np.random.seed(1)\n x = np.array([\n [1, 1], [1, 0], [0, 1], [0, 0],\n ], dtype=float)\n y = np.array([\n [1, 0], [0, 1], [0, 1], [1, 0],\n ], dtype=float)\n\n net = input_data((None, 2))\n net = fully_connected(net, 3, activation='tanh')\n net = fully_connected(net, 2, activation='tanh')\n\n model = Model(net)\n model.fit(x, y, n_epoch=200)\n model.save('xor_model.json')\n model.load('xor_model.json')\n\n for i in zip(y, model.predict(x)):\n print(*i)\n\n model.plot_error()\n\n\ndef get_accuracy(pred, y):\n axis_ = np.argmax(pred, axis=1) - np.argmax(y, axis=1)\n return 1 - np.count_nonzero(axis_) / len(y)\n\n\ndef create_network(n, shapes, activation, seed=42):\n np.random.seed(seed)\n\n net = input_data(shape=(None, n))\n for i in shapes:\n net = fully_connected(net, i, activation)\n return net\n\n\ndef test_case(shapes, activation, n_features, batch_size, learning_rate, n_epoch, model_dir, seed=42):\n name = 's_{}_a_{}_f_{}_bs_{}_lr_{}'.format(\n '_'.join(map(str, shapes)), activation, n_features, batch_size, '_'.join(map(str, learning_rate))\n )\n print(name)\n network = create_network(n_features, shapes, activation, seed=seed)\n\n data = get_data(n_features)\n\n train, test = train_test_split(data, seed=seed)\n\n x_train = train[:, :-1]\n y_train = train[:, -1] - 1\n y_train = vector2onehot(y_train)\n x_test = test[:, :-1]\n y_test = test[:, -1] - 1\n y_test = vector2onehot(y_test)\n\n model = Model(network)\n model.fit(x_train, y_train,\n validation_set=(x_test, y_test),\n n_epoch=n_epoch,\n batch_size=batch_size,\n learning_rate=learning_rate,\n train_file=f'{model_dir}/{name}_train.json',\n )\n\n model_fn = f'{model_dir}/{name}_model.json'\n model.save(model_fn)\n # model.load(model_fn)\n\n # for i, j in zip(model.predict(x_test), y_test):\n # print(np.argmax(i), np.argmax(j))\n\n print(get_accuracy(model.predict(x_test), y_test))\n\n # model.plot_error()\n\n\ndef wrapper(x):\n return test_case(**x)\n\n\ndef prepare_test_cases():\n model_dir = Path('results') / datetime.now().strftime('%s')\n Path(model_dir).mkdir(exist_ok=True, parents=True)\n for act in ['sigmoid']:\n for feat in [30]:\n for shape in [[16, 12, 8]]:\n for lr in [[.2, .01], [.2, .001]]:\n yield {\n 'shapes': shape,\n 'activation': act,\n 'n_features': feat,\n 'batch_size': 10,\n 
'learning_rate': lr,\n                        'n_epoch': 10,\n                        'model_dir': model_dir,\n                    }\n\n\ndef run_all():\n    np.random.seed(42)\n\n    cpus = min(cpu_count(), 16)\n    cases = prepare_test_cases()\n\n    print(f'Running on {cpus} CPUs')\n\n    with Pool(cpus) as pool:\n        pool.map(wrapper, cases)\n\n\nif __name__ == '__main__':\n    from time import time\n\n    t0 = time()\n    run_all()\n    # xor_problem()\n    t = time() - t0\n    print(f'Done in {t} s')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283842223","text":"from typing import Optional, Union\n\nfrom aiohttp import web\nfrom aiohttp.helpers import ChainMapProxy\nfrom openapi_core.schema.specs.models import Spec\n\nfrom .constants import (\n    APP_OPENAPI_SCHEMA_KEY,\n    APP_OPENAPI_SPEC_KEY,\n    REQUEST_OPENAPI_CONTEXT_KEY,\n)\nfrom .data import OpenAPIContext, OpenAPIOperation\nfrom .exceptions import ConfigurationError, ContextError, OperationError\nfrom ..annotations import DictStrAny\n\n\ndef add_prefix(path: str, prefix: Optional[str]) -> str:\n    return f\"{prefix}{path}\" if prefix else path\n\n\ndef get_openapi_context(request: web.Request) -> OpenAPIContext:\n    \"\"\"Shortcut to retrieve the OpenAPI context from an ``aiohttp.web`` request.\n\n    An ``OpenAPIContext`` is attached to a :class:`aiohttp.web.Request` instance only\n    if the current request contains valid data.\n\n    ``ContextError`` is raised if, for some reason, the function is called outside of a\n    valid OpenAPI request context.\n    \"\"\"\n    try:\n        return request[REQUEST_OPENAPI_CONTEXT_KEY]  # type: ignore\n    except KeyError:\n        raise ContextError(\n            \"Request instance does not contain valid OpenAPI context. In \"\n            \"most cases it means, the function is called outside of valid \"\n            \"OpenAPI request context.\"\n        )\n\n\ndef get_openapi_schema(\n    mixed: Union[web.Application, ChainMapProxy]\n) -> DictStrAny:\n    \"\"\"Shortcut to retrieve the OpenAPI schema from an ``aiohttp.web`` application.\n\n    ``ConfigurationError`` is raised if the :class:`aiohttp.web.Application` does not\n    contain a registered OpenAPI schema.\n    \"\"\"\n    try:\n        return mixed[APP_OPENAPI_SCHEMA_KEY]  # type: ignore\n    except KeyError:\n        raise ConfigurationError(\n            \"Seems like OpenAPI schema not registered to the application. Use \"\n            '\"from rororo import setup_openapi\" function to register OpenAPI '\n            \"schema to your web.Application.\"\n        )\n\n\ndef get_openapi_spec(mixed: Union[web.Application, ChainMapProxy]) -> Spec:\n    \"\"\"Shortcut to retrieve the OpenAPI spec from an ``aiohttp.web`` application.\n\n    ``ConfigurationError`` is raised if the :class:`aiohttp.web.Application` does not\n    contain a registered OpenAPI spec.\n    \"\"\"\n    try:\n        return mixed[APP_OPENAPI_SPEC_KEY]\n    except KeyError:\n        raise ConfigurationError(\n            \"Seems like OpenAPI spec not registered to the application. 
Use \"\n '\"from rororo import setup_openapi\" function to register OpenAPI '\n \"schema to your web.Application.\"\n )\n\n\ndef get_openapi_operation(\n oas: DictStrAny, operation_id: str\n) -> OpenAPIOperation:\n \"\"\"Go through OpenAPI schema and try to find operation details by its ID.\n\n These details allow to add given operation to router as they share:\n\n - method\n - path\n\n for the operation.\n \"\"\"\n for path, path_schema in (oas.get(\"paths\") or {}).items():\n for method, operation_schema in path_schema.items():\n if operation_schema.get(\"operationId\") == operation_id:\n return OpenAPIOperation(\n id=operation_id,\n method=method,\n path=path,\n schema=operation_schema,\n )\n\n raise OperationError(\n f'Unable to find operation \"{operation_id}\" in provided OpenAPI '\n \"Schema.\"\n )\n","sub_path":"rororo/openapi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"511425192","text":"'''\nModified from SparseConvNet data preparation: https://github.com/facebookresearch/SparseConvNet/blob/master/examples/ScanNet/prepare_data.py\n'''\n\nimport glob, plyfile, numpy as np, multiprocessing as mp, torch, json, argparse\n\n# Map relevant classes to {0,1,...,7}, and ignored classes to -100\nremapper = np.ones(150) * (-100)\nfor i, x in enumerate([1, 2, 3, 4, 5, 6, 7, 8]):\n remapper[x] = i\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_split', help='data split (train / val / test)', default='train')\nopt = parser.parse_args()\n\nsplit = opt.data_split\nprint('data split: {}'.format(split))\npoint_files = sorted(glob.glob(split + '/*_vh_clean_2.ply'))\nif opt.data_split != 'test':\n point_label_files = sorted(glob.glob(split + '/*_vh_clean_2.labels.ply'))\n instance_files = sorted(glob.glob(split + '/*.instances.json'))\n assert len(point_files) == len(point_label_files)\n assert len(point_label_files) == len(instance_files)\n\ndef create_test_data(ply):\n '''\n Creates inst_nostuff.pth file for each test .ply file.\n\n ply: Name of the .ply file being used. e.g. \"scene0000_0000_00_00.ply\"\n '''\n print(ply)\n\n data = plyfile.PlyData().read(ply)\n points = np.array([list(x) for x in data.elements[0]])\n coords = np.ascontiguousarray(points[:, :3] - points[:, :3].mean(0))\n colors = np.ascontiguousarray(points[:, 3:6]) / 127.5 - 1\n\n torch.save((coords, colors), ply[:-15] + '_inst_nostuff.pth')\n print('Saving to ' + ply[:-15] + '_inst_nostuff.pth')\n\n\ndef create_train_data(ply):\n '''\n Creates inst_nostuff.pth file for each train/val .ply file.\n\n ply: Name of the .ply file being used. e.g. 
\"scene0000_0000_00_00.ply\"\n    '''\n    labels_file = ply[:-3] + 'labels.ply'\n    instances_file = ply[:-15] + '.instances.json'\n    print(ply)\n\n    data = plyfile.PlyData().read(ply)\n    points = np.array([list(x) for x in data.elements[0]])\n    coords = np.ascontiguousarray(points[:, :3] - points[:, :3].mean(0))\n    colors = np.ascontiguousarray(points[:, 3:6]) / 127.5 - 1\n\n    data_labels = plyfile.PlyData().read(labels_file)\n    sem_labels = remapper[np.array(data_labels.elements[0]['label'])]\n#    sem_labels = np.array(data_labels.elements[0]['label'])\n\n    with open(instances_file) as jsondata:\n        data_instances = json.load(jsondata)\n    instance_labels = np.array(data_instances['instances'])\n\n    torch.save((coords, colors, sem_labels, instance_labels), ply[:-15] + '_inst_nostuff.pth')\n    print('Saving to ' + ply[:-15] + '_inst_nostuff.pth')\n\n# for fn in point_files:\n#     create_train_data(fn)\n\np = mp.Pool(processes=mp.cpu_count())\nif opt.data_split == 'test':\n    p.map(create_test_data, point_files)\nelse:\n    p.map(create_train_data, point_files)\np.close()\np.join()\n","sub_path":"dataset/DALEStext/prepare_data_inst_DALES.py","file_name":"prepare_data_inst_DALES.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"405629179","text":"from django.core.mail import send_mail\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.http import require_http_methods\n\nfrom core.utils.cache import DonateCache, uuid_group\nfrom group.models import Group\nfrom web.forms import EmailForm\nfrom django.conf import settings\n\n\n@require_http_methods(['GET'])\ndef index(request):\n    return render(request, 'web/index.html')\n\n\n@require_http_methods(['GET'])\ndef get_covers(request, uuid):\n    group_id = uuid_group.get(uuid)\n    uuid_group.delete(uuid)\n    default_url = settings.MEDIA_URL + '/' + settings.DEFAULT_AVATAR\n    if group_id:\n        group = Group.objects.filter(group_id=group_id).first()\n        if group:\n            donates_data = [DonateCache.get_data(id) for id in group.donates_list]\n            target = group.active_target\n            target_percents = None\n            if target.amount:\n                target_percents = min(100, int(100 * target.donates_sum/target.amount))\n            return render(request, 'web/banner-3.html', {'donates': donates_data,\n                                                         'target': group.active_target,\n                                                         'group': group,\n                                                         'default_url': default_url,\n                                                         'target_percents': target_percents})\n    return redirect('web:index')\n\n\n@require_http_methods(['POST'])\ndef send_email(request):\n    form = EmailForm(request.POST)\n    if form.is_valid():\n        message = 'Имя:{}\nEmail:{}\nСообщение:{}'.format(\n            form.cleaned_data.get('name'),\n            form.cleaned_data.get('email'),\n            form.cleaned_data.get('comment')\n        )\n        send_mail('new_offer', message,\n                  settings.EMAIL_HOST_USER, [settings.EMAIL_HOST_USER])\n    return redirect('web:index')\n","sub_path":"web/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"372335519","text":"from sklearn.datasets import load_boston\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_absolute_error\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nnp.random.seed(2) # Set the randomization seed to 2 --> we always get the same results\r\n\r\ndataset = load_boston()\r\n\r\nX = dataset['data'] # features\r\ny = dataset['target'] # target\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y) # Split the features and targets into two parts (one for\r\n# training, one for testing)\r\n\r\nmodel = LinearRegression()\r\nmodel.fit(X_train, y_train) # train the model on the training data\r\n\r\np_train = model.predict(X_train)\r\np_test = model.predict(X_test) # predictions\r\n\r\nmae_train = mean_absolute_error(y_train, p_train)\r\nmae_test = mean_absolute_error(y_test, p_test) # measure the error between desired outputs and predictions\r\nprint(\"MAE test\", mae_test) # mean absolute error\r\nprint(\"MAE train\", mae_train)\r\nprint(np.mean(y_test)) # mean of the targets
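\r\n\r\n# A trivial baseline for comparison (illustrative addition): always predict\r\n# the mean of the training targets.\r\nbaseline = np.full_like(y_test, y_train.mean())\r\nprint(\"MAE baseline\", mean_absolute_error(y_test, baseline))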
train_test_split(X, y) # Split the features and the targets into two parts (one for\r\n# training, one for testing)\r\n\r\nmodel = LinearRegression()\r\nmodel.fit(X_train, y_train) # train the model on the training data\r\n\r\np_train = model.predict(X_train)\r\np_test = model.predict(X_test) # predictions\r\n\r\nmae_train = mean_absolute_error(y_train, p_train)\r\nmae_test = mean_absolute_error(y_test, p_test) # measure the error between the desired responses and the predictions\r\nprint(\"MAE test\", mae_test) # Mean absolute error\r\nprint(\"MAE train\", mae_train)\r\nprint(np.mean(y_test)) # mean of the targets\r\n\r\n\r\n","sub_path":"Boston.py","file_name":"Boston.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"113871225","text":"from flask import Flask, request, render_template\n# from whitenoise import WhiteNoise\nimport os, psycopg2, json, re\n\n# Remove this later\nfrom random import randint\n\napp = Flask(__name__)\n# app.wsgi_app = WhiteNoise(app.wsgi_app, root='static/')\n\nDATABASE_URL = os.environ.get(\"DATABASE_URL\")\nWORKING_DIR = os.environ.get(\"APP_WORKING_DIR\")\nprint( (WORKING_DIR+\"init_schema.sql\").strip() )\n\ndef init_db():\n\tconn = psycopg2.connect(DATABASE_URL)\n\tcur = conn.cursor()\n\tcur.execute(open((WORKING_DIR+\"init_schema.sql\").strip(), \"r\").read())\n\tcur.close()\n\tconn.commit()\n\tconn.close()\n\ninit_db()\n\n\n\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n#\tFunctions to fetch from DB\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n\ndef db_get_products(is_featured, sortBy=\"bestSelling\", search=\"\"):\n\n\tconnection = psycopg2.connect(DATABASE_URL)\n\tcursor = connection.cursor()\n\n\t# combined_products = {}\n\tcombined_products = []\n\n\tif type(is_featured).__name__ == \"str\" and is_featured == \"Both\":\n\t\tis_featured = \"True OR product_featured IS False\"\n\n\tsortMap = {\n\t\t\"bestSelling\": \"product_numOfSales ASC\",\n\t\t\"alphabetically-az\": \"product_name ASC\",\n\t\t\"alphabetically-za\": \"product_name DESC\",\n\t\t\"price-lh\": \"product_cost ASC\",\n\t\t\"price-hl\": \"product_cost DESC\"\n\t}\n\n\tcursor.execute(\"\"\"\n\t\tSELECT product_name, product_desc, product_cost, product_options, product_id FROM products \n\t\tWHERE (product_featured IS {feat}) AND product_name ILIKE '%{srch}%' ORDER BY {sort};\n\t\"\"\".format(feat=is_featured, srch=search, sort=sortMap[sortBy]))\n\n\tfone = cursor.fetchone()\n\twhile fone:\n\t\tfone = list(fone)\n\t\t# Fetch reviews for this particular product\n\t\tproduct_info = {\n\t\t\t\"product_name\": fone[0],\n\t\t\t\"product_desc\": fone[1],\n\t\t\t# \"product_cost\": '{0:.2f}'.format(fone[2]),\n\t\t\t\"product_cost_r\": int(fone[2]), # whole value\n\t\t\t\"product_cost_d\": '{:.2f}'.format(fone[2])[-2:], # decimal value\n\t\t\t\"product_opts\": fone[3],\n\t\t\t\"product_id\": fone[4],\n\t\t\t\"product_cover\": \"{id}/{id}.jpg\".format(id=fone[4]),\n\t\t\t\"reviews_rating\": db_get_reviews(fone[4])\n\t\t}\n\t\tcombined_products.append(product_info)\n\t\tfone = cursor.fetchone()\n\n\tcursor.close()\n\tconnection.close()\n\treturn combined_products\n\ndef db_get_reviews(pid):\n\t# connection = psycopg2.connect(DATABASE_URL)\n\t# cursor = connection.cursor()\n\t# This will return all of the rows for all of the reviews that match that product id.\n\t# We of course have no reviews yet so we'll just return a dummy value of 5 stars\n\t# cursor.execute(\"\"\"\n\t# \tSELECT product_rating FROM reviews 
WHERE {id} = product_id;\n\t# \"\"\".format(id=pid))\n\t# cursor.close()\n\t# connection.close()\n\t# print( \"Reviews: \" + str(cursor.fetchall()) )\n\treturn randint(3, 5)\n\ndef fetchUserCartContents(cookie):\n\t\n\t# Check db table if cookie exists, if it does, return row else store the cookie\n\n\tnew_cookie = cookie\n\tcone = \"\"\n\n\tconnection = psycopg2.connect(DATABASE_URL)\n\tcursor = connection.cursor()\n\n\t# cursor.fetchone() always returns tuples\n\tif cookie:\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT cart_contents FROM carts WHERE customer_cookie = {id};\n\t\t\"\"\".format(id=cookie))\n\t\tcone = cursor.fetchone()\n\telse:\n\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO carts(cart_contents) VALUES (DEFAULT); COMMIT; SELECT COUNT(*) FROM carts;\n\t\t\"\"\")\n\t\tnew_cookie = cursor.fetchone()[0]\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT cart_contents FROM carts WHERE customer_cookie = {id};\n\t\t\"\"\".format(id=new_cookie))\n\t\tcone = cursor.fetchone()\n\n\tif cone:\n\t\tcone = cone[0] # un-tupling it here\n\n\tcursor.close()\n\tconnection.close()\n\n\treturn {\n\t\t\"user\": new_cookie,\n\t\t\"content\": cone\n\t}\n\n\ndef respond(page, template_info={}):\n\n\tcookie = request.cookies.get(\"userID\")\n\t# print( \"cookie val: \" + str(cookie) )\n\tcart_contents = fetchUserCartContents(cookie)\n\n\tmodified_return = {\n\t\t\"template_info\": template_info,\n\t\t\"cart_contents\": cart_contents\n\t}\n\n\t# print( cookie )\n\n\tresponse = app.make_response(render_template(page, info=modified_return))\n\n\tif cart_contents:\n\t\tresponse.set_cookie(\"userID\", str(cart_contents[\"user\"]))\n\t\tresponse.set_cookie(\"cart\", str(cart_contents[\"content\"]))\n\n\treturn response\n\n\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n#\tDecorators\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n\n@app.route(\"/\")\ndef home():\n\n\tpage_info = {\n\t\t\"featured_products\": db_get_products(True),\n\t\t\"products\": db_get_products(\"Both\")\n\t}\n\n\tresponse = respond(\"main.html\", page_info)\n\n\treturn response\n\n\n@app.route(\"/cart\")\ndef cart():\n\treturn respond(\"cart.html\")\n\n\n@app.route(\"/search/\")\ndef search(query):\n\t# query db using only specified search.\n\tmodified_q = re.sub(\"[^A-Za-z0-9]+\",\"\",query)\n\tpage_info = {\n\t\t\"user_query\": modified_q,\n\t\t\"products\": db_get_products(\"Both\", \"bestSelling\", search=modified_q),\n\t\t\"user_cart\": fetchUserCartContents(request.cookies.get(\"userID\")) # look up this visitor's cart via their cookie\n\t}\n\treturn render_template(\"search.html\", info=page_info)\n\n@app.route(\"/catalog\")\ndef catalog():\n\t# query db for all products and display them here.\n\treturn render_template(\"main.html\")\n\n@app.route(\"/product/\")\ndef product():\n\t# Here we would query the db for the product name and\n\t# fetch appropriate details.\n\treturn render_template(\"main.html\")\n\ndef fetch_db_info_helper(fetchOne, column, table, where):\n\tconnection = psycopg2.connect(DATABASE_URL)\n\tcursor = connection.cursor()\n\n\tcursor.execute(\"\"\"\n\t\tSELECT {col} FROM {tab} WHERE {inst};\n\t\"\"\".format(col=column, tab=table, inst=where))\n\n\tif fetchOne:\n\t\treturn_value = cursor.fetchone()\n\telse:\n\t\treturn_value = cursor.fetchall()\n\n\tcursor.close()\n\tconnection.close()\n\treturn return_value\n\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n#\tDecorators\n# --==--==--==--==--==--==--==--==--==--==--==--==--\n\n@app.route(\"/get/cart/<user_id>\", methods=[\"GET\", \"POST\", \"DELETE\"])\ndef get_cart(user_id):\n\n\tconnection = psycopg2.connect(DATABASE_URL)\n\tcursor = 
connection.cursor()\n\n\tprint( \"get_cart ran!\" )\n\n\tcursor.execute(\"\"\"\n\t\tSELECT * FROM carts;\n\t\"\"\")\n\ttest123 = cursor.fetchall()\n\tprint( test123 )\n\n\tif request.method == \"GET\":\n\t\t# Return user_id cart information\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT cart_contents FROM carts WHERE customer_cookie = {id};\n\t\t\"\"\".format(id=user_id))\n\n\t\tprint( \"Fetching user cart contents... :\" + str(cursor.fetchone()) ) # fetchone() returns a tuple or None, so cast before concatenating\n\n\t\ttemporary_cart = {\n\t\t\t\"product_id\": \"1\",\n\t\t\t\"quantity\": \"2\"\n\t\t}\n\t\treturn str(temporary_cart)\n\n\telif request.method == \"POST\":\n\t\t# Determine from request if we are adding items/removing items\n\t\tdata = request.form # POST info\n\t\tprint( data )\n\t\tprint( user_id )\n\t\tprint( data[\"user\"] )\n\t\tif data[\"action\"] == \"add\":\n\t\t\tcursor.execute(\"\"\"\n\t\t\t\tSELECT cart_contents FROM carts WHERE customer_cookie = {id};\n\t\t\t\"\"\".format(id=user_id))\n\t\t\tcone = cursor.fetchone()\n\t\t\tprint(\"fetch: \"+str(cone))\n\t\t\tif cone != None: #if a cart row already exists\n\t\t\t\tprint( \"cone exists ran!\")\n\t\t\t\tcone = json.loads(cone[0]) # reuse the row already fetched; a second fetchone() would return None\n\t\t\t\tif data[\"prod\"] in cone: # avoid a KeyError for products not yet in the cart\n\t\t\t\t\tcone[data[\"prod\"]] = cone[data[\"prod\"]] + 1\n\t\t\t\telse:\n\t\t\t\t\tcone[data[\"prod\"]] = 1\n\t\t\t\tcone = json.dumps(cone) # serialise the dict before interpolating it into SQL and returning it\n\t\t\t\tprint( \"final: \"+cone )\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\t\tUPDATE carts SET cart_contents = '{updated_cart}' WHERE customer_cookie = {id}; COMMIT;\n\t\t\t\t\"\"\".format(id=user_id, updated_cart=cone))\n\t\t\telse:\n\t\t\t\tprint( \"cone not exists ran!\")\n\n\t\t\t\tcone = {\n\t\t\t\t\tdata[\"prod\"]: 1\n\t\t\t\t}\n\t\t\t\tcone = json.dumps(cone)\n\t\t\t\tprint( \"final: \"+cone+\" \"+str(type(cone)) )\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\t\tINSERT INTO carts(customer_cookie, cart_contents) VALUES (DEFAULT, '{updated_cart}'); COMMIT;\n\t\t\t\t\"\"\".format(updated_cart=cone))\n\n\t\t\tprint( cone )\n\n\t\t\tcursor.close()\n\t\t\tconnection.close()\n\n\t\t\treturn cone\n\n\t\telse:\n\t\t\treturn \"can only currently add to cart\"\n\n\telif request.method == \"DELETE\":\n\t\t# Delete user cart information from DB\n\t\treturn \"Deleted user cart information successfully\"\n\n\telse:\n\t\treturn \"Error 405 Method Not Allowed\"\n\n\tcursor.close()\n\tconnection.close()\n\n\n\n### End ###\nif __name__ == \"__main__\":\n\tapp.run(debug=True)","sub_path":"vyper-led/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":7764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650448186","text":"import pygame, math\r\nfrom constants import *\r\nfrom gamemenu import *\r\nfrom font import *\r\n\r\nclass WinMenu(GameMenu):\r\n\r\n    def __init__(self, players):\r\n        GameMenu.__init__(self, players)\r\n        self.winner = None\r\n        self.theta = math.pi/2\r\n        self.winFont = fontBold120.render(\"Winner\", 1, (255, 255, 255))\r\n\r\n    def setWinner(self, playerId):\r\n        self.winner = self.players[playerId]\r\n        self.theta = math.pi/2\r\n\r\n    # Called every frame, but before rendering\r\n    def tick(self, deltaTime):\r\n        self.theta = math.atan2(\r\n            math.sin(self.theta) + math.sin(self.winner.theta or 0) * 5 * deltaTime,\r\n            math.cos(self.theta) + math.cos(self.winner.theta or 0) * 5 * deltaTime\r\n        )\r\n\r\n    # Called to render the current frame\r\n    def render(self, screen):\r\n        minDim = min(WIDTH, HEIGHT)\r\n        newSurface = pygame.transform.rotate(\r\n            pygame.transform.scale(self.winner.face, (minDim, minDim)),\r\n            - self.theta / math.pi * 180 + 90\r\n        )\r\n\r\n        
width = newSurface.get_width()\r\n height = newSurface.get_height()\r\n #pygame.draw.circle(screen, (255, 0, 0), (x, y), self.radius)\r\n screen.blit(newSurface, (\r\n WIDTH / 2 - width / 2,\r\n HEIGHT / 2 - height / 2,\r\n width,\r\n height\r\n ))\r\n\r\n screen.blit(self.winFont, (\r\n WIDTH/2 - self.winFont.get_width()/2,\r\n 20\r\n ))","sub_path":"code/winmenu.py","file_name":"winmenu.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"396566548","text":"from gtts import gTTS\nimport speech_recognition as sr\nimport time\nimport random\nimport playsound\nimport webbrowser\nimport os\nimport subprocess\nimport datetime\n\nr = sr.Recognizer()\n\ndef record_audio(ask= False):\n with sr.Microphone() as source:\n if ask:\n speak(ask)\n audio = r.listen(source)\n voice_data = ''\n try:\n voice_data = r.recognize_google(audio)\n\n except sr.RequestError:\n print('going offline')\n\n except sr.UnknownValueError:\n print('i did not get that')\n\n print(voice_data)\n\n return voice_data.lower()\n\n\n\n\ndef shutdown():\n subprocess.call(['osascript', '-e',\n'tell app \"System Events\" to shut down'])\n\n\ndef speak(audio_string):\n tts = gTTS(text=audio_string,lang='en')\n r = random.randint(1,200)\n audio_file = 'audio' + str(r) + '.mp3'\n tts.save(audio_file)\n playsound.playsound(audio_file)\n os.remove(audio_file)\n\ndef there_exists(terms):\n for term in terms:\n if term in voice_data:\n return True\n\ndef if_there(terms):\n for term in terms:\n if term in authenticate:\n return True\n\ndef service(authenticate):\n if if_there(['hai','hello','whatsup']):\n greetings = [\"Hey Deepak\",\"Hello, welcome back\",\"Welcome come back Deepak\"]\n greet = greetings[random.randint(0,len(greetings)-1)]\n speak(greet) \n if if_there(['netflix','movie']):\n search_term = record_audio(\"what do yuo want to search\")\n url = f\"https://www.netflix.com/search?q={search_term}\"\n webbrowser.get().open(url)\n speak('enjoy watching')\n exit()\n\n if if_there(['youtube']):\n search_term = record_audio(\"what do yuo want to watch\")\n url = f\"https://www.youtube.com/results?search_query={search_term}\"\n webbrowser.get().open(url)\n speak('enjoy watching')\n exit()\n\n if if_there(['search','google','what is ']):\n search_term = record_audio('what to search')\n url = f\"https://www.google.com/search?q={search_term}\"\n webbrowser.get().open(url)\n speak(f\"i found this on the web about {search_term}\")\n\n if if_there(['set alarm','alarm']):\n set_hour = int(record_audio('what hour should i wake you up'))\n set_minute = int(record_audio('what minute should i wake you up'))\n ste_am = record_audio('am or pm')\n if (ste_am == 'pm'):\n set_hour = set_hour+ 12\n while True:\n if(set_hour == datetime.datetime.now().hour and\n set_minute == datetime.datetime.now().minute):\n set_timer = speak('wake up')\n print(set_timer)\n if if_there(['kill alarm','set alarm']):\n speak('killing alarm')\n break\n\n\n if if_there(['shutdown']):\n speak('logging out')\n shutdown()\n\n\n\n\n if if_there(['goodbye','bye']):\n bye = [f\"going offline\",f\"bye\",\"it was nice to meet you\",f\"have a nice day\"]\n breakpoint_2 = bye[random.randint(0,len(bye)-1)]\n speak(breakpoint_2)\n exit()\n\n\ntime.sleep(1)\nwhile(1):\n voice_data = record_audio()\n if there_exists([\"wake up\"]):\n speak(\"activated...\")\n authenticate = record_audio()\n service(authenticate)\n\n \n\n\n 
\n","sub_path":"assistant.py","file_name":"assistant.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283908683","text":"from tkinter import *\nimport math as m\n\ntk = Tk()\nw = Canvas(tk, width=600, height = 600)\nw.pack()\n\ncenter_x = 300\ncenter_y = 300\n\npoint =[\n #A点\n center_x,\n center_y - 100,\n #B点\n center_x - int(100*m.cos(m.pi/6)),\n center_y + 50,\n #C点\n center_x + int(100*m.cos(m.pi/6)),\n center_y + 50,\n #A点\n center_x,\n center_y - 100\n ]\n\nw.create_polygon(point, outline='red', fill='')\n\nmainloop()\n","sub_path":"杂项/tttest.py","file_name":"tttest.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558743905","text":"\"\"\"\\file main.py\n\n Основной цикл работы программы: общение с пользователем и запуск нейросетей.\n\"\"\"\nimport os\nfrom io import BytesIO\n\nimport torchvision.transforms as transforms\nfrom aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.types import InlineKeyboardButton\nfrom aiogram.types.message import ParseMode\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.utils import executor\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher import FSMContext\n\nimport config\nfrom inference_esrgan import run_super_res\nfrom net import run_style_transfer, unloader, download_cnn\nfrom users import create_user_checker, users\nimport language\n\n\n## Создание экземпляров бота, диспетчера, конечного автомата и загрузка обученной модели VGG19\nprint('Bot is starting..')\ncnn = download_cnn()\nbot = Bot(token=config.TOKEN)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=storage)\nprint('Bot has been started')\nadmin = 189590002\n\n\nclass Form(StatesGroup):\n report = State()\n\n\n\"\"\"\\brief Отзыв\n\n Бот просит у пользователя в следующем сообщении оставить отзыв. \n\"\"\"\n@dp.message_handler(commands=['report'])\nasync def report(message):\n create_user_checker(message.from_user.id)\n await Form.report.set()\n await message.reply(users[message.from_user.id].l.report)\n\n\n\"\"\"\\brief Отмена отзыва\n\n Возможность отмены оставления отзыва.\n\"\"\"\n@dp.message_handler(state='*', commands='cancel')\n@dp.message_handler(Text(equals='отмена', ignore_case=True), state='*')\nasync def cancel_handler(message: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await message.reply('ОК')\n\n\n\"\"\"\\brief Отмена отзыва\n\n Функция добавляет вариант отмены отзыва на английском. \n\"\"\"\n@dp.message_handler(state='*', commands='cancel')\n@dp.message_handler(Text(equals='cancel', ignore_case=True), state='*')\nasync def cancel_handler(message: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await message.reply('ОК')\n\n\n\"\"\"\\brief Отправка отзыва\n\n Бот отправляет отзыв пользователя админу. \n\"\"\"\n@dp.message_handler(state=Form.report)\nasync def process_name(message: types.Message, state: FSMContext):\n await bot.send_message(admin, message.text)\n await state.finish()\n\n\n\"\"\"\\brief Помощь\n\n Вывод списка команд бота. 
\n\"\"\"\n@dp.message_handler(commands=['help'])\nasync def def_help(message):\n create_user_checker(message.from_user.id)\n await bot.send_message(message.chat.id, users[message.from_user.id].l.help)\n\n\n\"\"\"\\brief Старт.\n\n Запуск основного цикла работы. Бот представляется и даёт пользователю выбор о том, что бот должен сделать.\n\"\"\"\n@dp.message_handler(commands=['start'])\nasync def welcome(message):\n create_user_checker(message.from_user.id)\n\n inline_keyboard = types.InlineKeyboardMarkup(row_width=2)\n item1 = InlineKeyboardButton(users[message.from_user.id].l.style_transfer, callback_data='yes')\n item2 = InlineKeyboardButton(users[message.from_user.id].l.super_res, callback_data='no')\n\n inline_keyboard.add(item1, item2)\n me = await bot.get_me()\n text = (users[message.from_user.id].l.hello +\n message.from_user.first_name + \". \" +\n users[message.from_user.id].l.i +\n me.first_name +\n users[message.from_user.id].l.start)\n await bot.send_message(message.chat.id, text,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=inline_keyboard)\n\n\n\"\"\"\\brief Сброс.\n\n Удаление экземпляра пользователя. Удаление всех его фото.\n\"\"\"\n@dp.message_handler(commands=['reset'])\nasync def def_reset(message):\n create_user_checker(message.from_user.id) # Чтобы не выдало ошибку при отсутствии экземпляра\n\n if os.path.exists(f'images/{message.from_user.id}' + '_style_photo.pickle'):\n os.remove(f'images/{message.from_user.id}' + '_style_photo.pickle')\n\n if os.path.exists(f'images/{message.from_user.id}' + '_bad_photo.pickle'):\n os.remove(f'images/{message.from_user.id}' + '_bad_photo.pickle')\n\n if os.path.exists(f'images/{message.from_user.id}' + '_content_photo.pickle'):\n os.remove(f'images/{message.from_user.id}' + '_content_photo.pickle')\n\n if os.path.exists(f'images/{message.from_user.id}' + '_result.png'):\n os.remove(f'images/{message.from_user.id}' + '_result.png')\n\n if os.path.exists(f'images/{message.from_user.id}' + '_super_result.png'):\n os.remove(f'images/{message.from_user.id}' + '_super_result.png')\n\n await bot.send_message(message.chat.id, users[message.from_user.id].l.reset)\n users.pop(message.from_user.id)\n\n\n\"\"\"\\brief Русский язык.\n\n Включение русского языка.\n\"\"\"\n@dp.message_handler(commands=['ru'])\nasync def turn_ru(message):\n create_user_checker(message.from_user.id)\n users[message.from_user.id].l = language.Rus()\n\n await bot.send_message(message.chat.id, users[message.from_user.id].l.turn_lang)\n\n\"\"\"\\brief Русский язык.\n\n Включение английского языка.\n\"\"\"\n@dp.message_handler(commands=['eng'])\nasync def turn_eng(message):\n create_user_checker(message.from_user.id)\n users[message.from_user.id].l = language.Eng()\n\n await bot.send_message(message.chat.id, users[message.from_user.id].l.turn_lang)\n\n\n\"\"\"\\brief Приём картинок\n\n Функция проверяет, приём для какого вида работ активирован. 
Дальше смотрит, какая по счёту картинка, если все\n картинки приняты, то запускает нейросеть.\n\"\"\"\n@dp.message_handler(content_types=['photo'])\nasync def get_photo(message):\n create_user_checker(message.from_user.id)\n\n if users[message.from_user.id].is_getting_transfer:\n\n if message.media_group_id:\n users[message.from_user.id].group_counter += 1\n\n if users[message.from_user.id].group_counter == 1:\n await bot.send_message(message.chat.id, users[message.from_user.id].l.only_one)\n\n return\n\n try:\n photo = message.photo[-1]\n photo_id = message.photo[-1].file_id\n photo_width = message.photo[-1].width\n photo_height = message.photo[-1].height\n file = await bot.get_file(photo_id)\n\n except IndexError:\n await bot.send_message(message.chat.id, users[message.from_user.id].l.error)\n return\n\n if users[message.from_user.id].counter == 0:\n users[message.from_user.id].style_photo_size = (photo_width, photo_height)\n await photo.download(f'images/{message.from_user.id}' + '_style_photo.pickle')\n await bot.send_message(message.chat.id, users[message.from_user.id].l.ok_now_content)\n\n if users[message.from_user.id].counter == 1:\n users[message.from_user.id].content_photo_size = (photo_width, photo_height)\n await photo.download(f'images/{message.from_user.id}' + '_content_photo.pickle')\n users[message.from_user.id].is_getting_transfer = False\n users[message.from_user.id].counter = -1\n await bot.send_message(message.chat.id, users[message.from_user.id].l.receive_photo)\n await transfer_style(message)\n\n users[message.from_user.id].counter += 1\n\n if users[message.from_user.id].is_getting_quality:\n if message.media_group_id:\n users[message.from_user.id].group_counter += 1\n if users[message.from_user.id].group_counter == 1:\n await bot.send_message(message.chat.id, users[message.from_user.id].l.only_one)\n return\n try:\n photo = message.photo[-1]\n photo_id = message.photo[-1].file_id\n photo_width = message.photo[-1].width\n photo_height = message.photo[-1].height\n file = await bot.get_file(photo_id)\n\n except IndexError:\n await bot.send_message(message.chat.id, users[message.from_user.id].l.error)\n return\n if photo_width > 500 or photo_height > 500:\n await bot.send_message(message.chat.id, users[message.from_user.id].l.reject)\n return\n users[message.from_user.id].bad_photo_size = (photo_width, photo_height)\n await photo.download(f'images/{message.from_user.id}' + '_bad_photo.pickle')\n users[message.from_user.id].is_getting_quality = False\n await bot.send_message(message.chat.id, users[message.from_user.id].l.receive_photo)\n await super_res(message)\n\n elif message.media_group_id and users[message.from_user.id].not_instructed_counter < 1:\n users[message.from_user.id].not_instructed_counter += 1\n await welcome(message)\n\n\n\"\"\"\\brief Выбор\n\n Функция для понимания, чего хочет пользователь: трансферстайл или апскейлинг.\n\"\"\"\n@dp.callback_query_handler(lambda call: True)\nasync def callback_inline(call):\n create_user_checker(call.from_user.id)\n\n if call.message:\n if call.data == 'yes':\n users[call.from_user.id].is_getting_transfer = True\n await bot.send_message(call.message.chat.id, users[call.from_user.id].l.ok_send_style)\n await bot.edit_message_reply_markup(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n reply_markup=None)\n elif call.data == 'no':\n users[call.from_user.id].is_getting_quality = True\n await bot.send_message(call.message.chat.id, users[call.from_user.id].l.ok_send_res)\n await 
bot.edit_message_reply_markup(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n reply_markup=None)\n\n\n\"\"\"\\brief Перенос стиля\n\n Запсук нейросети переноса стиля. Полученное изображение возвращается к разрешению контента и отправляется\n пользователю после этого высвечивается запрос о том, как дальше продолжать работу.\n\"\"\"\nasync def transfer_style(message):\n output = run_style_transfer(cnn,\n f'images/{message.from_user.id}',\n users[message.from_user.id].style_photo_size,\n users[message.from_user.id].content_photo_size,\n )\n\n output = unloader(output)\n a = config.imsize\n b = config.imsize\n c = list(users[message.from_user.id].content_photo_size)[0]\n d = list(users[message.from_user.id].content_photo_size)[1]\n\n coefX = c / a\n coefY = d / b\n\n unloader1 = transforms.Resize((int(config.imsize * coefY + 0.5), int(config.imsize * coefX + 0.5)))\n output = unloader1(output)\n\n bio = BytesIO()\n bio.name = f'images/{message.from_user.id}+_result.png'\n output.save(bio, 'PNG')\n bio.seek(0)\n\n await bot.send_photo(message.chat.id, bio, users[message.from_user.id].l.here_it_is)\n\n inline_keyboard = types.InlineKeyboardMarkup(row_width=2)\n item1 = InlineKeyboardButton(users[message.from_user.id].l.style_transfer, callback_data='yes')\n item2 = InlineKeyboardButton(users[message.from_user.id].l.super_res, callback_data='no')\n inline_keyboard.add(item1, item2)\n\n await bot.send_message(message.chat.id, users[message.from_user.id].l.resume,\n reply_markup=inline_keyboard)\n\n\n\"\"\"\\brief Увеличение разрешения\n\n Запускается нейросеть для увеличения полученного изображения с сохранением качества. Изображение отправляется\n пользователю. Далее высвечивается запрос о том, как дальше продолжать работу.\n\"\"\"\nasync def super_res(message):\n run_super_res(message)\n with open(f'images/{message.from_user.id}+_super_result.png', 'rb') as photo:\n await bot.send_photo(message.chat.id, photo, users[message.from_user.id].l.here_it_is)\n\n os.remove(f'images/{message.from_user.id}+_super_result.png')\n os.remove(f'images/{message.from_user.id}' + '_bad_photo.pickle')\n inline_keyboard = types.InlineKeyboardMarkup(row_width=2)\n item1 = InlineKeyboardButton(users[message.from_user.id].l.style_transfer, callback_data='yes')\n item2 = InlineKeyboardButton(users[message.from_user.id].l.super_res, callback_data='no')\n inline_keyboard.add(item1, item2)\n\n await bot.send_message(message.chat.id, users[message.from_user.id].l.resume,\n reply_markup=inline_keyboard)\n\n\nif __name__ == \"__main__\":\n executor.start_polling(dp)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"456696817","text":"\"\"\"\nRelativictic Difference Scheme Particle-in-Cell code (REDPIC) constants file.\n\n\"\"\"\n\nimport periodictable\nimport numpy as np\nfrom collections import namedtuple\nfrom scipy import constants\n\n__all__ = [ 'speed_of_light',\n 'c',\n 'epsilon_0',\n 'ep_0',\n 'mu_0',\n 'ke',\n 'km',\n 'h',\n 'hbar',\n 'elementary_charge',\n 'e',\n 'electron_mass',\n 'm_e',\n 'electron_mass_energy',\n 'mc',\n 'electron_radius',\n 'r_0',\n 'proton_mass',\n 'm_p',\n 'neutron_mass',\n 'm_n',\n 'atomic_constant_mass',\n 'm_u',\n 'u',\n 'electron',\n 'positron',\n 'proton',\n 'antiproton',\n 'neutron',\n 'antineutron',\n 'Element',\n 'Particle'\n ]\n\n# Constants\nc = speed_of_light = constants.c\n\nep_0 = epsilon_0 = constants.epsilon_0\nmu_0 
= constants.mu_0\nh = constants.h\nhbar = constants.hbar\n\nke = 1 / (4*np.pi*ep_0)\nkm = mu_0 / (4*np.pi)\n\ne = elementary_charge = constants.e\n\nm_e = electron_mass = constants.m_e\nmc = electron_mass_energy = constants.physical_constants[ 'electron mass energy equivalent in MeV' ][0]\nr_0 = electron_radius = constants.physical_constants[ 'classical electron radius' ][0]\nm_p = proton_mass = constants.m_p\nm_n = neutron_mass = constants.m_n\nu = m_u = atomic_constant_mass = constants.physical_constants[ 'atomic mass constant' ][0]\n\nElement = namedtuple('Element', [ 'name', 'symbol', 'mass', 'charge' ])\n\nelectron = Element(name='electron', symbol='e', mass=m_e, charge=-e)\npositron = Element(name='positron', symbol='e+', mass=m_e, charge=e)\nproton = Element(name='proton', symbol='p', mass=m_p, charge=e)\nantiproton = Element(name='antiproton', symbol='p-', mass=m_p, charge=-e)\nneutron = Element(name='neutron', symbol='n', mass=m_n, charge=0)\nantineutron = Element(name='antineutron', symbol='n', mass=m_n, charge=0)\nParticle = Element\n\n# Get mass of each element from periodictable\nelements = periodictable.core.default_table()\n__all__ += periodictable.core.define_elements(elements, globals())\n","sub_path":"redpic/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469368504","text":"import time\nimport json\nfrom adbutils import adb\n\nd = adb.device(serial=\"localhost:5555\")\n\ndef screenshot(path):\n remote_tmp_path = \"/data/local/tmp/screenshot.png\"\n d.shell([\"rm\", remote_tmp_path])\n d.shell([\"screencap\", \"-p\", remote_tmp_path])\n d.sync.pull(remote_tmp_path, 'run/'+path)\n\n\n# @sio.on('Click')\ndef on_click(data):\n print(data)\n screenshot(\"r_click_\"+str(data[\"timestamp\"])+\".png\")\n d.click(data[\"x\"],data[\"y\"])\n\n\n# @sio.on('KeyInput')\ndef on_input(data):\n print(''.join([chr(data[\"keycode\"])]))\n d.send_keys(''.join([chr(data[\"keycode\"])]))\n\n\n# @sio.on('KeyEvent')\ndef on_event(data):\n print(data)\n d.keyevent(data[\"keycode\"])\n\n\n# @sio.on('Swipe')\ndef on_swipe(data):\n print(data)\n screenshot(\"r_swipe_\"+str(data[\"end\"][\"timestamp\"])+\".png\")\n d.swipe(data[\"start\"][\"x\"],data[\"start\"][\"y\"],data[\"end\"][\"x\"],data[\"end\"][\"y\"],(data[\"end\"][\"timestamp\"]-data[\"start\"][\"timestamp\"])/1000)\n\ndef read_file(filename):\n\twith open(filename) as f:\n\t\tdata = json.load(f)\n\t\tfor action in data:\n\t\t\tprint(action)\n\t\t\tif(action[\"action\"] == \"click\"):\n\t\t\t\ton_click(action[\"params\"])\n\t\t\t\ttime.sleep(3)\n\t\t\tif(action[\"action\"] == \"keyInput\"):\n\t\t\t\ton_input(action[\"params\"])\n\t\t\t\ttime.sleep(0.2)\n\t\t\tif(action[\"action\"] == \"keyEvent\"):\n\t\t\t\ton_event(action[\"params\"])\n\t\t\t\ttime.sleep(1)\n\t\t\tif(action[\"action\"] == \"swipe\"):\n\t\t\t\ton_swipe(action[\"params\"])\n\t\t\t\ttime.sleep(1.5)\n\tf.close()\n","sub_path":"python-client/src/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551311795","text":"# import the necessary packages\nfrom imutils.video import VideoStream\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2 \nimport csv\nimport json, time, sys\nfrom datetime import datetime\nfrom collections import OrderedDict\nfrom threading import Thread\nimport boto3\nfrom 
boto3.dynamodb.conditions import Key,Attr\nfrom PIL import Image\n\n\n# construct the argument parse and parse the arguments\n#ap = argparse.ArgumentParser()\n#ap.add_argument(\"-p\", \"--prototxt\", required=True,\n#\thelp=\"path to Caffe 'deploy' prototxt file\")\n#ap.add_argument(\"-m\", \"--model\", required=True,\n#\thelp=\"path to Caffe pre-trained model\")\n#ap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n#\thelp=\"minimum probability to filter weak detections\")\n#args = vars(ap.parse_args())\ndef everything():\n args = {\"prototxt\": \"deploy.prototxt.txt\", \"model\": \"res10_300x300_ssd_iter_140000.caffemodel\", \"confidence\": 0.5}\n \n ACCOUNT_ID = '880652607631'\n IDENTITY_POOL_ID = 'us-east-1:db12e203-9a39-43fa-8dab-2309d4309d39'\n ROLE_ARN = 'arn:aws:iam::880652607631:role/Cognito_Android_Identity_PoolUnauth_Role'\n \n \n cognito = boto3.client('cognito-identity')\n cognito_id = cognito.get_id(AccountId = ACCOUNT_ID, IdentityPoolId = IDENTITY_POOL_ID)\n oidc = cognito.get_open_id_token(IdentityId = cognito_id['IdentityId'])\n \n sts = boto3.client('sts')\n assumedRoleObject = sts.assume_role_with_web_identity(RoleArn = ROLE_ARN, RoleSessionName = \"XX\",\n WebIdentityToken = oidc['Token'])\n \n client_dynamo = boto3.client('dynamodb',\n aws_access_key_id=assumedRoleObject['Credentials']['AccessKeyId'],\n aws_secret_access_key=assumedRoleObject['Credentials']['SecretAccessKey'],\n aws_session_token=assumedRoleObject['Credentials']['SessionToken']\n )\n \n s3 = boto3.client('s3',\n aws_access_key_id = assumedRoleObject['Credentials']['AccessKeyId'],\n aws_secret_access_key = assumedRoleObject['Credentials']['SecretAccessKey'],\n aws_session_token = assumedRoleObject['Credentials']['SessionToken']\n )\n data = []\n print(\"[INFO] loading model...\")\n net = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n \n # initialize the video stream and allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n vs = VideoStream(src=0).start()\n time.sleep(2.0)\n try:\n \twhile True:\n \t\t# grab the frame from the threaded video stream and resize it\n \t\t# to have a maximum width of 400 pixels\n \t\tframe = vs.read()\n \t\tframe = imutils.resize(frame, width=400)\n \t \n \t\t# grab the frame dimensions and convert it to a blob\n \t\t(h, w) = frame.shape[:2]\n \t\tblob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n \t\t\t(300, 300), (104.0, 177.0, 123.0))\n \t \n \t\t# pass the blob through the network and obtain the detections and\n \t\t# predictions\n \t\tnet.setInput(blob)\n \t\tdetections = net.forward()\n \t\tfor i in range(0, detections.shape[2]):\n \t\t\t# extract the confidence (i.e., probability) associated with the\n \t\t\t# prediction\n \t\t\tconfidence = detections[0, 0, i, 2]\n \t \n \t\t\t# filter out weak detections by ensuring the `confidence` is\n \t\t\t# greater than the minimum confidence\n \t\t\tif confidence < args[\"confidence\"]:\n \t\t\t\tcontinue\n \t \t\t\n \t\t\ttmp_frame = np.asarray(frame)\n \t\t\t# compute the (x, y)-coordinates of the bounding box for the\n \t\t\t# object\n \t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n \t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n \t \n \t\t\t# draw the bounding box of the face along with the associated\n \t\t\t# probability\n# \t\t\ttext = \"{:.2f}%\".format(confidence * 100)\n# \t\t\ty = startY - 10 if startY - 10 > 10 else startY + 10\n# \t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n# \t\t\t\t(0, 0, 255), 
2)\n# \t\t\tcv2.putText(frame, text, (startX, y),\n# \t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n \t\t\tprint(frame.shape)\n \t\t\tfilename = \"face_image.jpg\"\n \t\t\tim = Image.fromarray(tmp_frame)\n \t\t\tim.save(filename)\n \t\t\t# open the saved frame and upload it to S3; a distinct name keeps the 'data' list above from being shadowed\n \t\t\twith open(filename, 'rb') as image_file:\n \t\t\t\ts3.upload_fileobj(image_file, 'iot-image-19951124', str(int(time.time())) + \".jpg\")\n \t\t\ttime.sleep(5)\n \n \t\t# show the output frame \n# \t\tcv2.imshow(\"Frame\", frame)\n# \t\tkey = cv2.waitKey(1) & 0xFF\n \t\n \t# if the `q` key was pressed, break from the loop\n \t#if key == ord(\"q\"):\n    except KeyboardInterrupt:\n \tprint(\"end\")\n \t#break\n \n    # do a bit of cleanup\n    cv2.destroyAllWindows()\n    vs.stop()\neverything()","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15069214","text":"#Create a function that takes 3 numbers as input and returns the largest of them.\n\ndef get_number():\n    list_number=[]\n    for i in range(3):\n        number=int(input(f'Enter number {i+1}: '))\n        list_number.append(number)\n    return max(list_number)\nprint(f'The largest number: {get_number()}')\n\n","sub_path":"practice4-2.py","file_name":"practice4-2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"348062602","text":"# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.\n# Use of this source code is governed by a BSD License (see LICENSE).\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView, TemplateView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    url(r'^', include('huxley.accounts.urls', app_name='accounts', namespace='accounts')),\n    url(r'^chair/', include('huxley.chairs.urls', app_name='chairs', namespace='chairs')),\n    url(r'^advisor/', include('huxley.advisors.urls', app_name='advisors', namespace='advisors')),\n    url(r'^api/', include('huxley.api.urls', app_name='api', namespace='api')),\n    url(r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns += patterns('',\n    url(r'^favicon\\.ico$', RedirectView.as_view(url='/static/img/favicon.ico')),\n    url(r'^about/?$', TemplateView.as_view(template_name='about.html'), name='about'),\n    url(r'^success/?$', TemplateView.as_view(template_name='registration-success.html'), name='register_success'),\n)\n\nurlpatterns += patterns('',\n    url(r'^$', 'huxley.core.views.index', name='index'),\n)\n","sub_path":"huxley/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364366208","text":"from flask import request\nfrom flask_restplus import Resource\nfrom wtforms.ext.sqlalchemy.orm import model_form\n\nfrom back.base import db\n\n\nclass ResourceBase(Resource):\n    model = None\n\n    def get_queryset(self, *args, **kwargs):\n        return self.model.query\n\n    def as_dict(self, item):\n        raise NotImplementedError\n\n    def get_form(self):\n        return model_form(self.model)\n\n    def get(self, *args, **kwargs):\n        return [self.as_dict(i) for i in self.get_queryset(*args, **kwargs)]\n\n    def patch(self, id):\n        item = self.get_queryset().filter_by(id=id).first()\n        Form = self.get_form()\n        form = Form(request.form, item)\n        if form.validate():\n            form.populate_obj(item)\n\n            db.session.add(item)\n            
db.session.commit()\n\n            return self.as_dict(item)\n        return {}\n\n    def post(self, *args, **kwargs):\n        Form = self.get_form()\n        form = Form(request.form)\n        if form.validate():\n            item = self.model(**form.data)\n\n            db.session.add(item)\n            db.session.commit()\n\n            return self.as_dict(item)\n\n    def delete(self, id):\n        item = self.get_queryset().filter_by(id=id).first()\n        db.session.delete(item)\n        db.session.commit()\n        return {}\n\n","sub_path":"back/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"361311122","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.support.select import Select\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom time import sleep\r\nfrom selenium.common.exceptions import NoSuchElementException\r\n\r\n\r\ndef finnaLink(lcoin):\r\n    try:\r\n        ponlink = webdriver.Chrome(\"D:/bot/chromedriver_win32/chromedriver\")\r\n        # ponlink.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)\r\n        flink = []\r\n        ponlink.get(lcoin)\r\n        finallink = ponlink.find_element_by_xpath(\"/html/body/div[4]/div[4]/div[2]/div[2]/div[2]/div/a\").get_attribute(\"href\").splitlines()[0]\r\n        \r\n        if finallink:\r\n            # print(finallink)\r\n            flink.append(finallink)\r\n            sleep(10)\r\n            ponlink.close()\r\n            for xl in flink:\r\n                print(xl)\r\n                open(\"sitecoinlist.txt\", \"a\").write(xl+\"\\n\")\r\n        else:\r\n            print(\"Can't grab\")\r\n        \r\n    except NoSuchElementException:\r\n        print(\"Can't grab\")\r\n\r\n\r\ndef getwebCoin():\r\n    pondev = webdriver.Chrome(\"D:/bot/chromedriver_win32/chromedriver\")\r\n    # set the path to the specific directory of your chromedriver file\r\n    # if you do not have it yet, you can download it here:\r\n    # https://chromedriver.chromium.org/downloads\r\n    # and match it to your Chrome version\r\n    \r\n    urlreg = 'https://www.coingecko.com/id?page='\r\n    for page in range(1, 98):\r\n        \r\n        pondev.get(urlreg+str(page))\r\n        for tr in range(1, 101):\r\n            coind = pondev.find_element_by_xpath(\"/html/body/div[3]/div[4]/div[7]/div[1]/div/table/tbody/tr[\"+str(tr)+\"]/td[3]/div/div[2]/a[2]\").get_attribute(\"href\").splitlines()[0]\r\n            \r\n            listlinkcoin = []\r\n            listlinkcoin.append(coind)\r\n            \r\n            for lcoin in listlinkcoin:\r\n                print(lcoin)\r\n                finnaLink(lcoin)\r\n            \r\n    \r\n    \r\n\r\ngetwebCoin()\r\n","sub_path":"coin graber/gc.py","file_name":"gc.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"67396290","text":"from BaseThread import BaseThread\nimport threading\nimport time\n\n# the job each thread runs first\ndef my_thread_job():\n    with sem:\n        print(\"{} running\".format(\"hi\"))\n        time.sleep(1)\n# the callback that runs after the thread's job finishes\ndef cb(argv1, argv2):\n    with sem:\n        print(\"{} {}\".format(argv1, argv2))\n\n\nsem=threading.Semaphore(4)\n\nfor i in range(5):\n    BaseThread(\n        name = 'test',\n        target=my_thread_job,\n        callback=cb,\n        callback_args=(\"hello\",\"word\")\n    ).start()","sub_path":"test_BaseThread.py","file_name":"test_BaseThread.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"38944703","text":"from functools import partial\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom api.models import Employee\nfrom api.serializers import EmployeeSerializers\nfrom 
rest_framework.renderers import JSONRenderer\nimport io\nfrom rest_framework.parsers import JSONParser\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\n\ndef display_all_employee_data(request):\n    employee_data = Employee.objects.all()\n    serialize_data = EmployeeSerializers(employee_data, many=True)\n    json_data = JSONRenderer().render(serialize_data.data)\n    return HttpResponse(json_data, content_type=\"application/json\")\n\ndef display_single_employee(request, id):\n    employee = Employee.objects.get(pk=id)\n    serialize_data = EmployeeSerializers(employee)\n    json_data = JSONRenderer().render(serialize_data.data)\n    return HttpResponse(json_data, content_type=\"application/json\")\n\n@csrf_exempt\ndef create_employee_data(request):\n    if request.method == 'POST':\n        json_data = request.body\n        stream = io.BytesIO(json_data)\n        py_data = JSONParser().parse(stream)\n        serialize = EmployeeSerializers(data=py_data)\n        if serialize.is_valid():\n            serialize.save()\n            message = {'status':'OK', 'response':201}\n            json_msg = JSONRenderer().render(message)\n            return HttpResponse(json_msg, content_type='application/json')\n        return JsonResponse({'message':'failed', 'err':serialize.errors})\n\n@csrf_exempt\ndef update_employee_data(request, id):\n    if request.method == 'PUT':\n        json_data = request.body\n        stream = io.BytesIO(json_data)\n        py_data = JSONParser().parse(stream)\n        employee_data = Employee.objects.get(pk=id)\n        serialize = EmployeeSerializers(employee_data, data=py_data, partial=True)\n        if serialize.is_valid():\n            serialize.save()\n            return JsonResponse({'message':'updated data'})\n        return JsonResponse({'message':'invalid'})\n\n\n    \n    ","sub_path":"mysite/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"198216124","text":"from tkinter import *\nimport webbrowser\n\nwindow= Tk()\n\nnew = 1\n# url = \"https://exchange.toronto.ca/owa\"\nurl = \"www.nice-canada.ca\"\n\ndef torontoOWA():\n    webbrowser.open(url,new=new)\n#    webbrowser.open_new_tab(url)\n\ncityOWA= Button(window, text = \"Toronto Email\",command=torontoOWA ,height=1, width=22)\ncityOWA.pack()\n\nwindow.mainloop()","sub_path":"Resources/weblinkbutton.py","file_name":"weblinkbutton.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637354307","text":"from urllib.parse import urlparse\nfrom robots_test import *\nimport time\n\n\n#okay, here is how I imagined it\n#the url class will store the url, but it will also hold the robots data, so I think we need a check whether we have ever been on this domain before\n#so a dict will be one attribute of the crawler class; the dict will have the domain name as its key and a Site class instance as its value\n\n\n#when you are inserting into the database, call this to get everything together as 1 string\nclass Site:\n\n    def __init__(self, url, allow, disallow, sitemap, delay, id):\n        self.url = url\n        self.domain = urlparse(url).netloc\n        self.allow = allow\n        self.disallow = disallow\n        self.sitemap = sitemap\n        self.delay = delay\n        self.site_id = id\n\n    def set_site_id(self, site_id):\n        self.site_id = site_id\n\n    def get_robots_strings(self):\n        robots_allow = \"\"\n        for allow in self.allow:\n            robots_allow += \"Allow: \" + str(allow) + \"\\n\"\n\n        robots_disallow = \"\"\n        for disallow in self.disallow:\n            robots_disallow += \"Disallow: \" + str(disallow) + \"\\n\"\n\n        robots_sitemap = \"\"\n        for sitemap in self.sitemap:\n            
robots_sitemap += \"Sitemap: \" + str(sitemap) + \"\\n\"\n\n\n return robots_allow, robots_disallow, robots_sitemap\n\nclass URL:\n\n def __init__(self, url, allow, disallow, delay, site_id, page_id): #, html):\n self.url = url\n self.domain = urlparse(url).netloc\n self.time = None\n self.allow = allow\n self.disallow = disallow\n self.delay = delay\n self.site_id = site_id\n self.page_id = page_id\n self.html_status_code = None\n self.html = None\n\n\n def set_html(self, html):\n self.html = html\n\n def set_html_status_code(self, code):\n self.html_status_code = code\n\n def set_page_id(self, page_id):\n self.page_id = page_id\n\n def set_time(self, time):\n self.time = time\nif __name__ == \"__main__\":\n url = URL(\"http://www.e-prostor.gov.si/dostop-do-podatkov/dostop-do-podatkov/\")\n","sub_path":"stickman/URLclasses.py","file_name":"URLclasses.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539525008","text":"import os\nimport shutil\nimport yaml\nimport unittest\nfrom panstamps import downloader, cl_utils\nfrom panstamps.image import image\nfrom panstamps.utKit import utKit\n\nfrom fundamentals import tools\n\nsu = tools(\n arguments={\"settingsFile\": None},\n docString=__doc__,\n logLevel=\"DEBUG\",\n options_first=True,\n projectName=\"panstamps\"\n)\narguments, settings, log, dbConn = su.setup()\n\n# load settings\nstream = file(\n \"/Users/Dave/.config/panstamps/panstamps.yaml\", 'r')\nsettings = yaml.load(stream)\nstream.close()\n\n# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE\nmoduleDirectory = os.path.dirname(__file__)\nutKit = utKit(moduleDirectory)\nlog, dbConn, pathToInputDir, pathToOutputDir = utKit.setupModule()\nutKit.tearDownModule()\n\nimport shutil\ntry:\n shutil.rmtree(pathToOutputDir)\nexcept:\n pass\n# COPY INPUT TO OUTPUT DIR\nshutil.copytree(pathToInputDir, pathToOutputDir)\n\n\nclass test_image(unittest.TestCase):\n\n def test_image_function(self):\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n kwargs[\"arcsecSize\"] = 4\n kwargs[\"fits\"] = False\n kwargs[\"jpeg\"] = True\n kwargs[\"arcsecSize\"] = 60\n kwargs[\"filterSet\"] = 'grizy'\n kwargs[\"color\"] = True\n kwargs[\"singleFilters\"] = True\n kwargs[\"ra\"] = \"70.60271\"\n kwargs[\"dec\"] = \"-21.72433\"\n kwargs[\"imageType\"] = \"stack\"\n # xt-kwarg_key_and_value\n\n testObject = downloader(**kwargs)\n testObject.get()\n\n kwargs[\"imageType\"] = \"warp\"\n testObject = downloader(**kwargs)\n testObject.get()\n\n kwargs[\"arcsecSize\"] = 600\n testObject = downloader(**kwargs)\n testObject.get()\n\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n # xt-kwarg_key_and_value\n kwargs[\"arcsecSize\"] = 4\n kwargs[\"imagePath\"] = pathToOutputDir + \"/something.png\"\n kwargs[\"settings\"] = False\n kwargs[\"crosshairs\"] = True\n kwargs[\"transient\"] = False\n kwargs[\"scale\"] = True\n kwargs[\"invert\"] = False\n kwargs[\"greyscale\"] = False\n testObject = image(**kwargs)\n testObject.get()\n\n def test_image_function02(self):\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n kwargs[\"fits\"] = False\n kwargs[\"jpeg\"] = True\n kwargs[\"arcsecSize\"] = 60\n kwargs[\"filterSet\"] = 'grizy'\n kwargs[\"color\"] = True\n kwargs[\"singleFilters\"] = True\n kwargs[\"ra\"] = \"208.49364\"\n kwargs[\"dec\"] = \"-27.22365\"\n kwargs[\"imageType\"] = \"stack\"\n # xt-kwarg_key_and_value\n\n testObject = 
downloader(**kwargs)\n testObject.get()\n\n kwargs[\"imageType\"] = \"warp\"\n testObject = downloader(**kwargs)\n testObject.get()\n\n kwargs[\"arcsecSize\"] = 600\n testObject = downloader(**kwargs)\n testObject.get()\n\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n # xt-kwarg_key_and_value\n kwargs[\"arcsecSize\"] = 4\n kwargs[\"imagePath\"] = pathToOutputDir + \"/something.png\"\n kwargs[\"settings\"] = False\n kwargs[\"crosshairs\"] = True\n kwargs[\"transient\"] = False\n kwargs[\"scale\"] = True\n kwargs[\"invert\"] = False\n kwargs[\"greyscale\"] = False\n testObject = image(**kwargs)\n testObject.get()\n\n # x-print-testpage-for-pessto-marshall-web-object\n\n # x-class-to-test-named-worker-function\n","sub_path":"panstamps/tests/test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239337364","text":"#!/usr/bin/env python\nimport os\nimport stat\nimport textwrap\n\nimport virtualenv\n\n\nrequiredPackages = \"\"\"\nanfft\naudioread\nnumpy\npygame\nRPi.GPIO\n\"\"\".split()\n\n\noutput = virtualenv.create_bootstrap_script(textwrap.dedent(\"\"\"\nimport subprocess\n\ndef after_install(options, virt_env_dir):\n requiredPackages = {!r}\n subprocess.call([join(virt_env_dir, 'bin', 'pip'), 'install'] + requiredPackages)\n\ndef adjust_options(options, args):\n if len(args) == 0:\n args.append('virtenv')\n\"\"\").format(requiredPackages))\n\nwith open('bootstrap.py', 'w') as bootstrap:\n # Write out the bootstrap script.\n bootstrap.write(output)\n\n # Make the file executable.\n fileno = bootstrap.fileno()\n mode = os.fstat(fileno).st_mode\n os.fchmod(fileno, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n","sub_path":"create-bootstrap.py","file_name":"create-bootstrap.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"118245302","text":"import datetime, json\nfrom math import ceil\nfrom django.conf import settings\nfrom django.utils.timezone import utc\nfrom itertools import chain\nfrom django.db.models import Q, F\nfrom rest_framework import serializers\nfrom rest_framework.serializers import (\n ModelSerializer, ImageField,\n FileField, PrimaryKeyRelatedField,\n)\n\nfrom .models import Product, Category, ProductImage, Brand, ProductAttributeValue\n\nfrom partner.models import StockRecord\n\nfrom logistics_exchange.models import City\nfrom common.context_processors import geo_city\nfrom logistics_exchange.cart_tariff_calculate import Calc\nfrom logistics_exchange.models import Office, OfficePickPoint\nfrom common.views import month_to_year\nfrom django.conf import settings\n\n\ndef get_date_format(lower_time_days, city_id):\n '''\n :param lower_time_array: количество дней доставки\n :param city_id: код города\n :return:\n '''\n # Функция позваляет перевести дату в селдующий вид '1 октября', '7 ноября'. 
Учитывает, что в выходные доставка\n # невозможна\n # - lower_time_array - кол-во дней доставки, которое возвращает система DPD\n # - additional_days - добавочные дни, использовались для подстраховки, так как ТК не доставляла вовремя\n list_month = [\n u'января',\n u'февраля',\n u'марта',\n u'апреля',\n u'мая',\n u'июня',\n u'июля',\n u'августа',\n u'сентября',\n u'октября',\n u'ноября',\n u'декабря'\n ]\n time_now = datetime.datetime.now().strftime('%H')\n print('time_now === ', time_now)\n print('city_id === ', city_id)\n if str(city_id) == '49694102':\n print('time_now === ', time_now)\n if int(time_now) < 12:\n lptime = get_date_delivery(lower_time_days, 0, 'before_12', city_id)\n else:\n lptime = get_date_delivery(lower_time_days, 1, 'after_12', city_id)\n else:\n if int(time_now) < 12:\n lptime = get_date_delivery(lower_time_days, 0, 'before_12', city_id)\n else:\n lptime = get_date_delivery(lower_time_days, 1, 'before_12', city_id)\n\n lptime = lptime.strftime(\"%d \") + list_month[lptime.month - 1]\n\n return lptime\n\n\ndef get_date_delivery(days, additional_days, mode, city_id):\n \"\"\"Получает дату доставки заказа по заданным параметрам\"\"\"\n print('days ==== ', days)\n now_date = datetime.date.today()\n # Если заказ совершен в субботу или воскресенье, то по Москве доставка ��о вторник,\n # по России начинаем отсчет от понедельника\n if now_date.weekday() == 6 or now_date.weekday() == 5:\n now_date = now_date + datetime.timedelta(7 - int(now_date.weekday()))\n if str(city_id) == '49694102':\n additional_days = 0\n # Прибавляем добавочные дни\n delta = datetime.timedelta(days=int(days) + additional_days)\n lptime = now_date + delta\n # Если доставка выпала на выходные переносим на понедельник, если заказ сделан до 12 часов и на вторник если после\n if lptime.weekday() == 6 or lptime.weekday() == 5:\n if mode == 'after_12' and now_date.weekday() == 4:\n delta_holiday = datetime.timedelta(8 - int(lptime.weekday()))\n else:\n delta_holiday = datetime.timedelta(7 - int(lptime.weekday()))\n lptime += delta_holiday\n\n return lptime\n\n\nclass CategorySerializer(ModelSerializer):\n class Meta:\n model = Category\n\n fields = (\n 'id',\n 'name',\n )\n\n\nclass ProductImageSerializer(ModelSerializer):\n class Meta:\n model = ProductImage\n fields = (\n 'original',\n 'display_order',\n )\n\n\nclass ProductStockSerializer(ModelSerializer):\n class Meta:\n model = StockRecord\n fields = (\n 'num_in_stock',\n 'num_allocated',\n )\n\n\nclass ProductSerializer(ModelSerializer):\n categories = CategorySerializer(many=True, read_only=True)\n images = ProductImageSerializer(many=True, read_only=True)\n main_img = serializers.SerializerMethodField()\n price_mrc = serializers.SerializerMethodField()\n prod = serializers.SerializerMethodField()\n variants = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n fields = (\n 'id',\n 'title',\n 'slug',\n 'product_1c_id',\n 'price_mrc',\n 'categories',\n 'images',\n 'main_img',\n 'prod',\n 'variants',\n\n )\n\n def get_main_img(self, obj):\n img = obj.primary_image()\n return img\n\n def get_prod(self, obj):\n return obj\n\n def get_price_mrc(self, obj):\n price_mrc = int(obj.price_mrc)\n return price_mrc\n\n def get_variants(self, obj):\n return Product.objects.prefetch_related('images'). \\\n filter(Q(item_id=obj.item_id),\n Q(is_show=True),\n Q(num_in_stock__gt=0),\n Q(num_in_stock__gt=F('num_allocated'))). 
\\\n exclude(item_id=None)\n\n @staticmethod\n def setup_eager_loading(queryset):\n queryset = queryset.prefetch_related('categories', 'images', 'stockrecords')\n return queryset\n\n\nclass ProductDeliverySerialaizer(ModelSerializer):\n delivery = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n fields = (\n 'delivery',\n )\n\n def get_delivery(self, obj):\n # получение сроков и стоимости доставки dpd по id продукта\n min_price = None\n min_price_courier = None\n lptime = None\n lptime_courier = None\n enable_apt = 0\n enable_pvz = 0\n city_object = geo_city(self.context['request'])['city_data']\n quantity = dict()\n\n if city_object:\n to_city_id = str(city_object.city_id)\n\n # Если Москва\n if to_city_id == '49694102':\n # В москве точно есть и ПВЗ и Постаматы, проверка не нужна\n enable_pvz = enable_apt = 1\n # Время доставки на следующий день по Москве (кроме выходных)\n lower_time_array = settings.ADDITIONAL_DAYS\n lptime_courier = lptime = get_date_format(lower_time_array, to_city_id)\n if int(obj.price_mrc) >= settings.ORDER_SUM_FOR_FREE_SHIP:\n min_price = min_price_courier = 'Бесплатно'\n else:\n min_price = min_price_courier = '300'\n # Если не Москва\n else:\n product_att = ProductAttributeValue.objects.filter(\n attribute__code__in=['weight', 'ind_weight', 'ind_length', 'length', 'ind_width', 'width',\n 'ind_height', 'height'], product=obj)\n # Проверяем на наличие ПВЗ и постаматы\n results = get_pvz_and_apt([obj], to_city_id)\n if results['offices_pvz'] or results['offices_pick_pvz']:\n enable_pvz = 1\n else:\n enable_pvz = 0\n if results['offices_pick_apt']:\n enable_apt = 1\n else:\n enable_apt = 0\n\n # Вычисляем курьерскую доставку\n delivery = Calc(city_object, [product_att], quantity, 'curier')\n # Вычисляем доставку в ПВЗ и постамат (стоимость доставки одинаковая)\n delivery_pvz = Calc(city_object, [product_att], quantity, 'pvz')\n\n # Форматируем стоимость и время доставки\n if delivery.min_price:\n min_price_courier = ceil(float(delivery.min_price))\n else:\n min_price_courier = None\n if delivery.lowest_price_time:\n lptime_courier = get_date_format(delivery.lowest_price_time, to_city_id)\n else:\n lptime_courier = None\n if delivery_pvz.min_price:\n min_price = ceil(float(delivery_pvz.min_price))\n else:\n min_price = None\n if delivery_pvz.lowest_price_time:\n lptime = get_date_format(delivery_pvz.lowest_price_time, get_date_format)\n else:\n lptime = None\n\n return {\n 'min_price': min_price,\n 'lowest_time': lptime,\n 'min_price_courier': min_price_courier,\n 'lowest_time_courier': lptime_courier,\n 'enable_apt': enable_apt,\n 'enable_pvz': enable_pvz\n }\n\n\nclass CityDeliverySerialaizer(ModelSerializer):\n delivery = serializers.SerializerMethodField()\n\n class Meta:\n model = City\n\n fields = (\n 'delivery',\n )\n\n def get_delivery(self, obj):\n min_price = None\n min_price_courier = None\n lptime = None\n lptime_courier = None\n enable_apt = 0\n enable_pvz = 0\n city_object = obj\n quantity = dict()\n if city_object:\n city = obj\n product_att = {\n 'weight': 1.0,\n 'ind_weight': 1.0,\n 'ind_length': 15.0,\n 'length': 15.0,\n 'ind_width': 15.0,\n 'width': 15.0,\n 'ind_height': 15.0,\n 'height': 15.0\n }\n fix_size = OfficePickPoint.objects.filter(city_id=city.city_id)\n all_postamat = fix_size.filter(type_title='П')\n fix_size_pvz = fix_size.filter(type_title='ПВП')\n all_size = Office.objects.filter(city_id=city.city_id)\n all_pvz = chain(fix_size_pvz, all_size)\n # Создаем словарь для постаматов и пвз, чтобы взять только 
нужные данные и потом преобразовать в json\n postamat_dict = []\n pvz_dict = []\n for postamat in all_postamat:\n p = {\n 'unified_number': postamat.unified_number,\n 'street': postamat.street,\n 'street_abbr': postamat.street_abbr,\n 'house': postamat.house,\n 'building': postamat.building,\n 'delivery_shedule': postamat.delivery_schedule,\n 'latitude': postamat.latitude,\n 'longitude': postamat.longitude,\n 'type_title': postamat.type_title,\n 'descript': postamat.descript,\n }\n postamat_dict.append(p)\n for pvz in all_pvz:\n p = {\n 'unified_number': pvz.unified_number,\n 'street': pvz.street,\n 'street_abbr': pvz.street_abbr,\n 'house': pvz.house,\n 'building': pvz.building,\n 'delivery_shedule': pvz.delivery_schedule,\n 'latitude': pvz.latitude,\n 'longitude': pvz.longitude,\n }\n pvz_dict.append(p)\n\n if str(city.city_id) == '49694102':\n lower_time_array = 1\n lptime = lptime_courier = get_date_format(lower_time_array, city.city_id)\n min_price = min_price_courier = 'Бесплатно'\n\n else:\n delivery = Calc(city, [product_att], quantity, 'curier')\n delivery_pvz = Calc(city, [product_att], quantity, 'pvz')\n if delivery.min_price:\n min_price_courier = ceil(float(delivery.min_price))\n else:\n min_price_courier = None\n if delivery.lowest_price_time:\n lptime_courier = get_date_format(delivery.lowest_price_time, city.city_id)\n else:\n lptime_courier = None\n if delivery_pvz.min_price:\n min_price = ceil(float(delivery_pvz.min_price))\n else:\n min_price = None\n if delivery_pvz.lowest_price_time:\n lptime = get_date_format(delivery_pvz.lowest_price_time, city.city_id)\n else:\n lptime = None\n\n return {\n 'min_price': min_price,\n 'lowest_time': lptime,\n 'min_price_courier': min_price_courier,\n 'lowest_time_courier': lptime_courier,\n 'enable_postomat': enable_apt,\n 'enable_pvz': enable_pvz,\n 'postomat': postamat_dict,\n 'pvz': pvz_dict\n }\n\n\nclass ProductDetailSerializer(ModelSerializer):\n categories = CategorySerializer(many=True, read_only=True)\n images = ProductImageSerializer(many=True, read_only=True)\n brand = serializers.SerializerMethodField()\n brand_option = serializers.SerializerMethodField()\n main_img = serializers.SerializerMethodField()\n price_mrc = serializers.SerializerMethodField()\n variants = serializers.SerializerMethodField()\n variant = serializers.SerializerMethodField()\n attributes = serializers.SerializerMethodField()\n attributes_order = serializers.SerializerMethodField()\n stockrecords = ProductStockSerializer(many=True, read_only=True)\n sizes_info = serializers.SerializerMethodField()\n all_sizes = serializers.SerializerMethodField()\n current_size = serializers.SerializerMethodField()\n price_with_discount = serializers.SerializerMethodField()\n discount_value = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n\n fields = (\n 'id',\n 'title',\n 'slug',\n 'product_1c_id',\n 'artikul',\n 'price_mrc',\n 'categories',\n 'images',\n 'main_img',\n 'brand',\n 'brand_option',\n 'rating',\n 'variants',\n 'variant',\n 'attributes',\n 'attributes_order',\n 'all_sizes',\n 'sizes_info',\n 'current_size',\n 'stockrecords',\n 'item_id',\n 'num_in_stock',\n 'num_allocated',\n 'price_with_discount',\n 'is_discountable_status',\n 'discount_value',\n )\n\n def get_main_img(self, obj):\n img = obj.primary_image()\n if isinstance(img, dict):\n return str(img['original'])\n return img.original.url\n\n def get_price_mrc(self, obj):\n price_mrc = int(obj.price_mrc)\n return price_mrc\n\n def get_brand_option(self, obj):\n product_brand = 
ProductAttributeValue.objects.filter(product=obj, attribute__code='brand').first()\n try:\n brand = product_brand.value_option.option\n except:\n brand = ''\n\n return brand\n\n def get_sizes_info(self, obj):\n data = {}\n if obj.size_item_id:\n size_items = Product.objects.values_list('size_item_id', flat=True).filter(item_id=obj.item_id).distinct()\n size_variants = Product.objects.filter(size_item_id__in=size_items)\n all_sizes = ProductAttributeValue.objects.filter(product_id__in=size_variants, attribute__code='size')\\\n .values_list('value_option__show_value',flat=True).order_by('value_option__minimum').distinct()\n data['all_sizes'] = all_sizes\n size_products = Product.objects.filter(size_item_id=obj.size_item_id)\n size_color_variants = dict()\n for p in ProductAttributeValue.objects.filter(product__in=size_products, attribute__code='size'):\n size_color_variants[p.value_option.show_value] = {\n 'id': p.product.id,\n 'value': p.value_option.show_value,\n 'stock': p.product.get_stock_status(),\n }\n\n data['size_color_variants'] = size_color_variants\n return data\n\n def get_all_sizes(self, obj):\n data = []\n if obj.size_item_id:\n size_items = Product.objects.values_list('size_item_id', flat=True).filter(item_id=obj.item_id).distinct()\n size_variants = Product.objects.filter(size_item_id__in=size_items)\n size_products = Product.objects.filter(size_item_id=obj.size_item_id)\n current_sizes = [p.value_option.show_value for p in\n ProductAttributeValue.objects.filter(product__in=size_products,\n attribute__code='size')]\n data = current_sizes\n return data\n\n def get_variants(self, obj):\n data = {}\n if (obj.item_id):\n variants = Product.objects.filter(Q(item_id=obj.item_id), Q(num_in_stock__gt=0),\n Q(num_in_stock__gt=F('num_allocated')))\n for variant in variants:\n img = variant.primary_image()\n if isinstance(img, dict):\n original_img = str(img['original'])\n else:\n original_img = img.original.url\n data[variant.pk] = [variant.pk, variant.title, original_img]\n return data\n\n def get_brand(self, obj):\n data = {}\n if obj.brand:\n data['name'] = obj.brand.name\n data['image'] = '/media/' + str(obj.brand.image) if obj.brand.image else None\n data['slug'] = obj.brand.slug\n else:\n product_brand = ProductAttributeValue.objects.filter(product=obj, attribute__code='brand').first()\n try:\n product_brand = product_brand.value_option.option\n except:\n data['name'] = None\n data['image'] = None\n data['slug'] = None\n else:\n brand = Brand.objects.filter(name=product_brand).first()\n if brand:\n data['name'] = brand.name\n data['image'] = '/media/' + str(brand.image) if brand.image else None\n data['slug'] = brand.slug\n else:\n data['name'] = None\n data['image'] = None\n data['slug'] = None\n return data\n\n def get_variant(self, obj):\n product_color = ProductAttributeValue.objects.filter(product=obj, attribute__code='color1').first()\n try:\n color = product_color.value_option.option\n except:\n color = ''\n\n return color\n\n def get_current_size(self, obj):\n return obj.get_size_value\n\n def get_attributes(self, obj):\n attributes_val = ProductAttributeValue.objects.filter(product=obj).order_by('attribute__display_order').exclude(attribute__type_group='transcharact')\n data = {}\n color, season = None, None\n for attribute_val in attributes_val:\n name = attribute_val.attribute.name\n att_type = attribute_val.attribute.type\n if str(att_type) == 'option':\n field = attribute_val.value_option.option\n elif str(att_type) == 'boolean':\n field = 
attribute_val.value_boolean\n            elif str(att_type) == 'float':\n                field = attribute_val.value_float\n            elif str(att_type) == 'integer':\n                field = attribute_val.value_integer\n            elif str(att_type) == 'date':\n                field = attribute_val.value_date\n            else:\n                field = attribute_val.value_text\n            if attribute_val.attribute.code == 'age_from' or attribute_val.attribute.code == 'age_to':\n                field = month_to_year(field)\n            if attribute_val.attribute.code == 'color1' or attribute_val.attribute.code == 'color2' or attribute_val.attribute.code == 'color3':\n                if color:\n                    color += ', '+field\n                else:\n                    color = field\n            elif attribute_val.attribute.code == 'season1' or attribute_val.attribute.code == 'season2' or attribute_val.attribute.code == 'season3':\n                if season:\n                    season += ', '+field\n                else:\n                    season = field\n            else:\n                data[name] = field\n        if color:\n            data['Цвет'] = color\n        if season:\n            data['Сезон'] = season\n        return data\n\n    def get_attributes_order(self, obj):\n        attributes_val = ProductAttributeValue.objects.filter(product=obj).order_by('attribute__display_order').exclude(attribute__type_group='transcharact')\n        data = []\n        for attribute_val in attributes_val:\n            if attribute_val.attribute.code == 'color1':\n                name = 'Цвет'\n                data.append(name)\n            elif attribute_val.attribute.code == 'season1':\n                name = 'Сезон'\n                data.append(name)\n            elif attribute_val.attribute.code == 'season2' or attribute_val.attribute.code == 'season3' or attribute_val.attribute.code == 'color2' or attribute_val.attribute.code == 'color3':\n                continue\n            else:\n                name = attribute_val.attribute.name\n                data.append(name)\n        return data\n\n    def get_price_with_discount(self, obj):\n        return obj.get_discount_price()\n\n    def get_is_discountable_status(self, obj):\n        return obj.is_discountable_status()\n\n    def get_discount_value(self, obj):\n        discount = obj.get_discount()\n        return discount.discount_value if discount else 0\n\n\ndef get_pvz_and_apt(basket_products, city_id):\n    '''\n    :param basket_products: товары, которые необходимо проверить по габаритам для ПВП и Постаматов (список)\n    :param city_id: код города из системы DPD, для которого необходимо взять ПВП и Постаматы\n    :return: Возвращает ПВП и Постаматы, в которые можно доставить данные товары\n\n    Ф-ция берет ПВП и Постаматы определенного города, проверяет, к каким товары подходят по габаритам, и возвращает\n    список ПВП и Постаматов\n    '''\n    offices = Office.objects.filter(city_id=city_id)\n    offices_dimensions_temp = offices_dimensions = OfficePickPoint.objects.filter(city_id=city_id)\n    offices_bool = 1\n    # Если нет ПВЗ и Постаматов, то отдаем 0\n    if not offices and not offices_dimensions:\n        offices_bool = 0\n    # Если есть ПВЗ и Постаматы - проверяем по габаритам товара только OfficePickPoint\n    else:\n        for product in basket_products:\n            product_att = ProductAttributeValue.objects.filter(\n                attribute__code__in=['ind_width', 'ind_height', 'ind_length', 'ind_weight', 'width', 'height',\n                                     'length', 'weight'], product=product)\n\n            # Берем длину, ширину, высоту и вес; если возвращается False, переходим на следующую итерацию\n            product_length = get_attribute_value(product_att, 'ind_length', 'length')\n            if not product_length:\n                offices_dimensions_temp = []\n                continue\n            product_width = get_attribute_value(product_att, 'ind_width', 'width')\n            if not product_width:\n                offices_dimensions_temp = []\n                continue\n            product_height = get_attribute_value(product_att, 'ind_height', 'height')\n            if not product_height:\n                offices_dimensions_temp = []\n                continue\n            product_weight = get_attribute_value(product_att, 'ind_weight', 'weight')\n            if not product_weight:\n                offices_dimensions_temp = []\n                continue\n            product_dimen = product_length + product_width + product_height\n\n            for office in offices_dimensions_temp:\n                if (office.max_height and office.max_length and office.max_width) and (office.max_weight or office.max_width or office.dimension_sum):\n                    max_demensions = [float(office.max_length), float(office.max_height), float(office.max_width)]\n                    product_dimensions = [float(product_length), float(product_height), float(product_width)]\n                    max_demensions_sorted = sorted(max_demensions, reverse=True, key=float)\n                    product_dimensions_sorted = sorted(product_dimensions, reverse=True, key=float)\n                    if max_demensions_sorted and product_dimensions_sorted:\n                        x1, y1, z1 = max_demensions_sorted[0], max_demensions_sorted[1], max_demensions_sorted[2]\n                        x2, y2, z2 = product_dimensions_sorted[0], product_dimensions_sorted[1], product_dimensions_sorted[2]\n                        if x2 > x1:\n                            offices_dimensions_temp = offices_dimensions_temp.exclude(\n                                unified_number=office.unified_number)\n                            continue\n                        if y2 > y1:\n                            offices_dimensions_temp = offices_dimensions_temp.exclude(\n                                unified_number=office.unified_number)\n                            continue\n                        if z2 > z1:\n                            offices_dimensions_temp = offices_dimensions_temp.exclude(\n                                unified_number=office.unified_number)\n                            continue\n                    if office.dimension_sum:\n                        if float(product_dimen) > float(office.dimension_sum):\n                            offices_dimensions_temp = offices_dimensions_temp.exclude(\n                                unified_number=office.unified_number)\n                            continue\n                    if office.max_weight:\n                        if float(product_weight) > float(office.max_weight):\n                            offices_dimensions_temp = offices_dimensions_temp.exclude(\n                                unified_number=office.unified_number)\n                            continue\n                else:\n                    offices_dimensions_temp = offices_dimensions_temp.exclude(\n                        unified_number=office.unified_number)\n\n    offices_check = 1\n    if not offices and not offices_dimensions_temp:\n        offices_check = 0\n\n    offices_dimensions = offices_dimensions_temp\n    offices_spsr_pvz = offices or None\n    offices_pick_pvz = offices_dimensions.filter(type_title='ПВП') or None\n    if offices_spsr_pvz and offices_pick_pvz:\n        offices_uni = set(office.unified_number for office in offices_spsr_pvz)\n        offices_dim_uni = set(office.unified_number for office in offices_pick_pvz)\n        offices_dublicate = offices_uni & offices_dim_uni\n        offices_spsr_pvz = offices_spsr_pvz.exclude(unified_number__in=offices_dublicate)\n\n    offices_pick_post = offices_dimensions.filter(type_title='П') or None\n    context = dict()\n    context['offices_pvz'] = offices_spsr_pvz\n    context['offices_pick_pvz'] = offices_pick_pvz\n    context['offices_pick_apt'] = offices_pick_post\n    context['offices_bool'] = offices_bool\n    context['offices_check'] = offices_check\n    return context\n\n\ndef get_subway(results_pvz_apt):\n    subways = set()\n    if 'offices_pvz' in results_pvz_apt and results_pvz_apt.get('offices_pvz'):\n        for pvz in results_pvz_apt.get('offices_pvz'):\n            if pvz.subway:\n                subways.add(pvz.subway)\n    if 'offices_pick_pvz' in results_pvz_apt and results_pvz_apt.get('offices_pick_pvz'):\n        for pvz in results_pvz_apt.get('offices_pick_pvz'):\n            if pvz.subway:\n                subways.add(pvz.subway)\n    if 'offices_pick_apt' in results_pvz_apt and results_pvz_apt.get('offices_pick_apt'):\n        for pvz in results_pvz_apt.get('offices_pick_apt'):\n            if pvz.subway:\n                subways.add(pvz.subway)\n    return sorted(list(subways))\n\n\ndef get_attribute_value(product_atts, transp_att_name, main_att_name):\n    '''\n    :param product_atts: Queryset, содержащий все значения аттрибутов, взятые у товара (длина, ширина, высота и тд)\n    :param transp_att_name: Код аттрибута, который нужно взять (транспортный аттр.)\n    :param main_att_name: Код аттрибута, который нужно взять (основной аттр.)\n    :return: product_dimension: Значение запрашиваемой величины (аттрибута)\n\n    Ф-ция фильтрует изначально по транспортному значению, если ничего не возвращается, то фильтрует по основному,\n    если снова ничего не возвращается, то возвращается False - товар не войдет ни в один ПВП.\n    '''\n\n    product_dimension = product_atts.filter(attribute__code=transp_att_name).first()\n    if not product_dimension:\n        product_dimension = product_atts.filter(attribute__code=main_att_name).first()\n        if product_dimension:\n            product_dimension = product_dimension.value_float\n        else:\n            product_dimension = False\n    else:\n        product_dimension = product_dimension.value_float\n\n    return product_dimension\n","sub_path":"catalogue/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":30667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"283666563","text":"\"\"\"\nrequired packages:\npip install requests==2.22.0 beautifulsoup4==4.8.1\n\"\"\"\nimport requests\nimport argparse\n\nfrom bs4 import BeautifulSoup\n\nURL = 'https://sinoptik.ua/'\n\n\ndef parse_html():\n    html_text = requests.get(URL).text\n    parsed_html = BeautifulSoup(html_text, 'html.parser')\n\n    main_elem = parsed_html.find('div', attrs={'class': 'wDescription clearfix'})\n    descr_elem = main_elem.find('div', attrs={'class': 'description'})\n    return descr_elem.text\n\n\nif __name__ == '__main__':\n    w_today = parse_html()\n    print(w_today)\n","sub_path":"python/examples/sinoptik_ua_parser.py","file_name":"sinoptik_ua_parser.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"553666539","text":"from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Profile(models.Model):\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n    name = models.CharField(max_length=256, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n    if created:\n        Profile.objects.create(\n            user=instance,\n            name=\"{} {}\".format(\n                instance.first_name,\n                instance.last_name\n            ).strip()\n        )\n\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n    if not instance.profile.name:\n        if instance.first_name or instance.last_name:\n            instance.profile.name = \"{} {}\".format(\n                instance.first_name,\n                instance.last_name\n            ).strip()\n    instance.profile.save()\n","sub_path":"accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"343886129","text":"#!/usr/bin/env python3\n\"\"\"Python web proxy with content filter and caching.\n\"\"\"\nimport socketserver\nimport argparse\nimport sys\nimport logging\nfrom logging import config\nimport handler\nimport utils\n\n\nconfig.fileConfig('logs/logging.cfg')\nLOGGER = logging.getLogger('proxy')\nSH = logging.StreamHandler()\nLOGGER.addHandler(SH)\n\nHOST, PORT = \"localhost\", 8080\nMAX_CONN = 5\nBUFFER_SIZE = 1024\n\nif __name__ == \"__main__\":\n    PARSER = argparse.ArgumentParser()\n    PARSER.add_argument('--black', 
dest='black', help='blacklist file'\n ' containing blocked hosts.', default='blacklist.txt')\n PARSER.add_argument('--white', dest='white', help='whitelist file'\n ' containing allowed hosts.', default='whitelist.txt')\n PARSER.add_argument('--deny', dest='deny', help='list file containing '\n 'denied terms.', default='deny-terms.txt')\n ARGS = PARSER.parse_args()\n BLACKLIST = [line.rstrip('\\n') for line in open(ARGS.black).readlines()]\n WHITELIST = [line.rstrip('\\n') for line in open(ARGS.white).readlines()]\n DENY = [line.rstrip('\\n') for line in open(ARGS.deny).readlines()]\n LOGGER.debug(\"Blacklisted domains: %s\", str(BLACKLIST))\n LOGGER.info(\"Initializing server...\")\n handlerHTTP = handler.ProxyHandler\n # Create the server, binding to localhost on port 8080\n server = socketserver.ThreadingTCPServer((HOST, PORT), handlerHTTP)\n # handler.ThreadedTCPRequestHandler)\n try:\n LOGGER.info(\"Sockets binded successfully.\")\n LOGGER.info(\"Server started @ %s:%s\", HOST, PORT)\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl-C\n server.serve_forever()\n except KeyboardInterrupt:\n LOGGER.info(\"Finalizing connection...\")\n finally:\n server.server_close()\n LOGGER.info(\"Quiting.\")\n sys.exit(0)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46597684","text":"#coding=utf-8\nimport sys\n\nimport csv\nimport json\nimport os\nimport sys\n\n# Allow imports from above\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nfrom newamerica_api_client import NAClient\n\n\ndef convert_to_csv(content):\n yield [\n content.get('id'),\n content.get('title'),\n content.get('slug'),\n content.get('url'),\n content.get('publish_at'),\n content.get('modified'), \n content.get('authors'),\n content.get('programs'),\n content.get('type'),\n content.get('deleted'),\n ]\n\n\ndef event_to_csv(content):\n yield [\n content.get('id'),\n content.get('title'),\n content.get('location'),\n ]\n\n\ndef articles_to_csv(content):\n yield [\n content.get('id'),\n content.get('title'),\n content.get('slug'),\n content.get('published'),\n content.get('authors'),\n content.get('programs'),\n content.get('tags')\n ]\n\n\ndef in_the_news_to_csv(content):\n yield [\n content.get('id'),\n content.get('publish_at'),\n content.get('title'),\n content.get('slug'),\n content.get('published'),\n content.get('authors'),\n content.get('programs'),\n #content.get('content')\n ]\n\n\ndef user_to_csv(content):\n yield [\n content.get('id'),\n content.get('full_name'),\n ]\n\ndef program_posts():\n with open('weekly_content_5_4_16.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['id', 'title', 'slug', 'url', 'publish_at', 'modified','authors', 'programs', 'type', 'deleted'])\n idx = 0\n for content in NAClient().program_content(12):\n writer.writerows(convert_to_csv(content))\n print(idx)\n idx += 1\n\n\ndef event_addresses():\n with open('event_addresses-5-4-16.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['id', 'title', 'location'])\n idx = 0\n for content in NAClient().get_events():\n writer.writerows(event_to_csv(content))\n print(idx)\n idx += 1\n\n\ndef get_users():\n with open('users-5-4-16.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['id', 'full_name'])\n idx = 0\n for content in NAClient().get_users():\n 
writer.writerows(user_to_csv(content))\n print(idx)\n idx += 1\n\n\ndef articles():\n with open('all_articles.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([\n 'id', \n 'title', \n 'slug', \n 'published', \n 'authors',\n 'programs',\n 'tags'\n ])\n idx = 0\n for content in NAClient().get_articles():\n writer.writerows(articles_to_csv(content))\n print(idx)\n idx += 1\n\n\ndef get_in_the_news():\n with open('all_in_the_news.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([\n 'id', \n 'publish_at',\n 'title', \n 'slug', \n 'published', \n 'authors',\n 'programs',\n ])\n idx = 0\n for content in NAClient().get_in_the_news():\n writer.writerows(in_the_news_to_csv(content))\n print(idx)\n idx += 1","sub_path":"home/management/api/csv_scripts/program_content.py","file_name":"program_content.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429883974","text":"from hdmf.utils import docval, getargs\nfrom hdmf.build import ObjectMapper, RegionBuilder, BuildManager\nfrom hdmf.spec import Spec\nfrom hdmf.container import Container\nfrom .. import register_map\n\nfrom pynwb.file import NWBFile\nfrom pynwb.core import NWBData, DynamicTable, NWBContainer, VectorIndex\n\n\nclass NWBBaseTypeMapper(ObjectMapper):\n\n @staticmethod\n def get_nwb_file(container):\n curr = container\n while curr is not None:\n if isinstance(curr, NWBFile):\n return curr\n curr = container.parent\n\n\n@register_map(NWBContainer)\nclass NWBContainerMapper(NWBBaseTypeMapper):\n\n pass\n\n\n@register_map(DynamicTable)\nclass DynamicTableMap(NWBContainerMapper):\n\n def __init__(self, spec):\n super(DynamicTableMap, self).__init__(spec)\n vector_data_spec = spec.get_neurodata_type('VectorData')\n vector_index_spec = spec.get_neurodata_type('VectorIndex')\n self.map_spec('columns', vector_data_spec)\n self.map_spec('columns', vector_index_spec)\n\n @ObjectMapper.object_attr('colnames')\n def attr_columns(self, container, manager):\n if all(len(col) == 0 for col in container.columns):\n return tuple()\n return container.colnames\n\n @docval({\"name\": \"spec\", \"type\": Spec, \"doc\": \"the spec to get the attribute value for\"},\n {\"name\": \"container\", \"type\": Container, \"doc\": \"the container to get the attribute value from\"},\n {\"name\": \"manager\", \"type\": BuildManager, \"doc\": \"the BuildManager used for managing this build\"},\n returns='the value of the attribute')\n def get_attr_value(self, **kwargs):\n ''' Get the value of the attribute corresponding to this spec from the given container '''\n spec, container, manager = getargs('spec', 'container', 'manager', kwargs)\n attr_value = super(DynamicTableMap, self).get_attr_value(spec, container, manager)\n if attr_value is None and spec.name in container:\n if spec.neurodata_type_inc == 'VectorData':\n attr_value = container[spec.name]\n if isinstance(attr_value, VectorIndex):\n attr_value = attr_value.target\n elif spec.neurodata_type_inc == 'DynamicTableRegion':\n attr_value = container[spec.name]\n if attr_value.table is None:\n msg = \"empty or missing table for DynamicTableRegion '%s' in DynamicTable '%s'\" %\\\n (attr_value.name, container.name)\n raise ValueError(msg)\n elif spec.neurodata_type_inc == 'VectorIndex':\n attr_value = container[spec.name]\n return attr_value\n\n\n@register_map(NWBData)\nclass NWBDataMap(NWBBaseTypeMapper):\n\n @ObjectMapper.constructor_arg('name')\n def carg_name(self, builder, 
manager):\n return builder.name\n\n @ObjectMapper.constructor_arg('data')\n def carg_data(self, builder, manager):\n return builder.data\n\n\nclass NWBTableRegionMap(NWBDataMap):\n\n @ObjectMapper.constructor_arg('table')\n def carg_table(self, builder, manager):\n return manager.construct(builder.data.builder)\n\n @ObjectMapper.constructor_arg('region')\n def carg_region(self, builder, manager):\n if not isinstance(builder.data, RegionBuilder):\n raise ValueError(\"'builder' must be a RegionBuilder\")\n return builder.data.region\n","sub_path":"src/pynwb/io/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247154050","text":"import os\r\nimport glob \r\n\r\nos.chdir(r'E:\\~entity\\Quicksilver Designs\\~Projects\\DVDCatalog\\src\\python\\page_generator\\tests\\dvd_covers')\r\n\r\nfilenames = [file for file in glob.glob('*.jpg')]\r\n\r\nfor filename in filenames:\r\n\tfilename = os.getcwd()+ '\\\\' + filename\r\n\timage = pdb.gimp_file_load(filename, filename)\r\n\tdrawable = pdb.gimp_image_get_active_layer(image) \r\n\t\r\n\tpdb.plug_in_autocrop(image, drawable)\r\n\tpdb.gimp_file_save(image, drawable, filename, filename)\r\n\tpdb.gimp_image_delete(image)\r\n \r\n ","sub_path":"batch_autocrop.py","file_name":"batch_autocrop.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"206715419","text":"# -*- coding: utf-8 -*-\n\n# =============================================================================\n# =======概述\n# 简述:数据分析类\n#\n# =======使用说明\n# \n#\n# =======日志\n# \n# =============================================================================\n\n# =============================================================================\n# Qt imports\n# =============================================================================\nfrom PyQt5.QtCore import (QSize, QCoreApplication, Qt, QObject)\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QComboBox,\n QTabWidget, QPushButton, QGroupBox,\n QPlainTextEdit, QMessageBox, QTreeWidget, \n QTreeWidgetItem, QDialog, QDialogButtonBox,\n QMenu, QAction, QLineEdit, QHeaderView,\n QSpacerItem, QSizePolicy)\n\n# =============================================================================\n# Package models imports\n# =============================================================================\nfrom views.custom_dialog import SelParasDialog\nfrom models.analysis_model import DataAnalysis\nimport views.constant as CONSTANT\n\nclass SiftResultViewWidget(QWidget):\n\n def __init__(self, parent = None, expr = ''):\n \n super().__init__(parent)\n \n self.expr = expr\n self.setup()\n \n def setup(self):\n\n self.verticalLayout_8 = QVBoxLayout(self)\n self.verticalLayout_8.setContentsMargins(2, 2, 2, 2)\n self.verticalLayout_8.setSpacing(2)\n self.group_box_view_expression = QGroupBox(self)\n self.verticalLayout_6 = QVBoxLayout(self.group_box_view_expression)\n self.verticalLayout_6.setContentsMargins(2, 2, 2, 2)\n self.verticalLayout_6.setSpacing(2)\n self.plain_text_edit_view_expression = QPlainTextEdit(self.group_box_view_expression)\n self.plain_text_edit_view_expression.setEnabled(False)\n self.plain_text_edit_view_expression.setPlainText(self.expr)\n self.verticalLayout_6.addWidget(self.plain_text_edit_view_expression)\n self.verticalLayout_8.addWidget(self.group_box_view_expression)\n 
self.group_box_sift_result = QGroupBox(self)\n self.verticalLayout_7 = QVBoxLayout(self.group_box_sift_result)\n self.verticalLayout_7.setContentsMargins(2, 2, 2, 2)\n self.verticalLayout_7.setSpacing(2)\n self.tree_widget_sift_result = QTreeWidget(self.group_box_sift_result)\n# 设置树组件头部显示方式\n headerview = self.tree_widget_sift_result.header()\n headerview.setSectionResizeMode(QHeaderView.ResizeToContents)\n headerview.setMinimumSectionSize(100)\n self.tree_widget_sift_result.setHeader(headerview)\n \n self.verticalLayout_7.addWidget(self.tree_widget_sift_result)\n self.verticalLayout_8.addWidget(self.group_box_sift_result)\n self.verticalLayout_8.setStretch(0, 2)\n self.verticalLayout_8.setStretch(1, 5)\n \n self.retranslateUi()\n \n def retranslateUi(self):\n _translate = QCoreApplication.translate\n self.group_box_view_expression.setTitle(_translate('DataAnalysisWindow', '条件表达式'))\n self.group_box_sift_result.setTitle(_translate('DataAnalysisWindow', '结果'))\n self.tree_widget_sift_result.headerItem().setText(0, _translate('DataAnalysisWindow', '文件对象'))\n self.tree_widget_sift_result.headerItem().setText(1, _translate('DataAnalysisWindow', '状态'))\n self.tree_widget_sift_result.headerItem().setText(2, _translate('DataAnalysisWindow', '捕捉点'))\n self.tree_widget_sift_result.headerItem().setText(3, _translate('DataAnalysisWindow', '持续时间'))\n\nclass DataSiftWindow(QWidget):\n \n# =============================================================================\n# 初始化 \n# ============================================================================= \n def __init__(self, parent = None):\n \n super().__init__(parent)\n\n# 计算产生的数据\n self.dict_data = {}\n# 不允许改动这个变量,因为该变量连接着主窗口的变量\n self._current_files = []\n \n self.sift_search_paras = []\n \n self.tab_result_count = 0\n \n self.file_icon = QIcon(CONSTANT.ICON_FILE)\n self.time_icon = QIcon(CONSTANT.ICON_TIME)\n\n# =============================================================================\n# UI模块\n# ============================================================================= \n def setup(self):\n \n self.verticalLayout = QVBoxLayout(self)\n self.verticalLayout.setContentsMargins(2, 0, 2, 0)\n self.verticalLayout.setSpacing(2)\n \n self.tab_widget_datasift = QTabWidget(self)\n self.tab_widget_datasift.setTabsClosable(True)\n self.tab_sift = QWidget()\n self.verticalLayout_4 = QVBoxLayout(self.tab_sift)\n self.verticalLayout_4.setContentsMargins(2, 2, 2, 2)\n self.verticalLayout_4.setSpacing(2)\n self.group_box_expression = QGroupBox(self.tab_sift)\n self.verticalLayout_2 = QVBoxLayout(self.group_box_expression)\n self.verticalLayout_2.setContentsMargins(2, 2, 2, 2)\n self.verticalLayout_2.setSpacing(2)\n self.plain_text_edit_expression = QPlainTextEdit(self.group_box_expression)\n \n self.plain_text_edit_expression.setContextMenuPolicy(Qt.CustomContextMenu)\n# 添加右键动作\n self.action_add_para = QAction(self.plain_text_edit_expression)\n self.action_add_para.setText(QCoreApplication.\n translate('DataAnalysisWindow', '添加参数'))\n \n self.verticalLayout_2.addWidget(self.plain_text_edit_expression)\n self.verticalLayout_4.addWidget(self.group_box_expression)\n# self.group_box_aggregates = QGroupBox(self.tab_sift)\n# self.verticalLayout_3 = QVBoxLayout(self.group_box_aggregates)\n# self.verticalLayout_3.setContentsMargins(2, 2, 2, 2)\n# self.verticalLayout_3.setSpacing(2)\n# self.push_btn_add_aggregate = QPushButton(self.group_box_aggregates)\n# self.push_btn_add_aggregate.setMinimumSize(QSize(0, 24))\n# 
self.push_btn_add_aggregate.setMaximumSize(QSize(16777215, 24))\n# self.verticalLayout_3.addWidget(self.push_btn_add_aggregate)\n# self.tree_widget_aggragate_para = QTreeWidget(self.group_box_aggregates)\n# \n## 让顶级项没有扩展符空白\n# self.tree_widget_aggragate_para.setRootIsDecorated(False)\n# \n# self.verticalLayout_3.addWidget(self.tree_widget_aggragate_para)\n# self.verticalLayout_4.addWidget(self.group_box_aggregates)\n# self.button_box_sift = QDialogButtonBox(self.tab_sift)\n# self.button_box_sift.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)\n# self.verticalLayout_4.addWidget(self.button_box_sift)\n self.hlayout_btn_sc = QHBoxLayout()\n self.hlayout_btn_sc.setSpacing(4)\n spacerItem1 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.hlayout_btn_sc.addItem(spacerItem1)\n self.btn_confirm = QPushButton(self.group_box_expression)\n self.btn_confirm.setMinimumSize(QSize(0, 24))\n self.btn_confirm.setMaximumSize(QSize(16777215, 24))\n self.hlayout_btn_sc.addWidget(self.btn_confirm)\n self.btn_cancel = QPushButton(self.group_box_expression)\n self.btn_cancel.setMinimumSize(QSize(0, 24))\n self.btn_cancel.setMaximumSize(QSize(16777215, 24))\n self.hlayout_btn_sc.addWidget(self.btn_cancel)\n self.verticalLayout_4.addLayout(self.hlayout_btn_sc)\n \n self.verticalLayout_4.setStretch(0, 2)\n self.verticalLayout_4.setStretch(1, 4)\n self.tab_widget_datasift.addTab(self.tab_sift, '')\n \n self.verticalLayout.addWidget(self.tab_widget_datasift)\n\n self.retranslateUi()\n self.tab_widget_datasift.setCurrentIndex(0)\n \n# =======连接信号与槽\n# ============================================================================= \n# self.button_box_sift.accepted.connect(self.slot_sift_ok)\n# self.button_box_sift.rejected.connect(self.slot_sift_cancel)\n self.btn_confirm.clicked.connect(self.slot_sift_ok)\n self.btn_cancel.clicked.connect(self.slot_sift_cancel)\n \n self.plain_text_edit_expression.customContextMenuRequested.connect(\n self.expression_context_menu)\n self.action_add_para.triggered.connect(self.slot_add_para)\n \n self.tab_widget_datasift.tabCloseRequested.connect(self.slot_close_tab)\n# self.push_btn_add_aggregate.clicked.connect(self.slot_add_aggregate)\n \n# =============================================================================\n# slots模块\n# =============================================================================\n def slot_sift_ok(self):\n \n list_files = self._current_files\n str_condition = self.plain_text_edit_expression.toPlainText()\n if list_files and str_condition:\n sift_object = DataAnalysis()\n# result_tuple = sift_object.condition_sift_class(list_files,\n# str_condition,\n# self.sift_search_paras)\n \n result_dict = sift_object.condition_sift_wxl(list_files,\n str_condition,\n self.sift_search_paras)\n \n if result_dict:\n \n\n# 创建一个结果显示窗口\n self.tab_result_count += 1\n tab_sift_result = SiftResultViewWidget(self.tab_widget_datasift, str_condition)\n# for file in list_files:\n for key_file in result_dict:\n sift_results=result_dict[key_file]#a list\n item = None\n first_hit = True\n for result in sift_results:\n if first_hit:\n item = QTreeWidgetItem(tab_sift_result.tree_widget_sift_result)\n tab_sift_result.tree_widget_sift_result.addTopLevelItem(item)\n filedir = key_file\n pos = filedir.rindex('\\\\')\n filename = filedir[pos+1:]\n item.setText(0, filename)\n item.setIcon(0, self.file_icon)\n item.setText(1, 'Hit')\n child = QTreeWidgetItem(item)\n child.setIcon(0, self.time_icon)\n child.setText(2, result[0] + ' - ' + result[1])\n 
child.setText(3, result[2])\n first_hit = False\n else:\n child = QTreeWidgetItem(item)\n child.setIcon(0, self.time_icon)\n child.setText(2, result[0] + ' - ' + result[1])\n child.setText(3, result[2])\n if first_hit:\n item = QTreeWidgetItem(tab_sift_result.tree_widget_sift_result)\n tab_sift_result.tree_widget_sift_result.addTopLevelItem(item)\n pos = key_file.rindex('\\\\')\n filename = key_file[pos+1:]\n item.setText(0, filename)\n item.setText(1, 'No Fit')\n self.tab_widget_datasift.addTab(\n tab_sift_result, \n QCoreApplication.translate('DataAnalysisWindow',\n '筛选结果' + str(self.tab_result_count)))\n self.tab_widget_datasift.setCurrentIndex(\n self.tab_widget_datasift.indexOf(tab_sift_result))\n else:\n QMessageBox.information(self,\n QCoreApplication.translate(\"DataAnalysisWindow\", \"提示\"),\n QCoreApplication.translate(\"DataAnalysisWindow\", '语法错误'))\n else:\n QMessageBox.information(self,\n QCoreApplication.translate('DataAnalysisWindow', '提示'),\n QCoreApplication.translate('DataAnalysisWindow','没有足够的输入'))\n \n \n def slot_sift_cancel(self):\n \n self.plain_text_edit_expression.clear()\n# self.tree_widget_aggragate_para.clear()\n self.sift_search_paras = []\n \n def expression_context_menu(self, pos):\n\n menu = QMenu(self.plain_text_edit_expression)\n menu.addActions([self.action_add_para])\n menu.exec_(self.plain_text_edit_expression.mapToGlobal(pos))\n \n# def slot_add_aggregate(self):\n# \n## 采用单选模式\n# dialog = SelParasDialog(self, self._current_files, 0)\n# return_signal = dialog.exec_()\n# paras = []\n# if (return_signal == QDialog.Accepted):\n# paras = dialog.get_list_sel_paras()\n# if paras:\n# widget_aggregate = QWidget(self.tree_widget_aggragate_para)\n# vlayout = QVBoxLayout()\n# vlayout.setContentsMargins(2, 2, 2, 2)\n# combo_box = QComboBox(widget_aggregate)\n# combo_box.addItem(QCoreApplication.translate('DataAnalysisWindow', '整段数据'))\n# combo_box.addItem(QCoreApplication.translate('DataAnalysisWindow', '最大值'))\n# combo_box.addItem(QCoreApplication.translate('DataAnalysisWindow', '最小值'))\n# combo_box.addItem(QCoreApplication.translate('DataAnalysisWindow', '平均值'))\n# vlayout.addWidget(combo_box)\n# widget_aggregate.setLayout(vlayout)\n# \n# widget_para = QWidget(self.tree_widget_aggragate_para)\n# hlayout = QHBoxLayout()\n# hlayout.setContentsMargins(2, 2, 2, 2)\n# hlayout.setSpacing(2)\n# line_edit = QLineEdit(widget_para)\n# line_edit.setReadOnly(True)\n# line_edit.setText(paras[0])\n# hlayout.addWidget(line_edit)\n# button = QPushButton(widget_para)\n# button.setText(QCoreApplication.translate('DataAnalysisWindow', '删除'))\n# hlayout.addWidget(button)\n# widget_para.setLayout(hlayout)\n# item = QTreeWidgetItem(self.tree_widget_aggragate_para)\n# self.tree_widget_aggragate_para.addTopLevelItem(item)\n# self.tree_widget_aggragate_para.setItemWidget(item, 0, widget_aggregate)\n# self.tree_widget_aggragate_para.setItemWidget(item, 1, widget_para)\n# button.clicked.connect(self.slot_delete_aggregate)\n \n# def slot_delete_aggregate(self):\n# \n# sender = QObject.sender(self)\n# item = self.tree_widget_aggragate_para.itemAt(sender.pos())\n# self.tree_widget_aggragate_para.takeTopLevelItem(\n# self.tree_widget_aggragate_para.indexOfTopLevelItem(item))\n \n def slot_update_current_files(self, files : list):\n \n self._current_files = files\n \n def slot_add_para(self):\n \n# 采用单选模式\n dialog = SelParasDialog(self, self._current_files, 0)\n return_signal = dialog.exec_()\n if (return_signal == QDialog.Accepted):\n paras = dialog.get_list_sel_paras()\n if paras:\n 
self.plain_text_edit_expression.insertPlainText(paras[0])\n                self.sift_search_paras.append(paras[0])\n\n    def slot_close_tab(self, index : int):\n\n#        不允许关闭第一个tab\n        if index > 0:\n            message = QMessageBox.warning(self,\n                    QCoreApplication.translate('DataAnalysisWindow', '关闭'),\n                    QCoreApplication.translate('DataAnalysisWindow',\n                    '''\n\n    确定要关闭吗?'''),\n                    QMessageBox.Yes | QMessageBox.No)\n            if (message == QMessageBox.Yes):\n                self.tab_widget_datasift.removeTab(index)\n\n# =============================================================================\n# 功能函数模块 \n# =============================================================================\n    def retranslateUi(self):\n        _translate = QCoreApplication.translate\n        self.group_box_expression.setTitle(_translate('DataAnalysisWindow', '条件表达式'))\n#        self.group_box_aggregates.setTitle(_translate('DataAnalysisWindow', '筛选目标'))\n#        self.push_btn_add_aggregate.setText(_translate('DataAnalysisWindow', '添加新目标'))\n#        self.tree_widget_aggragate_para.headerItem().setText(0, _translate('DataAnalysisWindow', '条件'))\n#        self.tree_widget_aggragate_para.headerItem().setText(1, _translate('DataAnalysisWindow', '参数'))\n        self.tab_widget_datasift.setTabText(self.tab_widget_datasift.indexOf(self.tab_sift), _translate('DataAnalysisWindow', '数据筛选'))\n        self.btn_confirm.setText(_translate(\"DataManageWindow\", \"确定\"))\n        self.btn_cancel.setText(_translate(\"DataManageWindow\", \"取消\"))","sub_path":"lib/views/data_sift_window.py","file_name":"data_sift_window.py","file_ext":"py","file_size_in_byte":17695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"530213581","text":"#-*- coding: utf-8 -*-\n##x = redis.keys(\"idx3\"),y = redis.hmget(x, idx3)\n#하려고 했던 방법은 단어 첫 글자를 key로 두고 값은 그 글자로 시작하는 단어들의 [list]\n\nimport random\n\ndef load_wordfile(filename):\n    data = dict()\n    with open(filename) as f:\n        for word in f:\n            i = word[:3] #단어의 첫 글자\n            if i in data:\n                data[i].append(word.strip())\n            else:\n                data[i] = [word.strip()]\n    f.close()\n\n    return data\n\ndef find_next_word_for(data):\n    print(\"한글 끝말잇기를 시작해볼까요?\")\n    print(\"----------------------------------------------\")\n\n    prev_word = None\n\n    while True:\n        user_word = raw_input(\"단어입력: \")\n\n        if user_word == \"--\":\n            if prev_word == None:\n                print(\"왜~ 놀다 가지..\")\n            else:\n                print(\"유 루저 ㅋㅋ\")\n            return\n\n        end = user_word[-3:]\n\n        answer = None\n\n        print(\" prev_word=%s\" % prev_word)\n\n        if prev_word == None:\n            answer = data[end][0] if end in data else None\n\n            if answer == None:\n                print(\"당신 이겼어요! 컴퓨터를 이기다니 대단한데요?!\")\n                return\n\n            print(answer)\n        else:\n            print(\" prev_word=<%s>, prev_word[-3:]=<%s>, user_word=<%s>, user_word[:3]=<%s>\" % (prev_word, prev_word[-3:], user_word, user_word[:3]))\n\n            if prev_word[-3:] == user_word[:3]:\n                answer = data[end][0] if end in data else None\n\n                if answer == None:\n                    print(\"당신 이겼어요! 
컴퓨터를 이기다니 대단한데요?!\")\n return\n\n print(answer)\n else:\n print(\"틀렸어 다시 말해주세요.\")\n\n if answer != None:\n prev_word = answer\n\ndatafilename = \"wordfile.txt\"\ndata = load_wordfile(datafilename)\nfind_next_word_for(data)","sub_path":"ONG_WORDGAME/[cookie]_cookie_wordgame3.py","file_name":"[cookie]_cookie_wordgame3.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"79299329","text":"\r\n__author__ = 'Nishant'\r\nfrom lru import LRU\r\nimport numpy as np\r\nclass topic4:\r\n def __init__(self, c_hash, c_user, c_words):\r\n self.topic_count =1\r\n self.l1 = LRU(c_hash)\r\n self.l2 = LRU(c_user)\r\n self.l3 = LRU(c_words)\r\n\r\n def set_hashLRU(self,l):\r\n self.set(self.l1, l)\r\n\r\n def set_userLRU(self,l):\r\n self.set(self.l2, l)\r\n\r\n def set_wordLRU(self,l):\r\n self.set(self.l3, l)\r\n\r\n def set(self, lru, l):\r\n for k in l:\r\n v = lru.get(k,0)\r\n lru[k]=v+1\r\n\r\n def set_cluster(self, hashtags, users, words):\r\n for k in hashtags:\r\n self.l1[k]=self.l1.get(k,0)+1\r\n for k in users:\r\n self.l2[k]=self.l2.get(k,0)+1\r\n for k in words:\r\n self.l3[k]=self.l3.get(k,0)+1\r\n self.topic_count+=1\r\n\r\n def get_similarity(self,hashtags,users,words):\r\n h_sum = 1\r\n u_sum = 1\r\n w_sum = 1\r\n h_match =0\r\n h_ind =0\r\n u_ind =0\r\n w_ind =0\r\n c=0\r\n h1 = self.l1.get_size()\r\n u1 = self.l2.get_size()\r\n w1 = self.l3.get_size()\r\n for h in hashtags:\r\n # l1_items=zip(*self.l1.items())\r\n h_sum+= self.l1.get(h,0)\r\n if(self.l1.has_key(h)):\r\n ind = self.l1.keys().index(h)\r\n h_ind+= h1 - ind\r\n h_match+= 1 if ind<250 else 0\r\n for u in users:\r\n u_sum+= self.l2.get(u,0)\r\n if(self.l2.has_key(u)):\r\n u_ind+= u1 - self.l2.keys().index(u)\r\n for w in words:\r\n w_sum+= self.l3.get(w,0)\r\n if(self.l3.has_key(w)):\r\n w_ind+= w1 - self.l3.keys().index(w)\r\n if(h_match !=0):\r\n c = h_match -1\r\n # print(h_ind,h1,u_ind,u1,w_ind,w1, h_sum,w_sum,)\r\n similarity = (h_ind/(h1+1))*(h_sum/sum(self.l1.values() +[1])) + (u_ind/(u1+1))*(u_sum/sum(self.l2.values()+[1])) + (w_ind/(w1+1))*(w_sum/sum(self.l3.values()+[1])) +c\r\n return similarity\r\n","sub_path":"clusters6.py","file_name":"clusters6.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265010280","text":"#!/usr/bin/env python\n#\n# Copyright 2020 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n#\n#\n\nfrom gnuradio import gr\nimport pmt\n\nclass filtered_msg_pair_to_var(gr.sync_block):\n \"\"\"\n This block will take an input message pair and allow you to set a flowgraph variable\n via setter callback. 
The car of the message pair is compared to a filter string, and\n the variable is only set if the string matches.\n If the second element the pair is a compound PMT object or not of the datatype \n expected by the flowgraph the behavior of the flowgraph may be unpredictable.\n \"\"\"\n def __init__(self, callback, filterstring):\n gr.sync_block.__init__(self, name=\"filtered_msg_pair_to_var\", in_sig=None, out_sig=None)\n\n self.callback = callback\n self.filter = filterstring\n\n self.message_port_register_in(pmt.intern(\"inpair\"))\n self.set_msg_handler(pmt.intern(\"inpair\"), self.msg_handler)\n\n def msg_handler(self, msg):\n if not pmt.is_pair(msg) or pmt.is_dict(msg) or pmt.is_pdu(msg):\n gr.log.warn(\"Input message %s is not a simple pair, dropping\" % repr(msg))\n return\n \n if pmt.eq(pmt.car(msg), pmt.intern(self.filter)):\n new_val = pmt.to_python(pmt.cdr(msg))\n try:\n self.callback(new_val)\n except Exception as e:\n gr.log.error(\"Error when calling \" + repr(self.callback.name()) + \" with \"\n + repr(new_val) + \" (reason: %s)\" % repr(e))\n\n def stop(self):\n return True\n","sub_path":"python/filtered_msg_pair_to_var.py","file_name":"filtered_msg_pair_to_var.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"580248980","text":"# // TO DO: autoload di dashboard.json\n\n\nimport cherrypy\nimport json\nimport os\nimport socket\n\nhostname = socket.gethostname()\nIPAddr = socket.gethostbyname(hostname)\n# print(\"Your Computer Name is:\" + hostname)\nprint(\"Your Computer IP Address is:\" + IPAddr)\n\n\nclass WebIndex(object):\n exposed = True\n\n def GET(self, *uri, **param):\n with open(\"index.html\") as fp:\n index = fp.read()\n print(uri, param)\n return index\n\n\nclass WebSave(object):\n def POST(self, *uri, **params):\n dash = json.loads(params[\"json_string\"]) # Load json object\n with open(\"./dashboard.json\", \"w\") as f:\n json.dump(dash, f, indent=2) # Write json to file\n print(uri, params)\n\n\nif __name__ == '__main__':\n conf = {'/':\n\n\n {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), '/'))\n },\n '/dashboard':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), 'dashboard.json'))\n },\n '/css':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), './css'))\n },\n '/img':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), './img'))\n },\n '/js':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), './js'))\n },\n '/plugins':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), './plugins'))\n },\n '/static':\n {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.path.join(os.path.dirname(__file__), './freeboard'))\n }\n }\n cherrypy.config.update({'server.socket_host': 'localhost'})\n cherrypy.config.update({'server.socket_port': 8083})\n cherrypy.tree.mount(WebIndex(), '/', config=conf)\n cherrypy.tree.mount(WebSave(), '/WebSave', config=conf)\n cherrypy.config.update(conf)\n cherrypy.engine.start()\n 
cherrypy.engine.block()\n\n","sub_path":"Client/freeboard/server_for_freeboard.py","file_name":"server_for_freeboard.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"34373553","text":"from tornado import auth\nfrom tornado import web\nfrom tornado import httpclient\n\nimport random\nimport string\nimport urllib\nimport urlparse\nimport logging\nimport json\n\nimport db\nfrom helper import *\n\n\nclass ShowAuthPage(myRequestHandler):\n \"\"\"\n Show Log in page.\n\n \"\"\"\n def get(self):\n\n set_redirect(self)\n\n self.clear_cookie('session')\n self.render('auth/start.html',\n google = True if self.settings['google_client_key'] and self.settings['google_client_secret'] else False,\n facebook = True if self.settings['facebook_api_key'] and self.settings['facebook_secret'] else False,\n twitter = True if self.settings['twitter_consumer_key'] and self.settings['twitter_consumer_secret'] else False,\n live = True if self.settings['live_client_key'] and self.settings['live_client_secret'] else False,\n )\n\n\nclass Exit(myRequestHandler):\n \"\"\"\n Log out.\n\n \"\"\"\n def get(self):\n redirect_url = ''\n if self.current_user:\n if self.current_user.provider == 'google':\n redirect_url = 'https://www.google.com/accounts/logout'\n elif self.current_user.provider == 'facebook':\n redirect_url = 'https://www.facebook.com/logout.php?access_token=%s&confirm=1&next=%s://%s/auth' % (self.current_user.access_token, self.request.protocol, self.request.host)\n\n self.clear_cookie('session')\n self.redirect(redirect_url)\n\n\n\nclass AuthOAuth2(myRequestHandler, auth.OAuth2Mixin):\n \"\"\"\n Google, Facebook and MSLive authentication.\n\n \"\"\"\n @web.asynchronous\n def get(self, provider):\n set_redirect(self)\n self.oauth2_provider = None\n\n if provider == 'facebook' and 'facebook_api_key' in self.settings and 'facebook_secret' in self.settings:\n # https://developers.facebook.com/apps\n self.oauth2_provider = {\n 'provider': 'facebook',\n 'key': self.settings['facebook_api_key'],\n 'secret': self.settings['facebook_secret'],\n 'auth_url': 'https://www.facebook.com/dialog/oauth?client_id=%(id)s&redirect_uri=%(redirect)s&scope=%(scope)s&state=%(state)s',\n 'token_url': 'https://graph.facebook.com/oauth/access_token',\n 'info_url': 'https://graph.facebook.com/me?access_token=%(token)s',\n 'scope': 'email',\n 'user_id': '%(id)s',\n 'user_email': '%(email)s',\n 'user_name': '%(name)s',\n 'user_picture': 'http://graph.facebook.com/%(id)s/picture?type=large',\n }\n\n if provider == 'google' and 'google_client_key' in self.settings and 'google_client_secret' in self.settings:\n # https://code.google.com/apis/console\n self.oauth2_provider = {\n 'provider': 'google',\n 'key': self.settings['google_client_key'],\n 'secret': self.settings['google_client_secret'],\n 'auth_url': 'https://accounts.google.com/o/oauth2/auth?client_id=%(id)s&redirect_uri=%(redirect)s&scope=%(scope)s&state=%(state)s&response_type=code&approval_prompt=auto&access_type=online',\n 'token_url': 'https://accounts.google.com/o/oauth2/token',\n 'info_url': 'https://www.googleapis.com/oauth2/v2/userinfo?access_token=%(token)s',\n 'scope': 'https://www.googleapis.com/auth/userinfo.profile+https://www.googleapis.com/auth/userinfo.email',\n 'user_id': '%(id)s',\n 'user_email': '%(email)s',\n 'user_name': '%(name)s',\n 'user_picture': '%(picture)s',\n }\n if provider == 'live' and 'live_client_key' in self.settings and 'live_client_secret' in 
self.settings:\n            # https://manage.dev.live.com/Applications/Index\n            self.oauth2_provider = {\n                'provider': 'live',\n                'key': self.settings['live_client_key'],\n                'secret': self.settings['live_client_secret'],\n                'auth_url': 'https://oauth.live.com/authorize?client_id=%(id)s&redirect_uri=%(redirect)s&scope=%(scope)s&state=%(state)s&response_type=code',\n                'token_url': 'https://oauth.live.com/token',\n                'info_url': 'https://apis.live.net/v5.0/me?access_token=%(token)s',\n                'scope': 'wl.signin+wl.emails',\n                'user_id': '%(id)s',\n                'user_email': '',\n                'user_name': '%(name)s',\n                'user_picture': 'https://apis.live.net/v5.0/%(id)s/picture',\n            }\n\n        if not self.oauth2_provider:\n            return self.finish()\n\n        self._OAUTH_AUTHORIZE_URL = self.oauth2_provider['auth_url']\n\n        url = self.request.protocol + '://' + self.request.host + '/auth/' + provider\n\n        if not self.get_argument('code', None):\n            return self.redirect(self.oauth2_provider['auth_url'] % {\n                'id': self.oauth2_provider['key'],\n                'redirect': url,\n                'scope': self.oauth2_provider['scope'],\n                'state': ''.join(random.choice(string.ascii_letters + string.digits) for x in range(16)),\n            })\n\n        if self.get_argument('error', None):\n            logging.error('%s oauth error: %s' % (provider, self.get_argument('error', None)))\n            return self.redirect(get_redirect(self))\n\n        httpclient.AsyncHTTPClient().fetch(self.oauth2_provider['token_url'],\n            method = 'POST',\n            headers = {'Content-Type': 'application/x-www-form-urlencoded'},\n            body = urllib.urlencode({\n                'client_id': self.oauth2_provider['key'],\n                'client_secret': self.oauth2_provider['secret'],\n                'redirect_uri': url,\n                'code': self.get_argument('code', None),\n                'grant_type': 'authorization_code',\n            }),\n            callback = self._got_token,\n        )\n\n    @web.asynchronous\n    def _got_token(self, response):\n        access_token = response.body\n        try:\n            access_token = json.loads(access_token)\n            if 'error' in access_token:\n                logging.error('%s oauth error: %s' % (self.oauth2_provider['provider'], access_token['error']))\n                return self.redirect(get_redirect(self))\n            access_token = access_token['access_token']\n        except:\n            try:\n                access_token = urlparse.parse_qs(access_token)\n                if 'error' in access_token:\n                    logging.error('%s oauth error: %s' % (self.oauth2_provider['provider'], access_token['error']))\n                    return self.redirect(get_redirect(self))\n                access_token = access_token['access_token'][0]\n            except:\n                logging.error('%s oauth error' % self.oauth2_provider['provider'])\n                return self.redirect(get_redirect(self))\n\n        httpclient.AsyncHTTPClient().fetch(self.oauth2_provider['info_url'] % {'token': access_token },\n            callback = self._got_user\n        )\n\n    @web.asynchronous\n    def _got_user(self, response):\n        try:\n            user = json.loads(response.body)\n            access_token = response.effective_url.split('access_token=')[1]\n            if 'error' in user:\n                logging.error('%s oauth error: %s' % (self.oauth2_provider['provider'], user['error']))\n                return self.redirect(get_redirect(self))\n        except:\n            return\n\n        if self.oauth2_provider['provider'] == 'facebook':\n            db.User().login(\n                request_handler = self,\n                provider = self.oauth2_provider['provider'],\n                provider_id = user.setdefault('id', None),\n                email = user.setdefault('email', None),\n                name = user.setdefault('name', None),\n                picture = 'http://graph.facebook.com/%s/picture?type=large' % user.setdefault('id', ''),\n                access_token = access_token\n            )\n        if self.oauth2_provider['provider'] == 'google':\n            db.User().login(\n                request_handler = self,\n                provider = self.oauth2_provider['provider'],\n                provider_id = user.setdefault('id', None),\n                email = user.setdefault('email', None),\n                name = user.setdefault('name', None),\n                picture = 
user.setdefault('picture', None),\n access_token = access_token\n )\n if self.oauth2_provider['provider'] == 'live':\n db.User().login(\n request_handler = self,\n provider = self.oauth2_provider['provider'],\n provider_id = user.setdefault('id', None),\n email = user.setdefault('emails', {}).setdefault('preferred', user.setdefault('emails', {}).setdefault('preferred', user.setdefault('personal', {}).setdefault('account', None))),\n name = user.setdefault('name', None),\n picture = 'https://apis.live.net/v5.0/%s/picture' % user.setdefault('id', ''),\n access_token = access_token\n )\n\n self.redirect(get_redirect(self))\n\n\nclass AuthMobileID(myRequestHandler, auth.OpenIdMixin):\n \"\"\"\n Estonian Mobile ID authentication.\n\n \"\"\"\n @web.asynchronous\n def get(self):\n set_redirect(self)\n self._OPENID_ENDPOINT = 'https://openid.ee/server/xrds/mid'\n\n if not self.get_argument('openid.mode', None):\n url = self.request.protocol + '://' + self.request.host + '/auth/mobileid'\n self.authenticate_redirect(callback_uri=url)\n\n self.get_authenticated_user(self.async_callback(self._got_user))\n\n def _got_user(self, user):\n if not user:\n raise web.HTTPError(500, 'MobileID auth failed')\n\n db.User().login(\n request_handler = self,\n provider_id = self.get_argument('openid.identity', None)\n )\n self.redirect(get_redirect(self))\n\n\nclass AuthIDcard(myRequestHandler, auth.OpenIdMixin):\n \"\"\"\n Estonian ID card authentication.\n\n \"\"\"\n @web.asynchronous\n def get(self):\n set_redirect(self)\n self._OPENID_ENDPOINT = 'https://openid.ee/server/eid'\n\n if not self.get_argument('openid.mode', None):\n return self.authenticate_redirect()\n\n self.get_authenticated_user(self.async_callback(self._got_user))\n\n def _got_user(self, user):\n if not user:\n raise web.HTTPError(500, 'IDcard auth failed')\n\n db.User().login(\n request_handler = self,\n provider_id = self.get_argument('openid.identity', None)\n )\n self.redirect(get_redirect(self))\n\n\nclass AuthTwitter(myRequestHandler, auth.TwitterMixin):\n \"\"\"\n Twitter authentication.\n\n \"\"\"\n @web.asynchronous\n def get(self):\n set_redirect(self)\n if not self.get_argument('oauth_token', None):\n return self.authenticate_redirect()\n self.get_authenticated_user(self.async_callback(self._got_user))\n\n def _got_user(self, user):\n if not user:\n raise web.HTTPError(500, 'Twitter auth failed')\n\n db.User().login(\n request_handler = self,\n provider = 'twitter',\n provider_id = '%s' % user.setdefault('id'),\n email = None,\n name = user.setdefault('name'),\n picture = user.setdefault('profile_image_url')\n )\n self.redirect(get_redirect(self))\n\n\ndef set_redirect(rh):\n \"\"\"\n Saves requested URL to cookie, then (after authentication) we know where to go.\n\n \"\"\"\n if rh.get_argument('next', None, strip=True):\n rh.set_secure_cookie('auth_redirect', rh.get_argument('next', default='/', strip=True), 1)\n\n\ndef get_redirect(rh):\n \"\"\"\n Returns requested URL (or / if not set) from cookie.\n\n \"\"\"\n next = rh.get_secure_cookie('auth_redirect')\n if next:\n return next\n return '/'\n\n\nhandlers = [\n ('/auth', ShowAuthPage),\n ('/exit', Exit),\n ('/auth/mobileid', AuthMobileID),\n ('/auth/idcard', AuthIDcard),\n ('/auth/twitter', AuthTwitter),\n ('/auth/(.*)', AuthOAuth2),\n]\n","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":12458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"42187204","text":"#!/usr/bin/python3.5\n\n## 
-------------------------------------------\n## A set is an unordered collection data type that is iterable, mutable and has no duplicate elements.\n## Python's set class represents the mathematical notion of a set. The major advantage of using a set, \n## as opposed to a list, is that it has a highly optimized method for checking whether a specific\n## element is contained in the set. This is based on a data structure known as a hash table.\n\nnums = {1,2,3,4,7,89,65,0}\n\n## Methods\n## 1. add(x): adds x to the set if it is not already present in the set\n## 2. union(s): returns the union of two sets. \npeople = {\"Tony\",\"Eli\",\"GaoShan\"}\nstates = {\"CA\",\"MI\",\"LA\",\"NW\"}\nconcat1 = people.union(states) # via union\nconcat2 = people|states # via '|' operator\n\n## 3. intersection(s): returns the intersection of two sets\n## 4. difference(s): returns the elements in this set but not in s\n## 5. clear(): empties the whole set\n\n## ----------------------------------------------\n## Operators for set\ns1 = set()\ns2 = set()\n\ns1 == s2 # equal\ns1 != s2 # not equal\ns1 <= s2 # s1 is subset of s2\ns1 < s2 # s1 is proper subset of s2\ns1 >= s2 # s1 is superset of s2\ns1 > s2 # s1 is proper superset of s2\ns1 | s2 # the union of s1 and s2\ns1 & s2 # the intersection of s1 and s2\ns1 - s2 # the set of elements in s1 but not in s2\ns1 ^ s2 # the set of elements in precisely one of s1 or s2\n\n\n# Python program to demonstrate working of\n# Set in Python\n \n# Creating two sets\nset1 = set()\nset2 = set()\n \n# Adding elements to set1\nfor i in range(1, 6):\n    set1.add(i)\n \n# Adding elements to set2\nfor i in range(3, 8):\n    set2.add(i)\n \nprint(\"Set1 = \", set1)\nprint(\"Set2 = \", set2)\nprint(\"\\n\")\n \n# Union of set1 and set2\nset3 = set1 | set2# set1.union(set2)\nprint(\"Union of Set1 & Set2: Set3 = \", set3)\n \n# Intersection of set1 and set2\nset4 = set1 & set2# set1.intersection(set2)\nprint(\"Intersection of Set1 & Set2: Set4 = \", set4)\nprint(\"\\n\")\n \n# Checking relation between set3 and set4\nif set3 > set4: # set3.issuperset(set4)\n    print(\"Set3 is superset of Set4\")\nelif set3 < set4: # set3.issubset(set4)\n    print(\"Set3 is subset of Set4\")\nelse : # set3 == set4\n    print(\"Set3 is same as Set4\")\n \n# displaying relation between set4 and set3\nif set4 < set3: # set4.issubset(set3)\n    print(\"Set4 is subset of Set3\")\n    print(\"\\n\")\n \n# difference between set3 and set4\nset5 = set3 - set4\nprint(\"Elements in Set3 and not in Set4: Set5 = \", set5)\nprint(\"\\n\")\n \n# check if set4 and set5 are disjoint sets\nif set4.isdisjoint(set5):\n    print(\"Set4 and Set5 have nothing in common\\n\")\n \n# Removing all the values of set5\nset5.clear()\n \nprint(\"After applying clear on sets Set5: \")\nprint(\"Set5 = \", set5)\n","sub_path":"GeeksforGeeks/python/data_type/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"220349188","text":"#__author__ = 'robin'\r\n#coding = utf-8\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass showPicture:\r\n    def __init__(self,data,tag,w,b):\r\n        self.b = b\r\n        self.w = w\r\n        plt.figure(1)\r\n        plt.title('Pic', size=14)\r\n        plt.xlabel('x', size=14)\r\n        plt.ylabel('y', size=14)\r\n\r\n        xData = np.linspace(0, 5, 100)\r\n        yData = self.expression(xData)\r\n        plt.plot(xData, yData, color='r', label='y1 data')\r\n        for i in range(len(data)):\r\n            if tag[i] == 1:\r\n                plt.scatter(data[i][0],data[i][1],s=50)\r\n            else:\r\n                
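# points with any other tag are drawn with an 'x' marker so the two classes are distinguishable\r\n                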
plt.scatter(data[i][0],data[i][1],marker='x',s=50)\r\n plt.savefig('pic.png',dpi=75)\r\n \r\n def expression(self,x):\r\n y = (-self.b - self.w[0]*x)/self.w[1]\r\n return y\r\n \r\n def show(self):\r\n plt.show()\r\n\r\nclass perceptron:\r\n def __init__(self,x,y,eta=1):\r\n self.x = x\r\n self.y = y\r\n self.w = np.zeros((x.shape[1],1))\r\n self.b = 0\r\n self.eta = eta\r\n \r\n def sign(self,w,b,x):\r\n y = np.dot(x,w)+b\r\n return int(y)\r\n \r\n def train(self):\r\n flag = True\r\n length = len(self.x)\r\n while flag:\r\n count = 0\r\n for i in range(length):\r\n #print self.x[i,:]\r\n tmpY = self.sign(self.w,self.b,self.x[i,:])\r\n if tmpY*self.y[i]<=0:\r\n tmp = self.y[i] * self.eta * self.x[i,:]\r\n tmp = tmp.reshape(self.w.shape)\r\n self.w = self.w + tmp\r\n self.b = self.b + self.eta * self.y[i]\r\n count += 1\r\n #print \"ttt\\n\"\r\n if count == 0:\r\n flag = False\r\n return self.w,self.b\r\n\r\n#\r\nxArray = np.array([3,3,4,3,1,1])\r\nxArray = xArray.reshape((3,2))\r\nyArray = np.array([1,1,-1])\r\n\r\n#\r\nmyPerceptron = perceptron(xArray,yArray,1)\r\nw0,b0 = myPerceptron.train()\r\n\r\n#\r\npicture = showPicture(xArray,yArray,w=w0,b=b0)\r\npicture.show()\r\n\r\n","sub_path":"precenptron/percenptron.py","file_name":"percenptron.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"552835601","text":"#Altaf_Jabraan-CA06\n#201275442\n#November 2017\n# Creating a program which translates Morse Code in to English, while outputting a dictionary of english letters and their Morse equivalent. This program will also join two lists together of Numbers (0-9) and their Morse Code equivalent and create a dictionary which will be outputted in reverse order.\n\n#Sources used to help:\n#http://www.geeksforgeeks.org/morse-code-translator-python/ helped with translation \n\n# Original Morse Code without vowels\nOriginalMorse = {'B':'-...',\n\t 'C':'-.-.',\n 'D':'-..',\n\t 'F':'..-.',\n 'G':'--.',\n 'H':'....',\n 'J':'.---',\n 'K':'-.-',\n\t 'L':'.-..',\n 'M':'--',\n 'N':'-.',\n\t 'P':'.--.',\n 'Q':'--.-',\n\t 'R':'.-.',\n 'S':'...',\n 'T':'-',\n\t 'V':'...-',\n 'W':'.--',\n\t 'X':'-..-',\n 'Y':'-.--',\n 'Z':'--..'}\n#--------------------------------------------------------------------------------------------------------------------------------------------------\n#EXTENDED REQUIREMENT\ndef numberReverse():\n print (\"\\n\")\n start = input(\"Do you want to display the Morse Code of Numbers? 
(Y/N): \")\n start = start.upper()\n if start == \"N\":\n mainmenu()\n if start == \"Y\":\n print (\"Morse Code for Numbers in order 9-0\")\n numList = (\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\")\n morseList = (\"---\",\".----\",\"..---\",\"...--\",\"....-\",\".....\",\"-....\",\"--...\",\"---..\",\"----.\")\n\n #Zip function allows us to join both the Number List and the equivalent Morse Code List together to create a dictionary)\n zippedList = list(zip(numList, morseList))\n\n\n dictionary= dict(zippedList)\n for k in sorted(dictionary,reverse=True): # Iterates keys to sort the numbers in numberical order but in reverse\n print(k,dictionary[k]) #Prints out this column of numbers in reverse from 9 to 0 \n#--------------------------------------------------------------------------------------------------------------------------------------------------\ndef dictChange():\n FullMorse = {'B':'-...',\n\t 'C':'-.-.',\n 'D':'-..',\n\t 'F':'..-.',\n 'G':'--.',\n 'H':'....',\n 'J':'.---',\n 'K':'-.-',\n\t 'L':'.-..',\n 'M':'--',\n 'N':'-.',\n\t 'P':'.--.',\n 'Q':'--.-',\n\t 'R':'.-.',\n 'S':'...',\n 'T':'-',\n\t 'V':'...-',\n 'W':'.--',\n\t 'X':'-..-',\n 'Y':'-.--',\n 'Z':'--..'}\n#Adding vowels and their Morse Code equivalent to a new dictionary based of the original\n FullMorse[\"A\"] = '--' \n FullMorse[\"E\"] = '.'\n FullMorse[\"I\"] = '..'\n FullMorse[\"O\"] = '---'\n FullMorse[\"U\"] = '..-'\n\n return FullMorse #returns the new morse code\n#--------------------------------------------------------------------------------------------------------------------------------------------------\ndef output():\n start = input(\"Do you want to display the Morse Code Dictionaries? (Y/N): \")\n start = start.upper()\n if start == \"N\":\n mainmenu()\n if start == \"Y\":\n Original = OriginalMorse\n print (\"\\n\\n\")\n print (\"Original Morse Code Book (Random Order): \") #Printing out the original Morse Code in a random order\n for j in Original:\n print (j,Original[j]) # Prints the original Morse Code in random order in a column\n\n print(\"\\n\\n\\n\") \n dic = dictChange() # Get's the new Morse Code Dictionary (with vowels) from dictChange() function\n print(\"Updated Morse Code Book with vowels (English Alphabetical Order)\")\n\n for i in sorted(dic): # Sorted function orders letters to be in alphabetical order\n print(i,dic[i]) #Prints out the new Morse Code dictionary in alphabetical order and in a column \n\n numberReverse() # Goes to the Numbers reverse function\n#--------------------------------------------------------------------------------------------------------------------------------------------------\ndef decrypt(code):\n\n\tcode += \" \" #adds an extra space at the end of the code to get the last morse code\n\tdecipher = \"\" # Will store the decrypted code\n\tcharacters = '' # Will store the morse code characters\n\tfor letter in code: #When the letters are in the code\n\t\t# checks for when there is a space\n\t\tif (letter != ' '):\n\t\t\t# A counter is used to keep track of number of spaces\n\t\t\ti = 0\n\t\t\t# stores morse code of a character\n\t\t\tcharacters += letter\n\t\t# in case of space\n\t\telse:\n\t\t\t# i = 1 shows that there is a new morse code character as there is only one space\n\t\t\ti += 1\n\t\t\t# if i = 2 it would show a new word as there would be two spaces between the morse code characters\n\t\t\tif i == 2 :\n\t\t\t\t# adding space to separate words\n\t\t\t\tdecipher += \" \"\n\t\t\telse:\n\n\t\t\t\t# Gets the equivalent keys(letters) using the 
values (Morse Code Characters) and adds them to 'decipher'\n\t\t\t\tdecipher += list(OriginalMorse.keys())[list(OriginalMorse\n\t\t\t\t.values()).index(characters)] #index helps access the elements within the character lists\n\t\t\t\tcharacters = ''\n\treturn decipher\n# Function which will display translated Morse Code in to English\ndef translate():\n message = \".-- .... -.-- .-.. -.-- -. -..- -.-. .-. -.--\"\n print (\"\\n\")\n print (\"Morse Code Received: \",message) # Will print out the Morse Code\n english = decrypt(message) #Accesses 'decipher' from the function above which gets the translated Morse Code\n print (\"\\n\")\n print (\"Translating Morse Code...\")\n print (\"\\n\")\n print (\"Code is: \",english) #Prints out translated Morse Code\n\n output()\n#--------------------------------------------------------------------------------------------------------------------------------------------------\n# Function for Main Menu\ndef mainmenu(): \n print (\"Main Menu\")\n print (\"--------------\")\n print (\"A: Numbers\")\n print (\"B: Strings\")\n print (\"C: Games\")\n print (\"X: Exit\")\n print ()\n option =input(\"Enter An Option (A,B,C, or X): \")\n option = option.upper()#Using modularisation to force input of letter to be uppercase.\n\n if option == \"X\":\n import sys # Helps close the program - source: https://stackoverflow.com/questions/2823472/is-there-a-method-that-tells-my-program-to-quit\n sys.exit(0)\n\n if option == \"B\":\n translate() #loads up starting prompt\n \nmainmenu()\n","sub_path":"MorseCode Translator.py","file_name":"MorseCode Translator.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"343495425","text":"\n\nimport os\nimport cv2\nimport random\ngts_root = \"groundtruth\"\ngts = sorted(os.listdir(gts_root),key=lambda x:int(x.split('.')[0][11:]))\ngtimage_root = \"groundtruthimage\"\ngtimages = sorted(os.listdir(gtimage_root),key=lambda x:int(x.split('.')[0]))\nprint (len(gts),len(gtimages))\nfor j in range(10 ):\n\ttruth = open(os.path.join(gts_root,gts[j]),'r').readlines()\n\tim = cv2.imread(os.path.join(gtimage_root,gtimages[j]))\n\tfor t in truth:\n\t\tinfo = t.split('|')\n\t\tX = int(info[0])\n\t\tY = int(info[1])\n\t\theight = int(info[2])\n\t\twidth = int(info[3])\n\t\tvertices = info[4]\n\t\tgame_type = str(info[5]).strip()\n\t\tstartPoint = (X,Y)\n\t\tendPoint = (X + height,Y+width)\n\t\tcv2.rectangle(im,startPoint,endPoint,(255,0,0),1)\n\t\tprint (game_type)\n\t# cv2.imwrite(\"labelimage/{}\".format(gtimages[i]),im)\n\tcv2.imshow('1',im)\n\tcv2.waitKey(0)\n\n","sub_path":"rawdata/labelimage.py","file_name":"labelimage.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"248074809","text":"from MultiPlanarUNet.logging import ScreenLogger\nimport os\n\n\ndef init_model(build_hparams, logger=None):\n from MultiPlanarUNet import models\n logger = logger or ScreenLogger()\n\n # Build new model of the specified type\n cls_name = build_hparams[\"model_class_name\"]\n logger(\"Creating new model of type '%s'\" % cls_name)\n\n return models.__dict__[cls_name](logger=logger, **build_hparams)\n\n\ndef model_initializer(hparams, continue_training, base_path, logger=None):\n logger = logger or ScreenLogger()\n\n # Init model\n model = init_model(hparams[\"build\"], logger)\n\n if continue_training:\n from MultiPlanarUNet.utils import get_last_model, 
get_lr_at_epoch, \\\n clear_csv_after_epoch\n model_path, epoch = get_last_model(os.path.join(base_path, \"model\"))\n model.load_weights(model_path, by_name=True)\n hparams[\"fit\"][\"init_epoch\"] = epoch+1\n\n # Get the LR at the continued epoch\n lr, name = get_lr_at_epoch(epoch, os.path.join(base_path, \"logs\"))\n hparams[\"fit\"][\"optimizer_kwargs\"][name] = lr\n\n # Remove entries in training.csv file that occurred after the\n # continued epoch\n clear_csv_after_epoch(epoch, os.path.join(base_path, \"logs\", \"training.csv\"))\n\n logger(\"[NOTICE] Training continues from:\\n\"\n \"Model: %s\\n\"\n \"Epoch: %i\\n\"\n \"LR: %s\" % (os.path.split(model_path)[-1], epoch, lr))\n else:\n hparams[\"fit\"][\"init_epoch\"] = 0\n\n return model\n","sub_path":"MultiPlanarUNet/models/model_init.py","file_name":"model_init.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645798546","text":"import cv2\nimport json\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\n\nfrom model import BaseModel\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef init():\n pth = '/usr/local/ev_sdk/model/best.pth'\n model = BaseModel(model_name='rep-a2')\n model.load_state_dict(torch.load(pth)['net'])\n model.to(device)\n model.eval()\n \n return model\n\n\nclass_dict = {0: 'airplane', 1: 'banana', 2: 'baseball', 3: 'bicycle', 4: 'bird', 5: 'book', 6: 'bulldozer', 7: 'cake', 8: 'camel', 9: 'camera', 10: 'cannon', 11: 'car', 12: 'cat', 13: 'chair', 14: 'computer', 15: 'cookie', 16: 'crown', 17: 'dog', 18: 'ear', 19: 'eye', 20: 'fish', 21: 'flower', 22: 'hand', 23: 'hat', 24: 'horse', 25: 'keyboard', 26: 'key', 27: 'knife', 28: 'ladder', 29: 'monkey', 30: 'mouse', 31: 'nose'}\n\nsize = 224\ntrans = transforms.Compose([\n transforms.Resize((size, size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\ndef process_image(net, input_image, args=None):\n img = input_image[..., ::-1]\n img = Image.fromarray(img)\n img = trans(img)\n img = img.unsqueeze(0)\n img = img.to(device)\n \n out = net(img)\n _, pred = torch.max(out, 1)\n \n return json.dumps(\n {'class': class_dict[pred[0].item()]}\n )\n\n# return json.dumps({'class': 'airplane'})\n\n\nif __name__ == '__main__':\n net = init()\n x = torch.randn((112, 112, 3)).numpy()\n print(process_image(net, x))","sub_path":"ev_sdk/src/ji.py","file_name":"ji.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"252692399","text":"import datetime as dt\n\nimport numpy as np\n\nfrom app import logger\nfrom training import munging\n\n\ndef bandit(regressor, training):\n ''' Search for the optimal number of training days. 
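Each candidate lookback window is scored by the mean absolute error of its predictions over the most recent week, and the best-scoring window is kept. 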
'''\n    forward = 7\n    delta = 7\n    # Number of days on which the regressor is trained\n    backwards_days = list(range(max(training.backward - 7, 1), training.backward + delta + 1))\n    # Current time\n    now = dt.datetime.now()\n    # A week ago\n    then = now - dt.timedelta(days=forward)\n    # Longest time ago\n    since = then - dt.timedelta(days=max(backwards_days))\n    # Get all the necessary data\n    try:\n        data = training.station.get_updates(since, now)\n    except:\n        logger.info(\n            'No data available',\n            city=training.station.city.name,\n            station=training.station.name,\n            since=since.date(),\n            until=now.date()\n        )\n        return None\n    # Run a grid search to obtain a regressor\n    df = munging.prepare(data)\n    best = {\n        'moment': now,\n        'backward': training.backward,\n        'forward': forward,\n        'score': np.inf\n    }\n    # Go through all the possible backward/forward combinations\n    for backward in backwards_days:\n        # Define the training timeline\n        timeline = [then - dt.timedelta(days=backward), then, now]\n        # Define the train and test sets\n        train = df.truncate(before=timeline[0], after=timeline[1])\n        test = df.truncate(before=timeline[1], after=timeline[2])\n        if len(train) == 0 or len(test) == 0:\n            logger.warning(\n                'Not enough training data',\n                city=training.station.city.name,\n                station=training.station.name,\n                t0=timeline[0].date(),\n                t1=timeline[1].date(),\n                t2=timeline[2].date()\n            )\n            continue\n        # Split the training set into features and targets\n        X_train, Y_train = munging.split(dataframe=train, target='bikes')\n        # Train the regressor\n        regressor.fit(X_train, Y_train)\n        # Split the test set into features and targets\n        X_test, Y_test = munging.split(dataframe=test, target='bikes')\n        # Predict the outcome on the test set\n        prediction = regressor.predict(X_test)\n        # Compute the mean absolute error\n        score = np.mean(abs(Y_test - prediction))\n        # Compare the obtained score to the current best score\n        if score < best['score']:\n            best['backward'] = backward\n            best['score'] = score\n    # Select data backwards according to the grid search\n    data = df.truncate(before=now - dt.timedelta(days=best['backward']), after=now)\n    try:\n        X, Y = munging.split(dataframe=data, target='bikes')\n        regressor.fit(X, Y)\n        best['regressor'] = regressor\n        return best\n    except ValueError:\n        return None\n","sub_path":"training/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"375874773","text":"import random\n\n\ndef func(board):\n\n    for r in range(10):\n        for c in range(10):\n            if board[r][c]:\n                print(\"#\",end=\"\")\n            else:\n                print(\".\",end=\"\")\n        print(\"\\n\")\n\nboard = [[False for x in range(10)] for y in range(10)]\n\nfor r in range(10):\n    for c in range(10):\n        if(random.random() < 0.3):\n            board[r][c] = True\n\nfunc(board)\n\nwhile True:\n    row = int(input(\"Enter the mine's position (row) : \"))\n    col = int(input(\"Enter the mine's position (column) : \"))\n\n    if board[row-1][col-1]:\n        print(\"Mine!\")\n        board[row-1][col-1] = False\n    else:\n        print(\"Miss!\")\n\n    ans=input(\"Continue? 
(y/n) : \")\n\n if ans == \"n\":\n break\n else:\n func(board)\n","sub_path":"test19.py","file_name":"test19.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"510251736","text":"from tkinter import *\nfrom tkinter import ttk\nfrom CapaDeNegocio.CapaDeNegocio import *\nfrom CapaDePresentacion.PresentacionCompra import PresentacionCompra\n\n\nclass PresentacionCliente:\n\n def __init__(self, idPartido, idAsiento):\n self.idPartido = idPartido\n self.idAsiento = idAsiento\n\n def busquedaRegistro(self):\n tl=Toplevel()\n tl.title(\"Buscar Cliente\")\n tree = ttk.Treeview(tl)\n\n vp=Frame(tl)\n vp.grid(column=0, row=0, padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n\n self.dni=StringVar()\n\n etiquetadni=Label(vp, text= \"DNI: \")\n etiquetadni.grid(column=0, row=0)\n entradadni=Entry(vp, width= 20, textvariable= self.dni)\n entradadni.grid(column=1, row=0)\n\n botonBuscar = Button(vp, text = \"Buscar Cliente\", command= lambda : self.leerCampoDNI())\n botonBuscar.grid(column = 1, row = 3)\n\n botonRegistrar = Button(vp, text = \"Registrar Cliente\", command= lambda : self.datosRegistro())\n botonRegistrar.grid(column = 2, row = 3)\n\n\n def leerCampoDNI(self):\n dni=self.dni.get()\n self.buscar(dni)\n\n def buscar(self, dni):\n\n cdn=CapaDeNegocio()\n cliente= cdn.buscarCliente(dni) #objeto cliente\n\n if cliente:\n #Ventana de Confirmacion de datos de cliente\n tl=Toplevel()\n tl.title(\"Datos del Cliente\")\n vp=Frame(tl)\n vp.grid(column=0, row=0, padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n self.nombre = StringVar()\n self.dni = StringVar()\n self.apellido = StringVar()\n self.mail = StringVar()\n self.nombre.set(cliente.nombre)\n self.dni.set(int(cliente.dni))\n self.apellido.set(cliente.apellido)\n self.mail.set(cliente.mail)\n\n etiquetadni=Label(vp, text= \"DNI: \")\n etiquetadni.grid(column=0, row=2)\n entradadni=Label(vp, width= 20, textvariable= self.dni)\n entradadni.grid(column=1, row=2)\n\n etiquetanombre=Label(vp, text= \"Nombre: \")\n etiquetanombre.grid(column=0, row=0)\n entradanombre=Label(vp, width= 20, textvariable= self.nombre)\n entradanombre.grid(column=1, row=0)\n\n etiquetaapellido=Label(vp, text= \"Apellido: \")\n etiquetaapellido.grid(column=0, row=1)\n entradaapellido=Label(vp, width= 20, textvariable= self.apellido)\n entradaapellido.grid(column=1, row=1)\n\n etiquetamail=Label(vp, text= \"Mail: \")\n etiquetamail.grid(column=0, row=3)\n entradamail=Label(vp, width= 30, textvariable= self.mail)\n entradamail.grid(column=1, row=3)\n\n\n\n botonCancelar=Button(vp, text=\"Cancelar\", command=tl.destroy)\n botonCancelar.grid(column=0, row=5)\n\n compra = PresentacionCompra()\n\n botonComprar=Button(vp, text=\"Comprar\", command=lambda:compra.confirmarCompra(cliente, self.idPartido, self.idAsiento ))\n botonComprar.grid(column=1, row=5)\n\n else:\n # ventana de confirmación\n tl=Toplevel()\n tl.title(\"Error\")\n vp=Frame(tl)\n vp.grid(column=0, row=0, padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n etique=Label(vp, text=\"Cliente no encontrado.\")\n etique.grid(column=1, row=1)\n botoncerrar=Button(vp, text=\"Aceptar\", command=tl.destroy)\n botoncerrar.grid(column=1, row=2)\n\n def datosRegistro(self):\n self.nombre=StringVar()\n self.dni=StringVar()\n self.apellido= StringVar()\n self.mail = StringVar()\n\n #Ventana de Registro de datos de cliente\n tl=Toplevel()\n tl.title(\"Registro del Cliente\")\n vp=Frame(tl)\n vp.grid(column=0, row=0, 
padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n\n etiquetadni=Label(vp, text= \"DNI: \")\n etiquetadni.grid(column=0, row=2)\n entradadni=Entry(vp, width= 20, textvariable= self.dni)\n entradadni.grid(column=1, row=2)\n\n etiquetanombre=Label(vp, text= \"Nombre: \")\n etiquetanombre.grid(column=0, row=0)\n entradanombre=Entry(vp, width= 20, textvariable= self.nombre)\n entradanombre.grid(column=1, row=0)\n\n etiquetaapellido=Label(vp, text= \"Apellido: \")\n etiquetaapellido.grid(column=0, row=1)\n entradaapellido=Entry(vp, width= 20, textvariable= self.apellido)\n entradaapellido.grid(column=1, row=1)\n\n etiquetamail=Label(vp, text= \"Mail: \")\n etiquetamail.grid(column=0, row=3)\n entradamail=Entry(vp, width= 30, textvariable= self.mail)\n entradamail.grid(column=1, row=3)\n\n\n\n botonCancelar=Button(vp, text=\"Cancelar\", command=tl.destroy)\n botonCancelar.grid(column=1, row=5)\n\n botonRegistrar=Button(vp, text=\"Registrar\", command=lambda: self.registrar())\n botonRegistrar.grid(column=0, row=5)\n\n def registrar(self):\n nombre=self.nombre.get()\n apellido=self.apellido.get()\n dni=self.dni.get()\n mail=self.mail.get()\n\n #Instancia e inicializa el cliente\n cliente= Cliente(dni, nombre, apellido, mail)\n cdn=CapaDeNegocio()\n registro = cdn.altaCliente(cliente)\n\n if registro:\n\n # ventana de confirmación\n tl=Toplevel()\n tl.title(\"Cliente registrado\")\n vp=Frame(tl)\n vp.grid(column=0, row=0, padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n etique=Label(vp, text=\"Cliente registrado con éxito.\")\n etique.grid(column=1, row=1)\n botoncerrar=Button(vp, text=\"Aceptar\", command= lambda:self.buscar(cliente.dni))\n botoncerrar.grid(column=1, row=2)\n\n else:\n # ventana de confirmación\n tl=Toplevel()\n tl.title(\"Error\")\n vp=Frame(tl)\n vp.grid(column=0, row=0, padx=(100,100), pady=(20,20), sticky=(N, S, E, W))\n etique=Label(vp, text=\"Ha ocurrido un error.\")\n etique.grid(column=1, row=1)\n botoncerrar=Button(vp, text=\"Aceptar\", command=tl.destroy)\n botoncerrar.grid(column=1, row=2)\n\n\n\n","sub_path":"CapaDePresentacion/PresentacionCliente.py","file_name":"PresentacionCliente.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634551872","text":"#!/bin/python\n# coding:utf-8\n\nfrom xlwt import *\n# set style ############################\n# 字体设置 :\n# 标题1 font1\nfont1 = Font()\nfont1.height = 0x0208\nfont1.bold = True\nfont1.size = 45\nfont1.name = u'微软雅黑'\n\n# 标题2 font2\nfont2 = Font()\nfont2.height = 0x0128\nfont2.bold = True\nfont2.size = 40\nfont2.name = u'微软雅黑'\n\n# 标题3 font3\nfont3 = Font()\nfont3.bold = True\nfont3.size = 25\nfont3.name = u'微软雅黑'\n\n# 正文 font4\nfont4 = Font()\nfont4.size = 25\nfont4.name = u'微软雅黑'\n\n# 红色的正文 font5\nfont5 = Font()\nfont5.size = 25\nfont5.name = u'微软雅黑'\nfont5.colour_index = 0x2\n\n# 设置对齐方式 :\n# 左对齐\nalignmentLeft = Alignment()\nalignmentLeft.horz = Alignment.HORZ_LEFT\nalignmentLeft.vert = Alignment.VERT_CENTER\n# 居中对齐\nalignmentCenter = Alignment()\nalignmentCenter.horz = Alignment.HORZ_CENTER\nalignmentCenter.vert = Alignment.VERT_CENTER\n# 右对齐\nalignmentRight = Alignment()\nalignmentRight.horz = Alignment.HORZ_RIGHT\nalignmentRight.vert = Alignment.VERT_CENTER\n\n# 设置边框:\nborder1 = Borders()\nborder1.left = Borders.THIN\nborder1.right = Borders.THIN\nborder1.top = Borders.THIN\nborder1.bottom = Borders.THIN\nborder1.left_colour = 0x08\nborder1.right_colour = 0x08\nborder1.top_colour = 0x08\nborder1.bottom_colour = 
0x08\n\n\n# 设置数字格式\n# 整型数 带千分位符\nintFormat = Style.StyleCollection._std_num_fmt_list[3]\n# 浮点型 2位小数 带千分位符\nfloatFormat = Style.StyleCollection._std_num_fmt_list[4]\n# 百分数\npercentFormat = Style.StyleCollection._std_num_fmt_list[10]\n\n# 设置警告格式(左对齐)\nwarning_left = XFStyle()\nwarning_left.font = font5\nwarning_left.alignment = alignmentLeft\nwarning_left.borders = border1\n\n# 设置一级标题格式(左对齐)\ntitle1Left = XFStyle()\ntitle1Left.font = font1\ntitle1Left.alignment = alignmentLeft\nsubHead = XFStyle()\nsubHead.font = font4\nsubHead.alignment = alignmentLeft\n\n\n# 设置二级标题格式(居中)\ntitle2Center = XFStyle()\ntitle2Center.font = font2\ntitle2Center.alignment = alignmentCenter\ntitle2Center.borders = border1\n\n# 设置二级标题格式(左对齐)\ntitle2Left = XFStyle()\ntitle2Left.font = font2\ntitle2Left.alignment = alignmentLeft\ntitle2Left.borders = border1\n\n# 设置三级标题(左对齐)\ntitle3Left = XFStyle()\ntitle3Left.font = font3\ntitle3Left.alignment = alignmentLeft\ntitle3Left.borders = border1\n# 设置三级标题(右对齐)\n\n# 设置三级标题(居中对齐)\ntitle3Center = XFStyle()\ntitle3Center.font = font3\ntitle3Center.alignment = alignmentCenter\ntitle3Center.borders = border1\n\n\n# 设置正文格式()\n# 正文左对齐\ncontentLeft = XFStyle()\ncontentLeft.font = font4\ncontentLeft.alignment = alignmentLeft\ncontentLeft.borders = border1\n\n# 正文右对齐\ncontentRight = XFStyle()\ncontentRight.font = font4\ncontentRight.alignment = alignmentRight\ncontentRight.borders = border1\n\n# 正文浮点数\nnumFloat = XFStyle()\nnumFloat.font = font4\nnumFloat.alignment = alignmentRight\nnumFloat.num_format_str = floatFormat\nnumFloat.borders = border1\n\n# 正文整数\nnumInt = XFStyle()\nnumInt.font = font4\nnumInt.alignment = alignmentRight\nnumInt.num_format_str = intFormat\nnumInt.borders = border1\n\n# 百分数格式\nnumPercent = XFStyle()\nnumPercent.font = font4\nnumPercent.alignment = alignmentRight\nnumPercent.num_format_str = percentFormat\nnumPercent.borders = border1\n\n\ndef set_cloumn_width(ws, maxlen):\n '''\n 设置列宽 ,maxlen 为table的最大的列数,列从1开始\n @param\n ws :excel工作簿\n maxlen:需要设置格式的列数\n '''\n for i in xrange(0, maxlen):\n ws.col(i).width = 0x1800 + i\n\n\nif __name__ == \"__main__\":\n wb = Workbook()\n ws = wb.add_sheet('result')\n ws.write(1, 1, 'tina,healty', title1Left)\n wb.save('tina.xls')\n","sub_path":"walrus/pylib/pyexcel.py","file_name":"pyexcel.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617115807","text":"\"\"\"\nCopyright (c) 2004-Present Pivotal Software, Inc.\n\nThis program and the accompanying materials are made available under\nthe terms of the under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\n\nimport unittest2 as unittest\n\nfrom mpp.models.mpp_tc import _MPPMetaClassType\nfrom mpp.models.mpp_tc import MPPDUT\nfrom mpp.models import MPPTestCase\n\n# Need to import hidden models for isinstance verification\nfrom mpp.models.mpp_tc import __gpdbMPPTestCase__\nfrom mpp.models.mpp_tc import __hawqMPPTestCase__\n\nclass MockMPPMetaClassTypeGPDB(_MPPMetaClassType):\n \"\"\" Mock 
MPPMetaClassTypeGPDB to reset DUT \"\"\"\n    _MPPMetaClassType.DUT = MPPDUT('gpdb', '1.0.0.0')\n\n@unittest.skip('mock')\nclass MockMPPTestCaseGPDB(MPPTestCase):\n    \"\"\" Mock MPPTestCaseGPDB to test MRO and get_version \"\"\"\n    __metaclass__ = MockMPPMetaClassTypeGPDB\n    def test_do_stuff(self):\n        self.assertTrue(True)\n\nclass MockMPPMetaClassTypeHAWQ(_MPPMetaClassType):\n    \"\"\" Mock MPPMetaClassTypeHAWQ to reset DUT \"\"\"\n    _MPPMetaClassType.DUT = MPPDUT('hawq', '1.1.0.0')\n\n@unittest.skip('mock')\nclass MockMPPTestCaseHAWQ(MPPTestCase):\n    \"\"\" Mock MPPTestCaseHAWQ to test MRO and get_version \"\"\"\n    __metaclass__ = MockMPPMetaClassTypeHAWQ\n    def test_do_stuff(self):\n        self.assertTrue(True)\n\nclass MockMPPMetaClassTypeGPDB42(_MPPMetaClassType):\n    _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.2')\n\n@unittest.skip('mock')\nclass MockMPPTestCaseGPDB42(MPPTestCase):\n    __metaclass__ = MockMPPMetaClassTypeGPDB42\n\n    def test_do_stuff(self):\n        (product, version) = self.get_product_version()\n        self.assertEquals(product, 'gpdb')\n        self.assertEquals(version, '4.2')\n\n\n\nclass MPPTestCaseTests(unittest.TestCase):\n\n    \n    \n    def test_get_product_version(self):\n        gpdb_test_case = MockMPPTestCaseGPDB('test_do_stuff')\n        self.assertEqual(gpdb_test_case.__class__.__product__, 'gpdb')\n        self.assertEqual(gpdb_test_case.__class__.__version_string__, '1.0.0.0')\n        self.assertTrue(isinstance(gpdb_test_case, __gpdbMPPTestCase__))\n        self.assertFalse(isinstance(gpdb_test_case, __hawqMPPTestCase__))\n        \n        hawq_test_case = MockMPPTestCaseHAWQ('test_do_stuff')\n        self.assertEqual(hawq_test_case.__class__.__product__, 'hawq')\n        self.assertEqual(hawq_test_case.__class__.__version_string__, '1.1.0.0')\n        self.assertTrue(isinstance(hawq_test_case, __hawqMPPTestCase__))\n        self.assertFalse(isinstance(hawq_test_case, __gpdbMPPTestCase__))\n\n@unittest.skip('mock')\nclass MockMPPTestCaseMetadata(MPPTestCase):\n\n    def test_without_metadata(self):\n        self.assertTrue(True)\n\n    def test_with_metadata(self):\n        \"\"\"\n        @gather_logs_on_failure True\n        @restart_on_fatal_failure True\n        @db_name blah\n        \"\"\"\n        self.assertTrue(True)\n\nclass MPPTestCaseMetadataTests(unittest.TestCase):\n\n    def test_default_metadata(self):\n        mpp_test_case = MockMPPTestCaseMetadata('test_without_metadata')\n        self.assertFalse(mpp_test_case.gather_logs_on_failure)\n        self.assertFalse(mpp_test_case.restart_on_fatal_failure)\n    \n    def test_with_metadata(self):\n        mpp_test_case = MockMPPTestCaseMetadata('test_with_metadata')\n        self.assertTrue(mpp_test_case.gather_logs_on_failure)\n        self.assertTrue(mpp_test_case.restart_on_fatal_failure)\n\n    def test_out_dir(self):\n        self.assertEquals(MockMPPTestCaseMetadata.out_dir, 'output/')\n        self.assertEquals(MockMPPTestCaseMetadata.get_out_dir(), os.path.join(os.path.dirname(__file__), 'output/'))\n\n    def test_db_name_metadata(self):\n        mpp_test_case = MockMPPTestCaseMetadata('test_with_metadata')\n        self.assertEquals(mpp_test_case.db_name, 'blah')\n    \n    def test_db_name_default(self):\n        mpp_test_case = MockMPPTestCaseMetadata('test_without_metadata')\n        self.assertEquals(mpp_test_case.db_name, None)\n\n@unittest.skip('mock')\nclass MockMPPTestCaseGPOPT(MPPTestCase):\n    def test_without_metadata(self):\n        self.assertTrue(True)\n    \n    def test_with_metadata_higher(self):\n        \"\"\"\n        @gpopt 2.240\n        \"\"\"\n        self.assertTrue(True)\n    \n    def test_with_metadata_lower(self):\n        \"\"\"\n        @gpopt 1.0\n        \"\"\"\n        self.assertTrue(True)\n    \n    def test_with_metadata_same(self):\n        \"\"\"\n        @gpopt 2.200.1\n        \"\"\"\n        self.assertTrue(True)\n\nclass 
MPPTestCaseGPOPTTests(unittest.TestCase):\n\n def test_without_metadata(self):\n\n # Test with deployed gpopt version (simulate hawq or gpdb with optimizer)\n MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = \"2.200.1\"\n mpp_test_case = MockMPPTestCaseGPOPT('test_without_metadata')\n self.assertTrue(mpp_test_case.skip is None)\n \n # Test without deployed gpopt version (simulate gpdb without optimizer)\n MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)\n mpp_test_case = MockMPPTestCaseGPOPT('test_without_metadata')\n self.assertTrue(mpp_test_case.skip is None)\n \n def test_with_metadata_higher(self):\n\n # Test with deployed gpopt version (simulate hawq or gpdb with optimizer)\n MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = \"2.200.1\"\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_higher')\n self.assertTrue(mpp_test_case.skip is not None)\n \n # Test without deployed gpopt version (simulate gpdb without optimizer)\n MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_higher')\n self.assertTrue(mpp_test_case.skip is not None)\n \n def test_with_metadata_lower(self):\n\n # Test with deployed gpopt version (simulate hawq or gpdb with optimizer)\n MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = \"2.200.1\"\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_lower')\n self.assertTrue(mpp_test_case.skip is None)\n \n # Test without deployed gpopt version (simulate gpdb without optimizer)\n MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_lower')\n self.assertTrue(mpp_test_case.skip is not None)\n \n def test_with_metadata_same(self):\n\n # Test with deployed gpopt version (simulate hawq or gpdb with optimizer)\n MockMPPTestCaseGPOPT.__product_environment__['gpopt'] = \"2.200.1\"\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_same')\n self.assertTrue(mpp_test_case.skip is None)\n \n # Test without deployed gpopt version (simulate gpdb without optimizer)\n MockMPPTestCaseGPOPT.__product_environment__.pop('gpopt', None)\n mpp_test_case = MockMPPTestCaseGPOPT('test_with_metadata_same')\n self.assertTrue(mpp_test_case.skip is not None)\n \n","sub_path":"src/test/tinc/tincrepo/mpp/models/test/mpp_tc/test_mpp_test_case.py","file_name":"test_mpp_test_case.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211586809","text":"import cv2\r\nimport numpy as np\r\nimport os \r\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\r\nrecognizer.read('traineddata.yml')\r\ncascade = cv2.CascadeClassifier('D:\\haarcascade_frontalface_default.xml')\r\ncam = cv2.VideoCapture(0)\r\nwhile True:\r\n ret, img =cam.read()\r\n #img = cv2.flip(img, -1) # Flip vertically\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n \r\n faces = cascade.detectMultiScale( \r\n gray,\r\n scaleFactor = 1.2,\r\n minNeighbors = 5\r\n )\r\n for(x,y,w,h) in faces:\r\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\r\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\r\n if id==1 and confidence<75:\r\n \r\n cv2.putText(\r\n img, \r\n 'user'+str(confidence), \r\n (x+5,y-5), \r\n cv2.FONT_HERSHEY_COMPLEX,\r\n 1, \r\n (255,255,255), \r\n 2\r\n )\r\n else:\r\n cv2.putText(\r\n img, \r\n 'unknown'+str(confidence), \r\n (x+5,y-5), \r\n cv2.FONT_HERSHEY_COMPLEX,\r\n 1, \r\n (255,255,255), \r\n 2\r\n )\r\n 
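# show the annotated frame; cv2.waitKey(1) == 13 exits the loop when Enter is pressed\r\n    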
cv2.imshow(\"image\",img)\r\n    if cv2.waitKey(1)==13:\r\n        break\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"realfacerecognition.py","file_name":"realfacerecognition.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415797746","text":"import numpy as np\nimport pypcd\n\ndef transform(filename):\n    pc = pypcd.PointCloud.from_path(filename)\n    data = [pc.pc_data['x'],pc.pc_data['y'],pc.pc_data['z']]\n    pointcloud = np.transpose(data)\n    # initialize 32x32x32 voxel grid\n    voxel_grid = np.zeros((32, 32, 32), dtype=int)\n    VOXEL_SIZE = 0.2/32\n\n    for i in range(0, len(pointcloud)):\n        x = 0\n        y = 0\n        z = 0\n        for x_n in range(32):\n            vg_min = -0.1+x_n*VOXEL_SIZE\n            vg_max = vg_min+VOXEL_SIZE\n            if vg_min= allele2:\n                return alleles[0], alleles[1]\n            else:\n                return alleles[1], alleles[0]\n        else:\n            return 'N', 'N'\n\n###############################################################################\nif __name__ == '__main__':\n    parser = get_parser()\n    args = vars(parser.parse_args())\n    \n    # Change the working directory if necessary\n    if args['path'] is not None:\n        os.chdir(args['path'])\n    if args['input'] is None:\n        warning(\"No input file.\")\n    if args['output'] is None:\n        warning(\"No output file.\")\n    \n    print(version())\n    \n    st = timeit.default_timer()\n    \n    # Load file\n    checkFile(args['input'], args['modei'])\n    \n    # Convert\n    if args['modei'] == 1:\n        findAllelesDsf(args['input'], args['output'])\n    elif args['modei'] == 2:\n        findAllelesHmp(args['input'], args['output'])\n    else:\n        warning(\"Unrecognized input/output mode(s).\")\n    \n    et = timeit.default_timer()\n\n    print(\"Conversion finished.\")\n    print(\"Time: %.2f min.\" % ((et - st)/60))\n","sub_path":"src/find_alleles.py","file_name":"find_alleles.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"241043287","text":"# -*- coding: utf-8; -*-\n#\n# This file is part of Superdesk.\n#\n# Copyright 2013, 2014, 2015, 2016, 2017 Sourcefabric z.u. 
and contributors.\n#\n# For the full copyright and license information, please see the\n# AUTHORS and LICENSE files distributed with this source code, or\n# at https://www.sourcefabric.org/superdesk/license\n\n\"\"\"Superdesk Planning - Agenda\"\"\"\n\nimport re\nimport superdesk\nfrom superdesk import get_resource_service\nfrom superdesk.metadata.utils import generate_guid\nfrom superdesk.metadata.item import GUID_NEWSML\nfrom apps.archive.common import set_original_creator, get_user\nfrom superdesk.errors import SuperdeskApiError\nfrom superdesk.users.services import current_user_has_privilege\nfrom superdesk.notification import push_notification\nfrom .planning import planning_schema\nfrom eve.utils import config\n\n\nclass AgendaService(superdesk.Service):\n \"\"\"Service class for the Agenda model\"\"\"\n\n def on_create(self, docs):\n \"\"\"Set default metadata\"\"\"\n for doc in docs:\n doc['guid'] = generate_guid(type=GUID_NEWSML)\n doc['planning_type'] = 'agenda'\n set_original_creator(doc)\n self._validate_unique_agenda(doc, {})\n\n def on_created(self, docs):\n for doc in docs:\n push_notification(\n 'agenda:created',\n item=str(doc[config.ID_FIELD]),\n user=str(doc.get('original_creator', ''))\n )\n\n def on_update(self, updates, original):\n if 'name' in updates and not current_user_has_privilege('planning_agenda_management'):\n raise SuperdeskApiError.forbiddenError('Insufficient privileges to update agenda.')\n\n user = get_user()\n if user and user.get(config.ID_FIELD):\n updates['version_creator'] = user[config.ID_FIELD]\n\n self._validate_unique_agenda(updates, original)\n\n def on_updated(self, updates, original):\n push_notification(\n 'agenda:updated',\n item=str(original[config.ID_FIELD]),\n user=str(updates.get('version_creator', ''))\n )\n\n def on_deleted(self, doc):\n # Make sure to remove the associated plannings from this agenda\n if 'planning_items' in doc:\n planning_service = get_resource_service('planning')\n planning_service.delete({'_id': {'$in': doc['planning_items']}})\n\n def _validate_unique_agenda(self, updates, original):\n \"\"\"Validate unique name for agenda\n\n :param dict updates:\n :param dict original:\n :raises SuperdeskApiError.badRequestError: If Agenda name is not unique\n \"\"\"\n name = updates.get('name', original.get('name'))\n if name:\n query = {\n 'planning_type': 'agenda',\n 'name': re.compile('^{}$'.format(re.escape(name.strip())), re.IGNORECASE)\n }\n\n if original:\n query[superdesk.config.ID_FIELD] = {'$ne': original.get(superdesk.config.ID_FIELD)}\n\n cursor = self.get_from_mongo(req=None, lookup=query)\n if cursor.count():\n raise SuperdeskApiError.badRequestError(message='Agenda with name {} already exists.'.format(name),\n payload={'name': {'unique': 1}})\n\n\nclass AgendaResource(superdesk.Resource):\n url = 'agenda'\n schema = planning_schema\n datasource = {\n 'source': 'planning',\n 'search_backend': 'elastic',\n 'elastic_filter': {'term': {'planning_type': 'agenda'}}\n }\n\n resource_methods = ['GET', 'POST']\n item_methods = ['GET', 'PATCH', 'PUT']\n public_methods = ['GET']\n\n # PATCH is set to `planning` so that planning_items may be updated when adding new planning items\n # This check is done in the on_update method of the service\n privileges = {\n 'POST': 'planning_agenda_management',\n 'PATCH': 'planning'\n }\n","sub_path":"server/planning/agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"496794318","text":"import numpy as np\nfrom OR_Tool import OR_Tool\nNUM_OF_CUSTOMERS = 100\n\nfile_state = open('data_state_100_00.npy', 'wb')\nfile_or_route = open('data_or_route_100_00.npy', 'wb')\nfile_or_cost = open('data_or_cost_100_00.npy', 'wb')\nfor i in range(1000):\n print(i)\n depot_location = np.array([0, 0])\n data_state = np.random.rand(NUM_OF_CUSTOMERS, 2)\n data_state = np.vstack((data_state, np.array([0, 0])))\n depot_idx = NUM_OF_CUSTOMERS\n or_model = OR_Tool(data_state, depot_location, depot_idx)\n or_route, or_cost = or_model.solve()\n or_route = np.asarray(or_route, dtype=np.int32)\n or_cost = np.asarray(or_cost, dtype=np.float32)\n data_state = np.asarray([data_state], dtype=np.float32)\n if i == 0:\n out_data_state = data_state\n out_data_or_route = or_route\n out_data_or_cost = or_cost\n else:\n out_data_state = np.append(out_data_state, data_state, axis=0)\n out_data_or_route = np.vstack((out_data_or_route, or_route))\n out_data_or_cost = np.append(out_data_or_cost, or_cost)\nnp.save(file_state, out_data_state)\nnp.save(file_or_route, out_data_or_route)\nnp.save(file_or_cost, out_data_or_cost)\nfile_state.close()\nfile_or_route.close()\nfile_or_cost.close()\n","sub_path":"A3C/CreateData_100_00.py","file_name":"CreateData_100_00.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"553596025","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# TODO: We need to use sshmanager instead of executing bare commands\n# bp link: https://blueprints.launchpad.net/fuel/+spec/sshmanager-integration\n\nfrom __future__ import division\nimport re\n\nfrom devops.error import TimeoutError\nfrom devops.helpers.helpers import tcp_ping\nfrom devops.helpers.helpers import wait\nfrom proboscis import asserts\nfrom proboscis import test\n\nfrom fuelweb_test.helpers import checkers\nfrom fuelweb_test.helpers.decorators import log_snapshot_after_test\nfrom fuelweb_test.helpers import os_actions\nfrom fuelweb_test import settings\nfrom fuelweb_test import logger\nfrom fuelweb_test.tests.base_test_case import SetupEnvironment\nfrom fuelweb_test.tests.base_test_case import TestBasic\n\n\n@test(groups=[\"rh\", \"rh.ha\", \"rh.basic\"])\nclass RhHA(TestBasic):\n \"\"\"RH-based compute tests\"\"\"\n\n @staticmethod\n def wait_for_slave_provision(node_ip, timeout=10 * 60):\n \"\"\"Wait for a target node provision.\n\n :param node_ip: IP address of target node.\n :param timeout: Timeout for wait function.\n \"\"\"\n wait(lambda: tcp_ping(node_ip, 22),\n timeout=timeout, timeout_msg=\"Node doesn't appear in network\")\n\n @staticmethod\n def wait_for_slave_network_down(node_ip, timeout=10 * 20):\n \"\"\"Wait for a target node network down.\n\n :param node_ip: IP address of target node.\n :param timeout: Timeout for wait function.\n \"\"\"\n wait(lambda: (not tcp_ping(node_ip, 22)), interval=1,\n timeout=timeout, timeout_msg=\"Node doesn't gone offline\")\n\n def warm_restart_nodes(self, devops_nodes):\n logger.info('Reboot (warm restart) nodes '\n '{0}'.format([n.name for n in devops_nodes]))\n self.warm_shutdown_nodes(devops_nodes)\n self.warm_start_nodes(devops_nodes)\n\n def warm_shutdown_nodes(self, devops_nodes):\n logger.info('Shutting down (warm) nodes '\n '{0}'.format([n.name for n in devops_nodes]))\n for node in devops_nodes:\n logger.debug('Shutdown node {0}'.format(node.name))\n with self.fuel_web.get_ssh_for_node(node.name) as remote:\n remote.execute('/sbin/shutdown -Ph now & exit')\n\n for node in devops_nodes:\n ip = self.fuel_web.get_node_ip_by_devops_name(node.name)\n logger.info('Wait a {0} node offline status'.format(node.name))\n try:\n self.wait_for_slave_network_down(ip)\n except TimeoutError:\n asserts.assert_false(\n tcp_ping(ip, 22),\n 'Node {0} has not become '\n 'offline after warm shutdown'.format(node.name))\n node.destroy()\n\n def warm_start_nodes(self, devops_nodes):\n logger.info('Starting nodes '\n '{0}'.format([n.name for n in devops_nodes]))\n for node in devops_nodes:\n node.start()\n for node in devops_nodes:\n ip = self.fuel_web.get_node_ip_by_devops_name(node.name)\n try:\n self.wait_for_slave_provision(ip)\n except TimeoutError:\n asserts.assert_true(\n tcp_ping(ip, 22),\n 'Node {0} has not become online '\n 'after warm start'.format(node.name))\n logger.debug('Node {0} became online.'.format(node.name))\n\n @staticmethod\n def connect_rh_image(slave):\n \"\"\"Upload RH image into a target node.\n\n :param slave: Target node name.\n \"\"\"\n path = settings.RH_IMAGE_PATH + settings.RH_IMAGE\n\n def find_system_drive(node):\n drives = node.disk_devices\n for drive in drives:\n if drive.device == 'disk' and 'system' in drive.volume.name:\n return drive\n raise Exception('Can not find suitable volume to proceed')\n\n system_disk = find_system_drive(slave)\n vol_path = system_disk.volume.get_path()\n\n try:\n 
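# copy the RH image file onto the node's system volume; the upload can be slow for large images\n            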
system_disk.volume.upload(path)\n except Exception as e:\n logger.error(e)\n logger.debug(\"Volume path: {0}\".format(vol_path))\n logger.debug(\"Image path: {0}\".format(path))\n\n @staticmethod\n def verify_image_connected(remote):\n \"\"\"Check that correct image connected to a target node system volume.\n\n :param remote: Remote node to proceed.\n \"\"\"\n cmd = \"cat /etc/redhat-release\"\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, \"Image doesn't connected\")\n\n @staticmethod\n def register_rh_subscription(remote):\n \"\"\"Register RH subscription.\n\n :param remote: Remote node to proceed.\n \"\"\"\n reg_command = (\n \"/usr/sbin/subscription-manager register \"\n \"--username={0} --password={1}\".format(\n settings.RH_LICENSE_USERNAME,\n settings.RH_LICENSE_PASSWORD)\n )\n\n if settings.RH_SERVER_URL:\n reg_command = reg_command + \" --serverurl={0}\".format(\n settings.RH_SERVER_URL)\n\n if settings.RH_REGISTERED_ORG_NAME:\n reg_command = reg_command + \" --org={0}\".format(\n settings.RH_REGISTERED_ORG_NAME)\n\n if settings.RH_RELEASE:\n reg_command = reg_command + \" --release={0}\".format(\n settings.RH_RELEASE)\n\n if settings.RH_ACTIVATION_KEY:\n reg_command = reg_command + \" --activationkey={0}\".format(\n settings.RH_ACTIVATION_KEY)\n\n if settings.RH_POOL_HASH:\n result = remote.execute(reg_command)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'RH registration failed')\n reg_pool_cmd = (\"/usr/sbin/subscription-manager \"\n \"attach --pool={0}\".format(settings.RH_POOL_HASH))\n result = remote.execute(reg_pool_cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not attach node to subscription pool')\n else:\n cmd = reg_command + \" --auto-attach\"\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'RH registration with auto-attaching failed')\n\n @staticmethod\n def enable_rh_repos(remote):\n \"\"\"Enable Red Hat mirrors on a target node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n cmd = (\"yum-config-manager --enable rhel-{0}-server-optional-rpms && \"\n \"yum-config-manager --enable rhel-{0}-server-extras-rpms &&\"\n \"yum-config-manager --enable rhel-{0}-server-rh-common-rpms\"\n .format(settings.RH_MAJOR_RELEASE))\n\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Enabling RH repos failed')\n\n @staticmethod\n def set_hostname(remote, host_number=1):\n \"\"\"Set hostname with domain for a target node.\n\n :param host_number: Node index nubmer (1 by default).\n :param remote: Remote node for proceed.\n \"\"\"\n hostname = \"rh-{0}.test.domain.local\".format(host_number)\n cmd = (\"sysctl kernel.hostname={0} && \"\n \"echo '{0}' > /etc/hostname\".format(hostname))\n\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Setting up hostname for node failed')\n\n @staticmethod\n def puppet_apply(puppets, remote):\n \"\"\"Apply list of puppets on a target node.\n\n :param puppets: of puppets.\n :param remote: Remote node for proceed.\n \"\"\"\n logger.debug(\"Applying puppets...\")\n for puppet in puppets:\n logger.debug('Applying: {0}'.format(puppet))\n result = remote.execute(\n 'puppet apply -vd -l /var/log/puppet.log {0}'.format(puppet))\n if result['exit_code'] != 0:\n logger.debug(\"Failed on task: {0}\".format(puppet))\n logger.debug(\"STDERR:\\n {0}\".format(result['stderr']))\n 
logger.debug(\"STDOUT:\\n {0}\".format(result['stdout']))\n asserts.assert_equal(\n result['exit_code'], 0, 'Puppet run failed. '\n 'Task: {0}'.format(puppet))\n\n def apply_first_part_puppet(self, remote):\n \"\"\"Apply first part of puppet modular tasks on terget node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n first_puppet_run = [\n \"/etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/globals/globals.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/tools/tools.pp\"\n ]\n\n self.puppet_apply(first_puppet_run, remote)\n\n @staticmethod\n def apply_networking_puppet(remote):\n \"\"\"Apply networking puppet on a target node.\n\n Puppet task will executed in screen to prevent disconnections while\n interfaces configuring.\n\n :param remote: Remote node for proceed.\n \"\"\"\n iface_check = \"test -f /etc/sysconfig/network-scripts/ifcfg-eth0\"\n result = remote.execute(iface_check)\n if result['exit_code'] == 0:\n remove_iface = \"rm -f /etc/sysconfig/network-scripts/ifcfg-eth0\"\n result = remote.execute(remove_iface)\n logger.debug(result)\n prep = \"screen -dmS netconf\"\n result = remote.execute(prep)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Can not create screen')\n net_puppet = ('screen -r netconf -p 0 -X stuff '\n '$\"puppet apply -vd -l /var/log/puppet.log '\n '/etc/puppet/modules/osnailyfacter/modular/'\n 'netconfig/netconfig.pp && touch ~/success ^M\"')\n result = remote.execute(net_puppet)\n\n if result['exit_code'] != 0:\n logger.debug(\"STDERR:\\n {0}\".format(result['stderr']))\n logger.debug(\"STDOUT:\\n {0}\".format(result['stdout']))\n asserts.assert_equal(\n result['exit_code'], 0, 'Can not create screen with '\n 'netconfig task')\n\n @staticmethod\n def check_netconfig_success(remote, timeout=10 * 20):\n \"\"\"Check that netconfig.pp modular task is succeeded.\n\n :param remote: Remote node for proceed.\n :param timeout: Timeout for wait function.\n \"\"\"\n\n def file_checker(connection):\n cmd = \"test -f ~/success\"\n result = connection.execute(cmd)\n logger.debug(result)\n if result['exit_code'] != 0:\n return False\n else:\n return True\n wait(lambda: file_checker(remote), timeout=timeout,\n timeout_msg='Netconfig puppet task unsuccessful')\n\n def apply_last_part_puppet(self, remote):\n \"\"\"Apply final part of puppet modular tasks on a target node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n last_puppet_run = [\n \"/etc/puppet/modules/osnailyfacter/modular/roles/compute.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"openstack-network/common-config.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"openstack-network/plugins/ml2.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"openstack-network/agents/l3.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"openstack-network/agents/metadata.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"openstack-network/compute-nova.pp\",\n \"/etc/puppet/modules/osnailyfacter/modular/\"\n \"astute/enable_compute.pp\"\n ]\n\n self.puppet_apply(last_puppet_run, remote)\n\n @staticmethod\n def backup_required_information(remote, ip):\n \"\"\"Back up required information for compute from target node.\n\n :param remote: Remote Fuel master node.\n :param ip: Target node ip to back up from.\n \"\"\"\n logger.debug('Target node ip: {0}'.format(ip))\n cmd = (\"cd ~/ && mkdir rh_backup; \"\n \"scp -r {0}:/root/.ssh 
rh_backup/. ; \"\n \"scp {0}:/etc/astute.yaml rh_backup/ ; \"\n \"scp -r {0}:/var/lib/astute/nova rh_backup/\").format(ip)\n result = remote.execute(cmd)\n logger.debug(result['stdout'])\n logger.debug(result['stderr'])\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not back up required information from node')\n logger.debug(\"Backed up ssh-keys and astute.yaml\")\n\n @staticmethod\n def clean_string(string):\n \"\"\"Clean string of redundant characters.\n\n :param string: String.\n :return:\n \"\"\"\n k = str(string)\n pattern = \"^\\s+|\\[|\\]|\\n|,|'|\\r|\\s+$\"\n res = re.sub(pattern, '', k)\n res = res.strip('/\\\\n')\n # NOTE(freerunner): Using sub twice to collect key without extra\n # whitespaces.\n res = re.sub(pattern, '', res)\n res = res.strip('/\\\\n')\n return res\n\n def restore_information(self, ip, remote_admin, remote_slave):\n \"\"\"Restore information on a target node.\n\n :param ip: Remote node ip.\n :param remote_admin: Remote admin node for proceed.\n :param remote_slave: Remote slave node for proceed.\n \"\"\"\n cmd = \"cat ~/rh_backup/.ssh/authorized_keys\"\n result = remote_admin.execute(cmd)\n key = result['stdout']\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not get backed up ssh key.')\n\n key = self.clean_string(key)\n\n cmd = \"mkdir ~/.ssh; echo '{0}' >> ~/.ssh/authorized_keys\".format(key)\n result = remote_slave.execute(cmd)\n logger.debug(result['stdout'])\n logger.debug(result['stderr'])\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not recover ssh key for node')\n\n cmd = \"cd ~/rh_backup && scp astute.yaml {0}@{1}:/etc/.\".format(\n settings.RH_IMAGE_USER, ip)\n logger.debug(\"Restoring astute.yaml for node with ip {0}\".format(ip))\n result = remote_admin.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not restore astute.yaml')\n\n cmd = \"mkdir -p /var/lib/astute\"\n logger.debug(\"Prepare node for restoring nova ssh-keys\")\n result = remote_slave.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Preparation failed')\n\n cmd = (\n \"cd ~/rh_backup && scp -r nova {0}@{1}:/var/lib/astute/.\".format(\n settings.RH_IMAGE_USER, ip)\n )\n logger.debug(\"Restoring nova ssh-keys\")\n result = remote_admin.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not restore ssh-keys for nova')\n\n @staticmethod\n def install_yum_components(remote):\n \"\"\"Install required yum components on a target node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n cmd = \"yum install yum-utils yum-priorities -y\"\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Can not install required'\n 'yum components.')\n\n @staticmethod\n def set_repo_for_perestroika(remote):\n \"\"\"Set Perestroika repos.\n\n :param remote: Remote node for proceed.\n \"\"\"\n repo = settings.PERESTROIKA_REPO\n cmd = (\"curl {0}\".format(repo))\n\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Perestroika repos unavailable from node.')\n\n cmd = (\"echo '[mos]\\n\"\n \"name=mos\\n\"\n \"type=rpm-md\\n\"\n \"baseurl={0}\\n\"\n \"gpgcheck=0\\n\"\n \"enabled=1\\n\"\n \"priority=5' >\"\n \"/etc/yum.repos.d/mos.repo && \"\n \"yum clean all\".format(repo))\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not create config file for repo')\n\n @staticmethod\n def 
check_hiera_installation(remote):\n \"\"\"Check hiera installation on node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n cmd = \"yum list installed | grep hiera\"\n logger.debug('Checking hiera installation...')\n result = remote.execute(cmd)\n if result['exit_code'] == 0:\n cmd = \"yum remove hiera -y\"\n logger.debug('Found existing installation of hiera. Removing...')\n result = remote.execute(cmd)\n asserts.assert_equal(result['exit_code'], 0, 'Can not remove '\n 'hiera')\n cmd = \"ls /etc/hiera\"\n logger.debug('Checking hiera files for removal...')\n result = remote.execute(cmd)\n if result['exit_code'] == 0:\n logger.debug('Found redundant hiera files. Removing...')\n cmd = \"rm -rf /etc/hiera\"\n result = remote.execute(cmd)\n asserts.assert_equal(result['exit_code'], 0,\n 'Can not remove hiera files')\n\n @staticmethod\n def check_rsync_installation(remote):\n \"\"\"Check rsync installation on node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n cmd = \"yum list installed | grep rsync\"\n logger.debug(\"Checking rsync installation...\")\n result = remote.execute(cmd)\n if result['exit_code'] != 0:\n logger.debug(\"Rsync is not found. Installing rsync...\")\n cmd = \"yum clean all && yum install rsync -y\"\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Can not install '\n 'rsync on node.')\n\n @staticmethod\n def remove_old_compute_services(remote, hostname):\n \"\"\"Remove old redundant services which were removed from the services base.\n\n :param remote: Remote node for proceed.\n :param hostname: Old compute hostname.\n \"\"\"\n cmd = (\"source ~/openrc && for i in $(nova service-list | \"\n \"awk '/%s/{print $2}'); do nova service-delete $i; \"\n \"done\" % hostname)\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Can not remove '\n 'old nova computes')\n\n cmd = (\"source ~/openrc && for i in $(neutron agent-list | \"\n \"awk '/%s/{print $2}'); do neutron agent-delete $i; \"\n \"done\" % hostname)\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0, 'Can not remove '\n 'old neutron agents')\n\n @staticmethod\n def install_ruby_puppet(remote):\n \"\"\"Install ruby and puppet on a target node.\n\n :param remote: Remote node for proceed.\n \"\"\"\n puppet_install_cmd = \"yum install puppet ruby -y\"\n result = remote.execute(puppet_install_cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Ruby and puppet installation failed')\n\n @staticmethod\n def rsync_puppet_modules(remote, ip):\n \"\"\"Rsync puppet modules from remote node to node with specified ip.\n\n :param remote: Remote node for proceed.\n :param ip: IP address of a target node where to sync.\n \"\"\"\n cmd = (\"rsync -avz /etc/puppet/modules/* \"\n \"{0}@{1}:/etc/puppet/modules/\".format(settings.RH_IMAGE_USER,\n ip))\n result = remote.execute(cmd)\n logger.debug(result)\n asserts.assert_equal(result['exit_code'], 0,\n 'Rsync puppet modules failed')\n\n def save_node_hostname(self, remote):\n \"\"\"Save hostname of a node.\n\n :param remote: Remote node for proceed.\n :return: Node hostname.\n \"\"\"\n cmd = \"hostname\"\n result = remote.execute(cmd)\n asserts.assert_equal(result['exit_code'], 0, 'Can not get hostname '\n 'for remote')\n nodename = self.clean_string(result['stdout'])\n return nodename\n\n @test(depends_on=[SetupEnvironment.prepare_slaves_5],\n groups=[\"deploy_rh_compute_ha_tun\"])\n 
@log_snapshot_after_test\n def deploy_rh_based_compute(self):\n \"\"\"Deploy RH-based compute in HA mode with Neutron VXLAN\n\n Scenario:\n 1. Check required image.\n 2. Revert snapshot 'ready_with_5_slaves'.\n 3. Create a Fuel cluster.\n 4. Update cluster nodes with required roles.\n 5. Deploy the Fuel cluster.\n 6. Run OSTF.\n 7. Backup astute.yaml and ssh keys from compute.\n 8. Boot compute with RH image.\n 9. Prepare node for Puppet run.\n 10. Execute modular tasks for compute.\n 11. Run OSTF.\n\n Duration: 150m\n Snapshot: deploy_rh_compute_ha_tun\n\n \"\"\"\n self.show_step(1, initialize=True)\n logger.debug('Check MD5 sum of RH 7 image')\n check_image = checkers.check_image(\n settings.RH_IMAGE,\n settings.RH_IMAGE_MD5,\n settings.RH_IMAGE_PATH)\n asserts.assert_true(check_image,\n 'Provided image is incorrect. '\n 'Please, check image path and md5 sum of it.')\n\n self.show_step(2)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(3)\n logger.debug('Create Fuel cluster RH-based compute tests')\n data = {\n 'net_provider': 'neutron',\n 'net_segment_type': settings.NEUTRON_SEGMENT['tun'],\n 'tenant': 'RhHA',\n 'user': 'RhHA',\n 'password': 'RhHA'\n }\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE,\n settings=data\n )\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute']\n }\n )\n\n self.show_step(5)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n cluster_vip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n cluster_vip, data['user'], data['password'], data['tenant'])\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n\n self.show_step(7)\n compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])[0]\n controller_name = 'slave-01'\n controller_ip = self.fuel_web.get_nailgun_node_by_name(\n controller_name)['ip']\n logger.debug('Got node: {0}'.format(compute))\n target_node_name = compute['name'].split('_')[0]\n logger.debug('Target node name: {0}'.format(target_node_name))\n target_node = self.env.d_env.get_node(name=target_node_name)\n logger.debug('DevOps Node: {0}'.format(target_node))\n target_node_ip = self.fuel_web.get_nailgun_node_by_name(\n target_node_name)['ip']\n logger.debug('Acquired ip: {0} for node: {1}'.format(\n target_node_ip, target_node_name))\n\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n old_hostname = self.save_node_hostname(remote)\n\n with self.env.d_env.get_admin_remote() as remote:\n self.backup_required_information(remote, target_node_ip)\n\n self.show_step(8)\n\n target_node.destroy()\n asserts.assert_false(target_node.driver.node_active(node=target_node),\n 'Target node still active')\n self.connect_rh_image(target_node)\n target_node.start()\n asserts.assert_true(target_node.driver.node_active(node=target_node),\n 'Target node did not start')\n self.wait_for_slave_provision(target_node_ip)\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n self.verify_image_connected(remote)\n\n self.show_step(9)\n\n with self.env.d_env.get_admin_remote() as remote_admin:\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as \\\n remote_slave:\n self.restore_information(target_node_ip,\n remote_admin, remote_slave)\n\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n 
self.set_hostname(remote)\n if not settings.CENTOS_DUMMY_DEPLOY:\n self.register_rh_subscription(remote)\n self.install_yum_components(remote)\n if not settings.CENTOS_DUMMY_DEPLOY:\n self.enable_rh_repos(remote)\n self.set_repo_for_perestroika(remote)\n self.check_hiera_installation(remote)\n self.install_ruby_puppet(remote)\n self.check_rsync_installation(remote)\n\n with self.env.d_env.get_admin_remote() as remote:\n self.rsync_puppet_modules(remote, target_node_ip)\n\n self.show_step(10)\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n self.apply_first_part_puppet(remote)\n\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n self.apply_networking_puppet(remote)\n\n with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:\n self.check_netconfig_success(remote)\n self.apply_last_part_puppet(remote)\n\n with self.env.d_env.get_ssh_to_remote(controller_ip) as remote:\n self.remove_old_compute_services(remote, old_hostname)\n\n self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13)\n\n self.show_step(11)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n\n self.env.make_snapshot(\"ready_ha_with_rh_compute\", is_make=True)\n","sub_path":"fuelweb_test/tests/test_rh_compute.py","file_name":"test_rh_compute.py","file_ext":"py","file_size_in_byte":27715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"375652880","text":"import pygame;\nfrom trajectory import trajectory\n\"\"\"\nThis program should render a court, a net, a scoreboard, and two players to the screen, as well\nas a sky backdrop\n\"\"\"\n\nWINDOW_WIDTH = 700\nWINDOW_HEIGHT = 500\nCOURT_HEIGHT = 30\nBOARD_HEIGHT = 140\nBOARD_WIDTH = 300\nSCORE_HEIGHT = 60\nSCORE_WIDTH = 50\nNET_WIDTH = 4\nNET_HEIGHT = 75\nBLOB_WIDTH = 50\nBLOB_HEIGHT = 30\nBLUE = (52, 152, 219)\nYELLOW = (241, 196, 15)\nGREEN = (39, 174, 96)\nWHITE = (236, 240, 241)\nSKY = (82, 212, 255)\nPUMPKIN = (211, 84, 0)\nASBESTOS = (127, 140, 141)\nCONCRETE = (149, 165, 166)\n\n\"\"\"Player class: Adapted from Simpson College example\"\"\"\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n \"\"\" Constructor function \"\"\"\n super().__init__()\n width = 40\n height = 60\n self.image = pygame.Surface([width, height])\n self.image.fill(YELLOW)\n self.rect = self.image.get_rect()\n self.change_x = 0\n self.change_y = 0\n self.level = None\n def update(self):\n self.calc_grav()\n self.rect.x += self.change_x\n # block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n # for block in block_hit_list:\n # if self.change_x > 0:\n # self.rect.right = block.rect.left\n # elif self.change_x < 0:\n # self.rect.left = block.rect.right\n self.rect.y += self.change_y\n # block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n # for block in block_hit_list:\n # if self.change_y > 0:\n # self.rect.bottom = block.rect.top\n # elif self.change_y < 0:\n # self.rect.top = block.rect.bottom\n # self.change_y = 0\n def calc_grav(self):\n if self.change_y == 0:\n self.change_y = 1\n else:\n self.change_y += .35\n if self.rect.y >= WINDOW_HEIGHT - self.rect.height and self.change_y >= 0:\n self.change_y = 0\n self.rect.y = WINDOW_WIDTH - self.rect.height\n def jump(self):\n self.rect.y += 2\n #platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n #if len(platform_hit_list) > 0 or self.rect.bottom >= WINDOW_HEIGHT:\n # self.change_y = 
-10\n def go_left(self):\n self.change_x = -6\n def go_right(self):\n self.change_x = 6\n def stop(self):\n self.change_x = 0\n\n\"\"\"\nBlob class represents the game players on the screen\n\"\"\"\n\nclass Blob:\n def __init__(self, x, y, color):\n self.x = x\n self.y = y\n self.color = color\n def get_blob(self):\n return pygame.Rect(self.x, self.y, BLOB_WIDTH, BLOB_HEIGHT)\n # return (self.x, self.y, BLOB_WIDTH, BLOB_HEIGHT)\n def move_up(self):\n if self.y > 0:\n self.y -= 2\n def move_down(self):\n if self.y < WINDOW_HEIGHT - COURT_HEIGHT - BLOB_HEIGHT:\n self.y += 2\n def move_right(self):\n if self.x < WINDOW_WIDTH * 0.5 - NET_WIDTH * 0.5 - BLOB_WIDTH:\n self.x += 2\n def move_left(self):\n if self.x > 0:\n self.x -= 2\n\n\"\"\"\nCourt class contains information about static court elements\n\"\"\"\n\nclass Court:\n def __init__(self):\n self.court = pygame.Rect(0, WINDOW_HEIGHT - COURT_HEIGHT, WINDOW_WIDTH, COURT_HEIGHT)\n self.net = pygame.Rect((WINDOW_WIDTH / 2) - (NET_WIDTH / 2), WINDOW_HEIGHT - COURT_HEIGHT - NET_HEIGHT, NET_WIDTH, NET_HEIGHT)\n self.net_color = WHITE\n self.court_color = GREEN\n self.sky = pygame.Rect(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT - COURT_HEIGHT)\n self.sky_color = SKY\n\n\n\"\"\"\nScoreboard class maintains the scores of the players, and renders pygame fonts accordingly\n\"\"\"\nclass Board:\n def __init__(self, BOARD_FONT, SCORE_FONT):\n self.board = pygame.Rect(WINDOW_WIDTH / 2 - BOARD_WIDTH / 2, 0, BOARD_WIDTH, BOARD_HEIGHT)\n self.home_score_box = pygame.Rect(WINDOW_WIDTH * 0.5 - BOARD_WIDTH * 0.25 - SCORE_WIDTH * 0.5, BOARD_HEIGHT * 0.5 - SCORE_HEIGHT * 0.5, SCORE_WIDTH, SCORE_HEIGHT)\n self.visitor_score_box = pygame.Rect(WINDOW_WIDTH * 0.5 + BOARD_WIDTH * 0.25 - SCORE_WIDTH * 0.5, BOARD_HEIGHT * 0.5 - SCORE_HEIGHT * 0.5, SCORE_WIDTH, SCORE_HEIGHT)\n self.home_score = SCORE_FONT.render('0', True, (0, 0, 0))\n self.visitor_score = SCORE_FONT.render('0', True, (0, 0, 0))\n self.home_score_rect = self.home_score.get_rect()\n self.home_score_rect.centerx = WINDOW_WIDTH * 0.5 - BOARD_WIDTH * 0.25\n self.home_score_rect.centery = BOARD_HEIGHT * 0.5\n self.visitor_score_rect = self.visitor_score.get_rect()\n self.visitor_score_rect.centerx = WINDOW_WIDTH * 0.5 + BOARD_WIDTH * 0.25\n self.visitor_score_rect.centery = BOARD_HEIGHT * 0.5\n self.home_score_text = BOARD_FONT.render('Home', True, (0, 0, 0))\n self.visitor_score_text = BOARD_FONT.render('Away', True, (0, 0, 0))\n self.home_score_text_rect = self.home_score_text.get_rect()\n self.home_score_text_rect.centerx = WINDOW_WIDTH * 0.5 - BOARD_WIDTH * 0.25\n self.home_score_text_rect.centery = BOARD_HEIGHT * 0.5 - SCORE_HEIGHT\n self.visitor_score_text_rect = self.visitor_score_text.get_rect()\n self.visitor_score_text_rect.centerx = WINDOW_WIDTH * 0.5 + BOARD_WIDTH * 0.25\n self.visitor_score_text_rect.centery = BOARD_HEIGHT * 0.5 - SCORE_HEIGHT\n\n\ndef main():\n pygame.init()\n BOARD_FONT = pygame.font.Font(None, 20)\n SCORE_FONT = pygame.font.Font(None, 50)\n\n player = Player()\n active_sprite_list = pygame.sprite.Group()\n player.rect.x = 340\n player.rect.y = WINDOW_HEIGHT - player.rect.height\n\n active_sprite_list.add(player)\n\n screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n done = False\n blob = Blob(WINDOW_WIDTH * 0.25 - BLOB_WIDTH * 0.5, WINDOW_HEIGHT - COURT_HEIGHT - BLOB_HEIGHT, PUMPKIN)\n blob2 = Blob(WINDOW_WIDTH * 0.75 - BLOB_WIDTH * 0.5, WINDOW_HEIGHT - COURT_HEIGHT - BLOB_HEIGHT, BLUE)\n court = Court()\n board = Board(BOARD_FONT, SCORE_FONT)\n 
is_moving = False;\n direction = 0\n moving_up = False\n heading_up = False\n lame = 0;\n screen.fill((0, 0, 0))\n pygame.draw.rect(screen, court.sky_color, court.sky)\n pygame.draw.rect(screen, court.court_color, court.court)\n pygame.draw.rect(screen, court.net_color, court.net)\n pygame.draw.rect(screen, ASBESTOS, board.board)\n pygame.draw.rect(screen, CONCRETE, board.home_score_box)\n pygame.draw.rect(screen, CONCRETE, board.visitor_score_box)\n screen.blit(board.home_score, board.home_score_rect)\n screen.blit(board.visitor_score, board.visitor_score_rect)\n screen.blit(board.home_score_text, board.home_score_text_rect)\n screen.blit(board.visitor_score_text, board.visitor_score_text_rect)\n clock = pygame.time.Clock()\n\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n is_moving = True;\n direction = 0;\n if event.key == pygame.K_a:\n is_moving = True;\n direction = 1;\n if event.key == pygame.K_s:\n is_moving = True;\n direction = 2;\n if event.key == pygame.K_d:\n is_moving = True;\n direction = 3;\n if event.key == pygame.K_LEFT:\n player.go_left()\n if event.key == pygame.K_RIGHT:\n player.go_right()\n if event.key == pygame.K_UP:\n player.jump()\n\n if event.type == pygame.KEYUP:\n is_moving = False\n direction = 0;\n if event.key == pygame.K_LEFT and player.change_x < 0:\n player.stop()\n if event.key == pygame.K_RIGHT and player.change_x > 0:\n player.stop()\n\n if event.type == pygame.QUIT:\n return\n if is_moving:\n if direction == 3:\n blob.move_right()\n if direction == 1:\n blob.move_left()\n if direction == 0:\n blob.move_up()\n if direction == 2:\n blob.move_down()\n active_sprite_list.update()\n\n print(player.rect.y)\n\n if player.rect.right > WINDOW_WIDTH:\n player.rect.right = WINDOW_WIDTH\n if player.rect.left < 0:\n player.rect.left = 0\n pygame.draw.circle(screen, blob.color, (int(blob.x), int(blob.y)), BLOB_WIDTH // 2, 0)\n pygame.draw.rect(screen, blob2.color, blob2.get_blob())\n # pygame.display.update()\n active_sprite_list.draw(screen)\n clock.tick(60)\n\n pygame.display.flip()\n return\n\nif __name__ == '__main__':\n main()","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23013623","text":"#!/bin/python\n\nimport sys\n\ndef gameOfThrones(s):\n # Complete this function\n alcounts = {}\n for ch in s:\n if ch not in alcounts:\n alcounts[ch] = 0\n alcounts[ch] += 1\n \n s_list = list(s)\n if len(s_list) % 2 == 0:\n for key in alcounts:\n if alcounts[key] % 2 != 0:\n return \"NO\"\n return \"YES\"\n else:\n odd_one_seen = False\n for key in alcounts:\n if alcounts[key] % 2 != 0:\n if odd_one_seen:\n return \"NO\"\n else:\n odd_one_seen = True\n else:\n if alcounts[key] % 2 != 0:\n return \"NO\" \n\n return \"YES\"\n\ns = raw_input().strip()\nresult = gameOfThrones(s)\nprint(result)\n","sub_path":"src/python/game-of-thrones1.py","file_name":"game-of-thrones1.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"180479008","text":"from exp_base import *\n\n\n############## choose an experiment ##############\n\ncurrent = 'builder'\ncurrent = 'feat3d_trainer'\ncurrent = 'traj_trainer'\ncurrent = 'scene_trainer'\ncurrent = 'traj_via_scene_trainer'\n\nmod = '\"bkg00\"' # latent init 200 01_s20_m128x64x128_1e-3_F3f_d4_O_c1_t1_s.1_cacs20i2one_traj12\nmod = '\"bkg01\"' # 
init self.bkg[:,0:1] with median occ, then optimize free \nmod = '\"bkg02\"' # 100 iters\nmod = '\"bkg03\"' # optimize bkg var\nmod = '\"bkg04\"' # use pos loss too\nmod = '\"bkg05\"' # use 0.1 smooth loss\nmod = '\"bkg06\"' # again\nmod = '\"bkg07\"' # show latent occ\nmod = '\"bkg08\"' # fixed bug in scopes; higher smooth\nmod = '\"bkg09\"' #\n\n# next:\n# project and inflate the 3d traj box to get a 2d one\n# turn that into a mask\n# train bkg color, excluding the obj region\n# train obj full, only within the obj region\n\nmod = '\"bkg10\"' # repeat whatever this is\nmod = '\"bkg11\"' # complete_ac_s30_i2\n\n\nmod = '\"bust00\"' # feat3d \nmod = '\"bust01\"' # backprop\nmod = '\"bust02\"' # do not pret\nmod = '\"bust03\"' # lower lr\nmod = '\"bust04\"' # 1k with S=2\nmod = '\"bust05\"' # si arch\nmod = '\"bust06\"' # downsamp in arch\nmod = '\"bust07\"' # 6 blocks instead of 4\nmod = '\"bust08\"' # similar but each block is just a conv\nmod = '\"bust09\"' # one compressor then three proper res blocks\nmod = '\"bust10\"' # go to quarter res with compressor\nmod = '\"bust11\"' # pret 400 01_s2_m128x64x128_1e-5_F3_d4_O_c1_t1_s.1_cacs30i2one_bust09\n\n\nmod = '\"bust12\"' # pret feat06\nmod = '\"bust13\"' # show medians\nmod = '\"bust14\"' # show medians on every step, to help it show up\nmod = '\"bust15\"' # vis-aware median\nmod = '\"bust16\"' # use gt occ\nmod = '\"bust17\"' # use vis*diff\nmod = '\"bust18\"' # init with true argmax (not soft)\nmod = '\"bust19\"' # show traj optim\nmod = '\"bust20\"' # higher smooth coeff\nmod = '\"bust21\"' # elastic coeff\nmod = '\"bust22\"' # lock in the middle\nmod = '\"bust23\"' # clone\nmod = '\"bust24\"' # set middle to mean of traj_init\nmod = '\"bust25\"' # set middle to median\nmod = '\"bust26\"' # smooth coeff 0.0\n\n# ok, the traj is not great. but maybe this is a hard example, and anyway, maybe we will still learn an ok object\n# no, that sounds super unlikely actually\n# what i ought to do is: take one proposal and track it with the eccv method, just like i did for neurips\n\nmod = '\"bust27\"' # diff example\nmod = '\"bust28\"' # blur those diffs by 4\nmod = '\"bust29\"' # fixed bug in elastic loss\n# good now\n# let's try again the harder data\nmod = '\"bust30\"' # harder (s30)\n# ok kind of reasonable but let's stay easy\nmod = '\"bust31\"' # s20\nmod = '\"bust32\"' # soft argmax\nmod = '\"bust33\"' # hard=False\nmod = '\"bust34\"' # hard=False < super similar\n\n\n# ok, now i have a traj\n# i would like to try to optimize for the scene now\n# maybe i can/should optimize for obj and bkg simultaneously\n\n# btw maybe this is another place where some convolutional priors might help... 
anyway let's see\n\nmod = '\"bust35\"' # init latents with (traj from) 01_s20_m128x64x128_1e-2_F3f_d4_O_c1_t1_s.5_cacs20i2one_bust34; just show some latents and return\nmod = '\"bust36\"' # show traj\nmod = '\"bust37\"' # show traj on occ\nmod = '\"bust38\"' # get median and apply loss on bkg var\nmod = '\"bust39\"' # do backprop\nmod = '\"bust40\"' # compose obj and bkg\nmod = '\"bust41\"' # render\nmod = '\"bust42\"' # train obj for occ too\nmod = '\"bust43\"' # only load data on step==1, since it's fixed\nmod = '\"bust44\"' # re-use occ/free data after first iter; also, nothing special in init of obj < but this is overwritten by the init i think\nmod = '\"bust45\"' # again\nmod = '\"bust46\"' # better scope for scene occ; smooth loss on full scene\nmod = '\"bust47\"' # 1k; log53\nmod = '\"bust48\"' # symmetry loss\nmod = '\"bust49\"' # show scene occ gif\nmod = '\"bust50\"' # be more conservative with \"free\" < indeed worth it\nmod = '\"bust51\"' # fix bug in vis\nmod = '\"bust52\"' # snap100\n\n\nmod = '\"bust53\"' # try again for traj optim, using the debugged free/vis\n\n\nmod = '\"bust54\"' # smooth loss directly on obj too\n\n\nmod = '\"bust55\"' # traj again but cleaner (no feat3d)\nmod = '\"bust56\"' # train traj via scene\nmod = '\"bust57\"' # log11; 200 iters; pret latents 500 01_s20_m128x64x128_p128x384_1e-2_O_c1_t1_s.5_R_d64_r10_cacs20i2one_bust54\n\nmod = '\"bust58\"' # train scene again, but with proper occ sup, where bkg is trained with median and obj is trained with fullscene\n\n\nmod = '\"bust59\"' # traj via scene, but pret bust54 this time for real\nmod = '\"bust60\"' # coeff 0 on elastic; \n\nmod = '\"bust61\"' # train scene again; pret with bust58; 2.0m car; smaller lr, so that it does not diverge\n\nmod = '\"bust62\"' # train_via_scene; noise_amount=0.0, so i can get a hint of whether this is optimizable at all\n\n# why isn't traj moving?\n# why is the render loss going up when i train for scene right now?\nmod = '\"bust63\"' # again but 10k iters, just to see if there's any motion overnight\n# nope.\n# i think there is some grad problem with differentiability\n# also, bust61 had loss going steadily up. let's fix that first.\n\n\nmod = '\"bust64\"' # train scene; replicate teh issue from scene61\nmod = '\"bust65\"' # flip randomly, instead of using a symm loss\nmod = '\"bust66\"' # make objvar half, with a hard mirror\nmod = '\"bust67\"' # compute-0-36\nmod = '\"bust68\"' # 1-14 again\nmod = '\"bust69\"' # do not load obj, so that we get proper mirror init\n# overall, these results go the opposite direction from bust61. 
so maybe that was just a gpu bug\nmod = '\"bust70\"' # traj_via_scene; pret 01_s20_m128x64x128_p128x384_1e-3_O_c1_t1_s.5_R_d64_r10_cacs20i2one_bust69 < too early\n\nmod = '\"bust71\"' # train scene; use coeffs\nmod = '\"bust72\"' # put latents earlier in autoname; pret 400 01_s20_m128x64x128_p128x384_1e-3_O_c1_t1_s.5_R_d64_r10_cacs20i2one_bust69\nmod = '\"bust73\"' # separate coeffs/stats for bkg and obj occ\nmod = '\"bust74\"' # render coeff 1\nmod = '\"bust75\"' # scale the render loss by /S\nmod = '\"bust76\"' # added hyp for weighing the total render\n# oddly enough, it seems a lot like self.obj has a seq dim, allowing it to behave differently on different steps\nmod = '\"bust77\"' # pret 800 bust69; show unsqueezed obj feats; log11; \nmod = '\"bust78\"' # show occ_objs\nmod = '\"bust79\"' # show occ_objs better\nmod = '\"bust80\"' # apply occ loss with help of full scene\n\n# i think what i need to do is:\n# first resize the image to its target size, with F.interpolate, then move the pixels one by one\n\nmod = '\"bust81\"' # bring back noise in rendering < ok, render loss is a bit jumpier, but all ok.\nmod = '\"bust82\"' # traj_via_scene; pret 1k 01_s20_m128x64x128_p128x384_1e-3_L_oo2_bo1_os2_ss1_r1_O_c1_t1_s.5_R_d64_r1_cacs20i2one_bust80\n# maybe i need to not detach the occ, to backprop into the traj?\n\nmod = '\"bust83\"' # print a few things, to see where the grads are\nmod = '\"bust84\"' # disable elastic and diff, to see if grads still appear in traj\n# ok, we're still getting grads.\nmod = '\"bust85\"' # one more step to disable traj elastic\n# ok, with high enough lr, it starts to move.\nmod = '\"bust86\"' # bring back elastic\nmod = '\"bust87\"' # also train occ\nmod = '\"bust88\"' # put render total into latents scope\nmod = '\"bust89\"' # allow obj to optimize too, with lr mult 1\nmod = '\"bust90\"' # more coeffs\n\n############## exps ##############\n\nexps['builder'] = [\n 'carla_goodvar', # mode\n 'carla_complete_train1_data', # dataset\n 'carla_8-4-8_bounds_train',\n '3_iters',\n 'lr5',\n 'B1',\n 'no_backprop',\n 'train_occ',\n # 'train_render',\n # 'train_center',\n 'log1',\n]\nexps['feat3d_trainer'] = [\n 'carla_goodvar', # mode\n 'carla_complete_train1_data', # dataset\n 'carla_8-4-8_bounds_train',\n # '10k_iters',\n '1k_iters',\n # '10_iters',\n 'lr5',\n 'B1',\n # 'no_shuf',\n # 'no_backprop',\n # 'frozen_feat3d',\n # 'pretrained_feat3d',\n 'train_feat3d',\n 'train_occ',\n # 'train_render',\n # 'pretrained_latents',\n # 'log53',\n 'snap100',\n 'log11',\n # 'log1',\n]\nexps['traj_trainer'] = [\n 'carla_goodvar', # mode\n 'carla_complete_train1_data', # dataset\n 'carla_8-4-8_bounds_train',\n # '10k_iters',\n '200_iters',\n # '3_iters',\n 'lr2',\n 'B1',\n # 'no_shuf',\n # 'no_backprop',\n 'frozen_feat3d', \n 'pretrained_feat3d',\n 'train_feat3d',\n 'train_occ',\n 'train_trajvar',\n # 'train_render',\n # 'pretrained_latents',\n # 'log53',\n 'snap100',\n 'log11',\n # 'log1',\n]\nexps['scene_trainer'] = [\n 'carla_goodvar', # mode\n 'carla_complete_train1_data', # dataset\n 'carla_8-4-8_bounds_train',\n # '10k_iters',\n '1k_iters',\n # '50_iters',\n 'lr3',\n 'B1',\n # 'no_shuf',\n # 'no_backprop',\n # 'frozen_feat3d', \n # 'pretrained_feat3d',\n 'pretrained_latents',\n # 'train_feat3d',\n 'train_scenevar',\n 'train_occ',\n 'train_render',\n # 'pretrained_latents',\n 'snap100',\n 'log53',\n # 'log11',\n]\nexps['traj_via_scene_trainer'] = [\n 'carla_goodvar', # mode\n 'carla_complete_train1_data', # dataset\n 'carla_8-4-8_bounds_train',\n '10k_iters',\n # '1k_iters',\n # 
'200_iters',\n 'lr3',\n 'B1',\n # 'no_shuf',\n # 'no_backprop',\n # 'frozen_feat3d', \n # 'pretrained_feat3d',\n 'pretrained_latents',\n 'train_traj_via_scene',\n 'train_occ',\n 'train_render',\n 'log53',\n # 'log11',\n]\n\n############## groups ##############\n\ngroups['carla_goodvar'] = ['do_carla_goodvar = True']\n\ngroups['train_trajvar'] = [\n 'train_trajvar = True',\n 'latent_traj_elastic_coeff = 1.0', \n 'latent_traj_diff_coeff = 1.0', \n]\ngroups['train_scenevar'] = [\n 'train_scenevar = True',\n 'latent_bkg_occ_coeff = 1.0', \n 'latent_obj_occ_coeff = 2.0', \n 'latent_scene_smooth_coeff = 1.0', \n 'latent_obj_smooth_coeff = 2.0',\n 'latent_render_coeff = 1.0',\n]\ngroups['train_traj_via_scene'] = [\n 'train_traj_via_scene = True',\n 'latent_render_coeff = 1.0',\n 'latent_traj_elastic_coeff = 0.1', \n 'latent_scene_smooth_coeff = 1.0', \n 'latent_obj_smooth_coeff = 2.0',\n 'latent_bkg_occ_coeff = 1.0', \n 'latent_obj_occ_coeff = 2.0', \n]\n\ngroups['do_test'] = ['do_test = True']\ngroups['train_feat3d'] = [\n 'do_feat3d = True',\n 'feat3d_dim = 4',\n]\n# groups['train_bkg'] = [\n# 'do_bkg = True',\n# 'bkg_coeff = 1.0',\n# 'bkg_epsilon = 0.75',\n# ]\ngroups['train_render'] = [\n 'do_render = True',\n 'render_depth = 64',\n 'render_rgb_coeff = 1.0',\n # 'render_depth_coeff = 0.1',\n # 'render_smooth_coeff = 0.01',\n]\ngroups['train_occ'] = [\n 'do_occ = True',\n 'occ_coeff = 1.0',\n 'occ_temporal_coeff = 1.0',\n 'occ_smooth_coeff = 0.5',\n]\ngroups['train_center'] = [\n 'do_center = True',\n # 'center_prob_coeff = 1.0',\n # # 'center_size_coeff = 10.0',\n # 'center_size_coeff = 1.0',\n # 'center_rot_coeff = 1.0',\n # # 'center_offset_coeff = 10.0',\n # 'center_offset_coeff = 1.0',\n # 'center_peak_coeff = 0.1',\n 'center_smooth_coeff = 0.001', \n]\ngroups['train_rgb'] = [\n 'do_rgb = True',\n 'rgb_l1_coeff = 1.0',\n # 'rgb_smooth_coeff = 0.1',\n]\ngroups['train_sigen3d'] = [\n 'do_sigen3d = True',\n 'sigen3d_coeff = 1.0',\n 'sigen3d_reg_coeff = 0.1',\n]\n\n\n############## datasets ##############\n\n# # dims for mem\n# SIZE = 32\n# Z = int(SIZE*4)\n# Y = int(SIZE*1)\n# X = int(SIZE*4)\n\nK = 2 # how many objects to consider\nN = 8 # how many objects per npz\n# S = 2\nS = 20\n# S_val = 5\nS_val = 2\nS_test = 5\nH = 128\nW = 384\n# H and W for proj stuff\n# PH = int(H/2.0)\n# PW = int(W/2.0)\nPH = int(H)\nPW = int(W)\n\n# SIZE = 32\n# SIZE_val = 32\n# SIZE_test = 32\n# SIZE_zoom = 32\n\n# SIZE = 24\n# SIZE_val = 24\n# SIZE_test = 24\n# SIZE_zoom = 24\n\n# SIZE = 20\n# SIZE_val = 20\n# SIZE_test = 20\n# SIZE_zoom = 20\n\nSIZE = 16\nSIZE_val = 16\nSIZE_test = 16\nSIZE_zoom = 16\n\n# SIZE = 12\n# SIZE_val = 12\n# SIZE_test = 12\n# SIZE_zoom = 12\n\n# SIZE = 10\n# SIZE_val = 10\n# SIZE_test = 10\n# SIZE_zoom = 10\n\n# SIZE = 8\n# SIZE_val = 8\n# SIZE_test = 8\n# SIZE_zoom = 8\n\n# SIZE = 4\n# SIZE_val = 4\n# SIZE_test = 4\n# SIZE_zoom = 4\n\n# dataset_location = \"/data/carla/processed/npzs\"\ndataset_location = \"/projects/katefgroup/datasets/carla/processed/npzs\"\n# dataset_location = \"/projects/katefgroup/datasets/carla_odometry/processed\"\n\ngroups['carla_8-4-8_bounds_train'] = [\n 'XMIN = -8.0', # right (neg is left)\n 'XMAX = 8.0', # right\n 'YMIN = -4.0', # down (neg is up)\n 'YMAX = 4.0', # down\n 'ZMIN = -8.0', # forward\n 'ZMAX = 8.0', # forward\n 'Z = %d' % (int(SIZE*8)),\n 'Y = %d' % (int(SIZE*4)),\n 'X = %d' % (int(SIZE*8)),\n]\ngroups['carla_16-8-16_bounds_train'] = [\n 'XMIN = -16.0', # right (neg is left)\n 'XMAX = 16.0', # right\n 'YMIN = -8.0', # down (neg is up)\n 
'YMAX = 8.0', # down\n 'ZMIN = -16.0', # forward\n 'ZMAX = 16.0', # forward\n 'Z = %d' % (int(SIZE*8)),\n 'Y = %d' % (int(SIZE*4)),\n 'X = %d' % (int(SIZE*8)),\n]\ngroups['carla_16-8-16_bounds_val'] = [\n 'XMIN_val = -16.0', # right (neg is left)\n 'XMAX_val = 16.0', # right\n 'YMIN_val = -8.0', # down (neg is up)\n 'YMAX_val = 8.0', # down\n 'ZMIN_val = -16.0', # forward\n 'ZMAX_val = 16.0', # forward\n 'Z_val = %d' % (int(SIZE_val*8)),\n 'Y_val = %d' % (int(SIZE_val*4)),\n 'X_val = %d' % (int(SIZE_val*8)),\n]\ngroups['carla_16-8-16_bounds_test'] = [\n 'XMIN_test = -16.0', # right (neg is left)\n 'XMAX_test = 16.0', # right\n 'YMIN_test = -8.0', # down (neg is up)\n 'YMAX_test = 8.0', # down\n 'ZMIN_test = -16.0', # forward\n 'ZMAX_test = 16.0', # forward\n 'Z_test = %d' % (int(SIZE_test*8)),\n 'Y_test = %d' % (int(SIZE_test*4)),\n 'X_test = %d' % (int(SIZE_test*8)),\n]\ngroups['carla_32-16-32_bounds_train'] = [\n 'XMIN = -32.0', # right (neg is left)\n 'XMAX = 32.0', # right\n 'YMIN = -16.0', # down (neg is up)\n 'YMAX = 16.0', # down\n 'ZMIN = -32.0', # forward\n 'ZMAX = 32.0', # forward\n 'Z = %d' % (int(SIZE*8)),\n 'Y = %d' % (int(SIZE*4)),\n 'X = %d' % (int(SIZE*8)),\n]\ngroups['carla_32-16-32_bounds_val'] = [\n 'XMIN_val = -32.0', # right (neg is left)\n 'XMAX_val = 32.0', # right\n 'YMIN_val = -16.0', # down (neg is up)\n 'YMAX_val = 16.0', # down\n 'ZMIN_val = -32.0', # forward\n 'ZMAX_val = 32.0', # forward\n 'Z_val = %d' % (int(SIZE_val*8)),\n 'Y_val = %d' % (int(SIZE_val*4)),\n 'X_val = %d' % (int(SIZE_val*8)),\n]\ngroups['carla_32-16-32_bounds_test'] = [\n 'XMIN_test = -32.0', # right (neg is left)\n 'XMAX_test = 32.0', # right\n 'YMIN_test = -16.0', # down (neg is up)\n 'YMAX_test = 16.0', # down\n 'ZMIN_test = -32.0', # forward\n 'ZMAX_test = 32.0', # forward\n 'Z_test = %d' % (int(SIZE_test*8)),\n 'Y_test = %d' % (int(SIZE_test*4)),\n 'X_test = %d' % (int(SIZE_test*8)),\n]\ngroups['carla_32-32-32_bounds_train'] = [\n 'XMIN = -32.0', # right (neg is left)\n 'XMAX = 32.0', # right\n 'YMIN = -32.0', # down (neg is up)\n 'YMAX = 32.0', # down\n 'ZMIN = -32.0', # forward\n 'ZMAX = 32.0', # forward\n 'Z = %d' % (int(SIZE*8)),\n 'Y = %d' % (int(SIZE*8)),\n 'X = %d' % (int(SIZE*8)),\n]\ngroups['carla_32-32-32_bounds_val'] = [\n 'XMIN_val = -32.0', # right (neg is left)\n 'XMAX_val = 32.0', # right\n 'YMIN_val = -32.0', # down (neg is up)\n 'YMAX_val = 32.0', # down\n 'ZMIN_val = -32.0', # forward\n 'ZMAX_val = 32.0', # forward\n 'Z_val = %d' % (int(SIZE_val*8)),\n 'Y_val = %d' % (int(SIZE_val*8)),\n 'X_val = %d' % (int(SIZE_val*8)),\n]\ngroups['carla_32-32-32_bounds_test'] = [\n 'XMIN_test = -32.0', # right (neg is left)\n 'XMAX_test = 32.0', # right\n 'YMIN_test = -32.0', # down (neg is up)\n 'YMAX_test = 32.0', # down\n 'ZMIN_test = -32.0', # forward\n 'ZMAX_test = 32.0', # forward\n 'Z_test = %d' % (int(SIZE_test*8)),\n 'Y_test = %d' % (int(SIZE_test*8)),\n 'X_test = %d' % (int(SIZE_test*8)),\n]\ngroups['carla_multiview_train1_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"mags7i3one\"',\n 'trainset_format = \"multiview\"', \n 'trainset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_train1_test1_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"mags7i3one\"',\n 'trainset_format = \"multiview\"', \n 'trainset_seqlen = %d' % S, \n 'testset = \"mags7i3one\"',\n 'testset_format = \"multiview\"', \n 'testset_seqlen = %d' % 
S_test, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_test10_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'testset = \"mags7i3ten\"',\n 'testset_format = \"multiview\"', \n 'testset_seqlen = %d' % S_test, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_train10_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"mags7i3ten\"',\n 'trainset_format = \"multiview\"', \n 'trainset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_complete_train10_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"cacs20i2ten\"',\n 'trainset_format = \"complete\"', \n 'trainset_consec = True', \n 'trainset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_complete_train1_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"cacs20i2one\"',\n 'trainset_format = \"complete\"', \n 'trainset_consec = True', \n 'trainset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_val10_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'valset = \"mags7i3ten\"',\n 'valset_format = \"multiview\"', \n 'valset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_train_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"mags7i3t\"',\n 'trainset_format = \"multiview\"', \n 'trainset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_val_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'valset = \"mags7i3v\"',\n 'valset_format = \"multiview\"', \n 'valset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\ngroups['carla_multiview_test_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'testset = \"mags7i3v\"',\n 'testset_format = \"multiview\"', \n 'testset_seqlen = %d' % S, \n 'dataset_location = \"%s\"' % dataset_location,\n 'dataset_filetype = \"npz\"'\n]\n\n############## verify and execute ##############\n\ndef _verify_(s):\n varname, eq, val = s.split(' ')\n assert varname in globals()\n assert eq == '='\n assert type(s) is type('')\n\nprint(current)\nassert current in exps\nfor group in exps[current]:\n print(\" \" + group)\n assert group in groups\n for s in groups[group]:\n print(\" \" + s)\n _verify_(s)\n exec(s)\n\ns = \"mod = \" + mod\n_verify_(s)\n\nexec(s)\n","sub_path":"pytorch_disco_recovery/exp_carla_goodvar.py","file_name":"exp_carla_goodvar.py","file_ext":"py","file_size_in_byte":19134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637899702","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n#Reading the dataset\ndf=pd.read_csv('day.csv')\n\n# Converting date to Pandas datetime format\ndf['dteday'] = pd.to_datetime(df['dteday'])\n\n#Changing the season, weathersit, mnth, weekday columns from numerical values to categorical strings\ndf.season=df.season.map({1:'spring', 
2:'summer', 3:'fall', 4:'winter'})\ndf.weathersit=df.weathersit.map({1:'Best', 2:'Neutral', 3:'Bad', 4:'Worse'})\ndf.mnth=df.mnth.map({1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'June',7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'})\ndf.weekday=df.weekday.map({1:'Mon',2:'Tue',3:'Wed',4:'Thu',5:'Fri',6:'Sat',0:'Sun'})\n\n#The column 'instant' is very insignificant. Hence dropping that column.\ndf=df.drop('instant',axis=1)\n\n#Inserting a new variable day in the dataframe.\ndf.insert(4,'day','')\ndf['day']=pd.DatetimeIndex(df['dteday']).day\n\n#dropping dteday\ndf=df.drop('dteday', axis=1)\n\ndf=df.drop(['casual', 'registered'],axis=1)\ndf=df.drop('atemp',axis=1,)\n\n#Creating Dummy variables\n\ndef dummies(x,dataframe):\n temp = pd.get_dummies(dataframe[x], drop_first = True)\n dataframe = pd.concat([dataframe, temp], axis = 1)\n dataframe.drop([x], axis = 1, inplace = True)\n return dataframe\n# Applying the function to the bikeSharing\n\ndf = dummies('season',df)\ndf = dummies('mnth',df)\ndf = dummies('weekday',df)\ndf = dummies('weathersit',df)\n\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\ndf_train, df_test= train_test_split(df,train_size=0.7, random_state=100)\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler=MinMaxScaler()\n\nneed_rescale=['temp','hum','windspeed']\ndf_train[need_rescale]=scaler.fit_transform(df_train[need_rescale])\n\ny_train=df_train.pop('cnt')\nX_train=df_train\n\nX_train=X_train[['Best', 'Neutral', 'spring', 'temp', 'winter', 'summer', 'hum', 'Jul', 'Sep', 'windspeed', 'yr', 'holiday']]\n\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\n\n#Fitting model with training data\nregressor.fit(X_train, y_train)\n\n#Saving model to disk #wb=write bytes\nimport pickle\npickle.dump(regressor, open('model.pkl', 'wb'))\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479674029","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2020 Quoc-Nam Dessoulles\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"test_api.py\"\"\"\n\n__author__ = \"Quoc-Nam Dessoulles\"\n__email__ = \"cokie.forever@gmail.com\"\n__license__ = \"MIT\"\n\nfrom urllib.parse import quote\n\nfrom repolite.tests.util.test_base import TestBase\nfrom repolite.util.misc import changeWorkingDir\nfrom repolite.vcs import gerrit\n\n\nclass TestApi(TestBase):\n def test_getChange(self):\n self.createCommit()\n self.push()\n\n for sProjectFolder, sProjectName in self.dProjectFolders.items():\n with changeWorkingDir(sProjectFolder):\n sChangeId = gerrit.getChangeId()\n dChange = self.oApiClient.getChangeData(sChangeId, sProjectName)\n\n assert dChange is not None\n assert dChange[\"id\"] == \"%s~master~%s\" % (quote(sProjectName, safe=\"\"), sChangeId)\n assert dChange[\"project\"] == sProjectName\n assert dChange[\"branch\"] == \"master\"\n assert dChange[\"change_id\"] == sChangeId\n assert dChange[\"status\"] == \"NEW\"\n","sub_path":"repolite/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"200054300","text":"#!/usr/bin/env python3\nfrom ev3dev.ev3 import *\nfrom threading import *\nimport time, socket, json, math\n\nclass Communication(Thread):\n def __init__(self):\n self.ir_value = 0\n self.ir2_value = 0\n Thread.__init__(self)\n\n def run(self):\n while True:\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('169.255.168.151', 3562))\n s.listen()\n while True:\n conn, addr = s.accept()\n with conn:\n #print('Connected by', addr)\n while True:\n data = conn.recv(1024)\n\n if not data:\n break\n\n Sedex = json.loads(data.decode())\n print(Sedex['IR1'])\n except Exception as e:\n print(e)\n time.sleep(0.5)\n \n\nComm = Communication()\nComm.daemon = True\nComm.start()\n\ndef Angulo_Reta(pontos):\n metade = int(len(pontos) / 2)\n media = [0, 0]\n for i in range(0, metade):\n media[0] += pontos[i]\n media[1] += 1\n\n ponto1 = int(media[0] / media[1])\n\n media = [0, 0]\n for i in range(metade, len(pontos)):\n media[0] += pontos[i]\n media[1] += 1\n\n ponto2 = int(media[0] / media[1])\n\n m = (ponto2 - ponto1) / (2)\n angulo = math.atan(m)\n print((angulo*57.2958))\n time.sleep(10)\n\nwhile True:\n pass","sub_path":"Zandar.py","file_name":"Zandar.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"472841036","text":"\"\"\"\r\nNote: this file will be copied to the Lambda too. 
Do not\r\nadd dependencies carelessly.\r\n\"\"\"\r\nfrom Crypto.Cipher import AES\r\n\r\n\r\nPRIVATE_KEY_ENV_VAR = 'RSA_PRIVATE_KEY'\r\n\r\n# Since a new symmetric key is generated for each request, we can hard code\r\n# these nonces.\r\nREQUEST_META_NONCE = 'requestMeta'\r\nREQUEST_BODY_NONCE = 'requestBody'\r\nRESPONSE_META_NONCE = 'responseMeta'\r\nRESPONSE_BODY_NONCE = 'responseBody'\r\n\r\n\r\ndef encrypt_with_gcm(key, cleartext, nonce):\r\n cipher = AES.new(key, AES.MODE_GCM, nonce)\r\n ciphertext, tag = cipher.encrypt_and_digest(cleartext)\r\n return ciphertext, tag\r\n\r\n\r\ndef decrypt_with_gcm(key, ciphertext, tag, nonce):\r\n cipher = AES.new(key, AES.MODE_GCM, nonce)\r\n return cipher.decrypt_and_verify(ciphertext, tag)\r\n","sub_path":"shared/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"254119128","text":"# http://adventofcode.com/2017/day/3\n\n# You come across an experimental new kind of memory stored on an infinite \n# two-dimensional grid.\n\n# Each square on the grid is allocated in a spiral pattern starting at a \n# location marked 1 and then counting up while spiraling outward. \n# For example, the first few squares are allocated like this:\n\n# 17 16 15 14 13\n# 18 5 4 3 12\n# 19 6 1 2 11\n# 20 7 8 9 10\n# 21 22 23---> ...\n# While this is very space-efficient (no squares are skipped), requested \n# data must be carried back to square 1 (the location of the only access \n# port for this memory system) by programs that can only move up, down, \n# left, or right. They always take the shortest path: the Manhattan \n# Distance between the location of the data and square 1.\n\n# For example:\n# Data from square 1 is carried 0 steps, since it's at the access port.\n# Data from square 12 is carried 3 steps, such as: down, left, left.\n# Data from square 23 is carried only 2 steps: up twice.\n# Data from square 1024 must be carried 31 steps.\n# How many steps are required to carry the data from the square identified \n# in your puzzle input all the way to the access port?\n\ndef manhatten_distance(n):\n\tif n == 1: return 0\n\ti = 1\n\tx = 1\n\tcorners = [0] * 5\n\twhile True:\n\t\tx += 8 * i\n\t\tif x >= n:\n\t\t\tstart = x - i * 8 + 1\n\t\t\tcorners[0] = start\n\t\t\tcorners[4] = x\n\t\t\tfor j in range(3, 0, -1):\n\t\t\t\tcorners[j] = corners[j + 1] - i * 2\n\t\t\t\n\t\t\tbreak\n\t\ti += 1\n\tfor k in range(4):\n\t\tif n in range(corners[k], corners[k + 1] + 1):\n\t\t\ttmp = abs((corners[k + 1] + corners[k]) / 2 - n)\n\t\t\treturn tmp + i\n\nassert manhatten_distance(312051) == 430\nassert manhatten_distance(1) == 0 \nassert manhatten_distance(12) == 3\nassert manhatten_distance(23) == 2\nassert manhatten_distance(1024) == 31\nassert manhatten_distance(111) == 10\n\n\n","sub_path":"2017/day03/manhattan_distance1.py","file_name":"manhattan_distance1.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"495806344","text":"\"\"\"\nimageserver.py - Sublcass of SimpleHttpServer\n\nThe purpose of this subclass is to disable directory listings.\n\"\"\"\n\n__author__ = \"Thomas J. 
Daley, J.D.\"\n__version__ = \"1.0.1\"\n__date__ = \"17 Sep 2017\"\n\nimport argparse\nfrom http.client import HTTPConnection\nfrom http.server import HTTPStatus\nimport os\nfrom http.server import SimpleHTTPRequestHandler\nimport signal\nimport socketserver\n\nclass NoDirectoryHandler(SimpleHTTPRequestHandler):\n ''' Subclass the handler for custom processing. '''\n\n def list_directory(self, path):\n \"\"\"\n Never let anyone list a directory.\n \"\"\"\n self.send_error(HTTPStatus.NOT_FOUND, \"No permission to list directory\")\n return None\n\nclass GracefulKiller:\n ''' Catch SIGTERM and gracefully shut down. '''\n keep_running = True\n\n def __init__(self, address, port):\n signal.signal(signal.SIGINT, self.exit_gracefully)\n signal.signal(signal.SIGTERM, self.exit_gracefully)\n self.address = address\n self.port = port\n\n def exit_gracefully(self, signum, frame):\n '''\n Clears the \"keep_running\" flag then makes a dummy request to the HTTP server\n to get the HTTP server to iterate once through its loop and test the flag.\n The signal (signum) that we caught is appended to the dummy request just so\n we can log which signal we caught and responded to.\n '''\n self.keep_running = False\n connection = HTTPConnection(self.address, self.port, timeout=2)\n connection.request(\"GET\", \"/kill_request&signum={}\".format(signum))\n\n\ndef main():\n ''' Main method to get this process started. '''\n parser = argparse.ArgumentParser(description=\"relayserver.py\")\n parser.add_argument(\n \"-p\", \"--port\",\n help=\"Port to listen on. Default is 8000\",\n default=8000,\n type=int\n )\n\n parser.add_argument(\n \"-b\", \"--bind\",\n help=\"Interface (IPv4 address) to listen on. Default is 0.0.0.0.\",\n default='0.0.0.0'\n )\n\n args = parser.parse_args()\n\n #with socketserver.TCPServer((args.bind, args.port), NoDirectoryHandler) as httpd:\n # print('imageserver.py started and is listening in port', args.port)\n # httpd.serve_forever()\n httpd = socketserver.TCPServer((args.bind, args.port), NoDirectoryHandler)\n print(\"Starting\", __file__, \"v.\", __version__, \"by\", __author__)\n print(\"Serving from port\", args.port, \"as PID\", os.getpid(), \"( kill -2\", os.getpid(), \")\")\n killer = GracefulKiller(args.bind, args.port)\n\n try:\n while killer.keep_running:\n httpd.handle_request()\n except KeyboardInterrupt:\n print(\"Good bye . . 
.\")\n\n print(\"Quitting after SIGNAL to PID\", os.getpid())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"imageserver.py","file_name":"imageserver.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527493132","text":"\"\"\"\nContains the overview of all apps that belong to this topic\n\"\"\"\n\nfrom sites.analysis_3.info import details as details_info\n\noverview = {\n \"german\": {\n \"apps\": {\n \"Übersicht\": {\n \"name\": \"info\",\n \"type\": \"simple\",\n \"details\": details_info,\n },\n }\n }\n}","sub_path":"sites/analysis_3/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"92425285","text":"import typing\nimport decimal\nimport fractions\nimport collections\n\n\ndef get_max_and_min(data: typing.Set[typing.Union[decimal.Decimal, fractions.Fraction, str]]) -> typing.NamedTuple[\n typing.Union[decimal.Decimal, fractions.Fraction]]:\n for element in data:\n if isinstance(element, str):\n try:\n str_num = decimal.Decimal(element)\n data.discard(element)\n data.add(str_num)\n except decimal.InvalidOperation:\n digit_1 = ''\n for i in element:\n if i.isdigit():\n digit_1 += i\n else:\n break\n digit_2 = element[len(digit_1)+3:len(element)+1]\n data.discard(element)\n data.add(fractions.Fraction(int(digit_1), int(digit_2)))\n Result = collections.namedtuple('Result', 'max_value min_value')\n result = Result(max(data), min(data))\n return result\n","sub_path":"home1/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169324499","text":"#TensorFlow defines computation as Graphs:\nimport tensorflow as tf\n\n#Building a graph\n\n#Source operations - do not need any information input\na = tf.constant([2])\nb = tf.constant([3])\n\n#Just definition for computational model\nc = tf.add(a, b) #c = a + b\n\n#Session initialization for code running\nwith tf.Session() as session:\n result = session.run(c)\n print(result)","sub_path":"bdu/001_add_two_numbers.py","file_name":"001_add_two_numbers.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520424722","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Created by: python.exe -m py2exe mygame.py -O -W setup.py\n\n\nimport platform\nimport os\nfrom distutils.core import setup\nimport py2exe\n\nclass Target(object):\n '''Target is the baseclass for all executables that are created.\n It defines properties that are shared by all of them.\n '''\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n # the VersionInfo resource, uncomment and fill in those items\n # that make sense:\n \n # The 'version' attribute MUST be defined, otherwise no versioninfo will be built:\n # self.version = \"1.0\"\n \n # self.company_name = \"Company Name\"\n # self.copyright = \"Copyright Company Name © 2013\"\n # self.legal_copyright = \"Copyright Company Name © 2013\"\n # self.legal_trademark = \"\"\n # self.product_version = \"1.0.0.0\"\n # self.product_name = \"Product Name\"\n\n # self.private_build = \"foo\"\n # self.special_build = \"bar\"\n\n def copy(self):\n return Target(**self.__dict__)\n\n def __setitem__(self, name, value):\n self.__dict__[name] = value\n\nRT_BITMAP = 2\nRT_MANIFEST = 
24\n\n# A manifest which specifies the executionlevel\n# and windows common-controls library version 6\n\nmanifest_template = '''\n\n \n %(prog)s\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n'''\n\n\n\nmygame = Target(script = \"mygame.py\",\n dest_base=\"MyGame\",\n icon_resources=[(1,r\"resource/pico2d.ico\")],\n other_resources = [(RT_MANIFEST, 1, (manifest_template % dict(prog=\"mygame\", level=\"asInvoker\")).encode(\"utf-8\"))]\n )\n\npy2exe_options = dict(\n packages = [],\n optimize=1,\n compressed=True, # uncompressed may or may not have a faster startup\n bundle_files=2,\n dist_dir='dist',\n )\n\n\nresources = ('resource/1.ogg', 'resource/2011180021.txt', 'resource/bg.png', 'resource/blackbg.jpg', 'resource/blue.png', 'resource/button.png', 'resource/combo.png', 'resource/Cool.png', 'resource/ENCR10B.TTF', 'resource/Fail.png', 'resource/Good.png', 'resource/green_note.png', 'resource/keyboard_motion_green.png', 'resource/keyboard_motion_sky.png', 'resource/MINUS 1.ogg', 'resource/minus1.jpg', 'resource/Moebius.ttf', 'resource/pico2d.ico', 'resource/say that you.ogg', 'resource/saythatyou.jpg', 'resource/Score.txt', 'resource/showdown.jpg', 'resource/Showdown.ogg', 'resource/sky_note.png', 'resource/showdownbg.wav', 'resource/saythatbg.wav', 'resource/Minus1bg.wav')\n\nif platform.architecture()[0] == '32bit':\n sdl_folder = 'SDL2/x86/'\nelse:\n sdl_folder = 'SDL2/x64/'\n\nsdl_dlls = [sdl_folder + file_name for file_name in os.listdir(sdl_folder)]\n\n\nsetup(name=\"name\",\n windows=[mygame],\n data_files=[('.', resources), (sdl_folder, sdl_dlls)], # copy resource to '.' folder\n zipfile=None,\n options={\"py2exe\": py2exe_options},\n )\n\n","sub_path":"mygame/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415184051","text":"from django import template\n\nregister = template.Library()\n\n@register.inclusion_tag('right_answer.html', takes_context=True)\ndef right_answer_for_all(context, question):\n answers=question.get_answers()\n wrong_list=context.get('wrong_answers', [])\n if question.id in wrong_list:\n user_was_wrong = True\n else:\n user_was_wrong=False\n return{\n 'previous':{'answers': answers},\n 'user_was_wrong': user_was_wrong}\n@register.filter\ndef ansChoice_string(quetion, answer):\n return question.ansChoice_string(answer)\n","sub_path":"app/templatetags/quiztag.py","file_name":"quiztag.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606501976","text":"from sqlalchemy import (\n Column,\n Integer,\n BigInteger,\n String,\n Text,\n DateTime,\n )\n\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom sqlalchemy.orm import (\n scoped_session,\n sessionmaker,\n )\n\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nDBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))\nBase = declarative_base()\n\nclass Location(Base):\n __tablename__ = 'locations'\n id = Column(Integer, primary_key=True)\n name = Column(String(200), unique=True)\n feature_class = Column(String(1))\n feature_code = Column(String(10))\n country_code = Column(String(2))\n admin1_code = Column(String(20))\n admin2_code = Column(String(80))\n admin3_code = Column(String(20))\n admin4_code = Column(String(20))\n population = Column(BigInteger)\n date_modified = Column(DateTime)\n\n def __init__(self, id, name, feature_class, feature_code, 
country_code, admin1_code,\n admin2_code, admin3_code, admin4_code, population):\n\n self.id = id\n self.name = name\n self.feature_class = feature_class\n self.feature_code = feature_code\n self.country_code = country_code\n self.admin1_code = admin1_code\n self.admin2_code = admin2_code\n self.admin3_code = admin3_code\n self.admin4_code = admin4_code\n self.population = population\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'country_code': self.country_code,\n 'population': self.population\n }\n\n\n","sub_path":"locations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521662710","text":"\"\"\"\nFine-tune clinicalBERT on our dataset.\n\"\"\"\n\nimport torch\nfrom torch.optim import AdamW\nfrom transformers import AutoTokenizer, AutoModel, AutoConfig, BertForMaskedLM\nfrom torch.utils.data import TensorDataset, random_split\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nimport os\nimport time\nimport datetime\nfrom transformers import get_linear_schedule_with_warmup\n\nif torch.cuda.is_available(): \n \n device = torch.device(\"cuda\")\n\n print('There are %d GPU(s) available.' % torch.cuda.device_count())\n\n print('We will use the GPU:', torch.cuda.get_device_name(0))\n\nelse:\n print('No GPU available, using the CPU instead.')\n device = torch.device(\"cpu\")\n\n\nlr = 0.00002\n\n\n# clinicalBERT:\ntokenizer = AutoTokenizer.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\nmodel = BertForMaskedLM.from_pretrained(\"emilyalsentzer/Bio_ClinicalBERT\")\n\npretrained_model.cuda()\npretrained_model.eval()\n\noptimizer = AdamW(pretrained_model.parameters(), lr=lr, eps = 1e-8)\n\ndata = []\nf = open('corpus.txt', 'r')\nfor line in f:\n data.append(line)\n\ninput_ids = []\nattention_masks = []\nfor sentence in data:\n # encoded_sentence = torch.tensor(tokenizer.encode(sentence)).unsqueeze(0)\n encoded_sentence = tokenizer.encode_plus(\n sentence, \n add_special_tokens = True,\n max_length = 128,\n truncation=True,\n pad_to_max_length = True,\n return_attention_mask = True, \n return_tensors = 'pt',\n )\n input_ids.append(encoded_sentence['input_ids'])\n attention_masks.append(encoded_sentence['attention_mask'])\n\ninput_ids = torch.cat(input_ids, dim=0)\nattention_masks = torch.cat(attention_masks, dim=0)\n\n\ndataset = TensorDataset(input_ids, attention_masks)\n\ntrain_size = int(0.9 * len(dataset))\nval_size = len(dataset) - train_size\n\ntrain_dataset, val_dataset = random_split(dataset, [train_size, val_size])\n\nprint('{:>5,} training samples'.format(train_size))\nprint('{:>5,} validation samples'.format(val_size))\n\n\nbatch_size = 64\n\ntrain_dataloader = DataLoader(\n train_dataset,\n sampler = RandomSampler(train_dataset),\n batch_size = batch_size\n )\n\nvalidation_dataloader = DataLoader(\n val_dataset,\n sampler = SequentialSampler(val_dataset),\n batch_size = batch_size\n )\n\n\nepochs = 4\n\ntotal_steps = len(train_dataset) * epochs\n\nscheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps = 0,\n num_training_steps = total_steps)\n\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n\n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\n\ntraining_stats = []\nfor epoch_i in range(0, epochs):\n print(\"\")\n 
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n\n t0 = time.time()\n\n train_loss = 0\n pretrained_model.train()\n\n for step, batch in enumerate(train_dataloader):\n\n if step % 320 == 0 and not step == 0:\n # Calculate elapsed time in minutes.\n elapsed = format_time(time.time() - t0)\n\n # Report progress.\n print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\n\n pretrained_model.zero_grad()\n pretrained_outputs = pretrained_model(batch[0].to(device), attention_mask=batch[1].to(device),\n masked_lm_labels=batch[0].to(device))\n pretrained_loss = pretrained_outputs[0]\n train_loss += pretrained_loss.item()\n pretrained_loss.backward()\n optimizer.step()\n scheduler.step()\n\n avg_train_loss = train_loss / len(train_dataset)\n training_time = format_time(time.time() - t0)\n\n print(\"\")\n print(\" Average training loss: {0:.8f}\".format(avg_train_loss))\n print(\" Training epcoh took: {:}\".format(training_time))\n\n print(\"\")\n print(\"Running Validation...\")\n\n t0 = time.time()\n\n eval_loss = 0\n pretrained_model.eval()\n\n # Evaluate data for one epoch\n with torch.no_grad():\n for batch in validation_dataloader:\n outputs = pretrained_model(batch[0].to(device), attention_mask=batch[1].to(device),\n masked_lm_labels=batch[0].to(device))\n loss = outputs[0]\n\n eval_loss += loss.item()\n\n avg_val_loss = eval_loss / len(val_dataset)\n validation_time = format_time(time.time() - t0)\n\n print(\" Validation Loss: {0:.8f}\".format(avg_val_loss))\n print(\" Validation took: {:}\".format(validation_time))\n\n training_stats.append(\n {\n 'epoch': epoch_i + 1,\n 'Training Loss': avg_train_loss,\n 'Valid. Loss': avg_val_loss,\n # 'Valid. Accur.': avg_val_accuracy,\n 'Training Time': training_time,\n 'Validation Time': validation_time\n }\n )\n\nprint(\"\")\nprint(\"Training complete!\")\n\n\n# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n\noutput_dir = './model_save_clinical'\n\n# Create output directory if needed\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\nprint(\"Saving model to %s\" % output_dir)\n\n# Save a trained model, configuration and tokenizer using `save_pretrained()`.\n# They can then be reloaded using `from_pretrained()`\nmodel_to_save = pretrained_model.module if hasattr(pretrained_model, 'module') else pretrained_model # Take care of distributed/parallel training\nmodel_to_save.save_pretrained(output_dir)\ntokenizer.save_pretrained(output_dir)\n\n# Good practice: save your training arguments together with the trained model\n# torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n","sub_path":"training_clinicalBERT.py","file_name":"training_clinicalBERT.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"572753625","text":"import io\nimport zipfile\nimport boto3\nimport sys\n\nsession = boto3.Session(\n\taws_access_key_id=\"AKIAYCS5P5BHX2UNCL22\", \n\taws_secret_access_key=\"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\",\n)\n\ns3 = boto3.client(\n 's3',\n)\n\n\n\ndef stream_zip_file():\n count = 0\n obj = s3.Object(\n bucket_name='home-pmt-accounting-dev',\n key='racct/DY_Position_SD/file_0_100.2019-06-17.13_39_12.IQ.csv.gz test.csv.gz'\n )\n buffer = io.BytesIO(obj.get()[\"Body\"].read())\n print (buffer)\n z = zipfile.ZipFile(buffer)\n foo2 = z.open(z.infolist()[0])\n print(sys.getsizeof(foo2))\n 
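The S3 reader above opens a `.csv.gz` object with `zipfile`, but gzip members are not ZIP archives; `gzip.GzipFile` is the matching reader. A self-contained sketch, with in-memory bytes standing in for `obj.get()["Body"].read()`:

import gzip
import io

raw = gzip.compress(b"a,b\n1,2\n")  # stand-in for the S3 object's bytes
with gzip.GzipFile(fileobj=io.BytesIO(raw)) as gz:
    print(sum(1 for _ in gz))  # line count, as in stream_zip_file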
line_counter = 0\n for _ in foo2:\n line_counter += 1\n print (line_counter)\n z.close()\n\n\nif __name__ == '__main__':\n stream_zip_file()","sub_path":"tools/py/gz2.py","file_name":"gz2.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212920859","text":"import sys\n\nfrom Utils.BinaryTree import BinaryTree\nfrom Utils.BinaryTreeNode import BinaryTreeNode\n\n\ndef checkBst(binaryTree):\n maxInf = sys.maxsize\n minInf = -sys.maxsize - 1\n\n leftRange = [minInf,binaryTree.root.data]\n rightRange = [binaryTree.root.data,maxInf]\n\n leftTree = isNodeInRange(binaryTree.root.left,leftRange)\n rightree = isNodeInRange(binaryTree.root.right,rightRange)\n\n result = leftTree and rightree\n\n if result:\n print(\"The tree is BST\")\n else:\n print(\"The tree is NOT BST\")\n\n\n return result\n\n\ndef isNodeInRange(node, range):\n if node is None:\n return True\n\n\n left = isNodeInRange(node.left,range)\n right = isNodeInRange(node.right,range)\n\n nodeResult = node.data >= range[0] and node.data <= range[1]\n\n return nodeResult and left and right\n\n\nif __name__ == '__main__':\n bt = BinaryTree()\n\n dataList = [1, 2, 3, 4, 5, 6]\n bt.constructBtWrt(dataList)\n bt.printPreOrder(bt.root)\n\n checkBst(bt)\n\n bt2 = BinaryTree()\n\n bt2_root = BinaryTreeNode(15)\n\n bt2_root.left = BinaryTreeNode(10)\n bt2_root.left.left = BinaryTreeNode(5)\n bt2_root.left.right = BinaryTreeNode(12)\n bt2_root.left.left.right = BinaryTreeNode(6)\n bt2_root.left.left.left = BinaryTreeNode(2)\n\n bt2_root.right = BinaryTreeNode(25)\n bt2_root.right.left = BinaryTreeNode(22)\n bt2_root.right.right = BinaryTreeNode(30)\n bt2_root.right.left.right = BinaryTreeNode(23)\n bt2_root.right.left.left = BinaryTreeNode(21)\n\n bt2.root = bt2_root\n\n bt2.printPreOrder(bt2.root)\n\n checkBst(bt2)","sub_path":"CrackingTheCode/Chapter4_TreesGraphs/question_4_5_ValidateBst.py","file_name":"question_4_5_ValidateBst.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542007334","text":"#!/usr/bin/python3\nimport http.client\nimport json\nimport time\nimport redis\n\nr = redis.StrictRedis(host='localhost', port=6379, db=0)\nconn = http.client.HTTPConnection(\"localhost:5000\")\n\n\n# Valid token - 200 OK\nr.set('token', 'ABCD0123')\nr.set('expired', False)\nconn.request(\"GET\", \"/data\", \"\", { 'Authorization': 'ABCD0123' })\nresponse = conn.getresponse()\nassert response.status == 200\n\n# Expired token - 401\nr.set('token', 'ABCD0123')\nr.set('expired', True)\nconn.request(\"GET\", \"/data\", \"\", { 'Authorization': 'ABCD0123' })\nresponse = conn.getresponse()\nassert response.status == 401\n\n# Invalid token - 403\nr.set('token', 'QWER5555')\nr.set('expired', False)\nconn.request(\"GET\", \"/data\", \"\", { 'Authorization': 'ABCD0123' })\nresponse = conn.getresponse()\nassert response.status == 403, response.status\n\n# Refresh token\nr.set('token', 'ABCD0123')\nr.set('expired', True)\nconn.request(\"POST\", \"/refresh\", \"\", { 'Authorization': 'ABCD0123' })\nresponse = conn.getresponse()\nassert response.status == 200\ndata = response.read().decode(\"utf-8\")\nassert json.loads(data)[\"token\"] != 'ABCD0123'\nassert r.get('expired').decode() == \"False\"\n\n# Refresh token with invalid one\nr.set('token', 'QWER5555')\nr.set('expired', True)\nconn.request(\"POST\", \"/refresh\", \"\", { 'Authorization': 'ABCD0123' })\nresponse = 
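The range-based check in question_4_5_ValidateBst.py passes an explicit [min, max] window down the tree; the same idea reads more compactly with default bounds. A standalone sketch, where Node is a stand-in for BinaryTreeNode and the bounds are inclusive, as in isNodeInRange:

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def is_bst(node, lo=float("-inf"), hi=float("inf")):
    # Every node must fall inside the window its ancestors imply.
    if node is None:
        return True
    return (lo <= node.data <= hi
            and is_bst(node.left, lo, node.data)
            and is_bst(node.right, node.data, hi))

print(is_bst(Node(15, Node(10), Node(25))))  # True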
conn.getresponse()\nassert response.status == 403\nassert r.get('token').decode() == \"QWER5555\"\n\n# Login\nr.set('token', 'ABCD0123')\nr.set('expired', True)\nconn.request(\"POST\", \"/login\")\nresponse = conn.getresponse()\nassert response.status == 200\ndata = response.read().decode(\"utf-8\")\nassert r.get('token').decode() == json.loads(data)[\"token\"]\nassert r.get('expired').decode() == \"False\"\n\nprint (\"OK\")\n","sub_path":"backend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"79894379","text":"\"\"\"\nBiconNet: An Edge-preserved Connectivity-based Approach for Salient Object Detection\nZiyun Yang, Somayyeh Soltanian-Zadeh and Sina Farsiu\nCodes from: https://github.com/Zyun-Y/BiconNets\nPaper: https://arxiv.org/abs/2103.00334\n\"\"\"\n\nfrom random import shuffle\nimport sys\nimport datetime\nimport numpy as np\nfrom connect_loss import bicon_loss\nimport torch.nn.functional as F\nimport torch\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nimport os\nfrom tensorboardX import SummaryWriter\nimport torchvision.utils as utils\nfrom skimage.io import imread, imsave\nfrom lib.bad_grad_viz import register_hooks\nfrom utils_bicon import *\nfrom lr_update import get_lr\n\nsave = 'save'\nif not os.path.exists(save):\n os.makedirs(save)\n\ndef create_exp_directory(exp_id):\n if not os.path.exists('models/' + str(exp_id)):\n os.makedirs('models/' + str(exp_id))\n\n\nclass Solver(object):\n\n gamma = 0.1\n step_size = 15\n\n\n def train(self, model, train_loader, val_loader,cfg):\n\n # replace your loss function with a bicon loss here\n self.loss_func = bicon_loss()\n\n num_epochs = cfg.epoch\n exp_id = cfg.exp_id\n opt_group = 1\n real_batch = cfg.real_batch\n lr_mode = 'warm-up-step'\n if opt_group == 2:\n base, head = [], []\n for name, param in model.named_parameters():\n if 'bkbone.conv1' in name or 'bkbone.bn1' in name:\n print(name)\n elif 'bkbone' in name:\n base.append(param)\n else:\n head.append(param)\n optim = torch.optim.SGD([{'params':base}, {'params':head}], lr=cfg.lr, momentum=cfg.momen, weight_decay=cfg.decay, nesterov=True)\n else:\n optim = torch.optim.Adam(model.parameters(), lr=cfg.lr, weight_decay=cfg.decay,betas=cfg.betas)\n \n \n scheduler = lr_scheduler.StepLR(optim, step_size=self.step_size,gamma=self.gamma) \n\n # model, optim = amp.initialize(model, optim, opt_level='O2')\n model.cuda()\n\n print('START TRAIN.')\n curr_iter = 0\n\n create_exp_directory(exp_id)\n csv = 'results_'+str(exp_id)+'.csv'\n with open(os.path.join(save, csv), 'w') as f:\n f.write('epoch, mae \\n')\n\n best_mae = 0\n step=0\n m=0\n db_size = len(train_loader)/real_batch\n\n model.eval()\n for epoch in range(num_epochs):\n total_loss = 0\n model.zero_grad()\n iter_num = 0\n curr_step = 0\n\n\n for i_batch, sample_batched in enumerate(train_loader):\n step +=1\n curr_step += 1\n \n X = Variable(sample_batched[0])\n y = Variable(sample_batched[1])\n connect = Variable(sample_batched[2])\n\n X= X.cuda()\n y = y.long().cuda()\n connect = connect.cuda()\n \n\n out = model(X)\n\n loss_iter = self.loss_func(out, y,connect)\n\n loss = loss_iter/real_batch\n total_loss += loss.item()\n iter_num +=1\n\n loss.backward()\n\n # Update gradient every real_batch batch. Use this part if you want to train your network with the original image size. 
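solver.py divides each loss by real_batch and only steps the optimizer every real_batch iterations, i.e. gradient accumulation. A self-contained sketch of that update rule, with all names illustrative rather than taken from the repo:

import torch

model = torch.nn.Linear(4, 1)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
real_batch = 4  # accumulate gradients over this many mini-batches
for i in range(1, 9):
    x, y = torch.randn(2, 4), torch.randn(2, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / real_batch
    loss.backward()  # gradients accumulate in .grad across iterations
    if i % real_batch == 0:
        optim.step()
        optim.zero_grad()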
\n            # Otherwise just ignore this and use the normal way to update gradient.\n                if iter_num%real_batch ==0:\n\n                    m +=1\n                    optim.step()\n                    optim.zero_grad()\n                    iter_num=0\n\n                    if m == 10:\n                        print('%s | step:%d/%d/%d/%d | lr=%.6f | loss=%.6f'%(datetime.datetime.now(), step/real_batch,curr_step/real_batch, epoch+1,num_epochs, optim.param_groups[0]['lr'], total_loss))\n                        m=0\n                        total_loss=0\n\n            scheduler.step()\n\n            curr_mae = self.test_epoch(model,val_loader,epoch,exp_id)\n            if curr_mae<best_mae or epoch>15:\n                torch.save(model.state_dict(), 'models/' + str(exp_id) + '/relaynet_epoch' + str(epoch + 1)+'.pth')\n\n        print('FINISH.')\n        \n    def test_epoch(self,model,loader,epoch,exp_id):\n        mae_ls_binary = []\n\n        model.eval()\n        with torch.no_grad(): \n            for j_batch, test_data in enumerate(loader):\n                curr_dice = []\n                X_test = Variable(test_data[0])\n                y_test = Variable(test_data[1])\n\n                X_test= X_test.cuda()\n\n                y_test = y_test.long().cuda()\n\n                output_test = model(X_test)\n\n                pred = bv_test(output_test)\n\n                for im in range(y_test.shape[0]):\n\n                    mae_bi = self.Eval_mae(pred[im],y_test[im])\n                    mae_ls_binary.append(mae_bi)\n\n                if j_batch % 100 ==0:\n                    print('test [Iteration : ' + str(j_batch) + '/' + str(len(loader)) + ']' ' ave mae:%.3f' %(np.mean(mae_ls_binary)))\n\n        csv = 'results_'+str(exp_id)+'.csv'\n\n        with open(os.path.join(save, csv), 'a') as f:\n            f.write('%03d,%0.6f\\n' % (\n                (epoch + 1),\n                np.mean(mae_ls_binary)))\n\n\n        return np.mean(mae_ls_binary)\n\n\n    def Eval_mae(self,pred,gt):\n        avg_mae, img_num = 0.0, 0.0\n        #mae_list = [] # for debug\n        with torch.no_grad():\n            mea = torch.abs(pred - gt).mean()\n\n        return mea.item()\n","sub_path":"general/train/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"143664122","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport configparser\nimport shutil\nimport datetime\nimport os\n\n\n\n# Data\nconfig = configparser.ConfigParser()\nconfig_path = os.path.join(os.path.dirname(__file__), 'conf/scriptShodan.conf')\nconfig.read(config_path)\n\npathOutputs = config.get('Paths', 'OUTPUTS_PATH')\n\nsearchs = config.get('Data_Searchs', 'SEARCHS').split(\", \")\n\ndirName = datetime.date.today().strftime(\"%Y-%m-%d\")\nedate = dirName\nsdate = (datetime.datetime.now() - datetime.timedelta(days=7)).date().strftime(\"%Y-%m-%d\")\n\n\ndef main():\n    print('Ejecutando Shodan...')\n    createDir(dirName)\n    shodanExe(searchs)\n    compressDir(dirName)\n\ndef printShodan():\n    print('SHODAN EJECUTANDO------')\n\n\ndef createDir(name):\n    if os.path.exists(pathOutputs + name):\n        print ('SI Existe directorio')\n    else:\n        print('No existe el directorio')\n        print('Creando directorio: ' + name)\n        os.mkdir(pathOutputs + name)\n\n\ndef shodanExe(searchs):\n    for search in searchs:\n        name = (search.split()[len(search.split()) - 2])\n        try:\n            os.system('shodan ' + search + ' > ' + pathOutputs + dirName + '/' + name)\n\n        except OSError as err:\n            print(\"Error: el archivo ya existe, hay que borrar lo anterior\".format(err))\n\n\ndef moveReportToDefaultDir(reportName):\n    if os.path.exists(pathOutputs + dirName + '/' + reportName):\n        try:\n            print('EXISTE EL ARCHIVO')\n            os.remove(pathOutputs + dirName + '/' + reportName)\n            shutil.move(originalOutputsPath + reportName, pathOutputs + dirName)\n        except OSError as err:\n            print('Error: '.format(err))\n    else:\n        print('NO EXISTE EL ARCHIVO')\n        try:\n            shutil.move(originalOutputsPath + reportName, pathOutputs + dirName)\n        except shutil.Error as err:\n            
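shodanExe above shells out with os.system and a `>` redirect; subprocess.run can express the same download without a shell, assuming the shodan CLI is on PATH and that splitting the configured search string on whitespace is acceptable:

import subprocess

def run_shodan(search, out_path):
    # stdout goes straight to the report file, no shell redirection needed
    with open(out_path, "w") as out:
        subprocess.run(["shodan"] + search.split(), stdout=out, check=True)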
print('Error: '.format(err))\n\n\ndef compressDir(dirName):\n # Si no me lo genera fuera de Outputs\n os.chdir(pathOutputs)\n shutil.make_archive(\"Shodan-\" + dirName, \"zip\", base_dir=dirName)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Shodan/scriptShodan.py","file_name":"scriptShodan.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"280718741","text":"def gemiddelde(string):\n woorden = string.split(' ')\n lengtes = []\n\n for woord in woorden:\n lengtes.append(len(woord))\n\n return sum(lengtes) / len(lengtes)\n\nprint('Voer de coolste zin die je kan bedenken in en ik bereken er een cool gemiddelde van:')\n\nzin = input()\nzinGemiddelde = gemiddelde(zin)\n\nprint('De gemiddelde woordlengte van jou coole zin is: {}'.format(zinGemiddelde))\n","sub_path":"Les 7/pe7_5.py","file_name":"pe7_5.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"332261110","text":"from collections import Counter\nfrom typing import List\n\nimport cv2 as cv\nimport numpy as np\nfrom scipy import ndimage\nfrom skimage.morphology import square, dilation\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass SegmentationResult:\n def __init__(self, image, background_cluster):\n self.background_cluster = background_cluster\n self.image = image\n\n\nclass WordDescription:\n def __init__(self, from_: int, to: int, data: np.ndarray):\n self.from_ = from_\n self.to = to\n self.data = data\n\n\nclass RowDescription:\n def __init__(self, from_: int, to: int, data: np.ndarray, words: List[WordDescription]):\n self.from_ = from_\n self.to = to\n self.data = data\n self.words = words\n\n\ndef create_mask(mask_shape: tuple, output_shape: tuple, rows: List[RowDescription], moments: np.ndarray) -> np.ndarray:\n mask = np.zeros(mask_shape, dtype=np.uint8)\n for i, row in enumerate(rows):\n for word in row.words:\n start_point = (word.from_, row.from_)\n end_point = (word.to, row.to)\n mask = cv.line(mask, start_point, end_point, (i + 1,), 3)\n\n inv_moments = np.linalg.pinv(moments)\n warped_mask = cv.warpPerspective(mask, inv_moments, output_shape)\n return warped_mask\n\n\ndef get_clusters(images: List[np.ndarray]) -> List[np.ndarray]:\n result = []\n for i, image in enumerate(images):\n row_pixels = np.sum(image, axis=1)\n col_pixels = np.sum(image, axis=0)\n\n height, width = image.shape\n mask_rows = np.array([[r for _ in range(width)] for r in row_pixels])\n mask_cols = np.array([col_pixels for _ in range(height)])\n mask_comp = mask_rows * mask_cols * ndimage.convolve(image, [\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n ])\n\n c = KMeans(2).fit_predict(mask_comp.reshape(-1, 1))\n\n mask_clusters = c.reshape(mask_comp.shape)\n mask_clusters = dilation(mask_clusters, square(1))\n\n result.append(mask_clusters)\n return result\n\n\ndef get_segmentation_mask(images: List[np.ndarray], clusters: List[np.ndarray]) -> List[SegmentationResult]:\n result = []\n for (image_no_grid, mask_clusters) in zip(images, clusters):\n\n background_cluster = Counter(mask_clusters.flatten()).most_common(1)[0][0]\n marker_cluster = 1 - background_cluster\n\n rows, marked_rows = __find_groups(marker_cluster, mask_clusters)\n for g in marked_rows:\n a, b = g[0], g[-1]\n clipped = mask_clusters[a:b]\n cols, marked_cols = 
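get_clusters above reduces word detection to a two-cluster KMeans over per-pixel scores. A toy, self-contained version of that split on a synthetic image (assumes scikit-learn and NumPy):

import numpy as np
from sklearn.cluster import KMeans

img = np.zeros((8, 8))
img[2:6, 2:6] = 1.0  # bright square on a dark background
# Cluster individual pixel intensities into foreground/background.
labels = KMeans(n_clusters=2, n_init=10).fit_predict(img.reshape(-1, 1))
print(labels.reshape(img.shape))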
__find_groups(marker_cluster, clipped.T)\n\n if not len(marked_cols):\n rows[a:b] = background_cluster\n continue\n\n try:\n breaks = [(col_beg[0] - col_end[-1]) for col_end, col_beg in zip(marked_cols, marked_cols[1:])]\n max_connection_cols = np.max(breaks)\n min_connection_cols = np.min(breaks)\n\n for col_a, col_b in zip(marked_cols, marked_cols[1:]):\n segments_distance = col_b[0] - col_a[-1]\n\n distance_similarity_max = __calculate_similarity(max_connection_cols, segments_distance)\n distance_similarity_min = __calculate_similarity(min_connection_cols, segments_distance)\n\n if distance_similarity_max < 0.6 or distance_similarity_min > 0.2:\n group_value = cols[col_a[-1] - 1][0]\n cols[col_a[-1]:col_b[-1] + 1] = group_value\n\n rows[a:b] = cols.T\n except Exception:\n rows[a:b] = background_cluster\n\n result.append(SegmentationResult(rows, background_cluster))\n return result\n\n\ndef get_row_descriptions(images: List[np.ndarray], segmentation_results: List[SegmentationResult]):\n result: List[List[RowDescription]] = []\n for image, segmentation_result in zip(images, segmentation_results):\n segmented_image = segmentation_result.image\n background_cluster = segmentation_result.background_cluster\n rows = __extract_all_segments(image, segmented_image, background_cluster)\n result.append(rows)\n\n return result\n\n\ndef __extract_all_segments(image: np.ndarray, rows_mask, background_cluster) -> List[RowDescription]:\n all_rows: List[RowDescription] = []\n rows = __find_segments(rows_mask, background_cluster)\n for r in rows:\n words_in_row = __find_segments(r.data.T, background_cluster)\n words: List[WordDescription] = []\n\n for c in words_in_row:\n fragment = image[r.from_:r.to, c.from_:c.to]\n words.append(WordDescription(c.from_, c.to, fragment))\n\n fragment = image[r.from_:r.to]\n all_rows.append(RowDescription(r.from_, r.to, fragment, words))\n\n return all_rows\n\n\ndef __find_segments(matrix, background_cluster) -> List[WordDescription]:\n result: List[WordDescription] = []\n group: List[int] = []\n for num, dim in enumerate(matrix):\n if np.any(dim != background_cluster):\n group.append(num)\n elif len(group):\n result.append(WordDescription(group[0], group[-1], matrix[group]))\n group = []\n\n return result\n\n\ndef __find_groups(marker_cluster, matrix):\n group, marked = [], []\n for num, dim in enumerate(matrix):\n if np.any(dim == marker_cluster):\n group.append(num)\n elif len(group):\n marked.append(group)\n group = []\n\n lines_mask = np.zeros_like(matrix)\n for i, g in enumerate(marked, 1):\n a, b = g[0], g[-1]\n lines_mask[a:b] = i\n\n return lines_mask, marked\n\n\ndef __calculate_similarity(x, y):\n if x < y:\n x, y = y, x\n\n sim = cosine_similarity([[x, y]], [[y, x]])\n return sim[0][0]\n","sub_path":"src/pipeline_word_detection.py","file_name":"pipeline_word_detection.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652799186","text":"import csv\nimport json\nimport sys, os\nimport io\nimport subprocess\n\ndef read_CSV(csvfile):\n csv_rows = []\n reader = csv.DictReader(csvfile)\n field = reader.fieldnames\n for row in reader:\n stuff = {field[i]:row[field[i]] for i in range(len(field))}\n csv_rows.append(stuff)\n return csv_rows\n\ndef convert_write_json(data, json_file):\n f = json_file\n f.write(json.dumps(data, sort_keys=False, \\\n indent=2, separators=(',', ': ')).encode('utf-8'))\n\nfiS = None\nfor encoding in ('utf-8', 'cp1252'):\n try:\n with 
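csv-to-json.py pairs a DictReader with json.dumps and tries utf-8 before falling back to cp1252 when decoding. An in-memory round trip of both steps:

import csv, io, json

raw = "a,b\n1,caf\u00e9\n".encode("cp1252")  # bytes that are not valid utf-8
for enc in ("utf-8", "cp1252"):
    try:
        text = raw.decode(enc)
        break
    except UnicodeDecodeError:
        continue
rows = list(csv.DictReader(io.StringIO(text)))
print(json.dumps(rows, indent=2))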
open(sys.argv[1], \"rb\") as fi0:\n fiS = fi0.read().decode(encoding)\n except:\n continue\n break\n\nfi = io.StringIO(fiS)\ncsv_rows = read_CSV(fi)\n\nwith open(sys.argv[2], \"wb\") as fo:\n convert_write_json(csv_rows, fo)\n","sub_path":"db-conv/csv-to-json.py","file_name":"csv-to-json.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"519936699","text":"import numpy as np\nimport pandas as pd\nimport time,datetime\nimport math\ntrain_start_date = u'2012-05-24'\ntrain_end_date = u\"2017-01-01\"\nfactor = 'PE'\nsegments = 100\ndateDelta = 5\ncalender = DataAPI.TradeCalGet(exchangeCD=u\"XSHG\",beginDate=train_start_date,endDate=train_end_date,field=u\"exchangeCD,calendarDate,isOpen\",pandas=\"1\")\nopen_date = calender[calender['isOpen']==1]['calendarDate'].tolist()\nstockBasicData = DataAPI.EquGet(equTypeCD=u\"A\",listStatusCD=u\"L\",field=u\"secID,listDate\",pandas=\"1\").set_index('secID')\nfor tradeDateNonius in open_date:\n tradeDateNoniusIndex = open_date.index(tradeDateNonius)\n resultStartNoniusIndex = tradeDateNoniusIndex + 1\n resultEndNoniusIndex = tradeDateNoniusIndex + dateDelta\n if resultEndNoniusIndex > len(open_date) - 1:\n break\n#op_data = DataAPI.MktEqudAdjGet(tradeDate=u\"20150513\",secID=u\"\",ticker=u\"\",isOpen=\"\",beginDate=u\"\",endDate=u\"\",field=u\"secID,openPrice\",pandas=\"1\")\n# cl_data = DataAPI.MktEqudAdjGet(tradeDate=u\"20150520\",secID=u\"\",ticker=u\"\",isOpen=\"\",beginDate=u\"\",endDate=u\"\",field=u\"secID,closePrice\",pandas=\"1\")\n# mg_data = pd.merge(op_data,cl_data,on=u'secID',how=u'outer').dropna()\n# mg_data = mg_data[(mg_data['closePrice'] != 0) & (mg_data['openPrice'] != 0)]\n# mg_data['score'] = mg_data['closePrice']/mg_data['openPrice'] - 1\n# oneDayGet = DataAPI.MktStockFactorsOneDayGet(tradeDate=tradeDateNonius,field=u\"secID,\"+factor,pandas=\"1\")\n# mg_data = pd.merge(mg_data,oneDayGet,on=u'secID',how=u'outer').dropna()\n# allData = mg_data.dropna(axis=0, how='any').sort(factor)\n# length = len(allData)\n# group_data = pd.Series(np.nan, index=range(1,segments+1))\n# count = 0\n# if length%segments == 0:\n# step = length/segments\n# for i in range(step,length+1,step):\n# count += 1\n# group = allData[i-step:i]\n# group_data[count] = group['score'].mean()\n# else:\n# remainder = length%(segments-1)\n# step = (length - remainder)/(segments-1)\n# for i in range(step,length - remainder+1,step):\n# count += 1\n# group = allData[i-step:i]\n# group_data[count] = group['score'].mean()\n# remainder_group = allData[length-remainder:length]\n# group_data[segments] = remainder_group['score'].mean()\n# print group_data\n\n ","sub_path":"rank1factor.py","file_name":"rank1factor.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"298631725","text":"def parts(prefix, n, gaps = ()):\n gaps = sorted(set(gaps), reverse=True)\n return (md_gen if prefix.endswith('/') else ms_doggy)(n, gaps)\n\ndef unmark(labels):\n for size, label in labels:\n if label[0] == '*':\n label = label[1:]\n yield size, label\n\ndef md_gen(n, gaps):\n if not gaps:\n return range(n)\n def gen():\n shift, gap = 0, _pop(gaps)\n for y in range(n):\n if y + shift == gap:\n yield\n (shift, gap,) = (shift + 1, _pop(gaps))\n yield y\n return gen()\n\ndef ms_doggy(n, gaps):\n def gen():\n short = n <= 4\n for y in range(1, n + 1 if short else 4):\n yield y\n if short:\n return \n for y in 
range(5, n + 2):\n            yield y\n    if not gaps:\n        return gen()\n    def filt():\n        gap = _pop(gaps)\n        for (i, y,) in enumerate(gen()):\n            if i == gap:\n                gap = _pop(gaps)\n                continue\n            yield y\n    return filt()\n\ndef _pop(l):\n    return l.pop() if l else None\n\nif __name__ == '__main__':\n    raise Exception('making a module executable is a bad habit.')\n","sub_path":"lib/parts.py","file_name":"parts.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"277931281","text":"import logging\nimport pygame\n\n\ndef log_time(callback, *args):\n    logging.info(f'measuring time for function {callback}')\n    time_before = pygame.time.get_ticks()\n    result = callback(*args)\n    time_after = pygame.time.get_ticks()\n    time_delta = time_after - time_before\n    logging.info(\n        f'{callback} took {time_delta} ms')\n    return result\n","sub_path":"src/helper/logging_functions.py","file_name":"logging_functions.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"154101695","text":"def subset_x_y(target, features, start_index:int, end_index:int):\n    \n    return features[start_index:end_index], target[start_index:end_index]\n    \ndef split_sets_by_time(df, target_col, test_ratio=0.2):\n    \n    df_copy = df.copy()\n    target = df_copy.pop(target_col)\n    cutoff = int(len(target) * test_ratio)\n    \n    X_train, y_train = subset_x_y(target=target, features=df_copy, start_index=0, end_index=-cutoff*2)\n    X_val, y_val = subset_x_y(target=target, features=df_copy, start_index=-cutoff*2, end_index=-cutoff)\n    X_test, y_test = subset_x_y(target=target, features=df_copy, start_index=-cutoff, end_index=len(target))\n\n    return X_train, y_train, X_val, y_val, X_test, y_test\n    \n\ndef save_sets(X_train=None, y_train=None, X_val=None, y_val=None, X_test=None, y_test=None, path='../data/processed/'):\n    \n    import numpy as np\n\n    if X_train is not None:\n      np.save(f'{path}X_train', X_train)\n    if X_val is not None:\n      np.save(f'{path}X_val',   X_val)\n    if X_test is not None:\n      np.save(f'{path}X_test',  X_test)\n    if y_train is not None:\n      np.save(f'{path}y_train', y_train)\n    if y_val is not None:\n      np.save(f'{path}y_val',   y_val)\n    if y_test is not None:\n      np.save(f'{path}y_test',  y_test)\n\n\ndef load_sets(path='../data/processed/', val=False):\n    \n    import numpy as np\n    import os.path\n\n    X_train = np.load(f'{path}X_train.npy') if os.path.isfile(f'{path}X_train.npy') else None\n    X_val   = np.load(f'{path}X_val.npy'  ) if os.path.isfile(f'{path}X_val.npy')   else None\n    X_test  = np.load(f'{path}X_test.npy' ) if os.path.isfile(f'{path}X_test.npy')  else None\n    y_train = np.load(f'{path}y_train.npy') if os.path.isfile(f'{path}y_train.npy') else None\n    y_val   = np.load(f'{path}y_val.npy'  ) if os.path.isfile(f'{path}y_val.npy')   else None\n    y_test  = np.load(f'{path}y_test.npy' ) if os.path.isfile(f'{path}y_test.npy')  else None\n    \n    return X_train, y_train, X_val, y_val, X_test, y_test\n    \n\ndef pop_target(df, target_col, to_numpy=False):\n    \n    df_copy = df.copy()\n    target = df_copy.pop(target_col)\n    \n    if to_numpy:\n        df_copy = df_copy.to_numpy()\n        target = target.to_numpy()\n    \n    return df_copy, target\n    \n    \ndef split_sets_random(df, target_col, test_ratio=0.2, to_numpy=False):\n    \n    from sklearn.model_selection import train_test_split\n    \n    features, target = pop_target(df=df, target_col=target_col, to_numpy=to_numpy)\n    \n    X_data, X_test, y_data, y_test = train_test_split(features, target, test_size=test_ratio, 
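split_sets_by_time above carves train/validation/test windows off the end of a time-ordered frame. The same cutoff arithmetic on a plain list, as a self-contained sketch:

def time_split(values, test_ratio=0.2):
    cut = int(len(values) * test_ratio)
    # train is everything before the last two windows; val and test
    # are the two most recent windows, preserving time order.
    return values[:-2 * cut], values[-2 * cut:-cut], values[-cut:]

print(time_split(list(range(10))))  # ([0, 1, 2, 3, 4, 5], [6, 7], [8, 9])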
random_state=8)\n \n val_ratio = test_ratio / (1 - test_ratio)\n X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=val_ratio, random_state=8)\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n","sub_path":"python/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539057255","text":"\"\"\"\nDynamoDB Models for InPynamoDB\n\"\"\"\nimport asyncio\n\nimport copy\nimport json\nimport warnings\n\nfrom pynamodb.compat import getmembers_issubclass\nfrom pynamodb.connection.base import log\nfrom pynamodb.connection.util import pythonic\nfrom pynamodb.constants import BATCH_GET_PAGE_LIMIT, RESPONSES, UNPROCESSED_KEYS, KEYS, READ_CAPACITY_UNITS, \\\n WRITE_CAPACITY_UNITS, STREAM_VIEW_TYPE, STREAM_SPECIFICATION, STREAM_ENABLED, GLOBAL_SECONDARY_INDEXES, \\\n LOCAL_SECONDARY_INDEXES, ATTR_DEFINITIONS, ATTR_NAME, TABLE_STATUS, ACTIVE, META_CLASS_NAME, REGION, HOST, \\\n PUT_FILTER_OPERATOR_MAP, QUERY_FILTER_OPERATOR_MAP, QUERY_OPERATOR_MAP, ITEM, DELETE_FILTER_OPERATOR_MAP, \\\n BATCH_WRITE_PAGE_LIMIT, PUT, DELETE, ATTRIBUTES, UNPROCESSED_ITEMS, PUT_REQUEST, DELETE_REQUEST, RETURN_VALUES, \\\n ALL_NEW, ATTR_UPDATES, RANGE_KEY, UPDATE_FILTER_OPERATOR_MAP, ACTION, VALUE, ATTR_TYPE_MAP, ITEM_COUNT, COUNT, \\\n SCAN_OPERATOR_MAP, KEY, INDEX_NAME, KEY_SCHEMA, PROJECTION, PROJECTION_TYPE, PROVISIONED_THROUGHPUT, \\\n NON_KEY_ATTRIBUTES, ATTR_TYPE, KEY_TYPE\nfrom pynamodb.exceptions import DoesNotExist, TableError, TableDoesNotExist\nfrom pynamodb.models import Model as PynamoDBModel, \\\n ModelContextManager as PynamoDBModelContextManager, MetaModel as PynamoDBMetaModel\nfrom pynamodb.types import HASH, RANGE\n\nfrom inpynamodb.attributes import MapAttribute\nfrom inpynamodb.connection import TableConnection\nfrom inpynamodb.connection.base import MetaTable\nfrom inpynamodb.indexes import Index, GlobalSecondaryIndex\nfrom inpynamodb.pagination import ResultIterator\nfrom inpynamodb.settings import get_settings_value\n\n\nclass ModelContextManager(PynamoDBModelContextManager):\n \"\"\"\n A class for managing batch operations\n\n \"\"\"\n\n def __init__(self, model, auto_commit=True):\n self.model = model\n self.auto_commit = auto_commit\n self.max_operations = BATCH_WRITE_PAGE_LIMIT\n self.pending_operations = []\n\n async def __aenter__(self):\n return self\n\n\nclass BatchWrite(ModelContextManager):\n \"\"\"\n A class for batch writes\n \"\"\"\n\n async def save(self, put_item):\n \"\"\"\n This adds `put_item` to the list of pending operations to be performed.\n\n If the list currently contains 25 items, which is the DynamoDB imposed\n limit on a BatchWriteItem call, one of two things will happen. If auto_commit\n is True, a BatchWriteItem operation will be sent with the already pending\n writes after which put_item is appended to the (now empty) list. 
If auto_commit\n is False, ValueError is raised to indicate additional items cannot be accepted\n due to the DynamoDB imposed limit.\n\n :param put_item: Should be an instance of a `Model` to be written\n \"\"\"\n if len(self.pending_operations) == self.max_operations:\n if not self.auto_commit:\n raise ValueError(\"DynamoDB allows a maximum of 25 batch operations\")\n else:\n await self.commit()\n self.pending_operations.append({\"action\": PUT, \"item\": put_item})\n\n async def delete(self, del_item):\n \"\"\"\n This adds `del_item` to the list of pending operations to be performed.\n\n If the list currently contains 25 items, which is the DynamoDB imposed\n limit on a BatchWriteItem call, one of two things will happen. If auto_commit\n is True, a BatchWriteItem operation will be sent with the already pending\n operations after which put_item is appended to the (now empty) list. If auto_commit\n is False, ValueError is raised to indicate additional items cannot be accepted\n due to the DynamoDB imposed limit.\n\n :param del_item: Should be an instance of a `Model` to be deleted\n \"\"\"\n if len(self.pending_operations) == self.max_operations:\n if not self.auto_commit:\n raise ValueError(\"DynamoDB allows a maximum of 25 batch operations\")\n else:\n await self.commit()\n self.pending_operations.append({\"action\": DELETE, \"item\": del_item})\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n return await self.commit()\n\n async def commit(self):\n \"\"\"\n Writes all of the changes that are pending\n \"\"\"\n log.debug(\"%s committing batch operation\", self.model)\n put_items = []\n delete_items = []\n attrs_name = pythonic(ATTRIBUTES)\n for item in self.pending_operations:\n if item['action'] == PUT:\n put_items.append(item['item']._serialize(attr_map=True)[attrs_name])\n elif item['action'] == DELETE:\n delete_items.append(await item['item']._get_keys())\n self.pending_operations = []\n if not len(put_items) and not len(delete_items):\n return\n data = await self.model._get_connection().batch_write_item(\n put_items=put_items,\n delete_items=delete_items\n )\n if data is None:\n return\n unprocessed_items = data.get(UNPROCESSED_ITEMS, {}).get(self.model.Meta.table_name)\n while unprocessed_items:\n put_items = []\n delete_items = []\n for item in unprocessed_items:\n if PUT_REQUEST in item:\n put_items.append(item.get(PUT_REQUEST).get(ITEM))\n elif DELETE_REQUEST in item:\n delete_items.append(item.get(DELETE_REQUEST).get(KEY))\n log.info(\"Resending %s unprocessed keys for batch operation\", len(unprocessed_items))\n data = await self.model._get_connection().batch_write_item(\n put_items=put_items,\n delete_items=delete_items\n )\n unprocessed_items = data.get(UNPROCESSED_ITEMS, {}).get(self.model.Meta.table_name)\n\n\nclass MetaModel(PynamoDBMetaModel):\n \"\"\"\n Model meta class\n\n This class is just here so that index queries have nice syntax.\n Model.index.query()\n \"\"\"\n def __init__(self, name, bases, attrs):\n if isinstance(attrs, dict):\n for attr_name, attr_obj in attrs.items():\n if attr_name == META_CLASS_NAME:\n if not hasattr(attr_obj, REGION):\n setattr(attr_obj, REGION, get_settings_value('region'))\n if not hasattr(attr_obj, HOST):\n setattr(attr_obj, HOST, get_settings_value('host'))\n if not hasattr(attr_obj, 'session_cls'):\n setattr(attr_obj, 'session_cls', get_settings_value('session_cls'))\n if not hasattr(attr_obj, 'request_timeout_seconds'):\n setattr(attr_obj, 'request_timeout_seconds', get_settings_value('request_timeout_seconds'))\n if not 
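The BatchWrite manager here buffers up to 25 pending operations, committing on overflow and again on context exit. A usage sketch with a hypothetical Thread model (not defined in this file):

async def bulk_save(threads):
    async with Thread.batch_write() as batch:  # Thread is hypothetical
        for t in threads:
            await batch.save(t)  # a commit fires automatically every 25 ops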
hasattr(attr_obj, 'base_backoff_ms'):\n setattr(attr_obj, 'base_backoff_ms', get_settings_value('base_backoff_ms'))\n if not hasattr(attr_obj, 'max_retry_attempts'):\n setattr(attr_obj, 'max_retry_attempts', get_settings_value('max_retry_attempts'))\n\n super(MetaModel, self).__init__(name, bases, attrs)\n\n\nclass Model(PynamoDBModel, metaclass=MetaModel):\n \"\"\"\n Defines a `InPynamoDB` Model\n\n This model is backed by a table in DynamoDB.\n You can create the table by with the ``create_table`` method.\n \"\"\"\n\n # These attributes are named to avoid colliding with user defined\n # DynamoDB attributes\n _meta_table = None\n _indexes = None\n _connection = None\n _index_classes = None\n DoesNotExist = DoesNotExist\n\n def __init__(self, hash_key=None, range_key=None, save_on_exit=False, **attributes):\n \"\"\"\n :param hash_key: Required. The hash key for this object.\n :param range_key: Only required if the table has a range key attribute.\n :param save_on_exit: Indicates this model should be saved on exit.\n :param attrs: A dictionary of attributes to set on this object.\n \"\"\"\n\n self._hash_key = hash_key\n self._range_key = range_key\n self._save_on_exit = save_on_exit\n self._attributes = attributes\n\n @classmethod\n async def initialize(cls, hash_key=None, range_key=None, **attributes):\n warnings.warn(\"Model `initialize()` method is deprecated and will be removed on next release. \"\n \"Please use `async with Model() ...` style context manager.\", category=DeprecationWarning)\n\n return await cls(hash_key=hash_key, range_key=range_key, **attributes).__aenter__()\n\n @classmethod\n async def close_connection(cls):\n await cls._get_connection().close_connection()\n\n async def __aenter__(self):\n if self._hash_key is not None:\n self._attributes[self._dynamo_to_python_attr((await self._get_meta_data()).hash_keyname)] = self._hash_key\n if self._range_key is not None:\n range_keyname = (await self._get_meta_data()).range_keyname\n if range_keyname is None:\n raise ValueError(\n \"This table has no range key, but a range key value was provided: {0}\".format(self._range_key)\n )\n self._attributes[self._dynamo_to_python_attr(range_keyname)] = self._range_key\n\n self.save_on_exit = self._save_on_exit\n super().__init__(**self._attributes)\n\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if self.save_on_exit:\n await self.save()\n\n @classmethod\n async def batch_get(cls, items, consistent_read=None, attributes_to_get=None, return_consumed_capacity=None,\n return_raw_data=False):\n \"\"\"\n BatchGetItem for this model\n\n :param items: Should be a list of hash keys to retrieve, or a list of\n tuples if range keys are used.\n \"\"\"\n items = list(items)\n hash_keyname = (await cls._get_meta_data()).hash_keyname\n range_keyname = (await cls._get_meta_data()).range_keyname\n keys_to_get = []\n while items:\n if len(keys_to_get) == BATCH_GET_PAGE_LIMIT:\n while keys_to_get:\n page, unprocessed_keys = await cls._batch_get_page(\n keys_to_get,\n consistent_read=consistent_read,\n attributes_to_get=attributes_to_get,\n return_consumed_capacity=return_consumed_capacity\n )\n for batch_item in page:\n if return_raw_data:\n yield batch_item\n else:\n yield (await cls.from_raw_data(batch_item))\n if unprocessed_keys:\n keys_to_get = unprocessed_keys\n else:\n keys_to_get = []\n item = items.pop()\n if range_keyname:\n hash_key, range_key = await cls._serialize_keys(item[0], item[1])\n keys_to_get.append({\n hash_keyname: hash_key,\n range_keyname: range_key\n 
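initialize() is deprecated above in favour of constructing models as async context managers; with save_on_exit set, __aexit__ persists the item. A usage sketch with a hypothetical User model and attribute:

async def make_user():
    async with User(hash_key="u-1", save_on_exit=True) as user:  # User is hypothetical
        user.nickname = "demo"  # nickname is an assumed declared attribute
    return user  # saved automatically when the block exited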
})\n else:\n hash_key = (await cls._serialize_keys(item))[0]\n keys_to_get.append({\n hash_keyname: hash_key\n })\n\n while keys_to_get:\n page, unprocessed_keys = await cls._batch_get_page(\n keys_to_get,\n consistent_read=consistent_read,\n attributes_to_get=attributes_to_get\n )\n for batch_item in page:\n if return_raw_data:\n yield batch_item\n else:\n yield (await cls.from_raw_data(batch_item))\n if unprocessed_keys:\n keys_to_get = unprocessed_keys\n else:\n keys_to_get = []\n\n @classmethod\n async def _batch_get_page(cls, keys_to_get, consistent_read, attributes_to_get, return_consumed_capacity=None):\n \"\"\"\n Returns a single page from BatchGetItem\n Also returns any unprocessed items\n\n :param keys_to_get: A list of keys\n :param consistent_read: Whether or not this needs to be consistent\n :param attributes_to_get: A list of attributes to return\n \"\"\"\n log.debug(\"Fetching a BatchGetItem page\")\n data = await cls._get_connection().batch_get_item(\n keys_to_get, consistent_read=consistent_read, attributes_to_get=attributes_to_get,\n return_consumed_capacity=return_consumed_capacity\n )\n item_data = data.get(RESPONSES).get(cls.Meta.table_name)\n unprocessed_items = data.get(UNPROCESSED_KEYS).get(cls.Meta.table_name, {}).get(KEYS, None)\n return item_data, unprocessed_items\n\n @classmethod\n def batch_write(cls, auto_commit=True):\n \"\"\"\n Returns a BatchWrite context manager for a batch operation.\n\n :param auto_commit: If true, the context manager will commit writes incrementally\n as items are written to as necessary to honor item count limits\n in the DynamoDB API (see BatchWrite). Regardless of the value\n passed here, changes automatically commit on context exit\n (whether successful or not).\n \"\"\"\n return BatchWrite(cls, auto_commit=auto_commit)\n\n def __repr__(self):\n if self.Meta.table_name:\n serialized = self._serialize(null_check=False)\n\n msg = f\"{self.Meta.table_name}<{serialized.get(HASH)}\"\n if serialized.get(RANGE):\n msg += f\", {serialized.get(RANGE)}>\"\n else:\n msg += \">\"\n return msg\n\n async def delete(self, condition=None, conditional_operator=None, **expected_values):\n \"\"\"\n Deletes this object from dynamodb\n \"\"\"\n self._conditional_operator_check(conditional_operator)\n args, kwargs = self._get_save_args(attributes=False, null_check=False)\n if len(expected_values):\n kwargs.update(expected=self._build_expected_values(expected_values, DELETE_FILTER_OPERATOR_MAP))\n kwargs.update(conditional_operator=conditional_operator)\n kwargs.update(condition=condition)\n return await self._get_connection().delete_item(*args, **kwargs)\n\n @classmethod\n async def _get_meta_data(cls):\n \"\"\"\n A helper object that contains meta data about this table\n \"\"\"\n if cls._meta_table is None:\n cls._meta_table = MetaTable(await cls._get_connection().describe_table())\n return cls._meta_table\n\n @classmethod\n async def rate_limited_scan(cls,\n filter_condition=None,\n attributes_to_get=None,\n segment=None,\n total_segments=None,\n limit=None,\n conditional_operator=None,\n last_evaluated_key=None,\n page_size=None,\n timeout_seconds=None,\n read_capacity_to_consume_per_second=10,\n allow_rate_limited_scan_without_consumed_capacity=None,\n max_sleep_between_retry=10,\n max_consecutive_exceptions=30,\n consistent_read=None,\n index_name=None,\n **filters):\n \"\"\"\n Scans the items in the table at a definite rate.\n Invokes the low level rate_limited_scan API.\n\n :param filter_condition: Condition used to restrict the scan results\n 
:param attributes_to_get: A list of attributes to return.\n :param segment: If set, then scans the segment\n :param total_segments: If set, then specifies total segments\n :param limit: Used to limit the number of results returned\n :param conditional_operator:\n :param last_evaluated_key: If set, provides the starting point for scan.\n :param page_size: Page size of the scan to DynamoDB\n :param filters: A list of item filters\n :param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running\n infinitely\n :param read_capacity_to_consume_per_second: Amount of read capacity to consume\n every second\n :param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if\n the server does not support returning consumed capacity in responses.\n :param max_sleep_between_retry: Max value for sleep in seconds in between scans during\n throttling/rate limit scenarios\n :param max_consecutive_exceptions: Max number of consecutive provision throughput exceeded\n exceptions for scan to exit\n :param consistent_read: If True, a consistent read is performed\n \"\"\"\n\n cls._conditional_operator_check(conditional_operator)\n key_filter, scan_filter = cls._build_filters(\n SCAN_OPERATOR_MAP,\n non_key_operator_map=SCAN_OPERATOR_MAP,\n key_attribute_classes=cls.get_attributes(),\n filters=filters\n )\n key_filter.update(scan_filter)\n\n scan_result = cls._get_connection().rate_limited_scan(\n filter_condition=filter_condition,\n attributes_to_get=attributes_to_get,\n page_size=page_size,\n limit=limit,\n conditional_operator=conditional_operator,\n scan_filter=key_filter,\n segment=segment,\n total_segments=total_segments,\n exclusive_start_key=last_evaluated_key,\n timeout_seconds=timeout_seconds,\n read_capacity_to_consume_per_second=read_capacity_to_consume_per_second,\n allow_rate_limited_scan_without_consumed_capacity=allow_rate_limited_scan_without_consumed_capacity,\n max_sleep_between_retry=max_sleep_between_retry,\n max_consecutive_exceptions=max_consecutive_exceptions,\n consistent_read=consistent_read,\n index_name=index_name\n )\n\n async for item in scan_result:\n yield (await cls.from_raw_data(item))\n\n @classmethod\n async def scan(cls,\n filter_condition=None,\n segment=None,\n total_segments=None,\n limit=None,\n conditional_operator=None,\n last_evaluated_key=None,\n page_size=None,\n consistent_read=None,\n index_name=None,\n rate_limit=None,\n **filters):\n \"\"\"\n Iterates through all items in the table\n\n :param filter_condition: Condition used to restrict the scan results\n :param segment: If set, then scans the segment\n :param total_segments: If set, then specifies total segments\n :param limit: Used to limit the number of results returned\n :param conditional_operator:\n :param last_evaluated_key: If set, provides the starting point for scan.\n :param page_size: Page size of the scan to DynamoDB\n :param filters: A list of item filters\n :param consistent_read: If True, a consistent read is performed\n \"\"\"\n cls._conditional_operator_check(conditional_operator)\n key_filter, scan_filter = cls._build_filters(\n SCAN_OPERATOR_MAP,\n non_key_operator_map=SCAN_OPERATOR_MAP,\n key_attribute_classes=cls.get_attributes(),\n filters=filters\n )\n key_filter.update(scan_filter)\n\n if page_size is None:\n page_size = limit\n\n scan_args = ()\n scan_kwargs = dict(\n filter_condition=filter_condition,\n exclusive_start_key=last_evaluated_key,\n segment=segment,\n limit=page_size,\n scan_filter=key_filter,\n 
total_segments=total_segments,\n conditional_operator=conditional_operator,\n consistent_read=consistent_read,\n index_name=index_name\n )\n\n return ResultIterator(\n cls._get_connection().scan,\n scan_args,\n scan_kwargs,\n map_fn=cls.from_raw_data,\n limit=limit,\n rate_limit=rate_limit,\n )\n\n @classmethod\n async def exists(cls):\n \"\"\"\n Returns True if this table exists, False otherwise\n \"\"\"\n try:\n await cls._get_connection().describe_table()\n return True\n except TableDoesNotExist:\n return False\n\n @classmethod\n async def describe_table(cls):\n return await cls._get_connection().describe_table()\n\n @classmethod\n async def create_table(cls, wait=False, read_capacity_units=None, write_capacity_units=None):\n \"\"\"\n Create the table for this model\n\n :param wait: If set, then this call will block until the table is ready for use\n :param read_capacity_units: Sets the read capacity units for this table\n :param write_capacity_units: Sets the write capacity units for this table\n \"\"\"\n if not await cls.exists():\n schema = cls._get_schema()\n if hasattr(cls.Meta, pythonic(READ_CAPACITY_UNITS)):\n schema[pythonic(READ_CAPACITY_UNITS)] = cls.Meta.read_capacity_units\n if hasattr(cls.Meta, pythonic(WRITE_CAPACITY_UNITS)):\n schema[pythonic(WRITE_CAPACITY_UNITS)] = cls.Meta.write_capacity_units\n if hasattr(cls.Meta, pythonic(STREAM_VIEW_TYPE)):\n schema[pythonic(STREAM_SPECIFICATION)] = {\n pythonic(STREAM_ENABLED): True,\n pythonic(STREAM_VIEW_TYPE): cls.Meta.stream_view_type\n }\n if read_capacity_units is not None:\n schema[pythonic(READ_CAPACITY_UNITS)] = read_capacity_units\n if write_capacity_units is not None:\n schema[pythonic(WRITE_CAPACITY_UNITS)] = write_capacity_units\n index_data = cls._get_indexes()\n schema[pythonic(GLOBAL_SECONDARY_INDEXES)] = index_data.get(pythonic(GLOBAL_SECONDARY_INDEXES))\n schema[pythonic(LOCAL_SECONDARY_INDEXES)] = index_data.get(pythonic(LOCAL_SECONDARY_INDEXES))\n index_attrs = index_data.get(pythonic(ATTR_DEFINITIONS))\n attr_keys = [attr.get(pythonic(ATTR_NAME)) for attr in schema.get(pythonic(ATTR_DEFINITIONS))]\n for attr in index_attrs:\n attr_name = attr.get(pythonic(ATTR_NAME))\n if attr_name not in attr_keys:\n schema[pythonic(ATTR_DEFINITIONS)].append(attr)\n attr_keys.append(attr_name)\n await cls._get_connection().create_table(\n **schema\n )\n if wait:\n while True:\n status = await cls._get_connection().describe_table()\n if status:\n data = status.get(TABLE_STATUS)\n if data == ACTIVE:\n return\n else:\n asyncio.sleep(2)\n else:\n raise TableError(\"No TableStatus returned for table\")\n\n @classmethod\n async def dumps(cls):\n \"\"\"\n Returns a JSON representation of this model's table\n \"\"\"\n return json.dumps([item._get_json() async for item in await cls.scan()])\n\n @classmethod\n async def dump(cls, filename):\n \"\"\"\n Writes the contents of this model's table as JSON to the given filename\n \"\"\"\n with open(filename, 'w') as out:\n out.write(await cls.dumps())\n\n @classmethod\n async def loads(cls, data):\n content = json.loads(data)\n async with cls.batch_write() as batch:\n for item_data in content:\n item = await cls._from_data(item_data)\n await batch.save(item)\n\n @classmethod\n async def load(cls, filename):\n with open(filename, 'r') as inf:\n await cls.loads(inf.read())\n\n # Private API below\n @classmethod\n async def _from_data(cls, data):\n \"\"\"\n Reconstructs a model object from JSON.\n \"\"\"\n hash_key, attrs = data\n range_key = attrs.pop('range_key', None)\n attributes = 
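One detail worth flagging in create_table above: inside this async method the wait loop calls `asyncio.sleep(2)` without awaiting it, so the coroutine is created and discarded and the loop spins hot. A corrected polling shape, with literal strings standing in for the TABLE_STATUS/ACTIVE constants:

import asyncio

async def wait_until_active(describe_table):
    while True:
        status = await describe_table()
        if status and status.get("TableStatus") == "ACTIVE":
            return
        await asyncio.sleep(2)  # the await is what actually yields control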
attrs.pop(pythonic(ATTRIBUTES))\n hash_keyname = (await cls._get_meta_data()).hash_keyname\n hash_keytype = (await cls._get_meta_data()).get_attribute_type(hash_keyname)\n attributes[hash_keyname] = {\n hash_keytype: hash_key\n }\n if range_key is not None:\n range_keyname = (await cls._get_meta_data()).range_keyname\n range_keytype = (await cls._get_meta_data()).get_attribute_type(range_keyname)\n attributes[range_keyname] = {\n range_keytype: range_key\n }\n async with cls() as item:\n item._deserialize(attributes)\n return item\n\n\n @classmethod\n def _get_schema(cls):\n \"\"\"\n Returns the schema for this table\n \"\"\"\n schema = {\n pythonic(ATTR_DEFINITIONS): [],\n pythonic(KEY_SCHEMA): []\n }\n for attr_name, attr_cls in cls.get_attributes().items():\n if attr_cls.is_hash_key or attr_cls.is_range_key:\n schema[pythonic(ATTR_DEFINITIONS)].append({\n pythonic(ATTR_NAME): attr_cls.attr_name,\n pythonic(ATTR_TYPE): ATTR_TYPE_MAP[attr_cls.attr_type]\n })\n if attr_cls.is_hash_key:\n schema[pythonic(KEY_SCHEMA)].append({\n pythonic(KEY_TYPE): HASH,\n pythonic(ATTR_NAME): attr_cls.attr_name\n })\n elif attr_cls.is_range_key:\n schema[pythonic(KEY_SCHEMA)].append({\n pythonic(KEY_TYPE): RANGE,\n pythonic(ATTR_NAME): attr_cls.attr_name\n })\n return schema\n\n @classmethod\n def _get_indexes(cls):\n \"\"\"\n Returns a list of the secondary indexes\n \"\"\"\n if cls._indexes is None:\n cls._indexes = {\n pythonic(GLOBAL_SECONDARY_INDEXES): [],\n pythonic(LOCAL_SECONDARY_INDEXES): [],\n pythonic(ATTR_DEFINITIONS): []\n }\n cls._index_classes = {}\n for name, index in getmembers_issubclass(cls, Index):\n cls._index_classes[index.Meta.index_name] = index\n schema = index._get_schema()\n idx = {\n pythonic(INDEX_NAME): index.Meta.index_name,\n pythonic(KEY_SCHEMA): schema.get(pythonic(KEY_SCHEMA)),\n pythonic(PROJECTION): {\n PROJECTION_TYPE: index.Meta.projection.projection_type,\n },\n\n }\n if issubclass(index.__class__, GlobalSecondaryIndex):\n idx[pythonic(PROVISIONED_THROUGHPUT)] = {\n READ_CAPACITY_UNITS: index.Meta.read_capacity_units,\n WRITE_CAPACITY_UNITS: index.Meta.write_capacity_units\n }\n cls._indexes[pythonic(ATTR_DEFINITIONS)].extend(schema.get(pythonic(ATTR_DEFINITIONS)))\n if index.Meta.projection.non_key_attributes:\n idx[pythonic(PROJECTION)][NON_KEY_ATTRIBUTES] = index.Meta.projection.non_key_attributes\n if issubclass(index.__class__, GlobalSecondaryIndex):\n cls._indexes[pythonic(GLOBAL_SECONDARY_INDEXES)].append(idx)\n else:\n cls._indexes[pythonic(LOCAL_SECONDARY_INDEXES)].append(idx)\n return cls._indexes\n\n async def update_item(self, attribute, value=None, action=None, condition=None, conditional_operator=None,\n **expected_values):\n \"\"\"\n Updates an item using the UpdateItem operation.\n\n This should be used for updating a single attribute of an item.\n\n :param attribute: The name of the attribute to be updated\n :param value: The new value for the attribute.\n :param action: The action to take if this item already exists.\n See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdate\n \"\"\"\n warnings.warn(\"`Model.update_item` is deprecated in favour of `Model.update` now\")\n\n self._conditional_operator_check(conditional_operator)\n args, save_kwargs = self._get_save_args(null_check=False)\n attribute_cls = None\n for attr_name, attr_cls in self.get_attributes().items():\n if attr_name == attribute:\n attribute_cls = attr_cls\n break\n if not attribute_cls:\n raise 
ValueError(f\"Attribute {attr_name} specified does not exist\")\n if save_kwargs.get(pythonic(RANGE_KEY)):\n kwargs = {pythonic(RANGE_KEY): save_kwargs.get(pythonic(RANGE_KEY))}\n else:\n kwargs = {}\n if len(expected_values):\n kwargs.update(expected=self._build_expected_values(expected_values, UPDATE_FILTER_OPERATOR_MAP))\n kwargs[pythonic(ATTR_UPDATES)] = {\n attribute_cls.attr_name: {\n ACTION: action.upper() if action else None,\n }\n }\n if value is not None:\n kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {\n ATTR_TYPE_MAP[attribute_cls.attr_type]: attribute_cls.serialize(value)\n }\n kwargs[pythonic(RETURN_VALUES)] = ALL_NEW\n kwargs.update(conditional_operator=conditional_operator)\n kwargs.update(condition=condition)\n data = await self._get_connection().update_item(\n *args,\n **kwargs\n )\n\n for name, value in data.get(ATTRIBUTES).items():\n attr_name = self._dynamo_to_python_attr(name)\n attr = self.get_attributes().get(attr_name)\n if attr:\n setattr(self, attr_name, attr.deserialize(attr.get_value(value)))\n return data\n\n async def update(self, attributes=None, actions=None, condition=None, conditional_operator=None, **expected_values):\n \"\"\"\n Updates an item using the UpdateItem operation.\n\n :param attributes: A dictionary of attributes to update in the following format\n {\n attr_name: {'value': 10, 'action': 'ADD'},\n next_attr: {'value': True, 'action': 'PUT'},\n }\n \"\"\"\n if attributes is not None and not isinstance(attributes, dict):\n raise TypeError(\"the value of `attributes` is expected to be a dictionary\")\n if actions is not None and not isinstance(actions, list):\n raise TypeError(\"the value of `actions` is expected to be a list\")\n\n self._conditional_operator_check(conditional_operator)\n args, save_kwargs = self._get_save_args(null_check=False)\n kwargs = {\n pythonic(RETURN_VALUES): ALL_NEW,\n 'conditional_operator': conditional_operator,\n }\n\n if attributes:\n kwargs[pythonic(ATTR_UPDATES)] = {}\n\n if pythonic(RANGE_KEY) in save_kwargs:\n kwargs[pythonic(RANGE_KEY)] = save_kwargs[pythonic(RANGE_KEY)]\n\n if expected_values:\n kwargs['expected'] = self._build_expected_values(expected_values, UPDATE_FILTER_OPERATOR_MAP)\n\n attrs = self.get_attributes()\n attributes = attributes or {}\n for attr, params in attributes.items():\n attribute_cls = attrs[attr]\n action = params['action'] and params['action'].upper()\n attr_values = {ACTION: action}\n if 'value' in params:\n attr_values[VALUE] = self._serialize_value(attribute_cls, params['value'])\n\n kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name] = attr_values\n\n kwargs.update(condition=condition)\n kwargs.update(actions=actions)\n data = await self._get_connection().update_item(*args, **kwargs)\n for name, value in data[ATTRIBUTES].items():\n attr_name = self._dynamo_to_python_attr(name)\n attr = self.get_attributes().get(attr_name)\n if attr:\n setattr(self, attr_name, attr.deserialize(attr.get_value(value)))\n\n return data\n\n async def save(self, condition=None, conditional_operator=None, **expected_values):\n \"\"\"\n Save this object to dynamodb\n \"\"\"\n self._conditional_operator_check(conditional_operator)\n args, kwargs = self._get_save_args()\n if len(expected_values):\n kwargs.update(expected=self._build_expected_values(expected_values, PUT_FILTER_OPERATOR_MAP))\n kwargs.update(conditional_operator=conditional_operator)\n kwargs.update(condition=condition)\n return await self._get_connection().put_item(*args, **kwargs)\n\n async def refresh(self, 
consistent_read=False):\n \"\"\"\n Retrieves this object's data from dynamodb and syncs this local object\n\n :param consistent_read: If True, then a consistent read is performed.\n \"\"\"\n args, kwargs = self._get_save_args(attributes=False)\n kwargs.setdefault('consistent_read', consistent_read)\n attrs = await self._get_connection().get_item(*args, **kwargs)\n item_data = attrs.get(ITEM, None)\n if item_data is None:\n raise self.DoesNotExist(\"This item does not exist in the table.\")\n self._deserialize(item_data)\n\n @classmethod\n def _get_connection(cls):\n \"\"\"\n Returns a (cached) connection\n \"\"\"\n if not hasattr(cls, \"Meta\"):\n raise AttributeError(\n f'As of v1.0 InPynamoDB Models require a `Meta` class.\\n'\n f'Model: {cls.__module__}.{cls.__name__}\\n'\n f'See https://pynamodb.readthedocs.io/en/latest/release_notes.html\\n'\n )\n elif not hasattr(cls.Meta, \"table_name\") or cls.Meta.table_name is None:\n raise AttributeError(\n f'As of v1.0 InPyanmoDB Models must have a table_name\\n'\n f'Model: {cls.__module__}.{cls.__name__}\\n'\n f'See https://pynamodb.readthedocs.io/en/latest/release_notes.html'\n )\n\n if cls._connection is None:\n cls._connection = TableConnection(cls.Meta.table_name,\n region=cls.Meta.region,\n host=cls.Meta.host,\n session_cls=cls.Meta.session_cls,\n request_timeout_seconds=cls.Meta.request_timeout_seconds,\n max_retry_attempts=cls.Meta.max_retry_attempts,\n base_backoff_ms=cls.Meta.base_backoff_ms,\n aws_access_key_id=cls.Meta.aws_access_key_id,\n aws_secret_access_key=cls.Meta.aws_secret_access_key,\n aws_session_token=getattr(cls.Meta, 'aws_session_token', None))\n return cls._connection\n\n @classmethod\n async def get(cls,\n hash_key,\n range_key=None,\n consistent_read=False,\n attributes_to_get=None,\n return_raw_data=False):\n \"\"\"\n Returns a single object using the provided keys\n\n :param hash_key: The hash key of the desired item\n :param range_key: The range key of the desired item, only used when appropriate.\n \"\"\"\n hash_key, range_key = await cls._serialize_keys(hash_key, range_key)\n data = await cls._get_connection().get_item(\n hash_key,\n range_key=range_key,\n consistent_read=consistent_read,\n attributes_to_get=attributes_to_get\n )\n if data:\n item_data = data.get(ITEM)\n if item_data:\n if return_raw_data:\n return item_data\n else:\n return await cls.from_raw_data(item_data)\n raise cls.DoesNotExist()\n\n @classmethod\n async def from_raw_data(cls, data):\n \"\"\"\n Returns an instance of this class\n from the raw data\n\n :param data: A serialized DynamoDB object\n \"\"\"\n mutable_data = copy.copy(data)\n if mutable_data is None:\n raise ValueError(\"Received no mutable_data to construct object\")\n hash_keyname = (await cls._get_meta_data()).hash_keyname\n range_keyname = (await cls._get_meta_data()).range_keyname\n hash_key_type = (await cls._get_meta_data()).get_attribute_type(hash_keyname)\n hash_key = mutable_data.pop(hash_keyname).get(hash_key_type)\n\n hash_key_attr = cls.get_attributes().get(cls._dynamo_to_python_attr(hash_keyname))\n\n hash_key = hash_key_attr.deserialize(hash_key)\n args = (hash_key,)\n kwargs = {}\n if range_keyname:\n range_key_attr = cls.get_attributes().get(cls._dynamo_to_python_attr(range_keyname))\n range_key_type = (await cls._get_meta_data()).get_attribute_type(range_keyname)\n range_key = mutable_data.pop(range_keyname).get(range_key_type)\n kwargs['range_key'] = range_key_attr.deserialize(range_key)\n for name, value in mutable_data.items():\n attr_name = 
cls._dynamo_to_python_attr(name)\n attr = cls.get_attributes().get(attr_name, None)\n if attr:\n kwargs[attr_name] = attr.deserialize(attr.get_value(value))\n async with cls(*args, **kwargs) as item:\n return item\n\n @classmethod\n async def count(cls,\n hash_key=None,\n range_key_condition=None,\n filter_condition=None,\n consistent_read=False,\n index_name=None,\n limit=None,\n rate_limit=None,\n **filters):\n \"\"\"\n Provides a filtered count\n\n :param hash_key: The hash key to query. Can be None.\n :param range_key_condition: Condition for range key\n :param filter_condition: Condition used to restrict the query results\n :param consistent_read: If True, a consistent read is performed\n :param index_name: If set, then this index is used\n :param filters: A dictionary of filters to be used in the query. Requires a hash_key to be passed.\n \"\"\"\n if hash_key is None:\n if filters:\n raise ValueError('A hash_key must be given to use filters')\n return (await cls.describe_table()).get(ITEM_COUNT)\n\n cls._get_indexes()\n if index_name:\n hash_key = cls._index_classes[index_name]._hash_key_attribute().serialize(hash_key)\n key_attribute_classes = cls._index_classes[index_name]._get_attributes()\n non_key_attribute_classes = cls.get_attributes()\n else:\n hash_key = (await cls._serialize_keys(hash_key))[0]\n non_key_attribute_classes = {}\n key_attribute_classes = {}\n for name, attr in cls.get_attributes().items():\n if attr.is_range_key or attr.is_hash_key:\n key_attribute_classes[name] = attr\n else:\n non_key_attribute_classes[name] = attr\n key_conditions, query_filters = cls._build_filters(\n QUERY_OPERATOR_MAP,\n non_key_operator_map=QUERY_FILTER_OPERATOR_MAP,\n key_attribute_classes=key_attribute_classes,\n non_key_attribute_classes=non_key_attribute_classes,\n filters=filters)\n\n query_args = (hash_key,)\n query_kwargs = dict(\n range_key_condition=range_key_condition,\n filter_condition=filter_condition,\n index_name=index_name,\n consistent_read=consistent_read,\n key_conditions=key_conditions,\n query_filters=query_filters,\n limit=limit,\n select=COUNT\n )\n\n result_iterator = ResultIterator(\n cls._get_connection().query,\n query_args,\n query_kwargs,\n limit=limit,\n rate_limit=rate_limit\n )\n\n # iterate through results\n _ = [o async for o in result_iterator]\n # list(result_iterator)\n\n return result_iterator.total_count\n\n @classmethod\n async def query(cls,\n hash_key,\n range_key_condition=None,\n filter_condition=None,\n consistent_read=False,\n index_name=None,\n scan_index_forward=None,\n conditional_operator=None,\n limit=None,\n last_evaluated_key=None,\n attributes_to_get=None,\n page_size=None,\n rate_limit=None,\n **filters):\n \"\"\"\n Provides a high level query API\n\n :param hash_key: The hash key to query\n :param range_key_condition: Condition for range key\n :param filter_condition: Condition used to restrict the query results\n :param consistent_read: If True, a consistent read is performed\n :param index_name: If set, then this index is used\n :param limit: Used to limit the number of results returned\n :param scan_index_forward: If set, then used to specify the same parameter to the DynamoDB API.\n Controls descending or ascending results\n :param conditional_operator:\n :param last_evaluated_key: If set, provides the starting point for query.\n :param attributes_to_get: If set, only returns these elements\n :param page_size: Page size of the query to DynamoDB\n :param filters: A dictionary of 
filters to be used in the query\n \"\"\"\n cls._conditional_operator_check(conditional_operator)\n cls._get_indexes()\n if index_name:\n hash_key = cls._index_classes[index_name]._hash_key_attribute().serialize(hash_key)\n key_attribute_classes = cls._index_classes[index_name]._get_attributes()\n non_key_attribute_classes = cls.get_attributes()\n else:\n hash_key = (await cls._serialize_keys(hash_key))[0]\n non_key_attribute_classes = {}\n key_attribute_classes = {}\n for name, attr in cls.get_attributes().items():\n if attr.is_range_key or attr.is_hash_key:\n key_attribute_classes[name] = attr\n else:\n non_key_attribute_classes[name] = attr\n\n if page_size is None:\n page_size = limit\n\n key_conditions, query_filters = cls._build_filters(\n QUERY_OPERATOR_MAP,\n non_key_operator_map=QUERY_FILTER_OPERATOR_MAP,\n key_attribute_classes=key_attribute_classes,\n non_key_attribute_classes=non_key_attribute_classes,\n filters=filters)\n\n query_args = (hash_key,)\n query_kwargs = dict(\n range_key_condition=range_key_condition,\n filter_condition=filter_condition,\n index_name=index_name,\n exclusive_start_key=last_evaluated_key,\n consistent_read=consistent_read,\n scan_index_forward=scan_index_forward,\n limit=page_size,\n key_conditions=key_conditions,\n attributes_to_get=attributes_to_get,\n query_filters=query_filters,\n conditional_operator=conditional_operator\n )\n\n return ResultIterator(\n cls._get_connection().query,\n query_args,\n query_kwargs,\n map_fn=cls.from_raw_data,\n limit=limit,\n rate_limit=rate_limit\n )\n\n @classmethod\n async def _range_key_attribute(cls):\n \"\"\"\n Returns the attribute class for the range key\n \"\"\"\n attributes = cls.get_attributes()\n range_keyname = (await cls._get_meta_data()).range_keyname\n if range_keyname:\n attr = attributes[cls._dynamo_to_python_attr(range_keyname)]\n else:\n attr = None\n return attr\n\n @classmethod\n async def _hash_key_attribute(cls):\n \"\"\"\n Returns the attribute class for the hash key\n \"\"\"\n attributes = cls.get_attributes()\n hash_keyname = (await cls._get_meta_data()).hash_keyname\n return attributes[cls._dynamo_to_python_attr(hash_keyname)]\n\n @classmethod\n async def _serialize_keys(cls, hash_key, range_key=None):\n \"\"\"\n Serializes the hash and range keys\n\n :param hash_key: The hash key value\n :param range_key: The range key value\n \"\"\"\n hash_key = (await cls._hash_key_attribute()).serialize(hash_key)\n if range_key is not None:\n range_key = (await cls._range_key_attribute()).serialize(range_key)\n return hash_key, range_key\n\n async def _get_keys(self):\n \"\"\"\n Returns the proper arguments for deleting\n \"\"\"\n serialized = self._serialize(null_check=False)\n hash_key = serialized.get(HASH)\n range_key = serialized.get(RANGE, None)\n hash_keyname = (await self._get_meta_data()).hash_keyname\n range_keyname = (await self._get_meta_data()).range_keyname\n attrs = {\n hash_keyname: hash_key,\n }\n if range_keyname is not None:\n attrs[range_keyname] = range_key\n return attrs\n\n def as_dict(self, attributes_to_get=None, include_none=True):\n result = {}\n\n if include_none:\n if attributes_to_get is None:\n for k in self._get_attributes().keys():\n attr_value = self.__getattribute__(k)\n if isinstance(attr_value, MapAttribute):\n result[k] = attr_value.as_dict(include_none=include_none)\n else:\n result[k] = attr_value\n else:\n for k, v in self._get_attributes().items():\n if k in attributes_to_get:\n attr_value = self.__getattribute__(k)\n if isinstance(v, MapAttribute):\n result[k] 
= attr_value.as_dict(include_none=include_none)\n else:\n result[k] = attr_value\n else:\n if attributes_to_get is None:\n for key, value in self.attribute_values.items():\n result[key] = value.as_dict() if isinstance(value, MapAttribute) else value\n else:\n for key, value in self.attribute_values.items():\n if key in attributes_to_get:\n result[key] = value.as_dict() if isinstance(value, MapAttribute) else value\n\n return result\n","sub_path":"inpynamodb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":47641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"473716214","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\n\n\nclass BilinearInterpolation(layers.Layer):\n    def __init__(self, output_size, **kwargs):\n        super(BilinearInterpolation, self).__init__(**kwargs)\n        self.output_size = output_size\n\n    def get_config(self):\n        return {\n            'output_size': self.output_size,\n        }\n\n    def call(self, inputs):\n        x, transformation = inputs\n        output = self._transform(x, transformation, self.output_size)\n        return output\n\n    def _make_regular_grids(self, batch_size, height, width):\n        x_linspace = tf.linspace(-1., 1., width)\n        y_linspace = tf.linspace(-1., 1., height)\n        x_coordinates, y_coordinate = tf.meshgrid(x_linspace, y_linspace)\n        x_coordinates = tf.reshape(x_coordinates, [-1])\n        y_coordinate = tf.reshape(y_coordinate, [-1])\n        ones = tf.ones_like(x_coordinates)\n        grid = tf.concat([x_coordinates, y_coordinate, ones], 0)\n\n        grid = tf.reshape(grid, [-1])\n\n        grids = tf.tile(grid, tf.stack([batch_size]))\n        return tf.reshape(grids, (batch_size, 3, height * width))\n\n    def _interpolate(self, image, sampled_grids, output_size):\n        batch_size, height, width, num_channels = image.shape\n        batch_size = tf.shape(image)[0]\n        x = tf.cast(tf.reshape(sampled_grids[:, 0:1, :], [-1]), dtype=tf.float32)\n        y = tf.cast(tf.reshape(sampled_grids[:, 1:2, :], [-1]), dtype=tf.float32)\n\n        x = .5 * (x + 1.0) * tf.cast(width, dtype=tf.float32)\n        y = .5 * (y + 1.0) * tf.cast(height, dtype=tf.float32)\n\n        x0 = tf.cast(x, tf.int32)\n        x1 = x0 + 1\n        y0 = tf.cast(y, tf.int32)\n        y1 = y0 + 1\n\n        max_x = int(tf.keras.backend.int_shape(image)[2] - 1)\n        max_y = int(tf.keras.backend.int_shape(image)[1] - 1)\n\n        x0 = tf.keras.backend.clip(x0, 0, max_x)\n        x1 = tf.keras.backend.clip(x1, 0, max_x)\n        y0 = tf.keras.backend.clip(y0, 0, max_y)\n        y1 = tf.keras.backend.clip(y1, 0, max_y)\n\n        pixels_batch = tf.keras.backend.arange(0, batch_size) * (height * width)\n        pixels_batch = tf.keras.backend.expand_dims(pixels_batch, axis=-1)\n        flat_output_size = output_size[0] * output_size[1]\n        base = tf.keras.backend.repeat_elements(pixels_batch, flat_output_size, axis=1)\n        base = tf.keras.backend.flatten(base)\n\n        # base_y0 = base + (y0 * width)\n        base_y0 = y0 * width\n        base_y0 = base + base_y0\n        # base_y1 = base + (y1 * width)\n        base_y1 = y1 * width\n        base_y1 = base_y1 + base\n\n        indices_a = base_y0 + x0\n        indices_b = base_y1 + x0\n        indices_c = base_y0 + x1\n        indices_d = base_y1 + x1\n\n        flat_image = tf.keras.backend.reshape(image, shape=(-1, num_channels))\n        flat_image = tf.keras.backend.cast(flat_image, dtype='float32')\n        pixel_values_a = tf.keras.backend.gather(flat_image, indices_a)\n        pixel_values_b = tf.keras.backend.gather(flat_image, indices_b)\n        pixel_values_c = tf.keras.backend.gather(flat_image, indices_c)\n        pixel_values_d = tf.keras.backend.gather(flat_image, indices_d)\n\n        x0 = tf.keras.backend.cast(x0, 'float32')\n        x1 = tf.keras.backend.cast(x1, 
'float32')\n y0 = tf.keras.backend.cast(y0, 'float32')\n y1 = tf.keras.backend.cast(y1, 'float32')\n\n area_a = tf.keras.backend.expand_dims(((x1 - x) * (y1 - y)), 1)\n area_b = tf.keras.backend.expand_dims(((x1 - x) * (y - y0)), 1)\n area_c = tf.keras.backend.expand_dims(((x - x0) * (y1 - y)), 1)\n area_d = tf.keras.backend.expand_dims(((x - x0) * (y - y0)), 1)\n\n values_a = area_a * pixel_values_a\n values_b = area_b * pixel_values_b\n values_c = area_c * pixel_values_c\n values_d = area_d * pixel_values_d\n return values_a + values_b + values_c + values_d\n\n def _transform(self, X, affine_transformation, output_size):\n batch_size, num_channels = tf.shape(X)[0], X.shape[3]\n transformations = tf.reshape(affine_transformation, (batch_size, 2, 3))\n regular_grids = self._make_regular_grids(batch_size, *output_size)\n sampled_grids = tf.keras.backend.batch_dot(transformations, regular_grids)\n\n interpolated_image = self._interpolate(X, sampled_grids, output_size)\n new_shape = (batch_size, output_size[0], output_size[1], num_channels)\n interpolated_image = tf.keras.backend.reshape(interpolated_image, new_shape)\n return interpolated_image\n","sub_path":"src/components/STN.py","file_name":"STN.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"205084212","text":"import dill as pickle\nimport json\nimport base64\n\ndef lambda_to_string(x):\n\tz = pickle.dumps(x)\n\tz = base64.b64encode(z)\n\tz = str(z,'utf-8')\n\treturn z\n\t\ndef string_to_lambda(z):\n\tz = bytes(z,'utf-8')\n\tz = base64.b64decode(z)\n\tx = pickle.loads(z)\n\treturn x\n\nf = lambda x : x * x\n\ns = lambda_to_string(f)\n\ng = string_to_lambda(s)\n\nprint(g(3))\n","sub_path":"code/quark_web_server/demo_2.py","file_name":"demo_2.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"275140455","text":"import asyncio\nimport logging\nfrom typing import Callable, Union\n\nimport aiohttp\nimport aiojobs\n\nfrom aiotelegrambot.client import Client\nfrom aiotelegrambot.errors import BotError, TelegramApiError\nfrom aiotelegrambot.handler import Handlers\nfrom aiotelegrambot.message import Message\nfrom aiotelegrambot.middleware import Middlewares\nfrom aiotelegrambot.rules import Command, Rule\nfrom aiotelegrambot.types import Chat, Content, Incoming, recognize_type\n\nlogger = logging.getLogger(__name__)\n\n\nclass BotBase:\n def __init__(self, handlers: Handlers = None):\n self.handlers = handlers or Handlers()\n self.middlewares = Middlewares()\n self._scheduler = None\n self._closed = True\n self._update_id = 0\n self.ctx = {}\n\n async def initialize(self, *, webhook: bool = False, interval: float = 0.1, **scheduler_options):\n if self._closed is False:\n return\n\n if not self.handlers:\n raise BotError(\"Can't initialize with no one handler\")\n\n self._closed = False\n self._scheduler = await aiojobs.create_scheduler(**scheduler_options)\n if webhook is False:\n await self._scheduler.spawn(self._get_updates(interval))\n\n async def close(self):\n if self._closed:\n return\n\n self._closed = True\n\n for job in self._scheduler:\n await job.wait()\n\n await self._scheduler.close()\n self._scheduler = None\n\n self._update_id = 0\n\n def add_handler(self, handler: Callable, *args, **kwargs):\n self.handlers.add(*args, **kwargs)(handler)\n\n async def _process_updates(self, data: Union[None, dict]):\n if data:\n for raw in 
data[\"result\"]:\n await self.process_update(raw)\n self._update_id = max(raw[\"update_id\"], self._update_id)\n self._update_id += 1 if data[\"result\"] else 0\n\n\nclass Bot(BotBase):\n def __init__(self, *, loop=None):\n super().__init__()\n self.loop = asyncio.get_event_loop() if loop is None else loop\n self.client = None\n self.webhook = None\n\n def command(self):\n \"\"\"\n Decorator for creating bot's command.\n\n ```\n bot = Bot()\n\n @bot.command()\n def name_of_command(message: Message):\n # useful stuff here\n pass\n ```\n \"\"\"\n\n def decorator(func):\n return self.add_handler(func,\n content_type=Content.COMMAND,\n rule=Command())\n return decorator\n\n def trigger_message(self,\n message_type: Union[Chat, Incoming, Content] = None,\n rule: Rule = None):\n \"\"\"\n Decorator for creating a handler for any type of message,\n specifized in `message_type`.\n \"\"\"\n\n if not message_type:\n raise BotError(\"message_type Union[Chat, Incoming, Content] must be specifized\")\n\n def decorator(func):\n return self.add_handler(func,\n content_type=message_type,\n rule=rule)\n return decorator\n\n async def process_update(self, data: dict):\n if self._closed is True:\n raise RuntimeError(\"The bot isn't initialized\")\n\n chat_type, incoming, content_type = recognize_type(data)\n handler = self.handlers.get(chat_type, incoming, content_type, data)\n await self._scheduler.spawn(\n self.middlewares(Message(self.client, data, self.ctx, chat_type, incoming, content_type), handler)\n )\n\n def run(self, token, webhook = False):\n self.client = Client(token)\n self.webhook = webhook\n\n try:\n self.loop.run_until_complete(self._start())\n\n except KeyboardInterrupt:\n self.loop.run_until_complete(self.close())\n self.loop.run_until_complete(self.client.close())\n\n finally:\n self.loop.close()\n\n async def _get_updates(self, interval: float):\n while self._closed is False:\n try:\n data = await self.client.get_updates(self._update_id)\n await self._process_updates(data)\n await asyncio.sleep(interval)\n except TelegramApiError as e:\n self.client.process_error(str(e), e.response, e.data, False)\n if e.response.status >= 500:\n await asyncio.sleep(30)\n else:\n await asyncio.sleep(10)\n except asyncio.TimeoutError as e:\n logger.exception(str(e))\n except aiohttp.ClientError as e:\n logger.exception(str(e))\n await asyncio.sleep(10)\n\n async def _start(self):\n await self.initialize(webhook=self.webhook)\n while True:\n await asyncio.sleep(100)\n\n","sub_path":"aiotelegrambot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"398197440","text":"import pymongo\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\n\n\ndef main():\n # 指定种子页面\n base_url = 'https://www.zhihu.com/'\n seed_url = urljoin(base_url, 'explore')\n headers = {'user-agent':'Baiduspider'\n }\n resp = requests.get(seed_url, headers=headers)\n soup = BeautifulSoup(resp.text, 'lxml')\n href = re.compile(r'^/question')\n htmls = soup.find_all('a', {'href': href})\n # 创建mongodb连接\n client = pymongo.MongoClient(host='47.106.122.64', port=27017)\n link_list = []\n # html_list = []\n mylist = []\n db = client.zhihu\n myweb = db.mywebs\n for tag in htmls:\n link = urljoin(base_url, tag.attrs['href'])\n # url列表\n # 页面内容列表\n # html_list.append(requests.get(link, headers=headers).text)\n # not myweb.find_one({'url': link}) and\n if link not in link_list:\n 
link_list.append(link)\n content = requests.get(link, headers=headers).text\n mylist.append({'url': link, 'content': content})\n\n try:\n myweb.insert_many(mylist)\n except TypeError:\n print('no problem, buddy')\n print('Total %d question pages found.' % len(set(link_list)))\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"爬虫项目/爬虫day3/mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"505198164","text":"import serial, os, time, sys, datetime, csv\n\ndef logfilename():\n    now = datetime.datetime.now()\n    #Colon is not allowed in filenames so we have to include a lookalike char \n    return 'CO2LOG-%0.4d-%0.2d-%0.2d-%0.2d%s%0.2d%s%0.2d.csv' % (now.year, now.month, now.day, now.hour, u'\\ua789',now.minute, u'\\ua789', now.second)\n\n#Function to calculate MH-Z19 crc according to datasheet\n\ndef crc8(a):\n    crc=0x00\n    count=1\n    b=bytearray(a)\n    while count<8:\n        crc+=b[count]\n        count=count+1\n    #Truncate to 8 bit\n    crc%=256\n    #Invert number with xor\n    crc=~crc&0xFF\n    crc+=1\n    return crc\n\n    # try to open serial port\n    \nport='/dev/serial0'\nsys.stderr.write('Trying port %s\\n' % port)\n    \ntry:\n    # try to read a line of data from the serial port and parse \n    with serial.Serial(port, 9600, timeout=2.0) as ser:\n        # 'warm up' with reading one input\n        result=ser.write(\"\\xff\\x01\\x86\\x00\\x00\\x00\\x00\\x00\\x79\")\n        time.sleep(0.1)\n        s=ser.read(9)\n        z=bytearray(s)\n        # Calculate crc\n        crc=crc8(s) \n        if crc != z[8]:\n            sys.stderr.write('CRC error calculated %d bytes= %d:%d:%d:%d:%d:%d:%d:%d crc= %d\\n' % (crc, z[0],z[1],z[2],z[3],z[4],z[5],z[6],z[7],z[8]))\n        else:\n            sys.stderr.write('Logging data on %s to %s\\n' % (port, logfilename()))\n            # log data\n            outfname = logfilename()\n            with open(outfname, 'a') as f:\n                # loop will exit with Ctrl-C, which raises a KeyboardInterrupt\n                while True:\n                    #Send \"read value\" command to MH-Z19 sensor\n                    result=ser.write(\"\\xff\\x01\\x86\\x00\\x00\\x00\\x00\\x00\\x79\")\n                    time.sleep(0.1)\n                    s=ser.read(9)\n                    z=bytearray(s)\n                    crc=crc8(s)\n                    #Calculate crc\n                    if crc != z[8]:\n                        sys.stderr.write('CRC error calculated %d bytes= %d:%d:%d:%d:%d:%d:%d:%d crc= %d\\n' % (crc, z[0],z[1],z[2],z[3],z[4],z[5],z[6],z[7],z[8]))\n                    else: \n                        if s[0] == \"\\xff\" and s[1] == \"\\x86\":\n                            print( \"co2=\", ord(s[2])*256 + ord(s[3]))\n                            co2value=ord(s[2])*256 + ord(s[3])\n                            now=time.ctime()\n                            parsed=time.strptime(now)\n                            lgtime=time.strftime(\"%Y %m %d %H:%M:%S\")\n                            row=[lgtime,co2value]\n                            w=csv.writer(f)\n                            w.writerow(row)\n                    #Sample every minute, synced to local time\n                    t=datetime.datetime.now()\n                    sleeptime=60-t.second\n                    time.sleep(sleeptime)\nexcept Exception as e:\n    print(\"Error\",e)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"406030343","text":"import pandas as pd\nimport os\nimport exe_main_produce_report as gp\nimport exe_main_remove_report as rp\nimport re\n\n\nclass lookupctmr:\n\n    def __init__(self):\n        self.file = pd.read_csv(\"final_csv.csv\", encoding = 'utf-8') \n    \n    def checkbucket(self, name):\n        self.name = name\n        self.bucket = []\n        my_reg = '(?i)'+ re.escape(name)\n        for i in range(len(self.file['Buinessname'])):\n            if re.findall(my_reg, self.file['Buinessname'][i]) != []:\n                if self.file['Buinessname'][i] not in self.bucket:\n                    self.bucket.append(self.file['Buinessname'][i]) \n        return self.bucket\n    \n    def getbucket(self,name):\n        self.name = name\n        
self.checkbucket(name)\n if self.bucket == []:\n print('not available in DB')\n else:\n confirm = input('We have found several names in the database, pls select the {0-n-1} item you want \\n' + '; \\n'.join([i for i in self.bucket]))\n print('The analysis for ' +self.bucket[int(confirm)]+ ' is as follows.')\n\n \n\nusername = input(\"Hi, there. What's your name? \")\nprint(\"Hi \" + username +\"! Please input some information for further assessment: \\n Which task are you going to perform? \\n 1.Refresh data visualization table \\n 2.Access a customer's creditworthiness\")\ntask = input()\ndef gettask():\n if task== '1':\n print('Please follow instruction as below.\\n 1.Download financials from CRM \\n 2.Rename as financials \\n 3.Save as csv with utf-8 encoding \\n ')\n secondstep = input('Click 3 after all actions finished \\n')\n if secondstep =='3':\n if os.path.exists('financials.csv'):\n gp.generatereport()\n rp.removereport()\n else:\n print('could not find the file, pls click 3 after actions complete')\n else:\n ctmrname = input(\"which customer are you looking for? \")\n L = lookupctmr()\n L.getbucket(ctmrname)\n\ngettask()\n","sub_path":"interactivefi.py","file_name":"interactivefi.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411322460","text":"import time\nimport sys\nimport icm\n\n\nFTDI_URL = 'ftdi://ftdi:2232:FTVE5I3T/1'\n\n\ndef main():\n imu = icm.Icm(FTDI_URL)\n imu.calibrate_zero(1000)\n\n previous_time = time.time()\n while True:\n # Calculate dt\n current_time = time.time()\n d_t = current_time - previous_time\n previous_time = current_time\n print('[t: %f dt: %f]' % (current_time, d_t), end='')\n\n # Read accelerometer and gyroscope data. More than 2 kHz should\n # be achievable with 500 kHz clock.\n data = imu.read_data()\n print(\n '%6d %6d %6d %6d %6d %6d t: %f'\n % (\n data['accel_xout'], data['accel_yout'], data['accel_zout'],\n data['gyro_xout'], data['gyro_yout'], data['gyro_zout'],\n data['temperature'],\n )\n )\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('')\n except BrokenPipeError:\n print('')\n","sub_path":"tools/imu/ftdi_icm20689.py","file_name":"ftdi_icm20689.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429604485","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport json\nimport re\nimport pathlib\n\nimport networkx\nfrom jinja2 import Environment, FileSystemLoader\nfrom . 
import __version__\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass JsonSchema2Popo:\n \"\"\"Converts a JSON Schema to a Plain Old Python Object class\"\"\"\n\n PYTHON_CLASS_TEMPLATE_FNAME = \"python_class.tmpl\"\n JS_CLASS_TEMPLATE_FNAME = \"js_class.tmpl\"\n GO_STRUCT_TEMPLATE_FNAME = \"go_struct.tmpl\"\n\n TEMPLATES = {\n \"python\": PYTHON_CLASS_TEMPLATE_FNAME,\n \"js\": JS_CLASS_TEMPLATE_FNAME,\n \"go\": GO_STRUCT_TEMPLATE_FNAME,\n }\n\n J2P_TYPES = {\n \"string\": str,\n \"integer\": int,\n \"number\": float,\n \"object\": type,\n \"array\": list,\n \"boolean\": bool,\n \"null\": None,\n }\n\n @staticmethod\n def flatten(something):\n if isinstance(something, (list, tuple, set, range)):\n for sub in something:\n yield from JsonSchema2Popo.flatten(sub)\n else:\n yield something\n\n def __init__(\n self,\n use_types=False,\n constructor_type_check=False,\n use_slots=False,\n generate_definitions=True,\n generate_root=True,\n translate_properties=False,\n language=\"python\",\n namespace_path=\"\",\n package_name=\"\",\n custom_template=\"\",\n ):\n self.list_used = False\n self.enum_used = False\n\n search_path = SCRIPT_DIR if not custom_template else os.getcwd()\n self.jinja = Environment(\n loader=FileSystemLoader(searchpath=search_path), trim_blocks=True\n )\n self.jinja.filters[\"regex_replace\"] = lambda s, find, replace: re.sub(\n find, replace, s\n )\n self.use_types = use_types\n self.use_slots = use_slots\n self.constructor_type_check = constructor_type_check\n self.generate_root = generate_root\n self.generate_definitions = generate_definitions\n self.translate_properties = translate_properties\n self.language = language\n self.namespace_path = namespace_path\n self.package_name = package_name\n self.custom_template = custom_template\n\n self.definitions = []\n\n def load(self, json_schema_file):\n self.process(json.load(json_schema_file))\n\n def get_model_dependencies(self, model):\n deps = set()\n for prop in model[\"properties\"]:\n if prop[\"_type\"][\"type\"] not in self.J2P_TYPES.values():\n deps.add(prop[\"_type\"][\"type\"])\n if prop[\"_type\"][\"subtype\"] not in self.J2P_TYPES.values():\n deps.add(prop[\"_type\"][\"subtype\"])\n return list(deps)\n\n def process(self, json_schema):\n if \"definitions\" in json_schema:\n for _obj_name, _obj in json_schema[\"definitions\"].items():\n model = self.definition_parser(_obj_name, _obj)\n self.definitions.append(model)\n\n # topological ordered dependencies\n g = networkx.DiGraph()\n models_map = {}\n for model in self.definitions:\n models_map[model[\"name\"]] = model\n deps = self.get_model_dependencies(model)\n if not deps:\n g.add_edge(model[\"name\"], \"\")\n for dep in deps:\n g.add_edge(model[\"name\"], dep)\n\n self.definitions = []\n if self.generate_definitions:\n # use lexicographical topo sort so that the generation order is stable\n for model_name in networkx.lexicographical_topological_sort(g):\n if model_name in models_map:\n # insert to front so that the sorting is reversed\n self.definitions.insert(0, models_map[model_name])\n\n # create root object if there are some properties in the root\n if \"title\" in json_schema:\n root_object_name = \"\".join(\n x for x in json_schema[\"title\"].title() if x.isalpha()\n )\n else:\n root_object_name = \"RootObject\"\n if self.generate_root:\n root_model = self.definition_parser(root_object_name, json_schema)\n self.definitions.append(root_model)\n\n def definition_parser(self, _obj_name, _obj, sub_model=\"\"):\n model = {\"name\": _obj_name, 
\"subModels\": [], \"parent\": sub_model}\n\n if \"description\" in _obj:\n model[\"comment\"] = _obj[\"description\"]\n\n join_str = \"._\"\n if self.translate_properties:\n join_str = \".\"\n sub_prefix = \"_\"\n if self.translate_properties:\n sub_prefix = \"\"\n\n if \"$ref\" in _obj and _obj[\"$ref\"].startswith(\"#/definitions/\"):\n # References defined at a top level should be copied from what it is referencing\n ref_path = _obj[\"$ref\"].split(\"/\")[2:]\n ref = join_str.join(ref_path)\n\n for model in self.definitions:\n if model[\"name\"] in ref_path:\n subModels = model[\"subModels\"]\n built_path = model[\"name\"]\n\n i = 0\n while i < len(subModels) and subModels:\n subModel = subModels[i]\n i = i + 1\n\n if \"subModels\" in subModel:\n if self.strip_sub_prefix(subModel[\"name\"]) in ref_path:\n built_path = built_path + \".\" + subModel[\"name\"]\n subModels = subModel[\"subModels\"]\n model = subModel\n i = 0\n if built_path == ref:\n break\n\n if ref_path[len(ref_path) - 1] == self.strip_sub_prefix(\n model[\"name\"]\n ):\n model = model.copy()\n model[\"name\"] = _obj_name\n model[\"parent\"] = sub_model\n return model\n\n print(\"Unable to find object refs for \", \"/\".join(ref_path))\n\n if \"type\" in _obj:\n model[\"type\"] = self.type_parser(_obj)\n model[\"text_type\"] = _obj[\"type\"]\n\n if \"enum\" in _obj:\n enum = {}\n for i, v in enumerate(_obj[\"enum\"]):\n enum[v if \"javaEnumNames\" not in _obj else _obj[\"javaEnumNames\"][i]] = v\n model[\"enum\"] = enum\n self.enum_used = True\n\n if \"extends\" in _obj and \"$ref\" in _obj[\"extends\"]:\n if _obj[\"extends\"][\"$ref\"].endswith(\".json\"):\n with open(_obj[\"extends\"][\"$ref\"], \"r\") as f:\n ref_file = json.load(f)\n self.process(ref_file)\n model[\"extends\"] = ref_file[\"title\"]\n else:\n ref_path = _obj[\"extends\"][\"$ref\"].split(\"/\")[2:]\n ref = join_str.join(ref_path)\n if sub_model and sub_model.endswith(_obj_name):\n subs = sub_model.split(\".\")[-1]\n ref = ref[len(sub_model) - len(subs) :]\n model[\"extends\"] = ref\n\n model[\"properties\"] = []\n if \"properties\" in _obj:\n for _prop_name, _prop in _obj[\"properties\"].items():\n _type = self.type_parser(_prop)\n _default = None\n _comment = None\n if \"default\" in _prop:\n _default = _type[\"type\"](_prop[\"default\"])\n if _type[\"type\"] == str:\n _default = \"'{}'\".format(_default)\n\n if \"description\" in _prop:\n _comment = _prop[\"description\"]\n\n read_list = self.definitions[:]\n read_list.append(model)\n\n def find_parent(path, model):\n return [\n (path + \".\" + m[\"name\"], find_parent(path + \".\" + m[\"name\"], m))\n for m in model[\"subModels\"]\n if \"subModels\" in m\n ]\n\n potential_paths = list(\n JsonSchema2Popo.flatten(\n [find_parent(model[\"name\"], model) for model in read_list]\n )\n )\n\n parent_name = sub_model + join_str + _prop_name\n if not sub_model:\n parent_name = _obj_name + join_str + _prop_name\n for path in potential_paths:\n if path.endswith(parent_name) and len(path) > len(parent_name):\n parent_name = path\n\n if _type[\"type\"] == list and _type[\"subtype\"] == type:\n _type[\"subtype\"] = sub_prefix + _prop_name\n _type[\"parent\"] = parent_name\n model[\"subModels\"].append(\n self.definition_parser(\n sub_prefix + _prop_name,\n _prop[\"items\"],\n sub_model=parent_name,\n )\n )\n\n if \"$ref\" in _prop and _prop[\"$ref\"].startswith(\"#/definitions/\"):\n # Properties with references should reference the existing defined classes\n ref = _prop[\"$ref\"].split(\"/\")[2:]\n _type = 
{\"type\": join_str.join(ref), \"subtype\": None}\n\n if (\"type\" in _prop and _prop[\"type\"] == \"object\") or \"enum\" in _prop:\n _type = {\n \"type\": sub_prefix + _prop_name,\n \"subtype\": None,\n \"parent\": parent_name,\n }\n\n sub_mod = self.definition_parser(\n sub_prefix + _prop_name, _prop, sub_model=parent_name\n )\n\n # Only generate sub models when the sub model actually has properties, otherwise treat is as\n # a dict, which is what an object is to JSON\n if sub_mod[\"properties\"]:\n model[\"subModels\"].append(sub_mod)\n else:\n _type = {\n \"type\": dict,\n \"subtype\": None,\n }\n\n if \"enum\" in _prop:\n self.enum_used = True\n\n _format = None\n if \"format\" in _prop:\n _format = _prop[\"format\"]\n if (\n _type[\"type\"] == list\n and \"items\" in _prop\n and isinstance(_prop[\"items\"], list)\n ):\n _format = _prop[\"items\"][0][\"format\"]\n\n _validations = {\"required\": False}\n validation_types = [\n \"maximum\",\n \"minimum\",\n \"maxItems\",\n \"minItems\",\n \"minLength\",\n \"maxLength\",\n \"pattern\",\n ]\n for t in validation_types:\n if t in _prop:\n _validations[t] = _prop[t]\n if _type[\"type\"] == list and \"items\" in _prop:\n array_validation = _prop[\"items\"]\n if t in array_validation:\n _validations[t] = array_validation[t]\n if \"required\" in _obj and _prop_name in _obj[\"required\"]:\n _validations[\"required\"] = True\n\n prop = {\n \"_name\": self.get_prop_name(_prop_name),\n \"_original_name\": _prop_name,\n \"_type\": _type,\n \"_default\": _default,\n \"_format\": _format,\n \"_comment\": _comment,\n \"_validations\": _validations,\n }\n model[\"properties\"].append(prop)\n model[\"propertiesHaveComment\"] = any(p[\"_comment\"] for p in model[\"properties\"])\n return model\n\n def type_parser(self, t):\n _type = None\n _subtype = None\n _subformat = None\n if \"type\" in t:\n if t[\"type\"] == \"array\" and \"items\" in t:\n self.list_used = True\n _type = self.J2P_TYPES[t[\"type\"]]\n if isinstance(t[\"items\"], list):\n if \"type\" in t[\"items\"][0]:\n _subtype = self.J2P_TYPES[t[\"items\"][0][\"type\"]]\n if \"format\" in t[\"items\"][0]:\n _subformat = t[\"items\"][0][\"format\"]\n elif (\n \"$ref\" in t[\"items\"][0]\n or \"oneOf\" in t[\"items\"][0]\n and len(t[\"items\"][0][\"oneOf\"]) == 1\n ):\n if \"$ref\" in t[\"items\"][0]:\n ref = t[\"items\"][0][\"$ref\"]\n else:\n ref = t[\"items\"][0][\"oneOf\"][0][\"$ref\"]\n _subtype = ref.split(\"/\")[-1]\n elif isinstance(t[\"items\"], dict):\n if \"type\" in t[\"items\"]:\n _subtype = self.J2P_TYPES[t[\"items\"][\"type\"]]\n if \"format\" in t[\"items\"]:\n _subformat = t[\"items\"][\"format\"]\n elif (\n \"$ref\" in t[\"items\"]\n or \"oneOf\" in t[\"items\"]\n and len(t[\"items\"][\"oneOf\"]) == 1\n ):\n if \"$ref\" in t[\"items\"]:\n ref = t[\"items\"][\"$ref\"]\n else:\n ref = t[\"items\"][\"oneOf\"][0][\"$ref\"]\n _subtype = ref.split(\"/\")[-1]\n elif isinstance(t[\"type\"], list):\n _type = self.J2P_TYPES[t[\"type\"][0]]\n elif t[\"type\"]:\n _type = self.J2P_TYPES[t[\"type\"]]\n if (\n _type == str\n and \"media\" in t\n and \"binaryEncoding\" in t[\"media\"]\n and t[\"media\"][\"binaryEncoding\"] == \"base64\"\n ):\n _type = bytes\n elif \"$ref\" in t:\n _type = t[\"$ref\"].split(\"/\")[-1]\n elif \"anyOf\" in t or \"allOf\" in t or \"oneOf\" in t:\n _type = list\n return {\"type\": _type, \"subtype\": _subtype, \"subformat\": _subformat}\n\n def write_file(self, filename):\n template = self.custom_template or self.TEMPLATES[self.language]\n 
self.jinja.get_template(template).stream(\n models=self.definitions,\n use_types=self.use_types,\n constructor_type_check=self.constructor_type_check,\n enum_used=self.enum_used,\n list_used=self.list_used,\n use_slots=self.use_slots,\n namespace_path=self.namespace_path,\n package_name=self.package_name,\n ).dump(filename)\n if hasattr(filename, \"close\"):\n filename.close()\n\n def get_prop_name(self, name):\n if not self.translate_properties:\n return name\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n def strip_sub_prefix(self, name):\n if self.translate_properties:\n return name\n return name.lstrip(\"_\")\n\n\ndef init_parser():\n parser = argparse.ArgumentParser(\n description=\"Converts JSON Schema to Plain Old Python Object\"\n )\n parser.add_argument(\n \"json_schema_file\",\n type=argparse.FileType(\"r\", encoding=\"utf-8\"),\n help=\"Path to JSON Schema file to load\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-file\",\n type=argparse.FileType(\"w\", encoding=\"utf-8\"),\n help=\"Path to file output\",\n default=\"model.py\",\n )\n parser.add_argument(\n \"-jt\",\n \"--custom-template\",\n help=\"Path to custom Jinja template file\",\n default=\"\",\n )\n parser.add_argument(\"-t\", \"--use-types\", action=\"store_true\", help=\"Add typings\")\n parser.add_argument(\n \"-ct\",\n \"--constructor-type-check\",\n action=\"store_true\",\n help=\"Validate input types in constructor\",\n )\n parser.add_argument(\n \"-s\", \"--use_slots\", action=\"store_true\", help=\"Generate class with __slots__.\"\n )\n parser.add_argument(\n \"--no-generate-from-definitions\",\n action=\"store_false\",\n help='Don\\'t generate classes from \"definitions\" section of the schema.',\n default=True,\n )\n parser.add_argument(\n \"--no-generate-from-root-object\",\n action=\"store_false\",\n help=\"Don't generate classes from root of the schema.\",\n default=True,\n )\n parser.add_argument(\n \"-tp\",\n \"--translate-properties\",\n action=\"store_true\",\n help=\"Translate property names into snake_case.\",\n )\n parser.add_argument(\n \"-l\",\n \"--language\",\n choices=JsonSchema2Popo.TEMPLATES.keys(),\n help=\"Which language to generate in\",\n default=\"python\",\n )\n parser.add_argument(\n \"--namespace-path\",\n help=\"Namespace path to be prepended to the @memberOf for JSDoc (only used for JS)\",\n )\n parser.add_argument(\n \"--package-name\",\n help=\"Package name for generated code (only used for Go)\",\n default=\"generated\",\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s v{}\".format(__version__)\n )\n return parser\n\n\ndef format_python_file(filename):\n try:\n import black\n\n black.format_file_in_place(\n pathlib.Path(filename).absolute(),\n fast=True,\n mode=black.FileMode(\n line_length=88, target_versions={black.TargetVersion.PY33}\n ),\n write_back=black.WriteBack.YES,\n )\n except:\n pass\n\n\ndef format_js_file(filename):\n try:\n import jsbeautifier\n\n format_opts = jsbeautifier.default_options()\n format_opts.end_with_newline = True\n format_opts.preserve_newlines = True\n format_opts.max_preserve_newlines = 2\n format_opts.wrap_line_length = 120\n\n with open(filename, \"r\") as fr:\n file = fr.read()\n with open(filename, \"w\") as f:\n f.write(jsbeautifier.beautify(file, opts=format_opts))\n except:\n pass\n\n\ndef format_go_file(filename):\n os.system(\"go fmt \" + filename)\n\n\ndef main():\n parser = init_parser()\n args = parser.parse_args()\n\n 
loader = JsonSchema2Popo(\n use_types=args.use_types,\n constructor_type_check=args.constructor_type_check,\n use_slots=args.use_slots,\n generate_definitions=args.no_generate_from_definitions,\n generate_root=args.no_generate_from_root_object,\n translate_properties=args.translate_properties,\n language=args.language,\n namespace_path=args.namespace_path,\n package_name=args.package_name,\n custom_template=args.custom_template,\n )\n loader.load(args.json_schema_file)\n\n outfile = args.output_file\n loader.write_file(outfile)\n if args.language == \"python\":\n format_python_file(outfile.name)\n elif args.language == \"js\":\n format_js_file(outfile.name)\n elif args.language == \"go\":\n format_go_file(outfile.name)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"jsonschema2popo/jsonschema2popo.py","file_name":"jsonschema2popo.py","file_ext":"py","file_size_in_byte":19622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364042397","text":"\"\"\"project_management URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # l'app taskmanager est accessible sans taper son nom\n path('', include('taskmanager.urls')),\n\n path('accounts/', include('django.contrib.auth.urls')),\n\n # je voulais inclure cet URL individuellement pour contrôler la redirection automatique\n # des utilisateurs dejà connectés, mais dans ce cas j'aurai du inclure toutes les views séparément\n # path('accounts/login', auth_views.LoginView.as_view(redirect_authenticated_user=True)),\n]\n","sub_path":"project_management/project_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124697235","text":"# -*- coding: utf-8 -*-\n'''\nAlerta Blackout Regex\n=====================\n\nAlerta plugin to enhance the blackout system.\n'''\nimport re\nimport logging\n\nfrom alertaclient.api import Client\nfrom alerta.plugins import PluginBase\n\nlog = logging.getLogger('alerta.plugins.blackout_regex')\n\nclient = Client()\n\n\ndef parse_tags(tag_list):\n return {k: v for k, v in (i.split('=', 1) for i in tag_list if '=' in i)}\n\n\nclass BlackoutRegex(PluginBase):\n def post_receive(self, alert):\n '''\n The regex blackouts are evaluated in the ``post_receive`` in order to\n have the alert already correlated, therefore provide us with the real\n Alert ID (after being correlated, and not just a random fresh ID), the\n tags from the previous evaluation, as well as a pre-filtering by the\n native blackout mechanisms (i.e., if there's a blackout matching the\n alert it won't get to this point - if it gets here we therefore evaluate\n the alert and we're sure it didn't match the literal blackout attributes\n which is ideal to preserve backwards compatibility).\n '''\n if 
not alert:\n # It actually does happen sometimes that the Alert can be None (and\n # perhaps something else too?) - for whatever reason.\n return alert\n\n if alert.status == 'closed':\n log.debug('Alert %s status is closed, ignoring', alert.id)\n return alert\n\n blackouts = client.get_blackouts()\n alert_tags = parse_tags(alert.tags)\n log.debug(blackouts)\n\n # When an alert matches a blackout, this plugin adds a special tag\n # ``regex_blackout`` that points to the blackout ID matched.\n # This facilitates the blackout matching, by simply checking if the\n # blackout is still open.\n if 'regex_blackout' in alert_tags:\n log.debug(\n 'Checking blackout %s which used to match this alert',\n alert_tags['regex_blackout'],\n )\n for blackout in blackouts:\n if blackout.id == alert_tags['regex_blackout']:\n log.debug('Found blackout %s', blackout.id)\n if blackout.status == 'active':\n log.debug(\n 'Blackout %s is still active, setting alert %s '\n 'status as blackout',\n blackout.id,\n alert.id,\n )\n if alert.status != 'blackout':\n alert.set_status('blackout')\n return alert\n # If the blackout is no longer active, simply return\n # the alert as-is, without changing the status, but\n # removing the regex_blackout tag, so when the alert is\n # fired again, we'll know that it does no longer match\n # an active blackout.\n log.debug(\n 'Blackout %s does no longer exist, or is not active, removing '\n 'tag and leaving status unchanged',\n alert_tags['regex_blackout'],\n )\n alert.untag(['regex_blackout={}'.format(alert_tags['regex_blackout'])])\n return alert\n\n # No previous regex blackout match, let's evaluate.\n # The idea is that if a blackout has a number of attributes configured,\n # in order to match, the alert must match all of these attributes.\n for blackout in blackouts:\n # The general assumption is that a blackout has at least one of\n # these attributes set, therefore once we try to match only when an\n # attribute is configured, and skip to the next blackout when the\n # matching fails.\n match = False\n if blackout.group:\n if not re.search(blackout.group, alert.group):\n log.debug(\n '%s doesn\\'t match the blackout group %s',\n alert.group,\n blackout.group,\n )\n continue\n match = True\n log.debug('%s matched %s', blackout.group, alert.group)\n if blackout.event:\n if not re.search(blackout.event, alert.event):\n log.debug(\n '%s doesn\\'t match the blackout event %s',\n alert.event,\n blackout.event,\n )\n continue\n match = True\n log.debug('%s matched %s', blackout.event, alert.event)\n if blackout.resource:\n if not re.search(blackout.resource, alert.resource):\n log.debug(\n '%s doesn\\'t match the blackout resource %s',\n alert.resource,\n blackout.resource,\n )\n continue\n match = True\n log.debug('%s matched %s', blackout.resource, alert.resource)\n if blackout.service:\n if not re.search(blackout.service[0], alert.service[0]):\n log.debug(\n '%s doesn\\'t match the blackout service %s',\n alert.service[0],\n blackout.service[0],\n )\n continue\n match = True\n log.debug('%s matched %s', blackout.service[0], alert.service[0])\n if blackout.tags and alert.tags:\n blackout_tags = parse_tags(blackout.tags)\n if not set(blackout_tags.keys()).issubset(set(alert_tags.keys())):\n # The blackout must have at least as many tags as the alert\n # in order to match.\n continue\n if not all(\n [\n re.search(blackout_tags[blackout_tag], alert_tags[blackout_tag])\n for blackout_tag in blackout_tags\n if blackout_tag in alert_tags\n ]\n ):\n log.debug(\n '%s don\\'t seem to 
match the blackout tag(s) %s',\n str(alert_tags),\n str(blackout_tags),\n )\n continue\n match = True\n if match:\n log.debug(\n 'Alert %s seems to match (regex) blackout %s. '\n 'Adding regex_blackout and status',\n alert.id,\n blackout.id,\n )\n alert.tag(['regex_blackout={}'.format(blackout.id)])\n alert.set_status('blackout')\n return alert\n\n return alert\n\n def pre_receive(self, alert):\n return alert\n\n def status_change(self, alert, status, text):\n return alert, status, text\n","sub_path":"blackout_regex.py","file_name":"blackout_regex.py","file_ext":"py","file_size_in_byte":7007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"106845581","text":"from Acquisition import aq_inner, aq_parent\nfrom zope.component import getMultiAdapter\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom Products.LinguaPlone import LinguaPloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\n\n\nclass CreateTranslation(BrowserView):\n\n    def _setCanonicalLanguage(self, obj):\n        \"\"\"Make sure an object has a language set (ie is not neutral).\n        \"\"\"\n        lang=obj.Language()\n        if not lang:\n            portal_state=getMultiAdapter((self.context, self.request),\n                    name=\"plone_portal_state\")\n            lang=portal_state.language()\n            obj.setLanguage(lang)\n\n\n\n    def nextUrl(self, trans):\n        \"\"\"Figure out where users should go after creating the translation.\n        \"\"\"\n        try:\n            action=trans.getTypeInfo().getActionInfo(\"object/translate\",\n                    object=trans)\n            return action[\"url\"]\n        except ValueError:\n            pass\n\n        try:\n            action=trans.getTypeInfo().getActionInfo(\"object/edit\",\n                    object=trans)\n            return action[\"url\"]\n        except ValueError:\n            pass\n\n        state=getMultiAdapter((trans, self.request), name=\"plone_context_state\")\n        return state.view_url()\n\n    def __call__(self):\n        status=IStatusMessage(self.request)\n        self._setCanonicalLanguage(self.context)\n\n        newlang=self.request[\"newlanguage\"]\n\n        if self.context.hasTranslation(newlang):\n            state=getMultiAdapter((self.context, self.request),\n                    name=\"plone_context_state\")\n            status.addStatusMessage(_(u\"message_translation_exists\",\n                default=u\"Translation already exists\"),\n                type=\"error\")\n            return self.request.response.redirect(state.view_url())\n\n        lt=getToolByName(self.context, \"portal_languages\")\n        lt.setLanguageCookie(newlang)\n\n        #Customization for eduCommons to ensure parent Division --> Course --> SubObject's parent folder translated first\n        if self.context.aq_inner.aq_parent.Type() == 'Plone Site':\n            self.context.addTranslation(newlang)\n            trans=self.context.getTranslation(newlang)\n            status.addStatusMessage(_(u\"message_translation_created\",\n                default=u\"Translation created.\"),\n                type=\"info\")\n\n            return self.request.response.redirect(self.nextUrl(trans))\n        else:\n            if self.context.aq_inner.aq_parent.aq_explicit.hasTranslation(newlang):\n                self.context.addTranslation(newlang)\n                trans=self.context.getTranslation(newlang)\n                status.addStatusMessage(_(u\"message_translation_created\",\n                    default=u\"Translation created.\"),\n                    type=\"info\")\n                \n                return self.request.response.redirect(self.nextUrl(trans))\n            else:\n                url = self.context.absolute_url()\n                not_available = '%s/not_available_lang/view?set_language=%s&parentNotTranslated=True' % (url, newlang)\n                return 
self.request.response.redirect(not_available)\n\n\n\n","sub_path":"eduCommons/browser/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"49196989","text":"'''\nCreated on 15/06/2012\n\nThis code is part of Marcos -- ERP Software\nCopyright (C) 2012 Eneldo Antonio Serrata Peralta. All rights reserved.\n\nMarcos is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nMarcos is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n \nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n \nAuthor: Eneldo Serrata, self-taught software programmer\nInternet: http://marcos.org.do Phone: (809)597-2221\n'''\nfrom mptt.forms import MPTTAdminForm\nfrom inventarios.models import Categoria, Colore, Marca, Medida, Producto,\\\n    Existencia, Foto, ProductoDoc, ProductoPrecio, ProductoCombo\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django.forms.models import ModelForm\n\nclass CategoriaForm(MPTTAdminForm):\n    class Meta:\n        model=Categoria\n        exclude = ('leaf', 'expanded')\n    \n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(CategoriaForm, self).__init__(*args, **kwargs)\n    \nclass ColoreForm(MPTTAdminForm):\n    class Meta:\n        model=Colore\n        exclude = ('leaf', 'expanded')\n    \n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(ColoreForm, self).__init__(*args, **kwargs)\n    \nclass MarcaForm(MPTTAdminForm):\n    class Meta:\n        model=Marca\n        exclude = ('leaf', 'expanded')\n    \n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(MarcaForm, self).__init__(*args, **kwargs)\n    \nclass MedidaForm(MPTTAdminForm):\n    class Meta:\n        model=Medida\n        exclude = ('leaf', 'expanded')\n    \n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(MedidaForm, self).__init__(*args, **kwargs)\n\nclass ProductoForm(ModelForm):\n    class Meta:\n        model=Producto\n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(ProductoForm, self).__init__(*args, **kwargs)\n    \nclass ExistenciaForm(ModelForm):\n    class Meta:\n        model=Existencia\n    def __init__(self, *args, **kwargs):\n        self.helper = FormHelper()\n        self.helper.form_class = 'blueForms'\n        self.helper.form_method = 'post'\n        self.helper.add_input(Submit('submit', 'Grabar'))\n        return super(ExistenciaForm, self).__init__(*args, **kwargs)\n    \nclass 
FotoForm(ModelForm):\n class Meta:\n model=Foto\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Grabar'))\n return super(FotoForm, self).__init__(*args, **kwargs)\n \nclass ProductoDocForm(ModelForm):\n class Meta:\n model=ProductoDoc\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Grabar'))\n return super(ProductoDocForm, self).__init__(*args, **kwargs)\n \nclass ProductoPrecioForm(ModelForm):\n class Meta:\n model = ProductoPrecio\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Grabar'))\n return super(ProductoPrecioForm, self).__init__(*args, **kwargs)\n \nclass ProductoComboForm(ModelForm):\n class Meta:\n model = ProductoCombo\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = 'blueForms'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Grabar'))\n return super(ProductoComboForm, self).__init__(*args, **kwargs)\n ","sub_path":"inventarios/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"468956210","text":"# numeric integration illustration\n# Runge Kutta integration for a particle under gravity\n\nimport pygame, time\nfrom pygame.locals import *\nfrom pygame.color import *\nfrom numpy import *\n\npygame.init()\nscreen = pygame.display.set_mode((640,480))\nbackground = pygame.Surface(screen.get_size())\nbackground.fill((128,128,255))\n \nm = 10.0\ng = 4.0\nt = 0\n\nstate = array((10, 470, 12, -15, 0, g/m, t), dtype=float)\nrstate = state.copy()\nmstate = state.copy()\n\ndef d(state):\n x,y,vx,vy,ax,ay,t = state\n # deriv of position is velocity:\n dx = vx\n dy = vy\n # deriv of velocity is acceleration:\n dvx = ax\n dvy = ay\n # acceleration is constant:\n dax = 0\n day = 0\n # dt/dt:\n dt = 1\n return array((dx,dy,dvx,dvy,dax,day,dt),dtype=float)\n \ndef midpoint(state, dt):\n k1 = d(state)\n k2 = d(state + k1 * dt/2)\n newstate = state + k2 * dt\n return newstate\n\ndef rungekutta(state, dt):\n k1 = d(state)\n k2 = d(state + k1 * dt/2)\n k3 = d(state + k2 * dt/2)\n k4 = d(state + k3 * dt)\n newstate = state + (k1 + 2*k2 + 2*k3 + k4) * dt/6\n return newstate\n \ndt = 5\nscreen.blit(background, (0,0))\n\nclock = pygame.time.Clock()\n\nrunning = 1\nwhile running:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == QUIT:\n running = 0\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n running = 0\n # analytic solution:\n x = 12*t + 10 + 2\n y = .2*t*t - 15*t + 470 - 2\n pygame.draw.circle(background, (0,0,0), (x,y), 1)\n pygame.draw.circle(background, (255,0,0), state[0:2], 1)\n pygame.draw.circle(background, (0,255,0), mstate[0:2], 1)\n pygame.draw.circle(background, (0,0,255), rstate[0:2]-2, 1)\n screen.blit(background, (0,0))\n t += dt\n mstate = midpoint(mstate, dt)\n rstate = rungekutta(rstate, dt)\n state += d(state)*dt\n pygame.display.flip()\n 
\npygame.quit()\n\n","sub_path":"lectures/physics/NumericalIntegration/numint04.py","file_name":"numint04.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"546158034","text":"import csv\nimport os\nimport shutil\nimport glob\nimport re\nfrom config import Config\nfrom domain import Inbox, Project\n\n\nclass Data:\n\n    def __init__(self):\n        self.config = Config()\n\n    def get_project(self, name):\n        \"\"\"\n        Get a project by name\n\n        Args:\n            name(str): name of project\n\n        Returns: project object\n\n        \"\"\"\n        all_projects = self.get_all_projects()\n        project = [p for p in all_projects if p.name == name]\n        return project[0] if project else None\n\n    def save_project(self, project):\n        \"\"\"\n        Saves project to filesystem\n        Args:\n            project(Project): project object you want to save\n\n        If project does not exist, create project directory from template files.\n        \"\"\"\n        exists = self.get_project(project.name) is not None\n        if not exists:\n            template_dir = os.path.join(self.config.templates_dir, 'project')\n            new_dir = os.path.join(\n                self.config.project_category_dir('planning'), project.name)\n            shutil.copytree(template_dir, new_dir)\n        else:\n            proj_dir = self.config.get_project_dir(project.name, project.state)\n            data_file = os.path.join(proj_dir, 'data.csv')\n            print(data_file)\n            with open(data_file, 'wb') as csv_file:\n                csv_file.truncate()\n                writer = csv.DictWriter(csv_file, fieldnames=project.data[0].keys() if project.data else [])\n                writer.writeheader()\n                writer.writerows(project.data)\n\n\n    def get_all_projects(self):\n        \"\"\"\n        Gets all projects\n\n        Returns: list of projects\n\n        \"\"\"\n        project_folders = glob.glob(self.config.projects_dir + '/**/**')\n        projects = []\n        for p in project_folders:\n            match = re.search(r'projects\\/(?P<state>.+)\\/(?P<name>.+)', p)\n            name = match.group('name')\n            state = match.group('state')\n            projects.append(Project(name, state))\n        return projects\n\n    def get_inbox(self):\n        \"\"\"\n        Loads inbox from file\n        Returns: Deserialized inbox object\n\n        \"\"\"\n        with open(self.config.inbox_path, 'r') as inbox_file:\n            items = []\n            # clean up EOL characters\n            for item in inbox_file:\n                item = item.replace('\\n', '')\n                items.append(item)\n            return Inbox(items)\n\n    def save_inbox(self, inbox):\n        \"\"\"\n        Saves inbox to file\n        Args:\n            inbox (Inbox): inbox to save\n\n        \"\"\"\n        with open(self.config.inbox_path, 'w') as inbox_file:\n            inbox_file.truncate()\n            for item in inbox.items:\n                inbox_file.write(item)\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"500632705","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 20 10:46:00 2019\n\n@author: AshishModi\n\"\"\"\n\nimport os\nimport numpy as np\nfrom glob import glob\nimport cv2\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nfrom itertools import combinations as iter_combinations\nimport pickle\nimport random\n\nmax_episodes=50\n\noutput_dataset_directory='test_grayscale'\n\npca=PCA(n_components=50, random_state=0)\n\ndef loadGrayscaledFrames(episode_directory):\n    #returns all the r frames in an episode with the size of 210*160 i.e, r*210*160\n    frames=np.array([cv2.imread(frame, 0) for frame in sorted(glob(episode_directory+'/*.png'))])\n    frames=(frames/255).astype('float32')\n    frames=frames[:, 35:195, :]\n    #print(np.shape(frames))\n    #pick alternate pixels\n    frames=frames[:, ::2, 
::2]\n #print(np.shape(frames))\n #print(frames[0][np.where(frames[0]>0.0)])\n return frames\n\ndef applyPCA(frames, step):\n #first step will be to apply the standard scaling to the frames \n #std_scaler=StandardScaler()\n #frames=std_scaler.fit_transform(frames)\n print('PCA')\n #apply pca on each frame individually\n #pca=PCA(n_components=50, random_state=0)\n if step==0:\n \n frames=pca.fit_transform(frames)\n else:\n frames=pca.transform(frames)\n #print(frames)\n return frames\n\ndef returnCombinations(i, no_of_combinations):\n index=np.linspace(i-6, i-1, 6).astype(int) #keeping the last frame as constant\n #print(index)\n comb_set=list(iter_combinations(index, 4))\n random_combinations=np.random.randint(0, len(comb_set), no_of_combinations).flatten()\n combinations=[]\n for step in random_combinations:\n combinations.append(comb_set[step])\n #print(combinations)\n return combinations\n\ndef preprocessTest1(dataset_directory):\n \n #load the list of all the episodes\n \n list_of_episodes=sorted(os.listdir(dataset_directory))\n #print(len(list_of_episodes))\n \n #create the output dataset directory\n if not os.path.exists(output_dataset_directory):\n os.mkdir(output_dataset_directory)\n \n #read images from each episode\n episode_count=0\n \n #frames=loadGrayscaledFrames(train_directory+'/00000001')\n #frames=applyPCA(frames, 0)\n \n #rewards=pd.read_csv(dataset_directory+'/rewards.csv', header=None).values.astype(int)\n #print(np.shape(rewards))\n features=[]\n save_count=0\n for episode in list_of_episodes:\n path_to_episode=dataset_directory+'/'+episode\n if not os.path.isdir(path_to_episode):\n continue\n #first step will be to load all the images in the directory\n frames=loadGrayscaledFrames(path_to_episode)\n #apply PCA to every frame in an episode\n #frames=applyPCA(frames, 1)\n #print(np.shape(frames))\n new_frame=[]\n for i in range(5):\n \n if i==0:\n new_frame=frames[i]\n else:\n new_frame=np.hstack([new_frame, frames[i]])\n new_frame=new_frame.reshape(new_frame.shape[0], new_frame.shape[1], 1)\n #print(np.shape(new_frame))\n \n '''if len(features)==0:\n features=new_frame\n else:\n features=np.vstack([features, new_frame])'''\n features.append(new_frame)\n #print(np.shape(features))\n #load the rewards\n #print(np.shape(frames))\n \n #print(rewards.shape[0])\n \n \n \n #dump the features\n episode_count+=1\n if episode_count%500 ==0:\n \n print(np.shape(features))\n features=np.asarray(features)\n if not os.path.exists(output_dataset_directory+'/'+str(save_count)):\n os.mkdir(output_dataset_directory+'/'+str(save_count))\n outfile=open(output_dataset_directory+'/'+str(save_count)+'/features', 'wb')\n pickle.dump(features, outfile)\n outfile.close()\n features=[]\n save_count+=1\n \n #outfile=open(output_dataset_directory+'/val_labels', 'wb')\n #pickle.dump(rewards, outfile)\n #outfile.close()\n \n print(np.shape(features))\n if save_count==61:\n print(np.shape(features))\n features=np.asarray(features)\n if not os.path.exists(output_dataset_directory+'/'+str(save_count)):\n os.mkdir(output_dataset_directory+'/'+str(save_count))\n outfile=open(output_dataset_directory+'/'+str(save_count)+'/features', 'wb')\n pickle.dump(features, outfile)\n outfile.close()\n #if episode_count>=max_episodes:\n #break\n \n \n #break'''\n \n\ndef loadFeaturesTest1():\n \n list_of_features=([feature for feature in sorted(glob(output_dataset_directory+'/*_features'))])\n print(list_of_features)\n features=[]\n for feature_file in list_of_features:\n infile=open(feature_file, 'rb')\n 
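# note added for clarity: each matched file holds one pickled chunk of features;\n # the chunks are loaded back one by one and stacked into a single array below\n 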
new_features=pickle.load(infile)\n infile.close()\n if len(features)==0:\n features=new_features\n else:\n features=np.vstack([features, new_features])\n print(np.shape(features))\n \n '''list_of_labels=([label for label in sorted(glob(output_dataset_directory+'/*_labels'))])\n print(list_of_labels)\n labels=[]\n for labels_file in list_of_labels:\n infile=open(labels_file, 'rb')\n new_labels=pickle.load(infile)\n infile.close()\n \n if len(labels)==0:\n labels=new_labels\n else:\n labels=np.append(labels, new_labels)\n print(np.shape(labels))\n print(np.shape(labels))\n \n labels=labels[:, 1]\n print(labels)'''\n \n \n \n return features\n\ndef randomizeFeatures(features, labels):\n index=np.random.permutation(len(features))\n #print(index[np.where(index==len(features)-1)])\n features=features[index]\n labels=labels[index]\n #print(np.shape(labels))\n return features, labels\n \nif __name__=='__main__':\n preprocessTest1('../SVM/test_dataset')\n \n '''features=loadSVMFeaturesTest1()'''\n","sub_path":"preprocess_final_test.py","file_name":"preprocess_final_test.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"436180737","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom typing import TYPE_CHECKING\n\nfrom azure.core import PipelineClient\nfrom msrest import Deserializer, Serializer\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from typing import Any\n\nfrom ._configuration import AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2Configuration\nfrom .operations import AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2OperationsMixin\nfrom . import models\n\n\nclass AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2(AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2OperationsMixin):\n \"\"\"Azure Cognitive Service Metrics Advisor REST API (OpenAPI v2).\n\n :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://:code:`<resource-name>`.cognitiveservices.azure.com).\n :type endpoint: str\n \"\"\"\n\n def __init__(\n self,\n endpoint, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n base_url = '{endpoint}/metricsadvisor/v1.0'\n self._config = AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2Configuration(endpoint, **kwargs)\n self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._serialize.client_side_validation = False\n self._deserialize = Deserializer(client_models)\n\n\n def close(self):\n # type: () -> None\n self._client.close()\n\n def __enter__(self):\n # type: () -> AzureCognitiveServiceMetricsAdvisorRESTAPIOpenAPIV2\n self._client.__enter__()\n return self\n\n def __exit__(self, *exc_details):\n # type: (Any) -> None\n self._client.__exit__(*exc_details)\n","sub_path":"sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/_azure_cognitive_service_metrics_advisor_restapi_open_ap_iv2.py","file_name":"_azure_cognitive_service_metrics_advisor_restapi_open_ap_iv2.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410243123","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n# Create your views here.\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom rest_framework import status, generics\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import Ledger\nfrom .serializers import LedgerSerializer, UserSerializer\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\nfrom .forms import EditForm, NewLedgerWithoutLenderAccForm, NewLedgerWithoutLendeeAccForm\n\n\nclass HomeView(TemplateView):\n \"\"\" The main home page \"\"\"\n template_name = 'home.html'\n\nclass UserList(generics.ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\nclass UserDetail(generics.RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\ndef aboutus(request):\n context = {}\n return render(request, 'userdata/about.html', context)\n\ndef useraccount_home(request):\n ledgers_lender = Ledger.objects.filter(lender_acc=request.user.pk, payedback=False).count()\n ledgers_lendee = Ledger.objects.filter(lendee_acc=request.user.pk, payedback=False).count()\n # substitute 1 for an empty side so the ratio below cannot divide by zero\n if ledgers_lender == 0:\n ledgers_lender = 1\n if ledgers_lendee == 0:\n ledgers_lendee = 1\n ratio = ledgers_lender/ledgers_lendee * 100\n context = {'ledgers_lender_lendee_ratio':ratio}\n return render(request, 'userdata/profile.html', context)\n\ndef youpayback(request):\n ledgers = Ledger.objects.filter(lendee_acc=request.user.pk, payedback=False)\n context = {'ledgers':ledgers}\n return render(request, 'userdata/youpayback.html', context)\n\ndef getpayback(request):\n ledgers = Ledger.objects.filter(lender_acc=request.user.pk, payedback=False)\n context = {'ledgers':ledgers}\n return render(request, 'userdata/getpayback.html', context)\n\ndef detailpayback(request, ledger_id):\n ledger = get_object_or_404(Ledger, pk=ledger_id)\n\n if request.method == \"POST\":\n form = EditForm(request.POST,instance=ledger)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.save()\n return HttpResponseRedirect(\"/accounts/\")\n\n else:\n form = EditForm(initial={'lender_acc': ledger.lender_acc, 'lendee_acc': ledger.lendee_acc,\\\n 'amount': ledger.amount, 'desc': ledger.desc, 'oweObject': ledger.oweObject, \\\n 
'payedback': ledger.payedback, })\n\n context = {'ledger':ledger, 'form':form}\n return render(request, 'userdata/detailpayback.html', context)\n\n\ndef newpayback_getpayedback(request):\n if request.method == \"POST\":\n form = NewLedgerWithoutLenderAccForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.lender_acc = request.user\n model_instance.save()\n return HttpResponseRedirect(\"../\")\n\n else:\n form = NewLedgerWithoutLenderAccForm()\n\n return render(request, 'userdata/newpayback_getpayedback.html', {'form':form})\n\ndef newpayback_youpayback(request):\n if request.method == \"POST\":\n form = NewLedgerWithoutLendeeAccForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.lendee_acc = request.user\n model_instance.save()\n return HttpResponseRedirect(\"../\")\n\n else:\n form = NewLedgerWithoutLendeeAccForm()\n\n return render(request, 'userdata/newpayback_youpayback.html', {'form':form})\n\n\n@api_view(['GET', 'POST'])\ndef ledger_list(request):\n \"\"\"\n List all ledgers, or create a new ledger.\n \"\"\"\n\n if request.method == 'GET':\n ledgers = Ledger.objects.all()\n serializer = LedgerSerializer(ledgers, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = LedgerSerializer(data=request.data)\n serializer.initial_data['lender_acc'] = int(serializer.initial_data['lender_acc'][:-1])\n serializer.initial_data['lendee_acc'] = int(serializer.initial_data['lendee_acc'][:-1])\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef ledger_detail(request, pk):\n \"\"\"\n Retrieve, update or delete a ledger instance.\n \"\"\"\n try:\n snippet = Ledger.objects.get(pk=pk)\n except Ledger.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = LedgerSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = LedgerSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"userdata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"457998560","text":"# -*- coding: utf-8 -*-\r\nfrom Bio import SeqIO\r\nimport sys\r\nimport argparse\r\nimport multiprocessing as mp\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-i', action='store', dest='inputFile', help='The input file (must be fasta or fastq)')\r\nparser.add_argument('-o', action='store', dest='outputFile', help='The output file')\r\nparser.add_argument('-f', action='store', dest='fileFormat', help='The file format (must be fasta or fastq)')\r\nparser.add_argument('-s', action='store', dest='windows', help='The sizes of homopolymeres to be counted (e.g. 
\"4,5,6\" to test all homopolymeres of a size of 4 or 5 or 6 nucleotides)')\r\nresults = parser.parse_args()\r\n\r\n\r\n\r\n# Count and compute the frequency of homopolymeres in fastq or fasta files.\r\n# @params : \r\n#\tfilePath : the input file (must be fastq or fasta)\r\n# \toutputPath : the output file path\r\n#\tfileFormat : the file format (must be fasta or fastq)\r\n#\twindows : all the size of homopolymeres to be counted for each nucleotide. must be\r\n#\t\ta list (e.g. \"4,5,6\" to test all homopolymeres of a size of 4 or 5 or 6 nucleotides). \r\n# @output:\r\n#\tvoid\r\n# @output file format:\r\n# \ttsv file\r\n#\tinput file name \\t homopolymere \\t homopolymere count \\t total bases in the file \\t percentage of homopolymeres of this size bases in the file\r\ndef homopolymeres_freq(filePath, outputPath, fileFormat, windows):\r\n\t\r\n\twindows = [int(number) for number in windows.split(\",\")]\r\n\r\n\t# Some variables...\r\n\tnucs = [\"A\",\"T\",\"C\",\"G\"]\r\n\ttotalNucs = 0\r\n\tfileW = open(outputPath, \"a\")\r\n\r\n\t# Create a dictionnary for each nucleotide and each size of the window\r\n\tresDict = {\"A\":[], \"T\":[], \"C\":[], \"G\":[]}\r\n\tfor key in resDict.keys():\r\n\t\tresDict[key] = {size : 0 for size in windows}\r\n\r\n\tprint(\"Dictionnary ready, counting the homopolymeres...\")\r\n\t# For each seq, count the number of homopolymeres and fill the dictionnary\r\n\tfor seq_record in SeqIO.parse(filePath, fileFormat):\r\n\r\n\t\ttotalNucs += len(seq_record)\r\n\t\tfor nuc in nucs:\r\n\t\t\tfor size in windows:\r\n\t\t\t\thomopolymere = nuc * size\r\n\t\t\t\tresDict[nuc][size] += seq_record.seq.count(homopolymere)\r\n\r\n\r\n\tprint(\"Homopolymeres count finished. Writing the results...\")\r\n\t# Parse the dictionnary to write clear data in the output file\r\n\tfor nuc in resDict.keys():\r\n\t\tfor size in resDict[nuc].keys():\r\n\t\t\tpercentage = ((float(resDict[nuc][size]) * float(size)) / float(totalNucs)) * 100.0\t\r\n\t\t\tfileW.write(filePath + \"\\t\" + nuc + str(size) + \"\\t\" + str(resDict[nuc][size]) + \"\\t\" + str(totalNucs) + \"\\t\" + str(percentage) + \"%\\n\") \r\n\t\r\n\r\nhomopolymeres_freq(results.inputFile, results.outputFile, results.fileFormat, results.windows)\t\t\t\r\n\r\n\r\n\r\n\r\n","sub_path":"homopolymeres_freq.py","file_name":"homopolymeres_freq.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"185357593","text":"from django.db import models\nfrom django.template.defaultfilters import slugify\n\n\nclass Category( models.Model ):\n\n\ttitle \t\t= models.CharField( max_length = 200, default = '' )\n\tslug \t\t= models.SlugField( max_length = 60, blank = True )\n\n\tclass Meta:\n\n\t\tordering = ( 'title', )\n\t\tverbose_name_plural = 'categories'\n\n\n\tdef __unicode__( self ):\n\t\t\n\t\treturn self.title\n\t\t\n\n\tdef save( self, *args, **kwargs ):\n\n\t\tif not self.id:\n\t\t\tself.slug = slugify( self.title )\n\n\t\tsuper( Category, self ).save( *args, **kwargs )\n\n\n\nclass Project( models.Model ):\n\n\tcategory\t= models.ForeignKey( Category, related_name = 'projects', null = True )\n\tslug \t\t= models.SlugField( max_length = 60, blank = True )\n\ttitle \t\t= models.CharField( max_length = 200, default = 'Case' )\n\tbrief\t\t= models.TextField( default = 'A short description of the brief in regards to the case.' )\n\tprocess\t\t= models.TextField( default = 'Some text on how we tackled the brief.' 
)\n\tresult\t\t= models.TextField( default = 'Elaboration on what the result of the case.' )\n\tcolor \t\t= models.CharField( max_length = 200, default = '#00D8D8' )\n\tthumbnail\t= models.ImageField( upload_to = 'images/', default = 'images/placeholder-thumbnail.jpg', help_text = 'Preferred thumbnail dimensions: 640x360 pixels' )\n\tposter\t\t= models.ImageField( upload_to = 'images/', blank = True, help_text = 'Preferred thumbnail dimensions: 1920x740 pixels' )\n\tclip\t\t= models.URLField( max_length = 200, blank = True, help_text = 'Insert entire Vimeo uri' )\n\tlink\t\t= models.URLField( max_length = 200, blank = True )\n\n\tclass Meta:\n\n\t\tordering = ( 'title', )\n\n\n\tdef __unicode__( self ):\n\t\t\n\t\treturn self.title\n\n\n\tdef save( self, *args, **kwargs ):\n\n\t\tif not self.id:\n\t\t\tself.slug = slugify( self.title )\n\n\t\tsuper( Project, self ).save( *args, **kwargs )\n\n\n\nclass Credit( models.Model ):\n\n\tproject\t\t= models.ForeignKey( Project, related_name = 'credits', null = True )\n\trole \t\t= models.CharField( max_length = 200, blank = True, default = '' )\n\tname \t\t= models.CharField( max_length = 200, default = '' )\n\tlink\t\t= models.CharField( max_length = 200, blank = True, help_text = 'Insert link to email.' )\n\n\tdef __unicode__( self ):\n\t\t\n\t\treturn ''\n\n\tclass Meta:\n\n\t\tordering = ( 'id', )\n\n\n\nclass Still( models.Model ):\n\n\tproject\t\t= models.ForeignKey( Project, related_name = 'stills', null = True )\n\timage\t\t= models.ImageField( upload_to = 'images/', default = 'images/placeholder-thumbnail.jpg', help_text = 'Preferred still dimensions: 640x360 pixels' )\n\n\tclass Meta:\n\n\t\tordering = ( 'id', )\n\n\tdef __unicode__( self ):\n\t\t\n\t\treturn ''\n\n\n\nclass Technology( models.Model ):\n\n\tproject\t\t= models.ForeignKey( Project, related_name = 'technologies', null = True )\n\tcategory \t= models.CharField( max_length = 200, blank = True, default = '' )\n\tname \t\t= models.CharField( max_length = 200, default = '' )\n\tlink\t\t= models.URLField( max_length = 200, blank = True, default = '' )\n\n\tclass Meta:\n\n\t\tordering = ( 'id', )\n\t\tverbose_name_plural = 'technologies'\n\n\tdef __unicode__( self ):\n\t\t\n\t\treturn ''\n\n","sub_path":"flipflop/apps/portfolio/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521822839","text":"import pygame as pg\nimport sys\n\n\nYELLOW = (255,255,0)\nFIBSH_PLAVNIK = (255, 150, 0)\nFIBSH_BODY = (50, 180, 50)\nFIBSH_EYE = (0, 0, 200)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBEBRA_BODY = (230, 230, 230)\nICE = (70, 70, 100)\nWATER = (50, 50, 200)\nSUN = (255, 255, 0)\nSKY = (100, 100, 255)\n\nWidth = 1200\nHeight = 800\n\nsc = pg.display.set_mode((Width, Height))\nsc.fill(SKY)\n\"\"\"\nThe user screen\nWidth is the window width\nHeight is the window height\n\"\"\"\n\nSky = pg.Surface((Width, Height//2))\nSky.fill(SKY)\nrect = Sky.get_rect(topleft = (0,0))\nsc.blit(Sky, rect)\n\"\"\"\nSurface on which the sky is drawn\noccupies the top half of the user screen\nColor SKY\n\"\"\"\n\nIce = pg.Surface((Width, Height//2))\nIce.fill(WHITE)\nrect = Ice.get_rect(bottomleft = (0, Height))\nsc.blit(Ice, rect)\n\"\"\"\nSurface on which the ground is drawn\noccupies the bottom half of the user screen\nColor WHITE\n\"\"\"\n\nPicture = pg.Surface((Width//2, 
3*Height//5))\nPicture.fill(WHITE)\nPicture.set_colorkey(WHITE)\n\"\"\"\nSurface on which the beaver picture is drawn\noccupies a third of the user screen\nso that it can be scaled down later\nColor WHITE\nIts coordinates are the position of the picture's center\nTransparent against the sky\n\"\"\"\n\nSun = pg.Surface((Width//4, Height//4))\nSun.fill(SKY)\nSun.set_colorkey(SKY)\nrect = Sun.get_rect(center = (Width//2, Height//4))\nSun.set_alpha(10)\nsc.blit(Sun, rect)\ndef whiteball(cd):\n \"\"\"\n Draws a basic oval that the beaver is built from.\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the leftmost point\n param: cd[1] is the y coordinate of the topmost point\n param: cd[2] is the height of the ellipse\n param: cd[3] is the width of the ellipse\n Rendered on the Picture surface\n \"\"\"\n ori = cd[2]/abs(cd[2])\n #OUTLINE OF THE WHITE ELLIPSE\n pg.draw.ellipse(Picture, BLACK, (cd[0], cd[1], cd[2], cd[3]))\n #WHITE ELLIPSE\n pg.draw.ellipse(Picture, BEBRA_BODY, (cd[0] + 1*ori, cd[1] + 2, cd[2] - 2*ori, cd[3] - 4))\n\ndef fibsh(cd):\n \"\"\"\n Draws a fish.\n Body color - FIBSH_BODY,\n Fin color - FIBSH_PLAVNIK,\n Eye color - FIBSH_EYE,\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the leftmost point\n param: cd[1] is the y coordinate of the topmost point\n param: cd[2] is the absolute value of the scaling factor\n param: cd[3] is the sign of the scaling factor, equal to +-1.\n The drawing can be mirrored about the vertical axis\n Rendered on the Picture surface\n \"\"\" \n cd = (cd[0] / cd[2], cd[1] / abs(cd[2]), abs(cd[2]), cd[2]/abs(cd[2]))\n #OUTLINE OF THE TOP FIN\n pg.draw.polygon(Picture, BLACK,\n ((((cd[0] + 10) * cd[2] - 1) * cd[3], (cd[1] + 4) * cd[2] + 1), (((cd[0] + 8) * cd[2] - 1) * cd[3], (cd[1] - 3) * cd[2] - 1),\n (((cd[0] + 30) * cd[2] + 1) * cd[3], (cd[1] - 3) * cd[2] - 1), (((cd[0] + 28) * cd[2] + 1) * cd[3], (cd[1] + 4) * cd[2] + 1)))\n #TOP FIN\n pg.draw.polygon(Picture, FIBSH_PLAVNIK, (((cd[0] + 10)*cd[2]*cd[3], (cd[1] + 4)*cd[2]), ((cd[0] + 8)*cd[2]*cd[3], (cd[1] - 3)*cd[2]),\n ((cd[0] + 30)*cd[2]*cd[3], (cd[1] - 3)*cd[2]), ((cd[0] + 28)*cd[2]*cd[3], (cd[1]+4)*cd[2])))\n #BODY OUTLINE\n pg.draw.ellipse(Picture, BLACK, ((cd[0] * cd[2] -1) * cd[3] , cd[1] * cd[2] - 1, (40 * cd[2] + 2) * cd[3], 20 * cd[2] + 2))\n #BODY\n pg.draw.ellipse(Picture, FIBSH_BODY, (cd[0] * cd[2] * cd[3], cd[1] * cd[2], 40 * cd[2] * cd[3], 20 * cd[2]))\n #OUTLINE OF THE TAIL FIN\n pg.draw.polygon(Picture, BLACK,\n (((cd[0] * cd[2] + 1)* cd[3], (cd[1] + 8) * cd[2]), (((int(cd[0] - 7) * cd[2] - 1)* cd[3]), (cd[1] + 15) * cd[2] + 1),\n ((int((cd[0] - 7) * cd[2] - 1)* cd[3]), cd[1] * cd[2] - 1)))\n #TAIL FIN\n pg.draw.polygon(Picture, FIBSH_BODY, ((cd[0]*cd[2]*cd[3], (cd[1] + 8)*cd[2]), (int((cd[0] - 7)*cd[2]*cd[3]), (cd[1] + 15)*cd[2]),\n (int((cd[0] - 7)*cd[2]*cd[3]), cd[1]*cd[2])))\n #EYE\n pg.draw.ellipse(Picture, (FIBSH_EYE), ((cd[0] + 30)*cd[2]*cd[3], (cd[1] + 5)*cd[2], 5*cd[2]*cd[3], 5*cd[2]))\n #SIDE FIN\n pg.draw.polygon(Picture, (FIBSH_PLAVNIK ), (((cd[0] + 24)*cd[2]*cd[3], (cd[1] + 10)*cd[2]),\n ((cd[0] + 20)*cd[2]*cd[3], (cd[1] + 6)*cd[2]),\n ((cd[0] + 20)*cd[2]*cd[3], (cd[1] + 14)*cd[2])))\n\ndef fibshes(cd): \n \"\"\"\n Draws the catch of fish.\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the leftmost point\n param: cd[1] is the y coordinate of the topmost point\n param: cd[2] is the absolute value of the scaling factor\n param: cd[3] 
is the sign of the scaling factor, equal to +-1.\n cd[2] * cd[3] = abs(cd[2])\n \"\"\"\n fibsh(((cd[0] + 320) * cd[2] * cd[3], (cd[1] + 300) * cd[2], cd[2] * cd[3]))\n fibsh(((cd[0] + 400) * cd[2] * cd[3], (cd[1] + 305) * cd[2], cd[2] * cd[3] * (-1)))\n fibsh(((cd[0] + 360) * cd[2] * cd[3], (cd[1] + 290) * cd[2], cd[2] * cd[3]))\n fibsh(((cd[0] + 380) * cd[2] * cd[3], (cd[1] + 295) * cd[2], cd[2] * cd[3] * (-1)))\n fibsh(((cd[0] + 240) * cd[2] * cd[3], (cd[1] + 200) * cd[2], cd[2] * cd[3]))\n fibsh(((cd[0] + 280) * cd[2] * cd[3], (cd[1] + 180) * cd[2], cd[2] * cd[3]))\n fibsh(((cd[0] + 330) * cd[2] * cd[3], (cd[1] + 190) * cd[2], cd[2] * cd[3]))\n\ndef bebr(cd):\n \"\"\"\n Draws the beaver with a fishing rod and an ice hole.\n Color of the rod, line, eye, nose and smile - BLACK,\n Ice color - ICE,\n Water color - WATER,\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the leftmost point\n param: cd[1] is the y coordinate of the topmost point\n param: cd[2] is the absolute value of the scaling factor\n param: cd[3] is the sign of the scaling factor, equal to +-1.\n The drawing can be mirrored about the vertical axis\n Rendered on the Picture surface.\n \"\"\"\n cd = (cd[0] / cd[2], cd[1] / abs(cd[2]), abs(cd[2]), cd[2]/abs(cd[2]))\n #CATCH\n fibshes(cd)\n #ICE HOLE\n pg.draw.ellipse(Picture, ICE, ((cd[0] + 260)*cd[2]*cd[3], (cd[1] + 210)*cd[2], 120*cd[2]*cd[3], 50*cd[2]))\n pg.draw.ellipse(Picture, WATER, ((cd[0] + 270)*cd[2]*cd[3], (cd[1] + 220)*cd[2], 100*cd[2]*cd[3], 40*cd[2]))\n #FISHING ROD\n pg.draw.line(Picture, BLACK, ((cd[0] + 160)*cd[2]*cd[3], (cd[1] + 200)*cd[2]),\n ((cd[0] + 310)*cd[2]*cd[3], (cd[1] - 100)*cd[2]), max(int(5*cd[2]), 1))\n #FISHING LINE\n pg.draw.line(Picture, BLACK, ((cd[0] + 310)*cd[2]*cd[3], (cd[1] - 100)*cd[2]),\n ((cd[0] + 310)*cd[2]*cd[3], (cd[1] + 240)*cd[2]), 1)\n #BEBRA'S BODY\n whiteball((cd[0]*cd[2]*cd[3], cd[1]*cd[2], 150*cd[2]*cd[3], 300*cd[2]))\n #BEBRA'S LEG\n whiteball(((cd[0] + 70)*cd[2]*cd[3], (cd[1] + 240)*cd[2], 120*cd[2]*cd[3], 80*cd[2]))\n #BEBRA'S FOOT\n whiteball(((cd[0] + 150)*cd[2]*cd[3], (cd[1] + 290)*cd[2], 60*cd[2]*cd[3], 40*cd[2]))\n #BEBRA'S ARM\n whiteball(((cd[0] + 120)*cd[2]*cd[3], (cd[1] + 80)*cd[2], 100*cd[2]*cd[3], 60*cd[2]))\n #BEBRA'S HEAD\n whiteball(((cd[0] + 100)*cd[2]*cd[3], (cd[1] - 30)*cd[2], 100*cd[2]*cd[3], 70*cd[2]))\n #BEBRA'S EAR\n whiteball(((cd[0] + 95)*cd[2]*cd[3], (cd[1] - 24)*cd[2], 20*cd[2]*cd[3], 20*cd[2]))\n #BEBRA'S SMILE\n pg.draw.line(Picture, BLACK, ((cd[0] + 180)*cd[2]*cd[3], (cd[1] + 5)*cd[2]),\n ((cd[0] + 200)*cd[2]*cd[3], (cd[1] + 10)*cd[2]))\n #BEBRA'S NOSE\n pg.draw.ellipse(Picture, BLACK, ((cd[0] + 190)*cd[2]*cd[3], (cd[1] - 10)*cd[2], 10*cd[2]*cd[3], 10*cd[2]))\n #BEBRA'S EYE\n pg.draw.ellipse(Picture, BLACK, ((cd[0] + 120)*cd[2]*cd[3], (cd[1] - 20)*cd[2], 5*cd[2]*cd[3], 5*cd[2]))\n\nbebr((50, 100, 1))\nrect = Picture.get_rect(topleft = (Width*2, 4*Height)) \nsc.blit(Picture, rect)\n\"\"\"\nPut the beaver drawing onto the Picture surface\nHide it off-screen\n\"\"\"\n\ndef picture(cd):\n \"\"\"\n Draws the upright beaver with the rod, ice hole and catch\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the center\n param: cd[1] is the y coordinate of the center\n param: cd[2] is the downscaling factor\n \"\"\"\n scale = pg.transform.scale(Picture,\n (Picture.get_width() // cd[2], Picture.get_height() //cd[2]))\n scale_rect = scale.get_rect(center = (cd[0], cd[1]))\n sc.blit(scale, scale_rect)\n \ndef antipicture(cd):\n \"\"\"\n Draws the mirrored beaver with the rod, 
ice hole and catch\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the center\n param: cd[1] is the y coordinate of the center\n param: cd[2] is the downscaling factor\n \"\"\"\n Picture_mirror = pg.transform.flip(Picture, 1, 0)\n scale_mirror = pg.transform.scale(Picture_mirror,\n (Picture_mirror.get_width() // cd[2], Picture_mirror.get_height() //cd[2]))\n rect_mirror = scale_mirror.get_rect(center = (cd[0], cd[1]))\n sc.blit(scale_mirror, rect_mirror)\n\ndef galo(R):\n \"\"\"\n Defines the drawing of the sun and its halo\n param: R sets the radius of the sun and the radius of the halo\n \"\"\"\n pg.draw.circle(Sun, YELLOW, [Width//8, Height//8], R)\n pg.draw.circle(Sun, YELLOW, [Width//8, Height//8], 2*R, 8)\n\ndef sun(cd):\n \"\"\"\n Draws the sun and its halo\n param: cd is a tuple of input parameters\n param: cd[0] is the x coordinate of the center of the sun-and-halo picture\n param: cd[1] is the y coordinate of the center of the sun-and-halo picture\n param: cd[2] is the image brightness\n param: cd[3] is the radius of the circles\n \"\"\"\n galo(cd[3])\n for i in range(1, cd[2]):\n scale = pg.transform.scale(\n Sun, ((i+1)*Sun.get_width() // i+3, (i+1)*Sun.get_height() // i+3))\n Sun.set_alpha(20+i*2)\n rect = scale.get_rect(center = (cd[0], cd[1]))\n sc.blit(scale, rect)\n\n\n\nsun((600, 200, 70, 40))\npicture((210, 410, 2))\npicture((600, 500, 1))\nantipicture((900, 450, 1))\n\n\npg.display.update()\n \nwhile 1:\n for i in pg.event.get():\n if i.type == pg.QUIT:\n sys.exit()\n elif i.type == pg.KEYUP \\\n and i.key == pg.K_f: \n pg.display.update(rect)\n \n pg.time.delay(20)\n","sub_path":"lab 4/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":12278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"3230140","text":"# coding=utf-8\n'''\nCreated on Apr 11, 2016\n\n@author: yangjie\n'''\nimport unittest2\n\nfrom subjects_interface_test.subject_test import BaseSubjectTest\n\n\nclass AddSubjectTest(BaseSubjectTest):\n\n def setUp(self):\n BaseSubjectTest.setUp(self)\n\n def test_add_subject(self):\n res = self.add_subject2()\n print(res[\"result\"][\"data\"][\"startTime\"])\n uuid = res[\"result\"][\"data\"][\"uuid\"]\n self.delete_subject(uuid)\n\nif __name__ == \"__main__\":\n unittest2.main()\n","sub_path":"subjects_interface_test/add_subject_test.py","file_name":"add_subject_test.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634562445","text":"import logging, os\n\nlogging.disable(logging.WARNING)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\n\nfrom autoscalingsim.scaling.policiesbuilder.metric.scaling_aspect_calculation.calculators.learning_based.model.model import ScalingAspectToQualityMetricModel\nfrom autoscalingsim.scaling.policiesbuilder.metric.scaling_aspect_calculation.calculators.learning_based.model.nonlinear.nonlinear import NonlinearModel\nfrom autoscalingsim.utils.error_check import ErrorChecker\n\n@ScalingAspectToQualityMetricModel.register('neural_net')\nclass NeuralNet(NonlinearModel):\n\n \"\"\"\n\n Reference: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor\n\n Configuration example:\n\n \"desired_aspect_value_calculator_conf\": {\n \"category\": \"learning\",\n \"config\": {\n \"fallback_calculator\": {\n \"category\": \"rule\",\n \"config\": {\n \"name\": \"ratio\",\n \"target\": {\n \"metric_name\": 
\"vCPU\",\n \"value\": 0.05,\n \"unit\": \"float\"\n },\n \"adjustment_heuristic_conf\": {\n \"name\": \"rescale\",\n \"scaling_factor\": 1.15\n }\n }\n },\n \"model\": {\n \"name\": \"neural_net\",\n \"layers\": [\n {\n \"type\": \"Dense\",\n \"units\": 10,\n \"params\": {}\n },\n {\n \"type\": \"Dropout\",\n \"rate\": 0.1,\n \"params\": {}\n },\n {\n \"type\": \"Dense\",\n \"units\": 1,\n \"params\": {}\n }\n ],\n \"model_params\": {\n \"learning\": {\n \"loss\": \"mean_squared_error\",\n \"optimizer\": \"adam\"\n },\n \"default_layers_params\": {\n \"Dense\" : {},\n \"Dropout\" : {}\n }\n }\n },\n \"performance_metric\": {\n \"metric_source_name\": \"response_stats\",\n \"metric_name\": \"buffer_time\",\n \"submetric_name\": \"*\",\n \"threshold\": {\n \"value\": 100,\n \"unit\": \"ms\"\n }\n },\n \"model_quality_metric\": {\n \"name\": \"mean_squared_error\",\n \"threshold\": 10\n },\n \"minibatch_size\": 2,\n \"optimizer_config\": {\n \"method\": \"trust-constr\",\n \"jac\": \"2-point\",\n \"hess\": \"SR1\",\n \"verbose\": 0,\n \"maxiter\": 100,\n \"xtol\": 0.1,\n \"initial_tr_radius\": 10\n }\n }\n }\n \"\"\"\n\n _LAYERS = {\n 'Dense': { 'model': tf.keras.layers.Dense, 'mandatory_params_names': ['units'], 'default_params': { 'activation' : 'relu' } },\n 'Dropout': { 'model': tf.keras.layers.Dropout, 'mandatory_params_names': ['rate'], 'default_params': {} }\n }\n\n def __init__(self, config):\n\n super().__init__(config)\n\n if self._model is None:\n model_params = ErrorChecker.key_check_and_load('model_params', config, default = dict())\n learning_params = ErrorChecker.key_check_and_load('learning', model_params, default = {'loss' : 'mean_squared_error', 'optimizer' : 'adam'})\n default_layers_params = ErrorChecker.key_check_and_load('default_layers_params', model_params, default = dict())\n\n self._model = tf.keras.models.Sequential()\n model_layers = ErrorChecker.key_check_and_load('layers', config, default = list())\n if len(model_layers) == 0:\n raise ValueError('No layers specified for the model')\n\n for layer_conf in model_layers:\n layer_type = ErrorChecker.key_check_and_load('type', layer_conf)\n layer_template = self.__class__._LAYERS.get(layer_type, None) # TODO: class?\n if layer_template is None:\n raise ValueError(f'Undefined layer {layer_type}')\n\n mandatory_layer_params = dict()\n for mandatory_param_name in layer_template['mandatory_params_names']:\n mandatory_param_value = ErrorChecker.key_check_and_load(mandatory_param_name, layer_conf)\n mandatory_layer_params[mandatory_param_name] = mandatory_param_value\n\n optional_params = ErrorChecker.key_check_and_load('params', layer_conf, default = default_layers_params.get(layer_type, layer_template['default_params']))\n layer_params = {**mandatory_layer_params, **optional_params}\n\n self._model.add(layer_template['model'](**layer_params))\n\n self._model.compile(**learning_params)\n\n def save_to_location(self, path_to_model_file : str):\n\n self._model.save(path_to_model_file)\n\n def load_from_location(self, path_to_model_file : str):\n\n if not path_to_model_file is None:\n if os.path.exists(path_to_model_file):\n self._model = tf.keras.models.load_model(path_to_model_file)\n\n def _internal_fit(self, model_input, model_output):\n\n model_input_t = tf.constant(model_input, dtype = tf.float32)\n model_output_t = tf.constant(model_output, dtype = tf.float32)\n self._model.fit(model_input_t, model_output_t, verbose = 
0)\n","sub_path":"autoscalingsim/scaling/policiesbuilder/metric/scaling_aspect_calculation/calculators/learning_based/model/nonlinear/impl/neural_net.py","file_name":"neural_net.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402880199","text":"import pgzrun\nimport random\n\n# Window width\nWIDTH = 800\n\n# Window height\nHEIGHT = 600\n\n# Side length of a single square field\nfield_size = 20\n\n# Width of the whole board - the number of single fields\nfield_width = WIDTH // field_size\n\n# Height of the whole board - the number of single fields\nfield_height = HEIGHT // field_size\n\n# The game board\nfield = []\n\n# Available field colors. The first one is reserved for the starting color.\ncolors = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]\n\n# Number of available colors\ncolors_size = len(colors)\n\n# Initially both players have the first color from the array\nplayer_color = 0\ncomputer_color = 0\n\n# The player starts\ncurrent_player = 1\n\n\ndef draw():\n # Clear the game screen by filling it with black\n screen.fill((0, 0, 0))\n\n # Draw the board\n draw_field()\n\n\n# Drawing the board\ndef draw_field():\n for x in range(field_width):\n for y in range(field_height):\n # Get the color of the field\n field_color = field[x][y]\n\n # Compute the field's on-screen coordinates\n field_x = x * field_size\n field_y = y * field_size\n\n # Create the field rectangle to be drawn\n field_rect = Rect((field_x, field_y), (field_size, field_size))\n\n # Draw the field on the screen\n screen.draw.rect(field_rect, field_color)\n\n\ndef update():\n global current_player\n # If it is the computer's turn\n if current_player == 2:\n # Make the computer's move\n computer_move()\n current_player = 1\n\n\n# Make the computer's move\ndef computer_move():\n global computer_color\n\n new_color = computer_color\n\n # Until the drawn move is allowed\n while new_color == computer_color or new_color == player_color:\n # Draw a new move\n new_color = random.randint(1, colors_size - 1)\n\n flood_fill(field_width - 1, field_height - 1, computer_color, new_color)\n computer_color = new_color\n\n\n# Read the player's move\ndef on_key_down(key):\n global player_color, current_player\n\n # If it is not the player's turn\n if current_player != 1:\n # Stop the function\n return\n\n new_color = player_color\n if key == keys.K_1:\n new_color = 1\n\n if key == keys.K_2:\n new_color = 2\n\n if key == keys.K_3:\n new_color = 3\n\n if key == keys.K_4:\n new_color = 4\n\n if key == keys.K_5:\n new_color = 5\n\n # If the move is not allowed\n if new_color == player_color or new_color == computer_color:\n return\n\n flood_fill(0, 0, player_color, new_color)\n player_color = new_color\n current_player = 2\n\n\ndef flood_fill(x, y, old_color, new_color):\n global field\n\n if x < 0 or x >= field_width:\n return\n\n if y < 0 or y >= field_height:\n return\n\n if field[x][y] != colors[old_color]:\n return\n\n field[x][y] = colors[new_color]\n flood_fill(x + 1, y, old_color, new_color)\n flood_fill(x - 1, y, old_color, new_color)\n flood_fill(x, y + 1, old_color, new_color)\n flood_fill(x, y - 1, old_color, new_color)\n\n\n# Prepare the board, filling it with random values\ndef prepare_field():\n for x in range(field_width):\n # Add an empty row\n field.append([])\n\n for y in range(field_height):\n # Draw a color index, skipping the first one in the array\n random_index = 
random.randint(1, colors_size - 1)\n\n # Pick the color matching the drawn number\n random_color = colors[random_index]\n\n # Add the new field to the board\n field[x].append(random_color)\n\n # Set the starting colors of the players' starting fields\n field[0][0] = colors[0]\n field[field_width - 1][field_height - 1] = colors[0]\n\n\n# Prepare the board\nprepare_field()\n\n# Start the game\npgzrun.go()\n","sub_path":"Spotkanie 14/gra.py","file_name":"gra.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387629585","text":"import matplotlib.pyplot as plt\n\nfrom utils import *\nfrom bernoulli import *\n\n\ndef binomial(n, p):\n \"\"\"n trials of a Bernoulli event\"\"\"\n x = 0\n for i in range(n):\n x += bernoulli(p)\n return x\n\n\nif __name__ == \"__main__\":\n params = [(10, .8), (6, .4)]\n for p in params:\n data = []\n for i in range(10000):\n data.append(binomial(p[0], p[1]))\n discrete_plot(data)\n\n plt.legend([f'n={p[0]} p={p[1]}' for p in params])\n plt.show()\n","sub_path":"2.2.distribuciones/binomial.py","file_name":"binomial.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"187890453","text":"from __future__ import print_function, unicode_literals\n\nimport json\nimport re\n\nfrom datetime import datetime\n# Conditional urllib imports for both Python 2 and Python 3\ntry:\n from urllib.parse import parse_qsl, urlencode, urlparse, urlsplit\n from urllib.request import urlopen, Request\n from urllib.error import HTTPError\nexcept ImportError:\n from urlparse import parse_qsl, urlparse, urlsplit\n from urllib import urlencode\n from urllib2 import urlopen, Request, HTTPError\n\n\nclass GitHubAPIWrapper():\n \"\"\"A wrapper class for GitHub REST API v3 requests.\n\n GitHub REST API v3 docs: https://developer.github.com/v3/\n\n It's recommended to use an OAuth personal access token by default\n due to possible blocking of frequent API requests from the same IP address.\n\n Attributes:\n repo (dict): GitHub repository information.\n\n repo contents:\n repo_url (str): Full URL of GitHub repository.\n repo_owner (str): GitHub user that created the repo.\n repo_title (str): Repository title.\n\n filters (dict): Optional filters for repository data.\n\n filters contents:\n from_date (datetime.date|None): Date filtering floor.\n to_date (datetime.date|None): Date filtering ceiling.\n branch (str): Project branch to analyze.\n\n exceptions (dict): Possible exceptions of parsing date filters.\n\n exceptions contents:\n from_date (bool): If floor date exception has occurred.\n to_date (bool): If ceiling date exception has occurred.\n \"\"\"\n def __init__(self, repo_url,\n from_date=None, to_date=None, branch='master',\n auth_token=None):\n # Parse repository URL to validate and get its owner and title\n self.repo = {}\n\n result = urlparse(repo_url)\n if result.scheme and result.netloc and result.path:\n self.repo['url'] = repo_url\n\n parsed_repo_url = repo_url.split('//')[1].split('/')\n self.repo['owner'] = parsed_repo_url[1]\n self.repo['title'] = parsed_repo_url[2]\n else:\n self.repo['url'] = None\n self.repo['owner'] = None\n self.repo['title'] = None\n\n # Parse optional filters by date\n self.filters = {}\n self.filters['from_date'] = None\n self.filters['to_date'] = None\n\n self.exceptions = {}\n self.exceptions['from_date'] = False\n self.exceptions['to_date'] = False\n\n if 
from_date:\n try:\n self.filters['from_date'] = datetime.strptime(\n from_date, '%Y-%m-%d'\n ).date()\n except ValueError:\n self.exceptions['from_date'] = True\n\n if to_date:\n try:\n self.filters['to_date'] = datetime.strptime(\n to_date, '%Y-%m-%d'\n ).date()\n except ValueError:\n self.exceptions['to_date'] = True\n\n self.filters['branch'] = branch\n\n # Optional OAuth personal access token\n self._auth_token = auth_token\n\n def _compose_full_api_request_url(self, resource, qs=None):\n \"\"\"Compose full URL for a given API resource.\n\n Args:\n resource (str): API resource title.\n qs (dict, optional): Data for GET request query string.\n\n Returns:\n str: Full URL for a given API resource.\n \"\"\"\n full_url = '{root}/repos/{owner}/{title}/{resource}'.format(\n root='https://api.github.com',\n owner=self.repo['owner'],\n title=self.repo['title'],\n resource=resource\n )\n\n if qs:\n qs = urlencode(qs)\n full_url = '{}?{}'.format(full_url, qs)\n\n return full_url\n\n def _get_pages_count(self, value):\n \"\"\"Get pages count parsed from a given `Link` header.\n\n Args:\n value (str): `Link` header value.\n\n Returns:\n int: Pages count.\n \"\"\"\n replace_chars = \" '\\\"\"\n\n # default when the header carries no 'last' relation\n pages_count = 0\n\n for val in re.split(\", *<\", value):\n try:\n url, params = val.split(\";\", 1)\n except ValueError:\n url, params = val, ''\n\n link = {}\n\n link['url'] = url.strip(\"<> '\\\"\")\n\n for param in params.split(\";\"):\n try:\n key, value = param.split(\"=\")\n except ValueError:\n break\n\n link[key.strip(replace_chars)] = value.strip(replace_chars)\n parsed_result = dict(parse_qsl(urlsplit(link['url']).query))\n\n if link.get('rel') == 'last':\n pages_count = int(parsed_result['page'])\n\n return pages_count\n\n def _api_request(self, url, method='GET', qs=None):\n \"\"\"Wrapper for requests to API resources.\n\n Args:\n url (str): A given API resource URL.\n method (str): HTTP method (HEAD, GET).\n qs (dict, optional): Query string.\n\n Returns:\n list|dict: Response object parsed from JSON.\n\n Returns in case of error:\n dict: Error information.\n\n dict contents:\n * `success` (bool): `False` for error response.\n * `message` (str): Error message.\n \"\"\"\n full_url = self._compose_full_api_request_url(url, qs=qs)\n\n # Optional OAuth authorization token\n headers = {}\n if self._auth_token:\n headers['Authorization'] = 'token {}'.format(self._auth_token)\n\n # Ugly workaround for Python 2 `(\n try:\n request = Request(full_url, method=method, headers=headers)\n except TypeError:\n request = Request(full_url, headers=headers)\n request.get_method = lambda: method\n\n try:\n response = urlopen(request)\n except HTTPError as e:\n message = ('{message}.'.format(message=e))\n\n if e.code == 403:\n ban_info = 'You`ve been banned by GitHub. 
' \\\n 'Wait a while or use -a for OAuth access token.'\n message = '{}\\n{}'.format(message, ban_info)\n\n response = {\n 'success': False,\n 'code': e.code,\n 'message': message,\n }\n return response\n else:\n if method == 'HEAD':\n try:\n link_header = response.info().dict.get('link', None)\n except AttributeError:\n link_header = response.getheader('Link')\n\n pages_count = (\n self._get_pages_count(link_header) if\n link_header else\n 0\n )\n\n return pages_count\n\n return json.loads(response.read().decode('utf-8'))\n\n def get_commits(self):\n \"\"\"Get commits list for a given repository.\n\n Returns:\n dict: Dictionary with information about repo contributors.\n\n dict contents:\n * `success` (bool): `True` for successful response.\n * `commits` (list): Contributors data.\n\n Returns in case of error:\n dict: Error information.\n\n dict contents:\n * `success` (bool): `False` for error response.\n * `message` (str): Error message.\n \"\"\"\n # Compose HEAD request to get pagination info\n qs = {}\n qs['per_page'] = 100\n\n if self.filters['from_date']:\n qs['since'] = datetime.strftime(\n self.filters['from_date'], '%Y-%m-%dT%H:%M:%SZ'\n )\n if self.filters['to_date']:\n qs['until'] = datetime.strftime(\n self.filters['to_date'], '%Y-%m-%dT%H:%M:%SZ'\n )\n qs['sha'] = self.filters['branch']\n\n # Get pages count for possible request pagination\n pages_count = self._api_request('commits', method='HEAD', qs=qs)\n\n # Catch possible mistyped URLs or other network issues\n if isinstance(pages_count, dict) and not pages_count['success']:\n return pages_count\n\n commits = []\n\n # If pages count exists - make requests for every page\n if pages_count > 0:\n for page in range(pages_count):\n qs['page'] = page + 1\n co = self._api_request('commits', qs=qs)\n commits += co\n # If there`s no pages count - make one request\n else:\n commits = self._api_request('commits', qs=qs)\n\n # print('commits len:', len(commits))\n\n return commits\n\n def get_resources(self, res_name):\n \"\"\"Get resources for a given repository.\n\n Resource may be either a pull request or an issue.\n\n Args:\n res_name (str): Resource name (`pulls` or `issues`).\n\n Returns:\n dict: Counter of resources.\n\n dict contents:\n * `success` (bool): Successful response or not.\n * `pulls` or `issues` (collections.Counter): Counter.\n \"\"\"\n # Compose resource API request\n qs = {}\n qs['state'] = 'all'\n qs['per_page'] = 100\n\n if self.filters['from_date']:\n qs['since'] = datetime.strftime(\n self.filters['from_date'], '%Y-%m-%dT%H:%M:%SZ'\n )\n if self.filters['to_date']:\n qs['until'] = datetime.strftime(\n self.filters['to_date'], '%Y-%m-%dT%H:%M:%SZ'\n )\n qs['base'] = self.filters['branch'] # `head` or `base` ?\n\n # Get pages count for possible request pagination\n pages_count = self._api_request(res_name, method='HEAD', qs=qs)\n\n # Catch possible mistyped URLs or other network issues\n if isinstance(pages_count, dict) and not pages_count['success']:\n return pages_count\n\n resources = []\n\n # If pages count exists - make requests for every page\n if pages_count > 0:\n for page in range(pages_count):\n qs['page'] = page + 1\n res = self._api_request(res_name, qs=qs)\n resources += res\n # If there`s no pages count - make one request\n else:\n resources = self._api_request(res_name, qs=qs)\n\n # print('{} len:'.format(res_name), len(resources))\n\n return 
resources\n","sub_path":"github_repo_analyzer/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":10472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392982288","text":"# relationship operations between sets\ns1 = {1,2,3,4,5,6}\ns2 = {6,5,4,3,2,1}\n# == checks whether two sets contain exactly the same elements\nprint(s1 == s2)\n\ns3 = {4,5,6,7}\ns4 = {1,2,3,4,5,7,6,8}\n# issubset checks whether one set is a \"subset\" of another\nprint(s3.issubset(s4))\n\n# issuperset checks whether one set is a \"superset\" of another\nprint(s4.issuperset(s3))\n\ns5 = {5}\ns6 = {1,2,3,4,5}\n# isdisjoint checks whether two sets share any elements:\n# True means they have no elements in common, False means they overlap\ns5.isdisjoint(s6)","sub_path":"步骤一:Python基础知识/day11/集合间的关系操作.py","file_name":"集合间的关系操作.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146998696","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\n\nurls = [] # collected image links\n\ndef getUrls(url):\n #url = 'https://www.douban.com/doulist/30327/'\n r = requests.get(url)\n soup = BeautifulSoup(r.text,'lxml')\n img = soup.select('div.post > a > img')\n for i in img:\n urls.append(i.attrs['src'])\n\ndef save_img(urls):\n root = 'H://Pycharm_Test//Img//'\n for url in urls:\n #print(url)\n path = root + url.split('/')[-1]\n print(path)\n try:\n if not os.path.exists(root):\n os.mkdir(root)\n if not os.path.exists(path):\n r = requests.get(url)\n with open(path, 'wb') as f:\n f.write(r.content)\n f.close()\n print(\"File saved successfully!\")\n else:\n print(\"File already exists\")\n except:\n print(\"Download failed\")\n\ngetUrls('https://www.douban.com/doulist/30327/')\nsave_img(urls)","sub_path":"imgSpider.py","file_name":"imgSpider.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29014570","text":"import numpy as np\nfrom scipy.stats import f\nfrom matplotlib import pyplot as plt\n\ndef seasonal_individual_averages(y, x, i):\n \"\"\"Function that computes grouped averages of y by factors x (month) and i\n (individual identifier). 
x are considered to be months and are grouped \n according to meteorological seasons (Northern Hemisphere)\n \n Parameters\n ----------\n y : numpy.ndarray\n 1-dimensional numpy array with outcome measurements\n x : numpy.ndarray\n 1-dimensional numpy array with month identifiers\n i : numpy.ndarray\n 1-dimensional numpy array with individual identifiers\n\n Returns\n -------\n y_avg : numpy.ndarray\n outcome averages by season and identifier\n x_avg : numpy.ndarray\n season identifiers\n i_avg : numpy.ndarray\n individual identifiers\n \n \"\"\"\n y_avg = []\n x_avg = []\n i_avg = []\n \n # Loop through all unique values for x and i\n for xi in ['winter', 'spring', 'summer', 'autumn']:\n for ii in np.unique(i):\n y_mean = []\n # Loop through all elements of the array\n for k in range(len(y)):\n if xi == 'winter':\n if x[k] in [12,1,2] and i[k] == ii:\n y_mean.append(y[k])\n elif xi == 'spring':\n if x[k] in [3,4,5] and i[k] == ii:\n y_mean.append(y[k])\n elif xi == 'summer':\n if x[k] in [6,7,8] and i[k] == ii:\n y_mean.append(y[k])\n elif xi == 'autumn':\n if x[k] in [9,10,11] and i[k] == ii:\n y_mean.append(y[k])\n # Calculate mean\n y_mean = np.mean(y_mean)\n # And store results\n y_avg.append(y_mean)\n x_avg.append(xi)\n i_avg.append(ii)\n \n # Convert to array and return results\n y_avg = np.array(y_avg)\n x_avg = np.array(x_avg)\n i_avg = np.array(i_avg)\n \n return y_avg, x_avg, i_avg\n \n\ndef repeated_measures_oneway_anova(y, x, i, path):\n \"\"\"Function to compute repeated measures one-way ANOVA for a variable y \n with groups x, in which each individual i is measured several times.\n \n Parameters\n ----------\n y : numpy.ndarray\n 1-dimensional numpy array with outcome measurements\n x : numpy.ndarray\n 1-dimensional numpy array with group identifiers\n i : numpy.ndarray\n 1-dimensional numpy array with individual identifiers\n\n Returns\n -------\n F: numpy.float64\n F test statistic\n pval: numpy.float64\n P-value of the test\n \"\"\"\n \n # Running message\n print('Computing repeated measures anova analysis...')\n \n # Data checks: numpy array\n if type(y) != np.ndarray or type(x) != np.ndarray or type(i) != np.ndarray:\n raise Exception('Inputs must be of type numpy.ndarray')\n \n # Data checks: Same length\n if len(list(set([len(x), len(y), len(i)]))) != 1:\n raise Exception('Input arrays must have the same length')\n \n # Total sum of squares (SST)\n mean_all = np.mean(y)\n SST = np.sum((y-mean_all)**2)\n print('SST: '+ str(round(SST,2)))\n \n # Between sum of squares (SSB)\n mean_x = []\n n_i = len(set(i))\n for s in np.unique(x):\n tmp = y[np.where(x == s)]\n mean_x.append(np.mean(tmp))\n SSB = np.sum(n_i * ((np.array(mean_x)-mean_all)**2))\n print('SSB: '+ str(round(SSB, 2)))\n \n # Within sum of squares (SSW)\n mean_w = {}\n for t in np.unique(x):\n tmp = y[np.where(x == t)]\n mean_w[t] = np.mean(tmp)\n ss_w = []\n for u in range(y.shape[0]):\n ss_w.append((y[u] - mean_w[x[u]])**2)\n SSW = np.sum(ss_w) \n print('SSW: '+ str(round(SSW, 2)))\n \n # Subject sum of squares (SSS)\n mean_i = []\n n_x = len(set(x))\n for v in np.unique(i):\n tmp = y[np.where(i == v)]\n mean_i.append(np.mean(tmp))\n SSS = np.sum(n_x * ((np.array(mean_i)-mean_all)**2))\n print('SSS: '+ str(round(SSS, 2)))\n \n # Error variability (SSE)\n SSE = SSW - SSS \n print('SSE: '+ str(round(SSE, 2)))\n \n # F statistic\n df1 = n_x-1\n MSB = SSB/df1\n df2 = (n_x-1)*(n_i-1)\n MSE = SSE/df2\n F = MSB/MSE\n print('F: ' + str(round(F, 2)))\n \n # Compute p-value (F distribution)\n pval = 1-f.cdf(F, df1, df2)\n 
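# note added for clarity: the p-value is the right-tail area of the F(df1, df2) distribution at the observed statistic\n 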
print('p-value: '+ str(round(pval, 4)))\n \n # Call plot function\n anova_plot(y, x, i, df1, df2, F, path)\n \n # Return results\n return F, pval\n\ndef anova_plot(y, x, i, df1, df2, F, path):\n \"\"\"Function to do a plot of the ANOVA results\n \n Parameters\n ----------\n y : numpy.ndarray\n 1-dimensional numpy array with outcome measurements\n x : numpy.ndarray\n 1-dimensional numpy array with group identifiers\n i : numpy.ndarray\n 1-dimensional numpy array with individual identifiers\n df1: integer\n degrees of freedom of the numerator\n df2: integer\n degrees of freedom of the denominator\n F: float\n test statistic\n path: string\n path to write figure to disk\n \"\"\"\n # ANOVA plot\n fig, axes = plt.subplots(1, 3, figsize=(16, 5))\n # 1st plot: Assumptions\n axes[0].hist(y, bins = 'auto')\n axes[0].set_title('ANOVA assumptions:\\nNormality and outliers')\n axes[0].set_ylabel('Frequency')\n axes[0].set_xlabel('Seasonal mean temperature')\n # 2nd plot: Boxplots\n yplot = []\n temp_mean = []\n xplot = ['winter', 'spring', 'summer', 'autumn']\n for xi in xplot:\n ysub = []\n for k in range(len(y)):\n if x[k] == xi:\n ysub.append(y[k])\n yplot.append(ysub)\n temp_mean.append(sum(ysub)/len(ysub))\n \n axes[1].boxplot(yplot)\n axes[1].scatter([1,2,3,4], temp_mean)\n axes[1].set_xticklabels(xplot)\n axes[1].set_title('ANOVA exploration:\\nSeasonal boxplots')\n axes[1].set_ylabel('Seasonal mean temperature')\n # 3rd plot: Result\n # Derive pdf of the F distribution\n x_dist = np.linspace(f.ppf(0.0001, df1, df2),\n f.ppf(0.9999, df1, df2), 1000)\n rv = f(df1, df2)\n # Find critical value for distribution\n x_vals = rv.pdf(x_dist)\n crit = min(abs(x_vals-0.05))\n crit = x_dist[np.min(np.where(abs(x_vals-0.05)==crit))]\n # Plot\n axes[2].plot(x_dist, x_vals, 'k-', lw=2, label='Test F distribution')\n axes[2].axvline(x = F, label='Observed statistic', c = 'blue')\n axes[2].axvline(x = crit, label='Critical value', c = 'red')\n axes[2].set_title('ANOVA test results: Statistic \\n and critical value (5% confidence)') \n axes[2].set_ylabel('Probability')\n axes[2].set_xlabel('F value')\n plt.legend()\n fig.show()\n plt.savefig(path)\n","sub_path":"analysis/anova.py","file_name":"anova.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175226604","text":"#-*- coding=utf-8 -*-\n'''\nCreated on \n\n@author:Eden\n'''\nfrom django import template\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_number_index(son,father):\n d = {}\n i=1\n for item in father:\n d[item] = i\n i+=1\n return d[son]","sub_path":"blog/templatetags/get_number.py","file_name":"get_number.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13587704","text":"import cv2\r\n\r\n\r\ndef transparent_overlay(src, overlay, pos=(0, 0), scale=1):\r\n overlay = cv2.resize(overlay, (0, 0), fx=scale, fy=scale)\r\n h, w, _ = overlay.shape # Size of foreground\r\n rows, cols, _ = src.shape # Size of background Image\r\n y, x = pos[0], pos[1] # Position of foreground/overlay image\r\n\r\n # loop over all pixels and apply the blending equation\r\n for i in range(h):\r\n for j in range(w):\r\n if x + i >= rows or y + j >= cols:\r\n continue\r\n alpha = float(overlay[i][j][3] / 255.0) # read the alpha channel\r\n src[x + i][y + j] = alpha * overlay[i][j][:3] + (1 - alpha) * src[x + i][y + j]\r\n return 
src\r\n","sub_path":"mask_helper.py","file_name":"mask_helper.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421229803","text":"#!/usr/bin/env python3\n\nfrom typing import List\n\n\nclass Solution:\n def maximalRectangle(self, matrix: List[List[str]]) -> int:\n n = len(matrix)\n if n == 0:\n return 0\n\n m = len(matrix[0])\n if m == 0:\n return 0\n\n histograms = [[0 for _ in range(m)] for _ in range(n)]\n\n for j in range(m):\n if matrix[0][j] == '1':\n histograms[0][j] = 1\n\n for i in range(1, n):\n for j in range(m):\n if matrix[i][j] == '0':\n histograms[i][j] = 0\n else:\n histograms[i][j] = histograms[i - 1][j] + 1\n\n # for line in histograms:\n # print(line)\n # print()\n\n res = histograms[0][0]\n\n stack = [-1 for _ in range(m + 1)]\n for line in histograms:\n top = 0\n for j in range(m):\n if top == 0 or line[stack[top]] <= line[j]:\n top += 1\n stack[top] = j\n elif line[stack[top]] > line[j]:\n while top > 0 and line[stack[top]] > line[j]:\n # if line[stack[top]] * (j - stack[top - 1] - 1) == 6:\n # print('sum is 6')\n # print(f'line: {line}\\nstack: {stack}\\ntop: {top}, j: {j}')\n res = max(line[stack[top]] * (j - stack[top - 1] - 1), res)\n top -= 1\n top += 1\n stack[top] = j\n while top > 0:\n res = max(line[stack[top]] * (m - stack[top - 1] - 1), res)\n top -= 1\n\n return res\n","sub_path":"85-maximal-rectangle/maximal_rectangle.py","file_name":"maximal_rectangle.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"151840863","text":"import numpy as np\nimport napari\nfrom pycromanager import Bridge\nfrom matplotlib import pyplot as plt\nfrom shared.find_organelles import organelle_analysis, find_organelle\nimport shared.display as dis\nfrom skimage.measure import label, regionprops_table\nimport shared.objects as obj\nimport pandas as pd\nimport shared.analysis as ana\nimport shared.dataframe as dat\nfrom matplotlib.figure import Figure\nfrom vispy.color import Colormap\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets\n\nif QtCore.qVersion() >= \"5.\":\n from matplotlib.backends.backend_qt5agg import FigureCanvas\nelse:\n from matplotlib.backends.backend_qt4agg import FigureCanvas\nimport seaborn as sns\nimport os\n\n# --------------------------\n# PARAMETERS ALLOW CHANGE\n# --------------------------\n# paths\ndata_path = \"/Users/xiaoweiyan/Dropbox/LAB/ValeLab/Projects/Blob_bleacher/Data/20210224_SG_Top10_ArsTreatments/\"\\\n \"1235678910_1\"\n\n# values for analysis\nname = 'SF1-N'\ndata_p = 128\ndata_c_G3BP1 = 1 # channel for G3BP1-mScarlet 1:G3BP1-mScarlet channel\ndata_c_sample = 0 # channel for sample 0:GFP channel\nthresholding = 'na' # only accepts 'na', 'otsu', 'yen', 'local-nucleoli' and 'local-sg'\nmin_size = 5\nmax_size = 200\n\n# modes\ndisplay_mode = 'Y'\n\n\"\"\"\n# ---------------------------------------------------------------------------------------------------\n# PLEASE DO NOT CHANGE AFTER THIS\n# ---------------------------------------------------------------------------------------------------\n\"\"\"\n\n# --------------------------\n# LOAD DATA\n# --------------------------\nprint(\"### Load data ...\")\n# build up pycromanager bridge\n# first start up Micro-Manager (needs to be compatible version)\nbridge = Bridge()\nmmc = bridge.get_core()\nmm = bridge.get_studio()\n\n# load time series data\nstore = mm.data().load_data(data_path, 
True)\nmax_t = store.get_max_indices().get_t()\ncb = mm.data().get_coords_builder()\ncb.t(0).p(0).c(0).z(0)\n\n# ------------------------------\n# IMAGE ANALYSIS based on position\n# ------------------------------\nprint(\"### Image analysis: calculate SG mask/pd ...\")\n# test image of position\nnum = []\nsize = []\nraw_int_G3BP1 = []\nbg_G3BP1 = []\nraw_int_sample = []\nbg_sample = []\npix_tseries = [] # G3BP1\npix1_tseries = [] # sample\nsg_tseries = []\n\nraw_int_G3BP1_full = []\nraw_int_sample_full = []\nbg_G3BP1_full = []\nbg_sample_full = []\nx_frame = []\n\nfor i in range(max_t+1):\n temp = store.get_image(cb.t(i).c(data_c_G3BP1).z(0).p(data_p).build())\n pix = np.reshape(temp.get_raw_pixels(), newshape=[temp.get_height(), temp.get_width()])\n temp1 = store.get_image(cb.t(i).c(data_c_sample).z(0).p(data_p).build())\n pix1 = np.reshape(temp1.get_raw_pixels(), newshape=[temp1.get_height(), temp1.get_width()])\n sg = find_organelle(pix, thresholding, min_size=min_size, max_size=max_size)\n # cell =\n\n pix_tseries.append(pix)\n pix1_tseries.append(pix1)\n sg_tseries.append(sg)\n\n if 1 in sg:\n label_sg = label(sg, connectivity=1)\n sg_props = regionprops_table(label_sg, pix, properties=('label', 'area', 'mean_intensity'))\n sg_props1 = regionprops_table(label_sg, pix1, properties=('label', 'area', 'mean_intensity'))\n sg_pd = pd.DataFrame(sg_props)\n sg_pd1 = pd.DataFrame(sg_props1)\n\n num_temp = len(sg_pd)\n num.append(num_temp)\n size_temp = np.mean(sg_pd['area'])\n size.append(size_temp)\n raw_int_G3BP1_temp = np.mean(sg_pd['mean_intensity'])\n raw_int_G3BP1.append(raw_int_G3BP1_temp)\n bg_G3BP1_temp = ana.get_bg_int([pix])[0]\n bg_G3BP1.append(bg_G3BP1_temp)\n raw_int_sample_temp = np.mean(sg_pd1['mean_intensity'])\n raw_int_sample.append(raw_int_sample_temp)\n bg_sample_temp = ana.get_bg_int([pix1])[0]\n bg_sample.append(bg_sample_temp)\n\n raw_int_G3BP1_full = raw_int_G3BP1_full + sg_pd['mean_intensity'].tolist()\n raw_int_sample_full = raw_int_sample_full + sg_pd1['mean_intensity'].tolist()\n x_frame = x_frame + [i] * len(sg_pd)\n bg_G3BP1_full = bg_G3BP1_full + [bg_G3BP1_temp] * len(sg_pd)\n bg_sample_full = bg_sample_full + [bg_sample_temp] * len(sg_pd1)\n else:\n num.append(0)\n size.append(0)\n raw_int_G3BP1.append(0)\n bg_G3BP1.append(0)\n raw_int_sample.append(0)\n bg_sample.append(0)\n\nmov = np.stack(pix_tseries, axis=0)\nmov1 = np.stack(pix1_tseries, axis=0)\nmov_sg = np.stack(sg_tseries, axis=0)\n\nana_pd = pd.DataFrame({'number': num, 'size': size, 'raw_int_G3BP1': raw_int_G3BP1, 'bg_G3BP1': bg_G3BP1,\n 'raw_int_sample': raw_int_sample, 'bg_sample': bg_sample})\nana_pd['int_G3BP1'] = ana_pd['raw_int_G3BP1'] - ana_pd['bg_G3BP1']\nana_pd['int_sample'] = ana_pd['raw_int_sample'] - ana_pd['bg_sample']\nana_pd[ana_pd < 0] = 0\nana_pd['int_ratio'] = ana_pd['int_sample']/(ana_pd['int_G3BP1']+0.0001)\nana_pd = dat.get_normalized(ana_pd, 'number', 'number_norm')\nana_pd = dat.get_normalized(ana_pd, 'int_G3BP1', 'int_G3BP1_norm')\nana_pd = dat.get_normalized(ana_pd, 'int_sample', 'int_sample_norm')\nana_pd = dat.get_normalized(ana_pd, 'int_ratio', 'int_ratio_norm')\n\nprint(ana_pd)\n\nint_G3BP1_full = dat.list_subtraction(raw_int_G3BP1_full, bg_G3BP1_full)\nint_sample_full = dat.list_subtraction(raw_int_sample_full, bg_sample_full)\nint_ratio_full = [list1_i / (list2_i+0.0001) for list1_i, list2_i in zip(int_sample_full, int_G3BP1_full)]\n\nfull_pd = pd.DataFrame({'frame': x_frame, 'int_G3BP1': int_G3BP1_full, 'int_sample': int_sample_full,\n 'int_ratio': 
int_ratio_full})\n\n# --------------------------\n# OUTPUT DISPLAY\n# --------------------------\nif display_mode == 'Y':\n print(\"### Output display ...\")\n\n with napari.gui_qt():\n # embed mpl widget in napari viewer\n mpl_widget = FigureCanvas(Figure(figsize=(5, 3)))\n [(ax1, ax2), (ax3, ax4)] = mpl_widget.figure.subplots(nrows=2, ncols=2)\n viewer = napari.Viewer()\n viewer.window.add_dock_widget(mpl_widget)\n\n # napari display\n # display time series movies in napari main viewer\n viewer.add_image(mov, name='G3BP1-mScarlet', colormap='red', blending='additive')\n viewer.add_image(mov1, name='sample-GFP', colormap='green', blending='additive')\n violet_woBg = Colormap([[0.0, 0.0, 0.0, 0.0], [129 / 255, 55 / 255, 114 / 255, 1.0]])\n viewer.add_image(mov_sg, name='SG', contrast_limits=[0, 1], colormap=('violet woBg', violet_woBg))\n","sub_path":"test/sg_two-channel_test.py","file_name":"sg_two-channel_test.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"496483191","text":"import hashlib\nimport logging\nfrom ast import literal_eval\nfrom pathlib import Path\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom decouple import config\n\nlogger = logging.getLogger(__name__)\n\nFILES_TO_UPLOAD = [\"articles_mails.csv\", \"mails.csv\"]\n\n\ndef upload_files(data_path):\n bucket = config(\"BUCKET\")\n\n file_hashes = get_file_hashes(data_path)\n\n client = boto3.client(\n \"s3\",\n aws_access_key_id=config(\"ACCESS_KEY\"),\n aws_secret_access_key=config(\"SECRET_KEY\"),\n region_name=\"eu-west-2\",\n )\n\n for file, hash in file_hashes.items():\n try:\n head = client.head_object(Bucket=bucket, Key=file.name)\n if \"ETag\" in head and literal_eval(head[\"ETag\"]) == hash:\n logger.info(\"skipping %s\" % str(file))\n continue\n except ClientError:\n pass\n client.upload_file(str(file), bucket, file.name)\n logger.info(\"uploaded %s\" % str(file))\n\n\ndef get_file_hashes(data_path):\n file_hashes = {}\n for file in FILES_TO_UPLOAD:\n file_path = Path(data_path, file)\n hash_md5 = hashlib.md5()\n with open(file_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n file_hashes[file_path] = hash_md5.hexdigest()\n return file_hashes\n","sub_path":"medium_collector/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"507274520","text":"#!/usr/bin/env python\r\n\r\nimport sys\r\nimport dff\r\n\r\ndef correlation(Zh, l):\r\n\tcount = 0.0\r\n\tsum = 0.0\r\n\tfor i in range(0, Zh.shape[1]-l-1): \r\n\t\tfor j in range(0, Zh.shape[0]-1):\r\n\t\t\tsum = sum + (Zh[j,i]-Zh[j,i+l])**2\r\n\t\t\tcount = count + 1\r\n#\tprint sum, count\r\n\tsum = sum / count\r\n\treturn sum\r\n\r\nsys.argv.pop(0)\r\nname = sys.argv.pop(0)\r\nfor l in range(1, 5,1):\r\n\tfd = open(name + \".\" + str(l) + \".dat\", 'w')\r\n\tfor i in range(0, len(sys.argv)):\r\n\t\tdeformation = dff.dff(sys.argv[i])\r\n\t\trelativestrains = deformation.get_relativestrain()\r\n\t\tfd.write(\"%lf %e\\n\" % (deformation.get_time(), correlation(relativestrains, l)));\r\n\tfd.close()\r\n","sub_path":"dff-analysis/corrfunc.py","file_name":"corrfunc.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"428744679","text":"# coding: utf8\n\nfrom random import choice\nfrom 
scrapy.exceptions import NotConfigured\nfrom yarl import URL\n\n\nclass UserAgentMiddleware(object):\n\n def __init__(self, user_agents=[]):\n self.user_agents = user_agents\n\n @classmethod\n def from_crawler(cls, crawler):\n user_agents = crawler.settings.get('USER_AGENT_LIST', [])\n if not user_agents:\n raise NotConfigured(\"USER_AGENT_LIST not set or empty\")\n\n return cls(user_agents)\n\n def process_request(self, request, spider):\n # Setting a UA header makes scraping hotelsCN fail, so skip it there\n _url = URL(request.url)\n if _url.host == 'www.hotels.cn':\n return\n elif _url.host == 'm.ctrip.com':\n hid = _url.path.split('/')[-1].replace('.html', '')\n request.headers['User-Agent'] = 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'\n request.headers['Referer'] = f\"http://m.ctrip.com/webapp/hotel/oversea/hoteldetail/{hid}.html\"\n else:\n request.headers['User-Agent'] = choice(self.user_agents)\n","sub_path":"flashtripdemo/scripture/scripture/middlewares/user_agent.py","file_name":"user_agent.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170903355","text":"\"\"\" An example application using PGV! \"\"\"\n\n\nfrom enthought.traits.api import Int, Str\n\nfrom pgv.spike.model import Model\nfrom pgv.spike.model_edit import ModelEdit\nfrom pgv.spike.toolkit import QtGui\n\n \nclass Person(Model):\n name = Str\n age = Int\n\n\nclass MainWindow(QtGui.QMainWindow):\n \"\"\" The main application window. \"\"\"\n\n def __init__(self, parent=None):\n \"\"\" Constructor. \"\"\"\n\n super(MainWindow, self).__init__(parent)\n\n self.setMenuBar(self._create_menu_bar())\n\n model = Person(name='fred', age=42)\n model_edit = ModelEdit(\n model=model, include=['name', 'age'], parent=self\n )\n\n self.setCentralWidget(model_edit)\n \n return\n\n ###########################################################################\n # Private protocol.\n ###########################################################################\n\n def _create_menu_bar(self):\n \"\"\" Factory method for the menu bar. \"\"\"\n\n menu_bar = QtGui.QMenuBar(self)\n\n # Actions.\n quit_action = QtGui.QAction(self)\n quit_action.setText('&Quit')\n quit_action.triggered.connect(self.close)\n\n # Menus.\n file_menu = QtGui.QMenu(menu_bar)\n file_menu.setTitle('&File')\n menu_bar.addMenu(file_menu)\n\n # Populate the menus with the actions!\n file_menu.addAction(quit_action)\n\n return menu_bar\n\n\ndef main(argv):\n \"\"\" Entry point. 
\"\"\"\n\n application = QtGui.QApplication(argv)\n\n main_window = MainWindow()\n main_window.show()\n \n application.exec_()\n\n return\n\n\nif __name__ == '__main__':\n import sys; sys.exit(main(sys.argv))\n\n#### EOF ######################################################################\n","sub_path":"source/examples/pyside_application.py","file_name":"pyside_application.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"59174907","text":"import json\nfrom datetime import date, datetime\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom app_checklist.forms import ChekListInput4Form\nfrom app_checklist.models import CheckListPhoto, CheckListDone\nfrom app_create_chklst.models import CheckList\nfrom app_input_chklst.models import Material, Manager\n\n\nclass ChekListInput4(View):\n \"\"\"\n view for input manager of a checklist\n \"\"\"\n context = {'title': 'LastPart'}\n template_name = \"app_checklist/checklist_finale.html\"\n form = ChekListInput4Form\n context_object_name = 'Checklist'\n\n def get(self, request):\n self.context['checklist'] = \"\"\n self.context['form'] = self.form()\n self.context['newchecklist_id'] = request.session['newchecklist_id']\n # 1st load\n if 'chksave' not in request.session or request.session['chksave'] == 0:\n request.session['chksave'] = {}\n request.session['chksave'] = 0\n self.context['form'] = self.form\n else:\n newchecklist = CheckListDone.objects.get(pk=request.session['newchecklist_id'])\n fotos = newchecklist.pho_chklst.all()\n fotosave = []\n for foto in fotos:\n fotosave.append(str(foto.pho_file))\n # print(foto.pho_file)\n self.context['form'] = self.form(initial={'cld_key': request.session['chksave']['cld_key'],\n 'cld_valid': request.session['chksave']['cld_valid'],\n 'cld_remarks': request.session['chksave']['cld_remarks'],\n 'cld_fotosave': fotosave})\n return render(request, self.template_name, context=self.context)\n\n def post(self, request, *args, **kwargs):\n form = self.form(request.POST)\n if form.is_valid():\n new_checklist = before_preview(request)\n # valid --> save the form in session\n if 'previous' in request.POST:\n request.session['chksave'] = {}\n request.session['chksave']['cld_key'] = request.POST['cld_key']\n\n request.session['chksave']['cld_valid'] = request.POST.get('cld_valid', False)\n request.session['chksave']['cld_remarks'] = request.POST.get('cld_remarks', '')\n return redirect('app_checklist:saisie3')\n else:\n new_checklist.cld_status = 1\n new_checklist.save()\n return redirect('app_checklist:pdf', save='1')\n\n return redirect('app_home:main')\n\n\n# @csrf_exempt\ndef before_preview(request):\n \"\"\"\n function to save datas in database before preview PDF and previous button.\n The save should be the same but if preview the POST request datas are not the same.\n If ajax --> get data in the \"request post json data\" and treat it\n If Previous button : Get the data in the request POST.\n Generate a ChecklistDone model to get it in the pdfpreview (no context data because not html render)\n args : request (POST but with data or json depending on the request)\n return : json response if ajax or nothing (called by the post-view)\n \"\"\"\n newchecklist = CheckListDone.objects.get(pk=request.session['newchecklist_id'])\n checklist = 
CheckList.objects.get(pk=request.session['checklist_id'])\n newchecklist.cld_status = 0\n\n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n is_ajax = True\n # data = {'data': 'ERREUR'}\n request_data = json.loads(request.read().decode('utf-8'))\n # print(request.headers)\n # print(request_data)\n cld_key = request_data['cld_key']\n cld_valid = request_data['cld_valid']\n if cld_valid:\n cld_valid = 'on'\n cld_remarks = request_data['cld_remarks']\n else:\n is_ajax = False\n cld_key = request.POST['cld_key']\n cld_valid = request.POST.get('cld_valid', 'off')\n cld_remarks = request.POST['cld_remarks']\n\n if len(cld_key) == 0:\n newchecklist.cld_key = str(datetime.now().timestamp())[:15]\n else:\n newchecklist.cld_key = cld_key\n newchecklist.cld_remarks = cld_remarks\n if cld_valid == 'on':\n newchecklist.cld_valid = True\n else:\n newchecklist.cld_valid = False\n newchecklist.cld_user = request.user\n newchecklist.cld_company = request.user.user_company\n newchecklist.cld_checklist = checklist\n if request.session['mat']['id'] != '0':\n newchecklist.cld_material = Material.objects.get(pk=request.session['mat']['id'])\n newchecklist.cld_mat = newchecklist.cld_material.mat_designation\n if request.session['mgr']['id'] != '0':\n newchecklist.cld_manager = Manager.objects.get(pk=request.session['mgr']['id'])\n newchecklist.cld_man = newchecklist.cld_manager.mgr_name\n newchecklist.save()\n if is_ajax:\n data = {'data': 'OK'}\n return JsonResponse(data)\n return newchecklist\n\n\n@csrf_exempt\ndef file_upload_view(request):\n # print(request.FILES)\n data = {'data': 'ERREUR'}\n if request.method == 'POST':\n # print(request.POST)\n my_file = request.FILES.get('file')\n newchecklist_id = request.POST.get('newchecklist_id', None)\n caption = request.POST.get('caption', None)\n data = {}\n if newchecklist_id is not None:\n newchecklist = CheckListDone.objects.get(pk=newchecklist_id)\n CheckListPhoto.objects.create(pho_file=my_file,\n pho_caption=caption,\n pho_chklst_done=newchecklist)\n data['data'] = 'OK'\n return JsonResponse(data)\n\n\n@csrf_exempt\ndef file_remove_view(request):\n data = {'data': 'ERREUR'}\n if request.method == 'POST':\n request_data = json.loads(request.read().decode('utf-8'))\n # print(request_data)\n filename = request_data['filename'].split('.')[0]\n checklist_id = request_data['checklist_id']\n today = date.today()\n newchecklist = CheckListDone.objects.get(pk=checklist_id)\n foto = newchecklist.pho_chklst.filter(pho_file__contains=filename).\\\n filter(created_date__year=today.year, created_date__month=today.month, created_date__day=today.day)\n foto[0].delete()\n data = {'data': 'OK'}\n return JsonResponse(data)\n\n","sub_path":"checklistmgr/app_checklist/saveviews.py","file_name":"saveviews.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305529854","text":"# -*- coding: utf-8 -*-\r\n'''\r\n\r\n'''\r\nimport os\r\nimport subprocess\r\nfrom music21 import converter, instrument, note, chord, stream # note, chord and stream classes\r\n\r\nimport pickle # \r\nimport glob # used to find the MIDI files\r\n\r\n\r\n\r\n\r\ndef convertMidi2Mp3():\r\n '''\r\n Convert the MIDI file generated by the neural network to an MP3 file\r\n '''\r\n input_file = '1.mid'\r\n output_file = 'output.mp3'\r\n\r\n assert os.path.exists(input_file)\r\n\r\n print(\"Converting %s to Mp3\" % input_file)\r\n\r\n # Use timidity to render the MIDI file, then ffmpeg to encode the mp3\r\n\r\n command = 'timidity {} -Ow -o - | ffmpeg -i - -acodec libmp3lame -ab 64k {}'.format(input_file,output_file)\r\n\r\n # Run the command\r\n 
subprocess.call(command,shell=True)\r\n\r\n print(\"Convert. Generate File is %s\" % output_file)\r\n\r\n\r\ndef get_notes():\r\n '''\r\n Read the notes and chords from every MIDI file in the music_midi directory.\r\n Note examples: A, B, A#, B#, G#, E, ...\r\n Chord examples: [B4,B5,G#5], [c5 E5] ...\r\n Since a Chord is just a collection of Notes, we simply refer to both as \"NOTE\".\r\n\r\n '''\r\n notes = []\r\n\r\n # glob: match every file fitting the pattern and return them as a list\r\n for file in glob.glob(\"music_midi/*.mid\"):\r\n midi = converter.parse(file)\r\n\r\n # Get all instrument parts\r\n parts = instrument.partitionByInstrument(midi)\r\n\r\n if parts: # if there are instrument parts, take the first one\r\n notes_to_parse = parts.parts[0].recurse()\r\n \r\n else:\r\n notes_to_parse = midi.flat.notes\r\n \r\n # Walk through every element\r\n for element in notes_to_parse:\r\n\r\n # If it is a Note, take its pitch\r\n if isinstance(element, note.Note): # check the element's class\r\n # format example: E6\r\n notes.append(str(element.pitch))\r\n elif isinstance(element, chord.Chord):\r\n # convert the chord to its numeric form, e.g. 4.15.7\r\n notes.append('.'.join(str(n) for n in element.normalOrder))\r\n\r\n # Write the data to the data/notes file\r\n with open('data/notes','wb') as filepath:\r\n pickle.dump(notes,filepath)\r\n \r\n return notes\r\n\r\ndef create_music(prediction):\r\n '''\r\n Generate a MIDI file from the music data \"predicted\" by the neural network, then convert it to MP3\r\n '''\r\n\r\n offset = 0 # offset\r\n output_notes=[]\r\n # Build Note or Chord objects\r\n for data in prediction:\r\n # Chord format, e.g. 4.15.7\r\n if('.' in data ) or data.isdigit():\r\n notes_in_chord = data.split('.')\r\n notes = []\r\n\r\n for current_note in notes_in_chord:\r\n new_note = note.Note(int(current_note))\r\n new_note.storedInstrument = instrument.Piano() # instrument: piano\r\n\r\n notes.append(new_note)\r\n new_chord = chord.Chord(notes)\r\n new_chord.offset=offset\r\n output_notes.append(new_chord)\r\n # plain Note\r\n else:\r\n\r\n new_note = note.Note(data) \r\n new_note.offset = offset\r\n new_note.storedInstrument =instrument.Piano()\r\n output_notes.append(new_note)\r\n \r\n # Increase the offset on every iteration so the notes do not overlap\r\n offset+=0.5\r\n\r\n # Create the music stream\r\n midi_stream = stream.Stream(output_notes)\r\n\r\n # Write the MIDI file\r\n midi_stream.write('midi', fp='output.mid')\r\n # Convert the generated file to Mp3\r\n\r\n\r\n\r\n\r\n ","sub_path":"mooc_lstm_music/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"424268536","text":"from app.models.customer import Customer\nfrom app.util.database import db\nfrom app.models.customer import CustomerTest\nfrom app.util.database import db2\n\n# | _id | email | name | password | address | zipcode |\n# | 4 | dpanchan@uncc.edu | Dinesh | 123 | | 0 |\n\ndef main():\n\twith open('data/sample_users.txt') as f:\n\t\tdb.session.query(Customer).delete()\n\t\tdb.session.commit()\n\t\tdb2.session.query(CustomerTest).delete()\n\t\tdb2.session.commit()\n\t\tdata = f.readlines()\n\t\trecords = [[_.strip() for _ in res.strip().strip('|').split('|')] for res in data]\n\t\tfor record in records:\n\t\t\tdb.session.add(Customer(record[1], record[2], record[3], record[4], int(record[5])))\n\t\t\tdb2.session.add(CustomerTest(record[1], record[2], record[3], record[4], int(record[5])))\n\t\tdb.session.commit()\n\t\tdb2.session.commit()","sub_path":"load/setup_users.py","file_name":"setup_users.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"485478616","text":"import datetime\n\nfrom flask import json\nimport fitbit\nfrom fitbit import FitbitOauth2Client\nimport config\n\nfrom models import 
User\n\n\n# Fitbit auth helpers\ndef get_fitbit_authorize_token_url():\n oauth = FitbitOauth2Client(config.FITBIT_CLIENT_ID, config.FITBIT_CLIENT_SECRET)\n url, _ = oauth.authorize_token_url(redirect_uri=config.OAUTH_REDIRECT_URL)\n return url\n\n\ndef exchange_code_for_access_token(code):\n oauth = FitbitOauth2Client(config.FITBIT_CLIENT_ID, config.FITBIT_CLIENT_SECRET)\n token = oauth.fetch_access_token(code, config.OAUTH_REDIRECT_URL)\n return token\n\n\ndef refresh_fitbit_token(user):\n f = fitbit.Fitbit()\n # not sure if the python-fitbit refresh token function is working\n\n\ndef convert_seconds(timestring):\n h, m, s = map(int, timestring.split(\":\"))\n return h * 3600 + m * 60 + s\n\n\ndef get_user_event_data(user, event):\n f = fitbit.Fitbit(config.FITBIT_CLIENT_KEY, config.FITBIT_CLIENT_SECRET,\n oauth2=True,\n access_token=user.fit_access_token,\n refresh_token=user.fit_refresh_token)\n\n response = f.intraday_time_series(\"activities/heart\",\n base_date=event.start_time.date(),\n detail_level=\"1min\",\n start_time=event.start_time,\n end_time=event.end_time)\n heart_data = response[\"activities-heart-intraday\"][\"dataset\"]\n time_array, heartrate_array = zip(*[(convert_seconds(x[\"time\"]), x[\"value\"]) for x in heart_data])\n\n return time_array, heartrate_array\n\n\ndef get_user_info(user):\n f = fitbit.Fitbit(config.FITBIT_CLIENT_KEY, config.FITBIT_CLIENT_SECRET,\n oauth2=True,\n access_token=user.fit_access_token,\n refresh_token=user.fit_refresh_token)\n\n response = f.user_profile_get()\n return response[\"user\"]\n","sub_path":"src/things.py","file_name":"things.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329178973","text":"\"\"\"\n\n    BioVida-Images Subpackage Unit Testing\n    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\n# Note: 'medpix.png' is simply a blank image of the correct size.\n# To test the OpeniImageProcessing() class, it will need to be replaced\n# with an actual MedPix image.\n\nimport os\nimport sys\nimport unittest\nimport pandas as pd\nfrom os.path import join as os_join\n\n\n# Allow access to modules\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../\"))\n\n\nfrom biovida import images\nfrom biovida.support_tools.support_tools import items_null\nfrom biovida.images._interface_support.openi.openi_text_processing import openi_raw_extract_and_clean\n\n\ndata_path = os_join(str(os.getcwd()).split(\"/tests\")[0], \"tests/images/data\")\nraw_openi_data_df = pd.read_pickle(os_join(data_path, \"sample_records_raw.p\"))\n\n\nclass OpeniInterfaceTests(unittest.TestCase):\n \"\"\"\n\n Unit Tests for the Images Subpackage.\n\n \"\"\"\n\n def test_cleaning_raw(self):\n \"\"\"Test Extracting Features From Raw Open-i Data & Cleaning it.\"\"\"\n cleaned_df = openi_raw_extract_and_clean(raw_openi_data_df, clinical_cases_only=False,\n verbose=False, cache_path=data_path)\n\n # Tests for the newly generated columns\n expected_new_columns = ('diagnosis', 'imaging_modality_from_text', 'sex',\n 'illness_duration_years', 'modality_full', 'image_problems_from_text',\n 'parsed_abstract', 'image_id_short', 'age', 'ethnicity', 'image_plane')\n new_columns = set(cleaned_df.columns) - set(raw_openi_data_df.columns)\n\n # - Number of new columns\n self.assertEqual(len(new_columns) >= 11, True)\n\n # - Checks that at least all `expected_new_columns` columns are in `new_columns`,\n # However, this will not fail if additional columns are 
added.\n self.assertEqual(all(e in new_columns for e in expected_new_columns), True)\n\n # Test for only floats\n for c in ('illness_duration_years', 'age'):\n float_test = all(isinstance(i, float) for i in cleaned_df[c])\n self.assertEqual(float_test, True)\n\n # Test for only strings\n for c in ('diagnosis', 'imaging_modality_from_text', 'sex',\n 'modality_full', 'image_plane', 'image_id_short'):\n string_test = all(isinstance(i, str) or items_null(i) for i in cleaned_df[c])\n self.assertEqual(string_test, True)\n\n # Test for only dictionaries\n dict_test = all(isinstance(i, dict) or items_null(i) for i in cleaned_df['parsed_abstract'])\n self.assertEqual(dict_test, True)\n\n # Test for tuples\n tuple_test = all(isinstance(i, tuple) or items_null(i) for i in cleaned_df['image_problems_from_text'])\n self.assertEqual(tuple_test, True)\n\n\nunittest.main()\n","sub_path":"tests/images/biovida_images_tests.py","file_name":"biovida_images_tests.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392823099","text":"import os\nimport shutil\nfrom os import path\n\nimport pandas as pd\n\nfrom clinicadl.utils.caps_dataset.data import load_data_test\nfrom clinicadl.utils.split_manager import KFoldSplit\n\nmerged_tsv = \"data/tsvtool/anonymous_BIDS.tsv\"\nmissing_mods = \"data/tsvtool/anonymous_missing_mods\"\nreference_path = \"data/tsvtool/anonymous_reference\"\ndiagnoses = \"AD CN MCI pMCI sMCI\"\n\n\"\"\"\nCheck the absence of data leakage\n 1) Baseline datasets contain only one scan per subject\n 2) No intersection between train and test sets\n 3) Absence of MCI train subjects in test sets of subcategories of MCI\n\"\"\"\n\n\ndef check_subject_unicity(diagnosis_path):\n print(\"Check unicity\", diagnosis_path)\n diagnosis_df_paths = os.listdir(diagnosis_path)\n diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith(\"_baseline.tsv\")]\n\n for diagnosis_df_path in diagnosis_df_paths:\n flag_unique = True\n check_df = pd.read_csv(path.join(diagnosis_path, diagnosis_df_path), sep=\"\\t\")\n check_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n for subject, subject_df in check_df.groupby(level=0):\n if len(subject_df) > 1:\n flag_unique = False\n\n assert flag_unique\n\n\ndef check_independance(train_path, test_path):\n print(\"Check independence\")\n diagnosis_df_paths = os.listdir(train_path)\n diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith(\"_baseline.tsv\")]\n\n for diagnosis_df_path in diagnosis_df_paths:\n flag_independant = True\n train_df = pd.read_csv(path.join(train_path, diagnosis_df_path), sep=\"\\t\")\n train_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n test_df = pd.read_csv(path.join(test_path, diagnosis_df_path), sep=\"\\t\")\n test_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n\n for subject, session in train_df.index:\n if subject in test_df.index:\n flag_independant = False\n\n assert flag_independant\n\n\ndef check_subgroup_independence(train_path, test_path):\n print(\"Check subgroup independence\")\n diagnosis_df_paths = os.listdir(test_path)\n diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith(\"_baseline.tsv\")]\n sub_diagnosis_list = [\n x for x in diagnosis_df_paths if \"MCI\" in x and x != \"MCI_baseline.tsv\"\n ]\n\n MCI_train_df = pd.read_csv(path.join(train_path, \"MCI_baseline.tsv\"), sep=\"\\t\")\n MCI_train_df.set_index([\"participant_id\", \"session_id\"], 
inplace=True)\n for sub_diagnosis in sub_diagnosis_list:\n flag_independant = True\n sub_test_df = pd.read_csv(path.join(test_path, sub_diagnosis), sep=\"\\t\")\n sub_test_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n\n for subject, session in MCI_train_df.index:\n if subject in sub_test_df.index:\n flag_independant = False\n\n assert flag_independant\n\n MCI_test_df = pd.read_csv(path.join(test_path, \"MCI_baseline.tsv\"), sep=\"\\t\")\n MCI_test_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n for sub_diagnosis in sub_diagnosis_list:\n flag_independant = True\n sub_test_df = pd.read_csv(path.join(train_path, sub_diagnosis), sep=\"\\t\")\n sub_test_df.set_index([\"participant_id\", \"session_id\"], inplace=True)\n\n for subject, session in MCI_test_df.index:\n if subject in sub_test_df.index:\n flag_independant = False\n\n assert flag_independant\n\n\ndef run_test_suite(formatted_data_path, n_splits, subset_name):\n check_train = True\n\n if n_splits == 0:\n train_path = path.join(formatted_data_path, \"train\")\n test_path = path.join(formatted_data_path, subset_name)\n if not path.exists(train_path):\n check_train = False\n\n check_subject_unicity(test_path)\n if check_train:\n check_subject_unicity(train_path)\n check_independance(train_path, test_path)\n MCI_path = path.join(train_path, \"MCI_baseline.tsv\")\n if path.exists(MCI_path):\n check_subgroup_independence(train_path, test_path)\n\n else:\n for split in range(n_splits):\n train_path = path.join(\n formatted_data_path,\n \"train_splits-\" + str(n_splits),\n \"split-\" + str(split),\n )\n test_path = path.join(\n formatted_data_path,\n subset_name + \"_splits-\" + str(n_splits),\n \"split-\" + str(split),\n )\n\n if not path.exists(train_path):\n check_train = False\n\n check_subject_unicity(test_path)\n if check_train:\n check_subject_unicity(train_path)\n check_independance(train_path, test_path)\n MCI_path = path.join(train_path, \"MCI_baseline.tsv\")\n if path.exists(MCI_path):\n check_subgroup_independence(train_path, test_path)\n\n\ndef test_getlabels():\n \"\"\"Checks that getlabels is working and that it is coherent with previous version in reference_path\"\"\"\n output_path = \"data/tsvtool_test\"\n flag_getlabels = not os.system(\n f\"clinicadl -vvv tsvtool getlabels {merged_tsv} {missing_mods} {output_path} \"\n f\"--diagnoses AD --diagnoses CN --diagnoses MCI --diagnoses pMCI --diagnoses sMCI\"\n )\n assert flag_getlabels\n tsv_list = [\n tsv_file for tsv_file in os.listdir(output_path) if tsv_file.endswith(\".tsv\")\n ]\n for file in tsv_list:\n out_df = pd.read_csv(path.join(output_path, file), sep=\"\\t\")\n ref_df = pd.read_csv(path.join(reference_path, file), sep=\"\\t\")\n out_df_sorted = out_df.reindex(sorted(out_df.columns), axis=1)\n ref_df_sorted = ref_df.reindex(sorted(ref_df.columns), axis=1)\n assert out_df_sorted.equals(ref_df_sorted)\n\n shutil.rmtree(output_path)\n\n\ndef test_split():\n \"\"\"Checks that:\n - split and kfold are working\n - the loading functions can find the output\n - no data leakage is introduced in split and kfold.\n \"\"\"\n n_splits = 5\n train_path = path.join(reference_path, \"train\")\n flag_split = not os.system(f\"clinicadl -vvv tsvtool split {reference_path}\")\n flag_kfold = not os.system(\n f\"clinicadl -vvv tsvtool kfold {reference_path} --n_splits {n_splits}\"\n )\n assert flag_split\n assert flag_kfold\n flag_load = True\n try:\n _ = load_data_test(\n path.join(reference_path, \"validation\"), diagnoses.split(\" \")\n )\n 
split_manager = KFoldSplit(\".\", reference_path, diagnoses.split(\" \"), n_splits)\n for fold in split_manager.fold_iterator():\n _ = split_manager[fold]\n except FileNotFoundError:\n flag_load = False\n assert flag_load\n\n run_test_suite(reference_path, 0, \"validation\")\n run_test_suite(reference_path, n_splits, \"validation\")\n\n shutil.rmtree(path.join(reference_path, \"train\"))\n shutil.rmtree(path.join(reference_path, \"validation\"))\n shutil.rmtree(path.join(reference_path, \"train_splits-5\"))\n shutil.rmtree(path.join(reference_path, \"validation_splits-5\"))\n\n\ndef test_analysis():\n \"\"\"Checks that analysis can be performed\"\"\"\n results_path = path.join(\"data\", \"tsvtool\", \"analysis.tsv\")\n ref_analysis_path = path.join(\"data\", \"tsvtool\", \"anonymous_analysis.tsv\")\n flag_analysis = not os.system(\n f\"clinicadl tsvtool analysis {merged_tsv} {reference_path} {results_path} \"\n f\"--diagnoses AD --diagnoses CN --diagnoses MCI --diagnoses pMCI --diagnoses sMCI\"\n )\n\n assert flag_analysis\n ref_df = pd.read_csv(ref_analysis_path, sep=\"\\t\")\n out_df = pd.read_csv(results_path, sep=\"\\t\")\n out_df_sorted = out_df.reindex(sorted(out_df.columns), axis=1)\n ref_df_sorted = ref_df.reindex(sorted(ref_df.columns), axis=1)\n assert out_df_sorted.equals(ref_df_sorted)\n os.remove(results_path)\n","sub_path":"tests/test_tsvtool.py","file_name":"test_tsvtool.py","file_ext":"py","file_size_in_byte":7911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"223935497","text":"# This file is part of salobj.\n#\n# Developed for the LSST Telescope and Site Systems.\n# This product includes software developed by the LSST Project\n# (https://www.lsst.org).\n# See the COPYRIGHT file at the top-level directory of this distribution\n# for details of code ownership.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n__all__ = [\"STD_SLEEP\", \"index_generator\", \"set_random_lsst_dds_domain\"]\n\nimport os\nimport random\nimport socket\nimport string\nimport time\n\nimport numpy as np\n\nimport SALPY_Test\n\n# standard sleep time for SAL (sec)\nSTD_SLEEP = 0.001\n\nMAX_SAL_INDEX = (2 << 30) - 1\n\n\ndef index_generator(imin=1, imax=MAX_SAL_INDEX):\n \"\"\"Sequential index generator, e.g. 
for SAL components.\n\n Returns values min, min+1, ..., max, min, min + 1, ...\n\n Parameters\n ----------\n imin : `int`\n Minimum index (inclusive).\n imax : `int`\n Maximum index (inclusive).\n\n Raises\n ------\n ValueError\n If imin >= imax\n \"\"\"\n if imax <= imin:\n raise ValueError(f\"imin={imin} must be less than imax={imax}\")\n\n # define an inner generator and return that\n # in order to get immediate argument checking\n def index_impl():\n index = imin - 1\n while True:\n index += 1\n if index > imax:\n index = imin\n\n yield index\n\n return index_impl()\n\n\ndef set_random_lsst_dds_domain():\n \"\"\"Set a random value for environment variable LSST_DDS_DOMAIN\n\n Call this for each unit test method that uses SAL message passing,\n in order to avoid collisions with other tests. Note that pytest\n can run unit test methods in parallel.\n\n The set name will contain the hostname and current time\n as well as a random integer.\n \"\"\"\n hostname = socket.gethostname()\n curr_time = time.time()\n random_int = random.randint(0, 999999)\n os.environ[\"LSST_DDS_DOMAIN\"] = f\"Test-{hostname}-{curr_time}-{random_int}\"\n\n\nclass TestData:\n \"\"\"Generate random test data and compare topics.\n \"\"\"\n __test__ = False # stop pytest from warning that this is not a test\n\n @property\n def arrays_fields(self):\n \"\"\"Get a tuple of the fields in an arrays struct.\"\"\"\n return (\n \"boolean0\", \"byte0\", \"short0\",\n \"int0\", \"long0\", \"longLong0\", \"octet0\",\n \"unsignedShort0\", \"unsignedInt0\", \"unsignedLong0\",\n \"float0\", \"double0\")\n\n @property\n def scalars_fields(self):\n \"\"\"Get a tuple of the fields in a scalars struct.\"\"\"\n return (\n \"boolean0\", \"byte0\", \"char0\", \"short0\",\n \"int0\", \"long0\", \"longLong0\", \"octet0\",\n \"unsignedShort0\", \"unsignedInt0\", \"unsignedLong0\",\n \"float0\", \"double0\", \"string0\")\n\n def assert_arrays_equal(self, arrays1, arrays2):\n \"\"\"Assert that two arrays data structs are equal.\n\n The types need not match; each struct can be command, event\n or telemetry data.\n \"\"\"\n # use reversed so boolean0 is not compared first,\n # as a discrepancy there is harder to interpret\n for field in reversed(self.arrays_fields):\n if np.any(getattr(arrays1, field) != getattr(arrays2, field)):\n raise AssertionError(\"arrays1.{} = {} != {} = arrays2.{}\".format(\n field, getattr(arrays1, field), getattr(arrays2, field), field))\n\n def assert_scalars_equal(self, scalars1, scalars2):\n \"\"\"Assert that two scalars data structs are equal.\n\n The types need not match; each struct can be command, event\n or telemetry data.\n \"\"\"\n # use reversed so boolean0 is not compared first,\n # as a discrepancy there is harder to interpret\n for field in reversed(self.scalars_fields):\n if getattr(scalars1, field) != getattr(scalars2, field):\n raise AssertionError(\"scalars1.{} = {} != {} = scalars2.{}\".format(\n field, getattr(scalars1, field), getattr(scalars2, field), field))\n\n def copy_arrays(self, src_arrays, dest_arrays):\n \"\"\"Copy arrays data from one struct to another.\n\n The types need not match; each struct can be command, event\n or telemetry data.\n \"\"\"\n for field_name in self.arrays_fields:\n data = getattr(src_arrays, field_name)\n if isinstance(data, np.ndarray):\n getattr(dest_arrays, field_name)[:] = data\n else:\n setattr(dest_arrays, field_name, data)\n\n def copy_scalars(self, src_scalars, dest_scalars):\n \"\"\"Copy scalars data from one struct to another.\n\n The types need not match; 
each struct can be command, event\n or telemetry data.\n \"\"\"\n for field_name in self.scalars_fields:\n setattr(dest_scalars, field_name, getattr(src_scalars, field_name))\n\n def set_random_arrays(self, data):\n \"\"\"Make random data for the arrays or setArrays topic.\"\"\"\n nelts = 5\n data.boolean0[:] = np.random.choice([False, True], size=(nelts,))\n for field_name in (\n \"byte0\",\n \"octet0\",\n \"short0\",\n \"int0\",\n \"long0\",\n \"longLong0\",\n \"unsignedShort0\",\n \"unsignedInt0\",\n \"unsignedLong0\",\n ):\n field = getattr(data, field_name)\n iinfo = np.iinfo(field.dtype)\n print(f\"{field_name} has type {field.dtype}\")\n field[:] = np.random.randint(iinfo.min, iinfo.max, size=(nelts,), dtype=field.dtype)\n data.float0[:] = np.random.uniform(-1e5, 1e5, size=(nelts,))\n data.double0[:] = np.random.uniform(-1e5, 1e5, size=(nelts,))\n return data\n\n def set_random_scalars(self, data):\n \"\"\"Make random data for scalars or setScalars topic.\"\"\"\n # also make an empty arrays struct to get dtype of int fields,\n # since that information is lost in the scalars pybind11 wrapper\n empty_arrays = SALPY_Test.Test_arraysC()\n data.boolean0 = np.random.choice([False, True])\n printable_chars = [c for c in string.ascii_letters + string.digits]\n # char0 is a string of arbitrary length (IDL_Size not specified)\n data.char0 = \"\".join(np.random.choice(printable_chars, size=(100,)))\n # string0 is a string with max length 20 (IDL_Size=20)\n data.string0 = \"\".join(np.random.choice(printable_chars, size=(20,)))\n for field_name in (\n \"byte0\",\n \"octet0\",\n \"short0\",\n \"int0\",\n \"long0\",\n \"longLong0\",\n \"unsignedShort0\",\n \"unsignedInt0\",\n \"unsignedLong0\",\n ):\n dtype = getattr(empty_arrays, field_name).dtype\n # work around a bug in numpy 1.14.5 that causes\n # TypeError: 'numpy.dtype' object is not callable\n if dtype == np.int64:\n dtype = np.int64\n iinfo = np.iinfo(dtype)\n setattr(data, field_name, np.random.randint(iinfo.min, iinfo.max, dtype=dtype))\n data.float0 = np.random.uniform(-1e5, 1e5)\n data.double0 = np.random.uniform(-1e5, 1e5)\n return data\n","sub_path":"python/lsst/ts/sal/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":7752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"484976209","text":"\"\"\"\nFile: train_emotion_classifier.py\nAuthor: Octavio Arriaga\nEmail: arriaga.camargo@gmail.com\nGithub: https://github.com/oarriaga\nDescription: Train emotion classification model\n\"\"\"\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\nfrom data_loader import DataLoader\nfrom models import simple_CNN\nfrom utils import preprocess_input\n\n# parameters\nbatch_size = 128\nnum_epochs = 1000\ntraining_split = .8\ndataset_name = 'fer2013'\nlog_file_path = 'log_files/emotion_training.log'\ntrained_models_path = '../trained_models/emotion_models/simple_CNN'\n\n# data loader\ndata_loader = DataLoader(dataset_name)\nfaces, emotions = data_loader.get_data()\nprint(len(faces))\nfaces = preprocess_input(faces)\nnum_classes = emotions.shape[1]\ninput_shape = faces.shape[1:]\n\n# model parameters/compilation\nmodel = simple_CNN(input_shape, num_classes)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n# model callbacks\ncsv_logger = CSVLogger(log_file_path, append=False)\nmodel_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'\nmodel_checkpoint = ModelCheckpoint(model_names,\n 'val_acc', 
verbose=1,\n save_best_only=True)\ncallbacks = [model_checkpoint, csv_logger]\n\n# model training\nmodel.fit(faces, emotions, batch_size, num_epochs,verbose=1,\n callbacks=callbacks,\n validation_split=(1-training_split),\n shuffle=True)\n","sub_path":"src/train_emotion_classifier.py","file_name":"train_emotion_classifier.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"442798483","text":"\"\"\"\r\n!/bin/python\r\n-*- coding: utf-8 -*\r\n\r\n### Author ###\r\n S. Crommelinck, 2018\r\n\r\n### Description ###\r\n This script applies SLIC image segmentation (segments image using k-means clustering in Color-(x,y,z) space) on a RGB raster file and saves the output as xy. The parameters of the image segmentation are calculated automatically based on the size of the input image.\r\n Source: http://scikit-image.org/docs/dev/api/skimage.segmentation.html\r\n\"\"\"\r\n\r\n# Import required modules\r\nimport os\r\nfrom skimage.segmentation import slic\r\nfrom skimage.util import img_as_float\r\nfrom skimage import io\r\n\r\nclass ImageSegmentation:\r\n def __init__(self):\r\n # Predefine variables\r\n self.scriptDir = os.path.dirname(os.path.realpath(__file__))\r\n self.rasterFile = self.scriptDir + r'\\data\\Muhoza_RGB_GSD_5cm_mini.tif'\r\n self.outputFile = self.scriptDir + r'\\data\\Muhoza_RGB_GSD_5cm_mini_SLIC.png'\r\n\r\n def applyImageSegmentation(self):\r\n # Open raster\r\n image = img_as_float(io.imread(self.rasterFile))\r\n\r\n # Calculate number of segments based on image size\r\n imageWidth = image.shape[1]\r\n imageHeight = image.shape[0]\r\n segmentNumber = int(imageWidth*imageHeight/1000000*30)\r\n\r\n rasterSegments = slic(image, n_segments=segmentNumber, compactness=10, sigma=1)\r\n # segments = slic(image, n_segments=numSegments, sigma=5)\r\n # segments = slic(img, n_segments=250, sigma=0.01)\r\n\r\n # Save results\r\n io.imsave(self.outputFile, rasterSegments)\r\n\r\nif __name__ == '__main__':\r\n data = ImageSegmentation()\r\n rasterSegments = data.applyImageSegmentation()\r\n print('\\nAll processing finished')\r\n","sub_path":"2_Scripts/A_ImageSegmenation.py","file_name":"A_ImageSegmenation.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"48123685","text":"#!/usr/bin/env python3\n# Clock numbers like it's in a mirror\n# Made by Samuel Davenport\n\nSCREENSIZE = (25, 75)\nNAME = 'Clock'\n__version__ = '0.0.2'\n\nFPS = 2\nWHITE = (255, 255, 255)\nTEXT_COLOR = (0, 0, 0)\n\nfrom time import asctime\nfrom os import sys, system, abort\ntry:\n from pygame.locals import *\n import pygame\nexcept ImportError:# If pygame is not installed, help the user install it.\n print('Error: Pygame Module is not installed!', file=sys.stderr)\n while True:\n answer = input('Would you like to automatically install Pygame? (y/n) : ')\n if answer.lower() in ('y', 'n'):\n break\n else:\n print('Please type a valid response')\n if answer.lower() == 'y':\n print('Attemting to install Pygame...')\n resp = system('pip3 install Pygame')\n if str(resp) != '0':\n print('Something went wrong installing Pygame.')\n answer = 'n'\n else:\n print('Pygame installed successfully! Please restart the program.')\n input('Press Return to Continue. 
')\n if answer.lower() == 'n':\n print('To manually install Pygame, go to your system command prompt and type in the command \\'pip3 install Pygame\\'.')\n input('Press Return to continue. ')\n abort()\n\ndef _int(*args):\n data = []\n for i in args:\n data.append(int(i))\n return tuple(data)\n\ndef _float(*args):\n data = []\n for i in args:\n data.append(float(i))\n return tuple(data)\n\ndef _amol(lst, **kwargs):\n # Math Operator acting upon All values of a List\n data = list(lst)\n rng = range(len(data))\n operators = kwargs.keys()\n if 'a' in operators:#add\n for i in rng:\n data[i] += kwargs['a']\n if 's' in operators:#subtract\n for i in rng:\n data[i] -= kwargs['s']\n if 'm' in operators:#multiply\n for i in rng:\n data[i] *= kwargs['m']\n if 'd' in operators:#divide\n for i in rng:\n data[i] /= kwargs['d']\n if 'p' in operators:#power\n for i in rng:\n data[i] **= kwargs['p']\n return tuple(data)\n\ndef rev(text):\n # Reverse text\n v = list(text)\n v.reverse()\n return ''.join(v)\n\ndef revlistitems(_list):\n # Reverse the items of a list\n data = []\n for i in _list:\n data.append(rev(str(i)))\n return data\n\ndef run():\n pygame.init()\n \n font = pygame.font.SysFont('Bookman Old Style', 32)\n font_height = font.get_linesize()\n \n SCREENSIZE = (round(font_height*2), round(font_height*3))\n SCREENSIZE = _amol(SCREENSIZE, m=2)\n \n screen = pygame.display.set_mode(SCREENSIZE, 0, 32)\n pygame.display.set_caption(NAME) \n \n display_text = ''\n \n clock = pygame.time.Clock()\n \n RUNNING = True\n while RUNNING:\n events = pygame.event.get()\n for event in events:\n if event.type == QUIT:\n RUNNING = False\n \n clock.tick(FPS)\n display_text = ' '.join(asctime().split(' ')).split(' ')[3].split(':')\n screen.fill(WHITE)\n \n x = SCREENSIZE[0]/2-(font_height/2)\n y = SCREENSIZE[1]/2-font_height\n for i in display_text:\n text_surf = font.render( i, True, TEXT_COLOR )\n flipped_surf = pygame.transform.flip(text_surf, 1, 0)\n screen.blit( flipped_surf, (x, y) )\n y += font_height\n \n pygame.display.update()\n pygame.quit()\n\nif __name__ == '__main__':\n run()\n","sub_path":"mirrored digital clock.py","file_name":"mirrored digital clock.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"635499879","text":"from math import *\nfrom . import Node\n\n\nclass SOM:\n\n # Recommended: set radius=False to auto-calculate the radius\n def __init__(self, height=10, width=10, FV_size=10, PV_size=10, radius=False, learning_rate=0.005):\n self.height = height\n self.width = width\n self.radius = radius if radius else (height+width)/2\n self.total = height * width\n self.learning_rate = learning_rate\n self.nodes = [0] * (self.total)\n self.FV_size = FV_size\n self.PV_size = PV_size\n\n for i in range(self.height):\n\n for j in range(self.width):\n self.nodes[(i)*(self.width) + j] = Node(FV_size, PV_size, i, j)\n # does each entry of nodes hold another Node..?\n\n\n # Train_vector format : [ [FV[0], PV[0]], [FV[1], PV[1]], so on .. 
]\n def train(self, iterations=1000, train_vector=[[[0.0], [0.0]]]):\n # declare working variables\n time_constant = iterations / log(self.radius)\n radius_decaying = 0.0\n learning_rate_decaying = 0.0\n influence = 0.0\n stack = [] # Stack for storing best matching unit's index and updated FV and PV\n temp_FV = [0.0] * self.FV_size\n temp_PV = [0.0] * self.PV_size\n\n for i in range(1, iterations +1):\n print(\"Iteration number: \", i)\n radius_decaying = self.radius * exp( (-1.0)*i/time_constant )\n learning_rate_decaying = self.learning_rate * exp( (-1.0)*i/time_constant )\n\n for j in range(len(train_vector)):\n input_FV = train_vector[j][0]\n input_PV = train_vector[j][1]\n best = self.best_match(input_FV)\n stack = []\n for k in range(self.total):\n dist = self.distance(self.nodes[best], self.nodes[k])\n\n if dist < radius_decaying:\n temp_FV = [0.0] * self.FV_size\n temp_PV = [0.0] * self.PV_size\n influence = exp( (-1.0 * (dist**2)) / (2*radius_decaying*i) )\n\n for l in range(self.FV_size):\n # Learning\n temp_FV[l] = self.nodes[k].FV[l] + \\\n influence * learning_rate_decaying * (input_FV[l] - self.nodes[k].FV[l])\n\n # Push the unit onto stack to update in next interval\n stack[0:0] = [[[k], temp_FV, temp_PV]]\n\n for l in range(len(stack)):\n self.nodes[stack[l][0][0]].FV[:] = stack[l][1][:]\n self.nodes[stack[l][0][0]].PV[:] = stack[l][2][:]\n\n\n # Returns prediction vector\n def predict(self, FV=[0.0]):\n best = self.best_match(FV)\n return self.nodes[best].PV\n\n\n # Returns best matching unit's index\n def best_match(self, target_FV=[0.0]):\n minimum = sqrt(self.FV_size) # Minimum distance\n minimum_index = 1 # Minimum distance unit\n temp = 0.0\n\n for i in range(self.total):\n temp = 0.0\n temp = self.FV_distance(self.nodes[i].FV, target_FV)\n\n if temp < minimum:\n minimum = temp\n minimum_index = i\n\n return minimum_index\n\n\n def FV_distance(self, FV_1=[0.0], FV_2=[0.0]):\n temp = 0.0\n\n for j in range(self.FV_size):\n temp = temp + (FV_1[j] - FV_2[j]) ** 2\n\n temp = sqrt(temp)\n return temp\n\n\n def distance(self, node1, node2):\n return sqrt((node1.X - node2.X) ** 2 + (node1.Y - node2.Y) ** 2)\n\n\nif __name__ == \"__main__\":\n print( \"Initialization...\" )\n a = SOM(5, 5, 2, 1, False, 0.05)\n print( \"Training for the XOR function...\" )\n a.train(100, [[[1,0], [1]],\\\n [[1,1], [0]],\\\n [[0,1], [1]],\\\n [[0,0], [0]]])\n print( \"Predictions for the XOR function...\" )\n print( \"Prediction 0 0,\", round(a.predict([0,0])[0]) )\n print( \"Prediction 1 0,\", round(a.predict([1,0])[0]) )\n print( \"Prediction 0 1,\", round(a.predict([0,1])[0]) )\n print( \"Prediction 1 1,\", round(a.predict([1,1])[0]) )","sub_path":"testfiles_sey/SOM/Function/SOM.py","file_name":"SOM.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"525828874","text":"from django.shortcuts import render\nfrom django.views.generic.base import View\nfrom search.models import JobboleItemType\nfrom django.http import HttpResponse\nimport json\nfrom elasticsearch import Elasticsearch\nfrom datetime import datetime\n\n\nclient = Elasticsearch(hosts=['localhost'])\n\n# Create your views here.\nclass SearchSuggest(View):\n def get(self, request):\n key_words = request.GET.get('s','')\n datas = []\n if key_words:\n s = JobboleItemType.search()\n s = s.suggest('my_suggest', key_words, completion={\n 'field':\"suggest\",\n 'fuzzy':{\n 'fuzziness':2,\n },\n \"size\":10,\n })\n suggestion = s.execute_suggest()\n for m in 
suggestion.my_suggest[0].options:\n source = m._source\n datas.append(source['title'])\n return HttpResponse(json.dumps(datas), content_type=\"application/json\")\n\nclass SearchView(View):\n def get(self, request):\n key_words = request.GET.get('q', '')\n page = request.GET.get('p',\"1\")\n try:\n page = int(page)\n except:\n page = 1\n start_time = datetime.now()\n response = client.search(\n index='jobbole',\n body={\n \"query\":{\n \"multi_match\":{\n \"query\":key_words,\n \"fields\":[\"tags\", \"title\", \"content\"]\n }\n },\n \"from\":(page-1)*10,\n \"size\":10,\n \"highlight\":{\n \"pre_tags\": [\"\"],\n \"post_tags\": [\"\"],\n \"fields\":{\n \"title\":{},\n \"content\":{},\n }\n }\n }\n )\n end_time = datetime.now()\n last_seconds = (end_time-start_time).total_seconds()\n total_nums = response[\"hits\"][\"total\"]\n if (page%10) > 0:\n page_nums = int(total_nums/10+1)\n else:\n page_nums = int(total_nums/10)\n hit_lst = []\n for hit in response['hits'][\"hits\"]:\n hit_dict = {}\n if \"title\" in hit[\"highlight\"]:\n hit_dict['title'] = \"\".join(hit[\"highlight\"][\"title\"])\n else:\n hit_dict['title'] = hit[\"_source\"][\"title\"]\n\n if \"content\" in hit[\"highlight\"]:\n hit_dict['content'] = \"\".join(hit[\"highlight\"][\"content\"])[:500]\n else:\n hit_dict['content'] = hit[\"_source\"][\"content\"][:500]\n hit_dict['create_time'] = hit[\"_source\"][\"create_time\"]\n hit_dict['url'] = hit[\"_source\"][\"url\"]\n hit_dict['score'] = hit[\"_score\"]\n hit_lst.append(hit_dict)\n return render(request, \"result.html\", {\"page\":page,\n \"all_hits\":hit_lst,\n \"key_words\":key_words,\n \"total_nums\":total_nums,\n \"page_nums\":page_nums,\n \"last_seconds\":last_seconds})\n\n","sub_path":"els_search/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"122306580","text":"# %% [20. 
*Valid Parentheses](https://leetcode.com/problems/valid-parentheses/)\n# Problem: return whether the brackets in the string are valid\n# Approach: scan one character at a time, pushing the expected closer for each opening bracket onto a stack; check that each closing bracket matches\nclass Solution:\n    def isValid(self, s: str) -> bool:\n        lst, dc = [], dict(zip(\"({[\", \")}]\"))\n        for c in s:\n            if c in \"({[\":\n                lst.append(dc[c])\n            elif not lst or c != lst.pop():\n                return False\n        return not lst\n","sub_path":"codes_/0020_Valid_Parentheses.py","file_name":"0020_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"536003127","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QLineEdit, QDialog, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog , QLabel, QTextEdit, QMessageBox\nfrom pre_processing import check_social_distance\n\nclass Ui_MainWindow(QWidget):\n\n\t#Variables to send\n\tfile_path = \"\"\n\tminimum_dist = \"\"\n\ttime_to_wait_before = \"\"\n\ttime_to_wait_between = \"\" \n\toutput_frame_size = \"\"\n\taudio_path = \"\"\n\twebcam_center_target_distance = \"\"\n\n\tdef setupUi(self, MainWindow):\n\t\tself.window = MainWindow\n\t\tMainWindow.setObjectName(\"SocialDistanceCheck\")\n\t\tMainWindow.resize(600, 350)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n\t\tMainWindow.setSizePolicy(sizePolicy)\n\t\tMainWindow.setMinimumSize(QtCore.QSize(600, 350))\n\t\tMainWindow.setMaximumSize(QtCore.QSize(600, 350))\n\t\tMainWindow.setDocumentMode(False)\n\t\tMainWindow.setUnifiedTitleAndToolBarOnMac(False)\n\t\tself.centralwidget = QtWidgets.QWidget(MainWindow)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())\n\t\tself.centralwidget.setSizePolicy(sizePolicy)\n\t\tself.centralwidget.setMinimumSize(QtCore.QSize(600, 350))\n\t\tself.centralwidget.setMaximumSize(QtCore.QSize(600, 350))\n\t\tself.centralwidget.setBaseSize(QtCore.QSize(600, 350))\n\t\tself.centralwidget.setObjectName(\"centralwidget\")\n\t\tself.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n\t\tself.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\n\t\tself.gridLayout.setContentsMargins(15, 15, 15, 15)\n\t\tself.gridLayout.setHorizontalSpacing(35)\n\t\tself.gridLayout.setVerticalSpacing(10)\n\t\tself.gridLayout.setObjectName(\"gridLayout\")\n\t\tself.horizontalLayout = QtWidgets.QHBoxLayout()\n\t\tself.horizontalLayout.setSpacing(10)\n\t\tself.horizontalLayout.setObjectName(\"horizontalLayout\")\n\t\tself.gridLayout.addLayout(self.horizontalLayout, 0, 1, 1, 1)\n\n\t\t#Input Video file Label\n\t\tself.input_video_file_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.input_video_file_label.setFont(font)\n\t\tself.input_video_file_label.setObjectName(\"input_video_file_label\")\n\t\tself.gridLayout.addWidget(self.input_video_file_label, 0, 0, 1, 1)\n\n\t\t#Webcam button\n\t\tself.select_webcam_button = QtWidgets.QPushButton(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, 
QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.select_webcam_button.sizePolicy().hasHeightForWidth())\n\t\tself.select_webcam_button.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.select_webcam_button.setFont(font)\n\t\tself.select_webcam_button.setObjectName(\"select_webcam_button\")\n\t\tself.horizontalLayout.addWidget(self.select_webcam_button)\n\t\tself.select_webcam_button.clicked.connect(self.webcam_clicked)\n\n\t\t#Online Video Button\n\t\tself.online_video_button = QtWidgets.QPushButton(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.online_video_button.sizePolicy().hasHeightForWidth())\n\t\tself.online_video_button.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.online_video_button.setFont(font)\n\t\tself.online_video_button.setObjectName(\"online_video_button\")\n\t\tself.horizontalLayout.addWidget(self.online_video_button)\n\t\tself.online_video_button.clicked.connect(self.online_clicked)\n\n\t\t#Browse Video Button\n\t\tself.browse_video_file_button = QtWidgets.QPushButton(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.browse_video_file_button.sizePolicy().hasHeightForWidth())\n\t\tself.browse_video_file_button.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.browse_video_file_button.setFont(font)\n\t\tself.browse_video_file_button.setObjectName(\"browse_video_file_button\")\n\t\tself.horizontalLayout.addWidget(self.browse_video_file_button)\n\t\tself.browse_video_file_button.clicked.connect(lambda: self.browse_files(\"Video files (*.mp4 *.avi)\"))\n\n\n\t\t#Minimum distance label\n\t\tself.minimum_distance_input_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.minimum_distance_input_label.setFont(font)\n\t\tself.minimum_distance_input_label.setObjectName(\"minimum_distance_input_label\")\n\t\tself.gridLayout.addWidget(self.minimum_distance_input_label, 1, 0, 1, 1)\n\n\t\t\n\t\t#Minimum distance between people\n\t\tself.minimum_distance_combo_box = QtWidgets.QComboBox(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.minimum_distance_combo_box.sizePolicy().hasHeightForWidth())\n\t\tself.minimum_distance_combo_box.setSizePolicy(sizePolicy)\n\t\tfont = 
QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.minimum_distance_combo_box.setFont(font)\n\t\tself.minimum_distance_combo_box.setStatusTip(\"\")\n\t\tself.minimum_distance_combo_box.setLayoutDirection(QtCore.Qt.LeftToRight)\n\t\tself.minimum_distance_combo_box.setAutoFillBackground(False)\n\t\tself.minimum_distance_combo_box.setEditable(False)\n\t\tself.minimum_distance_combo_box.setInsertPolicy(QtWidgets.QComboBox.InsertBeforeCurrent)\n\t\tself.minimum_distance_combo_box.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)\n\t\tself.minimum_distance_combo_box.setMinimumContentsLength(0)\n\t\tself.minimum_distance_combo_box.setIconSize(QtCore.QSize(16, 16))\n\t\tself.minimum_distance_combo_box.setFrame(True)\n\t\tself.minimum_distance_combo_box.setObjectName(\"minimum_distance_combo_box\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.minimum_distance_combo_box.addItem(\"\")\n\t\tself.gridLayout.addWidget(self.minimum_distance_combo_box, 1, 1, 1, 1)\n\n\t\t\n\t\t#Time to wait before starting warning label\n\t\tself.time_to_wait_to_start_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.time_to_wait_to_start_label.setFont(font)\n\t\tself.time_to_wait_to_start_label.setObjectName(\"time_to_wait_to_start_label\")\n\t\tself.gridLayout.addWidget(self.time_to_wait_to_start_label, 2, 0, 1, 1)\n\n\t\t\n\t\t\n\t\t#Time to wait before starting warning\n\t\tself.time_to_wait_to_start_combo_box = QtWidgets.QComboBox(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.time_to_wait_to_start_combo_box.sizePolicy().hasHeightForWidth())\n\t\tself.time_to_wait_to_start_combo_box.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.time_to_wait_to_start_combo_box.setFont(font)\n\t\tself.time_to_wait_to_start_combo_box.setObjectName(\"time_to_wait_to_start_combo_box\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.time_to_wait_to_start_combo_box.addItem(\"\")\n\t\tself.gridLayout.addWidget(self.time_to_wait_to_start_combo_box, 2, 1, 1, 1)\n\n\t\t\n\t\t#Time to wait between warning label\n\t\tself.time_to_wait_in_between_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.time_to_wait_in_between_label.setFont(font)\n\t\tself.time_to_wait_in_between_label.setObjectName(\"time_to_wait_in_between_label\")\n\t\tself.gridLayout.addWidget(self.time_to_wait_in_between_label, 3, 0, 1, 1)\n\n\t\t#Time to wait between warnings\n\t\t\n\t\tself.time_to_wait_in_between = 
QtWidgets.QComboBox(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.time_to_wait_in_between.sizePolicy().hasHeightForWidth())\n\t\tself.time_to_wait_in_between.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.time_to_wait_in_between.setFont(font)\n\t\tself.time_to_wait_in_between.setObjectName(\"time_to_wait_in_between\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.time_to_wait_in_between.addItem(\"\")\n\t\tself.gridLayout.addWidget(self.time_to_wait_in_between, 3, 1, 1, 1)\n\n\t\t\n\t\t#Output Frame size label\n\t\tself.output_frame_size_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.output_frame_size_label.setFont(font)\n\t\tself.output_frame_size_label.setObjectName(\"output_frame_size_label\")\n\t\tself.gridLayout.addWidget(self.output_frame_size_label, 4, 0, 1, 1)\n\n\t\t#Output frame size combobox\n\t\tself.frame_size_combobox = QtWidgets.QComboBox(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.frame_size_combobox.sizePolicy().hasHeightForWidth())\n\t\tself.frame_size_combobox.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.frame_size_combobox.setFont(font)\n\t\tself.frame_size_combobox.setObjectName(\"frame_size_combobox\")\n\t\tself.frame_size_combobox.addItem(\"\")\n\t\tself.frame_size_combobox.addItem(\"\")\n\t\tself.frame_size_combobox.addItem(\"\")\n\t\tself.gridLayout.addWidget(self.frame_size_combobox, 4, 1, 1, 1)\n\n\t\t#Alert filename label\n\t\tself.alert_filename_label = QtWidgets.QLabel(self.centralwidget)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.alert_filename_label.setFont(font)\n\t\tself.alert_filename_label.setObjectName(\"alert_filename_label\")\n\t\tself.gridLayout.addWidget(self.alert_filename_label, 5, 0, 1, 1)\n\n\t\t#Alert file browser button\n\t\tself.alert_file_browser_button = QtWidgets.QPushButton(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.alert_file_browser_button.sizePolicy().hasHeightForWidth())\n\t\tself.alert_file_browser_button.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.alert_file_browser_button.setFont(font)\n\t\tself.alert_file_browser_button.setObjectName(\"alert_file_browser_button\")\n\t\tself.gridLayout.addWidget(self.alert_file_browser_button, 5, 1, 1, 1)\n\t\tself.alert_file_browser_button.clicked.connect(lambda: self.browse_files(\"Audio files 
(*.wav)\"))\n\n\n\n\t\t#Proceed button\n\t\tself.proceed_to_detection_button = QtWidgets.QPushButton(self.centralwidget)\n\t\tsizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\t\tsizePolicy.setHorizontalStretch(0)\n\t\tsizePolicy.setVerticalStretch(0)\n\t\tsizePolicy.setHeightForWidth(self.proceed_to_detection_button.sizePolicy().hasHeightForWidth())\n\t\tself.proceed_to_detection_button.setSizePolicy(sizePolicy)\n\t\tfont = QtGui.QFont()\n\t\tfont.setPointSize(10)\n\t\tself.proceed_to_detection_button.setFont(font)\n\t\tself.proceed_to_detection_button.setObjectName(\"proceed_to_detection_button\")\n\t\tself.gridLayout.addWidget(self.proceed_to_detection_button, 6, 1, 1, 1)\n\t\tself.proceed_to_detection_button.clicked.connect(self.proceed_processing)\n\n\n\t\tMainWindow.setCentralWidget(self.centralwidget)\n\t\tself.menubar = QtWidgets.QMenuBar(MainWindow)\n\t\tself.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n\t\tself.menubar.setObjectName(\"menubar\")\n\t\tMainWindow.setMenuBar(self.menubar)\n\t\tself.statusbar = QtWidgets.QStatusBar(MainWindow)\n\t\tself.statusbar.setObjectName(\"statusbar\")\n\t\tMainWindow.setStatusBar(self.statusbar)\n\n\t\tself.retranslateUi(MainWindow)\n\t\tself.time_to_wait_to_start_combo_box.setCurrentIndex(5)\n\t\tself.time_to_wait_in_between.setCurrentIndex(5)\n\t\tself.minimum_distance_combo_box.setCurrentIndex(1)\n\t\tself.frame_size_combobox.setCurrentIndex(0)\n\t\tQtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\tdef webcam_clicked(self):\n\t\tself.webcam_target_distance = QDialog()\n\t\t# add a vertical layout to dialog\n\t\tself.vlayout1 = QVBoxLayout()\n\t\t# add a horizontal layout to take user inputs(URL).\n\t\tself.hlayout1_1 = QHBoxLayout()\n\t\t# add a horizontal layout to take user inputs.\n\t\tself.hlayout1_2 = QHBoxLayout()\n\t\t# add a horizontal layout for buttons.\n\t\tself.hlayout1_3 = QHBoxLayout()\n\n\t\tself.webcam_target_distance.setLayout(self.vlayout1)\n\t\tself.vlayout1.addLayout(self.hlayout1_1)\n\t\tself.vlayout1.addLayout(self.hlayout1_2)\n\t\tself.vlayout1.addLayout(self.hlayout1_3)\n\t\t# add the items to layout instead of dialog\n\t\tself.ipCam_address_label = QLabel(\"IP Camera URL:\", self.webcam_target_distance)\n\t\tself.hlayout1_1.addWidget(self.ipCam_address_label)\n\t\tself.webcam_target_distance_label = QLabel(\"Distance between webcam and target:\", self.webcam_target_distance)\n\t\tself.hlayout1_2.addWidget(self.webcam_target_distance_label)\n\n\t\tself.ipCam_address = QLineEdit(self.webcam_target_distance)\n\t\tself.hlayout1_1.addWidget(self.ipCam_address)\n\n\t\tself.webcam_target_distance_user_input = QLineEdit(self.webcam_target_distance)\n\t\tself.hlayout1_2.addWidget(self.webcam_target_distance_user_input)\n\n\t\tself.webcam_target_distance_unit = QLabel(\"Meters\", self.webcam_target_distance)\n\t\tself.hlayout1_2.addWidget(self.webcam_target_distance_unit)\n\n\t\tself.okButton1 = QPushButton(\"OK\", self.webcam_target_distance)\n\t\tself.hlayout1_3.addWidget(self.okButton1)\n\t\tself.cancelButton1 = QPushButton(\"Cancel\", self.webcam_target_distance)\n\t\tself.hlayout1_3.addWidget(self.cancelButton1)\n\t\t\n\t\tself.webcam_target_distance.setWindowTitle(\"Get Distance\")\n\t\tself.webcam_target_distance.show()\n\t\t\n\t\tself.okButton1.clicked.connect(lambda: self.ok_pressed(self.ipCam_address, self.webcam_target_distance_user_input, self.webcam_target_distance))\n\t\tself.cancelButton1.clicked.connect(lambda: 
self.cancel_pressed(self.webcam_target_distance))\n\n\n\n\tdef online_clicked(self):\n\t\tself.get_online_data = QDialog()\n\t\t# add a vertical layout to dialog\n\t\tself.vlayout2 = QVBoxLayout()\n\n\t\t# add a horizontal layout for buttons.\n\t\tself.hlayout2_1 = QHBoxLayout()\n\n\t\tself.get_online_data.setLayout(self.vlayout2)\n\t\t# add the items to layout instead of dialog\n\t\tself.online_data_link_label = QLabel(\"Please provide us some link to the online video:\", self.get_online_data)\n\t\tself.vlayout2.addWidget(self.online_data_link_label)\n\n\t\tself.online_data_link_user_input = QLineEdit(self.get_online_data)\n\t\tself.vlayout2.addWidget(self.online_data_link_user_input)\n\n\t\tself.vlayout2.addLayout(self.hlayout2_1)\n\t\tself.okButton2 = QPushButton(\"OK\", self.get_online_data)\n\t\tself.hlayout2_1.addWidget(self.okButton2)\n\t\tself.cancelButton2 = QPushButton(\"Cancel\", self.get_online_data)\n\t\tself.hlayout2_1.addWidget(self.cancelButton2)\n\t\t\n\t\tself.get_online_data.setWindowTitle(\"Get video link\")\n\t\tself.get_online_data.show()\n\n\t\tself.okButton2.clicked.connect(lambda: self.ok_pressed(self.online_data_link_user_input,\"15\", self.get_online_data))\n\t\tself.cancelButton2.clicked.connect(lambda: self.cancel_pressed(self.get_online_data))\n\n\tdef ok_pressed(self, ipcam_url, user_input, dialog ):\n\t\tif ipcam_url == \"\":\n\t\t\tself.file_path = \"WebCam\"\n\t\telse:\n\t\t\tself.file_path = ipcam_url.text()\n\t\ttry:\n\t\t\tself.data = user_input.text()\n\t\texcept:\n\t\t\tself.data = user_input\n\n\t\ttry:\n\t\t\tif user_input == self.webcam_target_distance_user_input:\n\t\t\t\tself.title = \"No Distance\"\n\t\t\telse:\n\t\t\t\tself.title = \"No Link\"\n\t\texcept:\n\t\t\tif user_input == self.online_data_link_user_input:\n\t\t\t\tself.title = \"No Link\"\n\t\t\telse:\n\t\t\t\tself.title = \"No Distance\"\n\t\tif self.data == \"\":\n\t\t\tself.msg = QMessageBox()\n\t\t\tself.msg.setWindowTitle(self.title)\n\t\t\tself.msg.setText(\"Please provide us some data to work on.\")\n\t\t\tself.msg.setIcon(QMessageBox.Critical)\n\t\t\tself.msg.setStandardButtons(QMessageBox.Ok)\n\t\t\tself.msg.setDefaultButton(QMessageBox.Ok)\n\t\t\tself.msg.buttonClicked.connect(self.close)\n\t\t\tx = self.msg.exec_()\n\t\telse:\n\t\t\tif self.data.isdigit():\n\t\t\t\tself.webcam_center_target_distance = self.data\n\t\t\telse:\n\t\t\t\tif self.data.startswith(\"http\"):\n\t\t\t\t\tself.file_path = self.data\n\t\t\t\telse:\n\t\t\t\t\tself.warn = QMessageBox()\n\t\t\t\t\tself.warn.setWindowTitle(\"Wrong Link\")\n\t\t\t\t\tself.warn.setText(\"The link provided is wrong. 
Please try again.\")\n\t\t\t\t\tself.warn.setIcon(QMessageBox.Critical)\n\t\t\t\t\tself.warn.setStandardButtons(QMessageBox.Ok)\n\t\t\t\t\tself.warn.setDefaultButton(QMessageBox.Ok)\n\t\t\t\t\tself.warn.buttonClicked.connect(self.online_clicked)\n\t\t\t\t\tw = self.warn.exec_()\n\t\t\tdialog.close()\n\n\tdef cancel_pressed(self, dialog):\n\t\tdialog.close()\n\n\n\tdef browse_files(self, title):\n\t\tfname = QFileDialog.getOpenFileName(self, 'Open file','c:/', title)\n\t\tif fname[0].endswith(\".wav\"):\n\t\t\tself.audio_path = fname[0]\n\t\telse:\n\t\t\tself.file_path = fname[0]\n\n\n\tdef proceed_processing(self):\n\t\tself.minimum_dist = self.minimum_distance_combo_box.currentText()\n\t\tself.time_to_wait_before = self.time_to_wait_to_start_combo_box.currentText()\n\t\tself.time_to_wait_between = self.time_to_wait_in_between.currentText()\n\t\tself.output_frame_size = self.frame_size_combobox.currentText()\n\n\t\tself.default_combobox_value = False\n\n\t\tif self.minimum_dist == \"1 Meters\" and self.time_to_wait_before == \"30 Seconds\" and self.time_to_wait_between == \"30 Seconds\" and self.output_frame_size == \"720 x 480\":\n\t\t\tself.default_combobox_value = True\n\n\t\tif self.file_path == \"\" and self.audio_path==\"\" and self.webcam_center_target_distance==\"\" and self.default_combobox_value == True:\n\t\t\tself.title = \"No Info Provided\"\n\t\t\tself.info = \"You have not provided any info. We will proceed with our default.\"\n\t\t\tself.webcam_center_target_distance = \"4\"\n\t\t\tself.show_pop_up_1(self.title, self.info)\n\n\t\telif self.file_path == \"\" and self.webcam_center_target_distance == \"\":\n\t\t\tself.title = \"No video filepath and Distance\"\n\t\t\tself.info = \"You have not provided us video file path and distance between webcam and target info. We will proceed with our default video and distance.\"\n\t\t\tself.webcam_center_target_distance = \"4\"\n\t\t\tself.show_pop_up_1(self.title, self.info)\n\n\t\telif self.file_path == \"WebCam\":\n\t\t\tif self.audio_path == \"\" and self.webcam_center_target_distance == \"\":\n\t\t\t\tself.title = \"No audio filepath and Distance\"\n\t\t\t\tself.info = \"You have not provided us audio file path and distance between webcam and target info. We will proceed with our default audio and distance.\"\n\t\t\t\tself.webcam_center_target_distance = \"4\"\n\t\t\t\tself.show_pop_up_1(self.title, self.info)\n\t\t\telif self.audio_path == \"\":\n\t\t\t\tself.title = \"No audio filepath\"\n\t\t\t\tself.info = \"You have not provided us audio file path. We will proceed with our default audio.\"\n\t\t\t\tself.show_pop_up_1(self.title, self.info)\n\t\t\telse:\n\t\t\t\tself.title = \"No distance\"\n\t\t\t\tself.info = \"You have not provided us distance between webcam and target. We will proceed with our default distance value.\"\n\t\t\t\tself.webcam_center_target_distance = \"4\"\n\t\t\t\tself.show_pop_up_1(self.title, self.info)\n\t\telif self.file_path == \"\":\n\t\t\tself.title = \"No Video\"\n\t\t\tself.info = \"You have neither selected webcam, nor given any video file path or any online video link. We will proceed with our default video.\"\n\t\t\tself.show_pop_up_1(self.title, self.info)\n\n\t\telif self.audio_path==\"\":\n\t\t\tself.title = \"No Audio Path\"\n\t\t\tself.info = \"You have not provided us audio file path to play warning. 
We will proceed with our default audio.\"\n\t\t\tself.webcam_center_target_distance = \"4\"\n\t\t\tself.show_pop_up_1(self.title, self.info)\n\n\t\telse:\n\t\t\tself.show_pop_up_2()\n\n\tdef show_pop_up_1(self, title, message):\n\t\tself.msg1 = QMessageBox()\n\t\tself.msg1.setWindowTitle(title)\n\t\tself.msg1.setText(message)\n\t\tself.msg1.setIcon(QMessageBox.Warning)\n\t\tself.msg1.setStandardButtons(QMessageBox.Ok|QMessageBox.Cancel)\n\t\tself.msg1.setDefaultButton(QMessageBox.Cancel)\n\t\tself.msg1.buttonClicked.connect(self.pop_up_button1)\n\t\tx1 = self.msg1.exec_()\n\n\tdef pop_up_button1(self, i):\n\t\tif i.text() == \"OK\":\n\t\t\tself.msg1.close()\n\t\t\tMainWindow.close()\n\t\t\tcheck_social_distance(self.file_path, self.minimum_dist, self.time_to_wait_before, self.time_to_wait_between, self.output_frame_size, self.audio_path, self.webcam_center_target_distance)\n\t\t\t\n\n\tdef show_pop_up_2(self):\n\t\tself.msg2 = QMessageBox()\n\t\tself.msg2.setWindowTitle(\"Confirmation\")\n\t\tself.msg2.setText(f\"Your have selected:\\n \\\n\t\t\t{self.file_path} as video input.\\n \\\n\t\t\t{self.minimum_dist} as minimum distance to maintain between people.\\n \\\n\t\t\t{self.time_to_wait_before} as time to wait before starting to play warning. \\n \\\n\t\t\t{self.time_to_wait_between} as time to wait between playing warnings. \\n \\\n\t\t\t{self.output_frame_size} as size of output video frame. \\n \\\n\t\t\t{self.audio_path} as audio file to play warning. \\n \\\n\t\t\t{self.webcam_center_target_distance} as distance between camera and target.\")\n\t\tself.msg2.setIcon(QMessageBox.Information)\n\t\tself.msg2.setStandardButtons(QMessageBox.Ok|QMessageBox.Cancel)\n\t\tself.msg2.setDefaultButton(QMessageBox.Cancel)\n\t\tself.msg2.buttonClicked.connect(self.pop_up_button2)\n\t\tx2 = self.msg2.exec_()\n\n\tdef pop_up_button2(self, j):\n\t\tif j.text() == \"OK\":\n\t\t\tself.msg2.close()\n\t\t\tMainWindow.close()\n\t\t\tcheck_social_distance(self.file_path, self.minimum_dist, self.time_to_wait_before, self.time_to_wait_between)\n\t\t\t\n\n\n\tdef retranslateUi(self, MainWindow):\n\t\t_translate = QtCore.QCoreApplication.translate\n\t\tMainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(0, _translate(\"MainWindow\", \"5 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(1, _translate(\"MainWindow\", \"10 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(2, _translate(\"MainWindow\", \"15 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(3, _translate(\"MainWindow\", \"20 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(4, _translate(\"MainWindow\", \"25 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(5, _translate(\"MainWindow\", \"30 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(6, _translate(\"MainWindow\", \"35 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(7, _translate(\"MainWindow\", \"40 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(8, _translate(\"MainWindow\", \"45 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(9, _translate(\"MainWindow\", \"50 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(10, _translate(\"MainWindow\", \"55 Seconds\"))\n\t\tself.time_to_wait_to_start_combo_box.setItemText(11, _translate(\"MainWindow\", \"60 Seconds\"))\n\t\tself.minimum_distance_combo_box.setItemText(0, _translate(\"MainWindow\", \"0.5 
Meters\"))\n\t\tself.minimum_distance_combo_box.setItemText(1, _translate(\"MainWindow\", \"1 Meters\"))\n\t\tself.minimum_distance_combo_box.setItemText(2, _translate(\"MainWindow\", \"1.5 Meters\"))\n\t\tself.minimum_distance_combo_box.setItemText(3, _translate(\"MainWindow\", \"2 Meters\"))\n\t\tself.minimum_distance_combo_box.setItemText(4, _translate(\"MainWindow\", \"2.5 Meters\"))\n\t\tself.minimum_distance_combo_box.setItemText(5, _translate(\"MainWindow\", \"3 Meters\"))\n\t\tself.output_frame_size_label.setText(_translate(\"MainWindow\", \"Output Video Frame Size\"))\n\t\tself.input_video_file_label.setText(_translate(\"MainWindow\", \"Input video file:\"))\n\t\tself.proceed_to_detection_button.setText(_translate(\"MainWindow\", \"Proceed\"))\n\t\tself.minimum_distance_input_label.setText(_translate(\"MainWindow\", \"Minimum distance to maintain between people:\"))\n\t\tself.time_to_wait_in_between.setItemText(0, _translate(\"MainWindow\", \"5 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(1, _translate(\"MainWindow\", \"10 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(2, _translate(\"MainWindow\", \"15 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(3, _translate(\"MainWindow\", \"20 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(4, _translate(\"MainWindow\", \"25 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(5, _translate(\"MainWindow\", \"30 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(6, _translate(\"MainWindow\", \"35 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(7, _translate(\"MainWindow\", \"40 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(8, _translate(\"MainWindow\", \"45 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(9, _translate(\"MainWindow\", \"50 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(10, _translate(\"MainWindow\", \"55 Seconds\"))\n\t\tself.time_to_wait_in_between.setItemText(11, _translate(\"MainWindow\", \"60 Seconds\"))\n\t\tself.select_webcam_button.setText(_translate(\"MainWindow\", \"WebCam\"))\n\t\tself.online_video_button.setText(_translate(\"MainWindow\", \"Online Video\"))\n\t\tself.browse_video_file_button.setText(_translate(\"MainWindow\", \"Browse\"))\n\t\tself.time_to_wait_in_between_label.setText(_translate(\"MainWindow\", \"Time to wait between playing warnings:\"))\n\t\tself.time_to_wait_to_start_label.setText(_translate(\"MainWindow\", \"Time to wait before playing warning:\"))\n\t\tself.alert_filename_label.setText(_translate(\"MainWindow\", \"Input alert wav file:\"))\n\t\tself.frame_size_combobox.setItemText(0, _translate(\"MainWindow\", \"720 x 480\"))\n\t\tself.frame_size_combobox.setItemText(1, _translate(\"MainWindow\", \"1280 x 720\"))\n\t\tself.frame_size_combobox.setItemText(2, _translate(\"MainWindow\", \"1920 x 1080\"))\n\t\tself.alert_file_browser_button.setText(_translate(\"MainWindow\", \"Browse audio(wav) file\"))\n\n\nif __name__ == \"__main__\":\n\timport sys\n\tapp = QtWidgets.QApplication(sys.argv)\n\tMainWindow = QtWidgets.QMainWindow()\n\tui = Ui_MainWindow()\n\tui.setupUi(MainWindow)\n\tMainWindow.show()\n\tsys.exit(app.exec_())\n\n","sub_path":"social_distance_check.py","file_name":"social_distance_check.py","file_ext":"py","file_size_in_byte":27028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"393209229","text":"# git clone url\n# git status\n# git add filename.py\n# git commit -m 'message'\n# git push\n#\n# Undoing/Reverting/Resetting 
CODE:----\n#\n# Undoing(after Adding):-- use when you want to undo your changes in a file before adding it\n# git status\n# git checkout -- filename.py\n# git checkout -- . [undo your changes in multiple files]\n#\n# Reverting(after Commit):--\n# git status\n# git add filename.py\n# git commit -m 'message'\n# git log [it will get the commit history with ids]\n# git revert id [id: commit id]\n# git revert -n id [changes are reverted but not committed]\n#\n# Resetting(after Push):--\n# git log\n# git reset --hard id\n\n# Create new Branch & Merge branch & delete branch\n# Create Branch:--\n# git branch [we will get a list of branches]\n# git branch branchname\n# git checkout branchname [switch to branch]\n# git checkout master [switch to master branch]\n\n# Merge:--\n# git merge branchname\n\n# Delete:--\n# git branch -d branchname\n#------------------------------------\n\n# git checkout -b branchname [creates a new branch and switches to it]\n\n\ndef add(a, b):\n    return a + b\n\n\nobj = add(10, 20)\nprint(obj)\n\ndef check_palindrome(val):\n    a = str(val)\n    b = a[::-1]\n    if a == b:\n        print(a, 'is a palindrome')\n    else:\n        print(a, 'is not a palindrome')\nobj = check_palindrome(2432)\n","sub_path":"gitDocument.py","file_name":"gitDocument.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46821817","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport itertools\nimport logging\nimport random\nimport threading\n\nfrom oslo.utils import reflection\nimport six\n\nfrom taskflow.engines.worker_based import protocol as pr\nfrom taskflow.types import cache as base\nfrom taskflow.types import timing as tt\n\nLOG = logging.getLogger(__name__)\n\n\nclass RequestsCache(base.ExpiringCache):\n    \"\"\"Represents a thread-safe requests cache.\"\"\"\n\n    def get_waiting_requests(self, worker):\n        \"\"\"Get list of waiting requests that the given worker can satisfy.\"\"\"\n        waiting_requests = []\n        with self._lock:\n            for request in six.itervalues(self._data):\n                if request.state == pr.WAITING \\\n                        and worker.performs(request.task):\n                    waiting_requests.append(request)\n        return waiting_requests\n\n\n# TODO(harlowja): this needs to be made better, once\n# https://blueprints.launchpad.net/taskflow/+spec/wbe-worker-info is finally\n# implemented we can go about using that instead.\nclass TopicWorker(object):\n    \"\"\"A (read-only) worker and its relevant information + useful methods.\"\"\"\n\n    _NO_IDENTITY = object()\n\n    def __init__(self, topic, tasks, identity=_NO_IDENTITY):\n        self.tasks = []\n        for task in tasks:\n            if not isinstance(task, six.string_types):\n                task = reflection.get_class_name(task)\n            self.tasks.append(task)\n        self.topic = topic\n        self.identity = identity\n\n    def performs(self, task):\n        if not isinstance(task, six.string_types):\n            task = reflection.get_class_name(task)\n        return task in self.tasks\n\n    def __eq__(self, other):\n        if not isinstance(other, TopicWorker):\n            return NotImplemented\n        if len(other.tasks) != len(self.tasks):\n            return False\n        if other.topic != self.topic:\n            return False\n        for task in other.tasks:\n            if not self.performs(task):\n                return False\n        # If one of the identity equals _NO_IDENTITY, then allow it to match...\n        if self._NO_IDENTITY in (self.identity, other.identity):\n            return True\n        else:\n            return other.identity == self.identity\n\n    def __repr__(self):\n        r = reflection.get_class_name(self, fully_qualified=False)\n        if self.identity is not self._NO_IDENTITY:\n            r += \"(identity=%s, tasks=%s, topic=%s)\" % (self.identity,\n                                                        self.tasks, self.topic)\n        else:\n            r += \"(identity=*, tasks=%s, topic=%s)\" % (self.tasks, self.topic)\n        return r\n\n\nclass TopicWorkers(object):\n    \"\"\"A collection of topic based workers.\"\"\"\n\n    @staticmethod\n    def _match_worker(task, available_workers):\n        \"\"\"Select a worker (from geq 1 workers) that can best perform the task.\n\n        NOTE(harlowja): this method will be activated when there exists\n        more than one potential worker that can perform a task; the\n        arguments provided will be the potential workers located and the\n        task that is being requested to perform, and the result should be one\n        of those workers using whatever best-fit algorithm is possible (or\n        random at the least).\n        \"\"\"\n        if len(available_workers) == 1:\n            return available_workers[0]\n        else:\n            return random.choice(available_workers)\n\n    def __init__(self):\n        self._workers = {}\n        self._cond = threading.Condition()\n        # Used to name workers with more useful identities...\n        self._counter = itertools.count()\n\n    def __len__(self):\n        return len(self._workers)\n\n    def _next_worker(self, topic, tasks, temporary=False):\n        if not temporary:\n            return TopicWorker(topic, tasks,\n                               identity=six.next(self._counter))\n        else:\n            return TopicWorker(topic, tasks)\n\n    def add(self, topic, tasks):\n        \"\"\"Adds/updates a worker 
for the topic for the given tasks.\"\"\"\n with self._cond:\n try:\n worker = self._workers[topic]\n # Check if we already have an equivalent worker, if so just\n # return it...\n if worker == self._next_worker(topic, tasks, temporary=True):\n return worker\n # This *fall through* is done so that if someone is using an\n # active worker object that already exists that we just create\n # a new one; so that the existing object doesn't get\n # affected (workers objects are supposed to be immutable).\n except KeyError:\n pass\n worker = self._next_worker(topic, tasks)\n self._workers[topic] = worker\n self._cond.notify_all()\n return worker\n\n def wait_for_workers(self, workers=1, timeout=None):\n \"\"\"Waits for geq workers to notify they are ready to do work.\n\n NOTE(harlowja): if a timeout is provided this function will wait\n until that timeout expires, if the amount of workers does not reach\n the desired amount of workers before the timeout expires then this will\n return how many workers are still needed, otherwise it will\n return zero.\n \"\"\"\n if workers <= 0:\n raise ValueError(\"Worker amount must be greater than zero\")\n w = None\n if timeout is not None:\n w = tt.StopWatch(timeout).start()\n with self._cond:\n while len(self._workers) < workers:\n if w is not None and w.expired():\n return max(0, workers - len(self._workers))\n timeout = None\n if w is not None:\n timeout = w.leftover()\n self._cond.wait(timeout)\n return 0\n\n def get_worker_for_task(self, task):\n \"\"\"Gets a worker that can perform a given task.\"\"\"\n available_workers = []\n with self._cond:\n for worker in six.itervalues(self._workers):\n if worker.performs(task):\n available_workers.append(worker)\n if available_workers:\n return self._match_worker(task, available_workers)\n else:\n return None\n\n def clear(self):\n with self._cond:\n self._workers.clear()\n self._cond.notify_all()\n\n\nclass PeriodicWorker(object):\n \"\"\"Calls a set of functions when activated periodically.\n\n NOTE(harlowja): the provided timeout object determines the periodicity.\n \"\"\"\n def __init__(self, timeout, functors):\n self._timeout = timeout\n self._functors = []\n for f in functors:\n self._functors.append((f, reflection.get_callable_name(f)))\n\n def start(self):\n while not self._timeout.is_stopped():\n for (f, f_name) in self._functors:\n LOG.debug(\"Calling periodic function '%s'\", f_name)\n try:\n f()\n except Exception:\n LOG.warn(\"Failed to call periodic function '%s'\", f_name,\n exc_info=True)\n self._timeout.wait()\n\n def stop(self):\n self._timeout.interrupt()\n\n def reset(self):\n self._timeout.reset()\n","sub_path":"taskflow/engines/worker_based/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"448335764","text":"def get_color_for_cell_type(cell_type):\n # some cells have the type -1 - which means something like undefined?\n if cell_type == -1:\n return '#000000'\n # handle cell_type = -1\n colors_for_type = {\n 0: '#373f51', # street\n 1: '#002dd5', # residential low\n 2: '#008dd5', # residential high\n 3: '#e43f0f', # working low\n 4: '#f51476', # working high\n 5: '#000000', # unknown\n 6: '#000000', # unknown\n }\n\n return colors_for_type[cell_type]\n\n\ndef get_height_for_cell_type(cell_type):\n # some cells have the type -1 - which means something like undefined?\n if cell_type == -1:\n return 0\n\n heights_for_type = {\n 0: 0, # street\n 1: 20, # residential 
low\n        2: 40, # residential high\n        3: 30, # working low\n        4: 50, # working high\n        5: 0, # unknown\n        6: 0, # unknown\n    }\n\n    return heights_for_type[cell_type]\n\n\ndef get_base_height_for_cell_type(cell_type):\n    # some cells have the type -1 - which means something like undefined?\n    if cell_type == -1:\n        return 0\n\n    base_height_for_type = {\n        0: 0, # street\n        1: 0, # residential low\n        2: 0, # residential high\n        3: 0, # working low\n        4: 0, # working high\n        5: 0, # unknown\n        6: 0, # unknown\n    }\n\n    return base_height_for_type[cell_type]\n","sub_path":"project_properties.py","file_name":"project_properties.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"22320408","text":"import os\n\nimport click\n\nfrom app.lib import OUTPUT_FLD, ROOT_DIR\nfrom app.sampler import PairSampler, SinSampler\nfrom app.visualizer import Visualizer\nfrom app.emulator import Market, PlayMarket\nfrom app.simulators import Simulator\nfrom app.agents import load_model\nfrom app.agents import (\n    Agent,\n    QModelConv,\n    QModelMLP,\n    QModelGRU,\n    QModelConvGRU,\n)\n\n\ndef get_model(model_type, env, learning_rate, fld_load=None):\n    if model_type == 'MLP':\n        m = 16\n        layers = 5\n        hidden_size = [m]*layers\n        model = QModelMLP(env.state_shape, env.n_action)\n        model.build_model(\n            hidden_size,\n            learning_rate=learning_rate,\n            activation='tanh',\n        )\n    elif model_type == 'conv':\n        m = 16\n        layers = 2\n        filter_num = [m]*layers\n        filter_size = [3] * len(filter_num)\n        #use_pool = [False, True, False, True]\n        #use_pool = [False, False, True, False, False, True]\n        use_pool = None\n        #dilation = [1,2,4,8]\n        dilation = None\n        dense_units = [48, 24]\n        model = QModelConv(env.state_shape, env.n_action)\n        model.build_model(\n            filter_num,\n            filter_size,\n            dense_units,\n            learning_rate,\n            dilation=dilation,\n            use_pool=use_pool,\n        )\n    elif model_type == 'RNN':\n        m = 32\n        layers = 3\n        hidden_size = [m]*layers\n        dense_units = [m, m]\n        model = QModelGRU(env.state_shape, env.n_action)\n        model.build_model(hidden_size, dense_units, learning_rate=learning_rate)\n    elif model_type == 'ConvRNN':\n        m = 8\n        conv_n_hidden = [m, m]\n        RNN_n_hidden = [m, m]\n        dense_units = [m, m]\n        model = QModelConvGRU(env.state_shape, env.n_action)\n        model.build_model(\n            conv_n_hidden,\n            RNN_n_hidden,\n            dense_units,\n            learning_rate=learning_rate,\n        )\n    elif model_type == 'pretrained':\n        model = load_model(fld_load, learning_rate)\n    else:\n        raise ValueError('Incorrect model type was selected')\n\n    return model\n\n\ndef main():\n    model_type = 'conv' # default model type\n    fld_load = None\n    n_episode_training = 10 # number of training episodes\n    n_episode_testing = 20 # number of testing episodes\n    open_cost = 3.3 # cost of opening a bid\n    univariate = True\n    # which data to use when training\n    if univariate:\n        db_type = 'SinSamplerDB'\n        db = 'concat_half_base_'\n        Sampler = SinSampler\n    else:\n        db_type = 'PairSamplerDB'\n        db = 'randjump_100,1(10, 30)[]_'\n        Sampler = PairSampler\n    # directory for the data to load from\n    fld = os.path.join(ROOT_DIR, 'data', db_type, db+'A')\n    # load data from directory specified\n    sampler = Sampler('load', fld=fld)\n\n    # RL-related settings\n    batch_size = 8\n    learning_rate = 1e-4\n    discount_factor = 0.8\n    exploration_init = 1.\n    exploration_decay = 0.99\n    exploration_min = 0.01\n    window_state = 40\n\n    # create environment (market) from the data loaded\n    env = Market(sampler, window_state, open_cost)\n    # create a model based on type selected\n    model 
= get_model(model_type, env, learning_rate, fld_load)\n model.model.summary()\n\n # create an RL agent\n agent = Agent(model, discount_factor=discount_factor, batch_size=batch_size)\n visualizer = Visualizer(env.action_labels)\n\n # output directory to save intermediate results\n fld_save = os.path.join(\n OUTPUT_FLD, sampler.title, model.model_name,\n str((env.window_state, sampler.window_episode, agent.batch_size,\n learning_rate, agent.discount_factor, exploration_decay,\n env.open_cost)))\n\n print('='*20)\n print(fld_save)\n print('='*20)\n\n # train a model\n simulator = Simulator(agent, env, visualizer=visualizer, fld_save=fld_save)\n simulator.train(\n n_episode_training,\n save_per_episode=1, # save data on each episode\n exploration_init=exploration_init,\n exploration_decay=exploration_decay,\n exploration_min=exploration_min,\n )\n\n #agent.model = load_model(os.path.join(fld_save,'model'), learning_rate)\n\n if univariate:\n print('Testing trained model for univariate (sin-like) data')\n fld = os.path.join(ROOT_DIR, 'data', db_type, db+'B')\n sampler = SinSampler('load', fld=fld)\n simulator.env.sampler = sampler\n simulator.test(\n n_episode_testing,\n save_per_episode=1,\n subfld='out-of-sample testing',\n )\n else:\n print('Testing trained model for bivariate (rand-jump) data')\n simulator.test(\n n_episode_testing,\n save_per_episode=1,\n subfld='in-sample testing',\n )\n\n\ndef custom_launch():\n from app.sampler import PBSampler\n\n model_type = 'conv'\n # n_episode_training = 300\n n_episode_testing = 1\n open_cost = 0.1\n\n sampler = PBSampler()\n window_state = 40 # set to month by default\n learning_rate = 1e-4\n discount_factor = 0.95\n batch_size = 8\n\n exploration_init = 1. # always explore at the beginning\n exploration_decay = 0.99\n exploration_min = 0.01\n ma_window = 60 # just to measure overall performance\n\n env = Market(\n sampler=sampler,\n window_state=window_state,\n open_cost=open_cost,\n )\n # n_episode_training = 2*len(sampler)\n n_episode_training = 5\n\n model = get_model(\n model_type=model_type,\n env=env,\n learning_rate=learning_rate,\n )\n\n fld_save = os.path.join(\n # OUTPUT_FLD, 'PB_2018_180d_40s_test1'\n OUTPUT_FLD, 'debug',\n )\n\n # fld_load_model = os.path.join(fld_save, 'model')\n # model = get_model(\n # model_type='pretrained',\n # env=env,\n # learning_rate=learning_rate,\n # fld_load=fld_load_model,\n # )\n model.model.summary()\n\n agent = Agent(\n model=model,\n discount_factor=discount_factor,\n batch_size=batch_size,\n )\n\n visualizer = Visualizer(env.action_labels)\n\n # env.sampler.offset = 90\n simulator = Simulator(\n agent=agent,\n env=env,\n visualizer=visualizer,\n fld_save=fld_save,\n ma_window=ma_window,\n )\n\n click.secho('Training agent...', fg='green')\n simulator.train(\n n_episode=n_episode_training,\n save_per_episode=1,\n exploration_init=exploration_init,\n exploration_decay=exploration_decay,\n exploration_min=exploration_min,\n )\n\n click.secho('Testing agent...', fg='green')\n simulator.test(\n n_episode=n_episode_testing,\n save_per_episode=1,\n subfld='in-sample-testing',\n verbose=True,\n )\n\n\ndef play_launch():\n from app.sampler import PlaySampler\n\n db_name = 'uah_to_usd_2017_both_scaled_1_10.csv'\n episode_length = 180\n sampler = PlaySampler(db_name=db_name, episode_length=episode_length)\n n_episode_training = 1000\n n_episode_testing = sampler.test_samples\n # n_episode_testing = 1\n\n window_state = 60 # num of days\n learning_rate = 1e-4\n discount_factor = 0.98\n batch_size = 8\n\n 
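# Epsilon-greedy exploration settings for the run below: start fully random\n    # (1.0), decay the rate as training progresses, and floor it at\n    # exploration_min so the agent never stops sampling alternative actions.\n    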
exploration_init = 1. # always explore at the beginning\n exploration_decay = 0.99\n exploration_min = 0.1\n ma_window = 80\n\n env = PlayMarket(\n sampler=sampler,\n window_state=window_state,\n )\n\n model_type = 'conv'\n model = get_model(\n model_type=model_type,\n env=env,\n learning_rate=learning_rate,\n )\n\n fld_save = os.path.join(\n OUTPUT_FLD, 'Play_2017_180d_60s_both_scaled1',\n # OUTPUT_FLD, 'debug',\n )\n\n # model_type = 'pretrained'\n # fld_load_model = os.path.join(fld_save, 'model')\n # model = get_model(\n # model_type=model_type,\n # env=env,\n # learning_rate=learning_rate,\n # fld_load=fld_load_model,\n # )\n\n model.model.summary()\n\n agent = Agent(\n model=model,\n discount_factor=discount_factor,\n batch_size=batch_size,\n )\n\n visualizer = Visualizer(env.action_labels)\n\n simulator = Simulator(\n agent=agent,\n env=env,\n visualizer=visualizer,\n fld_save=fld_save,\n ma_window=ma_window,\n )\n\n if model_type != 'pretrained':\n click.secho('Training agent...', fg='green')\n simulator.train(\n n_episode=n_episode_training,\n save_per_episode=1,\n exploration_init=exploration_init,\n exploration_decay=exploration_decay,\n exploration_min=exploration_min,\n chart_per_episode=10,\n )\n\n click.secho('Testing agent...', fg='green')\n sampler.reset()\n simulator.test(\n n_episode=n_episode_testing,\n save_per_episode=1,\n subfld='out-sample-testing',\n verbose=True,\n )\n\n\ndef debug_one_episode():\n from app.sampler import PBSampler\n\n model_type = 'conv'\n n_episode_training = 1\n n_episode_testing = 10\n open_cost = 0.1\n\n sampler = PBSampler()\n window_state = 30 # set to month by default\n learning_rate = 1e-4\n discount_factor = 0.95\n batch_size = 8\n\n exploration_init = 1. # always explore at the beginning\n exploration_decay = 0.99\n exploration_min = 0.01\n ma_window = 60 # just to measure overall performance\n\n env = Market(\n sampler=sampler,\n window_state=window_state,\n open_cost=open_cost,\n )\n n_episode_training = len(sampler)\n model = get_model(\n model_type=model_type,\n env=env,\n learning_rate=learning_rate,\n )\n\n fld_save = os.path.join(\n OUTPUT_FLD, 'debug'\n )\n\n # fld_load_model = os.path.join(fld_save, 'model')\n # model = get_model(\n # model_type='pretrained',\n # env=env,\n # learning_rate=learning_rate,\n # fld_load=fld_load_model,\n # )\n model.model.summary()\n\n agent = Agent(\n model=model,\n discount_factor=discount_factor,\n batch_size=batch_size,\n )\n\n visualizer = Visualizer(env.action_labels)\n\n # env.sampler.offset = 90\n simulator = Simulator(\n agent=agent,\n env=env,\n visualizer=visualizer,\n fld_save=fld_save,\n ma_window=ma_window,\n )\n\n click.secho('One episode play...', fg='green')\n simulator.play_one_episode(\n exploration=1,\n rand_price=True,\n verbose=True,\n )\n\n\nif __name__ == '__main__':\n # main()\n # custom_launch()\n # debug_one_episode()\n play_launch()\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"14336407","text":"import requests\nimport urllib\nimport re\nimport sys\nfrom enum import Enum\n\nbaseURL = \"https://api.shodan.io\"\nkey = \"API-Key\"\ndebug = 1\n\nclass API(Enum):\n honeypot = \"labs/honeyscore\"\n dnsResolve = \"dns/resolve\"\n apiInfo = \"api-info\"\n\ndef constructURL(*path, **params):\n url = baseURL\n for r in path:\n url = '{}/{}'.format(url, r)\n params['key'] = key\n url = '{}?{}'.format(url, 
urllib.parse.urlencode(params))\n    return url\n\ndef makeURLRequest(ip):\n    r = requests.get(url=ip)\n    log_debug(\"URL Request: {} \\n Output: {}\".format(r.url,r.json()))\n    return r.json()\n\ndef validateIP(ip):\n    regex = r'^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)'\n    if (re.search(regex, ip)):\n        log_debug(\"Input is a valid IP\")\n        return ip\n    else:\n        log_debug(\"Resolving IP for {}\".format(ip))\n        return dnsResolve(ip)[ip]\n\ndef log_debug(msg):\n    # only print when the module-level debug flag is set\n    if debug:\n        print(msg)\n\ndef dnsResolve(webURL):\n    params = {webURL}\n    url = constructURL(API.dnsResolve.value,hostnames=webURL)\n    ip = makeURLRequest(url)\n    return ip\n\ndef printCredits():\n    print(makeURLRequest(constructURL(API.apiInfo.value)))\n\ndef ifHoneypot(url):\n    ip = validateIP(url)\n    honeyPotURL = constructURL(API.honeypot.value,ip)\n    probability = makeURLRequest(honeyPotURL)\n    if(probability == 0):\n        print(\"Not a honeypot\")\n    elif(probability == 1):\n        print(\"It's a honeypot\")\n    else:\n        print(\"Probability that it's a honeypot is {}%\".format(str(probability*100)))\n\n\n\nif __name__ == '__main__':\n    ifHoneypot(sys.argv[1])","sub_path":"shodan.py","file_name":"shodan.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524164172","text":"from math import factorial\nfat = int(input('Enter a number to compute its factorial: '))\nc = fat\n\nprint(f'Computing {fat}! = ', end='')\n\nwhile c > 0:\n    print(f'{c}', end='')\n    print(' x ' if c > 1 else f' = {factorial(fat)}', end='')\n    c -= 1\n","sub_path":"fatorial.py","file_name":"fatorial.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432099249","text":"import netCDF4 as nc\r\nimport sys\r\nimport numpy as np\r\nfrom MakeRGB import my_cmap\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom copy import deepcopy\r\nmap_cloud = deepcopy(my_cmap)\r\nmap_cloud.set_over((128 / 255.0, 128 / 255.0, 128 / 255.0))\r\npath = r\"D:\\Users\\Alexander\\ansel\\TEMP\\2021-05-19\\20210519120000-STAR-L3S_GHRSST-SSTsubskin-LEO_PM_D-ACSPO_V2.80-v02.0-fv01.0.nc\"\r\n\r\ndata = nc.Dataset(path,\"r\")\r\nprint(data.variables.keys())\r\nsst = np.array(data[\"sea_surface_temperature\"][:]).reshape(9000,18000)\r\nl2p_flags = np.array(data[\"l2p_flags\"][:]).reshape(9000,18000)\r\nsst[sst==-32768] = np.NaN\r\nprint(data.variables.keys())\r\n\r\ncloud_mask = (l2p_flags==16896) |(l2p_flags==-7676)\r\nsst[cloud_mask] = 600\r\n\r\nplt.imshow(sst,interpolation=\"none\",vmin=283,vmax=301,cmap=map_cloud)\r\n# plt.imshow(l2p_flags, interpolation=\"none\", cmap=my_cmap)\r\nplt.colorbar()\r\nplt.show()\r\nwhile not plt.waitforbuttonpress(): pass\r\nsys.exit()\r\n","sub_path":"create_images.py","file_name":"create_images.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"91995406","text":"import math\nimport random as rn\nfrom sqlalchemy import create_engine, or_, and_, MetaData, Table\nfrom sqlalchemy.sql import select\nimport json \n\nfrom sqs_handler import receive_message, send_message, delete_message\n\nclass Simulator:\n    def __init__(self, enable_contact_tracing, saver, numberofHomes, numberofOffices, numberofAgents, infection_risk_Office, infection_risk_Home, total_days_sick):\n        
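# Work is fanned out to the SQS queue in batches of self.batchsize; the\n        # ceil() batch counts computed below are how many per-batch completion\n        # messages step() waits for before it continues.\n        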
self.enable_contact_tracing = enable_contact_tracing\n        self.current_day = 0\n        self.step_size = 1\n        self._sick = total_days_sick\n        self.saver = saver\n        self.nrHomes= numberofHomes\n        self.nrOffices = numberofOffices\n        self.nrAgents = numberofAgents\n        self.inf_Office = infection_risk_Office\n        self.inf_Home = infection_risk_Home\n        self.batchsize = 10\n        self.queue_url_receive = 'https://sqs.eu-west-2.amazonaws.com/459864568246/conf_queue'\n        self.queue_url_send = 'https://sqs.eu-west-2.amazonaws.com/459864568246/sim_test'\n        self.number_of_batchesOffices = math.ceil(self.nrOffices/self.batchsize)\n        self.number_of_batchesHomes = math.ceil(self.nrHomes/self.batchsize)\n\n    # Work performed in one simulation step (a single simulated day)\n    def step(self):\n        # Simulate infections in each location\n\n        array = []\n        for office in range(self.nrOffices):\n            array.append(office+1)\n            if len(array) == self.batchsize or office+1 == self.nrOffices:\n                #simw.simulate_infection('office', array, self.inf_Office, self.current_day)\n                message = json.dumps({'method':'simulate_infection', 'location': 'office', 'array_subjects': array, 'risk': self.inf_Office, 'current_day': self.current_day})\n                send_message(message,self.queue_url_send)\n                array.clear()\n        \n        #wait all \n        nr_msg = 0\n        while nr_msg < self.number_of_batchesOffices:\n            \n            response = receive_message(self.queue_url_receive, 1000)\n            print(\"nr_msg office infections = \", nr_msg, \"expected :\", self.nrOffices)\n            try:\n                for message in response['Messages']:\n                    if message['MessageAttributes']['R']['StringValue']=='1':\n                        nr_msg+=1\n                        delete_message(message, self.queue_url_receive)\n\n            except KeyError as e:\n                print(repr(e))\n                print(\"Queue is empty\")\n        \n        array.clear()\n\n        for home in range(self.nrHomes):\n            array.append(home+1)\n            if len(array) == self.batchsize or home+1 == self.nrHomes:\n                #simw.simulate_infection('home', array, self.inf_Home, self.current_day)\n                message = json.dumps({'method':'simulate_infection', 'location': 'home', 'array_subjects': array, 'risk': self.inf_Home, 'current_day': self.current_day})\n                send_message(message,self.queue_url_send)\n                array.clear()\n        \n        #wait all\n        nr_msg = 0\n        while nr_msg < self.number_of_batchesHomes:\n            \n            print(\"nr_msg home infections = \", nr_msg, \"expected :\", self.nrHomes)\n            response = receive_message(self.queue_url_receive, 1000)\n            try:\n                for message in response['Messages']:\n                    if message['MessageAttributes']['R']['StringValue']=='1':\n                        nr_msg+=1\n                        delete_message(message, self.queue_url_receive)\n            except KeyError as e:\n                print(repr(e))\n                print(\"Queue is empty\")\n        \n        #End of day update \n        message = json.dumps({'method':'end_of_day', 'contact_tracing': int(self.enable_contact_tracing), 'max_sick_time':self._sick,'current_day': self.current_day})\n        send_message(message,self.queue_url_send)\n        \n        #wait end_of_day\n        reply = ''\n        while reply != 'end_of_day':\n            \n            print(\"waiting for end_of_day\")\n            response = receive_message(self.queue_url_receive, 1)\n            try:\n                reply = response['Messages'][0]['Body']\n                print(reply)\n                if response['Messages'][0]['MessageAttributes']['R']['StringValue']=='1':\n                    delete_message(response['Messages'][0], self.queue_url_receive)\n                    reply = 'end_of_day'\n\n            except KeyError as e:\n                print(repr(e))\n                print(\"Queue is empty\")\n        \n        self.save_status()\n        self.current_day += 1\n\n    def save_status(self):\n        host = \"sim-database.cum3wdeshheg.eu-west-2.rds.amazonaws.com\"\n        username = \"\"\n        password = \"\"\n        port = 3306\n        dbname = \"simdatabase\"\n        engine = create_engine(\"mysql+pymysql://\" + username +\":\" + password +\"@\"+host + 
\"/\"+dbname,echo = True)\n meta = MetaData(engine)\n conn = engine.connect()\n table = Table('Agent_Table', meta, autoload=True, autoload_with=engine)\n select_o= table.select()\n agent_array= conn.execute(select_o)\n currently_infected_agents = sum(1 if agent.is_infected else 0 for agent in agent_array)\n agent_arra= conn.execute(select_o)\n total_infected_agents = sum(1 if agent.has_been_infected else 0 for agent in agent_arra)\n agent_arr= conn.execute(select_o)\n currently_symptomatic_agents = sum(1 if agent.has_symptoms else 0 for agent in agent_arr)\n agent_ar= conn.execute(select_o)\n total_isolated_agents = sum(1 if agent.is_isolated else 0 for agent in agent_ar)\n agent_a= conn.execute(select_o)\n dead_agents = sum(1 if not agent.is_alive else 0 for agent in agent_a)\n self.saver.save_overview(self.current_day, currently_infected_agents, total_infected_agents, currently_symptomatic_agents,total_isolated_agents, dead_agents)\n\n","sub_path":"simulator/simulator_with_worker/Coordinator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"304428975","text":"import os\n\nfrom classes.CKafkaPC import KafkaPC\n\nprint(\"start 2c_print\")\n\nenv_vars = {'config_path': os.getenv('config_path'),\n 'config_section': os.getenv('config_section')}\n\nnew_c = KafkaPC(**env_vars)\nprint(\"created KafkaPC\")\n\ntry:\n while True:\n msg = new_c.consumer.poll(0.1)\n\n if msg is None:\n continue\n\n elif msg.error() is not None:\n print(f\"Error occured: {str(msg.error())}\")\n\n else:\n new_message = new_c.decode_msg(msg)\n print(f\"Received on topic '{msg.topic()}': {new_message}\")\n\nexcept KeyboardInterrupt:\n pass\n\nfinally:\n new_c.consumer.close()\n","sub_path":"Big_Data_Platform/Docker/Kafka_Client/Confluent_Kafka_Python/src/2c_print.py","file_name":"2c_print.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548013456","text":"#!/usr/bin/python3\n\nimport time\nimport socket\nimport re\nimport sys\nsuccess = 0\nfail = 0\ntimeFactor = 5\ndef check_server(address, port):\n\t# Create a TCP socket\n\ts = socket.socket()\n\tprint (\"Attempting to connect to %s on port %s\" % (address, port))\n\ttry:\n\t\ts.connect((address, port))\n\t\t#print (s)\n\t\tprint (\"Connected to %s on port %s\" % (address, port))\n\t\treturn (True,s)\n\texcept socket.error as e:\n\t\tprint (\"Connection to %s on port %s failed: %s\" % (address, port, e))\n\t\treturn (False,0)\n\t\t\ndef closePort(s):\n\ts.close()\nfor t in range(0,timeFactor):\n\tif __name__ == '__main__':\n\t\tfrom optparse import OptionParser\n\t\tparser = OptionParser()\n\n\t\tparser.add_option(\"-a\", \"--address\", dest=\"address\", default='localhost', help=\"ADDRESS for server\", metavar=\"ADDRESS\")\n\t\tparser.add_option(\"-p\", \"--port\", dest=\"port\", type=\"int\", default=80, help=\"PORT for server\", metavar=\"PORT\")\n\t\tparser.add_option(\"-c\", \"--count\", dest=\"count\", type=\"int\", default=50, help=\"COUNT for server\", metavar=\"COUNT\")\n\t\t(options, args) = parser.parse_args()\n\t\t#print ('options: %s, args: %s' % (options, args))\n\t\tfor n in range(0,options.count):\n\t\t\tcheck = check_server(options.address, options.port)\n\t\t\tprint ('check_server returned %s' % check[0])\n\t\t\tprint ('Count = {} in Sequence {}'.format(n,t))\n\t\t\tif check[0] == True:\n\t\t\t\tsuccess += 
1\n\t\t\t\tclosePort(check[1])\n\t\t\t\tprint ('Connection Closed\\n\\n')\n\t\t\tif check[0] == False:\n\t\t\t\tfail += 1\n\t\t\ttime.sleep(timeFactor)\n\t\ttimeFactor = timeFactor - 1\n\t\n\tprint ('\\n\\tSequence {} seconds Success = {}, Fail = {}\\n\\n'.format(timeFactor,success,fail))\n\tsuccess = 0\n\tfail = 0\nsys.exit(not check[0])\n\n","sub_path":"portconnect.py","file_name":"portconnect.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"501234098","text":"import xgboost as xgb\nfrom sklearn.datasets import load_svmlight_file\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom sklearn.externals import joblib\nimport numpy as np\nfrom scipy.sparse import hstack\nfrom sklearn.preprocessing import OneHotEncoder\ndef import_data(data,label,path):\n    for path_tmp in path:\n        file = open(path_tmp)\n        file.readline() # skip the header\n        for line in file:\n            cons = line.strip().split(\"\\t\")\n            data.append(list(map(float, cons[5].split(',')))\n                        )\n            label.append(float(cons[-2]))\n    return np.array(data),np.array(label)\ndef xgboost_lr_train():\n    path = [\n        'data_11_02.csv','data_11_03.csv']\n    data, label = import_data([], [], path)\n\n    X_train, X_test, y_train, y_test = train_test_split(data, label, test_size = 0.1, random_state = 42)\n\n\n    xgboost = xgb.XGBClassifier(nthread=4, learning_rate=0.02,min_child_weight=5,tree_method='gpu_hist',\n                                n_estimators=500, max_depth=5)\n    global model_xgb,model_tmp,model_lr\n    model_xgb=xgboost.fit(X_train, y_train)\n\n    y_pred_test = xgboost.predict_proba(X_test)[:, 1]\n    xgb_test_auc = roc_auc_score(y_test, y_pred_test)\n    print('xgboost test auc: %.5f' % xgb_test_auc)\n\n    X_train_leaves = xgboost.apply(X_train)\n    X_test_leaves = xgboost.apply(X_test)\n\n\n    All_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)\n    All_leaves = All_leaves.astype(np.int32)\n\n    xgbenc = OneHotEncoder()\n    X_trans = xgbenc.fit_transform(All_leaves)\n\n    (train_rows, cols) = X_train_leaves.shape\n\n    lr = LogisticRegression()\n    model_tmp=lr.fit(X_trans[:train_rows, :], y_train)\n    y_pred_xgblr1 = lr.predict_proba(X_trans[train_rows:, :])[:, 1]\n    xgb_lr_auc1 = roc_auc_score(y_test, y_pred_xgblr1)\n    print('LR based on XGB AUC: %.5f' % xgb_lr_auc1)\n\n    lr = LogisticRegression(n_jobs=-1)\n    X_train_ext = hstack([X_trans[:train_rows, :], X_train])\n    X_test_ext = hstack([X_trans[train_rows:, :], X_test])\n\n    model_lr=lr.fit(X_train_ext, y_train)\n    joblib.dump(model_lr,'lr')\n    y_pred_xgblr2 = lr.predict_proba(X_test_ext)[:, 1]\n    xgb_lr_auc2 = roc_auc_score(y_test, y_pred_xgblr2)\n    print('AUC based on combined features of lr: %.5f' % xgb_lr_auc2)\n\nif __name__ == '__main__':\n    xgboost_lr_train()\n'''for model in model_all:\n    def value(features):\n        # print(features)\n        val = 0\n        val += model.predict_proba(np.array(features).reshape(1, -1))[:, 1]\n        # print(val)\n        # val=val/10\n        return val'''\n","sub_path":"优惠券/code/xgb_lr (2).py","file_name":"xgb_lr (2).py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"109964360","text":"# -*- coding: utf-8 -*-\n\nimport database\nimport os\nimport re\nimport requests\nimport sites\nimport logging\n\n\nclass Downloader:\n    \"\"\"Downloader class that manages individual downloads\"\"\"\n\n    def __init__(self, config):\n        self.grabber = 
sites.get_grabber(config)\n        logging.debug('site grabber {} created'.format(self.grabber))\n        self.client = TransmissionDownloader(config)\n        logging.debug('client {} created'.format(self.client))\n        self.conf = config\n        logging.debug('downloader initialized')\n\n    def download(self, show, season, ep):\n        \"\"\"Download the given episode and return True if succeeded.\n\n        Arguments:\n        ----------\n        show: string\n            Name of the show\n\n        season: int\n            Season number\n\n        ep: int\n            Episode number\n        \"\"\"\n        #Invoke the grabber to get the magnet link\n        uri = self.grabber.get_url(show, season, ep)\n        logging.info('uri grabbed: {}'.format(uri))\n\n        #Invoke the client to add the file to the program\n        uid = self.client.download(show, season, ep, uri)\n        logging.debug('done messaging client')\n\n        #Should return True when successfully added\n        return uid\n\n\nclass SkeletonDownloader:\n    \"\"\"Downloader baseclass\"\"\"\n\n    def __init__(self, config):\n        self.c = config\n        logging.debug('superclass skeletondownloader initialized')\n\n    def get_status(self, uuid):\n        \"\"\"Gives the status of the torrent in the format of database meaning\n        TODO when not found\n        BUSY when downloading\n        DONE when seeding\n\n        Arguments:\n        ----------\n        uuid: string\n            uuid of the download\n        \"\"\"\n        raise NotImplementedError()\n\n    def download(self, show, season, ep, uri):\n        \"\"\"Downloads the torrent file or magnet link from the given uri,\n        returns unique uuid to refer to later\n\n        Arguments:\n        ----------\n        show: string\n            Name of the show\n\n        season: int\n            Season number\n\n        ep: int\n            Episode number\n        \"\"\"\n        raise NotImplementedError()\n\n    def delete(self, uuid):\n        \"\"\"Removes the episode from the download client, return True when\n        successful\n\n        Arguments:\n        ----------\n        uuid: string\n            uuid of the download\n        \"\"\"\n        raise NotImplementedError()\n\n\n\nclass TransmissionDownloader(SkeletonDownloader):\n    \"\"\"Transmission downloader\"\"\"\n\n    json_add = '{{\"arguments\":{{\"download-dir\":\"{}\",\"filename\":\"{}\"}},\"method\"\\\n:\"torrent-add\",\"tag\": 1337}}'\n\n    json_del = '{{\"arguments\":{{\"ids\":[{}]}},\"method\": \"torrent-remove\",\"tag\":\\\n1337}}'\n\n    json_status = '{\"arguments\":{\"fields\":[\"hashString\",\"status\",\"uploadRatio\"\\\n,\"id\"]},\"method\":\"torrent-get\",\"tag\":1338}'\n\n    extract_hash = re.compile(\n        '{\\s*\"hashString\"\\s*:\\s*\"(?P<h>.*?)\"\\s*,\\s*\"id\"\\s*:\\s*' +\n        '(?P<i>.*?)\\s*,\\s*\"status\"\\s*:\\s*(?P<s>.*?)\\s*,\\s*\"upl' +\n        'oadRatio\"\\s*:\\s*(?P<r>.*?)\\s*}')\n\n    status_convert = {\n        '0': database.Database.OTHER,\n        '1': database.Database.BUSY,\n        '2': database.Database.BUSY,\n        '3': database.Database.BUSY,\n        '4': database.Database.BUSY,\n        '5': database.Database.DONE,\n        '6': database.Database.DONE\n    }\n\n    def __init__(self, config):\n        SkeletonDownloader.__init__(self, config)\n        self.sid = ''\n        logging.debug('transmission downloader initialized')\n\n    def request(self, data):\n        logging.debug('sending request {}'.format(data))\n        headers = {'X-Transmission-Session-Id': self.sid} if self.sid else {}\n        r = requests.post(self.c['host'], data=data, headers=headers)\n        if r.status_code == 409:\n            logging.debug('409 received with body')\n            self.sid = re.search(\n                'X-Transmission-Session-Id:\\s*(.*)',\n                r.text).group(1)\n            logging.debug('sid parsed and calling function again')\n            return self.request(data)\n        logging.debug('request successful, returned: {}'.format(r.text))\n        return r.text\n\n    def get_status(self, uuid):\n        r = self.request(self.json_status)\n        for match in self.extract_hash.finditer(r):\n            
logging.debug('trying to match hash')\n            if match.group('h') == uuid:\n                st = match.groupdict()\n                st['s'] = self.status_convert[match.group('s')]\n                logging.debug('match found, returning status {}'.format(st))\n                return st\n        logging.debug('hash not found, assuming REMOVED')\n        return {'s': database.Database.REMOVED}\n\n    def download(self, show, season, ep, uri):\n        if not uri:\n            logging.info('no uri found, skipping')\n            return\n        directory = '{}db/{}/S{:02}/E{:02}'.format(self.c['droot'], show,\n                                                   season, ep)\n        logging.debug('os.path.exists({})'.format(directory))\n        if not os.path.exists(directory):\n            if not self.c['dry']:\n                os.makedirs(directory)\n                os.chmod(directory, self.c['permission'])\n            logging.info('{} created'.format(directory))\n        jsond = self.json_add.format(directory, uri)\n        logging.debug('json data formatted: {}'.format(jsond))\n        hashstring = ''\n        if not self.c['dry']:\n            r = self.request(jsond)\n            logging.debug('response successful')\n            hashstring = re.search('\"hashString\"\\s*:\\s*\"(?P<h>.*?)\"', r)\n#            logging.debug('hashstring {} parsed'.format(hashstring.group('h')))\n            logging.info('tried adding {} {:02} {:02}'.format(show, season,\n                                                              ep))\n        return None if not hashstring else hashstring.group('h')\n\n    def delete(self, uuid):\n        status = self.get_status(uuid)\n        logging.debug('going to delete: {}'.format(uuid))\n        if not self.c['dry']:\n            status = self.request(self.json_del.format(status['i']))\n        logging.debug('done deleting, response: {}'.format(status))\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"340196412","text":"import sys\nfrom PyQt4 import QtGui, QtCore\nfrom items_management.group_properties import GroupProperties\n\n\nclass PropertiesGroupView(QtGui.QDialog):\n    def __init__(self, parent_object, list_files):\n        super(PropertiesGroupView, self).__init__(parent_object)\n        self.group_properties = GroupProperties(list_files)\n        self.init_ui()\n\n    def init_ui(self):\n        height = 380\n        width = 280\n        self.setFixedSize(height, width)\n        self.setWindowTitle(\"General Properties\")\n        self.show()\n        self.create_info_components()\n        self.create_buttons_components()\n\n    def create_info_components(self):\n        self.grid = QtGui.QGridLayout(self)\n\n        # Set File Info into Vbox Layout\n        self.vbox = QtGui.QVBoxLayout()\n        location_label = QtGui.QLabel(\"File location: \" + str(self.group_properties.get_location()), self)\n        self.vbox.addWidget(location_label)\n        self.vbox.addStretch(0.5)\n        type_label = QtGui.QLabel(\"Type: \" + str(self.group_properties.get_type()), self)\n        self.vbox.addWidget(type_label)\n        self.vbox.addStretch(0.5)\n        size_label = QtGui.QLabel(\"Size: \" + str(self.group_properties.get_size_in_bytes()), self)\n        self.vbox.addWidget(size_label)\n        self.vbox.addStretch(0.5)\n        attributes_label = QtGui.QLabel(\"Attributes: \", self)\n        self.vbox.addWidget(attributes_label)\n\n        read_only_cb = QtGui.QCheckBox('&Read only', self)\n        read_only_cb.setChecked(self.group_properties.is_readonly())\n        self.vbox.addWidget(read_only_cb)\n\n        hidden_cb = QtGui.QCheckBox('&Hidden', self)\n        hidden_cb.setChecked(self.group_properties.is_hidden())\n        self.vbox.addWidget(hidden_cb)\n\n        self.vbox.addStretch()\n\n    def create_buttons_components(self):\n        # Set Buttons into Hbox Layout\n        first_column = 0\n        first_row = 0\n        second_row = 1\n\n        self.hbox = QtGui.QHBoxLayout()\n        generate_ok_button = QtGui.QPushButton(\"OK\", self)\n        
generate_ok_button.resize(generate_ok_button.sizeHint())\n self.hbox.addWidget(generate_ok_button)\n\n generate_cancel_button = QtGui.QPushButton(\"Cancel\", self)\n generate_cancel_button.resize(generate_cancel_button.sizeHint())\n self.hbox.addWidget(generate_cancel_button)\n\n generate_apply_button = QtGui.QPushButton(\"Apply\", self)\n generate_apply_button.resize(generate_apply_button.sizeHint())\n self.hbox.addWidget(generate_apply_button)\n\n self.grid.addLayout(self.vbox, first_row, first_column)\n self.grid.addLayout(self.hbox, second_row, first_column)","sub_path":"views/panels/properties_group_view.py","file_name":"properties_group_view.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"464051227","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport sys\n\nnumParentLambdas = int(sys.argv[1])\n\n\nlogdir = \"video-lambda-logs\"\n\nf1 = open(\"crail-logs-reqids.txt\", \"w\")\nf2 = open(\"crail-logs-reqids-netstats.txt\", \"w\")\n\nfor i in range (0,numParentLambdas):\n\tf1.write(\"/\" + logdir + \"/lambda\" + str(i) + \"\\n\")\n\n\nfor i in range (0,numParentLambdas):\n\tf2.write(\"/\" + logdir + \"/netstats-lambda\" + str(i) + \"\\n\")\n","sub_path":"logs/gen-crail-lognames.py","file_name":"gen-crail-lognames.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"256488411","text":"# built-in\nimport json\nimport os\nfrom typing import Dict\n\n# third-party\nimport numpy as np\nimport pandas as pd\n\n# local\nfrom biosppy import utils\n\ndef signal_temp(signal, FS):\n \"\"\"Compute various metrics describing the signal.\n\n Parameters\n ----------\n signal : array\n Input signal.\n\n FS : float\n Sampling frequency\n Returns\n -------\n maxAmp : float\n Signal maximum amplitude.\n\n minAmp : float\n Signal minimum amplitude.\n\n max : float\n Signal max value.\n\n min : float\n Signal min value.\n\n dist : float\n Length of the signal.\n\n autocorr : float\n Signal autocorrelation.\n\n zero_cross : int\n Number of times the sinal crosses the zero axis.\n\n meanadiff : float\n Mean absolute differences.\n\n medadiff : float\n Median absolute differences.\n\n mindiff : float\n Min differences.\n\n maxdiff : float\n Maximum differences.\n\n sadiff : float\n Sum of absolute differences.\n\n meandiff : float\n Mean of differences.\n\n meddiff : float\n Median of differences.\n\n temp_centroid : float\n Temporal centroid.\n\n total_energy : float\n Total energy.\n\n minpeaks : int\n Number of minimum peaks.\n\n maxpeaks : int\n Number of maximum peaks.\n\n temp_dev : float\n Temporal deviation.\n\n counter : int\n Length of the signal.\n\n References\n ----------\n TSFEL library: https://github.com/fraunhoferportugal/tsfel\n Peeters, Geoffroy. (2004). 
A large set of audio features for sound description (similarity and classification) in the CUIDADO project.\n \"\"\"\n\n # check inputs\n # if signal is None or signal == []:\n # print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # assert len(signal) > 1, 'Signal size < 1'\n sig_diff = np.diff(signal)\n mean = np.mean(signal)\n maxAmp = np.max(np.abs(signal - mean))\n minAmp = np.min(np.abs(signal - mean))\n max = np.max(signal)\n min = np.min(signal)\n dist = np.sum([np.sqrt(1+df**2) for df in sig_diff])\n # autocorrelation\n autocorr = np.correlate(signal, signal)[0]\n\n zero_cross = len(np.where(np.diff(np.sign(signal)))[0])\n meanadiff = np.mean(np.abs(sig_diff))\n medadiff = np.median(np.abs(sig_diff))\n mindiff = np.min(np.abs(sig_diff))\n # max absolute differences\n maxdiff = np.max(np.abs(sig_diff))\n # sum of absolute differences\n sadiff = np.sum(abs(sig_diff))\n # mean of differences\n meandiff = np.mean(sig_diff)\n # median of differences\n meddiff = np.median(sig_diff)\n # total energy\n time = range(len(signal))[1:]\n time = [float(x) / FS for x in time]\n signal = signal[1:]\n total_energy = np.sum(np.array(signal)**2)/(time[-1]-time[0])\n # number of minimum peaks\n minpeaks = np.sum([1 for nd in range(len(sig_diff[:-1])) if (sig_diff[nd]<0 and sig_diff[nd+1]>0)])\n # number of maximum peaks\n maxpeaks = np.sum([1 for nd in range(len(sig_diff[:-1])) if (sig_diff[nd+1]<0 and sig_diff[nd]>0)])\n # temporal deviation\n temp_dev = (1/np.sum(signal)) * np.sum((signal[:] - signal[1])/np.array(time))\n counter = len(signal)\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return maxAmp, minAmp, max, min, dist, autocorr, zero_cross, meanadiff, medadiff, mindiff, maxdiff, sadiff, meandiff, meddiff, total_energy, minpeaks, maxpeaks, temp_dev, counter\n","sub_path":"features/temporal_features.py","file_name":"temporal_features.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624117744","text":"from django import forms\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\n\n\nclass SubmitMixin(object):\n def __init__(self, *args, **kwargs):\n super(SubmitMixin, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass MarketViewForm(SubmitMixin, forms.Form):\n site_lookup_key = forms.CharField(\n label='Publisher/Property Name',\n max_length=100,\n required=True,\n )\n\n\nclass BrandLookupForm(SubmitMixin, forms.Form):\n brand_lookup_key = forms.CharField(\n label='Brand/Advertiser Name',\n max_length=100,\n required=True,\n )\n\n\nclass CampaignInstanceForm(SubmitMixin, forms.Form):\n campaign_id = forms.CharField(\n label='Campaign ID', \n max_length=100,\n required=True,\n )\n\n\nclass PlacementResourceForm(SubmitMixin, forms.Form):\n campaign_id = forms.CharField(\n label='Campaign ID', \n max_length=100,\n required=True,\n )\n\nclass PlacementForm(SubmitMixin, forms.Form):\n campaign_id = forms.CharField(\n label='Campaign ID', \n max_length=100,\n required=True,\n )\n placement_id = forms.CharField(\n label='Placement ID', \n max_length=100,\n required=True,\n )\n\n\nclass CreativeResourceForm(SubmitMixin, forms.Form):\n campaign_id = forms.CharField(\n label='Campaign ID', \n max_length=100,\n required=True,\n )\n placement_id = forms.CharField(\n label='Placement ID', \n max_length=100,\n required=True,\n )\n creative_id = forms.CharField(\n 
label='Creative ID', \n        max_length=100,\n        required=True,\n    )\n\n\nclass TagResourceForm(SubmitMixin, forms.Form):\n    campaign_id = forms.CharField(\n        label='Campaign ID', \n        max_length=100,\n        required=True,\n    )\n\n\nclass UnregisteredPlacementsResourceForm(SubmitMixin, forms.Form):\n    pass\n\n\nclass CancelPlacementResourceForm(SubmitMixin, forms.Form):\n    pass\n\n","sub_path":"vagrant/dar/tags/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"566091849","text":"import datetime\nimport re\nimport logging\n\n#Takes a string and tries to determine if it is an ACE, ACL, or Not sure.\n'''\nWhen dealing with access lists there will be 3 types. ACL_Name, ACL, and ACE (access list entries).\nACL_Name: \t\taccess-list TEST; 3 elements; name hash: 0xd37fdb2b\nACL \t\t\taccess-list TEST line 1 extended permit ip object-group LAN any 0xeb9e6e99\n\t\t\tACE:\t\taccess-list TEST line 1 extended permit ip 10.10.10.0 255.255.255.0 any (hitcnt=0) 0x365de33c\n\t\t\tACE:\t\taccess-list TEST line 1 extended permit ip 10.10.20.0 255.255.255.0 any (hitcnt=0) 0xc98d1b29\n\t\t\tACE:\t\taccess-list TEST line 1 extended permit ip 10.10.30.0 255.255.255.0 any (hitcnt=2342345222) 0x2a9982d3\nCisco HASH re match:\nre.search(0x[0-9A-Fa-f]{8})\n\nHitcount re match:\n\n'''\nclass ACL:\n\tlog = logging.getLogger('Mod_acl')\n\tfc = logging.FileHandler('acl.log')\n\t#Parsing should be incoming as a string. Any ACL from Database should come in form of dict.\n\tdef __init__(self, new_str= None, dev= None, db_acl= None):\n\t\tif db_acl == None and new_str != None:\n\t\t\tself.type = None\n\t\t\tself.id = None\n\t\t\tself.line = None\n\t\t\tself.hit_count = None\n\t\t\tself.device = dev\n\t\t\tself.org_str = new_str\n\t\t\tself.parse_acl(new_str)\n\t\telif isinstance(db_acl, dict):\n\t\t\tself.type = db_acl['fk_type']\n\t\t\tself.id = db_acl['id']\n\t\t\tself.line = db_acl['line']\n\t\t\tself.hit_count = db_acl['Hits']\n\t\t\tif dev == None:\n\t\t\t\tself.device = db_acl['Device']\n\t\t\telse:\n\t\t\t\tself.device = dev\n\t\t\tself.org_str = db_acl['ACL']\n\t\telse:\n\t\t\tself.type = None\n\t\t\tself.id = None\n\t\t\tself.line = None\n\t\t\tself.hit_count = None\n\t\t\tself.org_str = None\n\n\tdef parse_acl(self, s_acl):\n\t\tif re.search('^access-list', s_acl, re.I):\n\t\t\tself.set_type(s_acl)\n\t\t\tself.set_line(s_acl)\n\t\t\tself.set_id(s_acl)\n\t\t\tself.set_hit_count(s_acl)\n\t\t\tself.org_str = s_acl\n\t\telse:\n\t\t\tlogging.info('Does not appear to be an ACL item: ' + s_acl)\n\n\tdef set_line(self, s_acl):\n\t\tline_num = re.search('line ([0-9]+)', s_acl, re.I)\n\t\tif line_num != None:\n\t\t\tself.line = line_num.group(1)\n\t\telse:\n\t\t\tself.line = None\n\n\tdef set_type(self, s_acl):\n\t\tif re.search('name hash:', s_acl, re.I):\n\t\t\tself.type = 1\n\t\telif re.search('object-group', s_acl, re.I):\n\t\t\tself.type = 3\n\t\telif re.search('access-list', s_acl, re.I) and re.search('hitcnt=', s_acl, re.I) or re.search('fqdn',s_acl, re.I):\n\t\t\tself.type = 2\n\t\telse:\n\t\t\tself.type = None\n\t\t\tlogging.info('Not Sure: ' + s_acl)\n\n\tdef set_id(self, s_acl):\n\t\tresult = re.search('0x[0-9A-Fa-f]{5,9}',s_acl)\n\t\tif result:\n\t\t\tself.id = result.group(0)\n\t\telse:\n\t\t\tlogging.info('No hash in: ' + s_acl)\n\n\n\tdef set_hit_count(self, s_acl):\n\t\ta_result = re.search('hitcnt=[0-9]{1,}', s_acl)\n\t\tif a_result:\n\t\t\tb_result = re.search('[0-9]{1,}', 
a_result.group(0))\n\t\t\tself.hit_count = int(b_result.group(0))\n\t\telse:\n\t\t\tif self.type == 3 or self.type == 1:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tlogging.info('No hitcnt in: ' + s_acl)\n\t\n\t# access-list TEST line 1 extended permit ip 10.10.30.0 255.255.255.0 any (hitcnt=2342345222) 0x2a9982d3\n\t# acl_dict = {'Type': {1-3}, 'ID': '0x2a9982d3', 'Hits': int(2342345222), 'Device': 12 'ACL': 'access-list TEST line 1 extended permit ip 10.10.30.0 255.255.255.0 any (hitcnt=2342345222) 0x2a9982d3'}\n\tdef get_acl_dict(self):\n\t\tacl_dict = {'fk_type': self.type,'line': self.line, 'id': self.id, 'Hits': self.hit_count, 'fk_device': self.device, 'acl': self.org_str}\n\t\treturn acl_dict\n\t\n\tdef __str__(self):\n\t\treturn self.org_str\n\n\n#Class based on cisco's brief access-list\n#Example of DB return of HitCount Obj.\n#{'rule_uid': '00146461', 'parent_uid': '419b789d', 'hit_count': '000002bf', 'last_hit_date': '5787f0a2', 'fk_hitcount_status': 1, 'fk_device': 2, 'fk_acl_name': 'acl_main'}\nclass HitCount:\n\tdef __init__(self, dev = None, name = None, str_full = None, dct_full = None):\n\t\tif str_full: #Nate\n\t\t\tcheck = re.split(' ',str_full)\n\t\t\tfor i in check:\n\t\t\t\tt = self.validate_hex(i)\n\t\t\t\tif not t:\n\t\t\t\t\tstr_full = None\n\t\t\t\t\tbreak\n\t\t\tself.org_str = str_full\n\t\t\tself.acl_id = None\n\t\t\tself.acl_parent = None\n\t\t\tself.hit_count = None\n\t\t\tself.ls_hex_date = None\n\t\t\tself.lh_date = None\n\t\t\tself.last_update = None\n\t\t\tif self.parse_str(str_full):\n\t\t\t\tself.device = dev\n\t\t\t\tself.acl_name = name\n\t\t\telse:\n\t\t\t\tself.device = None\n\t\t\t\tself.acl_name = None\n\t\telif dct_full != None:\n\t\t\tself.org_str = '%s %s %s %s' % (dct_full['rule_uid'],dct_full['parent_uid'],dct_full['hit_count'],dct_full['last_hit_date'])\n\t\t\tself.acl_id = self.validate_hex(dct_full['rule_uid'])\n\t\t\tself.acl_parent = self.validate_hex(dct_full['parent_uid'])\n\t\t\tself.hit_count = self.validate_hex(dct_full['hit_count'])\n\t\t\tself.ls_hex_date = self.validate_hex(dct_full['last_hit_date'])\n\t\t\tself.lh_date = self.shex_to_date(dct_full['last_hit_date'])\n\t\t\tself.device = dct_full['fk_device']\n\t\t\tself.acl_name = dct_full['fk_acl_name']\n\t\t\tself.last_update = dct_full['last_update']\n\t\telse:\n\t\t\tself.org_str = None\n\t\t\tself.acl_id = None\n\t\t\tself.acl_parent = None\n\t\t\tself.hit_count = None\n\t\t\tself.ls_hex_date = None\n\n\n#Sets all based on string\n\tdef parse_str(self,sHC):\n\t\tif sHC:\n\t\t\tls_hex = re.split(' ',sHC)\n\t\t\tif len(ls_hex) == 4:\n\t\t\t\tself.acl_id = self.validate_hex(ls_hex[0])\n\t\t\t\tself.acl_parent = self.validate_hex(ls_hex[1])\n\t\t\t\tself.hit_count = self.validate_hex(ls_hex[2])\n\t\t\t\tself.ls_hex_date = self.validate_hex(ls_hex[3])\n\t\t\t\tself.lh_date = self.shex_to_date(ls_hex[3])\n\t\t\t\treturn True\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\tdef parse_dhitcount(self,dHC):\n\t\tif dHC:\n\t\t\tself.acl_id = self.validate_hex(dHC['acl_id'])\n\t\t\tself.acl_parent = self.validate_hex(dHC['acl_parent'])\n\t\t\tself.hit_count = self.validate_hex(dHC['hit_count'])\n\t\t\tself.ls_hex_date = self.validate_hex(dHC['ls_hex_date'])\n\t\t\tself.lh_date = self.shex_to_date(dHC['ls_hex_date'])\n\n\tdef shex_to_date(self,h):\n\t\tt = datetime.datetime.fromtimestamp(self.shex_to_float(h))\n\t\ttt = t.strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\treturn tt\n\n\tdef shex_to_float(self,str):\n\t\tf = float(int(str,16))\n\t\treturn f\n\n\tdef 
validate_hex(self,str):\n\t\tif str != None:\n\t\t\tif re.match('^[A-Fa-f0-9Xx]{8}',str):\n\t\t\t\treturn str\n\t\t\telse:\n\t\t\t\treturn None\n\n#hex string to Hex value\n\tdef str_to_hex(self,str):\n\t\th = hex(int(str,16))\n\t\treturn h\n\n\tdef get_hc_dict(self):\n\t\td = {'rule_uid': self.acl_id, 'parent_uid': self.acl_parent,\n\t\t\t'hit_count':self.hit_count, 'last_hit_date':self.ls_hex_date,\n\t\t\t'fk_device': self.device, 'fk_acl_name': self.acl_name}\n\t\treturn d\n\n#OverRides\n\tdef __str__(self):\n\t\treturn self.org_str\n\n\tdef __eq__(self,obj):\n\t\tif self.acl_id == obj.acl_id and self.device == obj.device and self.acl_name == obj.acl_name:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef __hash__(self):\n\t\treturn hash((self.acl_id,self.device,self.acl_name))\n\n\tdef __hash_time__(self):\n\t\treturn hash((self.acl_id,self.device,self.acl_name,self.ls_hex_date))\n\n\n\n#*** acl methods on mass import ***\n# loop through new HC objs and compared to old HC objs. If last hit changed update DB.\ndef update_compare(hc_new, hc_old):\n\tupdate = list()\n\tfor hcn in hc_new:\n\t\thash_id_new = hcn.__hash__()\n\t\tfor hco in hc_old:\n\t\t\thash_id_old = hco.__hash__()\n\t\t\tif hash_id_new == hash_id_old:\n\t\t\t\tif hcn.__hash_time__() != hco.__hash_time__():\n\t\t\t\t\tupdate.append(hcn)\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcontinue\n\tif len(update) > 0:\n\t\treturn update\n\telse:\n\t\treturn None\n\n#Returns list of acls objects cleaned up\ndef parse_buffer_strs(buf_str):\n\tls_strs = buf_str.split('\\r\\n')\n\tfor i in range(0,len(ls_strs)):\n\t\tls_strs[i] = ls_strs[i].strip()\n\treturn ls_strs\n\n#Returns a list of acl names on a device. Limit one device parse per call.\ndef parse_acl_names(ls_acl):\n\tls_m = []\n\tfor i in ls_acl:\n\t\tm = re.search('access-list ([A-Za-z0-9\\-]+)', i)\n\t\tif m:\n\t\t\tls_m.append(m.group(1))\n\treturn ls_m\n\n# Returns list of ACL objects (dict)\ndef imp_ls_acls(ls_acl, device_id):\n\tls_acl_objs = list()\n\tfor s_acl in ls_acl:\n\t\ta = ACL(s_acl, device_id)\n\t\tls_acl_objs.append(a)\n\treturn ls_acl_objs\n\n#Returns list of HitCount (dict)\ndef imp_ls_hits_str(device,ls_hit,acl_name):\n\tls_hit_objs = list()\n\tfor s_hit in ls_hit:\n\t\ta = HitCount(dev = device, name = acl_name, str_full= s_hit)\n\t\tif a and a.acl_id:\n\t\t\tls_hit_objs.append(a)\n\treturn ls_hit_objs\n\ndef imp_ls_hits_db(device,db_hit,acl_name):\n\tls_hit_objs = list()\n\tfor db_hit in db_hit:\n\t\ta = HitCount(dev = device, name = acl_name, dct_full= db_hit)\n\t\tif a and a.acl_id:\n\t\t\tls_hit_objs.append(a)\n\treturn ls_hit_objs\n","sub_path":"acltracker/acl.py","file_name":"acl.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"325407939","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nfrom functools import wraps\nimport time\ndef timmer(func):\n    @wraps(func)\n    def inner(*args,**kwargs):\n        start=time.time()\n        res=func(*args,**kwargs)\n        stop=time.time()\n        print('run time is %s' %(stop-start))\n        return res\n    # inner.__doc__=func.__doc__\n    # inner.__name__=func.__name__\n    return inner\n\n@timmer\ndef index(name): #index=inner\n    '''the index function'''\n    time.sleep(1)\n    print('welcome %s to index' %name)\n    return 1111\n\n# res=index('egon')\n# print(res)\n\nprint(help(index))\n","sub_path":"day3/装饰器/9 
wraps补充.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"621946791","text":"from flask import Blueprint, jsonify\nfrom flask_jwt import jwt_required\nfrom flask_babel import gettext as _\n\nfrom http_quest.quiz.model import QuizType\nfrom http_quest.quiz.quiz import create_new_candidate_token\nfrom http_quest.quiz.schema import new_candidate_token_schema\nfrom http_quest.utilities import validate_json\n\nquiz_view = Blueprint('quiz', __name__)\n\n\n@quiz_view.route('/quiz/new_candidate_token', methods=('POST',))\n@validate_json(new_candidate_token_schema)\n@jwt_required()\ndef create_candidate_token():\n response = {\n 'message': _('Token is successfully generated and sent by email to you and the candidate.')\n }\n create_new_candidate_token()\n return jsonify(response), 200\n\n\n@quiz_view.route('/quiz/list_quiz_types', methods=('GET',))\n@jwt_required()\ndef list_quiz_types():\n response = {\n 'list_quiz_types': [str(type_) for type_ in QuizType]\n }\n return jsonify(response), 200\n","sub_path":"http_quest/quiz/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"526357246","text":"import requests\n\ndef pre_login():\n url=\"https://uums.test.chinaclear.cn/login.action\"\n payload={}\n files={}\n headers={\n 'Host':'uums.test.chinaclear.cn',\n 'Connection':'keep-alive',\n 'Upgrade-Insecure-Requests':'1',\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'\n 'application/signed-exchange;v=b3;q=0.9',\n 'Purpose':'prefetch',\n 'Sec-Fetch-Site':'none',\n 'Sec-Fetch-Mode':'navigate',\n 'Sec-Fetch-User':'?1',\n 'Sec-Fetch-Dest':'document',\n 'Accept-Encoding':'gzip,deflate,br',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n }\n response=requests.request(\"GET\",url,headers=headers,data=payload,files=files,verify=False)\n session = requests.session()\n return session\n\nif __name__ == '__main__':\n a = pre_login()\n print(a)","sub_path":"Python20191110/test20191110/webTest/intetface/pre_login.py","file_name":"pre_login.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"304282400","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-locals\n# pylint: disable=unused-argument\n\nfrom azure.cli.core.util import sdk_no_wait\n\n\ndef account_subscription_create_subscription(cmd, client,\n billing_account_name,\n billing_profile_name,\n invoice_section_name,\n display_name,\n sku_id,\n cost_center=None,\n owner=None,\n management_group_id=None,\n no_wait=False):\n\n body = {}\n body['display_name'] = display_name\n body['sku_id'] = sku_id\n body['cost_center'] = cost_center\n body['owner'] = {'object_id': owner}\n body['management_group_id'] = management_group_id\n return sdk_no_wait(no_wait, client.create_subscription, billing_account_name=billing_account_name, billing_profile_name=billing_profile_name, invoice_section_name=invoice_section_name, body=body)\n\n\ndef account_subscription_create_subscription_in_enrollment_account(cmd, client,\n enrollment_account_name,\n display_name=None,\n management_group_id=None,\n owners=None,\n offer_type=None,\n no_wait=False):\n if owners is not None:\n owners = [{'object_id': x} for x in owners]\n\n body = {}\n body['display_name'] = display_name\n body['management_group_id'] = management_group_id\n body['owners'] = owners\n body['offer_type'] = offer_type\n return sdk_no_wait(no_wait, client.create_subscription_in_enrollment_account, enrollment_account_name=enrollment_account_name, body=body)\n\n\ndef account_subscription_create_csp_subscription(cmd, client,\n billing_account_name,\n customer_name,\n display_name,\n sku_id,\n reseller_id=None,\n no_wait=False):\n body = {}\n body['display_name'] = display_name\n body['sku_id'] = sku_id\n body['reseller_id'] = reseller_id\n return sdk_no_wait(no_wait, client.create_csp_subscription, billing_account_name=billing_account_name, customer_name=customer_name, body=body)\n\n\ndef account_subscription_rename(cmd, client, subscription_id,\n subscription_name=None):\n return client.rename(subscription_id=subscription_id, subscription_name=subscription_name)\n\n\ndef account_subscription_cancel(cmd, client, subscription_id):\n return client.cancel(subscription_id=subscription_id)\n\n\ndef account_subscription_enable(cmd, client, subscription_id):\n return client.enable(subscription_id=subscription_id)\n","sub_path":"src/account/azext_account/generated/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514062181","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación\n * Universidad de Los Andes\n *\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. 
If not, see .\n * Contribución de:\n *\n * Dario Correal\n *\n \"\"\"\n\n\nimport sys\nimport config\nfrom App import controller\nfrom DISClib.ADT import stack\nimport timeit \nassert config\nfrom DISClib.DataStructures import listiterator as it \nfrom time import perf_counter\nfrom DISClib.ADT import list as lt\n\n\n\"\"\"\nLa vista se encarga de la interacción con el usuario.\nPresenta el menu de opciones y por cada seleccion\nhace la solicitud al controlador para ejecutar la\noperación seleccionada.\n\"\"\"\n \n\n# ___________________________________________________\n# Variables\n# ___________________________________________________\n\nservicefile = '201801-1-citibike-tripdata.csv'\ninitialStation = None\nrecursionLimit = 20000\n# ___________________________________________________\n# Printeos y organizacion\n# ___________________________________________________\ndef Printop3(list):\n iterador=it.newIterator(list)\n while it.hasNext(iterador):\n dato_lista=it.next(iterador)\n if dato_lista == True:\n print(\"si pertenecen al mismo clúster\")\n elif dato_lista == False:\n print(\"no pertenecen al mismo clúster\")\n else:\n print(\"hay un total de \",dato_lista,\" clusters\")\ndef information(citibike):\n \"Funcion para la info del grafo\"\n numedges = controller.totalConnections(cont)\n numvertex = controller.totalStops(cont)\n print('Numero de vertices: ' + str(numvertex))\n print('Numero de arcos: ' + str(numedges))\n print(\"Numero de clusters\", controller.clusters(citibike,None,None))\n print(\"numero de viajes cargados: \",citibike[\"num\"])\n \n# ___________________________________________________\n# Menu principal\n# ___________________________________________________\n\ndef optionTwo(cont):\n print(\"\\nCargando información de transporte de singapur ....\")\n t1 = perf_counter() \n controller.loadTrips(cont) \n num_caminos_con=controller.onlycosajaru(cont)\n t2 = perf_counter()\n print(\"tiempo de carga:\", t2 - t1)\n print('El limite de recursion actual: ' + str(sys.getrecursionlimit()))\n sys.setrecursionlimit(recursionLimit)\n print('El limite de recursion se ajusta a: ' + str(recursionLimit))\n return num_caminos_con\n\ndef optionthree():\n estacion1=input(\"estacion1\")\n estacion2=input(\"estacion2\")\n t1 = perf_counter() \n bol_num=controller.clusters(cont,estacion1,estacion2)\n t2 = perf_counter()\n print(\"tiempo de carga:\", t2 - t1)\n Printop3(bol_num)\n\ndef optionfour():\n tiempo_ini=input(\"ingrese tiempo inicial: \")\n tiempo_fin=input(\"ingrese tiempo final: \")\n station_id=input(\"ingrese la estacion de inicio: \")\n tiempo_de_demora=input(\"Cuanto se demora en analizar los alrededores?: \")\n lista_caminos=controller.funcion2(cont,station_id,tiempo_ini,tiempo_fin,tiempo_de_demora)\n print(lt.size(lista_caminos))\n # iterador_lista=it.newIterator(lista_caminos)\n # while it.hasNext(iterador_lista):\n # next=\n\n\ndef optionSeven():\n latitud_1=input(\"ingrese la latitud 1: \")\n longitud_1=input(\"ingrese longitud 1: \")\n latitud_2=input(\"ingrese la latitud 2: \")\n longitud_2=input(\"ingrese longitud 2: \")\n \n\ndef printMenu():\n print(\"\\n\")\n print(\"*******************************************\")\n print(\"Bienvenido\")\n print(\"q- Inicializar Analizador\")\n print(\"w- Cargar información\")\n print(\"1- Cantidad de clusters de Viajes\")\n print(\"2- RQ2:\")\n print(\"5- N/A: \")\n print(\"6- N/A: \")\n print(\"7- N/A: \")\n print(\"0- Salir\")\n print(\"*******************************************\")\nwhile True:\n printMenu()\n inputs = 
input('Seleccione una opción para continuar\\n>')\n\n if inputs[0] == \"q\":\n print(\"\\nInicializando....\")\n # cont es el controlador que se usará de acá en adelante\n cont = controller.init()\n\n elif inputs[0] == \"w\":\n ncont= optionTwo(cont)\n cont[\"scc\"]=ncont\n print(ncont.keys())\n \n \n\n elif int(inputs[0]) == 1:\n optionthree()\n \n\n elif int(inputs[0]) == 2:\n optionfour()\n\n\n elif int(inputs[0]) == 3:\n print(\"d\")\n\n elif int(inputs[0]) == 4:\n destStation = input(\"Estación destino (Ej: 15151-10): \")\n \n \n\n elif int(inputs[0]) == 5:\n \n print(\"Tiempo de ejecución: \" + str(executiontime))\n elif int(inputs[0]) == 6:\n optionSeven()\n else:\n sys.exit(0)\nsys.exit(0)\n\n\"\"\"\nMenu principal\n\"\"\"","sub_path":"App/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551393378","text":"class UserDao:\n def enroll_user(self, user_info, session): \n \"\"\"\n 고객 정보 생성(Persistence Layer)\n Args :\n user_info: 고객 데이터\n session : 데이터베이스 연결객체\n Returns :\n results_2: 고객 정보 생성 결과\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql_1 = \"\"\" \n INSERT INTO users (\n account\n ) VALUES(%s)\n \"\"\"\n lastrowid = session.bind.execute(sql_1,user_info['account']).lastrowid\n sql_2 = \"\"\"\n INSERT INTO user_details (\n user_id,\n name,\n birthday,\n memo\n ) VALUES (%s,%s,%s,%s)\n \"\"\"\n results = session.bind.execute(sql_2, \n (\n lastrowid,\n user_info['name'],\n user_info['birthday'],\n user_info['memo']\n )\n )\n return results\n\n def user_list(self, pagination, session):\n \"\"\"\n 고객 정보 목록 조회(Persistence Layer)\n Args :\n pagination: LIMIT와 OFFSET가 있는 딕셔너리\n session : 데이터베이스 연결객체\n Returns :\n results: 고객 정보 목록 조회 결과\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n sql = \"\"\" \n SELECT u.id, u.account, ud.name, ud.birthday, ud.memo \n FROM users as u INNER JOIN user_details as ud \n ON u.id = ud.user_id LIMIT %s OFFSET %s;\n \"\"\"\n results = session.bind.execute(sql,(\n pagination['limit'],\n pagination['limit']*pagination['page']\n )).fetchall()\n return results\n\n def user_detail(self, account, session):\n \"\"\"\n 고객 정보 상세 조회(Persistence Layer)\n Args :\n account: 유저의 아이디\n session : 데이터베이스 연결객체\n Returns :\n results: 고객 정보 상세 조회 결과\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql = \"\"\" \n SELECT u.id, u.account, ud.name, ud.birthday, ud.memo \n FROM users as u INNER JOIN user_details as ud \n ON u.id = ud.user_id \n AND u.account = %s;\n \"\"\"\n \n results = session.bind.execute(sql, account).fetchone()\n return results\n\n def update_user(self, user_info, session):\n \"\"\"\n 고객 정보 변경(Persistence Layer)\n Args :\n user_info: 유저의 아이디\n session : 데이터베이스 연결객체\n Returns :\n results: 고객 정보 변경 결과\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql = \"\"\"\n UPDATE user_details SET\n name = %s,\n birthday = %s,\n memo = %s\n WHERE user_id = %s\n \"\"\"\n results = session.bind.execute(sql, \n ( \n user_info['name'],\n user_info['birthday'],\n user_info['memo'],\n user_info['id']\n )\n )\n return results\n\n def delete_user(self, user_info, session):\n \"\"\"\n 고객 정보 삭제(Persistence Layer)\n Args :\n user_info: 유저의 아이디\n session : 데이터베이스 연결객체\n Returns :\n results: 고객 정보 삭제 결과\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql = \"\"\" \n DELETE FROM users WHERE account = %s\n \"\"\"\n results = session.bind.execute(sql,user_info['account'])\n return results\n\n def account_checker(self, user_info, session):\n 
\"\"\"\n 아이디 확인(Persistence Layer)\n Args :\n user_info: 유저의 아이디\n session : 데이터베이스 연결객체\n Returns :\n results: 아이디 확인 결과값 \n 아이디가 일을시: {\"check\":1} \n 아이디가 없을시: {\"check\":0}\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql = \"\"\" \n SELECT EXISTS (\n SELECT account \n FROM users \n WHERE account = %s) \n as 'check'\n \"\"\"\n results = session.bind.execute(sql, user_info['account']).fetchone()\n return results\n\n def pk_checker(self, user_info, session):\n \"\"\"\n 고유키 확인(Persistence Layer)\n Args :\n user_info: 유저의 아이디\n session : 데이터베이스 연결객체\n Returns :\n results: 아이디 확인 결과값 \n 아이디가 일을시: {\"check\":1} \n 아이디가 없을시: {\"check\":0}\n Author :\n taeha7b@gmail.com (김태하)\n \"\"\"\n \n sql = \"\"\" \n SELECT EXISTS (\n SELECT account \n FROM users \n WHERE id = %s) \n as 'check'\n \"\"\"\n results = session.bind.execute(sql, user_info['id']).fetchone()\n return results","sub_path":"crud_ver2/model/user_dao.py","file_name":"user_dao.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"73754766","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport logging\nlogger = logging.getLogger(\"simple_example\")#设置日志名字\nlogger.setLevel(logging.DEBUG)#设置全局的日志级别\n\n#设置显示器\nch = logging.StreamHandler()\nch.setLevel(logging.WARNING)\n\n#设置文件\nfh = logging.FileHandler(\"log.log\")\nfh.setLevel(logging.INFO)\n\n#设置日志格式\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)#设置显示器的日志格式\nfh.setFormatter(formatter)#设置文件的日志格式\n\n#将指定的处理程序添加到全局变量logger执行\nlogger.addHandler(ch)\nlogger.addHandler(fh)\n\n#打印日志级别\nlogger.debug('debug msg...')\nlogger.info('info msg...')\nlogger.warning('warning msg...')\nlogger.error('error msg...')\nlogger.critical('critical msg...')\nlogger.log(10,'log msg...')\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python自动化开发/day5/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"148821567","text":"import numpy as np\nimport cv2\ndef konturkeres(kep):\n #pirosítás\n blurred_kep = cv2.GaussianBlur(kep, (5, 5), 0)\n hsv = cv2.cvtColor(blurred_kep, cv2.COLOR_BGR2HSV)\n \n #szín tartomány szerinti szűrése\n color_min = np.array([0, 120, 120])\n color_max = np.array([180, 240, 240])\n mask = cv2.inRange(hsv, color_min, color_max)\n #cv2.imshow(\"Mask\", mask)\n\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n c = max(contours, key = cv2.contourArea)\n return cv2.minAreaRect(c)\n\ndef tavolsag(knownWidth, focalLength, perWidth):\n # távolság számítás\n return (knownWidth * focalLength) / perWidth\n","sub_path":"ingredients/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"475628977","text":"# Creates a vocabulary file containing all words in files in the\n# word_sim_dir. 
Meant for collecting all words in one file.\nimport sys\nimport os\n\nif __name__ == '__main__':\n word_sim_dir = sys.argv[1]\n out_vocab = sys.argv[2]\n vocab = set()\n for i, filename in enumerate(os.listdir(word_sim_dir)):\n for line in open(os.path.join(word_sim_dir, filename), 'r'):\n line = line.strip().lower()\n word1, word2, val = line.split()\n vocab.update([word1, word2])\n f = open(out_vocab, 'wb')\n for word in vocab:\n f.write(\"%s\\n\" % word)\n","sub_path":"create_vocab.py","file_name":"create_vocab.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239312420","text":"import re\nfrom Token import Token\n# 关键字\nkeywords = [\n ['int', 'float', 'double', 'char', 'void'],\n ['if', 'for', 'while', 'do', 'else'], ['include', 'return'],\n]\n# 运算符\noperators = [\n '=', '&', '<', '>', '++', '--', '+', '-', '*', '/', '>=', '<=', '!='\n]\n# 分隔符\ndelimiters = ['(', ')', '{', '}', '[', ']', ',', '\\\"', ';']\nclass Lexer(object):\n '''词法分析器'''\n\n def __init__(self):\n # 用来保存词法分析出来的结果\n self.tokens = []\n\n # 判断是否是空白字符\n def is_blank(self, index,content):\n return (\n content[index] == ' ' or\n content[index] == '\\t' or\n content[index] == '\\n' or\n content[index] == '\\r'\n )\n\n # 跳过空白字符\n def skip_blank(self, index,content):\n while index < len(content) and self.is_blank(index,content):\n index += 1\n return index\n\n # 打印\n def print_log(self, style, value):\n print ('(%s, %s)' % (style, value))\n\n # 判断是否是关键字\n def is_keyword(self, value):\n for item in keywords:\n if value in item:\n return True\n return False\n\n # 词法分析主程序\n def main(self,content):\n i = 0\n while i < len(content):\n i = self.skip_blank(i,content)\n # 如果是引入头文件,还有一种可能是16进制数,这里先不判断\n if content[i] == '#':\n #self.print_log( '分隔符', content[ i ] )\n self.tokens.append(Token(4, content[i]))\n i = self.skip_blank(i + 1,content)\n # 分析这一引入头文件\n while i < len(content):\n # 匹配\"include\"\n if re.match('include', content[i:]):\n # self.print_log( '关键字', 'include' )\n self.tokens.append(Token(0, 'include'))\n i = self.skip_blank(i + 7,content)\n # 匹配\"或者<\n elif content[i] == '\\\"' or content[i] == '<':\n # self.print_log( '分隔符', content[ i ] )\n self.tokens.append(Token(4, content[i]))\n i = self.skip_blank(i + 1,content)\n close_flag = '\\\"' if content[i] == '\\\"' else '>'\n # 找到include的头文件\n lib = ''\n while content[i] != close_flag:\n lib += content[i]\n i += 1\n # self.print_log( '标识符', lib )\n self.tokens.append(Token(1, lib))\n # 跳出循环后,很显然找到close_flog\n # self.print_log( '分隔符', close_flag )\n self.tokens.append(Token(4, close_flag))\n i = self.skip_blank(i + 1,content)\n break\n else:\n print ('include error!')\n exit()\n # 如果是字母或者是以下划线开头\n elif content[i].isalpha() or content[i] == '_':\n # 找到该字符串\n temp = ''\n while i < len(content) and (\n content[i].isalpha() or\n content[i] == '_' or\n content[i].isdigit()):\n temp += content[i]\n i += 1\n # 判断该字符串\n if self.is_keyword(temp):\n # self.print_log( '关键字', temp )\n self.tokens.append(Token(0, temp))\n else:\n # self.print_log( '标识符', temp )\n self.tokens.append(Token(1, temp))\n i = self.skip_blank(i,content)\n # 如果是数字开头\n elif content[i].isdigit():\n temp = ''\n while i < len(content):\n if content[i].isdigit() or (\n content[i] == '.' 
and content[i + 1].isdigit()):\n temp += content[i]\n i += 1\n elif not content[i].isdigit():\n if content[i] == '.':\n print ('float number error!')\n exit()\n else:\n break\n # self.print_log( '常量' , temp )\n self.tokens.append(Token(2, temp))\n i = self.skip_blank(i,content)\n # 如果是分隔符\n elif content[i] in delimiters:\n # self.print_log( '分隔符', content[ i ] )\n self.tokens.append(Token(4, content[i]))\n # 如果是字符串常量\n if content[i] == '\\\"':\n i += 1\n temp = ''\n while i < len(content):\n if content[i] != '\\\"':\n temp += content[i]\n i += 1\n else:\n break\n else:\n print ('error:lack of \\\"')\n exit()\n # self.print_log( '常量' , temp )\n self.tokens.append(Token(5, temp))\n # self.print_log( '分隔符' , '\\\"' )\n self.tokens.append(Token(4, '\\\"'))\n i = self.skip_blank(i + 1,content)\n # 如果是运算符\n elif content[i] in operators:\n # 如果是++或者--\n if (content[i] == '+' or content[i] == '-') and (\n content[i + 1] == content[i]):\n # self.print_log( '运算符', content[ i ] * 2 )\n self.tokens.append(Token(3, content[i] * 2))\n i = self.skip_blank(i + 2,content)\n # 如果是>=或者<=\n elif (content[i] == '>' or content[i] == '<') and content[i + 1] == '=':\n # self.print_log( '运算符', content[ i ] + '=' )\n self.tokens.append(Token(3, content[i] + '='))\n i = self.skip_blank(i + 2,content)\n # 其他\n else:\n # self.print_log( '运算符', content[ i ] )\n self.tokens.append(Token(3, content[i]))\n i = self.skip_blank(i + 1,content)","sub_path":"Lexer.py","file_name":"Lexer.py","file_ext":"py","file_size_in_byte":6596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"541421355","text":"from flask import Flask, render_template\nimport datetime\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef agenda():\n base = datetime.datetime.today()\n output = []\n for x in range(1, 36):\n output.append(base + datetime.timedelta(days=x))\n return render_template('testing.html', dates=output)\n\n\n@app.route('/benis')\ndef benis(var):\n print(var)\n return 'Success'\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"339836972","text":"import pytest\nfrom typing import List\n\nfrom regex_lookahead_lookbehind import (\n count_n_repetitions,\n count_n_reps_or_n_chars_following,\n check_surrounding_chars,\n)\n@pytest.mark.parametrize(\n \"n, text, expected\",\n [\n (1, \"\", 0),\n (1, \"1\", 0),\n (1, \"11\", 1),\n (2, \"1\", 0),\n (2, \"111\", 1),\n (2, \"1122\", 0),\n (2, \"1112345\", 1),\n ],\n)\ndef test_count_n_repetitions_digits(n, text, expected):\n assert count_n_repetitions(text, n=n) == expected\n\n\n@pytest.mark.parametrize(\n \"n, text, expected\",\n [\n (1, \"a\", 0),\n (1, \"aa\", 1),\n (1, \"????{{{?}}}\", 7),\n (2, \"b\", 0),\n (2, \"ccc\", 1),\n (2, \"ZZaa\", 0),\n (2, \"ZZzz\", 0),\n (2, \"zzZZ\", 0),\n (2, r\" \\\\\\ \", 1),\n (2, \" Spaces are fun\", 1),\n (2, \"\\n\\n\\nAs are newlines\\n\\n\\n\", 2),\n (2, \"As \\t\\t\\t are tabs\\t\\t\", 1),\n ],\n)\ndef test_count_n_repetitions_chars(n, text, expected):\n assert count_n_repetitions(text, n=n) == expected\n\n\n@pytest.mark.parametrize(\n \"n, text, expected\",\n [\n (1, \"Ä\", 0),\n (1, \"ÄÄ\", 1),\n (1, \"※※※ - Monster Hunter - ※※※\", 4),\n (2, \"Ö\", 0),\n (2, \"ßßß\", 1),\n (2, \"ZZÄÄ\", 0),\n (2, \"Greek: εζεζεζεηηηη\", 2),\n ],\n)\ndef test_count_n_repetitions_unicode(n, text, expected):\n assert count_n_repetitions(text, 
n=n) == expected\n\n@pytest.mark.parametrize(\n    \"n, char, text, expected\",\n    [\n        (1, \"\", \"\", 0),\n        (2, \"\", \"1112345\", 1),\n        (1, \"\", \"????{{{?}}}\", 7),\n        (2, \"\", \"\\n\\n\\nAs are newlines\\n\\n\\n\", 2),\n    ],\n)\ndef test_count_n_reps_or_n_chars_following_no_char(n, char, text, expected):\n    assert count_n_reps_or_n_chars_following(text, n=n, char=char) == expected\n\n\n@pytest.mark.parametrize(\n    \"n, char, text, expected\",\n    [\n        (1, \"z\", \"\", 0),\n        (2, \"z\", \"1112345\", 1),\n        (1, \"z\", \"????{{{?}}}\", 7),\n        (2, \"z\", \"\\n\\n\\nAs are newlines\\n\\n\\n\", 2),\n    ],\n)\ndef test_count_n_reps_or_n_chars_following_no_containing_char(n, char, text, expected):\n    assert count_n_reps_or_n_chars_following(text, n=n, char=char) == expected\n\n\n@pytest.mark.parametrize(\n    \"n, char, text, expected\",\n    [\n        (1, \"z\", \"zz Don't count double!\", 1),\n        (1, \"z\", \"9z\", 1),\n        (1, \"z\", \"9zz\", 2),\n        (1, \"Z\", \"9Zz\", 1),\n        (1, \"?\", \"????{{{?}}}\", 8),\n        (1, \"[\", \"????[[[?]]]\", 8),\n        (1, \"]\", \"????[[[?]]]\", 8),\n        (1, \"^\", \"Hello^there\", 2),\n        (2, \"z\", \"\\n\\n\\nzz newlines\\n\\n\", 2),\n        (2, \"a\", \"Kai is mean...aarg\", 2),\n        (2, \"\\t\", \"But bob isn't...\\t\\t\", 2),\n    ],\n)\ndef test_count_n_reps_or_n_chars_following_mix(n, char, text, expected):\n    assert count_n_reps_or_n_chars_following(text, n=n, char=char) == expected\n@pytest.mark.parametrize(\n    \"surrounding_chars, text, expected\",\n    [\n        ([\"Z\", \"A\"], \"ZZZZZ\", 3),\n        ([\"Z\", \"A\"], \"ABCCBAAAZz\", 2),\n        ([\"\\n\", \"\\t\"], \"\\nK\\nA\\tI\\t\", 3),\n        ([\"R\", \"?\", \"^\"], \"SPECIAL^C^HARS?\", 2),\n        ([\"^\", \"$\"], \"^S^tar$t$\", 2),\n        ([\":\", \"|\"], \"?:A:lmost|t|here\", 2),\n    ],\n)\ndef test_check_surrounding_chars_valid(surrounding_chars: List[str], text, expected):\n    assert check_surrounding_chars(text, surrounding_chars) == expected\n","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/advanced/280/test_regex_lookahead_lookbehind.py","file_name":"test_regex_lookahead_lookbehind.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632764740","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nCopyright (c) 2019 - present AppSeed.us\n\"\"\"\n\n# Flask modules\nfrom flask import render_template, request\nfrom jinja2 import TemplateNotFound\nimport pandas as pd\n# App modules\nfrom app import app\nimport dash_inputs\nimport json\n\n# App main route + generic routing\n@app.route('/')\ndef index():\n    try:\n        daily = get_daily_data()\n        inv_data = dash_inputs.chart_data(dash_inputs.sample_inv(),'invoice_type','amount')\n        return render_template( 'index.html', daily=daily,inv_data=inv_data)\n    except Exception as e:\n        print(e)\n        return\n\ndef get_daily_data():\n    daily = {}\n    daily['new_users']=900\n    daily['user_logins']=5000\n    daily['new_deployments']=1200\n    daily['new_strategies']=125\n    return daily\n\n\n    \n\n@app.route('/sales')\ndef sales():\n    \n    try:\n\n        # Detect the current page\n        inv_data = dash_inputs.chart_data(dash_inputs.sample_inv(),'invoice_type','amount')\n\n        # Serve the file (if exists) from app/templates/FILE.html\n        return json.dumps(inv_data)\n    \n    except TemplateNotFound:\n        return render_template('page-404.html'), 404\n\n@app.route('/users')\ndef users():\n    df=pd.read_csv('app/dash_data/active_subs.csv')\n    df_plan_wise = df.groupby(['exchange','name']).agg({'users':'sum'}).reset_index()\n    datasets = []\n    plans= ['Starter','Retail','Retail+','Creator','Creator+']\n    colours 
= ['#FF4500','#D2691E','#00BFFF','#66CDAA','#FFA500']\n counter = 0\n ds=[]\n for i in list(df['exchange'].unique()):\n points=[]\n for j in plans: \n points.append(str(df[(df['exchange']==i) & (df['name']==j)].users.sum()))\n ds.append({'label':i,\n 'data': points,\n 'backgroundColor': colours[counter]})\n counter+=1\n users = {'labels':plans,\n 'datasets': ds} \n datasets = []\n plan_types= ['Monthly','Quarterly','Yearly']\n colours = ['#00BFFF','#66CDAA','#FFA500','#FF4500','#D2691E']\n counter = 0\n ds2=[]\n for i in plans:\n points=[]\n for j in plan_types: \n points.append(str(df[(df['name']==i) & (df['sub_type']==j)].users.sum()))\n ds2.append({'label':i,\n 'data': points,\n 'backgroundColor': colours[counter]})\n counter+=1\n sub_type = {'labels':plan_types,\n 'datasets': ds2} \n\n users_data = {'users':users,\n 'sub_type':sub_type}\n return json.dumps(users_data)\n\n\n@app.route('/creators')\ndef creators():\n df = pd.read_pickle('dash_data/pm_invoices.pkl')\n \n return df\n\n\n@app.route('/sales_data')\ndef sales_data():\n df = pd.read_pickle('dash_data/sales_data.pkl')\n return df\n\n\n@app.route('/deployments')\ndef deployments():\n df = pd.read_pickle('dash_data/strats_report.pkl')\n return df\n\n\n\n\n ","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"270343059","text":"import re\nfrom bs4 import BeautifulSoup\nimport wikipedia\nfrom stop_words import get_stop_words\nfrom collections import Counter\nimport pandas as pd\n\n# Get Wikipedia page HTML via API\nws = wikipedia.WikipediaPage('Web scraping')\nws_html = ws.html()\n\n# Parse HTML with Beautiful Soup\nsoup = BeautifulSoup(ws_html, 'lxml')\n\n# Identify Stopwords\nstop_words = list(get_stop_words('en'))\n\n#For each section identify titles, total word counts, and links\nfor header in soup.find_all(['h1', 'h2', 'h3']): \n title = header.get_text() \n title = title[:-6] \n print(title) \n for elem in header.next_siblings: \n if elem.name and elem.name.startswith('h'):\n break\n if elem.name in ('p', 'ul'): \n text = elem.get_text() \n words = re.findall(r'\\b\\w+', text) \n lower_case = Counter([word.lower() for word in words if not word.lower() in stop_words]) \n df = pd.DataFrame(lower_case.most_common(5)) \n df.columns = ['words', 'frequency'] \n print(df) \n for links in elem.find_all('a'): \n print(links['href']) \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"299495223","text":"### tensorflow==2.3.0\n\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D\n### https://www.tensorflow.org/api_docs/python/tf/keras/backend/l2_normalize\n### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer\n\n### How to initialize a convolution layer with an arbitrary kernel in Keras? 
https://stackoverrun.com/ja/q/12269118\n\n### saved_model_cli show --dir saved_model_0200/ --tag_set serve --signature_def serving_default\n\nimport tensorflow as tf\nfrom tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Add, ReLU, MaxPool2D, Reshape, Concatenate, AveragePooling2D, Layer\nfrom tensorflow.keras.backend import l2_normalize\nimport keras\nfrom tensorflow.keras.initializers import Constant\nimport numpy as np\nimport sys\nimport tensorflow_datasets as tfds\n\n# tmp = np.load('weights/depthwise_conv2d_Kernel')\n# print(tmp.shape)\n# print(tmp)\n\n# def init_f(shape, dtype=None):\n# ker = np.load('weights/depthwise_conv2d_Kernel')\n# print(shape)\n# return ker\n\n# sys.exit(0)\n\ninputs = Input(shape=(32, 32, 1), batch_size=200, name='rgb_to_grayscale_1')\n\n# Block_01\ndepthconv1_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding=\"same\", depth_multiplier=16, dilation_rate=[1, 1],\n depthwise_initializer=Constant(np.load('weights_200/siamese_neural_congas_feature_extraction_Conv_weights')),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_feature_extraction_Conv_Conv2D_bias')))(inputs)\nrelu1_1 = ReLU(max_value=6.)(depthconv1_1)\nconv1_1 = Conv2D(filters=32, kernel_size=[3, 3], strides=[2, 2], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_feature_extraction_Conv_1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_feature_extraction_Conv_1_Conv2D_bias')))(relu1_1)\nrelu1_2 = ReLU(max_value=6.)(conv1_1)\nconv1_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_feature_extraction_Conv_2_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_feature_extraction_Conv_2_Conv2D_bias')))(relu1_2)\nrelu1_3 = ReLU(max_value=6.)(conv1_2)\n\n# Block_02\nconv2_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed_6a_Branch_0_Conv2d_0a_1x1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed_6a_Branch_0_Conv2d_0a_1x1_Conv2D_bias')))(relu1_3)\nrelu2_1 = ReLU(max_value=6.)(conv2_1)\nconv2_2 = Conv2D(filters=16, kernel_size=[3, 3], strides=[2, 2], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed_6a_Branch_0_Conv2d_0b_3x3_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed_6a_Branch_0_Conv2d_0b_3x3_Conv2D_bias')))(relu2_1)\nrelu2_2 = ReLU(max_value=6.)(conv2_2)\n\nconv2_3 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed_6a_Branch_1_Conv2d_1a_1x1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1a_1x1_Conv2D_bias')))(relu1_3)\nrelu2_3 = ReLU(max_value=6.)(conv2_3)\nconv2_4 = Conv2D(filters=16, kernel_size=[3, 3], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed_6a_Branch_1_Conv2d_1b_3x3_weights').transpose(1,2,3,0)),\n 
bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1b_3x3_Conv2D_bias')))(relu2_3)\nrelu2_4 = ReLU(max_value=6.)(conv2_4)\nconv2_5 = Conv2D(filters=16, kernel_size=[3, 3], strides=[2, 2], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed_6a_Branch_1_Conv2d_1c_3x3_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1c_3x3_Conv2D_bias')))(relu2_4)\nrelu2_5 = ReLU(max_value=6.)(conv2_5)\n\nmaxpool2_1 = MaxPool2D(pool_size=[3, 3], strides=[2, 2], padding='same')(relu1_3)\n\nconcat2_1 = Concatenate(axis=3)([relu2_2, relu2_5, maxpool2_1])\n\n# Block_03\nconv3_1 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_0_Conv2d_0a_1x1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_0_Conv2d_0a_1x1_Conv2D_bias')))(concat2_1)\nrelu3_1 = ReLU(max_value=6.)(conv3_1)\nconv3_2 = Conv2D(filters=32, kernel_size=[3, 3], strides=[2, 2], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_0_Conv2d_0b_3x3_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_0_Conv2d_0b_3x3_Conv2D_bias')))(relu3_1)\nrelu3_2 = ReLU(max_value=6.)(conv3_2)\n\nconv3_3 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_1_Conv2d_1a_1x1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_1_Conv2d_1a_1x1_Conv2D_bias')))(concat2_1)\nrelu3_3 = ReLU(max_value=6.)(conv3_3)\nconv3_4 = Conv2D(filters=32, kernel_size=[1, 7], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_1_Conv2d_1b_1x7_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_1_Conv2d_1b_1x7_Conv2D_bias')))(relu3_3)\nrelu3_4 = ReLU(max_value=6.)(conv3_4)\nconv3_5 = Conv2D(filters=32, kernel_size=[7, 1], strides=[1, 1], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_1_Conv2d_1c_7x1_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_1_Conv2d_1c_7x1_Conv2D_bias')))(relu3_4)\nrelu3_5 = ReLU(max_value=6.)(conv3_5)\nconv3_6 = Conv2D(filters=32, kernel_size=[3, 3], strides=[2, 2], padding=\"same\", dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_Mixed7a_Branch_1_Conv2d_1d_3x3_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_Mixed7a_Branch_1_Conv2d_1d_3x3_Conv2D_bias')))(relu3_5)\nrelu3_6 = ReLU(max_value=6.)(conv3_6)\n\nmaxpool3_1 = MaxPool2D(pool_size=[3, 3], strides=[2, 2], padding='same')(concat2_1)\n\nconcat3_1 = Concatenate(axis=3)([relu3_2, relu3_6, maxpool3_1])\n\n# Block_04\navgpool4_1 = AveragePooling2D(pool_size=[4, 4], strides=[1, 1], padding='valid')(concat3_1)\nconv4_1 = Conv2D(filters=40, kernel_size=[1, 1], strides=[1, 1], padding=\"same\", 
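# NOTE: this 1x1 'feature_compression' conv maps the 4x4-average-pooled features down to the 40-dim descriptor that is reshaped and L2-normalized below.\n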
dilation_rate=[1, 1],\n kernel_initializer=Constant(np.load('weights_200/siamese_neural_congas_feature_compression_Conv2d_0a_weights').transpose(1,2,3,0)),\n bias_initializer=Constant(np.load('weights_200/siamese_neural_congas_1_feature_compression_Conv2d_0a_Conv2D_bias')))(avgpool4_1)\nreshape4_1 = tf.reshape(conv4_1, (200, 40))\nl2norm4_1 = l2_normalize(reshape4_1)\nnormalize_embeddings = keras.backend.identity(l2norm4_1, name='normalize_embeddings')\n\nmodel = Model(inputs=inputs, outputs=[normalize_embeddings])\n\nmodel.summary()\n\ntf.saved_model.save(model, 'saved_model_0200')\nmodel.save('knift_0200.h5')\n\n\n# No Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\ntflite_model = converter.convert()\nwith open('knift_0200_32x32_float32.tflite', 'wb') as w:\n w.write(tflite_model)\nprint(\"tflite convert complete! - knift_0200_32x32_float32.tflite\")\n\n\n# Weight Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\nconverter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\ntflite_model = converter.convert()\nwith open('knift_0200_32x32_weight_quant.tflite', 'wb') as w:\n w.write(tflite_model)\nprint(\"Weight Quantization complete! - knift_0200_32x32_weight_quant.tflite\")\n\n\ndef representative_dataset_gen():\n imagelist = None\n for data in raw_test_data:\n image = data['image'].numpy()\n image = tf.image.resize(image, (32, 32))\n image = tf.image.rgb_to_grayscale(image)\n image = image[np.newaxis,:,:,:]\n # image = image - 127.5\n # image = image * 0.007843\n image = image / 255.0\n if imagelist is None:\n imagelist = np.asarray(image)\n else:\n imagelist = np.vstack((imagelist, image))\n if imagelist.shape[0] == 200:\n yield [imagelist]\n imagelist = None\n\nraw_test_data, info = tfds.load(name=\"cifar10\", with_info=True, split=\"test\", data_dir=\"~/TFDS\", download=True)\n\n\n# Integer Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = representative_dataset_gen\ntflite_quant_model = converter.convert()\nwith open('knift_0200_32x32_integer_quant.tflite', 'wb') as w:\n w.write(tflite_quant_model)\nprint(\"Integer Quantization complete! - knift_0200_32x32_integer_quant.tflite\")\n\n\n# # Full Integer Quantization - Input/Output=int8\n# converter = tf.lite.TFLiteConverter.from_keras_model(model)\n# converter.optimizations = [tf.lite.Optimize.DEFAULT]\n# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n# converter.inference_input_type = tf.uint8\n# converter.inference_output_type = tf.uint8\n# converter.representative_dataset = representative_dataset_gen\n# tflite_quant_model = converter.convert()\n# with open('knift_0200_32x32_full_integer_quant.tflite', 'wb') as w:\n# w.write(tflite_quant_model)\n# print(\"Integer Quantization complete! - knift_0200_32x32_full_integer_quant.tflite\")\n\n\n# Float16 Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.target_spec.supported_types = [tf.float16]\ntflite_quant_model = converter.convert()\nwith open('knift_0200_32x32_float16_quant.tflite', 'wb') as w:\n w.write(tflite_quant_model)\nprint(\"Float16 Quantization complete! 
- knift_0200_32x32_float16_quant.tflite\")\n\n\n# # EdgeTPU\n# import subprocess\n# result = subprocess.check_output([\"edgetpu_compiler\", \"-s\", \"knift_0200_32x32_full_integer_quant.tflite\"])\n# print(result)\n","sub_path":"054_KNIFT/01_float32/01_knift_0200_tflite2h5_weight_int_fullint_float16_quant.py","file_name":"01_knift_0200_tflite2h5_weight_int_fullint_float16_quant.py","file_ext":"py","file_size_in_byte":11928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"463045189","text":"# -*- coding: utf-8 -*-\n#\nimport os\nimport tempfile\n\nimport pytest\n\nimport meshio\n\nimport helpers\n\n\n@pytest.mark.parametrize('filename', [\n    'test.e',\n    'test.med',\n    'test.mesh',\n    'test.msh',\n    'test.xml',\n    'test.post',\n    'test.h5m',\n    'test.off',\n    'test.vtk',\n    'test.vtu',\n    'test.xmf',\n    ])\ndef test_generic_io(filename):\n    with tempfile.TemporaryDirectory() as temp_dir:\n        filepath = os.path.join(temp_dir, filename)\n\n        meshio.write(\n            filepath,\n            helpers.tri_mesh['points'],\n            helpers.tri_mesh['cells'],\n            )\n\n        points, cells, _, _, _ = meshio.helpers.read(filepath)\n\n        assert (abs(points - helpers.tri_mesh['points']) < 1.0e-15).all()\n        assert (\n            helpers.tri_mesh['cells']['triangle'] == cells['triangle']\n            ).all()\n    return\n","sub_path":"test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"428071978","text":"# https://zh.wikipedia.org/wiki/幻方\n# https://www.zhihu.com/question/30498489\n# https://blog.csdn.net/u010039377/article/details/40866451\n\n# Odd-order magic squares\n# Even-order magic squares\n# - order-4M magic squares\n# - order-(4M+2) magic squares\n\n# 1  2  3  4 \n# 5  6  7  8 \n# 9  10 11 12 \n# 13 14 15 16\n\n# n is the side length of the square (the square root of the cell count); for the 16-cell grid, n = 4\n# Two numbers are called complementary when their sum equals the square's maximum plus its minimum, i.e. n²+1.\n# Replace the numbers on the diagonals with their complements:\n# 1 -> 16\n# 6 -> 11\n# 11 -> 6\n# 16 -> 1\n# 4 -> 13\n# 7 -> 10\n# 10 -> 7\n# 13 -> 4\n\n# This completes the order-4 magic square\n# 16 2  3  13 \n# 5  11 10 8 \n# 9  7  6  12 \n# 4  14 15 1\n\n# For a magic square of order n=4k, first fill in the numbers in sequence, then partition the grid into k*k blocks of 4*4.\n# Since n is a multiple of 4, the grid always splits cleanly into 4*4 sub-squares; treating each one like the order-4 square above and swapping its diagonal numbers for their complements yields a magic square.\n\n# Given a 3*3 matrix, compute its transpose\n# https://zh.wikipedia.org/wiki/转置矩阵\n# https://www.zhihu.com/question/39660985\n\n# 1 2 3    1 4 7\n# 4 5 6    2 5 8\n# 7 8 9    3 6 9\n\nmatrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\nfor i in range(len(matrix)):\n    for j in range(i):\n        matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\nprint(matrix)\n# [[1, 4, 7], [2, 5, 8], [3, 6, 9]]\n\nprint(list(zip(*matrix)))\n# [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n\n# 1 2 3   1 4\n# 4 5 6   2 5\n#         3 6\n\nmatrix = [[1,4], [2, 5], [3, 6]]\nprint(list(zip(*matrix)))\n# [(1, 2, 3), (4, 5, 6)]\n\nmatrix = [[1, 2, 3], [4, 5, 6]]\nprint(list(zip(*matrix)))\n# [(1, 4), (2, 5), (3, 6)]\n\n\nmatrix = [[1,2,3], [4,5,6]]\n# allocate the whole new matrix up front\ntm = [[0 for col in range(len(matrix))] for row in range(len(matrix[0]))]\n# print(tm)\n# [[0, 0], [0, 0], [0, 0]]\nfor i,row in enumerate(tm):\n    for j,col in enumerate(row):\n        # copy every element of matrix into tm\n        tm[i][j] = matrix[j][i] \n\nprint(tm)\n# [[1, 4], [2, 5], [3, 6]]\n","sub_path":"algorithm/magic_square.py","file_name":"magic_square.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"643492872","text":"import json\nimport sys\nfrom alternator import alternator\nimport data_extraction_tools as det\nfrom request_handler import request_handler\n
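# NOTE: check_progress/save_progress, imported below, persist the crawl state to disk so an interrupted run resumes where it stopped instead of starting over.\n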
from utilities import get_multiple_pages_diamond, unix_time_millis, check_progress, save_progress\nimport datetime\nimport os\n\n# Define the configuration ---------\n\nAPI_KEY = sys.argv[1] # The API key is passed as a command-line argument\nprint(os.getcwd())\nservers = [\"EUW\", \"KR\", \"NA\", \"EUNE\"] # The servers we are going to use.\n# Korea (KR) is very far away, so it sometimes returns a 504 error.\ndesired_matches = 30000 # How many matches we want to collect data for.\nplayers_til_dump = 50 # How many players to analyze before saving progress\nsave_progress_path = \"data/pickle_progress\" # Where the progress will be saved\n# Date from which we want matches\nstartime = int(unix_time_millis(datetime.datetime(2020, 12, 24)))\n\n# Initialize values ------------\n\nrh = request_handler(API_KEY) # The API key is stored inside the object that\n# handles the requests\n\n\n# These two sets keep track of the run's progress. We persist them\n# to disk so we never have to restart from zero after each save\nanalized_players, analized_matches = check_progress(save_progress_path)\ndesired_matches = desired_matches - len(analized_matches)\nif desired_matches <= 0:\n    sys.exit(\"Data already exists\")\n\nmatches = list()\n\ndiamond_leagues = [get_multiple_pages_diamond(rh, server, 4) for server in servers]\ndiamond_iterator = alternator(index = servers, iterators = diamond_leagues)\n\ntildump = 0\nfor server, summoner in diamond_iterator:\n    if (server, summoner) in analized_players:\n        continue\n    try:\n        account = det.get_summoner_account_id(rh.get_account_id(server, summoner))\n        history = det.get_summoner_history(rh.get_match_history(server,\n            account, params = {\"queue\": 420, \"beginTime\": startime}))\n    except (ValueError, RuntimeError):\n        continue # If this fires while fetching the match history, the summoner\n        # played no games in the period we requested the history for.\n        # It can also mean the server failed to answer\n        # the request for whatever reason.\n    for match in history:\n        if (server, match) not in analized_matches:\n            try:\n                matches.append(det.get_info_from_match(rh.get_match(server, match)))\n            except ValueError: # A request may fail with a 504 response\n                continue\n            analized_matches.add((server, match))\n    \n    analized_players.add((server, summoner))\n    # Check whether progress needs to be saved.\n    tildump += 1\n    if tildump >= players_til_dump:\n        save_progress(save_progress_path, analized_players, analized_matches)\n        with open(\"data/matches.json\", mode = \"w\") as file:\n            json.dump(matches, file)\n\n    if len(analized_matches) >= desired_matches:\n        break # Exit the loop once we have collected enough matches.\n\n    \n    \nwith open(\"data/matches.json\", mode = \"w\") as file:\n    json.dump(matches, file)","sub_path":"pysrc/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"207261329","text":"# chomp.py\r\n# this is the main program implementing a player for Chomp!\r\n\r\nfrom chomp_board import ChompBoard\r\nfrom board_util import BoardUtil\r\n\r\nsize = (4,5)\r\n\r\ndef main():\r\n    ##create a chomp_board of size mxn\r\n    chomp_board = ChompBoard(size) \r\n    ##print board\r\n    chomp_board.showboard()\r\n    ##play chomp!\r\n    play_chomp(chomp_board)\r\n    #mode = int(input(\"1-pvp, 2-pvc, 3-solver\"))\r\n    #if mode == 1:\r\n        #play_chomp(chomp_board)\r\n    #elif mode == 2:\r\n        #pass\r\n    #elif mode == 3:\r\n        #solve_chomp(chomp_board)\r\n    \r\n\r\ndef play_chomp(chomp_board):\r\n    game_end = False\r\n    \r\n    print(\"Note: move format is row number followed by column number, separated by a space\")\r\n    while not game_end:\r\n        \r\n        command = input(\">>>Enter command/move: \")\r\n        if command == \"solve\":\r\n            game_end = BoardUtil.solve_chomp(chomp_board)\r\n        else: \r\n            try:\r\n                row,col = command.split()\r\n                row = int(row)\r\n                col = int(col)\r\n                if row>size[0] or col>size[1]:\r\n                    raise\r\n                \r\n                ##check if the move is legal\r\n                if chomp_board.is_legal(row,col):\r\n                    \r\n                    ##check if the game is over\r\n                    game_end,winner = chomp_board.is_game_end(row,col)\r\n                    print(\">>>Player {}: move ({},{})\".format(chomp_board.current_player,row,col))\r\n                    if game_end:\r\n                        print(\">>>Game over, winner is {}\".format(winner))\r\n                    else:\r\n                        ##play the move, update board.\r\n                        chomp_board.play_move(row,col)\r\n                        chomp_board.showboard()\r\n                else:\r\n                    print(\"Illegal move, please pick a move that is non-zero.\")\r\n            except ValueError:\r\n                print(\"Invalid input, please enter two integers\")\r\n            except:\r\n                print(\"Input number out of range\")\r\n    \r\n    \r\n    \r\nmain()","sub_path":"chomp.py","file_name":"chomp.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"594982330","text":"'''\nA 3 x 3 magic square is a 3 x 3 grid filled with distinct numbers from 1 to 9 \nsuch that each row, column, and both diagonals all have the same sum.\n\nGiven a grid of integers, how many 3 x 3 \"magic square\" subgrids are there?\n(Each subgrid is contiguous).\n\nExample 1:\nInput: [[4,3,8,4],\n        [9,5,1,9],\n        [2,7,6,2]]\nOutput: 1\nExplanation: \nThe following subgrid is a 3 x 3 magic square:\n438\n951\n276\nwhile this one is not:\n384\n519\n762\nIn total, there is only one magic square inside the given grid.\n\nNote:\n1 <= grid.length <= 10\n1 <= grid[0].length <= 10\n0 <= grid[i][j] <= 15\n'''\n\n
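# NOTE: a 3 x 3 magic square over the distinct digits 1..9 always has a line sum of 15, so the equality chain in the solution below is implicitly a check against 15.\n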
from typing import List\n\n\nclass Solution:\n    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:\n        ans = 0\n\n        def distinct(i, j):\n            nums = []\n            for k in range(3):\n                nums.extend(grid[i+k][j:j+3])\n            check = set()\n            for n in nums:\n                if n in check or n == 0 or n > 9:\n                    return False\n                check.add(n)\n            return True\n\n        def row_sum(i, j):\n            return sum(grid[i][j:j+3])\n\n        def col_sum(i, j):\n            return grid[i][j] + grid[i+1][j] + grid[i+2][j]\n\n        m = len(grid)\n        n = len(grid[0])\n        for i in range(m-2):\n            for j in range(n-2):\n                if distinct(i, j) \\\n                    and row_sum(i, j) == row_sum(i+1, j) == row_sum(i+2, j) \\\n                    == col_sum(i, j) == col_sum(i, j+1) == col_sum(i, j+2) \\\n                    == (grid[i][j] + grid[i+1][j+1] + grid[i+2][j+2]):\n                    ans += 1\n        return ans\n# Runtime: 36 ms, faster than 62.58% of Python3 online submissions for Magic Squares In Grid.\n# Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Magic Squares In Grid.\n","sub_path":"801-900/840. Magic Squares In Grid.py","file_name":"840. Magic Squares In Grid.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362033175","text":"\r\nfname = input(\"Enter a file name: \")\r\nfhand = open(fname)\r\ncounts = dict()\r\nfor line in fhand:\r\n    if line.startswith('From'):\r\n        if not '@' in line : continue\r\n        parts = line.split('@')\r\n        # the domain is the first whitespace-separated token after the '@'; count it once per line\r\n        domain = parts[1].split()[0]\r\n        counts[domain] = counts.get(domain, 0) + 1\r\nprint(counts)\r\n","sub_path":"chap9/ex09_05.py","file_name":"ex09_05.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410068843","text":"import asyncio\nimport time\nimport random\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\nfrom multiprocessing import Value, Lock\n\n\"\"\"\nMultiprocessing + event_loop\n\"\"\"\ntotal = Value(\"d\", 0.0)\nlock = Lock()\n\n\nasync def do_work(messages: list) -> set:\n    async def fetch():\n        \"\"\"\n        Run a time-consuming job; jobs like this one can execute concurrently\n        \"\"\"\n        runtime = random.randint(1, 40) / 100\n        global total, lock\n        with lock:\n            total.value += runtime\n        time.sleep(runtime)\n\n    ids = set()\n    for mid in messages:\n        await fetch()\n        time.sleep(0.0001)\n        if random.random() > 0.5:\n            ids.add(mid)\n\n    return ids\n\n\ndef run_warper(function, *args):\n    \"\"\"\n    Wrap a function -- it may be a coroutine function or a plain function -- run it on a fresh event loop, and return the result\n    \"\"\"\n    loop = asyncio.new_event_loop()\n    try:\n        coroutine = function(*args)\n        asyncio.set_event_loop(loop)\n        return loop.run_until_complete(coroutine)\n    finally:\n        loop.close()\n\n\ndef main():\n    start = time.time()\n\n    loop = asyncio.get_event_loop()\n    messages = list(range(0, 10000))\n    tasks = []\n    step = 10000 // 15\n    executor = ProcessPoolExecutor(max_workers=16)  # executor for the event loop; the default is ThreadPoolExecutor(1)\n    for i in range(16):\n        # obtaining the coroutine directly\n        # task = do_work(messages[i * step:(i + 1) * step])\n        #\n        # obtaining a Future; do_work would have to be a plain function here\n        # task = loop.run_in_executor(executor, do_work, messages[i * step:(i + 1) * step])\n\n        # run_warper is a plain function that wraps the coroutine do_work\n        task = loop.run_in_executor(executor, run_warper, do_work, messages[i * step:(i + 1) * step])\n        tasks.append(task)\n\n    # execute\n    dones = loop.run_until_complete(asyncio.gather(*tasks))\n    ids = set()\n    for done in dones:\n        ids = ids.union(done)\n\n    print(\"Spent %0.2f s deleting %d emails\" % (time.time() - start, len(ids)))\n\n\nif __name__ == '__main__':\n    main()\n    
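# total.value holds the cumulative simulated work time accumulated across all worker processes\n    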
print(total.value)\n","sub_path":"async/event_in_process.py","file_name":"event_in_process.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"446931497","text":"import torch\n\nclass SimpleBlinkNeuralNetwork(torch.nn.Module):\n def __init__(self, hiddenNodes = 20):\n super(SimpleBlinkNeuralNetwork, self).__init__()\n\n # Down sample the image to 12x12\n self.avgPooling = torch.nn.AvgPool2d(kernel_size = 2, stride = 2) \n\n # Fully connected layer to all the down-sampled pixels to all the hidden nodes\n self.fullyConnectedOne = torch.nn.Sequential(\n torch.nn.Linear(12*12, hiddenNodes),\n torch.nn.Sigmoid()\n )\n\n # Fully connected layer to all the down-sampled pixels to all the hidden nodes\n #self.fullyConnectedTwo = torch.nn.Sequential(\n # torch.nn.Linear(hiddenNodes, hiddenNodes),\n # torch.nn.Sigmoid()\n # )\n\n # Fully connected layer from the hidden layer to a sin gle output node\n self.outputLayer = torch.nn.Sequential(\n torch.nn.Linear(hiddenNodes, 1),\n torch.nn.Sigmoid()\n )\n\n def forward(self, x):\n # Apply the layers created at initialization time in order\n out = self.avgPooling(x)\n out = out.reshape(out.size(0), -1)\n out = self.fullyConnectedOne(out)\n #out = self.fullyConnectedTwo(out)\n out = self.outputLayer(out)\n\n return out","sub_path":"BlinkSupport/SimpleBlinkNeuralNetwork.py","file_name":"SimpleBlinkNeuralNetwork.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135161978","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom .serializers import calculate_data, CalculateSerializer\n\n\n# Note about choosing GET:\n# As this endpoint does not update any data on the server, and we are GETting information from the server, we ought to\n# use GET as out method.\nclass CalculateView(APIView):\n def get(self, request, format=None):\n serializer = CalculateSerializer(data=request.GET)\n\n if serializer.is_valid():\n years = 50\n graph_data, result = calculate_data(\n float(serializer.validated_data[\"savingsAmount\"]),\n float(serializer.validated_data[\"monthlySaving\"]),\n float(serializer.validated_data[\"interestRate\"]),\n serializer.validated_data[\"interestFreq\"],\n years * 12)\n\n return Response({'result': result, 'graph_data': graph_data})\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"interest_calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"9369164","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n \n path('', views.HomePageView.as_view(),name='index'),\n path('balance/', views.BalanceListView.as_view(), name='balance'),\n path('balance/add', views.BalanceCreateView.as_view(), name='balance/add'),\n path('order/', views.OrderPageView.as_view(), name='order'),\n path('order/add-', views.OrderNewView.as_view(), name='order/add'),\n path('orderlist/', views.OrderListView.as_view(), name='orderlist'),\n]","sub_path":"billing/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"343960055","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"A script for downloading (crawling) tweets by their IDs. \nA useful tool for creating datasets of tweets, as requested in popular research challenges on Twitter data \n(e.g., SemEval, #Microposts and TREC Microblog Track). \nIt requires Twython (it optionally requires bz2file for compression).\nThis code is in https://gist.github.com/giacbrd/b996cfe2f1d24752f23bd119fdd678f2\"\"\"\n\n__author__ = 'Giacomo Berardi '\n\nimport io, json, time, os, logging, argparse, atexit, gzip, sys\nfrom tempfile import NamedTemporaryFile\nfrom twython import Twython\nfrom twython.exceptions import TwythonError, TwythonRateLimitError\n\n# Minimal time accepted between two Rate Limit Errors\nTOO_SOON = 10\n# Time to wait if we receive a Rate Limit Error too soon after a previous one\nWAIT_SOME_MORE = 60\n\n# Default values set according to https://dev.twitter.com/rest/reference/get/statuses/show/%3Aid\nSLEEP_SECS_DEFAULT = 0\nREQUEST_LIMIT_DEFAULT = 180\nBATCH_TIME_DEFAULT = 15 * 60\n\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef check_negative(value):\n value = int(value)\n if value < 0:\n raise argparse.ArgumentTypeError('%s is an invalid positive int value' % value)\n return value\n\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument('-i', '--input', required=False, help='Tweets IDs file, first element of each line is an ID. '\n 'If not set, the standard input is read.')\nparser.add_argument('-o', '--output', required=True,\n help='Tweets dataset in jsonl: a complete Twython tweet json per line. 
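NOTE (added): existing output lines are parsed on startup to build the set of already-fetched tweet IDs.\n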
One can set the same '\n 'output file for several runs of this script: already downloaded tweets '\n 'are not re-processed, new tweets are appended.')\nparser.add_argument('-r', '--responses', required=False,\n help='If set, it outputs a json map from the http response statuses to the tweet IDs.')\nparser.add_argument('-s', '--sleep', required=False, default=SLEEP_SECS_DEFAULT, type=check_negative,\n help='Time to wait between each request, in seconds.')\nparser.add_argument('-l', '--limit', required=False, default=REQUEST_LIMIT_DEFAULT, type=check_negative,\n help='Number of requests before stopping for wating the \"totaltime\" passed since the first request.')\nparser.add_argument('-t', '--totaltime', required=False, default=BATCH_TIME_DEFAULT, type=check_negative,\n help='Window time necessary for each batch of \"limit\" requests, in seconds.')\nparser.add_argument('-c', '--compress', required=False, choices=['bz2', 'gzip'],\n help='Chose the compression format for the out files, no extension is appended.')\nparser.add_argument('--consumerkey', required=True, help='Consumer key.')\nparser.add_argument('--consumersecret', required=True, help='Consumer secret.')\nparser.add_argument('--accesstoken', required=True, help='Access token.')\nparser.add_argument('--accesssecret', required=True, help='Access token secret.')\nparser.add_argument('--test', required=False, action='store_true', default=False,\n help='If set, run some tests for this script. All other parameters are ignored, '\n 'except for the authentication codes.')\nargs = parser.parse_args()\n\nin_path = args.input\nout_path = args.output\nresponses_path = args.responses\nsleep_secs = args.sleep\nrequest_limit = args.limit\nbatch_time = args.totaltime\ncompression = args.compress\ndo_test = args.test\n\nstart = -1\nresponses_store = {}\n\n# http://stackoverflow.com/questions/13044562/python-mechanism-to-identify-compressed-file-type-and-uncompress\n_SIGNATURES = {\n '\\x1f\\x8b\\x08': 'gzip',\n '\\x42\\x5a\\x68': 'bz2'\n # '\\x50\\x4b\\x03\\x04': 'zip'\n}\n_MAX_SIGN_LEN = max(len(x) for x in _SIGNATURES)\n\n\ndef which_compression(path):\n with open(path) as f:\n file_start = f.read(_MAX_SIGN_LEN)\n for signature, file_type in _SIGNATURES.items():\n if file_start.startswith(signature):\n return file_type\n\n\ndef get_open(path, mode, file_type=None, encoding='utf-8'):\n def wrapper(opener):\n if 'r' in mode:\n return io.TextIOWrapper(io.BufferedReader(opener), encoding=encoding)\n else:\n return io.TextIOWrapper(opener, encoding=encoding)\n\n if file_type == 'gzip':\n return wrapper(gzip.GzipFile(path, mode))\n if file_type == 'bz2':\n import bz2file\n return wrapper(bz2file.BZ2File(path, mode))\n else:\n return io.open(path, mode, encoding=encoding)\n\n\ndef dump_responses():\n global responses_path, responses_store, compression\n if responses_path and responses_store:\n with get_open(responses_path, 'w', compression) as responses_out:\n responses_out.write(unicode(json.dumps(responses_store)))\n\n\ndef log(msg, id=None):\n if id is not None:\n logger.info('%s: %s' % (id, msg))\n else:\n logger.info('%s' % msg)\n\n\ndef wait():\n global start\n # Wait a total of batch_time\n time.sleep((batch_time - (time.time() - start)) + 1)\n start = time.time()\n\n\ndef save_response(responses_dict, id, status_code):\n if status_code not in responses_dict:\n responses_dict[status_code] = []\n responses_dict[status_code].append(id)\n\n\ndef download(in_path, out_path, twitter, responses_store=None, sleep_secs=SLEEP_SECS_DEFAULT,\n 
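# NOTE: these defaults mirror Twitter's statuses/show rate limit of 180 requests per 15-minute window, as referenced at the top of the script.\n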
batch_time=BATCH_TIME_DEFAULT, request_limit=REQUEST_LIMIT_DEFAULT, compression=False):\n global start\n start = time.time()\n seen = frozenset()\n if os.path.exists(out_path):\n with get_open(out_path, 'r', which_compression(out_path)) as current:\n seen = frozenset(json.loads(line.strip())['id'] for line in current)\n count = 0\n with (get_open(in_path, 'r', which_compression(in_path)) if in_path else sys.stdin) as input_stream:\n with get_open(out_path, 'a', compression) as out:\n for line in input_stream:\n id = int(line.strip().split()[0])\n if id in seen:\n log('Already downloaded', id)\n continue\n try:\n while True:\n try:\n tweet = twitter.show_status(id=id)\n out.write(unicode(json.dumps(tweet) + '\\n'))\n except TwythonRateLimitError as e:\n # If this error is received after only few calls (10 seconds of calls) wait just a minute\n if time.time() - start < TOO_SOON and count > 0:\n log('Waiting %s seconds more for resuming download after recurrent rate limit error ...'\n % WAIT_SOME_MORE)\n time.sleep(WAIT_SOME_MORE)\n else:\n log(e, id)\n log('Waiting %s seconds for resuming download after rate limit error ...'\n % (batch_time - (time.time() - start)))\n wait()\n continue\n count += 1\n break\n except TwythonError as e:\n log(e, id)\n if responses_store is not None and isinstance(responses_store, dict):\n save_response(responses_store, id, e.error_code)\n continue\n finally:\n if sleep_secs:\n time.sleep(sleep_secs)\n if count > 0 and count % request_limit == 0:\n log('Waiting %s seconds for resuming download after rate limit check of %s calls ...'\n % (batch_time - (time.time() - start), request_limit))\n wait()\n if responses_store is not None and isinstance(responses_store, dict):\n save_response(responses_store, id, 200)\n log('Done!', id)\n\n\ndef test():\n log('Testing...')\n with NamedTemporaryFile() as temp_input:\n with NamedTemporaryFile() as temp_output:\n temp_input.write('20\\n10')\n temp_input.seek(0)\n temp_resp = {}\n download(temp_input.name, temp_output.name,\n Twython(args.consumerkey, args.consumersecret, args.accesstoken, args.accesssecret),\n temp_resp, 0, BATCH_TIME_DEFAULT, REQUEST_LIMIT_DEFAULT, 'bz2')\n assert temp_resp == {200: [20], 404: [10]}\n assert 'twttr' in json.loads(get_open(temp_output.name, 'r', 'bz2').read().strip())['text']\n log('Test passed!')\n\n\nif __name__ == '__main__':\n if do_test:\n test()\n else:\n twitter = Twython(args.consumerkey, args.consumersecret, args.accesstoken, args.accesssecret)\n atexit.register(dump_responses)\n download(in_path, out_path, twitter, responses_store if responses_path else None, sleep_secs, batch_time,\n request_limit, compression)\n","sub_path":"all-gists/b996cfe2f1d24752f23bd119fdd678f2/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":9231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191708209","text":"def bubblesort(a):\n n = len(a)\n \n for i in range(n):\n \n for j in range(0, n-i-1):\n \n if a[j] > a[j+1] :\n a[j], a[j+1] = a[j+1], a[j]\n\na = ['t','u','t','o','r','i','a','l']\nbubblesort(a)\nprint (\"Sorted array is:\")\nfor i in range(len(a)):\n print (a[i])","sub_path":"Programs/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"627029158","text":"#released on: 2020-05-24 11:35\r\nfrom tkinter import *\r\nimport time\r\nimport json\r\nimport sys\r\ngamerunning = True\r\ntk 
= Tk()\r\ntk.title('SUper street slapper v0.0')\r\nwindow = Canvas(tk, width=500, height = 250/1.7)\r\nwindow.pack()\r\nimg = PhotoImage(file='load.gif')\r\nplayer = PhotoImage(file='player.gif')\r\ndef image():\r\n window.create_image(0,0,image=img, anchor='nw')\r\n print('image called')\r\n \r\ndef text(ttr):\r\n window.create_text(80,10,text= ttr, font=('courier',20))\r\n print('text called')\r\n \r\nimage()\r\ntext('Loading...')\r\ndef clear():\r\n window.delete('all')\r\n print('clear called')\r\n \r\ndef __init__():\r\n global img\r\n img = PhotoImage(file=\"game.gif\")\r\n clear()\r\n image()\r\n button = Button(tk,text='Play!',command=initgame)\r\n button.pack()\r\n print('init called')\r\ntk.after(5000, __init__) \r\ndef renderPlayer(playerX):\r\n window.create_image(playerX,0,image=player, anchor='nw')\r\ndef loop():\r\n img = PhotoImage(file=\"game.gif\")\r\n clear()\r\n image()\r\n renderPlayer(x)\r\n tk.after(10,loop)\r\n print('loop called')\r\n\r\ndef initgame():\r\n #eat\r\n global x\r\n x=0\r\n loop()\r\n print('initgame run')\r\ndef moveleft(event):\r\n global x\r\n x-=10\r\ndef moveright(event):\r\n global x\r\n x+=10\r\nwindow.bind_all('',moveleft)\r\nwindow.bind_all('',moveright)\r\ntk.mainloop() \r\n","sub_path":"SUper street slapper PY v0.0/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591157981","text":"'''\nsort the string in the following manner:\n\nAll sorted lowercase letters are ahead of uppercase letters.\nAll sorted uppercase letters are ahead of digits.\nAll sorted odd digits are ahead of sorted even digits.\n'''\n\n# ord() -- gives you the numeric value of the character in whatever encoding it's in,\n# chr(c) --> to find character given ascii\n\n\ndef so(x):\n # Increase the priority by adding\n assci = ord(x)\n if assci > 64 and assci < 91:\n assci += 35\n elif assci > 47 and assci < 58:\n assci += 120\n return assci\n\n\ns1 = input().strip()\ns3 = sorted(s1, key=so)\n\n# ~ to ''.join(s3)\nprint(*s3, sep='')\n\n# same in one liners! 
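(each builds a composite sort key: case group first, then digit parity, then the character itself)\n# 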
4 ways!\n# print(*sorted(input(), key=lambda c: (-ord(c) >> 5, c in '02468', c)), sep='')\n#\n# print(*sorted(input(), key=lambda c: (c.isdigit() - c.islower(), c in '02468', c)), sep='')\n#\n# order = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1357902468'\n# print(*sorted(input(), key=order.index), sep='')\n#\n# import string\n# print(*sorted(input(), key=(string.ascii_letters + '1357902468').index), sep='')\n\n","sub_path":"delve/ginortS.py","file_name":"ginortS.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"286744210","text":"import argparse\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport re as re\nimport sklearn.metrics\nfrom sklearn.feature_extraction import stop_words\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.naive_bayes import ComplementNB, MultinomialNB, GaussianNB \nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import PolynomialFeatures\n\ndef calc_avg_y(predict_list, num):\n y_predict = 0\n for predictions in predict_list:\n y_predict = y_predict + predictions\n return y_predict/num\n\ndef tokenizer(str_input):\n words = re.sub(r\"[^A-Za-z0-9\\-]\", \" \", str_input).lower().split()\n return words\ndef bootstrap_sample(x_NF,y_NF, random_state = np.random):\n N = x_NF.shape[0]\n row_ids = random_state.choice(np.arange(N),size = N, replace = True)\n return x_NF[row_ids].copy(), y_NF[row_ids].copy()\n\nx_test_NF = pd.read_csv('data_reviews/x_test.csv')\nx_NF = pd.read_csv('data_reviews/x_train.csv')\ny_train_df = pd.read_csv('data_reviews/y_train.csv')\nw_e = {}\n\n#Reading in GLOVE\nglove = open('pretrained_word_embeddings/glove.6B.50d.txt', 'r')\nfor i in glove:\n i = i.split()\n w_e[i[0]] = np.array([float(s) for s in i[1:]])\nglove.close()\n\nFP_list = list()\nFN_list = list()\n\n#Y values\ny_NF = np.array(y_train_df['is_positive_sentiment'].values.tolist())\ntr_text_list = x_NF['text'].values.tolist()\nprocessed_list = list()\ntest_text_list = x_test_NF['text'].values.tolist()\ntest_processed_list = list()\n\n#Function to do shorten, may change later\ndef makelist(text_list, end_list):\n for text in text_list:\n sen_val = np.full((50,),0)\n w_len = 0\n new_t = tokenizer(text)\n sentence = list()\n for i in range(0,len(new_t)):\n #if (new_t[i] in w_e): #For without stop words\n if (new_t[i] in w_e and new_t[i] not in stop_words.ENGLISH_STOP_WORDS): \n sentence.append(w_e.get(new_t[i], \"none\"))\n sen_val = sen_val + np.array(w_e.get(new_t[i], \"none\"))\n w_len = w_len + 1\n if w_len != 0:\n # sen_val = np.hstack([np.hstack([np.amin(sentence,axis=0),(sen_val / w_len)]),np.amax(sentence,axis=0)])\n sen_val = np.hstack([np.hstack([np.hstack([np.amin(sentence,axis=0),(sen_val / w_len)]),np.std(sentence,axis=0)]),np.amax(sentence,axis=0)])\n else:\n sen_val = np.full((200,),0)\n end_list.append(sen_val)\n\nmakelist(tr_text_list, processed_list)\nmakelist(test_text_list, test_processed_list)\n\n#Polynomial feature transform\nx_NF = np.array(processed_list)\npoly = PolynomialFeatures(2, interaction_only=True)\nX_NF = poly.fit_transform(x_NF)\nx_test_NF = poly.fit_transform(np.array(test_processed_list))\nprint(X_NF.shape)\nprint(x_test_NF.shape)\n\n#BAGGING\nrange1 = 2\nrun = 0\nx_list = list()\ny_list = list()\ny_predict_list = 
list()\ny_predict_proba_list = list()\nfor i in range(0,range1):\n    x_i, y_i = bootstrap_sample(X_NF,y_NF)\n    x_list.append(x_i)\n    y_list.append(y_i)\n\n\"\"\"_____Logistic Regression using GridSearchCV_____\"\"\"\n\n#max_iter: no tuning needed because all candidates converge\n#C: tuned over 0.0001, 0.01, 1, 100, 10000 and 100 was the best.\n#solver: newton-cg, sag, lbfgs - newton was slightly better \ngnb_params = {'C':[0.0001,0.01,1,100,10000], 'solver':['newton-cg']}\n\ny_predict_on_train = 0\nfor x_NF,y_NF in zip(x_list,y_list):\n    run = run +1\n    gnb = LogisticRegression(max_iter = 10000)\n    gnb_cv = GridSearchCV(gnb,gnb_params,cv = 3, return_train_score = True)\n    gnb_cv.fit(x_NF,np.ravel(y_NF))\n    print(\"Run no: %d\" %run)\n    print(1-gnb_cv.cv_results_.get(\"split0_train_score\"))\n    print(1-gnb_cv.cv_results_.get(\"split1_train_score\"))\n    print(1-gnb_cv.cv_results_.get(\"split2_train_score\"))\n    print(1-gnb_cv.cv_results_.get(\"split0_test_score\"))\n    print(1-gnb_cv.cv_results_.get(\"split1_test_score\"))\n    print(1-gnb_cv.cv_results_.get(\"split2_test_score\"))\n    y_predict_proba_list.append(gnb_cv.predict_proba(x_test_NF)[:,1])\n    y_predict_list.append(gnb_cv.predict(x_test_NF))\n    y_predict_on_train = gnb_cv.predict(x_NF)\n    correct = 0  # per-run counter; must be initialized before tallying\n    # NOTE: y_list[1] fixes the comparison labels to the second bootstrap sample on every run;\n    # y_NF (the current run's labels) may be what was intended\n    for x,y in zip(y_predict_on_train,y_list[1]):\n        if x == y:\n            correct = correct + 1\n        elif x < y:\n            FN_list.append(x)\n        elif x > y:\n            FP_list.append(x)\n    print(\"FP no.: %d\"%len(FP_list))\n    print(\"FN no.: %d\"%len(FN_list))\n    print(\"Correct no.: %d\"%correct)\n\nyproba1_te_N = calc_avg_y(y_predict_proba_list,range1)\nyproba1_te_N_predict = calc_avg_y(y_predict_list,range1)\n\n\n\n\"\"\"_________To get testing metrics________\"\"\"\n# gnb = LogisticRegression(max_iter = 10000)\n# gnb_cv = GridSearchCV(gnb,gnb_params,cv = 3, return_train_score = True)\n# gnb_cv.fit(X_NF,np.ravel(y_NF))\n# print(\"Running metrics LR:\")\n# metrics=list()\n# for i in range(0,5):\n#     curr_metrics = dict(\n#         split0_train=(1-gnb_cv.cv_results_.get(\"split0_train_score\")[i]),\n#         split1_train=(1-gnb_cv.cv_results_.get(\"split1_train_score\")[i]),\n#         split2_train=(1-gnb_cv.cv_results_.get(\"split2_train_score\")[i]), \n#         split0_test=(1-gnb_cv.cv_results_.get(\"split0_test_score\")[i]),\n#         split1_test=(1-gnb_cv.cv_results_.get(\"split1_test_score\")[i]),\n#         split2_test=(1-gnb_cv.cv_results_.get(\"split2_test_score\")[i])\n#         )\n#     metrics.append(curr_metrics)\n# metrics=pd.DataFrame(metrics)\n# metrics.to_csv(\n#     os.path.join(\"P2_LR.csv\"),\n#     index=False,\n#     float_format='%.4f',\n#     columns=['split0_train','split1_train','split2_train','split0_test','split1_test','split2_test'])\n\n#Obtain FP/FN values\n\ncorrect = 0\n\n\nnp.savetxt(\"P2_LR_FP.txt\", FP_list, delimiter=\",\")\nnp.savetxt(\"P2_LR_FN.txt\", FN_list, delimiter=\",\")\n\n\"\"\"_____Naive Bayes using GridSearchCV_____\"\"\"\n\n# gnb_params = {'var_smoothing':[0, 0.1, 1, 10, 100]}\n# for x_NF,y_NF in zip(x_list,y_list):\n#     run = run +1\n#     gnb = GaussianNB()\n#     gnb_cv = GridSearchCV(gnb,gnb_params,cv = 3)\n#     gnb_cv.fit(x_NF,np.ravel(y_NF))\n#     print(\"Run no: %d\" %run)\n#     print(1-gnb_cv.cv_results_.get(\"split0_test_score\"))\n#     print(1-gnb_cv.cv_results_.get(\"split1_test_score\"))\n#     print(1-gnb_cv.cv_results_.get(\"split2_test_score\"))\n#     y_predict_proba_list.append(gnb_cv.predict_proba(x_test_NF)[:,1])\n#     y_predict_list.append(gnb_cv.predict(x_test_NF))\n\n# yproba1_te_N = calc_avg_y(y_predict_proba_list,range1)\n# yproba1_te_N_predict = calc_avg_y(y_predict_list,range1)\n\n\n# np.savetxt(\"P2_yproba1_test_proba.txt\", yproba1_te_N, 
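# NOTE: calc_avg_y averages the per-bootstrap outputs, i.e. soft-voting over the bagged models.\n# 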
delimiter=\",\")\n# np.savetxt(\"P2_yproba1_test_predict.txt\", yproba1_te_N_predict, delimiter=\",\")","sub_path":"project2/P2_word-embeddings.py","file_name":"P2_word-embeddings.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244445889","text":"d = []\n\n\ndef fibo(a, b, d, n):\n for i in range(n):\n c = a + b\n a = b\n b = c\n d.append(c)\n return d\n\n\nprint(fibo(0, 1, d, 10))\n","sub_path":"fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"144083683","text":"import retro\nfrom datetime import datetime\nimport time\nimport random\nimport itertools\nimport time\nimport numpy as np\n\n\ndef get_all_pairwise_actions(possible_actions):\n groupings = []\n for each in itertools.permutations(possible_actions.keys(),2):\n groupings.append(possible_actions[each[0]] + possible_actions[each[1]])\n return groupings\ndef spin_run(possible_actions, direction, run_distance_before_spin):\n set_of_actions = []\n for i in range(run_distance_before_spin):\n set_of_actions.append(possible_actions[direction])\n set_of_actions.append(possible_actions[\"DOWN\"])\n return set_of_actions\ndef spin_dash(possible_actions, direction, taps):\n set_of_actions = []\n set_of_actions.append(possible_actions[direction])\n for i in range(taps):\n set_of_actions.append(possible_actions[\"DOWN\"])\n set_of_actions.append(possible_actions[\"DOWN\"] + possible_actions[\"B\"])\n return set_of_actions\ndef run(possible_actions, direction, run_distance):\n set_of_actions = []\n for i in range(run_distance):\n set_of_actions.append(possible_actions[direction])\n return set_of_actions\n\ndef jump(possible_actions, direction, jump_duration):\n set_of_actions = []\n\n set_of_actions.append(possible_actions[\"A\"])\n set_of_actions.append(possible_actions[\"A\"])\n for i in range(jump_duration):\n set_of_actions.append(possible_actions[direction])\n\n return set_of_actions\n\ndef actions_available(possible_actions, action_chosen, rand = True):\n #https://info.sonicretro.org/index.php?title=File:Sonic2_MD_US_manual.pdf&page=7\n\n x = {\"run_left_spin_X_frames\" : spin_run(possible_actions, \"LEFT\", 100), # This could be a variable amount of running\n \"run_left_spin_Y_frames\" : spin_run(possible_actions, \"LEFT\", 200), # This could be a variable amount of running\n \"run_right_spin_X_frames\" : spin_run(possible_actions, \"RIGHT\", 100), # This could be a variable amount of running\n \"run_right_spin_Y_frames\" : spin_run(possible_actions, \"RIGHT\", 200), # This could be a variable amount of running\n \"run_left_X_frames\" : run(possible_actions, \"LEFT\", 100), # This could be a variable amount of running\n \"run_left_Y_frames\" : run(possible_actions, \"LEFT\", 200), # This could be a variable amount of running\n \"run_right_X_frames\" : run(possible_actions, \"RIGHT\", 100), # This could be a variable amount of running\n \"run_right_Y_frames\" : run(possible_actions, \"RIGHT\", 200), # This could be a variable amount of running\n \"spindash_right_X_times\" : spin_dash(possible_actions, \"RIGHT\", 5),\n \"spindash_left_X_times\" : spin_dash(possible_actions, \"LEFT\", 5),\n \"jump_right_X_frames\" : jump(possible_actions, \"RIGHT\", 5),\n \"jump_left_X_frames\" : jump(possible_actions, \"LEFT\", 5),\n \"jump_up_X_frames\" : jump(possible_actions, \"RIGHT\", 5),\n \n }\n if rand:\n action_chosen = 
random.randint(0,len(x.keys())-1)\n \n\n return x[list(x.keys())[action_chosen]], action_chosen\n\ndef main():\n\n # From gensis.json \"buttons\": [\"B\", \"A\", \"MODE\", \"START\", \"UP\", \"DOWN\", \"LEFT\", \"RIGHT\", \"C\", \"Y\", \"X\", \"Z\"],\n \n actions_by_value = {\"B\" : 2**11, \"A\" : 2**10, \"START\" : 2**8, \"UP\" : 2**7, \"DOWN\" : 2**6, \"LEFT\" : 2**5, \"RIGHT\" : 2**4, \"C\" : 2**3, \"Y\" : 2**2, \"X\" : 2**1, \"Z\" : 2**0} # \"MODE\" : 2**9\n env = retro.make(game='SonicTheHedgehog2-Genesis')\n obs = env.reset()\n obs, rew, done, info = env.step([0] * env.action_space.shape[0])\n env.render()\n q_learner = {}\n #print (\"press enter to start\")\n #print (obs)\n #print (info)\n #input()\n last_x = info[\"x\"]\n last_y = info[\"y\"]\n reward = 0\n alpha = .9\n gamma = .995\n while True:\n random_action = 35 > random.randint(1,100)\n if info[\"x\"] in q_learner and info[\"y\"] in q_learner[info[\"x\"]]:\n action_selection = np.argmax(q_learner[info[\"x\"]][info[\"y\"]])\n else:\n action_selection = 0\n random_action = True\n action_sequence, action_index = actions_available(actions_by_value, action_selection, random_action)\n last_x = info[\"x\"]\n last_y = info[\"y\"]\n for i in action_sequence:\n binary_value = str(bin(i))\n binary_value = ''.join(list(binary_value)[2:])\n action = list(map(int,list(str(binary_value).zfill(12))))\n \n obs, rew, done, info = env.step(action)\n\n time.sleep(1.0/60.0)\n env.render()\n if done:\n print (\"bdone\", done)\n print (info)\n print (rew)\n #print (\"WHAT???\")\n input()\n obs = env.reset()\n continue\n \n\n # This seems like it should move to out of the for loop it should run on completion of the whole thing\n # I speculate if we hit \"done\" there is gonna be something wonky as our state changes drastically\n if last_x not in q_learner:\n q_learner[last_x] = {} \n if last_y not in q_learner[last_x]:\n q_learner[last_x][last_y] = np.zeros(13)\n\n if last_x == info[\"x\"]:\n reward += -1\n\n if last_y == info[\"y\"]:\n reward += -1\n old_value = q_learner[last_x][last_y][action_index]\n next_max = np.argmax(q_learner[last_x][last_y])\n if info[\"x\"] not in q_learner:\n q_learner[info[\"x\"]] = {} \n if info[\"y\"] not in q_learner[info[\"x\"]]:\n q_learner[info[\"x\"]][info[\"y\"]] = np.zeros(13)\n q_learner[info[\"x\"]][info[\"y\"]][action_index] = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)# Q learner stuff\n\n env.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n'''\n/home/hotdog/.local/lib/python3.6/site-packages/retro/data/stable/SonicTheHedgehog2-Genesis/\n\ncontest.json\ndata.json\nmetadata.json\nscenario.json\nscript.lua\nxpos.json\n'''","sub_path":"Antsy Annie.py","file_name":"Antsy Annie.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444134869","text":"# -*- coding:utf-8 -*-\n\nRESOURCES_BASE_PATH = './resources/restore_flash_pic'\n\n# ==========================================\n\n# 屏蔽群 例:[12345678, 87654321]\nblockGroupNumber = []\n# 服务器配置\nhost = 'http://127.0.0.1'\nport = 8888\n\n# ==========================================\n\nfrom iotbot import Action, GroupMsg\nfrom enum import Enum\n\ntry:\n import ujson as json\nexcept:\n import json\n\n\ndef receive_group_msg(ctx: GroupMsg):\n userGroup = ctx.FromGroupId\n\n if Tools.commandMatch(userGroup, blockGroupNumber):\n return\n\n if not Tools.picOnly(ctx.MsgType):\n return\n\n msg = ctx.Content\n\n bot = Action(\n qq_or_bot = ctx.CurrentQQ,\n host 
= host,\n port = port\n )\n\n content = json.loads(msg)\n\n if content[\"Tips\"] == '[群消息-QQ闪照]':\n Tools.sendPictures(bot, userGroup, content['FileMd5'], content['ForwordBuf'], '你竟然发闪照[表情176]')\n\n\nclass Model(Enum):\n ALL = '_all'\n\n BLURRY = '_blurry'\n\n SEND_AT = '_send_at'\n\n SEND_DEFAULT = '_send_default'\n\n\nclass Status(Enum):\n SUCCESS = '_success'\n\n FAILURE = '_failure'\n\n\nclass Tools():\n\n @staticmethod\n def picOnly(msgType):\n return True if msgType == 'PicMsg' else False\n\n @classmethod\n def sendPictures(cls, bot, userGroup, FileMd5, ForwordBuf, content='', atUser=0):\n bot.send_group_pic_msg(\n toUser=int(userGroup),\n fileMd5=FileMd5,\n picBase64Buf=ForwordBuf,\n atUser=int(atUser),\n content=str(content)\n )\n\n @staticmethod\n def sendText(userGroup, msg, bot, model=Model.SEND_DEFAULT, atQQ=''):\n if msg != '' and msg != Status.FAILURE:\n if model == Model.SEND_DEFAULT:\n bot.send_group_text_msg(\n toUser=int(userGroup),\n content=str(msg)\n )\n if model == Model.SEND_AT:\n if atQQ == '':\n raise Exception('没有指定 at 的人!')\n at = f'[ATUSER({atQQ})]\\n'\n bot.send_group_text_msg(\n toUser=int(userGroup),\n content=at + str(msg)\n )\n\n @staticmethod\n def commandMatch(msg, commandList, model=Model.ALL):\n if model == Model.ALL:\n for c in commandList:\n if c == msg:\n return True\n if model == Model.BLURRY:\n for c in commandList:\n if msg.find(c) != -1:\n return True\n return False\n\n @staticmethod\n def atQQ(userQQ):\n return f'[ATUSER({userQQ})]\\n'\n\n @staticmethod\n def identifyAt(content):\n try:\n result = json.loads(content)\n return [result['Content'], result['UserID']]\n except:\n return Status.FAILURE\n","sub_path":"plugins/bot_restore_flash_pic.py","file_name":"bot_restore_flash_pic.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"358433892","text":"\"\"\"Unit tests for the CalibrationSystem/Utilities/functions.py.\"\"\"\n\nfrom __future__ import print_function\nimport pytest\nimport os\nimport string\nimport random\nimport tempfile\nimport shutil\n\nfrom ILCDIRAC.CalibrationSystem.Utilities.functions import readParameterDict\nfrom ILCDIRAC.CalibrationSystem.Utilities.functions import readParametersFromSteeringFile\nfrom ILCDIRAC.CalibrationSystem.Utilities.functions import updateSteeringFile\nfrom ILCDIRAC.CalibrationSystem.Utilities.functions import addParameterToProcessor\n\n__RCSID__ = \"$Id$\"\nMODULE_NAME = 'ILCDIRAC.CalibrationSystem.Utilities.functions'\n\n\ndef copySteeringFile(tag, calibID):\n \"\"\"Copy steering files to local test directory.\"\"\"\n workdirName = 'calib%s' % calibID\n if not os.path.exists(workdirName):\n os.makedirs(workdirName)\n\n if tag == 'CLIC':\n src = os.path.join(os.environ['DIRAC'], \"ILCDIRAC\", \"Testfiles\", \"clicReconstruction.xml\")\n shutil.copyfile(src, '%s/clicReconstruction.xml' % workdirName)\n return '%s/clicReconstruction.xml' % workdirName\n elif tag == 'FCCee':\n src = os.path.join(os.environ['DIRAC'], \"ILCDIRAC\", \"Testfiles\", \"fccReconstruction.xml\")\n shutil.copyfile(src, '%s/fccReconstruction.xml' % workdirName)\n return '%s/fccReconstruction.xml' % workdirName\n else:\n return None\n\n\ndef cleanDir(calibID):\n \"\"\"Remove test directory.\"\"\"\n workdirName = 'calib%s' % calibID\n if os.path.exists(workdirName):\n try:\n shutil.rmtree(workdirName)\n except EnvironmentError as e:\n print(\"Failed to delete directory: %s; ErrMsg: %s\" % (workdirName, str(e)))\n assert 
False\n\n\n@pytest.yield_fixture\ndef copyFccSteeringFile():\n \"\"\"Copy FCC steering file.\"\"\"\n calibID = 1\n yield copySteeringFile('FCCee', calibID)\n # cleanDir(calibID)\n\n\n@pytest.yield_fixture\ndef copyClicSteeringFile():\n \"\"\"Copy CLIC steering file.\"\"\"\n calibID = 1\n yield copySteeringFile('CLIC', calibID)\n cleanDir(calibID)\n\n\n@pytest.yield_fixture\ndef produceRandomTextFile():\n \"\"\"Produce random text.\"\"\"\n f = tempfile.NamedTemporaryFile(delete=False)\n nLines = random.randint(2, 20)\n for _ in range(0, nLines):\n nSymbolsInLine = random.randint(0, 120)\n line = ''\n for _ in range(0, nSymbolsInLine):\n line += random.choice(string.ascii_letters + ' ')\n f.write(line)\n f.close()\n yield f.name\n os.unlink(f.name)\n\n\n@pytest.fixture\ndef readEmptyParameterDict():\n \"\"\"Read parameters from the file.\"\"\"\n import ILCDIRAC.CalibrationSystem.Utilities as utilities\n fileDir = os.path.join(utilities.__path__[0], 'auxiliaryFiles')\n\n inFileName = os.path.join(fileDir, 'parameterListMarlinSteeringFile.txt')\n parDict = readParameterDict(inFileName)\n for iKey in parDict.keys():\n if 'RootFile' in iKey:\n parDict.pop(iKey, None)\n return parDict\n\n\ndef test_addParameterToProcessor(produceRandomTextFile, copyFccSteeringFile, mocker):\n \"\"\"Test adding of parameter to processor in Marlin steering file.\"\"\"\n # non-existing input file\n res = addParameterToProcessor('dummy.xml', 'dummyProc', {'name': 'dummyValue'})\n assert not res['OK']\n assert \"cannot find input\" in res['Message']\n # non-xml input file\n randomFile = produceRandomTextFile\n res = addParameterToProcessor(randomFile, 'dummyProc', {'name': 'dummyValue'})\n assert not res['OK']\n assert \"cannot parse input\" in res['Message']\n # good input file, non-existing processor\n steeringFile = copyFccSteeringFile\n res = addParameterToProcessor(steeringFile, 'dummyProc', {'name': 'dummyValue'})\n assert not res['OK']\n assert \"Can't find processor\" in res['Message']\n # good input file, good processor name, no 'name' key in the parameter dict\n steeringFile = copyFccSteeringFile\n res = addParameterToProcessor(steeringFile, 'dummyProc', {'dummy': 'dummyValue'})\n assert not res['OK']\n assert \"parameter dict should have key 'name'\" in res['Message']\n # good input file, good processor name\n res = addParameterToProcessor(steeringFile, 'MyAIDAProcessor', {'name': 'dummyValue'})\n assert res['OK']\n # good input file, good processor name, second append of the parameter with the same name\n res = addParameterToProcessor(steeringFile, 'MyAIDAProcessor', {'name': 'dummyValue'})\n assert not res['OK']\n assert (\"parameter with name %s already exists\" % 'dummyValue') in res['Message']\n # good input file, good processor name\n res = addParameterToProcessor(steeringFile, 'MyDDCaloDigi_10ns', {'name': 'ECALLayers', 'type': 'IntVec',\n 'value': '10 31'})\n assert res['OK']\n\n\ndef test_updateSteeringFile(copyClicSteeringFile, readEmptyParameterDict):\n \"\"\"Test updateSteeringFile.\"\"\"\n initialParDict = readEmptyParameterDict\n\n parDict1 = dict(initialParDict)\n # inFileName = os.path.join(self.fileDir, 'clicReconstruction_2019-04-17.xml')\n inFileName = copyClicSteeringFile\n res = readParametersFromSteeringFile(inFileName, parDict1)\n # key1 = \"processor[@name='MyPfoAnalysis']/parameter[@name='RootFile']\"\n # parDict1[key1] = \"dummyDummyRootFile.root\"\n # key2 = \"global/parameter[@name='LCIOInputFiles']\"\n # parDict1[key2] = \"in1.slcio, in2.slcio\"\n # self.assertTrue(len(parDict1) 
== len(initialParDict),\n # \"two dictionaries have to be the same size. len1: %s; len2: %s\"\n # % (len(parDict1), len(initialParDict)))\n\n outFileName = os.path.join(os.path.dirname(inFileName), 'out1.xml')\n res = updateSteeringFile(inFileName, outFileName, parDict1)\n assert res['OK']\n\n parDict2 = dict(initialParDict)\n res = readParametersFromSteeringFile(outFileName, parDict2)\n assert len(parDict1) == len(parDict2)\n\n notEqualValues = False\n for iKey in initialParDict:\n if parDict1[iKey] != parDict2[iKey]:\n notEqualValues = True\n assert not notEqualValues\n\n\ndef test_readParameterDict(readEmptyParameterDict):\n \"\"\"Test readParameterDict.\"\"\"\n parDict = readEmptyParameterDict\n assert '' not in parDict.keys()\n\n allValuesAreNone = True\n for _, iVal in parDict.iteritems():\n if iVal is not None:\n allValuesAreNone = False\n assert allValuesAreNone\n\n\ndef test_readParametersFromSteeringFile(copyClicSteeringFile, readEmptyParameterDict):\n \"\"\"Test readParametersFromSteeringFile.\"\"\"\n parDict = readEmptyParameterDict\n inFileName = copyClicSteeringFile\n res = readParametersFromSteeringFile(inFileName, parDict)\n print(res)\n assert res['OK']\n\n someValuesAreNone = False\n for _, iVal in parDict.iteritems():\n if iVal is None:\n someValuesAreNone = True\n assert not someValuesAreNone\n\n\ndef test_splitFilesAcrossJobs(mocker):\n \"\"\"Test splitting of file across jobs.\"\"\"\n from ILCDIRAC.CalibrationSystem.Utilities.functions import splitFilesAcrossJobs\n inputFiles = {'muon': ['muon1', 'muon2', 'muon3', 'muon4', 'muon5'],\n 'kaon': ['kaon1', 'kaon2', 'kaon3', 'kaon4', 'kaon5'],\n 'gamma': ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'gamma5'],\n 'zuds': ['zuds1', 'zuds2', 'zuds3', 'zuds4', 'zuds5']}\n nEventsPerFile = {'muon': 20, 'kaon': 24, 'gamma': 25, 'zuds': 30}\n\n def printOut(nJobs):\n print(\"\\nnEventsPerFile: %s\" % nEventsPerFile)\n print(\"nTotalEvents:\")\n for iKey in inputFiles.keys():\n print(\"%s: %s\" % (iKey, len(inputFiles[iKey]) * nEventsPerFile[iKey]))\n print(\"\")\n\n outDict = splitFilesAcrossJobs(inputFiles, nEventsPerFile, nJobs)\n for i in range(0, nJobs):\n print(\"Job #%s:\" % i)\n for iKey, iVal in outDict[i].iteritems():\n print(\"%s\\t --> %s\" % (iKey, iVal))\n\n nJobs = 5\n printOut(nJobs)\n outDict = splitFilesAcrossJobs(inputFiles, nEventsPerFile, nJobs)\n for i in range(0, nJobs):\n for iKey, iVal in outDict[i].iteritems():\n assert len(iVal[0]) == 1\n assert iVal[1] == 0\n assert iVal[2] == nEventsPerFile[iKey]\n\n nJobs = 2\n printOut(nJobs)\n outDict = splitFilesAcrossJobs(inputFiles, nEventsPerFile, nJobs)\n for i in range(0, nJobs):\n for iKey, iVal in outDict[i].iteritems():\n assert len(iVal[0]) == 3\n assert iVal[1] == 0 or iVal[1] == nEventsPerFile[iKey] / 2\n assert iVal[2] == len(inputFiles[iKey]) * nEventsPerFile[iKey] / 2\n\n # assert False\n","sub_path":"CalibrationSystem/Test/Test_Util_functions.py","file_name":"Test_Util_functions.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"67365262","text":"from collections import deque\n\nDIRS = [(-1,0), (1, 0), (0, -1), (0, 1)]\n\ndef solution(m, n, infests, vaccinateds):\n not_infected = m * n - len(infests) - len(vaccinateds)\n new_infests = deque([(i-1, j-1, 0) for i, j in infests])\n visited = [[False for _ in range(n)] for _ in range(m)]\n for i, j in infests + vaccinateds:\n visited[i-1][j-1] = True\n while new_infests:\n i, j, day = new_infests.popleft()\n for 
di, dj in DIRS:\n next_i, next_j = i + di, j + dj\n if 0 <= next_i < m and 0 <= next_j < n:\n if not visited[next_i][next_j]:\n new_infests.append((next_i, next_j, day + 1))\n visited[next_i][next_j] = True\n not_infected -= 1\n if not_infected == 0:\n return day\n else:\n return -1","sub_path":"Programmers/Lv3_전염병.py","file_name":"Lv3_전염병.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"77070030","text":"# coding=utf-8\n# Author: Jianghan LI\n# Question: 783.Minimum_Distance_Between_BST_Nodes\n# Complexity: O(N) O(1)\n# Date: 2018-02-11 0:00:00 - 0:09:06, 0 wrong try\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n\n def minDiffInBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n bfs = [root]\n for i in bfs:\n if i.left:\n bfs.append(i.left)\n if i.right:\n bfs.append(i.right)\n bfs = sorted(i.val for i in bfs)\n return min(b - a for a, b in zip(bfs, bfs[1:]))\n","sub_path":"problems/783.Minimum_Distance_Between_BST_Nodes/li_sort.py","file_name":"li_sort.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"259741922","text":"# -*- coding: utf-8 -*-\nfrom datetime import timedelta, datetime\nfrom openprocurement.api.constants import TZ\n\nimport standards\n\nWORKING_DAYS = {}\nHOLIDAYS = standards.load(\"calendars/workdays_off.json\")\nWORKING_WEEKENDS = standards.load(\"calendars/weekends_on.json\")\nfor date_str in HOLIDAYS:\n WORKING_DAYS[date_str] = True\nfor date_str in WORKING_WEEKENDS:\n WORKING_DAYS[date_str] = False\n\nTENDERING_DAYS = 6\nTENDERING_DURATION = timedelta(days=TENDERING_DAYS)\nSTAND_STILL_TIME = timedelta(days=4)\nENQUIRY_STAND_STILL_TIME = timedelta(days=2)\nCLAIM_SUBMIT_TIME = timedelta(days=3)\nCOMPLAINT_SUBMIT_TIME = timedelta(days=2)\nCOMPLAINT_OLD_SUBMIT_TIME = timedelta(days=3)\nCOMPLAINT_OLD_SUBMIT_TIME_BEFORE = datetime(2016, 7, 5, tzinfo=TZ)\nENQUIRY_PERIOD_TIME = timedelta(days=3)\nTENDERING_EXTRA_PERIOD = timedelta(days=2)\nABOVE_THRESHOLD_UA_DEFENSE = \"aboveThresholdUA.defense\"\nDEFENSE_KINDS = (\"authority\", \"central\", \"defense\", \"general\", \"social\", \"special\")\n","sub_path":"src/openprocurement/tender/openuadefense/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524967689","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys, csv, glob, os, time, numpy as np\r\nfrom scipy.optimize import curve_fit\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nfrom matplotlib import rcParams\r\nlabelsize = 18 #make size of axis tick labels larger\r\nplt.rcParams['xtick.labelsize'] = labelsize \r\nplt.rcParams['ytick.labelsize'] = labelsize\r\n\r\n\r\n\r\ndef find_times(filename='2018-05-04pedotpss', pressure_col_name='p_abs'):\r\n \r\n '''Read file which contains timestamps and changing pressures. 
The \r\n function returns a dataframe with times and corresponding pressures.\r\n '''\r\n \r\n data = pd.read_table(str(filename))\r\n pressure_col_name = str(pressure_col_name)\r\n \r\n p_raw = np.array(data[pressure_col_name])\r\n \r\n p_indices = np.array([])\r\n time_table = []\r\n \r\n for i in range(len(data)-1):\r\n # get indices of times to keep\r\n if p_raw[i] != p_raw[i+1]:\r\n p_indices = np.append(p_indices, i).astype(int)\r\n \r\n time_table.append([data['date_time'].iloc[i],\r\n data[pressure_col_name].iloc[i]])\r\n \r\n time_table = pd.DataFrame(time_table, columns=['time', 'pressure'])\r\n \r\n return time_table\r\n\r\n\r\n\r\n#%%\r\n \r\ntime_table = find_times('exp_data\\\\2018-06-15_rh_chamber',\r\n pressure_col_name='p_abs')\r\n\r\n\r\n\r\n","sub_path":"Python_code/format_date_times.py","file_name":"format_date_times.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191944362","text":"import pandas as pd\nimport numpy as np\nimport os\n\nfrom keras.models import model_from_json\nfrom keras.models import Model\nfrom PIL import Image\nfrom skimage import transform\n\ntrain_image_path = '/mnt/pgth06a/saliency/train/'\ntest_image_path = '/mnt/pgth06a/saliency/test/'\ntrain_embeddings_output = '../../data/corpus/devset/dev-set/train_saliency_embeddings_splitted.csv'\ntest_embeddings_output = 'test_saliency_embeddings_splitted.csv'\nmodel_path = 'saliency_autoencoder_model.json'\nweight_path = 'saliency_autoencoder_weight.h5'\npath_to_train = '/home/aitorgalan/Escritorio/tfm-impacto-youtube-cortos/saliency/models/saliency/short_saliency/train'\npath_to_test = '/home/aitorgalan/Escritorio/tfm-impacto-youtube-cortos/saliency/models/saliency/short_saliency/test'\n\norganize = pd.read_csv('/mnt/pgth04b/DATABASES_CRIS/final_organization.csv')\norg_df = pd.DataFrame(organize)\ncorto = []\nfor index, row in org_df.iterrows():\n if row['0 = CORTO | 1 = LARGO'] == 0:\n corto.append(row['id'])\n\ntraining = []\ntesting = []\n\nfor index, row in org_df.iterrows():\n if row['0 = TEST | 1 = TRAIN'] == 0:\n testing.append(row['id'])\n else:\n training.append(row['id'])\n\nEMBEDDING_LENGTH = 84\n\ndef process_image(np_image):\n \"\"\"\n Preprocess the image to be fed into the model.\n \"\"\"\n np_image = np.array(np_image).astype('float32')/255\n np_image = np_image.T\n np_image = transform.resize(np_image, (384, 224, 1))\n np_image = np.expand_dims(np_image, axis=0)\n return np_image\n\ndef final_process(video_folder, video_name, train_or_test):\n # load json and create model\n with open(model_path, 'r') as model_file:\n model = model_file.read()\n\n complete_model = model_from_json(model)\n\n # load weights into new model\n complete_model.load_weights(weight_path)\n print(\"Loaded model from disk\")\n\n # Generate new model in which output is the bottleneck layer output so we can extract the embedding\n bottleneck_layer = \"dense_1\"\n intermediate_output_model = Model(\n inputs=complete_model.input, outputs=complete_model.get_layer(bottleneck_layer).output)\n\n # Create array in which we will save the embeddings\n data = []\n\n csv_name = video_name + '.csv'\n if train_or_test == 'train':\n csv_path = os.path.join(path_to_train, csv_name)\n else:\n csv_path = os.path.join(path_to_test, csv_name)\n\n images = sorted(os.listdir(video_folder))\n if len(images) != 0:\n print(video_name + ' is not empty!')\n cuentaux = 0\n for image in images:\n cuentaux += 1\n print(image)\n if cuentaux == 1 or 
cuentaux == 12:\n print(str(cuentaux))\n input_image = Image.open(os.path.join(video_folder, image))\n pixels = process_image(input_image)\n\n # Get the embedding\n embedding = intermediate_output_model.predict(pixels)\n\n this_data = list(embedding[0])\n\n # Save it in the array\n this_data.insert(0, video_name)\n this_data.insert(1, int(image[0:-4]))\n\n data.append(this_data)\n elif cuentaux >= 24:\n cuentaux = 0\n\n # Save data.\n df = pd.DataFrame(data)\n final_df = df.sort_values(1)\n final_df.to_csv(csv_path)\n\n print(video_name + ' was successfully processed')\n final_df.head(2)\n\n\ndef execute_emb_saliency(years_f, path_f):\n\n output_tased = 'output_tased'\n\n for y in years_f:\n path_to_year = os.path.join(path_f, y)\n videos_in_year = os.listdir(path_to_year)\n\n for v in videos_in_year:\n if v in corto:\n video_name = v\n\n if v in testing:\n train_or_test = 'test'\n else:\n train_or_test = 'train'\n\n path_to_video = os.path.join(path_to_year, v)\n path_to_saliency = os.path.join(path_to_video, output_tased)\n final_process(path_to_saliency, video_name, train_or_test)\n\nyears_fin = ['2014', '2015', '2016', '2017', '2018', '2019']\npath_fin = '/mnt/pgth04b/DATABASES_CRIS/FINALISTAS_ORIGINAL/DATABASES/'\n\nyears_no_fin = ['2017', '2018', '2019']\npath_no_fin = '/mnt/pgth04b/DATABASES_CRIS/NO_FINALISTAS/DATABASES'\n\nexecute_emb_saliency(years_fin, path_fin)\nexecute_emb_saliency(years_no_fin, path_no_fin)\n","sub_path":"saliency/models/saliency/extract_short_saliency.py","file_name":"extract_short_saliency.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"371710506","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport re\nimport operator\n\ndef clean_list(list_word):\n\n c = []\n for word in list_word[:]: # iterate over a copy so removals do not skip items\n if '\\n' in word:\n c += word.split('\\n')\n list_word.remove(word)\n list_word += c\n while '' in list_word: # drop the empty strings left over from the split\n list_word.remove('')\n return list_word\n\nwords, word_len = {}, {}\nfilename = sys.argv[1]\n\nwith open(filename) as file:\n text = file.read()\n list_word = text.split(\" \")\n list_word = clean_list(list_word)\n\n for word in list_word:\n words[word] = list_word.count(word)\n\n file.close()\n\nfor key in words.keys():\n word_len[key] = len(key)\nword_len = dict(sorted(word_len.items(),key=operator.itemgetter(1),reverse=True))\n\n\nprint(\"{:>20} {:>20}\".format(\"Word\",\"Len\"))\nfor key, value in word_len.items():\n print(\"{:>20} {:>20}\".format(key,value))\n","sub_path":"Chapter7/longest_word_file.py","file_name":"longest_word_file.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"580039872","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Files\\Research\\databrowse\\databrowse\\plugins\\db_limatix_viewer\\db_limatix_viewer.py\n# Compiled at: 2020-02-17 23:23:49\n# Size of source mod 2**32: 30617 bytes\n\"\"\" plugins/renderers/db_xlg_viewer.py - Experiment Log Viewer \"\"\"\nimport sys, os, random, string, glob, zipfile, tempfile\nfrom lxml import etree\ntry:\n from urllib import pathname2url\nexcept ImportError:\n from urllib.request import pathname2url\n\nfrom databrowse.support.renderer_support import renderer_class\nimport databrowse.plugins.db_data_table.db_data_table as db_data_table\nimport magic\n\nclass db_limatix_viewer(renderer_class):\n 
__doc__ = ' Experiment Log Viewer '\n _namespace_uri = 'http://limatix.org/datacollect'\n _namespace_local = 'dc'\n _default_content_mode = 'full'\n _default_style_mode = 'log_view'\n _default_recursion_depth = 2\n\n def getContent(self):\n if self._caller != 'databrowse':\n return\n if self._content_mode == 'full':\n if self._style_mode in ('old_log_view', 'old_tabular_view'):\n p = etree.XMLParser(huge_tree=True)\n xmlroot = etree.parse((self._fullpath), parser=p).getroot()\n try:\n reldest = xmlroot.xpath('dc:summary/dc:reldest', namespaces={'dc': 'http://limatix.org/datacollect'})[0].text\n reldesturl = self.getURL(os.path.abspath(os.path.join(os.path.dirname(self._relpath), reldest)))\n xmlroot.set('reldesturl', reldesturl)\n except:\n xmlroot.set('reldesturl', '')\n\n configlist = xmlroot.xpath('dc:configstr', namespaces={'dc': 'http://limatix.org/datacollect'})\n for item in configlist:\n try:\n fname = item.get('fname')\n fnames = item.get('fnames')\n if fname:\n path = os.path.realpath(fname)\n if path.startswith(os.path.normpath(self._web_support.dataroot)) and os.access(path, os.R_OK) and os.path.exists(path):\n relpath = path.replace(self._web_support.dataroot, '')\n url = self.getURL(relpath)\n item.set('url', url)\n elif fnames:\n if fnames[0] == '[':\n if fnames[(-1)] == ']':\n urls = []\n fnamelist = fnames[1:-1].split(',')\n for fname in fnamelist:\n fname = fname.replace(\"'\", '').replace('\"', '').strip()\n path = os.path.realpath(fname)\n if path.startswith(os.path.normpath(self._web_support.dataroot)) and os.access(path, os.R_OK) and os.path.exists(path):\n relpath = path.replace(self._web_support.dataroot, '')\n url = self.getURL(relpath)\n urls.append(url)\n else:\n urls.append('')\n\n item.set('urls', repr(urls))\n except:\n pass\n\n specimenlist = xmlroot.xpath('//dc:specimen', namespaces={'dc':'http://limatix.org/datacollect', 'dcv':'http://limatix.org/dcvalue'})\n for item in specimenlist:\n if item.text:\n relpath = '/specimens/' + item.text + '.sdb'\n if os.access(os.path.abspath(self._web_support.dataroot + '/' + relpath), os.R_OK) and os.path.exists(os.path.abspath(self._web_support.dataroot + '/' + relpath)):\n url = self.getURL(relpath)\n item.set('url', url)\n\n transducerlist = xmlroot.xpath('//dc:xducer', namespaces={'dc':'http://limatix.org/datacollect', 'dcv':'http://limatix.org/dcvalue'})\n for item in transducerlist:\n if item.text:\n relpath = '/transducers/' + item.text + '.tdb'\n if os.access(os.path.abspath(self._web_support.dataroot + '/' + relpath), os.R_OK) and os.path.exists(os.path.abspath(self._web_support.dataroot + '/' + relpath)):\n url = self.getURL(relpath)\n item.set('url', url)\n\n return xmlroot\n if self._content_mode == 'full':\n if self._style_mode != 'limatix_custom_view':\n p = etree.XMLParser(huge_tree=True)\n xmlroot = etree.parse((self._fullpath), parser=p).getroot()\n configlist = xmlroot.xpath('dc:config/dc:configfile', namespaces={'dc': 'http://limatix.org/datacollect'})\n for item in configlist:\n try:\n xlink = item.get('{http://www.w3.org/1999/xlink}href')\n if xlink:\n path = os.path.realpath(fname)\n if path.startswith(os.path.normpath(self._web_support.dataroot)) and os.access(path, os.R_OK):\n if os.path.exists(path):\n relpath = path.replace(self._web_support.dataroot, '')\n url = self.getURL(relpath)\n item.set('url', url)\n except:\n pass\n\n specimenlist = xmlroot.xpath('//dc:specimen', namespaces={'dc':'http://limatix.org/datacollect', 'dcv':'http://limatix.org/dcvalue'})\n for item in specimenlist:\n if 
item.text:\n relpath = '/specimens/' + item.text + '.sdb'\n if os.access(os.path.abspath(self._web_support.dataroot + '/' + relpath), os.R_OK) and os.path.exists(os.path.abspath(self._web_support.dataroot + '/' + relpath)):\n url = self.getURL(relpath)\n item.set('url', url)\n\n transducerlist = xmlroot.xpath('//dc:xducer', namespaces={'dc':'http://limatix.org/datacollect', 'dcv':'http://limatix.org/dcvalue'})\n for item in transducerlist:\n if item.text:\n relpath = '/transducers/' + item.text + '.tdb'\n if os.access(os.path.abspath(self._web_support.dataroot + '/' + relpath), os.R_OK) and os.path.exists(os.path.abspath(self._web_support.dataroot + '/' + relpath)):\n url = self.getURL(relpath)\n item.set('url', url)\n\n return xmlroot\n if self._content_mode == 'full':\n if self._style_mode == 'limatix_custom_view':\n self._namespace_local = 'dt'\n self._namespace_uri = 'http://limatix.org/databrowse/datatable'\n if 'custom_view' not in self._web_support.req.form:\n raise self.RendererException('Custom View Selection Required')\n xml = etree.parse(os.path.join(os.path.dirname(self._fullpath), self._web_support.req.form['custom_view'].value))\n namespaces = ' '.join(['xmlns:' + str(item) + '=\"' + str(value) + '\"' for item, value in xml.getroot().nsmap.iteritems()])\n root = xml.getroot()\n root.set('filenamematch', os.path.basename(self._fullpath))\n ext_module = db_data_table.MyExt(os.path.join(os.path.dirname(self._fullpath), self._web_support.req.form['custom_view'].value))\n extensions = etree.Extension(ext_module, ('data', 'xmlassert'), ns='http://limatix.org/databrowse/datatable/functions')\n root = xml.xslt((etree.XML(db_data_table._table_transform % (namespaces, self._web_support.siteurl, self.getURL(os.path.join(os.path.dirname(self._relpath), self._web_support.req.form['custom_view'].value)), self._web_support.req.form['custom_view'].value))), extensions=extensions).getroot()\n root.set('custom_view', self._web_support.req.form['custom_view'].value)\n return root\n if self._content_mode == 'raw':\n if 'filetype' in self._web_support.req.form:\n self._namespace_local = 'dt'\n self._namespace_uri = 'http://limatix.org/databrowse/datatable'\n if 'custom_view' not in self._web_support.req.form:\n raise self.RendererException('Custom View Selection Required')\n xml = etree.parse(os.path.join(os.path.dirname(self._fullpath), self._web_support.req.form['custom_view'].value))\n namespaces = ' '.join(['xmlns:' + str(item) + '=\"' + str(value) + '\"' for item, value in xml.getroot().nsmap.iteritems()])\n root = xml.getroot()\n root.set('filenamematch', os.path.basename(self._fullpath))\n ext_module = db_data_table.MyExt(os.path.join(os.path.dirname(self._fullpath), self._web_support.req.form['custom_view'].value))\n extensions = etree.Extension(ext_module, ('data', 'xmlassert'), ns='http://limatix.org/databrowse/datatable/functions')\n base = xml.xslt((etree.XML(db_data_table._table_transform % (namespaces, self._web_support.siteurl, self.getURL(os.path.join(os.path.dirname(self._relpath), self._web_support.req.form['custom_view'].value)), self._web_support.req.form['custom_view'].value))), extensions=extensions)\n filename = str(base.xpath('//@title')[0])\n if self._web_support.req.form['filetype'].value == 'ods':\n result = etree.tostring(base.xslt(etree.XML(db_data_table._ods_transform)))\n f = tempfile.TemporaryFile()\n if sys.version_info[0] <= 2 and sys.version_info[1] < 7:\n zipfile_compression = zipfile.ZIP_STORED\n else:\n zipfile_compression = zipfile.ZIP_DEFLATED\n zf = 
zipfile.ZipFile(f, 'w', zipfile_compression)\n if sys.version_info[0] <= 2 and sys.version_info[1] < 7:\n zf.writestr('mimetype', 'application/vnd.oasis.opendocument.spreadsheet')\n else:\n zf.writestr('mimetype', 'application/vnd.oasis.opendocument.spreadsheet', compress_type=(zipfile.ZIP_STORED))\n zf.writestr('META-INF/manifest.xml', '\\n\\n \\n \\n \\n \\n \\n\\n')\n zf.writestr('content.xml', result)\n zf.close()\n self._web_support.req.response_headers['Content-Type'] = 'application/vnd.oasis.opendocument.spreadsheet'\n self._web_support.req.response_headers['Content-Length'] = str(f.tell())\n f.seek(0, 0)\n self._web_support.req.response_headers['Content-Disposition'] = 'attachment; filename=' + filename + '.ods'\n self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())\n self._web_support.req.output_done = True\n if 'wsgi.file_wrapper' in self._web_support.req.environ:\n return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)\n return iter(lambda : f.read(1024), '')\n elif self._web_support.req.form['filetype'].value == 'csv':\n f = tempfile.TemporaryFile()\n coldef = base.xpath('dt:header/dt:coldef', namespaces={'dt': 'http://limatix.org/databrowse/datatable'})\n f.write(','.join([x.text for x in coldef]) + '\\n')\n for row in base.xpath('dt:row', namespaces={'dt': 'http://limatix.org/databrowse/datatable'}):\n datadef = row.xpath('dt:data/.', namespaces={'dt': 'http://limatix.org/databrowse/datatable'})\n f.write(','.join([x.text if x.text is not None else '' for x in datadef]) + '\\n')\n\n f.flush()\n f.seek(0, 2)\n self._web_support.req.response_headers['Content-Type'] = 'text/csv'\n self._web_support.req.response_headers['Content-Length'] = str(f.tell())\n f.seek(0, 0)\n self._web_support.req.response_headers['Content-Disposition'] = 'attachment; filename=' + filename + '.csv'\n self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())\n self._web_support.req.output_done = True\n if 'wsgi.file_wrapper' in self._web_support.req.environ:\n return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)\n return iter(lambda : f.read(1024), '')\n else:\n raise self.RendererException('Invalid File Type')\n else:\n size = os.path.getsize(self._fullpath)\n try:\n magicstore = magic.open(magic.MAGIC_MIME)\n magicstore.load()\n contenttype = magicstore.file(os.path.realpath(self._fullpath))\n except AttributeError:\n contenttype = magic.from_file((os.path.realpath(self._fullpath)), mime=True)\n\n if contenttype is None:\n contenttype = 'text/plain'\n f = open(self._fullpath, 'rb')\n self._web_support.req.response_headers['Content-Type'] = contenttype\n self._web_support.req.response_headers['Content-Length'] = str(size)\n self._web_support.req.response_headers['Content-Disposition'] = 'attachment; filename=' + os.path.basename(self._fullpath)\n self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())\n self._web_support.req.output_done = True\n if 'wsgi.file_wrapper' in self._web_support.req.environ:\n return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)\n return iter(lambda : f.read(1024), '')\n else:\n raise self.RendererException('Invalid Content Mode')\n\n def loadMenu(self):\n \"\"\" Load Menu Items for all current handlers \"\"\"\n newmenu = etree.Element('{http://thermal.cnde.iastate.edu/databrowse}navbar')\n isDirectory = os.path.isdir(self._fullpath)\n for handler in 
reversed(self._handlers):\n dirlist = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.abspath(os.path.dirname(sys.modules[('databrowse.plugins.' + handler)].__file__) + '/')) if item.lower().startswith('dbs_')]\n additionalitems = []\n if isDirectory:\n if os.path.exists(os.path.join(self._fullpath, '.databrowse', 'stylesheets', handler)):\n additionalitems = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.join(self._fullpath, '.databrowse', 'stylesheets', handler)) if item.lower().startswith('dbs_')]\n elif os.path.exists(os.path.join(os.path.dirname(self._fullpath), '.databrowse', 'stylesheets', handler)):\n additionalitems = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.join(os.path.dirname(self._fullpath), '.databrowse', 'stylesheets', handler)) if item.lower().startswith('dbs_')]\n dirlist = dirlist + additionalitems\n navelem = etree.SubElement(newmenu, '{http://thermal.cnde.iastate.edu/databrowse}navelem')\n title = etree.SubElement(navelem, '{http://www.w3.org/1999/xhtml}a')\n title.text = ' '.join([i[0].title() + i[1:] for i in handler[3:].split('_')])\n navitems = etree.SubElement(navelem, '{http://thermal.cnde.iastate.edu/databrowse}navdir', alwaysopen='true')\n for item in dirlist:\n if item not in self._handler_support.hiddenstylesheets:\n if not isDirectory:\n if item not in self._handler_support.directorystylesheets:\n link = self.getURL((self._relpath), handler=handler, style_mode=item)\n if self._style_mode == item and self.__class__.__name__ == handler:\n itemelem = etree.SubElement(navitems, '{http://thermal.cnde.iastate.edu/databrowse}navelem', selected='true')\n else:\n itemelem = etree.SubElement(navitems, '{http://thermal.cnde.iastate.edu/databrowse}navelem')\n menuitem = etree.SubElement(itemelem, '{http://www.w3.org/1999/xhtml}a', href=link)\n menuitem.text = ' '.join([i[0].title() + i[1:] for i in item.split('_')])\n continue\n if isDirectory:\n link = self.getURL((self._relpath), handler=handler, style_mode=item)\n if self._style_mode == item and self.__class__.__name__ == handler:\n itemelem = etree.SubElement(navitems, '{http://thermal.cnde.iastate.edu/databrowse}navelem', selected='true')\n else:\n itemelem = etree.SubElement(navitems, '{http://thermal.cnde.iastate.edu/databrowse}navelem')\n menuitem = etree.SubElement(itemelem, '{http://www.w3.org/1999/xhtml}a', href=link)\n menuitem.text = ' '.join([i[0].title() + i[1:] for i in item.split('_')])\n continue\n\n curdirlist = [item for item in os.listdir(os.path.abspath(os.path.dirname(self._fullpath))) if os.path.splitext(item)[1] == '.tbl']\n customitems = {}\n cwd = os.getcwd()\n os.chdir(os.path.dirname(self._fullpath))\n for item in curdirlist:\n try:\n xml = etree.parse(os.path.abspath(os.path.join(os.path.dirname(self._fullpath), item)))\n filename = xml.xpath('@filenamematch')[0]\n filelist = glob.glob(filename)\n for filename in filelist:\n if filename == os.path.basename(self._fullpath):\n it = item if not item.startswith('.') else item[1:]\n title = ' '.join([i[0].title() + i[1:] for i in os.path.splitext(it)[0].split('_')])\n customitems[item] = title\n\n except:\n pass\n\n os.chdir(cwd)\n navelem = newmenu[0]\n navitems = navelem[1]\n for item in customitems:\n link = self.getURL((self._relpath), handler='db_limatix_viewer', style_mode='limatix_custom_view', custom_view=item)\n if self._style_mode == 'limatix_custom_view' and self._web_support.req.form['custom_view'].value == item:\n itemelem = etree.SubElement(navitems, 
'{http://thermal.cnde.iastate.edu/databrowse}navelem', selected='true')\n else:\n itemelem = etree.SubElement(navitems, '{http://thermal.cnde.iastate.edu/databrowse}navelem')\n menuitem = etree.SubElement(itemelem, '{http://www.w3.org/1999/xhtml}a', href=link)\n menuitem.text = customitems[item]\n\n self._web_support.menu.AddMenu(newmenu)\n\n def loadStyleFunction(self):\n \"\"\" Override Load Style Function to Replace URL \"\"\"\n custompath = os.path.abspath((self._fullpath if os.path.isdir(self._fullpath) else os.path.dirname(self._fullpath)) + '/.databrowse/stylesheets/' + self.__class__.__name__ + '/dbs_' + self._style_mode + '.xml')\n defaultpath = os.path.abspath(os.path.dirname(sys.modules[('databrowse.plugins.' + self.__class__.__name__)].__file__) + '/dbs_' + self._style_mode + '.xml')\n filename = custompath if os.path.exists(custompath) else None\n override = False\n if filename is not None:\n override = True if (os.path.exists(defaultpath) or hasattr(self, '_style_' + self._style_mode)) else False\n else:\n if filename is None:\n if self._web_support.style.IsStyleLoaded(self._namespace_uri):\n if override != True:\n return\n filename = defaultpath if os.path.exists(defaultpath) else None\n if filename is None:\n if hasattr(self, '_style_' + self._style_mode):\n stylestring = getattr(self, '_style_' + self._style_mode)\n else:\n raise self.RendererException('Unable To Locate Stylesheet for Style Mode %s in %s' % (self._style_mode, self.__class__.__name__))\n else:\n f = open(filename, 'r')\n stylestring = f.read()\n f.close()\n stylestring = stylestring.replace('/usr/local/limatix-qautils/checklist/datacollect2.xsl', pathname2url(os.path.join(self._web_support.limatix_qautils, 'checklist/datacollect2.xsl')).replace('///', ''))\n if override is True:\n randomid = ''.join((random.choice(string.ascii_uppercase + string.digits) for x in range(10)))\n newnamespace = self._namespace_uri + randomid\n newlocalns = self._namespace_local + randomid\n newnamedtemplates = self.__class__.__name__ + '-' + randomid + '-'\n stylestring = stylestring.replace(self._namespace_uri, newnamespace)\n stylestring = stylestring.replace(self._namespace_local + ':', newlocalns + ':')\n stylestring = stylestring.replace('xmlns:' + self._namespace_local, 'xmlns:' + newlocalns)\n stylestring = stylestring.replace(self.__class__.__name__ + '-', newnamedtemplates)\n self._namespace_uri = newnamespace\n self._namespace_local = newlocalns\n self._web_support.style.AddStyle(self._namespace_uri, stylestring)","sub_path":"pycfiles/databrowse-0.8.4-py2-none-any/db_limatix_viewer.cpython-37.py","file_name":"db_limatix_viewer.cpython-37.py","file_ext":"py","file_size_in_byte":23217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235044959","text":"'''\nCreated on Aug 11, 2015\n\n@author: Guohong\n'''\nimport pygame\nimport math\nfrom os.path import join, splitext\nfrom os import listdir\nfrom core.arena import Arena, Tile\nfrom core.tank import Tank, EnemyTank\nfrom utils.common import rotate_rect_offset\n#Some constants of pixel conversion are needed here\n\nTILE_SIZE = 40\nimages = {}\n\ndef load_images():\n '''\n Load the images from the resources.\n Return a dictionary that map the images to their names.\n '''\n images_dir = join(__file__, '..', '..', 'resources', 'images')\n image_names = listdir(images_dir)\n for name in image_names:\n filename, file_extension = splitext(name)\n if file_extension == '.png':\n images[filename] = 
pygame.image.load(join(images_dir, name))\n\ndef draw_scene(game_model, interpolation=1.0):\n '''\n game_model: the game model used to draw the scene\n interpolation: range in (0, 1]. Used to draw a smoother scene when game logic\n is updating at a low rate.\n Draw the content of the game model (arena, tanks, missiles etc.) on a \n surface and return that surface.\n '''\n # multiple helper functions should be used.\n scene = pygame.Surface((TILE_SIZE*game_model.arena.width, TILE_SIZE*game_model.arena.height))\n terrain = game_model.arena.get_terrain()\n for row in range(game_model.arena.width):\n for col in range(game_model.arena.height):\n draw_tile(terrain[row][col], row, col, scene)\n \n draw_tank(game_model.player, scene, interpolation)\n return scene\n\nBARREL_CENTER = (20, 25)\n\ndef draw_tank(tank, scene, interpolation):\n '''\n tank: the specific tank\n scene: the surface to draw onto\n interpolation: used to draw the tank between two game updates\n Draw the specific tank to the given scene\n '''\n # use player's or enemy's image\n tank_base = images['tank_base1']\n tank_barrel = images['tank_barrel1']\n if not isinstance(tank, EnemyTank):\n tank_base = images['tank_base0']\n tank_barrel = images['tank_barrel0']\n \n # the interpolated base angle of the tank\n intpl_base_ang = tank.base_ang\n if tank.state == Tank.STATE_TURN_L:\n intpl_base_ang -= interpolation*Tank.TURN_STEP\n elif tank.state == Tank.STATE_TURN_R:\n intpl_base_ang += interpolation*Tank.TURN_STEP\n # the interpolated barrel angle of the tank\n intpl_barrel_ang = tank.barrel_ang\n if tank.barrel_cmd_reg == Tank.BARREL_CW:\n intpl_barrel_ang += (interpolation-1)*Tank.BARREL_STEP\n elif tank.barrel_cmd_reg == Tank.BARREL_CCW:\n intpl_barrel_ang -= (interpolation-1)*Tank.BARREL_STEP\n if tank.state == Tank.STATE_TURN_R:\n intpl_barrel_ang += interpolation*Tank.TURN_STEP\n elif tank.state == Tank.STATE_TURN_L:\n intpl_barrel_ang -= interpolation*Tank.TURN_STEP\n # the interpolated location of the tank\n intpl_x = tank.x\n intpl_y = tank.y\n if tank.state == Tank.STATE_FORWARD:\n intpl_x += tank.base_dir[0]*Tank.MOVE_FRACTION*interpolation\n intpl_y += tank.base_dir[1]*Tank.MOVE_FRACTION*interpolation\n elif tank.state == Tank.STATE_BACKWARD:\n intpl_x -= tank.base_dir[0]*Tank.MOVE_FRACTION*interpolation\n intpl_y -= tank.base_dir[1]*Tank.MOVE_FRACTION*interpolation\n \n # the offset needed so that the image center would not move\n base_offset = rotate_rect_offset(TILE_SIZE, TILE_SIZE, TILE_SIZE/2, \n TILE_SIZE/2, -intpl_base_ang)\n barrel_offset = rotate_rect_offset(TILE_SIZE, TILE_SIZE, BARREL_CENTER[0], \n BARREL_CENTER[1], -intpl_barrel_ang)\n # rotate the base and barrel images\n tank_base = pygame.transform.rotate(tank_base, -intpl_base_ang)\n tank_barrel = pygame.transform.rotate(tank_barrel, -intpl_barrel_ang)\n # the location of the base image in the scene\n base_dest = (intpl_x*TILE_SIZE-base_offset[0], intpl_y*TILE_SIZE-base_offset[1])\n \n # the center of the barrel image is moving when the base rotates since it\n # does not match with the middle of the square. 
this needs to be compensated\n # the off (base) center radius of the barrel center\n barrel_off_r = abs(BARREL_CENTER[0]-BARREL_CENTER[1])\n # the off center distance in x/y directions after the base rotates\n barrel_off_cntr = (barrel_off_r*math.sin(math.radians(-intpl_base_ang)), \n barrel_off_r*math.cos(math.radians(-intpl_base_ang)))\n # the compensated location of the barrel image in the scene\n barrel_dest = (intpl_x*TILE_SIZE-barrel_offset[0]-BARREL_CENTER[0]+0.5*TILE_SIZE+barrel_off_cntr[0], \n intpl_y*TILE_SIZE-barrel_offset[1]-BARREL_CENTER[1]+0.5*TILE_SIZE+barrel_off_cntr[1])\n scene.blit(source=tank_base, dest=base_dest)\n scene.blit(source=tank_barrel, dest=barrel_dest)\n \ndef draw_tile(tile, row, col, scene):\n '''\n tile: the specific tile\n row, col: the location of the tile\n scene: the surface to draw onto\n Draw a specific tile at the location given on the given surface.\n '''\n tile_image = None\n if tile.tile_type == Tile.TYPE_SLOPE:\n if tile.slope_height == 0:\n tile_image = images['slope0']\n elif tile.slope_height == 1:\n tile_image = images['slope1']\n \n if tile.slope_dir == Arena.DIR_EAST:\n tile_image = pygame.transform.rotate(tile_image, -90)\n elif tile.slope_dir == Arena.DIR_SOUTH:\n tile_image = pygame.transform.rotate(tile_image, -180)\n elif tile.slope_dir == Arena.DIR_WEST:\n tile_image = pygame.transform.rotate(tile_image, -270)\n else:\n if tile.obstacle == Tile.OBS_IRON:\n tile_image = images['iron']\n elif tile.obstacle == Tile.OBS_BRICK:\n tile_image = images['brick']\n elif tile.obstacle == None:\n if tile.tile_type == Tile.TYPE_DEPRESSION:\n tile_image = images['depression']\n elif tile.tile_type == Tile.TYPE_PLAIN:\n tile_image = images['plain']\n elif tile.tile_type == Tile.TYPE_HIGH_GROUND:\n tile_image = images['high_ground'] \n dest = (col*TILE_SIZE, row*TILE_SIZE)\n scene.blit(tile_image, dest=dest)\n \n ","sub_path":"graphics/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407060413","text":"from django.conf.urls.defaults import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('cmsplugin_swingtime',\n url(r'^$', 'views.eventIndexView', name=\"swingtime-index\"),\n url(r'^archiv/$', 'views.eventArchiveView', name=\"swingtime-archive\"),\n url(r'^(?P[-\\w\\d]+)/$', 'views.eventTypeView', name=\"swingtime-type-index\"),\n url(r'^(?P[-\\w\\d]+)/(?P\\d+)/$', 'views.eventView', name=\"swingtime-event\"),\n)\n\n","sub_path":"cmsplugin_swingtime/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"138462520","text":"from time import sleep\n\nprint('FINANCIAMENTO IMOBILIÁRIO')\nvalor_imovel = float(input('Quanto custa o imóvel desejado? R$'))\nrenda_mensal = float(input('Quanto é a sua renda mensal? '))\nanos = int(input('Em quantos anos deseja quitar o financiamento? 
'))\nprint('PROCESSANDO...')\nsleep(2)\nparcela_mensal = valor_imovel / (anos * 12)\n# if parcela_mensal > ((30 * renda_mensal) / 100):\nif parcela_mensal >= ((((3.0 / 10) * 100) * renda_mensal) / 100):\n\tprint('Infelizmente não é possível financiar este imóvel.\\nO valor da parcela ficou acima do teto.')\nelse:\n\tprint('O financiamento está pré aprovado!\\nVocê poderá financiar o imóvel desejado.')\nprint('Para pagar um imóvel de R${:.2f} em {} anos o valor da parcela mensal será de R${:.2f}'.format(valor_imovel, anos, parcela_mensal))\n","sub_path":"mundo-2-estruturas-controle/6-condicoes-python-if-elif-else/ex036-aprovando-emprestimo.py","file_name":"ex036-aprovando-emprestimo.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"186650505","text":"from webob import Response\nfrom z3c.rml.rml2pdf import parseString\n\nfrom eportfolio.views.api import TemplateAPI \nfrom repoze.bfg.chameleon_zpt import render_template\n\n\ndef objectives_view(context, request):\n \n project = context.__parent__ \n \n return dict(api = TemplateAPI(request),\n project = project) \n \ndef objectives_pdf_view(context, request):\n\n project = context.__parent__ \n\n result = render_template('templates/objectives_pdf.pt',\n api=TemplateAPI(request),\n project = project)\n \n response = Response(parseString(result.encode('utf-8')).read())\n response.content_type = \"application/pdf\"\n return response","sub_path":"eportfolio/views/objectives.py","file_name":"objectives.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"554574897","text":"def get_bullcow(user_num, random_num):\n bull = 0\n cow = 0\n #calculating individual digits in the number entered by the user and random number and comparing them\n for u in range(len(user_num)) : # run loop for each digit in user number\n list_of_digits_counted = []\n for r in range(len(random_num)): # run loop for each digit in random number\n# instance_counted = False\n if u == r: # if index positions are same and digits at these positions are also same, increment bull by one\n if user_num[u] == random_num[r]:\n bull += 1\n# instance_counted = True\n list_of_digits_counted.append(user_num[u]) # List to add occurances of all digits till now\n \n if (user_num[u] not in list_of_digits_counted) and (user_num[u] in random_num):\n cow += 1\n \n values = [bull, cow]\n return values\n","sub_path":"mimsmind_bullcow.py","file_name":"mimsmind_bullcow.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142437069","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/harness/dataframe.py\n# Compiled at: 2016-09-14 09:24:20\n# Size of source mod 2**32: 7029 bytes\n\"\"\"setup\"\"\"\nimport pandas\nfrom sklearn import *\nimport sklearn\nfrom pandas import np\nfrom typing import Iterable\nfrom whatever.callables import DictCallable\nfrom toolz.curried import *\nfrom whatever import *\nimport time\nfrom typing import Callable\n__all__ = [\n 'Harness']\n\nclass PandasBase(object):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **{key:value for key, value in kwargs.items() if key not in self._metadata})\n for attr in self._metadata:\n if not 
hasattr(self, attr):\n if attr in kwargs:\n setattr(self, attr, kwargs[attr])\n else:\n setattr(self, attr, None)\n\n @property\n def _constructor(self):\n return self.__class__\n\n def copy(self, deep=True):\n data = self.values\n return self.__class__(data.values.copy() if deep else data.values, index=self.index.copy()).__finalize__(self)\n\n def __dir__(self):\n return super().__dir__() + self._metadata\n\n\nclass SeriesBase(PandasBase, pandas.Series):\n\n def __init__(self, *args, **kwargs):\n if ~('name' in self._metadata):\n self._metadata.append('name')\n super().__init__(*args, **kwargs)\n\n @property\n def _constructor_expanddim(self):\n return self._dataframe\n\n @property\n def _constructor(self):\n return self.__class__\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\" propagate metadata from other to self \"\"\"\n for attr in self._metadata:\n setattr(self, attr, getattr(other, attr, None))\n\n return self\n\n\nclass FrameBase(PandasBase, pandas.DataFrame):\n\n @property\n def _constructor_sliced(self):\n return self._series\n\n def __getitem__(self, key):\n obj = super().__getitem__(key).__finalize__(self)\n if obj.name:\n obj.name = (\n *obj.name, key)\n else:\n obj.name = (\n key,)\n return obj\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\"propagate metadata from other to self \"\"\"\n if method == 'merge':\n for name in self._metadata:\n setattr(self, name, getattr(other.left, name, None))\n\n else:\n if method == 'concat':\n for name in self._metadata:\n setattr(self, name, getattr(other.objs[0], name, None))\n\n else:\n for name in self._metadata:\n setattr(self, name, getattr(other, name, None))\n\n return self\n\n\nclass HarnessSeries(SeriesBase):\n _metadata = [\n 'name', 'parent', 'model']\n\n\nclass HarnessBase(FrameBase):\n __doc__ = 'A dataframe as a test harness for machine learning.'\n _series = HarnessSeries\n _metadata = [\n 'matrix', 'pipeline', 'parent',\n 'scorer', 'model', 'n_folds', 'name']\n scorer = callables.DictCallable({'score': metrics.accuracy_score})\n history = pandas.DataFrame([], columns=scorer.keys())\n\n\nclass Harness(HarnessBase):\n\n def __init__(self, *args, n_folds=1, **kwargs):\n self.n_folds = n_folds\n super().__init__(*args, **kwargs)\n\n def cross_validation(self, **kwargs):\n if self.n_folds == 1:\n self.folds = [\n (\n range(len(self)),)]\n else:\n folds = cross_validation.StratifiedKFold(self.index.tolist(), n_folds=self.n_folds, **merge({'random_state': 42}, kwargs))\n self.folds = list(folds)\n return self\n\n def fit(self, level=-1, **kwargs):\n self.cross_validation(**kwargs)\n self.matrix = []\n for fold in self.folds:\n self.matrix.append(self._fit_models(level, first(fold)))\n\n self.matrix = np.array(self.matrix)\n return self\n\n def _fit_models(self, level, indices):\n \"\"\"Fit a few models with the same data set indices.\"\"\"\n models = pipe(self.pipeline, map(sklearn.base.clone), list)\n for model in models:\n t = time.time()\n if level is None:\n model.fit(self.iloc[indices].values)\n else:\n model.fit(self.iloc[indices].values, self.iloc[indices].index.get_level_values(level))\n self.history = self.history.append(pandas.DataFrame([\n {'time': time.time() - t, \n 'model': model, \n 'model_id': id(model), \n 'data': self, \n 'length': len(model.steps) if hasattr(model, 'steps') else 1}]).set_index('model_id'))\n\n return models\n\n def predict(self, models=None):\n return self._predict_transform('predict', models)\n\n def transform(self, models=None):\n return 
self._predict_transform('transform', models)\n\n def _predict_transform(self, key, models=None):\n if models is None:\n models = self.matrix\n frames = []\n for model in concat(models):\n if hasattr(model, key):\n frames.append(Harness(getattr(model, key)(self.values), index=self.index, model=model))\n\n if frames:\n new_harness = pandas.concat(_X(frames).zip(_X(frames) * this().model.f * id > list) > dict, axis=1)\n new_harness.parent, new_harness.history = self, self.history\n return new_harness\n return Harness(index=self.index)\n\n def score(self, models=None):\n predicted = self.predict(models)\n self.history.update(pandas.DataFrame(_X(predicted.iteritems()).map(second).map(_X()[flip(self.scorer)](predicted.index).f) > list, index=_X(predicted.iteritems()).map(first).map(first) > list))\n return self.history\n\n def _id_to_model(self, index):\n return self.history.loc[index].model\n\n def _model_to_id(self, model):\n return self.history[(self.history.model == model)]\n\n def __getitem__(self, key):\n if isinstance(key, sklearn.base.BaseEstimator):\n key = id(key)\n return super().__getitem__(key)\n\n\nHarnessSeries._dataframe = Harness","sub_path":"pycfiles/harness_it-0.1.3-py3.5/dataframe.cpython-35.py","file_name":"dataframe.cpython-35.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"495892166","text":"import numpy as np \nimport cv2\nimport matplotlib.pyplot as plt \n\ndef load_img():\n\tblank_img = np.zeros((600,600))\n\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\tcv2.putText(blank_img,text='ABCDE',org=(50,300), fontFace=font, fontScale=5, color=(255,255,255),thickness=25)\n\treturn blank_img\n\ndef display_img(img):\n\tfig = plt.figure(figsize=(12,10))\n\tax = fig.add_subplot(111)\n\tax.imshow(img,cmap='gray')\n\tplt.show()\n\ndef display_2img(img1,img2,str1,str2):\n\t\n\tfig = plt.figure(figsize=(12,12))\n\tax1 = fig.add_subplot(221)\n\tax1.imshow(img1,cmap='gray')\n\tplt.title(str1)\n\tax2 = fig.add_subplot(222)\n\tax2.imshow(img2,cmap='gray')\n\tplt.title(str2)\n\tplt.show()\n\n\nimg = load_img()\n#display_img(img)\n\n## Erode the image\nkernel = np.ones((5,5),dtype=np.uint8)\nerode = cv2.erode(img,kernel,iterations=4)\n#display_img(results)\ndisplay_2img(img,erode,'origin','erosion')\n\n## Dilate the image\n\ndilate = cv2.dilate(img,kernel,iterations=4)\ndisplay_2img(img,dilate,'origin','dilate')\n\n\n\nimg = load_img()\nwhite_noise = np.random.randint(low=0,high=2,size=(600,600))\n# Change scale\nwhite_noise = white_noise * 255\n\nnoise_img = white_noise + img\n#display_img(noise_img)\ndisplay_2img(white_noise,noise_img,'white noise','white noise img')\n\nopening = cv2.morphologyEx(noise_img,cv2.MORPH_OPEN,kernel)\n#display_img(opening)\ndisplay_2img(img,opening,'origin','opening')\n\n\nimg = load_img()\nblack_noise = np.random.randint(low=0,high=2,size=(600,600))\nblack_noise = black_noise * -255\nblack_noise_img = img + black_noise\nblack_noise_img[black_noise_img == -255] = 0\n#display_img(black_noise_img)\nclosing = cv2.morphologyEx(black_noise_img,cv2.MORPH_CLOSE,kernel)\ndisplay_2img(black_noise_img,closing,'black noise img','closing')\n\ngradient = cv2.morphologyEx(img,cv2.MORPH_GRADIENT,kernel)\ndisplay_2img(black_noise_img,gradient,'black noise 
img','gradient')\n\n\n","sub_path":"python_cv2/opencv_practice/Image_Basic/Morphological_Operators.py","file_name":"Morphological_Operators.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"438525268","text":"# coding=utf-8\n#\n# The Qubes OS Project, http://www.qubes-os.org\n#\n# Copyright (C) 2023 Piotr Bartman \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\nimport re\nimport signal\nimport subprocess\nimport threading\nimport time\nimport gi\nfrom typing import Dict\n\ngi.require_version('Gtk', '3.0') # isort:skip\nfrom gi.repository import Gtk, Gdk, GLib, GObject # isort:skip\nfrom locale import gettext as l\n\nfrom qubes_config.widgets.gtk_utils import copy_to_global_clipboard, \\\n load_icon_at_gtk_size\nfrom qui.updater.updater_settings import Settings\nfrom qui.updater.utils import UpdateStatus, RowWrapper\n\n\nclass ProgressPage:\n\n def __init__(\n self,\n builder,\n log,\n header_label,\n next_button,\n cancel_button\n ):\n self.builder = builder\n self.log = log\n self.header_label = header_label\n self.next_button = next_button\n self.cancel_button = cancel_button\n self.vms_to_update = None\n self.exit_triggered = False\n self.update_thread = None\n\n self.update_details = QubeUpdateDetails(self.builder)\n\n self.stack: Gtk.Stack = self.builder.get_object(\"main_stack\")\n self.page: Gtk.Box = self.builder.get_object(\"progress_page\")\n self.progressbar: Gtk.TreeView = self.builder.get_object(\"progressbar\")\n progress_store = self.progressbar.get_model()\n progress_store.append([0])\n self.total_progress = progress_store[-1]\n self.progressbar_renderer: Gtk.CellRendererProgress = \\\n self.builder.get_object(\"progressbar_renderer\")\n self.progressbar_renderer.set_fixed_size(-1, 26)\n\n self.progress_list: Gtk.TreeView = self.builder.get_object(\n \"progress_list\")\n self.selection: Gtk.TreeSelection = self.progress_list.get_selection()\n self.progress_list.connect(\"row-activated\", self.row_selected)\n progress_column: Gtk.TreeViewColumn = self.builder.get_object(\n \"progress_column\")\n renderer = CellRendererProgressWithResult()\n renderer.props.ypad = 10\n progress_column.pack_start(renderer, True)\n progress_column.add_attribute(renderer, \"pulse\", 7)\n progress_column.add_attribute(renderer, \"value\", 7)\n progress_column.add_attribute(renderer, \"status\", 8)\n\n @property\n def is_visible(self):\n \"\"\"Returns True if page is shown by stack.\"\"\"\n return self.stack.get_visible_child() == self.page\n\n def init_update(self, vms_to_update, settings):\n \"\"\"Starts `perform_update` in new thread.\"\"\"\n self.log.info(\"Prepare updating\")\n self.vms_to_update = vms_to_update\n self.progress_list.set_model(vms_to_update.list_store_raw)\n self.next_button.set_sensitive(False)\n 
self.cancel_button.set_sensitive(True)\n self.cancel_button.set_label(l(\"_Cancel updates\"))\n self.cancel_button.show()\n\n self.header_label.set_text(l(\"Update in progress...\"))\n self.header_label.set_halign(Gtk.Align.CENTER)\n\n self.update_thread = threading.Thread(\n target=self.perform_update,\n args=(settings,)\n )\n self.update_thread.start()\n\n def interrupt_update(self):\n \"\"\"\n Finish ongoing updates, but skip the ones that haven't started yet.\n \"\"\"\n self.log.debug(\"Interrupting updates\")\n self.exit_triggered = True\n GLib.idle_add(self.header_label.set_text,\n l(\"Interrupting the update...\"))\n\n def perform_update(self, settings):\n \"\"\"Updates dom0 and then other vms.\"\"\"\n admins = [row for row in self.vms_to_update\n if row.vm.klass == 'AdminVM']\n templs = [row for row in self.vms_to_update\n if row.vm.klass != 'AdminVM']\n GLib.idle_add(self.set_total_progress, 0)\n\n if admins:\n self.update_admin_vm(admins)\n\n if templs:\n self.update_templates(templs, settings)\n\n GLib.idle_add(self.next_button.set_sensitive, True)\n GLib.idle_add(self.header_label.set_text, l(\"Update finished\"))\n GLib.idle_add(self.cancel_button.set_visible, False)\n\n def update_admin_vm(self, admins):\n \"\"\"Runs command to update dom0.\"\"\"\n admin = admins[0]\n if self.exit_triggered:\n self.log.info(\"Update canceled: skip adminVM updating\")\n GLib.idle_add(admin.set_status, UpdateStatus.Cancelled)\n GLib.idle_add(\n admin.append_text_view,\n l(\"Canceled update for {}\\n\").format(admin.vm.name))\n self.update_details.update_buffer()\n return\n self.log.debug(\"Start adminVM updating\")\n\n info = f\"Updating {admin.name}...\\n\" \\\n \"Detailed information will be displayed after update.\\n\" \\\n f\"{admin.name} does not support in-progress update \" \\\n \"information.\\n\"\n GLib.idle_add(\n admin.append_text_view,\n l(info).format(admin.name))\n GLib.idle_add(admin.set_status, UpdateStatus.ProgressUnknown)\n\n self.update_details.update_buffer()\n\n try:\n with Ticker(admin):\n untrusted_output = subprocess.check_output(\n ['sudo', 'qubesctl', '--dom0-only', '--no-color',\n 'pkg.upgrade', 'refresh=True'],\n stderr=subprocess.STDOUT)\n output = self._sanitize_line(untrusted_output)\n\n GLib.idle_add(admin.append_text_view, output)\n GLib.idle_add(admin.set_status, UpdateStatus.Success)\n except subprocess.CalledProcessError as ex:\n GLib.idle_add(\n admin.append_text_view,\n l(\"Error on updating {}: {}\\n{}\").format(\n admin.vm.name, str(ex), ex.output.decode()))\n GLib.idle_add(admin.set_status, UpdateStatus.Error)\n\n self.update_details.update_buffer()\n\n def update_templates(self, to_update, settings):\n \"\"\"Updates templates and standalones and then sets update statuses.\"\"\"\n if self.exit_triggered:\n self.log.info(\"Update canceled: skip templateVM updating\")\n for row in to_update:\n GLib.idle_add(row.set_status, UpdateStatus.Cancelled)\n GLib.idle_add(\n row.append_text_view,\n l(\"Canceled update for {}\\n\").format(row.vm.name))\n GLib.idle_add(self.set_total_progress, 100)\n self.update_details.update_buffer()\n return\n self.log.debug(\"Start templateVM updating\")\n\n for row in to_update:\n GLib.idle_add(\n row.append_text_view,\n l(\"Updating {}\\n\").format(row.name))\n GLib.idle_add(row.set_status, UpdateStatus.InProgress)\n self.update_details.update_buffer()\n\n try:\n rows = {row.name: row for row in to_update}\n self.do_update_templates(rows, settings)\n GLib.idle_add(self.set_total_progress, 100)\n except 
subprocess.CalledProcessError as ex:\n for row in to_update:\n GLib.idle_add(\n row.append_text_view,\n l(\"Error on updating {}: {}\\n{}\").format(\n row.name, str(ex), ex.output.decode()))\n GLib.idle_add(row.set_status, UpdateStatus.Error)\n self.update_details.update_buffer()\n\n def do_update_templates(\n self, rows: Dict[str, RowWrapper], settings: Settings):\n \"\"\"Runs `qubes-vm-update` command.\"\"\"\n targets = \",\".join((name for name in rows.keys()))\n\n args = []\n if settings.max_concurrency is not None:\n args.extend(\n ('--max-concurrency',\n str(settings.max_concurrency)))\n\n # pylint: disable=consider-using-with\n proc = subprocess.Popen(\n ['qubes-vm-update',\n '--show-output',\n '--just-print-progress',\n *args,\n '--targets', targets],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n read_err_thread = threading.Thread(\n target=self.read_stderrs,\n args=(proc, rows)\n )\n read_out_thread = threading.Thread(\n target=self.read_stdouts,\n args=(proc, rows)\n )\n read_err_thread.start()\n read_out_thread.start()\n\n while proc.poll() is None \\\n or read_out_thread.is_alive() \\\n or read_err_thread.is_alive():\n time.sleep(1)\n if self.exit_triggered and proc.poll() is None:\n proc.send_signal(signal.SIGINT)\n proc.wait()\n read_err_thread.join()\n read_out_thread.join()\n\n def read_stderrs(self, proc, rows):\n for untrusted_line in iter(proc.stderr.readline, ''):\n if untrusted_line:\n self.handle_err_line(untrusted_line, rows)\n else:\n break\n proc.stderr.close()\n\n def handle_err_line(self, untrusted_line, rows):\n line = self._sanitize_line(untrusted_line)\n try:\n name, status, info = line.split()\n if status == \"updating\":\n progress = int(float(info))\n GLib.idle_add(rows[name].set_update_progress, progress)\n total_progress = sum(\n row.get_update_progress()\n for row in rows.values()) / len(rows)\n GLib.idle_add(self.set_total_progress, total_progress)\n\n except ValueError:\n return\n\n try:\n if status == \"done\":\n update_status = UpdateStatus.from_name(info)\n GLib.idle_add(rows[name].set_status, update_status)\n except KeyError:\n return\n\n def read_stdouts(self, proc, rows):\n curr_name_out = \"\"\n for untrusted_line in iter(proc.stdout.readline, ''):\n if untrusted_line:\n line = self._sanitize_line(untrusted_line)\n maybe_name, text = line.split(' ', 1)\n suffix = len(\":out:\")\n if maybe_name[:-suffix] in rows.keys():\n curr_name_out = maybe_name[:-suffix]\n if curr_name_out:\n rows[curr_name_out].append_text_view(text)\n else:\n break\n self.update_details.update_buffer()\n proc.stdout.close()\n\n @staticmethod\n def _sanitize_line(untrusted_line: bytes) -> str:\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n line = ansi_escape.sub('', untrusted_line.decode())\n return line\n\n def set_total_progress(self, progress):\n \"\"\"Set the value of main big progressbar.\"\"\"\n self.total_progress[0] = progress\n\n def back_by_row_selection(self, _emitter, path, *args):\n \"\"\"Show this page and select row selected on summary page.\"\"\"\n self.show()\n self.row_selected(_emitter, path, *args)\n\n def show(self):\n \"\"\"Show this page and handle buttons.\"\"\"\n self.log.debug(\"Show progress page\")\n self.selection.unselect_all()\n self.update_details.set_active_row(None)\n self.stack.set_visible_child(self.page)\n\n self.next_button.set_label(l(\"_Next\"))\n self.cancel_button.hide()\n\n def row_selected(self, _emitter, path, _col):\n \"\"\"Handle clicking on a row to show more info.\n\n Set updated details (name of vm 
and textview).\"\"\"\n self.selection.unselect_all()\n self.selection.select_path(path)\n self.update_details.set_active_row(\n self.vms_to_update[path.get_indices()[0]])\n\n def get_update_summary(self):\n \"\"\"Returns update summary.\n\n It is a triple of:\n 1. number of updated vms,\n 2. number of vms that tried to update but no update was found,\n 3. vms that update was canceled before starting.\n \"\"\"\n vm_updated_num = len(\n [row for row in self.vms_to_update\n if row.status == UpdateStatus.Success])\n vm_no_updates_num = len(\n [row for row in self.vms_to_update\n if row.status == UpdateStatus.NoUpdatesFound])\n vm_failed_num = len(\n [row for row in self.vms_to_update\n if row.status in (UpdateStatus.Error, UpdateStatus.Cancelled)])\n return vm_updated_num, vm_no_updates_num, vm_failed_num\n\n\nclass Ticker:\n \"\"\"Helper for dom0 progressbar.\"\"\"\n def __init__(self, *args):\n self.ticker_done = False\n self.args = args\n\n def __enter__(self):\n thread = threading.Thread(target=self.tick, args=self.args)\n thread.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.ticker_done = True\n\n def tick(self, row):\n while not self.ticker_done:\n new_value = (row.get_update_progress()) % 96 + 1\n GLib.idle_add(row.set_update_progress, new_value)\n time.sleep(1 / 12)\n\n\nclass QubeUpdateDetails:\n\n def __init__(self, builder):\n self.active_row = None\n self.builder = builder\n\n self.qube_details: Gtk.Box = self.builder.get_object(\"qube_details\")\n self.details_label: Gtk.Label = self.builder.get_object(\"details_label\")\n self.qube_icon: Gtk.Image = self.builder.get_object(\"qube_icon\")\n self.qube_label: Gtk.Label = self.builder.get_object(\"qube_label\")\n self.colon: Gtk.Label = self.builder.get_object(\"colon\")\n\n self.copy_button: Gtk.Button = self.builder.get_object(\"copy_button\")\n self.copy_button.connect(\"clicked\", self.copy_content)\n\n self.progress_textview: Gtk.TextView = self.builder.get_object(\n \"progress_textview\")\n self.progress_scrolled_window: Gtk.ScrolledWindow = \\\n self.builder.get_object(\"progress_scrolled_window\")\n\n def copy_content(self, _emitter):\n if self.active_row is None:\n return\n\n text = self.active_row.buffer\n if not text:\n return\n\n copy_to_global_clipboard(text)\n\n def set_active_row(self, row):\n self.active_row = row\n row_activated = self.active_row is not None\n if not row_activated:\n self.details_label.set_text(l(\"Select a qube to see details.\"))\n else:\n self.details_label.set_text(l(\"Details for\") + \" \")\n self.qube_icon.set_from_pixbuf(self.active_row.icon)\n self.qube_label.set_markup(\" \" + str(self.active_row.color_name))\n self.update_buffer()\n\n self.qube_icon.set_visible(row_activated)\n self.qube_label.set_visible(row_activated)\n self.colon.set_visible(row_activated)\n self.progress_scrolled_window.set_visible(row_activated)\n self.progress_textview.set_visible(row_activated)\n self.copy_button.set_visible(row_activated)\n\n def update_buffer(self):\n if self.active_row is not None:\n buffer_ = self.progress_textview.get_buffer()\n GLib.idle_add(buffer_.set_text, self.active_row.buffer)\n\n\nclass CellRendererProgressWithResult(\n Gtk.CellRendererProgress\n):\n \"\"\"\n Custom Cell Renderer to show progressbar or finish icon.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._status = None\n\n @GObject.Property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, value):\n self._status = value\n\n # pylint: 
disable=arguments-differ\n def do_render(self, context, widget, background_area, cell_area, flags):\n status: UpdateStatus = self.get_property('status')\n if status == UpdateStatus.Success:\n self.draw_icon('qubes-check-yes', context, cell_area)\n elif status == UpdateStatus.NoUpdatesFound:\n self.draw_icon('qubes-check-maybe', context, cell_area)\n elif status in (UpdateStatus.Error, UpdateStatus.Cancelled):\n self.draw_icon('qubes-delete-x', context, cell_area)\n elif status == UpdateStatus.ProgressUnknown:\n Gtk.CellRendererProgress.do_render(\n self, context, widget, background_area, cell_area, flags)\n else:\n self.set_property(\"pulse\", -1)\n Gtk.CellRendererProgress.do_render(\n self, context, widget, background_area, cell_area, flags)\n\n def draw_icon(self, icon_name: str, context, cell_area):\n # pylint: disable=no-member\n pixbuf = load_icon_at_gtk_size(icon_name, Gtk.IconSize.SMALL_TOOLBAR)\n Gdk.cairo_set_source_pixbuf(\n context,\n pixbuf,\n cell_area.x + self.props.xpad,\n cell_area.y + self.props.ypad\n )\n context.paint()\n","sub_path":"qui/updater/progress_page.py","file_name":"progress_page.py","file_ext":"py","file_size_in_byte":17551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"160005364","text":"# coding=utf8\n\n\nimport unittest\n\nfrom django_echarts.plugins.hosts import LibHostStore, MapHostStore\n\n\nclass HostStoreTestCase(unittest.TestCase):\n def test_lib_host(self):\n # Basic tests\n m_context = {\n 'STATIC_URL': '/static/',\n 'echarts_version': '3.7.0'\n }\n hs = LibHostStore(context=m_context, default_host='bootcdn')\n self.assertEqual(\n 'https://cdn.bootcss.com/echarts/3.7.0/echarts.min.js',\n hs.generate_js_link('echarts.min')\n )\n\n self.assertEqual(\n 'https://cdnjs.cloudflare.com/ajax/libs/echarts/3.7.0/echarts.min.js',\n hs.generate_js_link('echarts.min', js_host='cdnjs')\n )\n self.assertEqual(\n 'https://cdn.bootcss.com/echarts/3.7.0/echarts.min.js',\n hs.generate_js_link(\n 'echarts.min',\n js_host='https://cdn.bootcss.com/echarts/{echarts_version}'\n )\n )\n\n def test_map_host(self):\n m_context = {\n 'STATIC_URL': '/static/',\n 'echarts_version': '3.7.0'\n }\n hs = MapHostStore(context=m_context, default_host='echarts')\n self.assertEqual(\n 'https://echarts-maps.github.io/echarts-china-provinces-js/china.js',\n hs.generate_js_link('china', js_host='china-provinces')\n )\n # Add\n hs.add_host('https://amap.com/js', 'amap')\n self.assertEqual(\n 'https://amap.com/js/fujian.js',\n hs.generate_js_link('fujian', 'amap')\n )\n self.assertEqual(\n 'http://echarts.baidu.com/asset/map/js/china.js',\n hs.generate_js_link('china')\n )\n\n\nclass CustomHostTestCase(unittest.TestCase):\n def test_add_host(self):\n m_context = {\n 'echarts_version': '3.8.5'\n }\n\n mhs = MapHostStore(context=m_context, default_host='pyecharts')\n mhs.add_host('/demo/', 'demo')\n mhs.add_host('/demo2/{echarts_version}', 'demo2')\n self.assertEqual(\n '/demo/fujian.js',\n mhs.generate_js_link('fujian', js_host='demo')\n )\n self.assertEqual(\n '/demo2/3.8.5/fujian.js',\n mhs.generate_js_link('fujian', js_host='demo2')\n )\n","sub_path":"tests/test_hosts.py","file_name":"test_hosts.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335517431","text":"from PIL import Image\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\ntf.get_logger().setLevel('ERROR') # sneaky, 
sneaky\n\nTARGET_SIZE = (150,150)\nBATCH_SIZE = 16\nDATA_DIR = 'data' \n\ndef get_generator():\n # create duplicate images\n BATCHES_PER_EPOCH = 300//BATCH_SIZE\n classes = ['pavlos', 'not-pavlos']\n for img_class in classes:\n img = Image.open((f'{DATA_DIR}/{img_class}.jpeg'))\n for i in range(1, BATCH_SIZE*BATCHES_PER_EPOCH//2+1):\n img.thumbnail(TARGET_SIZE, Image.ANTIALIAS)\n img.save(f'{DATA_DIR}/{img_class}/{img_class}{i:0>3}.jpeg', \"JPEG\")\n \n data_gen = ImageDataGenerator(\n rescale=1./255,\n height_shift_range=0.5,\n width_shift_range=0.5)\n\n img_generator = data_gen.flow_from_directory(\n DATA_DIR, \n target_size=(TARGET_SIZE),\n batch_size=BATCH_SIZE,\n classes=classes,\n class_mode='binary') \n return img_generator\n","sub_path":"content/labs/lab05/notebook/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"367963065","text":"import random\n\nmessages = ['Yes', 'No', 'Perhaps', 'Your Mom']\n\nwhile True:\n question = input('Ask the 8 Ball your question: ')\n if question.upper() == 'MIKE':\n print('Some questions only God knows the answer to...')\n continue\n if question == '':\n break\n else:\n print(messages[random.randint(0, len(messages) -1)])\n \n","sub_path":"simple_tests/eightBallList.py","file_name":"eightBallList.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117282709","text":"segmentsFolder = \"C:/SVN/Sites/trunk/UK/ASR/Data/Segments\"\r\noutputFile = \"all_xa_20160812.txt\"\r\nfindoutF = \"t\"\r\nfindoutAG = \"xa\"\r\n\r\nimport os, time\r\n\r\nstart_t = time.time()\r\n\r\nsegmentListAll = os.listdir(segmentsFolder)\r\n##segmentListAll = [\r\n##'ECM9~Waverley_East_End~Edinburgh_Waverley_Station.seg',\r\n##'EGM4~Edinburgh_Waverley_Station~Waverley_West_End.seg',\r\n##'EGM3~Waverley_West_End~Haymarket.seg',\r\n##'EGM2~Haymarket~Haymarket_East_Jn.seg',\r\n##'EGM2~Haymarket_East_Jn~Haymarket_Central_Jn.seg',\r\n##'EGM2~Haymarket_Central_Jn~Haymarket_West_Jn.seg',\r\n##]\r\n\r\nprint(\"total segments: \", len(segmentListAll))\r\n\r\nag_list_seg, ag_list_km, ag_list_name = [],[],[]\r\nF_list_seg, F_list_km, F_list_name = [],[],[]\r\n\r\nfor seg in segmentListAll:\r\n\r\n current_segment = segmentsFolder + \"/\" + seg\r\n print (current_segment)\r\n fin = open(current_segment)\r\n\r\n for line in fin:\r\n if line[:1].isdigit() or line[:1] == \"#\":\r\n route_t = line.split('\\t')\r\n\r\n if route_t[1] == findoutAG:\r\n ag_list_seg.append(seg)\r\n ag_list_km.append(route_t[0])\r\n ag_list_name.append(route_t[2] +','+ route_t[3] +','+ route_t[4].replace('\"',''))\r\n \r\n elif route_t[1] == \"f\" and route_t[3] == findoutF:\r\n F_list_seg.append(seg)\r\n F_list_km.append(route_t[0])\r\n if len(route_t) == 4:\r\n F_list_name.append(\"_blank_\")\r\n elif len(route_t) == 5:\r\n F_list_name.append(route_t[4].replace('\\n',''))\r\n else:\r\n F_list_name.append(''.join(route_t[4]))\r\n\r\n fin.close()\r\n\r\nfout = open(outputFile, \"w\")\r\n\r\nprint(len(F_list_km), len(F_list_km))\r\nprint(len(ag_list_km), len(ag_list_km))\r\n\r\nfor i in range(len(ag_list_km)):\r\n fout.write(ag_list_seg[i]+','+ag_list_km[i]+','+ag_list_name[i]+'\\n')\r\n\r\n#for j in range(len(F_list_km)):\r\n# fout.write(F_list_seg[j]+','+F_list_km[j]+','+ findoutF +','+F_list_name[j]+'\\n')\r\n\r\nfout.close()\r\n\r\nend_t = time.time()\r\nprint('Completed computing in: ', 
round(end_t-start_t, 2),'s')\r\n","sub_path":"segment_list_xa_location_v0.1.py","file_name":"segment_list_xa_location_v0.1.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"215079876","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport cv2\nimport base64\nimport torch\nimport numpy as np\nimport json\nimport waitress\n\nfrom flask import Flask, request, make_response\n\nimport rorlkit.torch.pytorch_util as ptu\nfrom rorlkit.util.io import load_local_or_remote_file\n\n\napp = Flask(__name__, template_folder=\"templates\")\n\nvae_path = '../models/vae/cmm_rgby_xy_vae.pkl'\nvae = load_local_or_remote_file(vae_path)\n\n\ndef get_latent(raw_image):\n \"\"\"Get latent variables (mean vector)\"\"\"\n image = cv2.resize(raw_image, (vae.imsize, vae.imsize))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = normalize_image(image)\n # swap order and reshape\n flat_img = torch.from_numpy(image).permute(2, 1, 0).flatten(start_dim=1).numpy()\n latent_distribution_params = vae.encode(ptu.from_numpy(flat_img.reshape(1, -1)).cuda())\n latents = ptu.get_numpy(latent_distribution_params[0])\n return latents\n\n\ndef reconstruct_img(flat_img):\n latent_distribution_params = vae.encode(ptu.from_numpy(flat_img.reshape(1, -1)).cuda())\n reconstructions, _ = vae.decode(latent_distribution_params[0])\n imgs = ptu.get_numpy(reconstructions)\n imgs = imgs.reshape(\n 1, vae.input_channels, vae.imsize, vae.imsize\n ).transpose(0, 3, 2, 1) # BCWH -> BHWC\n img = cv2.cvtColor(imgs[0], cv2.COLOR_RGB2BGR)\n return img\n\n\ndef normalize_image(image, dtype=np.float64):\n assert image.dtype == np.uint8\n return dtype(image) / 255.0\n\n\ndef decode_b64_to_image(b64_str: str) -> [bool, np.ndarray]:\n \"\"\"Decode a base64 string into an OpenCV image; intended for three-channel color images.\n :param b64_str: base64 string\n :return: ok, cv2_image\n \"\"\"\n if \",\" in b64_str:\n b64_str = b64_str.partition(\",\")[-1]\n else:\n b64_str = b64_str\n\n try:\n img = base64.b64decode(b64_str)\n # cv2.imdecode expects a uint8 buffer; the original np.int8 dtype raises an error\n return True, cv2.imdecode(np.frombuffer(img, dtype=np.uint8), 1)\n except cv2.error:\n return False, None\n\n\n@app.route('/get_image_latent', methods=['POST'])\ndef get_image_latent():\n try:\n req_data = json.loads(request.data)\n if 'body' not in req_data:\n return make_response(\"Failed to load JSON\", 200)\n else:\n req_body = req_data['body']\n except ValueError:\n return make_response(\"Failed to load JSON\", 200)\n\n header = {}\n response = {'results': []}\n\n src_img_str = req_body['image']\n ok, src_image = decode_b64_to_image(src_img_str)\n means = get_latent(src_image)\n torch.cuda.empty_cache()\n\n response['means'] = json.dumps(means.tolist())\n feedback = {'header': header, 'response': response}\n return make_response(json.dumps(feedback), 200)\n\n\nif __name__ == \"__main__\":\n waitress.serve(app, host='0.0.0.0', port=6060, threads=6)\n","sub_path":"scripts/vae_port.py","file_name":"vae_port.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"104593840","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.contrib.auth.models import Group\n\nfrom core.models import Pessoa\n\nfrom datetime import date\n\n\nDESTINO = (\n ('todos','TODOS'),\n ('pessoa','PESSOA'),\n ('grupo','GRUPO'),\n )\n\nclass Mensagem(models.Model): \n '''\n @Mensagem: Data model for the system messages sent to staff/students\n ''' \n texto \t\t= 
models.CharField(max_length=250, blank=True, verbose_name='Message') \n destino = models.CharField(max_length=6,blank=True,null=True,choices=DESTINO,default='todos')\n # pass the callable (not date.today()) so the default is evaluated on each save, not once at import\n data_criacao \t= models.DateField(default=date.today,verbose_name='Creation Date')\n pessoa_destino = models.ForeignKey(Pessoa,blank=True,null=True,related_name='pessoa_mensagens')\n grupo \t\t\t\t= models.ForeignKey(Group,blank=True,null=True,related_name='grupo_mensagens')\n ","sub_path":"academia_app/mensagem/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"608969501","text":"from Server.db.server_db import Client_db, Message_db, ClientHistory_db, Contacts_db\nfrom Server.db.server_db import Base\nfrom Server.db.ServerStorage import Storage\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass TestStorage():\n def setup(self):\n engine = create_engine('sqlite:///:memory:', echo=False)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n self.session = session\n\n client1 = Client_db('Tor', '123')\n client1_h = ClientHistory_db(1, 'current_time', None)\n session.add(client1)\n session.add(client1_h)\n client2 = Client_db('Ironman', '1234')\n session.add(client2)\n client3 = Client_db('Halk', '12345')\n session.add(client3)\n client4 = Client_db('Groot', '123456')\n session.add(client4)\n\n self.storage = Storage(session)\n\n def test_sign_up(self):\n assert self.storage.sign_up_client('Black widow', '1234567') == True\n assert self.storage.sign_up_client('Tor', '123') == False\n\n def test_sign_in(self):\n assert self.storage.sign_in_client('Ironman', '1234') == True\n assert self.storage.sign_in_client('Ironman', '555') == False\n\n def test_client_by_login(self):\n client = self.storage.get_client_by_login('Tor')\n assert client.id == 1\n\n def test_client_by_id(self):\n client = self.storage.get_client_by_id(4)\n assert client == 'Groot'\n\n def test_get_all_clients(self):\n clients = self.storage.get_all_client()\n assert clients == ['Tor', 'Ironman', 'Halk', 'Groot', 'Общий чат']\n\n def test_add_client_history(self):\n self.storage.add_client_history('Ironman', 'current_time2')\n assert self.storage.get_client_history('Ironman') == {\n 'Client': 'Ironman',\n 'Enter time': 'current_time2',\n 'IP adress': None\n }\n\n def test_get_client_history(self):\n assert self.storage.get_client_history('Tor') == {\n 'Client': 'Tor',\n 'Enter time': 'current_time',\n 'IP adress': None\n }\n\n def test_get_contacts(self):\n self.storage.sign_up_client('Loki', 'False')\n self.storage.add_contacts('Loki', 'Groot')\n assert self.storage.get_contacts('Loki') == ['Общий чат', 'Groot']\n\n def test_get_messages(self):\n self.storage.add_message_history('current_time', 'Groot', 'Halk', 'I am Goot')\n message = self.storage.get_message_history()\n assert message == [['current_time', 'Groot', 'Halk', 'I am Goot']]\n\n def teardown(self):\n self.session.rollback()\n","sub_path":"tests/test_server_storage.py","file_name":"test_server_storage.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"472279313","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\nmonths = ['january', 'february', 'march', 'april', 
'may', 'june']\ndays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n '''This while loop asks for user input regarding city location.'''\n \n while True:\n city = input(\"What city would you like data for? (Chicago, New York City, Washington)\\n--> \").lower()\n # Validate against the CITY_DATA keys instead of opening the CSVs here;\n # the original pd.read_csv('chicago.csv', 'new_york_city.csv', 'washington.csv')\n # call passed the extra file names as separator/delimiter arguments and would raise.\n if city in CITY_DATA:\n break\n print('Please choose one of the following cities: Chicago, New York City, or Washington.')\n\n # TO DO: get user input for month (all, january, february, ... , june)\n '''This while loop asks for user input regarding a particular month of data.'''\n \n while True:\n month_u = input(\"Type a month between January and June for a specific time frame of data. For all months type 'all'\\n--> \").lower().strip()\n # re-prompt until the answer is a valid month name or 'all'; the original\n # accepted a single misspelled retry without validating or lowercasing it\n if month_u in months or month_u == 'all':\n break\n print(\"Please input a month spelled correctly\")\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n \n while True:\n day_u = input(\"Type a day of the week. 
If you want all days, type 'all'.\\n-->\").lower()\n \n while day_u not in days and day_u != 'all': \n day_u = input(\"Please input a day spelled correctly.\\n-->\")\n break \n \n break\n \n print('-'*40)\n return city, month_u, day_u\n\ndef load_data(city, month_u, day_u):\n \n \"\"\"\n Loads data for the user-specified filters above.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time']= pd.to_datetime(df['Start Time'])\n\n df['month_u'] = df['Start Time'].dt.month\n df['day_u'] = df['Start Time'].dt.weekday_name\n \n \n if month_u != 'all':\n month_u = months.index(month_u) + 1\n df = df[df['month_u'] == month_u]\n \n elif month_u == 'all':\n month_u == months \n \n if day_u != 'all':\n\n df = df[df['day_u'] == day_u.title()]\n\n elif day_u == 'all':\n day_u = days\n \n return df\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel by using the 'Start Time' column of selected city.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n \n \n # TO DO: display the most common month\n df['month']= df['Start Time'].dt.month\n month_comm = df['month'].mode()[0]\n print(\"The most usage occurs during the month of {}.\\n\".format(month_comm))\n\n # TO DO: display the most common day of week\n df['day']= df['Start Time'].dt.weekday_name\n day_comm = df['day'].mode()[0]\n \n print(\"The most common day of travel is {}.\\n\".format(day_comm))\n \n \n # TO DO: display the most common start hour\n df['hour']= df['Start Time'].dt.hour\n hour_comm = df['hour'].mode()[0]\n \n print(\"The most common hour of travel is at {} (military time).\\n\".format(hour_comm))\n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n return month_comm,day_comm,hour_comm\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n # TO DO: display most commonly used end station\n sc = df['Start Station'].mode()[0]\n ec = df['End Station'].mode()[0]\n \n if sc == ec:\n print(\"Station {} is both the most common starting and ending station!\\n\".format(sc))\n else: \n print(\"The most commonly used starting and ending stations are \\n{} and {}.\\n\".format(sc, ec))\n \n # TO DO: display most frequent combination of start station and end station trip\n se_combo = (df['Start Station'] + \" \" + df['End Station']).mode()[0]\n\n print(\"The most common trip between stations is \\n{}.\\n\".format(se_combo))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # TO DO: display mean travel time\n total = df['Trip Duration'].sum()\n avg = df['Trip Duration'].mean() \n \n print(\"The total travel time is {}.\\nThe average travel time is {}.\\n\".format(total,avg))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef 
user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n if 'User Type' in df.columns:\n user_types = df['User Type'].value_counts()\n print(\"Here are the counts of user types: \\n{}\\n\".format(user_types))\n\n # TO DO: Display counts of gender\n try: \n gender = df['Gender'].value_counts()\n print(\"Here are the counts for gender types: \\n{}\\n\".format(gender))\n except:\n print(\"Sorry. No data available for Gender statistics! Try another city.\")\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest = int(min(df['Birth Year']))\n most_recent = int(max(df['Birth Year'])) \n most_common = int(df['Birth Year'].mode())\n \n print(\"The earliest year of birth is {}, while the most recent year of birth is {}.\\n\".format(earliest,most_recent)) \n print(\"The most common year of birth shared between users is {}.\\n\".format(most_common))\n \n except:\n print(\"Sorry. Invalid/No data for Birth Year statistics! Try another city.\")\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef raw(df):\n '''This function displays raw data five lines at a time.'''\n lower = 0\n upper= 5\n \n while True:\n raw_u = input(\"Would you like to view 5 lines of raw data for your city? Enter 'yes' if so.\\n-->\").lower()\n if raw_u != 'yes' or upper >= len(df):\n break\n \n else:\n print(df.iloc[lower:upper])\n lower += 5\n upper += 5 \n \ndef main():\n \n while True:\n city, month_u, day_u = get_filters()\n df = load_data(city, month_u, day_u)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n raw(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n-->').lower()\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n#I would refactor if I was more advanced here. And another comment...\n#These comments are for the git project. 
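\n# [Editor's sketch, not part of the original script] For reference, the\n# filtering in load_data() can be exercised like this (hypothetical run):\n#   df = load_data('chicago', 'march', 'friday')\n#   df['month_u'].unique()   # -> array([3]), since months.index('march') + 1 == 3\n#   df['day_u'].unique()     # -> array(['Friday'], dtype=object)\n# 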
I found it not too bad at all!","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130017886","text":"import os\nimport logging\n\nfrom rest_framework.exceptions import APIException\n\nIS_TEST_ENV = os.getenv('ENV') == 'test'\nlogger = logging.getLogger(__name__)\n\n\nclass ValidationException(APIException):\n status_code = 400\n default_detail = 'There is an error in your request'\n default_code = 'client_error'\n slug = None\n\n def __init__(self, details, code=400, slug=None):\n self.status_code = code\n self.default_detail = details\n self.slug = slug\n\n if IS_TEST_ENV and slug:\n logger.error(f'Status {str(self.status_code)} - {slug}')\n super().__init__(slug)\n else:\n logger.error(f'Status {str(self.status_code)} - {details}')\n super().__init__(details)\n","sub_path":"breathecode/utils/validation_exception.py","file_name":"validation_exception.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"590385739","text":"from Log import Log\nfrom Interruption import Interruption\nfrom PcbState import Finish\n\n\nclass FinishPcbInIO(Interruption):\n \n def manage(self, interruption_manager):\n self.pcb.change_state(Finish())\n interruption_manager.get_long_term_scheduler().add_finished_processes(self.pcb)\n interruption_manager.get_mmu().free(self.pcb)\n Log().print_memory_state(interruption_manager.get_mmu().memory)\n Log().print_interruption(\"IO Finish Process \" + str(self.pcb.pid) + \" state: \" + self.pcb.state.state)","sub_path":"kernel/src/FinishPcbInIO.py","file_name":"FinishPcbInIO.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"312096300","text":"\n\n\ndef gsearch(string,num):\n from googleapiclient.discovery import build\n\n apikey = \"AIzaSyAdg6K5gxSaLaXLVT3nbqDNqLnp0587FaI\"\n cseid = \"017578349630011676667:i5oi_ecmspk\"\n\n def google_search(search_term, api_key, cse_id, **kwargs): #Function from https://github.com/frrmack/googlesearch\n service = build(\"customsearch\", \"v1\", developerKey=api_key)\n res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute() # seperate key value pairs using the ** syntax\n return res['items']\n\n results = google_search(\n string, apikey, cseid, num=10)\n \n # Added code\n resultlist = []\n # iterate through the results num amount and then \n print(len(results))\n for x in range(0,num):\n if x == 10:\n break\n resultlist.append(results[x]['link'])\n \n \n return (resultlist)\n","sub_path":"GoogleSearch.py","file_name":"GoogleSearch.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595649668","text":"def click_me():\n print(\"Button was clicked\") \n\nfrom tkinter import *\nwindow = Tk()\nwindow.geometry('500x400')\nwindow.title(\"First program\")\nl=Label(window,text=\"Hello\").pack()\nbutton = Button(window, text='Click me', bd=10,width=25,bg = \"yellow\",fg = \"black\",command=click_me)\nbutton.pack(padx=100, pady=50)\nwindow.mainloop()\n","sub_path":"Lab12.1.py","file_name":"Lab12.1.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"120647007","text":"from nonebot import on_command, 
CommandSession\nfrom data.json.json_editor import JSONEditor\nimport config\nfrom pcr.plugins.get_best_name import get_best_name\nfrom data.player_name import qq_to_game_name\n\n\n@on_command('no_report', aliases=['停止播报', '停止汇报', '停止报告'], only_to_me=False)\nasync def no_report(session: CommandSession):\n name: str = get_best_name(session)\n game_name = qq_to_game_name(name)\n JSONEditor(session.event.group_id).add_to_no_report_list(game_name)\n await session.send(message='Will stop reporting sorties for ' + name + ', meow~')\n\n\n@on_command('do_report', aliases=['开始播报', '重新汇报', '开始报告', '开始汇报'], only_to_me=False)\nasync def do_report(session: CommandSession):\n group_id = session.event.group_id\n name: str = get_best_name(session)\n game_name = qq_to_game_name(name, group_id=group_id)\n JSONEditor(group_id).remove_from_no_report_list(game_name)\n await session.send(message='Will resume reporting sorties for ' + name + ', meow~')\n","sub_path":"pcr/plugins/no_report.py","file_name":"no_report.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"315223877","text":"#!/usr/bin/env python\r\n\r\nimport unicornhat as unicorn\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\nprint('''Scroller\r\n\r\nDisplays each pixel in turn.\r\nCurrent version picks a random color,\r\nend state should cycle colors after\r\neach complete board cycle\r\n''')\r\n\r\nunicorn.set_layout(unicorn.AUTO)\r\nunicorn.rotation(0)\r\nunicorn.brightness(0.6)\r\nwidth,height=unicorn.get_shape()\r\n\r\n\r\nwhile True:\r\n for y in range(height):\r\n for x in range(width):\r\n unicorn.clear()\r\n unicorn.show()\r\n r=randint(0, 255)\r\n g=randint(0, 255)\r\n b=randint(0, 255)\r\n unicorn.set_pixel(x,y,r,g,b)\r\n unicorn.show()\r\n sleep(0.1)","sub_path":"onepixel.py","file_name":"onepixel.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"177616530","text":"from mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# table = pd.read_csv(\"recomb_relax.info\", sep=\"\\s\")\n# print(table)\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n#\n# # Grab some test data.\n# X = np.arange(-5.5, 6.5, 1)\n# Y = np.arange(-5.5, 6.5, 1)\n#\n# R = np.sqrt(X**2 + Y**2)\n# # for i in X:\n# # for j in Y:\n# # for k_x in table[x]:\n# # for k_y in table[y]:\n# # if table[x][k_x] ==\n# # if table\n#\n# for k_x in range(len(table[x])):\n# print(table[x][k_x])\nfileDir = \"recomb_relax.info\"\n\ndata = pd.read_csv(fileDir, sep=\"\\s\", dtype=np.float64)\n\ndataTop = data.drop_duplicates(subset=['x', 'y'], keep='first', inplace=False)\nXTop = dataTop['x']\nYTop = dataTop['y']\nZTop = dataTop['Energy']\n\ndataMid = data.drop_duplicates(subset=['x', 'y'], keep=False, inplace=False)\nXMid = dataMid['x']\nYMid = dataMid['y']\nZMid = dataMid['Energy']\n\ndataBottom = data.drop_duplicates(subset=['x', 'y'], keep='last', inplace=False)\nXBottom = dataBottom['x']\nYBottom = dataBottom['y']\nZBottom = dataBottom['Energy']\n\nfig = plt.figure(figsize=(11.5, 8.5))\nax = fig.add_subplot(111, projection='3d')\n\nax.plot_trisurf(XTop, YTop, ZTop, cmap='viridis', alpha=0.5)\nax.plot_trisurf(XMid, YMid, ZMid, cmap='viridis', alpha=0.5)\nax.plot_trisurf(XBottom, YBottom, ZBottom, cmap='viridis', alpha=0.5)\n\nplt.show()\n\n\n# print(X)\n# print(Y)\n\n\n\n# Plot a basic wireframe.\n# ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)\n#\n# 
plt.show()\n","sub_path":"Recomb/Relax/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458306911","text":"import numpy as np\nfrom nets import ssd_vgg_300, ssd_common, np_methods\nfrom preprocessing import ssd_vgg_preprocessing\n\nNET_SHAPE = (300, 300)\nSELECT_TRESHOLD=0.5\nNMS_TRESHOLD=0.45\n\nssd_net = ssd_vgg_300.SSDNet()\nssd_anchors = ssd_net.anchors(NET_SHAPE)\n\ndef execute(data: list, **kwargs):\n for row in data:\n rpredictions = [\n np.array(row['ssd_300_vgg/softmax/Reshape_1:0']),\n np.array(row['ssd_300_vgg/softmax_1/Reshape_1:0']),\n np.array(row['ssd_300_vgg/softmax_2/Reshape_1:0']),\n np.array(row['ssd_300_vgg/softmax_3/Reshape_1:0']),\n np.array(row['ssd_300_vgg/softmax_4/Reshape_1:0']),\n np.array(row['ssd_300_vgg/softmax_5/Reshape_1:0'])\n ]\n rlocalisations = [\n np.array(row['ssd_300_vgg/block4_box/Reshape:0']),\n np.array(row['ssd_300_vgg/block7_box/Reshape:0']),\n np.array(row['ssd_300_vgg/block8_box/Reshape:0']),\n np.array(row['ssd_300_vgg/block9_box/Reshape:0']),\n np.array(row['ssd_300_vgg/block10_box/Reshape:0']),\n np.array(row['ssd_300_vgg/block11_box/Reshape:0'])\n ]\n rbbox_img = row['bbox_img']\n\n # Get classes and bboxes from the net outputs.\n rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(\n rpredictions, rlocalisations, ssd_anchors,\n select_threshold=SELECT_TRESHOLD, img_shape=NET_SHAPE, num_classes=21, decode=True)\n\n rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)\n rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)\n rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=NMS_TRESHOLD)\n # Resize bboxes to original image shape. 
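\n # [Editor's sketch, not part of the original file] execute() takes one\n # dict per image holding the six softmax/localisation tensors named above\n # plus 'bbox_img'; assuming `row` is such a dict, a call looks like:\n #   results = execute([row])\n #   results[0]['classes'], results[0]['scores'], results[0]['boxes']\n # 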
Note: useless for Resize.WARP!\n rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)\n\n row[\"classes\"] = rclasses.tolist()\n row[\"scores\"] = rscores.tolist()\n row[\"boxes\"] = rbboxes.tolist()\n\n return data\n","sub_path":"models/ssd_postprocessing/src/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"322918849","text":"# Dijkstra's algorithm\nimport heapq\nimport sys\n\ninput = sys.stdin.readline\nINF = 99999999999\n\nN, M = map(int, input().split())\nstart = int(input())\n\ngraph = [[] for i in range(N)]\ndist = [INF] * N\nfor _ in range(M):\n s, e, w = map(int, input().split())\n graph[s].append((w, e))\n\ndef dijkstra(start):\n q = []\n\n heapq.heappush(q, (0, start))\n dist[start] = 0\n\n while q:\n w, e = heapq.heappop(q)\n\n if dist[e] < w: # already settled with a shorter path; no need to process\n continue\n \n for nextnode in graph[e]:\n nw, ne = nextnode\n cost = w + nw\n\n if cost < dist[ne]:\n dist[ne] = cost\n heapq.heappush(q, (cost, ne))\n\ndijkstra(start)\n\nfor i in range(N):\n print(dist[i] if dist[i] != INF else 'INF')\n","sub_path":"9-2.py","file_name":"9-2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"527138258","text":"\"\"\"\nCopyright (c) 2015 Wellcome Trust Sanger Institute\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport math\nimport networkx\n\nclass CalculateSimilarity(object):\n \"\"\" calculate graph similarity scores\n \"\"\"\n \n def __init__(self, hpo_by_individual, hpo_graph):\n \"\"\"\n \n Args:\n hpo_by_individual: dictionary of hpo terms for each individual\n graph: graph of hpo ontology, as networkx object\n \"\"\"\n \n self.graph = hpo_graph\n \n self.descendant_cache = {}\n self.ancestor_cache = {}\n \n self.hpo_counts = {}\n self.total_freq = 0\n \n self.tally_hpo_terms(hpo_by_individual)\n \n def tally_hpo_terms(self, hpo_terms):\n \"\"\" tallies each HPO term across the DDG2P genes\n \n Args:\n hpo_terms: dictionary of HPO terms for each individual\n \"\"\"\n \n for proband in hpo_terms:\n child_terms = hpo_terms[proband]\n for term in child_terms:\n self.add_hpo(term)\n \n def add_hpo(self, term):\n \"\"\" increments the count for an HPO term\n \n This increments a) the count for the specific term, and b) the total\n count of all terms.\n \n Args:\n term: HPO term (e.g. \"HP:0000001\")\n \"\"\"\n \n if term not in self.hpo_counts:\n # don't use terms which cannot be placed on the graph\n if not self.graph.has_node(term):\n return\n \n self.hpo_counts[term] = 0\n \n self.hpo_counts[term] += 1\n self.total_freq += 1\n \n def get_descendants(self, term):\n \"\"\" finds the set of subterms that descend from a top level HPO term\n \n Args:\n term: hpo term to find descendants of\n \n Returns:\n set of descendant HPO terms\n \"\"\"\n \n if term not in self.descendant_cache:\n self.descendant_cache[term] = networkx.descendants(self.graph, term)\n \n return self.descendant_cache[term]\n \n def get_ancestors(self, bottom_term):\n \"\"\" finds the set of subterms that are ancestors of a HPO term\n \n NOTE: this also includes the search node in the list of ancestors. This\n is so that when we look for matches of common ancestors between two\n nodes, and the two node terms are for the same node, we also include the\n common node in the list. 
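(Equivalently -- editor's note: get_ancestors(t) always contains t itself,\n so find_common_ancestors(t, t) is guaranteed to include t.)\n 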
That was awkwardly phrased.\n \n Args:\n bottom_term: hpo term to find ancestors of\n \n Returns:\n set of ancestor HPO terms\n \"\"\"\n \n if bottom_term not in self.ancestor_cache:\n subterms = networkx.ancestors(self.graph, bottom_term)\n subterms.add(bottom_term)\n self.ancestor_cache[bottom_term] = subterms\n \n return self.ancestor_cache[bottom_term]\n \n def find_common_ancestors(self, term_1, term_2):\n \"\"\" finds the common ancestors of two hpo terms\n \n Args:\n term_1: hpo term, eg HP:0000002\n term_2: hpo term, eg HP:0000003\n \n Returns:\n a list of all the common ancestors for the two terms\n \"\"\"\n \n # ignore terms that are obsolete (ie are not in the graph)\n if term_1 not in self.graph or term_2 not in self.graph:\n return set()\n \n return set(self.get_ancestors(term_1)) & set(self.get_ancestors(term_2))\n\n\nclass ICSimilarity(CalculateSimilarity):\n \"\"\" calculate similarity by IC score\n \"\"\"\n \n counts_cache = {}\n ic_cache = {}\n most_informative_cache = {}\n \n def get_most_informative_ic(self, term_1, term_2):\n \"\"\" calculate the information content between two HPO terms using the most informative common ancestor\n \n Args:\n term_1: hpo term, eg HP:0000003\n term_2: hpo term, eg HP:0000002\n \n Returns:\n the maximum information content value from the common ancestors of\n term_1 and term_2.\n \"\"\"\n \n terms = (term_1, term_2)\n \n if terms not in self.most_informative_cache:\n \n ancestors = self.find_common_ancestors(term_1, term_2)\n ic_values = [self.calculate_information_content(x) for x in ancestors]\n \n # cache the most informative IC value, so we only compute this once\n # per pair of HPO terms.\n most_informative = max(ic_values)\n self.most_informative_cache[terms] = most_informative\n self.most_informative_cache[(term_2, term_1)] = most_informative\n \n return self.most_informative_cache[terms]\n \n def calculate_information_content(self, term):\n \"\"\" calculates the information content for an hpo term\n \n For discussion of information content and similarity scores, see:\n Van der Aalst et al., (2007) Data & Knowledge Engineering 61:137-152\n \n Args:\n term: hpo term, eg \"HP:0000001\"\n \n Returns:\n the information content value for a single hpo term\n \"\"\"\n \n if term not in self.ic_cache:\n term_count = self.get_term_count(term)\n \n if term not in self.graph:\n return 0\n \n # cache the IC, so we don't have to recalculate for the term\n self.ic_cache[term] = -math.log(term_count/self.total_freq)\n \n return self.ic_cache[term]\n \n def get_term_count(self, term):\n \"\"\" Count how many times a term (or its subterms) was used.\n \n Args:\n term: hpo term, eg \"HP:0000001\"\n \n Returns:\n the number of times a term (or its subterms) was used.\n \"\"\"\n \n if term not in self.counts_cache:\n if term not in self.graph:\n return 0\n \n descendants = self.get_descendants(term)\n \n count = 0\n if term in self.hpo_counts:\n count += self.hpo_counts[term]\n for subterm in descendants:\n if subterm in self.hpo_counts:\n count += self.hpo_counts[subterm]\n \n # cache the count, so we only have to calculate this once\n self.counts_cache[term] = count\n \n return self.counts_cache[term]\n","sub_path":"hpo_similarity/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":7651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591571975","text":"import itertools as it\r\nfrom typing import List, Any\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as 
plt\r\n\r\ndef R(theta):\r\n s,c = np.sin(theta), np.cos(theta)\r\n return np.array([[c, -s],[s, c]])\r\n\r\n\r\nclass Vertex:\r\n r : np.ndarray\r\n\r\n def __init__(self, r:np.ndarray):\r\n self.r = np.asarray(r)\r\n self.is_apex = False\r\n\r\n self.edges = list()\r\n self.elements = list()\r\n\r\n def transform_by(self, func):\r\n self.r = func(self.r)\r\n return self\r\n\r\n def add_element(self, element):\r\n self.elements.append(element)\r\n\r\n def add_edge(self, edge):\r\n self.edges.append(edge)\r\n\r\n def move_by(self, dr):\r\n self.r += dr\r\n\r\n def move_to(self, r):\r\n self.r = r\r\n\r\n def get_neighbours(self):\r\n neighbours = []\r\n for element in self.elements:\r\n for vertex in element.vertices:\r\n if vertex != self and vertex not in neighbours:\r\n neighbours.append(vertex) # neighbours is a list; the original called .add(), which raises AttributeError\r\n return neighbours\r\n\r\n def __sub__(self, other):\r\n return other.r - self.r\r\n\r\n def __repr__(self):\r\n return self.r.round(3).__repr__().replace('array', 'apex' if self.is_apex else 'vertex')\r\n\r\n def __hash__(self):\r\n return tuple(self.r).__hash__()\r\n\r\nclass Line2D:\r\n seeds: np.ndarray\r\n v1:Vertex\r\n v2:Vertex\r\n\r\n def __init__(self, v1, v2):\r\n if type(v1) is np.ndarray or type(v1) is tuple:\r\n self.v1 = Vertex(v1)\r\n self.v2 = Vertex(v2)\r\n else:\r\n self.v1 = v1\r\n self.v2 = v2\r\n\r\n self.v1.add_edge(self)\r\n self.v2.add_edge(self)\r\n\r\n self.seeds = None\r\n\r\n @property\r\n def r(self):\r\n return np.array((self.r1,self.r2))\r\n\r\n @property\r\n def r1(self):\r\n return self.v1.r\r\n\r\n @property\r\n def r2(self):\r\n return self.v2.r\r\n\r\n @property\r\n def num_seeds(self):\r\n if self.seeds is None:\r\n return 0\r\n else:\r\n return len(self.seeds)\r\n\r\n def tangent(self):\r\n return (self.r2 - self.r1) / np.linalg.norm(self.r2 - self.r1)\r\n\r\n def left_normal(self):\r\n \"\"\"\r\n Normalized\r\n :return:\r\n \"\"\"\r\n\r\n m = np.array([[0, -1], [1, 0]])\r\n n = m @ (self.r2 - self.r1)\r\n n = n / np.linalg.norm(n)\r\n return n\r\n\r\n def length(self):\r\n return np.linalg.norm(self.r2 - self.r1)\r\n\r\n def split(self, num_total: int) -> np.ndarray:\r\n num_total += 2\r\n retval = np.linspace(self.r1, self.r2, num_total)\r\n self.seeds = retval[range(1,num_total-1)]\r\n return retval[range(1,num_total-1)]\r\n\r\n def intersect(self, other, in_domain=False):\r\n \"\"\"\r\n\r\n :param other:\r\n :param in_domain: If True, only vertices in the domain of the lines are considered\r\n :return:\r\n \"\"\"\r\n t1 = self.r2 - self.r1\r\n t2 = other.r2 - other.r1\r\n\r\n M = np.array((t1, t2)).T\r\n if np.linalg.matrix_rank(M) < 2: # Singular matrix: parallel lines: return None\r\n return None\r\n\r\n ab = np.linalg.solve(M, self.r1 - other.r1)\r\n\r\n if np.abs(ab[0]) == np.abs(ab[1]) and 0 <= np.abs(ab[0]) <= 1 or not in_domain:\r\n return self.r1 - ab[0] * t1\r\n else:\r\n return None\r\n\r\n def left_or_right(self, r):\r\n \"\"\"\r\n :return: 1 (left), -1 (right), 0 (on line)\r\n \"\"\"\r\n det = np.linalg.det(self.r - r)\r\n if det == 0:\r\n return 0\r\n if det > 0:\r\n return 1\r\n if det < 0:\r\n return -1\r\n\r\n def contains_point(self, r) -> bool:\r\n kkT = (r - self.r1) / (self.r2 - self.r1)\r\n if np.allclose(kkT[0], kkT) and 0 <= kkT[0] <= 1:\r\n return True\r\n return False\r\n\r\n def absolute_to_normalized(self, r) -> float:\r\n dr = self.r2 - self.r1\r\n kk = np.divide(r[r != 0] , dr[dr != 0])\r\n if np.allclose(kk[0], kk[~np.isnan(kk)]):\r\n return kk[0]\r\n\r\n def normalized_to_absolute(self, flt) -> 
np.ndarray:\r\n return self.r1 + flt * (self.r2 - self.r1)\r\n\r\n def element_size(self):\r\n return self.length() / self.num_seeds\r\n\r\n def plot(self, fig, ax):\r\n ax.plot(*self.r.T, color='C2')\r\n if self.seeds is not None:\r\n for seed in self.seeds:\r\n ax.plot(*seed, 'ko')\r\n\r\n def __eq__(self, other):\r\n return np.allclose(self.r, other.r)\r\n\r\n def __hash__(self):\r\n return (tuple(self.r1) + tuple(self.r2)).__hash__()\r\n\r\n def __repr__(self):\r\n return \"Line between {} and {}\".format(self.r1, self.r2)\r\n\r\n def __add__(self, other):\r\n return Line2D(self.r1 + other, self.r2 + other)\r\n\r\nclass LineChain:\r\n edges: List[Line2D]\r\n\r\n def __init__(self, list_of_vertices: List[Vertex]):\r\n if type(list_of_vertices[0]) is np.ndarray or type(list_of_vertices[0]) is tuple:\r\n self.vertices = [Vertex(r) for r in list_of_vertices]\r\n else:\r\n self.vertices = np.asarray(list_of_vertices)\r\n self.edges = list()\r\n self.edge_me()\r\n\r\n def edge_me(self):\r\n self.edges = list()\r\n for v1, v2 in zip(self.vertices, self.vertices[1:]):\r\n edge = Line2D(v1,v2)\r\n self.edges.append(edge)\r\n v1.add_edge(edge)\r\n v2.add_edge(edge)\r\n\r\n def interior_angles(self) -> np.ndarray:\r\n \"\"\"\r\n Angles at corners (r1, r2, .., rn), same order\r\n \"\"\"\r\n angles = []\r\n for i in range(len(self.vertices)):\r\n prev = self.vertices[i - 1].r\r\n this = self.vertices[i].r\r\n next = self.vertices[(i + 1) % len(self.vertices)].r\r\n t1 = (this - prev) / np.linalg.norm(this - prev)\r\n t2 = (next - this) / np.linalg.norm(next - this)\r\n angl = np.pi - np.arccos(t1 @ t2)\r\n angles.append(angl)\r\n return angles\r\n\r\n def total_length(self) -> float:\r\n return sum(e.length() for e in self.edges)\r\n\r\n def find_edge_starting_at(self, r:np.ndarray) -> Line2D:\r\n for edge in self.edges:\r\n if np.allclose(edge.v1.r, r):\r\n return edge\r\n return None\r\n\r\n def find_edge_ending_at(self, r:np.ndarray) -> Line2D:\r\n for edge in self.edges:\r\n if np.allclose(edge.v2.r, r):\r\n return edge\r\n return None\r\n\r\n def next_vertex(self, r:np.ndarray) -> Vertex:\r\n e = self.find_edge_starting_at(r)\r\n return e.v2\r\n\r\n def prev_vertex(self, r:np.ndarray) -> Vertex:\r\n e = self.find_edge_ending_at(r)\r\n return e.v1\r\n\r\n def next_edge(self, e) -> Line2D:\r\n return self.edges[self.edges.index(e) + 1]\r\n\r\n def prev_edge(self, e) -> Line2D:\r\n return self.edges[self.edges.index(e) - 1]\r\n\r\n def seed_all_by_number(self, num):\r\n for edge in self.edges:\r\n edge.seeds = edge.split(num)\r\n\r\n def seed_all_by_elm_size(self, size):\r\n for edge in self.edges:\r\n num = int(np.round(edge.length() / size))\r\n edge.split(num)\r\n\r\n def seed_idx_by_number(self, edge_idx, num):\r\n self.edges[edge_idx].split(num)\r\n\r\n def get_seeds_by_edge_idx(self, edge_idx) -> np.ndarray:\r\n return self.edges[edge_idx].seeds\r\n\r\n def get_seeds_by_edge(self, edge) -> np.ndarray:\r\n return edge.seeds\r\n\r\n def plot(self, fig, ax):\r\n for edge in self.edges:\r\n edge.plot(fig, ax)\r\n\r\n @staticmethod\r\n def from_edges(self):\r\n pass\r\n\r\n def __eq__(self, other):\r\n return np.allclose(self.vertices, other.points)\r\n\r\n def __iter__(self):\r\n yield from self.vertices\r\n\r\nclass Loop(LineChain):\r\n edges: List[Line2D]\r\n\r\n def normal(self):\r\n pass\r\n\r\n def edge_me(self):\r\n LineChain.edge_me(self)\r\n last_edge = Line2D(self.vertices[-1], self.vertices[0])\r\n self.edges.append(last_edge)\r\n self.vertices[-1].add_edge(last_edge)\r\n 
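# [Editor's sketch, not part of the original file] edge_me() appends the\r\n # closing edge, so for a unit square:\r\n #   loop = Loop([(0., 0.), (1., 0.), (1., 1.), (0., 1.)])\r\n #   len(loop.edges)  # -> 4 (a plain LineChain would have 3)\r\n 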
self.vertices[0].add_edge(last_edge)\r\n\r\n def point_curvature(self, r) -> int:\r\n \"\"\"\r\n 1 : Loop curves inward at this point (convex)\r\n 0 : Edges are parallell\r\n -1 : Loop curves outward at this point (concave)\r\n \"\"\"\r\n e1 = self.find_edge_ending_at(r)\r\n e2 = self.find_edge_starting_at(r)\r\n return e1.left_or_right(e2.r[1])\r\n\r\n def get_first_convex_vertex(self) -> Vertex:\r\n for vertex in self.vertices:\r\n if self.point_curvature(vertex.r) < 0:\r\n return vertex\r\n return None\r\n\r\n def split_from_concave(self) -> tuple:\r\n cvx = self.get_first_convex_vertex()\r\n e1 = self.find_edge_starting_at(cvx.r)\r\n e2 = self.find_edge_ending_at(cvx.r)\r\n\r\n shortest_dist = np.inf\r\n closest_intersect = None\r\n split_edge = None\r\n for i, edge in enumerate(self.edges):\r\n if edge == e1 or edge == e2:\r\n continue\r\n\r\n n = edge.left_normal()\r\n split_line = Line2D(cvx.r, cvx.r + n)\r\n intersection = edge.intersect(split_line)\r\n this_dist = np.linalg.norm(intersection - cvx.r)\r\n\r\n if this_dist < shortest_dist:\r\n shortest_dist = this_dist\r\n closest_intersect = Vertex(intersection)\r\n split_edge = edge\r\n\r\n # Now we have the shortest line\r\n left_vertices = [cvx, closest_intersect, split_edge.v2]\r\n next_vtx = self.next_vertex(split_edge.r2)\r\n\r\n while not np.allclose(next_vtx.r, cvx.r):\r\n left_vertices.append(next_vtx)\r\n next_vtx = self.next_vertex(next_vtx.r)\r\n\r\n right_vertices = [cvx, closest_intersect, split_edge.v1]\r\n prev_vtx = self.prev_vertex(split_edge.r2)\r\n\r\n while not np.allclose(prev_vtx.r, split_edge.r1):\r\n right_vertices.append(prev_vtx)\r\n prev_vtx = self.prev_vertex(prev_vtx.r)\r\n right_vertices.reverse()\r\n\r\n right_loop = Loop(right_vertices)\r\n left_loop = Loop(left_vertices)\r\n\r\n right_loop.edge_me()\r\n left_loop.edge_me()\r\n\r\n return (left_loop, right_loop)\r\n\r\n\r\nclass ElementaryMesh:\r\n def __init__(self):\r\n pass\r\n\r\nclass EQ53(ElementaryMesh):\r\n n1 = 5\r\n n2 = 3\r\n def __init__(self, r1, r2, r3, r4):\r\n self.r1 = r1\r\n self.r2 = r2\r\n self.r3 = r3\r\n self.r4 = r4\r\n pass\r\n\r\n @property\r\n def external_nodes(self):\r\n return np.array((self.r1, self.r2, self.r3, self.r4))\r\n\r\n @property\r\n def internal_nodes(self):\r\n return self.mapper_matrix() @ self.external_nodes\r\n\r\n def mapper_matrix(self):\r\n return np.array([[3/4, 1/4, 0, 0],\r\n [1/2, 1/2, 0, 0],\r\n [1/4, 3/4, 0, 0],\r\n [ 0, 1/2, 1/2, 0],\r\n [ 0, 0, 1/2, 1/2],\r\n [1/2, 0, 0, 1/2],\r\n [1/2, 0, 1/2, 0],\r\n [1/3, 0, 2/3, 0],\r\n [1/3, 0, 1/3, 0]])\r\n\r\nclass GWithShapeFunctions:\r\n\r\n @property\r\n def r(self) -> np.ndarray:\r\n pass\r\n\r\n def N(self, xi, eta) -> np.ndarray:\r\n pass\r\n\r\n def G(self, xi, eta) -> np.ndarray:\r\n pass\r\n\r\n def jacobian(self, xi, eta) -> np.ndarray:\r\n return self.G(xi, eta) @ self.r\r\n\r\n def as_loop(self) -> Loop:\r\n return Loop([r for r in self.r])\r\n\r\n def plot(self, fig, ax):\r\n rr = np.vstack((self.r, self.r[0]))\r\n ax.plot(*rr.T, color='black')\r\n for vertex in self.vertices:\r\n ax.plot(*vertex.r, 'go', markersize=2)\r\n\r\nclass Quadrilateral(GWithShapeFunctions):\r\n def __init__(self, v1, v2, v3, v4):\r\n self.v1 = Vertex(v1) if type(v1) is np.ndarray or type(v1) is tuple else v1\r\n self.v2 = Vertex(v2) if type(v2) is np.ndarray or type(v2) is tuple else v2\r\n self.v3 = Vertex(v3) if type(v3) is np.ndarray or type(v3) is tuple else v3\r\n self.v4 = Vertex(v4) if type(v4) is np.ndarray or type(v4) is tuple else v4\r\n self.vertices = 
(self.v1, self.v2, self.v3, self.v4) # store the wrapped Vertex objects, not the raw constructor arguments\r\n\r\n self.v1.add_element(self)\r\n self.v2.add_element(self)\r\n self.v3.add_element(self)\r\n self.v4.add_element(self)\r\n\r\n @property\r\n def r(self):\r\n return np.array([self.v1.r, self.v2.r, self.v3.r, self.v4.r])\r\n\r\n def N(self, xi, eta):\r\n return 1 / 4 * np.array([(1 - xi) * (1 - eta),\r\n (1 + xi) * (1 - eta),\r\n (1 + xi) * (1 + eta),\r\n (1 - xi) * (1 + eta)])\r\n\r\n def G(self, xi, eta):\r\n return 1/4 * np.array([[ -(1-eta), 1-eta, 1+eta, -(1+eta)],\r\n [ -(1-xi), -(1+xi), 1+xi, 1-xi]])\r\n\r\n def diag_skew(self):\r\n l1 = np.linalg.norm(self.v1.r - self.v3.r)\r\n l2 = np.linalg.norm(self.v2.r - self.v4.r)\r\n return np.max((l1/l2, l2/l1))\r\n\r\n def aspect_ratio(self):\r\n r = self.r\r\n r_rolled = np.roll(r, 1, axis=0)\r\n side_lengths = np.linalg.norm(r_rolled - r, axis=1)\r\n return np.max(side_lengths) / np.min(side_lengths)\r\n\r\n def area(self):\r\n # shoelace formula; np.roll closes the polygon (the original dropped the last->first edge term)\r\n x,y = self.r.T\r\n xn, yn = np.roll(x, -1), np.roll(y, -1)\r\n return 1/2 * np.sum(x*yn - xn*y)\r\n\r\n def centroid(self):\r\n x,y = self.r.T\r\n xn, yn = np.roll(x, -1), np.roll(y, -1)\r\n A = self.area()\r\n cross = x*yn - xn*y\r\n cx = 1/(6*A) * np.sum((x + xn) * cross)\r\n cy = 1/(6*A) * np.sum((y + yn) * cross)\r\n return np.array([cx,cy])\r\n\r\n def adjacent_elements(self, of_degree=1):\r\n adjacents = []\r\n for v in self.vertices:\r\n for e in v.elements:\r\n if e == self:\r\n continue\r\n common_vertices = [v for v in e.vertices if v in self.vertices]\r\n if len(common_vertices) > of_degree:\r\n adjacents.append(e)\r\n return adjacents\r\n\r\n def is_self_intersecting(self) -> bool:\r\n edges = []\r\n for v1,v2 in zip(self.vertices, self.vertices[1:]):\r\n edges.append(Line2D(v1, v2))\r\n edges.append(Line2D(self.vertices[-1], self.vertices[0]))\r\n for edge1 in edges:\r\n for edge2 in edges:\r\n if edge1 == edge2:\r\n continue\r\n intersect = edge1.intersect(edge2, in_domain=True)\r\n if intersect is not None and not np.allclose(edge1.r1, intersect) and not np.allclose(edge1.r2, intersect):\r\n return True\r\n return False\r\n\r\n\r\nclass NormalizedTriangle(GWithShapeFunctions):\r\n \"\"\"\r\n r1: At origin\r\n Then counterclockwise\r\n \"\"\"\r\n def __init__(self, v1, v2, v3):\r\n self.v1 = v1\r\n self.v2 = v2\r\n self.v3 = v3\r\n\r\n @property\r\n def r(self):\r\n return np.array([self.v1.r, self.v2.r, self.v3.r])\r\n\r\n def N(self, xi, eta):\r\n return np.array([1 - eta - xi,\r\n xi,\r\n eta])\r\n\r\n def G(self, xi, eta) -> np.ndarray:\r\n # the comma between the two rows was missing, which made this subscript one list with another\r\n return np.array([[-1, 1, 0],\r\n [-1, 0, 1]])\r\n\r\nclass StaticInterpolatorTriangle:\r\n def __init__(self, rr, rr_, origin=(0,0)):\r\n \"\"\"\r\n\r\n :param rr: Original coordinates\r\n :param rr_: Transformed coordinates\r\n \"\"\"\r\n\r\n self.origin = np.asarray(origin)\r\n self.rr = np.asarray(rr)\r\n self.rr_ = np.asarray(rr_)\r\n\r\n self.transformation = (self.rr_ - self.origin).T @ np.linalg.inv(self.rr - self.origin).T\r\n\r\n def transform(self, pt:np.ndarray):\r\n return self.transformation @ (pt - self.origin) + self.origin\r\n\r\n def detransform(self, pt:np.ndarray):\r\n return np.linalg.solve(self.transformation, pt - self.origin) + self.origin\r\n\r\n def contains_point(self, pt:np.ndarray):\r\n lines = [\r\n Line2D(self.rr[0], self.rr[1]),\r\n Line2D(self.rr[1], self.origin),\r\n Line2D(self.origin, self.rr[0])]\r\n sides = [line.left_or_right(pt) for line in lines]\r\n return all(side == sides[0] for side in sides)\r\n\r\nclass QuadMesher:\r\n elements: List[Quadrilateral]\r\n\r\n def __init__(self):\r\n self.elements = []\r\n self.loops = []\r\n\r\n @staticmethod\r\n def 
mesh_structured_quad(quad:Quadrilateral, num_nodes_x:int, num_nodes_y:int):\r\n elements = []\r\n xx = np.linspace(-1, 1, num_nodes_x)\r\n yy = np.linspace(-1, 1, num_nodes_y)\r\n\r\n for xi1,xi2 in zip(xx, xx[1:]):\r\n for eta1,eta2 in zip(yy, yy[1:]):\r\n rr = np.array([[xi1, eta1],[xi2,eta1],[xi2,eta2],[xi1,eta2]])\r\n #r1, r2, r3, r4 = quad.N(*rr) @ quad.r\r\n \"\"\"\r\n The above is shorthand for \r\n r1 = quad.N(xi1, eta1) @ quad.r1\r\n r2 = quad.N(xi2, eta1) @ quad.r2\r\n and so on\r\n \"\"\"\r\n r1 = quad.N(xi1, eta1) @ quad.r\r\n r2 = quad.N(xi2, eta1) @ quad.r\r\n r3 = quad.N(xi2, eta2) @ quad.r\r\n r4 = quad.N(xi1, eta2) @ quad.r\r\n\r\n q = Quadrilateral(r1, r2, r3, r4)\r\n elements.append(q)\r\n\r\n return elements\r\n\r\n def mesh_padding(self, loop:Loop):\r\n \"\"\"\r\n Loop must be pre-seeded. Seeds must be excluding endpoints\r\n :param loop:\r\n :return:\r\n \"\"\"\r\n self.loops.append(loop)\r\n for _ in range(1): # Boilerplate: Will be while(criterion based on smallness of inner loop)\r\n inner = self.offset_loop(loop)\r\n self.loops.append(inner)\r\n self.mesh_between_loops(loop, inner)\r\n\r\n def mesh_between_loops(self, outer:Loop, inner:Loop):\r\n \"\"\"\r\n Outer loop must be pre-seeded\r\n \"\"\"\r\n if not inner.edges:\r\n inner.edge_me()\r\n\r\n # No tuck\r\n\r\n for i in range(len(outer.edges)):\r\n if i == 0:\r\n pass # Do something\r\n\r\n outer_edge = outer.edges[i]\r\n inner_edge = inner.edges[i]\r\n\r\n rel = outer_edge.length() / inner_edge.length()\r\n assert rel >= 1 # Concave loops not yet supported\r\n\r\n # compare the excess length rel - 1; the original tested 1 - rel, which\r\n # can never exceed the positive threshold once the assert above holds\r\n if rel - 1 > outer_edge.element_size() / outer_edge.length():\r\n tuck_last = True\r\n # Line2D.split() takes only the seed count; the stray False argument raised TypeError\r\n inner_edge.split(outer_edge.num_seeds - 1)\r\n else:\r\n inner_edge.split(outer_edge.num_seeds)\r\n tuck_last = False\r\n\r\n outer_pts = outer_edge.seeds\r\n inner_pts = inner_edge.seeds\r\n\r\n for p1, p2, p3, p4 in zip(outer_pts, inner_pts, inner_pts[1:], outer_pts[1:]):\r\n q = Quadrilateral(p1, p2, p3, p4)\r\n self.elements.append(q)\r\n\r\n def offset_loop(self, loop:Loop):\r\n \"\"\"\r\n Loop must be pre-seeded. 
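A hypothetical use (editor's sketch, not from the original),\r\n assuming the loop was already seeded via loop.seed_all_by_number(5):\r\n\r\n mesher = QuadMesher()\r\n inner = mesher.offset_loop(loop) # offset inward by one element size\r\n\r\n Note also: 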
Seeds must be excluding endpoints\r\n :param loop:\r\n :return:\r\n \"\"\"\r\n #if len(loop.edges) == 0:\r\n # loop.edge_me()\r\n # loop.seed_all_by_number(5)\r\n\r\n inner = []\r\n #inner = [edge + edge.left_normal() * edge.length() / edge.num_seeds for edge in loop.edges]\r\n for vtx in loop.vertices:\r\n e0 = loop.find_edge_ending_at(vtx.r)\r\n e1 = loop.find_edge_starting_at(vtx.r)\r\n\r\n e0plus = e0 + e0.left_normal() * e0.element_size()\r\n e1plus = e1 + e1.left_normal() * e1.element_size()\r\n\r\n corner_pt = e0plus.intersect(e1plus)\r\n inner.append(corner_pt)\r\n\r\n newloop = Loop(inner)\r\n\r\n return newloop\r\n\r\n def plot(self, fig, ax):\r\n for l in self.loops:\r\n l.plot(fig, ax)\r\n for q in self.elements:\r\n q.plot(fig, ax)\r\n\r\nclass __QuadMesher:\r\n def __init__(self, loop:Loop, fig=None, ax=None):\r\n self.fig, self.ax = fig, ax\r\n if fig is None or ax is None:\r\n self.fig,self.ax = plt.subplots()\r\n self.loop = loop # Must be edged\r\n\r\n self.splitlines = list()\r\n self.crosslines = list()\r\n self.quads = list()\r\n self.tris = list()\r\n\r\n self.edges = loop.edges\r\n\r\n # Pick denseliest seeded edge as starting edge\r\n lengths = [edge.length() for edge in self.edges]\r\n first_edge_idx = lengths.index(max(lengths))\r\n\r\n\r\n first_edge = self.edges[first_edge_idx]\r\n left_edge = self.edges[first_edge_idx - 1]\r\n last_edge = self.edges[first_edge_idx - 2]\r\n right_edge = self.edges[first_edge_idx - 3]\r\n\r\n n1 = len(first_edge.seeds)\r\n if first_edge.tangent() @ last_edge.tangent() < 0:\r\n last_edge.seeds = last_edge.seeds[::-1]\r\n\r\n if right_edge.tangent() @ left_edge.tangent() < 0:\r\n left_edge.seeds = left_edge.seeds[::-1]\r\n\r\n\r\n delta = n1 - len(last_edge.seeds)\r\n assert len(left_edge.seeds) == len(right_edge.seeds)\r\n assert delta >= 0\r\n assert delta < len(left_edge.seeds)\r\n\r\n # For 3-to-2 reduction, delta must be smaller than the number of side seeds\r\n # Map first seeds to last seeds\r\n self.mapper = dict()\r\n f = n1 / len(last_edge.seeds)\r\n assert f > 1 and f < 2 # For now\r\n\r\n j = np.nan\r\n for i in range(n1):\r\n if int(np.round(i / f)) == j:\r\n continue ## This will be an \"orphaned\" node on first_edge\r\n j = int(np.round(i/f))\r\n self.mapper[i] = j\r\n splitline = Line2D(first_edge.seeds[i], last_edge.seeds[j])\r\n self.splitlines.append(splitline)\r\n\r\n self.plot_splitlines()\r\n\r\n for i, right_pt, left_pt in zip(it.count(), right_edge.seeds, left_edge.seeds):\r\n crossline = Line2D(right_pt, left_pt)\r\n self.crosslines.append(crossline)\r\n break\r\n\r\n self.plot_crosslines()\r\n\r\n def map_points(self, points1, points2):\r\n n1 = len(points1)\r\n n2 = len(points2)\r\n\r\n def plot_splitlines(self):\r\n for line in self.splitlines:\r\n self.ax.plot(*line.r.T, color='C1')\r\n\r\n def plot_crosslines(self):\r\n for line in self.crosslines:\r\n self.ax.plot(*line.r.T, color='C2')\r\n\r\nclass QuadPadMesher:\r\n vertices: List[Vertex]\r\n\r\n def __init__(self, loop:Loop):\r\n self.loop = loop\r\n self.triangles = list()\r\n self.elements = list()\r\n self.element_size = None\r\n self.vertices = []\r\n\r\n if not loop.edges:\r\n loop.edge_me()\r\n\r\n self.calculate_mesh_size()\r\n\r\n self.smoothing_factor = 1/50\r\n\r\n def calculate_mesh_size(self):\r\n # Better be pre-seeded\r\n self.element_size = np.linalg.norm(self.loop.edges[0].seeds[1] - self.loop.edges[0].seeds[0])\r\n\r\n def seed_and_triangle_generator(self, edges, triangles):\r\n for edge, tri in zip(edges, triangles):\r\n f = 
tri.detransform\r\n edge.v1.is_apex = True\r\n yield edge.v1.transform_by(f)\r\n for seed in edge.seeds:\r\n yield Vertex(seed).transform_by(f)\r\n yield edges[-1].v2.transform_by(f) # Eventually, this will re-yield the first vertex\r\n\r\n def mesh(self):\r\n elm_size = self.element_size\r\n straightened = self.straighten_and_triangulate()\r\n seeds = list()\r\n\r\n for r in self.seed_and_triangle_generator(straightened.edges, self.triangles):\r\n seeds.append(r)\r\n\r\n self.vertices.extend(seeds)\r\n\r\n for _ in range(5):\r\n seeds = self.pave_with_auto_tuck(seeds)\r\n self.vertices.extend(seeds)\r\n\r\n self.smooth()\r\n\r\n def pave_with_edge_tuck(self, outer_seeds:List[Vertex]) -> List[Vertex]:\r\n i = 0\r\n passed_apex = False\r\n midpt = self.midpoint()\r\n inner_seeds = []\r\n while i < (len(outer_seeds) - 1):\r\n v1 = outer_seeds[i]\r\n v3 = outer_seeds[i + 1]\r\n v2 = v2 if passed_apex \\\r\n else v4 if i != 0 \\\r\n else Vertex(v1.r + self.element_size * (midpt - v1.r) / np.linalg.norm(midpt - v1.r))\r\n v4 = Vertex(v3.r + self.element_size * (midpt - v3.r) / np.linalg.norm(midpt - v3.r))\r\n\r\n if not passed_apex:\r\n inner_seeds.append(v2)\r\n\r\n passed_apex = False\r\n if v3.is_apex: # Corner tuck! New order will be v1, v2 (inner) v4 (reassigned, past the corner) and v3 (apex)\r\n del v4\r\n passed_apex = True\r\n v4 = outer_seeds[i + 2] # Past the corner\r\n v2.is_apex = True # Have v2 be an apex for the next round\r\n i += 1 # Increment i by one more\r\n\r\n quad = Quadrilateral(v1, v2, v4, v3)\r\n\r\n self.elements.append(quad)\r\n i += 1\r\n inner_seeds.append(v4)\r\n\r\n return inner_seeds\r\n\r\n def pave_without_tuck(self, outer_seeds:List[Vertex]) -> List[Vertex]:\r\n i = 0\r\n quads = []\r\n midpt = self.midpoint()\r\n inner_seeds = []\r\n while i < (len(outer_seeds) - 1):\r\n v1 = outer_seeds[i]\r\n v3 = outer_seeds[i + 1]\r\n v2 = v4 if i != 0 \\\r\n else Vertex(v1.r + self.element_size * (midpt - v1.r) / np.linalg.norm(midpt - v1.r))\r\n v4 = Vertex(v3.r + self.element_size * (midpt - v3.r) / np.linalg.norm(midpt - v3.r))\r\n\r\n inner_seeds.append(v2)\r\n quad = Quadrilateral(v1, v2, v4, v3)\r\n quads.append(quad)\r\n i += 1\r\n inner_seeds.append(v4)\r\n\r\n self.elements.extend(quads)\r\n return inner_seeds\r\n\r\n def pave_with_auto_tuck(self, outer_seeds:List[Vertex]):\r\n i = 0\r\n quads = []\r\n midpt = self.midpoint()\r\n inner_seeds = []\r\n seed_lengths = [np.linalg.norm(v2 - v1) for v1,v2 in zip(outer_seeds[:-1], outer_seeds[1:])]\r\n total_length = sum(seed_lengths)\r\n n_seeds = len(outer_seeds)\r\n n_elements = int(np.round(total_length / self.element_size))\r\n n_tucks = n_seeds - n_elements - 1\r\n\r\n min_spacing = 5\r\n #i_with_tuck = np.round( np.linspace(0, n_seeds - 2, n_tucks + 2)[1:-1])\r\n indices = np.asarray(seed_lengths).argsort()[::1]\r\n i_with_tuck = [indices[0]]\r\n for idx in indices:\r\n if all([np.abs(i - idx) > min_spacing for i in i_with_tuck]):\r\n i_with_tuck.append(idx)\r\n if len(i_with_tuck) >= n_tucks:\r\n break\r\n\r\n while i < n_seeds - 1:\r\n v1 = outer_seeds[i]\r\n v3 = outer_seeds[i + 1]\r\n\r\n v2 = v4 if i != 0 \\\r\n else Vertex(v1.r + self.element_size * (midpt - v1.r) / np.linalg.norm(midpt - v1.r))\r\n v4 = Vertex(v3.r + self.element_size * (midpt - v3.r) / np.linalg.norm(midpt - v3.r))\r\n\r\n passed_tuck = False\r\n if i in i_with_tuck and i < n_seeds - 2:\r\n passed_tuck = True\r\n v4 = v2\r\n v1 = outer_seeds[i]\r\n v2 = outer_seeds[i+1]\r\n v3 = outer_seeds[i+2]\r\n v4.move_to(v2.r + self.element_size 
* (midpt - v2.r) / np.linalg.norm(midpt - v2.r))\r\n\r\n quad = Quadrilateral(v1, v2, v3, v4)\r\n i += 1\r\n\r\n #if i < n_seeds - 3 and Quadrilateral(v1, v2, v3, outer_seeds[i+3]).diag_skew() < 2.5:\r\n # trial_quad = Quadrilateral(v1, v2, v3, outer_seeds[i+3])\r\n # if not trial_quad.is_self_intersecting():\r\n # quad = trial_quad\r\n # i += 1\r\n\r\n else:\r\n quad = Quadrilateral(v1, v2, v4, v3)\r\n inner_seeds.append(v2)\r\n\r\n quads.append(quad)\r\n i += 1\r\n\r\n inner_seeds.append(v4)\r\n self.elements.extend(quads)\r\n #self.smooth(inner_seeds)\r\n return inner_seeds\r\n\r\n def find_and_create_tucks(self, element_subset):\r\n for i in range(1, len(element_subset)-1):\r\n element = element_subset[i]\r\n if not element.aspect_ratio() > 2:\r\n continue\r\n\r\n prev = element_subset[i-1]\r\n next = element_subset[i+1]\r\n\r\n v1 = element.v2\r\n v2 = element.v3\r\n adj = element.adjacent_elements(of_degree=2)\r\n adj.remove(prev)\r\n adj.remove(next)\r\n rear = adj[0]\r\n pass\r\n\r\n def smooth(self, subset=[], factor=1/50):\r\n if not subset:\r\n subset = self.vertices # Smooth the entire mesh\r\n for vertex in subset:\r\n skew_0 = np.array([e.diag_skew() for e in vertex.elements])\r\n aspect_0 = np.array([e.aspect_ratio() for e in vertex.elements])\r\n\r\n vertex.move_by(np.array([1e-5, 0])) # dx\r\n skew_x = np.array([e.diag_skew() for e in vertex.elements]) - skew_0\r\n aspect_x = np.array([e.aspect_ratio() for e in vertex.elements]) - aspect_0\r\n\r\n vertex.move_by(np.array([-1e-5, 1e-5])) # dy\r\n skew_y = np.array([e.diag_skew() for e in vertex.elements]) - skew_0\r\n aspect_y = np.array([e.aspect_ratio() for e in vertex.elements]) - aspect_0\r\n vertex.move_by(np.array([0, -1e-5])) # 0\r\n\r\n D = np.array([skew_x, skew_y]).T\r\n ds = -skew_0 - 1\r\n\r\n dr = np.linalg.solve(D.T @ D, D.T) @ ds\r\n dr *= self.smoothing_factor * self.element_size / np.max(dr) # Move at maximum of the element size\r\n vertex.move_by(dr)\r\n\r\n\r\n def straighten_and_triangulate(self) -> LineChain:\r\n loop = self.loop\r\n straight_verts = list()\r\n start_vtx = loop.vertices[0]\r\n straight_verts.append(start_vtx)\r\n for edge in loop.edges:\r\n l = edge.length()\r\n end_vtx = Vertex(start_vtx.r + np.array((l, 0)))\r\n\r\n rr = edge.r\r\n rr_ = np.array((start_vtx.r, end_vtx.r))\r\n self.triangles.append(StaticInterpolatorTriangle(rr, rr_, self.midpoint()))\r\n\r\n start_vtx = end_vtx\r\n straight_verts.append(end_vtx)\r\n\r\n straight = LineChain(straight_verts)\r\n straight.edge_me()\r\n for straightedge, loopedge in zip(straight.edges, loop.edges):\r\n n = loopedge.num_seeds\r\n straightedge.split(n)\r\n return straight\r\n\r\n def midpoint(self):\r\n return np.average([v.r for v in self.loop.vertices], axis=0)\r\n\r\n def plot(self, fig, ax):\r\n for edge in self.loop.edges:\r\n edge.plot(fig, ax)\r\n for q in self.elements:\r\n q.plot(fig, ax)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n def a():\r\n fig, ax = plt.subplots()\r\n pts5 = np.array([[0, 0], [1, 1], [0, 2], [-1, 1.5], [-1, 0.5]])\r\n\r\n loop1 = Loop(pts5)\r\n loop1.edge_me()\r\n loop1.seed_all_by_number(7, False)\r\n qm = QuadMesher()\r\n qm.mesh_padding(loop1)\r\n\r\n qm.plot(fig, ax)\r\n\r\n def b():\r\n global qpm\r\n fig,ax = plt.subplots()\r\n\r\n ax.set_aspect('equal')\r\n\r\n pts = np.array([[0, 0], [1, 0.9], [0, 2], [-1, 1.5], [-1, 0.5]])\r\n loop = Loop(pts)\r\n\r\n loop.seed_all_by_elm_size(0.15)\r\n qpm = QuadPadMesher(loop)\r\n qpm.mesh()\r\n loop.plot(fig, ax)\r\n qpm.plot(fig, ax)\r\n\r\n plt.show()\r\n\r\n def 
c():\r\n        global qpm\r\n        fig,axes = plt.subplots(nrows=2, ncols=2)\r\n        axes = axes.flatten()\r\n        for ax in axes:\r\n            ax.set_aspect('equal')\r\n\r\n        pts = np.array([[0, 0], [1, 0.9], [0, 2], [-1, 1.5], [-1, 0.5]])\r\n        loop = Loop(pts)\r\n\r\n        for i,f in enumerate((0, )):\r\n            loop.seed_all_by_elm_size(0.15)\r\n            qpm = QuadPadMesher(loop)\r\n            qpm.smoothing_factor = f\r\n            qpm.mesh()\r\n            loop.plot(fig, axes[i])\r\n            qpm.plot(fig, axes[i])\r\n\r\n        plt.show()\r\n\r\n    b()\r\n\r\n\r\n\r\n","sub_path":"mlmesh.py","file_name":"mlmesh.py","file_ext":"py","file_size_in_byte":31929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"567158003","text":"from tkinter import *\nfrom functools import partial\n\n# This class runs when the program starts and is the main window.\n# The user selects one of three modes: rounds, timer or unlimited. When a mode button is pressed, a number identifying\n# the chosen mode is passed to the Selection window, which then opens the screen for that mode.\nclass Start:\n    def __init__(self):\n        # Heading frame\n        self.start_frame = Frame(padx=40, pady=15)\n        self.start_frame.grid()\n\n        # Heading label (row 0)\n        self.heading_label = Label(self.start_frame, font='arial 24', text=\"Math quiz\")\n        self.heading_label.grid(row=0)\n\n        # Min/max frame\n        self.min_max_frame = Frame(padx=10)\n        self.min_max_frame.grid()\n        # Min label (row 1, column 0)\n        self.min_label = Label(self.min_max_frame, font='arial 12', text=\"Min number\", justify=LEFT)\n        self.min_label.grid(row=1, column=0)\n        # Min entry (row 1, column 1)\n        self.min_slider = Entry(self.min_max_frame, width=4, font='arial 14')\n        self.min_slider.grid(row=1, column=1)\n\n        # Max label (row 2, column 0)\n        self.max_label = Label(self.min_max_frame, font='arial 12', text=\"Max number\", justify=LEFT)\n        self.max_label.grid(row=2, column=0, pady=10, padx=10)\n        # Max entry (row 2, column 1)\n        self.max_slider = Entry(self.min_max_frame, font='arial 14', width=4)\n        self.max_slider.grid(row=2, column=1)\n\n        # Modes frame\n        self.mode_frame = Frame(padx=5, pady=5)\n        self.mode_frame.grid()\n        # Mode label (row 1)\n        self.mode_label = Label(self.mode_frame, font='arial 16 bold', text=\"Modes:\")\n        self.mode_label.grid(row=1, pady=10)\n        # Rounds button (row 2)\n        # Each mode button opens the Selection window and passes it a number identifying which mode the user picked (e.g. 1 = rounds).\n        self.rounds_button = Button(self.mode_frame, font='arial 12 bold', text=\"Rounds\", padx=10, command=lambda: Selection(1))\n        self.rounds_button.grid(row=2, sticky=\"ew\")\n        # Unlimited button (row 3)\n        self.unlimited_button = Button(self.mode_frame, font='arial 12 bold', text=\"Unlimited\", padx=10)\n        self.unlimited_button.grid(row=3, pady=5, sticky=\"ew\")\n        # Timer button (row 4)\n        self.Timer_button = Button(self.mode_frame, font='arial 12 bold', text=\"Timer\", padx=10)\n        self.Timer_button.grid(row=4, sticky=\"ew\")\n\n        # Instructions button\n        self.instructions_button = Button(self.mode_frame, font='arial 12 bold', text=\"Help/Instructions\", padx=5,\n                                          command=self.to_help)\n        self.instructions_button.grid(row=5, pady=20)\n\n    def to_help(self):\n        self.instructions_button.config(state=DISABLED)\n        #Help(self)\n\n\nclass Selection:\n    def __init__(self, mode):\n        self.selection_box = Toplevel()\n        # Main window frame\n        self.selection_frame = Frame(self.selection_box)\n        self.selection_frame.grid(padx=20, pady=5)\n\n        # Main heading label (row 0)\n        self.heading_label = 
Label(self.selection_frame, text=\"Select\", font='arial 27 bold')\n self.heading_label.grid()\n\n # Buttons frame\n self.buttons_frame = Frame(self.selection_box)\n self.buttons_frame.grid(padx=15, pady=15)\n\n # Addition button (row 1, column 0)\n self.addition_button = Button(self.buttons_frame, text=\"+\", font='arial 12', width=3, command=lambda:Selection.mode_select(self, mode))\n self.addition_button.grid(row=1, column=0)\n\n # Subtraction button (row 1, column 1)\n self.subtraction_button = Button(self.buttons_frame, text='-', font='arial 12', width=3)\n self.subtraction_button.grid(row=1, column=1, padx=5)\n\n # Multiplication button (row 1, column 2)\n self.multiplication_button = Button(self.buttons_frame, text='x', font='arial 12', width=3)\n self.multiplication_button.grid(row=1, column=2, padx=5)\n\n # Division button (row 1, column 3)\n self.division_button = Button(self.buttons_frame, text='/', font='arial 12', width=3)\n self.division_button.grid(row=1, column=3)\n\n # This function is used to check which mode button the user pressed (rounds, unlimited, timer)\n def mode_select(self, mode):\n # if mode == 1, it's rounds.\n if mode == 1:\n # The variable pass through is to tell the function which mode the user selected.\n Rounds()\n self.selection_box.withdraw()\n\n\n# Rounds class\nclass Rounds:\n def __init__(self):\n # Creates new window called rounds_box\n self.rounds_box = Toplevel()\n\n # This \n self.rounds_box.protocol('WM_DELETE_WINDOW', self.close_rounds)\n\n # Main window frame\n self.rounds_frame = Frame(self.rounds_box, padx=20, pady=5)\n self.rounds_frame.grid()\n \n # Heading label (row 0)\n self.heading_label = Label(self.rounds_frame, text=\"Rounds\", font='arial 30 bold')\n self.heading_label.grid(padx=10, pady=10)\n\n # Question label (row 1)\n self.question_label = Label(self.rounds_frame, text=\"How many rounds?\", font='arial 15 italic')\n self.question_label.grid(row=1, pady=10)\n\n # Question entry (row 2)\n self.question_entry = Entry(self.rounds_frame)\n self.question_entry.grid(row=2)\n\n # Buttons frame\n self.buttons_frame = Frame(self.rounds_box)\n self.buttons_frame.grid()\n\n # Back button (row 3, column 0)\n self.back_button = Button(self.buttons_frame, text=\"Back\", fg='white', bg='black', font='arial 12')\n self.back_button.grid(row=3, pady=10, padx=5)\n # Enter button (row 3, column 1)\n self.enter_button = Button(self.buttons_frame, text=\"Enter\", fg='white', bg='black', font='arial 12')\n self.enter_button.grid(row=3, column=1)\n\n def close_rounds(self):\n self.rounds_box.destroy()\n Selection(1)\n\n \n\n\n# Main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Math Program\")\n something = Start()\n root.mainloop()\n","sub_path":"04c_Rounds.py","file_name":"04c_Rounds.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"5245175","text":"import functools\nimport simplejson as json\nimport threading\nimport time\nimport uuid\nfrom redis import StrictRedis\nimport datetime\nfrom .domainmodel import DomainModel\n\n\nclass Event(object):\n \"\"\"\n Event class.\n \"\"\"\n\n def __init__(self, _topic, _action, **_entity):\n \"\"\"\n Initialize an event.\n\n :param _topic: The event topic.\n :param _action: The event action.\n :param _entity: The event entity.\n \"\"\"\n self.id = str(uuid.uuid4())\n self.ts = time.time()\n self.topic = _topic\n self.action = _action\n self.entity = _entity\n\n\nclass EventStore(object):\n 
\"\"\"\n Event Store class.\n \"\"\"\n\n def __init__(self, _redis):\n \"\"\"\n Initialize an event store.\n\n :param _redis: A Redis instance.\n \"\"\"\n self.redis = _redis\n\n self.subscribers = {}\n self.domain_model = DomainModel(_redis)\n\n def publish(self, _event):\n \"\"\"\n Publish an event.\n\n :param _event: The event to publish.\n :return: Success.\n \"\"\"\n\n enco = lambda obj: (\n obj.isoformat()\n if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)\n else None\n )\n\n key = 'events:{}_{}'.format(_event.topic, _event.action)\n entity = json.dumps(_event.entity, default=enco, use_decimal=True)\n entry_id = '{0:.6f}'.format(_event.ts).replace('.', '-')\n\n return self.redis.xadd(key, {'event_id': _event.id, 'entity': entity}, id=entry_id)\n\n def subscribe(self, _topic, _action, _handler):\n \"\"\"\n Subscribe to an event channel.\n\n :param _topic: The event topic.\n :param _action: The event action.\n :param _handler: The event handler.\n :return: Success.\n \"\"\"\n if (_topic, _action) in self.subscribers:\n self.subscribers[(_topic, _action)].add_handler(_handler)\n else:\n subscriber = Subscriber(_topic, _action, _handler, self.redis)\n subscriber.start()\n self.subscribers[(_topic, _action)] = subscriber\n\n return True\n\n def unsubscribe(self, _topic, _action, _handler):\n \"\"\"\n Unsubscribe from an event channel.\n\n :param _topic: The event topic.\n :param _action: The event action.\n :param _handler: The event handler.\n :return: Success.\n \"\"\"\n subscriber = self.subscribers.get((_topic, _action))\n if not subscriber:\n return False\n\n subscriber.rem_handler(_handler)\n if not subscriber:\n subscriber.stop()\n del self.subscribers[(_topic, _action)]\n\n return True\n\n def find_one(self, _topic, _id):\n \"\"\"\n Find an event from a topic with an specific id.\n\n :param _topic: The event topic.\n :param _id: The event id.\n :return: The event dict.\n \"\"\"\n return self.find_all(_topic).get(_id)\n\n def find_all(self, _topic):\n \"\"\"\n Find all aggregated events for a topic.\n\n :param _topic: The event topic.\n :return: A dict mapping id -> dict of all aggregated events.\n \"\"\"\n\n result = self.read_from_entity_cache(_topic)\n if not result:\n\n # get created entities\n\n self.redis.xrange\n created_events = self.redis.xrange('events:{}_created'.format(_topic))\n if created_events:\n created_entities = map(lambda x: json.loads(x[1]['entity']), created_events)\n result = dict(map(lambda x: (x['id'], x), created_entities))\n\n # remove deleted entities\n deleted_events = self.redis.xrange('events:{}_deleted'.format(_topic))\n if deleted_events:\n deleted_entities = map(lambda x: json.loads(x[1]['entity']), deleted_events)\n deleted_entities = map(lambda x: x['id'], deleted_entities)\n result = EventStore.remove_deleted(result, deleted_entities)\n\n # set updated entities\n updated_events = self.redis.xrange('events:{}_updated'.format(_topic))\n if updated_events:\n updated_entities = map(lambda x: json.loads(x[1]['entity']), updated_events)\n updated_entities = dict(map(lambda x: (x['id'], x), updated_entities))\n result = EventStore.set_updated(result, updated_entities)\n\n self.write_into_entity_cache(_topic, result)\n\n return result\n\n def read_from_entity_cache(self, _topic):\n if self.domain_model.exists(_topic):\n return self.domain_model.retrieve(_topic)\n\n def write_into_entity_cache(self, _topic, _values):\n for k, v in _values.items():\n self.domain_model.create(_topic, v)\n\n def subscribe_to_entity_events(self, 
_topic):\n        self.subscribe(_topic, 'created', functools.partial(self.entity_created, _topic))\n        self.subscribe(_topic, 'deleted', functools.partial(self.entity_deleted, _topic))\n        self.subscribe(_topic, 'updated', functools.partial(self.entity_updated, _topic))\n\n    def unsubscribe_from_entity_events(self, _topic):\n        self.unsubscribe(_topic, 'created', functools.partial(self.entity_created, _topic))\n        self.unsubscribe(_topic, 'deleted', functools.partial(self.entity_deleted, _topic))\n        self.unsubscribe(_topic, 'updated', functools.partial(self.entity_updated, _topic))\n\n    def entity_created(self, _topic, _item):\n        if self.domain_model.exists(_topic):\n            entity = json.loads(_item[1][0][1]['entity'])\n            self.domain_model.create(_topic, entity)\n\n    def entity_deleted(self, _topic, _item):\n        if self.domain_model.exists(_topic):\n            entity = json.loads(_item[1][0][1]['entity'])\n            self.domain_model.delete(_topic, entity)\n\n    def entity_updated(self, _topic, _item):\n        if self.domain_model.exists(_topic):\n            entity = json.loads(_item[1][0][1]['entity'])\n            self.domain_model.update(_topic, entity)\n\n    @staticmethod\n    def remove_deleted(created, deleted):\n        \"\"\"\n        Remove deleted events.\n\n        :param created: A dict mapping id -> dict of created events.\n        :param deleted: A list of deleted ids.\n        :return: A dict without deleted events.\n        \"\"\"\n        for d in deleted:\n            del created[d]\n        return created\n\n    @staticmethod\n    def set_updated(created, updated):\n        \"\"\"\n        Adapt updated events.\n\n        :param created: A dict mapping id -> dict of created events.\n        :param updated: A dict mapping id -> dict of updated events.\n        :return: A dict with updated events.\n        \"\"\"\n        for k, v in updated.items():\n            created[k] = v\n        return created\n\n\nclass Subscriber(threading.Thread):\n    \"\"\"\n    Subscriber Thread class.\n    \"\"\"\n\n    def __init__(self, _topic: str, _action, _handler, _redis: StrictRedis):\n        \"\"\"\n        :param _topic: The topic to subscribe to.\n        :param _action: The action to subscribe to.\n        :param _handler: A handler function.\n        :param _redis: A Redis instance.\n        \"\"\"\n        super(Subscriber, self).__init__()\n        self._running = False\n        self.key = 'events:{}_{}'.format(_topic, _action)\n        self.subscribed = True\n        self.handlers = [_handler]\n        self.redis = _redis\n\n    def __len__(self):\n        return len(self.handlers)\n\n    def run(self, block=None):\n        if self._running:\n            return\n\n        last_id = '$'\n        self._running = True\n        while self.subscribed:\n            items = self.redis.xread({self.key: last_id}, block=block) or []\n            for item in items:\n                for handler in self.handlers:\n                    handler(item)\n                last_id = item[1][0][0]\n        self._running = False\n\n    def stop(self):\n        self.subscribed = False\n\n    def add_handler(self, _handler):\n        self.handlers.append(_handler)\n\n    def rem_handler(self, _handler):\n        self.handlers.remove(_handler)\n","sub_path":"eventstore/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"634887430","text":"import chessapi\nimport _\nfrom django.db import models\nfrom django.conf import settings\n\n\nclass BotNotReadyError(Exception):\n    pass\n\n\nclass Bot(models.Model):\n    name = models.CharField(max_length=120)\n    description = models.TextField(null=True, blank=True)\n    creator = models.ForeignKey(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.CASCADE,\n        null=True,\n        related_name='bots'\n    )\n    underscore = models.TextField()\n    pickle = models.BinaryField()\n    ready = models.BooleanField(default=False)\n    created = 
models.DateTimeField(auto_now=False, auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n\n    def compile(self):\n        # Ensure the bot has a player.\n        Player.objects.get_or_create(is_bot=True, bot=self)\n        program, self.pickle = _.smart_compile_string(self.underscore)\n        self.ready = True\n        self.save()\n\n    def get_move(\n        self,\n        piece_data,\n        valid_moves,\n        white_is_in_check,\n        black_is_in_check\n    ):\n        # The bot will assume that it is its turn.\n        # When users are allowed to submit, time and memory limits must be\n        # lowered!\n        if not self.ready:\n            raise BotNotReadyError(\n                'the bot\\'s underscore code has not yet been compiled'\n            )\n        # running_underscore_standard_library is set to True as there were\n        # problems otherwise.\n        program, self.pickle = _.smart_compile_string(\n            program_string=self.underscore,\n            pickle_bytes_string=self.pickle,\n            underscore_last_modified_time=0,\n            pickle_last_modified_time=1,\n            time_limit=10000,\n            memory_limit=10000,\n            running_underscore_standard_library=True,\n        )\n        # When users are allowed to submit, the output will need to be validated\n        underscore_function = program.run()['make_move']\n\n        piece_data_for_underscore = {}\n        for index, item in enumerate(piece_data):\n            piece_data_for_underscore[index] = item\n\n        return underscore_function(\n            {},\n            [\n                _.nodes.ValueNode(piece_data_for_underscore),\n                _.nodes.ValueNode(valid_moves),\n                _.nodes.ValueNode(white_is_in_check),\n                _.nodes.ValueNode(black_is_in_check)\n            ]\n        )\n\n\nclass Player(models.Model):\n    is_bot = models.BooleanField(default=False)\n    wins = models.IntegerField(default=0)\n    draws = models.IntegerField(default=0)\n    losses = models.IntegerField(default=0)\n    bot = models.OneToOneField(\n        Bot,\n        on_delete=models.CASCADE,\n        null=True,\n        blank=True\n    )\n    user = models.OneToOneField(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.CASCADE,\n        null=True,\n        blank=True\n    )\n\n\nclass Game(models.Model):\n    # piece_data stores a JSON representation of the game.\n    piece_data = models.TextField()\n    is_whites_turn = models.BooleanField(default=True)\n    white_player = models.ForeignKey(\n        Player,\n        on_delete=models.CASCADE,\n        related_name=\"games_as_white\"\n    )\n    black_player = models.ForeignKey(\n        Player,\n        on_delete=models.CASCADE,\n        related_name=\"games_as_black\"\n    )\n    created = models.DateTimeField(auto_now=False, auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n    finished = models.BooleanField(default=False)\n    history = models.TextField(blank=True)\n    name = models.CharField(blank=True, max_length=120)\n    white_is_in_check = models.BooleanField(default=False)\n    black_is_in_check = models.BooleanField(default=False)\n    white_is_in_stalemate = models.BooleanField(default=False)\n    black_is_in_stalemate = models.BooleanField(default=False)\n    white_is_in_checkmate = models.BooleanField(default=False)\n    black_is_in_checkmate = models.BooleanField(default=False)\n\n    def save(self, *args, **kwargs):\n        self._update_check_status()\n\n        if (\n            self.white_is_in_checkmate or\n            self.black_is_in_checkmate or\n            self.white_is_in_stalemate or\n            self.black_is_in_stalemate\n        ):\n            self.finished = True\n        else:\n            self.finished = False\n        super(Game, self).save(*args, **kwargs)\n\n    def _update_check_status(self):\n        WHITE, BLACK = chessapi.WHITE, chessapi.BLACK\n\n        chessapi_game = chessapi.Game(\n            chessapi.Player(WHITE),\n            chessapi.Player(BLACK)\n        )\n        chessapi_game.build_board_from_json(self.piece_data)\n        if self.is_whites_turn:\n            chessapi_game.colour_for_next_turn = WHITE\n        
else:\n chessapi_game.colour_for_next_turn = BLACK\n\n self.white_is_in_check = chessapi_game.is_in_check(WHITE)\n self.black_is_in_check = chessapi_game.is_in_check(BLACK)\n self.white_is_in_stalemate = chessapi_game.is_in_stalemate(WHITE)\n self.black_is_in_stalemate = chessapi_game.is_in_stalemate(BLACK)\n self.white_is_in_checkmate = chessapi_game.is_in_checkmate(WHITE)\n self.black_is_in_checkmate = chessapi_game.is_in_checkmate(BLACK)\n\n class Meta:\n ordering = ['-updated', '-created']\n","sub_path":"game/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"509730128","text":"import logging\n\nfrom celery import shared_task\n\nfrom grandchallenge.publications.models import Publication\nfrom grandchallenge.publications.utils import get_identifier_csl\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef update_publication_metadata():\n for publication in Publication.objects.all():\n try:\n csl, new_identifier = get_identifier_csl(\n doi_or_arxiv=publication.identifier\n )\n except ValueError:\n logger.warning(\n f\"Identifier {publication.identifier} not recognised\"\n )\n continue\n\n publication.identifier = new_identifier\n publication.csl = csl\n publication.save()\n","sub_path":"app/grandchallenge/publications/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"157601328","text":"'''\n=> DFS\nRemember to backtrack!!!!!!\n'''\nclass Solution:\n \"\"\"\n @param: root: the root of binary tree\n @param: target: An integer\n @return: all valid paths\n \"\"\"\n def binaryTreePathSum2(self, root, target):\n # write your code here\n res = []\n self.traverse(root, target, 0, [], res)\n \n return res\n \n def traverse(self, root, target, level, path, res):\n if not root:\n return\n \n path.append(root.val)\n tmp_target = target\n for i in range(level, -1, -1):\n tmp_target -= path[i]\n if tmp_target == 0:\n tmp = []\n for j in range(i, level + 1):\n tmp.append(path[j])\n res.append(tmp)\n \n self.traverse(root.left, target, level + 1, path, res)\n self.traverse(root.right, target, level + 1, path, res)\n # !!!!!!!!!!!! Always remember to backtrack in dfs\n path.pop()\n \n ","sub_path":"246_binary-tree-path-sum-ii/binary-tree-path-sum-ii.py","file_name":"binary-tree-path-sum-ii.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652626798","text":"import tensorflow as tf\n\n\ndef create_dir(path: str, flag_delete_existing: bool) -> bool:\n \"\"\"Returns True if created new dir, else False.\"\"\"\n if flag_delete_existing and tf.io.gfile.exists(path):\n tf.io.gfile.rmtree(path)\n if not tf.io.gfile.exists(path):\n tf.io.gfile.makedirs(path)\n else:\n return False\n\n return True\n","sub_path":"directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"72199682","text":"#!/usr/bin/env python\n# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. Cloudera, Inc. 
licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom django.db import connection, models, transaction\nfrom django.db.models import Q\nfrom django.db.models.query import QuerySet\nfrom django.utils.translation import ugettext as _, ugettext_lazy as _t\n\nfrom desktop.conf import CONNECTORS\nfrom desktop.lib.connectors.types import CONNECTOR_TYPES, CATEGORIES\nfrom desktop.lib.exceptions_renderable import PopupException\n\n\nLOG = logging.getLogger(__name__)\n\n\n# TODO: persist in DB and migrations\n# TODO: connector groups: if we want one dialect (e.g. hive) to show-up with multiple/transient computes and the same saved query\n\nCONNECTOR_INSTANCES = None\nCONNECTOR_IDS = 1\n\n# class Connector(models.Model):\n # '''\n # Instance of a connector pointing to an external service: connection\n # '''\n# type = models.CharField(max_length=32, db_index=True, help_text=_t('Type of connector, e.g. hive-tez, '))\n# name = models.CharField(default='', max_length=255)\n# description = models.TextField(default='')\n# is_valid # Must be in lib\n\n# settings = models.TextField(default='{}')\n# last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Time last modified'))\n\n# organization = models.ForeignKey(Organization, on_delete=models.CASCADE)\n\n # class Meta:\n # verbose_name = _t('connector')\n # verbose_name_plural = _t('connectors')\n # unique_together = ('name', 'organization',)\n\n\ndef _group_category_connectors(connectors):\n return [{\n 'category': category['type'],\n 'category_name': category['name'],\n 'description': category['description'],\n 'values': [_connector for _connector in connectors if _connector['category'] == category['type']],\n } for category in CATEGORIES\n ]\n\nAVAILABLE_CONNECTORS = _group_category_connectors(CONNECTOR_TYPES)\n\n\ndef _get_installed_connectors(category=None, categories=None, dialect=None, interface=None, user=None):\n global CONNECTOR_INSTANCES\n global CONNECTOR_IDS\n config_connectors = CONNECTORS.get()\n\n if CONNECTOR_INSTANCES is None:\n CONNECTOR_INSTANCES = []\n\n for i in config_connectors:\n connector_types = []\n\n for connector_type in CONNECTOR_TYPES:\n if connector_type['dialect'] == config_connectors[i].DIALECT.get():\n connector_types.insert(0, connector_type)\n elif connector_type.get('interface') == config_connectors[i].INTERFACE.get():\n connector_types.append(connector_type)\n\n if not connector_types:\n LOG.warn('Skipping connector %s as connector dialect %s or interface %s are not installed' % (\n i, config_connectors[i].DIALECT.get(), config_connectors[i].INTERFACE.get()\n )\n )\n else:\n connector_type = connector_types[0]\n connector = {\n 'nice_name': config_connectors[i].NICE_NAME.get() or i,\n 'name': i,\n 'dialect': config_connectors[i].DIALECT.get(),\n 'interface': config_connectors[i].INTERFACE.get() or connector_type.get('interface'),\n 'settings': config_connectors[i].SETTINGS.get(),\n 'id': CONNECTOR_IDS,\n 'category': connector_type['category'],\n 
'description': connector_type['description'],\n 'dialect_properties': connector_type.get('properties', {})\n }\n CONNECTOR_INSTANCES.append(connector)\n CONNECTOR_IDS += 1\n\n connectors = CONNECTOR_INSTANCES\n\n if categories is not None:\n connectors = [connector for connector in connectors if connector['category'] in categories]\n if category is not None:\n connectors = [connector for connector in connectors if connector['category'] == category]\n if dialect is not None:\n connectors = [connector for connector in connectors if connector['dialect'] == dialect]\n if interface is not None:\n connectors = [connector for connector in connectors if connector['interface'] == interface]\n if user is not None:\n allowed_connectors = user.get_permissions().values_list('app', flat=True)\n connectors = [connector for connector in connectors if connector['name'] in allowed_connectors]\n\n return connectors\n\n\ndef _get_connector_by_id(id):\n global CONNECTOR_INSTANCES\n\n instance = [connector for connector in CONNECTOR_INSTANCES if connector['id'] == id]\n\n if instance:\n return instance[0]\n else:\n raise PopupException(_('No connector with the id %s found.') % id)\n","sub_path":"desktop/core/src/desktop/lib/connectors/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"171024753","text":"#!/bin/python3\nimport http.server\nimport socketserver\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport json\nimport datetime\nimport time\nimport config\nimport scheduler\nimport heat_control\nfrom gpiozero import OutputDevice\n\nTIME_FORMAT = \"%H:%M\"\nLIGHT_GPIO = 27\nPUMP_GPIO = 22\n\nlight = OutputDevice(LIGHT_GPIO)\npump = OutputDevice(PUMP_GPIO)\n\napp_config = config.GrowBoxConfig()\nheater = heat_control.HeatControl(app_config.get_day_temperature(),\n app_config.get_night_temperature())\nw_sched_list = list()\nday_scheduler = scheduler.EventScheduler()\nnight_scheduler = scheduler.EventScheduler()\n\npump_status = False\n\nclass MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n # Sending an '200 OK' response\n self.send_response(200)\n # Setting the header\n self.send_header(\"Content-type\", \"text/html\")\n # Whenever using 'send_header', you also have to call 'end_headers'\n self.end_headers()\n \n config_changed = False\n\n # Extract query param\n query_components = parse_qs(urlparse(self.path).query)\n if query_components:\n html = json.dumps(query_components)\n # Parse parameters\n \n # Mode (AUTO|FORCED_DAY|FORCED_NIGHT)\n if (config.KEY_MODE in query_components):\n mode = query_components[config.KEY_MODE][0]\n app_config.set_mode(mode)\n config_changed = True\n\n # Day time\n if (config.KEY_DAY_TIME in query_components):\n day_time = query_components[config.KEY_DAY_TIME][0]\n app_config.set_day_time(test_time(day_time))\n config_changed = True\n \n # Night time\n if (config.KEY_NIGHT_TIME in query_components):\n night_time = query_components[config.KEY_NIGHT_TIME][0]\n app_config.set_night_time(test_time(night_time))\n config_changed = True\n \n # Day temperature\n if (config.KEY_DAY_TEMP in query_components):\n day_temp = query_components[config.KEY_DAY_TEMP][0]\n app_config.set_day_temperature(float(day_temp))\n config_changed = True\n \n # Night temperature\n if (config.KEY_NIGHT_TEMP in query_components):\n night_temp = query_components[config.KEY_NIGHT_TEMP][0]\n 
app_config.set_night_temperature(float(night_temp))\n                config_changed = True\n            \n            # Waterings\n            if (config.KEY_ADD_WATERING in query_components):\n                watering_list = query_components[config.KEY_ADD_WATERING]\n                for watering in watering_list:\n                    app_config.add_watering(watering)\n                config_changed = True\n            \n            if (config.KEY_DEL_WATERING in query_components):\n                watering_index = query_components[config.KEY_DEL_WATERING][0]\n                app_config.del_watering(watering_index)\n                config_changed = True\n            \n            if config_changed:\n                initialize()\n\n        else:\n            # Report current parameters here\n            html = \"Parameters:<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Mode: \" + app_config.get_mode() + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Day time: \" + str(app_config.get_day_time()) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Night time: \" + str(app_config.get_night_time()) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Day temperature: \" + str(app_config.get_day_temperature()) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Night temperature: \" + str(app_config.get_night_temperature()) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Waterings:<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            if app_config.get_waterings():\n                w_idx = 0\n                for watering in app_config.get_waterings():\n                    html += \"Watering \" + str(w_idx) + \": \" + str(watering) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n                    w_idx += 1\n            html += \"Pump status: \" + str(pump_status) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            html += \"Temperature: \" + str(heater.get_temperature()) + \"<br>&nbsp;&nbsp;&nbsp;&nbsp;\"\n            \n            self.wfile.write(bytes(html, \"utf8\"))\n        return\n\nclass SimpleServer(socketserver.ThreadingMixIn, http.server.HTTPServer):\n    allow_reuse_address = True\n    daemon_threads = True\n\ndef test_time(input):\n    input = input.replace(\"\\\"\", \"\")\n    try:\n        time.strptime(input, TIME_FORMAT)\n        return input\n    except ValueError:\n        print(\"WARNING: Invalid time\")\n        return None\n\ndef trigger_watering(duration):\n    global pump_status\n    print(\"INFO: Trigger watering for\", duration, \"seconds...\")\n    pump.on()\n    pump_status = True\n    time.sleep(duration)\n    pump.off()\n    pump_status = False\n    print(\"INFO: End of watering\")\n\ndef trigger_day_mode():\n    print(\"INFO: Trigger day mode at\",\n          datetime.datetime.now().strftime(\"%H:%M:%S\"))\n    heater.set_day_mode()\n    light.on()\n\ndef trigger_night_mode():\n    print(\"INFO: Trigger night mode at\",\n          datetime.datetime.now().strftime(\"%H:%M:%S\"))\n    heater.set_night_mode()\n    light.off()\n\ndef initialize():\n    # Apply current mode from configuration\n    if app_config.get_mode() == config.MODE_FORCE_DAY:\n        print(\"INFO: Day mode forced\")\n        trigger_day_mode()\n    elif app_config.get_mode() == config.MODE_FORCE_NIGHT:\n        print(\"INFO: Night mode forced\")\n        trigger_night_mode()\n    elif app_config.get_mode() == config.MODE_AUTO:\n        print(\"INFO: Mode auto\")\n        if app_config.get_day_time() and app_config.get_night_time():\n            print(\"INFO: Configure scheduler from configuration\")\n            day_scheduler.cancel_scheduler()\n            night_scheduler.cancel_scheduler()\n            config_day_str = str(app_config.get_day_time())\n            config_night_str = str(app_config.get_night_time())\n            day_scheduler.set_scheduler(config_day_str, trigger_day_mode)\n            night_scheduler.set_scheduler(config_night_str, trigger_night_mode)\n            \n            # Determine if it's day or night\n            current_time_str = str(datetime.datetime.now().hour) + \\\n                               \":\" + \\\n                               str(datetime.datetime.now().minute)\n            if time.strptime(current_time_str, TIME_FORMAT) < time.strptime(config_night_str, TIME_FORMAT):\n                trigger_day_mode()\n            else:\n                trigger_night_mode()\n        else:\n            print(\"WARNING: No valid day and night times, force day mode\")\n            trigger_day_mode()\n\n    # Retrieving waterings from configuration\n    if app_config.get_waterings():\n        # Cancel existing schedulers\n        if len(w_sched_list):\n            for w_sched in w_sched_list:\n                w_sched.cancel_scheduler()\n            w_sched_list.clear()\n\n        for watering in app_config.get_waterings():\n            watering_tuple = tuple(eval(watering)) \n            watering_sched = scheduler.EventScheduler()\n            watering_sched.set_scheduler(watering_tuple, trigger_watering, watering_tuple[scheduler.DURATION])\n            w_sched_list.append(watering_sched)\n\n    # Heater\n    heater.set_day_temperature(app_config.get_day_temperature())\n    heater.set_night_temperature(app_config.get_night_temperature())\n    \n\nif __name__ == \"__main__\": \n    initialize()\n    address = ('', 8000)\n    my_server = SimpleServer(address, MyHttpRequestHandler)\n    my_server.serve_forever()","sub_path":"growbox.py","file_name":"growbox.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"255079562","text":"import numpy as np\nfrom scipy import linalg\n\ndef pca(A):\n    # function [W_norm ,eigenvalue ,mean] = pca(A)\n    # computes PCA of matrix A\n    # A: D by N data matrix. 
Each column is a random vector\n    # W_norm: D by K matrix whose columns are the principal components in decreasing order\n    # eigenvalue: eigenvalues\n    # mean: mean of columns of A\n\n    (d, n) = np.shape(A) # d: dimension (pixel count), n: sample count\n\n    # mean-normalize\n    mean = np.mean(A, axis=1).reshape([d,1])\n    A_norm = A - mean # d x n\n\n    # inner-product\n    ATA = np.dot(A_norm.T, A_norm) # n x n\n    [eigen_value, eigen_vector] = linalg.eig(ATA) # [1 x n, n x n]\n\n    # order eigenvectors\n    order_index = np.argsort(eigen_value)\n    order_index = order_index[::-1]\n    eigen_value = eigen_value[order_index]\n    eigen_vector = eigen_vector[:, order_index]\n\n    # actual eigenvector\n    W = np.dot(A_norm, eigen_vector) # d x n\n    W_norm = W / linalg.norm(W, axis=0) # normalization\n\n    '''\n    # choose 90% eigen vector\n    pdf = eigen_value / np.sum(eigen_value)\n    temp, k = 0.0, 0\n    for k, v in enumerate(pdf):\n        temp += v\n        if temp > 0.9:\n            break\n\n    print('Choose %d eigen vectors. '% (k))\n    eigen_value = eigen_value[:k]\n    W_norm = W_norm[:, :k]\n    '''\n    return W_norm, eigen_value, mean\n\n\n'''\nx = np.array([\n    [1,2,3,4,5],\n    [2,3,4,1,5],\n    [1,4,2,3,5],\n    [3,5,1,2,4],\n    [3,2,1,4,5]\n])\nprint(pca(x))\n'''","sub_path":"feature_extraction/PCA/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"65319376","text":"#! /usr/bin/env python3\nfrom argparse import ArgumentParser, FileType\nimport json\nimport datetime\nimport os\nfrom uuid import UUID\n\nimport googleapiclient\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nfrom google.auth.transport.requests import Request\n\nimport requests\n\nsilent = False\n\nclass UserList:\n    def __init__(self):\n        self.users = []\n        self.index = 0\n    \n    def add(self, user, index=None):\n        \"\"\"Add a User to the list\n\n        :param user (User) The User object to add\n        :param index (int) An optional explicit index to store the user under\n        \"\"\"\n        self.users.append((index if index is not None else self.index, user))\n        self.index += 1\n    \n    def search(self, key, value):\n        \"\"\"Search for a user in the list given a variable to index on and the value to find\n        \n        :param key (string) The variable to index on\n        :param value (string) The value to find\n        :return (tuple) A tuple containing the index of the found item (or -1 if nothing was found), and the found object or None\n        \"\"\"\n        for i, user in self.users:\n            if user is not None and ((key == \"email\" and user.email == value) or (key == \"username\" and user.username == value) or (key == \"uuid\" and user.uuid == value)):\n                return (i, user)\n\n        return (-1, None)\n\n    @staticmethod\n    def fromGoogleSheet(sheet):\n        \"\"\"Creates a userlist from a given list of rows from a Google Sheet\n\n        :param sheet (list) The Google Sheet containing rows of data in the format described later in this file\n        :returns (UserList) A UserList containing all users retrieved from the sheet\n        \"\"\"\n        users = UserList()\n\n        for i, row in enumerate(sheet.rows):\n            if row is not None:\n                users.add(User(email=row[\"email\"], username=row[\"username\"], uuid=UUID(row[\"uuid\"])), index=i)\n\n        return users\n\nclass User:\n    def __init__(self, email=None, username=None, uuid=None):\n        self.email = email\n        self.username = username\n        self.uuid = uuid\n\n    def toTuple(self):\n        \"\"\"Returns a tuple representation of the User\n\n        :return (tuple) The tuple representation, in the format (email, username, uuid)\n        \"\"\"\n        return ( self.email, self.username, str(self.uuid) )\n\nclass GoogleSheet:\n    def __init__(self, service, sheet_id, 
sheet_name, cell_range, columns):\n self.service = service\n self.sheet_id = sheet_id\n self.sheet_name = sheet_name\n self.range_start, self.range_end = cell_range\n self.columns = columns\n\n self.fetch()\n\n def fetch(self):\n \"\"\"Fetch rows from the Google Sheet given the preconfigured specifications\n\n :returns (GoogleSheet) After fetching, returns self\n \"\"\"\n cell_range = f\"{self.sheet_name}!{self.range_start}2:{self.range_end}\"\n request = self.service.values().get(spreadsheetId=self.sheet_id, range=cell_range).execute()\n\n # Map all values to their columns within each row\n rows = []\n for cells in request.get(\"values\", []):\n row = {}\n for i in range(min(len(cells), len(self.columns))):\n row[self.columns[i]] = cells[i]\n\n if row == {}:\n row = None\n\n rows.append(row)\n \n self.rows = rows\n\n return self\n\n def append(self, row):\n \"\"\"Append a row to the sheet\n\n :param row (list) An array representing a row to add to the sheet\n \"\"\"\n cell_range = f\"{self.sheet_name}!{self.range_start}:{self.range_end}\"\n body = {\n \"values\": row\n }\n self.service.values().append(spreadsheetId=self.sheet_id, valueInputOption=\"USER_ENTERED\", range=cell_range, body=body).execute()\n\n def delete(self, row_number):\n \"\"\"Delete a row from the sheet\n\n :param row_number (int) The number of the row to delete (0-indexed)\n \"\"\"\n row_number += 1 # Converts to 1-indexed\n\n cell_range = f\"{self.sheet_name}!{self.range_start}{row_number}:{self.range_end}{row_number}\"\n self.service.values().clear(spreadsheetId=self.sheet_id, range=cell_range).execute()\n\nclass GoogleSheets:\n def __init__(self, sheet_id):\n self.sheet_id = sheet_id\n self.sheets = {}\n\n def login(self, credential_file):\n \"\"\"Logs into the Google service account with the Google Sheets scope\n\n :param credential_file (string) The location of the service account credentials file\n \"\"\"\n try:\n log(\"🔑 Attempting to log in with service account credentials from credentials.json\")\n creds = service_account.Credentials.from_service_account_file(credential_file, scopes=[\n \"https://www.googleapis.com/auth/spreadsheets\"\n ])\n except:\n raise IOError(\"🔒 Failed to log in, did you specify a valid credentials.json file?\")\n \n # Set up a connection to the spreadsheet\n service = build(\"sheets\", \"v4\", credentials=creds)\n self.service = service.spreadsheets()\n\n return creds\n\n def store_sheet(self, internal_name, sheet_name, cell_range, columns):\n \"\"\"Creates, fetches, and stores a GoogleSheet\n\n :param internal_name (string) The internal name to store the sheet under\n :param sheet_name (string) The identifier for the specific sheet in Google Sheets\n :param cell_range (tuple) The start and end columns for the table\n :param columns (list) A list of column headers to be mapped to each row's columns\n :return (GoogleSheet) The created GoogleSheet object\n \"\"\"\n sheet = GoogleSheet(self.service, self.sheet_id, sheet_name, cell_range, columns)\n\n self.sheets[internal_name] = sheet\n return sheet\n\ndef log(message):\n \"\"\"Log the given message if the verbosity is high enough\n\n :param message (string) The message to print\n \"\"\"\n if not silent:\n print(message)\n\ndef sync(local, gsheets):\n # Explanation of this madness:\n # Local banlist takes precedence over remote banlist (banning is performed via /ban)\n # Remote banlist takes precedence over remote whitelist (bans propagate to the whitelist)\n # Remote whitelist takes precedence over local whitelist (whitelisting should not 
be performed by /whitelist add, only through form)\n\n # The following logic is how to merge all sources of information:\n # Get the local banlist\n # Get the remote banlist\n # Get the remote whitelist\n # For entries that are not on the remote banlist, look up any emails for the given username\n # on the remote whitelist\n # Remove the entries from the remote whitelist, and add them to the remote banlist\n # Fetch an updated remote banlist\n # Get the remote requests\n # Check them against the remote banlist\n # If they are banned, ignore the request\n # Otherwise, add the user to the remote whitelist\n # Fetch the remote whitelist and use it to update the local whitelist\n \n # Extract the local file handles\n banlist_file, whitelist_file = local\n\n log(f\"📂 Parsing local banlist from {banlist_file.name}\")\n\n # Get the local banlist\n local_banlist = UserList()\n for ban in json.loads(banlist_file.read()):\n local_banlist.add(User(username=ban[\"name\"], uuid=UUID(ban[\"uuid\"])))\n\n log(f\"📊 Parsing remote banlist from sheet \\\"{gsheets.sheets['banlist'].sheet_name}\\\"\")\n\n # Get the remote banlist\n remote_banlist = UserList.fromGoogleSheet(gsheets.sheets[\"banlist\"])\n\n log(f\"📊 Parsing remote whitelist from sheet \\\"{gsheets.sheets['whitelist'].sheet_name}\\\"\")\n\n # Get the remote whitelist\n remote_whitelist = UserList.fromGoogleSheet(gsheets.sheets[\"whitelist\"])\n\n log(\"🔨 Resolving missing local ban data\")\n\n # For entries that are not on the remote banlist, look up any emails for the given username\n # on the remote whitelist\n for _, ban in local_banlist.users:\n row_number, user = remote_whitelist.search(\"username\", ban.username)\n if row_number != -1:\n ban.email = user.email\n\n log(\"⏳ Processing pending bans\")\n\n # Remove the entries from the remote whitelist, and add them to the remote banlist\n # TODO Add expiration checks and store reasons\n banlist_additions = []\n for _, ban in local_banlist.users:\n if remote_banlist.search(\"uuid\", ban.uuid)[0] == -1:\n # Get the email for the given UUID\n row_number, reference_user = remote_whitelist.search(\"uuid\", ban.uuid)\n\n # Ban all accounts added by a user\n if row_number != -1:\n while(True):\n row_number, user = remote_whitelist.search(\"email\", reference_user.email)\n\n if row_number != -1:\n gsheets.sheets[\"whitelist\"].delete(row_number + 1)\n\n # Append the entry to the remote banlist\n banlist_additions.append(user.toTuple())\n\n # Update the sheet so we don't keep getting the same entry over and over again when we search\n remote_whitelist = UserList.fromGoogleSheet(gsheets.sheets[\"whitelist\"].fetch())\n else:\n break\n\n if len(banlist_additions) > 0:\n gsheets.sheets[\"banlist\"].append(banlist_additions)\n\n log(f\"📊 Parsing updated remote banlist from sheet \\\"{gsheets.sheets['banlist'].sheet_name}\\\"\")\n\n # Fetch an updated remote banlist\n remote_banlist = UserList.fromGoogleSheet(gsheets.sheets[\"banlist\"].fetch())\n\n log(f\"⏳ Processing new whitelist requests from sheet \\\"{gsheets.sheets['requests'].sheet_name}\\\"\")\n\n # Get the remote requests\n whitelist_additions = []\n for request in gsheets.sheets[\"requests\"].rows:\n if remote_banlist.search(\"username\", request[\"username\"])[0] == -1 and remote_whitelist.search(\"username\", request[\"username\"])[0] == -1:\n # Resolve the UUID using the Minecraft API\n response = requests.get(f\"https://api.mojang.com/users/profiles/minecraft/{request['username']}\")\n\n if response.status_code == 200:\n body = 
response.json()\n\n # Add the user to the remote whitelist\n user = User(email=request[\"email\"], username=request[\"username\"], uuid=UUID(body[\"id\"]))\n whitelist_additions.append(user.toTuple())\n elif response.status_code >= 500:\n log(\"❗❗ Mojang API error\")\n\n if len(whitelist_additions) > 0:\n gsheets.sheets[\"whitelist\"].append(whitelist_additions)\n \n log(f\"📊 Parsing updated remote whitelist from sheet \\\"{gsheets.sheets['whitelist'].sheet_name}\\\"\")\n\n # Fetch the updated remote whitelist and use it to update the local whitelist\n remote_whitelist = UserList.fromGoogleSheet(gsheets.sheets[\"whitelist\"].fetch())\n\n log(\"💾 Saving whitelist\")\n\n temp_whitelist = []\n for _, user in remote_whitelist.users:\n _, username, user_id = user.toTuple()\n temp_whitelist.append({ \"uuid\": user_id, \"name\": username })\n\n json.dump(temp_whitelist, whitelist_file, indent=2)\n\n log(\"✅ Sync completed successfully\")\n\ndef __main__():\n parser = ArgumentParser(\n description=\"Syncs the whitelist with an external Google sheet\",\n prog=\"whitelist\",\n epilog=\"In order to connect to the remote sheet, a credentials.json file needs to be in the working directory or specified by the --credentials flag\")\n\n # Command line arguments\n parser.add_argument(\"sheet_id\", help=\"The ID of the Google sheet containing the whitelisted users\", type=str)\n parser.add_argument(\"-d\", \"--minecraft-folder\", help=\"The path to the Minecraft server folder, where the whitelist and banned players files are stored\", required=True, type=str)\n parser.add_argument(\"-c\", \"--credentials\", help=\"The path to the Google Service Account credentials file\", default=\"credentials.json\", type=str)\n parser.add_argument(\"-w\", \"--whitelist\", help=\"The path to the whitelist.json file, relative to the Minecraft server folder\", default=\"whitelist.json\", type=str)\n parser.add_argument(\"-b\", \"--banlist\", help=\"The path to the banned-players.json file, relative to the Minecraft server folder\", default=\"banned-players.json\", type=str)\n parser.add_argument(\"--forms-sheet\", help=\"The name of the form responses sheet in the spreadsheet\", default=\"Whitelist Form Responses\", type=str)\n parser.add_argument(\"--whitelist-sheet\", help=\"The name of the whitelist sheet in the spreadsheet\", default=\"Whitelist\", type=str)\n parser.add_argument(\"--banlist-sheet\", help=\"The name of the ban list sheet in the spreadsheet\", default=\"Ban List\", type=str)\n parser.add_argument(\"-s\", \"--silent\", help=\"Suppress script output\", action=\"store_true\")\n\n args = parser.parse_args()\n\n # Set program verbosity\n global silent\n silent = args.silent\n\n # Login to the service account\n gsheets = GoogleSheets(args.sheet_id)\n gsheets.login(args.credentials)\n\n # Data format of each source:\n # Local whitelist:\n # | UUID | Username |\n # Local banlist:\n # | UUID | Username | Reason |\n # Form Response sheet:\n # | A | B | C |\n # | Timestamp | Email Address | Username |\n # Remote whitelist:\n # | A | B | C |\n # | Email Address | Username | UUID |\n # Remote banlist:\n # | A | B | C |\n # | Email address | Username | UUID |\n\n # Fetch the needed sheets\n gsheets.store_sheet(\"requests\", args.forms_sheet, (\"B\", \"C\"), [ \"email\", \"username\" ])\n gsheets.store_sheet(\"whitelist\", args.whitelist_sheet, (\"A\", \"C\"), [ \"email\", \"username\", \"uuid\" ])\n gsheets.store_sheet(\"banlist\", args.banlist_sheet, (\"A\", \"D\"), [ \"email\", \"username\", \"uuid\" ])\n\n # Sync 
the whitelist\n banlist_file = open(os.path.join(args.minecraft_folder, args.banlist), \"r\")\n whitelist_file = open(os.path.join(args.minecraft_folder, args.whitelist), \"w+\")\n\n sync((banlist_file, whitelist_file), gsheets)\n\n banlist_file.close()\n whitelist_file.close()\n\n__main__()\n","sub_path":"whitelist.py","file_name":"whitelist.py","file_ext":"py","file_size_in_byte":14163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"451602152","text":"\"\"\"empty message\n\nRevision ID: a275a5f60dc5\nRevises: ab909e276fd0\nCreate Date: 2019-05-06 11:15:50.017922\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a275a5f60dc5'\ndown_revision = 'ab909e276fd0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('perfil', sa.Column('is_especialista', sa.Boolean(), nullable=True))\n op.drop_column('perfil', 'is_especilista')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('perfil', sa.Column('is_especilista', sa.BOOLEAN(), autoincrement=False, nullable=True))\n op.drop_column('perfil', 'is_especialista')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/a275a5f60dc5_.py","file_name":"a275a5f60dc5_.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"281932539","text":"# Unless explicitly stated otherwise all files in this repository are licensed\n# under the Apache License 2.0.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2018 Datadog, Inc.\n\nimport numpy as np\n\n\nDEFAULT_EPS = 0.01\n\n\nclass UnequalEpsilonException(Exception):\n pass\n\n\nclass Entry(object):\n\n def __init__(self, val, g, delta):\n self.val = val\n self.g = g\n self.delta = delta\n\n def __repr__(self):\n return 'Entry(val={}, g={}, delta={})'.format(self.val, self.g, self.delta)\n\n\nclass GKArray(object):\n\n def __init__(self, eps=None):\n if eps is None or eps <= 0 or eps >= 1:\n self.eps = DEFAULT_EPS\n else:\n self.eps = eps\n self.entries = []\n self.incoming = []\n self._min = float('+inf')\n self._max = float('-inf')\n self._count = 0\n self._sum = 0\n\n def __repr__(self):\n return \"entries: {}, incoming: {}, count: {}, min: {}, max: {}, sum: {}\\n\".format(\n self.entries, self.incoming, self._count, self._min, self._max, self._sum)\n\n @property\n def name(self):\n return 'GKArray'\n\n @property\n def num_values(self):\n return self._count\n\n @property\n def avg(self):\n return float(self._sum)/self._count\n\n @property\n def sum(self):\n return self._sum\n\n def size(self):\n if len(self.incoming) > 0:\n self.merge_compress()\n return len(self.entries)\n\n def add(self, val):\n \"\"\" Add a value to the sketch.\n \"\"\"\n self.incoming.append(val)\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val\n if self._count % (int(1.0/self.eps) + 1) == 0:\n self.merge_compress()\n\n def merge_compress(self, entries=[]):\n \"\"\" Merge the given entry list into self.entries as well as compressing any values in\n self.incoming buffer.\n\n Parameters:\n entries: list of Entry\n \"\"\"\n removal_threshold = np.floor(2.0*self.eps*(self._count - 1))\n incoming = [Entry(val, 1, 0) for val in 
self.incoming] + [Entry(e.val, e.g, e.delta) for e in entries]\n incoming = sorted(incoming, key=lambda x: x.val)\n\n merged = []\n i, j = 0, 0\n while i < len(incoming) or j < len(self.entries):\n if i == len(incoming):\n # done with incoming; now only considering entries\n if j + 1 < len(self.entries) and\\\n self.entries[j].g + self.entries[j+1].g + self.entries[j+1].delta <= removal_threshold:\n self.entries[j+1].g += self.entries[j].g\n else:\n merged.append(self.entries[j])\n j += 1\n elif j == len(self.entries):\n # done with entries; now only considering incoming\n if i+1 < len(incoming) and\\\n incoming[i].g + incoming[i+1].g + incoming[i+1].delta <= removal_threshold:\n incoming[i+1].g += incoming[i].g\n else:\n merged.append(incoming[i])\n i += 1\n elif incoming[i].val < self.entries[j].val:\n if incoming[i].g + self.entries[j].g + self.entries[j].delta <= removal_threshold:\n self.entries[j].g += incoming[i].g\n else:\n incoming[i].delta = self.entries[j].g + self.entries[j].delta - incoming[i].g\n merged.append(incoming[i])\n i += 1\n else:\n if j + 1 < len(self.entries) and\\\n self.entries[j].g + self.entries[j+1].g + self.entries[j+1].delta <= removal_threshold:\n self.entries[j+1].g += self.entries[j].g\n else:\n merged.append(self.entries[j])\n j += 1\n\n self.entries = merged\n self.incoming = []\n\n def merge(self, sketch):\n \"\"\" Merge another GKArray into the current. The two sketches should have the same\n epsilon value.\n\n Parameters:\n other: GKArray\n \"\"\"\n if self.eps != sketch.eps:\n raise UnequalEpsilonException(\"Cannot merge two GKArrays with different epsilon values\")\n\n if sketch._count == 0:\n return\n\n if self._count == 0:\n self.entries = [Entry(e.val, e.g, e.delta) for e in sketch.entries]\n self.incoming = sketch.incoming[:]\n self._min = sketch._min\n self._max = sketch._max\n self._count = sketch._count\n self._sum = sketch._sum\n return\n\n entries = []\n spread = int(sketch.eps*(sketch._count - 1))\n sketch.merge_compress()\n g = sketch.entries[0].g + sketch.entries[0].delta - spread - 1\n if g > 0:\n entries.append(Entry(sketch._min, g, 0))\n for i in range(len(sketch.entries)-1):\n g = sketch.entries[i+1].g + sketch.entries[i+1].delta - sketch.entries[i].delta\n if g > 0:\n entries.append(Entry(sketch.entries[i].val, g, 0))\n g = spread + 1 - sketch.entries[len(sketch.entries) - 1].delta\n if g > 0:\n entries.append(Entry(sketch.entries[len(sketch.entries) - 1].val, g, 0))\n\n self._count += sketch._count\n self._sum += sketch._sum\n self._min = min(self._min, sketch._min)\n self._max = max(self._max, sketch._max)\n\n self.merge_compress(entries)\n\n def quantile(self, q):\n \"\"\" Return an epsilon-approximate element at quantile q.\n\n Parameters:\n q: quantile to query for\n 0 <= q <= 1\n \"\"\"\n if q < 0 or q > 1 or self._count == 0:\n return np.nan\n\n if len(self.incoming) > 0:\n self.merge_compress()\n\n rank = int(q*(self._count - 1) + 1)\n spread = int(self.eps*(self._count - 1))\n g_sum = 0.0\n i = 0\n while i < len(self.entries):\n g_sum += self.entries[i].g\n if g_sum + self.entries[i].delta > rank + spread:\n break\n i += 1\n if i == 0:\n return self._min\n\n return self.entries[i-1].val\n","sub_path":"gkarray/gkarray.py","file_name":"gkarray.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"323399285","text":"##!/usr/bin/python\n\nimport numpy as np\nimport pylab as plt\nimport matplotlib as mpl\n#import seaborn as 
sns\n\n#sns.set_context(\"poster\")\n\nmpl.rcParams['lines.linewidth'] = 2\n\ndata = np.genfromtxt(fname='cor.dat') \n\nncols = data.shape[1]\n\n#for x in range(1,ncols):\nplt.plot(data[:,0],data[:,1],linewidth=2,label='$\\Re(C_{xx})$')\nplt.plot(data[:,0],data[:,2],linewidth=2,label='$\\Im(C_{xx})$')\nplt.plot(data[:,0],data[:,3],linewidth=2,label='$|C_{xx}|$')\n\n \n\n#plt.figure(1) \n#plt.plot(x,y1,'-')\n#plt.plot(x,y2,'g-')\n#pl.ylim(0,1)\nplt.legend(loc=2)\nplt.xlabel('Time [a.u.]')\nplt.ylabel('Positions')\nplt.savefig('traj.pdf')\nplt.show() \n\n","sub_path":"cor.py","file_name":"cor.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615342599","text":"import json\nimport math\nimport time\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom trumppetserver import TweetStorage\nfrom trumppetserver import TweetAnalyzer\n\n_app = Flask(__name__)\n_storage = TweetStorage()\n_analyzer = TweetAnalyzer()\n\n_PLAYBACK_PER_PAGE = 20\n_FREQUENCY_PER_PAGE = 100\n\n\n@_app.route(\"/\", methods=[\"GET\"])\ndef index():\n start_date = _storage.get_oldest_tweet()['created_at']\n ts = time.strftime('%B %d, %Y', time.strptime(\n start_date, '%a %b %d %H:%M:%S +0000 %Y'))\n return render_template('index.html', num_tweets=_storage.get_num_tweets(), start_date=ts)\n\n\n@_app.route(\"/playback\", methods=[\"GET\", \"POST\"])\ndef playback():\n page = request.args.get('page') or '1'\n num_tweets = _storage.get_num_tweets()\n num_pages = math.ceil(num_tweets / _PLAYBACK_PER_PAGE)\n start, end = _get_paginated_range(\n page, num_pages, num_tweets, _PLAYBACK_PER_PAGE)\n\n tweets = _storage.get_range_of_tweets(start, end)\n return render_template('playback.html', tweets=tweets, page=int(page), num_pages=num_pages, start=start, end=end, num_tweets=num_tweets, screen_name=_storage.get_screen_name())\n\n\n@_app.route(\"/frequency\", methods=[\"GET\"])\ndef frequency():\n page = request.args.get('page') or '1'\n num_unique_words = _analyzer.get_num_unique_words()\n num_pages = math.ceil(num_unique_words / _FREQUENCY_PER_PAGE)\n start, end = _get_paginated_range(\n page, num_pages, num_unique_words, _FREQUENCY_PER_PAGE)\n\n word_on_page = _analyzer.get_range_of_word_freqs(start, end)\n return render_template('frequency.html', words=word_on_page, page=int(page), num_pages=num_pages, start=start, end=end, num_unique_words=num_unique_words)\n\n\n@_app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n phrase = None\n results = None\n\n if request.method == 'POST':\n data = request.form\n phrase = data['phrase'].strip()\n results = list(reversed(list(_analyzer.search_tweets(phrase))))\n\n return render_template('search.html', phrase=phrase, results=results, screen_name=_storage.get_screen_name())\n\n\n@_app.route(\"/freestyle\", methods=[\"GET\"])\ndef freestyle():\n trumpian_tweet, original_tweets = _analyzer.generate_trumpian_tweet()\n return render_template('freestyle.html', trumpian_tweet=trumpian_tweet, original_tweets=original_tweets, screen_name=_storage.get_screen_name())\n\n\ndef _get_paginated_range(page, num_pages, num_items, per_page):\n if page and page.isnumeric():\n page = int(page)\n if page > num_pages:\n page = num_pages\n elif page < 1:\n page = 1\n else:\n page = 1\n\n start = (page - 1) * per_page\n end = start + per_page\n if end > num_items:\n end = num_items\n\n return start, end\n\n\nif __name__ == \"__main__\":\n _app.run(host=\"localhost\", 
port=5001, debug=True)\n","sub_path":"trumppetweb/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"268978805","text":"#!/usr/bin/env python3\n\nimport math\n\nfrom fairseq import utils\nfrom fairseq.criterions import register_criterion\nfrom fairseq.criterions.label_smoothed_cross_entropy import (\n LabelSmoothedCrossEntropyCriterion,\n)\n\n\n@register_criterion(\"word_prediction\")\nclass WordPredictionCriterion(LabelSmoothedCrossEntropyCriterion):\n \"\"\"\n Implement a combined loss from translation and target words prediction.\n \"\"\"\n\n def __init__(self, args, task):\n super().__init__(args, task)\n self.eps = args.label_smoothing\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) total loss, as a Variable\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n predictor_output, decoder_output = model(**sample[\"net_input\"])\n # translation loss\n translation_loss, nll_loss = super().compute_loss(\n model, decoder_output, sample, reduce\n )\n prediction_target = model.get_target_words(sample)\n # predictor loss\n prediction_lprobs = model.get_predictor_normalized_probs(\n predictor_output, log_probs=True\n )\n prediction_lprobs = prediction_lprobs.view(-1, prediction_lprobs.size(-1))\n # prevent domination of padding idx\n non_pad_mask = prediction_target.ne(model.encoder.padding_idx)\n\n assert prediction_lprobs.size(0) == prediction_target.size(0)\n assert prediction_lprobs.dim() == 2\n word_prediction_loss = -prediction_lprobs.gather(\n dim=-1, index=prediction_target\n )[non_pad_mask]\n # TODO: normalize , sentence avg\n if reduce:\n word_prediction_loss = word_prediction_loss.sum()\n else:\n word_prediction_loss = word_prediction_loss.sum(1) # loss per batch element\n\n assert translation_loss.size() == word_prediction_loss.size()\n loss = translation_loss + word_prediction_loss\n\n if self.args.sentence_avg:\n sample_size = sample[\"target\"].size(0)\n else:\n sample_size = sample[\"ntokens\"]\n\n logging_output = {\n \"nll_loss\": nll_loss,\n \"translation_loss\": translation_loss.data,\n \"word_prediction_loss\": word_prediction_loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n\n if reduce:\n logging_output[\"translation_loss\"] = utils.item(\n logging_output[\"translation_loss\"]\n )\n logging_output[\"word_prediction_loss\"] = utils.item(\n logging_output[\"word_prediction_loss\"]\n )\n logging_output[\"nll_loss\"] = utils.item(logging_output[\"nll_loss\"])\n logging_output[\"loss\"] = utils.item(logging_output[\"translation_loss\"])\n\n return loss, sample_size, logging_output\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n nsentences = sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n agg_output = {\n \"ntokens\": ntokens,\n \"nsentences\": nsentences,\n \"sample_size\": sample_size,\n }\n\n for loss in [\"translation_loss\", \"word_prediction_loss\"]:\n loss_sum = sum(log.get(loss, 0) for log in logging_outputs)\n\n agg_output[loss] = loss_sum / 
sample_size / math.log(2)\n if loss == \"translation_loss\" and sample_size != ntokens:\n agg_output[\"nll_loss\"] = loss_sum / ntokens / math.log(2)\n\n return agg_output\n","sub_path":"pytorch_translate/word_prediction/word_prediction_criterion.py","file_name":"word_prediction_criterion.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"579073893","text":"# MIT License\n#\n# Copyright (c) 2022-2023, Alex M. Maldonado\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"Compute RDF curves under periodic boundary conditions.\"\"\"\n\nimport ray\nimport numpy as np\nfrom ..periodic import Cell\nfrom ..utils import gen_combs, chunk_iterable\nfrom ..logger import GDMLLogger\n\nlog = GDMLLogger(__name__)\n\n\n# Possible ray task.\ndef _bin_distances(R, R_idxs, atom_pairs, rdf_settings, cell_vectors):\n r\"\"\"Compute relevant RDF data for one or more structures.\n\n Parameters\n ----------\n R : :obj:`numpy.ndarray` or :obj:`numpy.memmap`, ndim: ``3``\n Atomic coordinates of one or more structures.\n R_idxs : :obj:`int` or :obj:`list`\n Indices of ``R`` to compute RDF contributions.\n atom_pairs : :obj:`numpy.ndarray`, ndim: ``2``\n Indices of all atom pairs to consider for each structure.\n rdf_settings : :obj:`dict`\n Keyword arguments for :func:`numpy.histogram` to bin distances.\n cell_vectors : :obj:`numpy.ndarray`\n The three periodic cell vectors. 
For example, a cube of length 16.0 would\n be ``[[16.0, 0.0, 0.0], [0.0, 16.0, 0.0], [0.0, 0.0, 16.0]]``.\n\n Returns\n -------\n :obj:`numpy.ndarray`\n Histogram count of distances.\n :obj:`float`\n Cumulative volume for this set of structures.\n :obj:`int`\n Number of structures computed here.\n \"\"\"\n R = R[R_idxs]\n if R.ndim == 2:\n R = R[None, ...]\n n_R = R.shape[0]\n\n cell = Cell(cell_vectors, cell_vectors[0][0] / 2, True)\n\n # Compute histogram of distances for structure(s)\n D = R[:, atom_pairs[:, 1]] - R[:, atom_pairs[:, 0]]\n new_shape = (np.prod(D.shape[:2]), 3)\n D = D.reshape(new_shape)\n D = cell.d_mic(D, check_cutoff=False) # Should check_cutoff be false?\n dists = np.linalg.norm(D, ord=2, axis=1).flatten()\n count, _ = np.histogram(dists, **rdf_settings)\n\n # Determine volume contribution.\n vol_contrib = n_R * cell.volume\n\n return count, vol_contrib, n_R\n\n\nclass RDF:\n r\"\"\"Handles calculating the radial distribution function (RDF),\n :math:`g(r)`, of a constant volume simulation.\n \"\"\"\n\n def __init__(\n self,\n Z,\n entity_ids,\n comp_ids,\n cell_vectors,\n bin_width=0.05,\n rdf_range=(0.0, 15.0),\n inter_only=True,\n use_ray=False,\n ray_address=\"auto\",\n n_workers=1,\n ):\n r\"\"\"\n Parameters\n ----------\n Z : :obj:`numpy.ndarray`, ndim: ``1``\n Atomic numbers of all atoms in the system.\n entity_ids : :obj:`numpy.ndarray`, ndim: ``1``\n Integers that specify which fragment each atom belongs to for all\n structures.\n comp_ids : :obj:`numpy.ndarray`, ndim: ``1``\n Labels for each ``entity_id`` used to determine the desired entity\n for RDF computations.\n cell_vectors : :obj:`numpy.ndarray`\n The three cell vectors.\n inter_only : :obj:`bool`, default: ``True``\n Only intermolecular distances are allowed. If ``True``, atoms that\n have the same ``entity_id`` are ignored.\n use_ray : :obj:`bool`, default: ``False``\n Use `ray `__ to parallelize\n computations.\n n_workers : :obj:`int`, default: ``1``\n Total number of workers available for ray. 
This is ignored if ``use_ray``\n is ``False``.\n ray_address : :obj:`str`, default: ``\"auto\"``\n Ray cluster address to connect to.\n \"\"\"\n # Store data\n self.Z = Z\n self.entity_ids = entity_ids\n self.comp_ids = comp_ids\n self.cell_vectors = cell_vectors\n self.bin_width = bin_width\n self.rdf_range = rdf_range\n self.inter_only = inter_only\n self.use_ray = use_ray\n self.n_workers = n_workers\n\n # Setup ray\n if use_ray:\n if not ray.is_initialized():\n log.debug(\"ray is not initialized\")\n # Try to connect to already running ray service (from ray cli).\n try:\n log.debug(\"Trying to connect to ray at address %r\", ray_address)\n ray.init(address=ray_address)\n except ConnectionError:\n log.debug(\"Failed to connect to ray at %r\", ray_address)\n log.debug(\"Trying to initialize ray with %d cores\", n_workers)\n ray.init(num_cpus=n_workers)\n log.debug(\"Successfully initialized ray\")\n else:\n log.debug(\"Ray was already initialized\")\n self._max_chunk_size = 300\n\n def _setup(self, comp_id_pair, entity_idxs):\n r\"\"\"Prepare to do RDF computation.\n\n Parameters\n ----------\n comp_id_pair : :obj:`tuple`\n The component ID of the entities to consider.\n entity_idxs : :obj:`tuple`, ndim: ``1`` or ``2``\n The atom indices in each component to compute distances from.\n \"\"\"\n # Setup histogram\n rdf_span = self.rdf_range[-1] - self.rdf_range[0]\n nbins = int(rdf_span / self.bin_width)\n self._hist_settings = {\"bins\": nbins, \"range\": self.rdf_range}\n count, edges = np.histogram([-1], **self._hist_settings)\n count = count.astype(np.float64)\n count *= 0.0\n self._count = count\n self.edges = edges\n self.bins = 0.5 * (edges[:-1] + edges[1:])\n\n # Cumulative volume for rdf normalization.\n self._cuml_volume = 0.0 # Cumulative volume.\n self._n_analyzed = 0 # Number of structures analyzed\n\n # Compute atom pairs indices.\n atom_sets = []\n for i, comp_id in enumerate(comp_id_pair):\n entity_idx = entity_idxs[i] # Could contain an int or tuple\n avail_entities = np.where(comp_id == self.comp_ids)[0]\n # Convert entity_ids into atom indices\n sets = []\n for entity_id in avail_entities:\n if isinstance(entity_idx, (tuple, list)):\n entity_idx = list(entity_idx)\n elif isinstance(entity_idx, int):\n entity_idx = [entity_idx]\n sets.extend(\n np.argwhere(entity_id == self.entity_ids).T[0][entity_idx].tolist()\n )\n atom_sets.append(sets)\n self._atom_sets = atom_sets\n\n # Invalid or unwanted values will be labeled with -1 to drop later.\n atom_pairs = np.empty(\n (len(tuple(gen_combs(atom_sets))), len(atom_sets)), dtype=np.int32\n )\n self._n_pairs, self._n_sets = atom_pairs.shape\n i = 0\n for comb in gen_combs(atom_sets):\n # Check if the pair is on the same entity if requested.\n if self.inter_only:\n if len(set(self.entity_ids[list(comb)])) == 1:\n atom_pairs[i] = -1\n i += 1\n continue\n atom_pairs[i] = comb\n i += 1\n atom_pairs = atom_pairs[atom_pairs >= 0]\n self._atom_pairs = atom_pairs.reshape(\n (int(len(atom_pairs) / self._n_sets), self._n_sets)\n )\n\n def run(self, R, comp_id_pair, entity_idxs, step=1):\n r\"\"\"Perform the RDF computation.\n\n Parameters\n ----------\n R : :obj:`numpy.ndarray`, ndim: ``3``\n Cartesian coordinates of all atoms in the system under periodic\n boundary conditions.\n\n .. tip::\n We recommend a memory-map for large ``R`` to reduce memory\n requirements.\n\n comp_id_pair : :obj:`tuple`\n The component ID of the entities to consider. 
For example,\n ``('h2o', 'h2o')`` or ``('h2o', 'meoh')``.\n entity_idxs : :obj:`tuple`, ndim: ``1`` or ``2``\n The atom indices in each component to compute distances from.\n step : :obj:`int`\n Number of structures/frames to skip between each analyzed frame.\n\n Returns\n -------\n :obj:`numpy.ndarray`\n ``self.bins``: :math:`r` as the midpoint of each histogram bin.\n :obj:`numpy.ndarray`\n ``self.results``: :math:`g(r)` with respect to ``bins``.\n\n Examples\n --------\n Suppose we want to compute the :math:`O_{w}`-:math:`O_{m}` RDF where\n :math:`O_{w}` and :math:`O_{m}` are the oxygen atoms of water and\n methanol, respectively. We can define our system as such.\n\n >>> import numpy as np\n >>> Z = np.array([8, 1, 1, 8, 1, 6, 1, 1, 1]*25)\n >>> entity_ids = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1]*25)\n >>> comp_ids = np.array(['h2o', 'meoh']*25)\n >>> cell_vectors = np.array([[16., 0., 0.], [0., 16., 0.], [0., 0., 16.]])\n\n .. note::\n The above information is an arbitrary system made up of 25 water and\n 25 methanol molecules. They are contained in a 16 Angstrom periodic box\n where the atoms are always in the same order: water, methanol, water,\n methanol, water, etc.\n\n This information completely specifies our system to prepare for computing\n the RDF. We initialize our object with this information.\n\n >>> from mbgdml.analysis.rdf import RDF\n >>> rdf = RDF(Z, entity_ids, comp_ids, cell_vectors)\n\n From here we need to specify what RDF to compute with ``rdf.run()``.\n We assume the Cartesian coordinates for ``R`` are already loaded as\n a 3D array or memory-map.\n\n The last two pieces of information are ``comp_id_pair`` and\n ``entity_idxs``. ``comp_id_pair`` specifies what components or species\n we want to compute our RDF with respect to. In this example, we want\n :math:`O_{w}`-:math:`O_{m}`. ``entity_idxs`` specifies which atom in\n each entity to use. Oxygen is the first atom in both water and\n methanol (i.e., index of ``0``).\n\n >>> comp_id_pair = ('h2o', 'meoh')\n >>> entity_idxs = (0, 0)\n\n We can then compute our RDF!\n\n >>> bins, gr = rdf.run(R, comp_id_pair, entity_idxs)\n\n Notes\n -----\n ``inter_only`` only comes into play when there is a chance of also\n computing intramolecular distances during the RDF calculation. Take the\n hydroxyl OH RDF in a pure methanol simulation for instance. Our\n ``comp_id_pair`` and ``entity_idxs`` would be ``('meoh', 'meoh')`` and\n ``(0, 1)``. The O-H intramolecular bond distance would be a perfectly\n valid atom pair. Usually we are interested in intermolecular distances.\n ``inter_only`` controls whether intramolecular distances are included\n (``inter_only = False``) or not (``inter_only = True``).\n\n ``entity_idxs`` can specify one or more atoms for each component. 
For\n example, if you wanted to compute the OH RDF of pure water you could\n use ``entity_idxs = (0, (1, 2))``.\n\n TODO: Support different cell sizes for each structure.\n \"\"\"\n self._setup(comp_id_pair, entity_idxs)\n\n # Computing histogram.\n # Serial operation.\n if not self.use_ray:\n for i in range(0, len(R), step):\n count, volume_contrib, n_R = _bin_distances(\n R, i, self._atom_pairs, self._hist_settings, self.cell_vectors\n )\n self._count += count\n self._cuml_volume += volume_contrib\n self._n_analyzed += n_R\n # Parallel operation with ray.\n else:\n _bin_distances_remote = ray.remote(_bin_distances)\n chunk_size = min(\n self._max_chunk_size,\n int(len(tuple(range(0, len(R), step))) / self.n_workers),\n )\n chunker = chunk_iterable(range(0, len(R), step), chunk_size)\n\n R = ray.put(R)\n atom_pairs = ray.put(self._atom_pairs)\n hist_settings = ray.put(self._hist_settings)\n cell_vectors = ray.put(self.cell_vectors)\n\n # Initialize ray workers\n workers = []\n\n def add_worker(workers, chunker):\n try:\n chunk = list(next(chunker))\n except StopIteration:\n return\n workers.append(\n _bin_distances_remote.remote(\n R, chunk, atom_pairs, hist_settings, cell_vectors\n )\n )\n return\n\n for _ in range(self.n_workers):\n add_worker(workers, chunker)\n\n while len(workers) != 0:\n done_id, workers = ray.wait(workers)\n\n count, volume_contrib, n_R = ray.get(done_id)[0]\n self._count += count\n self._cuml_volume += volume_contrib\n self._n_analyzed += n_R\n\n add_worker(workers, chunker)\n\n # Normalize the RDF\n norm = self._n_analyzed # Number of analyzed frames\n # Volume in each radial shell\n vols = np.power(self.edges, 3)\n norm *= 4 / 3 * np.pi * np.diff(vols) # Array of shape self.edges\n # Average number density\n N = self._n_pairs\n avg_volume = self._cuml_volume / self._n_analyzed\n norm *= N / avg_volume\n\n self.results = self._count / norm\n\n return self.bins, self.results\n","sub_path":"mbgdml/analysis/rdf.py","file_name":"rdf.py","file_ext":"py","file_size_in_byte":14428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"647239274","text":"import requests\nimport numpy as np\nimport pickle\nimport random\nfrom PIL import Image\n\nimg = Image.open(\"/home/ocamlmycaml/Downloads/deer-silo.jpg\")\nimg = img.resize(size=(299, 299))\nimg = np.array(img)\n\nprint(\"before request\")\nresp = requests.post('http://169.63.11.147:8000/api/inferences', json={\n \"cam_id\": \"0\",\n \"device_id\": \"local-test-script\",\n 'image': pickle.dumps(img).decode('latin-1'),\n \"updated\": True,\n \"inference_response\": {\n \"detected_animals\": random.choice([\n 'skunk', 'fox', 'rodent', 'dog', 'squirrel', 'cat', 'rabbit',\n 'bird', 'cow', 'bobcat', 'deer', 'raccoon', 'coyote', 'opossum',\n 'other'\n ]),\n \"found_something\": True\n },\n \"deterrent_response\": {\n \"deployed_deterrent\": {\n \"played_sound\": \"/home/pi/crow/edge/deterrent-service/sound/test_script/sound.mp3\",\n \"type\": \"sound\"\n }\n },\n}, headers={\n 'Content-type': 'application/json'\n})\n\nprint(f\"Status code: {resp.status_code}\")\nprint(f\"Response:\\n{resp.text}\")\n","sub_path":"test_web_backend.py","file_name":"test_web_backend.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"274453627","text":"# rm *.txt & ./bash.sh\nimport argparse\nimport logging\nimport os\nimport time\nimport tensorflow as tf\nfrom model.utils import Params\nfrom 
model.utils import set_logger\nfrom model.training import train_and_evaluate\nfrom model.reader import load_dataset_from_tfrecords\nfrom model.reader import input_fn\nfrom model.modeling import model_fn\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', default='experiments/base_model',\n                    help="Directory containing params.json")\n# loss\nparser.add_argument('--loss_fn', default='urrank', help="model loss function") # rrank, urrank, ranknet, listnet, listmle, lambdarank, mdprank\n# data\nparser.add_argument('--data_dir', default='../data/OHSUMED/2', help="Directory containing the dataset") # OHSUMED, MQ2007, MSLR-WEB10K, MSLR-WEB30K\nparser.add_argument('--tfrecords_filename', default='OHSUMED.tfrecords', help="Directory containing the dataset") # OHSUMED, MQ2007, MSLR-WEB10K, MSLR-WEB30K\nparser.add_argument('--restore_dir', default=None, # experiments/base_model/best_weights\n                    help="Optional, directory containing weights to reload before training")\n# python main.py --restore_dir experiments/base_model/best_weights\n\n\nif __name__ == '__main__':\n    tf.reset_default_graph()\n    # Set the random seed for the whole graph for reproducible experiments\n    tf.set_random_seed(230)\n\n    # Load the parameters from the experiment params.json file in model_dir\n    args = parser.parse_args()\n    json_path = os.path.join(args.model_dir, 'params.json')\n    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)\n    params = Params(json_path)\n    params.dict['loss_fn'] = args.loss_fn\n\n    # # Load the parameters from the dataset, that gives the size etc. into params\n    json_path = os.path.join(args.data_dir, 'dataset_params.json')\n    assert os.path.isfile(json_path), "No json file found at {}, run prepare_data.py".format(json_path)\n    params.update(json_path)\n\n    # Set the logger\n    set_logger(os.path.join(args.model_dir, 'train.log'))\n\n    path_train_tfrecords = os.path.join(args.data_dir, 'train_' + args.tfrecords_filename)\n    path_eval_tfrecords = os.path.join(args.data_dir, 'eval_' + args.tfrecords_filename)\n\n    # Create the input data pipeline\n    logging.info("Creating the datasets...")\n    train_dataset = load_dataset_from_tfrecords(path_train_tfrecords)\n    eval_dataset = load_dataset_from_tfrecords(path_eval_tfrecords)\n\n    # Specify other parameters for the dataset and the model\n\n    # Create the two iterators over the two datasets\n    train_inputs = input_fn('train', train_dataset, params)\n    eval_inputs = input_fn('vali', eval_dataset, params)\n    logging.info("- done.")\n\n    # Define the models (2 different sets of nodes that share weights for train and validation)\n    logging.info("Creating the model...")\n    train_model_spec = model_fn('train', train_inputs, params)\n    eval_model_spec = model_fn('vali', eval_inputs, params, reuse=True)\n    logging.info("- done.")\n\n    # Train the model\n    # log time\n    # start_time = time.time()\n    logging.info("Starting training for {} epoch(s)".format(params.num_epochs))\n    train_and_evaluate(train_model_spec, eval_model_spec, args.model_dir, params, args.restore_dir)\n    # print("--- %s seconds ---" % (time.time() - start_time)) ","sub_path":"uRank_urRank/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6110693","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nimport cmath\nimport sys\nimport 
warnings\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\nL = int(sys.argv[1]) # lattice size\nprint(\"L: %d\" % (L))\n\nLz = int(sys.argv[2])\nprint(\"Lz: %d\" % (Lz))\n\nbeta = float(sys.argv[3])\nprint(\"beta: %f\" % (beta))\n\neps3 = float(sys.argv[4])\nprint(\"eps3: %f\" % (eps3))\n\nm_fermion = float(sys.argv[5])\nprint(\"m_fermion: %f\" % (m_fermion))\n\nR_half = int(L / 2) # half lattice size\ntmin = int(sys.argv[6]) # min t value for fit\nprint(\"tmin: %d\" % (tmin))\ntmax = R_half # max t value for fit\n\nfirst_traj = int(sys.argv[7])\nprint(\"first_traj: %d\" % (first_traj))\n\nm_sign = \"p\"\nif m_fermion < 0.0:\n\tm_sign = \"m\"\n\nif (Lz == 1):\n id = \"2D/%d_%d_%s%d\" % (L, round(beta * 1000), m_sign, round(abs(m_fermion) * 1000))\nelse:\n id = \"3D/%d_%d_%d_%d_%s%d\" % (L, Lz, round(beta * 1000), round(eps3 * 1000), m_sign, round(abs(m_fermion) * 1000))\nprint(\"id: %s\" % (id))\n\ndef corr_fit(t, m, z):\n\ta = z * np.exp(-m * t)\n\tb = z * np.exp(-m * (L - t))\n\treturn a + b\n\n\ndef pion_mass(corr, tmin, tmax):\n\tn = float(corr.shape[1])\n\tr = np.empty(R_half + 1)\n\tcorr_bar = np.empty(R_half + 1)\n\td_corr = np.empty(R_half + 1)\n\n\tfor i in range(0, R_half + 1):\n\t\tr[i] = i;\n\t\tcorr_bar[i] = np.mean(corr[i]);\n\t\td_corr[i] = np.std(corr[i]) / np.sqrt(n);\n\n\tpopt, pcov = opt.curve_fit(corr_fit, r[tmin:tmax], corr_bar[tmin:tmax], [1.0, 1.0], sigma=d_corr[tmin:tmax])\n\tm = popt[0]\n\tz = popt[1]\n\n\terr = 0.0\n\tfor t in range(tmin, tmax):\n\t\terr += (corr_fit(t, m, z) - corr_bar[t])**2.0\n\n\treturn m, z, np.sqrt(err) / float(tmax - tmin)\n\n\ndef jackknife_pion_mass(corr, tmin, tmax):\n\tn = corr.shape[1]\n\tf_n = float(n) # number of data subsets\n\n\tm_bar, z_bar, _ = pion_mass(corr, tmin, tmax)\n\td_m = 0.0\n\td_z = 0.0\n\n\tfor i in range(0, n):\n\t\t# copy the array and delete the current trajectory\n\t\t# calculate the pion mass and add to the error\n\t\tm_del, z_del, _ = pion_mass(np.delete(corr, i, axis=1), tmin, tmax)\n\t\td_m += (m_del - m_bar)**2.0\n\t\td_z += (z_del - z_bar)**2.0\n\n\td_m = np.sqrt((f_n - 1) / f_n * d_m)\n\td_z = np.sqrt((f_n - 1) / f_n * d_z)\n\treturn m_bar, d_m, z_bar, d_z\n\n\ndef parse_data_file(file):\n\tlines = file.readlines()\n\n\tfirst_l = 0\n\tfor l, line in enumerate(lines):\n\t\ttokens = line.split()\n\t\ttraj = int(tokens[0])\n\t\tif traj < first_traj:\n\t\t\t# skip lines before first trajectory\n\t\t\tcontinue\n\t\telif traj == first_traj:\n\t\t\t# create the array\n\t\t\tfirst_l = l;\n\t\t\t# 0 axis is distance, 1 axis is trajectory\n\t\t\ta = np.empty((R_half + 1, len(lines) - l))\n\n\t\t# populate the array\n\t\tfor t, token in enumerate(tokens):\n\t\t\tif (t == 0):\n\t\t\t\tcontinue # skip trajectory index\n\t\t\ta[t-1, l - first_l] = float(token)\n\treturn a\n\n\n# parse the data file\npion_file = open(\"../jobs/%s/pion_corr.dat\" % (id), \"r\")\nC_pi = parse_data_file(pion_file)\n\nm_bar, d_m, z_bar, d_z = jackknife_pion_mass(C_pi, tmin, tmax)\nm = (m_bar, d_m)\nz = (z_bar, d_z)\npion_file.close()\n\n# print(\"tmin = %d\" % (tmin_best))\n# print(\"tmax = %d\" % (tmax_best - 1))\nprint(\"m = %.12f (%.12f)\" % (m[0], m[1]))\nprint(\"z = %.12f (%.12f)\" % (z[0], z[1]))\n\n# if (Lz == 1):\n# \tmass_file = open(\"../jobs/2D/m_pi_%d_%d.dat\" % (L, beta * 1000), \"a\")\n# else:\n# \tmass_file = open(\"../jobs/3D/m_pi_%d_%d_%d_%d.dat\" % (L, Lz, round(beta * 1000), round(eps3 * 1000)), \"a\")\n# mass_file.write(\"%.3f %.12e %.12e %.12e %.12e\\n\" % (m_fermion, m_bar, d_m, z_bar, d_z))\n# 
mass_file.close()\n\nresult_file = open("m_pi.dat", "a")\nresult_file.write("%d %d %.3f %.3f %.3f %.12e %.12e %.12e %.12e\\n" % (L, Lz, beta, eps3, m_fermion, m[0], m[1], z[0], z[1]))\nresult_file.close()\n\nn = R_half + 1\nR = np.empty(n)\ncorr_bar = np.empty(n)\nd_corr = np.empty(n)\n\nfor i in range(0, n):\n\tR[i] = i;\n\tcorr_bar[i] = np.mean(C_pi[i]);\n\t# standard error of the mean: divide by the number of trajectories, not the number of bins\n\td_corr[i] = np.std(C_pi[i]) / np.sqrt(C_pi.shape[1]);\n\n# calculate best fit curves\nR_A = np.linspace(0, R_half + 1, 1000)\ncorr_A = np.zeros(len(R_A))\nfor r in range(0, len(R_A)):\n\tcorr_A[r] = corr_fit(R_A[r], m[0], z[0])\n\nplt.rcParams.update({\n\t"text.usetex": False,\n\t"font.family": "sans-serif"})\n\n# plot C vs r\nplt.figure()\nplt.xlim(0, R_half + 1)\nplt.yscale("log")\nplt.errorbar(R, corr_bar, yerr=d_corr, color="blue", marker='o', ms=5, mew=0.5, mfc='none', linestyle='none', linewidth=0.5, capsize=2.5, capthick=0.5)\nplt.plot(R_A, corr_A, color="blue", linewidth=0.5)\nplt.xlabel(r"$t$")\nplt.ylabel(r"$\\langle C_{\\pi}(0) C_{\\pi}(t) \\rangle $")\nplt.savefig("../jobs/%s/pi_corr.pdf" % (id))\nplt.close()\n","sub_path":"scripts/calc_m_pi.py","file_name":"calc_m_pi.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305375646","text":"from flask import Flask, render_template\nfrom werkzeug.exceptions import HTTPException\nimport os\n\ndef create_app():\n    app = Flask(__name__)\n    app.config.from_pyfile('settings.cfg')\n\n    # database and encrypt info\n    from .initializers import db, bcrypt, login_manager\n    db.init_app(app)\n    bcrypt.init_app(app)\n    login_manager.init_app(app)\n\n    # All the individual applications should be imported before populating the DB.\n    # blog folder\n    from . import users, posts, main\n    main.init_app(app)\n    users.init_app(app)\n    posts.init_app(app)\n    \n    login_manager.login_view = "users.login"\n    login_manager.login_message_category = "info"\n    \n    # After importing all the models, populate the DB. Otherwise SQLAlchemy will think\n    # 
there is nothing to create.\n    with app.app_context():\n        db.create_all()\n    \n    @app.errorhandler(HTTPException)\n    def error_404(e):\n        # render the matching error page and propagate the proper HTTP status code\n        return render_template(f"errors/{e.code}.html"), e.code\n    \n    return app\n","sub_path":"flask_blog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212945358","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.testbrowser import browsing\nfrom opengever.testing import FunctionalTestCase\nfrom plone.locking.interfaces import IRefreshableLockable\n\n\nclass TestDocumentQuickupload(FunctionalTestCase):\n\n    def setUp(self):\n        super(TestDocumentQuickupload, self).setUp()\n        self.dossier = create(Builder('dossier'))\n\n    @browsing\n    def test_upload_box_is_hidden_when_document_is_not_checked_out(self, browser):\n        document = create(Builder('document').within(self.dossier))\n\n        browser.login().open(document)\n        self.assertEquals([], browser.css('#uploadbox'),\n                          'uploadbox is wrongly displayed')\n\n    @browsing\n    def test_upload_box_is_hidden_when_document_is_locked(self, browser):\n        document = create(Builder('document').within(self.dossier))\n        IRefreshableLockable(document).lock()\n\n        browser.login().open(document)\n        self.assertEquals([], browser.css('#uploadbox'),\n                          'uploadbox is wrongly displayed')\n\n    @browsing\n    def test_upload_box_is_shown_when_document_is_checked_out_and_not_locked(self, browser):\n        document = create(Builder('document')\n                          .within(self.dossier)\n                          .checked_out())\n\n        browser.login().open(document)\n        self.assertIsNotNone(browser.css('#uploadbox'))\n","sub_path":"opengever/document/tests/test_tabbed.py","file_name":"test_tabbed.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"367905183","text":"\"\"\"\nUseful functions for spatial interpolation methods of tobler\n\"\"\"\n\nimport numpy as np\nimport math\nfrom warnings import warn\nfrom pyproj import CRS\n\n\ndef _check_crs(source_df, target_df):\n    \"\"\"check if crs is identical\"\"\"\n    if not (source_df.crs == target_df.crs):\n        print("Source and target dataframes have different crs. 
Please correct.\")\n return False\n return True\n\n\ndef _nan_check(df, column):\n \"\"\"Check if variable has nan values.\n\n Warn and replace nan with 0.0.\n \"\"\"\n values = df[column].values\n if np.any(np.isnan(values)) or np.any(np.isinf(values)):\n wherenan = np.isnan(values)\n values[wherenan] = 0.0\n warn(f\"nan values in variable: {column}, replacing with 0\")\n return values\n\n\ndef _inf_check(df, column):\n \"\"\"Check if variable has nan values.\n\n Warn and replace inf with 0.0.\n \"\"\"\n values = df[column].values\n if np.any(np.isinf(values)):\n wherenan = np.isinf(values)\n values[wherenan] = 0.0\n warn(f\"inf values in variable: {column}, replacing with 0\")\n return values\n\n\ndef _check_presence_of_crs(geoinput):\n \"\"\"check if there is crs in the polygon/geodataframe\"\"\"\n if geoinput.crs is None:\n raise KeyError(\n \"Geodataframe must have a CRS set before using this function.\"\n )\n\n\ndef is_crs_utm(crs):\n \"\"\"\n Determine if a CRS is a UTM CRS\n Parameters\n ----------\n crs : dict or string or pyproj.CRS\n a coordinate reference system\n Returns\n -------\n bool\n True if crs is UTM, False otherwise\n \"\"\"\n if not crs:\n return False\n crs_obj = CRS.from_user_input(crs)\n if crs_obj.coordinate_operation and crs_obj.coordinate_operation.name.upper().startswith('UTM'):\n return True\n return False\n\n\ndef project_gdf(gdf, to_crs=None, to_latlong=False):\n \"\"\"\n lovingly copied from OSMNX \n\n Project a GeoDataFrame to the UTM zone appropriate for its geometries'\n centroid.\n The simple calculation in this function works well for most latitudes, but\n won't work for some far northern locations like Svalbard and parts of far\n northern Norway.\n\n Parameters\n ----------\n gdf : GeoDataFrame\n the gdf to be projected\n to_crs : dict or string or pyproj.CRS\n if not None, just project to this CRS instead of to UTM\n to_latlong : bool\n if True, projects to latlong instead of to UTM\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'\n\n # else, project the gdf to UTM\n # if GeoDataFrame is already in UTM, just return it\n if is_crs_utm(gdf.crs):\n return gdf\n\n # calculate the centroid of the union of all the geometries in the\n # GeoDataFrame\n avg_longitude = gdf['geometry'].unary_union.centroid.x\n\n # calculate the UTM zone from this avg longitude and define the UTM\n # CRS to project\n utm_zone = int(math.floor((avg_longitude + 180) / 6.) 
+ 1)\n utm_crs = '+proj=utm +zone={} +ellps=WGS84 +datum=WGS84 +units=m +no_defs'.format(utm_zone)\n\n # project the GeoDataFrame to the UTM CRS\n projected_gdf = gdf.to_crs(utm_crs)\n\n return projected_gdf\n","sub_path":"tobler/util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521785611","text":"#!/usr/bin/env python\n\n\"\"\"\nThis script transforms the cameras to the center of the rover.\n\nThe cameras are assumed to be in the following order:\ncam0: BB2 left\ncam1: BB2 right\ncam2: BB3 left\ncam3: BB3 center\ncam4: BB3 right\ncam5: pancam left\ncam6: pancam right\n\"\"\"\n\n__author__ = \"Martin Azkarate, Levin Gerdes, Karl Kangur, Marco Pagnamenta\"\n\nimport yaml\nimport re\nimport numpy as np\nimport os.path\nfrom decimal import *\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport transformations as TF\nimport math\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.set_xlim(-1.5,1.5)\nax.set_ylim(-1.5,1.5)\nax.set_zlim(-0.5,1.5)\n\n# precision for Decimal\ngetcontext().prec = 16\n\n## Ask for calibration results\n#print(\"Please choose the files containing the Kalibr calibration output\\n\")\n#Tk().withdraw()\n#calibResults = askopenfilename(title=\"Locate results-cam-kalibr.txt\",\n# initialdir=\"~/Desktop\")\n#camChain = askopenfilename(title=\"Locate camchain-kalibr.yaml\",\n# initialdir=\"~/Desktop\")\n\n# translations are given as [x,y,z] in cm, rotations as [yaw, pitch, roll]\n\n# kalibrs output files\ncamChain = 'sampleFiles/camchain-kalibr.yaml'\ncalibResults = 'sampleFiles/results-cam-kalibr.txt'\n\nf = open(camChain)\nfCamChain = f.read()\nf.close()\ndata = yaml.load(fCamChain)\n\n# get rotation from transformation matrix\ndef getR(tf):\n return tf[:-1,:-1]\n# get translation from transformation matrix\ndef getT(tf):\n return tf[:-1,3:]\n\n# get quaternions from transformation\ndef getQfromTF(tf):\n return TF.quaternion_from_matrix(getR(tf))\n\ndef drawToGround(vec):\n x = vec[0][0]\n y = vec[1][0]\n z = vec[2][0]\n ax.plot3D([x,x],[y,y],[0,z], color='y')\n\ndef drawReference(pos, rot):\n x_axis = np.array([[.1],[0],[0]])\n y_axis = np.array([[0],[.1],[0]])\n z_axis = np.array([[0],[0],[.1]])\n x_axis = qv_mult(rot, x_axis)\n y_axis = qv_mult(rot, y_axis)\n z_axis = qv_mult(rot, z_axis)\n x_axis = np.add(x_axis, pos)\n y_axis = np.add(y_axis, pos)\n z_axis = np.add(z_axis, pos)\n myPlotLine(pos, x_axis, \"r\")\n myPlotLine(pos, y_axis, \"g\")\n myPlotLine(pos, z_axis, \"b\")\n\n# camera transformations from yaml file\ntf_BB2Right_BB2Left = np.array(data['cam1']['T_cn_cnm1'])\ntf_BB3Left_BB2Right = np.array(data['cam2']['T_cn_cnm1'])\ntf_BB3Center_BB3Left = np.array(data['cam3']['T_cn_cnm1'])\ntf_BB3Right_BB3Center = np.array(data['cam4']['T_cn_cnm1'])\ntf_PanCamLeft_BB3Right = np.array(data['cam5']['T_cn_cnm1'])\ntf_PanCamRight_PanCamLeft = np.array(data['cam6']['T_cn_cnm1'])\n\ndef arrayToColVec(a):\n return [[a[0]],[a[1]],[a[2]]]\n\ndef colVecToArray(v):\n return [v[0][0],v[1][0],v[2][0]]\n\n# rotate vector v by quaternion q\n# 0 0 \n# vr0 = z v0 z*\n# vr1 v1 \n# vr2 v2 \ndef qv_mult(q,v):\n q = TF.unit_vector(q)\n q2 = list([0,v[0],v[1],v[2]])\n vr = TF.quaternion_multiply(\n TF.quaternion_multiply(q,q2),\n TF.quaternion_conjugate(q)\n )[1:]\n return vr\n\ndef getOppositeFacingQuaternion(q):\n euler = 
TF.euler_from_quaternion(q)\n return TF.quaternion_from_euler( -euler[0], -euler[1], -euler[2] )\n\ndef labelPos(pos, label):\n pos = colVecToArray(pos)\n ax.text3D(pos[0], pos[1], pos[2], label)\n\ndef myPlotLine(p1,p2,color=\"b\"):\n p1 = np.asarray(colVecToArray(p1))\n p2 = np.asarray(colVecToArray(p2))\n ax.plot3D([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], color)\n\ndrawReference(np.array([[0],[0],[0]]), TF.quaternion_from_euler(0,0,0))\n\n# transformation from kalibr apriltag demo:\n# left pancam to target\n# x=0.364034, y=0.0139871, z=1.30524, yaw=0.256855, pitch=0.103205, roll=2.69673 # -165deg\n#x=0.237599, y=0.00594341, z=1.26584, yaw=0.099315, pitch=0.0509062, roll=2.65734 # -175deg\n# x=0.178021, y=0.00981665, z=1.25501, yaw=0.0235408, pitch=0.0114584, roll=2.64158 # -180deg\nyaw = 0.099315 #175\npitch=0.0509062\nroll=2.65734\n\nyaw = 0.0235408 #180\npitch=0.0114584\nroll=2.64158\n\n#yaw = 0.256855 #165\n#pitch=0.103205\n#roll=2.69673\n\n\nprint(np.degrees([yaw,pitch,roll]))\nq_PanCamLeft_Target = TF.quaternion_from_euler(yaw, pitch, roll, 'rzyx') #TODO\n#q_PanCamLeft_Target = TF.quaternion_from_euler(pitch, roll, yaw, 'rxyz') #TODO\nt_PanCamLeft_Target = np.array([[0.237599],#175\n [0.00594341], \n [1.26855], ])\nt_PanCamLeft_Target = np.array([[0.178021], #180\n [0.00981665], \n [1.25501], ])\n#t_PanCamLeft_Target = np.array([[0.364034], #165\n# [0.0139871], \n# [1.30524], ]) 145020999925178\n\n \n#t_PanCamLeft_Target = np.array([ [0],\n# [0],\n# [1] ])\n\nq_PanCamLeft_Rotated = TF.quaternion_from_euler(np.radians(0),np.radians(60),np.radians(-180), 'rzxy')\n\npos_BB2Left = arrayToColVec([0,0,0])\npos_BB2Right = arrayToColVec([0,0,0])\npos_BB3Left = arrayToColVec([0,0,0])\npos_BB3Center = arrayToColVec([0,0,0])\npos_BB3Right = arrayToColVec([0,0,0])\npos_PanCamLeft = arrayToColVec([0,0,0])\npos_PanCamRight = arrayToColVec([0,0,0])\npos_Center = arrayToColVec([0,0,0])\nrot_Center = TF.quaternion_from_euler(0,0,0)\npos_Target = arrayToColVec([-0.403,-0.086,0.115])\nrot_Target = TF.quaternion_from_euler(np.radians(90),np.radians(-1.0),np.radians(-0.0), 'rzyx') # assumed z,y,x\n\nt_Center_Target = np.subtract(pos_Center,pos_Target)\nq_Center_Target = TF.quaternion_multiply(rot_Center, rot_Target)\n\ndef posToVec4(pos):\n return [pos[0],pos[1],pos[2],[1]]\n\nimport pyquaternion\n\ntf_Center_Target = pyquaternion.Quaternion(rot_Target).unit.transformation_matrix\n\ntf_Center_Target[0][3] = pos_Target[0][0]\ntf_Center_Target[1][3] = pos_Target[1][0]\ntf_Center_Target[2][3] = pos_Target[2][0]\n\ntf_Center_Target = np.asmatrix(tf_Center_Target)\n\ntf_PanCamLeft_Target = pyquaternion.Quaternion(q_PanCamLeft_Target).unit.transformation_matrix\n\ntf_PanCamLeft_Target[0][3] = t_PanCamLeft_Target[0]\ntf_PanCamLeft_Target[1][3] = t_PanCamLeft_Target[1]\ntf_PanCamLeft_Target[2][3] = t_PanCamLeft_Target[2]\n\ntf_PanCamLeft_Target = np.asmatrix(tf_PanCamLeft_Target)\n\ntf_Target_PancamLeft=np.linalg.inv(tf_PanCamLeft_Target)\nRI = np.linalg.inv(tf_PanCamLeft_Target[0:3,0:3])\nt = -RI*t_PanCamLeft_Target\n\n#tf_Center_PanCamLeft = tf_Target_PancamLeft*tf_Center_Target\ntf_Center_PanCamLeft = tf_Center_Target*tf_Target_PancamLeft\n#print(np.matrix(posToVec4(pos_Center)))\npos_PanCamLeft = tf_Center_PanCamLeft * np.matrix(posToVec4(pos_Center))\nprint(pos_Target)\nprint(pos_PanCamLeft)\n\n# try to go from the calculated pancamleft position to the target using the values from apriltagdemo\n#tf_ptu_rotation = 
pyquaternion.Quaternion(q_PanCamLeft_Rotated_Target).unit.transformation_matrix\n#pos_Target_from_Inverted = tf_ptu_rotation * np.asarray(tf_PanCamLeft_Target) * np.asarray(pos_PanCamLeft)\n#myPlotLine(pos_Target_from_Inverted, pos_PanCamLeft, 'r')\n#a = np.asarray(getT(tf_PanCamLeft_Target))\n#crosspr = np.cross([a[0][0],a[1][0],a[2][0]],[1,0,0])\n#myPlotLine(pos_Center,pos_Center + arrayToColVec(crosspr))\n\n#myPlotLine(pos_Center,pos_Target)crosspr = np.cross([a[0][0],a[1][0],a[2][0]],[1,0,0])\ndrawReference(pos_Target, TF.quaternion_from_matrix(tf_Center_Target))\nmyPlotLine(pos_Target,pos_PanCamLeft)\npos_PanCamLeft = np.asarray(pos_PanCamLeft)\npos_PanCamLeft = pos_PanCamLeft[0:3]\ndrawReference(pos_PanCamLeft, TF.quaternion_from_matrix(tf_Center_PanCamLeft))\n\n# Go from the rotated left pancam to the PTU\nt_PanCamLeft_PTU = np.array([[0.25],\n [0.055], \n [0.01]])\n\n## NOT EXACTLZ RIHT, CONTINUE HERE FOR NEUTRAL POSITION OF PTU, CHECK ALSO THE TRANSLATIONS UP HERE\n#q_PanCamLeft_PTU = TF.quaternion_from_euler(np.radians(175),np.radians(60-20),np.radians(0), 'szyx')\ntf_PanCamLeft_PTU = pyquaternion.Quaternion(q_PanCamLeft_Rotated).unit.transformation_matrix\n#tf_PanCamLeft_Rotation = pyquaternion.Quaternion(q_PanCamLeft_Rotated).unit.transformation_matrix\n\ntf_PanCamLeft_PTU[0][3] = t_PanCamLeft_PTU[0]\ntf_PanCamLeft_PTU[1][3] = t_PanCamLeft_PTU[1]\ntf_PanCamLeft_PTU[2][3] = t_PanCamLeft_PTU[2]\n\ntf_PanCamLeft_PTU = np.asmatrix(tf_PanCamLeft_PTU)\n\ntf_Center_PTU = tf_Center_PanCamLeft*tf_PanCamLeft_PTU\npos_PTU = tf_Center_PTU * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_PTU),pos_PanCamLeft)\nmyPlotLine(pos_Center,pos_Target)\n\ndrawReference(np.asarray(pos_PTU)[:-1],getQfromTF(tf_Center_PTU))\n\n# Go from the PTU to the front facing left pancam\nq_PTU_PanCamLeftKalibr = TF.quaternion_from_euler(np.radians(0),np.radians(0),np.radians(-20), 'rzyx')\ntf_PTU_PanCamLeftKalibr = pyquaternion.Quaternion(q_PTU_PanCamLeftKalibr).unit.transformation_matrix\ntf_PTU_PanCamLeftKalibr[0][3] = -t_PanCamLeft_PTU[0]\ntf_PTU_PanCamLeftKalibr[1][3] = -t_PanCamLeft_PTU[1]\ntf_PTU_PanCamLeftKalibr[2][3] = -t_PanCamLeft_PTU[2]\n\ntf_Center_PanCamLeftKalibr = tf_Center_PTU*tf_PTU_PanCamLeftKalibr\n\npos_PanCamLeftKalibr = tf_Center_PanCamLeftKalibr * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_PanCamLeftKalibr),np.asarray(pos_PTU))\ndrawReference(np.asarray(pos_PanCamLeftKalibr)[:-1],getQfromTF(tf_Center_PanCamLeftKalibr))\n\n# Go from the right pancam to the left one (kalibr tilt)\ntf_PanCamLeftKalibr_PanCamRight = np.linalg.inv(tf_PanCamRight_PanCamLeft)\ntf_Center_PanCamRight = tf_Center_PanCamLeftKalibr * tf_PanCamLeftKalibr_PanCamRight\npos_PanCamRight = tf_Center_PanCamRight * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_PanCamLeftKalibr),np.asarray(pos_PanCamRight))\ndrawReference(np.asarray(pos_PanCamRight)[:-1],getQfromTF(tf_Center_PanCamRight))\n\n# Go from the left pancam to the right bb3\ntf_Center_BB3Right = tf_Center_PanCamLeftKalibr * tf_PanCamLeft_BB3Right\npos_BB3Right = tf_Center_BB3Right * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_PanCamLeftKalibr),np.asarray(pos_BB3Right))\ndrawReference(np.asarray(pos_BB3Right)[:-1],getQfromTF(tf_Center_BB3Right))\n\n# Go from the Right BB3 to the Center BB3\ntf_Center_BB3Center = tf_Center_BB3Right * tf_BB3Right_BB3Center\npos_BB3Center = tf_Center_BB3Center * 
np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_BB3Right),np.asarray(pos_BB3Center))\ndrawReference(np.asarray(pos_BB3Center)[:-1],getQfromTF(tf_Center_BB3Center))\n\n# Go from the Center BB3 to the Left BB3\ntf_Center_BB3Left = tf_Center_BB3Center * tf_BB3Center_BB3Left\npos_BB3Left = tf_Center_BB3Left * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_BB3Center),np.asarray(pos_BB3Left))\ndrawReference(np.asarray(pos_BB3Left)[:-1],getQfromTF(tf_Center_BB3Left))\n\n# Go from the Left BB3 to the Right BB2\ntf_Center_BB2Right = tf_Center_BB3Left * tf_BB3Left_BB2Right\npos_BB2Right = tf_Center_BB2Right * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_BB3Left),np.asarray(pos_BB2Right))\ndrawReference(np.asarray(pos_BB2Right)[:-1],getQfromTF(tf_Center_BB2Right))\n\n# Go from the Left BB3 to the Right BB2\ntf_Center_BB2Left = tf_Center_BB2Right * tf_BB2Right_BB2Left\npos_BB2Left = tf_Center_BB2Left * np.matrix(posToVec4(pos_Center))\nmyPlotLine(np.asarray(pos_BB2Right),np.asarray(pos_BB2Left))\ndrawReference(np.asarray(pos_BB2Left)[:-1],getQfromTF(tf_Center_BB2Left))\n\n# Output positions and rotations\nprint(\"BB2 Left:\\nposition\\n\" + repr(pos_BB2Left) + \"\\nrotation\\n\" + repr(TF.euler_from_matrix(tf_Center_BB2Left)))\nprint(\"BB3 Left:\\nposition\\n\" + repr(pos_BB3Left) + \"\\nrotation\\n\" + repr(TF.euler_from_matrix(tf_Center_BB3Left)))\nprint(\"PTU:\\nposition\\n\" + repr(pos_PTU) + \"\\nrotation\\n\" + repr(TF.euler_from_matrix(tf_Center_PTU)))\n\n# Get output for stereo yml (BB3)\ntf_BB3Right_BB3Left = tf_BB3Center_BB3Left * tf_BB3Right_BB3Center\nm = TF.inverse_matrix(tf_BB3Right_BB3Left)\nprint(\"\\nBB3 Left to Right, Translation:\\n\" + repr(getT(m)*1e3))\nprint(\"\\nBB3 Left to Right, Rotation:\\n\" + repr(TF.euler_from_matrix(m)))\n\n#myPlotLine(pos_Center,pos_Target)\n#drawReference(pos_Target, TF.quaternion_from_matrix(tf_Center_Target))\n#\n#t_Target_PanCamLeft = -t_PanCamLeft_Target\n#q_Target_PanCamLeft = getOppositeFacingQuaternion(q_PanCamLeft_Target)\n#t_Center_PanCamLeft = np.subtract(t_Target_PanCamLeft, t_Center_Target)\n#q_Center_PanCamLeft = TF.quaternion_multiply(q_Target_PanCamLeft, q_Center_Target)\n#\n#v_Target_PanCamLeft = qv_mult(rot_Target,t_Target_PanCamLeft)\n#pos_PanCamLeft = np.add(pos_Target,v_Target_PanCamLeft)\n#pos_PanCamLeft = np.asarray(pos_PanCamLeft)\n#pos_PanCamLeft = pos_PanCamLeft[0:3]\n#\n#myPlotLine(pos_PanCamLeft, pos_Target)\n#drawReference(pos_PanCamLeft, TF.quaternion_from_matrix(tf_Center_PanCamLeft))\n#\n#pos_PTU = np.asarray(pos_PTU)\n#pos_PTU = pos_PTU[0:3]\n#\n#myPlotLine(pos_PTU, pos_PanCamLeft)\n#drawReference(pos_PTU, TF.quaternion_from_matrix(tf_Center_PTU))\n##q_Target_PanCamLeft = TF.quaternion_multiply(q_Target_PanCamLeft, rot_Target)\n#\n##pos_PanCamLeft = np.add(pos_Target, t_Target_PanCamLeft)\n##rot_PanCamLeft = TF.quaternion_multiply(q_Target_PanCamLeft, rot_Target)\n#\n#\n#\n##pos_PanCamLeft = qv_mult(q_Target_PanCamLeft,t_Target_PanCamLeft)\n##myPlotLine(pos_Target, pos_PanCamLeft)\n##rot_PanCamLeft = TF.quaternion_multiply(q_Target_PanCamLeft, rot_Target)\n##drawReference(pos_PanCamLeft, rot_PanCamLeft)\n#\n##drawReference(pos_PanCamLeft, q_Target_PanCamLeft)\n##v2 = qv_mult(q_Target_PanCamLeft,t_PanCamLeft_Target)\n##print(v2)\n## rotate by target orientation\n##v2 = qv_mult( TF.quaternion_from_euler(0,np.radians(-90),np.radians(180), \"rzyx\"), v2)\n##v2 = [[v2[2]], [v2[1]], [v2[0]]]\n##print(v2)\n##pos_PanCamLeft = np.add(v2, 
pos_Target)\n##print(pos_PanCamLeft)\n##myPlotLine(pos_Center,pos_Target)\n##myPlotLine(pos_Target,pos_PanCamLeft)\n#\n##q_90_up = getOppositeFacingQuaternion(TF.quaternion_from_euler(np.radians(0), np.radians(90), np.radians(0), 'rzyx'))\n##v2 = qv_mult(q_90_up,t_Center_Target)\n##pos_test2 = np.add(v2, pos_Target)\n##myPlotLine(pos_Center,v2)\n#\n## find unrotated right pancam\n\n\n# labels in figure\n#labelPos(pos_PanCamLeft, \"PanCam Left (Rotated)\")\nlabelPos(pos_Target, \"Target\")\nlabelPos(pos_Center, \"Center\")\nlabelPos(pos_PTU, \"PTU\")\n#drawToGround(pos_Target)\n#drawToGround(pos_PanCamLeft)\n\n# draw dots at interesting positions\n#lst = [\n# colVecToArray(pos_BB2Left),\n# colVecToArray(pos_BB2Right),\n# colVecToArray(pos_BB3Left),\n# colVecToArray(pos_BB3Center),\n# colVecToArray(pos_BB3Right),\n# colVecToArray(pos_PanCamLeft),\n# colVecToArray(pos_PanCamRight),\n# colVecToArray(pos_Target),\n# colVecToArray(pos_Center)\n# ]\n#zipit = zip(*lst)\n#ax.scatter(zipit[0],zipit[1],zipit[2])\n\n#plt.gca().invert_yaxis()\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()\n","sub_path":"src/kalibrTransformations.py","file_name":"kalibrTransformations.py","file_ext":"py","file_size_in_byte":14789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"423147755","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n作者:林海健\n日期:2018.9.30\n\n\"\"\"\n\nfrom slave.items import NewsItem\nfrom scrapy import Request, Spider\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_redis.spiders import RedisSpider\nfrom datetime import datetime\nimport re\nimport redis\nimport pymongo\nfrom itertools import product\nimport requests\nfrom lxml import etree\n\nclass XhnnewsSpider(RedisSpider):\n\n name = 'xhnNews'\n redis_key = 'xhn:urls'\n\n label_dict = {'politics': '时政', 'local': '地方', 'legal': '法治', '高层': '高层', 'renshi': '人事', \\\n 'world': '寰球立方体', 'mil': '军事', 'video': '动新闻', 'gangao': '港澳', 'tw': '台湾', \\\n 'overseas': '华人', 'fortune': '财发现', 'auto': '汽车', 'house': '新华有约', 'education': '教育', \\\n 'tech': '科技', 'energy': '能源', 'forum': '议起来', 'sike': '思客会', 'comments': '网评', \\\n 'photo': '图片', 'caipiao': '彩票', 'ent': '娱乐', 'money': '金融', 'sports': '体育', 'food': '食品', \\\n 'travel': '旅游', 'health': '健康', 'info': '溯源中国', 'datanews': '数据', 'yuqing': '舆情参考', \\\n 'vr': 'VR/AR', 'gongyi': '公益', 'uav': '无人机', 'silkroad': '一带一路', 'lianzheng': '反腐', \\\n 'fashion': '时尚', 'xhsd': '新华深度', 'cx': '双创', 'zhigu': '智谷', 'korea': '韩国', 'sg': '新加坡', \\\n 'th': '泰国', 'asia': '亚太网', 'finance': '投教基地', 'chanye': '产业', 'newmedia': '传媒', \\\n 'air': '航空', 'xhwq': '网群', 'shuhua': '书画', 'expo': '会展', 'abroad': '出国', 'city': '城市排行', \\\n 'book': '读书', 'power': '电力', 'culture': '文化', 'jiaju': '家居', 'jiadian': '家电', 'foto': '摄影', \\\n 'zhcs': '智慧城市', 'science': '科普'}\n\n local_dict = {'bj': '北京', 'tj': '天津', 'he': '河北', 'sx': '山西', 'ln': '辽宁', 'jl': '吉林', 'sh': '上海', \\\n 'js': '江苏', 'zj': '浙江', 'ah': '安徽', 'fj': '福建', 'jx': '江西', 'sd': '山东', 'ha': '河南', \\\n 'hb': '湖北', 'hn': '湖南', 'gd': '广东', 'gx': '广西', 'hq': '海南', 'cq': '重庆', 'sc': '四川', \\\n 'gz': '贵州', 'yn': '云南', 'sn': '陕西', 'gs': '甘肃', 'qh': '青海', 'nx': '宁夏', 'xj': '新疆', \\\n 'nmg': '内蒙古', 'hlj': '黑龙江'}\n\n def parse(self, response):\n url = response.url\n self.mongodb = pymongo.MongoClient(self.settings.attributes.get('MONGO_URI').value)['URL_eternal']\n request = Request(url, callback=self.parse_standard_news_item)\n yield request\n\n def parse_standard_news_item(self, response):\n 
base = re.match('.*(?=c_)', response.url).group()\n        \n        # Sports news\n        if re.search('sports', response.url):\n            item = self.get_single_info(base, response.xpath('//div[@class=\"content\"]'))\n            item['title'] = response.xpath('//h1/text()').extract_first().strip()\n            item['structure'] = 1\n            item['url'] = response.url\n            self.get_basic_info(item, response, 1)\n\n            # Filter by source\n            # if not '新华' in item['source']:\n            #     return\n\n            item['final_index']-=1\n            yield item\n\n        # Asia-Pacific news\n        elif re.search('asia', response.url):\n            item = self.get_single_info(base, response.xpath('//div[@class=\"bai14\"]'))\n            item['title'] = response.xpath('//span[@class=\"zt_titi\"]/text()').extract_first().strip()\n            item['structure'] = 1\n            item['url'] = response.url\n            self.get_basic_info(item, response, 2)\n\n            # Filter by source\n            # if not '新华' in item['source']:\n            #     return\n\n            item['final_index']-=1\n            yield item\n\n        # Regular news\n        elif response.xpath('//span[contains(@class, \"bai13\") or @id=\"bai13\"]')==[]:\n            if response.xpath('//div[@class=\"h-title\"]'):\n                item = self.get_single_info(base, response.xpath('//div[@id=\"p-detail\"]'))\n                item['title'] = response.xpath('//div[@class=\"h-title\"]/text()').extract_first().strip()\n            else:\n                item = self.get_single_info(base, response.xpath('//div[@class=\"article\"]'))\n                item['title'] = response.xpath('//h1[@id=\"title\"]/text()').extract_first().strip()\n            item['structure'] = 1\n            item['url'] = response.url\n            self.get_basic_info(item, response, 3)\n\n            # Filter by source\n            # if not '新华' in item['source']:\n            #     return\n\n            item['final_index']-=1\n            yield item\n\n        # Photo news\n        else:\n            if not 'item' in response.meta.keys():\n                item = NewsItem()\n                cs = response.xpath('//div[@class=\"content\"]')\n                item = self.get_single_info(base, cs)\n                item['url'] = response.url\n                item['title'] = response.xpath('//span[@id=\"title\"]/text()').extract_first().strip()\n                self.get_basic_info(item, response, 4)\n\n                # Filter by source\n                # if not '新华' in item['source']:\n                #     return\n                \n                item['website'] = 'xhn'\n                item['structure'] = 2\n                if cs.xpath('.//a[@class=\"nextpage\"]') != []:\n                    url = cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/@href').extract_first()\n                    # print ('first:', url)\n                    request = Request(url, callback=self.parse_standard_news_item)\n                    request.meta['item'] = item\n                    yield request\n                else:\n                    # item['structure'] = 3\n                    item['final_index']-=1\n                    yield item\n            else:\n                pre_item = response.meta['item']\n                cs = response.xpath('//div[@class=\"content\"]')\n                cur_item = self.get_single_info(base, cs)\n                pre_item['text'] += '\\n'+ cur_item['text']\n                pre_item['image_urls'] += cur_item['image_urls']\n                pre_item['image_index'] += [i+pre_item['final_index'] for i in cur_item['image_index']]\n                pre_item['strong_index'] += [i+pre_item['final_index'] for i in cur_item['strong_index']]\n                pre_item['final_index'] += cur_item['final_index']\n                if cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/text()').extract()[-1] == '下一页':\n                    url = cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/@href').extract()[-1]\n                    # print ('more:', url)\n                    request = Request(url, callback=self.parse_standard_news_item)\n                    request.meta['item'] = pre_item\n                    yield request\n                else:\n                    pre_item['final_index']-=1\n                    yield pre_item\n\n    def get_single_info(self, base, cs):\n        \"\"\"\n        Extract the body text, image URLs and layout indices of the news\n        \"\"\"\n        item = NewsItem()\n        text_list = []\n        img_list = []\n        img_index = []\n        strong_index = []\n        counter = 0\n        for i in cs.xpath('./descendant::*'):\n            if re.search('zan-wap', i.extract()):\n                break\n            temp_tag = self.get_name_from_sel(i)\n            if temp_tag == 'img':\n                # strategy to filter out the news links at the end of the page\n                if 
        # photo news (may span several pages)\n        else:\n            if 'item' not in response.meta:\n                item = NewsItem()\n                cs = response.xpath('//div[@class=\"content\"]')\n                item = self.get_single_info(base, cs)\n                item['url'] = response.url\n                item['title'] = response.xpath('//span[@id=\"title\"]/text()').extract_first().strip()\n                self.get_basic_info(item, response, 4)\n\n                # filter (disabled): keep only items from Xinhua sources\n                # if not '新华' in item['source']:\n                #     return\n\n                item['website'] = 'xhn'\n                item['structure'] = 2\n                if cs.xpath('.//a[@class=\"nextpage\"]') != []:\n                    url = cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/@href').extract_first()\n                    # print ('first:', url)\n                    request = Request(url, callback=self.parse_standard_news_item)\n                    request.meta['item'] = item\n                    yield request\n                else:\n                    # item['structure'] = 3\n                    item['final_index']-=1\n                    yield item\n            else:\n                pre_item = response.meta['item']\n                cs = response.xpath('//div[@class=\"content\"]')\n                cur_item = self.get_single_info(base, cs)\n                pre_item['text'] += '\\n'+ cur_item['text']\n                pre_item['image_urls'] += cur_item['image_urls']\n                pre_item['image_index'] += [i+pre_item['final_index'] for i in cur_item['image_index']]\n                pre_item['strong_index'] += [i+pre_item['final_index'] for i in cur_item['strong_index']]\n                pre_item['final_index'] += cur_item['final_index']\n                if cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/text()').extract()[-1] == '下一页':\n                    url = cs.xpath('.//div[@id=\"div_currpage\"]//a[@class=\"nextpage\"]/@href').extract()[-1]\n                    # print ('more:', url)\n                    request = Request(url, callback=self.parse_standard_news_item)\n                    request.meta['item'] = pre_item\n                    yield request\n                else:\n                    pre_item['final_index']-=1\n                    yield pre_item\n\n    def get_single_info(self, base, cs):\n        \"\"\"\n        Extract body text, image URLs and the positions of images and bold runs\n        from the content selector of a news page.\n        \"\"\"\n        item = NewsItem()\n        text_list = []\n        img_list = []\n        img_index = []\n        strong_index = []\n        counter = 0\n        for i in cs.xpath('./descendant::*'):\n            if re.search('zan-wap', i.extract()):\n                break\n            temp_tag = self.get_name_from_sel(i)\n            if temp_tag == 'img':\n                # strategy to filter out the trailing news-link images\n                if (i.xpath('./@alt').extract_first() == '点击查看专题') or (i.xpath('./@sourcename').extract_first()=='本地文件'):\n                    break\n                img_index.append(counter)\n                counter += 1\n\n                # print (i.xpath('./@src').extract_first())\n                src = i.xpath('./@src').extract_first()\n                if not re.match('http://', src):\n                    src = base+src\n                img_list.append(src)\n\n            elif temp_tag in ['p']:\n                # gather all text under the tag: a paragraph may embed a few bold words\n                all_text = i.xpath('./descendant::text()')\n                p_text = i.xpath('./text()')\n                if all_text:\n                    sing = ''.join(all_text.extract())\n                    if sing.strip() == '新闻链接:': break\n                    if sing.strip()!='':\n                        text_list.append(' '+sing.strip())\n                        if ((''.join(p_text.extract())).strip() =='') & (i.xpath('./descendant::strong')!=[]):\n                            strong_index.append(counter)\n                        counter+=1\n        item['final_index'] = counter\n        item['image_urls'] = img_list\n        item['image_index'] = img_index\n        item['strong_index'] = strong_index\n        item['text'] = '\\n'.join(text_list)\n        item['website'] = 'xhn'\n        return item\n\n    def get_basic_info(self, item, response, case):\n        \"\"\"\n        Extract the publication date and source of a news item.\n        item: the news item being built\n        response: the page response\n        case: page markup variant\n        \"\"\"\n        if response.xpath('//a[@class=\"news-location\"]'):\n            label = response.xpath('//a[@class=\"news-location\"]/text()').extract_first().strip()\n        else:\n            res = re.search('education|sports', response.url)\n            if res is None:\n                res = re.search('(?<=www\\.).*?(?=\\.xin)', response.url)\n            if res is None:\n                res = re.search('(?<=com\\/)(.*?)(?=\\/)', response.url)\n            label = res.group()\n\n        if case == 1:\n            source = response.xpath('//div[@class=\"ly\"]/text()').extract_first().strip()\n            dt = response.xpath('//div[@class=\"sj\"]/text()').extract_first().strip()\n            dt = datetime.strptime(dt, '%Y-%m-%d %H:%M')\n\n        elif case == 2:\n            tt = response.xpath('//td[@class=\"bai12\"]/text()').extract()[-1]\n            source = re.search('(?<=来源:)[\\s\\S]*', tt).group().strip()\n            dt = response.xpath('//span[@id=\"pubtime\"]/text()').extract_first().strip()\n            dt = datetime.strptime(dt, '%Y年%m月%d日 %H:%M:%S')\n\n        elif case == 3:\n\n            # http://www.xinhuanet.com/science/2018-10/15/c_137528021.htm\n            if response.xpath('//div[contains(@class, \"cy-logo\")]'):\n                tt = response.xpath('//div[@class=\"info\"]/text()').extract_first()\n                source = re.search('(?<=来源:).*', tt).group()\n                dt = re.search('[0-9]{4}.*:[0-9]{2}', tt).group()\n                dt = datetime.strptime(dt, '%Y-%m-%d %H:%M')\n            else:\n                if label == '高层':\n                    source = response.xpath('//span[@class=\"aticle-src\"]/text()').extract_first()\n                elif response.xpath('//em[@id=\"source\"]'):\n                    source = response.xpath('//em[@id=\"source\"]/text()').extract_first()\n                else:\n                    if response.xpath('//div[@class=\"h-info\"]'):\n                        tt = response.xpath('//div[@class=\"h-info\"]//span[2]/text()').extract_first()\n                    else:\n                        tt = response.xpath('//div[@class=\"info\"]/text()').extract()[-1]\n                    source = re.search('(?<=:).*', tt).group().strip()\n                if label == 'mil':\n                    dt = response.xpath('//span[@class=\"time\"]/text()').extract_first().strip()\n                    dt = datetime.strptime(dt, '%Y年%m月%d日 %H:%M:%S')\n                else:\n                    dt = response.xpath('//span[@class=\"h-time\"]/text()').extract_first().strip()\n                    dt = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')\n\n        elif case == 4:\n            if response.xpath('//em[@id=\"source\"]'):\n                source = response.xpath('//em[@id=\"source\"]/text()').extract_first()\n            else:\n                tt = response.xpath('//div[@class=\"info\"]/text()').extract()[-1]\n                source = re.search('(?<=:).*', tt).group()\n            dt = response.xpath('//span[@id=\"pubtime\"]/text()').extract_first().strip()\n            dt = datetime.strptime(dt, '%Y年%m月%d日 %H:%M:%S')\n\n
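        # Editorial note (hedged): strptime treats the CJK characters in the format\n        # strings above as literal text, e.g.\n        # datetime.strptime('2018年10月15日 08:30:00', '%Y年%m月%d日 %H:%M:%S')\n        # parses to datetime(2018, 10, 15, 8, 30).\n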
        year = dt.year\n        month = dt.month\n        day = dt.day\n        hour = dt.hour\n        minute = dt.minute\n        if source:\n            source = source.strip()\n        else:\n            source = '新华网'\n\n        front = ''\n        query = self.mongodb['xhnURL'].find({'url':response.url})\n        for i in query:\n            if 'front' in i.keys():\n                front = i['front']\n\n        if label in self.label_dict.keys():\n            label = self.label_dict[label]\n        elif label in self.local_dict.keys():\n            label = self.local_dict[label]\n        else:\n            label = ''\n\n        item['source'] = source\n        item['label'] = label\n        item['year'] = year\n        item['month'] = month\n        item['day'] = day\n        item['hour'] = hour\n        item['minute'] = minute\n        item['front'] = front\n\n    def get_name_from_sel(self, sel):\n        return re.search('(?<=\\<).*?(?=\\s|>)', sel.extract()).group()","sub_path":"slave/slave/spiders/xhnNews.py","file_name":"xhnNews.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"324678031","text":"import sys\nfrom tutotialQT import *  # note: import the module name without the .py extension\n\nclass MiFormulario(QtGui.QDialog):\n\tdef __init__(self, parent=None):\n\t\tQtGui.QDialog.__init__(self, parent)\n\t\t# create the Ui_Dialog instance\n\t\tself.ui=Ui_Dialog()\n\t\tself.ui.setupUi(self)\n\nif __name__==\"__main__\":\n\tapp=QtGui.QApplication(sys.argv)\n\tmyapp=MiFormulario()\n\tmyapp.show()\n\t# run the event loop until the window is closed\n\tsys.exit(app.exec_())","sub_path":"PyQt/VentanaPrincipal/llamaPrimeraApp.py","file_name":"llamaPrimeraApp.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"518192208","text":"from math import floor\nfrom urllib.parse import parse_qs, urlencode, urlparse, urlunparse\n\nfrom django import forms, template\nfrom django.template.loader import get_template\n\n\nregister = template.Library()\n\n\nINPUT_WIDGETS = (\n    forms.TextInput,\n    forms.NumberInput,\n    forms.EmailInput,\n    forms.URLInput,\n    forms.PasswordInput,\n    forms.DateInput,\n    forms.TimeInput,\n    forms.DateTimeInput\n)\n\n\n@register.simple_tag\ndef bulma_field(field, extra_class=''):\n    widget = field.field.widget\n    html_class = extra_class.split()\n\n    if isinstance(widget, INPUT_WIDGETS):\n        html_class.insert(0, \"input\")\n    elif isinstance(widget, forms.Textarea):\n        html_class.insert(0, \"textarea\")\n    elif isinstance(widget, forms.Select):\n        html_class.insert(0, \"select\")\n        template = get_template(\"bulma/select.html\")\n        return template.render({'field': field, 'html_class': html_class})\n\n    widget.attrs[\"class\"] = ' '.join(html_class)\n    return field\n\n\n@register.inclusion_tag(\"bulma/pagination.html\")\ndef bulma_pagination(page_obj, pages_to_show=5, url=None, extra=None, parameter_name=\"page\"):\n    \"\"\" Render pagination for a page\n    Parameters:\n        page_obj : The page object to show\n        pages_to_show : Number of pages in total\n        url : URL to navigate to for pagination forward and pagination back\n        extra : Any extra page parameters\n        parameter_name : Name of the paging URL parameter\n    \"\"\"\n\n    pages_to_show = int(pages_to_show)\n    if pages_to_show < 1:\n        pages_to_show = 1\n\n    num_pages = page_obj.paginator.num_pages\n    current_page = page_obj.number\n    half_page = int(floor(pages_to_show / 2))\n    if half_page < 0:\n        half_page = 0\n\n    first_page = current_page - half_page\n    if first_page <= 1:\n        first_page = 1\n    if first_page > 1:\n        pages_back = first_page - half_page\n        if pages_back < 1:\n            pages_back = 1\n    else:\n        pages_back = None\n\n    last_page = first_page + pages_to_show - 1\n    if pages_back is None:\n        last_page += 
1\n if last_page > num_pages:\n last_page = num_pages\n if last_page < num_pages:\n pages_forward = last_page + half_page\n if pages_forward > num_pages:\n pages_forward = num_pages\n else:\n pages_forward = None\n if first_page > 1:\n first_page -= 1\n if pages_back is not None and pages_back > 1:\n pages_back -= 1\n else:\n pages_back = None\n\n pages_shown = list(range(first_page, last_page + 1))\n\n parts = urlparse(url or \"\")\n params = parse_qs(parts.query)\n\n if extra:\n params.update(extra)\n\n if params.get(parameter_name):\n del params[parameter_name]\n\n url = urlunparse(\n [\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(params, doseq=True),\n parts.fragment,\n ]\n )\n\n if '?' in url:\n url += '&'\n else:\n url += '?'\n\n return {\n \"pagination_url\": url,\n \"num_pages\": num_pages,\n \"current_page\": current_page,\n \"pages_shown\": pages_shown,\n \"pages_back\": pages_back,\n \"pages_forward\": pages_forward,\n \"parameter_name\": parameter_name,\n }\n","sub_path":"home/templatetags/bulma.py","file_name":"bulma.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238665577","text":"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\n\nfrom paddle.distributed.fleet.launch_utils import (\n DeviceMode,\n get_cluster,\n get_host_name_ip,\n)\n\n__all__ = []\n\n\ndef _get_ascend_rankfile(rank_table_file_path):\n \"\"\"\n Args:\n rank_table_file_path: ascend npu rank file json\n {\n \"status\": \"completed\",\n \"version\": \"1.0\",\n \"server_count\": \"2\",\n \"server_list\": [\n {\n \"server_id\": \"192.168.24.217\",\n \"device\": [\n {\n \"device_id\": \"0\",\n \"device_ip\": \"192.1.184.23\",\n \"rank_id\": \"0\"\n },\n {\n \"device_id\": \"1\",\n \"device_ip\": \"192.2.21.93\",\n \"rank_id\": \"1\"\n }\n ]\n },\n {\n \"server_id\": \"192.168.26.177\",\n \"device\": [\n {\n \"device_id\": \"0\",\n \"device_ip\": \"192.1.94.132\",\n \"rank_id\": \"2\"\n },\n {\n \"device_id\": \"1\",\n \"device_ip\": \"192.2.94.30\",\n \"rank_id\": \"3\"\n }\n ]\n }\n ]\n }\n\n Returns:\n node_ips: node ip list\n device_count: number of npu per machine\n \"\"\"\n json_data = None\n with open(rank_table_file_path) as json_file:\n json_data = json.load(json_file)\n\n node_ips = []\n device_count = 0\n server_list = json_data['server_list']\n for server in server_list:\n device_list = server['device']\n device_count = len(device_list)\n if os.getenv(\"FLAGS_MODELARTS\", None):\n nodes = os.getenv(\"DLS_TASK_NUMBER\", None)\n assert nodes is not None, \"DLS_TASK_NUMBER didn't set!\"\n for node in range(int(nodes)):\n node_ip = os.getenv(\"VC_CUSTOM{}_HOSTS\".format(node), None)\n assert (\n node_ip is not None\n ), \"VC_CUSTOM{}_HOSTS didn't set!\".format(node)\n node_ips.append(node_ip)\n return node_ips, device_count\n node_ips.append(server['server_id'])\n return node_ips, 
device_count\n\n\ndef get_cloud_cluster(\n rank_table_file=None, device_mode=DeviceMode.ASCEND_NPU, start_port=6070\n):\n \"\"\"\n Args:\n rank_table_file: string, ascend npu rank file path\n device_mode: DeviceMode(Int)\n start_port: the start port of current runtime env\n \"\"\"\n if rank_table_file:\n # multi trainers\n node_ips, device_count = _get_ascend_rankfile(rank_table_file)\n if len(node_ips) == 1:\n node_ip = node_ips[0]\n else:\n node_index = os.environ.get(\"PADDLE_TRAINER_ID\")\n node_ip = None\n if node_index:\n node_ip = node_ips[int(node_index)]\n else:\n _, node_ip = get_host_name_ip()\n\n assert (\n node_ip in node_ips\n ), \"Can't find your local ip {%s} in node_ips: {%s}\" % (\n node_ip,\n node_ips,\n )\n else:\n # single trainer (single ascend card)\n node_ips = [\"127.0.0.1\"]\n node_ip = node_ips[0]\n device_count = 1\n\n devices_per_proc = [str(x) for x in range(device_count)]\n free_ports = [\n x for x in range(start_port, start_port + len(devices_per_proc))\n ]\n\n trainer_endpoints = []\n for ip in node_ips:\n trainer_endpoints.append([\"%s:%d\" % (ip, port) for port in free_ports])\n\n return get_cluster(\n node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc\n )\n","sub_path":"python/paddle/distributed/fleet/ascend_utils.py","file_name":"ascend_utils.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"298583243","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport logging\nimport sys\nimport threading\nfrom time import sleep\nimport docker\nfrom docker.errors import APIError\nfrom requests import ConnectionError\nfrom future.utils import raise_\n\n__all__ = ['PythonDockerTestMixin', 'ConfigurationError', 'ContainerNotReady']\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_READY_TRIES = 10\nDEFAULT_READY_SLEEP = 3\n\n\nclass ConfigurationError(Exception):\n pass\n\n\nclass ContainerNotReady(Exception):\n pass\n\n\nclass ContainerStartThread(threading.Thread):\n\n def __init__(\n self, image, ready_callback, ready_tries, ready_sleep,\n environment=None\n ):\n self.is_ready = threading.Event()\n self.error = None\n self.image = image\n self.ready_tries = ready_tries\n self.ready_sleep = ready_sleep\n self.ready_callback = ready_callback\n\n self.environment = environment\n\n super(ContainerStartThread, self).__init__()\n\n def run(self):\n log.debug(\"ContainerStartThread.run() executed\")\n try:\n try:\n self.client = docker.Client(version='auto')\n self.client.ping()\n except ConnectionError as e:\n self.error = \"Can't connect to docker. 
Is it installed/running?\"\n raise\n\n # confirm that the image we want to run is present and pull if not\n try:\n self.client.inspect_image(self.image)\n except APIError as e:\n if '404' in str(e.message):\n print(\"{} image not found; pulling...\".format(self.image),\n file=sys.stderr)\n result = self.client.pull(self.image)\n if 'error' in result:\n raise ConfigurationError(result['error'])\n\n run_args = {'image': self.image, 'environment': self.environment}\n\n # create and start the container\n self.container = self.client.create_container(**run_args)\n self.client.start(self.container)\n self.container_data = self.client.inspect_container(self.container)\n\n if self.ready_callback is not None:\n # wait for the container to be \"ready\"\n print(\"Waiting for container to start...\", file=sys.stderr)\n tries = self.ready_tries\n while tries > 0:\n try:\n print(\"Number of tries left: {}\".format(tries),\n file=sys.stderr)\n self.ready_callback(self.container_data)\n break\n except ContainerNotReady:\n tries -= 1\n sleep(self.ready_sleep)\n\n self.is_ready.set()\n\n except Exception as e:\n self.exc_info = sys.exc_info()\n if self.error is None:\n self.error = e\n self.is_ready.set()\n\n def terminate(self):\n if hasattr(self, 'container'):\n self.client.stop(self.container)\n self.client.remove_container(self.container)\n\n\nclass PythonDockerTestMixin(object):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Checks that image\n defined in cls.CONTAINER_IMAGE is present and pulls if not. Starts\n the container in a separate thread to allow for better cleanup if\n exceptions occur during test setup.\n \"\"\"\n log.debug(\"custom setup class executed\")\n\n if not hasattr(cls, 'CONTAINER_IMAGE'):\n raise ConfigurationError(\n \"Test class missing CONTAINER_IMAGE attribute\"\n )\n\n ready_tries = getattr(\n cls, 'CONTAINER_READY_TRIES', DEFAULT_READY_TRIES\n )\n ready_sleep = getattr(\n cls, 'CONTAINER_READY_SLEEP', DEFAULT_READY_SLEEP\n )\n ready_callback = getattr(cls, 'container_ready_callback')\n environment = getattr(cls, 'CONTAINER_ENVIRONMENT', None)\n\n cls.container_start_thread = ContainerStartThread(\n cls.CONTAINER_IMAGE,\n ready_callback,\n ready_tries,\n ready_sleep,\n environment\n )\n cls.container_start_thread.daemon = True\n cls.container_start_thread.start()\n\n # wait for the container startup to complete\n cls.container_start_thread.is_ready.wait()\n if cls.container_start_thread.error:\n exc_info = cls.container_start_thread.exc_info\n # Clean up behind ourselves,\n # since tearDownClass won't get called in case of errors.\n cls._tearDownClassInternal()\n raise raise_(exc_info[1], None, exc_info[2])\n\n cls.container_data = cls.container_start_thread.container_data\n\n super(PythonDockerTestMixin, cls).setUpClass()\n\n @classmethod\n def _tearDownClassInternal(cls):\n if hasattr(cls, 'container_start_thread'):\n cls.container_start_thread.terminate()\n cls.container_start_thread.join()\n delattr(cls, 'container_start_thread')\n\n @classmethod\n def tearDownClass(cls):\n super(PythonDockerTestMixin, cls).tearDownClass()\n cls._tearDownClassInternal()\n\n def setUp(self):\n self.container_ip = self.container_data['NetworkSettings']['IPAddress']\n self.docker_gateway_ip = self.container_data['NetworkSettings']['Gateway']\n","sub_path":"python_docker_test/mixin.py","file_name":"mixin.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"3346250","text":"import numpy as 
np\nimport scipy\nimport scipy.signal\n\n# Define a class to receive the characteristics of each line detection\nclass LaneLine():\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False \n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None \n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])] \n #x values for detected line pixels\n self.allx = None \n #y values for detected line pixels\n self.ally = None\n #latest lane fit\n self.last_fit = None\n # was the last detection plausible?\n self.last_plausible = False;\n\n def fit_next(self, y, x, conf):\n self.ally = y\n self.allx = x\n self.last_fit = self.current_fit;\n\n # Fit a second order polynomial to pixel positions in each lane line\n if True:\n idx = self.idx_reject_outliers(self.allx, conf);\n # make sure at least 3 data points are present for fitting\n if np.sum( idx ) >= 3:\n self.current_fit = np.polyfit(self.ally[idx], self.allx[idx], 2)\n else:\n self.current_fit = np.array([0.,0.,0.]);\n else:\n self.current_fit = self.fit_poly_RANSAC(self.ally, self.allx);\n\n if self.detected:\n self.diffs = self.current_fit - self.last_fit;\n ploty = np.array([50, 150, 250, 350, 450, 550])\n fit1 = self.last_fit[0]*ploty**2 + self.last_fit[1]*ploty + self.last_fit[2]\n fit2 = self.current_fit[0]*ploty**2 + self.current_fit[1]*ploty + self.current_fit[2]\n err = np.sqrt(np.mean((fit1-fit2)**2))\n if err > 10 or np.sum(self.current_fit) == 0:\n self.last_plausible = False;\n else:\n self.best_fit = self.best_fit - 0.25 * (self.best_fit - self.current_fit);\n self.last_plausible = True;\n else:\n self.best_fit = self.current_fit\n self.detected = True;\n return idx\n\n def fit_poly_RANSAC(self, y, x):\n best_mse = 1e99;\n best_result = None;\n best_choice = None;\n for i in range(100):\n r = np.ones(shape=(len(x),), dtype=np.bool)\n r[np.random.randint(0,len(x))] = False;\n r[np.random.randint(0,len(x))] = False;\n result = np.polyfit(y[r], x[r], 2, full=True)\n mse = result[1];\n mse = 0 if mse.size == 0 else mse[0]\n if mse < best_mse:\n best_mse = mse;\n best_result = result;\n best_choice = r;\n return best_result[0]\n\n def idx_reject_outliers(self, data, conf, m = 3.0, low = 5):\n d = np.abs(data - np.median(data[data > low]))\n mdev = np.median(d)\n s = d/mdev if mdev else 0.*d\n return np.logical_and( np.logical_and(s < m, data > low ), conf > 0.1 );\n\n def generate_road(self, offset, window_height):\n ploty = offset + np.linspace(0, window_height, num=200)\n fitx = self.best_fit[0]*ploty**2 + self.best_fit[1]*ploty + self.best_fit[2]\n return (ploty, fitx)\n \n def get_radius(self):\n #radius of curvature of the line in some units\n self.fit_cr = self.best_fit;\n y_eval = np.max(self.ally)\n factor_x = 50 # px/ft\n factor_y = 5 # px/ft\n ft_to_m = 0.3048 # m/ft\n ym_per_pix = (1/factor_y) * ft_to_m # ft/px * m/ft => m/px\n xm_per_pix = (1/factor_x) * ft_to_m # ft/px * m/ft => m/px\n curverad = ((1 + (2*self.fit_cr[0]*y_eval*ym_per_pix + self.fit_cr[1])**2)**1.5) / np.absolute(2*self.fit_cr[0])\n return curverad\n\n def get_vehicle_offset(self):\n self.fit_cr = self.best_fit; \n factor_x = 50 # px/ft\n ft_to_m = 0.3048 # m/ft\n xm_per_pix = (1/factor_x) * ft_to_m # ft/px * m/ft => m/px\n center_px = 450\n return (center_px - 
self.fit_cr[2])*xm_per_pix\n","sub_path":"lib/lane_line.py","file_name":"lane_line.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235707814","text":"from math import *\nimport sys\nimport util\nimport logging\n\nCOLORS = [\"FF0000\", \"00FF00\", \"0000FF\", \"000000\"]\nEXTENDED_MAP = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-.'\nSIMPLE_MAP = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\nMAX_SIZE = 858\n\ndef simple_encode(value_s, min_value, max_value):\n chart_data = ''\n range_used = max_value - min_value\n scale = (len(SIMPLE_MAP) - 1) / range_used\n\n for i in range(len(value_s)):\n scaled_val = (value_s[i] - min_value) * scale\n\n if scaled_val < 0:\n chart_data += '_'\n else:\n #Calculate first and second digits and add them to the output.\n quotient = int(floor(scaled_val))\n chart_data += SIMPLE_MAP[quotient]\n return chart_data \n \n\ndef encode(value_s, min_value, max_value):\n chart_data = \"\"\n ## min_value = float(1e+6)\n ## max_value = float(-1e+6)\n\n ## for i in range(len(value_s)):\n ## min_value = min(min_value, value_s[i])\n ## max_value = max(max_value, value_s[i])\n\n range_used = max_value - min_value\n scale = len(EXTENDED_MAP) * len(EXTENDED_MAP) / range_used\n \n for i in range(len(value_s)):\n scaled_val = int(floor((value_s[i] - min_value) * scale)) #Scale the value to max_value.\n\n if scaled_val > len(EXTENDED_MAP) * len(EXTENDED_MAP) - 1:\n chart_data += '..'\n elif scaled_val < 0:\n chart_data += '__'\n else:\n #Calculate first and second digits and add them to the output.\n quotient = int(floor(scaled_val / float(len(EXTENDED_MAP))))\n remainder = scaled_val - len(EXTENDED_MAP) * quotient\n chart_data += EXTENDED_MAP[quotient]\n chart_data += EXTENDED_MAP[remainder]\n return chart_data\n\ndef fit_data(value_s):\n value_size = len(value_s) * len(value_s[0])\n\n if value_size <= MAX_SIZE:\n return value_s\n else:\n quotient = int(floor(value_size / float(MAX_SIZE))) + 1\n output_s = []\n for i in range(len(value_s)):\n output_ss = [value_s[i][j] for j in range(len(value_s[i])) if j % quotient == 0]\n output_s.append(output_ss)\n\n return output_s\n \ndef line_chart(values, labels, x_labels = [], width = 800, height = 375):\n\n value_s = fit_data(values) # reduce the data size to the maximum allowed\n \n path = 'http://chart.apis.google.com/chart?'\n chart_type = 'cht=lc'\n chart_data = 'chd=e:'\n chart_labels = 'chxt=x,y&chdl='\n chart_colors = 'chco='\n chart_size = 'chs=' + str(width) + 'x' + str(height)\n chart_others = 'chdlp=bv&chg=15,15'\n\n max_y = float(-1e+6)\n min_y = float(1e+6)\n \n for i in range(len(labels)):\n for j in range(len(value_s[i])):\n #Calculate the max/min values so far\n max_y = max(max_y, value_s[i][j])\n min_y = min(min_y, value_s[i][j])\n\n for i in range(len(labels)):\n if i > 0:\n chart_labels += '|'\n chart_colors += ','\n chart_data += ','\n \n chart_labels += labels[i]\n chart_colors += COLORS[i % len(COLORS)]\n chart_data += encode(value_s[i], min_y, max_y)\n\n chart_range= 'chds=a&chxr=1,%(min2).2f,%(max2).2f' % {'min2':(min_y), 'max2':(max_y)}\n if len(x_labels) == 0:\n chart_range += '|0,0,%(lens)d' % {'lens':(len(value_s[0]) + 1)}\n else:\n chart_range += '&chxl=0:'\n for i in range(len(x_labels)):\n chart_range += '|' + x_labels[i]\n chart_range += '|'\n\n logging.debug(chart_range)\n \n return path + '&' + chart_type + '&' + chart_data + '&' + chart_labels + '&' + chart_size 
+ '&' + chart_colors + '&' + chart_range + '&' + chart_others\n","sub_path":"googlechart.py","file_name":"googlechart.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"618850936","text":"import dbutils\nimport re\n\n\ndef validate_url(url):\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n return regex.match(url)\n\n\ndef main():\n\n committed_urls = []\n\n while True:\n arg = input(\"INPUT YOUR SITE OR PRESS 'Q'/'q' TO QUIT:\\n\")\n if len(arg) == 1 and arg in ('Q', 'q'):\n break\n\n if not validate_url(arg):\n print('THE INPUT IS NOT A VALID URL. '\n 'THE URL FORMAT SHOULD BE \"http[https]://:/\"')\n continue\n\n with dbutils.UseMongoDB(dbutils.COL_START_URLS) as col:\n try:\n col.insert({'url': arg})\n except Exception as e:\n print('INSERT FAILED!')\n print(e)\n else:\n print('INSERT SUCCESSFULLY!')\n committed_urls.append(arg)\n\n print('YOU HAVE COMMITTED %d URLS:\\n%s' % (len(committed_urls), committed_urls))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"urlcommitter.py","file_name":"urlcommitter.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385058016","text":"places = 37\nwhole = [0] * places\nincount, n, black, red = 0, 0, 0, 0\nredarr = [1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36]\nfor i in range (places):\n whole[i] = 0\nwhile True:\n\tincount = int(input(\"Введите номер:\\n\"))\n\tif incount >= 0:\n\t\twhole[incount] += 1\n\t\tfor i in range (18):\n\t\t\tif incount == redarr[i]:\n\t\t\t\tred += 1\n\t\t\t\tbreak\n\t\t\telif i == 17:\n\t\t\t black += 1\n\t\t\t break\n\telse:\n\t\tbreak\n\tfor i in range (places):\n\t \tif whole[i] > n:\n\t \t n = whole[i]\n\tfor i in range (places):\n\t\tif whole[i] == n:\n\t\t print(i, end = ' ')\n\tprint()\n\tfor i in range (places):\n\t\tif whole[i] == 0:\n\t\t print(i, end = ' ')\n\tprint('\\n', red, ' ', black, '\\n', sep = '')\n","sub_path":"practice/17/python/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342040614","text":"#-*-coding:utf-8-*-\n\n\nimport sys\n\nsys.path.append('.')\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\nimport math\n\nfrom train_config import config as cfg\n\n\nfrom lib.core.model.Mobilenet import mobilenet\nfrom lib.helper import logger\n\n\n\n###also u can change it to a specific model\n\nmodel_folder = './model'\ncheckpoint = tf.train.get_checkpoint_state(model_folder).model_checkpoint_path\npretrained_model=checkpoint\nsaved_name ='./model/landmark_deploy.ckpt'\nclass trainner():\n def __init__(self):\n\n self.inputs=[]\n self.outputs=[]\n\n self.ite_num=1\n\n def _wing_loss(self,landmarks, labels, w=10.0, epsilon=2.0):\n \"\"\"\n Arguments:\n landmarks, labels: float tensors with shape [batch_size, landmarks]. 
landmarks means x1,x2,x3,x4...y1,y2,y3,y4 1-D\n w, epsilon: a float numbers.\n Returns:\n a float tensor with shape [].\n \"\"\"\n with tf.name_scope('wing_loss'):\n x = landmarks - labels\n c = w * (1.0 - math.log(1.0 + w / epsilon))\n absolute_x = tf.abs(x)\n losses = tf.where(\n tf.greater(w, absolute_x),\n w * tf.log(1.0 + absolute_x / epsilon),\n absolute_x - c\n )\n if cfg.TRAIN.ohem:\n return losses\n else:\n loss = tf.reduce_mean(tf.reduce_mean(losses, axis=[1]), axis=0)\n return loss\n def tower_loss(self,scope, images, labels, L2_reg, training):\n \"\"\"Calculate the total loss on a single tower running the model.\n\n Args:\n scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'\n images: Images. 4D tensor of shape [batch_size, height, width, 3].\n labels: Labels. 1D tensor of shape [batch_size].\n\n Returns:\n Tensor of shape [] containing the total loss for a batch of data\n \"\"\"\n\n # Build the portion of the Graph calculating the losses. Note that we will\n # assemble the total_loss using a custom function below.\n\n #net_out = shufflenet_v2(images, L2_reg, False)\n #net_out = resnet(images, L2_reg, False)\n net_out = mobilenet(images, L2_reg, False)\n\n loss, leye_loss, reye_loss, mouth_loss, leye_cla_accuracy,\\\n reye_cla_accuracy, mouth_cla_accuracy, l2_loss=calculate_loss(net_out,labels,scope)\n return loss,leye_loss,reye_loss,mouth_loss,leye_cla_accuracy,reye_cla_accuracy,mouth_cla_accuracy, l2_loss\n\n\n def build(self):\n \"\"\"Train faces data for a number of epoch.\"\"\"\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n # Create a variable to count the number of train() calls. This equals the\n # number of batches processed * FLAGS.num_gpus.\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), dtype=tf.int32, trainable=False)\n\n # Decay the learning rate\n lr = tf.train.piecewise_constant(global_step,\n cfg.TRAIN.lr_decay_every_step,\n cfg.TRAIN.lr_value_every_step\n )\n\n keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n L2_reg = tf.placeholder(tf.float32, name=\"L2_reg\")\n training = tf.placeholder(tf.bool, name=\"training_flag\")\n\n images_place_holder_list = []\n labels_place_holder_list = []\n\n # Create an optimizer that performs gradient descent.\n #opt = tf.train.AdamOptimizer(lr)\n opt = tf.train.MomentumOptimizer(lr,momentum=0.9,use_nesterov=False)\n # Get images and labels\n\n weights_initializer = slim.xavier_initializer()\n biases_initializer = tf.constant_initializer(0.)\n biases_regularizer = tf.no_regularizer\n weights_regularizer = tf.contrib.layers.l2_regularizer(L2_reg)\n\n # Calculate the gradients for each model tower.\n tower_grads = []\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(1):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % (i)) as scope:\n with slim.arg_scope([slim.model_variable, slim.variable], device='/cpu:0'):\n images_ = tf.placeholder(tf.float32, [None, cfg.MODEL.hin, cfg.MODEL.win, 3], name=\"images\")\n labels_ = tf.placeholder(tf.float32, [None, cfg.MODEL.out_channel],name=\"labels\")\n\n images_place_holder_list.append(images_)\n labels_place_holder_list.append(labels_)\n\n with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \\\n slim.conv2d_transpose, slim.separable_conv2d,\n slim.fully_connected],\n weights_regularizer=weights_regularizer,\n biases_regularizer=biases_regularizer,\n weights_initializer=weights_initializer,\n biases_initializer=biases_initializer):\n loss, leye_loss, reye_loss, 
mouth_loss,leye_cla_accuracy,reye_cla_accuracy,mouth_cla_accuracy, l2_loss = self.tower_loss(\n scope, images_, labels_, L2_reg, training)\n\n ##use muti gpu ,large batch\n if i == cfg.TRAIN.num_gpu - 1:\n total_loss = tf.add_n([loss, leye_loss, reye_loss, mouth_loss, l2_loss])\n else:\n total_loss = tf.add_n([loss, leye_loss, reye_loss, mouth_loss])\n\n # Reuse variables for the next tower.\n tf.get_variable_scope().reuse_variables()\n\n ##when use batchnorm, updates operations only from the\n ## final tower. Ideally, we should grab the updates from all towers\n # but these stats accumulate extremely fast so we can ignore the\n # other stats from the other towers without significant detriment.\n bn_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope)\n\n # Retain the summaries from the final tower.\n #summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n summaries = tf.get_collection('%smutiloss'%scope, scope)\n # Calculate the gradients for the batch of data on this CIFAR tower.\n grads = opt.compute_gradients(total_loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n # We must calculate the mean of each gradient. Note that this is the\n # synchronization point across all towers.\n\n\n\n\n\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)\n\n # Build the summary operation from the last tower summaries.\n\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n\n\n # Start running operations on the Graph. allow_soft_placement must be set to\n # True to build towers on GPU, as some of the ops do not have GPU\n # implementations.\n\n tf_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False)\n tf_config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=tf_config)\n self.sess.run(init)\n\n\n #########################restore the params\n variables_restore = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)#,scope=cfg.MODEL.net_structure)\n\n saver2 = tf.train.Saver(variables_restore)\n saver2.restore(self.sess, pretrained_model)\n\n\n logger.info('landmark_deploy saved')\n self.saver.save(self.sess, save_path=saved_name)\n\n\n self.sess.close()\n\n\n\n\n\ntrain=trainner()\ntrain.build()\n","sub_path":"tools/net_work_for_tf_lite.py","file_name":"net_work_for_tf_lite.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"512722717","text":"\r\n\"\"\"https://github.com/zhuhm1996/bgnn\"\"\"\r\n\r\nimport torch\r\nfrom torch import nn, optim\r\nfrom mode_zoo import gnn\r\nfrom mode_zoo.utils import EdgeDropout\r\n\r\ndef bgnn_pool(xw, adj):\r\n sum = adj@xw\r\n sum_squared = sum.square()\r\n # step2 squared_sum\r\n squared = xw.square()\r\n squared_sum = torch.square(adj)@squared\r\n # step3\r\n new_embedding = sum_squared - squared_sum\r\n return new_embedding\r\n\r\ndef bgcn_a_norm(edge_index):\r\n adj_t = edge_index.to_dense()\r\n adj_all = adj_t+torch.eye(adj_t.shape[0])\r\n num_nei = adj_all.sum(dim=-1)\r\n norm = (adj_all.sum(dim=-1).square()-adj_all.square().sum(dim=-1))\r\n # norm = num_nei*(num_nei-1)\r\n norm = norm.pow(-1)\r\n norm.masked_fill_(torch.isinf(norm), 0.)\r\n norm = torch.diag(norm)\r\n norm = norm.to_sparse()\r\n adj_all = adj_all.to_sparse()\r\n return adj_all, norm\r\n\r\ndef bgcn_t_norm(edge_index):\r\n adj_t = edge_index.to_dense()\r\n adj_all = adj_t+torch.eye(adj_t.shape[0])\r\n norm = adj_t.sum(dim=-1)\r\n norm = 
norm.pow(-1)\r\n norm.masked_fill_(torch.isinf(norm), 0.)\r\n norm = torch.diag(norm)\r\n norm = norm.to_sparse()\r\n adj_all = adj_all.to_sparse()\r\n return adj_all, norm\r\n\r\n\r\nclass BGCNA(nn.Module):\r\n def __init__(self, in_channels: int, out_channels: int,\r\n improved: bool = False, cached: bool = False,\r\n add_self_loops: bool = True, normalize: bool = True,\r\n bias: bool = True, **kwargs):\r\n super(BGCNA, self).__init__()\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.cached = cached\r\n self._cache = None\r\n self.weight = nn.Parameter(torch.Tensor(in_channels, out_channels))\r\n if bias:\r\n self.bias = nn.Parameter(torch.zeros(out_channels))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n nn.init.xavier_uniform_(self.weight)\r\n # if self.bias is not None:\r\n # nn.init.xavier_uniform_(self.bias)\r\n self._cached_edge_index = None\r\n self._cached_adj_t = None\r\n\r\n def forward(self, x, edge_index, edge_weight):\r\n xw = x@self.weight\r\n if self.cached:\r\n if self._cache is None:\r\n adj, norm = bgcn_a_norm(edge_index)\r\n self._cache = (adj, norm)\r\n else:\r\n adj, norm = self._cache\r\n else:\r\n adj, norm = bgcn_a_norm(edge_index)\r\n out = bgnn_pool(xw, adj)\r\n out = norm@out\r\n if self.bias is not None:\r\n out += self.bias\r\n return out\r\n\r\n def __repr__(self):\r\n return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\r\n self.out_channels)\r\n\r\n\r\nclass BGCNT(BGCNA):\r\n def __init__(self, in_channels: int, out_channels: int,\r\n improved: bool = False, cached: bool = False,\r\n add_self_loops: bool = True, normalize: bool = True,\r\n bias: bool = True, **kwargs):\r\n super(BGCNT, self).__init__(in_channels=in_channels, out_channels=out_channels,\r\n cached=cached, bias=bias, **kwargs)\r\n\r\n def forward(self, x, edge_index, edge_weight):\r\n xw = x @ self.weight\r\n if self.cached:\r\n if self._cache is None:\r\n adj, norm = bgcn_a_norm(edge_index)\r\n self._cache = (adj, norm)\r\n else:\r\n adj, norm = self._cache\r\n else:\r\n adj, norm = bgcn_t_norm(edge_index)\r\n out = bgnn_pool(xw, adj) - bgnn_pool(xw, edge_index)\r\n out = norm @ out\r\n if self.bias is not None:\r\n out += self.bias\r\n return xw-out\r\n\r\nclass GCNConv(nn.Module):\r\n def __init__(self, in_channels: int, out_channels: int,\r\n gnn_mode=\"gcnt\", improved: bool = False, cached: bool = False,\r\n add_self_loops: bool = True, normalize: bool = True,\r\n bias: bool = True, **kwargs):\r\n super(GCNConv, self).__init__()\r\n assert gnn_mode in [\"gcn\", \"gcna\", \"gcnt\", \"a\", \"t\"]\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.mode = gnn_mode\r\n if gnn_mode.startswith(\"gcn\"):\r\n self.gcn = gnn.GCNConv(in_channels=in_channels, out_channels=out_channels,\r\n normalize=normalize, add_self_loops=add_self_loops,\r\n cached=cached, bias=bias)\r\n self.weight = self.gcn.weight\r\n if gnn_mode.endswith(\"a\"):\r\n self.bgnn = BGCNA(in_channels=in_channels, out_channels=out_channels, cached=cached, bias=bias)\r\n self.weight = self.bgnn.weight\r\n else:\r\n self.bgnn = BGCNT(in_channels=in_channels, out_channels=out_channels, cached=cached, bias=bias)\r\n self.weight = self.bgnn.weight\r\n if len(gnn_mode)==4:\r\n self.attention = nn.Parameter(torch.ones(2, 1, 1)/2)\r\n\r\n def forward(self, x, edge_index, edge_weight):\r\n if self.mode.startswith(\"gcn\"):\r\n x_gcn = self.gcn(x, edge_index, edge_weight)\r\n if 
self.mode.endswith(\"a\") or self.mode.endswith(\"t\"):\r\n x_bgnn = self.bgnn(x, edge_index, edge_weight)\r\n if len(self.mode)==1:\r\n return x_bgnn\r\n elif len(self.mode)==3:\r\n return x_gcn\r\n attention = torch.softmax(self.attention, dim=0)\r\n feature = torch.stack([x_gcn, x_bgnn])\r\n feature = torch.sum(attention*feature, dim=0)\r\n return feature\r\n","sub_path":"old_src/mode_zoo/bgnn.py","file_name":"bgnn.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"58611957","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.mgmt.datamigration.models import (MigrateSqlServerSqlDbTaskInput,\n MigrateSqlServerSqlDbDatabaseInput,\n MigrationValidationOptions,\n MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput,\n MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput,\n MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput,\n MigrateMySqlAzureDbForMySqlOfflineTaskInput,\n MigrateMySqlAzureDbForMySqlOfflineDatabaseInput)\n\nfrom azure.cli.core.azclierror import ValidationError\n\n\ndef get_migrate_sql_to_sqldb_offline_input(database_options_json,\n source_connection_info,\n target_connection_info,\n enable_schema_validation,\n enable_data_integrity_validation,\n enable_query_analysis_validation):\n database_options = []\n\n for d in database_options_json:\n database_options.append(\n MigrateSqlServerSqlDbDatabaseInput(\n name=d.get('name', None),\n target_database_name=d.get('target_database_name', None),\n make_source_db_read_only=d.get('make_source_db_read_only', None),\n table_map=d.get('table_map', None)))\n\n validation_options = MigrationValidationOptions(enable_schema_validation=enable_schema_validation,\n enable_data_integrity_validation=enable_data_integrity_validation,\n enable_query_analysis_validation=enable_query_analysis_validation)\n\n return MigrateSqlServerSqlDbTaskInput(source_connection_info=source_connection_info,\n target_connection_info=target_connection_info,\n selected_databases=database_options,\n validation_options=validation_options)\n\n\ndef get_migrate_postgresql_to_azuredbforpostgresql_sync_input(database_options_json,\n source_connection_info,\n target_connection_info):\n database_options = []\n\n for d in database_options_json:\n s_t = d.get('selectedTables', None)\n t = None if s_t is None else [MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput(name=t) for t in s_t]\n database_options.append(\n MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput(\n name=d.get('name', None),\n target_database_name=d.get('target_database_name', None),\n migration_setting=d.get('migrationSetting', None),\n source_setting=d.get('sourceSetting', None),\n target_setting=d.get('targetSetting', None),\n selected_tables=t))\n\n return MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput(source_connection_info=source_connection_info,\n target_connection_info=target_connection_info,\n selected_databases=database_options)\n\n\ndef get_migrate_mysql_to_azuredbformysql_offline_input(database_options_json,\n source_connection_info,\n target_connection_info):\n database_options = []\n migration_level_settings = {}\n make_source_server_read_only = False\n 
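# Editorial sketch (inferred from the validation below; not an official schema):\n    # the database-options JSON is expected to look roughly like\n    # {\n    #   \"selected_databases\": [{\"name\": \"db1\", \"target_database_name\": \"db1\",\n    #                           \"table_map\": {\"s.t1\": \"s.t1\"}}],\n    #   \"migration_level_settings\": {...},\n    #   \"make_source_server_read_only\": true\n    # }\n    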
selected_databases = []\n\n    if not isinstance(database_options_json, dict):\n        raise ValidationError('Format of the database option file is wrong')\n\n    if 'selected_databases' not in database_options_json:\n        raise ValidationError('Database option file should contain at least one selected database for migration')\n    selected_databases = database_options_json.get('selected_databases')\n\n    for database in selected_databases:\n        if not isinstance(database, dict):\n            raise ValidationError('Format of the selected database file is wrong')\n        if 'name' not in database:\n            raise ValidationError('Selected database should have a name')\n        if 'target_database_name' not in database:\n            raise ValidationError('Selected database should have a target_database_name')\n        if 'table_map' in database and (not isinstance(database.get('table_map'), dict) or\n                                        len(database.get('table_map')) == 0):\n            raise ValidationError('Table map should be a non-empty dictionary; to select all tables remove table_map')\n        database_options.append(\n            MigrateMySqlAzureDbForMySqlOfflineDatabaseInput(\n                name=database.get('name', None),\n                target_database_name=database.get('target_database_name', None),\n                table_map=database.get('table_map', None)))\n\n    if 'migration_level_settings' in database_options_json and \\\n            (not isinstance(database_options_json, dict) or len(\n                database_options_json.get('migration_level_settings')) == 0):\n        raise ValidationError('migration_level_settings has the wrong format or is empty')\n    if 'migration_level_settings' in database_options_json and isinstance(database_options_json, dict):\n        migration_level_settings = database_options_json.get('migration_level_settings', None)\n    if 'make_source_server_read_only' in database_options_json and isinstance(database_options_json, dict):\n        make_source_server_read_only = database_options_json.get('make_source_server_read_only', None)\n\n    return MigrateMySqlAzureDbForMySqlOfflineTaskInput(source_connection_info=source_connection_info,\n                                                       target_connection_info=target_connection_info,\n                                                       selected_databases=database_options,\n                                                       optional_agent_settings=migration_level_settings,\n                                                       make_source_server_read_only=make_source_server_read_only)\n","sub_path":"src/azure-cli/azure/cli/command_modules/dms/scenario_inputs.py","file_name":"scenario_inputs.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"274551633","text":"# coding: utf-8\n\nimport sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nimport time\nfrom PyQt5.QtCore import pyqtSlot\nimport random\n\nclass Form(QtWidgets.QWidget):\n    def __init__(self):\n        super(Form, self).__init__()\n        self.setGeometry(QtCore.QRect(10, 10, 640, 480))\n        lb = QtWidgets.QPushButton('Hello', self)\n        self.show()\n\n        self.ani = QtCore.QPropertyAnimation(lb, b\"geometry\")\n        self.ani.setDuration(1000)\n\n        self.ani.setKeyValueAt(0, QtCore.QRect(0, 0, 100, 30))\n        self.ani.setKeyValueAt(0.5, QtCore.QRect(250, 250, 100, 30))\n        self.ani.setKeyValueAt(1, QtCore.QRect(0, 0, 100, 30))\n        self.ani.start()\n
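\n        # Editorial note (hedged): setKeyValueAt() takes a progress fraction in\n        # [0.0, 1.0]; with setDuration(1000) the keyframes above move the button\n        # from (0, 0) out to (250, 250) at the 500 ms midpoint and back, so the\n        # animation ends where it started.\n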
\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    app.processEvents(QtCore.QEventLoop.AllEvents)\n    w = Form()\n    sys.exit(app.exec())","sub_path":"PyQt_Widget_Examples/QAbstractAnimation/example_2.py","file_name":"example_2.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"337304284","text":"import unittest\nimport random\nfrom models import TerrainContinu, Robot, Polygone, Vecteur\n\n\nclass TerrainContinuTest(unittest.TestCase):\n    def test_contruct_tc(self):\n        tc = TerrainContinu.Carre(20)\n        self.assertIsNotNone(tc.polygoneSurface)\n        self.assertIsNotNone(tc.listePolygone)\n\n    def test_ajout_polygone(self):\n        tc = TerrainContinu.Carre(20)\n        length = len(tc.listePolygone)\n        p = Polygone.Polygone(((0., 1.), (1., 0), (4, 2)))\n        tc.ajoutPolygone(p)\n        self.assertTrue(length + 1 == len(tc.listePolygone))\n\n    def test_collision(self):\n        \"\"\"tuple (int * int) * Vecteur -> boolean\n        Checks that the robot collides correctly with the objects placed on the\n        terrain as well as with the vectors that bound it.\n        :param tuple: coordinates of the robot\n        :param Vecteur: displacement vector of the robot\n        \"\"\"\n\n        tc = TerrainContinu.Carre(20)\n        tc.ajoutPolygone(Polygone.Carre((10., 10.), 5))\n\n        posOrigine = (3., 3.)\n        vecteurDeplacement = Vecteur.Vecteur(random.uniform(-10., 10.), random.uniform(-10., 10.))\n\n        b = False\n        for p in tc.listePolygone:\n            if p.collision(posOrigine, vecteurDeplacement):\n                b = True\n        # posX, posY: position of the first vector of the terrain\n        posX = tc.polygoneSurface.liste_sommet[0].x\n        posY = tc.polygoneSurface.liste_sommet[1].y\n        for v in tc.polygoneSurface.liste_vecteur:\n            # displacement vector and the robot's (x, y)\n            if v.collision((posX, posY), vecteurDeplacement, posOrigine):\n                b = True\n            # compute the origin of the following vectors\n            posX = posX + v.x\n            posY = posY + v.y\n\n        self.assertTrue(b == tc.collision(posOrigine, vecteurDeplacement))\n","sub_path":"tests/models/test_terrainContinu.py","file_name":"test_terrainContinu.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"156607512","text":"from django.http.response import HttpResponse\nfrom meuapp.models.ContaReceber import ContaReceber, SITUACAO_CHOICE\nfrom meuapp.models.Classificacao import Classificacao\nfrom django.http.request import HttpRequest\nfrom django.shortcuts import redirect, render, resolve_url\n\ndef create_receive_bills(request: HttpRequest):\n    categories = Classificacao.objects.filter(classificacao='R')\n    if request.method == 'GET':\n        status = SITUACAO_CHOICE\n        return render(request, 'finances/receive/create.html', {\n            'status': status,\n            'categories': categories,\n        })\n    else:\n        data = request.POST\n        _value = data['valor']\n        _description = data['descricao']\n        _receive_date = data['dataRecebimento']\n        _status = data['situacao']\n        _category_id = data['classificacao']\n\n        category = Classificacao.objects.get(id=_category_id)\n\n        ContaReceber.objects.create(valor=_value, descricao=_description, dataRecebimento=_receive_date, situacao=_status, classificacao=category)\n\n        return redirect(resolve_url('home'))\n","sub_path":"meuapp/views/recebimentos.py","file_name":"recebimentos.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"291234484","text":"import os\n\nimport subprocess\nimport time\nfrom ffmpy import FFmpeg\nfrom ffmpy import *\ndef change_file_name(path):\n    # note: the path argument must end with a trailing separator\n    files = os.listdir(path)\n    n = 0\n    # print files\n    for f in files:\n        # old file name (path + file name)\n        oldname = path + files[n]\n        print(oldname)\n        # new file name\n        # newname=path + files[n][0:2]+'.wav' # 01-99\n        newname = path + files[n][0:3] + '.wav' # 100-201\n        print(newname)\n        # rename, e.g. 01设置四点十分的闹铃.wav -> 01.wav\n        os.rename(oldname, newname)\n        n += 1\n\n\n
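# Editorial note (hedged): the slice files[n][0:3] keeps only the first three\n# characters of each name, e.g. '100设置闹铃.wav' -> '100.wav'; this assumes every\n# file name starts with a numeric prefix of fixed width.\n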
def convertfiles(file_dir):\n    for root, dirs, files in os.walk(file_dir):\n        # print(unicode(root)) # current directory path\n        # print(dirs) # all sub-directories under the current path\n        # print(files) # all non-directory files under the current path\n        for f in files:\n            #print(f)\n            f=os.path.join(file_dir,f)\n            print(f)\n            if os.path.splitext(f)[1] == '.wav':\n                # fname = os.path.splitext(f)[0]\n                # fname=fname+'-cvt.wav'\n                # fname=os.path.join(file_dir,fname)\n                # print(fname)\n                # cmd_cvt = \"ffmpeg -i \" + f + \" -ac 2 \" + fname\n                # print(cmd_cvt)\n                # print(cmd_cvt)\n                # reval = os.popen(cmd_cvt)\n                # print(reval.read())\n\n                fname = os.path.splitext(f)[0]\n                # print(newname)\n                cmd_cvt = \"ffmpeg -i \" + f + \" -ac 2 \" + fname + \"-cvt.wav\"\n                print(cmd_cvt)\n                #cmd_cvt=\"dir\"\n                reval = os.popen(cmd_cvt)\n                print(reval.read())\n\n\n\nif __name__ == '__main__':\n    path = u'C:\\\\Users\\\\weiwei\\\\Desktop\\语音\\\\App\\\\1'\n    # change_file_name(path)\n    convertfiles(path)","sub_path":"trans_yuliao.py","file_name":"trans_yuliao.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"514968615","text":"import requests\nimport json\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\n\ndef getgallname(id,code,csrf): # converts a gall_code into a gall_name\n    _url = \"https://m.dcinside.com/gallog/list-direct\"\n    _hd = {\n        \"User-agent\" : \"Mozilla/5.0 (Linux; Android 5.1.1; SM-G955N Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36\",\n        \"Referer\" : \"https://m.dcinside.com/gallog/%s\" % (id),\n        \"X-CSRF-TOKEN\" : csrf\n    }\n    _data = {\n        \"gall_code\" : code\n    }\n    req = requests.post(url=_url,headers=_hd,data=_data)\n    print(req.text)\n    data = req.json()\n    print(\"\\\"\"+code+\"\\\" : \\\"\"+data[\"gall_id\"]+\"\\\",\")\n    return data[\"gall_id\"]\n\n\ndef appendlist(id,data,list,csrf,gallcodedic):\n    for v in data['gallog_list']['data']:\n        if v['cid'] in gallcodedic.keys(): # too many gall_code lookups get blocked, so repeated codes are served from the dictionary\n            list.append(v['pno']+\",\"+gallcodedic[v['cid']]+\",\"+v['cno']) # stored as [pno,gall_name,cno]\n        else:\n            gall_name = getgallname(id,v['cid'],csrf) # convert the gall_code into a gall_name\n            sleep(3)\n            gallcodedic[v['cid']] = gall_name\n            list.append(v['pno']+\",\"+gall_name+\",\"+v['cno']) # stored as [pno,gall_name,cno]\n    # pno = comment number\n    # cno = post number\n    return list\n\ndef getCSRFtoken(id,cookies,c):\n    _hd = {\n        \"User-Agent\" : \"Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36\",\n        \"Cookie\" : cookies\n    }\n    url = \"https://m.dcinside.com/gallog/%s/menu=%s\" % (id,c)\n    res = requests.get(url=url,headers=_hd)\n    html = res.text\n    soup = BeautifulSoup(html, 'lxml')\n    csrf = soup.find_all(\"meta\",{\"name\" : \"csrf-token\"}) # get csrf token\n    return csrf[0].get(\"content\")\n\ndef getlist(id,cookies,c):\n    returnlist = list()\n    gallcodedic = {\n        \"2489\" : \"superidea\",\n        \"1290\" : \"slife\",\n        \"1128\" : \"kyonggi\",\n        \"2127\" : \"ncdinos\",\n        \"1029\" : \"eunuch\",\n        \"7\" : \"fish\",\n        \"7101\" : \"tigers_new\",\n        \"1240\" : \"baseball_ab\",\n        \"4497\" : \"beard\",\n        \"2928\" : \"godverfool\",\n        \"463\" : \"football_k\",\n        \"362\" : \"game1\",\n        \"2117\" : \"football_new5\",\n        \"410\" : \"smile\",\n        \"2220\" : \"pokemon\",\n        \"2320\" : \"giants_new1\",\n        \"498\" : \"book\",\n        \"1668\" : \"ma9\",\n        \"197\" : \"movie2\",\n        \"332\" : \"pop\",\n        \"1506\" : \"parkboyoung\",\n        \"8602\" : \"winkgall\",\n        \"1320\" : \"chicken\",\n        \"1913\" : 
\"starcraft_new\",\n \"2446\" : \"baseball_new4\",\n \"6148\" : \"hobgoblin\",\n \"346\" : \"america_ani\",\n \"283\" : \"hiphop\",\n \"543\" : \"wwe\",\n \"19931\" : \"top12\",\n \"1725\" : \"jejungwon\",\n \"601\" : \"etc_g\",\n \"7234\" : \"stock_new2\",\n \"5437\" : \"bang_dream\",\n \"7126\" : \"epicseven\",\n \"364\" : \"fps\",\n \"19298\" : \"github\",\n \"839\" : \"hit\",\n \"2343\" : \"lgbt\",\n \"169\" : \"pmp\",\n \"193\" : \"room\",\n \"8599\" : \"kdani\",\n \"22257\" : \"game1_new\",\n \"7092\" : \"game_classic1\",\n \"791\" : \"sweets\",\n \"26249\" : \"baseball_new8\",\n \"10520\" : \"drama_new2\",\n \"596\" : \"food\",\n \"6406\" : \"yourname\",\n \"4043\" : \"netflix\",\n \"1333\" : \"sweet\",\n \"1802\" : \"loan\",\n \"1624\" : \"touhou\",\n \"187\" : \"dongau\",\n \"17494\" : \"doosanbears_new1\",\n \"2946\" : \"sunshine\",\n \"2872\" : \"r6\",\n \"2933\" : \"lostark\",\n \"17521\" : \"leagueoflegends2\",\n \"544\" : \"rhythmgame\",\n \"2119\" : \"comic_new1\",\n \"62\" : \"dog\",\n \"404\" : \"food_noodle\",\n \"1634\" : \"physicalscience\",\n \"657\" : \"bike\",\n \"1967\" : \"battlefield3\",\n \"11046\" : \"brawl\",\n \"2239\" : \"bitcoins\",\n \"147\" : \"samgugji\",\n \"17341\" : \"3017\",\n \"2741\" : \"firefighter\",\n \"3811\" : \"soulworker\",\n \"1830\" : \"exam_new\",\n \"1716\" : \"smartphone\",\n \"210\" : \"tree\",\n \"979\" : \"arbeit\",\n \"2256\" : \"idolmaster\",\n \"519\" : \"cat\",\n \"1814\" : \"elsword\",\n \"8368\" : \"mnet_k\",\n \"3455\" : \"yjrs\",\n \"211\" : \"history\",\n \"2099\" : \"worldoftanks\",\n \"1436\" : \"army\",\n \"1248\" : \"earphone\",\n \"13905\" : \"ib_new\",\n \"2623\" : \"japan_voice\",\n \"408\" : \"pride\",\n \"6965\" : \"aoegame\",\n \"2159\" : \"shingeki\",\n \"151\" : \"train\",\n \"14869\" : \"kaguya\",\n \"216\" : \"cartoon\",\n \"1872\" : \"pridepc_new3\",\n \"1980\" : \"comedy_new1\",\n \"1821\" : \"tabletpc\",\n \"237\" : \"toy\",\n \"407\" : \"lotto\",\n \"744\" : \"different\",\n \"2481\" : \"ff14\",\n \"649\" : \"reptile\",\n \"4634\" : \"paradox\",\n \"1804\" : \"cs\",\n \"733\" : \"programming\",\n \"20918\" : \"projectgirlgroup\",\n \"3311\" : \"pripara\",\n \"2264\" : \"hearthstone\",\n \"1099\" : \"adexam\",\n \"263\" : \"modernwar\",\n \"2429\" : \"twice\", \n \"1100\" : \"government\",\n \"2512\" : \"granblue\",\n \"10519\" : \"m_entertainer1\",\n \"7100\" : \"d_fighter_new1\",\n \"2254\" : \"lovelive\",\n \"1510\" : \"maplestory\",\n \"2428\" : \"monsterhunter\",\n \"262\" : \"divination\",\n \"2470\" : \"depression\",\n \"2269\" : \"kancolle\",\n \"2422\" : \"typemoon\",\n \"2100\" : \"fantasy_new\",\n \"22771\" : \"producex\",\n \"18353\" : \"football_new6\",\n \"180\" : \"extra\",\n \"8746\" : \"tmfro\",\n \"13901\" : \"etc_program2\",\n \"6017\" : \"piyo\",\n \"17801\" : \"rome\",\n \"17501\" : \"baseball_new7\",\n \"17313\" : \"vr\",\n \"2399\" : \"akb48\",\n \"1892\" : \"pc\",\n \"1933\" : 'wow_new3',\n \"39\" : \"nintendo\",\n \"619\" : \"diet\",\n \"2124\" : \"anigallers_new\",\n \"377\" : \"lotto2\",\n \"235\" : \"mystery\",\n \"239\" : \"immovables\",\n \"270\" : \"announcer2\",\n \"1976\" : \"ani1_new1\",\n \"165\" : \"English\",\n \"9308\" : \"wannaone\",\n \"3390\" : \"warframe\",\n \"4860\" : \"gfl\",\n \"2677\" : \"nogada\",\n \"15538\" : \"blnovel\",\n \"2\" : \"bicycle\",\n \"1928\" : \"jdh\",\n \"4591\" : \"fightgametekken\",\n \"2371\" : \"closers\",\n \"6889\" : \"315pro\",\n \"12975\" : \"gotoyome\",\n \"7126\" : \"epicseven\",\n \"20234\" : \"manjuugame\",\n \"18701\" : 
\"stream\",\n \"4310\" : \"grand3chase\",\n \"5766\" : \"dbd\",\n \"6592\" : \"pebble\",\n \"22589\" : \"lastorigin\",\n \"18845\" : \"langrisser\",\n \"6260\" : \"rimworld\",\n \"21062\" : \"stockus\",\n \"8443\" : \"theaterdays\",\n \"13916\" : \"vespa\",\n \"17700\" : \"gfl2\",\n \"2839\" : \"jusik\",\n \"8815\" : \"arma\",\n \"19342\" : \"asiaenter\",\n \"26859\" : \"shouta\",\n \"8725\" : \"coin\",\n \"9081\" : \"soviet\",\n \"4447\" : \"pumpitup\",\n \"1579\" : \"fight_game\",\n \"672\" : \"agony\",\n \"615\" : \"fishing\",\n \"3207\" : \"r6s\",\n \"495\" : \"baduk\",\n \"1080\" : \"societyexam\",\n \"274\" : \"plastic_ss\",\n \"1705\" : \"iphone\",\n \"2545\" : \"entertain\",\n \"2571\" : \"overwatch\",\n \"2395\" : \"got\",\n \"1092\" : \"faexam\",\n \"7309\" : \"old_game\",\n \"5804\" : \"car\",\n \"2498\" : \"sc\",\n \"2162\" : \"hero2009\",\n \"1813\" : \"gongik_new\",\n \"361\" : \"game_classic\",\n \"2391\" : \"hos\",\n \"13823\" : \"baseball_new6\",\n \"2215\" : \"baseball_new2\",\n \"2341\" : \"baseball_new3\",\n \"2009\" : \"baseball_new1\",\n \"7251\" : \"baseball_new5\",\n \"587\" : \"lineage\",\n \"2078\" : \"fashion_new1\",\n \"1434\" : \"baram\",\n \"1843\" : \"d_fighter_new\",\n \"2077\" : \"leagueoflegends1\",\n \"1885\" : \"leagueoflegends\",\n \"1879\" : \"sundaynight\",\n \"2438\" : \"stock_new1\",\n \"1841\" : \"news_new\",\n \"8858\" : \"female__singer\",\n \"8819\" : \"girlgroup\",\n \"25028\" : \"dota2autochess\",\n \"20958\" : \"johong\",\n \"12218\" : \"kawai3\",\n \"3411\" : \"ttwar\",\n \"23968\" : \"tullius\",\n \"10917\" : \"powerlifting\",\n \"13801\" : \"mfgo\",\n \"20510\" : \"fromis9real\",\n \"25314\" : \"purikone_redive\",\n \"17500\" : \"hanwhaeagles_new\",\n \"2381\" : \"fifaonline\",\n \"2008\" : \"doosanbears_new\",\n } \n csrf = getCSRFtoken(id,cookies,c)\n page = 1\n nowPage = \"https://m.dcinside.com/gallog/%s?menu=%s&page=1\" %(id,c)\n while(1):\n _hd = {\n \"User-Agent\" : \"Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36\",\n \"Cookie\" : cookies,\n \"Referer\" : \"https://m.dcinside.com/gallog/%s?menu=%s&page=%s\" % (id,c,page),\n \"X-TOKEN-CSRF\" : csrf,\n \"X-Requested-With\" : \"XMLHttpRequest\"\n }\n url = \"https://m.dcinside.com/ajax/response-galloglist\"\n _payload = {\n \"g_id\" : id,\n \"menu\" : c,\n \"page\" : page,\n \"list_more\" : \"1\",\n }\n try:\n res = requests.post(url,data=_payload,headers=_hd)\n data = res.json()\n appendlist(id,data,returnlist,csrf,gallcodedic)\n nowPage = \"http://m.dcinside.com/gallog/%s?menu=%s&page=%s\" %(id,c,page)\n snowPage = \"https://m.dcinside.com/gallog/%s?menu=%s&page=%s\" %(id,c,page) # last_page_url에서 뱉는값이 https 일때 가정\\)\n endPage = data['gallog_list']['last_page_url']\n if((nowPage == endPage) or (snowPage == endPage)):\n break\n else:\n page = page + 1\n except:\n print(\"감지\")\n return returnlist\n\ndef main(id,cookies): \n commentlist = getlist(id,cookies,\"R_all\")\n return commentlist","sub_path":"src/commentparselist.py","file_name":"commentparselist.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616680497","text":"from setuptools import setup, find_packages\n\nimport os\n\nfh = os.path.join(os.path.dirname(__file__), 'README.rst')\nwith open(fh, 'r') as f:\n long_description = f.read()\n\nsetup(name='spvcm',\n version='0.2.1post1',\n long_description = long_description,\n description='Fit 
spatial multilevel models and diagnose convergence',\n url='https://github.com/ljwolf/spvcm',\n author='Levi John Wolf',\n author_email='levi.john.wolf@gmail.com',\n license='3-Clause BSD',\n packages= find_packages(),\n install_requires=['numpy','scipy','libpysal', 'spreg', 'pandas','seaborn'],\n include_package_data=True,\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"242637553","text":"import math\r\nimport wave\r\nimport struct\r\nfrom scipy.signal import chirp\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#_________________________________________________________CONSTANTS_____________\r\nfreq = 440.0\r\ndata_size = 40000\r\nfname = \"WaveTest.wav\"\r\nfrate = 11025.0 # framerate as a float\r\namp = 64000.0 # multiplier for amplitude\r\n\r\nnchannels = 1\r\nsampwidth = 2\r\nframerate = int(frate)\r\nnframes = data_size\r\ncomptype = \"NONE\"\r\ncompname = \"not compressed\"\r\n\r\n#__________________________________________________________GLOBALS_______________\r\n\r\nmeend=False\r\n\r\nphase = 0\r\nprev_ampl=1\r\nduration = 0.5\r\n\r\ndampening = False\r\nhalf_time = duration\r\nhalf_length = framerate*half_time\r\nhalf_length_factor = 1\r\n\r\nharmonics = []\r\nphases = [0]*len(harmonics)\r\n\r\nmeend_method = 'linear'\r\n\r\nwav_file = wave.open(fname, \"w\")\r\nwav_file.setparams((nchannels, sampwidth, framerate, nframes,\r\n comptype, compname))\r\n\r\n# dictionary mapping swaras to codes\r\nswar_code = {'S':61, 'r':62, 'R':63, 'g':64,'G':65,'m':66,'M':67,'P':68,'d':69,'D':70,'n':71,'N':72,'-':'-','[':'[',']':']', '(':'(',')':')'}\r\n# inverse mapping using map and reversed \r\nswar = dict(map(reversed, swar_code.items()))\r\n\r\nbhup_pitches=[168.1, 178.6, 173, 192.3, 172.4, 180.2, 200, 181.8, 183.8, 178.6, 192.3, 175.4, 163.9, 156.2, 188.8, 208.3, 215.1, 217.4, 232.6, 227.3, 235.3, 212.8, 208.3, 215.1, 222.2, 206.2, 183.3, 190.5, 188.7, 185.2, 178.6, 188.7, 192.3, 180.2, 185.2, 190.5, 180.2, 188.8, 161.3, 156.2]\r\n#________________________________________________________________________________\r\n#__________________________________________________________CLASSES_______________\r\n \r\nclass Stream:\r\n def __init__(self):\r\n self.stream = [] # creates a new empty list for each stream object\r\n\r\n def append(self, note):\r\n self.stream.append(note)\r\n\r\n def write(self, wavfile = wav_file):\r\n for entity in self.stream:\r\n entity.write(wavfile)\r\n \r\n def add_duration(self, duration=0):\r\n self.stream[-1].add_duration(duration)\r\n \r\n \r\n#________________________________________________________________________________\r\nclass Note:\r\n def __init__(self, freq, duration=duration):\r\n self.freq = freq # an int or char\r\n self.duration = duration\r\n\r\n def write(self, wavfile = wav_file):\r\n write_to_wav(wavfile = wav_file, fbeg=self.freq, fend=self.freq, duration=self.duration, pluck=True)\r\n\r\n def add_duration(self, duration=0):\r\n self.duration+=duration\r\n#________________________________________________________________________________\r\nclass Meend:\r\n def __init__(self, beg, end, duration, pluck=True):\r\n self.beg = beg # an int \r\n self.end = end # an int \r\n self.duration = duration\r\n self.pluck = pluck\r\n\r\n def write(self, wavfile = wav_file):\r\n if self.pluck==True:\r\n write_to_wav(wavfile = wav_file, fbeg=self.beg, fend=self.end, duration=self.duration, 
method = meend_method, pluck=True)\r\n else:\r\n write_to_wav(wavfile = wav_file, fbeg=self.beg, fend=self.end, duration=self.duration, method = meend_method, pluck=False)\r\n \r\n def add_duration(self, duration=0):\r\n self.duration+=duration\r\n\r\n#________________________________________________________________________________\r\n#_____________________________________________________________FUNCTIONS__________\r\n\r\n\r\ndef parse(mystr):\r\n retstr = []\r\n for s in mystr:\r\n if s == '.':\r\n popped=retstr.pop()\r\n retstr.append(popped-12)\r\n elif s == \"'\":\r\n popped=retstr.pop()\r\n retstr.append(popped+12)\r\n elif s==' ' or s==';':\r\n continue\r\n #retstr.append(swar_code['-']) \r\n else:\r\n retstr.append(swar_code[s]) \r\n return retstr\r\n\r\ndef unparse(mystr):\r\n retstr = []\r\n for i in mystr:\r\n if i =='-':\r\n retstr.append(swar[i])\r\n elif int(i)<61:\r\n retstr.append(str(swar[i+12])+\".\")\r\n elif int(i)>72:\r\n retstr.append(str(swar[i-12])+\"'\")\r\n else:\r\n retstr.append(str(swar[i]))\r\n return retstr\r\n \r\ndef play(mystr, mystream, myduration=duration):\r\n global meend\r\n meend_count=0\r\n pluck=True\r\n for n in mystr:\r\n if n=='-':\r\n mystream.add_duration(myduration)\r\n elif n=='[':\r\n myduration/=2\r\n elif n==']':\r\n myduration*=2\r\n elif n==')':\r\n meend=False\r\n pluck=True\r\n elif n=='(':\r\n meend=True\r\n meend_count=0\r\n elif meend==True:\r\n if meend_count!=0:\r\n mystream.append(Meend(freq(prev),freq(n),duration=myduration, pluck=pluck))\r\n pluck=False\r\n prev=n\r\n meend_count+=1\r\n else:\r\n mystream.append(Note(freq(n),duration=myduration))\r\n prev=n\r\n\r\ndef freq(note): #takes int, returns freq\r\n a = 440 #frequency of A (common value is 440Hz)\r\n return (a / 32) * (2 ** ((note - 9) / 12))\r\n\r\ndef endphase(p1,p2,half_ampl=1):\r\n theta = math.degrees(math.acos(p2/half_ampl))\r\n if p2>p1:\r\n theta = 360 - theta\r\n return theta\r\n\r\ndef smoothen_end(sine_arr_x,fraction=0.01):\r\n beg = int((1-fraction)*len(sine_arr_x))\r\n end = len(sine_arr_x)\r\n for i in range(beg,end):\r\n sine_arr_x[i]*=(end-i)/(end-beg)\r\ndef smoothen_beg(sine_arr_x,fraction=0.01):\r\n beg = 0\r\n end = int(fraction*len(sine_arr_x))\r\n for i in range(beg,end):\r\n sine_arr_x[i]*=(i)/(end-beg)\r\n\r\ndef damper(x):\r\n return math.exp(-x/half_length)\r\ndef dampen(sine_arr_x, half_length=half_length):\r\n global prev_ampl\r\n for i in range(len(sine_arr_x)):\r\n sine_arr_x[i]*=math.exp(-i/half_length)\r\n \r\ndef write_to_wav(fbeg=freq, fend=freq, wavfile=wav_file, duration=duration, frate=frate, method='linear', pluck=True):\r\n global phase\r\n global dampen\r\n global harmonics\r\n global prev_ampl\r\n global half_length\r\n global half_length_factor\r\n \r\n phase+=360*fbeg/frate\r\n t = np.linspace(0, duration, int((duration)*frate))\r\n sine_arr_x = chirp(t, f0=fbeg, f1=fend, t1=duration, method=method, vertex_zero=False, phi=phase)\r\n phase = endphase(sine_arr_x[-2], sine_arr_x[-1])\r\n \r\n \r\n for i in range(len(harmonics)):\r\n factor = i+2\r\n ch = chirp(t, f0=factor*fbeg, f1=factor*fend, t1=duration, method=method, vertex_zero=False, phi=phases[i])\r\n sine_arr_x += harmonics[i]*ch\r\n \r\n if pluck==False and dampening==True:\r\n sine_arr_x*=prev_ampl\r\n else:\r\n prev_ampl = 1\r\n #if pluck ==True:\r\n #smoothen_beg(sine_arr_x,fraction=0.01)\r\n #smoothen_end(sine_arr_x,fraction=0.01)\r\n\r\n if dampening==True:\r\n dampen(sine_arr_x)\r\n prev_ampl*=math.exp(-(len(sine_arr_x)-1)/half_length)\r\n \r\n \r\n\r\n 
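# --- editor's note: illustrative sketch, not part of the original script ---
# The phase bookkeeping above is what lets consecutive chirp() segments join
# without audible clicks: each segment is generated with phi= set to the angle
# where the previous segment stopped. A minimal standalone check of the idea,
# assuming only numpy/scipy (the values below are hypothetical):
#
#   import numpy as np
#   from scipy.signal import chirp
#   t = np.linspace(0, 0.5, 5512)
#   seg = chirp(t, f0=440, f1=440, t1=0.5, phi=0)   # phi is in degrees
#   # endphase(seg[-2], seg[-1]) recovers the stopping angle, which the next
#   # write_to_wav() call hands back to chirp() via the global `phase`.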
sine_arr_x/=1+sum(map(float,harmonics))\r\n print(sine_arr_x[0],sine_arr_x[1],sine_arr_x[2],sine_arr_x[3],sine_arr_x[-2],sine_arr_x[-1])\r\n for s in sine_arr_x:\r\n # write the audio frames to file\r\n wav_file.writeframes(struct.pack('h', int(s*amp/2)))\r\n \r\n\r\n\r\n\r\n#________________________________________________________________________________\r\n#______________________________________________________________MAIN______________ \r\n\r\nmystream = Stream()\r\nfor i in range(1,len(bhup_pitches)):\r\n print(i)\r\n mystream.append(Meend(2*bhup_pitches[i-1],2*bhup_pitches[i],duration=0.1, pluck=True))\r\n\"\"\"\r\n#read from file\r\nstrs = []\r\nwith open('bhairav_bandish_copy.txt') as f:\r\n strs = f.read().splitlines()\r\n\r\nparsed_strs = []\r\nfor mystr in strs:\r\n parsed_strs.append(parse(mystr)) \r\n#print(*parsed_strs, sep=\"\\n\")\r\nfor mystr in parsed_strs[:10]:\r\n play(mystr, mystream)\r\n print(' '.join(map(str, mystr)) )\r\n\"\"\"\r\nmystream.write()\r\nwav_file.close()\r\n\r\n\r\n#________________________________________________________________________________\r\n#________________________________________________________________________________\r\n\r\n\r\n\r\n\"\"\"\r\nphase = 0 \r\n#write_to_wav(freq(71),freq(71))\r\n#write_to_wav(freq(69),freq(68), duration = 1, method=\"quadratic\")\r\nwrite_to_wav(freq(68),freq(68), duration = 1)\r\n#write_to_wav(freq(69),freq(68), duration = 1, method=\"quadratic\")\r\nwrite_to_wav(freq(68),freq(68), duration = 1)\r\nwrite_to_wav(freq(67),freq(67))\r\nwav_file.close() \r\n\"\"\"\r\n\r\n\r\n\r\n\"\"\"\r\nt_beg=0.0\r\nt_end=1.0\r\nfa=277.18\r\nfb=415.3047\r\n#sine_list_x = []\r\n#for x in range(data_size):\r\n# sine_list_x.append(math.sin(2*math.pi*freq*(x/frate)))\r\n\r\nt = np.linspace(t_beg, t_end, int((t_end-t_beg)*frate))\r\nsine_list_x = chirp(t, f0=fa, f1=fb, t1=t_end, method='quadratic', phi=-90)\r\nsine_list_x2 = chirp(t, f0=fb, f1=fa, t1=t_end, method='linear')\r\n#np.append(sine_list_x,sine_list_x2)\r\n\r\nplt.plot(sine_list_x[:100])\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\n\"\"\"\r\nfor s in sine_list_x:\r\n # write the audio frames to file\r\n wav_file.writeframes(struct.pack('h', int(s*amp/2)))\r\nfor s in sine_list_x2:\r\n # write the audio frames to file\r\n wav_file.writeframes(struct.pack('h', int(s*amp/2)))\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n","sub_path":"archive/meendmusic_expt.py","file_name":"meendmusic_expt.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"319607343","text":"# Write a Python program that will calculate the length of a string (We already have a function lenthat does that, but we want to implement our own)\n\nmy_str = input(\"Enter a string: \")\n\ncount = 0\n\nfor c in my_str:\n count += 1\n\nprint(count)","sub_path":"venv/Lecture 5/Exercise 2.py","file_name":"Exercise 2.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"519007604","text":"# much inspiration from sentdex, especially\n# https://pythonprogramming.net/own-environment-q-learning-reinforcement-learning-python-tutorial/?completed=/q-learning-analysis-reinforcement-learning-python-tutorial/\n\n\nimport pygame\nimport time\nimport random\nimport os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom resources.mobs import Predator, Prey, Food\n\n''' TODO\n\n condense/commonize training and running\n multi-step future_q\n 2 
q_tables for mobs (1 for target, 1 for flee; prioritize flee for action)\n'''\n\n\npygame.init()\n\nMODE='prey'\n\n# resources\nRES = 'resources'\nLOG = open(os.path.join(RES, 'game.log'), 'w')\n\n# pygame setup\nWIDTH = 400 # 1080\nHEIGHT = 400 # 800\ngameDisplay = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\nFPS = 30\n\n# Q learning variables [DEFAULTS]\nEPISODES = 1000 # 22500 # with epsilon decay rate at 0.9998, this corresponds to <1% random moves\nSHOW = 1000 # how often to visualize\nFRAMES = 100 # per episode\nEPSILON = 0.9 # random action threshhold\nDECAY_RATE = 0.9998 # espilon *= DECAY_RATE\n\n# load/save Q tables\nTABLES = 'q_tables'\nPREY_TABLE = False # 'Prey-7965313'\nPRED_TABLE = False # 'Predator-8637585'\nSAVE_Q = True\n\n# plotting\nPLOTS = 'plots'\nM_AVG = 50\n\n# colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\n\ndef sim_init(food=0, prey=0, pred=0):\n\n mobs = init_mobs(food=food, prey=(prey, PREY_TABLE), pred=(pred, PRED_TABLE))\n\n epsilon = EPSILON\n\n rewards = {}\n for mob_type, mob_list in mobs.items():\n for mob in mob_list:\n rewards[mob] = [ 0 ] * EPISODES\n\n return mobs, epsilon, rewards\n\n\ndef init_mobs(food=0, prey=(0, False), pred=(0, False)):\n mobs = {'Food': [],\n 'Prey': [],\n 'Predator': [],\n }\n \n for f in range(food):\n mobs['Food'].append(Food(x=0, y=0))\n \n for p in range(prey[0]):\n mobs['Prey'].append(Prey(x=0, y=0, load=prey[1]))\n \n for p in range(pred[0]):\n mobs['Predator'].append(Predator(x=0, y=0, load=pred[1]))\n \n print('\\n' + '='*60 + '\\n')\n return mobs\n\n\ndef reset_mobs(mobs=None, center=None):\n for mob_type, mob_list in mobs.items():\n for mob in mob_list:\n x = random.randint(0, WIDTH)\n y = random.randint(0, HEIGHT)\n if mob_type == center:\n x = WIDTH / 2\n y = HEIGHT / 2\n mob.reset(x=x, y=y)\n\n\ndef display_stats(episode, frame, mobs):\n font = pygame.font.SysFont(None, 32)\n \n text = font.render('episode/frame: {}/{}'.format(episode, frame), True, WHITE)\n gameDisplay.blit(text,(0, 0))\n \n for num, key in enumerate(mobs.keys()):\n tally = 0\n total = len(mobs[key])\n for mob in mobs[key]:\n tally = tally + 1 if mob.alive else tally\n message = '{}: {}/{}'.format(key, tally, total)\n text = font.render(message, True, WHITE)\n gameDisplay.blit(text,(0, (num+1)*40))\n\n\ndef mob_update(mode='run', mobs=None, epsilon=0, rewards=None, episode=0, allow_prey_movement=True):\n end_episode = False\n update_types = ('Food', 'Prey', 'Predator') if allow_prey_movement else ('Food', 'Predator')\n update_q_tables = ('Prey') if mode in ('prey', 'evade') else ('Predator') if mode in ('pred') else ('Prey', 'Predator')\n \n \n for mob_type, mob_list in mobs.items():\n for mob in mob_list:\n if mob.alive and mob_type in update_types:\n q_key = mob.observe(mobs=mobs) # find the closest food/prey/predator\n mx, my, choice = mob.action(epsilon=epsilon, q_key=q_key, max_dims=(WIDTH, HEIGHT)) # take an action\n reward, _ = mob.check(mobs=mobs, mx=mx, my=my) # check to see what has happened\n if mob_type in update_q_tables:\n mob.update_q(mobs=mobs, q_key=q_key, choice=choice, reward=reward) # learn from what mob did\n \n rewards[mob][episode] += (reward[0] + reward[1]) # tally for episode rewards\n elif not mob.alive:\n end_episode = True # die when one of the mobs does\n \n return end_episode\n\n\ndef display_mobs(show_this=False, mobs=None):\n if not show_this:\n return\n for mob_type, mob_list in mobs.items():\n for mob in mob_list:\n if mob.alive:\n mob.display(gameDisplay)\n\n\ndef 
episode_cleanup(episode, mobs, rewards):\n print('Episode {}/{} completed at {}'.format(episode+1, EPISODES, time.asctime()))\n for mob_type, mob_list in mobs.items():\n if mob_type != 'Food':\n for mob in mob_list:\n print('{} {:>8}: {:<9} ({})'.format(mob.__class__, mob.serial, round(rewards[mob][episode], 3), mob.alive))\n print('\\n' + '='*60 + '\\n')\n\n\ndef save_q_tables(save_enabled, mobs=None, which=('Prey', 'Predator')):\n if save_enabled:\n for mob_type in which:\n for mob in mobs[mob_type]:\n mob.q_table.save(os.path.join(RES, TABLES), mob_type, mob.serial)\n else:\n print('Q table saving disabled')\n\n\ndef plot_q_tables(mobs=None, valued_customer=None):\n mobs_to_plot = [valued_customer] if valued_customer else ('Prey', 'Predator')\n \n for mob_type in mobs_to_plot: # 'Food' has no q_table\n for mob in mobs[mob_type]:\n mob.q_table.plot_q(os.path.join(RES, PLOTS, '{}_Q.png'.format(mob.serial))) # moving out of game loop removed seg fault\n\n\ndef plot_rewards(mobs=None, rewards=None, valued_customer=None):\n mobs_to_plot = [valued_customer] if valued_customer else ('Prey', 'Predator')\n \n for mob_type, mob_list in mobs.items():\n if mob_type in mobs_to_plot:\n for mob in mob_list:\n plt.plot(rewards[mob], label='{}:{}'.format(mob_type, mob.serial))\n moving_avg = np.convolve(rewards[mob], np.ones((M_AVG,))/M_AVG, mode='valid')\n plt.plot(moving_avg, label='moving average, {}'.format(M_AVG))\n plt.xlabel('Episode')\n plt.ylabel('Reward')\n plt.legend()\n fig_name = os.path.join(RES, PLOTS, '{}-{}.png'.format(mob.__class__, mob.serial))\n print('Saving episode rewards plot as {}'.format(fig_name))\n plt.savefig(fig_name)\n plt.close()\n\n\ndef exit_sim():\n fade_out = 1\n pygame.mixer.music.fadeout(fade_out * 1000)\n LOG.write('\\nExiting normally!\\n')\n #LOG.close() # FIXME when you remove traceback\n time.sleep(fade_out)\n pygame.display.quit()\n pygame.quit()\n\n\ndef train(mode='prey', food=0, prey=(0, False), pred=0):\n global WIDTH, HEIGHT\n\n allow_prey_movement = prey[1]\n valued_customer = 'Prey' if allow_prey_movement else 'Predator'\n \n # set the sceen size\n WIDTH = int(Prey.sight * 1.5) if pred == 0 else int(Predator.sight * 1.5)\n HEIGHT = WIDTH\n pygame.display.set_mode((WIDTH, HEIGHT))\n mobs, epsilon, rewards = sim_init(food=food, prey=prey[0], pred=pred)\n\n for episode in range(EPISODES):\n show_this = True if episode % SHOW == 0 else False\n end_ep = False\n \n # reset all the mobs for this episode\n reset_mobs(mobs=mobs, center=valued_customer)\n \n # run the episode\n for k in range(FRAMES):\n gameDisplay.fill(BLACK)\n \n # update mobs\n end_ep = mob_update(mode=mode, mobs=mobs, epsilon=epsilon, rewards=rewards, episode=episode, allow_prey_movement=allow_prey_movement)\n\n display_mobs(show_this=show_this, mobs=mobs)\n \n # complete the render and wait to cycle\n display_stats(episode, k+1, mobs)\n pygame.display.update()\n if show_this:\n clock.tick(FPS)\n else:\n clock.tick(10**10)\n\n if end_ep:\n if show_this:\n time.sleep(1) # pause at the end state\n break\n\n # clean up the episode\n episode_cleanup(episode, mobs, rewards)\n epsilon *= DECAY_RATE\n\n save_q_tables(SAVE_Q, mobs=mobs, which=[valued_customer])\n\n return mobs, rewards, valued_customer\n\n\ndef run(mode='run', food=0, prey=0, pred=0):\n mobs, epsilon, rewards = sim_init(food=food, prey=prey, pred=pred)\n \n for episode in range(EPISODES):\n show_this = True if episode % SHOW == 0 else False\n \n # reset all the mobs for this episode\n reset_mobs(mobs=mobs)\n \n # run the episode\n 
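# --- editor's note: illustrative sketch, not part of the original script ---
# mob_update() below drives one epsilon-greedy step per mob. The core pattern,
# as a minimal standalone sketch (the names here are hypothetical):
#
#   import random
#   def epsilon_greedy(q_row, epsilon, n_actions):
#       # explore with probability epsilon, otherwise exploit argmax(Q)
#       if random.random() < epsilon:
#           return random.randrange(n_actions)
#       return max(range(n_actions), key=lambda a: q_row[a])
#
# EPSILON starts at 0.9 and is multiplied by DECAY_RATE (0.9998) after each
# episode, so exploration gradually gives way to exploitation.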
for k in range(FRAMES):\n gameDisplay.fill(BLACK)\n \n # update all mobs\n mob_update(mode=mode, mobs=mobs, epsilon=epsilon, rewards=rewards, episode=episode)\n \n display_mobs(show_this=show_this, mobs=mobs)\n \n # complete the render and wait to cycle\n display_stats(episode, k+1, mobs)\n pygame.display.update()\n if show_this:\n clock.tick(FPS)\n else:\n clock.tick(10**10)\n\n # clean up the episode\n episode_cleanup(episode, mobs, rewards)\n epsilon *= DECAY_RATE\n\n save_q_tables(SAVE_Q, mobs=mobs)\n\n return mobs, rewards\n\n\ndef main():\n parser = argparse.ArgumentParser(description='''Predator/Prey AI Trainer and Visualizer''')\n\n parser.add_argument('-m', '--mode', help='training/execution mode for AI', default=MODE)\n\n # mob selection\n parser.add_argument('--pred', help='number of predator mobs', default=0)\n parser.add_argument('--prey', help='number of prey mobs', default=1)\n parser.add_argument('--food', help='number of food mobs', default=100)\n\n # load/save mob q_tables\n parser.add_argument('--q_pred', help='pre-generated predator Q table', default=False)\n parser.add_argument('--q_prey', help='pre-generated prey Q table', default=False)\n parser.add_argument('--save-q', help='save final Q tables', dest='save_q', action='store_true')\n parser.add_argument('--no-q', help='save final Q tables', dest='save_q', action='store_false')\n parser.set_defaults(save_q=SAVE_Q)\n parser.add_argument('--no-plot', help='don\\'t plot episode rewards', dest='plot_rew', action='store_false')\n parser.set_defaults(plot_rew=True)\n parser.add_argument('--mvg-avg', help='moving average history for plot', default=M_AVG)\n\n # training variables\n parser.add_argument('--episodes', help='number of training episodes', default=EPISODES)\n parser.add_argument('--show', help='regularity to visualize environment', default=SHOW)\n parser.add_argument('--frames', help='steps per training episode', default=FRAMES)\n parser.add_argument('--epsilon', help='random decision threshold', default=EPSILON)\n parser.add_argument('--decay', help='random decision threshold decay rate', default=DECAY_RATE)\n\n args = parser.parse_args()\n mobs = None\n rewards = None\n valued_customer = False\n\n globals()['PREY_TABLE'] = False if not args.q_prey else os.path.join(RES, TABLES, args.q_prey)\n globals()['PRED_TABLE'] = False if not args.q_pred else os.path.join(RES, TABLES, args.q_pred)\n globals()['SAVE_Q'] = args.save_q\n\n globals()['EPISODES'] = int(args.episodes)\n globals()['SHOW'] = int(args.show)\n globals()['FRAMES'] = int(args.frames)\n globals()['EPSILON'] = float(args.epsilon)\n globals()['DECAY_RATE'] = float(args.decay)\n \n globals()['M_AVG'] = int(args.mvg_avg)\n\n if args.mode == 'pred':\n mobs, rewards, valued_customer = train(mode=args.mode, food=0, prey=(1, False), pred=1) # train the predator Q table\n elif args.mode == 'prey':\n mobs, rewards, valued_customer = train(mode=args.mode, food=1, prey=(1, True), pred=0) # train the prey Q table (target)\n elif args.mode == 'evade':\n mobs, rewards, valued_customer = train(mode=args.mode, food=0, prey=(1, True), pred=1) # train the prey Q table (flee)\n else:\n mobs, rewards = run(food=int(args.food), prey=int(args.prey), pred=int(args.pred))\n\n exit_sim()\n\n plot_q_tables(mobs=mobs, valued_customer=valued_customer)\n if args.plot_rew:\n plot_rewards(mobs=mobs, rewards=rewards, valued_customer=valued_customer)\n\n\nif __name__ == '__main__':\n import traceback\n tb = 'no error'\n try:\n main() # FIXME just run this\n except Exception as e:\n 
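# --- editor's note: illustrative sketch, not part of the original script ---
# main() above injects CLI values by assigning into globals(); that works only
# because train()/run() read the module-level constants at call time. A more
# explicit (hypothetical) alternative would thread them through as arguments:
#
#   train(mode=args.mode, episodes=int(args.episodes), frames=int(args.frames))
#
# which avoids the hidden coupling between the argument parser and module state.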
LOG.write('{}\\n'.format(e))\n tb = traceback.format_exc()\n finally:\n LOG.write('{}\\n'.format(tb))\n LOG.write('End!')\n LOG.close()\n print('Exiting main() with {}'.format(tb))\n","sub_path":"predprey/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"182142686","text":"from __future__ import print_function\nfrom six.moves import xrange\nimport os\nimport better_exceptions\nimport tensorflow as tf\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom model import VQVAE, _cifar10_arch\n\n# The codes are borrowed from\n# https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py\n# https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py\nDATA_DIR = 'datasets/cifar10'\nDATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'\ndef maybe_download_and_extract():\n import sys, tarfile\n from six.moves import urllib\n \"\"\"Download and extract the tarball from Alex's website.\"\"\"\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(DATA_DIR, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(DATA_DIR, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(DATA_DIR)\n\ndef read_cifar10(filename_queue):\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n record_bytes = 1 + 32*32*3\n\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n result.label = tf.cast(\n tf.strided_slice(record_bytes, [0], [1]), tf.int32)\n depth_major = tf.reshape(\n tf.strided_slice(record_bytes, [1],\n [1 + 32*32*3]),\n [3, 32, 32])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n return result\n\ndef get_image(train=True,num_epochs=None):\n maybe_download_and_extract()\n if train:\n filenames = [os.path.join(DATA_DIR, 'cifar-10-batches-bin', 'data_batch_%d.bin' % i) for i in xrange(1, 6)]\n else:\n filenames = [os.path.join(DATA_DIR, 'cifar-10-batches-bin', 'test_batch.bin')]\n filename_queue = tf.train.string_input_producer(filenames,num_epochs=num_epochs)\n read_input = read_cifar10(filename_queue)\n return tf.cast(read_input.uint8image, tf.float32) / 255.0\n\n\ndef main(config,\n RANDOM_SEED,\n LOG_DIR,\n TRAIN_NUM,\n BATCH_SIZE,\n LEARNING_RATE,\n DECAY_VAL,\n DECAY_STEPS,\n DECAY_STAIRCASE,\n BETA,\n K,\n D,\n SAVE_PERIOD,\n SUMMARY_PERIOD):\n np.random.seed(RANDOM_SEED)\n tf.set_random_seed(RANDOM_SEED)\n\n # >>>>>>> DATASET\n image = get_image()\n images = tf.train.shuffle_batch(\n [image],\n batch_size=BATCH_SIZE,\n num_threads=4,\n capacity=BATCH_SIZE*10,\n min_after_dequeue=BATCH_SIZE*2)\n valid_image = get_image(False)\n valid_images = tf.train.shuffle_batch(\n [valid_image],\n batch_size=BATCH_SIZE,\n num_threads=1,\n capacity=BATCH_SIZE*10,\n min_after_dequeue=BATCH_SIZE*2)\n # 
<<<<<<<\n\n # >>>>>>> MODEL\n with tf.variable_scope('train'):\n global_step = tf.Variable(0, trainable=False)\n learning_rate = tf.train.exponential_decay(LEARNING_RATE, global_step, DECAY_STEPS, DECAY_VAL, staircase=DECAY_STAIRCASE)\n tf.summary.scalar('lr',learning_rate)\n\n with tf.variable_scope('params') as params:\n pass\n net = VQVAE(learning_rate,global_step,BETA,images,K,D,_cifar10_arch,params,True)\n\n with tf.variable_scope('valid'):\n params.reuse_variables()\n valid_net = VQVAE(None,None,BETA,valid_images,K,D,_cifar10_arch,params,False)\n\n with tf.variable_scope('misc'):\n # Summary Operations\n tf.summary.scalar('loss',net.loss)\n tf.summary.scalar('recon',net.recon)\n tf.summary.scalar('vq',net.vq)\n tf.summary.scalar('commit',BETA*net.commit)\n tf.summary.scalar('nll',tf.reduce_mean(net.nll))\n tf.summary.image('origin',images,max_outputs=4)\n tf.summary.image('recon',net.p_x_z,max_outputs=4)\n # TODO: logliklihood\n\n summary_op = tf.summary.merge_all()\n\n # Initialize op\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n config_summary = tf.summary.text('TrainConfig', tf.convert_to_tensor(config.as_matrix()), collections=[])\n\n extended_summary_op = tf.summary.merge([\n tf.summary.scalar('valid_loss',valid_net.loss),\n tf.summary.scalar('valid_recon',valid_net.recon),\n tf.summary.scalar('valid_vq',valid_net.vq),\n tf.summary.scalar('valid_commit',BETA*valid_net.commit),\n tf.summary.scalar('valid_nll',tf.reduce_mean(valid_net.nll)),\n tf.summary.image('valid_origin',valid_images,max_outputs=4),\n tf.summary.image('valid_recon',valid_net.p_x_z,max_outputs=4),\n ])\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Run!\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n sess.graph.finalize()\n sess.run(init_op)\n\n summary_writer = tf.summary.FileWriter(LOG_DIR,sess.graph)\n summary_writer.add_summary(config_summary.eval(session=sess))\n\n try:\n # Start Queueing\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord,sess=sess)\n for step in tqdm(xrange(TRAIN_NUM),dynamic_ncols=True):\n it,loss,_ = sess.run([global_step,net.loss,net.train_op])\n\n if( it % SAVE_PERIOD == 0 ):\n net.save(sess,LOG_DIR,step=it)\n\n if( it % SUMMARY_PERIOD == 0 ):\n tqdm.write('[%5d] Loss: %1.3f'%(it,loss))\n summary = sess.run(summary_op)\n summary_writer.add_summary(summary,it)\n\n if( it % (SUMMARY_PERIOD*2) == 0 ): #Extended Summary\n summary = sess.run(extended_summary_op)\n summary_writer.add_summary(summary,it)\n\n except Exception as e:\n coord.request_stop(e)\n finally :\n net.save(sess,LOG_DIR)\n\n coord.request_stop()\n coord.join(threads)\n\n net.save(sess,LOG_DIR)\n\ndef test(MODEL,\n BETA,\n K,\n D,\n **kwargs):\n # >>>>>>> DATASET\n image = get_image(num_epochs=1)\n images = tf.train.batch(\n [image],\n batch_size=100,\n num_threads=1,\n capacity=100,\n allow_smaller_final_batch=True)\n valid_image = get_image(False,num_epochs=1)\n valid_images = tf.train.batch(\n [valid_image],\n batch_size=100,\n num_threads=1,\n capacity=100,\n allow_smaller_final_batch=True)\n # <<<<<<<\n\n # >>>>>>> MODEL\n with tf.variable_scope('net'):\n with tf.variable_scope('params') as params:\n pass\n x = tf.placeholder(tf.float32,[None,32,32,3])\n net= VQVAE(None,None,BETA,x,K,D,_cifar10_arch,params,False)\n\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Run!\n config = 
tf.ConfigProto()\n    config.gpu_options.allow_growth = True\n    sess = tf.Session(config=config)\n    sess.graph.finalize()\n    sess.run(init_op)\n    net.load(sess,MODEL)\n\n\n    coord = tf.train.Coordinator()\n    threads = tf.train.start_queue_runners(coord=coord,sess=sess)\n    try:\n        nlls = []\n        while not coord.should_stop():\n            nlls.append(\n                sess.run(net.nll,feed_dict={x:sess.run(valid_images)}))\n            print('.', end='', flush=True)\n    except tf.errors.OutOfRangeError:\n        nlls = np.concatenate(nlls,axis=0)\n        print(nlls.shape)\n        print('NLL for test set: %f bits/dims'%(np.mean(nlls)))\n\n    try:\n        nlls = []\n        while not coord.should_stop():\n            nlls.append(\n                sess.run(net.nll,feed_dict={x:sess.run(images)}))\n            print('.', end='', flush=True)\n    except tf.errors.OutOfRangeError:\n        nlls = np.concatenate(nlls,axis=0)\n        print(nlls.shape)\n        print('NLL for training set: %f bits/dims'%(np.mean(nlls)))\n\n    coord.request_stop()\n    coord.join(threads)\n\ndef get_default_param():\n    from datetime import datetime\n    now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n    return {\n        'LOG_DIR':'./log/cifar10/%s'%(now),\n\n        'TRAIN_NUM' : 250000, #Size corresponds to one epoch\n        'BATCH_SIZE': 128,\n\n        'LEARNING_RATE' : 0.0002,\n        'DECAY_VAL' : 1.0,\n        'DECAY_STEPS' : 20000, # Half of the training procedure.\n        'DECAY_STAIRCASE' : False,\n\n        'BETA':0.25,\n        'K':10,\n        'D':256,\n\n        'SUMMARY_PERIOD' : 20,\n        'SAVE_PERIOD' : 10000,\n        'RANDOM_SEED': 0,\n    }\n\nif __name__ == \"__main__\":\n    class MyConfig(dict):\n        pass\n    params = get_default_param()\n    config = MyConfig(params)\n    def as_matrix() :\n        return [[k, str(w)] for k, w in config.items()]\n    config.as_matrix = as_matrix\n\n    main(config=config,**config)\n    #test(MODEL='models/cifar10/last.ckpt',**config)\n","sub_path":"cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"18385680","text":"import random\n\n\nclass Select:\n    def __init__(self):\n        pass\n\n    def preprocess(self, pop, fit):\n        self.pop = pop\n        self.fit = fit\n        self.accum = [sum(self.fit[0:i]) for i in range(0, len(self.fit) + 1)]\n        # print(self.accum)\n\n    def __call__(self):\n        rand = random.uniform(0, self.accum[-1])\n        index = self.__bin_search(self.accum, rand)\n        return self.pop.accesspop(index)\n\n    @staticmethod\n    def __bin_search(x, i):\n        end = len(x) - 1\n        beg = 0\n        mid = 0\n        while beg < end:\n            mid = (end + beg) // 2\n            if x[mid] <= i < x[mid + 1]:\n                break\n            elif i >= x[mid + 1]:\n                beg = mid + 1\n            else:\n                end = mid\n\n        return mid\n","sub_path":"genetic_operator/selection/roulette.py","file_name":"roulette.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"43330105","text":"\nimport time\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\n\r\ndata = pd.read_csv('sh_area_level2.csv')\r\n# data = pd.read_csv('sh_area_level3.csv')\r\n\r\n# Read the first five rows of the data; for the last five, use data.tail()\r\nprint(data.head(5))\r\n\r\n# Let's look at the dimensions of the data:\r\nprint(data.shape)\r\n\r\n# Now we prepare the sample features X; here the 'lng' and 'lat' columns are used as the features.\r\nX = data[['lng', 'lat']]\r\nprint(X.head())\r\n\r\n# #############################################################################\r\n# Compute clustering with KMeans\r\n\r\n# Convert to a numpy array; without this conversion the indexing below raises an error\r\nX=np.array(X)\r\n#print(X.head())\r\n\r\nn_clusters = 5\r\n\r\nk_means = KMeans(n_clusters)\r\nt0 = time.time()\r\ncls = k_means.fit(X)\r\nt_batch = time.time() - t0\r\n\r\ncls.labels_\r\n\r\n# Plot the clusters\r\nmarkers = ['^', 'x', 'o', '*', 
'+']\r\nfor i in range(n_clusters):\r\n    members = cls.labels_ == i\r\n    plt.scatter( X[members,0], X[members,1], s=60, marker=markers[i], c='b', alpha= 0.5)\r\n\r\n\r\n\r\nprint(t_batch)\r\nprint(k_means.inertia_)\r\nplt.title('K-Means')\r\nplt.show()\r\n\r\n\r\n","sub_path":"白话大数据与机器学习/聚类/KMeans/city_cluster_kmeans.py","file_name":"city_cluster_kmeans.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516004652","text":"import sys,threading,time # thread \nimport serial # zigbee communication\nimport binascii,encodings # binary/unicode network transfer\nimport re # regular expressions\nimport socket # socket communication\nimport struct # binary communication\n\nclass ComThread: # thread class\n    def __init__(self, Port = None): # Arduino or Raspberry Pi port\n        self.l_serial = None # serial communication\n        self.alive = False # communication status\n        self.waitEnd = None # end-of-wait event\n        self.port = Port # port\n\n    def waiting(self): # wait/pause\n        if not self.waitEnd is None:\n            self.waitEnd.wait()\n\n    def SetStopEvent(self): # event fired when stopping\n        if not self.waitEnd is None:\n            self.waitEnd.set()\n        self.alive = False\n        self.stop()\n\n    def start(self): # start\n        self.l_serial = serial.Serial() # zigbee serial\n        self.l_serial.port = self.port # serial port\n        self.l_serial.baudrate = 19200 \n        self.l_serial.timeout = 2\n        self.l_serial.open()\n        # threading\n        if self.l_serial.isOpen(): # connection opened (socket)\n            self.waitEnd = threading.Event() # thread event\n            self.alive = True # communication alive flag\n            self.thread_read = None \n            self.thread_read = threading.Thread(target=self.FirstReader)\n            self.thread_read.setDaemon(1)\n            self.thread_read.start()\n            return True\n        else:\n            return False\n\n    def FirstReader(self): # first reader thread function\n        while self.alive:\n            time.sleep(0.1) # brief pause\n            try:\n                data = ''\n                n = self.l_serial.inWaiting()\n                if n:\n                    data = data + self.l_serial.read(n)\n                    for l in range(len(data)): # for each element of the data\n                        print('%02X' % ord(data[l])) \n                \n            except Exception as ex:\n                print(str(ex)) # exception handling\n\n        self.waitEnd.set() # signal the event\n        self.alive = False\n\n    def stop(self):\n        self.alive = False\n        self.thread_read.join()\n        if self.l_serial.isOpen():\n            self.l_serial.close()\n\nif __name__ == '__main__':\n    rt = ComThread() # create the thread object\n    try: # thread exception handling\n        if rt.start(): \n            rt.waiting()\n            rt.stop()\n        else:\n            pass \n    except Exception as se:\n        print(str(se))\n\n    if rt.alive: # if the work has finished\n        rt.stop() \n        print('')\n        print('End OK .')\n    del rt # dispose of the thread class\n","sub_path":"Zigbee.py","file_name":"Zigbee.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"422156307","text":"# Call Module\nimport os\nimport re\nimport sys\nimport sqlite3\nimport webbrowser\nimport zipfile\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import * \nfrom requests import get\nfrom pathlib import Path\n\n# File Download Function\ndef download(url, file_name):\n    with open(file_name, \"wb\") as file:\n        response = get(url)\n        file.write(response.content)\n\n# Log Timestamp Function\ndef timestamp():\n    import datetime\n    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n# Log Function\ndef log(message):\n    message = timestamp() + ' > ' + message\n    print(message, file=log_file)\n\n# Open log file (log.txt)\nlog_file = open(\"log.txt\", 'w', -1, 'utf-8')\nlog(\"*** Start Program ***\")\n\n# Check Chrome Version\ntry :\n    try:\n        chrome_version = os.listdir('C:/Program Files 
(x86)/Google/Chrome/Application/')[0][:2]\n    except :\n        chrome_version = os.listdir('C:/Program Files/Google/Chrome/Application/')[0][:2]\n    log(\"Chrome browser is installed.\")\n    chrome_check = 1\nexcept :\n    log(\"Chrome browser is not installed.\")\n    chrome_check = 0\n\n# Check chromedriver exists\nfileObj = Path(\"chromedriver.exe\")\nif fileObj.is_file() == True :\n    check = 1\nelse :\n    check = 0\n\n# Auto login Check Function\ndef auto_login(student_id, student_pw):\n    result = [student_id, student_pw]\n    conn = sqlite3.connect(\"user.db\")\n    cur = conn.cursor()\n    cur.execute(\"create table user (user_id text, user_pw text)\")\n    cur.execute(\"insert into user values (?, ?)\", result)\n    conn.commit()\n    conn.close()\n\n# Call ui(Login.ui) File\nui_path = \"src/login.ui\"\nui = uic.loadUiType(ui_path)[0]\n\n# Call Gui Environment (Login)\nclass LoginWindow(QMainWindow, ui):\n    def __init__(self):\n        super().__init__()\n\n        # Download Chromedriver\n        if chrome_check == 0 :\n            QMessageBox.information(self, 'Chrome Browser', '크롬 브라우져를 설치해주세요.', QMessageBox.Ok, QMessageBox.Ok)\n            quit()\n        if check == 0 :\n            QMessageBox.information(self, 'ChromeDriver', '필요한 프로그램을 다운받습니다.', QMessageBox.Ok, QMessageBox.Ok)\n            log(\"Download Chromedriver\")\n            if chrome_version == '90' :\n                chrome_version_90 = 'https://chromedriver.storage.googleapis.com/90.0.4430.24/chromedriver_win32.zip'\n                download(chrome_version_90, \"chromedriver.zip\")\n                log(\"Download Chromedriver Version 90\")\n                zipfile.ZipFile('chromedriver.zip').extract('chromedriver.exe')\n                log(\"Unzipped Chromedriver.zip\")\n            elif chrome_version == '89' :\n                chrome_version_89 = 'https://chromedriver.storage.googleapis.com/89.0.4389.23/chromedriver_win32.zip'\n                download(chrome_version_89, \"chromedriver.zip\")\n                log(\"Download Chromedriver Version 89\")\n                zipfile.ZipFile('chromedriver.zip').extract('chromedriver.exe')\n                log(\"Unzipped Chromedriver.zip\")\n            elif chrome_version == '88' :\n                chrome_version_88 = 'https://chromedriver.storage.googleapis.com/88.0.4324.96/chromedriver_win32.zip'\n                download(chrome_version_88, \"chromedriver.zip\")\n                log(\"Download Chromedriver Version 88\")\n                zipfile.ZipFile('chromedriver.zip').extract('chromedriver.exe')\n                log(\"Unzipped Chromedriver.zip\")\n            elif chrome_version == '87' :\n                chrome_version_87 = 'https://chromedriver.storage.googleapis.com/87.0.4280.88/chromedriver_win32.zip'\n                download(chrome_version_87, \"chromedriver.zip\")\n                log(\"Download Chromedriver Version 87\")\n                zipfile.ZipFile('chromedriver.zip').extract('chromedriver.exe')\n                log(\"Unzipped Chromedriver.zip\")\n        elif check == 1 :\n            log(\"Chromedriver is installed\")\n        \n        self.setupUi(self)\n        self.setWindowIcon(QIcon('src\\icon.ico')) # Icon setting\n\n        # Auto login check file (user.db)\n        global auto_login_check\n        auto_login_check = 0\n        try :\n            fileObj = Path(\"user.db\")\n            if fileObj.is_file() == True :\n                conn = sqlite3.connect(\"user.db\")\n                cur = conn.cursor()\n                cur.execute('select * from user')\n                global user\n                user = cur.fetchall()\n                user_id = user[0][0]\n                user_pw = user[0][1]\n                self.login_id.setText(user_id)\n                self.login_pw.setText(user_pw)\n                auto_login_check = 1\n        except :\n            pass\n\n        # Login Buttons\n        self.login_button.clicked.connect(self.login)\n\n    # Login Function\n    def login(self) :\n        if self.login_id.text() == \"\" : # school id is blank\n            QMessageBox.information(self, '로그인', '학번을 입력해주세요.', QMessageBox.Ok, QMessageBox.Ok)\n            log(\"Login > School id is blank\")\n        elif self.login_pw.text() == \"\" : # password is blank\n            QMessageBox.information(self, '로그인', 
'비밀번호를 입력해 주세요.', QMessageBox.Ok, QMessageBox.Ok)\n log(\"Login > School Password is blank\")\n else : # login\n log(\"Login > Try Login\")\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('window-size=1920x1080')\n options.add_argument(\"disable-gpu\")\n log(\"Webdriver > headless, window-size=1920x1080, disable-gpu options\")\n global driver\n try : \n driver = webdriver.Chrome('chromedriver.exe', chrome_options=options) # Run chromedriver.exe\n log(\"Webdriver > Try to run Chrome\")\n QMessageBox.information(self, 'Notice', '모든 강의를 확인하기 때문에 시간이 소요될수 있습니다.', QMessageBox.Ok, QMessageBox.Ok)\n except :\n QMessageBox.warning(self, 'File Error', 'chromedriver.exe 파일을 찾을수 없습니다.', QMessageBox.Ok, QMessageBox.Ok)\n log(\"Webdriver > Does not exist chromedriver.exe\")\n return\n \n login_url = \"https://cyber.jj.ac.kr/login.php\"\n class_url = \"http://cyber.jj.ac.kr/local/ubion/user/\"\n\n student_id = (self.login_id.text())\n student_pw = (self.login_pw.text())\n \n # Login Page\n driver.get(login_url) # Open Login Page\n log(\"Webdriver > Access Url > https://cyber.jj.ac.kr/login.php\")\n driver.find_element_by_name('username').send_keys(student_id) # Send ID\n driver.find_element_by_name('password').send_keys(student_pw) # Send Password\n driver.find_element_by_xpath('/html/body/div[2]/div[2]/div/div/div/div/div[1]/div[1]/div[1]/form/div[2]/input').click() # Button Click\n log(\"Webdriver > Try to Login\")\n\n # user info Page\n driver.get(class_url) # Open User Info Page\n log(\"Webdriver > Access Url > http://cyber.jj.ac.kr/local/ubion/user/\")\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n # Get Class Course Name\n log(\"*** Get Class Course Name ***\")\n class_url_html = str(soup.find_all(class_=\"coursefullname\"))\n class_count = len(soup.find_all(class_='coursefullname'))\n class_name = []\n class_url = []\n \n for i in range(class_count) :\n class_name.append(soup.find_all(class_='coursefullname')[i].get_text())\n\n class_url_soup = BeautifulSoup(class_url_html)\n\n for a in class_url_soup.find_all('a', href=True):\n class_url.append(a['href'])\n \n global class_all\n class_all = []\n\n for i in range(class_count) :\n class_all.append([class_name[i], class_url[i]])\n\n for i in range(len(class_all)) :\n log(\"Webdriver > Parse > Class > \" + str(class_all[i]))\n\n if class_all == [] : # If class is blank, Login Fail\n QMessageBox.warning(self, '로그인 실패', '학번 또는 비밀번호를 확인해 주세요.', QMessageBox.Ok, QMessageBox.Ok)\n log(\"*** Login Fail ***\")\n else : # If class is full, Login Success\n # Auto Login Function Activation\n log(\"*** Login Success ***\")\n if auto_login_check == 0 :\n reply = QMessageBox.question(self, '로그인 성공', '자동 로그인 기능을 활성화 하시겠습니까?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes :\n try :\n auto_login(student_id, student_pw)\n except :\n QMessageBox.warning(self, '자동 로그인', '이미 자동로그인 기능이 활성화 되어 있습니다.', QMessageBox.Ok, QMessageBox.Ok)\n else :\n pass\n log(\"*** Get Notice ***\")\n notice_url = \"http://cyber.jj.ac.kr/local/ubnotification/\"\n driver.get(notice_url) # Open Notice Page\n log(\"Webdriver > Access Url > http://cyber.jj.ac.kr/local/ubnotification/\")\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n # Notice Value Url\n temp = str(soup.find_all(class_=\"well wellnopadding\"))\n temp = BeautifulSoup(temp)\n notice_url_value = []\n\n global notice_value\n notice_value = []\n\n for a in temp.find_all('a', href=True):\n 
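# --- editor's note: illustrative sketch, not part of the original script ---
# find_all('a', href=True) yields only anchors that actually carry an href
# attribute, so the loop below never hits a KeyError. The same idiom in
# isolation:
#
#   from bs4 import BeautifulSoup
#   html = '<a href="/x">one</a><a>no link</a>'
#   soup = BeautifulSoup(html, 'html.parser')
#   links = [a['href'] for a in soup.find_all('a', href=True)]
#   # links == ['/x']  -- the anchor without an href is skipped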
notice_url_value.append(a['href'])\n notice_url_value.pop()\n\n for i in range(len(notice_url_value)): # Get Notice Detail\n name = (soup.find_all(class_=\"media-heading\")[i].get_text())\n timeago = (soup.find_all(class_=\"timeago\")[i].get_text())\n message = str(soup.find_all(class_=\"media-body\")[i])\n message = message.partition(\"

    \")[-1].replace(\"

    \",\"\")\n log(\"Webdriver > Parse > Notice > \" + str([name, timeago, message, notice_url_value[i]]))\n notice_value.append([name, timeago, message, notice_url_value[i]])\n\n global class_id\n global class_detail\n class_id = []\n class_detail = []\n\n # Get class id\n for i in range(len(class_all)) :\n class_id.append(class_all[i][1].split(\"?\")[1][3:])\n\n # Class Detail (Run time, etc)\n log(\"*** Get Class Detail ***\")\n for i in range(len(class_id)) :\n class_name = class_all[i][0]\n class_process_url = \"http://cyber.jj.ac.kr/report/ubcompletion/user_progress.php?id=\" + class_id[i]\n driver.get(class_process_url)\n log(\"Webdriver > Access Url > \" + str(class_process_url))\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n for j in range(1,50) :\n v = '#ubcompletion-progress-wrapper > div:nth-child(3) > table > tbody > tr:nth-child(' + str(j) + ')'\n a = str(soup.select(v))\n try :\n regex = re.compile('{}(.*){}'.format(re.escape('icon\"/>'), re.escape('')))\n title = regex.findall(a)[0]\n\n regex = re.compile('{}(.*){}'.format(re.escape(''), re.escape('')))\n need_time = regex.findall(a)[0]\n\n try :\n regex = re.compile('{}(.*){}'.format(re.escape(''), re.escape('
    ')))\n my_time = regex.findall(a)[0]\n except :\n my_time = \"미수강\"\n \n check_need_time = int(need_time.replace(\":\",\"\"))\n if my_time == \"미수강\" :\n check_my_time = 0\n else :\n check_my_time = int(my_time.replace(\":\",\"\"))\n if check_my_time > check_need_time :\n check = \"PASS\"\n else :\n check = \"FAIL\"\n log(\"Webdriver > Parse > Class Detail > \" + str([class_name, title, need_time, my_time, check, check_my_time, check_need_time]))\n class_detail.append([class_name, title, need_time, my_time, check])\n except :\n log(\"Webdriver > Parse > Class Detail > Error (No Videos)\")\n break\n\n # Get Link for Watch Cyber class\n log(\"*** Get Watch Video Link ***\")\n video = []\n for i in range(len(class_id)) :\n video_url = \"http://cyber.jj.ac.kr/mod/vod/index.php?id=\" + class_id[i]\n driver.get(video_url)\n log(\"Webdriver > Access Url > \" + str(video_url))\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n for j in range(100) :\n html_1 = str(soup.select(\"#region-main > div > table > tbody > tr:nth-child(\" + str(j) + \") > td.cell.c1 > a\"))\n url_soup = BeautifulSoup(html_1)\n for a in url_soup.find_all('a', href=True):\n log(\"Webdriver > Parse > Video Link > \" + str([class_id[i], a['href']]))\n video.append([class_id[i], a['href']])\n \n for i in range(len(class_detail)) :\n class_detail[i].append(video[i])\n\n # Call Main Window\n self.management = MainWindow()\n self.management.show()\n self.close()\n\n# Call ui(main.ui) File\nui_main_path = \"src/main.ui\"\nui_main = uic.loadUiType(ui_main_path)[0]\n\n# Call Gui Enviroment (MainWindow)\nclass MainWindow(QMainWindow, ui_main):\n def __init__(self):\n log(\"*** Open MainWindow ***\")\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('src\\icon.ico'))\n\n # Item Double Clicked Function\n self.notice_listWidget.itemDoubleClicked.connect(self.notice_ItemDoubleClicked)\n self.class_listWidget.itemDoubleClicked.connect(self.class_ItemDoubleClicked)\n self.tableWidget.cellDoubleClicked.connect(self.table_ItemDoubleClicked)\n\n # Button\n self.assign_button.clicked.connect(self.assign)\n self.grade_button.clicked.connect(self.grade)\n self.error_send.clicked.connect(self.error)\n self.exit_button.clicked.connect(self.exit)\n\n # Logo\n self.logo_label.setPixmap(QPixmap('src\\logo.jpg'))\n\n #Get User Infomormation\n get_user_url = \"http://cyber.jj.ac.kr/report/ubcompletion/user_progress.php?id=\" + class_id[0]\n driver.get(get_user_url)\n log(\"Webdriver > Access Url > \" + str(get_user_url))\n \n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n # Get School User ID\n school_id = str(soup.select(\"#ubcompletion-progress-wrapper > div:nth-child(1) > table > tbody > tr:nth-child(1) > td\"))\n regex = re.compile('{}(.*){}'.format(re.escape(''), re.escape('')))\n global user_school_id\n user_school_id = regex.findall(school_id)[0]\n self.school_number_line.setText(str(user_school_id))\n log(\"Webdriver > Get Informtaion > \" + str(user_school_id))\n\n # Get User Name\n name = str(soup.select(\"#ubcompletion-progress-wrapper > div:nth-child(1) > table > tbody > tr:nth-child(2) > td\"))\n regex = re.compile('{}(.*){}'.format(re.escape(''), re.escape('')))\n global user_name\n user_name = regex.findall(name)[0]\n self.name_line.setText(str(user_name))\n log(\"Webdriver > Get Informtaion > \" + str(user_name))\n\n # Get User Phone Number\n phone_number = str(soup.select(\"#ubcompletion-progress-wrapper > div:nth-child(1) > table > tbody > tr:nth-child(3) > 
td\"))\n regex = re.compile('{}(.*){}'.format(re.escape(''), re.escape('')))\n global user_phone_number\n user_phone_number = regex.findall(phone_number)[0]\n self.phone_number_line.setText(str(user_phone_number))\n log(\"Webdriver > Get Informtaion > \" + str(user_phone_number))\n\n for i in range(len(class_all)) :\n self.class_listWidget.addItem(class_all[i][0])\n\n for i in range(len(notice_value)) :\n self.notice_listWidget.addItem(notice_value[i][0])\n\n # QtableWidget - Class Table\n _translate = QCoreApplication.translate\n self.tableWidget.setColumnCount(5)\n self.tableWidget.setRowCount(len(class_detail))\n\n for i in range(len(class_detail)):\n item = QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(i, item)\n\n for i in range(5):\n item = QTableWidgetItem()\n self.tableWidget.setHorizontalHeaderItem(i, item)\n item = QTableWidgetItem()\n\n for i in range(len(class_detail)):\n for j in range(5):\n self.tableWidget.setItem(i, j, item)\n item = QTableWidgetItem()\n\n for i in range(len(class_detail)) :\n item = self.tableWidget.verticalHeaderItem(i)\n item.setText(_translate(\"MainWindow\", str(i)))\n\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"강의 이름\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"강의 제목\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"인정 시간\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"들은 시간\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"MainWindow\", \"통과\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n\n self.tableWidget.setColumnWidth(0, 150)\n self.tableWidget.setColumnWidth(1, 360)\n self.tableWidget.setColumnWidth(2, 65)\n self.tableWidget.setColumnWidth(3, 65)\n self.tableWidget.verticalHeader().setVisible(False)\n\n for i in range(len(class_detail)):\n for j in range(5):\n item = self.tableWidget.item(i, j)\n item.setFlags(QtCore.Qt.ItemIsEnabled) # Locked Cell\n if str(class_detail[i][j]) == \"미수강\" or str(class_detail[i][j]) == \"FAIL\":\n item.setForeground(QBrush(Qt.red))\n item.setBackground(QBrush(Qt.yellow))\n item.setText(_translate(\"MainWindow\", str(class_detail[i][j])))\n else :\n item.setText(_translate(\"MainWindow\", str(class_detail[i][j])))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n\n # Call Assign Function\n def assign(self):\n log(\"*** Get Assignment ***\")\n QMessageBox.information(self, \"과제 확인\", \"과제를 확인하는데 시간이 소요될수 있습니다.\", QMessageBox.Ok, QMessageBox.Ok)\n global assign\n assign = []\n for i in range(len(class_id)) :\n class_name = class_all[i][0]\n class_assign_url = \"http://cyber.jj.ac.kr/mod/assign/index.php?id=\" + class_id[i]\n driver.get(class_assign_url) # Open Class Assign Page\n log(\"Webdriver > Access Url > \" + str(class_assign_url))\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n temp = (soup.select(\"#region-main > div > table > tbody > tr:nth-child(1)\"))\n temp1 = []\n temp2 = []\n for i in soup.select(\"#region-main > div > table > tbody > tr:nth-child(1)\") :\n string = i.text.split(\"\\n\")\n string.pop()\n del string[0]\n temp1 = (string)\n temp = (soup.select(\"#region-main > div > table > tbody > tr.lastrow\"))\n for i in soup.select(\"#region-main > div > table > tbody > tr.lastrow\") :\n string = i.text.split(\"\\n\")\n string.pop()\n del string[0]\n temp2 = (string)\n 
if temp1 == [] :\n continue\n else :\n temp1.insert(0, class_name)\n temp2.insert(0,class_name)\n if temp1 == temp2 :\n assign.append(temp1)\n log(\"Webdriver > Parse > Get Assignment > \" + str(temp1))\n else :\n assign.append(temp1)\n assign.append(temp2)\n log(\"Webdriver > Parse > Get Assignment > \" + str([temp1, temp2]))\n \n # Call Assign Window\n self.assignment = AssignWindow()\n self.assignment.show()\n\n # Call Grade Function\n def grade(self):\n log(\"*** Get Grade ***\")\n QMessageBox.information(self, \"성작 확인\", \"성적을 확인하는데 시간이 소요될수 있습니다.\", QMessageBox.Ok, QMessageBox.Ok)\n grade_url = \"http://cyber.jj.ac.kr/local/ubion/user/grade.php\"\n driver.get(grade_url) # Open Grade Page\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n global grade_all\n grade_all = []\n try :\n for j in range(1, 9) :\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(1)\"):\n year = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(2)\"):\n semester = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(3)\"):\n classname = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(4)\"):\n professor = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(5)\"):\n grade = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(6)\"):\n grade_percent = (i.text)\n for i in soup.select(\"#region-main > div > div > div > table > tbody > tr:nth-child(\" + str(j) + \") > td:nth-child(7)\"):\n complete_grade = (i.text)\n log(\"Webdriver > Parse > Grade > \" + str([year, semester, classname, professor, grade, grade_percent, complete_grade]))\n grade_all.append([year, semester, classname, professor, grade, grade_percent, complete_grade])\n except :\n QMessageBox.warning(self, '오류', '성적을 가져오는데 오류가 발생하였습니다.\\n오류 제보를 통해서 log.txt 파일을 보내주세요', QMessageBox.Ok, QMessageBox.Ok)\n \n # Call Grade Window\n self.grade = GradeWindow()\n self.grade.show()\n\n # Call Error Window\n def error(self) :\n self.error_window = ErrorWindow()\n self.error_window.show()\n\n def notice_ItemDoubleClicked(self) :\n get_notice_url = notice_value[self.notice_listWidget.currentRow()][3]\n driver.get(get_notice_url)\n log(\"DoubleClick > Notice > \" + str([notice_value[self.notice_listWidget.currentRow()][0], notice_value[self.notice_listWidget.currentRow()][1], notice_value[self.notice_listWidget.currentRow()][2], notice_value[self.notice_listWidget.currentRow()][3]]))\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n try : \n title = soup.find_all(class_=\"subject\")[0].get_text()\n time = (soup.select(\"#region-main > div > div > div > div.well > div:nth-child(2) > div.date\")[0].get_text())\n time = time.replace(\"\\n\", \"\").replace(\" \", \"\").replace(\"\\t\",\"\")\n time = (time[:14] + \" \" + time[14:])\n message = \"작성시간 : \" + str(notice_value[self.notice_listWidget.currentRow()][1]) + \" (\" + time + \")\" + \"\\n\" + str(soup.find_all(class_=\"text_to_html\")[0].get_text())\n message = message.replace(\".\", \".\\n\")\n except :\n title = self.notice_listWidget.currentItem().text()\n message = \"작성시간 : \" + 
str(notice_value[self.notice_listWidget.currentRow()][1]) + \"\\n\" + str(notice_value[self.notice_listWidget.currentRow()][2])\n QMessageBox.information(self, title, message, QMessageBox.Ok, QMessageBox.Ok)\n\n def class_ItemDoubleClicked(self) :\n row = self.class_listWidget.currentRow()\n url = class_all[row][1]\n log(\"DoubleClick > Class > \" + str([row, url]))\n webbrowser.open(url)\n log(\"DoubleClick > Class > Open > \" + str(url))\n \n def table_ItemDoubleClicked(self) :\n row = self.tableWidget.currentRow()\n column = self.tableWidget.currentColumn()\n msgbox_title = self.tableWidget.item(self.tableWidget.currentRow(), 0).text()\n need_time = self.tableWidget.item(self.tableWidget.currentRow(), 2).text()\n my_time = self.tableWidget.item(self.tableWidget.currentRow(), 3).text()\n log(\"DoubleClick > Class Detail > \" + str([msgbox_title, need_time, my_time]))\n if self.tableWidget.item(row, 3).text() == \"미수강\" or need_time > my_time:\n reply = QMessageBox.question(self, msgbox_title, '미수강된 강의입니다.\\n강의 홈페이지로 이동하기를 원하십니까?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n log(\"DoubleClick > Class Detail > Question > Y\")\n url = (class_detail[row][5][1]).replace(\"view.php?id=\", 'http://cyber.jj.ac.kr/mod/vod/view.php?id=') \n webbrowser.open(url)\n log(\"DoubleClick > Class Detail > Open > \" + str(url))\n else:\n log(\"DoubleClick > Class Detail > Question > N\")\n return\n else :\n reply = QMessageBox.question(self, msgbox_title, '수강완료된 강의입니다.\\n강의 홈페이지로 이동하기를 원하십니까?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n log(\"DoubleClick > Class Detail > Question > Y\")\n url = (class_detail[row][5][1]).replace(\"view.php?id=\", 'http://cyber.jj.ac.kr/mod/vod/view.php?id=')\n webbrowser.open(url)\n log(\"DoubleClick > Class Detail > Open > \" + str(url))\n else:\n log(\"DoubleClick > Class Detail > Question > N\")\n return\n \n # Exit Function (Close Program)\n def exit(self) :\n log(\"*** Exit Program ***\")\n self.close()\n\n# Call ui(assign.ui) File\nui_assign_path = \"src/assign.ui\"\nui_assign = uic.loadUiType(ui_assign_path)[0]\n\n# Call Gui Enviroment (AssignWindow)\nclass AssignWindow(QMainWindow, ui_assign):\n def __init__(self):\n log(\"*** Open Assinment Window ***\")\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('src\\icon.ico'))\n\n # Button - Exit\n self.exit_button.clicked.connect(self.exit)\n\n # QtableWidget - Assign Table\n _translate = QCoreApplication.translate\n self.assign_tableWidget.setColumnCount(6)\n self.assign_tableWidget.setRowCount(len(assign))\n self.assign_tableWidget.verticalHeader().setVisible(False)\n\n for i in range(len(assign)):\n item = QTableWidgetItem()\n self.assign_tableWidget.setVerticalHeaderItem(i, item)\n\n for i in range(6):\n item = QTableWidgetItem()\n self.assign_tableWidget.setHorizontalHeaderItem(i, item)\n item = QTableWidgetItem()\n\n for i in range(len(assign)):\n for j in range(6):\n self.assign_tableWidget.setItem(i, j, item)\n item = QTableWidgetItem()\n\n for i in range(len(assign)) :\n item = self.assign_tableWidget.verticalHeaderItem(i)\n item.setText(_translate(\"MainWindow\", str(i)))\n\n item = self.assign_tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"강의 이름\"))\n item = self.assign_tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"주차\"))\n item = self.assign_tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"과제명\"))\n item = 
self.assign_tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"종료일\"))\n item = self.assign_tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"MainWindow\", \"제출여부\"))\n item = self.assign_tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"MainWindow\", \"성적\"))\n __sortingEnabled = self.assign_tableWidget.isSortingEnabled()\n self.assign_tableWidget.setSortingEnabled(False)\n\n self.assign_tableWidget.setColumnWidth(0, 135)\n self.assign_tableWidget.setColumnWidth(1, 170)\n self.assign_tableWidget.setColumnWidth(2, 200)\n self.assign_tableWidget.setColumnWidth(3, 130)\n self.assign_tableWidget.setColumnWidth(4, 100)\n self.assign_tableWidget.setColumnWidth(5, 30)\n\n for i in range(len(assign)):\n for j in range(6):\n item = self.assign_tableWidget.item(i, j)\n item.setFlags(QtCore.Qt.ItemIsEnabled) # Locked Cell\n item.setText(_translate(\"MainWindow\", str(assign[i][j])))\n self.assign_tableWidget.setSortingEnabled(__sortingEnabled)\n\n # Exit Function (Close Assignment Window)\n def exit(self) :\n log(\"*** Exit Assignment Window ***\")\n self.close()\n\n# Call ui(grade.ui) File\nui_grade_path = \"src/grade.ui\"\nui_grade = uic.loadUiType(ui_grade_path)[0]\n\n# Call Gui Enviroment (Grade Window)\nclass GradeWindow(QMainWindow, ui_grade):\n def __init__(self):\n log(\"*** Open Grade Window ***\")\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('src\\icon.ico'))\n\n # Button - Exit\n self.exit_button.clicked.connect(self.exit)\n\n # QtableWidget - Grade Table\n _translate = QCoreApplication.translate\n self.grade_tableWidget.setColumnCount(7)\n self.grade_tableWidget.setRowCount(len(grade_all))\n self.grade_tableWidget.verticalHeader().setVisible(False)\n\n for i in range(len(grade_all)):\n item = QTableWidgetItem()\n self.grade_tableWidget.setVerticalHeaderItem(i, item)\n\n for i in range(7):\n item = QTableWidgetItem()\n self.grade_tableWidget.setHorizontalHeaderItem(i, item)\n item = QTableWidgetItem()\n\n for i in range(len(grade_all)):\n for j in range(7):\n self.grade_tableWidget.setItem(i, j, item)\n item = QTableWidgetItem()\n\n for i in range(len(grade_all)) :\n item = self.grade_tableWidget.verticalHeaderItem(i)\n item.setText(_translate(\"MainWindow\", str(i)))\n\n item = self.grade_tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"연도\"))\n item = self.grade_tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"학기\"))\n item = self.grade_tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"강좌명\"))\n item = self.grade_tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"담당교수\"))\n item = self.grade_tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"MainWindow\", \"성적\"))\n item = self.grade_tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"MainWindow\", \"백분환산점수\"))\n item = self.grade_tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"MainWindow\", \"최종성적\"))\n __sortingEnabled = self.grade_tableWidget.isSortingEnabled()\n self.grade_tableWidget.setSortingEnabled(False)\n\n for i in range(len(grade_all)):\n for j in range(7):\n item = self.grade_tableWidget.item(i, j)\n item.setFlags(QtCore.Qt.ItemIsEnabled) # Locked Cell\n item.setText(_translate(\"MainWindow\", str(grade_all[i][j])))\n self.grade_tableWidget.setSortingEnabled(__sortingEnabled)\n \n # Exit Function (Close Grade Window)\n def exit(self) :\n log(\"*** Exit Grade Window ***\")\n 
self.close()\n\n# Call ui(error.ui) File\nui_error_path = \"src/error.ui\"\nui_error = uic.loadUiType(ui_error_path)[0]\n\n# Call Gui Environment (Error Window)\nclass ErrorWindow(QMainWindow, ui_error):\n def __init__(self):\n log(\"*** Open Error Report Window ***\")\n super().__init__()\n self.setupUi(self)\n self.setWindowIcon(QIcon('src\icon.ico'))\n\n # Button\n self.send_button.clicked.connect(self.send)\n self.include_button.clicked.connect(self.error_file_select)\n\n # radio\n self.writer_open_radio.clicked.connect(self.groupboxRadFunction)\n self.writer_close_radio.clicked.connect(self.groupboxRadFunction)\n \n def groupboxRadFunction(self) :\n global Disclosure_status\n if self.writer_open_radio.isChecked() :\n Disclosure_status = 1\n log(\"Error Report > Disclosure_status = 1\")\n elif self.writer_close_radio.isChecked() :\n Disclosure_status = 0\n log(\"Error Report > Disclosure_status = 0\")\n\n # Path Select \n def error_file_select(self) :\n try:\n log(\"Error Report > Include File > Try\")\n dialog = QFileDialog()\n global file_path\n file_filter = 'All files (*.*)'\n file_path = QFileDialog.getOpenFileName(self, 'Select File', filter=file_filter)\n file_path = file_path[0]\n self.file_label.setText(str(file_path))\n log(\"Error Report > Include File > \" + file_path)\n except :\n log(\"Error Report > Include File > Fail\")\n QMessageBox.information(self, \"오류제보\", \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n # Mail Send Function\n def send(self) :\n from requests import get\n import socket\n import re, uuid\n User_Host_Name = socket.gethostname()\n User_IP_Internal = socket.gethostbyname(socket.gethostname())\n User_IP_External = get(\"https://api.ipify.org\").text\n User_Mac = ':'.join(re.findall('..', '%012x' % uuid.getnode()))\n User_Computer_Information = \"Information : \" + str([User_Host_Name, User_IP_Internal, User_IP_External, User_Mac])\n\n log(\"Error Report > Send > Try\")\n title = self.title_message.toPlainText()\n if title == \"\" :\n QMessageBox.information(self, \"오류제보\", \"제목을 입력해주세요.\", QMessageBox.Ok, QMessageBox.Ok)\n log(\"Error Report > Send > Blank Title\")\n return\n content = str(self.content_message.toPlainText())\n if content == \"\" :\n QMessageBox.information(self, \"오류제보\", \"내용을 입력해주세요.\", QMessageBox.Ok, QMessageBox.Ok)\n log(\"Error Report > Send > Blank Content\")\n return\n \n try :\n if Disclosure_status == 1:\n contact = \"작성자 공개 : \" + str([user_school_id, user_name, user_phone_number])\n else :\n contact = \"작성자 공개 : 익명\"\n except :\n QMessageBox.information(self, \"오류제보\", \"작성자 공개 여부를 체크해주세요.\", QMessageBox.Ok, QMessageBox.Ok)\n log(\"Error Report > Send > Error Disclosure_status\")\n return\n\n user_contact = str(self.contact_message.toPlainText()) # test the raw text so an empty field can be detected before the prefix is added\n if user_contact == \"\" :\n user_contact = \"익명\"\n user_contact = \"연락처 : \" + user_contact\n \n content = content + \"\\n\\n=====================================\\n\\n\" + str(contact) + \"\\n\\n\" + str(user_contact) + \"\\n\\n\" + str(User_Computer_Information)\n\n try :\n import smtplib\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n from email.mime.base import MIMEBase\n from email import encoders\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.starttls()\n s.login('pental.system32@gmail.com', 'emwqpqjkhjbeoern')\n msg = MIMEMultipart()\n msg['Subject'] = title\n msg.attach(MIMEText(content, 'plain'))\n try :\n #File Upload\n attachment = open(file_path, 'rb')\n part = MIMEBase('application', 'octet-stream')\n part.set_payload((attachment).read())\n 
encoders.encode_base64(part)\n file_name = file_path.split(\"/\")[-1]\n part.add_header('Content-Disposition', \"attachment; filename= \" + file_name)\n msg.attach(part)\n except :\n pass\n s.sendmail(\"pental.system32@gmail.com\", \"pental@kakao.com\", msg.as_string())\n s.quit()\n log(\"Error Report > Send > Success\")\n QMessageBox.information(self, \"오류제보\", \"오류제보가 정상적으로 처리되었습니다.\", QMessageBox.Ok, QMessageBox.Ok)\n except :\n log(\"Error Report > Send > Fail\")\n QMessageBox.information(self, \"오류제보\", \"문제가 발생하였습니다.\", QMessageBox.Ok, QMessageBox.Ok)\n\n# Main Function\ndef main():\n app = QApplication(sys.argv)\n window = LoginWindow()\n window.show()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":39036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320712268","text":"from dataclasses import dataclass\nfrom magic_list import MagicList\n\n\ndef main():\n print(\"Hello World!\")\n\n magic_list1 = MagicList()\n magic_list1[0] = 4\n magic_list1[1] = 5\n print(magic_list1)\n\n magic_list2 = MagicList(cls_type=Person)\n magic_list2[0].age = 4\n magic_list2[1].age = 5\n print(magic_list2)\n\n\n@dataclass\nclass Person:\n age: int = 1\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"610863611","text":"from math import floor\r\nfrom trade.policy.kdj import KDJTradePolicy_2_2b\r\nfrom utility import DateUtils\r\n\r\n__author__ = 'Cedric Zhuang'\r\n\r\n\r\nclass KDJTradePolicy_2_2c(KDJTradePolicy_2_2b):\r\n\r\n def get_buy_strategy(self, holding, record):\r\n should_buy = False\r\n buy_price = None\r\n stock_index = self.get_current_stock_index()\r\n pe = self.get_pe(stock_index, record.close)\r\n ppmo = self.get_profit_percentage_from_main_op(stock_index)\r\n if holding is None \\\r\n and pe < self.per_share_earning_less_than \\\r\n and ppmo > self.profit_percentage_from_main_operation_greater_than \\\r\n and not DateUtils.in_month(record.date, self.not_in_month):\r\n if record.K is not None \\\r\n and record['K_-1_s'] < record['D_-1_s'] \\\r\n and record.K < self.KD_less_than \\\r\n and record.D < self.KD_less_than \\\r\n and floor(record.K) >= floor(record.D):\r\n self.buy_with_cross = True\r\n should_buy = True\r\n elif record.J < self.J_less_than\\\r\n and record['last_volumed'] < 0:\r\n self.buy_with_J = True\r\n should_buy = True\r\n if should_buy:\r\n if record.low <= record.bollLB:\r\n self.buy_day_low_le_boll_LB = True\r\n if record.low > record.bollLB:\r\n self.buy_day_low_gt_boll_LB = True\r\n buy_price = record.open\r\n return should_buy, buy_price","sub_path":"source/trade/policy/kdj/KDJTradePolicy_2_2c.py","file_name":"KDJTradePolicy_2_2c.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"587638710","text":"import serialpart\nfrom statistics import mean\nfrom random import *\nimport pygame\nimport datetime\n\npygame.init() # intialisation of pygame\nh_screen = 800 # definition of the screen\nw_screen = 1280\nscreen = pygame.display.set_mode((w_screen, h_screen)) # display\ndone = False # basis state of while\nrectScreen = screen.get_rect()\n\nbackgr=pygame.image.load(\"ressources/Cartoon_Forest_BG_01.png\").convert_alpha()\nbackgr = 
pygame.transform.scale(backgr, (w_screen, h_screen))\n\n\nbasketp=pygame.image.load(\"ressources/cesta.png\").convert_alpha()\nbasketp = pygame.transform.scale(basketp, (60, 60))\n\nnuttp=pygame.image.load(\"ressources/noisette.png\").convert_alpha()\nnuttp = pygame.transform.scale(nuttp, (20, 20))\n\nsq_d_1frame=pygame.image.load(\"ressources/sq_d_1.png\").convert_alpha()\nsq_d_2frame=pygame.image.load(\"ressources/sq_d_2.png\").convert_alpha()\nsq_g_1frame=pygame.image.load(\"ressources/sq_g_1.png\").convert_alpha()\nsq_g_2frame=pygame.image.load(\"ressources/sq_g_2.png\").convert_alpha()\n\nx = int(100)\ny = int(100)\n\n\"\"\" the two following lines are about the dictionnary of projectiles called \"objects\" and goes from top to down\"\"\"\nobjectnumber = int(1)\ndico = {}\n\n\"\"\" about the MPU6050 fonctionnement\"\"\"\nfrom serialpart import * #will import serialpart.py (with port name and baudrate)\nnum_val_mean = 5 # value of softener function\nnb_of_px = 20 # speed of the controlled object (in px/while )\ndiff_factor = 1.0 # a factor that will change the values of nmin/nmax (used to change difficulty)\n\nclock = pygame.time.Clock()\nwith open(\"ac_user\", \"r\") as myuser: # put it into a calibration_vars\n ac_us = myuser.read()\nfenetre = pygame.display.set_mode((w_screen, h_screen))\npolice = pygame.font.Font(None, 72) # size of the font\nscore_count=int(0)\n\nxp = int(10)\nyp = int(10)\n\nn_m = int(0) # meaned position of mpu on selected axis\n\ncolor1 = (0, 128, 255) # two differents colors that will be used. Or not.\ncolor2 = (128, 255, 0)\n\nlist_to_meanX = [] # create a list in wich we'll put \"num_val_mean\" values before meaning it.\nlist_to_meanY = [] # the same with y\n\n\"\"\"These two lines are used to \"heat\" the program, or more seriously to avoid launch errors \n(incomplete signals at the beginning)\"\"\"\nprint(ser.readline())\nprint(ser.readline())\n\nwith open(\"Calibration1D.txt\", \"r\") as mycalfile: # put it into a calibration_vars\n text_of_cal = mycalfile.read()\n list_of_cal1D = text_of_cal.split()\n\naxis = int(list_of_cal1D[0]) # axis will be 0 if x, 1 if y\nbasis_value = int(list_of_cal1D[1]) # the value on axis of neutral articulation position\nnmin = int(list_of_cal1D[2]) * diff_factor # minimum recorded value time difficultyfactor\nnmax = int(list_of_cal1D[3]) * diff_factor # maximum recorded value time difficultyfactor\n\n\nclass Lanceur(pygame.sprite.Sprite): # Creation of Launcher objet that will cruise the top of the screen\n def __init__(self, taille, distancex, distancey):\n self.taille = taille\n self.distancex = distancex\n self.distancey = distancey\n self.but = int(0) # that remember if the object has gone on the extrems (in order to give the orientation)\n\n def afficher(self):\n\n if (int(self.distancex/10))%2==0:\n if self.but == 0:\n screen.blit(sq_d_1frame, (self.distancex-40, self.distancey - 40)) # we ajust the x position of sprite to avoid\n # the sensation that the squiell poes nuts and y position for conflict with score\n if self.but == 1:\n screen.blit(sq_g_1frame, (self.distancex, self.distancey - 40))\n else :\n if self.but == 0:\n screen.blit(sq_d_2frame, (self.distancex-40, self.distancey - 40))\n if self.but == 1:\n screen.blit(sq_g_2frame, (self.distancex, self.distancey - 40))\n\n def move(self, x): # that will alternate between go right and go left\n\n if self.distancex >= w_screen - (self.taille / 2):\n self.but = 1\n if self.distancex <= 0:\n self.but = 0\n\n if self.but == 0:\n self.distancex = self.distancex + x\n 
if self.but == 1:\n self.distancex = self.distancex - x\n\n\n\nclass ObjetsLances(pygame.sprite.Sprite): # about projectiles\n def __init__(self, distancex, distancey):\n pygame.sprite.Sprite.__init__(self)\n self.distancex = distancex\n self.distancey = distancey\n self.speedofprojectile = 2\n self.collid = 0\n\n def afficher(self):\n if self.collid==0:\n screen.blit(nuttp, (self.distancex, self.distancey))\n\n def move(self): # that will alternate between go right and go left\n if self.distancey < h_screen-5:\n self.distancey = self.distancey + self.speedofprojectile\n\n def collide(self):\n if self.distancey == h_screen*0.93 and self.distancex > xp and self.distancex < xp+60:\n self.collid = 1\n global score_count\n score_count += 1\n\n\n\nlanceur = Lanceur(20, 50, 50)\n\nwhile not done: # main loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n with open(\"scores.txt\", \"r+\") as myscorefile: # put it into a scores with date\n text_rep = myscorefile.read()\n strsc = str(\"\\n\") + str(ac_us) + str(\" \")+ str(\"CatchyP\")+ str(\" \") +str(datetime.datetime.now().day) + str('-') + str(datetime.datetime.now().month)\\\n + str('-') + str(datetime.datetime.now().year) + str(\" \")+ str(datetime.datetime.now().hour)\\\n + str(':') + str(datetime.datetime.now().minute)\\\n + str(\" \") + str(score_count)\n myscorefile.write(strsc)\n done = True\n\n # reading part\n liste_acc_val=serialpart.simpleard_to_xyz_list()\n if len(liste_acc_val) == 3: # check is lengt of recorded values on one line is 3 : x, y, and z.\n # If not (because not complete) this if avoid a bug and dont use it...\n\n # meaning part\n if axis == 0:\n list_to_meanX.append(int(liste_acc_val[1]))\n if axis == 1:\n list_to_meanY.append(int(liste_acc_val[0]))\n\n else:\n continue\n\n if axis == 0:\n if len(\n list_to_meanX) == num_val_mean: # when a list reaches num-val_mean : meaning starts and produces x_m or y_m\n n_m = int(mean(list_to_meanX)) # n_m will be the meaned value\n list_to_meanX = [] # putting the xlist to 0\n if axis == 1:\n if len(\n list_to_meanY) == num_val_mean: # when a list reaches num-val_mean : meaning starts and produces x_m or y_m\n n_m = int(mean(list_to_meanY)) # n_m will be the meaned value\n list_to_meanY = [] # putting the xlist to 0\n\n lmin = int(\n 0) # resquale the values of lmin (=0), lmax = nmax-nmin and lact is the instantaneous position of axis sensor\n lmax_temp = nmax - nmin\n lmax = lmax_temp\n lact = n_m - nmin\n pclact = lact / lmax # a percentage _ = resquale from 0 to 1\n\n\n if xp < float(pclact * w_screen) and xp < w_screen-60: xp += nb_of_px # go right and down util x=w_screen\n if xp > float(pclact * w_screen) and xp > 5: xp -= nb_of_px # go_left and up until x=0\n\n screen.blit(backgr,(0,0))\n\n lanceur.move(2) # movement of top-launcher , in braquets : speed\n lanceur.afficher() # displaying of top-launcher\n\n for chaqueobjet in dico.values(): # look into the dictionnary the differents existing objects.\n chaqueobjet.move() # change the postition\n chaqueobjet.afficher() # print it\n chaqueobjet.collide() #check a collision\n\n if random() < 0.01: # random generation of objects\n nomtemp = str(\"object\" + str(objectnumber)) # each object will have a id\n dico[nomtemp] = ObjetsLances(lanceur.distancex, 50) # add the object in the dictionnary\n objectnumber = objectnumber + 1 # increase of one the next id\n\n\n texttoprint1 = police.render(str(score_count), True, pygame.Color(\"#000000\")) # about the text that will be printed\n screen.blit(basketp, (xp, 
h_screen * 0.9))\n screen.blit(texttoprint1, (w_screen * 0.9, 20)) # the prit of text (scoring)\n# pygame.draw.rect(screen, (0, 128, 255), pygame.Rect(xp, h_screen * 0.9, 60, 60)) # the MPU6050' controlled item\n\n pygame.display.flip() # screen refresh\n\n screen.fill((255, 255, 255)) # fullfillment of the screen with a color\n clock.tick(90) #speed of looping\n\nser.close() # close port","sub_path":"CatchyP.py","file_name":"CatchyP.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"557943877","text":"import datetime\n\ndef printTimeStamp(name):\t\t\t\t\t\t\t\t\t\n print('Автор програми: ' + name)\t\t\t\t\t\t\n print('Час компіляції: ' + str(datetime.datetime.now()))\t\t\t\n\npath = input('Введіть номерний знак: ')\nold_letters = path[0:3] \nold_numbers = path[3:7] \nnew_numbers = path[0:4] \nnew_letters = path[4:7]\n\nold_x = old_letters.isupper()\nold_y = old_numbers.isdigit()\n\nnew_x = new_numbers.isdigit()\nnew_y = new_letters.isupper()\n\nif old_x == True and old_y == True:\n\tprint('Цей номерний знак - старий')\nelif new_x == True and new_y == True:\n\tprint('Цей номерний знак - новий')\nelse:\n\tprint('Неправельно введено номер, спробуйте по шаблону (ABC123 або 1234ABC)')\n\nprintTimeStamp(\"Alexey.\")\n","sub_path":"B9.py","file_name":"B9.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614992622","text":"#!/usr/bin/env python\n\n\"\"\"\nTarget GRE - API Consumption\nnextbus.py\n~~~~~~~~~~~~~~~~\n\nGiven a bus route, direction, and stop name,\nthis script prints out the next time of departure\nusing Metro Transit's NexTrip API\n\n:copyright: (c) 2017 by Mackenzie Grimes\n\nUsage:\n> python3 nextbus.py {BUS_ROUTE} {BUS STOP NAME} {DIRECTION}\n\n\"\"\"\nimport sys\nimport os.path\nimport transitAPI as api\n\nclass NextBus:\n\tdef __init__(self, route = None, direction = None, stop = None, departures = 1):\n\t\tself.route = route\n\t\tself.direction = direction\n\t\tself.stop = stop\n\t\tself.departures = departures\n\n\tdef answerInquiry(self):\n\t\tanswer = None\n\t\t# If no route passed, find all routes\n\t\tif not self.route:\n\t\t\tanswer = self.getAllRoutes()\n\n\t\t# If no stop passed, then find all stops for this route and direction\n\t\telif not self.stop:\n\t\t\tanswer = self.getAllStops()\n\t\t\n\t\telse:\t\n\t\t\tanswer = self.getNextDeparture()\n\n\t\treturn answer\n\n\t\"\"\"\n\tclass NextBus stores all information for nextBus inquiry\n\tand makes API calls to get the next departure \n\tof the route of interest\n\n\t:param route: str of (partial) description of route\n\t:param direction: str of cardinal direction \n\t\t\t\t\t{north, south, east, west}\n\t:param stop:\n\t\"\"\"\n\tdef getNextDeparture(self):\n\t\treturn api.getNextDeparture(self.route, self.direction, self.stop, self.departures)\n\n\tdef getAllStops(self):\n\t\treturn api.getAllStops(self.route, self.direction)\n\n\tdef getAllRoutes(self):\n\t\treturn api.getAllRoutes()\n\ndef main(args):\n\n\t# Print usage if help requested\n\tif (\"-h\" in args) or (\"--help\" in args):\n\t\tsys.exit(\n\t\t\t\"Welcome to NextBus.\\n\" +\n\t\t\t\"Usage:\\tnextbus [DIRECTION] [BUS STOP NAME] [NUMBER OF UPCOMING DEPARTURES]\\n\")\n\n\taBus = None\n\tinquiryLabel = \"\"\n\n\t# Inquiry is all bus routes\n\tif len(args) == 1:\n\t\taBus = NextBus()\n\t\tinquiryLabel = \"Available routes\"\n\n\t# Can't pass only two arguments 
\n\telif len(args) == 2:\n\t\tbusRoute = args[1]\n\t\tsys.exit(\n\t\t\t\"Sorry, we need a cardinal direction (north/south, east/west)\\n\" +\n\t\t\t\"to show you stops for route \" + busRoute + \".\\n\")\n\n\t# Inquiry is only bus route and direction, so return all stops\t\t\n\telif len(args) == 3:\n\t\tbusRoute, direction = args[1:]\n\t\taBus = NextBus(route = str(busRoute), direction = str(direction))\n\t\tinquiryLabel = \"Available stops for route \" + busRoute + \" going \" + direction\n\n\t# Inquiry is next departure time\n\telif len(args) == 4:\n\t\tbusRoute, direction, stopName = args[1:4]\n\t\taBus = NextBus(route = str(busRoute), direction = str(direction), \n\t\t\tstop = str(stopName))\n\t\tinquiryLabel = \"Next departure(s) for \" + busRoute + \" at stop \" + stopName\n\n\t# Inquiry is multiple departure times\n\telse:\n\t\tbusRoute, direction, stopName, quantity = args[1:5]\n\t\taBus = NextBus(route = str(busRoute), direction = str(direction), \n\t\t\tstop = str(stopName), departures = int(quantity))\n\t\tinquiryLabel = \"Next departure(s) for \" + busRoute + \" at stop \" + stopName\n\n\t# Retrieve and print the answer\n\tinquiryResult = aBus.answerInquiry()\n\tprint(inquiryLabel, \":\\n\\n\", inquiryResult, sep = \"\")\n\nif __name__ == \"__main__\": \n\tmain(sys.argv)","sub_path":"nextbus2.0.py","file_name":"nextbus2.0.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"272702271","text":"from sys import exit\n\nn = int(input())\nzestawy = []\nif n < 1:\n\texit()\n\n\nmax_wartosc = 1\nfor i in range(0, n):\n\tzestawy.append(list(map(int, input().split())))\n\tif zestawy[i][1] > max_wartosc:\n\t\tmax_wartosc = zestawy[i][1]\n\n\nwyniki_zestawy = [0] * n\npierwsze = [True] * (max_wartosc + 1)\nfor i in range(2, max_wartosc):\n\tif pierwsze[i] == True:\n \n\t\tfor l in range(0, len(zestawy)):\n\t\t\tif i >= zestawy[l][0] and i <= zestawy[l][1]:\n\t\t\t\twyniki_zestawy[l] += 1\n\n\t\tj = 2\n\t\twhile i*j <= max_wartosc:\n\t\t\tpierwsze[i*j] = False\n\t\t\tj += 1\n \n\nfor i in wyniki_zestawy:\n\tprint(i)\n","sub_path":"pierwsze_zakres.py","file_name":"pierwsze_zakres.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261321549","text":"from tkinter import Tk, TOP, BOTH, X, Y, N, W, E, S, N, LEFT, RIGHT, END, YES, NO, SUNKEN, ALL, VERTICAL, HORIZONTAL, BOTTOM, CENTER\nfrom tkinter import FIRST, LAST, ROUND, NONE, DISABLED\nfrom tkinter import Text, Canvas, Listbox, Scale, Checkbutton, Label, Entry, Scrollbar, Frame, Button, Spinbox\nfrom tkinter import BooleanVar, StringVar# , IntVar, DoubleVar\nfrom tkinter import Toplevel, TclError, filedialog, messagebox\nfrom utils.param_ops import less_kwargs, more_kwargs\n# from tkinter.ttk import Frame, Label, Entry, Button #- old fashion\nfrom collections import namedtuple\nfrom itertools import count\n\nCheckboxes = namedtuple('Checkboxes', 'ckb, var')\nEntries = namedtuple('Entries', 'lbl, etr, var, color')\nCheckEntries = namedtuple('CheckEntries', 'ckb, etr, bvar, svar, color')\n\ndef __checkbox(panel, text, value, callback, gui_kwargs):\n var = BooleanVar(panel)\n gui = Checkbutton(panel, text = text, variable = var, command = callback)\n var.set(value)\n gui.var = var # delete?\n return gui, var\n\ndef make_checkbox(panel, text, value, callback, gui_kwargs, control = 1):\n gui, var = __checkbox(panel, text, value, callback, gui_kwargs)\n if 
control > 0: # 0 line for raw combination\n gui.pack(side = TOP, anchor = W, **gui_kwargs) # sticky = W is special for grid\n return Checkboxes(gui, var)\n\ndef __entry(panel, value, callback, gui_kwargs):\n char_width = less_kwargs(gui_kwargs, 'char_width', None)\n prompt_str = less_kwargs(gui_kwargs, 'prompt_str', str(value) + \" \")\n var = StringVar(panel)\n common_args = dict(textvariable = var, width = char_width, justify = CENTER)\n if isinstance(value, tuple):\n value, start, end, inc = value\n gui = Spinbox(panel, from_ = start, to = end, increment = inc, **common_args)\n def spin_click(*event):\n callback(None) # ???\n # gui.bind('', lambda *e: callback(None))\n var.trace('w', spin_click)\n else:\n gui = Entry(panel, **common_args)\n var.set(value) # even no initial value?\n default_color = gui.cget('highlightbackground')\n gui.bind('<Return>', callback)\n # gui.var = var # delete ?\n def on_entry_click(event):\n if gui.get() == prompt_str:\n gui.delete(0, \"end\") # delete all the text in the entry\n gui.insert(0, '') # insert blank for user input\n gui.config(fg = 'black')\n def on_focusout(event):\n if gui.get() == '':\n gui.insert(0, prompt_str)\n gui.config(fg = 'grey')\n else:\n callback(None)\n gui.bind('<FocusIn>', on_entry_click)\n gui.bind('<FocusOut>', on_focusout)\n if value:\n on_entry_click(None)\n else:\n on_focusout(None)\n return gui, var, default_color\n\ndef make_entry(panel, text, value, callback, gui_kwargs, control = 1):\n pnl = Frame(panel)\n if isinstance(control, dict):\n gui_kwargs.update(control)\n control = 1\n elif less_kwargs(gui_kwargs, 'char_width', None) is None:\n gui_kwargs['char_width'] = 4 if control == 1 else 20\n gui, var, clr = __entry(pnl, value, callback, gui_kwargs)\n lbl = Label(pnl, text = text)\n pnl.pack(side = TOP, fill = BOTH, **gui_kwargs)\n if control == 1:\n lbl.pack(side = LEFT, anchor = W, fill = X, expand = YES)\n gui.pack(side = RIGHT, anchor = E)\n else: # 2 lines\n lbl.pack(side = TOP, anchor = W)\n gui.pack(side = TOP, anchor = E, expand = YES, fill = X)\n return Entries(lbl, gui, var, clr)\n\ndef make_checkbox_entry(panel, text, values, callbacks, gui_kwargs, control = 2):\n # e.g. 
(panel, 'curve', (True, 'x'), (func1, func2), {char_width:3, padx:...})\n ckb_value, etr_value = values\n ckb_callback, etr_callback = callbacks\n pnl = Frame(panel)\n pnl.pack(side = TOP, fill = X, anchor = W, **gui_kwargs)\n if less_kwargs(gui_kwargs, 'char_width', None) is None:\n gui_kwargs['char_width'] = 4 if control == 1 else 20\n etr, svar, clr = __entry(pnl, etr_value, etr_callback, gui_kwargs)\n ckb, bvar = __checkbox(pnl, 'Apply ' + text, ckb_value, ckb_callback, gui_kwargs)\n if control == 1:\n wht = Label(pnl)\n ckb.pack(side = LEFT, anchor = W)\n wht.pack(side = LEFT, fill = X, expand = YES)\n etr.pack(side = RIGHT, fill = X)\n else: # 2 lines\n ckb.pack(side = TOP, anchor = W)\n etr.pack(side = TOP, fill = X, anchor = E, expand = YES)\n return CheckEntries(ckb, etr, bvar, svar, clr)\n\ndef get_checkbox(ckbxes, ctype = 0):\n if ctype == 0:\n gen = (v.get() for _, v in ckbxes)\n else:\n gen = (v.get() for _, _, v, _, _ in ckbxes)\n return ckbxes.__class__(*gen)\n\ndef get_entry(entries, entry_dtypes, fallback_values, ctype = 0):\n gen = zip(entries, entry_dtypes, fallback_values)\n res = []\n if ctype == 0:\n for (l, g, v, c), d, f in gen:\n try:\n res.append(d(v.get()))\n g.config(highlightbackground = c)\n except Exception as e:\n print(l.cget('text'), e, 'use', f, 'instead')\n g.config(highlightbackground = 'pink')\n res.append(f)\n else:\n for (b, g, _, v, c), d, f in gen:\n try:\n t = d(v.get())\n if d is eval:\n t(0.5)\n res.append(t)\n g.config(highlightbackground = c)\n except Exception as e:\n print(b.cget('text'), e, 'use', f, 'instead')\n g.config(highlightbackground = 'pink')\n res.append(f)\n if entries.__class__ is tuple:\n return tuple(res)\n return entries.__class__(*res)\n\ndef make_namedtuple_gui(make_func, panel, values, callback, control = None, **gui_kwargs):\n if control is None:\n return values.__class__(\n *(make_func(panel, n.replace('_', ' ').title(), v, callback, gui_kwargs.copy()) for n, v in zip(values._fields, values))\n )\n widgets = []\n for n, v, c in zip(values._fields, values, control):\n w = make_func(panel, n.replace('_', ' ').title(), v, callback, gui_kwargs.copy(), c)\n widgets.append(w)\n return values.__class__(*widgets)\n\n # demo_func = less_kwargs(gui_kwargs, 'demo_func', 'lambda x:x')\n # entry.pack(side = TOP, anchor = W, **gui_kwargs)\n # def on_entry_click(event):\n # if entry.get() == demo_func:\n # entry.delete(0, \"end\") # delete all the text in the entry\n # entry.insert(0, 'x') # Insert blank for user input\n # entry.config(fg = 'black')\n\n # def on_focusout(event):\n # if entry.get().strip() in ('', 'x'):\n # entry.delete(0, \"end\")\n # entry.insert(0, demo_func)\n # entry.config(fg = 'grey')\n # entry.bind('', on_entry_click)\n # entry.bind('', on_focusout)\n # entry.config(fg = 'grey')\n\nimport numpy as np\ndef bezier_curve(canvas, center, length, top, bottom, func = lambda x: x, num_points = 5, **draw_kwargs):\n assert num_points > 2 and num_points % 2 # even\n t_point = np.asarray([center, top])\n b_point = np.asarray([center, bottom])\n c_point = np.asarray([center + length, 0.5 * (top + bottom)])\n \n coord = []\n for i in range(num_points):\n ratio = i / (num_points - 1)\n l_ratio = func(ratio)\n r_ratio = func(1 - ratio)\n l = t_point * l_ratio + c_point * (1 - l_ratio)\n r = b_point * r_ratio + c_point * (1 - r_ratio)\n point = l * ratio + r * (1 - ratio)\n coord.extend(point)\n if i << 1 == num_points - 1: # 5 == 2 * 2 + 1\n mid_point = point\n canvas.create_line(*coord, smooth = True, **draw_kwargs)# outline 
= '#f11', fill = '#1f1', width = 2)#, start=30)#, extent=120, style=tk.ARC, width=3)\n return mid_point\n\nimport platform\nOS = platform.system()\n\nclass _MousewheelSupport(object):\n\n # implementation of singleton pattern\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = object.__new__(cls)\n return cls._instance\n\n def __init__(self, root, horizontal_factor=2, vertical_factor=2):\n\n self._active_area = None\n\n if isinstance(horizontal_factor, int):\n self.horizontal_factor = horizontal_factor\n else:\n raise Exception(\"Horizontal factor must be an integer.\")\n\n if isinstance(vertical_factor, int):\n self.vertical_factor = vertical_factor\n else:\n raise Exception(\"Vertical factor must be an integer.\")\n\n if OS == \"Linux\":\n root.bind_all('<4>', self._on_mousewheel, add='+')\n root.bind_all('<5>', self._on_mousewheel, add='+')\n else:\n # Windows and MacOS\n root.bind_all(\"<MouseWheel>\", self._on_mousewheel, add='+')\n\n def _on_mousewheel(self, event):\n if self._active_area:\n self._active_area.onMouseWheel(event)\n\n def _mousewheel_bind(self, widget):\n self._active_area = widget\n\n def _mousewheel_unbind(self):\n self._active_area = None\n\n def add_support_to(self, widget=None, xscrollbar=None, yscrollbar=None, what=\"units\", horizontal_factor=None, vertical_factor=None):\n if xscrollbar is None and yscrollbar is None:\n return\n\n if xscrollbar is not None:\n horizontal_factor = horizontal_factor or self.horizontal_factor\n\n xscrollbar.onMouseWheel = self._make_mouse_wheel_handler(widget, 'x', horizontal_factor, what)\n xscrollbar.bind('<Enter>', lambda event, scrollbar=xscrollbar: self._mousewheel_bind(scrollbar))\n xscrollbar.bind('<Leave>', lambda event: self._mousewheel_unbind())\n\n if yscrollbar is not None:\n vertical_factor = vertical_factor or self.vertical_factor\n\n yscrollbar.onMouseWheel = self._make_mouse_wheel_handler(widget, 'y', vertical_factor, what)\n yscrollbar.bind('<Enter>', lambda event, scrollbar=yscrollbar: self._mousewheel_bind(scrollbar))\n yscrollbar.bind('<Leave>', lambda event: self._mousewheel_unbind())\n\n main_scrollbar = yscrollbar if yscrollbar is not None else xscrollbar\n\n if widget is not None:\n if isinstance(widget, list) or isinstance(widget, tuple):\n list_of_widgets = widget\n for widget in list_of_widgets:\n widget.bind('<Enter>', lambda event: self._mousewheel_bind(widget))\n widget.bind('<Leave>', lambda event: self._mousewheel_unbind())\n\n widget.onMouseWheel = main_scrollbar.onMouseWheel\n else:\n widget.bind('<Enter>', lambda event: self._mousewheel_bind(widget))\n widget.bind('<Leave>', lambda event: self._mousewheel_unbind())\n\n widget.onMouseWheel = main_scrollbar.onMouseWheel\n\n @staticmethod\n def _make_mouse_wheel_handler(widget, orient, factor=1, what=\"units\"):\n view_command = getattr(widget, orient + 'view')\n\n if OS == 'Linux':\n def onMouseWheel(event):\n if event.num == 4:\n view_command(\"scroll\", (-1) * factor, what)\n elif event.num == 5:\n view_command(\"scroll\", factor, what)\n\n elif OS == 'Windows':\n def onMouseWheel(event):\n view_command(\"scroll\", (-1) * int((event.delta / 120) * factor), what)\n\n elif OS == 'Darwin':\n def onMouseWheel(event):\n view_command(\"scroll\", event.delta, what)\n\n return onMouseWheel\n\n\nclass ScrollingArea(Frame, object):\n\n def __init__(self,\n master,\n width = None,\n height = None,\n anchor = N,\n scroll_vertically = True,\n background = None, show_scrollbar = False, inner_frame_cls = Frame, **kw):\n Frame.__init__(self, master, 
class_=\"Scrolling_Area\", background=background)\n # self.grid_columnconfigure(0, weight=1)\n # self.grid_rowconfigure(0, weight=1)\n\n self._width = width\n self._height = height\n\n self._canvas = Canvas(self, background = background, highlightthickness = 0, width = width, height = height)\n self._canvas.pack(side = LEFT)\n # self.canvas.grid(row=0, column=0, sticky=N + E + W + S)\n\n self._scrollbar = Scrollbar(self, orient = VERTICAL if scroll_vertically else HORIZONTAL)\n if show_scrollbar:\n self._scrollbar.pack()#row=0, column=1, sticky=N + S)\n\n self._canvas.configure(yscrollcommand = self._scrollbar.set)\n self._scrollbar['command'] = self._canvas.yview\n\n # self.rowconfigure(0, weight=1)\n # self.columnconfigure(0, weight=1)\n\n self._inner_frame = inner_frame_cls(self._canvas, **kw)\n self._inner_frame.pack(anchor = anchor)\n\n self._canvas.create_window(0, 0, window = self._inner_frame, anchor='nw', tags=\"inner_frame\")\n\n self._canvas.bind('', self._on_canvas_configure)\n\n if scroll_vertically:\n _MousewheelSupport(self).add_support_to(self._canvas, yscrollbar = self._scrollbar)\n else:\n _MousewheelSupport(self).add_support_to(self._canvas, xscrollbar = self._scrollbar)\n\n @property\n def width(self):\n return self._canvas.winfo_width()\n\n @width.setter\n def width(self, width):\n self._canvas.configure(width=width)\n\n @property\n def height(self):\n return self._canvas.winfo_height()\n\n @height.setter\n def height(self, height):\n self._canvas.configure(height=height)\n\n @property\n def inner_frame(self):\n return self._inner_frame\n\n def set_size(self, width, height):\n self._canvas.configure(width=width, height=height)\n\n def _on_canvas_configure(self, event):\n width = max(self._inner_frame.winfo_reqwidth(), event.width)\n height = max(self._inner_frame.winfo_reqheight(), event.height)\n\n self._canvas.configure(scrollregion=\"0 0 %s %s\" % (width, height))\n self._canvas.itemconfigure(\"inner_frame\", width=width, height=height)\n\n def update_viewport(self):\n self.update()\n\n window_width = self._inner_frame.winfo_reqwidth()\n window_height = self._inner_frame.winfo_reqheight()\n\n if self._width is None:\n canvas_width = window_width\n else:\n canvas_width = min(self._width, window_width)\n\n if self._height is None:\n canvas_height = window_height\n else:\n canvas_height = min(self._height, window_height)\n\n self._canvas.configure(scrollregion=\"0 0 %s %s\" % (window_width, window_height), width=canvas_width, height=canvas_height)\n self._canvas.itemconfigure(\"inner_frame\", width=window_width, height=window_height)","sub_path":"utils/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"249725115","text":"\"\"\"Adding Box, Tray, Rack, Chamber, Freezer, Laboratory Models\n\nRevision ID: 60cdff3a515f\nRevises: 1aa169d60117\nCreate Date: 2019-11-13 16:39:16.227917\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '60cdff3a515f'\ndown_revision = '1aa169d60117'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('chamber',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('chamber_type', sa.String(length=50), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('laboratory',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('lab_name', sa.String(length=65), nullable=False),\n sa.Column('room_number', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('freezer',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('laboratory_id', sa.Integer(), nullable=True),\n sa.Column('freezer_number', sa.Integer(), nullable=False),\n sa.Column('room_located', sa.String(length=65), nullable=False),\n sa.ForeignKeyConstraint(['laboratory_id'], ['laboratory.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('rack',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('chamber_id', sa.Integer(), nullable=True),\n sa.Column('rack_number', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['chamber_id'], ['chamber.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('tray',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('rack_id', sa.Integer(), nullable=True),\n sa.Column('tray_number', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['rack_id'], ['rack.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('box',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('tray_id', sa.Integer(), nullable=True),\n sa.Column('box_label', sa.String(length=65), nullable=False),\n sa.ForeignKeyConstraint(['tray_id'], ['tray.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column(u'sample', sa.Column('box_id', sa.Integer(), nullable=True))\n op.add_column(u'sample', sa.Column('retention_period', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'sample', 'box', ['box_id'], ['id'])\n op.drop_column(u'sample', 'retension_period')\n # op.add_column(u'users', sa.Column('first_name', sa.String(length=65), nullable=False))\n # op.add_column(u'users', sa.Column('last_name', sa.String(length=65), nullable=False))\n # op.alter_column(u'users', sa.Column('password', sa.String(length=128), nullable=False))\n # op.alter_column(u'users', 'email',\n # existing_type=sa.VARCHAR(length=65),\n # nullable=False)\n # op.alter_column(u'users', 'role_id',\n # existing_type=sa.INTEGER(),\n # nullable=False)\n # op.drop_column(u'users', 'lastname')\n # op.drop_column(u'users', 'firstname')\n # op.drop_column(u'users', 'password_hash')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column(u'users', sa.Column('password_hash', sa.VARCHAR(length=128), autoincrement=False, nullable=True))\n op.add_column(u'users', sa.Column('firstname', sa.VARCHAR(length=65), autoincrement=False, nullable=False))\n op.add_column(u'users', sa.Column('lastname', sa.VARCHAR(length=65), autoincrement=False, nullable=False))\n op.alter_column(u'users', 'role_id',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.alter_column(u'users', 'email',\n existing_type=sa.VARCHAR(length=65),\n nullable=True)\n op.drop_column(u'users', 'password')\n op.drop_column(u'users', 'last_name')\n op.drop_column(u'users', 'first_name')\n op.add_column(u'sample', sa.Column('retension_period', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'sample', type_='foreignkey')\n op.drop_column(u'sample', 'retention_period')\n op.drop_column(u'sample', 'box_id')\n op.drop_table('box')\n op.drop_table('tray')\n op.drop_table('rack')\n op.drop_table('freezer')\n op.drop_table('laboratory')\n op.drop_table('chamber')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/60cdff3a515f_adding_box_tray_rack_chamber_freezer_.py","file_name":"60cdff3a515f_adding_box_tray_rack_chamber_freezer_.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"475199424","text":"#!/opt/local/bin/python\n# python 3.6\n# 1. Calculate the hbond between waters and every residues.\n# 2. Two outfile is the histogram of hbond(water - every residues) and hbond(water-type of residues)\n# 3. For multipule chains, use sep.py to separate residues into groups.\n# python + py_file + tpr_file + csv_file + outfile1 + outfile2 + residue number + chain number + elements\n# python xxx.py xxx.tpr xxx.csv 1.dat 2.dat 20 30 OZ OE2 OE1 O NZ N HZ3 HZ2 HZ1 HZ HNT HN\n\nimport numpy, pandas, MDAnalysis, collections, sys, os\n\n#nhbond_max=20\nnhbond_max=100 # The maxmun of hydrogen bonds, to generate histograms (maxmun of the histogram x axial).\n\n## set number of residues, chains, etc\n## do this first because I always forgot and\n## this will make it fail sooner\nnResChain=int(sys.argv[5])\nnChain=int(sys.argv[6])\nnRes=nChain*nResChain\n\n## read in configuration (needed pnly for getting indices of acceptor atoms)\n## and pandas data frame\nu=MDAnalysis.Universe(sys.argv[1])\ndata_frame=pandas.read_csv(sys.argv[2])\n\n## extract ids for acceptors and create look up dictionary\nprint (\"extracting acceptor/donor ids\")\nacc_sel='name'\nacc_names=sys.argv[7:]\nfor acc in acc_names:\n acc_sel=acc_sel+' '+acc\nacceptors=u.select_atoms(acc_sel)\nnacc=len(acceptors)\n\nresnames=acceptors.residues.resnames\n\nhistogram_dict={}\nfor resname in set(acceptors.residues.resnames):\n print (resname)\n histogram_dict[resname]=numpy.zeros((nhbond_max+1))\n\nacc_id=[]\nid_to_res={}\nfor acc in acceptors:\n acc_id.append(acc.index)\n id_to_res[acc]=acc.resid\n\n\n## get unique timesteps\ntimesteps=data_frame.time.unique()\n\n## initialise arrays\nnhbond_ave=numpy.zeros((nRes))\nnhbond_sqr=numpy.zeros((nRes))\nnsets=0\n\n## extract columns from data frame\n## and organise into dictionary\nprint (\"extracting data\")\nt=data_frame['time']\nacc=data_frame['acceptor_index']\ndcc=data_frame['donor_index']\naname=data_frame['acceptor_atom']\ndname=data_frame['donor_atom']\nacc_dict={}\n\n#print (t)\n#os.system(\"pause\")\n#print (acc)\n#os.system(\"pause\")\n#print (dcc)\n#os.system(\"pause\")\n#print 
(aname)\n#os.system(\"pause\")\n#print (dname)\n#os.system(\"pause\")\n\nfor tt,aa,dd,an,dn in zip(t,acc,dcc,aname,dname):\n\n# if acc_dict.has_key(tt):\n if tt in acc_dict:\n if an in acc_names:\n acc_dict[tt].append(aa)\n if dn in acc_names:\n acc_dict[tt].append(dd)\n else:\n # first hit for this timestep: start the list, then append each match,\n # so a matching donor no longer overwrites a matching acceptor\n acc_dict[tt]=[]\n if an in acc_names:\n acc_dict[tt].append(aa)\n if dn in acc_names:\n acc_dict[tt].append(dd)\n\n#print (acc_dict)\n#os.system(\"pause\")\n## loop over timesteps\nprint (\"looping over timesteps\")\nfor ts in timesteps:\n\n print (ts)\n ## tally up number of times each acceptor\n ## appears\n d=collections.Counter(acc_dict[ts])\n nhbond_step=numpy.zeros((nRes))\n \n# print (d)\n# os.system(\"pause\")\n# nhbond_step=numpy.zeros(d)\n\n\n ## assign H-bonds to residues\n for acc in acceptors:\n\n# if d.has_key(acc.index):\n if acc.index in d:\n ires=id_to_res[acc]\n nhbond_step[ires-1] += d[acc.index]\n\n ## update averages\n nhbond_ave+=nhbond_step\n nhbond_sqr+=nhbond_step**2\n nsets+=1.0\n\n for ires in range(nRes):\n ibin=int(nhbond_step[ires])\n histogram_dict[resnames[ires]][ibin]+=1.0\n\n## get final averages\nnhbond_ave/=nsets\nnhbond_sqr/=nsets\nnhbond_std=numpy.sqrt(nhbond_sqr-nhbond_ave**2)\n\nouf=open(sys.argv[3],'w')\nfor ires in range(nRes):\n print (ires+1,nhbond_ave[ires],nhbond_std[ires],file=ouf)\n\n\nouf=open(sys.argv[4],'w')\nprint ('##',end=' ',file=ouf)\nk=list(histogram_dict.keys())\nk.sort()\nfor kk in k:\n print (kk,end=' ',file=ouf)\nprint ('',file=ouf)\n#print >> ouf\nfor ihbond in range(nhbond_max+1):\n print (ihbond,end=' ',file=ouf)\n for kk in k:\n print (histogram_dict[kk][ihbond],end=' ',file=ouf)\n print (\" \",file=ouf)\n","sub_path":"attachment/gmx/hbond/get_hbond_cnt.py","file_name":"get_hbond_cnt.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"388796624","text":"# -*- encoding: utf-8 -*-\n# @time: 2018/10/10 11:14\n\n\nclass Solution:\n def is_palindrome(self, x):\n \"\"\"\n :Time: 408ms\n :param x: int\n :return: boolean\n \"\"\"\n return str(x)[0:len(str(x))//2][::-1] == str(x)[(len(str(x))+1)//2:]\n\n\nif __name__ == '__main__':\n res = Solution()\n print(res.is_palindrome(666))\n","sub_path":"9_palindrime_number.py","file_name":"9_palindrime_number.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"435501429","text":"import socket\nfrom threading import Thread\n\nslave_servers = (\n\t{ 'ip':('172.17.0.2',8000), 'name':'server1', 'status':'available'},\n\t{ 'ip':('172.17.0.3',8000), 'name':'server2', 'status':'available'},\n)\ncounter = 0\n\ndef create_socket():\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tslave_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\tsock.bind(('', 80))\n\treturn sock\n\ndef handle_client(client_sock): # handle request of loadbalancer\n\trequest = client_sock.recv(1024)\n\tresponse = forward_to_slave_server(request)\n\tclient_sock.send(response)\n\tclient_sock.close()\n\ndef forward_to_slave_server(request): # get request of loadbalancer and forward to a server\n\tglobal counter\n\tslave_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tslave_server = slave_servers[counter]\n\twhile(slave_server['status'] != 'available'):\n\t\tcounter += 1\n\t\tif(counter >= len(slave_servers)):\n\t\t\tcounter = 0\n\t\t\treturn 
\"error!\"\n\t\tslave_server = slave_servers[counter]\n\tslave_sock.connect(slave_server['ip'])\n\tslave_sock.send(request)\n\tresponse = ''\n\tresult = slave_sock.recv(1024)\n\tresponse += result\n\twhile( len(result) > 0 ):\n\t\tresult = slave_sock.recv(1024)\n\t\tresponse += result\n\n\tif(counter + 1 >= len(slave_servers)): # if out of range of slave servers set 0nth server\n\t\tcounter = 0\n\telse:\n\t\tcounter += 1\n\n\treturn response\n\t\nif __name__ == '__main__':\n\tsock = create_socket()\n\tsock.listen(5)\n\n\twhile(True):\n\t\tclient_sock, client_address = sock.accept()\n\t\tth = Thread(target=handle_client, args=(client_sock,))\n\t\tth.daemon = True\n\t\tth.start()\n\n\n\n\n\n\n","sub_path":"lb.py","file_name":"lb.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"75794254","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom .models import Course, Description, Comment\n# Create your views here.\n\ndef index(request):\n context = {\n 'courses':Course.objects.all()\n }\n return render(request, 'courses/index.html', context)\n\ndef addCourse(request):\n if request.method == \"POST\":\n course = Course.objects.create(name=request.POST['name'])\n Description.objects.create(course=course, description=request.POST['description'])\n return redirect('/')\n\ndef deleteCourse(request, id):\n if request.method == 'GET':\n context = {\n 'course':Course.objects.get(id=id)\n }\n return render(request, 'courses/delete.html', context)\n\n elif request.method == 'POST':\n delete = Course.objects.get(id=id).delete()\n return redirect('/')\n\ndef comment(request, id):\n if request.method == 'GET':\n context = {\n 'course':Course.objects.get(id=id)\n }\n return render(request, 'courses/comment.html', context)\n\n elif request.method == 'POST':\n course = Course.objects.get(id=id)\n Comment.objects.create(comment=request.POST['comment'], course=course)\n return redirect('/comment/{}'.format(id))\n","sub_path":"PC/08_courses/main/apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"449307536","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Martin Pihrt'\n# this plugins send GET data to remote web server\n\nimport json\nimport time\nimport os\nimport os.path\nimport traceback\nimport urllib2 \nimport re\nfrom threading import Thread, Event\n\nimport web\nfrom ospy.webpages import ProtectedPage\nfrom plugins import PluginOptions, plugin_url\nfrom ospy.options import options\nfrom ospy.stations import stations\nfrom ospy.inputs import inputs\nfrom ospy.log import log, EVENT_FILE\nfrom ospy.helpers import datetime_string, get_input\n\nimport i18n\n\n\nNAME = 'Remote Notifications'\nLINK = 'settings_page'\n\nremote_options = PluginOptions(\n NAME,\n {\n 'use': False,\n 'rem_adr': \"your web server\",\n 'api': \"123456789\"\n }\n)\n\n\n################################################################################\n# Main function loop: #\n################################################################################\nclass RemoteSender(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.daemon = True\n self._stop = Event()\n\n self._sleep_time = 0\n self.start()\n\n def stop(self):\n self._stop.set()\n\n def update(self):\n self._sleep_time = 0\n\n def _sleep(self, secs):\n self._sleep_time = secs\n while 
self._sleep_time > 0 and not self._stop.is_set():\n time.sleep(1)\n self._sleep_time -= 1\n\n def try_send(self, text):\n log.clear(NAME)\n try:\n send_data(text) # send get data\n log.info(NAME, _('Remote was sent') + ':\\n' + text)\n except Exception:\n log.error(NAME, _('Remote was not sent') + '!\\n' + traceback.format_exc())\n\n def run(self):\n send_msg = False # send get data if change (rain, end program ....\n last_rain = False \n en_rain = True\n en_line = True\n en_line2 = True\n \n # ex: tank=100&rain=1&humi=55&line=1&lastrun=15.4.2016&station=kurnik&duration=5min 3sec&api=a1b2v5f4 \n rain = 0 # rain status for rain=0 or rain=1 in get data\n lastrun = \"\" # date and time for lastrun=xxxxx in get data\n tank = \"\" # actual %0-100 in water tank for tank=xx in get data\n duration = \"\" # duration in last program for duration=xx:yy in get data\n station = \"\" # name end station for station=abcde in get data\n humi = \"\" # humidity in station for humi=xx in get data\n line = \"\" # actual state from UPS plugin for line=0 or line=1 in get data\n\n finished_count = len([run for run in log.finished_runs() if not run['blocked']]) \n\n while not self._stop.is_set():\n try:\n \n # Send data if rain detected, power line state a new finished run is found\n if remote_options[\"use\"]: \n ### water tank level ###\n try:\n from plugins import tank_humi_monitor\n tank = tank_humi_monitor.get_tank()\n if tank < 0: # -1 is error I2C device for ping not found in tank_humi_monitor\n tank = \"\"\n except Exception:\n tank = \"\"\n\n ### power line state ###\n try:\n from plugins import ups_adj\n lin = ups_adj.get_check_power() # read state power line from plugin\n if lin==1: \n if en_line: # send only if change \n send_msg = True \n en_line = False\n en_line2 = True \n line = 0 # no power on web\n \n if lin==0:\n if en_line2: # send only if change\n send_msg = True\n en_line2 = False\n en_line = True\n line = 1 # power ok on web\n \n except Exception:\n line = \"\"\n\n ### rain state ###\n if inputs.rain_sensed() and not last_rain:\n send_msg = True\n last_rain = inputs.rain_sensed()\n if inputs.rain_sensed():\n rain = 1\n en_rain = True\n else:\n rain = 0 \n if en_rain: # send data if no rain (only if change rain/norain...)\n send_msg = True\n en_rain = False\n\n if not options.rain_sensor_enabled: # if rain sensor not used\n rain = \"\"\n \n ### program and station ###\n finished = [run for run in log.finished_runs() if not run['blocked']] \n if len(finished) > finished_count:\n las = datetime_string()\n lastrun = re.sub(\" \", \"_\", las) # eliminate gap in the title to _\n send_msg = True\n ### humidity in station ###\n try:\n from plugins import tank_humi_monitor\n humi = int(tank_humi_monitor.get_humidity((stations.get(run['station']).index)+1)) # 0-7 to 1-8 humidity \n if humi < 0:\n humi = \"\" \n \n except Exception:\n humi = \"\"\n \n for run in finished[finished_count:]:\n dur = (run['end'] - run['start']).total_seconds()\n minutes, seconds = divmod(dur, 60)\n sta = \"%s\" % stations.get(run['station']).name \n station = re.sub(\" \", \"_\", sta) # eliminate gap in the title to _\n duration = \"%02d:%02d\" % (minutes, seconds) \n\n finished_count = len(finished)\n\n if (send_msg): # if enabled send data\n body = ('tank=' + str(tank))\n body += ('&rain=' + str(rain))\n body += ('&humi=' + str(humi))\n body += ('&line=' + str(line))\n body += ('&lastrun=' + str(lastrun)) \n body += ('&station=' + str(station))\n body += ('&duration=' + str(duration))\n body += ('&api=' + 
remote_options['api']) # API password\n self.try_send(body) # Send GET data to remote server \n send_msg = False # Disable send data \n \n self._sleep(2)\n\n except Exception:\n log.error(NAME, _('Remote plug-in') + ':\\n' + traceback.format_exc())\n self._sleep(60)\n\n\nremote_sender = None\n\n\n################################################################################\n# Helper functions: #\n################################################################################\ndef start():\n global remote_sender\n if remote_sender is None:\n remote_sender = RemoteSender()\n\n\ndef stop():\n global remote_sender\n if remote_sender is not None:\n remote_sender.stop()\n remote_sender.join()\n remote_sender = None\n\n\ndef send_data(text):\n \"\"\"Send GET data\"\"\"\n if remote_options['use'] != '' and remote_options['api'] != '' and remote_options['rem_adr'] != '':\n req = urllib2.Request(url=remote_options['rem_adr']+'save.php/?' + text)\n req.add_header('Referer', 'OSPy sprinkler') \n f = urllib2.urlopen(req)\n log.info(NAME, _('Remote server reply') + ':\\n' + f.read())\n else:\n raise Exception(_('Remote plug-in is not properly configured') + '!')\n\n\n################################################################################\n# Web pages: #\n################################################################################\nclass settings_page(ProtectedPage):\n \"\"\"Load an html page for entering remote adjustments.\"\"\"\n\n def GET(self):\n return self.plugin_render.remote_notifications(remote_options, log.events(NAME))\n\n def POST(self):\n remote_options.web_update(web.input())\n qdict = web.input()\n test = get_input(qdict, 'test', False, lambda x: True)\n\n if remote_sender is not None:\n remote_sender.update()\n\n if test:\n body = ('tank=')\n body += ('&rain=')\n body += ('&humi=')\n body += ('&line=')\n body += ('&lastrun=')\n body += ('&station=')\n body += ('&duration=')\n body += ('&program=')\n body += ('&api=' + remote_options['api']) # API password\n remote_sender.try_send(body)\n\n raise web.seeother(plugin_url(settings_page), True)\n\n\nclass settings_json(ProtectedPage):\n \"\"\"Returns plugin settings in JSON format.\"\"\"\n\n def GET(self):\n web.header('Access-Control-Allow-Origin', '*')\n web.header('Content-Type', 'application/json')\n return json.dumps(remote_options)\n","sub_path":"plugins/remote_notifications/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410275415","text":"import psycopg2\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pymssql\r\nimport sys\r\nfrom pathlib import Path\r\n\r\n## Append the the path to the custom modules i.e. 
CernDBConnector\r\nsys.path.append(\"C://Users/nb044705/OneDrive - Cerner Corporation/DEVELOPMENT/github\")\r\n\r\n## Set variable to location of database.ini file\r\n## https://vault.cerner.com/credential/read?credID=680228\r\nCernDBConnector_INI = (\"C:/Users/nb044705/OneDrive - Cerner Corporation/development/credentials/database.ini\")\r\nfrom CernDBConnector import config\r\n\r\nSQLPath = (\"C:/Users/NB044705/OneDrive - Cerner Corporation/development/github/VMData/\")\r\n\r\n\r\n## MSSQL DB Connection\r\n## Obtain the db connection parameters and pass to MS SQL\r\n## module returning a connection to the database\r\ndef connectMSSQL(db,CernDBConnector_INI):\r\n params = config.config(db,CernDBConnector_INI)\r\n conn = pymssql.connect(**params)\r\n return(conn)\r\n\r\n\r\n## Pass in the database to connect to and the sql via a file handle\r\n## return the queried data as a pandas dataframe object\r\ndef getMSDBData(db,sql,):\r\n conn = None\r\n try:\r\n\r\n ## Open database connection\r\n conn = connectMSSQL(db,CernDBConnector_INI)\r\n\r\n ## Open and read the file as a single buffer\r\n sqlFile = open(SQLPath + 'SQL/' + sql,'r')\r\n\r\n df = pd.read_sql_query(sqlFile.read(),conn)\r\n\r\n ## close db conn and sql file\r\n sqlFile.close()\r\n #cur.close()\r\n return(df)\r\n\r\n except (Exception, pymssql.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()\r\n print('Database connection closed.')\r\n\r\n## PostgreSQL DB Connection\r\n## Obtain the db connection parameters and pass to Postgres\r\n## module returning a connection to the database\r\ndef connectPGSQL(db,CernDBConnector_INI):\r\n params = config.config(db,CernDBConnector_INI)\r\n conn = psycopg2.connect(**params)\r\n return(conn)\r\n\r\n\r\n## Pass in the database to connect to and the sql via a file handle\r\n## return the queried data as a pandas dataframe object\r\ndef getDBData(db,sql):\r\n conn = None\r\n try:\r\n\r\n ## Open database connection\r\n conn = connectPGSQL(db,CernDBConnector_INI)\r\n\r\n ## Open and read the file as a single buffer\r\n sqlFile = open(SQLPath + 'SQL/' + sql,'r')\r\n\r\n df = pd.read_sql_query(sqlFile.read(),conn)\r\n\r\n ## close db conn and sql file\r\n sqlFile.close()\r\n #cur.close()\r\n return(df)\r\n\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()\r\n print('Database connection closed.')\r\n\r\n\r\n#######################################################\r\n## VM Summary for Servers in the ESXA and ESXB Pools ##\r\n#######################################################\r\n\r\n## get server os data from Remedy\r\nrem_data = getDBData('cmis','rem_data.sql')\r\nrem_data['name'] = rem_data['name'].str.lower()\r\n\r\n## split fq name to name and domain\r\nnew = rem_data[\"name\"].str.split(\".\", n = 1, expand = True)\r\n ## Making separate name column from new data frame \r\nrem_data[\"vmname\"]= new[0]\r\n ## Making separate domain column from new data frame \r\nrem_data[\"domain\"]= new[1]\r\n\r\n## get vm data from vcenter\r\nvm_data = getMSDBData('vcenter','vm_data.sql')\r\nvm_data['location'] = vm_data['ESXiHost'].map(lambda x: x[0:3])\r\nvm_data['type'] = vm_data['ESXiHost'].map(lambda x: x[3:7])\r\nvm_data.rename(columns = {'VMName' : 'vmname'}, inplace = True)\r\nvm_data['vmname'] = vm_data['vmname'].str.lower()\r\nvm_data.drop_duplicates(subset =\"id\",keep = False, inplace = True) \r\n\r\n## add sql clusters\r\nsqlvm_data = 
getMSDBData('vcenter','vm_data.sql')\r\nsqlvm_data['location'] = sqlvm_data['ESXiHost'].map(lambda x: x[0:3])\r\nsqlvm_data['type'] = sqlvm_data['ESXiHost'].map(lambda x: x[3:9])\r\nsqlvm_data.rename(columns = {'VMName' : 'vmname'}, inplace = True)\r\nsqlvm_data['vmname'] = sqlvm_data['vmname'].str.lower()\r\nsqlvm_data.drop_duplicates(subset ="id",keep = False, inplace = True)\r\nsqlvm_data = pd.merge(sqlvm_data,rem_data, on='vmname',how='left')\r\n\r\n## merge vcenter data and remedy data\r\nvm_data = pd.merge(vm_data,rem_data, on='vmname',how='left')\r\n\r\n## fill in missing os from vc with remedy data\r\nvm_data.os.fillna(vm_data.rem_os, inplace=True)\r\n\r\n## fill blank os with other\r\nvm_data.os.fillna('Other', inplace=True)\r\n\r\n## prod mt servers\r\nesxa = vm_data['type']=='esxa'\r\nesxa = vm_data[esxa]\r\nesxa = esxa.reset_index(drop=True)\r\nprod = esxa.groupby(['location','type','os'])['VMMemoryGB'].sum().reset_index()\r\n\r\n## non-prod mt servers\r\nesxb = vm_data['type']=='esxb'\r\nesxb = vm_data[esxb]\r\nesxb = esxb.reset_index(drop=True)\r\nnonprod = esxb.groupby(['location','type','os'])['VMMemoryGB'].sum().reset_index()\r\n\r\n## sql mt servers\r\nesxsql = sqlvm_data['type']=='esxsql'\r\nesxsql = sqlvm_data[esxsql]\r\nesxsql = esxsql.reset_index(drop=True)\r\nsql = esxsql.groupby(['location','type','os'])['VMMemoryGB'].sum().reset_index()\r\n\r\n## vm data from the esxa, esxb, and esxsql pools\r\nvm_data = [esxa,esxb,esxsql]\r\nvm_data = pd.concat(vm_data)\r\nvm_data = vm_data.reset_index(drop=True)\r\n\r\n## merge the prod, nonprod, and sql df's\r\nfull_data = [prod,nonprod,sql]\r\nfull_data = pd.concat(full_data)\r\nsum_data = full_data.reset_index(drop=True)\r\n\r\n## get esx quantity\r\nsum_data['esx_qty'] = sum_data['VMMemoryGB']/690\r\n\r\nsum_data = sum_data.round()\r\n\r\n\r\n##########################################################\r\n## Host and Cluster Summary for the ESXA and ESXB Pools ##\r\n##########################################################\r\n\r\n## get host data\r\nhost_data = getMSDBData('vcenter','host_data.sql')\r\nsqlhost_data = getMSDBData('vcenter','host_data.sql')\r\n\r\n## Extract region, dc, and pool type from the hostname\r\nhost_data['region'] = host_data['HostName'].map(lambda x: x[0:2])\r\nhost_data['location'] = host_data['HostName'].map(lambda x: x[0:3])\r\nhost_data['type'] = host_data['HostName'].map(lambda x: x[3:7])\r\n\r\nsqlhost_data['region'] = sqlhost_data['HostName'].map(lambda x: x[0:2])\r\nsqlhost_data['location'] = sqlhost_data['HostName'].map(lambda x: x[0:3])\r\nsqlhost_data['type'] = sqlhost_data['HostName'].map(lambda x: x[3:9])\r\n\r\n## Change region from KC and LS to US\r\nhost_data['region']= host_data['region'].replace('kc', 'us') \r\nhost_data['region']= host_data['region'].replace('ls', 'us')\r\n\r\nsqlhost_data['region']= sqlhost_data['region'].replace('kc', 'us') \r\nsqlhost_data['region']= sqlhost_data['region'].replace('ls', 'us')\r\n\r\n## Use the US region to filter out global hosts\r\nushost_data = host_data['region']=='us'\r\nhost_data = host_data[ushost_data]\r\nhost_data = host_data.reset_index(drop=True) \r\n\r\nsqlushost_data = sqlhost_data['region']=='us'\r\nsqlhost_data = sqlhost_data[sqlushost_data]\r\nsqlhost_data = sqlhost_data.reset_index(drop=True) \r\n\r\n## esxa hosts\r\nesxa_cluster = host_data['type']=='esxa'\r\nhesxa = host_data[esxa_cluster]\r\nhesxa = hesxa.reset_index(drop=True)\r\n\r\n## esxb hosts\r\nesxb_cluster = host_data['type']=='esxb'\r\nhesxb = host_data[esxb_cluster]\r\nhesxb = 
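The prod/non-prod/SQL branches above repeat the same mask-filter-groupby steps with only the pool name changing, which is exactly how the `vm_data`/`sqlvm_data` mix-ups fixed here crept in. A small helper would remove the duplication; a sketch assuming the column names used in this script (`pool_memory_summary` is a hypothetical name):

```python
import pandas as pd

def pool_memory_summary(df: pd.DataFrame, pool: str) -> pd.DataFrame:
    """Sum VM memory per location/os for one ESXi pool (e.g. 'esxa')."""
    subset = df[df['type'] == pool].reset_index(drop=True)
    return subset.groupby(['location', 'type', 'os'])['VMMemoryGB'].sum().reset_index()

# prod    = pool_memory_summary(vm_data, 'esxa')
# nonprod = pool_memory_summary(vm_data, 'esxb')
# sql     = pool_memory_summary(sqlvm_data, 'esxsql')
```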
hesxb.reset_index(drop=True)\r\n\r\n## esxsql hosts\r\nesxsql_cluster = sqlhost_data['type']=='esxsql'\r\nhesxsql = sqlhost_data[esxsql_cluster]\r\nhesxsql = hesxsql.reset_index(drop=True)\r\n\r\n## host data\r\nhost_data = [hesxa,hesxb,hesxsql]\r\nhost_data = pd.concat(host_data, sort=False)\r\nhost_data = host_data.reset_index(drop=True)\r\n\r\n## host memory total\r\nmemory = host_data.groupby(['location','Model','CPUModel'])['MemorySize'].sum().reset_index()\r\n\r\n## cluster summary\r\ncluster_summary = host_data.groupby(['location','Cluster','Model','CPUModel'])['HostName'].count().reset_index()\r\ncluster_summary.rename(columns = {'HostName' : 'count'}, inplace = True)\r\ncluster_summary['count'] = cluster_summary['count'].apply(str)\r\ncluster_summary['cluster_count'] = cluster_summary['Cluster'] + " " + cluster_summary['count']\r\ncluster_summary = cluster_summary.groupby(['location','Model','CPUModel'])['cluster_count'].apply(' | '.join).reset_index()\r\n\r\n## dc host summary\r\nhost_summary = host_data.groupby(['location','Model','CPUModel'])['HostName'].count().reset_index()\r\nhost_summary.rename(columns = {'HostName' : 'count'}, inplace = True)\r\nhost_summary = pd.merge(host_summary, cluster_summary, on=['location','Model','CPUModel'], how='left')\r\nhost_summary = pd.merge(host_summary, memory, on=['location','Model','CPUModel'], how='left')\r\nhost_summary['usable_memory'] = host_summary['MemorySize']*.9\r\nhost_summary.rename(columns = {'MemorySize' : 'installed_memory'}, inplace = True)\r\nhost_summary.rename(columns = {'Model' : 'model'}, inplace = True)\r\nhost_summary.rename(columns = {'CPUModel' : 'cpu_model'}, inplace = True)\r\n\r\n\r\n## write df's to csv\r\nvm_data.to_csv(r'C:/Users/nb044705/Cerner Corporation/SSE IPA Capacity Management - Reference Documents/misc/full_vm_data.csv',index=False)\r\nsum_data.to_csv(r'C:/Users/nb044705/Cerner Corporation/SSE IPA Capacity Management - Reference Documents/misc/vm_data.csv',index=False)\r\nhost_summary.to_csv(r'C:/Users/nb044705/Cerner Corporation/SSE IPA Capacity Management - Reference Documents/misc/host_summary.csv',index=False)\r\n\r\nprint(hesxsql)\r\n\r\n#sum_data.head()\r\n\r\n","sub_path":"vm_capacity.py","file_name":"vm_capacity.py","file_ext":"py","file_size_in_byte":9197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"263376262","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 26 00:08:09 2018\n\n@author: Mauro\n\"\"\"\n\nimport bfh\nimport io\nimport os\n\nimport Logging\n\n#==============================================================================\n# Logging\n#==============================================================================\n\nlog = Logging.get_logger(__name__, \"WARNING\")\n\n#==============================================================================\n# Banned Items class\n#==============================================================================\nclass BannedItems:\n \n \n def __init__(self, file, item_type):\n self.file = file\n \n if not os.path.isfile(self.file): \n with open(self.file, \"wb\") as f:\n bf = bfh.BinaryFile(f)\n bf.write(\"I\", 0) \n \n self.ids = []\n self.item_type = item_type\n \n self.loadFile()\n \n\n \n def loadFile(self):\n \n with open(self.file, \"rb\") as f:\n bf = bfh.BinaryFile(f)\n \n nitems = bf.read('I')\n \n for i in range(nitems):\n if self.item_type == \"string\":\n item = bf.read_string()\n elif self.item_type == \"hash_id\":\n item = 
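The `BannedItems` store persists a 4-byte record count followed by variable-length records through the `bfh.BinaryFile` wrapper, whose internals are not shown here. An illustration of the same count-prefixed layout using only the stdlib; the byte order (`<` = little-endian) and the 2-byte per-record length prefix are assumptions for the sketch, not the real `bfh` format:

```python
import struct

def write_ids(path, ids):
    with open(path, 'wb') as f:
        f.write(struct.pack('<I', len(ids)))       # 4-byte record-count header
        for item in ids:
            raw = item.encode('utf-8')
            f.write(struct.pack('<H', len(raw)))   # assumed 2-byte length prefix
            f.write(raw)

def read_ids(path):
    with open(path, 'rb') as f:
        (n,) = struct.unpack('<I', f.read(4))
        ids = []
        for _ in range(n):
            (ln,) = struct.unpack('<H', f.read(2))
            ids.append(f.read(ln).decode('utf-8'))
        return ids
```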
bf.read_256hash()\n else:\n item = bf.read(self.item_type)\n self.ids.append(item)\n \n log.debug(\"read items:\")\n for item in self.ids:\n log.debug(\"{}\".format(item))\n \n def addItem(self, item):\n \n if item in self.ids:\n raise Exception(\"Banned items duplicate id\")\n \n self.ids.append(item)\n \n # open in \"r+b\" so existing records are kept; \"wb\" would truncate them\n with open(self.file, \"r+b\") as f:\n # change the size\n co = 0\n bf = bfh.BinaryFile(f, co)\n bf.write('I', len(self.ids))\n \n # adjust offset\n co = bf.file.seek(0, io.SEEK_END)\n bf.co = co\n \n # write data\n if self.item_type == \"string\":\n bf.write_string(item)\n elif self.item_type == \"hash_id\":\n bf.write_256hash(item)\n else:\n bf.write(self.item_type, item)\n \n \n","sub_path":"src/BannedItems.py","file_name":"BannedItems.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"354189616","text":"import igraph as ig\ndef simple_1():\n    vertices=['A','B','C','D','E','F','G','H','I','J'] # vertices\n    edges=[(0,1),(1,2),(2,3),(3,4),(4,5),(5,6),(6,7),(7,1),(1,8),(8,2),(2,4),(4,9),(9,5),(5,7),(7,0)]\n    graphStyle={'vertex_size':20}\n    g=ig.Graph(vertex_attrs={\"label\":vertices},edges=edges,directed=True)\n    g.write_svg(\"simple_star.svg\",width=500,height=300,**graphStyle)\n\ndef simple_read_net():\n    '''Read a network from a Pajek-format file'''\n    g=ig.read(\"testdata/GR3_60.NET\",format=\"pajek\")\n    # set edge and vertex colors\n    g.vs[\"color\"]=\"#3d679d\"\n    g.es[\"color\"]=\"red\"\n\n    graphStyle={\"vertex_size\":12,'margin':6}\n    graphStyle[\"layout\"]=g.layout(\"fr\") # set the layout\n    g.write_svg('GR3_60_graph.svg',width=600,height=600,**graphStyle)\n\ndef protein_interaction_network():\n    g = ig.read(\"yeast/YeastS.net\", format=\"pajek\")\n    #g.vs[\"color\"] = \"#3d679d\"\n    #g.es[\"color\"] = \"red\"\n    graphStyle = {\"layout\":'auto'}\n    g.write_svg('YeastS_graph.svg', width=600, height=600,**graphStyle)\nif __name__=='__main__':\n    simple_1()\n    simple_read_net()\n    protein_interaction_network()","sub_path":"数据可视化/图、网络模型/igraph演示.py","file_name":"igraph演示.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"304212227","text":"import pickle\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom holoviews import Image, Layout\nfrom holoviews.element.comparison import ComparisonTestCase\nfrom holoviews.interface.collector import ViewRef\n\n\nclass LayoutTest(ComparisonTestCase):\n\n def setUp(self):\n self.fixed_error = (\"No attribute 'Test' in this AttrTree,\"\n \" and none can be added because fixed=True\")\n super(LayoutTest, self).setUp()\n\n def test_layout_init(self):\n Layout()\n\n def test_layout_getter(self):\n tr = Layout()\n self.assertEqual(isinstance(tr.Test.Path, Layout), True)\n\n def test_layout_getter_fixed(self):\n tr = Layout()\n tr.fixed = True\n try:\n tr.Test.Path\n raise AssertionError\n except AttributeError as e:\n self.assertEqual(str(e), self.fixed_error)\n\n def test_layout_setter(self):\n tr = Layout()\n tr.Test.Path = 42\n self.assertEqual(tr.Test.Path, 42)\n\n def test_layout_setter_fixed(self):\n tr = Layout()\n tr.fixed = True\n try:\n tr.Test.Path = 42\n raise AssertionError\n except AttributeError as e:\n self.assertEqual(str(e), self.fixed_error)\n\n def test_layout_shallow_fixed_setter(self):\n tr = Layout()\n tr.fixed = True\n try:\n tr.Test = 42\n raise AssertionError\n except AttributeError as e:\n self.assertEqual(str(e), self.fixed_error)\n\n def test_layout_toggle_fixed(self):\n tr = Layout()\n tr.fixed 
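The igraph demo above builds a graph, picks a Fruchterman-Reingold layout, and writes an SVG. A small sketch of inspecting a graph before plotting, using only python-igraph calls that already appear in the demo plus `Graph.degree()`; the toy chain graph and `chain.svg` filename are illustrative:

```python
import igraph as ig

edges = [(0, 1), (1, 2), (2, 3), (3, 4)]
g = ig.Graph(edges=edges, directed=True)
print(g.vcount(), g.ecount())      # 5 vertices, 4 edges
print(g.degree(mode="all"))        # total degree per vertex
layout = g.layout("fr")            # Fruchterman-Reingold, as in the demo
g.write_svg("chain.svg", layout=layout, width=300, height=200)
```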
= True\n try:\n tr.Test = 42\n raise AssertionError\n except AttributeError as e:\n self.assertEqual(str(e), self.fixed_error)\n tr.fixed = False\n tr.Test = 42\n\n def test_layout_set_path(self):\n tr = Layout()\n tr.set_path(('Test', 'Path'), -42)\n self.assertEqual(tr.Test.Path, -42)\n\n\n def test_layout_update(self):\n tr1 = Layout()\n tr2 = Layout()\n tr1.Test1.Path1 = 42\n tr2.Test2.Path2 = -42\n tr1.update(tr2)\n self.assertEqual(tr1.Test1.Path1, 42)\n self.assertEqual(tr1.Test2.Path2, -42)\n\n\n def test_contains_child(self):\n tr = Layout()\n tr.Test.Path = 42\n self.assertEqual('Path' in tr.Test, True)\n\n def test_contains_tuple(self):\n tr = Layout()\n tr.Test.Path = 42\n self.assertEqual(('Test', 'Path') in tr, True)\n\n def test_simple_pickle(self):\n tr = Layout()\n dumped = pickle.dumps(tr)\n tr2 = pickle.loads(dumped)\n self.assertEqual(tr.data, OrderedDict())\n self.assertEqual(tr.data, tr2.data)\n\n def test_pickle_with_data(self):\n tr = Layout()\n tr.Example1.Data = 42\n tr.Example2.Data = 'some data'\n dumped = pickle.dumps(tr)\n tr2 = pickle.loads(dumped)\n self.assertEqual(tr.data, OrderedDict([(('Example1', 'Data'), 42),\n (('Example2', 'Data'), 'some data')]))\n self.assertEqual(tr.data, tr2.data)\n\n\n\nclass ViewRefTest(ComparisonTestCase):\n\n def setUp(self):\n super(ViewRefTest, self).setUp()\n tree = Layout()\n tree.Example.Path1 = Image(np.random.rand(5,5))\n tree.Example.Path2 = Image(np.random.rand(5,5))\n self.tree = tree\n\n def test_resolve_constructor(self):\n ref = ViewRef('Example.Path1 * Example.Path2')\n overlay = ref.resolve(self.tree)\n self.assertEqual(len(overlay), 2)\n\n def test_resolve_setattr(self):\n ref = ViewRef().Example.Path1 * ViewRef().Example.Path2\n overlay = ref.resolve(self.tree)\n self.assertEqual(len(overlay), 2)\n\n def test_viewref_pickle(self):\n ref = ViewRef('Example.Path1 * Example.Path2')\n dumped = pickle.dumps(ref)\n ref2 = pickle.loads(dumped)\n self.assertEqual(ref.specification, [('Example', 'Path1'), ('Example', 'Path2')])\n self.assertEqual(ref.specification, ref2.specification)\n\n","sub_path":"tests/testcollector.py","file_name":"testcollector.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124350483","text":"# -*- encoding: utf-8 -*-\n\nimport datetime\nimport math\n\nfrom PyQt4.QtCore import QObject, pyqtSignal\nfrom src.base.monitor import CX4Monitor\n\n\nclass DeltaCXMonitor(CX4Monitor):\n valueToStorage = pyqtSignal(QObject, object)\n\n def processing(self, *args):\n now_time = str(datetime.datetime.now())\n handle, val, params = self._gfa(args, 1), self._gfa(args, 2), self._gfa(\n args, 3)\n\n if handle is not None:\n if self.ch_prev_value is None:\n self.ch_prev_value = self.ch_now_value\n self.ch_now_value = handle\n\n if self.ch_prev_value is not None and \\\n math.fabs(self.ch_prev_value - self.ch_now_value) > \\\n self.get_property('delta'):\n self.ch_prev_value = self.ch_now_value\n\n text = '(%s), %s %s %s' % (self.personal_name, handle, val,\n params)\n self.default_log(text)\n\n self.send_data(self.default_form([self.name,\n self.personal_name, handle,\n now_time]))\n\n return 0\n\n def _post_init(self):\n self.ch_now_value = None\n self.ch_prev_value = None\n","sub_path":"src/monitors/cx4monitors/deltamonitor.py","file_name":"deltamonitor.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
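The `DeltaCXMonitor` above only forwards a reading when it has moved by more than a configured delta since the last forwarded value, i.e. a deadband filter. A framework-free sketch of the same idea (the `Deadband` class is a hypothetical stand-in, not part of the CX4 code):

```python
class Deadband:
    def __init__(self, delta):
        self.delta = delta
        self.last = None

    def update(self, value):
        """Return value if it moved enough to report, else None."""
        if self.last is None or abs(value - self.last) > self.delta:
            self.last = value
            return value
        return None

db = Deadband(delta=0.5)
print([db.update(v) for v in [1.0, 1.2, 1.6, 1.7, 3.0]])
# [1.0, None, 1.6, None, 3.0]
```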
+{"seq_id":"216160408","text":"import numpy as np\n#import pyfits\nimport matplotlib.pyplot as plt\nimport pdb\n\n#plots histograms of what fraction of each value for each parameter\n# produced \"acceptably fitting\" chi^2 fits.\n\ndef main(save=False):\n\n #set variables to plot against each other\n vars = ['R0', 'ALP_I', 'ALP_O', 'G', 'KSI0', 'BETA']\n dir = '20180503/'\n data = np.loadtxt(dir+'grid_search_stats_20180503.txt', dtype='str')\n chisq_accept = 1.047 #threshold for acceptable fit\n \n for i in range(len(vars)):\n col = np.squeeze(np.where(data[0,:] == vars[i])) #eg 9\n values = np.unique(data[1:,col]).astype('float')\n if i==0:\n params = [values]\n else:\n params.append(values) #create list of all values for all params\n\n chi_col_index = np.squeeze(np.where(data[0,:] == 'CHISQ/dof'))\n\n plt.figure(figsize=(7.5, 10), dpi=100)\n \n for i in range(len(params)): #same as for var in vars\n xparam = vars[i] #eg 'r0'\n \n #number of values for that param, value and n_good and n_occurances\n tabulation = np.zeros((len(params[i]) , 3))\n tabulation[:, 0] = params[i] #first columns is the values\n\n col = np.squeeze(np.where(data[0,:] == vars[i])) #e.g. 9\n for j in range(1, np.shape(data)[0]): #go down column\n loc = np.where(tabulation[:,0] == float(data[j, col]))\n tabulation[loc, 2] += 1 #add to n_occurences\n if float(data[j, chi_col_index]) <= chisq_accept:\n tabulation[loc, 1] += 1 # add to n_good\n\n\n if xparam == 'R0': tit1 = r'$r_0$'\n elif xparam == 'ALP_I': tit1 = r'$\\alpha_{in}$'\n elif xparam == 'ALP_O': tit1 = r'$\\alpha_{out}$'\n elif xparam == 'BETA': tit1 = r'$\\beta$'\n elif xparam == 'KSI0': tit1 = r'$\\xi$'\n elif xparam == 'G': tit1 = r'$g$'\n\n plt.subplot(len(vars)/2, 2, i+1)\n plt.bar(range(len(tabulation[:,0])), tabulation[:,1] / tabulation[:,2],\n align='center', width=0.1*len(params[i]))#, color='black')\n if xparam=='R0':\n params[i] = params[i].astype('string')\n for j in range(len(params[i])):\n params[i][j] = params[i][j][:2]\n plt.xticks(range(len(params[i])) , params[i], fontsize=14)\n plt.yticks(fontsize=14)\n #label=tabulation[:,0])\n\n #plt.title('Fraction of '+ tit1 + ' values that produced '+\n # r'$\\chi_{\\nu}^{2} \\leq$'+str(chisq_accept)[:6], fontsize=21)\n #plt.ylabel('Fraction', fontsize=21)\n plt.xlabel(tit1+' Value', fontsize=21)\n if i==2:\n plt.ylabel('Fraction', fontsize=21)\n\n plt.suptitle('Fraction of values that produced '+\n r'$\\chi_{\\nu}^{2} \\leq$'+str(chisq_accept)[:6], fontsize=21)\n plt.subplots_adjust(left=0.12, bottom=0.08, right=0.99, top=0.92, hspace=0.31)\n if save==True:\n plt.savefig('figs/histograms.png', dpi=150)\n plt.close()\n else:\n plt.show()\n \n","sub_path":"hip79977/plthists.py","file_name":"plthists.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"222457398","text":"# 对数据库进行测试添加和删除\nimport unittest\nfrom demo3_bookDemo import app, db, Author\n\n\nclass DataBaseTestCase(unittest.TestCase):\n # 因为是测试数据的添加和删除,所以需要单独为测试创建一个database\n def setUp(self):\n app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql://root:mysql@127.0.0.1:3306/booktest_unitest\"\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n db.create_all()\n\n def tearDown(self):\n \"\"\"在测试完毕之后会进行调用,可以做数据的清除操作\"\"\"\n db.session.remove()\n db.drop_all()\n\n def test_add_and_delete_author(self):\n author = Author(name='哈哈')\n db.session.add(author)\n db.session.commit()\n\n # 查询\n author = Author.query.filter(Author.name == \"哈哈\").first()\n 
self.assertIsNotNone(author)\n\n # import time\n # time.sleep(15)\n\n # 删除\n db.session.delete(author)\n db.session.commit()\n\n def test_query_author(self):\n print(\"哈哈\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Flask_Demo_All/Flask_day04_04_unittest/demo3_bookDemo_test.py","file_name":"demo3_bookDemo_test.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"250175086","text":"\"\"\"\nTrains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym.\nModified from https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport os\nimport random\nimport time\n\nimport gym\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\n\ndef preprocess(image):\n \"\"\" Pre-process 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. \"\"\"\n\n image = torch.Tensor(image)\n\n # Crop, downsample by factor of 2, and turn to grayscale by keeping only red channel\n image = image[35:195]\n image = image[::2,::2, 0]\n\n image[image == 144] = 0 # erase background (background type 1)\n image[image == 109] = 0 # erase background (background type 2)\n image[image != 0] = 1 # everything else (paddles, ball) just set to 1\n\n return image.flatten().float()\n\n\ndef calc_discounted_future_rewards(rewards, discount_factor):\n r\"\"\"\n Calculate the discounted future reward at each timestep.\n\n discounted_future_reward[t] = \\sum_{k=1} discount_factor^k * reward[t+k]\n\n \"\"\"\n\n discounted_future_rewards = torch.empty(len(rewards))\n\n # Compute discounted_future_reward for each timestep by iterating backwards\n # from end of episode to beginning\n discounted_future_reward = 0\n for t in range(len(rewards) - 1, -1, -1):\n # If rewards[t] != 0, we are at game boundary (win or loss) so we\n # reset discounted_future_reward to 0 (this is pong specific!)\n if rewards[t] != 0:\n discounted_future_reward = 0\n\n discounted_future_reward = rewards[t] + discount_factor * discounted_future_reward\n discounted_future_rewards[t] = discounted_future_reward\n\n return discounted_future_rewards\n\n\nclass PolicyNetwork(nn.Module):\n \"\"\" Simple two-layer MLP for policy network. 
\"\"\"\n\n def __init__(self, input_size, hidden_size):\n super().__init__()\n\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, 1)\n\n def forward(self, x):\n x = self.fc1(x)\n x = nn.functional.relu(x)\n\n x = self.fc2(x)\n prob_up = torch.sigmoid(x)\n\n return prob_up\n\n\ndef run_episode(model, env, discount_factor, render=False):\n UP = 2\n DOWN = 3\n\n observation = env.reset()\n prev_x = preprocess(observation)\n\n action_chosen_log_probs = []\n rewards = []\n\n done = False\n timestep = 0\n\n while not done:\n if render:\n # Render game window at 30fps\n time.sleep(1 / 30)\n env.render()\n\n # Preprocess the observation, set input to network to be difference\n # image between frames\n cur_x = preprocess(observation)\n x = cur_x - prev_x\n prev_x = cur_x\n\n # Run the policy network and sample action from the returned probability\n prob_up = model(x)\n action = UP if random.random() < prob_up else DOWN # roll the dice!\n\n # Calculate the probability of sampling the action that was chosen\n action_chosen_prob = prob_up if action == UP else (1 - prob_up)\n action_chosen_log_probs.append(torch.log(action_chosen_prob))\n\n # Step the environment, get new measurements, and updated discounted_reward\n observation, reward, done, info = env.step(action)\n rewards.append(torch.Tensor([reward]))\n timestep += 1\n\n # Concat lists of log probs and rewards into 1-D tensors\n action_chosen_log_probs = torch.cat(action_chosen_log_probs)\n rewards = torch.cat(rewards)\n\n # Calculate the discounted future reward at each timestep\n discounted_future_rewards = calc_discounted_future_rewards(rewards, discount_factor)\n\n # Standardize the rewards to have mean 0, std. deviation 1 (helps control the gradient estimator variance).\n # It encourages roughly half of the actions to be rewarded and half to be discouraged, which\n # is helpful especially in beginning when positive reward signals are rare.\n discounted_future_rewards = (discounted_future_rewards - discounted_future_rewards.mean()) \\\n / discounted_future_rewards.std()\n\n # PG magic happens right here, multiplying action_chosen_log_probs by future reward.\n # Negate since the optimizer does gradient descent (instead of gradient ascent)\n loss = -(discounted_future_rewards * action_chosen_log_probs).sum()\n\n return loss, rewards.sum()\n\n\ndef train(render=False):\n # Hyperparameters\n input_size = 80 * 80 # input dimensionality: 80x80 grid\n hidden_size = 200 # number of hidden layer neurons\n learning_rate = 7e-4\n discount_factor = 0.99 # discount factor for reward\n\n batch_size = 4\n save_every_batches = 5\n\n # Create policy network\n model = PolicyNetwork(input_size, hidden_size)\n\n # Load model weights and metadata from checkpoint if exists\n if os.path.exists('checkpoint.pth'):\n print('Loading from checkpoint...')\n save_dict = torch.load('checkpoint.pth')\n\n model.load_state_dict(save_dict['model_weights'])\n start_time = save_dict['start_time']\n last_batch = save_dict['last_batch']\n else:\n start_time = datetime.datetime.now().strftime(\"%H.%M.%S-%m.%d.%Y\")\n last_batch = -1\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n # Set up tensorboard logging\n tf_writer = tf.summary.create_file_writer(\n os.path.join('tensorboard_logs', start_time))\n tf_writer.set_as_default()\n\n # Create pong environment (PongDeterministic versions run faster)\n env = gym.make(\"PongDeterministic-v4\")\n\n # Pick up at the batch number we left off at to make tensorboard plots nicer\n 
batch = last_batch + 1\n while True:\n\n mean_batch_loss = 0\n mean_batch_reward = 0\n for batch_episode in range(batch_size):\n\n # Run one episode\n loss, episode_reward = run_episode(model, env, discount_factor, render)\n mean_batch_loss += loss / batch_size\n mean_batch_reward += episode_reward / batch_size\n\n # Boring book-keeping\n print(f'Episode reward total was {episode_reward}')\n\n # Backprop after `batch_size` episodes\n optimizer.zero_grad()\n mean_batch_loss.backward()\n optimizer.step()\n\n # Batch metrics and tensorboard logging\n print(f'Batch: {batch}, mean loss: {mean_batch_loss:.2f}, '\n f'mean reward: {mean_batch_reward:.2f}')\n tf.summary.scalar('mean loss', mean_batch_loss.detach().item(), step=batch)\n tf.summary.scalar('mean reward', mean_batch_reward.detach().item(), step=batch)\n\n if batch % save_every_batches == 0:\n print('Saving checkpoint...')\n save_dict = {\n 'model_weights': model.state_dict(),\n 'start_time': start_time,\n 'last_batch': batch\n }\n torch.save(save_dict, 'checkpoint.pth')\n\n batch += 1\n\n\ndef main():\n # By default, doesn't render game screen, but can invoke with `--render` flag on CLI\n parser = argparse.ArgumentParser()\n parser.add_argument('--render', action='store_true')\n args = parser.parse_args()\n\n train(render=args.render)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"solution (spoiler alert!)/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"272343379","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! 
This is a generated sample (\"LongRunningPromise\", \"translate_v3_batch_translate_text_with_glossary_and_model\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-translate\n\n# sample-metadata\n# title: Batch Translate with Glossary and Model\n# description: Batch translate text with Glossary using AutoML Translation model\n# usage: python3 translate_v3_batch_translate_text_with_glossary_and_model.py [--input_uri \"gs://cloud-samples-data/text.txt\"] [--output_uri \"gs://YOUR_BUCKET_ID/path_to_store_results/\"] [--project \"[Google Cloud Project ID]\"] [--location \"us-central1\"] [--target_language en] [--source_language de] [--model_id \"{your-model-id}\"] [--glossary_id \"{your-glossary-id}\"]\n\n# [START translate_v3_batch_translate_text_with_glossary_and_model]\nfrom google.cloud import translate\n\n\ndef sample_batch_translate_text_with_glossary_and_model(\n input_uri,\n output_uri,\n project_id,\n location,\n target_language,\n source_language,\n model_id,\n glossary_id,\n):\n \"\"\"\n Batch translate text with Glossary and Translation model\n \"\"\"\n\n client = translate.TranslationServiceClient()\n\n # TODO(developer): Uncomment and set the following variables\n # input_uri = 'gs://cloud-samples-data/text.txt'\n # output_uri = 'gs://YOUR_BUCKET_ID/path_to_store_results/'\n # project = '[Google Cloud Project ID]'\n # location = 'us-central1'\n # target_language = 'en'\n # source_language = 'de'\n # model_id = '{your-model-id}'\n # glossary_id = '[YOUR_GLOSSARY_ID]'\n target_language_codes = [target_language]\n gcs_source = {\"input_uri\": input_uri}\n\n # Optional. Can be \"text/plain\" or \"text/html\".\n mime_type = \"text/plain\"\n input_configs_element = {\"gcs_source\": gcs_source, \"mime_type\": mime_type}\n input_configs = [input_configs_element]\n gcs_destination = {\"output_uri_prefix\": output_uri}\n output_config = {\"gcs_destination\": gcs_destination}\n parent = f\"projects/{project_id}/locations/{location}\"\n model_path = \"projects/{}/locations/{}/models/{}\".format(\n project_id, \"us-central1\", model_id\n )\n models = {target_language: model_path}\n\n glossary_path = client.glossary_path(\n project_id, \"us-central1\", glossary_id # The location of the glossary\n )\n\n glossary_config = translate.TranslateTextGlossaryConfig(glossary=glossary_path)\n glossaries = {\"ja\": glossary_config} # target lang as key\n\n operation = client.batch_translate_text(\n request={\n \"parent\": parent,\n \"source_language_code\": \"en\",\n \"target_language_codes\": target_language_codes,\n \"input_configs\": input_configs,\n \"output_config\": output_config,\n \"models\": models,\n \"glossaries\": glossaries,\n }\n )\n\n print(\"Waiting for operation to complete...\")\n response = operation.result()\n\n # Display the translation for each input text provided\n print(\"Total Characters: {}\".format(response.total_characters))\n print(\"Translated Characters: {}\".format(response.translated_characters))\n\n\n# [END translate_v3_batch_translate_text_with_glossary_and_model]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input_uri\", type=str, default=\"gs://cloud-samples-data/text.txt\"\n )\n parser.add_argument(\n \"--output_uri\", type=str, default=\"gs://YOUR_BUCKET_ID/path_to_store_results/\"\n )\n parser.add_argument(\"--project_id\", type=str, default=\"[Google Cloud Project ID]\")\n parser.add_argument(\"--location\", type=str, default=\"us-central1\")\n 
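Note that the request above hardcodes `"source_language_code": "en"` and keys the glossary under `"ja"` even though the function accepts `source_language` and `target_language` arguments. A sketch of building the same request consistently from those arguments, using only the fields and variables this sample already defines:

```python
# Hypothetical consistent variant; glossary keys must match a target language.
glossaries = {target_language: glossary_config}

request = {
    "parent": parent,
    "source_language_code": source_language,
    "target_language_codes": [target_language],
    "input_configs": input_configs,
    "output_config": output_config,
    "models": {target_language: model_path},
    "glossaries": glossaries,
}
operation = client.batch_translate_text(request=request)
```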
parser.add_argument(\"--target_language\", type=str, default=\"en\")\n parser.add_argument(\"--source_language\", type=str, default=\"de\")\n parser.add_argument(\"--model_id\", type=str, default=\"{your-model-id}\")\n parser.add_argument(\n \"--glossary_id\", type=str, default=\"[YOUR_GLOSSARY_ID]\",\n )\n args = parser.parse_args()\n\n sample_batch_translate_text_with_glossary_and_model(\n args.input_uri,\n args.output_uri,\n args.project_id,\n args.location,\n args.target_language,\n args.source_language,\n args.model_id,\n args.glossary_id,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"samples/snippets/translate_v3_batch_translate_text_with_glossary_and_model.py","file_name":"translate_v3_batch_translate_text_with_glossary_and_model.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"185843720","text":"from selenium.webdriver.firefox.webdriver import WebDriver\nfrom fixture.session import SessionHelper\nfrom fixture.group import GroupHelper\nfrom fixture.user import UserHelper\n\n\nclass Application:\n\n def __init__(self):\n self.wd = WebDriver()\n self.wd.implicitly_wait(30)\n self.session = SessionHelper(self)\n self.group = GroupHelper(self)\n self.user = UserHelper(self)\n\n def open_home_page(self, homepage_url):\n wd = self.wd\n wd.get(homepage_url)\n\n def destroy(self):\n self.wd.quit()\n","sub_path":"fixture/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"380140444","text":"import os\nimport csv\n\n\ndef import_from_csv(data_store, path, files, change_id):\n for file in sorted(files):\n # split file into filename and extension\n table_name, _ = os.path.splitext(file)\n possible_method = \"add_to_\" + table_name.lower().replace(\" \", \"_\")\n method_to_call = getattr(data_store, possible_method, None)\n if method_to_call:\n with open(os.path.join(path, file), \"r\") as f:\n reader = csv.reader(f)\n # skip header\n _ = next(reader)\n for row in reader:\n method_to_call(*row, change_id=change_id)\n else:\n print(f\"Method({possible_method}) not found!\")\n","sub_path":"pepys_import/utils/data_store_utils.py","file_name":"data_store_utils.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"176237149","text":"from setuptools import setup, find_packages\n\nwith open('./README.md', 'r') as f:\n long_description = f.read()\n\n# Version\n# Info: https://packaging.python.org/guides/single-sourcing-package-version/\n# Example: https://github.com/pypa/warehouse/blob/64ca42e42d5613c8339b3ec5e1cb7765c6b23083/warehouse/__about__.py\nmeta_package = {}\nwith open('./minet/__version__.py') as f:\n exec(f.read(), meta_package)\n\n\nsetup(name='minet',\n version=meta_package['__version__'],\n description='A webmining CLI tool & library for python.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='http://github.com/medialab/minet',\n license='MIT',\n author='Jules Farjas, Guillaume Plique, Pauline Breteau',\n keywords='webmining',\n python_requires='>=3',\n packages=find_packages(exclude=['ftest', 'scripts', 'test']),\n install_requires=[\n 'beautifulsoup4>=4.7.1',\n 'browser-cookie3==0.7.6',\n 'casanova==0.7.0',\n 'cchardet==2.1.4',\n 'cython>=0.29.4',\n 'dateparser>=0.7.1',\n 'json5>=0.8.5',\n 
'keyring<19.3',\n 'lxml>=4.3.0',\n 'ndjson>=0.3.1',\n 'numpy>=1.16.1',\n 'persist-queue>=0.4.2',\n 'pytz>=2019.3',\n 'pyyaml',\n 'quenouille>=0.6.2',\n 'tqdm>=4.31.1',\n 'twitter>=1.18.0',\n 'ural>=0.25.0',\n 'urllib3[secure]>=1.25.3'\n ],\n entry_points={\n 'console_scripts': ['minet=minet.cli.__main__:main']\n },\n zip_safe=True)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421922085","text":"# -*- coding: utf-8 -*-\n# @File: VolGAN_PyTorch/preprocess_audio.py\n# @Author: Qinlong Huang\n# @Create Date: 2019/11/19 19:05\n# @Contact: qinlonghuang@gmail.com\n# @Description:\n\n# coding = utf-8\nimport os\nimport time\nimport numpy as np\nimport math\nfrom misc.config import cfg\nfrom six.moves import xrange\n\nimport librosa\nfrom scipy.io import wavfile\nfrom scipy.interpolate import interp1d\n\n\"\"\"\n Constants\n\"\"\"\n_CLIP_NSTD_SPEC = cfg.AUDIO._CLIP_NSTD_SPEC\n_CLIP_NSTD_TVS = cfg.AUDIO._CLIP_NSTD_TVS\n_CLIP_NSTD_TRACK = cfg.AUDIO._CLIP_NSTD_TRACK\n_LOG_EPS = cfg.AUDIO._LOG_EPS\n_USE_MEL = cfg.AUDIO._USE_MEL\n\n\nclass audioProcess(object):\n def __init__(self):\n # fbank: [num_fank, NFFT/2+1], inv_fank: [NFFT/2+1, num_fbank]\n self.fbank, self.freq_point = self.get_filter_banks(filters_num=cfg.AUDIO.FBANK_NUM,\n NFFT=cfg.AUDIO.NFFT,\n samplerate=cfg.AUDIO.FS,\n high_freq=cfg.AUDIO.HIGH_F)\n self.inv_fbank = np.linalg.pinv(self.fbank)\n self.tvs_dim_list = []\n\n def read_audio(self, _audio_fp):\n '''Just for some test'''\n wav = wave.open(_audio_fp, \"rb\") # 打开一个wav格式的声音文件流\n num_frame = wav.getnframes() # 获取帧数\n num_channel = wav.getnchannels() # 获取声道数\n framerate = wav.getframerate() # 获取帧速率\n num_sample_width = wav.getsampwidth() # 获取实例的比特宽度,即每一帧的字节数\n str_data = wav.readframes(num_frame) # 读取全部的帧\n wav.close() # 关闭流\n wave_data = np.fromstring(str_data, dtype=np.short) # 将声音文件数据转换为数组矩阵形式\n wave_data.shape = -1, num_channel # 按照声道数将数组整形,单声道时候是一列数组,双声道时候是两列的矩阵\n print('num_frame:{}, num_channel:{}, framerate:{}'.format(num_frame, num_channel, framerate))\n\n def load_wav(self, path, sr=cfg.AUDIO.FS):\n '''\n Method to load audio, attention: signal will be rescaled to have uniform amplitude\n input:\n path: audio file path\n sr: sample rate of the returned audio signal\n return:\n sample_rate, audio signal\n '''\n audio, sample_rate = librosa.core.load(path, sr=sr)\n # temporarily, will be remove when reconstruct dataset\n # TODO:\n # Why rescaled here?\n audio *= 32767 / max(0.01, np.max(np.abs(audio)))\n return sample_rate, audio\n\n def save_wav(self, wav, path, sr=cfg.AUDIO.FS):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n # proposed by @dsmiller\n wavfile.write(path, sr, wav.astype(np.int16))\n\n def add_wgn_noise(self, audio_fp_in, audio_fp_out, SNR=cfg.AUDIO.SNR):\n '''\n Add noise to audio with SNR specified, noised audio will be saved\n input:\n audio_fp_in: file path to read original audio\n audio_fp_out: file path to save noised audio\n SNR: signal-noise ratio\n '''\n\n def wgn(signal_ori, snr):\n snr = 10 ** (snr / 10.0)\n sig_power = np.sum(signal_ori ** 2) / len(signal_ori)\n noise_power = sig_power / snr\n return np.random.randn(len(signal_ori)) * np.sqrt(noise_power)\n\n signal, _ = librosa.core.load(audio_fp_in, sr=cfg.AUDIO.FS)\n noise = wgn(signal, snr=SNR)\n signal_n = signal + noise\n self.save_wav(signal_n, audio_fp_out, sr=cfg.AUDIO.FS)\n\n def get_filter_banks(self, filters_num=cfg.AUDIO.FBANK_NUM, 
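The hz/mel conversion helpers in `get_filter_banks` below use the constant 2592; the value usually quoted for the mel scale is 2595. Because both directions use the same number, the pair still inverts cleanly, so this may be an intentional variant rather than a bug. For reference, a sketch of the textbook mapping with a round-trip check:

```python
import numpy as np

def hz_to_mel(hz):
    # Standard O'Shaughnessy-style mel scale (constant 2595, not 2592).
    return 2595.0 * np.log10(1.0 + hz / 700.0)

def mel_to_hz(mel):
    return 700.0 * (10.0 ** (mel / 2595.0) - 1.0)

f = np.array([0.0, 700.0, 8000.0])
assert np.allclose(mel_to_hz(hz_to_mel(f)), f)   # round-trip check
```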
NFFT=cfg.AUDIO.NFFT,\n samplerate=cfg.AUDIO.FS, low_freq=cfg.AUDIO.LOW_F, high_freq=cfg.AUDIO.HIGH_F):\n '''\n 计算梅尔三角间距滤波器,该滤波器在第一个和第三个频率处为0,在第二个频率处为1\n filters_num: 滤波器个数\n NFFT:FFT的总计的个数\n samplerate:采样频率\n low_freq:最低频率\n high_freq:最高频率\n '''\n\n def hz2mel(hz):\n '''\n 把hz转换为mel频率\n hz: 频率\n '''\n return 2592 * np.log10(1 + hz / 700.0)\n\n def mel2hz(mel):\n '''\n 把梅尔频率转化为hz\n mel: 梅尔频率\n '''\n return 700 * (10 ** (mel / 2592.0) - 1)\n\n high_freq = high_freq or samplerate / 2\n\n if _USE_MEL:\n # use mel frequency\n # 首先,将频率hz转化为梅尔频率,因为人耳分辨声音的大小与频率并非线性正比,所以化为梅尔频率再线性分割\n low_mel = hz2mel(low_freq)\n high_mel = hz2mel(high_freq)\n # 需要在low_mel和high_mel之间等间距插入filters_num个点,一个filters_num+2个点\n mel_points = np.linspace(low_mel, high_mel, filters_num + 2)\n # 再将梅尔频率化为hz频率,并且对应到FFT中的位置\n hz_points = mel2hz(mel_points)\n else:\n # use normal frequency\n hz_points = np.linspace(low_freq, high_freq, filters_num + 2)\n # print('frequency bin: {}'.format(hz_points))\n\n # 需要知道这些hz_points对应到fft中的位置,fft的点在整个hz频域空间是均匀分割的,但是在每个mel滤波器中的点的数目就不一样了\n # 最大不会超过NFFT/2+1个\n bin = np.floor((NFFT + 1) * hz_points / samplerate)\n # 接下来建立滤波器的表达式,每个滤波器在第一个和第三个点处均为0, 中间为三角形状, 这个是最多有NFFT/2+1个点\n fbank = np.zeros([filters_num, int(NFFT / 2 + 1)])\n for j in xrange(0, filters_num):\n if bin[j + 2] - bin[j] <= 1:\n fbank[j, int(bin[j + 1])] = 1.\n else:\n for i in xrange(int(bin[j]), int(bin[j + 1])):\n fbank[j, i] = (i - bin[j]) / (bin[j + 1] - bin[j])\n for i in xrange(int(bin[j + 1]), int(bin[j + 2])):\n fbank[j, i] = (bin[j + 2] - i) / (bin[j + 2] - bin[j + 1])\n fbank[j, :] = fbank[j, :] * (bin[-1] - bin[-3]) / (bin[j + 2] - bin[j])\n if _USE_MEL:\n for j in xrange(1, filters_num):\n if bin[j + 2] - bin[j] <= 2 and fbank[j, int(bin[j + 1])] > fbank[0, int(bin[1])]:\n fbank[j, int(bin[j + 1])] = fbank[0, int(bin[1])]\n\n return fbank, hz_points[1: -1]\n\n def spectrum_magnitude(self, frames, NFFT):\n '''\n 计算每一帧经过FFT变换以后的频谱的幅度,若frames的大小为N*L, 则返回的矩阵的大小为N*NFFT/2+1\n 参数说明:\n frames:即audio2frame函数返回的帧矩阵\n NFFT:FFT变换的数组大小,如果帧长度小于NFFT, 则帧的其余部分用0填充\n '''\n # 对frames进行FFT变换,返回frames_num * NFFT/2+1的矩阵,最大的频率是samplerate/2\n complex_spectrum = np.fft.rfft(frames, NFFT)\n return np.absolute(complex_spectrum) # 返回频谱的幅度值\n\n def spectrum_power(self, frames, NFFT):\n '''\n 计算每一帧傅里叶变换之后的功率谱\n 参数说明:\n frames: audio2frame函数计算出来的帧矩阵\n NFFT:FFT的大小\n '''\n # 功率谱等于每一点的幅度平方/NFFT\n return 1.0 / NFFT * np.square(self.spectrum_magnitude(frames, NFFT))\n\n def log_spectrum_power(self, frames, NFFT, norm=False):\n '''\n 计算每一帧的功率谱的对数形式\n frames: audio2frame返回的帧矩阵\n NFFT: FFT变换的大小\n norm: 范数,即归一化系数\n '''\n spec_power = self.spectrum_power(frames, NFFT)\n spec_power[spec_power < _LOG_EPS] = _LOG_EPS # 防止出现功率谱等于0, 因为0无法取对数\n log_spec_power = 10 * np.log10(spec_power)\n if norm:\n return log_spec_power - np.max(log_spec_power)\n else:\n return log_spec_power\n\n def pre_emphasis(self, signal, coefficient=0.97):\n '''\n 对信号预加重,实际上是做高通滤波,做了差分\n 参数含义:\n signal: 原始信号\n coefficient: 加重系数, 默认为0.95, f(t_new) = f(t_ori) - coef * f(t-1_ori)\n '''\n return np.append(signal[0], signal[1:] - coefficient * signal[:-1])\n\n def inv_pre_emphasis(self, signal, coefficient=0.97):\n '''\n f(t_ori) = f(t_new) + coef * f(t-1_ori), iterate in an autoregressive way\n todo: matrix implementation\n '''\n for i in range(1, signal.shape[0]):\n signal[i] += coefficient * signal[i - 1]\n return signal\n\n def audio2frame(self, signal, frame_length, frame_step, winfunc=lambda x: np.hamming(x)):\n '''\n 将音频信号转化为帧\n 参数含义:\n signal:原始音频信号\n 
frame_length:每一帧的长度(这里指采样点的长度,即采样频率乘时间间隔)\n frame_step:相邻帧的间隔,下一帧向后移动多少采样点\n winfunc:lambda函数, 用于生成一个向量\n '''\n signal_length = len(signal) # 信号总长度\n frame_length = int(round(frame_length))\n frame_step = int(round(frame_step))\n\n if signal_length <= frame_length: # 若信号长度小于一个帧的长度,则帧数定义为1\n frames_num = 1\n else:\n frames_num = 1 + int(math.ceil((1.0 * signal_length - frame_length) / frame_step))\n pad_length = int((frames_num - 1) * frame_step +\n frame_length) # 所有帧加起来去掉重叠后的长度\n # 不够的长度用0填补,类似于FFT中的扩充数组操作\n zeros = np.zeros(pad_length - signal_length)\n pad_signal = np.concatenate((signal, zeros)) # 填补后的信号记为Pad_signal\n indices = np.tile(np.arange(0, frame_length), (frames_num, 1)) + \\\n np.tile(np.arange(0, frames_num * frame_step, frame_step), (frame_length, 1)).T\n # 得到frames_num*frame_length长度的矩阵,保存的是帧中每一个采样点在原来序列中的序号\n indices = np.array(indices, dtype=np.int32) # 将indice转化为矩阵\n frames = pad_signal[indices] # 得到帧信号,每一帧都保存在一行中\n win = np.tile(winfunc(frame_length), (frames_num, 1)) # window窗函数\n return frames * win\n\n def get_mel_spectrogram(self,\n signal,\n frame_length=cfg.AUDIO.FRAME_LENGTH,\n frame_step=cfg.AUDIO.FRAME_STEP,\n NFFT=cfg.AUDIO.NFFT,\n pre_emphasis_coeff=0.97):\n '''\n 计算音频信号的经过梅尔三角滤波的能量谱\n 参数说明:\n frame_length:每一帧的长度(这里指采样点的长度,即采样频率乘时间间隔)\n frame_step:相邻帧的间隔,下一帧向后移动多少采样点\n NFFT:FFT采样点个数\n pre_emphasis_coeff:预加重系数\n '''\n # 预加重处理\n signal_emp = self.pre_emphasis(signal, pre_emphasis_coeff)\n # 得到帧数组\n frames = self.audio2frame(signal_emp, frame_length, frame_step)\n # 得到对数能量谱, [frames_num, NFFT/2+1]\n spec_power = self.log_spectrum_power(frames, NFFT, False)\n # Mel滤波,[num_frames x num_filter]\n mel_fbank = np.dot(spec_power, self.fbank.T)\n\n return mel_fbank\n\n def inv_mel_spectrogram(self, mel_spectrogram, NFFT=cfg.AUDIO.NFFT):\n '''\n Converts mel spectrogram to waveform using librosa\n input spectrogram should be a unnormalized one\n '''\n\n def _stft(y):\n return librosa.stft(y=y, n_fft=NFFT, hop_length=cfg.AUDIO.FRAME_STEP,\n win_length=cfg.AUDIO.FRAME_LENGTH, pad_mode='constant')\n\n def _istft(y):\n return librosa.istft(y, hop_length=cfg.AUDIO.FRAME_STEP,\n win_length=cfg.AUDIO.FRAME_LENGTH)\n\n def _griffin_lim(S):\n '''librosa implementation of Griffin-Lim\n Based on https://github.com/librosa/librosa/issues/434\n S: [NFFT/2+1, num_frame]\n '''\n angles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n S_complex = np.abs(S).astype(np.complex)\n y = _istft(S_complex * angles)\n for i in range(cfg.AUDIO.GRIFFIN_LIM_ITERS):\n angles = np.exp(1j * np.angle(_stft(y)))\n y = _istft(S_complex * angles)\n return y\n\n # spec = self.denormalize_spec(mel_spectrogram, _spec_mean, _spec_std)\n # mel to linear, [seq_length, num_fbank] --> [seq_length, NFFT/2+1]\n # Convert back to linear\n spec = np.maximum(1e-10, np.dot(mel_spectrogram, self.inv_fbank.T))\n # inverse till get abs(spec_magnitude)\n spec = np.power(10.0, spec * 0.1) # bel to power\n spec = (spec * NFFT) ** 0.5 # power to magnitude\n # use griffin_lim algorithm\n signal = _griffin_lim(spec.T ** cfg.AUDIO.GRIFFIN_LIM_POWER)\n # inverse pre emphasis\n signal = self.inv_pre_emphasis(signal)\n return signal\n\n def normalize_spec(self, spec, _spec_mean, _spec_std):\n '''\n dim related and dim unrelated\n '''\n _spec_mean = np.mean(_spec_mean)\n _spec_std = np.mean(_spec_std)\n spec = (spec - _spec_mean) / _spec_std # broadcast\n spec /= _CLIP_NSTD_SPEC\n spec = np.clip(spec, -0.4, 2.0) + 0.5\n return spec\n\n def unnormalize_spec(self, spec, _spec_mean, _spec_std):\n spec = (spec - 0.5) * 
_CLIP_NSTD_SPEC\n spec = spec * _spec_std + _spec_mean\n return spec\n\n def calculate_spec_statistics(self, spec):\n '''\n calculate statistics of a single spectrum\n input:\n spec: nd_array of single spec, with shape [num_fbank, seq_length]\n return:\n mean value and std of the given spec\n '''\n spec_mean, spec_std = np.mean(spec, axis=0), np.std(spec, axis=0)\n return spec_mean, spec_std\n\n def process_single_audio(self, audio_fp, spec_mean, spec_std, self_normalize=False, add_noise=False):\n \"\"\"\n process a single audio to normalized spectrum\n input:\n audio_fp: audio file path\n spec_mean: mean value to normalize spec\n spec_std: standard deviation to normalize spec\n self_normalize: use statistics of the spec to normalize itself,\n need spec_mean and spec_std be none\n return: normalized spectrum of audio, [seq_length, num_fbank]\n \"\"\"\n # Get rescaled wavform & given sample rate (i.e., cfg.AUDIO.FS)\n sample_rate, audio = self.load_wav(audio_fp, sr=cfg.AUDIO.FS)\n assert (int(sample_rate) == int(cfg.AUDIO.FS)) # make sure sample rates match\n spec = self.get_mel_spectrogram(audio)\n # If use self_normalize, then mean and std should not be provided\n if self_normalize:\n assert (spec_mean == None and spec_std == None)\n spec_mean, spec_std = self.calculate_spec_statistics(spec)\n spec = self.normalize_spec(spec, spec_mean, spec_std)\n return spec\n\n ####################################################################\n # Following are tvs, track process methods\n\n def extract_trm_parameters(self, parameters_file):\n '''\n extract track struture and tvs from the parameters file\n input: filename\n output:\n tvs: 2-d array, [cfg.AUDIO.WINDOW_LEN, cfg.AUDIO.TVS_DIM]\n *** track_params: 1-d array, [cfg.AUDIO.TRACK_params_DIM]\n '''\n selected_param_idx = cfg.AUDIO.TRACK_IDX\n with open(parameters_file, 'r', encoding='utf-8') as f_r:\n lines = f_r.readlines()\n # Extract track_params\n track_params = [float(lines[idx].strip()) for idx in selected_param_idx]\n # track_params.append(get_mean_pitch(parameters_file)) # temporarily\n track_params = np.array(track_params, dtype=np.float32)\n\n # Extract tvs, need to sample later\n tvs_lines = lines[cfg.AUDIO.TVS_START:]\n tvs = np.array([line.strip().split(' ') for line in tvs_lines], dtype=np.float32)\n\n return tvs, track_params\n\n def save_trm_params(self, tvs, track, save_path, pad=True):\n '''\n Simply save processed track and tvs\n Input:\n pad: whether should add additional static track parameters,\n if track directly comes from a2t model, then should set pad=True\n '''\n with open(save_path, 'w', encoding='utf8') as f_w:\n if pad:\n for track_info in cfg.AUDIO.TRACK_PAD_BEFORE:\n f_w.writelines(str(track_info) + '\\n')\n for track_info in track:\n f_w.writelines('{:.2f}'.format(track_info) + '\\n')\n for track_info in cfg.AUDIO.TRACK_PAD_AFTER:\n f_w.writelines(str(track_info) + '\\n')\n else:\n for track_info in track:\n f_w.writelines('{:.2f}'.format(track_info) + '\\n')\n\n seq_length = tvs.shape[0]\n for idx in range(seq_length):\n f_w.writelines(' '.join(['{:.5f}'.format(tvs_dim_v) for tvs_dim_v in tvs[idx, :]]) + '\\n')\n\n def smooth_data(self, ori, smooth_steps=10):\n '''\n Smooth input data\n Input:\n ori: input data to be smoothed\n smooth_steps: range of data points to take average on\n Todo: implement in parallel\n '''\n d_len = len(ori)\n out = np.zeros(d_len)\n for idx in range(d_len):\n out[idx] = np.mean(ori[max(0, idx - smooth_steps // 2): min(d_len, idx + smooth_steps // 2)])\n return out\n\n def 
adjust_mean(self, ori, expect_mean):\n '''\n Adjust mean value\n Input:\n ori: input data, np.1darray\n expect_mean: expected mean value of output\n '''\n return ori + (expect_mean - np.mean(ori))\n\n def adjust_range(self, ori, datum_point, expect_range):\n '''\n Adjust range of a list of data\n *** Hint: if datum_point not equal to mean value, mean value will change\n Input:\n ori: input data, np.1darray\n datum_point: datum point of rerange, may not be mean value\n expect_range: expected range of output\n Todo: adaptively adjust range\n '''\n # adjust range\n actual_range = (max(ori) - min(ori)) if (max(ori) - min(ori)) > 0.001 else expect_range # accroding to range\n expand_ratio = expect_range / actual_range\n out = datum_point + (ori - datum_point) * expand_ratio\n return out\n\n def rerange_tvs_dim(self, tvs_s, tvs_refer):\n '''\n refine according to a standard tvs\n '''\n aspVol_idx = 2\n fricVol_idx = 3\n fricCF_idx = 5\n fricBW_idx = 6\n velum_idx = 15\n radii_list = [8, 9, 10, 11, 12, 13, 14]\n # get reference\n refer_mean = np.mean(tvs_refer, axis=0)\n refer_range = np.ptp(tvs_refer, axis=0)\n\n # adjust aspiration and frication\n # tvs_s[:, aspVol_idx] = self.adjust_range(tvs_s[:, aspVol_idx], 0, 1 * refer_range[aspVol_idx]) # should be 1, make sure to be clipped\n # tvs_s[:, fricVol_idx] = self.adjust_range(tvs_s[:, fricVol_idx], 0, 1 * refer_range[fricVol_idx]) # should be 1, make sure to be clipped\n # tvs_s[:, fricCF_idx] = self.adjust_range(tvs_s[:, fricCF_idx], np.mean(tvs_s[:, fricCF_idx]), 1.5 * refer_range[fricCF_idx])\n # tvs_s[:, fricBW_idx] = self.adjust_range(tvs_s[:, fricBW_idx], np.mean(tvs_s[:, fricBW_idx]), 1.5 * refer_range[fricBW_idx])\n # tvs_s[:, fricBW_idx] = np.where(tvs_s[:, fricBW_idx] < 500, 500, tvs_s[:, fricBW_idx])\n\n # Adjust r1-r8\n for tvs_dim in radii_list:\n tvs_s[:, tvs_dim] = self.adjust_range(tvs_s[:, tvs_dim], np.mean(tvs_s[:, tvs_dim]),\n 1 * refer_range[tvs_dim])\n\n # Adjust velum\n # tvs_s[:, velum_idx] = self.adjust_range(tvs_s[:, velum_idx], 0, 1 * refer_range[velum_idx])\n\n # Clip data to allowed range\n for tvs_dim in range(tvs_s.shape[1]):\n tvs_s[:, tvs_dim] = np.clip(tvs_s[:, tvs_dim], min(tvs_refer[:, tvs_dim]), max(tvs_refer[:, tvs_dim]))\n\n return tvs_s\n\n def truncate_tvs(self, tvs):\n '''\n Truncate tvs, would make sure value of tvs be in proper range.\n Actual effect if suppress some value of aspiration and frication to zero\n Range can be found in config.\n '''\n glotVol_idx = 1\n aspVol_idx = 2\n fricVol_idx = 3\n # glotal volumn truncation\n tvs[:, glotVol_idx] = np.where(tvs[:, glotVol_idx] < 10, 0, tvs[:, glotVol_idx])\n # aspiration truncation\n tvs[:, aspVol_idx] = np.where(tvs[:, aspVol_idx] < 1, 0, tvs[:, aspVol_idx])\n tvs[:, aspVol_idx] = np.where((tvs[:, glotVol_idx] < 0.1) & (tvs[:, aspVol_idx] < 10), 0, tvs[:, aspVol_idx])\n # frication truncation\n tvs[:, fricVol_idx] = np.where(tvs[:, fricVol_idx] < 0.1, 0, tvs[:, fricVol_idx])\n tvs[:, fricVol_idx] = np.where((tvs[:, glotVol_idx] < 0.1) & (tvs[:, fricVol_idx] < 0.5), 0,\n tvs[:, fricVol_idx])\n\n return tvs\n\n def truncate_trm_params(self, trm_fp_s, trm_fp_t):\n '''\n Truncate specified dim of tvs in trm_fp_s with, resulted trm params will be saved in trm_fp_t.\n '''\n tvs_s, track_s = self.extract_trm_parameters(trm_fp_s)\n tvs_s = self.truncate_tvs(tvs_s)\n self.save_trm_params(tvs_s, track_s, trm_fp_t, pad=True)\n\n def refine_trm_params(self, trm_fp_s, trm_fp_t, trm_fp_refer):\n '''\n refine according to a standard tvs, steps including truncate, 
rerange and smoothing\n '''\n tvs_s, track_s = self.extract_trm_parameters(trm_fp_s)\n\n # get reference\n tvs_refer, _ = self.extract_trm_parameters(trm_fp_refer)\n # truncate tvs\n tvs_s = self.truncate_tvs(tvs_s)\n # rerange tvs\n tvs_s = self.rerange_tvs_dim(tvs_s, tvs_refer)\n # Smoothing\n for tvs_dim in range(tvs_s.shape[1]):\n tvs_s[:, tvs_dim] = self.smooth_data(tvs_s[:, tvs_dim], smooth_steps=10)\n\n self.save_trm_params(tvs_s, track_s, trm_fp_t, pad=True)\n\n def rewrite_trm_dim_r0(self, parameters_file):\n '''\n rewrite r0 value of tvs in place\n '''\n tvs, track = self.extract_trm_parameters(parameters_file)\n tvs[:, cfg.AUDIO.TVS_R0_DIM] = cfg.AUDIO.TVS_R0_VALUE\n self.save_trm_params(tvs, track, parameters_file, pad=True)\n\n def rewrite_trm_dim_specified(self, trm_fp_s, trm_fp_r, trm_fp_t, dim):\n '''\n replace specified dim of tvs in trm_fp_r with the corresponding dim of tvs in trm_fp_s,\n resulting trm params will be saved in trm_fp_t.\n dim: the specified tvs dim\n '''\n tvs_s, track_s = self.extract_trm_parameters(trm_fp_s)\n tvs_r, track_r = self.extract_trm_parameters(trm_fp_r)\n assert (abs(len(tvs_s) - len(tvs_r)) < 5)\n seq_length = min([len(tvs_s), len(tvs_r)])\n tvs_r = tvs_r[:seq_length, :]\n tvs_s = tvs_s[:seq_length, :]\n tvs_r[:, dim] = tvs_s[:, dim]\n self.save_trm_params(tvs_r, track_r, trm_fp_t, pad=True)\n\n def normalize_tvs(self, tvs, tvs_mean, tvs_std):\n tvs_norm = (tvs - tvs_mean) / tvs_std # broadcast\n tvs_norm /= _CLIP_NSTD_TVS\n # tvs_norm = tvs / tvs_mean\n # tvs_norm = (tvs_norm - 0.5) * 2\n tvs_norm = np.clip(tvs_norm, -1., 1.)\n\n return tvs_norm\n\n def unnormalize_tvs(self, normalized_tvs, tvs_mean, tvs_std):\n tvs = normalized_tvs * _CLIP_NSTD_TVS\n tvs = tvs * tvs_std + tvs_mean\n return tvs\n\n def interpolate_tvs_1d(self, tvs_1d, sample_x, kind='linear'):\n '''\n tvs_1d: np.array, one dim of tvs\n up_sample_rate: the ratio to upsample original tvs\n kind: 'linear', 'quadratic'\n '''\n seq_length = len(tvs_1d)\n x = np.linspace(0, seq_length - 1, seq_length)\n y = tvs_1d\n f = interp1d(x, y, kind=kind)\n return f(sample_x)\n\n def interpolate_tvs(self, tvs, sample_rate, kind='linear'):\n '''\n sample tvs on time stamps of spec frames\n input:\n tvs: original tvs\n sample_rate: expect_rate / ori_rate\n kind: 'linear', 'quadratic'\n return: interpolated tvs, [seq_length, tvs_dim]\n '''\n sample_x = np.arange(0, tvs.shape[0] - 1, step=sample_rate)\n tvs_dim = tvs.shape[-1]\n up_sampled_tvs = [self.interpolate_tvs_1d(\n tvs[:, idx], sample_x) for idx in range(tvs_dim)]\n return np.stack(up_sampled_tvs).T\n\n def normalize_track(self, track, track_mean, track_std):\n track_norm = (track - track_mean) / track_std # broadcast\n track_norm /= _CLIP_NSTD_TRACK\n # track_norm = track / track_mean\n # track_norm = (track_norm - 1) * 10\n track_norm = np.clip(track_norm, -1., 1.)\n return track_norm\n\n def unnormalize_track(self, normalized_track, track_mean, track_std):\n track = normalized_track * _CLIP_NSTD_TRACK\n track = track * track_std + track_mean\n return track\n\n def process_single_trm_params(self, trm_params_fp, tvs_mean, tvs_std, track_mean, track_std):\n '''\n process trm parameter file to normalized and interpolated tvs and track\n input:\n trm_params_fp: trm parameter file path\n tvs_mean, tvs_std, track_mean, track_std: for normalization\n return: normalized tvs and track\n '''\n tvs, track = self.extract_trm_parameters(trm_params_fp)\n tvs = self.normalize_tvs(tvs, tvs_mean, tvs_std)\n # Make sure time stamps of tvs and spec 
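`interpolate_tvs` above resamples each vocal-tract control track onto new time stamps with scipy's `interp1d`, sampling at `np.arange(0, n-1, step=sample_rate)`. A toy round-trip in the same convention, assuming a 2x upsampling (`sample_rate=0.5`):

```python
import numpy as np
from scipy.interpolate import interp1d

y = np.array([0.0, 1.0, 0.0, -1.0])          # one control track
x = np.arange(len(y))
f = interp1d(x, y, kind='linear')
sample_x = np.arange(0, len(y) - 1, step=0.5)
print(f(sample_x))   # [ 0.   0.5  1.   0.5  0.  -0.5]
```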
match\n tvs = self.interpolate_tvs(tvs, sample_rate=cfg.AUDIO.TIME_STEP * cfg.AUDIO.INPUT_CONTROL_RATE)\n track = self.normalize_track(track, track_mean, track_std)\n return tvs, track\n","sub_path":"model_utils/preprocess_audio.py","file_name":"preprocess_audio.py","file_ext":"py","file_size_in_byte":26575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184354326","text":"### Module: subgrid_module.py of SIGAME \t\t###\n### - calculates gas properties on sub-grid scales \t###\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport pdb\nimport scipy\nfrom scipy.interpolate import InterpolatedUnivariateSpline,interp1d,interp2d\nimport time\nimport multiprocessing as mp\nimport subprocess as sub\nimport aux as aux\nimport matplotlib.pyplot as plt\n\nparams = np.load('sigame/temp/params.npy').item()\nfor key,val in params.items():\n exec(key + '=val')\n\ndef subgrid(galname=galnames[0],zred=zreds[0]):\n '''\n Purpose\n -------\n On subgrid scales calculate:\n - local FUV field using grid of starburst99 models \n - local pressure using velocity dispersion, surface densities of gas and stars \n and hydrostatic equilibrium\n\n Arguments\n ---------\n galname: galaxy name - str\n default = first galaxy name in galnames list from parameter file\n\n zred: redshift of galaxy - float/int\n default = first redshift name in redshift list from parameter file\n\n '''\n\n print('\\n** Derive local FUV field and pressure **')\n\n # Declare these variables to be global for availability in other\n # functions called by subprocess:\n global simgas,simgas1,simstar,L_FUV\n\n # Load simulation data for gas and stars\n simgas = pd.read_pickle(d_sim+'z'+'{:.2f}'.format(zred)+'_'+galname+'_sim0.gas')\n\n if verbose: print('Number of gas elements: %s' % str(len(simgas)))\n\n print('\\nFind local FUV field from stellar population synthesis! 
')\n # Read grid parameters for FUV grid of 1e4 Msun stellar populations\n grid = pd.read_pickle(d_t+'FUV/FUVgrid_'+z1+'_noneb')\n # Read corresponding [age,Z,L_FUV] values\n FUV = pd.read_pickle(d_t+'FUV/FUV_'+z1+'_noneb') \n l_FUV = FUV['L_FUV'].values\n l_FUV = l_FUV.reshape((len(grid['Ages']),len(grid['Zs'])))\n print('First, calculate FUV luminosity of each stellar particle')\n part = 0.1\n L_FUV = np.zeros(len(simstar))\n for i in range(0,len(simstar)):\n f = interp2d(grid['Zs'],grid['Ages'],l_FUV)\n L_FUV[i] = simstar.loc[i]['m']/1e5*f(simstar.loc[i]['Z'],simstar.loc[i]['age']) # ergs/s\n if 1.*i/len(simstar) > part:\n print(int(part*100),' % done!')\n part = part+0.1\n simstar['L_FUV'] = L_FUV\n print('Then, find FUV flux at gas particle positions')\n FUV = np.zeros(len(simgas))\n print('(Multiprocessing starting up!)')\n pool = mp.Pool(processes=3) # 8 processors on my Mac Pro, 16 on Betty\n results = [pool.apply_async(FUVfunc, args=(i,)) for i in range(0,len(simgas))]#len(simgas)\n FUV = [p.get() for p in results]\n print('(Multiprocessing done!)')\n simgas['FUV'] = FUV\n simgas['FUV'] = simgas['FUV']/(kpc2cm**2*FUV_ISM)\n print('Finally, scale local CR intensity to follow fluctuations in local FUV field')\n simgas['CR'] = simgas['FUV']*CR_ISM\n\n print('\\nFind local hydrostatic mid-plane pressure!')\n # Extract star forming gas only:\n simgas1 = simgas.copy()\n simgas1 = simgas1[simgas1['SFR'] > 0].reset_index()\n global m_gas,m_star\n m_gas,m_star = simgas1['m'].values,simstar['m'].values\n print('(Multiprocessing starting up!)')\n pool = mp.Pool(processes=3) # 8 processors on my Mac Pro, 16 on Betty\n results = [pool.apply_async(Pfunc, args=(i,)) for i in range(0,len(simgas))]#len(simgas)\n res = [p.get() for p in results]\n print('(Multiprocessing done!)')\n P_ext,surf_gas,surf_star,sigma_gas,sigma_star,vel_disp_gas = np.zeros(len(res)),np.zeros(len(res)),np.zeros(len(res)),np.zeros(len(res)),np.zeros(len(res)),np.zeros(len(res))\n for i in range(0,len(res)):\n P_ext[i],surf_gas[i],surf_star[i],sigma_gas[i],sigma_star[i],vel_disp_gas[i] = res[i][0],res[i][1],res[i][2],res[i][3],res[i][4],res[i][5]\n simgas['P_ext'] = P_ext*Msun**2/kpc2m**4/kB/1e6 # right units!! 
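The `P_ext` column assigned above is computed per particle in `Pfunc` below. Written out (the 1/1.65 factor and the zero-`sigma_star` special case follow the code; the form is the disc mid-plane hydrostatic pressure, cf. Elmegreen 1989):

```latex
P_{\mathrm{ext}} \simeq \frac{\pi}{2}\,
    \frac{G\,\Sigma_{\mathrm{gas}}}{1.65}
    \left[ \Sigma_{\mathrm{gas}}
         + \left(\frac{\sigma_{\mathrm{gas}}}{\sigma_{\star}}\right)
           \Sigma_{\star} \right]
```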
Msun/kpc^2 -> kg/cm^2 and J/m^3 -> K/cm^3\n simgas['sigma_gas'] = sigma_gas\n simgas['sigma_star'] = sigma_star\n simgas['vel_disp_gas'] = vel_disp_gas\n print('Min and max of log(P_ext): %s and %s' % (min(np.log10(simgas['P_ext'])),max(np.log10(simgas['P_ext']))))\n simgas['surf_gas'] = surf_gas\n simgas['surf_star'] = surf_star\n\n # Save results\n simgas.to_pickle('sigame/temp/sim_FUV/z'+'{:.2f}'.format(zred)+'_'+galname+'_sim1.gas')\n simstar.to_pickle('sigame/temp/sim_FUV/z'+'{:.2f}'.format(zred)+'_'+galname+'_sim1.star')\n\ndef FUVfunc(i):\n dist = aux.rad(simstar[pos]-simgas.loc[i][pos],pos).values\n dist[dist == 0] = 1000\n # Count everything < h, scaled by stellar mass and compare to\n # the MW FUV radiation field, 0.6 Habing Seon+11 = 0.6*1.6e-3 erg cm^-2 s^-1\n return sum(L_FUV[dist < simgas['h'][i]]/(4*np.pi*dist[dist < simgas['h'][i]]**2)) # divided by area of sphere in cm^2\n\ndef Pfunc(i):\n # Distance to other gas particles in disk plane:\n dist1 = aux.rad(simgas1[posxy]-simgas.loc[i][posxy],posxy).values\n m_gas1 = m_gas[dist1 < simgas['h'][i]]\n p,surf_gas,surf_star,sigma_gas,sigma_star,vel_disp_gas = [0 for j in range(0,6)]\n if len(m_gas1) >= 1:\n surf_gas = sum(m_gas1)/(np.pi*simgas['h'][i]**2.)\n sigma_gas = np.std(simgas1.loc[dist1 < simgas['h'][i]]['vz'])\n # Distance to other star particles in disk plane:\n dist2 = aux.rad(simstar[posxy]-simgas.loc[i][posxy],posxy).values\n m_star1 = m_star[dist2 < simgas['h'][i]]\n if len(m_star1) >= 1:\n surf_star = sum(m_star1)/(np.pi*simgas['h'][i]**2.)\n sigma_star = np.std(simstar.loc[dist2 < simgas['h'][i]]['vz'])\n # Total velocity dispersion of gas\n vel_disp_gas = np.std(np.sqrt((simgas.loc[dist1 < simgas['h'][i]]['vx'].values)**2+\\\n (simgas1.loc[dist1 < simgas['h'][i]]['vy'].values)**2+\\\n (simgas1.loc[dist1 < simgas['h'][i]]['vz'].values)**2))\n # Count everything < h, scaled by stellar mass and compare to\n # the MW FUV radiation field, 0.6 Habing Seon+11 = 0.6*1.6e-3 erg cm^-2 s^-1\n if len(simstar.loc[dist2 < simgas['h'][i]]) == 0: sigma_star = 0\n if sigma_star != 0: p = np.pi/2.*G_grav*surf_gas*(surf_gas+(sigma_gas/sigma_star)*surf_star)/1.65\n if sigma_star == 0: p = np.pi/2.*G_grav*surf_gas*(surf_gas)/1.65\n\n else: \n if simgas['SFR'][i] > 0:\n surf_gas = simgas['m'][i]/(np.pi*simgas['h'][i]**2.)\n m_star1 = m_star[dist2 < simgas['h'][i]]\n if len(m_star1) >= 1:\n surf_star = sum(m_star1)/(np.pi*simgas['h'][i]**2.)\n\n return p,surf_gas,surf_star,sigma_gas,sigma_star,vel_disp_gas\n\ndef grid_radiation():\n print('** Get FUV flux from stellar population **')\n\n # Ages [Myr]\n if z1 == 'z6': Ages = 10.**np.array([-0.5,0,0.5,1,1.5,2,2.5,3])\n if z1 == 'z2': Ages = 10.**np.array([1.8,2.0,2.2,2.4,2.6,2.8,3,3.2])\n if z1 == 'z0': Ages = 10.**np.array([0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0])\n \n # Metallicities\n if z1 == 'z6': Zs = 10.**np.array([-1.4,-1.0,-0.6,-0.2])\n if z1 == 'z2': Zs = 10.**np.array([-1.5,-1.0,-0.5,0.0])\n if z1 == 'z0': Zs = 10.**np.linspace(-2,0.2,4)\n Z_sb99 = ['51','52','53','54','55'] # Z actually available in starburst99\n # f = interp1d([0.0001,0.002,0.008,0.014,0.040],[0,0,1,2,3,4]) \n # Z1 = np.zeros(len(Zs))\n # i = 0\n # for Z in Zs:\n # Z1[i] = Z_sb99[int(round(f(0.02*Z)))]\n # i += 1\n\n # Use all metallicities available in starburst99:\n # 51=0.001; 52=0.002; 53=0.008; 54=0.014; 55=0.040\n Z1 = Z_sb99\n Zs = np.array([0.0001,0.002,0.008,0.014,0.040])/0.0134 # 0.0134 from R. 
Dave\n\n nmodels = len(Ages)*1.*len(Zs)\n # pdb.set_trace()\n\n # Save grid axes\n FUVgrid = {'Ages':Ages,'Zs':Zs}\n pickle.dump(FUVgrid,open(d_t+'FUV/FUVgrid_'+z1+'_noneb','wb'))\n\n foo = raw_input('Run starburst99? [default: n] ... ')\n if foo == '': foo = 'n'\n if foo == 'y':\n i = 0\n for i1 in range(0,len(Ages)):\n Age = Ages[i1]\n for i2 in range(0,len(Zs)):\n name = 'sb_'+str(i)\n print(name)\n # Add line in the beginning of script and change age, mass and metallicity\n script_in = open(d_sb+'template.input','r') # template file\n script_out = open(d_sb+name+'.input','w')\n nextline = -1\n for line in script_in:\n\n if line.find('') >= 0:\n line = line.replace('', name) # log [cm]\n\n if line.find('') >= 0:\n line = line.replace('', str(int(Z1[i2])))\n\n if line.find('') >= 0:\n line = line.replace('', str(Age*1.1))\n\n if line.find('') >= 0:\n line = line.replace('', str(Age/2.))\n\n script_out.write(line)\n\n script_in.close()\n script_out.close()\n # Edit run file for starburst99 scripts and run them\n name = 'sb_'+str(i)\n go_in = open(d_sb+'go_galaxy_template','r')\n go_out = open(d_sb+'go_galaxy_1','w')\n for line in go_in:\n\n if line.find('') >= 0:\n line = line.replace('', name)\n\n go_out.write(line)\n\n go_in.close()\n go_out.close()\n # And run!!\n pro = sub.Popen(['./go_galaxy_1'],cwd=d_sb,stdout=sub.PIPE)\n text = u'Done with stellar population # '+str(i1)\n stdout,stderr = pro.communicate() # wait until starburst is done\n i += 1\n\n i = 0\n\n foo = raw_input('Save FUV grid? [default: n] ... ')\n if foo == '': foo = 'n'\n if foo == 'y':\n # Make a dataframe with results\n FUV = pd.DataFrame({'Age':np.zeros(int(nmodels)),'Z':np.zeros(int(nmodels)),'L_FUV':np.zeros(int(nmodels))})\n FUV = FUV[['Age','Z','L_FUV']] # ordering dataframe\n i = 0\n for i1 in range(0,len(Ages)):\n Age = Ages[i1]\n for i2 in range(0,len(Zs)):\n name = 'sb_'+str(i)\n Z = Zs[i2]\n FUV['Age'][i] = Age\n FUV['Z'][i] = Z\n # Calculate luminosity of this population:\n columns = ['time','wavelength','ltot','lstellar','lnebular']\n spec = pd.read_table(d_sb+name+'.spectrum1',names=columns,skiprows=6,sep='\\s*',engine='python')\n x = spec['wavelength'][spec['time']==spec['time'].max()].values # AA\n y = spec['lstellar'][spec['time']==spec['time'].max()].values # ergs/s/AA\n int_range = clight*hplanck/np.array([6,13.6])*1e10 # eV -> AA\n L_FUV = scipy.integrate.simps(10**y[(x>int_range[1]) & (xint_range[1]) & (x\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nfrom subprocess import Popen, PIPE\nENCODING = 'utf-8'\n\ndef spawn(*args):\n\t\"\"\"Runs a program, waits for its termination and returns its stdout\"\"\"\n\tif len(args) == 1:\n\t\tpopen_arguments = args[0]\n\telse:\n\t\tpopen_arguments = args\n\tprocess = Popen(popen_arguments, stdout=PIPE)\n\tstdout, stderr = process.communicate()\n\treturn stdout.decode(ENCODING)\n","sub_path":"config/chroot_local-includes/usr/local/lib/python2.6/dist-packages/ranger/ext/spawn.py","file_name":"spawn.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469116437","text":"# *************************************************** Import ***********************************************************\r\nimport tkinter as tk\r\nfrom datetime import datetime\r\n\r\n\r\n# *************************************************** Variables ********************************************************\r\nclick_time = 0\r\ncurrent_time = 0\r\n\r\n\r\n# *************************************************** Functions ********************************************************\r\ndef update(event):\r\n \"\"\"Get the value of time when we release the key.\"\"\"\r\n global click_time\r\n click_time = int(str(datetime.now().time()).split(\":\")[1])*60 + float(str(datetime.now().time()).split(\":\")[-1])\r\n window.after(5000, updated_time)\r\n\r\n\r\ndef updated_time():\r\n \"\"\"Get the value of time after 5 sec.\"\"\"\r\n global current_time\r\n current_time = int(str(datetime.now().time()).split(\":\")[1])*60 + float(str(datetime.now().time()).split(\":\")[-1])\r\n\r\n\r\ndef check():\r\n \"\"\"Check the Difference between time when we release the key and value of time after 5 sec if the difference is more\r\n than 5 sec then clear the text in the Text box\"\"\"\r\n if current_time - click_time > 5:\r\n text.delete(\"1.0\", \"end\")\r\n window.after(50, check)\r\n\r\n\r\n# *************************************************** Main *************************************************************\r\n# Creating and config Window\r\nwindow = tk.Tk()\r\nwindow.title(\"Disappearing Text Writing App\")\r\nwindow.minsize(600, 600)\r\nwindow.maxsize(600, 600)\r\n\r\n# Creating and config Label\r\nlabel_1 = tk.Label(text=\"Write Something\", font=(\"Times New Roman\", 18))\r\nlabel_2 = tk.Label(text=\"What you write will Disappear after 5 second\", font=(\"Times New Roman\", 14))\r\nlabel_1.place(relx=.36, rely=0)\r\nlabel_2.place(relx=.23, rely=.05)\r\n\r\n# Creating and config Text\r\ntext = tk.Text(width=74, height=33)\r\ntext.place(relx=.004, rely=.1)\r\n\r\n# setting bind so that when key is released after 5 sec the text is removed from text widget\r\nwindow.bind(\"\", update)\r\nwindow.after(50, check)\r\n\r\nwindow.mainloop()\r\n\r\n# **********************************************************************************************************************\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"248139667","text":"import datetime\nfrom mongokit import Document\nfrom app.DAO import mongo, configParser\nfrom app.DAO import card\n\n__author__ = 'Davor Obilinovic'\n\n\nclass TradeProposal:\n\n def __init__(self, document= None):\n if document:\n self.document = document\n else:\n self.document = mongo.TradeProposalDocument()\n self.document[\"id\"] = get_next_id()\n\n pass\n\n def 
save(self):\n self.document.save()\n return self\n\n def get_id(self):\n return self.document[\"id\"]\n\n def add_card_in_TO(self, cardId, save=False):\n list = self.document[\"to_cards\"]\n if cardId not in list:\n list.append(cardId)\n if save:\n return self.save()\n return self\n\n def add_card_in_FROM(self, cardId, save=False):\n list = self.document[\"from_cards\"]\n if cardId not in list:\n list.append(cardId)\n if save:\n return self.save()\n return self\n\n def set_jad_in_TO(self, amount, save=False):\n self.document[\"to_jad\"] = amount\n if save:\n return self.save()\n return self\n\n def set_jad_in_FROM(self, amount, save=False):\n self.document[\"from_jad\"] = amount\n if save:\n return self.save()\n return self\n\n def is_negotiable(self):\n return self.document['negotiable']\n\n def is_empty_proposal(self):\n return len(self.document['from_cards'])==0\n\n def get_from(self):\n return self.document[\"from\"]\n\n def get_to(self):\n return self.document['to']\n\n def get_from_ids(self):\n return self.document[\"from_cards\"]\n\n def get_to_ids(self):\n return self.document[\"to_cards\"]\n\n def get_from_cards(self):\n idList = self.get_from_ids()\n cards = []\n for id in idList:\n c = card.get_by_id(id)\n cards.append(c)\n return cards\n\n def get_to_cards(self):\n idList = self.get_to_ids()\n cards = []\n for id in idList:\n c = card.get_by_id(id)\n cards.append(c)\n return cards\n\n def set_from(self, name):\n self.document[\"from\"] = name\n\n def set_to(self, name):\n self.document['to'] = name\n\n def is_full(self):\n return len(self.document[\"to_cards\"])>=10\n\n def is_valid(self):\n from_user = self.get_from()\n to_user = self.get_to()\n for card in self.get_from_cards():\n if card.get_owner() != from_user:\n return False\n for card in self.get_to_cards():\n if card.get_owner() != to_user:\n return False\n return True\n\n\n\ndef get_next_id():\n result = mongo[\"Magic2\"].administration.update({'q':'q'},{'$inc':{'maxTradeProposalId':int(1)}}, upsert=True)\n return mongo[\"Magic2\"].administration.find_one({'q':'q'},{'maxTradeProposalId':1})['maxTradeProposalId']\n\ndef append_empty_proposal(fr, to, ids):\n cur = mongo.TradeProposalDocument.find({'from':fr, 'to':to,'from_cards':{'$size':0} })\n tp = None\n for obj in cur:\n if len(obj[\"to_cards\"])<10:\n tp =TradeProposal(obj)\n if not tp:\n tp = TradeProposal()\n tp.set_from(fr)\n tp.set_to(to)\n for id in ids:\n if tp.is_full():\n tp.save()\n tp = TradeProposal()\n tp.set_from(fr)\n tp.set_to(to)\n tp.add_card_in_TO(int(id))\n tp.save()\n return True\n\ndef remove_proposal(id):\n mongo.TradeProposalDocument.remove({\"id\":id})\n\n\n@mongo.register\nclass TradeProposalDocument(Document):\n __database__ = configParser.get(\"Mongo\",\"DBname\")\n __collection__ = 'tradeProposals'\n\n use_schemaless = True\n use_dot_notation = True\n\n structure = {\n 'id' : int,\n 'creation_date' : datetime.datetime,\n 'from': basestring,\n 'to' : basestring,\n 'from_cards' : [int],\n 'to_cards' : [int],\n 'from_jad' : int,\n 'to_jad' : int,\n 'negotiable' : bool\n }\n\n default_values = {\n 'creation_date' : datetime.datetime.utcnow,\n 'from_cards' : [],\n 'to_cards' : [],\n 'from_jad' : 0,\n 'to_jad' : 0,\n 'negotiable' : True\n }","sub_path":"app/DAO/tradeProposal.py","file_name":"tradeProposal.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"284654021","text":"from utils import run_extractor, AbundanceMapsExtractor, 
profile_code\n\nimport argparse\nimport cProfile\nimport numpy as np\nfrom sklearn import manifold\n\nclass MDSExtractor(AbundanceMapsExtractor):\n\tdef __init__(self, args, profile):\n\t\tself.profile = profile\n\t\tself.profile_filename = \"mds.prof\"\n\t\tself.model = manifold.MDS(**args)\n\t\tsuper(MDSExtractor, self).__init__()\n\n\tdef extract_abundance_maps(self, hsi_3d, n_endmembers):\n\t\thsi_2d = hsi_3d.reshape( (-1, hsi_3d.shape[2]) )\n\t\tabundance_maps = profile_code(\n\t\t\t\tself.profile, self.profile_filename)(\n\t\t\t\t\t\tself.model.fit_transform)(hsi_2d)\n\t\tabundance_maps = np.moveaxis(abundance_maps, 1, 0)\n\t\tabundance_maps = np.reshape(abundance_maps,\n\t\t\t\t(abundance_maps.shape[0], hsi_3d.shape[0], hsi_3d.shape[1]))\n\t\treturn abundance_maps\n\ndef main(in_filename, out_filename, mds_args, profile):\n\textractor = MDSExtractor(mds_args, profile)\n\trun_extractor(in_filename,\n\t\t\tout_filename, extractor, mds_args[\"n_components\"])\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(\n\t\t\tdescription=\"Multi-Dimensional Scaling\",\n\t\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\tparser.add_argument(\"--mds-n-components\",\n\t\t\ttype=int, default=2, metavar=\"int\",\n\t\t\thelp=\"number of coordinates for the manifold\")\n\tparser.add_argument(\"--mds-metric\",\n\t\t\ttype=bool, default=True, metavar=\"bool\",\n\t\t\thelp=\"use metric (instead of nonmetric) MDS\")\n\tparser.add_argument(\"--mds-n-init\",\n\t\t\ttype=int, default=4, metavar=\"int\",\n\t\t\thelp=\"number of times the SMACOF algorithm will be run with different initializations\")\n\tparser.add_argument(\"--mds-max-iter\",\n\t\t\ttype=int, default=300, metavar=\"int\",\n\t\t\thelp=\"number of times the SMACOF algorithm will be run with different initializations\")\n\tparser.add_argument(\"--mds-verbose\",\n\t\t\ttype=int, default=0, metavar=\"int\",\n\t\t\thelp=\"level of verbosity\")\n\tparser.add_argument(\"--mds-eps\",\n\t\t\ttype=float, default=1e-3, metavar=\"float\",\n\t\t\thelp=\"relative tolerance with respect to stress at which to declare convergence\")\n\tparser.add_argument(\"--mds-n-jobs\",\n\t\t\ttype=int, default=1, metavar=\"int\",\n\t\t\thelp=\"number of parallel jobs; if -1, then the number of jobs is set to the number of cores\")\n\tparser.add_argument(\"--mds-random-state\",\n\t\t\ttype=int, default=None, metavar=\"int\",\n\t\t\thelp=\"seed used by the random number generator\")\n\tparser.add_argument(\"--mds-dissimilarity\",\n\t\t\ttype=str, default=\"euclidean\", metavar=\"str\",\n\t\t\tchoices=[\"euclidean\", \"precomputed\"],\n\t\t\thelp=\"choices: {0}\".format([\"euclidean\", \"precomputed\"]))\n\n\tparser.add_argument(\"in_filename\", type=str, metavar=\"infile\")\n\tparser.add_argument(\"out_filename\", type=str, metavar=\"outfile\")\n\t\n\tparser.add_argument(\"--profile\",\n\t\t\taction=\"store_true\",\n\t\t\thelp=\"profile program execution\")\n\t\n\targuments = vars(parser.parse_args())\n\t\n\targs = {}\n\tfor key,value in arguments.iteritems():\n\t\tif key.startswith(\"mds_\"):\n\t\t\targs.setdefault(\"mds_args\", {})[key[len(\"mds_\"):]] = value\n\t\telse:\n\t\t\targs[key] = value\n\t\n\tmain(**args)\n","sub_path":"Notebooks/unmixing_scripts/mds.py","file_name":"mds.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524343558","text":"#\n# [316] Remove Duplicate Letters\n#\n# 
https://leetcode.com/problems/remove-duplicate-letters/description/\n#\n# algorithms\n# Hard (30.57%)\n# Total Accepted: 41.2K\n# Total Submissions: 134.8K\n# Testcase Example: '\"bcabc\"'\n#\n# Given a string which contains only lowercase letters, remove duplicate\n# letters so that every letter appear once and only once. You must make sure\n# your result is the smallest in lexicographical order among all possible\n# results.\n#\n# Example 1:\n#\n#\n# Input: \"bcabc\"\n# Output: \"abc\"\n#\n#\n# Example 2:\n#\n#\n# Input: \"cbacdcbc\"\n# Output: \"acdb\"\n#\n#\n#\n\n\nclass Solution(object):\n def removeDuplicateLetters(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n counter = collections.Counter(s)\n res = []\n visited = set()\n for c in s:\n counter[c] -= 1\n if c in visited:\n continue\n while res and res[-1] > c and counter[res[-1]] > 0:\n visited.remove(res[-1])\n res.pop()\n res.append(c)\n visited.add(c)\n return \"\".join(res)\n","sub_path":"316.remove-duplicate-letters.py","file_name":"316.remove-duplicate-letters.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"118209802","text":"#!usr/bin/env python \n#-*- coding:utf-8 _*- \n\"\"\"\n@version: python3.6\n@author: ikkyu-wen\n@contact: wenruichn@gmail.com\n@time: 2019-08-02 10:26\n公众号:AI成长社\n知乎:https://www.zhihu.com/people/qlmx-61/columns\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.model_selection import KFold\nimport gc\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense,BatchNormalization,Dropout\nimport keras\nfrom keras import backend as K\n\n## load data\ntrain_data = pd.read_csv('../../data/train.csv')\ntest_data = pd.read_csv('../../data/test.csv')\nepochs = 3\nbatch_size = 1024\nclasses = 1\n\n\n## category feature one_hot\ntest_data['label'] = -1\ndata = pd.concat([train_data, test_data])\ncate_feature = ['gender', 'cell_province', 'id_province', 'id_city', 'rate', 'term']\nfor item in cate_feature:\n data[item] = LabelEncoder().fit_transform(data[item])\n item_dummies = pd.get_dummies(data[item])\n item_dummies.columns = [item + str(i + 1) for i in range(item_dummies.shape[1])]\n data = pd.concat([data, item_dummies], axis=1)\ndata.drop(cate_feature,axis=1,inplace=True)\n\ntrain = data[data['label'] != -1]\ntest = data[data['label'] == -1]\n\n## Clean up the memory\ndel data, train_data, test_data\ngc.collect()\n\n## get train feature\ndel_feature = ['auditing_date', 'due_date', 'label']\nfeatures = [i for i in train.columns if i not in del_feature]\n\n\n## Convert the label to two categories\ntrain['label'] = train['label'].apply(lambda x: 1 if x==32 else 0)\ntrain_x = train[features]\ntrain_y = train['label'].values\ntest = test[features]\n\n## Fill missing value\nfor i in train_x.columns:\n # print(i, train_x[i].isnull().sum(), test[i].isnull().sum())\n if train_x[i].isnull().sum() != 0:\n train_x[i] = train_x[i].fillna(-1)\n test[i] = test[i].fillna(-1)\n\n## normalized\nscaler = StandardScaler()\ntrain_X = scaler.fit_transform(train_x)\ntest_X = scaler.transform(test)\n\n## simple mlp model\nK.clear_session()\ndef MLP(dropout_rate=0.25, activation='relu'):\n start_neurons = 512\n model = Sequential()\n model.add(Dense(start_neurons, input_dim=train_X.shape[1], activation=activation))\n model.add(BatchNormalization())\n model.add(Dropout(dropout_rate))\n\n 
model.add(Dense(start_neurons // 2, activation=activation))\n model.add(BatchNormalization())\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(start_neurons // 4, activation=activation))\n model.add(BatchNormalization())\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(start_neurons // 8, activation=activation))\n model.add(BatchNormalization())\n model.add(Dropout(dropout_rate / 2))\n\n model.add(Dense(classes, activation='sigmoid'))\n return model\n\n\ndef plot_loss_acc(history, fold):\n plt.plot(history.history['loss'][1:])\n plt.plot(history.history['val_loss'][1:])\n plt.title('model loss')\n plt.ylabel('val_loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'Validation'], loc='upper left')\n plt.savefig('../../result/model_loss' + str(fold) + '.png')\n plt.show()\n\n plt.plot(history.history['acc'][1:])\n plt.plot(history.history['val_acc'][1:])\n plt.title('model Accuracy')\n plt.ylabel('val_acc')\n plt.xlabel('epoch')\n plt.legend(['train', 'Validation'], loc='upper left')\n plt.savefig('../../result/model_accuracy' + str(fold) + '.png')\n plt.show()\n\n\nfolds = KFold(n_splits=5, shuffle=True, random_state=2019)\nNN_predictions = np.zeros((test_X.shape[0], classes))\noof_preds = np.zeros((train_X.shape[0], classes))\n\npatience = 50 ## How many steps to stop\ncall_ES = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=1,\n mode='auto', baseline=None)\n\nfor fold_, (trn_, val_) in enumerate(folds.split(train_x)):\n print(\"fold {}\".format(fold_ + 1))\n x_train, y_train = train_X[trn_], train_y[trn_]\n x_valid, y_valid = train_X[val_], train_y[val_]\n\n\n model = MLP(dropout_rate=0.5, activation='relu')\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n history = model.fit(x_train, y_train,\n validation_data=[x_valid, y_valid],\n epochs=epochs,\n batch_size=batch_size,\n callbacks=[call_ES, ],\n shuffle=True,\n verbose=1)\n\n # plot_loss_acc(history, fold_ + 1)\n\n # # Get predicted probabilities for each class\n oof_preds[val_] = model.predict_proba(x_valid, batch_size=batch_size)\n NN_predictions += model.predict_proba(test_X, batch_size=batch_size) / folds.n_splits\n\nthreshold = 0.5\nresult = []\nfor pred in NN_predictions:\n result.append(1 if pred > threshold else 0)\nprint(result)\n","sub_path":"code/keras/binary_class.py","file_name":"binary_class.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"92138541","text":"from enums import enum\n\nkeystates = enum(down=1, up=0, hold=2)\n\nbuckies = [\"KEY_LEFTSHIFT\", \"KEY_RIGHTSHIFT\",\n \"KEY_LEFTCTRL\", \"KEY_RIGHTCTRL\",\n \"KEY_CAPSLOCK\", \"KEY_LEFTALT\", \"KEY_RIGHTALT\"]\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nnumeric_keys = \"0123456789)!@#$%^&*(\"\nchar_keys = {\n \"KEY_GRAVE\": \"`\",\n \"S-KEY_GRAVE\": \"~\",\n \"KEY_SPACE\": \" \",\n \"KEY_MINUS\": \"-\",\n \"S-KEY_MINUS\": \"_\",\n \"KEY_EQUAL\": \"=\",\n \"S-KEY_EQUAL\": \"+\",\n \"KEY_SEMICOLON\": \";\",\n \"S-KEY_SEMICOLON\": \":\",\n \"KEY_APOSTROPHE\": \"'\",\n \"S-KEY_APOSTROPHE\": '\"',\n \"KEY_COMMA\": \",\",\n \"S-KEY_COMMA\": \"<\",\n \"KEY_DOT\": \".\",\n \"S-KEY_DOT\": \">\",\n \"KEY_SLASH\": \"/\",\n \"S-KEY_SLASH\": \"?\",\n \"KEY_LEFTBRACE\": \"[\",\n \"S-KEY_LEFTBRACE\": \"{\",\n \"KEY_RIGHTBRACE\": \"]\",\n \"S-KEY_RIGHTBRACE\": \"}\",\n \"KEY_BACKSLASH\": \"\\\\\",\n \"S-KEY_BACKSLASH\": \"|\",\n \"KEY_TAB\": \"\\t\"\n}\n\nfor c in alphabet:\n 
char_keys[\"KEY_%s\" % c.upper()] = c\n char_keys[\"S-KEY_%s\" % c.upper()] = c.upper()\n\nfor i in range(10):\n char_keys[\"KEY_%s\" % i] = numeric_keys[i]\n char_keys[\"S-KEY_%s\" % i] = numeric_keys[i+10]\n\nclass KeyTranslator(object):\n def __init__(self):\n self.alt, self.ctrl, self.shift = False, False, False\n def _prefix(self):\n return \"%s%s%s\" % (self.alt and \"A-\" or \"\",\n self.ctrl and \"C-\" or \"\",\n self.shift and \"S-\" or \"\")\n def translate(self, keycode, keystate):\n if keycode in buckies:\n if keystate in [keystates.down, keystates.up]:\n if keycode in [\"KEY_LEFTSHIFT\", \"KEY_RIGHTSHIFT\"]:\n self.shift = (not keystate == keystates.up)\n elif keycode in [\"KEY_LEFTCTRL\", \"KEY_RIGHTCTRL\", \"KEY_CAPSLOCK\"]:\n self.ctrl = (not keystate == keystates.up)\n elif keycode in [\"KEY_LEFTALT\", \"KEY_RIGHTALT\"]:\n self.alt = (not keystate == keystates.up)\n return None, None\n\n elif keystate in [keystates.down, keystates.hold]:\n complete_code = \"%s%s\" % (self._prefix(), keycode)\n try:\n return char_keys[complete_code], complete_code\n except KeyError:\n return None, complete_code\n else:\n return None, None\n \n","sub_path":"paperui/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"106354576","text":"import datetime\r\nfrom flask import Flask,render_template\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n now=datetime.datetime.now()\r\n new_year=now.month==1 and now.day==1\r\n return render_template(\"ind.html\",new_year=new_year)\r\n\r\n@app.route(\"/for\")\r\ndef arr():\r\n names=[\"Kanish\",\"Mayu\",\"Mayank\"]\r\n return render_template('ind2.html',names=names)","sub_path":"Flask/newyear/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"230204030","text":"import asyncio\nimport contextlib\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\n\nimport synapse.lib.modelrev as s_modelrev\nimport synapse.lib.remotelayer as s_remotelayer\n\nimport synapse.tests.utils as s_t_utils\nimport synapse.tests.test_cortex as t_cortex\n\nclass RemoteLayerTest(t_cortex.CortexTest):\n\n @contextlib.asynccontextmanager\n async def getTestCore(self, conf=None, dirn=None):\n # make remote core from provided dirn for repeatability\n dirn0 = None\n if dirn is not None:\n dirn0 = s_common.gendir(dirn, 'remotecore')\n\n async with self.getRemoteCores(dirn0=dirn0, conf1=conf, dirn1=dirn) as (core0, core1):\n yield core1\n\n @contextlib.asynccontextmanager\n async def getTestReadWriteCores(self, conf=None, dirn=None):\n dirn0 = None\n if dirn is not None:\n dirn0 = s_common.gendir(dirn, 'remotecore')\n\n async with self.getRemoteCores(dirn0=dirn0, conf1=conf, dirn1=dirn) as (core0, core1):\n yield core1, core0\n\n @contextlib.asynccontextmanager\n async def getRemoteCores(self, conf0=None, conf1=None, dirn0=None, dirn1=None):\n '''\n Returns a cortex and a second cortex that has a second remote layer pointing to the first cortex's layer\n '''\n async with t_cortex.CortexTest.getTestCore(self, conf=conf0, dirn=dirn0) as core0:\n async with t_cortex.CortexTest.getTestCore(self, conf=conf1, dirn=dirn1) as core1:\n conf = {'url': core0.getLocalUrl('*/layer')}\n layr = await core1.addLayer(type='remote', config=conf)\n await core1.view.addLayer(layr)\n yield core0, core1\n\n 
async def test_cortex_readonly_toplayer(self):\n '''\n Test the various ways to incorrectly put a remote layer as the write layer\n '''\n async with t_cortex.CortexTest.getTestCore(self) as core0:\n async with t_cortex.CortexTest.getTestCore(self) as core1:\n conf = {'url': core0.getLocalUrl('*/layer')}\n layr = await core1.addLayer(type='remote', config=conf)\n await self.asyncraises(s_exc.ReadOnlyLayer, core1.view.addLayer(layr, indx=0))\n await self.asyncraises(s_exc.ReadOnlyLayer, core1.view.setLayers([layr.iden]))\n await self.asyncraises(s_exc.ReadOnlyLayer, core1.addView(s_common.guid(), 'root', [layr.iden]))\n view = await core1.addView(s_common.guid(), 'root', [])\n await self.asyncraises(s_exc.ReadOnlyLayer, view.addLayer(layr))\n\n async def test_cortex_remote_layer(self):\n\n async with self.getRemoteCores() as (directcore, core):\n # We write to directcore and make sure we can read from core\n\n await s_common.aspin(directcore.eval('[ test:str=woot :tick=2015 ]'))\n\n layr = core.view.layers[1]\n self.true(isinstance(layr, s_remotelayer.RemoteLayer))\n\n self.len(1, [x async for x in layr.iterFormRows('test:str')])\n self.len(1, [x async for x in layr.iterPropRows('test:str', 'tick')])\n self.len(2, [x async for x in layr.iterUnivRows('.created')])\n\n iden = s_common.guid()\n\n buid = s_common.buid(('test:str', 'woot'))\n props = await layr.getBuidProps(buid)\n\n self.eq('woot', props.get('*test:str'))\n\n await layr.setOffset(iden, 200)\n self.eq(200, await layr.getOffset(iden))\n\n self.ne((), tuple([x async for x in layr.splices(0, 200)]))\n\n self.eq(s_modelrev.maxvers, await layr.getModelVers())\n await self.asyncraises(s_exc.SynErr, layr.setModelVers((9, 9, 9)))\n\n async def test_cortex_iter_props(self):\n self.skip('test_cortex_iter_props directly uses layers')\n\n async def test_cortex_remote_reconn(self):\n\n async with self.getRemoteCores() as (core0, core1):\n\n await core0.eval('[test:str=woot]').list()\n self.len(1, await core1.eval('test:str=woot').list())\n\n # hulk smash the proxy\n await core1.view.layers[1].proxy.fini()\n\n # cause a reconnect...\n self.len(1, await core1.eval('test:str=woot').list())\n\nclass RemoteLayerConfigTest(s_t_utils.SynTest):\n\n async def test_cortex_remote_config(self):\n\n # use the original API so we dont do yodawg layers remote layers\n async with self.getTestCoreAndProxy() as (core0, prox0):\n\n rem1 = await core0.auth.addUser('remuser1')\n\n await rem1.setPasswd('beep')\n await rem1.addRule((True, ('layer:lift', core0.iden)))\n\n # make a test:str node\n nodes = await core0.eval('[test:str=woot]').list()\n self.len(1, nodes)\n\n created = nodes[0].get('.created')\n\n addr, port = await core0.dmon.listen('tcp://127.0.0.1:0/')\n\n layerurl = f'tcp://remuser1:beep@127.0.0.1:{port}/cortex/layer'\n\n await asyncio.sleep(0.002)\n\n with self.getTestDir() as dirn:\n\n async with self.getTestCore(dirn=dirn) as core1:\n\n self.len(0, await core1.eval('test:str=woot').list())\n self.len(1, core1.view.layers)\n\n # Add the remote layer via Telepath\n self.nn(await core1.joinTeleLayer(layerurl))\n self.len(2, core1.view.layers)\n\n self.len(1, await core1.eval('test:str=woot').list())\n\n # Lift the node and set a prop in our layer\n nodes = await core1.eval('test:str=woot [:tick=2018]').list()\n self.len(1, nodes)\n self.eq(created, nodes[0].get('.created'))\n self.eq(1514764800000, nodes[0].get('tick'))\n\n async with self.getTestCore(dirn=dirn) as core1:\n\n self.len(2, core1.view.layers)\n\n nodes = await 
core1.eval('test:str=woot').list()\n self.len(1, nodes)\n self.eq(created, nodes[0].get('.created'))\n self.eq(1514764800000, nodes[0].get('tick'))\n","sub_path":"synapse/tests/test_lib_remotelayer.py","file_name":"test_lib_remotelayer.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"300788004","text":"# coding:utf8\nimport os\nimport numpy as np\nimport cv2\nimport random\nimport uuid\n\n\nclass Code:\n # 随机一个字母或者数字\n def random_chr(self):\n num = random.randint(1, 3)\n if num == 1:\n # 随机一个数字\n char = random.randint(48, 57)\n elif num == 2:\n # 随机一个大写字母\n char = random.randint(97, 122)\n else:\n # 随机一个小写字母\n char = random.randint(65, 90)\n return chr(char)\n\n # 随机一个干扰字符\n def random_dis(self):\n arr = [\"^\", \"_\", \"-\", \".\", \"~\", \"%\"]\n idx = random.randint(0, len(arr) - 1)\n return arr[idx]\n\n # 定义干扰字符颜色\n def random_dis_color(self):\n return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))\n\n # 定义字符颜色\n def random_chr_color(self):\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n\n # 生成验证码\n def create_code(self):\n width = 240 # 240px\n height = 60 # 60px\n # 创建一个画布\n img = np.zeros((height, width, 3), np.uint8)\n color = (192, 192, 192)\n img[:, :, 0] = color[0] # B\n img[:, :, 1] = color[1] # G\n img[:, :, 2] = color[2] # R\n # 创建font对象,定义字体大小\n font_name = random.randint(0, 5)\n font_size = random.randint(10, 20) / 10.0\n # 创建像素点\n for x in range(0, width, 5):\n for y in range(0, height, 5):\n cv2.circle(img, (x, y), 1, self.random_dis_color(), -1)\n # 填充干扰字符\n for v in range(0, width, 30):\n dis_char = self.random_dis()\n xloc = 5 + v\n # 字符底部所在位置范围\n yloc = random.randint(height - 15, height)\n cv2.putText(img, dis_char, (xloc, yloc), font_name, font_size, self.random_dis_color(), 2)\n # 填充字符\n chars = \"\"\n for v in range(4):\n c = self.random_chr()\n chars += str(c)\n # 字符底部所在范围\n yloc = random.randint(height - 15, height)\n xloc = int(v * (width / 4)) + random.randint(0, 30)\n cv2.putText(img, c, (xloc, yloc), font_name, font_size, self.random_chr_color(), 2)\n # 图片保存\n image_name = \"%s.jpg\" % uuid.uuid4().hex\n save_dir = os.path.join(os.path.dirname(__file__), \"static/code\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n cv2.imwrite(save_dir + \"/\" + image_name, img)\n return dict(\n img_name=image_name,\n code=chars\n )\n\n\nif __name__ == \"__main__\":\n code = Code()\n print(code.create_code())\n","sub_path":"a_simple_flask_project/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588031964","text":"# This file is a part of quicksave project.\n# Copyright (c) 2017 Aleksander Gajewski .\n\nfrom selenium import webdriver\n\n\ndef save_thumbnail(url, thumbnail_file):\n browser = webdriver.PhantomJS()\n browser.get(url)\n browser.save_screenshot(thumbnail_file)\n browser.quit()\n","sub_path":"src/quicksave_async/libs/selenium_thumbnail.py","file_name":"selenium_thumbnail.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"599697463","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: 
T:\\InGame\\Gameplay\\Scripts\\Server\\household_milestones\\household_milestone.py\n# Compiled at: 2019-03-11 21:55:19\n# Size of source mod 2**32: 2120 bytes\nfrom event_testing.milestone import Milestone\nfrom sims4.tuning.instances import HashedTunedInstanceMetaclass\nfrom sims4.tuning.tunable import OptionalTunable\nfrom ui.ui_dialog_notification import UiDialogNotification\nimport services, sims4.log, sims4.resources\nlogger = sims4.log.Logger('Milestone')\n\nclass HouseholdMilestone(Milestone, metaclass=HashedTunedInstanceMetaclass, manager=services.get_instance_manager(sims4.resources.Types.HOUSEHOLD_MILESTONE)):\n INSTANCE_TUNABLES = {'notification': OptionalTunable(description='\\n If enabled then we will display a notification when this milestone\\n is completed.\\n ',\n tunable=UiDialogNotification.TunableFactory(description='\\n This text will display in a notification pop up when completed.\\n '))}\n\n @classmethod\n def handle_event(cls, sim_info, event, resolver):\n if sim_info is None:\n return\n if sim_info.household is None:\n logger.error(\"Household doesn't exist for milestone {} and SimInfo {}\", cls, sim_info, owner='camilogarcia')\n return\n household_milestone_tracker = sim_info.household.household_milestone_tracker\n if household_milestone_tracker is None:\n return\n household_milestone_tracker.handle_event(cls, event, resolver)\n\n @classmethod\n def register_callbacks(cls):\n tests = [objective.objective_test for objective in cls.objectives]\n services.get_event_manager().register_tests(cls, tests)","sub_path":"Scripts/simulation/household_milestones/household_milestone.py","file_name":"household_milestone.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"386324556","text":"\"\"\"\nWrite a game where the player can move 5 times in\n ONE direction \n\nThe player has a chance to find a chest(40%) or nothing(60%)\n\nThe chests have different colours and different rewards based on colours\n\nThe chance to get\ngreen - 75%\norange - 20%\npurple - 4%\ngold (legendary) - 1%\n\nGold is a thing that can reward a player that opens the chest:\ngreen - 1000\norange - 4000\npurple - 9000\ngold (legendary) - 16000\n\n1 1 *1 (0 +1) * (0 + 1) = 1\n4 2*2 1 (1 + 1) * (1 + 1) = 4 * 1000\n9 3* 3 2\n16 4 * 4 3\n\nMake sure to write:\n1) clean code\n2) make self-descriptive variables\n\n\"\"\"\nimport random\nfrom enum import Enum\n\nEvent = Enum('Event', ['Chest', 'Nothing'])\n\neventDictionary = {\n Event.Chest: 0.8,\n Event.Nothing: 0.2\n }\neventList = tuple(eventDictionary.keys())\neventProbability = tuple(eventDictionary.values())\n\nColours = Enum('Colours', {\"Green\": \"green\",\n \"Orange\": \"orange\",\n \"Purple\": \"purple\",\n \"Gold\": \"LEGENDARY\"\n })\n\nchestColoursDictionary = {\n Colours.Green : 0.75,\n Colours.Orange : 0.2,\n Colours.Purple : 0.04,\n Colours.Gold : 0.01\n }\nchestColourList = tuple(chestColoursDictionary.keys())\n\nchestColourProbability = tuple(chestColoursDictionary.values())\n\nrewardsForChests = {\n chestColourList[reward]: (reward + 1) * (reward + 1) * 1000\n for reward in range(4)\n }\n\ngameLength = 5\ngoldAcquired = 0\nwhile gameLength > 0:\n playerAnswer = input(\"Do you want to move forward?\")\n if (playerAnswer == \"yes\"):\n print(\"Great, let's see what you got...\")\n drawnEvent = random.choices(eventList, eventProbability)[0]\n if(drawnEvent == Event.Chest):\n print(\"You've drawn a CHEST\")\n drawnColour = 
random.choices(chestColourList, chestColourProbability)[0]\n print(\"The chest color is\", drawnColour.value)\n playerReward = rewardsForChests[drawnColour]\n goldAcquired = goldAcquired + playerReward\n elif(drawnEvent == Event.Nothing):\n print(\"You've drawn nothing, you are so unlucky!\")\n \n else:\n print(\"You can go just straight man, nothing else, this game is dumb\")\n continue \n \n gameLength = gameLength - 1\n\nprint(\"Congratulation, you have acquired:\", goldAcquired)\n\n\n","sub_path":"61. Game drawing chests with gold/gamedrawingchests.py","file_name":"gamedrawingchests.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571135747","text":"#!/usr/bin/env python3\nn=int(input(\"enter the value of n: \"))\n\nprint(\"enter values for the matria A\")\n\na=[]\n\nfor i in range(n):\n \n a.append([ int (x) for x in input().split()])\n \nprint(\"enter values for the Matri B\")\n\nb=[]\n\nfor i in range(n):\n \n b.append([int(x) for x in input().split()])\n\nc=[]\n\nfor i in range(n):\n\n c.append(a[i][j] * b[i][j] for j in range(n))\n\nprint(\"after matrix multiplication\")\n\nprint(\" _\"*7*n)\n\nfor x in c:\n for y in x:\n \n print(str(y).rjust(5), end=' ')\n print()\n\nprint(\"_\" *7 *n)\n","sub_path":"python/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514811581","text":"## purpose of this script is to remove variants that are very unlikely to be informative in the fine-mapping process.\nimport os\nimport sys\n\noutput_dir=sys.argv[1]\ninput_dir=sys.argv[2]\nfile_stem=sys.argv[3]\n\ninput_file=input_dir + \"/\" + file_stem\nout_file_name=output_dir + \"/\" + file_stem\n\nprint(input_file)\nprint(out_file_name)\nif os.path.isfile(out_file_name) is True:\n print(input_file + \" already exists. Skipping trim step for this locus. 
\\n\")\n exit()\n\nwith open(out_file_name, 'a') as out_file:\n\twith open(input_file, 'r') as locus:\n\t\theader=locus.readline()\n\t\tout_file.write(header)\n\t\tfor line in locus:\t\t\t## At this point, lines look like this:\n\t\t\tline=line.strip().split() \t## ['chr5:8004319', 'chr5', '8004319', 'T', 'C', 'NA', '-0.425757821365', 'NA']\n\t\t\tsumZ=0\n\t\t\tNA_count=0\n\n\t\t\tfor i in range(5, len(line)):\n\t\t\t\tif line[i]==\"NA\":\n\t\t\t\t\tNA_count+=1\n\t\t\t\telse:\n\t\t\t\t\tsumZ=float(sumZ) + abs(float(line[i]))\n\n\t\t\tnum_eth=len(line) - (5 + NA_count)\t\t\n\t\t\tif num_eth==1 and float(sumZ) < 1.5:\n\t\t\t\tcontinue\n#\t\t\telif (float(sumZ)/float(num_eth)) < 1:\n#\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tout_file.write(\" \".join(line) + \"\\n\")\n\n","sub_path":"PAINTOR/s3_irrelevant_variant_trimmer.py","file_name":"s3_irrelevant_variant_trimmer.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"171105657","text":"# coding: utf-8\r\nfrom utils.logging_utils import global_logger\r\n\r\n\r\n__all__ = [\r\n \"DetectorBase\",\r\n \"PatternDetectorBase\",\r\n]\r\n\r\n\r\nclass DetectorBase(object):\r\n detector_name = \"\"\r\n\r\n @property\r\n def name(self):\r\n return self.detector_name\r\n\r\n def match(self, event, content, *args, **kwargs):\r\n result = []\r\n return result\r\n\r\n\r\nclass PatternDetectorBase(DetectorBase):\r\n patterns = []\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def match(self, event, content, *args, **kwargs):\r\n result = []\r\n\r\n for pattern in self.patterns:\r\n search_result = pattern.match(event, content)\r\n\r\n if search_result:\r\n global_logger.warning(\r\n \"Detector %s : %s pattern matched : %s\" % (\r\n self.name, event.src_path, pattern\r\n )\r\n )\r\n result.append(pattern)\r\n\r\n return result\r\n","sub_path":"detectors/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"43199852","text":"#!/usr/bin/env python3\n\nimport reddit.reddit as rd\n# import nltk\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk import tokenize\n\n\ndef word_feats(words):\n return dict([(word, True) for word in words])\n\n\ncomments = rd.get_comments(sub_id='2np694', more_comments=False)\n\nsid = SentimentIntensityAnalyzer()\n\narrays = [tokenize.sent_tokenize(x) for x in comments]\n\nsentences = [item for sublist in arrays for item in sublist]\n\nprint(sentences)\n\nfor sentence in sentences:\n print(sentence)\n ss = sid.polarity_scores(sentence)\n for k in sorted(ss):\n print('{0}: {1}, '.format(k, ss[k]), end='')\n print()\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173133172","text":"import sys\nimport json\nimport os \nimport time \nimport datetime\nfrom datetime import timedelta\nfrom pandas import DataFrame\nimport pandas as pd\n\ndef incMonth(d, m, y):\n if m == 1 or m == 3 or m == 5 or m== 7 or m == 8 or m == 10 or m == 12 :\n if d == 31 :\n if m < 12:\n return m + 1\n else:\n return 1\n elif m == 2 :\n if y%4 != 0:\n if d == 28 :\n return 3\n else:\n if d == 29 :\n return 3\n else:\n if d == 30:\n return m+1\n \n return m\n\ndef incrementDate(currDate):\n result = \"\"\n dSplit = currDate.split(\"T\")\n if dSplit[1] != \"23:59:59\":\n result += 
dSplit[0]\n else:\n m0 = int(dSplit[0].split(\"-\")[1])\n m1 = incMonth(int(dSplit[0].split(\"-\")[2]), int(dSplit[0].split(\"-\")[1]), int(dSplit[0].split(\"-\")[0]))\n if m0 != m1:\n if m1 < 10:\n result += dSplit[0].split(\"-\")[0] + \"-0\" + str(m1) + \"-01\"\n else:\n result += dSplit[0].split(\"-\")[0] + \"-\" + str(m1) + \"-01\"\n else:\n print(\"day\")\n result += dSplit[0].split(\"-\")[0] + \"-\" +dSplit[0].split(\"-\")[1] + \"-\" + str(int(dSplit[0].split(\"-\")[2])+1)\n result += \"T00:00:00\"\n return result\n result += \"T\"\n h = dSplit[1].split(\":\")\n if int(h[1]) < 59:\n result += h[0] + \":\"\n if int(h[2]) < 59 :\n if int(h[2])+1 < 10:\n result += h[1] + \":0\" + str(int(h[2])+1)\n else:\n result += h[1] + \":\" + str(int(h[2])+1)\n else:\n if int(h[1])+1 < 10:\n result += \"0\" + str(int(h[1])+1) + \":00\"\n else:\n result += str(int(h[1])+1) + \":00\"\n else:\n if int(h[2]) < 59 :\n if int(h[2])+1 < 10:\n result += h[0] + \":\" + h[1] + \":0\" + str(int(h[2])+1)\n else:\n result += h[0] + \":\" + h[1] + \":\" + str(int(h[2])+1)\n else:\n if int(h[0])+1 < 10 :\n result += \"0\" + str(int(h[0])+1) + \":00:00\"\n else:\n result += str(int(h[0])+1) + \":00:00\"\n \n return result\n\ndef processJson(filename, filei, csvFile):\n #print(\"hey\")\n #with open(\"jseconds.json\" ,'r') as f:\n with open(filename ,'r') as f:\n data=json.load(f)\n #intervalinfo=data.get('intervalsDataPoints')\n data1=data.get('data')\n\n print(\"file {0} with {1} packets\".format(filename, len(data1)))\n\n fill_count = 0\n\n #print(\"length is :{}\".format(len(datapoint)),datapoint) \n #print(datapoint[1],type(datapoint[1]))#print(len(datapoint[1]))\n daystamp=[]\n timestamp=[]\n openPrice_ask=[]\n openPrice_bid=[]\n closePrice_ask=[]\n closePrice_bid=[]\n highPrice_ask=[]\n highPrice_bid=[]\n lowPrice_ask=[]\n lowPrice_bid=[]\n lastTradedVolume=[]\n lastline = None\n secs = 1\n newDate = \"\"\n idx = 0\n\n for d in range(len(data1)):\n intervalinfo=data1[d].get('intervalsDataPoints')\n datapoint=[item.get('dataPoints') for item in intervalinfo]\n for i in datapoint:\n #for j in i:\n while idx < len(i):\n #print(type(j),j)\n #print(j.get('timestamp'))\n j = i[idx]\n \n if lastline != None and (datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\")) > newDate:\n if newDate.split(\"T\")[1] < \"21:00:00\" or newDate.split(\"T\")[1] > \"22:00:00\" :\n if fill_count < 120:\n #while (datetime.date.fromtimestamp(j.get('timestamp')/1000)) > newTStamp and :\n t = datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\")\n #print(\"t = {}\".format(t))\n #print(newDate)\n openPrice_ask.append(lastline.get('openPrice').get('ask'))\n openPrice_bid.append(lastline.get('openPrice').get('bid'))\n closePrice_ask.append(lastline.get('closePrice').get('ask'))\n closePrice_bid.append(lastline.get('closePrice').get('bid'))\n highPrice_ask.append(lastline.get('closePrice').get('ask'))\n highPrice_bid.append(lastline.get('closePrice').get('bid'))\n lowPrice_bid.append(lastline.get('closePrice').get('bid'))\n lowPrice_ask.append(lastline.get('closePrice').get('ask'))\n \n daystamp.append(newDate.split(\"T\")[0])\n timestamp.append(newDate)\n \n lastTradedVolume.append(0)\n #print(datetime.date.fromtimestamp(j.get('timestamp')/1000).strftime(\"%d-%m-%yT%H:%M:%S\"))\n #print(datetime.date.fromtimestamp(lastline.get('timestamp')/1000).strftime(\"%d-%m-%yT%H:%M:%S\"))\n #print(newTStamp.strftime(\"%d-%m-%yT%H:%M:%S\"))\n fill_count += 1\n newDate = 
incrementDate(newDate)\n\n elif len(j['openPrice']) > 0:\n openPrice_ask.append(j.get('openPrice').get('ask'))\n openPrice_bid.append(j.get('openPrice').get('bid'))\n closePrice_ask.append(j.get('closePrice').get('ask'))\n closePrice_bid.append(j.get('closePrice').get('bid'))\n highPrice_ask.append(j.get('highPrice').get('ask'))\n highPrice_bid.append(j.get('highPrice').get('bid'))\n lowPrice_bid.append(j.get('lowPrice').get('bid'))\n lowPrice_ask.append(j.get('closePrice').get('ask'))\n \n daystamp.append(datetime.date.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%d\"))\n timestamp.append(datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\"))\n \n lastTradedVolume.append(j.get('lastTradedVolume'))\n\n lastline = j\n newDate = incrementDate(datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\"))\n idx += 1\n fill_count = 0\n else:\n if lastline != None:\n if fill_count < 120:\n openPrice_ask.append(lastline.get('openPrice').get('ask'))\n openPrice_bid.append(lastline.get('openPrice').get('bid'))\n closePrice_ask.append(lastline.get('closePrice').get('ask'))\n closePrice_bid.append(lastline.get('closePrice').get('bid'))\n highPrice_ask.append(lastline.get('closePrice').get('ask'))\n highPrice_bid.append(lastline.get('closePrice').get('bid'))\n lowPrice_bid.append(lastline.get('closePrice').get('bid'))\n lowPrice_ask.append(lastline.get('closePrice').get('ask'))\n \n daystamp.append(datetime.date.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%d\"))\n timestamp.append(datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\"))\n \n lastTradedVolume.append(j.get('lastTradedVolume'))\n newDate = incrementDate(datetime.datetime.fromtimestamp(j.get('timestamp')/1000).strftime(\"%y-%m-%dT%H:%M:%S\"))\n fill_count += 1\n idx += 1\n\n\n PackedData=[daystamp,\n timestamp,\n openPrice_ask,\n openPrice_bid, \n highPrice_ask,\n highPrice_bid,\n lowPrice_ask,\n lowPrice_bid,\n closePrice_ask,\n closePrice_bid,\n lastTradedVolume,\n ]\n df=DataFrame(PackedData).transpose()\n df.columns=['Day',\n 'Time',\n 'Open Price ask',\n 'Open Price bid',\n 'High Price ask',\n 'High Price bid',\n 'Low Price ask',\n 'Low Price bid',\n 'Close Price ask',\n 'Close Price bid',\n 'Last Trade Volume',\n ]\n\n from time import strftime,localtime\n t = time.localtime()\n timeop = time.strftime(\"%d-%m-%Y %H'%M\", t)\n\n df.reset_index(drop=True, inplace=True)\n #df.to_csv(\"IsaacScrapper_Pumped{}.csv\".format(timeop),index=False)\n #df.to_csv(str(sys.argv[2]),index=False)\n outputF = \"c\" + str(filei) + \".csv\"\n df.to_csv(outputF,index=False)\n csvFile.write(outputF+'\\n')\n\nif __name__ == \"__main__\":\n os.environ['TZ'] = 'Europe/London'\n fl = []\n idx = 0\n txtF = open(\"jsonFIles.txt\", \"r\")\n csvF = open(\"csvfiles.txt\", \"w\")\n for l in txtF :\n fl.append(l.rstrip('\\n'))\n print(fl[len(fl)-1])\n for z in fl:\n if len(z) > 0:\n processJson(z, idx, csvF)\n idx += 1\n txtF.close()\n csvF.close()\n\n\n\n\n","sub_path":"x64/Release/blanket1.py","file_name":"blanket1.py","file_ext":"py","file_size_in_byte":9177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540449197","text":"from urllib.request import urlopen, urljoin\r\nimport re\r\n\r\ndef download_page(url):\r\n return urlopen(url).read().decode('utf-8')\r\n\r\ndef extract_image_locations(page):\r\n img_regex = re.compile(']+src=[\"\\'](.*?)[\"\\']',re.IGNORECASE)\r\n return 
img_regex.findall(page)\r\n\r\nif __name__ == '__main__':\r\n target_url = 'https://cloudnfv.wordpress.com/2019/11/19/regular-expression/'\r\n packtpub = download_page(target_url)\r\n image_locations = extract_image_locations(packtpub)\r\n for src in image_locations:\r\n print(urljoin(target_url, src))","sub_path":"Extracting Images from webpage.py","file_name":"Extracting Images from webpage.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"363518226","text":"import hashlib\n\nfrom django.db import DatabaseError\nfrom rest_framework import serializers\nfrom rest_framework.serializers import CharField\nfrom rest_framework.validators import ValidationError\n\nfrom problem.models import Problem\nfrom submission.models import Submission\nfrom support.models import Language, Support\n\n\nclass VerdictSerializer(serializers.ModelSerializer):\n class Meta:\n model = Submission\n fields = (\n 'id', 'remote_oj', 'remote_id', 'verdict_info', 'verdict', 'execute_time', 'execute_memory', 'status',\n 'reloadable', 'language_name', 'user', 'code', 'create_time')\n\n\nclass SubmissionSerializer(serializers.Serializer):\n code = CharField()\n language = CharField()\n remote_oj = CharField()\n remote_id = CharField()\n\n def save(self, user):\n \"\"\"\n 在这里可以统计提交量\n :param user: request.user\n :return: submission object\n \"\"\"\n try:\n language = self.validated_data['language']\n remote_oj = self.validated_data['remote_oj']\n language_obj = Language.objects.get(oj_name=remote_oj, oj_language=language)\n submission = Submission(code=self.validated_data['code'],\n user=user,\n language=language_obj.oj_language,\n language_name=language_obj.oj_language_name,\n sha256=hashlib.sha256(self.validated_data['code'].encode('utf-8')).hexdigest(),\n remote_id=self.validated_data['remote_id'],\n remote_oj=self.validated_data['remote_oj'])\n\n submission.save()\n return submission\n except DatabaseError:\n import traceback\n traceback.print_exc()\n return None\n\n def validate_code(self, value):\n if len(value) < 20:\n raise ValidationError('code is too short')\n return value\n\n def validate_remote_oj(self, remote_oj):\n if remote_oj not in list({item.oj_name for item in Support.objects.filter(oj_enable=True)}):\n raise ValidationError(str(remote_oj) + ' is not supported')\n return remote_oj\n\n def validate(self, value):\n remote_oj = value['remote_oj']\n remote_id = value['remote_id']\n language = value['language']\n if Problem.objects.filter(remote_oj=remote_oj, remote_id=remote_id).exists() is False:\n raise ValidationError('problem not exist')\n if Language.objects.filter(oj_name=remote_oj, oj_language=language).exists() is False:\n raise ValidationError('language not exist')\n return value\n\n\nclass SubmissionListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Submission\n fields = (\n 'id', 'remote_oj', 'user', 'remote_id', 'language',\n 'language_name', 'verdict_info', 'verdict', 'execute_time', 'reloadable',\n 'execute_memory', 'create_time', 'status')\n","sub_path":"submission/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"149050265","text":"\"\"\"Redis Sorted Set Commands Mixin\"\"\"\n\n\nclass SortedSetsMixin(object):\n \"\"\"Redis Sorted Set Commands Mixin\"\"\"\n\n def zadd(self, key, *members, xx=False, nx=False, ch=False, incr=False):\n 
\"\"\"Adds all the specified members with the specified scores to the\n sorted set stored at key. It is possible to specify multiple score /\n member pairs. If a specified member is already a member of the sorted\n set, the score is updated and the element reinserted at the right\n position to ensure the correct ordering.\n\n If key does not exist, a new sorted set with the specified members as\n sole members is created, like if the sorted set was empty. If the key\n exists but does not hold a sorted set, an error is returned.\n\n The score values should be the string representation of a double\n precision floating point number. +inf and -inf values are valid values\n as well.\n\n **Members parameters**\n\n ``members`` could be either:\n - a single dict where keys correspond to scores and values to elements\n - multiple strings paired as score then element\n\n .. code::\n\n yield client.zadd('myzset', {'1': 'one', '2': 'two'})\n yield client.zadd('myzset', '1', 'one', '2', 'two')\n\n **ZADD options (Redis 3.0.2 or greater)**\n ZADD supports a list of options. Options are:\n ``xx``: Only update elements that already exist. Never add elements.\n ``nx``: Don't update already existing elements. Always add new elements.\n ``ch``: Modify the return value from the number of new elements added,\n to the total number of elements changed (CH is an abbreviation of\n changed). Changed elements are new elements added and elements already\n existing for which the score was updated. So elements specified in the\n command having the same score as they had in the past are not counted.\n Note: normally the return value of ZADD only counts the number of new\n elements added.\n ``incr``: When this option is specified ZADD acts like\n :meth:`~tredis.RedisClient.zincrby`. Only one score-element pair can be\n specified in this mode.\n\n .. note::\n\n **Time complexity**: ``O(log(N))`` for each item added, where ``N`` is\n the number of elements in the sorted set.\n\n :param key: The key of the sorted set\n :type key: :class:`str`, :class:`bytes`\n :param members: Elements to add\n :type members: :class:`dict`, :class:`str`, :class:`bytes`\n :param bool xx: Only update elements that already exist\n :param bool nx: Don't update already existing elements\n :param bool ch: Return the number of changed elements\n :param bool incr: Increment the score of an element\n :rtype: int, :class:`str`, :class:`bytes`\n :returns: Number of elements changed, or the new score if incr is set\n :raises: :exc:`~tredis.exceptions.RedisError`\n \"\"\"\n command = [b'ZADD', key]\n if xx:\n command += ['XX']\n if nx:\n command += ['NX']\n if ch:\n command += ['CH']\n if incr:\n command += ['INCR']\n\n if len(members) == 1:\n for k in members[0]:\n command += [k, members[0][k]]\n else:\n command += list(members)\n return self._execute(command)\n\n def zrangebyscore(self,\n key,\n min_score,\n max_score,\n with_scores=False,\n offset=0,\n count=0):\n \"\"\"Returns all the elements in the sorted set at key with a score\n between min and max (including elements with score equal to min or\n max). The elements are considered to be ordered from low to high\n scores.\n\n The elements having the same score are returned in lexicographical\n order (this follows from a property of the sorted set implementation in\n Redis and does not involve further computation).\n\n The optional ``offset`` and ``count`` arguments can be used to only get\n a range of the matching elements (similar to SELECT LIMIT offset, count\n in SQL). 
Keep in mind that if offset is large, the sorted set needs to\n be traversed for offset elements before getting to the elements to\n return, which can add up to ``O(N)`` time complexity.\n\n The optional ``with_scores`` argument makes the command return both the\n element and its score, instead of the element alone. This option is\n available since Redis 2.0.\n\n **Exclusive intervals and infinity**\n\n ``min_score`` and ``max_score`` can be ``-inf`` and ``+inf``, so that\n you are not required to know the highest or lowest score in the sorted\n set to get all elements from or up to a certain score.\n\n By default, the interval specified by ``min_score`` and ``max_score``\n is closed (inclusive). It is possible to specify an open interval\n (exclusive) by prefixing the score with the character ``(``. For\n example:\n\n .. code::\n\n ZRANGEBYSCORE zset (1 5\n\n Will return all elements with ``1 < score <= 5`` while:\n\n .. code::\n\n ZRANGEBYSCORE zset (5 (10\n\n Will return all the elements with ``5 < score < 10`` (5 and 10\n excluded).\n\n .. note::\n\n **Time complexity**: ``O(log(N)+M)`` with ``N`` being the number of\n elements in the sorted set and ``M`` the number of elements being\n returned. If ``M`` is constant (e.g. always asking for the first 10\n elements with ``count``), you can consider it ``O(log(N))``.\n\n :param key: The key of the sorted set\n :type key: :class:`str`, :class:`bytes`\n :param min_score: Lowest score definition\n :type min_score: :class:`str`, :class:`bytes`\n :param max_score: Highest score definition\n :type max_score: :class:`str`, :class:`bytes`\n :param bool with_scores: Return elements and scores\n :param offset: The number of elements to skip\n :type min_score: :class:`str`, :class:`bytes`\n :param count: The number of elements to return\n :type min_score: :class:`str`, :class:`bytes`\n :rtype: list\n :raises: :exc:`~tredis.exceptions.RedisError`\n \"\"\"\n command = [b'ZRANGEBYSCORE', key, min_score, max_score]\n if with_scores:\n command += ['WITHSCORES']\n if offset or count:\n command += ['LIMIT', offset, count]\n return self._execute(command)\n\n def zremrangebyscore(self, key, min_score, max_score):\n \"\"\"Removes all elements in the sorted set stored at key with a score\n between min and max.\n\n Intervals are described in :meth:`~tredis.RedisClient.zrangebyscore`.\n\n Returns the number of elements removed.\n\n .. 
note::\n\n **Time complexity**: ``O(log(N)+M)`` with ``N`` being the number of\n elements in the sorted set and M the number of elements removed by\n the operation.\n\n :param key: The key of the sorted set\n :type key: :class:`str`, :class:`bytes`\n :param min_score: Lowest score definition\n :type min_score: :class:`str`, :class:`bytes`\n :param max_score: Highest score definition\n :type max_score: :class:`str`, :class:`bytes`\n :rtype: int\n :raises: :exc:`~tredis.exceptions.RedisError`\n \"\"\"\n return self._execute([b'ZREMRANGEBYSCORE', key, min_score, max_score])\n","sub_path":"tredis/sortedsets.py","file_name":"sortedsets.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606850954","text":"import h5py\nimport PIL\nimport scipy\nimport scipy.io as sio\nimport scipy.ndimage.interpolation\nimport scipy.misc\nimport numpy as np\nfrom random import uniform\n\nnyu_depth = h5py.File('./nyu_depth_v2_labeled.mat', 'r')\n\nimage = nyu_depth['images']\nprint(len(image))\ndepth = nyu_depth['depths']\n\n# img = image[1,:,:,:].astype(float)\n# img = np.swapaxes(img, 0, 2)\n# img = scipy.misc.imresize(img, [480, 640]).astype(float)\n# scipy.misc.imsave('./haze.jpg', img)\n\nprint(depth[0].shape)\n\n\ndef get_image(one):\n img = one.astype(float)\n img = np.swapaxes(img, 0, 2)\n return img\n\ndef get_depth(one):\n maxhazy = one.max()\n minhazy = one.min()\n print(maxhazy, minhazy)\n img = (one) / (maxhazy)\n\n img = np.swapaxes(img, 0, 1)\n\n return img\n\ndef main():\n index = 10\n img = get_image(image[index])\n scipy.misc.imsave('./img.bmp', img)\n\n depth_img = get_depth(depth[index])\n scipy.misc.imsave('./depth.jpg', depth_img)\n\n # scale1 = (depth_img.shape[0]) / 480\n # scale2 = (depth_img.shape[1]) / 640\n\n # print(scale1, scale2)\n\n # gt_depth = scipy.ndimage.zoom(depth_img, (1 / scale1, 1 / scale2), order=1)\n # scipy.misc.imsave('./depth2.jpg', gt_depth)\n\n beta = uniform(0.5, 2)\n\n tx1 = np.exp(-beta * depth_img)\n\n a = 1 - 0.5 * uniform(0, 1)\n\n print('beta', beta)\n print('a', a)\n # A = [a,a,a]\n\n\n #beta\n bias = 0.05\n temp_beta = 0.4 + 0.2*1\n beta = uniform(temp_beta-bias, temp_beta+bias)\n print('bera', beta)\n\n tx1 = np.exp(-beta * depth_img)\n \n #A\n abias = 0.1\n temp_a = 0.5 + 0.2*1\n a = uniform(temp_a-abias, temp_a+abias)\n print('a', a)\n A = [a,a,a]\n\n m = img.shape[0]\n n = img.shape[1]\n\n rep_atmosphere = np.tile(np.reshape(A, [1, 1, 3]), [m, n, 1])\n tx1 = np.reshape(tx1, [m, n, 1])\n\n max_transmission = np.tile(tx1, [1, 1, 3])\n\n haze_image = img * max_transmission + rep_atmosphere * (1 - max_transmission)\n\n scipy.misc.imsave('./haze.jpg', haze_image)\n\nif __name__ == \"__main__\":\n main()\n pass\n","sub_path":"make_dataset/test_get_data.py","file_name":"test_get_data.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"266419817","text":"from django.core.management.base import BaseCommand, CommandError\n\nimport helium as He\nimport helium.algorithmic as algorithmic # brain for algorithmic scoring\n\nclass Command(BaseCommand):\n\thelp = \"Runs algorithmic scoring, and computes (then stores) all betas and alphas\"\n\n\tdef handle(self, *args, **kwargs):\n\t\tproblems = [p.id for p in He.models.Problem.objects\\\n\t\t\t\t.filter(exam__is_alg_scoring=True)]\n\t\t\n\t\tverdicts = He.models.Verdict.objects.filter(\\\n\t\t\t\tproblem__exam__is_alg_scoring = 
True)\n\t\tscores = [(v.problem.id, v.entity.id, v.score) for v in verdicts \\\n\t\t\t\tif v.is_valid is True \\\n\t\t\t\tand v.is_done is True \\\n\t\t\t\tand v.entity is not None]\n\t\tmathletes = list(set([s[1] for s in scores]))\n\n\t\t# Run the main procedure, get alpha and beta values\n\t\talphas, betas = algorithmic.main(problems, mathletes, scores)\n\n\t\tfor mathlete_id, alpha in alphas.iteritems():\n\t\t\ta, _ = He.models.EntityAlpha.objects\\\n\t\t\t\t\t.get_or_create(entity = He.models.Entity.objects.get(id=mathlete_id))\n\t\t\ta.cached_alpha = alpha\n\t\t\ta.save()\n\n\t\tfor problem_id, beta in betas.iteritems():\n\t\t\tproblem = He.models.Problem.objects.get(id = problem_id)\n\t\t\tproblem.weight = beta\n\t\t\tproblem.save()\n","sub_path":"management/commands/algscore.py","file_name":"algscore.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"577960189","text":"class Node:\n def __init__(self, k, v):\n self.key = k\n self.val = v\n self.pre = None\n self.nxt = None\n\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.store = {}\n self.curCap = 0\n self.cap = capacity\n self.tail = Node(0, 0)\n self.head = Node(0, 0)\n self.head.nxt = self.tail\n self.tail.pre = self.head\n\n def get(self, key: int) -> int:\n if key not in self.store:\n return -1\n val = self.store[key].val\n self._remove(self.store[key])\n self._add(Node(key, val))\n return val\n\n def put(self, key: int, value: int) -> None:\n # remove the original if exists\n if key in self.store:\n self._remove(self.store[key])\n if self.curCap == self.cap:\n self._remove(self.tail.pre)\n self._add(Node(key, value))\n\n def _remove(self, Node):\n p, n = Node.pre, Node.nxt\n p.nxt, n.pre = n, p\n assert(Node.key in self.store)\n del self.store[Node.key]\n self.curCap -= 1\n\n def _add(self, Node):\n # add a node to the beginning\n tmp = self.head.nxt\n self.head.nxt = Node\n Node.nxt = tmp\n tmp.pre = Node\n Node.pre = self.head\n assert(Node.key not in self.store)\n self.store[Node.key] = Node\n self.curCap += 1\n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n","sub_path":"leetcode-algorithms/146. 
LRU Cache/lrucache.py","file_name":"lrucache.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265553003","text":"from sklearn import neighbors\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import ParameterGrid\n#from ml_experiments.feature_reduction import decorrelation, del_base_features\n#from ml_experiments.plot_coefficients import f_importances\n#from ml_experiments.ml_execute.combined_datasets.twitter_as_training.merge_data import merge\nfrom merge_data import merge\n\n\ndef execute_ml(user, real, loc, bio, post, pic, friend):\n\n X_train, X_test = merge(user, real, loc, bio, post, pic, friend)\n\n # delete base features\n #X_train, X_test = del_base_features(X_train, X_test, user, real, loc)\n\n\n X_train = X_train.sample(frac=1, random_state=0)\n X_test = X_test.sample(frac=1, random_state=0)\n\n y_train = X_train.pop('label')\n y_test = X_test.pop('label')\n\n n = len(X_train)\n print('number of observations: ' + str(n))\n\n feature_names = X_train.columns\n\n scaler = MinMaxScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n\n # decorrelation\n #X_train, X_test = decorrelation(X_train, X_test)\n\n\n clf = neighbors.KNeighborsClassifier(1, weights='uniform')\n clf.fit(X_train, y_train)\n knn_acc = clf.score(X_test, y_test)\n print('knn accuracy: ' + str(knn_acc))\n\n grid = {\"C\": [0.01, 0.1, 1, 10]}\n\n clf = LinearSVC(max_iter=5000, dual=True, random_state=0)\n best_score = 0\n best_grid = []\n for g in ParameterGrid(grid):\n print(g)\n clf.set_params(**g)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n # save if better than current best\n if clf.score(X_test, y_test) > best_score:\n best_score = score\n best_grid = g\n\n print(f\"best svc score: {best_score}\")\n print(f\"best svc grid: {best_grid}\")\n print('--------------------------------------------------------')\n\n #clf.fit(X_train, y_train)\n #svc_acc = clf.score(X_test, y_test)\n #print('linear svc accuracy: ' + str(svc_acc))\n\n # f_importances(abs(clf.coef_[0]), feature_names, top=30)\n\n return knn_acc, best_score, n\n\n\nif __name__ == '__main__':\n\n user = True\n real = True\n loc = False\n bio = False\n post = False\n pic = False\n friend = False\n\n execute_ml(user, real, loc, bio, post, pic, friend)\n","sub_path":"ml_experiments/ml_execute/combined_datasets/twitter_as_training/ml_pipeline.py","file_name":"ml_pipeline.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"653806460","text":"from tatu2 import Cliente\nfrom tatu2 import Conta\njoão = Cliente ('João da Silva', '777-1234')\nmaria = Cliente ('Maria da Silva', '555-4321')\nconta1 = Conta ([joão], 1, 1000)\nconta2 = Conta ([maria], 2, 500)\nconta1.saque(50)\nconta2.deposito(300)\nconta1.saque(190)\nconta2.deposito(95.15)\nconta2.saque(250)\nconta1.extrato()\nconta2.extrato()\n","sub_path":"Exercicios em Sala/teste2.py","file_name":"teste2.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609807272","text":"import os\nimport psycopg2\nfrom bs4 import BeautifulSoup as bs\nimport magonote_funcs as mf\nfrom db_info import db_name, db_user, db_pw, db_host\n\n\"\"\"\n\nlooks through post html files for user urls, puts them in a set\nthese html 
files are stored in the db, other ones might be in the fs\n\nhelpful queries:\nSELECT post_id, SUBSTRING (post_html, 195, 150) FROM itch_post_html LIMIT 10;\n\n\"\"\"\n\nuser_urls = set()\n\ndb_connection=psycopg2.connect( database=db_name, user=db_user, host=db_host, password=db_pw )\ncur = db_connection.cursor()\n\ncur.execute(\"SELECT post_html FROM itch_post_html LIMIT 100\")\n\nfor row in cur:\n html = row[0]\n soup = bs(html, \"lxml\")\n pa_span = soup.find_all(\"span\", class_='post_author')\n if len(pa_span) > 0:\n pa_block = str(pa_span[0])\n print(pa_block[43:])\n cp_div = soup.find_all(\"div\", class_='community_post')\n if len(cp_div) > 0:\n uid_block = str(cp_div[0])\n print(uid_block[70:100])\n\ncur.close()\ndb_connection.close()","sub_path":"prod/magonote_get_user_info_from_post_html_db.py","file_name":"magonote_get_user_info_from_post_html_db.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"306424215","text":"# Authors: CS-World Domination Summer19 - CB\r\n'''\r\nSimply display the contents of the webcam with optional mirroring using OpenCV \r\nvia the new Pythonic cv2 interface. Press to quit.\r\nNOTE: Key bindings:\r\ng: BGR->RGB\r\nf: vertical flip\r\nd: horizontal flip\r\n\r\nesc: quit\r\n'''\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ncascPath = \"haarcascade_frontalface_default.xml\"\r\nfaceCascade = cv2.CascadeClassifier(cascPath)\r\n\r\ndef demo1():\r\n \"\"\"\r\n just run it to see the video from the built-in webcam\r\n if CAPTURE_NUM = 0 doesn't work, try -1, 1, 2, 3\r\n (if none of those work, the webcam's not supported!)\r\n \"\"\"\r\n CAPTURE_NUM = 0\r\n cam = cv2.VideoCapture(CAPTURE_NUM)\r\n mirror=False\r\n VERTICAL_FLIP=False\r\n BGR=False \r\n while True:\r\n ret_val, orig = cam.read()\r\n\r\n #shrink image to half size (.5)\r\n #\"None\" argument refers to the size in pixels, which we don't care about since we're scaling\r\n img=cv2.resize(orig, None, fx=.5, fy=.5)\r\n #NOTE: if the image is original size, it will be very slow and laggy, and will miss some keypresses.\r\n #adjust size as desired if your machine is powerful enough\r\n \r\n \"\"\" key-press handling \"\"\"\r\n k = cv2.waitKey(20) & 0xFF\r\n k_char = chr(k)\r\n if k_char == 'g': \r\n BGR = not BGR # fun!\r\n if k_char == 'f': \r\n VERTICAL_FLIP = not VERTICAL_FLIP # fun!\r\n if k_char == 'd': \r\n mirror = not mirror # fun!\r\n if mirror: \r\n img = cv2.flip(img, 1)\r\n if VERTICAL_FLIP: \r\n img = cv2.flip(img, 0)\r\n if BGR: \r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n if k == 27: \r\n break # esc to quit\r\n #facial recognition handling\r\n faces = faceCascade.detectMultiScale(\r\n img,\r\n scaleFactor=1.1,\r\n minNeighbors=5,\r\n minSize=(30, 30),\r\n flags = cv2.CASCADE_SCALE_IMAGE\r\n )\r\n #print(\"Found {0} faces!\".format(len(faces)))\r\n\r\n # Draw a rectangle around the faces\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n\r\n \r\n cv2.imshow('my webcam', img)\r\n \r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\ndef demo3():\r\n \"\"\"\r\n to demo: click to bring focus to the messi image\r\n move mouse around and hit 'r' (lowercase r)\r\n a cyan rectangle should appear at your mouse\r\n hit spacebar to clear\r\n\r\n drawing reference:\r\n http://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html\r\n \"\"\"\r\n # Create a black image, a window and bind the function to window\r\n # this is from here:\r\n 
FILE_NAME = \"messi5.jpg\"\r\n image_orig = cv2.imread(FILE_NAME, cv2.IMREAD_COLOR)\r\n #image_orig = cv2.cvtColor(image_orig, cv2.COLOR_BGR2RGB)\r\n image = image_orig.copy()\r\n current_mouse_pos = [0,0] # not true yet...\r\n\r\n def mouse_handler(event,x,y,flags,param):\r\n \"\"\" a function that gets called on mouse events \r\n reference: \r\n \"\"\"\r\n current_mouse_pos[0] = x\r\n current_mouse_pos[1] = y\r\n #print(\"The mouse is currently at\", current_mouse_pos)\r\n if event == cv2.EVENT_LBUTTONDOWN: print(\"Left button clicked!\")\r\n if event == cv2.EVENT_RBUTTONDOWN: print(\"Right button clicked!\")\r\n\r\n cv2.namedWindow('image')\r\n cv2.setMouseCallback('image',mouse_handler)\r\n\r\n while True:\r\n cv2.imshow('image',image)\r\n\r\n \"\"\" key-press handling \"\"\"\r\n k = cv2.waitKey(20) & 0xFF\r\n k_char = chr(k)\r\n if k_char == 'm': print('mmmm!') # fun!\r\n if k_char == 'r': \r\n x, y = current_mouse_pos # adjusted by the mouse_handler!\r\n DELTA = 42\r\n UL = (x-DELTA,y-DELTA) # Upper Left\r\n LR = (x+DELTA,y+DELTA) # Lower Right\r\n CLR = (255,255,0) # color\r\n WIDTH = 1 # rectangle width\r\n cv2.rectangle( image, UL, LR, CLR, WIDTH ) # draw a rectangle\r\n if k_char == ' ': image = image_orig.copy() # clear by re-copying!\r\n if k == 27: # escape key has value 27 (no string represetation...)\r\n print(\"Quitting!\")\r\n break\r\n \"\"\" end of key-press handling \"\"\"\r\n\r\n # outside of the while True loop...\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\ndef main():\r\n #demo3()\r\n #demo2()\r\n demo1()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"OpenCV-Examples/Image Detection/cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413980625","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport database\n\n\ndef make_pictures_csv():\n c = database.init_db().cursor()\n headers = ['rowid', 'name', 'link', 'evaluated', 'username', 'finished']\n\n data = c.execute('SELECT rowid, * FROM pictures;').fetchall()\n\n output = \"\\t\".join(headers) + '\\n'\n\n for row in data:\n row = [str(i) for i in row]\n output += '\\t'.join(row) + '\\n'\n\n with open('./output/pictures.csv', 'w') as f:\n f.write(output)\n\n\ndef make_f_survey_csv():\n c = database.init_db().cursor()\n headers = ['user_id', 'id', 'person_from_left', 'answer_1', 'answer_1_text',\n 'answer_2', 'answer_3', 'answer_4', 'answer_5', 'answer_6',\n 'answer_7', 'answer_8', 'answer_9', 'answer_10', 'answer_11',\n 'answer_12']\n\n data = c.execute('SELECT * FROM picture_focal_result_data;').fetchall()\n\n output = \"\\t\".join(headers) + '\\n'\n\n for row in data:\n row = [str(i) for i in row]\n subject_id = database.get_user_id_by_picture_id(row[0])\n output += subject_id + '\\t' + '\\t'.join(row) + '\\n'\n\n with open('./output/picture_focal_result_data.csv', 'w') as f:\n f.write(output)\n\n\ndef make_nf_survey_csv():\n c = database.init_db().cursor()\n data = c.execute(\"SELECT * FROM picture_non_focal_result_data\").fetchall()\n\n keys = []\n content = ''\n\n for row in data:\n json_object = json.loads(row[1])\n json_object['id'] = row[0]\n json_object['subject_id'] = database.get_user_id_by_picture_id(row[0])\n\n # Merge legacy data with old textbox name.\n try:\n if json_object['q1_other_textbox'] is not None:\n json_object['q1_textbox'] = json_object['q1_other_textbox']\n except KeyError:\n pass\n\n # Restore lost 'other' column in Q1 from rest of data.\n ufp = 
database.get_picture_eval_data_by_id(row[0])[4]\n sum = 0\n q1_answers = ['q1_acquaintance', 'q1_close_friend', 'q1_coworker',\n 'q1_family', 'q1_friend', 'q1_spouse', 'q1_stranger']\n\n for answer in q1_answers:\n sum += int(json_object[answer])\n\n assert ufp >= sum\n json_object['q1_other'] = ufp - sum\n\n # Create keys for the headers, this is only done once.\n if not keys:\n keys = sorted(json_object.keys())\n keys.insert(10, 'q1_textbox')\n content = \"\\t\".join(keys) + '\\n'\n\n # Iterate over the row, writing down values in alphabetic order.\n for key in keys:\n try:\n content += str(json_object[key]) + '\\t'\n except KeyError:\n content += ' \\t'\n\n content += '\\n'\n\n with open('./output/picture_non_focal_result_data.csv', 'w') as f:\n f.write(content)\n\n\ndef make_evaluation_data_csv():\n c = database.init_db().cursor()\n headers = ['user_id', 'id', 'picture_name', 'shows_people',\n 'focused_people',\n 'nonfocused_people']\n\n data = c.execute('SELECT * FROM picture_evaluation_data;').fetchall()\n\n output = \"\\t\".join(headers) + '\\n'\n\n for row in data:\n row = [str(i) for i in row]\n subject_id = database.get_user_id_by_picture_id(row[0])\n output += subject_id + '\\t' + '\\t'.join(row) + '\\n'\n\n with open('./output/picture_evaluation_data.csv', 'w') as f:\n f.write(output)\n\n\nif __name__ == '__main__':\n os.makedirs('./output/', exist_ok=True)\n\n make_pictures_csv()\n make_f_survey_csv()\n make_nf_survey_csv()\n make_evaluation_data_csv()\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130316427","text":"# Game status categories\n# Change the values as you see fit\nSTATUS_WIN = \"win\"\nSTATUS_LOSE = \"lose\"\nSTATUS_ONGOING = \"ongoing\"\n\n\nclass Hangman:\n\n def __init__(self, word:str):\n self.word = word\n self.letters = set()\n self.remaining_guesses = 9\n self.status = STATUS_ONGOING\n\n def guess(self, char:chr):\n if(self.status != STATUS_ONGOING):\n raise ValueError('guess is not available, game already finished')\n if (not char in self.word or char in self.letters):\n self.remaining_guesses -= 1\n \n self.letters.add(char)\n \n if ( all( c in self.letters for c in self.word ) ):\n self.status = STATUS_WIN\n elif (self.remaining_guesses < 0):\n self.status = STATUS_LOSE\n\n def get_masked_word(self) -> str:\n res = ''\n for char in self.word:\n if (char in self.letters):\n res += char\n else:\n res += '_'\n return res\n\n def get_status(self) -> str:\n return self.status\n","sub_path":"exercism/python/hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542825669","text":"import pygame, sys, random\r\nfrom pygame.locals import * \r\n\r\n#constants representing colors\r\nBLACK = (0,0,0)\r\nBROWN = (153,76,0)\r\nGREEN = (0,255,0)\r\nBLUE = (0,0,255)\r\nWHITE = (255,255,255)\r\n\r\n#Sets the measurments for the game screen\r\nTILESIZE = 20\r\nMAPWIDTH = 30\r\nMAPHEIGHT = 20\r\n\r\n#gives each resources a variable number to define it\r\nDIRT = 0\r\nGRASS = 1\r\nWATER = 2\r\nCOAL =3\r\nLAVA = 4\r\nROCK = 5\r\nDIAMOND = 6\r\nCLOUD = 7\r\n\r\n#Gives each resource a personal texture\r\ntextures = {\r\n\tDIRT : pygame.image.load('Pictures/dirt.png'),\r\n\tGRASS : pygame.image.load('Pictures/grass.png'),\r\n\tWATER : pygame.image.load('Pictures/water.png'),\r\n\tCOAL : 
pygame.image.load('Pictures/coal.png'),\r\n\tLAVA : pygame.image.load('Pictures/lava.png'),\r\n\tROCK : pygame.image.load('Pictures/the_magic.jpg'),\r\n\tDIAMOND : pygame.image.load('Pictures/diamond.png'),\r\n\tCLOUD : pygame.image.load('Pictures/Randall_The_TRUE_Homie.png')\r\n\t}\r\n\r\n#Creates a list containing each resource to be randomly choses and assigned a point on the display\r\nresources = [DIRT, GRASS, WATER, COAL, LAVA, ROCK, DIAMOND]\r\ntilemap = [ [DIRT for w in range(MAPWIDTH)] for h in range(MAPWIDTH)]\r\n\r\ninventory = {\r\n\tDIRT : 0,\r\n\tGRASS : 0,\r\n\tWATER : 0,\r\n\tCOAL : 0,\r\n\tLAVA : 0,\r\n\tROCK : 0,\r\n\tDIAMOND : 0\r\n\t}\r\n#Begin program, set display size, and label the window caption and icon\r\npygame.init()\r\nDISPLAYSURF = pygame.display.set_mode((MAPWIDTH*TILESIZE,MAPHEIGHT*TILESIZE + 50))\r\npygame.display.set_caption('Resetti\\'s Adventure!')\r\n# pygame.display.set_icon(pygame.image.load(''))\r\n\r\n#Loads the player into the program\r\nPLAYER = pygame.image.load('Pictures/Resetti_ingame.png').convert_alpha()\r\nplayerPos = [0,0]\r\n\r\n#Sets the cloud's position\r\ncloudx = -200\r\ncloudy = 0\r\n\r\n#sets the fpsclock for the cloud i think\r\nfpsClock = pygame.time.Clock()\r\n\r\n#add a font for our inventory\r\nINVFONT = pygame.font.Font('Fonts/FreeSansBold.ttf', 18)\r\n\r\n#Goes through each square on the program window and assigns it a resource randomly\r\nfor rw in range(MAPHEIGHT):\r\n\tfor cl in range(MAPWIDTH):\r\n\t\trandomNumber = random.randint(0,20)\r\n\t\tif randomNumber == 0:\r\n\t\t\trandNumber_2 = random.randint(0,3)\r\n\t\t\tif randNumber_2 == 0:\r\n\t\t\t\ttitle = DIAMOND\r\n\t\t\telif randNumber_2 > 0:\r\n\t\t\t\ttitle = WATER\r\n\t\telif randomNumber == 1:\r\n\t\t\ttitle = COAL\r\n\t\telif randomNumber == 2 or randomNumber == 3:\r\n\t\t\ttitle = WATER\r\n\t\telif randomNumber >=4 and randomNumber <= 7:\r\n\t\t\ttitle = GRASS\r\n\t\telif randomNumber >= 8 and randomNumber <= 10:\r\n\t\t\ttitle = LAVA\r\n\t\telif randomNumber >=11 and randomNumber <= 14:\r\n\t\t\ttitle = ROCK\r\n\t\telse:\r\n\t\t\ttitle = DIRT\r\n\t\t\t\r\n\t\ttilemap[rw][cl] = title\r\n\t\t\r\n\t\t\r\n#Main loop to \r\nwhile True:\r\n\t\r\n\t#clears gameboard after every loop, fixes cloud issues\r\n\tDISPLAYSURF.fill(BLACK)\r\n\t\r\n\t#Allows for events to be assigned \r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == QUIT:\r\n\t\t\tpygame.quit()\r\n\t\t\tsys.exit()\r\n\t\telif event.type == KEYDOWN:\r\n\t\t\tprint(event)\r\n\t\t\t#Allows for movement.\r\n\t\t\tif (event.key == K_RIGHT) and playerPos[0] < MAPWIDTH - 1:\r\n\t\t\t\tplayerPos[0] += 1\r\n\t\t\tif (event.key == K_LEFT) and playerPos[0] > 0:\r\n\t\t\t\tplayerPos[0] -= 1\r\n\t\t\tif (event.key == K_UP) and playerPos[1] > 0:\r\n\t\t\t\tplayerPos[1] -= 1\r\n\t\t\tif (event.key == K_DOWN) and playerPos[1] < MAPHEIGHT - 1:\r\n\t\t\t\tplayerPos[1] += 1\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t#Allows to check inventory\r\n\t\t\tif event.key == K_SPACE:\r\n\t\t\t\t#what resource is the player standing on?\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#player now has 1 more of this resource\r\n\t\t\t\tinventory[currentTile] +=1\r\n\t\t\t\t#the player is now standing on dirt\r\n\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = DIRT\r\n\t\t\t\tprint(inventory)\r\n\t\t\t\t\r\n\t\t\t#placing dirt\r\n\t\t\tif (event.key == K_1):\r\n\t\t\t\t#get the tile to swap with the dirt\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any dirt in our 
inventory\r\n\t\t\t\tif inventory[DIRT] > 0:\r\n\t\t\t\t\t#remove one dirt and place it\r\n\t\t\t\t\tinventory[DIRT] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = DIRT\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t#placing grass\r\n\t\t\tif (event.key == K_2):\r\n\t\t\t\t#get the tile to swap with the grass\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any grass in our inventory\r\n\t\t\t\tif inventory[GRASS] > 0:\r\n\t\t\t\t\t#remove one grass and place it\r\n\t\t\t\t\tinventory[GRASS] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = GRASS\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t#placing water\r\n\t\t\tif (event.key == K_3):\r\n\t\t\t\t#get the tile to swap with the water\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any water in our inventory\r\n\t\t\t\tif inventory[WATER] > 0:\r\n\t\t\t\t\t#remove one water and place it\r\n\t\t\t\t\tinventory[WATER] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = WATER\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t#placing COAL\r\n\t\t\tif (event.key == K_4):\r\n\t\t\t\t#get the tile to swap with the coal\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any dirt in our inventory\r\n\t\t\t\tif inventory[COAL] > 0:\r\n\t\t\t\t\t#remove one coal and place it\r\n\t\t\t\t\tinventory[COAL] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = COAL\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t#placing LAVA\r\n\t\t\tif (event.key == K_5):\r\n\t\t\t\t#get the tile to swap with the dirt\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any lava in our inventory\r\n\t\t\t\tif inventory[LAVA] > 0:\r\n\t\t\t\t\t#remove one lava and place it\r\n\t\t\t\t\tinventory[LAVA] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = LAVA\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\t\r\n\t\t\t\t\t\r\n\t\t\t#placing DIAMOND \r\n\t\t\tif (event.key == K_7):\r\n\t\t\t\t#KEY SET TO 7^^^ NOT 6\r\n\t\t\t\t#get the tile to swap with the diamond\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any diamond in our inventory\r\n\t\t\t\tif inventory[DIAMOND] > 0:\r\n\t\t\t\t\t#remove one diamond and place it\r\n\t\t\t\t\tinventory[DIAMOND] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = DIAMOND\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t#placing ROCK\r\n\t\t\tif (event.key == K_6):\r\n\t\t\t\t#KEY SET TO 6^^^ NOT 7\r\n\t\t\t\t#get the tile to swap with the rock\r\n\t\t\t\tcurrentTile = tilemap[playerPos[1]][playerPos[0]]\r\n\t\t\t\t#if we have any rock in our inventory\r\n\t\t\t\tif inventory[ROCK] > 0:\r\n\t\t\t\t\t#remove= one rock and place it\r\n\t\t\t\t\tinventory[ROCK] -= 1\r\n\t\t\t\t\ttilemap[playerPos[1]][playerPos[0]] = ROCK\r\n\t\t\t\t\t#swap the item that was there before\r\n\t\t\t\t\tinventory[currentTile] += 1\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t#Places the assigned resources on the correct spaces\r\n\tfor row in range(MAPHEIGHT):\r\n\t\tfor column in range(MAPWIDTH):\r\n\t\t\tDISPLAYSURF.blit(textures[tilemap[row][column]], 
(column*TILESIZE,row*TILESIZE))\r\n\t\r\n\t\r\n\t\r\n\t#Displays the inventory, starting 10 pixels in\r\n\tplacePosition = 10\r\n\tfor item in resources:\r\n\t\t#add the image\r\n\t\tDISPLAYSURF.blit(textures[item],(placePosition,MAPHEIGHT*TILESIZE+20))\r\n\t\tplacePosition += 30\r\n\t\t#add the text showing ther amount in the inventory\r\n\t\ttextObj = INVFONT.render(str(inventory[item]), True, WHITE, BLACK)\r\n\t\tDISPLAYSURF.blit(textObj,(placePosition,MAPHEIGHT*TILESIZE+20))\r\n\t\tplacePosition += 50\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t#Displays the cloud\t\r\n\tDISPLAYSURF.blit(textures[CLOUD],(cloudx,cloudy))\r\n\t#move the cloud slightly to the left\r\n\tcloudx += 1\r\n\t#if the cloud has moved past the map\r\n\tif cloudx > MAPWIDTH*TILESIZE:\r\n\t\t#pick a new position to place the cloud\r\n\t\tcloudy = random.randint(0,MAPHEIGHT*TILESIZE)\r\n\t\tcloudx = -200\r\n\t\t\r\n\t\t\r\n\t#Displays the player avatar\r\n\tDISPLAYSURF.blit(PLAYER,(playerPos[0]*TILESIZE,playerPos[1]*TILESIZE))\r\n\t\r\n\t#updates the total display\r\n\tpygame.display.update()\r\n\tfpsClock.tick(24)\r\n","sub_path":"Pygame MC/animations.py","file_name":"animations.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355736804","text":"#!/usr/bin/python3\n\nimport os, sys, re\nimport string\nimport argparse\nimport yaml\nimport json\n\nfrom bs4 import BeautifulSoup\n\noptions = {\n 'format' : 'text',\n}\n\nclass PhishingMailParser:\n def __init__(self, options):\n self.options = options\n self.results = {}\n\n def parse(self, html):\n self.html = html\n self.soup = BeautifulSoup(html, features=\"lxml\")\n\n self.results['Embedded Images'] = self.testEmbeddedImages()\n self.results['Images without ALT'] = self.testImagesNoAlt()\n self.results['Masqueraded Links'] = self.testMaskedLinks()\n\n return {k: v for k, v in self.results.items() if v}\n\n @staticmethod\n def context(tag):\n s = str(tag)\n\n if len(s) < 100:\n return s\n\n beg = s[:50]\n end = s[-50:]\n\n return f'{beg}...{end}'\n\n def testMaskedLinks(self):\n links = self.soup('a')\n\n desc = 'Links that masquerade their href= attribute by displaying different link are considered harmful and will increase Spam score.'\n context = ''\n result = ''\n num = 0\n embed = ''\n\n for link in links:\n try:\n href = link['href']\n except:\n continue\n \n text = link.getText()\n\n url = re.compile(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*')\n\n m1 = url.match(href)\n m2 = url.match(text)\n\n if m1 and m2:\n num += 1\n context += '- ' + PhishingMailParser.context(link) + '\\n'\n context += f'\\thref = \"{href[:64]}\"\\n'\n context += f'\\ttext = \"{text[:64]}\"\\n\\n'\n\n if num > 0:\n result += f'- Found {num}
<a> tags that masquerade their href=\"\" links with text!\\n'\n            result += '\\t Links that try to hide underyling URL are harmful and will be considered as Spam!\\n'\n\n        if len(result) == 0:\n            return []\n\n        return {\n            'description' : desc,\n            'context' : context,\n            'analysis' : result\n        }\n\n    def testImagesNoAlt(self):\n        images = self.soup('img')\n\n        desc = 'Images without ALT=\"value\" attribute may increase Spam scorage.'\n        context = ''\n        result = ''\n        num = 0\n        embed = ''\n\n        for img in images:\n            src = img['src']\n            alt = ''\n\n            try:\n                alt = img['alt']\n            except:\n                pass\n\n            if alt == '':\n                num += 1\n                context += '- ' + PhishingMailParser.context(img) + '\\n'\n\n        if num > 0:\n            result += f'- Found {num} <img> tags without ALT=\"value\" attribute.\\n'\n            result += '\\t Images without alternate text set in their attribute may increase Spam score\\n'\n\n        if len(result) == 0:\n            return []\n\n        return {\n            'description' : desc,\n            'context' : context,\n            'analysis' : result\n        }\n\n    def testEmbeddedImages(self):\n        images = self.soup('img')\n\n        desc = 'Embedded images can increase Spam Confidence Level (SCL) in Office365 by 4 points. Embedded images are those with <img src=\"data:image/...\"/> . They should be avoided.'\n        context = ''\n        result = ''\n        num = 0\n        embed = ''\n\n        for img in images:\n            src = img['src']\n            alt = ''\n\n            try:\n                alt = img['alt']\n            except:\n                pass\n\n            if src.lower().startswith('data:image/'):\n                if len(embed) == 0:\n                    embed = src[:30]\n\n                num += 1\n                if len(alt) > 0:\n                    context += f'- ALT=\"{alt}\": ' + PhishingMailParser.context(img) + '\\n'\n                else:\n                    context += '- ' + PhishingMailParser.context(img) + '\\n'\n\n        if num > 0:\n            result += f'- Found {num} <img> tags with embedded image ({embed}).\\n'\n            result += '\\t Embedded images increase Office365 SCL (Spam) level by 4 points!\\n'\n\n        if len(result) == 0:\n            return []\n\n        return {\n            'description' : desc,\n            'context' : context,\n            'analysis' : result\n        }\n\n\ndef printOutput(out):\n    if options['format'] == 'text':\n        width = 100\n        num = 0\n\n        for k, v in out.items():\n            num += 1\n            analysis = v['analysis']\n            context = v['context']\n\n            analysis = analysis.replace('- ', '\\t- ')\n\n            print(f'''\n------------------------------------------\n({num}) Test: {k}\n\nCONTEXT: \n    {context}\n\nANALYSIS:\n    {analysis}\n''')\n    \n    elif options['format'] == 'json':\n        print(json.dumps(out))\n\ndef opts(argv):\n    global options\n    global headers\n\n    o = argparse.ArgumentParser(\n        usage = 'phishing-HTML-linter.py [options] <input HTML file>'\n    )\n    \n    req = o.add_argument_group('Required arguments')\n    req.add_argument('file', help = 'Input HTML file')\n\n    args = o.parse_args()\n    return args\n\ndef main(argv):\n    args = opts(argv)\n    if not args:\n        return False\n\n    print('''\n    :: Phishing HTML Linter\n    Shows you bad smells in your HTML code that will get your mails busted!\n    Mariusz Banach / mgeeky\n''')\n\n    html = ''\n    with open(args.file, 'rb') as f:\n        html = f.read()\n\n    p = PhishingMailParser({})\n    ret = p.parse(html.decode())\n\n    printOutput(ret)\n    \n\nif __name__ == '__main__':\n    main(sys.argv)\n","sub_path":"phishing/phishing-HTML-linter.py","file_name":"phishing-HTML-linter.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450127073","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nfrom pandas import *\nimport pandas\nimport json\n\n\n# In[2]:\n\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--user-agent=\"Mozilla/5.0 (Macintosh; Intel Mac OS X 
10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\"')\n\n\n# In[16]:\n\n\npath_to_chromedriver = '/usr/local/bin/chromedriver' # Path to access a chrome driver\nbrowser = webdriver.Chrome(executable_path=path_to_chromedriver, options=chrome_options)\n\n\n# In[17]:\n\n\nurl = 'https://stats.nba.com/team/1610612744/boxscores-traditional/'\nbrowser.get(url)\n\n\n# In[5]:\n\n\ntable = browser.find_element_by_class_name('nba-stat-table__overflow')\n\n\n# In[6]:\n\n\nteam_date = []\nteam_win = []\nteam_stats = []\ncolumn_names = []\n\nfor line_id, lines in enumerate(table.text.split('\\n')):\n if line_id == 0:\n column_names = lines.split(' ')[1:]\n else:\n temp = []\n parse = lines.split()\n team_date.append(parse[0] + \" \" + parse[1] + \" \" + parse[2] + \" \" + parse[3] + \" \" + parse[4] + \" \" + parse[5] + \" \" + parse[6])\n team_win.append(parse[7])\n for i in range(8,28):\n temp.append(float(parse[i]))\n team_stats.append(temp)\n\n\n# In[7]:\n\n\n# Next Page\nbrowser.find_element_by_xpath('/html/body/main/div[2]/div/div/div[3]/div/div/div/nba-stat-table/div[3]/div/div/a[2]').click()\n\n\n# In[8]:\n\n\ntable = browser.find_element_by_class_name('nba-stat-table__overflow')\n\n\n# In[9]:\n\n\nfor line_id, lines in enumerate(table.text.split('\\n')):\n if line_id == 0:\n column_names = lines.split(' ')[1:]\n else:\n temp = []\n parse = lines.split()\n team_date.append(parse[0] + \" \" + parse[1] + \" \" + parse[2] + \" \" + parse[3] + \" \" + parse[4] + \" \" + parse[5] + \" \" + parse[6])\n team_win.append(parse[7])\n for i in range(8,28):\n temp.append(float(parse[i]))\n team_stats.append(temp)\n\n\n# In[10]:\n\n\ndb = pandas.DataFrame({'MATCH UP': team_date,\n 'W/L': team_win,\n 'MIN': [i[0] for i in team_stats],\n 'PTS': [i[1] for i in team_stats],\n 'FGM': [i[2] for i in team_stats], \n 'FGA': [i[3] for i in team_stats],\n 'FG%': [i[4] for i in team_stats],\n '3PM': [i[5] for i in team_stats],\n '3PA': [i[6] for i in team_stats],\n '3P%': [i[7] for i in team_stats],\n 'FTM': [i[8] for i in team_stats],\n 'FTA': [i[9] for i in team_stats],\n 'FT%': [i[10] for i in team_stats],\n 'OREB': [i[11] for i in team_stats],\n 'DREB': [i[12] for i in team_stats],\n 'REB': [i[13] for i in team_stats],\n 'AST': [i[14] for i in team_stats],\n 'TOV': [i[15] for i in team_stats],\n 'STL': [i[16] for i in team_stats],\n 'BLK': [i[17] for i in team_stats],\n 'PF': [i[18] for i in team_stats],\n '+/-': [i[19] for i in team_stats],\n }\n )\n\n\n# In[11]:\n\n\ndb\n\n\n# In[12]:\n\n\n# Set to Export Location\ndb.to_json(r'/Users/kevin/Documents/Sublime/SeleksiBasdat/data.json', orient='records', lines = True)\n\n\n# In[13]:\n\n\nbrowser.close()\n\n\n# In[14]:\n\n\nprint('Export data to JSON file as \"data.json\" success')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"src/Scraping.py","file_name":"Scraping.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"66114625","text":"from urllib2 import urlopen\nfrom selenium import webdriver\nimport time\nimport random\nimport requests\n\ndriver = webdriver.Chrome('path/to/chromedriver')\ndriver.get(\"http://bolanee.bc.edu:8080/home.html?src=3&mode=0\")\n\nwhile True:\n\timg = driver.find_element_by_xpath('//html/body[@id=\"webcamXP-body\"]/div[@id=\"container\"]/div[@id=\"intro\"]/div[@class=\"wxpcontainer\"]\\\n\t\t/div[@id=\"wxpcamdiv\"]/img')\n\t# get the image source\n\tsrc = img.get_attribute(\"src\")\n\tr = requests.get(src)\n\twith 
open('cam_photo.jpg','wb') as f:\n\t\tf.write(r.content)\n\tsleeptime = random.randint(3,15)\n\ttime.sleep(sleeptime)\n\t#print 'tick'\n\t\ndriver.close()\n","sub_path":"skyline.py","file_name":"skyline.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"108832440","text":"# -*- coding: UTF-8 -*-\nfrom flask import Flask, request, render_template\nfrom werkzeug.utils import secure_filename\nimport os\nimport uuid\n\n\napp = Flask(__name__)\n\n@app.route('/audiovideo')\ndef upload_file():\n return render_template('index.html')\n\n@app.route('/audiovideo', methods=['GET', 'POST'])\ndef audiovideo():\n if request.method == 'POST':\n file = request.files['audiovideo']\n uuidName = str(uuid.uuid1())\n filename_mkv = uuidName + \".mkv\"\n filename = secure_filename(filename_mkv)\n file_save_path = os.path.join('.', 'static', 'uploads', filename_mkv)\n file.save(file_save_path)\n return \"susses\"\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=7881, debug=True, ssl_context=(\n \"./rtc-video-room-cert.pem\",\n \"./rtc-video-room-key.pem\"\n ))\n","sub_path":"background_video_post/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604049711","text":"\"\"\"empty message\n\nRevision ID: 2f9dfdf164de\nRevises: f867f07d830d\nCreate Date: 2019-10-01 23:08:58.267555\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2f9dfdf164de'\ndown_revision = 'f867f07d830d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('blog_comment', sa.Column('first_name_of_commenter', sa.String(length=40), nullable=True))\n op.add_column('blog_comment', sa.Column('last_name_of_commenter', sa.String(length=40), nullable=True))\n op.alter_column('blog_comment', 'user_id',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.create_foreign_key(None, 'blog_comment', 'blog', ['blog_of_comment'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'blog_comment', type_='foreignkey')\n op.alter_column('blog_comment', 'user_id',\n existing_type=sa.INTEGER(),\n nullable=False)\n op.drop_column('blog_comment', 'last_name_of_commenter')\n op.drop_column('blog_comment', 'first_name_of_commenter')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2f9dfdf164de_.py","file_name":"2f9dfdf164de_.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"374997857","text":"'''\nGiven a string, find the length of the longest substring without repeating characters.\n\n给定一个字符串,找到最长子字符串的长度而不重复字符。\n'''\n\n\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n dicts = {}\n maxlength = start = 0\n for i in range(len(s)):\n if s[i] in dicts:\n start = max(dicts[s[i]] + 1, start) # 从重复单词的下一个位置开始\n maxlength = max(i - start + 1, maxlength) \n dicts[s[i]] = i\n return maxlength\n\n\na = Solution()\nprint(a.lengthOfLongestSubstring(\"pwwkew\"))","sub_path":"LeetCode/Problems/3. Longest Substring Without Repeating Characters/3. Longest Substring Without Repeating Characters.py","file_name":"3. 
Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"279976022","text":"# Copyright 2014 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python wrapper for C socket calls and data structures.\"\"\"\n\nimport ctypes\nimport ctypes.util\nimport os\nimport socket\nimport struct\n\nimport cstruct\nimport util\n\n\n# Data structures.\n# These aren't constants, they're classes. So, pylint: disable=invalid-name\nCMsgHdr = cstruct.Struct(\"cmsghdr\", \"@Lii\", \"len level type\")\nIovec = cstruct.Struct(\"iovec\", \"@PL\", \"base len\")\nMsgHdr = cstruct.Struct(\"msghdr\", \"@LLPLPLi\",\n \"name namelen iov iovlen control msg_controllen flags\")\nSockaddrIn = cstruct.Struct(\"sockaddr_in\", \"=HH4sxxxxxxxx\", \"family port addr\")\nSockaddrIn6 = cstruct.Struct(\"sockaddr_in6\", \"=HHI16sI\",\n \"family port flowinfo addr scope_id\")\nSockaddrStorage = cstruct.Struct(\"sockaddr_storage\", \"=H126s\", \"family data\")\nSockExtendedErr = cstruct.Struct(\"sock_extended_err\", \"@IBBBxII\",\n \"errno origin type code info data\")\nInPktinfo = cstruct.Struct(\"in_pktinfo\", \"@i4s4s\", \"ifindex spec_dst addr\")\nIn6Pktinfo = cstruct.Struct(\"in6_pktinfo\", \"@16si\", \"addr ifindex\")\n\n# Constants.\n# IPv4 socket options and cmsg types.\nIP_TTL = 2\nIP_MTU_DISCOVER = 10\nIP_PKTINFO = 8\nIP_RECVERR = 11\nIP_RECVTTL = 12\nIP_MTU = 14\n\n# IPv6 socket options and cmsg types.\nIPV6_MTU_DISCOVER = 23\nIPV6_RECVERR = 25\nIPV6_RECVPKTINFO = 49\nIPV6_PKTINFO = 50\nIPV6_RECVHOPLIMIT = 51\nIPV6_HOPLIMIT = 52\nIPV6_PATHMTU = 61\nIPV6_DONTFRAG = 62\n\n# PMTUD values.\nIP_PMTUDISC_DO = 1\n\nCMSG_ALIGNTO = struct.calcsize(\"@L\") # The kernel defines this as sizeof(long).\n\n# Sendmsg flags\nMSG_CONFIRM = 0X800\nMSG_ERRQUEUE = 0x2000\n\n# Linux errqueue API.\nSO_ORIGIN_ICMP = 2\nSO_ORIGIN_ICMP6 = 3\n\n# Find the C library.\nlibc = ctypes.CDLL(ctypes.util.find_library(\"c\"), use_errno=True)\n\n\n# TODO: Unlike most of this file, these functions aren't specific to wrapping C\n# library calls. Move them to a utils.py or constants.py file, once we have one.\ndef LinuxVersion():\n # Example: \"3.4.67-00753-gb7a556f\".\n # Get the part before the dash.\n version = os.uname()[2].split(\"-\")[0]\n # Convert it into a tuple such as (3, 4, 67). 
That allows comparing versions\n # using < and >, since tuples are compared lexicographically.\n version = tuple(int(i) for i in version.split(\".\"))\n return version\n\n\ndef AddressVersion(addr):\n return 6 if \":\" in addr else 4\n\n\ndef SetSocketTimeout(sock, ms):\n s = ms / 1000\n us = (ms % 1000) * 1000\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,\n struct.pack(\"LL\", s, us))\n\n\ndef VoidPointer(s):\n return ctypes.cast(s.CPointer(), ctypes.c_void_p)\n\n\ndef MaybeRaiseSocketError(ret):\n if ret < 0:\n errno = ctypes.get_errno()\n raise socket.error(errno, os.strerror(errno))\n\n\ndef Sockaddr(addr):\n if \":\" in addr[0]:\n family = socket.AF_INET6\n if len(addr) == 4:\n addr, port, flowinfo, scope_id = addr\n else:\n (addr, port), flowinfo, scope_id = addr, 0, 0\n addr = socket.inet_pton(family, addr)\n return SockaddrIn6((family, socket.ntohs(port), socket.ntohl(flowinfo),\n addr, scope_id))\n else:\n family = socket.AF_INET\n addr, port = addr\n addr = socket.inet_pton(family, addr)\n return SockaddrIn((family, socket.ntohs(port), addr))\n\n\ndef _MakeMsgControl(optlist):\n \"\"\"Creates a msg_control blob from a list of cmsg attributes.\n\n Takes a list of cmsg attributes. Each attribute is a tuple of:\n - level: An integer, e.g., SOL_IPV6.\n - type: An integer, the option identifier, e.g., IPV6_HOPLIMIT.\n - data: The option data. This is either a string or an integer. If it's an\n integer it will be written as an unsigned integer in host byte order. If\n it's a string, it's used as is.\n\n Data is padded to an integer multiple of CMSG_ALIGNTO.\n\n Args:\n optlist: A list of tuples describing cmsg options.\n\n Returns:\n A string, a binary blob usable as the control data for a sendmsg call.\n\n Raises:\n TypeError: Option data is neither an integer nor a string.\n \"\"\"\n msg_control = \"\"\n\n for i, opt in enumerate(optlist):\n msg_level, msg_type, data = opt\n if isinstance(data, int):\n data = struct.pack(\"=I\", data)\n elif isinstance(data, ctypes.c_uint32):\n data = struct.pack(\"=I\", data.value)\n elif not isinstance(data, str):\n raise TypeError(\"unknown data type for opt (%d, %d): %s\" % (\n msg_level, msg_type, type(data)))\n\n datalen = len(data)\n msg_len = len(CMsgHdr) + datalen\n padding = \"\\x00\" * util.GetPadLength(CMSG_ALIGNTO, datalen)\n msg_control += CMsgHdr((msg_len, msg_level, msg_type)).Pack()\n msg_control += data + padding\n\n return msg_control\n\n\ndef _ParseMsgControl(buf):\n \"\"\"Parse a raw control buffer into a list of tuples.\"\"\"\n msglist = []\n while len(buf) > 0:\n cmsghdr, buf = cstruct.Read(buf, CMsgHdr)\n datalen = cmsghdr.len - len(CMsgHdr)\n padlen = util.GetPadLength(CMSG_ALIGNTO, datalen)\n data, buf = buf[:datalen], buf[padlen + datalen:]\n\n if cmsghdr.level == socket.IPPROTO_IP:\n if cmsghdr.type == IP_PKTINFO:\n data = InPktinfo(data)\n elif cmsghdr.type == IP_TTL:\n data = struct.unpack(\"@I\", data)[0]\n\n if cmsghdr.level == socket.IPPROTO_IPV6:\n if cmsghdr.type == IPV6_PKTINFO:\n data = In6Pktinfo(data)\n elif cmsghdr.type == IPV6_RECVERR:\n err, source = cstruct.Read(data, SockExtendedErr)\n if err.origin == SO_ORIGIN_ICMP6:\n source, pad = cstruct.Read(source, SockaddrIn6)\n data = (err, source)\n elif cmsghdr.type == IPV6_HOPLIMIT:\n data = struct.unpack(\"@I\", data)[0]\n\n # If not, leave data as just the raw bytes.\n\n msglist.append((cmsghdr.level, cmsghdr.type, data))\n\n return msglist\n\n\ndef Bind(s, to):\n \"\"\"Python wrapper for bind.\"\"\"\n ret = libc.bind(s.fileno(), VoidPointer(to), 
len(to))\n MaybeRaiseSocketError(ret)\n return ret\n\n\ndef Connect(s, to):\n \"\"\"Python wrapper for connect.\"\"\"\n ret = libc.connect(s.fileno(), VoidPointer(to), len(to))\n MaybeRaiseSocketError(ret)\n return ret\n\n\ndef Sendmsg(s, to, data, control, flags):\n \"\"\"Python wrapper for sendmsg.\n\n Args:\n s: A Python socket object. Becomes sockfd.\n to: An address tuple, or a SockaddrIn[6] struct. Becomes msg->msg_name.\n data: A string, the data to write. Goes into msg->msg_iov.\n control: A list of cmsg options. Becomes msg->msg_control.\n flags: An integer. Becomes msg->msg_flags.\n\n Returns:\n If sendmsg succeeds, returns the number of bytes written as an integer.\n\n Raises:\n socket.error: If sendmsg fails.\n \"\"\"\n # Create ctypes buffers and pointers from our structures. We need to hang on\n # to the underlying Python objects, because we don't want them to be garbage\n # collected and freed while we have C pointers to them.\n\n # Convert the destination address into a struct sockaddr.\n if to:\n if isinstance(to, tuple):\n to = Sockaddr(to)\n msg_name = to.CPointer()\n msg_namelen = len(to)\n else:\n msg_name = 0\n msg_namelen = 0\n\n # Convert the data to a data buffer and a struct iovec pointing at it.\n if data:\n databuf = ctypes.create_string_buffer(data)\n iov = Iovec((ctypes.addressof(databuf), len(data)))\n msg_iov = iov.CPointer()\n msg_iovlen = 1\n else:\n msg_iov = 0\n msg_iovlen = 0\n\n # Marshal the cmsg options.\n if control:\n control = _MakeMsgControl(control)\n controlbuf = ctypes.create_string_buffer(control)\n msg_control = ctypes.addressof(controlbuf)\n msg_controllen = len(control)\n else:\n msg_control = 0\n msg_controllen = 0\n\n # Assemble the struct msghdr.\n msghdr = MsgHdr((msg_name, msg_namelen, msg_iov, msg_iovlen,\n msg_control, msg_controllen, flags)).Pack()\n\n # Call sendmsg.\n ret = libc.sendmsg(s.fileno(), msghdr, 0)\n MaybeRaiseSocketError(ret)\n\n return ret\n\n\ndef _ToSocketAddress(addr, alen):\n addr = addr[:alen]\n\n # Attempt to convert the address to something we understand.\n if alen == 0:\n return None\n elif alen == len(SockaddrIn) and SockaddrIn(addr).family == socket.AF_INET:\n return SockaddrIn(addr)\n elif alen == len(SockaddrIn6) and SockaddrIn6(addr).family == socket.AF_INET6:\n return SockaddrIn6(addr)\n elif alen == len(SockaddrStorage): # Can this ever happen?\n return SockaddrStorage(addr)\n else:\n return addr # Unknown or malformed. Return the raw bytes.\n\n\ndef Recvmsg(s, buflen, controllen, flags, addrlen=len(SockaddrStorage)):\n \"\"\"Python wrapper for recvmsg.\n\n Args:\n s: A Python socket object. 
Becomes sockfd.\n buflen: An integer, the maximum number of bytes to read.\n addrlen: An integer, the maximum size of the source address.\n controllen: An integer, the maximum size of the cmsg buffer.\n\n Returns:\n A tuple of received bytes, socket address tuple, and cmg list.\n\n Raises:\n socket.error: If recvmsg fails.\n \"\"\"\n addr = ctypes.create_string_buffer(addrlen)\n msg_name = ctypes.addressof(addr)\n msg_namelen = addrlen\n\n buf = ctypes.create_string_buffer(buflen)\n iov = Iovec((ctypes.addressof(buf), buflen))\n msg_iov = iov.CPointer()\n msg_iovlen = 1\n\n control = ctypes.create_string_buffer(controllen)\n msg_control = ctypes.addressof(control)\n msg_controllen = controllen\n\n msghdr = MsgHdr((msg_name, msg_namelen, msg_iov, msg_iovlen,\n msg_control, msg_controllen, flags))\n ret = libc.recvmsg(s.fileno(), VoidPointer(msghdr), flags)\n MaybeRaiseSocketError(ret)\n\n data = buf.raw[:ret]\n msghdr = MsgHdr(str(msghdr._buffer.raw))\n addr = _ToSocketAddress(addr, msghdr.namelen)\n control = control.raw[:msghdr.msg_controllen]\n msglist = _ParseMsgControl(control)\n\n return data, addr, msglist\n\n\ndef Recvfrom(s, size, flags=0):\n \"\"\"Python wrapper for recvfrom.\"\"\"\n buf = ctypes.create_string_buffer(size)\n addr = ctypes.create_string_buffer(len(SockaddrStorage))\n alen = ctypes.c_int(len(addr))\n\n ret = libc.recvfrom(s.fileno(), buf, len(buf), flags,\n addr, ctypes.byref(alen))\n MaybeRaiseSocketError(ret)\n\n data = buf[:ret]\n alen = alen.value\n\n addr = _ToSocketAddress(addr.raw, alen)\n\n return data, addr\n\n\ndef Setsockopt(s, level, optname, optval, optlen):\n \"\"\"Python wrapper for setsockopt.\n\n Mostly identical to the built-in setsockopt, but allows passing in arbitrary\n binary blobs, including NULL options, which the built-in python setsockopt does\n not allow.\n\n Args:\n s: The socket object on which to set the option.\n level: The level parameter.\n optname: The option to set.\n optval: A raw byte string, the value to set the option to (None for NULL).\n optlen: An integer, the length of the option.\n\n Raises:\n socket.error: if setsockopt fails.\n \"\"\"\n ret = libc.setsockopt(s.fileno(), level, optname, optval, optlen)\n MaybeRaiseSocketError(ret)\n","sub_path":"kernel/tests/net/test/csocket.py","file_name":"csocket.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"413287813","text":"from django.urls import reverse\nfrom menus.base import NavigationNode\nfrom menus.menu_pool import menu_pool\nfrom django.utils.translation import ugettext_lazy as _\nfrom cms.menu_bases import CMSAttachMenu\nfrom .templatetags.image_gallery_tags import get_slug\nfrom .models import Gallery\n\n\nclass ImageGalleryMenu(CMSAttachMenu):\n\n name = _(\"Image Gallery menu\")\n\n # import ipdb; ipdb.set_trace()\n\n def get_nodes(self, request):\n \"\"\" creates parent-child list for extending django-cms navigation (menu & breadcrumb) \"\"\"\n\n nodes = []\n galleries = list(Gallery.objects.filter(is_published=True))\n\n menu_id = 1\n for gallery in galleries:\n gallery_menu_id = menu_id\n nodes.append(\n NavigationNode(\n gallery.title,\n gallery.get_absolute_url(),\n gallery_menu_id,\n )\n )\n menu_id += 1\n\n for img in gallery.get_folder_image_list():\n nodes.append(\n NavigationNode(\n img.name or img.original_filename,\n get_slug(img, gallery),\n menu_id,\n parent_id=gallery_menu_id\n )\n )\n menu_id += 1\n\n # print(\"*\"*20)\n # for n in nodes:\n # 
print(f\"Node: {n}, url: {n.url}, id: {n.id}, parent: {n.parent_id}\")\n\n return nodes\n\nmenu_pool.register_menu(ImageGalleryMenu)","sub_path":"image_gallery/cms_menus.py","file_name":"cms_menus.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"418582310","text":"from markdown.extensions import Extension\nfrom markdown.inlinepatterns import SimpleTagPattern\n\n\n#: Pattern which recognise dates with the format: 25.01.89\nDATE_RE = r'(^|(\\d{2}\\.\\d{2}\\.\\d{2}))(\\n)'\n\n#: Pattern which recognise hours and minutes with the format: 16:45\nTIME_RE = r'(^|(\\d{2}\\:\\d{2}))(\\n)'\n\n#: Pattern which recognise the place of the events.\nPLACE_RE = r'(\\@)(.*?)(\\n)'\n\n\nclass MyExtension(Extension):\n \"\"\"\n This class inherites Extension class from Markdown and extends its functionality.\n \"\"\"\n def extendMarkdown(self, md, md_globals):\n date_tag = SimpleTagPattern(DATE_RE, 'date')\n md.inlinePatterns.add('date', date_tag, '>not_strong')\n hour_tag = SimpleTagPattern(TIME_RE, 'hour')\n md.inlinePatterns.add('hour', hour_tag, '>not_strong')\n place_tag = SimpleTagPattern(PLACE_RE, 'place')\n md.inlinePatterns.add('place', place_tag, '>not_strong')\n\n\ndef makeExtension(*args, **kwargs):\n return MyExtension(*args, **kwargs)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()","sub_path":"ievv_opensource/ievv_eventframework/myextension.py","file_name":"myextension.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"252841881","text":"# Create lists in dictionary to add more values.\r\nfavorite_nums = {\r\n 'patric': [987, 52, 45544],\r\n 'louise': [21, 577, 5],\r\n 'andy': [1, 6, 22]\r\n }\r\n\r\n# Use for to pass all keys and values. 
\r\nfor friend, numbers in favorite_nums.items():\r\n print(\"\\nThree favorite numbers of \" + friend.title() + \" are:\")\r\n for number in numbers:\r\n print(\"\\t\" + str(number))","sub_path":"nest5.py","file_name":"nest5.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"196821327","text":"import atexit\nimport logging\nimport sys\nimport time\nfrom logging.config import dictConfig\n\nimport numpy as np\nfrom bokeh.client import push_session\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import ColumnDataSource, LinearAxis, Range1d\nfrom bokeh.models.widgets import PreText, CheckboxGroup\nfrom bokeh.palettes import brewer\nfrom bokeh.plotting import figure\n\nimport settings\nfrom logger_class import Logger\n\n\n# def dateparse (time_in_secs):\n# return datetime.datetime.fromtimestamp(float(time_in_secs))\n\n\ndef update():\n logging.debug('streaming data to browser')\n # construct the new values for all columns, and pass to stream\n # noinspection PyShadowingNames\n try:\n temp = l.get_new_values()\n new_data = temp\n\n logging.debug('old values' + str(settings.source.data))\n new_data = new_data.to_dict('list') # ColumnDataSource(data=temp)\n\n for key, value in new_data.items():\n value = np.array(value)\n list_with_nan_indices = np.isnan(value) # ==np.nan\n value[list_with_nan_indices] = 'NaN'\n new_data[key] = value\n logging.debug('new values' + str(new_data))\n\n logging.debug('number of elements in new_data dict: {:d}'.format(len(new_data)))\n settings.source.stream(new_data=new_data, rollover=300)\n except Exception as e:\n logging.warning('Bokeh Update\\n' + str(e))\n except:\n logging.critical(\"Unexpected error:\", sys.exc_info()[0])\n\n\ndef main():\n if settings.debug:\n print(\"Debugging enabled!\")\n logging.info(\"Debugging enabled!\")\n\n logging.info(\"Logger instance created\")\n l.start_continuous_measurement_updates(update_time=1, log_every=settings.LOG_EVERY_x_seconds)\n logging.info(\"Logger: continuous measurement updates started\")\n if settings.plotting:\n logging.info('plotting enabled - live plotting of measured values')\n else:\n logging.info('plotting disabled - no live plotting of measured values')\n\n while True:\n try:\n if settings.plotting:\n logging.debug('reading old values')\n # data = pd.read_table(filepath_or_buffer=file, sep=',', na_values='', header=1, index_col=0,\n # names=['g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'temp', 'HeaterOutput',\n # 'electron_current_tti_dmm', 'electron_energy', 'filament_current',\n # 'unused'], low_memory=True)\n # data = pd.read_table(filepath_or_buffer=file, sep=',', na_values='', header=1, parse_dates=True,\n # date_parser=dateparse, index_col='DateTime',\n # names=['DateTime', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'temp', 'HeaterOutput',\n # 'electron_current_tti_dmm', 'electron_energy_tti_dmm',\n # 'filament_current_dmm', 'channeltron_voltage', 'channeltron_current',\n # 'electron_energy_voltage', 'electron_energy_current',\n # 'sector_minus_voltage', 'sector_minus_current', 'sector_plus_voltage',\n # 'sector_plus_current', 'filament_voltage', 'filament_current'],\n # low_memory=True)\n # data = pd.read_table(filepath_or_buffer=file, sep=',', na_values='', header=1, parse_dates=True,\n # date_parser=dateparse,\n # names=['DateTime', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'temp', 'HeaterOutput',\n # 'electron_current_tti_dmm', 'electron_energy_tti_dmm',\n # 'filament_current_dmm', 
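# Hedged mini-example of the pattern update() above relies on:
# ColumnDataSource.stream() appends the new columns and, with rollover set,
# keeps only the newest 300 rows (standard Bokeh API):
#
#   from bokeh.models import ColumnDataSource
#   src = ColumnDataSource(data=dict(x=[0], y=[0.0]))
#   src.stream(new_data=dict(x=[1], y=[2.5]), rollover=300)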
'channeltron_voltage', 'channeltron_current',\n # 'electron_energy_voltage', 'electron_energy_current',\n # 'sector_minus_voltage', 'sector_minus_current', 'sector_plus_voltage',\n # 'sector_plus_current', 'filament_voltage', 'filament_current'],\n # low_memory=True)\n # data = pd.read_table(filepath_or_buffer=settings.file, sep=',', na_values='', header=1,\n # names=['DateTime', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'temp', 'HeaterOutput',\n # 'electron_current_tti_dmm', 'electron_energy_tti_dmm',\n # 'filament_current_dmm', 'channeltron_voltage', 'channeltron_current',\n # 'electron_energy_voltage', 'electron_energy_current',\n # 'sector_minus_voltage', 'sector_minus_current', 'sector_plus_voltage',\n # 'sector_plus_current', 'filament_voltage', 'filament_current'],\n # low_memory=True)\n\n # get 1st line with data from the logger\n data = l.get_new_values()\n\n # create Bokeh Plots\n if True:\n logging.debug('generating bokeh plot datasource')\n\n # open a session to keep our local document in sync with server\n session = push_session(curdoc())\n\n # NEW: create a column data source for the plots to share\n settings.source = ColumnDataSource(data=data.to_dict('list'))\n source = settings.source\n\n # y_range = (pd.DataFrame.min(subset).min(), pd.DataFrame.max(subset).max())\n y_range = (1e-8, 1e-1)\n\n # create a new plot\n s1 = figure(width=750, plot_height=250, title='Pressure', x_axis_type=\"datetime\", y_axis_type=\"log\",\n y_range=y_range)\n s1.line(x='DateTime', y='g1', line_width=1, source=source, line_color='MediumBlue', legend='cluster source')\n s1.line(x='DateTime', y='g2', line_width=1, source=source, line_color='Green', legend='sector 1')\n s1.line(x='DateTime', y='g4', line_width=1, source=source, line_color='Orange', legend='deposition')\n s1.line(x='DateTime', y='g6', line_width=1, source=source, line_color='Red', legend='prevac')\n s1.legend.location = \"bottom_left\"\n\n # NEW: create a new plot and share both ranges\n colors = ['MediumBlue', 'Green']\n s2 = figure(width=750, height=250, x_range=s1.x_range, x_axis_type=\"datetime\", title='Temperature')\n s2.line(x='DateTime', y='temp', source=source, line_color=colors[0], legend='temp')\n\n s2.extra_y_ranges = {\"foo\": Range1d(start=0, end=100)}\n s2.line(x='DateTime', y='HeaterOutput', source=source, color=\"blue\", y_range_name=\"foo\",\n line_color=colors[1], legend='HeaterOutput')\n s2.add_layout(LinearAxis(y_range_name=\"foo\"), 'left')\n s1.legend.location = \"bottom_left\"\n\n # NEW: create a new plot and share both ranges\n colors = ['MediumBlue', 'DeepSkyBlue', 'Green', 'Lime', 'Red', 'LightCoral', 'Orange'] #see http://bokeh.pydata.org/en/latest/docs/reference/palettes.html#bokeh-palettes\n s3 = figure(width=750, height=250, x_range=s1.x_range, x_axis_type=\"datetime\", title='Ionisation')\n s3.line(x='DateTime', y='electron_current_tti_dmm', source=source, legend='electron_current_tti_dmm',\n line_color=colors[1], line_dash='4 4')\n s3.line(x='DateTime', y='electron_energy_tti_dmm', source=source, legend='electron_energy_tti_dmm',\n line_color=colors[2])\n s3.line(x='DateTime', y='electron_energy_voltage', source=source, legend='electron_energy_voltage',\n line_color=colors[3])\n s3.line(x='DateTime', y='electron_energy_current', source=source, legend='electron_energy_current',\n line_color=colors[4], line_dash='4 4')\n s3.line(x='DateTime', y='filament_voltage', source=source, legend='filament_voltage',\n line_color=colors[5])\n s3.line(x='DateTime', y='filament_current', source=source, 
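# Hedged standalone sketch of the twin-axis pattern used for s2 above
# (standard Bokeh API: a named extra range plus a second LinearAxis):
#
#   p = figure(width=400, height=250)
#   p.line([0, 1, 2], [10.0, 20.0, 15.0])
#   p.extra_y_ranges = {"pct": Range1d(start=0, end=100)}
#   p.line([0, 1, 2], [5, 50, 95], y_range_name="pct", line_color="green")
#   p.add_layout(LinearAxis(y_range_name="pct"), 'left')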
legend='filament_current',\n line_color=colors[6], line_dash='4 4')\n s3.line(x='DateTime', y='filament_current_dmm', source=source, legend='filament_current_dmm',\n line_color=colors[0], line_dash='4 4')\n s3.legend.location = \"bottom_left\"\n\n # NEW: create a new plot and share both ranges\n colors = ['MediumBlue', 'DeepSkyBlue', 'Green', 'Lime'] #see http://bokeh.pydata.org/en/latest/docs/reference/palettes.html#bokeh-palettes\n s4 = figure(width=750, height=250, x_range=s1.x_range, x_axis_type=\"datetime\", title='Sector 1')\n s4.line(x='DateTime', y='sector_minus_voltage', source=source, legend='sector_minus_voltage',\n line_color=colors[0])\n s4.line(x='DateTime', y='sector_minus_current', source=source, legend='sector_minus_current',\n line_color=colors[1], line_dash='4 4')\n s4.line(x='DateTime', y='sector_plus_voltage', source=source, legend='sector_plus_voltage',\n line_color=colors[2])\n s4.line(x='DateTime', y='sector_plus_current', source=source, legend='sector_plus_current',\n line_color=colors[3], line_dash='4 4')\n s4.legend.location = \"bottom_left\"\n\n # NEW: create a new plot and share both ranges\n colors = ['MediumBlue', 'DeepSkyBlue'] #see http://bokeh.pydata.org/en/latest/docs/reference/palettes.html#bokeh-palettes\n s5 = figure(width=750, height=250, x_range=s1.x_range, x_axis_type=\"datetime\", title='Detection')\n s5.line(x='DateTime', y='channeltron_voltage', source=source, legend='channeltron_voltage',\n line_color=colors[0])\n s5.line(x='DateTime', y='channeltron_current', source=source, legend='channeltron_current',\n line_color=colors[1], line_dash='4 4')\n s5.legend.location = \"bottom_left\"\n\n # put the subplots in a gridplot\n # p = gridplot([[s1],[s2]])#,[s4],[s5]])\n # set up layout\n stats = PreText(text='')\n stats.text = str(data[['g1', 'g2', 'g6']].tail(5).describe().T.reset_index())\n checkbox_group = CheckboxGroup(\n labels=[\"Gauge 1\", \"Gauge 2\", \"Gauge 3\", \"Gauge 4\", \"Gauge 5\", \"Gauge 6\"], active=[0, 1])\n p = gridplot(\n children=[[s1, checkbox_group, stats], [s2], [s3], [s4], [s5]],\n toolbar_location='left',\n # responsive =True,\n plot_width=1000,\n plot_height=250\n )\n curdoc().add_periodic_callback(update, 1000)\n curdoc().title = \"Snowball Measurements\"\n\n session.show(p) # open the document in a browser\n session.loop_until_closed() # run forever\n # curdoc().add_root(p)\n\n else:\n time.sleep(10)\n\n except RuntimeError:\n # this error occours if xdata and ydata are not the same length, when the data is plotted\n pass\n except:\n logging.critical(\"Unexpected error:\", sys.exc_info()[0])\n\n\ndef exit_handler():\n logging.info(\"Application closed.\")\n\n\nif __name__ == '__main__':\n atexit.register(exit_handler)\n\n if settings.debug:\n loglevel = logging.DEBUG\n else:\n loglevel = logging.INFO\n\n logging_config = dict(\n version=1,\n formatters={\n 'f': {'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}\n },\n handlers={\n 'h': {'class': 'logging.FileHandler',\n 'formatter': 'f',\n 'level': loglevel,\n 'filename': 'Logger_Error.log'}\n },\n root={\n 'handlers': ['h'],\n 'level': loglevel,\n },\n )\n\n dictConfig(logging_config)\n logger = logging.getLogger()\n\n try:\n l = Logger(maxigauge_com_port=settings.Maxigauge1_COM_Port,\n lakeshore_com_port=settings.LakeShore_COM_Port,\n aimtti_multimeter_port=settings.AIMTTI_1705_Mutlimeter_COM_Port,\n NI_Inputs=settings.ni_inputs,\n debug=settings.debug\n )\n main()\n except Exception as e:\n logging.exception('exception in main\\n' + str(e))\n\n\ndef 
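# Note: the `def exit_handler` just below re-defines the function already
# registered with atexit at the top of __main__; the duplicate at module end
# is dead code and could be dropped. For reference, a hedged minimal
# dictConfig equivalent to the logging setup above:
#
#   dictConfig({'version': 1,
#               'formatters': {'f': {'format': '%(asctime)s %(message)s'}},
#               'handlers': {'h': {'class': 'logging.StreamHandler',
#                                  'formatter': 'f', 'level': logging.INFO}},
#               'root': {'handlers': ['h'], 'level': logging.INFO}})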
exit_handler():\n logging.info(\"Application closed.\")\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":13463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177710507","text":"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main file for pre-training or fine-tuning models.\"\"\"\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom clu import platform\nimport jax\nfrom ml_collections import config_flags\nimport tensorflow as tf\n\nfrom f_net import run_classifier\nfrom f_net import run_pretraining\nfrom f_net.configs.base import TrainingMode\n\nconfig_flags.DEFINE_config_file(\n \"config\", None, \"Training configuration.\", lock_config=True)\nflags.mark_flags_as_required([\"config\"])\nflags.DEFINE_string(\"workdir\", None, \"Work unit directory.\", required=True)\nflags.DEFINE_string(\n \"vocab_filepath\",\n None,\n \"Absolute path to SentencePiece vocab model.\",\n required=True)\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n del argv\n\n # Hide any GPUs form TensorFlow. Otherwise TF might reserve memory and make\n # it unavailable to JAX.\n tf.config.experimental.set_visible_devices([], \"GPU\")\n\n logging.info(\"JAX process: %d / %d\", jax.process_index(), jax.process_count())\n logging.info(\"JAX devices: %r\", jax.devices())\n\n # Add a note so that we can tell which task is which JAX process.\n platform.work_unit().set_task_status(\n f\"process_index: {jax.process_index()}, process_count: {jax.process_count()}\"\n )\n platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,\n FLAGS.workdir, \"workdir\")\n\n train_mode = FLAGS.config.mode\n if train_mode == TrainingMode.PRETRAINING:\n train_lib = run_pretraining\n elif train_mode == TrainingMode.CLASSIFICATION:\n train_lib = run_classifier\n else:\n raise ValueError(\"Unknown training mode: %s\" % train_mode)\n\n train_lib.train_and_evaluate(FLAGS.config, FLAGS.workdir,\n FLAGS.vocab_filepath)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","sub_path":"f_net/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624606865","text":"# step-123.py -- For this drill, you will need to write a script that \n# creates a GUI with a button widget and a text widget. Your script will \n# also include a function that when it is called will invoke a dialog \n# modal which will allow users with the ability to select a folder \n# directory from their system. Finally, your script will show the user’s \n# selected directory path into the text field.\n# Your script will need to use the askdirectory() method from the Tkinter \n# module. 
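# Hedged sketch of a minimal ml_collections config the --config flag in the
# f_net main.py above could point at (only `mode` is read directly by that
# file; the extra field is a hypothetical placeholder):
#
#   import ml_collections
#   from f_net.configs.base import TrainingMode
#
#   def get_config():
#       config = ml_collections.ConfigDict()
#       config.mode = TrainingMode.PRETRAINING
#       config.learning_rate = 1e-4  # hypothetical
#       return config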
Your script will need to have a function linked to the button \n# widget so that once the button has been clicked will take the user’s \n# selected file path retained by the askdirectory() method and print it \n# within your GUI’s text widget.\n\nimport tkinter as tk\nfrom tkinter import filedialog\nimport os\n\nclass App(tk.Frame):\n def __init__(self, master, *args, **kwargs):\n tk.Frame.__init__(self, master, *args, **kwargs)\n self.master = master\n self.master.title('Python Course Step 123')\n self.master.config(padx=20, pady=16)\n self.master.bind(sequence='', func=exit)\n self.loadGUI()\n\n def loadGUI(self):\n self.folderName = tk.StringVar()\n # create widgets\n self.btnSelectFolder = tk.Button(self.master, text='Select Folder...', \n width=12, command=self.chooseFolder)\n self.txtFolder = tk.Entry(self.master, width=84, textvariable=self.folderName)\n self.btnQuit = tk.Button(self.master, text='Quit', width=12, command=exit)\n\n # customize and show widgets\n self.btnSelectFolder.grid(row=0, column=0, sticky='w', padx=(0,8))\n self.btnQuit.grid(row=0, column=1, sticky='e', padx=(8,0))\n self.txtFolder.grid(row=1, column=0, columnspan=2, pady=(12,0), ipady=4, sticky='ew')\n\n def chooseFolder(self):\n path = os.getcwd()\n self.folderName.set(filedialog.askdirectory(initialdir=path))\n self.txtFolder.focus()\n\nif __name__ == '__main__':\n root = tk.Tk()\n app = App(root)\n root.mainloop()\n","sub_path":"step-123/step-123.py","file_name":"step-123.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"347186001","text":"import time, types\n\nclass D(object):\n\n OK = 0\n FAILD = 1\n LIMIT_TIME = 60*5\n\n #subth\n COL = 'col'\n RWTH = 'rwth'\n REMU = 'remu'\n IS_USER = 'isuser'\n\n sample = {'port':000,\n 'add': 'add',\n 'name':'user',\n 'timestamp':time.time()}\n FUNC = types.FunctionType\n\n #subth2\n EVERYONE = 'everyone'\n WRITE_COM = 'writecomment'\n READ_COM = 'readcomment'\n READ_BCKCOM = 'backlog'\n MAX_BCKLOG = 500\n\n COM_TO_EVERYONE = '{}:@ {}\\n'\n COM_TO_FROM = '{}:{} {}\\n'\n\n\n # system message\n LOGIN_ERROR1 ='''\n plaese type your login name \n '''\n LOGIN_ERROR2 ='''\n too longer\n '''\n LOGIN_ERROR3 ='''\n only use \"0-9\",\"a-z\",\"A-Z\"\n '''\n LOGIN_ERROR4 ='''\n user name {} is already used \n '''\n LOGIN_COMFIRMED='''\n collect login\n '''\n LOGIN_FAILD='''\n login Faild\n '''\n ALREADY_LOGIN='''\n you are already logined.\n your user name is {} \n '''\n\n LOGOUT_ERROR1='''\n you are already logined.\n your user name is {} \n '''\n LOGOUT_COMFIRMED='''\n logout comfirmed\n '''\n LOGOUT_FAILD='''\n logout faild\n '''\n\n ALREADY_LOGOUT='''\n you are already logouted. \n '''\n\n NONE_COMMAND='''\n Thare is not '{}' command\n you can know some it with using 'help' command\n '''\n\n PLEASE_LOGIN='''\n please login\n '''\n\n BACKLOG_ERROR1='''\n please key-in intger number\n '''\n\n TOUSER_PLEASE_USER='''\n please select user name\n '''\n\n TOUSER_COM_ERROR1='''\n user name {} is not found. 
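# Hedged three-line core of the dialog flow implemented in the App class
# above: askdirectory() blocks until the user picks a folder and returns the
# path ('' if the dialog is cancelled).
import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.withdraw()                      # dialog only, no main window needed
path = filedialog.askdirectory(initialdir='.')
print(path or 'no folder selected')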
\n '''\n\n TOUSER_COM_ERROR2='''\n non message length \n '''\n TOUSER_COM_ERROR3='''\n user name {} is you\n '''\n\n HELP_MSG = '''\n :login [your user name]\n ログインしないと何もできません \n アクションなしのまま5分経過で強制ログアウトします\n\n :logout\n 退出するときはログアウトしましょう\n \n :chat [your message]\n チャットができます\n chat コマンドをつかわなくても\n そのままキーインすればチャット可能です\n メッセージは全員にみえます\n\n >[user name] [your message]\n 対象のユーザーにチャットします\n メッセージはその人にしかみえません\n \n :backlog [int]\n バックログを閲覧します\n 最新から引数の整数値分のコメントを表示します\n '''","sub_path":"_defA.py","file_name":"_defA.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"38377972","text":"import sys, time\nfrom datetime import date\nsys.path.extend(['..', '.'])\nfrom collections import *\nfrom fetch import *\nfrom util import *\n#import drawgraph\n#lo, hi, lt, pw = lazy_ints(multisplit(line, '-: ')) #chars only!\n#or lo, hi, lt, pw = lazy_ints(multisplit(line, ['-',': ','))\nimport re\n#use regex re.split(' |,|: ', line)\n\ndef db(*a): \n if DB: print(*a)\n\ndef parse(line):\n return int(line)\n\ndef getsoll(N):\n soll = [0] * N\n for i in range(2, N):\n if soll[i] == 0:\n #prime\n for k in range(i*i, N, i):\n soll[k] = i\n return soll\n\n\ndef getprimes(i, soll):\n solval = soll[i]\n primes = []\n while solval > 0:\n primes.append(solval)\n i //= solval\n solval = soll[i]\n primes.append(i)\n return primes\n\ndef divs(primes):\n if len(primes) == 0: return [1]\n for key in primes.keys(): break\n val = primes.pop(key)\n li = divs(primes)\n li2 = list(li)\n for v in li:\n for mult in range(1, val+1):\n li2.append(v * key ** mult)\n return li2\n\ndef presents(h, soll):\n cnt = 0\n pr = Counter(getprimes(h, soll))\n divisors = divs(pr)\n su = sum(divisors)\n return 10 * su\n\ndef deliver(N):\n houses = [0] * N\n for elf in range(1, N):\n for h in range(elf, min(N, 50*elf), elf):\n houses[h] += 11 * elf\n return houses\n\ndef p1(v):\n val = int(v.strip())\n #val = 130\n N = 4000000\n soll = getsoll(N)\n for h in range(1, 4000000):\n cnt = presents(h, soll)\n \n if h % 100000 == 0: db(h, cnt)\n\n if cnt > val:\n return h\n return -1\n\ndef p2(v):\n val = int(v.strip())\n #val = 130\n N = 40000000\n houses = deliver(N)\n for i, cnt in enumerate(houses):\n if cnt > val:\n return i\n return -1\n\n\ndef manual():\n v = open(\"real.txt\", 'r').read().strip('\\n')\n print('part_1: {}\\npart2: {}'.format(p1(v), p2(v)))\n \ncmds, stats, io, so, DB = get_args(sys.argv) \nif not io: run_samples(p1, p2, cmds)\nif not so: run(2015,20, p1, p2, cmds)\nif stats: print_stats()\n#manual()\n\n#submitted: 36960","sub_path":"aoc15/D20/d20.py","file_name":"d20.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"4800740","text":"#!/usr/bin/env python\n#\n# Place-in Sample Exam 2\n# Seventeen\n#\n# seventeen1.py\n########################################################################\n\n# Start the game loop, prompt the human to enter a move\ndef game_loop(num_marbles=17, winner_msg=\"\"):\n print(\"\\nLet's play the game of Seventeen!\")\n while num_marbles > 0:\n # player's move\n player_removed = players_move(num_marbles)\n print(\"You removed {} marbles.\".format(player_removed))\n \n # calculate and print number of marbles remaining\n num_marbles = get_marbles_remaining(num_marbles, player_removed)\n \n # Check if the Computer won, i.e. 
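# English gloss of the Japanese HELP_MSG above (translation, not new content):
#   :login [name]   - nothing works until you log in; 5 idle minutes force
#                     a logout
#   :logout         - log out when leaving
#   :chat [msg]     - message everyone (bare keyed-in text also chats)
#   >[user] [msg]   - private message, visible only to that user
#   :backlog [int]  - show the most recent <int> comments
# Hedged usage sketch for the message templates defined in D above:
#   D.COM_TO_EVERYONE.format('alice', 'hi')      # -> 'alice:@ hi\n'
#   D.COM_TO_FROM.format('alice', 'bob', 'yo')   # -> 'alice:bob yo\n'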
the human removes the last marble\n if num_marbles == 0:\n winner_msg = \"Computer wins!\"\n break\n \n # computer's move\n computer_removed = computers_move(num_marbles, player_removed)\n print(\"Computer removed {} marbles.\".format(computer_removed))\n \n # Calculate and print number of marbles remaining\n num_marbles = get_marbles_remaining(num_marbles, computer_removed)\n \n # Check if the human won, i.e. computer removes the last marble\n if num_marbles == 0:\n winner_msg = \"You win!\"\n break\n # find winner and print to terminal\n print(\"\\nThere are no marbles left. {}\".format(winner_msg))\n\n# The human player's move\ndef players_move(num_marbles):\n \"\"\"Prompts the player to pick a move and then returns the number\n of marbles removed\n \n num_marbles -- the number of marbles before the player chooses\n a move\n \"\"\"\n marbles_left = \"Number of marbles left in jar: {}\".format(num_marbles)\n print(marbles_left)\n removed = input(\"\\nYour turn: How many marbles will you remove (1-3)? \")\n removed = validate_input(removed, num_marbles)\n return removed\n\n# Validate the human's input\ndef validate_input(remove_marbles, num_marbles):\n \"\"\"Validates if the number of marbles to be removed is an integer\n Or if it's a string, return invalid input error msg. Also \n validates if number entered is greater than 3 or less than 1\n \n Paramter:\n remove_marbles -- the number of marbles to remove as entered by the\n user\n num_marbles -- the number of marbles before the player's move\n \"\"\"\n try:\n remove_marbles = int(remove_marbles)\n except ValueError:\n print(\"Sorry, that is not a valid option. Try again!\")\n # print(\"Please enter non-word numerals.\")\n remove_marbles = players_move(num_marbles) \n else:\n # Check out of bounds conditions\n if num_marbles < remove_marbles:\n print(\"Sorry, that is not a valid option. Try again!\")\n # print(\"There are only {} marbles left in the jar.\".format(num_marbles))\n remove_marbles = players_move(num_marbles)\n \n if remove_marbles < 1 or remove_marbles > 3:\n print(\"Sorry, that is not a valid option. Try again!\")\n remove_marbles = players_move(num_marbles)\n finally:\n return remove_marbles\n\n# Proceed with the computer's strategy\ndef computers_move(num_marbles, human_removed):\n \"\"\"Returns the number of marbles the computer removes based on\n some strategy. 
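# Hedged sketch of a stronger strategy than the mirroring described above
# (not what this file implements): with "whoever removes the last marble
# loses", leaving the opponent a count congruent to 1 (mod 4) is a forced
# win whenever such a move exists.
def optimal_move(num_marbles):
    take = (num_marbles - 1) % 4
    return take if 1 <= take <= 3 else 1   # losing position: stall with 1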
Currently the computer will employ the simple\n strategy of matching the same move as the human player\n \n Parameter:\n num_marbles -- the number of marbles before the computer makes\n its move\n human_removed -- the number of marbles that the human removed\n \"\"\"\n print(\"Number of marbles left in jar: {} \".format(num_marbles))\n print(\"\\nComputer's turn...\")\n if num_marbles < human_removed:\n return num_marbles\n else:\n return human_removed\n\n# Gets the number of marbles remaining at the end of the turn\ndef get_marbles_remaining(previous_count, marbles_removed):\n \"\"\"Get the current number of marbles remaining in the jar\n at the end of each turn\n \n Parameters:\n previous_count -- the previous count of marbles in the jar\n marbles_removed -- the number of marbles removed in a turn\n \"\"\"\n remaining = previous_count - marbles_removed\n return remaining\n\ndef main():\n game_loop()\n\nif __name__ == \"__main__\":\n main()","sub_path":"seventeen1.py","file_name":"seventeen1.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615676196","text":"'''\nRecBin v20140709 - John Moran (john@jtmoran.com)\n\nParses all $I recycle bin artifacts from Windows Vista+ from a given directory\nand displays date/time deleted, original file path and original file size.\n\nSyntax: recbin.py -d \n'''\n\nimport getopt\nimport sys\nimport glob\nimport os\nimport datetime\nimport re\n\ndef readDir (dir):\n if not os.path.exists(dir):\n print(\"\\nDirectory '\" + dir + \"' does not exist!\")\n return\n os.chdir(dir)\n fileList = []\n #Get list of $I files in dir\n for file in glob.glob(\"$I*\"):\n fileList.append(dir + \"/\" + file)\n #If 1+ $I files found continue\n if(len(fileList) < 1):\n print(\"\\nNo $I files found in '\" + dir + \"'\")\n return\n else:\n print(\"\\n(\" + str(len(fileList)) + \") $I files found in '\" + dir + \"'\\n\")\n print(\"File Date\\\\Time Deleted Original Path (Original Size)\")\n print(\"---- ----------------- -----------------------------\")\n #Read each file\n for f in fileList:\n readI(f)\n\ndef readI(fname):\n #Open file and read into 'data'\n I = open(fname, \"rb\")\n data = I.read()\n #Read Windows FILETIME obj at bytes 16-23\n date = datetime.datetime.utcfromtimestamp(((int.from_bytes(data[16:24], byteorder='little') - 116444736000000000) / 10000000)).strftime('%H:%M:%S %m/%d/%Y')\n #Read original file name at bytes 24+\n filename = data[24:]\n filename = filename.decode(\"utf16\").rstrip('\\0')\n #Read original file size at bytes 8-15\n filesize = int.from_bytes(data[8:16], byteorder='little')\n basename = os.path.basename(fname)\n date = date + \" GMT\"\n print(basename.ljust(20) + date.ljust(28) + filename + \" (\" + str(filesize) + \" bytes)\")\n\t\t\ndef main (argv):\n\t#Get command line options\n try:\n\t opts, args = getopt.getopt(argv, \"d:h\", [\"directory=\", \"help\"])\n except getopt.GetoptError:\n print(\"\\nInvalid options!\")\n print(__doc__)\n sys.exit(2)\n\t#Check that only one command line option is specified\t\n if len(opts) != 1:\n print(\"\\nInvalid options!\")\n print(__doc__)\n sys.exit(2)\t\n\t\t\n for opt, arg in opts:\n\t #Help\n if opt in (\"-h\", \"--help\"):\n print(__doc__)\n sys.exit(2)\t\n \t#Directory\n if opt in (\"-d\", \"--directory\"):\n readDir(arg)\n\n\t\t\nif __name__ == \"__main__\":\n 
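# Hedged standalone check of the FILETIME math used in readI() above:
# FILETIME counts 100 ns ticks since 1601-01-01 UTC, so subtracting
# 116444736000000000 ticks rebases the value onto the Unix epoch:
#
#   filetime = int.from_bytes(data[16:24], byteorder='little')
#   unix_seconds = (filetime - 116444736000000000) / 10000000
#   datetime.datetime.utcfromtimestamp(unix_seconds)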
main(sys.argv[1:])","sub_path":"recbin.py","file_name":"recbin.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"186907054","text":"def resolve():\n n, m = map(int, input().split())\n pss = [list(input().split()) for _ in range(m)]\n ac = [0] * n\n wa = [0] * n\n ans1 = 0\n ans2 = 0\n for i in range(m):\n if ac[int(pss[i][0]) - 1] == 0:\n if pss[i][1] == 'AC':\n ans1 += 1\n ans2 += wa[int(pss[i][0]) - 1]\n ac[int(pss[i][0]) - 1] = 1\n else:\n wa[int(pss[i][0]) - 1] += 1\n else:\n continue\n print(str(ans1) + ' ' + str(ans2))\n\n\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"2 5\n1 WA\n1 AC\n2 WA\n2 AC\n2 WA\"\"\"\n output = \"\"\"2 2\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"100000 3\n7777 AC\n7777 AC\n7777 AC\"\"\"\n output = \"\"\"1 0\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"6 0\"\"\"\n output = \"\"\"0 0\"\"\"\n self.assertIO(input, output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"ABC-C/ABC151C.py","file_name":"ABC151C.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"38915012","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/chris/GitHub/MetaWards/build/lib.macosx-10.9-x86_64-3.7/metawards/utils/_vaccination.py\n# Compiled at: 2020-04-15 04:10:58\n# Size of source mod 2**32: 1026 bytes\nfrom array import array\nimport os\nfrom .._network import Network\n__all__ = [\n 'allocate_vaccination',\n 'how_many_vaccinated', 'vaccinate_same_id']\n\ndef allocate_vaccination(network: Network, output_dir: str):\n \"\"\"Allocate memory and open files needed to track vaccination\"\"\"\n null_int = (network.nnodes + 1) * [0]\n null_float = (network.nnodes + 1) * [0.0]\n int_t = 'i'\n float_t = 'd'\n vac = array(int_t, null_int)\n wards_ra = array(int_t, null_int)\n risk_ra = array(float_t, null_float)\n sort_ra = array(int_t, null_int)\n VACF = open(os.path.join(output_dir, 'Vaccinated.dat', 'w'))\n trigger = 0\n return (\n vac, wards_ra, risk_ra, sort_ra, VACF, trigger)\n\n\ndef how_many_vaccinated(vac):\n raise AssertionError('how_many_vaccinated has not yet been written')\n\n\ndef vaccinate_same_id(network: Network, risk_ra, sort_ra, infections, play_infections, vac, params):\n raise AssertionError('vaccinate_same_id has not yet been written')","sub_path":"pycfiles/metawards-0.7.1-cp37-cp37m-macosx_10_9_x86_64/_vaccination.cpython-37.py","file_name":"_vaccination.cpython-37.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"1153691","text":"import sys\nimport requests\nimport csv\nfrom fetchArtist import *\nimport pandas as pd\n\n\ndef getrelatedArtists(artistID):\n url= 'https://api.spotify.com/v1/artists/' + artistID + '/related-artists'\n #print url\n req=requests.get(url)\n \n data=req.json()\n \n relatedArtists= []\n for 
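# Hedged bug note for allocate_vaccination() in the _vaccination module
# above: the mode string 'w' is joined into the path; the call presumably
# intends
#   VACF = open(os.path.join(output_dir, 'Vaccinated.dat'), 'w')
# rather than open(os.path.join(output_dir, 'Vaccinated.dat', 'w')).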
item in data['artists']:\n relatedArtists.append(item['id'])\n \n return relatedArtists\n\n#print getrelatedArtists(fetchArtistId('Beck'))\n \ndef getDepthEdges(artistID, depth):\n if depth==0:\n return []\n \n related_artist= getrelatedArtists(artistID)\n tpl_lst= []\n \n for artist in related_artist:\n tpl_lst.append((artistID, artist))\n\n for artist_relatedartist in tpl_lst:\n tpl_lst= set(tpl_lst)\n tpl_lst = list(tpl_lst)\n tpl_lst= tpl_lst + getDepthEdges(artist_relatedartist[1], depth-1)\n \n \n return tpl_lst \n\n\n#print getDepthEdges(fetchArtistId('Beck'), 2)\n\ndef getEdgeList(artistID, depth):\n \n df=pd.DataFrame(getDepthEdges(fetchArtistId('name'), depth))\n df.columns = ['artist' , 'related artist']\n \n return df\n \n#print getEdgeList('Beck', 2)\n\ndef writeEdgeList(artistID, depth, filename):\n \n EL=getEdgeList(artistID, depth).to_csv(filename, index=False)\n \n return EL\n \n\n#print writeEdgeList('Beck', 2, 'Beck_csv2')\nwriteEdgeList('Def Leppard', 3, 'def_csv') \n#writeEdgeList('Bay City Rollers', 2, 'bcr_csv') \n\n","sub_path":"Assignment7/artistNetworks.py","file_name":"artistNetworks.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"522025352","text":"#récupère le cube de départ entrée par l'utilisateur dans InterfaceIO\n#et renvoie à InterfaceIO la résolution du cube\n\nfrom Cube import *\nclass Resolution:\n\n def __init__(self,c):\n self.cube=c\n self.mouv = 0\n self.vr = 0\n self.br = 0\n self.bo = 0\n self.vo = 0\n self.a = 0\n self.b = 0\n \n \n # liste des indexes servant à la croix\n self.liCross=1,5,7,3\n self.liCorner=0,2,8,6\n\n # liste des rotation effectué durant la résolution \n self.liCmd=''\n self.nbCmd=0\n self.liRota='','2',\"'\"\n\n \n # Utiliser cette fonction permet de garder en mémoire les mouvements effectué durant la résolution\n def rotation(self,cmd):\n if(cmd=='' or cmd==' '):\n return 0\n if(len(cmd)!=1 and len(cmd)!=2):\n print(\"rotation : INVALID ROTATION NAME\",cmd)\n return -1\n if(len(cmd)==2 and ( cmd[1]!=\"2\" and cmd[1]!=\"'\")):\n print(\"rotation : INVALID ROTATION NAME\",cmd)\n return -1\n self.liCmd+=cmd\n self.nbCmd+=1\n #print(cmd,end='')\n self.cube.rotation(cmd)\n \n # Fonction qui renvoit l'inverse d'une rotation L2 => L2 L=>L' L'=>L \n def getInvRot(self,cmd):\n if(len(cmd)!=1 and len(cmd)!=2):\n print(\"getInvRot : INVALID ROTATION NAME\",cmd)\n return -1\n if(len(cmd)==1):\n return cmd+\"'\"\n if(cmd[1]=='2'):\n return cmd\n return cmd[0]\n \n # Cette fonction renvoit le type de rotation a ��ffectué\n # pour que le cube sur la face origin se retrouve sur la face\n # destination\n # la rotation doit etre une composante de la face rotatingF\n def getApproRot(self,origin,destination,rotatingF):\n if(origin == destination):\n return ''\n if(rotatingF==destination):\n return -2\n i=-1\n x=0\n m=self.cube.getMouv(rotatingF.upper())\n while(x<=6):\n if(origin==m[1][x%4][0]):\n i=0\n if(destination==m[1][x%4][0] and i!=-1):\n return rotatingF.upper()+self.liRota[i-1]\n x+=1\n if(i!=-1):\n i+=1\n return -1\n\n # Permet d'effectuer un certain nombre de rotation à la suite \n def applyCmd(self,cmd):\n cpt=0\n while(cpt!=len(cmd)):\n tmp=cmd[cpt]\n if(cpt==len(cmd)-1):\n self.rotation(tmp)\n return 0\n if(cmd[cpt+1]==\"'\" or cmd[cpt+1]==\"2\"):\n tmp+=cmd[cpt+1]\n self.rotation(tmp)\n cpt+=1\n else :\n self.rotation(tmp)\n tmp=''\n cpt+=1\n return 0\n \n\n\n \n \n \n \n \n\n def theCross(self,nameFace):\n 
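# Hedged fix note for getEdgeList() in artistNetworks.py above: it ignores
# its first argument and always looks up the literal string 'name'.
# Presumably intended (same pandas usage, parameter actually used):
#
#   def getEdgeList(artistName, depth):
#       df = pd.DataFrame(getDepthEdges(fetchArtistId(artistName), depth))
#       df.columns = ['artist', 'related artist']
#       return df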
tab=self.checkCross(nameFace)\n colorcross=self.cube.getCentralColor(nameFace)\n if(tab[0]==True):\n return 0\n # pour toute les faces qui n'ont pas encore été traitée\n while(len(tab[1])!=0):\n for x in tab[1]:\n #print(tab)\n \n # On cherche le cube de couleur \"Face à traiter\" + \" Face où se trouve la croix\"\n # on récupere donc la couleur de \"Face à traiter\" \n curColor=self.cube.getCentralColor(x)\n result=self.cube.findCube([colorcross,curColor])\n #print(result)\n #cube.displayCube()\n\n # on traite les différents cas en fonction de la position du cube\n\n # cas ou la face de la couleur dont on veut faire la croix (ex blanc) se trouve sur la face de la croix\n if(result[0][1]==nameFace):\n\n # si la croix n'a pas été commencé on peut économiser un mouvement\n if(len(tab[1])==4):\n self.applyCmd(self.getApproRot(result[1][1],x,result[0][1]))\n else:\n self.applyCmd(result[1][1].upper()+'2'+self.getApproRot(result[1][1],x,self.cube.getFaceInversed(nameFace))+x.upper()+'2')\n\n #cas ou la face de la couleur dont on veut faire la croix se trouve sur la face inverse de la croix\n if(result[0][1]==self.cube.getFaceInversed(nameFace)):\n self.applyCmd(self.getApproRot(result[1][1],x,result[0][1])+x.upper()+'2')\n\n # si on se trouve dans aucun des deux\n if(result[0][1]!=self.cube.getFaceInversed(nameFace) and result[0][1]!=nameFace):\n\n # on essaye d'approcher le morceau de croix par une seule rotation\n # le nom de la rotation est donnée par getApproRot si une seule rotation suffit\n # -1 est renvoyé si ce n'est pas possible ( le pire des cas )\n # -2 est renvoyé si le la face blanche ( exemple ) se trouve sur la face de destination\n rot=self.getApproRot(result[1][1],x,result[0][1])\n if(rot==-1):\n \n # si ce n'est pas possible un minimum de trois rotations sera nécessaire\n \n # j'ai découpé cette partie en 2 cas à cause de certaines différences de procédure\n # mais je pense qu'il est pssible de factoriser cette partie du code \n if(result[1][1]==nameFace or result[1][1]==self.cube.getFaceInversed(nameFace)):\n\n # si la croix n'a pas été commencée\n if(len(tab[1])==4):\n tmpcmd=result[1][1].upper()\n self.rotation(tmpcmd)\n \n tmppos=self.cube.findCube([colorcross,curColor])\n tmpcmd=self.getApproRot(tmppos[1][1],x,tmppos[0][1])\n self.rotation(tmpcmd)\n \n tmppos=self.cube.findCube([colorcross,curColor])\n tmpcmd=self.getApproRot(tmppos[0][1],nameFace,x)\n self.rotation(tmpcmd)\n \n # si la croix a été commencé on garde en mémoire le mouvement qui perturbe le travail déjà réalisé\n # on fait ce mouvement à l'inverse une fois la face terminée\n else:\n tmpcmd=self.getApproRot(result[1][1],self.cube.getFaceInversed(nameFace),result[0][1])\n self.rotation(tmpcmd)\n self.rotation(self.cube.getFaceInversed(nameFace).upper())\n tmppos=self.cube.findCube([colorcross,curColor])\n tmpcmd=self.getApproRot(tmppos[1][1],x,tmppos[0][1])\n \n faceinter=tmppos[0][1]\n mouvinter=self.getInvRot(tmpcmd)\n \n self.rotation(tmpcmd)\n \n tmppos==self.cube.findCube([colorcross,curColor])\n tmpcmd=self.getApproRot(tmppos[0][1],nameFace,x)\n self.rotation(tmpcmd)\n if(faceinter not in tab[1]):\n self.rotation(mouvinter)\n \n else:\n tmpcmd=self.getApproRot(result[0][1],self.cube.getFaceInversed(nameFace),result[1][1])\n self.rotation(tmpcmd)\n \n faceinter=result[1][1]\n mouvinter=self.getInvRot(tmpcmd)\n \n tmppos=self.cube.findCube([colorcross,curColor])\n tmpcmd=self.getApproRot(tmppos[1][1],x,tmppos[0][1])\n self.rotation(tmpcmd)\n \n tmppos=self.cube.findCube([colorcross,curColor])\n 
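# Hedged note on the move-string format rotation()/applyCmd() parse above:
# one face letter per turn (F, R, U, D, L, B), optionally suffixed with "'"
# (counter-clockwise quarter turn) or "2" (half turn), i.e. standard
# Singmaster notation, e.g.:
#   r.applyCmd("F'RFL'F'R'FL")   # eight turns, each recorded in r.liCmd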
tmpcmd=self.getApproRot(tmppos[0][1],nameFace,x)\n self.rotation(tmpcmd)\n\n if(faceinter not in tab[1]):\n self.rotation(mouvinter)\n \n\n \n # si la face blanche est sur la face cherchée il faut retourner le cube\n elif(rot==-2):\n #print('ok')\n comeB=False\n rr=result[1][1].upper()\n if(result[1][1]!=self.cube.getFaceInversed(nameFace) and result[1][1]!=nameFace):\n rr=self.getApproRot(result[0][1],self.cube.getFaceInversed(nameFace),result[1][1])\n \n self.rotation(rr)\n tmppos=self.cube.findCube([colorcross,curColor])\n self.rotation(self.getApproRot(tmppos[1][1],x,tmppos[0][1]))\n if(result[1][1]==self.cube.getFaceInversed(nameFace)):\n comeB=True\n elif((rr[0].lower() not in tab[1] or (rr[0].lower()==nameFace and len(tab[1]!=4)))):\n self.rotation(self.getInvRot(rr))\n self.rotation(self.getApproRot(tmppos[0][1],nameFace,x))\n if(comeB):\n self.rotation(self.getInvRot(self.getApproRot(tmppos[1][1],x,tmppos[0][1])))\n \n # cas le plus simple ou il sufft de placer la partie de la croix sur la face ou on fait la croix\n elif(rot==''):\n self.rotation(self.getApproRot(result[0][1],nameFace,x))\n \n \n \n \n \n # si rot est égal à une rotation \n else :\n if(result[0][1] not in tab[1]):\n self.applyCmd(rot+self.getApproRot(result[0][1],nameFace,x)+self.getInvRot(rot))\n else :\n self.applyCmd(rot+self.getApproRot(result[0][1],nameFace,x))\n #print(\"\")\n tab[1].remove(x) \n \n \n \n # Cette fonction permet de verifier si la croix a été réalisée sur une face\n # Le nom de la face à vérifier est donné dans nameFace\n # On utilisera la fonction findCube() en limitant les recherches à\n # la face nameFace\n \n # Si un ou des cube n'est/ne sont pas placé(s) au bon endroit\n # La structure rénvoyée sera de la forme\n # [ False , [ nom de la face1 avec un cube mal placé, .....]]\n # si tout est bien placé on aura\n # [ True ,[] ]\n \n def checkCross(self,nameFace):\n tmp=[True,[]]\n colorcross=self.cube.getCentralColor(nameFace)\n m=self.cube.getMouv(nameFace.upper())\n \n for x in m[1]:\n coloredge=self.cube.getCentralColor(x[0])\n result=self.cube.findCube([colorcross,coloredge],nameFace)\n \n if(result==-1 or result[1][1]!=x[0]):\n tmp[1]+=[x[0]]\n tmp[0]=False\n \n \n return tmp\n\n #verifie si les coin sont fait\n #entré : nom de la face à vérifier\n #sortie : tableau à deux case :\n #premiere case boolean true si les coins sont bien fait false sinon\n #deuxieme case : tableau à trois dimension chaque case represente un coin mal placé avec\n #[[face ou doit etre la couleur de gauche,couleur gauche],[face ou doit etre la couleur de droite,couleur droite]]\n # si tout est bien placé on aura\n # [ True ,[] ]\n def checkCorner(self,nameFace):\n tmp=[True,[]]\n colorcorner=self.cube.getCentralColor(nameFace)\n m=self.cube.getMouv(nameFace.upper())\n for x in range (4):\n colorPrev=self.cube.getCentralColor(m[1][(x+3)%4][0])\n colorNext=self.cube.getCentralColor(m[1][x][0])\n \n\n result=self.cube.findCube([colorcorner,colorPrev,colorNext],nameFace)\n \n if(result==-1 or result[1][1]!=m[1][(x+3)%4][0] or result[2][1]!=m[1][x][0] ):\n tmp[1]+=[[[m[1][(x+3)%4][0],colorPrev],[m[1][x][0],colorNext]]]\n #[[face ou doit etre la couleur de gauche,couleur gauche],[face ou doit etre la couleur de droite,couleur droite]] \n\n\n tmp[0]=False\n \n return tmp\n\n #the corner permet de faire les coin d'une face sans prendre en compte se qu'elle fait sur les autre face\n def theCorner(self,nameFace):\n tab=self.checkCorner(nameFace)\n colorCorner=self.cube.getCentralColor(nameFace)\n 
inv=self.cube.getFaceInversed(nameFace)\n\n \n if(tab[0]==True):\n #si les coins sont bien placé on ne fait rien\n \n return 0\n else :\n \n for idx, x in enumerate (tab[1]) :\n #pour chaque coins mal placés\n \n \n tmp=self.cube.findCube([colorCorner,x[0][1],x[1][1]])\n #on trouve le cube à replacer corectement\n \n \n if (tmp[0][1] != inv):\n #si la couleur de la face à aranger n'est pas à l'opposer de où elle devrait etre (cas les plus simple)\n \n for i in range (0,2) :\n #on regarde quelle couleur est donc à l'opposé les rotation change selon celle ci c'est pour ça qu'il y à cette boucle\n \n if (tmp[1+i][1] == inv):\n #la couleur choisi est bien à l'opposé de la face à aranger\n \n m=self.cube.getMouv(tmp[2-i][1].upper())\n if (tmp[0][1] == m[1][1][0]):\n #si la couleur de la face à aranger est à la droite de la dernière couleur faire c'est rotation\n self.rotation(self.getApproRot(tmp[0][1],self.cube.getFaceInversed(x[1-i][0]),inv))\n self.rotation(x[1-i][0].upper())\n self.rotation(self.getInvRot(inv.upper()))\n self.rotation(self.getInvRot(x[1-i][0].upper()))\n \n else :\n #si la couleur de la face à aranger est à la gauche de la dernière couleur faire c'est rotation\n self.rotation(self.getApproRot(tmp[0][1],self.cube.getFaceInversed(x[1-i][0]),inv))\n\n self.rotation(self.getInvRot(x[1-i][0].upper()))\n self.rotation(inv.upper())\n self.rotation(x[1-i][0].upper())\n else :\n #si la couleur de la face à aranger est à l'opposer de où elle devrait etre\n \n self.rotation(self.getApproRot(tmp[1][1],x[1][0],inv))\n m=self.cube.getMouv(tmp[1][1].upper())\n\n if tmp[2][1] == m[1][1][0] :\n self.rotation(self.getInvRot(x[0][0].upper()))\n self.rotation(inv.upper()+\"2\")\n self.rotation(x[0][0].upper())\n self.rotation(inv.upper()) \n self.rotation(self.getInvRot(x[0][0].upper()))\n self.rotation(self.getInvRot(inv.upper()))\n self.rotation(x[0][0].upper())\n else :\n self.rotation(self.getInvRot(x[1][0].upper()))\n self.rotation(inv.upper()+\"2\")\n self.rotation(x[1][0].upper())\n self.rotation(inv.upper()) \n self.rotation(self.getInvRot(x[1][0].upper()))\n self.rotation(self.getInvRot(inv.upper()))\n\n self.rotation(m[1][1][0].upper())\n\n\n \n \n \n \n \n\n self.rotation(x[1][0].upper())\n tab=self.checkCorner(nameFace)\n if(tab[0]==True):\n #on vérifie si les rotation que l'on à fait on suffit\n #c.a.d qu'aucun des cube mal placé est été sur la couronne de la face à modifier\n \n return 0\n else :\n for idx, x in enumerate (tab[1]) :\n \n tmp=self.cube.findCube([colorCorner,x[0][1],x[1][1]])\n #on trouve les cubes manquant \n \n for i in range (3) :\n if tmp[i][1] == nameFace :\n #ontrouve la face étant sur la face à modifier\n \n mtp=self.cube.getMouv(tmp[(i+1)%3][1].upper())\n\n\n #on va placer le cube pour qu'il soit sur la couronne opposer de là ou il est et qu'il puisse ainsi etre traiter par theCorner\n\n if tmp[(i+2)%3][1] == mtp[1][1][0] :\n self.rotation(tmp[(i+1)%3][1].upper())\n self.rotation(inv.upper())\n self.rotation(self.getInvRot(tmp[(i+1)%3][1].upper()))\n else:\n self.rotation(tmp[(i+2)%3][1].upper())\n self.rotation(inv.upper())\n self.rotation(self.getInvRot(tmp[(i+2)%3][1].upper()))\n i=(len(tab[1])) \n self.theCorner(nameFace)\n #on envoie la récursivité et normalement ça doit bien placé le cube deplacer sur la couronne opposer\n \n \n \n \n \n \n\n def rfjaune(self):\n cube=self.cube\n \n j=cube.down\n r=cube.front\n b=cube.right\n g=cube.left\n o=cube.back\n if not cube.faceFinished('d') :\n \n if b[2][2]==j[1][1] and r[2][0]==b[2][2] and j[0][2]==b[2][2] 
and j[2][0]==b[2][2] and j[2][2]!=b[2][2] and j[0][0]!=b[2][2]:\n self.applyCmd(\"F'RFL'F'R'FL\")\n \n elif b[2][0]==j[1][1] and b[2][2]==b[2][0] and j[0][0]==b[2][0] and j[2][0]==b[2][0] and j[2][2]!=b[2][0] and j[0][2]!=b[2][0] :\n self.applyCmd(\"F2UF'D2FU'F'D2F'\")\n \n elif b[2][0]==j[1][1] and r[2][0]==b[2][0] and j[2][2]==b[2][0] and j[0][0]!=b[2][0] and j[0][2]!=b[2][0] and j[2][0]!=b[2][0]:\n self.applyCmd(\"FDF'DFD2F'\")\n \n elif r[2][0]==j[1][1] and j[0][2]== r[2][0] and j[2][2]== r[2][0] and j[2][0]!= r[2][0] and j[0][0]!= r[2][0] :\n self.applyCmd(\"F'R'FL'F'RFL\")\n \n\n elif r[2][2]==j[1][1] and b[2][2]==r[2][2] and j[0][0]==r[2][2] and j[2][2]!=r[2][2] and j[2][0]!=r[2][2] and j[0][2]!=r[2][2]:\n self.applyCmd(\"FD2F'D'FD'F'\")\n \n elif r[2][2]==r[2][0] and r[2][2]==j[1][1] and j[0][0]!=r[2][0] and j[0][2]!=r[2][0] and j[2][0]!=r[2][0] and j[2][2]!=r[2][0] :\n self.applyCmd(\"BD2B2D'B2D'B2D2B\")\n \n \n elif j[1][1]==b[2][0] and j[0][0]!=b[2][0] and j[0][2]!=b[2][0] and j[2][0]!=b[2][0] and j[2][2]!=b[2][0] and r[2][0]!=r[2][2] and b[2][0]!=b[2][2] and g[2][0]!=g[2][2] and o[2][0]!=o[2][2] :\n self.applyCmd(\"FD2F2D'F2D'F2D2F\")\n \n else :\n self.rotation('D') \n self.rfjaune()\n \n else :\n return 1 \n \n############## PARTIE 2ND COURONNE #################################\n\n def checkscdcouronne(self):\n \n f = self.cube.getCentralColor('f')\n r = self.cube.getCentralColor('r')\n b = self.cube.getCentralColor('b')\n l = self.cube.getCentralColor('l')\n\n if self.cube.front[1] == [f,f,f] and self.cube.right[1] == [r,r,r] and self.cube.back[1] == [b,b,b] and self.cube.left[1] == [l,l,l]:\n return True\n else:\n return False\n\n def deuxcubeinv(self):\n #si 2 cubes sont inversé sur une 2 face opposées\n a = 0\n self.majcube()\n\n #si le cube bleu/rouge inversé avec le vert/rouge\n if (self.br[0][1] == 'l' and self.vr[0][1] == 'r') or (self.br[0][1] == 'f' and self.vr[0][1] == 'f') or (self.br[0][1] == 'l' and self.vr[0][1] == 'f') or (self.br[0][1] == 'f' and self.vr[0][1] == 'r'):\n a = \"F\"\n #si le cube bleu/rouge inversé avec le bleu/orange\n elif (self.br[1][1] == 'b' and self.bo[1][1] == 'f') or (self.br[1][1] == 'r' and self.bo[1][1] == 'r') or (self.br[1][1] == 'b' and self.bo[1][1] == 'r') or (self.br[1][1] == 'r' and self.bo[1][1] == 'f'):\n a = \"R\"\n #si le cube vert/orange inversé avec le bleu/orange\n elif (self.vo[0][1] == 'r' and self.bo[0][1] == 'l') or (self.vo[0][1] == 'b' and self.bo[0][1] == 'b') or (self.vo[0][1] == 'r' and self.bo[0][1] == 'b') or (self.vo[0][1] == 'b' and self.bo[0][1] == 'l'):\n a = \"B\"\n #si le cube vert/orange inversé avec le vert/rouge\n elif (self.vo[1][1] == 'f' and self.vr[1][1] == 'b') or (self.vo[1][1] == 'l' and self.vr[1][1] == 'l') or (self.vo[1][1] == 'f' and self.vr[1][1] == 'l') or (self.vo[1][1] == 'l' and self.vr[1][1] == 'b'):\n a = \"L\"\n if a!= 0:\n self.rotation(str(a)+str(2))\n self.rotation(\"D2\")\n self.rotation(str(a)+str(2))\n self.rotation(\"D2\")\n self.rotation(str(a)+str(2))\n\n#si le cube est au bon endroit mais les couleurs sont inversés\n def cubeinv(self):\n #si cube au bon endroit mais couleurs inversées\n self.majcube()\n a = 0\n b = 0\n \n if self.br[0][1] == 'f' and self.br[1][1] == 'r':\n a = \"F\"\n b = \"R\"\n if self.vr[0][1] == 'f' and self.vr[1][1] == 'l':\n a = \"L\"\n b = \"F\"\n if self.vo[0][1] == 'b' and self.vo[1][1] == 'l':\n a = \"B\"\n b = \"L\"\n if self.bo[0][1] == 'b' and self.bo[1][1] == 'r':\n a = \"R\"\n b = \"B\"\n if a != 0:\n self.rotation(str(a))\n 
self.rotation(\"D\")\n self.rotation(str(a)+\"'\")\n self.rotation(\"D2\")\n self.rotation(str(a))\n self.rotation(\"D2\")\n self.rotation(str(a)+\"'\")\n self.rotation(\"D\")\n self.rotation(str(b)+\"'\")\n self.rotation(\"D'\")\n self.rotation(str(b))\n \n#fonction finale pour la deuxième couronne regroupant toutes les méthodes \n def deuxcouronne(self):\n self.majcube()\n while self.checkscdcouronne()== False:\n while self.br[0][1] == 'd' or self.br[1][1] == 'd' or self.vr[0][1] == 'd' or self.vr[1][1] == 'd' or self.vo[0][1] == 'd' or self.vo[1][1] == 'd' or self.bo[0][1] == 'd' or self.bo[1][1] == 'd':\n self.deuxiemecouronne()\n if self.checkscdcouronne():\n break\n self.cubeinv()\n if self.checkscdcouronne():\n break\n self.deuxcubeinv()\n if self.checkscdcouronne():\n break\n if self.a == 4:\n self.a=0\n self.cubeinvenface()\n \n#cette fonction permet de récupérer l'emplacement des cubes en coins de la deuxième couronne\n#elle est utilisée en mise à jour lorsqu'il y a eu des changements sur le cube\n def majcube(self):\n #recupère la couleur des faces left, right, front, back\n f = self.cube.getCentralColor('f')\n r = self.cube.getCentralColor('r')\n b = self.cube.getCentralColor('b')\n l = self.cube.getCentralColor('l')\n #recupère les 4 coins de la deuxieme couronne, la couleur n'a pas d'importance. Les couleurs en commentaire sont la pour une meilleure visualisation\n self.br = self.cube.findCube([r, f]) #cube bleu/rouge\n self.vr = self.cube.findCube([l, f]) #vert/rouge\n self.vo = self.cube.findCube([l, b]) #cube vert/orange\n self.bo = self.cube.findCube([r, b]) #cube bleu/orange\n \n#cette fonction permet de débloquer des situations rares dans la dispositions des 4 coins de la 2eme couronne\n def cubeinvenface(self):\n #si le cube vert/rouge est inversé avec le cube bleu/orange\n #if (self.bo[0][1] == 'l' or self.bo[0][1] == 'f') and (self.vr[0][1] == 'r' or self.vr[0][1] == 'b'):\n if self.a == 0:\n self.rotation(\"D\")\n self.rotation(\"L\")\n self.rotation(\"D'\")\n self.rotation(\"L'\")\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n self.rotation(\"D\")\n self.rotation(\"F\")\n\n #si le cube vert/orange est inversé avec le cube bleu/rouge\n #elif (self.vo[0][1] == 'f' or self.vo[0][1] == 'r') and (self.br[0][1] == 'l' or self.br[0][1] == 'b'):\n if self.a == 1:\n #on doit faire basculer le cube a gauche/ au dessus du rouge\n self.rotation(\"D'\")\n self.rotation(\"R'\")\n self.rotation(\"D\")\n self.rotation(\"R\")\n self.rotation(\"D\")\n self.rotation(\"F\")\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n \n if self.a == 2:\n self.rotation(\"D\")\n self.rotation(\"R\")\n self.rotation(\"D'\")\n self.rotation(\"R'\")\n self.rotation(\"D'\")\n self.rotation(\"B'\")\n self.rotation(\"D\")\n self.rotation(\"B\")\n\n self.a += 1\n\n def deuxiemecouronne(self):\n #regarder les 4 coins au dessus et si il n'y a pas de jaune la bouger au bon endroit\n #cube bleu/rouge\n self.majcube()\n #on remet le cube bleu/rouge sur sa face correspondante \n if self.br[0][1] == 'd': #ici le cube bleu est sur la face down\n if self.br[1][1] == 'l':\n self.rotation(\"D\")\n \n elif self.br[1][1] == 'b':\n self.rotation(\"D2\")\n \n elif self.br[1][1] == 'r':\n self.rotation(\"D'\")\n \n #on doit faire basculer le cube a gauche/ au dessus du rouge\n self.rotation(\"D'\")\n self.rotation(\"R'\")\n self.rotation(\"D\")\n self.rotation(\"R\")\n self.rotation(\"D\")\n self.rotation(\"F\")\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n \n elif self.br[1][1] == 'd': #ici le cube rouge est sur 
la face down\n if self.br[0][1] == 'f':\n self.rotation(\"D\")\n \n elif self.br[0][1] == 'l':\n self.rotation(\"D2\")\n \n elif self.br[0][1] == 'b':\n self.rotation(\"D'\")\n \n #on doit faire basculer le cube a droite\n self.rotation(\"D\")\n self.rotation(\"F\")\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n self.rotation(\"D'\")\n self.rotation(\"R'\")\n self.rotation(\"D\")\n self.rotation(\"R\")\n\n \n #cube vert/rouge\n #on remet le cube vert/rouge sur sa face correspondante\n if self.vr[0][1] == 'd': #ici le cube vert est sur la face down\n \n if self.vr[1][1] == 'l':\n self.rotation(\"D\")\n \n elif self.vr[1][1] == 'b':\n self.rotation(\"D2\")\n \n elif self.vr[1][1] == 'r':\n self.rotation(\"D'\")\n \n #on doit faire basculer le cube a droite\n self.rotation(\"D\")\n self.rotation(\"L\")\n self.rotation(\"D'\")\n self.rotation(\"L'\")\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n self.rotation(\"D\")\n self.rotation(\"F\")\n \n elif self.vr[1][1] == 'd': #ici le cube rouge est sur la face down\n \n if self.vr[0][1] == 'f':\n self.rotation(\"D'\")\n \n elif self.vr[0][1] == 'r':\n self.rotation(\"D2\")\n \n elif self.vr[0][1] == 'b':\n self.rotation(\"D\")\n \n #on doit faire basculer le cube a gauche\n self.rotation(\"D'\")\n self.rotation(\"F'\")\n self.rotation(\"D\")\n self.rotation(\"F\")\n self.rotation(\"D\")\n self.rotation(\"L\")\n self.rotation(\"D'\")\n self.rotation(\"L'\")\n\n #cube vert/orange\n #on remet le cube vert/orange sur sa face correspondante \n if self.vo[0][1] == 'd': #ici le cube vert est sur la face down\n if self.vo[1][1] == 'f':\n self.rotation(\"D2\")\n \n elif self.vo[1][1] == 'l':\n self.rotation(\"D'\")\n\n elif self.vo[1][1] == 'r':\n self.rotation(\"D\")\n \n #on doit faire basculer le cube a gauche\n self.rotation(\"D'\")\n self.rotation(\"L'\")\n self.rotation(\"D\")\n self.rotation(\"L\")\n self.rotation(\"D\")\n self.rotation(\"B\")\n self.rotation(\"D'\")\n self.rotation(\"B'\")\n \n elif self.vo[1][1] == 'd': #ici le cube orange est sur la face down\n if self.vo[0][1] == 'f':\n self.rotation(\"D'\")\n \n elif self.vo[0][1] == 'r':\n self.rotation(\"D2\")\n \n elif self.vo[0][1] == 'b':\n self.rotation(\"D\")\n\n #on doit faire basculer le cube a droite\n self.rotation(\"D\")\n self.rotation(\"B\")\n self.rotation(\"D'\")\n self.rotation(\"B'\")\n self.rotation(\"D'\")\n self.rotation(\"L'\")\n self.rotation(\"D\")\n self.rotation(\"L\")\n\n #cube bleu/orange\n #on remet le cube bleu/orange sur sa face correspondante \n if self.bo[0][1] == 'd': #ici le cube bleu est sur la face down\n if self.bo[1][1] == 'f':\n self.rotation(\"D2\")\n \n elif self.bo[1][1] == 'l':\n self.rotation(\"D'\")\n\n elif self.bo[1][1] == 'r':\n self.rotation(\"D\")\n #on doit faire basculer le cube a droite\n self.rotation(\"D\")\n self.rotation(\"R\")\n self.rotation(\"D'\")\n self.rotation(\"R'\")\n self.rotation(\"D'\")\n self.rotation(\"B'\")\n self.rotation(\"D\")\n self.rotation(\"B\")\n \n elif self.bo[1][1] == 'd': #ici le cube orange est sur la face down\n if self.bo[0][1] == 'f':\n self.rotation(\"D\")\n\n elif self.bo[0][1] == 'b':\n self.rotation(\"D'\")\n \n elif self.bo[0][1] == 'l':\n self.rotation(\"D2\")\n \n #on doit faire basculer le cube a gauche\n self.rotation(\"D'\")\n self.rotation(\"B'\")\n self.rotation(\"D\")\n self.rotation(\"B\")\n self.rotation(\"D\")\n self.rotation(\"R\")\n self.rotation(\"D'\")\n self.rotation(\"R'\") \n\n############## PARTIE 2ND COURONNE #################################\n \n\n############## PARTIE CROIX 
JAUNE #################################\n\n#Dans la suite de l'algorithme, lorsque les fonctions font référence à la face jaune, il s'agit de la face sur laquelle nous utilisons notre algorithme, soit celle opposée à la face Up, donc Down.\n\n#Fonction qui renvoie quelle face est de la couleur recherchée\n#On compare avec la couleur de chaque face en [1][1] et donc au milieu\n#Fonction plus utilisée à la fin car on se base directement sur la face Down\n def whichIsColor(self,color):\n\n colorF = self.cube.up[1][1] \n if colorF == color :\n return \"u\"\n\n colorF = self.cube.down[1][1]\n if colorF == color :\n return \"d\"\n\n colorF = self.cube.right[1][1]\n if colorF == color :\n return \"r\"\n\n colorF = self.cube.left[1][1]\n if colorF == color :\n return \"l\"\n\n colorF = self.cube.back[1][1]\n if colorF == color :\n return \"b\"\n\n colorF = self.cube.front[1][1]\n if colorF == color:\n return \"f\"\n\n\n \n#Fonction qui vérifie si la croix non orienté est vérifiée\n def checkCrossNonOriente(self):\n posColor = 'd' #on trouve la position de la face jaune // au final on va se baser sur la base Down dans tous les cas\n listeColors=[self.cube.getCentralColor('f'),self.cube.getCentralColor('b'),self.cube.getCentralColor('r'),self.cube.getCentralColor('l')] #c'est la liste des couleurs composants les aretes avec une face jaune\n for i in range(len(listeColors)): #on parcout la liste des couleurs\n pos = self.cube.findCube([self.cube.getCentralColor('d'),listeColors[i][0][0]]) #on récupère la position des aretes\n if pos[0][1] != posColor: #si la position de la face jaune des aretes n'est pas sur la face jaune, alors la croix n'est pas vérifiée\n return False\n return True\n\n\n#on récupère l'emplacement des aretes entre la face Down et les faces adjacentes\n def checkEmplacement(self):\n posColor = 'd'\n listeColors=[self.cube.getCentralColor('f'),self.cube.getCentralColor('b'),self.cube.getCentralColor('r'),self.cube.getCentralColor('l')]\n listos=[]\n liste=[]\n for i in range(len(listeColors)):\n #plutot que leur couleur ; on va mettre la position de la couleur\n listos=[]\n pos = self.cube.findCube([self.cube.getCentralColor('d'),listeColors[i][0][0]])\n listos.append(pos[0][1])\n listos.append(pos[1][1])\n liste.append(listos) \n #liste contenant les listes des arêtes avec (1) : sur quelle face se trouve la partie Y de l'arete et (2) : de quelle couleur est l'autre partie\n return liste\n\n#fonction qui renvoie la face opposée, mais en majuscule pour éviter de devoir utiliser la fonction .upper() par la suite\n def opposite(self,face):\n if face == 'u':\n return 'D'\n elif face == 'd':\n return 'U'\n elif face == 'r':\n return 'L'\n elif face == 'l':\n return 'R'\n elif face == 'f':\n return 'B'\n elif face == 'b':\n return 'F'\n\n#fonction qui renvoie les faces a utiliser pour le cas 1 de la résolution croix jaune\n def case1(self):\n posY='d'\n index=['u','d','f','b','r','l'] #l'index servait ici dans le cas de posY différent de Down, mais nous ne l'avons pas utilisé au final\n liste=[['F','U','R'],['F','D','L'],['D','F','R'],['U','B','R'],['F','R','D'],['F','L','U']]\n return liste[index.index(posY)]\n\n#fonction qui renvoie les deux aretes dans le bon ordre pour le cas 2 de la croix jaune\n def case2(self,pos1,pos2):\n \n posY = 'd'\n index=['u','d','f','b','r','l'] #l'index servait ici dans le cas de posY différent de Down, mais nous ne l'avons pas utilisé au final\n 
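# Hedged equivalent of opposite() above as a lookup table (same mapping,
# result already upper-cased):
_OPPOSITE = {'u': 'D', 'd': 'U', 'r': 'L', 'l': 'R', 'f': 'B', 'b': 'F'}

def opposite(face):
    return _OPPOSITE[face]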
#function that returns the two edges in the right order for case 2 of the yellow cross\n    def case2(self,pos1,pos2):\n        \n        posY = 'd'\n        index=['u','d','f','b','r','l'] #the index was meant for the case where posY differs from Down, which we did not use in the end\n        liste=[['L','B','R','F'],['R','B','L','F'],['L','U','R','D'],['R','U','L','D'],['F','U','B','D'],['B','U','F','D']] #each sub-list matches the index entry of the same rank\n\n        ind=index.index(posY)\n        a=liste[ind].index(pos1.upper())\n        b=liste[ind].index(pos2.upper())\n        if a > b:\n            if liste[ind][(a+1)%4] != pos2.upper() or a < 3:\n                return [pos1,pos2]\n            else:\n                return [pos2,pos1] #the return order matters for the resolution step that follows this call\n        else:\n            if liste[ind][(b+1)%4] != pos1.upper() or b <3:\n                return [pos2,pos1]\n            else:\n                return [pos1,pos2]\n\n\n#function that returns the two edges in the right order for case 3 of the yellow cross\n    def case3(self,pos1):\n\n        posY='d'\n        index=['u','d','f','b','r','l'] #as for the others, there was an index for the up, down, etc. cases, but it is unused\n        liste=[['L','B','R','F'],['R','B','L','F'],['L','U','R','D'],['R','U','L','D'],['F','U','B','D'],['B','U','F','D']]\n        listeDroite=[['F','R','B','L'],['F','L','B','R'],['R','U','L','D'],['R','D','L','U'],['D','B','U','F'],['D','F','U','B']]\n        ind = index.index(posY)\n        ind1 = liste[ind].index(pos1.upper())\n        ind2 = listeDroite[ind].index(liste[ind][(ind1+1)%4])\n        return [liste[ind][(ind1+1)%4] , listeDroite[ind][(ind2+1)%4] , posY.upper()]\n\n\n    def resolutionCroixJaune(self):\n        \n\n        posY='d'\n\n        #adj: when one of the two faces can be reached from the other by a single rotation\n\n        dicAdj = [['u','r'],['l','u'],['r','d'],['d','l'],['l','f'],['b','l'],['r','b'],['f','r']]\n\n        #as long as the yellow cross is not done\n        while self.checkCrossNonOriente() != True:\n\n            \n            adj=False\n\n            #fetch the position of the yellow edges that sit on the yellow face\n            liste=self.checkEmplacement() \n            #list holding the placement of the edges whose yellow part is already on the yellow face\n            listeAretes=[]\n\n            #collect the position of the edges whose yellow part is on the yellow face\n            for i in range(len(liste)):\n                #if the yellow part of the edge is on the yellow face\n                if liste[i][0] == 'd':\n                    #then record the location of the other-coloured part\n                    listeAretes.append(liste[i][1])\n            #if only the central yellow sticker is there\n\n            if len(listeAretes) == 0 or len(listeAretes) == 3 or len(listeAretes)==1:\n\n                # CASE 1\n\n                tmp = self.case1()\n\n                self.rotation(tmp[0])\n                self.rotation(tmp[1])\n                self.rotation(tmp[2]) \n                self.rotation(tmp[1]+\"'\")\n                self.rotation(tmp[2]+\"'\")\n                self.rotation(tmp[0]+\"'\")\n                \n                \n\n                # D F R Fi Ri Di\n                #any of the layouts will do here\n\n            # F U R Ui Ri Fi\n            if len(listeAretes) == 2:\n\n                #case where the edges are 'adjacent'\n                for i in range(len(dicAdj)):\n\n                    if listeAretes[0] in dicAdj[i] and listeAretes[1] in dicAdj[i]: #check that we are in the adjacent case\n                        adj = True\n\n                if adj == True : #CASE 2\n\n                    tmp = self.case2(listeAretes[0],listeAretes[1]) #fetch the edges in the right order for our algorithm\n                    #then apply the rotations with respect to the right faces\n                    self.rotation(self.opposite(tmp[0]))\n                    self.rotation(posY.upper())\n                    self.rotation(self.opposite(tmp[1]))\n                    self.rotation(posY.upper() + \"'\")\n                    self.rotation(self.opposite(tmp[1])+\"'\")\n                    self.rotation(self.opposite(tmp[0])+\"'\")\n\n                # F U R Ui Ri Fi\n                else : #CASE 3\n                    tmp = self.case3(listeAretes[0])\n                    \n                    self.rotation(tmp[0])\n                    self.rotation(tmp[1])\n                    self.rotation(tmp[2])\n                    self.rotation(tmp[1]+\"'\") \n                    self.rotation(tmp[2]+\"'\")\n                    self.rotation(tmp[0]+\"'\")\n\n                # F R U Ri Ui Fi\n\n\n\n\n############## YELLOW CROSS SECTION ################################# \n\n## 
STEPS 6 & 7: FINISH ##\n    \n    \n    def lastStep(self) :\n        if self.cube.cubeFinished() == False :\n            # gather the variables needed for the resolution\n            faceJaune = self.getFaceJaune()\n            tabLiFaceChange = self.getTabLiFaceChange(faceJaune)\n\n            #check whether everything is already well placed (if so, a single rotation finishes the cube)\n            faceTest = tabLiFaceChange[0]\n            if faceJaune == 'u' :\n                index = 0\n            elif faceJaune == 'd' :\n                index = 2\n\n            counter = 0\n            for i in tabLiFaceChange :\n                if self.cube.getFace(i)[index][0] == self.cube.getFace(i)[index][1] == self.cube.getFace(i)[index][2] :\n                    counter += 1\n            if counter == 4 :\n                for i in range(3) :\n                    if self.cube.getCentralColor(tabLiFaceChange[i+1]) == self.cube.getFace(faceTest)[index][0] :\n                        faceDest = tabLiFaceChange[i+1]\n\n                self.rotation(self.getApproRot(faceTest,faceDest,faceJaune))\n                return\n            #check if ...\n            \n            tabParc = self.getTabParc(faceJaune)\n\n            # solve the corners and the edges\n            self.putCornerLastFace(faceJaune, tabParc, tabLiFaceChange)\n            \n            if self.cube.cubeFinished() == False :\n                self.putAreteLastFace(faceJaune, tabParc, tabLiFaceChange)\n\n    def getFaceJaune(self) :\n        faceBlanche = ''\n        faceJaune = ''\n\n        # find the two \"finished\" faces so we know which faces to work on next\n        for face in self.cube.liFace :\n            faceBlanche = face\n            faceJaune = self.cube.getFaceInversed(face)\n            if self.cube.faceFinished(faceBlanche) and self.cube.faceFinished(faceJaune):\n                break\n\n        # if the two faces are swapped\n        counter = 0\n        tabTemp = []\n        for i in self.cube.liFace :\n            if i != 'u' and i != 'd' :\n                faceTest = self.cube.getFace(self.cube.liFace[(self.cube.liFace.index(i)+2)%6])\n                if (faceJaune == 'u' and faceTest[0][0] == faceTest[0][1] == faceTest[0][2] == faceTest[1][1]) or ((faceJaune == 'd') and faceTest[2][0] == faceTest[2][1] == faceTest[2][2] == faceTest[1][1]) :\n                    counter += 1\n\n        if counter == 4 :\n            temp = faceJaune\n            faceJaune = faceBlanche\n            faceBlanche = temp\n\n        return faceJaune\n    \n    def getTabParc(self, faceJaune) :\n        # tabParc is used to walk the cube and find the misplaced cubies on the 4 faces still to finish\n        if faceJaune == 'u' :\n            tabParc = [0,'x']\n        \n        elif faceJaune == 'd' :\n            tabParc = [2,'x']\n        \n        elif faceJaune == 'l' or faceJaune == 'f' :\n            tabParc = ['x',0]\n        \n        elif faceJaune == 'r' or faceJaune == 'b' :\n            tabParc = ['x',2]\n\n        return tabParc\n\n    def getTabLiFaceChange(self, faceJaune) :\n        # this table holds the unfinished faces; it lets us abstract away from the colours,\n        # because the moves needed to solve the cube are symmetric\n        if faceJaune == 'd' or faceJaune == 'u' :\n            return ['l','f','r','b']\n        elif faceJaune == 'l' or faceJaune == 'r' :\n            return ['f','u','b','d']\n        else :\n            return ['d','r','u','l']\n    \n    def putCornerLastFace(self, faceJaune, tabParc, tabLiFaceChange) :\n        \n        tabMiniReplace = []\n        \n        #at this stage of the resolution it is possible to place two corners correctly,\n        #so loop until only two badly placed corners are left:\n        while len(tabMiniReplace) != 4 :\n            tabMiniReplace = []\n            for i in range(4) :\n                faceEnCours = self.cube.getFace(tabLiFaceChange[i])\n                if tabParc[0] != 'x' :\n                    if faceEnCours[tabParc[0]][0] != faceEnCours[(tabParc[0]+1)%2][0] :\n                        tabMiniReplace.append([i,tabParc[0],0])\n                    if faceEnCours[tabParc[0]][2] != faceEnCours[(tabParc[0]+1)%2][2] :\n                        tabMiniReplace.append([i,tabParc[0],2])\n                else :\n                    if faceEnCours[0][tabParc[1]] != faceEnCours[0][(tabParc[1]+1)%2] :\n                        tabMiniReplace.append([i,0,tabParc[1]])\n                    if faceEnCours[2][tabParc[1]] != 
faceEnCours[2][(tabParc[1]+1)%2] :\n                        tabMiniReplace.append([i,2,tabParc[1]])\n            # if the corners are already all well placed\n            if len(tabMiniReplace) == 0 :\n                return\n            #(otherwise) turn the \"yellow\" (unsolved) face\n            if len(tabMiniReplace) != 4 :\n                self.rotation(faceJaune.upper())\n\n        # case 1: the two cubies to swap are on the same face\n        # case 2: the two cubies to swap are opposite corners\n        cas1 = False\n        for i in range(4):\n            if tabMiniReplace[i][0] == tabMiniReplace[(i+1)%4][0] or tabMiniReplace[i][0] == tabMiniReplace[(i+2)%4][0] or tabMiniReplace[i][0] == tabMiniReplace[(i+3)%4][0] :\n                cas1 = True\n                faceChange = tabMiniReplace[i][0]\n                break\n\n        if faceJaune == 'd' or faceJaune == 'r' or faceJaune == 'b' :\n            sensRotation = 1\n        else :\n            sensRotation = -1\n\n#CASE 1 : \n        if cas1 :\n            self.rotation(tabLiFaceChange[faceChange].upper())\n            self.rotation(faceJaune.upper())\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n            self.rotation(faceJaune.upper()+'\\'')\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n            self.rotation(tabLiFaceChange[(faceChange+(1*sensRotation))%4].upper())\n            self.rotation(tabLiFaceChange[faceChange].upper()+'2')\n            self.rotation(faceJaune.upper()+'\\'')\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n            self.rotation(faceJaune.upper()+'\\'')\n            self.rotation(tabLiFaceChange[faceChange].upper())\n            self.rotation(faceJaune.upper())\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n            self.rotation(tabLiFaceChange[(faceChange+(1*sensRotation))%4].upper()+'\\'')\n        else :\n            for i in range(4):\n                if tabParc[0] != 'x' :\n                    if tabMiniReplace[i][2] == 2 - tabParc[0] :\n                        faceChange = tabMiniReplace[i][0]\n                        break\n                else : \n                    if tabMiniReplace[i][1] == tabParc[1] :\n                        faceChange = tabMiniReplace[i][0]\n                        break\n#CASE 2 :\n            self.rotation(tabLiFaceChange[faceChange].upper())\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper())\n            self.rotation(faceJaune.upper()+'\\'') \n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper()+'\\'')\n            self.rotation(faceJaune.upper()+'\\'')\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper())\n            self.rotation(faceJaune.upper())\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper()+'\\'')\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper())\n            self.rotation(faceJaune.upper())\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper()+'\\'')\n            self.rotation(faceJaune.upper()+'\\'')\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper()+'\\'')\n            self.rotation(tabLiFaceChange[faceChange].upper())\n            self.rotation(tabLiFaceChange[(faceChange+(3*sensRotation))%4].upper())\n            self.rotation(tabLiFaceChange[faceChange].upper()+'\\'')\n\n    def putAreteLastFace(self, faceJaune, tabParc, tabLiFaceChange) :\n        if faceJaune == 'd' or faceJaune == 'r' or faceJaune == 'b' :\n            sensRotation = 1\n        else :\n            sensRotation = -1\n\n        faceOpposeFinie = None\n        for i in range(4) :\n            if self.cube.faceFinished(tabLiFaceChange[i]) :\n                faceOpposeFinie = self.cube.getFaceInversed(tabLiFaceChange[i])\n                break\n\n        if faceOpposeFinie != None :\n            if tabParc[0] == 'x' :\n                couleurMiniCube = self.cube.getFace(faceOpposeFinie)[1][tabParc[1]]\n            else :\n                couleurMiniCube = self.cube.getFace(faceOpposeFinie)[tabParc[0]][1]\n            \n\n            faceSuivanteOF = tabLiFaceChange[(tabLiFaceChange.index(faceOpposeFinie)+(1*sensRotation))%4]\n\n            # CASE 2\n            #R' U R' U' R' U' R' U R U R2\n            if 
couleurMiniCube == self.cube.getCentralColor(self.cube.liFace[self.cube.liFace.index(faceSuivanteOF)]) :\n                self.rotation(faceSuivanteOF.upper()+'\\'')\n                self.rotation(faceJaune.upper())\n                self.rotation(faceSuivanteOF.upper()+'\\'')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(faceSuivanteOF.upper()+'\\'')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(faceSuivanteOF.upper()+'\\'')\n                self.rotation(faceJaune.upper())\n                self.rotation(faceSuivanteOF.upper())\n                self.rotation(faceJaune.upper())\n                self.rotation(faceSuivanteOF.upper()+'2')\n\n            # CASE 1\n            # R2 U' R' U' R U R U R U' R\n            else :\n                self.rotation(faceSuivanteOF.upper()+'2')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(faceSuivanteOF.upper()+'\\'')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(faceSuivanteOF.upper())\n                self.rotation(faceJaune.upper())\n                self.rotation(faceSuivanteOF.upper())\n                self.rotation(faceJaune.upper())\n                self.rotation(faceSuivanteOF.upper())\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(faceSuivanteOF.upper())\n\n        else :\n            if tabParc[0] == 'x' :\n                couleurMiniCube = self.cube.getFace(tabLiFaceChange[2])[1][tabParc[1]]\n                index = 2\n            else :\n                couleurMiniCube = self.cube.getFace(tabLiFaceChange[0])[tabParc[0]][1]\n                index = 0\n\n            # CASE 1\n            # M2 U M2 U2 M2 U M2 \n            if couleurMiniCube == self.cube.getFace(self.cube.getFaceInversed(tabLiFaceChange[index]))[1][1] :\n                # L2 + R2 = M2, or B2 + F2 = M2, but this swaps the top and bottom faces\n                self.rotation(tabLiFaceChange[0].upper()+'2')\n                self.rotation(tabLiFaceChange[2].upper()+'2')\n                self.rotation(self.cube.getFaceInversed(faceJaune).upper())\n                self.rotation(tabLiFaceChange[0].upper()+'2')\n                self.rotation(tabLiFaceChange[2].upper()+'2')\n                self.rotation(faceJaune.upper()+'2')\n                self.rotation(tabLiFaceChange[0].upper()+'2')\n                self.rotation(tabLiFaceChange[2].upper()+'2')\n                self.rotation(self.cube.getFaceInversed(faceJaune).upper())\n                self.rotation(tabLiFaceChange[0].upper()+'2')\n                self.rotation(tabLiFaceChange[2].upper()+'2')\n\n                self.nbCmd -= 4 \n\n            # CASE 2\n            # U R' U' R U' R U R U' R' U R U R2 U' R' U\n            else :\n                if self.cube.getFace(tabLiFaceChange[0])[2][1] == self.cube.getFace(tabLiFaceChange[1])[1][1] :\n                    face = 0\n                else :\n                    face = 1\n                \n                self.rotation(faceJaune.upper())\n                self.rotation(tabLiFaceChange[face].upper()+'\\'')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(tabLiFaceChange[face].upper())\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(tabLiFaceChange[face].upper())\n                self.rotation(faceJaune.upper())\n                self.rotation(tabLiFaceChange[face].upper())\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(tabLiFaceChange[face].upper()+'\\'')\n                self.rotation(faceJaune.upper())\n                self.rotation(tabLiFaceChange[face].upper())\n                self.rotation(faceJaune.upper())\n                self.rotation(tabLiFaceChange[face].upper()+'2')\n                self.rotation(faceJaune.upper()+'\\'')\n                self.rotation(tabLiFaceChange[face].upper()+'\\'')\n                self.rotation(faceJaune.upper())\n                \n## STEPS 6 & 7: FINISH ##\n        \n
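# resolutionFinale chains the solving stages on a cube described by a 54-character facelet string (the default below is a solved cube) and returns the move list accumulated in resolution.liCmd.\n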
def resolutionFinale(strcu=\"WWWWWWWWWGGGRRRBBBOOOGGGRRRBBBOOOGGGRRRBBBOOOYYYYYYYYY\",entry=''):\n    cube = Cube(strcu)\n    resolution = Resolution(cube)\n    resolution.applyCmd(entry)\n    resolution.theCross('u')\n    resolution.theCorner('u')\n    resolution.deuxcouronne()\n    resolution.resolutionCroixJaune()\n    resolution.rfjaune()\n    resolution.lastStep()\n    return (resolution.liCmd)\n","sub_path":"Resolution.py","file_name":"Resolution.py","file_ext":"py","file_size_in_byte":54764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490568295","text":"import networkx as nx  # needed for the nx.* calls below\n\nG = nx.MultiGraph()  # or MultiDiGraph\nkey = G.add_edge(0, 1, key='a', weight=7)\nG[0][1]['a']  # key='a' -> {'weight': 7}\nG.edges[0, 1, 'a']  # key='a' -> {'weight': 7}\n#Warning: we protect the graph data structure by making G.edges and G[1][2] read-only dict-like structures.\n#However, you can assign values to attributes in e.g. G.edges[1, 2, 'a'] or G[1][2]['a'] using an\n#additional bracket as shown next. You need to specify all edge info to assign to the edge data associated with an\n#edge.\nG[0][1]['a']['weight'] = 10\nG.edges[0, 1, 'a']['weight'] = 10\nG[0][1]['a']['weight']  # -> 10\nG.edges[1, 0, 'a']['weight']  # -> 10\n\nG = nx.MultiGraph()  # or MultiDiGraph\nnx.add_path(G, [0, 1, 2, 3])\nG.get_edge_data(0, 1)  # -> {0: {}}\ne = (0, 1)\nG.get_edge_data(*e)  # tuple form -> {0: {}}\nG.get_edge_data('a', 'b', default=0)  # edge not in graph, return 0\n","sub_path":"Multidigraph/Python/get_edge_data.py","file_name":"get_edge_data.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"65251896","text":"import numpy as np\nfrom rtx.plane import Plane\nfrom rtx.ray import Ray\nfrom rtx.sphere import Sphere\nfrom rtx.ellipse import Ellipse\nfrom rtx.util.util import transform3d_z\nimport rtx.render.two_dim_render as two_dim_render\nimport rtx.render.three_dim_render as three_dim_render\n\n\ndef show_two_dim_render_demo():\n    ray = Ray(2, np.array([3, 6]), np.array([-1, -7]))\n    plane = Plane(2, 1.4, 1, np.array([-6, -1]), np.array([-2, -2]))\n    two_dim_render.plane_render(ray, plane)\n\n    r1 = Ray(2, np.array([-2, 2]), np.array([1, 1]))\n    r2 = Ray(2, np.array([7, 5]), np.array([1, 2]))\n    sphere1 = Sphere(2, 1, 1.8, np.array([5, 6]), 4.3)\n    two_dim_render.sphere_render(r1, sphere1, 100)\n\n    sphere2 = Sphere(2, 1, 1.8, np.array([5, 6]), 4)\n    two_dim_render.sphere_render(r2, sphere2, 100)\n\n    ellipse1 = Ellipse(2, 1.2, 1, np.array([5, 6]), np.array([60, 30]))\n    two_dim_render.ellipse_render(r1, ellipse1, 300)\n\n    ellipse2 = Ellipse(2, 1.2, 1, np.array([15, 25]), np.array([6, 10]))\n    two_dim_render.ellipse_render(r2, ellipse2, 200)\n\n\ndef three_d_plane():\n    ray = Ray(3, np.array([4, 1, -10]), np.array([0, 1, 1]))\n    plane = Plane(3, 1, 2, np.array([5, 5, 10]), np.array([0, 0, 1]))\n    three_dim_render.plane_render(ray,\n                                  plane,\n                                  figsize=(7, 7),\n                                  plane_length=30,\n                                  ray_length=30,\n                                  normal_length=30,\n                                  xlim=(-15, 35),\n                                  ylim=(-15, 35),\n                                  zlim=(-10, 60))\n\n\ndef three_d_multiple_plane():\n    ray_point = np.array([0, 0, 70])\n    plane = Plane(3, 1.230, 1, np.array([0, 0, 10]), np.array([0, 0, 1]))\n    ray_direction = np.array([40, 40, -100])\n    rays = [Ray(3, ray_point, transform3d_z(i).dot(ray_direction)) for i in np.linspace(0, 2 * np.pi, 150)]\n    three_dim_render.multiple_rays_plane_render(rays,\n                                                plane,\n                                                figsize=(7, 7),\n                                                plane_length=70,\n                                                ray_length=40,\n                                                xlim=(-60, 60),\n                                                ylim=(-60, 60),\n                                                zlim=(-20, 100))\n\n\ndef three_d_sphere():\n    ray = Ray(3, np.array([0, 5, 0]), np.array([-1.3, 1, 0.5]))\n    sphere = Sphere(3, 1.23, 1, np.array([0, 0, 0]), 20)\n    three_dim_render.sphere_render(ray,\n                                   sphere,\n                                   100,\n                                   sphere_color=\"#470180\",\n                                   ray_color=\"#0245d4\",\n                                   reflected_color=\"#ffc400\",\n                                   refracted_color=\"#ff0000\",\n                                   ray_length=20,\n                                   figsize=(7, 7),\n                                   xlim=(-30, 30),\n                                   ylim=(-30, 
30),\n zlim=(-30, 30))\n\n\ndef three_d_ellipse():\n ray = Ray(3, np.array([0, 5, 0]), np.array([-1.3, 1, 0.5]))\n ellipse = Ellipse(3, 1.23, 1, np.array([0, 0, 0]), np.array([12, 30, 12]))\n three_dim_render.ellipse_render(ray,\n ellipse,\n 100,\n sphere_color=\"#470180\",\n ray_color=\"#0245d4\",\n reflected_color=\"#ffc400\",\n refracted_color=\"#ff0000\",\n ray_length=20,\n figsize=(7, 7),\n xlim=(-30, 30),\n ylim=(-30, 30),\n zlim=(-30, 30))\n\n\ndef show_three_dim_render_demo():\n three_d_plane()\n three_d_multiple_plane()\n three_d_sphere()\n three_d_ellipse()\n\n\nif __name__ == \"__main__\":\n show_two_dim_render_demo()\n show_three_dim_render_demo()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"507808250","text":"# https://www.geeksforgeeks.org/python-get-key-with-maximum-value-in-dictionary/\n# https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary\nimport operator\n\nclass PythonDict_GetKeyWithMaxValue:\n\n def dict_getKeyWithMaxValue_1(self, d:dict):\n print(\" using max function\")\n maxVal = max(d, key=d.get)\n print(\" maxVal -> \", maxVal)\n\n def dict_getKeyWithMaxValue_2(self, d:dict):\n print(\" using operator\")\n d_items = d.items()\n maxV1 = max(d_items, key=operator.itemgetter(0))[0]\n maxV2 = max(d_items, key=operator.itemgetter(1))[0]\n print(\" maxV1 -> \", maxV1, \" maxV2 -> \", maxV2)\n\n\n def dict_getKeyWithMaxValue_3(self, d: dict):\n print(\" get list of keys, values, and get index of max value, the use that index to get corresponding key in list(keys\")\n d_values = list(d.values())\n d_keys = list(d.keys())\n\n max_d_value = max(d_values)\n index_maxval = d_values.index(max_d_value)\n print(\" d_keys[index_maxval] -> \", d_keys[index_maxval])\n\n def dict_getMaxValueAndKey(self):\n pass\n\n\n\nsol = PythonDict_GetKeyWithMaxValue()\nd = {'A':121, 'B':45, 'C':10, 'D':119}\n# sol.dict_getKeyWithMaxValue_1(d)\n# sol.dict_getKeyWithMaxValue_2(d)\nsol.dict_getKeyWithMaxValue_3(d)\n","sub_path":"python_examples/PythonDict_GetKeyWithMaxValue.py","file_name":"PythonDict_GetKeyWithMaxValue.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"654228190","text":"r\"\"\"\nMachine learning - Radiogaga.\n\nThis file belongs to Joachim Blom Hansen, Rasmus Jessen Aaskov and Soren\nTrads Steen.\n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\n\ndef do_pca(radiokanal, time, connection):\n \"\"\"Compute PCA and return data projected onto 2 principal directions.\n\n Arguments:\n radiokanal: the radio channel to be examined.\n time: ex '%2014-11-11%'.\n connection: connection to database, see class MySQLConnection.\n\n Output:\n a NumPy array with track and artist in two first columns and the\n projection onto the principal directions in the last two.\n .csv files with radio station followed by:\n X.csv includes only bpm and moods.\n X_pca.csv includes only first two PCA coordinates.\n Y_pca.csv includes track, artist and PCA coordinates.\n\n \"\"\"\n sqlstring = \"\"\"SELECT track, artist, bpm, angry, happy, relaxed, sad\n FROM {0} WHERE history LIKE {1} AND bpm NOT IN (0) AND\n angry NOT IN (0) \"\"\".format(radiokanal, time)\n connection.cursor.execute(sqlstring)\n data = connection.cursor.fetchall()\n Y = np.array(data)\n X = Y[:, 2:]\n pca = PCA(n_components=2)\n X_pca = 
pca.fit_transform(X)\n Y_pca = np.concatenate((Y[:, :2], X_pca), axis=1)\n np.savetxt(str(radiokanal)+'_Y_pca.csv', Y_pca, delimiter=',', fmt=\"%s\")\n np.savetxt(str(radiokanal)+'_X.csv', X, delimiter=',', fmt='%s')\n np.savetxt(str(radiokanal)+'_X_pca.csv', X_pca, delimiter=',', fmt='%s')\n return(Y_pca)\n\n\ndef EJ(a, b):\n \"\"\"Calculate the Extended Jaccard similarity meassure.\n\n Arguments:\n a,b: two list of same length\n\n Output:\n the value of EJ\n \"\"\"\n numerator = np.dot(a, b)\n denominator = np.sum(np.power(a, 2)) + np.sum(np.power(b, 2)) - numerator\n if np.sum(numerator+denominator) == 0:\n value = 1\n else:\n value = np.divide(numerator, denominator)\n return(value)\n","sub_path":"ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"418191632","text":"\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\n\ndef gen_navbar(brand, items,\n barClass='navbar-dark bg-dark p-1',\n brandClass='col-sm-3 col-md-2 mr-0',\n listClass='px-3',\n itemLiClass='text-nowrap',\n itemAClass=''):\n item_list = []\n for key in items:\n item_list.append(\n html.Li(\n html.A(key, href=items[key],\n className=f\"nav-link {itemAClass}\"),\n className=f\"nav-item {itemLiClass}\"\n )\n )\n return html.Nav(\n [\n html.A(brand, className=f\"navbar-brand {brandClass}\"),\n html.Ul(item_list, className=f\"navbar-nav {listClass}\")\n ], className=f\"navbar {barClass}\"\n )\n\n\n\ndef gen_sidebar_layout(sidebar, content, sidebar_size=2,\n sidebarClass='bg-light p-5', contentClass='', mainClass=''):\n return html.Div(\n [html.Div(sidebar, className=f\"sidebar col-md-{sidebar_size} {sidebarClass}\"),\n html.Div(content, className=f\"col-md-{12-sidebar_size} {contentClass}\")],\n className=f\"row {mainClass}\"\n )\n\n\n\ndef gen_grid(items, gridClass='', colClass='', rowClass=''):\n rows = []\n for row in items:\n cols = []\n size = int(12 / len(row))\n for col in row:\n cols.append(html.Div(col, className=f\"col-md-{size} {colClass}\"))\n rows.append(html.Div(cols, className=f\"row {rowClass}\"))\n return html.Div(rows, className=f\"{gridClass}\")\n\n\n\ndef gen_card(text, id=None, title='', cardClass='border-light', \n textClass='text-center', titleClass='text-center'):\n return html.Div([\n html.Div([\n html.H5(title, className=f'card-title {titleClass}'),\n html.P(text, id=id, className=f'card-text {textClass}')\n ], className='card-body')\n ], className=f'card {cardClass}')\n\n","sub_path":"layout_helpers.py","file_name":"layout_helpers.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"360307099","text":"# -*- coding: utf-8 -*-\nfrom twisted.internet import defer\nfrom lxml import etree, html\nfrom pc.couch import couch, designID\n\nsimple_titles = {\n '/howtochoose':u' Как выбирать компьютер',\n '/howtouse':u'Как пользоваться сайтом',\n '/howtobuy':u'Как покупать',\n '/warranty':u'Гарантии',\n '/support':u'Поддержка',\n '/about':u'Про магазин',\n '/whyauth':u'Зачем нужна авторизация',\n '/upgrade_set':u'Наборы для апгрейда',\n}\n\ndef simplePage(template, skin, request):\n if request.path in simple_titles:\n title = skin.root().xpath('//title')[0]\n title.text = simple_titles[request.path]\n\n # skin.top = template.top\n # skin.middle = template.middle\n # skin.root().xpath('//div[@id=\"gradient_background\"]')[0].set('style','min-height: 190px;')\n # 
skin.root().xpath('//div[@id=\"middle\"]')[0].set('class','midlle_how')\n for el in template.top:\n skin.top.append(el)\n for el in template.middle:\n skin.middle.append(el)\n d = defer.Deferred()\n d.addCallback(lambda some:skin.render())\n d.callback(None)\n return d\n\nparts_aliases = {\n 'motherboard':('how_7388', u'Как выбирать материнскую плату'),\n 'processor':('how_7399', u'Как выбирать процессор'),\n 'video':('how_7396', u'Как выбирать видеокарту'),\n 'hdd':('how_7394', u'Как выбирать жесткий диск'),\n 'ram':('how_7369', u'Как выбирать память'),\n 'case':('how_7387', u'Как выбирать корпус'),\n 'display':('how_7390', u'Как выбирать монитор'),\n 'keyboard':('how_7389', u'Как выбирать клавиатуру'),\n 'mouse':('how_7383', u'Как выбирать мышь'),\n 'audio':('how_7406', u'Как выбирать аудиосистему'),\n}\n\ndef renderPartPage(doc, header, template, skin):\n\n container = template.middle.find('div')\n\n els = html.fragments_fromstring(doc['html'])\n container.text = ''\n for el in els:\n if type(el) is unicode:\n container.text +=el\n else:\n container.append(el)\n template.top.find('h1').text = header\n if doc['_id'] == 'how_7396':\n video_link = etree.Element('a')\n video_link.set('href','/videocard')\n video_link.text=u'Лучшие видеокарты для апгрейда'\n video_link.tail = u' собраны на '\n\n video_link1 = etree.Element('a')\n video_link1.set('href','/videocard')\n video_link1.text=u'отдельной странице.'\n\n d = etree.Element('div')\n d.append(video_link)\n d.append(video_link1)\n\n template.top.find('h1').getparent().insert(1,d)\n\n title = skin.root().xpath('//title')[0]\n title.text = header\n for el in template.top:\n skin.top.append(el)\n for el in template.middle:\n skin.middle.append(el)\n \n return skin\n\ndef partPage(template, skin, request):\n name = request.path.split('/')[-1]\n d = couch.openDoc(parts_aliases[name][0])\n d.addCallback(renderPartPage, parts_aliases[name][1], template, skin)\n d.addCallback(lambda some:some.render())\n return d\n\n","sub_path":"simple_pages.py","file_name":"simple_pages.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"603644056","text":"from pulumi_aws.iam import (\n Role,\n RolePolicyAttachment,\n get_policy_document,\n get_policy,\n GetPolicyDocumentStatementArgs,\n GetPolicyDocumentStatementPrincipalArgs,\n)\nfrom pulumi import ResourceOptions\nfrom data_engineering_pulumi_components.utils import Tagger\n\ntagger = Tagger(environment_name=\"dev\")\n\nexecution_role_assume_role_policy = get_policy_document(\n statements=[\n GetPolicyDocumentStatementArgs(\n effect=\"Allow\",\n actions=[\"sts:AssumeRole\"],\n principals=[\n GetPolicyDocumentStatementPrincipalArgs(\n type=\"Service\",\n identifiers=[\"airflow-env.amazonaws.com\", \"airflow.amazonaws.com\"],\n )\n ],\n )\n ]\n)\n\nexecutionRole = Role(\n resource_name=\"airflow\",\n assume_role_policy=execution_role_assume_role_policy.json,\n description=\"Execution role for Airflow\",\n name=\"AmazonMWAAExecutionRole\",\n tags=tagger.create_tags(\"AmazonMWAAExecutionRole\"),\n)\n\npolicy = get_policy(arn=\"arn:aws:iam::aws:policy/AdministratorAccess\")\n\nRolePolicyAttachment(\n resource_name=\"airflow\",\n policy_arn=policy.arn,\n role=executionRole.name,\n opts=ResourceOptions(parent=executionRole),\n)\n\n","sub_path":"airflow/iam.py","file_name":"iam.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"199495712","text":"import os\nfrom flask import Flask, render_template, Response\nfrom camera import Camera\n\napp = Flask(__name__)\ncamera = Camera(\"http://192.168.0.23:8080/shot.jpg\")\n\n@app.route('/')\ndef index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\ndef gen(camera):\n \"\"\"Video streaming generator function.\"\"\"\n for frame in camera.frames():\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n'\n yield frame\n yield b'\\r\\n\\r\\n'\n\n\n@app.route('/video_feed')\ndef video_feed():\n \"\"\"Video streaming route. Put this in the src attribute of an img tag.\"\"\"\n return Response(gen(camera),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', threaded=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97650641","text":"# https://www.codewars.com/kata/5264d2b162488dc400000001\n\n\ndef spin_words(sentence):\n result = [x[::-1] if len(x) >= 5 else x for x in sentence.split()]\n\n return ' '.join(result)\n\n\nif __name__ == '__main__':\n print(spin_words('This is another test'))","sub_path":"spin_words.py","file_name":"spin_words.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"99616414","text":"# import required modules\nimport numpy as np\nimport math\nfrom sklearn import linear_model\nimport operator\n# ======================\n\n\ndef var_y(data, lag=1):\n \"\"\"\n :param data: volatiltiies dataframe with Date\n :param lag: the lags of AR\n :return: for example, if lag = 4, then the first part of 4 periods would be\n # deleted because we cannot estimate them\n \"\"\"\n y = data.drop(\"Date\", axis=1).drop(range(lag)).values.transpose()\n return y\n\n\ndef shift_right(data, n_shift):\n \"\"\"\n :param data: matrix without Date\n :param n_shift: the number of lags\n :return: shifted data (lag is the number of position to shift and filled\n # the blank with NA)\n \"\"\"\n nrow = data.shape[0]\n ncol = data.shape[1]\n matrix_na = np.empty((nrow, n_shift,))\n matrix_na[:] = np.nan\n matrix_num = np.delete(data, [list(range(ncol - n_shift, ncol))], 1)\n result = np.concatenate((matrix_na, matrix_num), axis=1)\n return result\n\n\ndef var_x(data, lag=1):\n \"\"\"\n :param data: volatiltiies dataframe with Date\n :param lag: the lag od AR\n :return: a matrix with stacked shifted past period data\n \"\"\"\n y = data.drop(\"Date\", axis=1).values.transpose()\n x = y\n if lag > 1:\n for i in range(1, lag):\n x_down = shift_right(y, i)\n x = np.concatenate((x, x_down), 0)\n x = np.delete(x, range(lag - 1), 1)\n x = np.delete(x, x.shape[1] - 1, 1)\n return x\n\n\ndef ols(sy, sx):\n \"\"\"\n :param sy: output of var_y\n :param sx: output of var_x\n :return: ols estimated coef\n \"\"\"\n a = np.dot(sy, sx.transpose())\n b = np.linalg.inv(np.dot(sx, sx.transpose()))\n coef_result = np.dot(a, b)\n return coef_result\n\n\ndef mle_sigma(sy, sx, coef):\n t = sx.shape[1]\n std = sy - np.dot(coef, sx)\n mle_sigma_result = np.dot(std, std.transpose()) / t\n return mle_sigma_result\n\n\ndef aic(mle_sigma_estimates, coef, t):\n length = coef.shape[0] * coef.shape[1]\n eq_0 = np.count_nonzero(coef == 0)\n length = length - eq_0\n aic_result = math.log(np.linalg.det(mle_sigma_estimates)) + 2 / t * length\n return aic_result\n\n\ndef 
lag_chooser(data, max_lag):\n    list_aic = []\n    for i in range(1, max_lag + 1):\n        sx = var_x(data, i)\n        sy = var_y(data, i)\n        t = sx.shape[1]\n        reg = linear_model.LinearRegression(fit_intercept=False)\n        reg.fit(sx.transpose(), sy.transpose())\n        coef = reg.coef_\n        mle_sigma_result = mle_sigma(sy, sx, coef)\n        aic_result = aic(mle_sigma_result, coef, t)\n        list_aic.append(aic_result)\n    index, value = min(enumerate(list_aic), key=operator.itemgetter(1))\n    return index + 1, value\n\n\nclass Coef:\n\n    def __init__(self, data, max_lag):\n        # The variables we need to launch this class\n        # the lag chosen by lag_chooser\n        self.Lag = lag_chooser(data, max_lag)\n        # the x and y to calculate coef\n        self.x = var_x(data, self.Lag[0])\n        self.y = var_y(data, self.Lag[0])\n\n        # The place where to save calculated coef\n        self.OLS_coef = None\n        self.LASSO_coef = None\n        self.LASSO_score = None\n        self.LASSO_alpha = None\n\n        # The place where to save calculated sigma\n        self.OLS_sigma = None\n\n        # accuracy\n        self.accuracy = None\n\n    def f_ols_coef(self):\n\n        sx = self.x\n        sy = self.y\n\n        reg = linear_model.LinearRegression(fit_intercept=False)\n        reg.fit(sx.transpose(), sy.transpose())\n        self.accuracy = reg.score(sx.transpose(), sy.transpose())\n        ols_coef_result = reg.coef_\n        self.OLS_coef = ols_coef_result\n        self.OLS_sigma = mle_sigma(sy, sx, self.OLS_coef)\n\n    def f_lasso_coef(self, cv_value, max_iter):\n\n        # reuse the design matrices already built in __init__\n        sx = self.x\n        sy = self.y\n\n        lasso_model = (linear_model.\n                       MultiTaskLassoCV(cv=cv_value, fit_intercept=False,\n                                        max_iter=max_iter).\n                       fit(sx.transpose(), sy.transpose()))\n\n        self.LASSO_alpha = lasso_model.alpha_\n\n        clf = (linear_model.\n               MultiTaskLasso(fit_intercept=False, alpha=self.LASSO_alpha,\n                              max_iter=1000))\n\n        clf.fit(sx.transpose(), sy.transpose())\n\n        self.LASSO_score = clf.score(sx.transpose(), sy.transpose())\n        self.LASSO_coef = clf.coef_\n\n
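    # Usage sketch (vol_df is a hypothetical volatilities DataFrame with a Date column):\n    #   c = Coef(vol_df, max_lag=5)\n    #   c.f_ols_coef()\n    #   print(c.Lag, c.OLS_coef.shape)\n\n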
    \"\"\"\n    ## Ridge\n# SX = VAR_X(stock_volatility, lag[0]) # 23 determined by lag_chooser in OLS\n# SY = VAR_Y(stock_volatility, lag[0]) # 23 determined by lag_chooser in OLS\n# n_alphas = 200\n# alphas = np.logspace(-10, 1, n_alphas)\n# Ridge_model = linear_model.RidgeCV(alphas=alphas, cv=10, fit_intercept=False).fit(SX.transpose(), SY.transpose())\n# alpha = Ridge_model.alpha_\n# clf = linear_model.Ridge(fit_intercept=False, alpha=alpha)\n# clf.fit(SX.transpose(), SY.transpose())\n# clf.score(SX.transpose(), SY.transpose())\n    #coef_Ridge = clf.coef_ # get it\n\n## Elastic net (combine LASSO and ridge, l = 1 for LASSO and l = 0 for Ridge)\n# SX = VAR_X(stock_volatility, lag[0]) # 23 determined by lag_chooser in OLS\n# SY = VAR_Y(stock_volatility, lag[0]) # 23 determined by lag_chooser in OLS\n# n_alphas = 10\n# alphas = np.logspace(-10, 1, n_alphas)\n# ElasticNet_model = linear_model.MultiTaskElasticNetCV(alphas=alphas, cv=10, fit_intercept=False, l1_ratio=0.5, max_iter=10000).fit(SX.transpose(), SY.transpose())\n    #alpha = ElasticNet_model.alpha_\n# clf = linear_model.MultiTaskElasticNet(fit_intercept=False, alpha=alpha, max_iter=10000)\n# clf.fit(SX.transpose(), SY.transpose())\n# clf.score(SX.transpose(), SY.transpose())\n# coef_ElasticNet = clf.coef_\n\n# get the coefficients\n# os.chdir(\"/Users/rucachen/Desktop/open_pycharm_virtualenv_3.6.4/financial_connectedness/coef\")\n# dict_coef = {}\n# name = [\"OLS\", \"LASSO\", \"Ridge\", \"ElasticNet\"]\n# list = [coef_OLS, coef_LASSO, coef_Ridge, coef_ElasticNet]\n# for i in range(len(name)):\n#     dict_coef[name[i]] = list[i]\n\n# for i in range(len(name)):\n#     Name = name[i] + \"_\" + \"coef\" + \".csv\"\n#     coef = pd.DataFrame(dict_coef[name[i]]).to_csv(Name)\n    \"\"\"\n","sub_path":"functions/f_coef.py","file_name":"f_coef.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29262135","text":"def races():\n    races = ['nord', 'khajit', 'redgaurd', 'woodelf', 'darkelf', 'highelf', 'orc', 'imperial', 'breton', 'argonian']\n    print('Choose your race from the following options.\\n')\n    for i in races:\n        print('{} '.format(i), end='')\n    print()\n    \n    # get choice\n    race = input('\\nrace: ')\n    \n    return race\n    \ndef nose():\n    print('On a scale from 1-10, 1 being short 10 being tall.')\n    height = input('How high would you like your nose? ')\n    print('On a scale from 1-10, 1 being narrow 10 being wide.')\n    width = input('How wide would you like your nose? ')\n    print('On a scale from 1-10, 1 being shallow 10 being deep.')\n    depth = input('How deep would you like your nose? ')\n    return [height, width, depth]\n    \ndef main():\n    my_race = races()\n    my_nose = nose()\n    print(my_race)\n    print(my_nose)","sub_path":"skyrim/races.py","file_name":"races.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450893873","text":"import csv\n\nfrom winorient.wdb_reader import WinOrientBinary\n\n\nclass WinOrient:\n    def __init__(self, file):\n        self.wdb = WinOrientBinary(file)\n        self._data = None\n\n    @property\n    def data(self):\n        if self._data is None:\n            self._data = self.wdb.create_data().data\n\n        return self._data\n\n    def csv(self, file, race=1):\n        csvout = csv.writer(open(file, 'a'), lineterminator='\\n')\n        for person in self.data['persons']:\n            row = (\n                race,\n                person['group'],\n                person['full_name'],\n                person['team'],\n                person['qual'],\n                person['year'],\n                person['result'].strftime('%H:%M:%S'),\n                person['place'] if person['place'] is not None else ''\n            )\n            csvout.writerow(row)\n\n\ndef main():\n    race = 1\n    wdb = WinOrient('file.wdb')\n    wdb.csv('person.csv', race)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"wdb_parser.py","file_name":"wdb_parser.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150869789","text":"\nfrom configparser import RawConfigParser\nimport os.path\nimport sys\n\nthis = sys.modules[__name__]\nthis.defaults = {}\nthis.google = {}\n\nhome = os.path.expanduser('~')\n\n# If ~/.ioxrc exists, read it\nconfig_file = os.path.join(home, '.ioxrc')\nif os.path.exists(config_file):\n    # Parse the configuration file\n    parser = RawConfigParser()\n    parser.read(config_file)\n\n    # Defaults\n    database = parser.get('defaults', 'database', fallback=None)\n    this.defaults['database'] = database if database is None else database.lower()\n\n    # Google\n    this.google['credentials'] = parser.get('google', 'credentials', fallback='credentials.json')\n    this.google['project_id'] = parser.get('google', 'project_id', fallback=None)\n","sub_path":"iox/_config.py","file_name":"_config.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471976529","text":"\nimport os\n#import pandas as pd\nimport csv\n\naws_s3_bucket =\"ajeya-s3-bucket\"\naws_profile = \"cross_ec2_instance\"\nlist_bucket= \"aws s3 ls s3://ajeya-s3-bucket/ --profile cross_ec2_instance\"\nsync_bucket = \"aws 
s3 sync s3://ajeya-s3-bucket/ /home/ec2-user/aws_s3/ --profile cross_ec2_instance\"\npath = \"/home/ec2-user/capstone_project\" \nclone = \"git clone git@github.com:Akash-bhat/Capstone_project_AWS.git .\" \npull = \"git pull origin master\"\nstatus = \"git status\"\n\n\nos.chdir(path) #path where the cloned project needs to be copied\nos.system(clone) #Cloning\nos.system(pull)\nos.system(status)\n\nos.system(list_bucket)\nos.system(sync_bucket)\n#data= pd.read_csv(\"addresses.csv\")\n#data\n\n\nprint(\"\\n Account 2 S3 Bucket - CSV File Output\\n\")\n\nfile = open(\"/home/ec2-user/aws_s3/addresses.csv\")\ncsvreader = csv.reader(file)\nheader = next(csvreader)\nprint(header)\nrows = []\nfor row in csvreader:\n rows.append(row)\nprint(rows)\nfile.close()\n\n\n","sub_path":"read_s3.py","file_name":"read_s3.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576660390","text":"\"\"\"\nHelp and documentation.\n\nThis service displays help information on the web server.\n\"\"\"\n\nimport itertools\nimport re\n\nfrom kochira import config\nfrom kochira.service import Service, Config\nfrom docutils.core import publish_parts\nfrom tornado.web import RequestHandler, Application, HTTPError, UIModule\n\nservice = Service(__name__, __doc__)\n\n\ndef rst(s, **kw):\n return publish_parts(s, writer_name=\"html\", **kw)[\"fragment\"]\n\n\ndef trim_docstring(docstring):\n inf = float(\"inf\")\n\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = inf\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < inf:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)\n\n\ndef _get_doc_parts(doc):\n if doc is None:\n return None\n return trim_docstring(doc).strip().split(\"\\n\\n\")\n\n\ndef get_short_doc(doc):\n parts = _get_doc_parts(doc)\n if parts is None:\n return None\n\n return parts[0]\n\n\ndef get_long_doc(doc):\n parts = _get_doc_parts(doc)\n if parts is None:\n return None\n\n return \"\\n\\n\".join(parts[1:])\n\n\nclass RequestHandler(RequestHandler):\n def render(self, name, **kwargs):\n return super().render(name,\n rst=rst,\n trim_docstring=trim_docstring,\n get_long_doc=get_long_doc,\n get_short_doc=get_short_doc,\n **kwargs)\n\n\nclass IndexHandler(RequestHandler):\n def get(self):\n services = [bound.service for bound in self.application.ctx.bot.services.values()]\n services.sort(key=lambda s: s.name)\n\n self.render(\"help/index.html\", services=services,\n bot_config=self.application.ctx.bot.config_class)\n\n\nclass ServiceHelpHandler(RequestHandler):\n def get(self, service_name):\n try:\n service = self.application.ctx.bot.services[service_name].service\n except KeyError:\n raise HTTPError(404)\n self.render(\"help/service.html\", service=service)\n\n\nclass ConfigModule(UIModule):\n def render(self, cfg):\n return self.render_string(\"help/_modules/config.html\",\n config=cfg, ConfigType=config.Config,\n rst=rst)\n\n\ndef 
make_application(settings):\n settings = settings.copy()\n settings[\"ui_modules\"][\"Config\"] = ConfigModule\n\n return Application([\n (r\"/\", IndexHandler),\n (r\"/(.*)\", ServiceHelpHandler)\n ], **settings)\n\n\n@service.hook(\"services.net.webserver\")\ndef webserver_config(ctx):\n return {\n \"name\": \"help\",\n \"title\": \"Help\",\n \"menu_order\": 9999,\n \"application_factory\": make_application\n }\n\n\n@service.command(r\"!commands\")\n@service.command(r\"!help(?: (?P.+))?\")\n@service.command(r\"help(?: me)?!?$\", mention=True)\ndef help(ctx, trigger=None):\n \"\"\"\n Help.\n\n Links the user to the web help service, if available.\n \"\"\"\n\n if \"kochira.services.net.webserver\" not in ctx.bot.services:\n ctx.respond(ctx._(\"Help currently unavailable.\"))\n else:\n if trigger is not None:\n matches = []\n\n for service_name, binding in ctx.bot.services.items():\n for command in binding.service.commands:\n for pattern, _ in command.patterns:\n if re.match(pattern, trigger) is not None:\n matches.append((command, service_name))\n\n if matches:\n command, service_name = next(iter(matches))\n ctx.respond(ctx._(\"Help for that command is available at {url}\").format(\n url=ctx.bot.config.services[\"kochira.services.net.webserver\"].base_url.rstrip(\"/\") + \"/help/\" + service_name + \"#\" + command.__name__\n ))\n else:\n ctx.respond(ctx._(\"Sorry, no help is available for that command.\"))\n else:\n ctx.respond(ctx._(\"My help is available at {url}\").format(\n url=ctx.bot.config.services[\"kochira.services.net.webserver\"].base_url.rstrip(\"/\") + \"/help/\"\n ))\n\n\n@service.command(r\"!source\")\n@service.command(r\"source\", mention=True)\n@service.command(r\"repo\", mention=True)\n@service.command(r\"github\", mention=True)\ndef show_source(ctx):\n \"\"\"\n Show source.\n\n Links the user to the source code repository.\n \"\"\"\n ctx.respond(ctx._(\"My source code is at: https://github.com/rfw/kochira\"))\n\n\n\n@service.command(r\"!bugs\")\n@service.command(r\"report (?:a )?bug\", mention=True)\n@service.command(r\"bugs\", mention=True)\n@service.command(r\"u stink\", mention=True)\ndef bug_report(ctx):\n \"\"\"\n Bug report.\n\n Links the user to the bug report URL.\n \"\"\"\n ctx.respond(ctx._(\"Found a bug? Report it! 
https://github.com/rfw/kochira/issues\"))\n\n","sub_path":"kochira/services/core/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"568795245","text":"#simple program to understand the flag concept\r\nvalid=True\r\nwhile True:\r\n a=input(\"enter the word:\")\r\n for i in a:\r\n if i!=\"b\":\r\n valid1=False\r\n else:\r\n valid1=True\r\n \r\n break\r\n if not a.isalpha():\r\n valid2=False\r\n else:\r\n valid2=True\r\n valid=valid1 and valid2\r\n \r\n if valid:\r\n print(\"valid word\")\r\n else:\r\n print(\"invalid word\")\r\n","sub_path":"flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"468857038","text":"import xml.etree.cElementTree as ET\r\nimport os\r\nimport csv\r\nimport pandas as pd\r\nimport io\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import patches\r\n\r\n\r\nheader = [\"xmin\", \"ymin\", \"width\", \"heigth\"]\r\nmainpath_ann = f\"./Annotations\"\r\n\r\nlista_df = os.listdir(mainpath_ann)\r\n# print(lista_df)\r\ncantidad_colonias_anotacion=[]\r\n\r\nfor i in lista_df:\r\n path_annotation_xml = f\"{mainpath_ann}/{i}\"\r\n annotation_xml = ET.parse(path_annotation_xml)\r\n root = annotation_xml.getroot()\r\n\r\n if (len(root))==1:\r\n j = root[0]\r\n else:\r\n j = root[1]\r\n\r\n data_string = j.text\r\n #for i in root:\r\n #data_string = j.text\r\n\r\n if data_string==\"- \":\r\n cantidad_colonias_anotacion.append(0)\r\n continue\r\n\r\n data = io.StringIO(data_string.strip())\r\n df = pd.read_csv(data, sep=\",\", header=None, names=header, lineterminator=\";\")\r\n cantidad_colonias_anotacion.append(df.shape[0])\r\n # df.head()\r\n\r\nprint(sum(cantidad_colonias_anotacion))\r\n\r\n\r\nprint(\"Fin del analisis\")","sub_path":"scripts/inferencia/cantidad colonias en anotaciones.py","file_name":"cantidad colonias en anotaciones.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"245384970","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nimport struct\nimport argparse\nimport sys\nimport tempfile\nfrom shutil import make_archive, unpack_archive\nfrom bitstring import BitArray\nfrom itertools import chain\nfrom functools import reduce\nfrom math import ceil, sqrt\nimport shutil\nimport os\n\nclass BmpWriter:\n def __init__(self,path,height,width):\n self.bmp_file = open(path,\"wb\")\n self.height = height\n self.width = width\n bmp_header = struct.Struct(\"<2sI4xIIIIHHIIIIII\")\n header = bmp_header.pack( b\"BM\", \n 3 * width * height + 54, \n 54, \n 40,\n width, \n height, \n 1, \n 24, \n 0, \n 3 * width * height + (width % 4) * height\n , \n 10, \n 10, \n 0, \n 0\n )\n self.bmp_file.write(header)\n self.row_pixel_count = 0\n\n def put_pixel(self, pixel):\n if pixel == 0:\n self.bmp_file.write(b\"\\xff\\xff\\xff\")\n else:\n self.bmp_file.write(b\"\\x00\\x00\\x00\")\n \n self.row_pixel_count += 1\n\n if self.row_pixel_count == self.width:\n if self.row_pixel_count % 4 != 0:\n self.bmp_file.write(b\"\\x00\" * (self.row_pixel_count % 4))\n self.row_pixel_count = 0\n\n def close(self):\n self.bmp_file.close()\n\ndef main():\n args = ParseArguments()\n if not args:\n return -1\n\n if not args.image_path:\n save(args)\n elif not args.data_path:\n restore(args)\n\ndef restore(args):\n image_data = 
open(args.image_path, \"rb\").read()\n height, width, bits = unpack_bmp(image_data)\n square_size = int(width/7) - 2;\n\n squares = []\n square_row_size = (square_size + 2) * width;\n\n for column in range(0,7):\n column_squares = []\n for row_offset in range(0,10 * square_row_size,square_row_size):\n square = []\n for square_row_number in range(1,square_size + 1):\n abs_offset = row_offset + width * square_row_number + 1 + (square_size + 2) * column\n square_row = bits[abs_offset:abs_offset + square_size]\n square.append(square_row)\n column_squares.append(square)\n squares.append(column_squares)\n\n seven_parts = []\n\n for column in range(0,7):\n seven_part = []\n for row in range(0,10):\n mapping = square_inv_mapping(row,column)\n square = squares[ mapping[0] ][ mapping[1] ]\n for square_row in square:\n seven_part += square_row\n seven_parts.append(seven_part)\n\n data = [] \n for fours in decoding(join(seven_parts)):\n data += fours\n data = \"\".join(map(str,data))\n\n destination = open(args.dest + \".zip\",\"wb\")\n destination.write(BitArray(bin=data).bytes)\n destination.close()\n unpack_archive(args.dest + \".zip\", args.dest)\n os.remove(args.dest + \".zip\")\n\ndef unpack_bmp(raw_bmp):\n image = raw_bmp\n header = image[:54]\n data = image[54:]\n height = struct.unpack(\"65535 or endPort<1 or endPort>65535:\n raise ValueError\n if startPort>endPort:\n raise ArithmeticError\n return startPort,endPort\n except TypeError:\n print(\"Ports can only be a whole number.\\n\")\n return validatePorts()\n except ValueError:\n print(\"Ports can only be between 1 & 65535.\\n\")\n return validatePorts()\n except ArithmeticError:\n print(\"The start port cannot be larger than the end point.\\n\")\n return validatePorts()\n\ndef setTarget():\n \"\"\"Get's user to set target, then select scan type\"\"\"\n print(\"____________________________________________________\\nTCP Scan\\n\")\n custom=input(\"Would you like to use an IP in the ARP table [Y] or a custom IP [N]? [Y/N]: \")\n if custom==\"y\" or custom==\"Y\":\n if ARPresults.fetchARPtable():\n print(\"s\")\n target=ARPresults.getTarget(\"Please type ID of target: \")\n print(target)\n if target==\"No targets found - try running an ARP scan from the main menu\":\n return\n target=target[0]\n else:\n print(\"Database cannot be found either try again or use custom mode.\")\n return setTarget()\n else:\n target=input(\"Enter target IP: \")\n ping=checkPing(target)\n if ping: print(\"Responds to ping\\n\")\n print(\"What would you like to do?\")\n print(\"1. Scan Top 20 Most Likely Open Ports\")\n print(\"2. Scan Well Known Ports (Ports 1-1024)\")\n print(\"3. 
Enter Custom Range (must be between 1 - 65535\")\n while(True):\n scanType=input(\"Enter Type of Scan: \")\n if scanType==\"1\":\n openPorts=topPortScan(target)\n sql_ports=\"INSERT INTO portScan(IP,scanTime,protocol,port,portDescription) VALUES (?,?,?,?,?)\"\n break\n elif scanType==\"2\":\n openPorts=fullScan(target)\n break\n elif scanType==\"3\":\n startPort,endPort=validatePorts()\n openPorts=fullScan(target,start=startPort, end=endPort)\n break\n else:\n print(\"That's an invalid option - try again!\\n\")\n\n if scanType==\"2\" or scanType==\"3\": \n sql_ports=\"INSERT INTO portScan(IP,scanTime,protocol,port) VALUES (?,?,?,?)\"\n\n with sqlite3.connect(\"info.sqlite3\") as db: cursor = db.cursor()#This creates a connection to the database\n if ping:\n data=[target,str(datetime.datetime.now()),\"ICMP\",\"Target responds to ping request\"]\n cursor.execute(\"INSERT INTO portScan(IP,scanTime,protocol,portDescription) VALUES (?,?,?,?)\",data)\n cursor.executemany(sql_ports,openPorts)\n db.commit()\n db.close()\n \n\ndef checkPing(IP):\n \"\"\"Given an IP address, checks that it can be pinged\"\"\"\n reply = system(\"ping -c 1 \" + IP)\n if reply==0: return True\n\ndef portScanner(target,port):\n \"\"\"This scans the port\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((target,port))#Attempts to connect to port\n s.close()#Closes connection\n return True#If successful, the port was open\n except: return False\n\ndef fullScan(target,start=1,end=1024):\n \"\"\"Given an IP, the address will be scanned for ports 1 tp 65535\"\"\"\n openPorts=[]\n currentTime=str(datetime.datetime.now())\n print(\"This will scan all 65535 tcp ports... There may a be a wait...\")\n for port in range(start,end+1):\n if portScanner(target,port): \n print(port,\"is open\")\n openPorts.append((target,currentTime,\"TCP\",port),)\n return openPorts\n \ndef topPortScan(target):\n \"\"\"Given an IP, scans top 20 used TCP ports\"\"\"\n topPorts = {\n 21:\"FTP (File Transfer Protocol)\",\n 22:\"SSH (Secure Shell)\",\n 23:\"Telnet\",\n 25:\"SMTP (Simple Mail Transfer Protocol)\",\n 53:\"DNS (Domain Name System)\",\n 80:\"HTTP (HyperText Transfer Protocol)\",\n 110:\"POP3 (Post Office Protocol 3)\",\n 111:\"rpcbind\",\n 135:\"MSRPC (Microsoft Remote Procedure Call)\",\n 139:\"netbios-ssn\",\n 143:\"IMAP (Internet Message Access Protocol)\",\n 443:\"HTTPS (HyperText Transfer Protocol Secure)\",\n 445:\"microsoft-ds (Microsoft Directory Services)\",\n 993:\"IMAPS (Internet Message Access Protocol Secure)\",\n 995:\"POP3S (Post Office Protocol 3 Secure)\",\n 1723:\"PPTP (Point-to-Point Tunneling Protocol)\",\n 3306:\"MySQL\",\n 3389:\"RDP (Remote Desktop Protocol)\",\n 5900:\"VNC (Virtual Network Computing)\",\n 8080:\"http-proxy\"\n } #Dictionary of NMap top 20 ports\n openPorts=[]\n currentTime=str(datetime.datetime.now())\n print (\"Scanning... 
This may take a moment...\")\n print(\"The following ports are open:\\n\\nPort | Use\")\n for port in topPorts:\n if portScanner(target,port):#Scans each port, if successful, it is displayed in a table\n print(\"{:<5} {:<15}\".format(port,topPorts[port]))#Formats table\n openPorts.append((target,currentTime,\"TCP\",port,topPorts[port]),)\n return openPorts\n\nif __name__ == '__main__': setTarget()#If the program isn't being imported - it will automatically run\n","sub_path":"tcpScan.py","file_name":"tcpScan.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"441121284","text":"# -*- coding: utf-8 -*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\nif not request.env.web2py_runtime_gae:\n ## if NOT running on Google App Engine use SQLite or other DB\n db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])\nelse:\n ## connect to Google BigTable (optional 'google:datastore://namespace')\n db = DAL('google:datastore')\n ## store sessions and tickets there\n session.connect(request, response, db=db)\n ## or store session in Memcache, Redis, etc.\n ## from gluon.contrib.memdb import MEMDB\n ## from google.appengine.api.memcache import Client\n ## session.connect(request, response, db = MEMDB(Client()))\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... 
)\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate, Mail\nauth = Auth(db)\ncrud, service, plugins = Crud(db), Service(), PluginManager()\n\nfrom gluon.contrib.login_methods.cas_auth import CasAuth\nauth.settings.login_form=CasAuth(urlbase = \"https://login.iiit.ac.in/cas\",actions = ['login','validate','logout'],casversion = 2,casusername=\"cas:user\")\n\n\t\n## create all tables needed by auth if not custom tables\n\nauth.settings.extra_fields['auth_user']=[\n\t\tField('age','integer',label='Please Enter your Age ',requires=IS_NOT_EMPTY()),\n\t\tField('sex','string',label='Please Select your Gender ',requires=IS_IN_SET(['Male','Female']),default='Male',widget=SQLFORM.widgets.radio.widget),\n\t\tField('phone','integer',label='Please Enter your Contact Number ',requires=IS_NOT_EMPTY()),\n\t\tField('photo','upload',label='Please select your Profile Picture '),\n\t\tField('usertype','string',label='You are a ',requires=IS_IN_SET(['Faculty','Student','Librarian']),default='Student',widget=SQLFORM.widgets.radio.widget),\n]\n\n\nauth.define_tables(username=False, signature=False)\n\n## configure email\nmail = Mail()\nauth.settings.actions_disabled.append('register')\nmail.settings.server = 'students.iiit.ac.in:25'\nmail.settings.sender = 'mohit.jain@students.iiit.ac.in'\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\nauth.settings.login_next=URL('default','profile')\n\n## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.\n## register with janrain.com, write your domain:api_key in private/janrain.key\nfrom gluon.contrib.login_methods.rpx_account import use_janrain\nuse_janrain(auth, filename='private/janrain.key')\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\ndb.define_table('authtalk',\n\t\tField('subject','string',label='Please provide the subject of your mail ',requires=IS_NOT_EMPTY()),\n\t\tField('conte','text',label='Your message goes here ',requires=IS_NOT_EMPTY()))\n\ndb.define_table('usertalk',\n\t\tField('reciever','string',label=\"Enter Email-Id of the reciever \",requires=[IS_NOT_EMPTY(),IS_EMAIL()]),\n\t\tField('subject','string',label='Enter Subject of the mail here ',requires=IS_NOT_EMPTY()),\n\t\tField('conte','text',label='Your message goes here ',requires=IS_NOT_EMPTY()))\n\ndb.define_table('questions',\n\t\tField('question','string',label='Please post your question here 
',requires=IS_NOT_EMPTY()))\n\ndb.define_table('answers',\n\t\tField('question',db.questions,requires=IS_IN_DB(db,'questions.id','questions.question')),\n\t\tField('answer','text',label='Know the answer? Be helpful! ',requires=IS_NOT_EMPTY()))\n\ndb.define_table('newbookreq',\n\t\tField('book','string',label='Enter Name of the Book ',requires=IS_NOT_EMPTY()),\n\t\tField('author','string',label='Enter Name of the Author ',requires=IS_NOT_EMPTY()),\n\t\tField('edition','integer',label='Edition of the Requested Book ',requires=IS_NOT_EMPTY()),\n\t\tField('publisher','string',label='Publisher of the Book ',requires=IS_NOT_EMPTY()))\n\ndb.define_table('bookresponse',\n\t\tField('book',db.newbookreq,requires=IS_IN_DB(db,'newbookreq.id','newbookreq.book')),\n\t\tField('status','string',requires=IS_IN_SET(['Accepted','Rejected','Pending']),default='Pending'))\n\n\n#db.define_table(auth.settings.table_user_name,\n#\t\tField('name','string',label='Please Enter your Name ',requires=IS_NOT_EMPTY()),\n#\t\tField('userid','string',label='Enter your unique Username ',requires=IS_NOT_EMPTY(),unique=True),\n#\t\tField('passwd','password',label='Please Enter a Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('confpasswd','password',label='Please Confirm your Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('branch','string',label='Please Enter your Branch ',requires=IS_IN_SET(['CSE','CSD','ECE','ECD','CLD','CND','EHD','M.Tech','PhD']),widget=SQLFORM.widgets.radio.widget),\n#\t\tField('photo','upload',label='Please upload a profile picture '))\n\n\n#db.define_table('stud_reg',\n#\t\tField('name','string',label='Please Enter your Name ',requires=IS_NOT_EMPTY()),\n#\t\tField('roll','integer',label='Please Enter your RollNo. ',unique=True,requires=IS_NOT_EMPTY()),\n#\t\tField('userid','string',label='Please Enter a Unique Id ',unique=True,requires=IS_NOT_EMPTY()),\n#\t\tField('passwd','password',label='Please Enter your Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('confpasswd','password',label='Please Re-Enter your Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('category','string',label='You are a ',requires=IS_IN_SET(['Faculty','Student','Librarian']),widget=SQLFORM.widgets.radio.widget),\n#\t\tField('branch','string',label='Please Enter your Branch ',requires=IS_IN_SET(['CSE','CSD','ECE','ECD','CLD','CND','EHD','M.Tech','PhD']),widget=SQLFORM.widgets.radio.widget),\n#\t\tField('email','string',label='Please Enter your \\'IIIT-H Mail\\' Email-Id ',requires=[IS_EMAIL(),IS_NOT_EMPTY()]),\n#\t\tField('phone','integer',label='Please Enter your Contact Number ',requires=IS_NOT_EMPTY()))\n\n#db.define_table('fac_reg',\n#\t\tField('name','string',label='Please Enter your Name ',requires=IS_NOT_EMPTY()),\n#\t\tField('userid','string',label='Please Enter a Unique Id ',unique=True,requires=IS_NOT_EMPTY()),\n#\t\tField('area','text',label='Please Enter your Area of Expertise '),\n#\t\tField('passwd','password',label='Please Enter your Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('confpasswd','password',label='Please Re-Enter your Password ',requires=IS_NOT_EMPTY()),\n#\t\tField('email','string',label='Please Enter your \\'IIIT\\' Email-Id ',requires=[IS_EMAIL(),IS_NOT_EMPTY()]),\n#\t\tField('phone','integer',label='Please Enter your Contact Number 
',requires=IS_NOT_EMPTY()))\n\n\ndb.define_table('issue',\n\t\tField('userid','integer'),\n\t\tField('bookid','integer'),\n\t\tField('idate','datetime'))\n\ndb.define_table('books',\n\t\tField('isbn','integer',label='ISBN',requires=IS_NOT_EMPTY()),\n\t\tField('name','string',label='Name',requires=IS_NOT_EMPTY()),\n\t\tField('subject','string',label='Subject',requires=IS_NOT_EMPTY()),\n\t\tField('author','string',label='Author',requires=IS_NOT_EMPTY()),\n\t\tField('publisher','string',label='Publisher',requires=IS_NOT_EMPTY()),\n\t\tField('edition','integer',label='Edition',requires=IS_NOT_EMPTY()),\n\t\tField('issued','integer',label='# Issued',requires=IS_NOT_EMPTY()),\n\t\tField('copies','integer',label='# on Shelf',requires=IS_NOT_EMPTY()),\n\t\tField('canbe','string',label='Enter type of Document',requires=IS_IN_SET(['Book','Journal','Magazine']),default='Book',widget=SQLFORM.widgets.radio.widget),\n\t\tField('photo','upload'))\n\ndb.define_table('rate',\n\t\tField('bookid',db.books,requires=IS_IN_DB(db,'books.id','books.name')),\n\t\tField('rating','float',requires=IS_IN_SET(['1','2','3','4','5']),default='3'))\n\n## after defining tables, uncomment below to enable auditing\n# auth.enable_record_versioning(db)\n","sub_path":"LibMan/models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"525333482","text":"#!/usr/bin/python\n\nimport subprocess\nimport time\nfrom math import *\n\noptions = {\n 'bitrate': 8000,\n 'length': 0.5,\n}\n\nfreq_min = 10\nfreq_max = 30\nfreq = freq_max\nfreq_step = -1\n\ntemplate_no_touch = [sin(x * pi * 2 / 12) for x in range(12)]\ntemplate_touch = [sin(x * pi * 2 / 24) for x in range(24)]\nprocess = subprocess.Popen([\"aplay\"], stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\ndef makesound(template):\n return bytearray(max(0, min(255, int(template[x % len(template)] * 127 + 127))) for x in range(int(options['bitrate'] / 4 * options['length'])))\n\nwhile True:\n with open(\"/tmp/out\", \"r\") as f:\n value = int(f.read().strip())\n\n if value >= 100:\n freq += freq_step\n else:\n freq -= freq_step\n\n freq = max(freq_min, min(freq_max, freq))\n\n wavelength = 5 * freq\n wave = [int(255 * max(0, min(255, sin(x * pi * 2 / wavelength)))) for x in range(wavelength)]\n\n process.stdin.write(bytearray(wave))\n process.stdin.flush()\n print(freq)\n time.sleep(len(wave) / options['bitrate'])\n# time.sleep(0.20 * options['length'])\n","sub_path":"convertToMusic/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175409175","text":"with open(\"filenamehere.txt\", \"a\") as open_file:\n current_text = open_file.read()\n open_file.write(\"yada yada yada \\r\\n\")\n\n# this is identical to\n\nopen_file = open(\"filenamehere.txt\", \"a\")\ncurrent_text = open_file.read()\nopen_file.write(\"yada yada yada \\r\\n\")\nopen_file.close()\n\n# use the first one! 
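# The aplay feedback loop in music.py above builds raw unsigned 8-bit PCM by
# hand; the same idea as a self-contained sketch, stdlib only:
from math import pi, sin

def sine_period_sketch(samples_per_period):
    # one period of a sine wave quantized to unsigned 8-bit samples (0..255)
    n = int(samples_per_period)
    return bytearray(int(127.5 + 127.5 * sin(2 * pi * i / n)) for i in range(n))

wave = sine_period_sketch(80)  # at an 8000 Hz bitrate this is a 100 Hz tone
assert min(wave) >= 0 and max(wave) <= 255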
that way you don't have to remember to close the file\n# note: mode \"a\" is write-only, so the read() calls above would need \"a+\" to work\n\n# r, w, a and x are all valid file modes\n# as are r+, w+, a+ and x+\n# append \"b\" to a file mode to make it binary\n","sub_path":"Programming Basics 6 File and IO Operations/3_files.py","file_name":"3_files.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"604155146","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport codecs\nimport h5py\nimport numpy as np\nimport cv2\nimport multiprocessing\n\nnum_cores_use = 1\n\ndef convert2h5(im_path, hesaff_path, sift_path, h5_desc_path, h5_attr_path):\n    im = cv2.imread(im_path)\n    height = im.shape[0]\n    width = im.shape[1]\n    f = codecs.open(sift_path, \"r\", \"utf-8\")\n    data = f.read().split(\"\\n\")\n    f.close()\n    ks = []\n    scales = []\n    angles = []\n    ds = []\n    for i in range(2,len(data)-1):\n        #print(data[i])\n        es = data[i].split(\" \")\n        ks.append(np.array([float(es[0]), float(es[1])]))\n        scales.append(np.array([float(es[3])]))\n        angles.append(np.array([float(es[4])]))\n        ds.append(np.array([float(es[j]) for j in range(12, 140)]))\n        #print(ds[i-2])\n        #quit()\n    ks = np.array(ks)\n    scales = np.array(scales)\n    angles = np.array(angles)\n    ds = np.array(ds)\n\n    f = codecs.open(hesaff_path, \"r\", \"utf-8\")\n    data = f.read().split(\"\\n\")\n    f.close()\n    #ks = []\n    #scales = []\n    pas = []\n    pbs = []\n    pcs = []\n    for i in range(2,len(data)-1):\n        #print(data[i])\n        es = data[i].split(\" \")\n        #print(es)\n        pas.append(np.array([float(es[2])]))\n        pbs.append(np.array([float(es[3])]))\n        pcs.append(np.array([float(es[4])]))\n        #ds.append(np.array([float(es[j]) for j in range(12, 140)]))\n        #print(ds[i-2])\n        #quit()\n    #ks = np.array(ks)\n    pas = np.array(pas)\n    pbs = np.array(pbs)\n    pcs = np.array(pcs)\n    #print(pas.shape)\n    #print(pbs.shape)\n    #print(pcs.shape)\n    #print(ks.shape)\n    #print(scales.shape)\n    #print(angles.shape)\n    #print(ds.shape)\n    #print(pas,pbs,pcs)\n    #quit()\n    # desc\n    f = h5py.File(h5_desc_path, 'w')\n    f.create_dataset('ds', data=ds)\n    f.create_dataset('height', data=height)\n    f.create_dataset('width', data=width)\n    f.flush()\n    f.close()\n    # attr\n    f = h5py.File(h5_attr_path, 'w')\n    f.create_dataset('ks', data=ks)\n    f.create_dataset('scales', data=scales)\n    f.create_dataset('angles', data=angles)\n    f.create_dataset('pas', data=pas)\n    f.create_dataset('pbs', data=pbs)\n    f.create_dataset('pcs', data=pcs)\n    f.create_dataset('height', data=height)\n    f.create_dataset('width', data=width)\n    f.flush()\n    f.close()\n    #quit()\n\n\n#\ndef step1(im_path, ppm_path):\n    print(im_path, ppm_path)\n    im = cv2.imread(im_path)\n    cv2.imwrite(ppm_path, im)\n\n#\ndef step2(ppm_path, hesaff_path, hesaff_threshold):\n    cmd = \"./h_affine.ln.gz -hesaff -i \" + ppm_path + \" -o \" + hesaff_path + \" -thres \" + hesaff_threshold\n    print(cmd)\n    os.system(cmd)\n\n#\ndef step3(ppm_path, hesaff_path, sift_path):\n    cmd = \"./compute_descriptors.ln.gz -sift -i \" + ppm_path + \" -p1 \" + hesaff_path + \" -o2 \" + sift_path\n    print(cmd)\n    os.system(cmd)\n\n#\ndef step4(im_path, hesaff_path, sift_path, h5_desc_path, h5_attr_path):\n    convert2h5(im_path, hesaff_path, sift_path, h5_desc_path, h5_attr_path)\n\n#\ndef extract_feature_in_dir(im_dir, ppm_dir, feat_dir, hesaff_threshold):\n\n    # -> ppm\n    arg_lists1 = []\n    im_names = os.listdir(im_dir)\n    im_names.sort()\n    for im_name in im_names:\n        #print(im_name)\n        im_path = os.path.join(im_dir, im_name)\n        ppm_path = os.path.join(ppm_dir, im_name.split(\".\")[0]+\".ppm\")\n        
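# convert2h5() above stores descriptors and keypoint attributes in two HDF5
# files; a minimal round-trip of that h5py pattern (hypothetical file name):
import h5py
import numpy as np

with h5py.File('demo_desc.h5', 'w') as f:
    f.create_dataset('ds', data=np.zeros((10, 128)))  # e.g. 10 SIFT descriptors
    f.create_dataset('height', data=480)
    f.create_dataset('width', data=640)

with h5py.File('demo_desc.h5', 'r') as f:
    assert f['ds'].shape == (10, 128)
    assert int(f['height'][()]) == 480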
if not os.path.exists(ppm_path):\n arg_lists1.append((im_path, ppm_path))\n if num_cores_use > 1 and len(arg_lists1) > 1:\n with multiprocessing.Pool(processes=num_cores_use) as pool:\n pool.starmap(step1, arg_lists1)\n else:\n for arg_list in arg_lists1:\n step1(*arg_list)\n\n # -> hesaff\n arg_lists2 = []\n im_names = os.listdir(im_dir)\n im_names.sort()\n for im_name in im_names:\n #print(im_name)\n ppm_path = os.path.join(ppm_dir, im_name.split(\".\")[0]+\".ppm\")\n hesaff_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff\")\n if not os.path.exists(hesaff_path):\n arg_lists2.append((ppm_path, hesaff_path, hesaff_threshold))\n if num_cores_use > 1 and len(arg_lists2) > 1:\n with multiprocessing.Pool(processes=num_cores_use) as pool:\n pool.starmap(step2, arg_lists2)\n else:\n for arg_list in arg_lists2:\n step2(*arg_list)\n\n # -> sift\n arg_lists3 = []\n im_names = os.listdir(im_dir)\n im_names.sort()\n for im_name in im_names:\n #print(im_name)\n ppm_path = os.path.join(ppm_dir, im_name.split(\".\")[0]+\".ppm\")\n hesaff_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff\")\n sift_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff.sift\")\n if not os.path.exists(sift_path):\n arg_lists3.append((ppm_path, hesaff_path, sift_path))\n if num_cores_use > 1 and len(arg_lists3) > 1:\n with multiprocessing.Pool(processes=num_cores_use) as pool:\n pool.starmap(step3, arg_lists3)\n else:\n for arg_list in arg_lists3:\n print(*arg_list)\n step3(*arg_list)\n\n # -> h5\n arg_lists4 = []\n im_names = os.listdir(im_dir)\n im_names.sort()\n for im_name in im_names:\n #print(im_name)\n im_path = os.path.join(im_dir, im_name)\n hesaff_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff\")\n sift_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff.sift\")\n #h5_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".h5\")\n h5_desc_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\"_desc.h5\")\n h5_attr_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\"_attr.h5\")\n if os.path.exists(sift_path):\n if not os.path.exists(h5_desc_path) or not os.path.exists(h5_attr_path):\n arg_lists4.append((im_path, hesaff_path, sift_path, h5_desc_path, h5_attr_path))\n if num_cores_use > 1 and len(arg_lists4) > 1:\n with multiprocessing.Pool(processes=num_cores_use) as pool:\n pool.starmap(step4, arg_lists4)\n else:\n for arg_list in arg_lists4:\n step4(*arg_list)\n\n # check\n for im_name in im_names:\n #print(im_name)\n sift_path = os.path.join(feat_dir, im_name.split(\".\")[0]+\".hesaff.sift\")\n if not os.path.exists(sift_path):\n print(\"{}\".format(sift_path))\n\n\n\n#\nif __name__==\"__main__\":\n\n ts = [\"500\"]\n for hesaff_threshold in ts:\n im_dir = \"../test_data/images\"\n ppm_dir = \"../test_data/ppms\"\n feat_dir = \"../test_data/feats_hesaff_t\" + hesaff_threshold\n if not os.path.exists(ppm_dir):\n os.makedirs(ppm_dir)\n if not os.path.exists(feat_dir):\n os.makedirs(feat_dir)\n extract_feature_in_dir(im_dir, ppm_dir, feat_dir, hesaff_threshold)\n","sub_path":"extract_hesaff/src/extract_feature.py","file_name":"extract_feature.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"109605562","text":"#!/usr/bin/env python\nimport sys\nimport string\n\nfor line in sys.stdin:\n line = line.strip()\n words = line.split()\n for w in words:\n table = w.maketrans('', '', string.punctuation)\n w = w.translate(table).lower()\n print(w, '\\t', 
1)\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364487437","text":"import subprocess\nimport os\nimport sys\nfrom datetime import datetime\nfrom fbchat import Client\nfrom fbchat.models import *\nimport pprint\nimport argparse\n\nclass WatchDog():\n \n def get_arguments(self):\n parser = argparse.ArgumentParser(description=\"Watchdog for gsm scripts [-options]\")\n parser.add_argument(\"-g\", \"--gsm_id\", help=\"GSM ID to watch E.g. 20Users\")\n parser.add_argument(\"-m\", \"--module\", help=\"Users/Loggers\")\n try:\n args = parser.parse_args()\n return args\n except IndexError:\n print('>> Error in parsing arguments')\n error = parser.format_help()\n print(error)\n sys.exit()\n\n def main(self,gsm_id,module):\n screen = \"screen -ls g\"+str(gsm_id)\n status = os.system(screen)\n if (status != 0):\n test = \"screen -d -m -S g\"+str(gsm_id)+\" /usr/local/bin/python3.6 /home/pi/dyna3_gsm/runner.py -t \"+str(module).lower()+\" -db 192.168.150.112 -g\"+str(gsm_id[:2])\n print(test)\n os.system(test)\n sys.exit(0)\n\nif __name__ == \"__main__\":\n initialize_watchdog = WatchDog()\n args = initialize_watchdog.get_arguments()\n initialize_watchdog.main(args.gsm_id, args.module)\n","sub_path":"utils/watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"618767316","text":"# To run this, you can install BeautifulSoup\n# https://pypi.python.org/pypi/beautifulsoup4\n\n# Or download the file\n# http://www.py4e.com/code3/bs4.zip\n# and unzip it in the same directory as this file\n\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\n\ncount = int(input(\"Enter count: \"))\nposition = int(input(\"Enter position: \"))\n\ndef find_url(url):\n\thtml = urllib.request.urlopen(url, context=ctx).read()\n\tsoup = BeautifulSoup(html, 'html.parser')\n\tnames = []\n\ttags = soup('a')\n\tfor tag in tags:\n\t\tnames.append(tag.get('href', None))\n\treturn names\n\nfor x in range(count):\n\turl = find_url(url)[position - 1]\nprint(url[30:-10])","sub_path":"hw6partb.py","file_name":"hw6partb.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169834581","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\nclass WorkOrderLine(models.Model):\n _name = 'work.order.line'\n\n work_order_id = fields.Many2one('work.order', string='Work Order ID', copy=False)\n name = fields.Char('Work Order', related='work_order_id.name', readonly=True)\n contractor_id = fields.Many2one('res.partner', string='Sub Contractor')\n start_date = fields.Date('Scheduled Start Date', required=True)\n end_date = fields.Date('Scheduled End Date', required=True)\n percentage = fields.Integer('Percentage of Completion (%)')\n purchase_line_id = fields.Many2one('purchase.order.line', string='PO Line Reference')\n purchase_id = fields.Many2one('purchase.order', related='purchase_line_id.order_id', string='PO Reference')\n state = fields.Selection([('draft', 'Waiting'), ('progress', 'In Progress'), ('done', 'Done')], related='work_order_id.state', string='Status')\n 
cost = fields.Float('Cost')\n\nWorkOrderLine()\n\nclass PurchaseOrderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n @api.multi\n def compute_work_order_count(self):\n for record in self:\n record.work_order_count = len(record.work_order_line_ids.ids)\n\n work_order_line_ids = fields.One2many('work.order.line', 'purchase_line_id', 'Work Orders')\n product_type = fields.Selection(related=\"product_id.type\", string='Product Type')\n description = fields.Text('Description')\n work_order_count = fields.Integer(compute='compute_work_order_count', string='Work Order Count')\n\nPurchaseOrderLine()\n\nclass PurchaseOrder(models.Model):\n _inherit = 'purchase.order'\n\n @api.multi\n def compute_work_order_count(self):\n for record in self:\n work_order_count = 0\n for line in record.order_line:\n work_order_count += len([1 for line2 in line.work_order_line_ids if line2.work_order_id])\n record.work_order_count = work_order_count\n\n work_order_count = fields.Integer(compute='compute_work_order_count', string='Work Order Count')\n\n @api.multi\n def button_confirm(self):\n super(PurchaseOrder, self).button_confirm()\n # Create work orders\n for record in self:\n for purchase_line in record.order_line:\n for work_line in purchase_line.work_order_line_ids:\n vals = {}\n vals['product_id'] = purchase_line.product_id.id\n vals['contractor_id'] = work_line.contractor_id.id\n vals['partner_id'] = purchase_line.partner_id.id\n vals['start_date'] = work_line.start_date\n vals['end_date'] = work_line.end_date\n vals['percentage'] = work_line.percentage\n vals['cost'] = work_line.cost\n vals['purchase_id'] = work_line.purchase_id.id\n vals['purchase_line_id'] = work_line.purchase_line_id.id\n vals['currency_id'] = work_line.purchase_id.currency_id.id\n work_order_id = self.env['work.order'].create(vals)\n work_line.work_order_id = work_order_id.id\n return True\n\n @api.multi\n def action_view_workorder(self):\n action = self.env.ref('internal_purchase_milestones.action_work_order').read()[0]\n action['domain'] = [('purchase_id', 'in', self.ids)]\n return action\n\nPurchaseOrder()","sub_path":"beta-dev1/opt/odoo/odoo/addons/core/internal_purchase_milestones/models/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"636642785","text":"# -*- coding: utf-8 -*-\n# *****************************************************************************\n#\n# Module authors:\n# Alexander Lenz \n#\n# *****************************************************************************\n\nfrom augutil.parameter import ParameterizedBase, SingleLevelParameterizedBase\nfrom sphinx.ext.autodoc import ClassDocumenter\n\nPARAMETER_SECTION_HEADER = '''\n**Parameter Specifications: %r**\n\n'''\n\nPARAMETER_SECTION_HEADER_SINGLE = '''\n**Parameter Specifications**\n'''\n\nPARAM_TABLE = '''\n.. 
list-table::\n :header-rows: 1\n \n * - Parameter\n - Type\n - Category\n - Default value\n - Description\n'''\n\nPARAMETER_ROW = '''\n * - {name}\n - {parameter.validator!r}\n - {parameter.category_path!s}\n - {parameter.default!r}\n - {parameter.description!s}\n'''\n\n\nclass ParameterizedDocumenter(ClassDocumenter):\n priority = 20\n\n def document_members(self, all_members=False):\n if not issubclass(self.object, ParameterizedBase):\n ClassDocumenter.document_members(self, all_members)\n return\n\n for entry in self.object.parameter_dicts:\n self._document_parameter_dict(entry)\n\n ClassDocumenter.document_members(self, all_members)\n\n\n\n def _document_parameter_dict(self, dict_name):\n self._add_multiline_string(\n PARAMETER_SECTION_HEADER_SINGLE\n if issubclass(self.object, SingleLevelParameterizedBase)\n else PARAMETER_SECTION_HEADER % dict_name\n )\n\n self._add_multiline_string(PARAM_TABLE)\n\n for name, parameter in getattr(self.object, dict_name).items():\n self._document_parameter(name, parameter)\n\n def _document_parameter(self, name, parameter):\n self._add_multiline_string(PARAMETER_ROW.format(name=name, parameter=parameter))\n\n def _add_multiline_string(self, value):\n for entry in value.splitlines():\n self.add_line(entry, '')\n\n\ndef setup(app):\n app.add_autodocumenter(ParameterizedDocumenter)\n\n return {'parallel_read_safe': True,\n 'version': '0.1.0'}\n","sub_path":"python3_projects/testing/augutil/sphinx/ext/parameterized.py","file_name":"parameterized.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"625558539","text":"from twisted.internet.protocol import Factory, Protocol\nfrom twisted.internet import reactor\nfrom twisted.names import dns\nfrom twisted.names import client, server\n\nfrom oonib.config import config\n\nclass DNSTestHelper(server.DNSServerFactory):\n def __init__(self, authorities = None,\n caches = None, clients = None,\n verbose = 0):\n try:\n host, port = config.helpers.dns.split(':')\n port = int(port)\n # XXX remove this when we have configuration file versioning.\n # https://github.com/TheTorProject/ooni-probe/issues/190\n except:\n host, port = '8.8.8.8', 53\n resolver = client.Resolver(servers=[(host, port)])\n server.DNSServerFactory.__init__(self, authorities = authorities,\n caches = caches, clients = [resolver],\n verbose = verbose)\n def handleQuery(self, message, protocol, address):\n server.DNSServerFactory.handleQuery(self, message, protocol, address)\n","sub_path":"oonib/testhelpers/dns_helpers.py","file_name":"dns_helpers.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214775383","text":"\ndef run_test_gzip():\n def test_gzip(f):\n from numpy import copy, mean, random\n return copy(f['ndarray'][random.randint(600),:,:])\n\n root = '/Users/femto-13/Downloads/hdf5_test/'\n from h5py import File\n f = File(root+ '2.hdf5','r')\n\n from time import time\n t1 = time()\n\n arr = test_gzip(f)\n t2 = time()\n print('non-zip:',arr.mean(),t2-t1)\n\n f2 = File(root+ '3.hdf5','r')\n\n from time import time\n t1 = time()\n\n arr2 = test_gzip(f2)\n t2 = time()\n print('gzip:',arr2.mean(),arr2.std(),t2-t1)\n\n from numpy import allclose\n 
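# The ParameterizedDocumenter above fills PARAMETER_ROW via str.format with
# attribute lookups; that mechanism in isolation (Parameter here is a
# stand-in class, not the real augutil type):
from dataclasses import dataclass

@dataclass
class Parameter:
    validator: str
    category_path: str
    default: object
    description: str

ROW = ' * - {name}\n - {parameter.validator!r}\n - {parameter.category_path!s}\n - {parameter.default!r}\n - {parameter.description!s}'
p = Parameter('int', 'general/timing', 10, 'Connection timeout in seconds')
print(ROW.format(name='timeout', parameter=p))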
print(allclose(arr,arr2))\n","sub_path":"lcp_video/procedures/gzip_test.py","file_name":"gzip_test.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"276402943","text":"import configparser\n\n# My packages\nimport parser\nimport matcher\nfrom ynab_client import YNAB\nfrom amazon_client import Amazon\n\n# Use encrypted secrets config\n\nconfig = configparser.ConfigParser()\nconfig.read(\"secrets/credentials.ini\")\nmyConfig = config['DEFAULT']\notpSecret = myConfig[\"otpSecret\"]\nuserEmail = myConfig[\"userEmail\"]\nuserPassword = myConfig[\"userPassword\"]\nynabToken = myConfig[\"ynabToken\"]\n\ndef main():\n amazon = Amazon()\n orderIDs = amazon.getAllOrderIDs()\n amazonT = []\n for orderID in orderIDs:\n try:\n iPage = amazon.getInvoicePage(orderID)\n afterTaxItems, transactions = parser.parseInvoicePage(iPage)\n if afterTaxItems == None or transactions == None:\n continue\n matched = matcher.matchAmazonTransactions(afterTaxItems, transactions)\n amazonT.append(matched)\n print(afterTaxItems, transactions, matched)\n except Exception as e:\n print(f\"Something went wrong processing order {orderID}: {e}\")\n myYNAB = YNAB(ynabToken)\n ynabT = myYNAB.list_recent_amazon_transactions()\n transactions = matcher.matchAmazonToYNAB(amazonT, ynabT)\n myYNAB.patch_transactions(transactions)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"193378559","text":"import matplotlib.pyplot as plt\nimport torch\nimport os\nimport numpy as np\nimport argparse\nfrom dataloader_shapes_new import Shapes_dataset\nfrom new_arch_models import autoencoder\nfrom utils import *\nimport copy\nimport seaborn as sns\nfrom torchvision.utils import save_image\nimport argparse\n\ncuda_available = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda_available else \"cpu\")\n\n# ------------------------------------------ Set configuration -----------------------------------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument('C', default=3, type=int, nargs='?', help='Number of channels in input image')\nparser.add_argument('H', default=64, type=int, nargs='?', help='Height of the input image')\nparser.add_argument('W', default=64, type=int, nargs='?', help='Width of the input image')\nparser.add_argument('hidden_size', default=3, nargs='?', type=int, help='Size of the latent layer in VAE')\nparser.add_argument('num_epochs', default=25, nargs='?', type=int, help='Number of epochs to train model')\nparser.add_argument('batch_size', default=100, nargs='?', type=int, help='Batch size')\nparser.add_argument('learning_rate', default=9, nargs='?', type=int, help='Learning rate')\nparser.add_argument('l2_penalty', default=0.0, nargs='?', type=float, help='l2_penalty')\nparser.add_argument('validate_during_training', nargs='?', default=False, type=bool, help='If True, Performance on validation set, '\n 'will be computed, after each epoch')\nparser.add_argument('load_from_checkpoint', default=False, nargs='?', type=bool, help='Load pre-trained model for further training')\nargs = parser.parse_args()\n\nC = 3 # Number of input & output channels\nH = 64 # Height of input\nW = 64 # Width of input\ninput_size = H * W # The image size = 64 x 64 = 4096\nhidden_size = args.hidden_size # The number of nodes at the hidden 
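# The timing test above reads the same dataset from a plain and a
# gzip-compressed HDF5 file; producing the compressed variant is one keyword
# in h5py (file names here are hypothetical):
import h5py
import numpy as np

data = np.random.rand(16, 64, 64)
with h5py.File('plain.hdf5', 'w') as f:
    f.create_dataset('ndarray', data=data)
with h5py.File('zipped.hdf5', 'w') as f:
    f.create_dataset('ndarray', data=data, compression='gzip', compression_opts=4)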
layer\nnum_epochs = 25 # The number of times entire dataset is trained\nbatch_size = 100 # changed from 128 # The size of input data took for one iteration\nlearning_rate = 5e-4 # The speed of convergence\nl2_penalty = 0 # weight decay for optimizer\nvalidate_during_training = False # If True, prediction will be performed on the test set, after each epoch\nload_from_checkpoint = False\n\nBASE_SAVE_DIR = './shapes_new_arch_unsupervised'\nmodel_name = 'unsupervised_teacher_x3_teach4'\nprint(\"model_name: \", model_name)\nMODEL_SAVE_DIR = BASE_SAVE_DIR + '/' + model_name\nprint(\"MODEL_SAVE_DIR: \", MODEL_SAVE_DIR)\n# exit()\nSAMPLES_SAVE_DIR = MODEL_SAVE_DIR + '/reconstructed_samples'\nPLOTS_SAVE_DIR = MODEL_SAVE_DIR + '/plots'\ntraining_log_file = MODEL_SAVE_DIR + '/training_log.txt'\ntry:\n os.remove(training_log_file) # Delete the old log file, if exists\nexcept OSError:\n pass\n\nif not os.path.exists(BASE_SAVE_DIR):\n os.mkdir(BASE_SAVE_DIR)\nif not os.path.exists(MODEL_SAVE_DIR):\n os.mkdir(MODEL_SAVE_DIR)\nif not os.path.exists(SAMPLES_SAVE_DIR):\n os.mkdir(SAMPLES_SAVE_DIR)\nif not os.path.exists(PLOTS_SAVE_DIR):\n os.mkdir(PLOTS_SAVE_DIR)\n\n\n# ------------------------------------------------------------------------------------------------------------------\n\n\ndef train_model(model, train_loader, test_loader=None, validate_during_training=False):\n model.train()\n total_loss_log = []\n kld_loss_log = []\n recon_loss_log = []\n for epoch in range(num_epochs):\n print(\"Starting Epoch: {}\".format(epoch+1))\n epoch_train_loss = []\n epoch_recon_loss = []\n epoch_kld_loss = []\n count = 0\n batch_idx = 0\n for batch_idx, data in enumerate(train_loader):\n img, y = data\n img = img.view(img.size(0), -1)\n img = img.to(device)\n y = y.to(device)\n img = img.view(img.size(0), C, H, W)\n\n x_hat, z_sample, z_mean, z_stddev = model(img)\n\n # loss = criterion(output, img)\n loss, reconstruction_loss, kld_loss = compute_elbo_loss(input=img, x_hat=x_hat, z_mean=z_mean,\n z_stddev=z_stddev)\n\n total_loss_log.append(loss.item())\n kld_loss_log.append(kld_loss.item())\n recon_loss_log.append(reconstruction_loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_train_loss.append(loss.data.item())\n epoch_recon_loss.append(reconstruction_loss.item())\n epoch_kld_loss.append(kld_loss.item())\n\n if batch_idx % 500 == 0:\n batch_log_string = \"Batch: {} \\t Train Loss: {:.8f} \\t Recon. Loss: {:.8f} \\t KLD Loss: {:.8f}\" \\\n .format(batch_idx, loss.data.item(), reconstruction_loss.item(), kld_loss.item())\n print(batch_log_string)\n with open(training_log_file, 'a+') as fp:\n fp.write(batch_log_string + '\\n')\n plt.figure()\n npimg = img[0, :, :, :].cpu().detach().permute(1, 2, 0).numpy()\n plt.subplot(2, 1, 1)\n plt.imshow(npimg, interpolation='nearest', aspect='equal')\n plt.subplot(2, 1, 2)\n npimg_op = x_hat[0, :, :, :].cpu().detach().permute(1, 2, 0).numpy()\n plt.imshow(npimg_op, interpolation='nearest', aspect='equal')\n plt.savefig(SAMPLES_SAVE_DIR + '/epoch_' + str(epoch) + '_batch_' + str(batch_idx) + '_sample_0')\n plt.close()\n\n # if batch_idx == 3:\n # break\n\n # ===================log========================\n print(\"Epoch Average Summary on the training set: \")\n epoch_log_string = 'Epoch [{}/{}], \\t Total loss: {:.7f} \\t Recon. 
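# compute_elbo_loss() is imported from utils and not shown in this file; a
# common formulation (an assumption, not necessarily this project's exact
# loss) is per-sample reconstruction error plus the analytic Gaussian KL term:
import torch
import torch.nn.functional as F

def elbo_loss_sketch(x, x_hat, z_mean, z_stddev):
    n = x.size(0)
    recon = F.mse_loss(x_hat, x, reduction='sum') / n
    # KL(N(mu, sigma) || N(0, 1)) summed over latent dims, averaged over batch
    kld = -0.5 * torch.sum(1 + 2 * torch.log(z_stddev) - z_mean ** 2 - z_stddev ** 2) / n
    return recon + kld, recon, kld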
Loss: {:.7f} \\t KLD Loss: {:.7f}'.format(\n epoch + 1,\n num_epochs, np.mean(epoch_train_loss), np.mean(epoch_recon_loss), np.mean(epoch_kld_loss))\n print(epoch_log_string)\n with open(training_log_file, 'a+') as fp:\n fp.write(epoch_log_string + '\\n')\n if validate_during_training:\n print()\n print(\"Average performance on the test set: \")\n avg_total_loss, avg_recon_loss, avg_kld_loss, hidden_mean, hidden_sigma = \\\n test_model(model, test_loader, hidden_size, 'whole', plot_latent_dist=False)\n print('Epoch [{}/{}], \\t Total loss: {:.7f} \\t Recon. Loss: {:.7f} \\t KLD Loss: {:.7f}'\n .format(epoch + 1, num_epochs, avg_total_loss, avg_recon_loss, avg_kld_loss))\n\n torch.save(model.state_dict(), MODEL_SAVE_DIR + '/' + model_name + '.pt')\n torch.save(optimizer.state_dict(), MODEL_SAVE_DIR + '/' + 'optimizer.pt')\n print(\"-\" * 100)\n\n total_loss_log = np.array(total_loss_log)\n recon_loss_log = np.array(recon_loss_log)\n kld_loss_log = np.array(kld_loss_log)\n np.save(file=MODEL_SAVE_DIR + '/train_total_loss_log.npy', arr=total_loss_log)\n np.save(file=MODEL_SAVE_DIR + '/train_recon_loss_log.npy', arr=recon_loss_log)\n np.save(file=MODEL_SAVE_DIR + '/train_kld_loss_log.npy', arr=kld_loss_log)\n\n loss_names = ['ELBO Loss', 'Reconstruction Loss', 'KL-Divergence']\n losses = [total_loss_log, recon_loss_log, kld_loss_log]\n\n return model, losses, loss_names\n\n\ndef get_model(read_checkpoint=False, checkpoint_path=None, optimizer_checkpoint_path=None):\n model = autoencoder(hidden_size=hidden_size, device=device).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=l2_penalty)\n if read_checkpoint:\n model.load_state_dict(torch.load(checkpoint_path))\n print(\"Model loaded with weights at: \", checkpoint_path)\n if optimizer_checkpoint_path is not None:\n try:\n print(\"optimizer_checkpoint_path: \", optimizer_checkpoint_path)\n optimizer.load_state_dict(torch.load(optimizer_checkpoint_path))\n except:\n print(\"Optimizer checkpoint not found ... 
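# get_model() above resumes training from .pt files; the matching state_dict
# save/load pair in isolation (paths are hypothetical):
import torch

model = torch.nn.Linear(4, 2)
torch.save(model.state_dict(), 'model.pt')
model.load_state_dict(torch.load('model.pt'))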
Using new instace of optimizer ...\")\n\n return model, optimizer\n\n\ndef get_data_loader(dataset_type, load_all_files=False):\n dataset_type = dataset_type.lower()\n is_test = (dataset_type == 'test')\n if dataset_type == 'whole_mig':\n load_all_files = True\n shuffle_flags = {'train': True, 'validation': False, 'test': False, 'whole_mig': False}\n print(\"shuffle_flags[dataset_type]: \", shuffle_flags[dataset_type])\n # dataset = Shapes_dataset(test=is_test, dir='data/', size=(H, W), all_files=load_all_files)\n # data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size,\n # shuffle=shuffle_flags[dataset_type])\n # print(\"Number of batches per epoch: {}\".format(len(data_loader)))\n dataset = Shapes_dataset(dir='./data', test=True, size=(H, W))\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=shuffle_flags[dataset_type], num_workers=0,\n pin_memory=True)\n print(\"Number of batches per epoch: {}\".format(len(data_loader)))\n return data_loader\n\n\ndef disentangle_check_image_row(model, data_loader, hidden_size):\n model.eval()\n imgs_save_dir = MODEL_SAVE_DIR+'/disentangle_img_row'\n gif_save_dir = MODEL_SAVE_DIR+'/disentangle_gif'\n if not os.path.exists(imgs_save_dir):\n os.mkdir(imgs_save_dir)\n if not os.path.exists(gif_save_dir):\n os.mkdir(gif_save_dir)\n\n image_shape = (64, 64, 3)\n with torch.no_grad():\n for data in data_loader:\n img, y = data\n img = img.to(device)\n x_hat_batch, z_sample_batch, mean_batch, sigma_batch = model(img)\n log_sigma_batch = torch.log(sigma_batch)\n break\n select_dim = []\n samples_allz = []\n z_mean = mean_batch[0].detach().cpu().numpy()\n z_sigma_sq = np.exp(log_sigma_batch[0].detach().cpu().numpy()) ** 2\n for ind in range(len(z_sigma_sq)):\n if z_sigma_sq[ind] < 0.2:\n select_dim.append(str(ind))\n\n plot_flag = True\n if plot_flag:\n n_z = z_mean.shape[0]\n for target_z_index in range(n_z):\n print(\"Traversing latent unit : \", target_z_index)\n samples = []\n gif_nums = 20\n for ri in range(gif_nums + 1):\n # value = -3.0 + (6.0 / 9.0) * ri\n maxi = 3\n value = -maxi + 2 * maxi / gif_nums * ri\n code2 = copy.deepcopy(z_sample_batch.detach().cpu().numpy())\n for i in range(n_z):\n if i == target_z_index:\n code2[0][i] = value\n else:\n code2[0][i] = code2[0][i]\n reconstr_img = model.decode(torch.from_numpy(code2).cuda())\n rimg = reconstr_img[0, :, :, :].permute(1, 2, 0)\n samples.append(rimg)\n samples_allz.append(samples)\n imgs_comb = np.hstack((img.detach().cpu().numpy() for img in samples))\n image_path = imgs_save_dir+\"/check_z{0}_{1}.png\".format(target_z_index, 0)\n save_image(torch.from_numpy(imgs_comb).permute(2, 0, 1), image_path)\n samples = [samples[i].detach().cpu().numpy() for i in range(len(samples))]\n make_gif(samples, gif_save_dir+\"/\" + 'stu_latent' + \"_z_%s.gif\" % (target_z_index), duration = 2, true_image = False)\n print()\n final_gif = []\n for i in range(gif_nums + 1):\n gif_samples = []\n for j in range(hidden_size):\n gif_samples.append(samples_allz[j][i])\n imgs_comb = np.hstack((img.detach().cpu().numpy() for img in gif_samples))\n final_gif.append(imgs_comb)\n make_gif(final_gif, gif_save_dir+\"/all_z_step{0}.gif\".format(0), true_image=False)\n\n return select_dim\n\n\ndef test_model(model, data_loader, hidden_size, dataset_type, plot_latent_dist):\n model.eval()\n val_total_loss_log = []\n val_kld_loss_log = []\n val_recon_loss_log = []\n hidden_means_log = None\n hidden_sigma_log = None\n total_elbo_loss_value = 0\n total_kld_loss_value = 0\n 
total_recon_loss_value = 0\n with torch.no_grad():\n for batch_idx, data in enumerate(data_loader):\n img, y = data\n img = img.view(img.size(0), -1)\n img = img.to(device)\n y = y.to(device)\n img = img.view(img.size(0), C, H, W)\n\n x_hat, z_sample, z_mean, z_stddev = model(img)\n if hidden_means_log is None:\n hidden_means_log = z_mean\n hidden_sigma_log = z_stddev\n else:\n hidden_means_log = torch.cat([hidden_means_log, z_mean], dim=0)\n hidden_sigma_log = torch.cat([hidden_sigma_log, z_stddev], dim=0)\n\n # loss = criterion(output, img)\n loss, reconstruction_loss, kld_loss = compute_elbo_loss(input=img, x_hat=x_hat, z_mean=z_mean,\n z_stddev=z_stddev)\n\n val_total_loss_log.append(loss.item())\n val_recon_loss_log.append(reconstruction_loss.item())\n val_kld_loss_log.append(kld_loss.item())\n total_elbo_loss_value += loss.item()\n total_kld_loss_value += kld_loss.item()\n total_recon_loss_value += reconstruction_loss.item()\n\n # if batch_idx == 3:\n # break\n\n np.save(file=MODEL_SAVE_DIR + '/test_total_loss_log.npy', arr=val_total_loss_log)\n np.save(file=MODEL_SAVE_DIR + '/test_recon_loss_log.npy', arr=val_recon_loss_log)\n np.save(file=MODEL_SAVE_DIR + '/test_kld_loss_log.npy', arr=val_kld_loss_log)\n\n hidden_means_log = hidden_means_log.cpu().numpy()\n hidden_sigma_log = hidden_sigma_log.cpu().numpy()\n\n if plot_latent_dist:\n plt.figure(figsize=(15, 12))\n for i in range(hidden_size):\n plt.subplot(hidden_size, 1, i + 1)\n sns.kdeplot(hidden_means_log[:, i], shade=True, color=\"b\")\n plt.ylabel(\"Latent Factor: \" + str(i + 1))\n # plt.tight_layout()\n plt.title(\"KDE plot for the latent distributions - \" + dataset_type)\n plt.savefig(MODEL_SAVE_DIR + '/latent_distribution_' + str(dataset_type), dpi=500)\n plt.close()\n\n # average mean and sigma for each latent factor\n hidden_mean = np.mean(hidden_means_log, axis=0)\n hidden_sigma = np.mean(hidden_sigma_log, axis=0)\n\n print(\"Avg. ELBO loss per sample: {:.5f}\".format(total_elbo_loss_value))\n print(\"Avg. KLD loss per sample: {:.5f}\".format(total_kld_loss_value))\n print(\"Avg. 
recon loss per sample: {:.5f}\".format(total_recon_loss_value))\n\n return np.mean(val_total_loss_log), np.mean(val_recon_loss_log), np.mean(val_kld_loss_log), hidden_mean, \\\n hidden_sigma\n\n\ndef traverse_latent_space(model, latent_size, n=5, gif_num=20, maxi=5, data_loader=None, dataset_type=None):\n # from unsupervised_disentangling_shapes.utils import imgs2gif\n\n gif_num = 20 # how many pics in the gif\n maxi = 5.0\n\n for iter, batch in enumerate(data_loader):\n # print(batch)\n batch, y = batch\n batch = batch.to(device) # only fine resolution image\n # x_hat, z_sample, mean, self.sigma\n # mu, logvar, x_recon = model(batch)\n x_hat, z_sample, mean, sigma = model(batch)\n break\n\n for k in range(latent_size):\n train_loader = get_data_loader(dataset_type='train', load_all_files=True)\n z = mean.clone()\n z = z[:n, :]\n\n if not os.path.exists(PLOTS_SAVE_DIR + '/z_{}_{}'.format(k, dataset_type)):\n os.mkdir(PLOTS_SAVE_DIR + '/z_{}_{}'.format(k, dataset_type))\n\n for ri in range(gif_num + 1):\n value = -maxi + (2.0 * maxi / gif_num) * ri\n z[:, k] = value\n\n out = model.decode(z)\n singleImage = out.view(n, 3, 64, 64)\n\n singleImage = singleImage.cpu().detach()\n\n save_image(singleImage, PLOTS_SAVE_DIR + '/z_{}_{}/img_{}.png'.format(k, dataset_type, ri), nrow=n)\n # plt.figure()\n # plt.imshow(singleImage.numpy())\n # plt.savefig(PLOTS_SAVE_DIR+'/z_{}/img_{}.png'.format(k, ri))\n # plt.close()\n\n str_path = PLOTS_SAVE_DIR + '/z_{}_{}/'.format(k, dataset_type)\n\n imgs2gif(image_path=str_path, gif_path=PLOTS_SAVE_DIR, gif_name='gif_image_{}_{}'.format(k, dataset_type))\n\n\nif __name__ == \"__main__\":\n choice = int(input(\"Enter Choice: 1] Train \\t 2] Test\"))\n optimizer = None\n if choice == 1:\n if load_from_checkpoint:\n model, optimizer = get_model(read_checkpoint=True, checkpoint_path=MODEL_SAVE_DIR + '/' + model_name + '.pt',\n optimizer_checkpoint_path=MODEL_SAVE_DIR + '/optimizer.pt')\n else:\n model, optimizer = get_model(read_checkpoint=False)\n\n\n train_loader = get_data_loader(dataset_type='train', load_all_files=True)\n # from dsprites_prashnna import Dsprites_dataset\n # train_loader = Dsprites_dataset()\n # exit()\n\n test_loader = None\n if validate_during_training:\n test_loader = get_data_loader(dataset_type='test')\n model, losses, loss_names = train_model(model, train_loader, test_loader=test_loader,\n validate_during_training=validate_during_training)\n plot_training_history(losses, loss_names, PLOTS_SAVE_DIR, batch_size)\n elif choice == 2:\n print(\"passing: \", MODEL_SAVE_DIR + '/' + model_name)\n model, optimizer = get_model(read_checkpoint=True, checkpoint_path=MODEL_SAVE_DIR + '/' + model_name + '.pt')\n\n data_loader = get_data_loader(dataset_type='whole_mig', load_all_files=True)\n\n avg_total_loss, avg_recon_loss, avg_kld_loss, hidden_mean, hidden_sigma = \\\n test_model(model, data_loader, hidden_size, 'whole_mig', plot_latent_dist=True)\n\n select_dim = disentangle_check_image_row(model, data_loader, hidden_size)\n print(\"select_dim: \", select_dim)\n print('-' * 50)\n \n print(\"Computing MIG metric ...\")\n metric, marginal_entropies, cond_entropies = mutual_info_metric_shapes(vae=model, shapes_dataset=None,\n dataset_loader=data_loader, nparams=2,\n K=hidden_size)\n print(\"MIG metric on {} dataset: {:.5f}\".format('Whole', metric))\n \n del data_loader\n print(\"Computing Factor-VAE score ...\")\n dataset = Shapes_dataset(dir='./data', test=True, size=(args.H, args.W))\n fac_metric = factor_metric_dsprite(dataset='3dshapes', 
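# traverse_latent_space() sweeps one latent unit across [-maxi, maxi] while
# holding the others fixed; the core loop, stripped of file I/O (decode is
# assumed to map a latent batch to images, z a torch tensor):
def traverse_one_dim_sketch(decode, z, dim, maxi=5.0, steps=20):
    frames = []
    for i in range(steps + 1):
        z_mod = z.clone()
        z_mod[:, dim] = -maxi + (2.0 * maxi / steps) * i
        frames.append(decode(z_mod))
    return frames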
dataset_reference=dataset)\n factor_vae_score = fac_metric.evaluate_mean_disentanglement(model)\n print(\"Mean Disentanglement Metric: \" + str(factor_vae_score))\n\n disentangle_layer_sample(model, data_loader, args, PLOTS_SAVE_DIR, step=1)\n\n else:\n print(\"Entered choice: {}. Please enter valid option.\".format(choice))","sub_path":"train_teacher_shapes.py","file_name":"train_teacher_shapes.py","file_ext":"py","file_size_in_byte":18907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342793951","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRoutines for orientation estimation.\n\nFurther description will follow.\n\n\"\"\"\n\nimport numpy as np\nfrom .mathfuncs import *\n\n__all__ = ['q_conj', 'q_random', 'q_norm', 'q_prod', 'q_mult_L', 'q_mult_R',\n'q_rot', 'axang2quat', 'quat2axang', 'q_correct', 'q2R', 'q2euler', 'rotation', 'rot_seq',\n'R2q', 'dcm2quat', 'cardan2q', 'q2cardan', 'am2q', 'acc2q', 'slerp']\n\ndef q_conj(q):\n \"\"\"\n Return the conjugate of a unit quaternion\n\n A unit quaternion, whose form is :math:`\\\\mathbf{q} = (q_w, q_x, q_y, q_z)`,\n has a conjugate of the form :math:`\\\\mathbf{q}^* = (q_w, -q_x, -q_y, -q_z)`.\n\n Remember, unit quaternions must have a norm equal to 1:\n\n .. math::\n\n \\\\|\\\\mathbf{q}\\\\| = \\\\sqrt{q_w^2+q_x^2+q_y^2+q_z^2} = 1.0\n\n Parameters\n ----------\n q : array\n Unit quaternion or 2D array of Quaternions.\n\n Returns\n -------\n q_conj : array\n Conjugated quaternion or 2D array of conjugated Quaternions.\n\n Examples\n --------\n >>> q = np.array([0.603297, 0.749259, 0.176548, 0.20850 ])\n >>> ahrs.common.orientation.q_conj(q)\n array([0.603297, -0.749259, -0.176548, -0.20850 ])\n >>> Q = np.array([[0.039443, 0.307174, 0.915228, 0.257769],\n [0.085959, 0.708518, 0.039693, 0.699311],\n [0.555887, 0.489330, 0.590976, 0.319829],\n [0.578965, 0.202390, 0.280560, 0.738321],\n [0.848611, 0.442224, 0.112601, 0.267611]])\n >>> ahrs.common.orientation.q_conj(Q)\n array([[ 0.039443, -0.307174, -0.915228, -0.257769],\n [ 0.085959, -0.708518, -0.039693, -0.699311],\n [ 0.555887, -0.489330, -0.590976, -0.319829],\n [ 0.578965, -0.202390, -0.280560, -0.738321],\n [ 0.848611, -0.442224, -0.112601, -0.267611]])\n\n References\n ----------\n .. [1] Dantam, N. (2014) Quaternion Computation. Institute for Robotics\n and Intelligent Machines. Georgia Tech.\n (http://www.neil.dantam.name/note/dantam-quaternion.pdf)\n .. [2] https://en.wikipedia.org/wiki/Quaternion#Conjugation,_the_norm,_and_reciprocal\n\n \"\"\"\n if q.ndim > 2 or q.shape[-1] != 4:\n return None\n # I think that can be done better with some clever use of numpy.ndim\n return np.array([1., -1., -1., -1.])*np.array(q)\n\ndef q_random(size=1):\n \"\"\"\n Generate random quaternions\n\n Parameters\n ----------\n size : int\n Number of Quaternions to generate. 
Default is 1 quaternion only.\n\n Returns\n -------\n q : array\n M-by-4 array of generated random Quaternions, where M is the requested size.\n\n Examples\n --------\n >>> import ahrs\n >>> q = ahrs.common.orientation.q_random()\n array([0.65733485, 0.29442787, 0.55337745, 0.41832587])\n >>> q = ahrs.common.orientation.q_random(5)\n >>> q\n array([[-0.81543924, -0.06443342, -0.08727487, -0.56858621],\n [ 0.23124879, 0.55068024, -0.59577746, -0.53695855],\n [ 0.74998503, -0.38943692, 0.27506719, 0.45847506],\n [-0.43213176, -0.55350396, -0.54203589, -0.46161954],\n [-0.17662536, 0.55089287, -0.81357401, 0.05846234]])\n >>> np.linalg.norm(q, axis=1) # Each quaternion is, naturally, normalized\n array([1., 1., 1., 1., 1.])\n\n \"\"\"\n assert size > 0 and type(size) is int, \"size must be a positive non-zero integer value.\"\n q = np.random.random((size, 4))-0.5\n q /= np.linalg.norm(q, axis=1)[:, np.newaxis]\n if size == 1:\n return q[0]\n return q\n\n\ndef q_norm(q):\n \"\"\"\n Return the normalized quaternion [WQ1]_ :math:`\\\\mathbf{q}_u`, also known as a\n versor [WV1]_ :\n\n .. math::\n\n \\\\mathbf{q}_u = \\\\frac{1}{\\\\|\\\\mathbf{q}\\\\|} \\\\mathbf{q}\n\n where:\n\n .. math::\n\n \\\\|\\\\mathbf{q}_u\\\\| = 1.0\n\n Parameters\n ----------\n q : array\n Quaternion to normalize\n\n Returns\n -------\n q_u : array\n Normalized Quaternion\n\n Examples\n --------\n >>> import numpy as np\n >>> import ahrs\n >>> q = np.random.random(4)\n >>> q\n array([0.94064704, 0.12645116, 0.80194097, 0.62633894])\n >>> q = ahrs.common.orientation.q_norm(q)\n >>> q\n array([0.67600473, 0.0908753 , 0.57632232, 0.45012429])\n >>> np.linalg.norm(q)\n 1.0\n\n References\n ----------\n .. [WQ1] https://en.wikipedia.org/wiki/Quaternion#Unit_quaternion\n .. [WV1] https://en.wikipedia.org/wiki/Versor\n\n \"\"\"\n if len(q)!=4:\n return None\n return q/np.linalg.norm(q)\n\ndef q_prod(p, q):\n \"\"\"\n Product of two unit quaternions.\n\n Given two unit quaternions :math:`\\\\mathbf{p}=(p_w, \\\\mathbf{p}_v)` and\n :math:`\\\\mathbf{q} = (q_w, \\\\mathbf{q}_v)`, their product is defined [ND]_ [MWQW]_\n as:\n\n .. 
math::\n\n \\\\begin{eqnarray}\n \\\\mathbf{pq} & = & \\\\big( (q_w p_w - \\\\mathbf{q}_v \\\\cdot \\\\mathbf{p}_v) \\\\; ,\n \\\\; \\\\mathbf{q}_v \\\\times \\\\mathbf{p}_v + q_w \\\\mathbf{p}_v + p_w \\\\mathbf{q}_v \\\\big) \\\\\\\\\n & = &\n \\\\begin{bmatrix}\n p_w & -\\\\mathbf{p}_v^T \\\\\\\\ \\\\mathbf{p}_v & p_w \\\\mathbf{I}_3 + \\\\lfloor \\\\mathbf{p}_v \\\\rfloor\n \\\\end{bmatrix}\n \\\\begin{bmatrix} q_w \\\\\\\\ \\\\mathbf{q}_v \\\\end{bmatrix}\n \\\\\\\\\n & = &\n \\\\begin{bmatrix}\n p_w & -p_x & -p_y & -p_z \\\\\\\\\n p_x & p_w & -p_z & p_y \\\\\\\\\n p_y & p_z & p_w & -p_x \\\\\\\\\n p_z & -p_y & p_x & p_w\n \\\\end{bmatrix}\n \\\\begin{bmatrix} q_w \\\\\\\\ q_x \\\\\\\\ q_y \\\\\\\\ q_z \\\\end{bmatrix}\n \\\\\\\\\n & = &\n \\\\begin{bmatrix}\n p_w q_w - p_x q_x - p_y q_y - p_z q_z \\\\\\\\\n p_x q_w + p_w q_x - p_z q_y + p_y q_z \\\\\\\\\n p_y q_w + p_z q_x + p_w q_y - p_x q_z \\\\\\\\\n p_z q_w - p_y q_x + p_x q_y + p_w q_z\n \\\\end{bmatrix}\n \\\\end{eqnarray}\n\n Parameters\n ----------\n p : array\n First quaternion to multiply\n q : array\n Second quaternion to multiply\n\n Returns\n -------\n pq : array\n Product of both quaternions\n\n Examples\n --------\n >>> import numpy as np\n >>> from ahrs import quaternion\n >>> q = ahrs.common.orientation.q_random(2)\n >>> q[0]\n array([0.55747131, 0.12956903, 0.5736954 , 0.58592763])\n >>> q[1]\n array([0.49753507, 0.50806522, 0.52711628, 0.4652709 ])\n >>> quaternion.q_prod(q[0], q[1])\n array([-0.36348726, 0.38962514, 0.34188103, 0.77407146])\n\n References\n ----------\n .. [ND] Dantam, N. (2014) Quaternion Computation. Institute for Robotics\n and Intelligent Machines. Georgia Tech.\n (http://www.neil.dantam.name/note/dantam-quaternion.pdf)\n .. [MWQM] Mathworks: Quaternion Multiplication.\n https://www.mathworks.com/help/aeroblks/quaternionmultiplication.html\n\n \"\"\"\n pq = np.zeros(4)\n pq[0] = p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]\n pq[1] = p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2]\n pq[2] = p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1]\n pq[3] = p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0]\n return pq\n\ndef q_mult_L(q):\n \"\"\"\n Return the matrix form of a left-sided quaternion multiplication Q.\n\n Parameters\n ----------\n q : array\n Quaternion to multiply from the left side.\n\n Returns\n -------\n Q : array\n Matrix form of the left side quaternion multiplication.\n\n \"\"\"\n q /= np.linalg.norm(q)\n Q = np.array([\n [q[0], -q[1], -q[2], -q[3]],\n [q[1], q[0], -q[3], q[2]],\n [q[2], q[3], q[0], -q[1]],\n [q[3], -q[2], q[1], q[0]]])\n return Q\n\ndef q_mult_R(q):\n \"\"\"\n Return the matrix form of a right-sided quaternion multiplication Q.\n\n Parameters\n ----------\n q : array\n Quaternion to multiply from the right side.\n\n Returns\n -------\n Q : array\n Matrix form of the right side quaternion multiplication.\n\n \"\"\"\n q /= np.linalg.norm(q)\n Q = np.array([\n [q[0], -q[1], -q[2], -q[3]],\n [q[1], q[0], q[3], -q[2]],\n [q[2], -q[3], q[0], q[1]],\n [q[3], q[2], -q[1], q[0]]])\n return Q\n\ndef q_rot(v, q):\n \"\"\"\n Rotate vector :math:`\\\\mathbf{v}` through quaternion :math:`\\\\mathbf{q}`.\n\n It should be equal to calling `q2R(q)@v`.\n\n Parameters\n ----------\n v : array\n Vector to rotate in 3 dimensions.\n q : array\n Quaternion to rotate through.\n\n \"\"\"\n qw, qx, qy, qz = q\n return np.array([\n -2.0*v[0]*(qy**2 + qz**2 - 0.5) + 2.0*v[1]*(qw*qz + qx*qy) - 2.0*v[2]*(qw*qy - qx*qz),\n -2.0*v[0]*(qw*qz - qx*qy) - 2.0*v[1]*(qx**2 + qz**2 - 0.5) + 2.0*v[2]*(qw*qx + 
qy*qz),\n 2.0*v[0]*(qw*qy + qx*qz) - 2.0*v[1]*(qw*qx - qy*qz) - 2.0*v[2]*(qx**2 + qy**2 - 0.5)])\n\ndef axang2quat(axis, angle, rad=True):\n \"\"\"\n Return Quaternion from given Axis-Angle.\n\n Parameters\n ----------\n axis : array\n Unit vector indicating the direction of an axis of rotation.\n angle : float\n Angle describing the magnitude of rotation about the axis.\n\n Returns\n -------\n q : array\n Unit quaternion\n\n Examples\n --------\n >>> import numpy as np\n >>> from ahrs import quaternion\n >>> q = quaternion.axang2quat([1.0, 0.0, 0.0], np.pi/2.0)\n array([0.70710678 0.70710678 0. 0. ])\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation\n .. [2] https://www.mathworks.com/help/robotics/ref/axang2quat.html\n\n \"\"\"\n if axis is None:\n return [1.0, 0.0, 0.0, 0.0]\n if len(axis) != 3:\n return None\n axis /= np.linalg.norm(axis)\n qw = np.cos(angle/2.0) if rad else cosd(angle/2.0)\n s = np.sin(angle/2.0) if rad else sind(angle/2.0)\n q = np.array([qw] + list(s*axis))\n return q/np.linalg.norm(q)\n\ndef quat2axang(q):\n \"\"\"\n Return Axis-Angle representation from a given Quaternion.\n\n Parameters\n ----------\n q : array\n Unit quaternion\n\n Returns\n -------\n axis : array\n Unit vector indicating the direction of an axis of rotation.\n angle : float\n Angle describing the magnitude of rotation about the axis.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation\n\n \"\"\"\n if q is None:\n return [0.0, 0.0, 0.0], 1.0\n if len(q) != 4:\n return None\n # Normalize input quaternion\n q /= np.linalg.norm(q)\n axis = np.asarray(q[1:])\n denom = np.linalg.norm(axis)\n angle = 2.0*np.arctan2(denom, q[0])\n axis = np.array([0.0, 0.0, 0.0]) if angle == 0.0 else axis/denom\n return axis, angle\n\ndef q_correct(q, full=True):\n \"\"\"\n Correct quaternion from flipping to its conjugate.\n\n If a quaternion jumps to its conjugate, it will be corrected and brought\n back to its original position.\n\n Parameters\n ----------\n q : array\n N-by-4 array of quaternions, where N is the number of continuous\n quaternions.\n full : bool\n Indiciates whether the flip is full (entirely negative) or only at the\n conjugate (scalar part).\n\n Returns\n -------\n q : array\n Corrected array of quaternions.\n \"\"\"\n q_v = -q.copy() if full else -1.0*q[:, 1:]\n q_diff = np.diff(q_v, axis=0)\n q_spikes = np.unique(np.where(abs(q_diff) > 1.0)[0]) + 1\n if len(q_spikes) < 1:\n return q\n if len(q_spikes)%2:\n q_spikes = np.concatenate((q_spikes, [len(q_v)]))\n spans = q_spikes.reshape((len(q_spikes)//2, 2))\n q_corrected = q.copy()\n for s in spans:\n if full:\n q_corrected[s[0]:s[1]] = q_v[s[0]:s[1]]\n else:\n q_corrected[s[0]:s[1], 1:] = q_v[s[0]:s[1]]\n return q_corrected\n\ndef q2R(q):\n \"\"\"\n Return a rotation matrix :math:`\\\\mathbf{R} \\\\in SO(3)` from a given unit\n quaternion :math:`\\\\mathbf{q}`.\n\n The given unit quaternion :math:`\\\\mathbf{q}` must have the form\n :math:`\\\\mathbf{q} = (q_w, q_x, q_y, q_z)`, where :math:`\\\\mathbf{q}_v = (q_x, q_y, q_z)`\n is the vector part, and :math:`q_w` is the scalar part.\n\n The resulting rotation matrix :math:`\\\\mathbf{R}` has the form [W1]_ [W2]_:\n\n .. 
math::\n\n \\\\mathbf{R}(\\\\mathbf{q}) =\n \\\\begin{bmatrix}\n 1 - 2(q_y^2 + q_z^2) & 2(q_xq_y - q_wq_z) & 2(q_xq_z + q_wq_y) \\\\\\\\\n 2(q_xq_y + q_wq_z) & 1 - 2(q_x^2 + q_z^2) & 2(q_yq_z - q_wq_x) \\\\\\\\\n 2(q_xq_z - q_wq_y) & 2(q_wq_x + q_yq_z) & 1 - 2(q_x^2 + q_y^2)\n \\\\end{bmatrix}\n\n The default value is the unit Quaternion :math:`\\\\mathbf{q} = (1, 0, 0, 0)`,\n which produces a :math:`3 \\\\times 3` Identity matrix :math:`\\\\mathbf{I}_3`.\n\n Parameters\n ----------\n q : array\n Unit quaternion\n\n Returns\n -------\n R : array\n 3-by-3 rotation matrix R.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n .. [2] https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix\n\n \"\"\"\n if q is None:\n return np.identity(3)\n if len(q) != 4:\n return None\n if type(q) is not np.ndarray:\n q = np.asarray(q)\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])\n\ndef q2euler(q):\n \"\"\"\n Convert from a unit Quaternion to Euler Angles.\n\n Parameters\n ----------\n q : array\n Unit quaternion\n\n Returns\n -------\n angles : array\n Euler Angles around X-, Y- and Z-axis.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Quaternion_to_Euler_Angles_Conversion\n\n \"\"\"\n if sum(np.array([1., 0., 0., 0.])-q) == 0.0:\n return np.zeros(3)\n if len(q) != 4:\n return None\n R_00 = 2.0*q[0]**2 - 1.0 + 2.0*q[1]**2\n R_10 = 2.0*(q[1]*q[2] - q[0]*q[3])\n R_20 = 2.0*(q[1]*q[3] + q[0]*q[2])\n R_21 = 2.0*(q[2]*q[3] - q[0]*q[1])\n R_22 = 2.0*q[0]**2 - 1.0 + 2.0*q[3]**2\n phi = np.arctan2( R_21, R_22)\n theta = -np.arctan( R_20/np.sqrt(1.0-R_20**2))\n psi = np.arctan2( R_10, R_00)\n return np.array([phi, theta, psi])\n\ndef rotation(ax=None, ang=0.0):\n \"\"\"\n Return a :math:`3 \\\\times 3` rotation matrix :math:`\\\\mathbf{R} \\\\in SO(3)`\n\n The rotation matrix :math:`\\\\mathbf{R}` [1]_ is created for the given axis\n with the given angle :math:`\\\\theta`. Where the possible rotation axes are:\n\n .. math::\n\n \\\\mathbf{R}_X(\\\\theta) =\n \\\\begin{bmatrix}\n 1 & 0 & 0 \\\\\\\\\n 0 & \\\\cos \\\\theta & -\\\\sin \\\\theta \\\\\\\\\n 0 & \\\\sin \\\\theta & \\\\cos \\\\theta\n \\\\end{bmatrix}\n\n \\\\mathbf{R}_Y(\\\\theta) =\n \\\\begin{bmatrix}\n \\\\cos \\\\theta & 0 & \\\\sin \\\\theta \\\\\\\\\n 0 & 1 & 0 \\\\\\\\\n -\\\\sin \\\\theta & 0 & \\\\cos \\\\theta\n \\\\end{bmatrix}\n\n \\\\mathbf{R}_Z(\\\\theta) =\n \\\\begin{bmatrix}\n \\\\cos \\\\theta & -\\\\sin \\\\theta & 0 \\\\\\\\\n \\\\sin \\\\theta & \\\\cos \\\\theta & 0 \\\\\\\\\n 0 & 0 & 1\n \\\\end{bmatrix}\n\n where :math:`\\\\theta` is a float number representing the angle of rotation\n in degrees.\n\n Parameters\n ----------\n ax : string or int\n Axis to rotate around. Possible are `X`, `Y` or `Z` (upper- or\n lowercase) or the corresponding axis index 0, 1 or 2. Defaults to 'z'.\n angle : float\n Angle, in degrees, to rotate around. Default is 0.\n\n Returns\n -------\n R : ndarray\n 3-by-3 rotation matrix.\n\n Examples\n --------\n >>> from ahrs import quaternion\n >>> quaternion.rotation()\n array([[1. 0. 0.],\n [0. 1. 0.],\n [0. 0. 1.]])\n >>> ahrs.rotation('z', 30.0)\n array([[ 0.8660254 -0.5 0. ],\n [ 0.5 0.8660254 0. 
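# A standalone numeric check of the quaternion-derived rotation matrix used by
# q2R() above: for a 90-degree turn about Z, the formula must reproduce the
# classic Z rotation matrix.
import numpy as np

ang = np.pi / 2
R_z = np.array([[np.cos(ang), -np.sin(ang), 0.0],
                [np.sin(ang),  np.cos(ang), 0.0],
                [0.0, 0.0, 1.0]])
qw, qx, qy, qz = np.cos(ang / 2), 0.0, 0.0, np.sin(ang / 2)
R_q = np.array([
    [1 - 2*(qy**2 + qz**2), 2*(qx*qy - qw*qz), 2*(qx*qz + qw*qy)],
    [2*(qx*qy + qw*qz), 1 - 2*(qx**2 + qz**2), 2*(qy*qz - qw*qx)],
    [2*(qx*qz - qw*qy), 2*(qw*qx + qy*qz), 1 - 2*(qx**2 + qy**2)]])
assert np.allclose(R_z, R_q)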
],\n [ 0. 0. 1. ]])\n >>> # Accepts angle input as string\n ... ahrs.rotation('x', '-30')\n array([[ 1. 0. 0. ],\n [ 0. 0.8660254 0.5 ],\n [ 0. -0.5 0.8660254]])\n\n Handles wrong inputs\n\n >>> ahrs.rotation('false_axis', 'invalid_angle')\n array([[1. 0. 0.],\n [0. 1. 0.],\n [0. 0. 1.]])\n >>> ahrs.rotation(None, None)\n array([[1. 0. 0.],\n [0. 1. 0.],\n [0. 0. 1.]])\n\n References\n ----------\n .. [1] http://mathworld.wolfram.com/RotationMatrix.html\n\n \"\"\"\n # Default values\n valid_axes = list('xyzXYZ')\n I_3 = np.identity(3)\n # Handle input\n if ax is None:\n if ang == 0.0:\n return I_3\n ax = \"z\"\n if type(ax) is int:\n if ax < 0:\n ax = 2 # Negative axes default to 2 (Z-axis)\n ax = valid_axes[ax] if ax < 3 else \"z\"\n try:\n ang = float(ang)\n except:\n return I_3\n # Return 3-by-3 Identity matrix if invalid input\n if ax not in valid_axes:\n return I_3\n # Compute rotation\n ca, sa = cosd(ang), sind(ang)\n if ax.lower() == \"x\":\n return np.array([[1.0, 0.0, 0.0], [0.0, ca, -sa], [0.0, sa, ca]])\n if ax.lower() == \"y\":\n return np.array([[ca, 0.0, sa], [0.0, 1.0, 0.0], [-sa, 0.0, ca]])\n if ax.lower() == \"z\":\n return np.array([[ca, -sa, 0.0], [sa, ca, 0.0], [0.0, 0.0, 1.0]])\n\ndef rot_seq(axes=None, angles=None):\n \"\"\"\n Return a :math:`3 \\\\times 3` rotation matrix :math:`\\\\mathbf{R} \\\\in SO(3)`\n from given set of axes and angles.\n\n The rotation matrix :math:`\\\\mathbf{R}` is created from the given list of\n angles rotating around the given axes order.\n\n Parameters\n ----------\n axes : list of str\n List of rotation axes.\n angles : list of floats\n List of rotation angles.\n\n Returns\n -------\n R : ndarray\n Rotation matrix.\n\n Examples\n --------\n >>> import numpy as np\n >>> import random\n >>> from ahrs import quaternion\n >>> num_rotations = 5\n >>> axis_order = random.choices(\"XYZ\", k=num_rotations)\n >>> axis_order\n ['Z', 'Z', 'X', 'Z', 'Y']\n >>> angles = np.random.uniform(low=-180.0, high=180.0, size=num_rotations)\n >>> angles\n array([-139.24498146, 99.8691407, -171.30712526, -60.57132043,\n 17.4475838 ])\n >>> R = quaternion.rot_seq(axis_order, angles)\n >>> R # R = R_z(-139.24) R_z(99.87) R_x(-171.31) R_z(-60.57) R_y(17.45)\n array([[ 0.85465231 0.3651317 0.36911822]\n [ 0.3025091 -0.92798938 0.21754072]\n [ 0.4219688 -0.07426006 -0.90356393]])\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Rotation_matrix#General_rotations\n .. [2] https://en.wikipedia.org/wiki/Euler_angles\n\n \"\"\"\n accepted_axes = list('xyzXYZ')\n R = np.identity(3)\n if type(axes) is not list:\n axes = list(axes)\n num_rotations = len(axes)\n if num_rotations < 1:\n return R\n valid_given_axes = set(axes).issubset(set(accepted_axes))\n if valid_given_axes:\n # Perform the matrix multiplications\n for i in range(num_rotations-1, -1, -1):\n R = rotation(axes[i], angles[i])@R\n return R\n\ndef R2q(R=None, eta=0.0):\n \"\"\"\n Compute a Quaternion from a rotation matrix\n\n Use Shepperd's voting scheme to compute the corresponding Quaternion q from\n a given rotation matrix R. Optimized by Sarabandi et al.\n\n References\n ----------\n .. [1] Sarabandi, S. et al. 
(2018) Accurate Computation of Quaternions\n from Rotation Matrices.\n (http://www.iri.upc.edu/files/scidoc/2068-Accurate-Computation-of-Quaternions-from-Rotation-Matrices.pdf)\n\n \"\"\"\n if R is None:\n R = np.identity(3)\n # Get elements of R\n r11, r12, r13 = R[0][0], R[0][1], R[0][2]\n r21, r22, r23 = R[1][0], R[1][1], R[1][2]\n r31, r32, r33 = R[2][0], R[2][1], R[2][2]\n # Compute qw\n d_w = r11+r22+r33\n if d_w > eta:\n q_w = 0.5*np.sqrt(1.0+d_w)\n else:\n nom = (r32-r23)**2+(r13-r31)**2+(r21-r12)**2\n q_w = 0.5*np.sqrt(nom/(3.0-d_w))\n # Compute qx\n d_x = r11-r22-r33\n if d_x > eta:\n q_x = 0.5*np.sqrt(1.0+d_x)\n else:\n nom = (r32-r23)**2+(r12+r21)**2+(r31+r13)**2\n q_x = 0.5*np.sqrt(nom/(3.0-d_x))\n # Compute qy\n d_y = -r11+r22-r33\n if d_y > eta:\n q_y = 0.5*np.sqrt(1.0+d_y)\n else:\n nom = (r13-r31)**2+(r12+r21)**2+(r23+r32)**2\n q_y = 0.5*np.sqrt(nom/(3.0-d_y))\n # Compute qz\n d_z = -r11-r22+r33\n if d_z > eta:\n q_z = 0.5*np.sqrt(1.0+d_z)\n else:\n nom = (r21-r12)**2+(r31+r13)**2+(r23+r32)**2\n q_z = 0.5*np.sqrt(nom/(3.0-d_z))\n # Assign signs\n if q_w >= 0.0:\n q_x *= np.sign(r32-r23)\n q_y *= np.sign(r13-r31)\n q_z *= np.sign(r21-r12)\n else:\n q_w *= -1.0\n q_x *= -np.sign(r32-r23)\n q_y *= -np.sign(r13-r31)\n q_z *= -np.sign(r21-r12)\n # Return values of quaternion\n return np.asarray([q_w, q_x, q_y, q_z])\n\ndef dcm2quat(R):\n \"\"\"\n Return a unit quaternion from a given Direct Cosine Matrix.\n\n Parameters\n ----------\n R : array\n Direct Cosine Matrix.\n\n Returns\n -------\n q : array\n Unit Quaternion.\n\n References\n ----------\n .. [1] F. Landis Markley. Attitude Determination using two Vector\n Measurements.\n\n \"\"\"\n if(R.shape[0] != R.shape[1]):\n raise ValueError('Input is not a square matrix')\n if(R.shape[0] != 3):\n raise ValueError('Input needs to be a 3x3 array or matrix')\n q = np.array([1., 0., 0., 0.])\n q[0] = 0.5*np.sqrt(1.0 + R[0, 0] + R[1, 1] + R[2, 2])\n # qw4 = 4.0*q[0]\n q[1] = (R[1, 2] - R[2, 1]) / q[0]\n q[2] = (R[2, 0] - R[0, 2]) / q[0]\n q[3] = (R[0, 1] - R[1, 0]) / q[0]\n q[1:] /= 4.0\n return q / np.linalg.norm(q)\n\ndef cardan2q(angles, in_deg=False):\n \"\"\"\n Return a Quaternion from given cardan angles with order: roll, pitch, yaw.\n Where roll is the first rotation (about X-axis), pitch is the second\n rotation (about Y-axis), and yaw is the last rotation (about Z-axis.)\n\n Parameters\n ----------\n angles : array\n Cardan angles.\n\n Returns\n -------\n q : array\n Quaternion.\n\n \"\"\"\n if angles.shape[-1] != 3:\n return None\n if in_deg:\n angles *= DEG2RAD\n cr = np.cos(0.5*angles[0])\n sr = np.sin(0.5*angles[0])\n cp = np.cos(0.5*angles[1])\n sp = np.sin(0.5*angles[1])\n cy = np.cos(0.5*angles[2])\n sy = np.sin(0.5*angles[2])\n # To Quaternion\n q = np.array([\n cy*cp*cr + sy*sp*sr,\n cy*cp*sr - sy*sp*cr,\n sy*cp*sr + cy*sp*cr,\n sy*cp*cr - cy*sp*sr])\n q /= np.linalg.norm(q)\n return q\n\ndef q2cardan(q):\n \"\"\"\n Return the cardan angles from a given quaternion, where the angles have the\n order: roll, pitch, yaw.\n\n Roll is the first rotation (about X-axis), pitch is the second\n rotation (about Y-axis), and yaw is the last rotation (about Z-axis.)\n\n Parameters\n ----------\n q : array\n Quaternion.\n\n Returns\n -------\n angles : array\n Cardan angles.\n \"\"\"\n if q.shape[-1] != 4:\n return None\n roll = np.arctan2(2.0*(q[0]*q[1] + q[2]*q[3]), 1.0 - 2.0*(q[1]**2 + q[2]**2))\n pitch = np.arcsin(2.0*(q[0]*q[2] - q[3]*q[1]))\n yaw = np.arctan2(2.0*(q[0]*q[3] + q[1]*q[2]), 1.0 - 2.0*(q[2]**2 + q[3]**2))\n 
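# stack the Tait-Bryan angles in roll-pitch-yaw order\n 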
return np.array([roll, pitch, yaw])\n\ndef am2q(a, m):\n \"\"\"\n Estimate pose from given acceleration and/or compass using Michel-method.\n\n Parameters\n ----------\n a : array\n Array of single sample of 3 orthogonal accelerometers.\n m : array\n Array of single sample of 3 orthogonal magnetometers.\n\n Returns\n -------\n pose : array\n Estimated Quaternion\n\n References\n ----------\n .. [1] Michel, T. et al. (2018) Attitude Estimation for Indoor\n Navigation and Augmented Reality with Smartphones.\n (http://tyrex.inria.fr/mobile/benchmarks-attitude/)\n (https://hal.inria.fr/hal-01650142v2/document)\n .. [2] Janota, A. Improving the Precision and Speed of Euler Angles\n Computation from Low-Cost Rotation Sensor Data.\n (https://www.mdpi.com/1424-8220/15/3/7016/pdf)\n\n \"\"\"\n if m is None:\n m = np.array([0.0, 0.0, 0.0])\n if type(a) != np.ndarray:\n a = np.array(a)\n if type(m) != np.ndarray:\n m = np.array(m)\n H = np.cross(m, a)\n H /= np.linalg.norm(H)\n a /= np.linalg.norm(a)\n M = np.cross(a, H)\n # ENU\n R = np.array([[H[0], M[0], a[0]],\n [H[1], M[1], a[1]],\n [H[2], M[2], a[2]]])\n q = dcm2quat(R)\n return q\n\ndef acc2q(a, return_euler=False):\n \"\"\"\n Estimate pose from given acceleration and/or compass.\n\n Parameters\n ----------\n a : array\n A sample of 3 orthogonal accelerometers.\n m : array\n A sample of 3 orthogonal magnetometers.\n return_euler : bool\n Return pose as Euler angles\n\n Returns\n -------\n pose : array\n Estimated Quaternion or Euler Angles\n\n References\n ----------\n .. [1] Michel, T. et al. (2018) Attitude Estimation for Indoor\n Navigation and Augmented Reality with Smartphones.\n (http://tyrex.inria.fr/mobile/benchmarks-attitude/)\n (https://hal.inria.fr/hal-01650142v2/document)\n .. [2] Zhang, H. et al (2015) Axis-Exchanged Compensation and Gait\n Parameters Analysis for High Accuracy Indoor Pedestrian Dead Reckoning.\n (https://www.researchgate.net/publication/282535868_Axis-Exchanged_Compensation_and_Gait_Parameters_Analysis_for_High_Accuracy_Indoor_Pedestrian_Dead_Reckoning)\n .. [3] Yun, X. et al. (2008) A Simplified Quaternion-Based Algorithm for\n Orientation Estimation From Earth Gravity and Magnetic Field Measurements.\n (https://apps.dtic.mil/dtic/tr/fulltext/u2/a601113.pdf)\n .. [4] Jung, D. et al. Inertial Attitude and Position Reference System\n Development for a Small UAV.\n (https://pdfs.semanticscholar.org/fb62/903d8e6c051c8f4780c79b6b18fbd02a0ff9.pdf)\n .. [5] Bleything, T. How to convert Magnetometer data into Compass Heading.\n (https://blog.digilentinc.com/how-to-convert-magnetometer-data-into-compass-heading/)\n .. [6] RT IMU Library. (https://github.com/RTIMULib/RTIMULib2/blob/master/RTIMULib/RTFusion.cpp)\n .. [7] Janota, A. Improving the Precision and Speed of Euler Angles\n Computation from Low-Cost Rotation Sensor Data. (https://www.mdpi.com/1424-8220/15/3/7016/pdf)\n .. [8] Trimpe, S. Accelerometer -based Tilt Estimation of a Rigid Body\n with only Rotational Degrees of Freedom. 
2010.\n (http://www.idsc.ethz.ch/content/dam/ethz/special-interest/mavt/dynamic-systems-n-control/idsc-dam/Research_DAndrea/Balancing%20Cube/ICRA10_1597_web.pdf)\n\n \"\"\"\n qw, qx, qy, qz = 1.0, 0.0, 0.0, 0.0\n ex, ey, ez = 0.0, 0.0, 0.0\n if len(a) == 3:\n ax, ay, az = a\n # Normalize accelerometer measurements\n a_norm = np.linalg.norm(a)\n ax /= a_norm\n ay /= a_norm\n az /= a_norm\n # Euler Angles from Gravity vector\n ex = np.arctan2(ay, az) - np.pi\n if ex*RAD2DEG < -180.0:\n ex += 2.0*np.pi\n ey = np.arctan2(-ax, np.sqrt(ay*ay + az*az))\n ez = 0.0\n if return_euler:\n return np.array([ex, ey, ez])*RAD2DEG\n # Euler to Quaternion\n cx2 = np.cos(ex/2.0)\n sx2 = np.sin(ex/2.0)\n cy2 = np.cos(ey/2.0)\n sy2 = np.sin(ey/2.0)\n qrw = cx2*cy2\n qrx = sx2*cy2\n qry = cx2*sy2\n qrz = -sx2*sy2\n # Normalize reference Quaternion\n q_norm = np.linalg.norm([qrw, qrx, qry, qrz])\n qw = qrw/q_norm\n qx = qrx/q_norm\n qy = qry/q_norm\n qz = qrz/q_norm\n return np.array([qw, qx, qy, qz])\n\ndef am2angles(a, m, in_deg=False):\n \"\"\"\n Estimate pose from given acceleration and compass.\n\n Parameters\n ----------\n a : array\n N-by-3 array with N samples of 3 orthogonal accelerometers.\n m : array\n N-by-3 array with N samples of 3 orthogonal magnetometers.\n\n Returns\n -------\n pose : array\n Estimated Direct Cosine Matrix\n\n References\n ----------\n .. [DT0058] A. Vitali. Computing tilt measurement and tilt-compensated\n e-compass. ST Technical Document DT0058. October 2018.\n (https://www.st.com/resource/en/design_tip/dm00269987.pdf)\n \"\"\"\n # Normalization of 2D arrays\n a /= np.linalg.norm(a, axis=1)[:, None]\n m /= np.linalg.norm(m, axis=1)[:, None]\n angles = np.zeros((len(a), 3)) # Allocation of angles array\n # Estimate tilt angles\n angles[:, 0] = np.arctan2(a[:, 1], a[:, 2])\n angles[:, 1] = np.arctan2(-a[:, 0], np.sqrt(a[:, 1]**2 + a[:, 2]**2))\n # Estimate heading angle\n my2 = m[:, 2]*np.sin(angles[:, 0]) - m[:, 1]*np.cos(angles[:, 0])\n mz2 = m[:, 1]*np.sin(angles[:, 0]) + m[:, 2]*np.cos(angles[:, 0])\n mx3 = m[:, 0]*np.cos(angles[:, 1]) + mz2*np.sin(angles[:, 1])\n angles[:, 2] = np.arctan2(my2, mx3)\n # Return in degrees or in radians\n if in_deg:\n return angles*RAD2DEG\n return angles\n\ndef triad(a, m, V1=None, V2=None, **kw):\n \"\"\"\n Estimate pose from given acceleration and compass using TRIAD method.\n\n Parameters\n ----------\n a : array\n First 3-by-1 observation vector in body frame. Usually is normalized\n acceleration vector a = [ax ay az]^T\n m : array\n Second 3-by-1 observation vector in body frame. Usually is normalized\n magnetic field vector m = [mx my mz]^T\n V1 : array\n 3-by-1 Reference vector 1. Defaults to gravity in navigation frame\n g = [0 0 1]^T\n V2 : array\n 3-by-1 Reference vector 2. Defaults to magnetic field in navigation\n frame m = [cos(dip) 0 sin(dip)]^T, where dip is the magnetic dip in\n local latitude\n\n Extra Parameters\n ----------------\n dip : float\n Magnetic dip in local latitude. Defaults to 66.47° corresponding to\n Germany.\n\n Returns\n -------\n pose : array\n Estimated Direct Cosine Matrix\n\n References\n ----------\n .. [TRIAD] M.D. Shuster et al. Three-Axis Attitude Determination from\n Vector Observations. Journal of Guidance and Control. Volume 4. Number 1.\n 1981. Page 70 (http://www.malcolmdshuster.com/Pub_1981a_J_TRIAD-QUEST_scan.pdf)\n .. [Shuster] M.D. Shuster. Deterministic Three-Axis Attitude Determination.\n The Journal of the Astronautical Sciences. Vol 52. Number 3. September\n 2004. 
Pages 405-419 (http://www.malcolmdshuster.com/Pub_2004c_J_dirangs_AAS.pdf)\n .. [WikiTRIAD] Triad method in Wikipedia. (https://en.wikipedia.org/wiki/Triad_method)\n .. [Garcia] H. Garcia de Marina et al. UAV attitude estimation using\n Unscented Kalman Filter and TRIAD. IEE 2016. (https://arxiv.org/pdf/1609.07436.pdf)\n .. [CHall4] Chris Hall. Spacecraft Attitude Dynamics and Control.\n Chapter 4: Attitude Determination. 2003.\n (http://www.dept.aoe.vt.edu/~cdhall/courses/aoe4140/attde.pdf)\n .. [iitbTRIAD] IIT Bombay Student Satellite Team. Triad Algorithm.\n (https://www.aero.iitb.ac.in/satelliteWiki/index.php/Triad_Algorithm)\n .. [MarkleyTRIAD] F.L. Markley et al. Fundamentals of Spacecraft Attitude\n Determination and Control. 2014. Pages 184-186.\n \"\"\"\n if V1 is None:\n V1 = np.array([[0.], [0.], [1.]])\n if V2 is None:\n dip = kw.get('dip', 66.47)\n V2 = np.array([[cosd(dip)], [0.0], [sind(dip)]])\n # Normalized Observations\n W1 = np.array(a / np.linalg.norm(a)).reshape((3, 1))\n W2 = np.array(m / np.linalg.norm(m)).reshape((3, 1))\n # First Triad\n s2 = np.cross(W1, W2, axis=0) / np.linalg.norm(np.cross(W1, W2, axis=0))\n s3 = np.cross(W1, np.cross(W1, W2, axis=0), axis=0) / np.linalg.norm(np.cross(W1, W2, axis=0))\n # Second Triad\n r2 = np.cross(V1, V2, axis=0) / np.linalg.norm(np.cross(V1, V2, axis=0))\n r3 = np.cross(V1, np.cross(V1, V2, axis=0), axis=0) / np.linalg.norm(np.cross(V1, V2, axis=0))\n # Solve TRIAD\n Mobs = np.hstack((W1, s2, s3))\n Mref = np.hstack((V1, r2, r3))\n # return Mref@Mobs.T\n return Mobs@Mref.T\n\ndef quest(fb, mb, fn, mn, wf=1.0, wm=1.0):\n \"\"\"\n Estimate pose from given acceleration and compass using the QUEST method.\n\n Parameters\n ----------\n fb : array\n 3-by-1 Observation vector 1 in body frame. Usually is gravity vector\n g = [0 0 1]^T\n mb : array\n 3-by-1 Observation vector 2 in body frame. Usually is magnetic field\n m = [cos(dip) 0 sin(dip)]^T, where dip is the magnetic dip in local\n latitude\n fn : array\n 3-by-1 Reference vector 1. Usually gravity in navigation frame\n mn : array\n 3-by-1 Reference vector 2. Usually magnetic field in navigation frame\n wf : float\n Weight of the first observation. Default is 1.0\n wm : float\n Weight of the second observation. Default is 1.0\n\n Returns\n -------\n pose : array\n Estimated attitude as a unit Quaternion\n\n References\n ----------\n .. [TRIAD] M.D. Shuster et al. Three-Axis Attitude Determination from\n Vector Observations. Journal of Guidance and Control. Volume 4. Number 1.\n 1981. Page 70 (http://www.malcolmdshuster.com/Pub_1981a_J_TRIAD-QUEST_scan.pdf)\n .. [Shuster] M.D. Shuster. Deterministic Three-Axis Attitude Determination.\n The Journal of the Astronautical Sciences. Vol 52. Number 3. September\n 2004. Pages 405-419 (http://www.malcolmdshuster.com/Pub_2004c_J_dirangs_AAS.pdf)\n .. [WikiTRIAD] Triad method in Wikipedia. (https://en.wikipedia.org/wiki/Triad_method)\n .. [Garcia] H. Garcia de Marina et al. UAV attitude estimation using\n Unscented Kalman Filter and TRIAD. IEE 2016. (https://arxiv.org/pdf/1609.07436.pdf)\n .. [CHall4] Chris Hall. Spacecraft Attitude Dynamics and Control.\n Chapter 4: Attitude Determination. 2003.\n (http://www.dept.aoe.vt.edu/~cdhall/courses/aoe4140/attde.pdf)\n .. [MarkleyQUEST] F.L. Markley et al. Fundamentals of Spacecraft Attitude\n Determination and Control. 2014. 
Pages 189-191.\n \"\"\"\n fb = np.array(fb / np.linalg.norm(fb)).reshape((3, 1))\n mb = np.array(mb / np.linalg.norm(mb)).reshape((3, 1))\n fn = np.array(fn / np.linalg.norm(fn)).reshape((3, 1))\n mn = np.array(mn / np.linalg.norm(mn)).reshape((3, 1))\n # K matrix\n B = wf*(fb@fn.T) + wm*(mb@mn.T)\n K11 = B + B.T - np.eye(3)*np.trace(B)\n K22 = np.trace(B)\n K12 = wf*np.cross(fb, fn, axis=0) + wm*np.cross(mb, mn, axis=0)\n K21 = K12.T\n K = np.vstack((np.hstack((K11, K12)), np.hstack((K21, [[K22]]))))\n # Find eigenvalues and eigenvectors\n eigvals, eigvecs = np.linalg.eig(K)\n # Look for the largest eigenvalue\n index = np.argmax(eigvals)\n q = eigvecs[:, index]\n return q/np.linalg.norm(q)\n\ndef slerp(q0, q1, t_array, **kwargs):\n \"\"\"\n Spherical Linear Interpolation between quaternions.\n\n Return a valid quaternion rotation at a specified distance along the minor\n arc of a great circle passing through any two existing quaternion endpoints\n lying on the unit radius hypersphere.\n\n Based on the method detailed in [Wiki_SLERP]_\n\n Parameters\n ----------\n q0 : array\n First endpoint quaternion.\n q1 : array\n Second endpoint quaternion.\n t_array : array\n Array of times to interpolate to.\n threshold : float\n Threshold to closeness of interpolation.\n\n Returns\n -------\n q : array\n New quaternion representing the interpolated rotation.\n\n References\n ----------\n .. [Wiki_SLERP] https://en.wikipedia.org/wiki/Slerp\n\n \"\"\"\n threshold = kwargs.get('threshold', 0.9995)\n t_array = np.array(t_array)\n v0 = np.array(q0)\n v1 = np.array(q1)\n qdot = np.sum(v0*v1)\n # Ensure SLERP takes the shortest path\n if qdot < 0.0:\n v1 *= -1.0\n qdot *= -1.0\n # Interpolate linearly\n if qdot > threshold:\n result = v0[np.newaxis, :] + t_array[:, np.newaxis]*(v1 - v0)[np.newaxis, :]\n return (result.T / np.linalg.norm(result, axis=1)).T\n # Angle between vectors\n theta_0 = np.arccos(qdot)\n sin_theta_0 = np.sin(theta_0)\n theta = theta_0*t_array\n sin_theta = np.sin(theta)\n s0 = np.cos(theta) - qdot*sin_theta/sin_theta_0\n s1 = sin_theta/sin_theta_0\n return s0[:,np.newaxis]*v0[np.newaxis,:] + s1[:,np.newaxis]*v1[np.newaxis,:]\n","sub_path":"ahrs/common/orientation.py","file_name":"orientation.py","file_ext":"py","file_size_in_byte":36577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"286696185","text":"#!/usr/bin/python3\n\"\"\"\nmodule for amenity views\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, request, abort\nfrom models import storage\nfrom models.amenity import Amenity\n\n\n@app_views.route('/amenities', methods=['GET'], strict_slashes=False)\ndef amenities():\n \"\"\" Retrieves the list of all Amenity objects \"\"\"\n amenities = storage.all(\"Amenity\")\n result = []\n for amenity in amenities.values():\n result.append(amenity.to_dict())\n return jsonify(result)\n\n\n@app_views.route('/amenities/<amenity_id>', methods=['GET'],\n strict_slashes=False)\ndef get_amenity(amenity_id):\n \"\"\" Retrieves an Amenity object \"\"\"\n amenities = storage.all(\"Amenity\")\n for key in amenities.keys():\n if key.split('.')[-1] == amenity_id:\n return jsonify(amenities.get(key).to_dict())\n abort(404)\n\n\n@app_views.route('/amenities/<amenity_id>',\n methods=['DELETE'], strict_slashes=False)\ndef delete_amenity(amenity_id):\n \"\"\" Deletes an Amenity object \"\"\"\n amenities = storage.all(\"Amenity\")\n for key in amenities.keys():\n if key.split('.')[-1] == amenity_id:\n storage.delete(amenities.get(key))\n 
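# commit the removal so the change persists before returning an empty body\n 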
storage.save()\n return jsonify({}), 200\n abort(404)\n\n\n@app_views.route('/amenities', methods=['POST'], strict_slashes=False)\ndef post_amenity():\n \"\"\" Creates an Amenity \"\"\"\n dic = request.get_json()\n if not dic:\n abort(400, \"Not a JSON\")\n if not ('name' in dic.keys()):\n abort(400, \"Missing name\")\n amenity = Amenity(**dic)\n amenity.save()\n return jsonify(amenity.to_dict()), 201\n\n\n@app_views.route('/amenities/<amenity_id>', methods=['PUT'],\n strict_slashes=False)\ndef put_amenity(amenity_id):\n \"\"\" Updates an Amenity object \"\"\"\n amenities = storage.all(\"Amenity\")\n amenity = None\n for key in amenities.keys():\n if key.split('.')[-1] == amenity_id:\n amenity = amenities.get(key)\n if not amenity:\n abort(404)\n new_dict = request.get_json()\n if not new_dict:\n abort(400, \"Not a JSON\")\n for key, value in new_dict.items():\n if key in ('id', 'created_at', 'updated_at'):\n continue\n else:\n setattr(amenity, key, value)\n amenity.save()\n return jsonify(amenity.to_dict()), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"85941547","text":"import pymodbus\nfrom pymodbus.client.sync import ModbusTcpClient as ModbusClient\n\nclient=ModbusClient('192.168.4.29')\nclient.connect()\n\n\n#read inputs:\ndata = client.read_input_registers(6,4)\nch0 = (data.registers[0]/20000.0)*10\nch1 = (data.registers[1]/20000.0)*10\nch2 = (data.registers[2]/20000.0)*10\nch3 = (data.registers[3]/20000.0)*10\n\nprint(\"Channel 0 Input Voltage\",ch0)\nprint(\"Channel 1 Input Voltage\",ch1)\nprint(\"Channel 2 Input Voltage\",ch2)\nprint(\"Channel 3 Input Voltage\",ch3)\n","sub_path":"acromag_952.py","file_name":"acromag_952.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"550231919","text":"import csv\r\ndef cooccurrence (user_tag_file, group_tag_file,use_user_tag, use_group_tag,cooccurrence_matrix_file):\r\n list = {}\r\n if (use_user_tag==1):\r\n with open(user_tag_file, 'r') as tag_id_f:\r\n myreader = csv.reader(tag_id_f, delimiter=',')\r\n for row in myreader:\r\n if (row[0] in list):\r\n list[row[0]].append(row[1])\r\n else:\r\n list[row[0]] = [row[1]]\r\n\r\n if (use_group_tag==1):\r\n with open(group_tag_file, 'r') as tag_id_f:\r\n myreader = csv.reader(tag_id_f, delimiter=',')\r\n for row in myreader:\r\n if (row[0] in list):\r\n list[row[0]].append(row[1])\r\n else:\r\n list[row[0]] = [row[1]]\r\n\r\n print(list)\r\n matix = {}\r\n all_occurances = {}\r\n for word_set_key in list:\r\n word_set_value = list[word_set_key]\r\n word_set_len = len(word_set_value)\r\n for index1 in range (0 , word_set_len):\r\n for index2 in range (0 , word_set_len):\r\n if (index1 == index2):\r\n continue\r\n if (word_set_value[index1],word_set_value[index2]) in matix:\r\n matix[word_set_value[index1],word_set_value[index2]] = matix[word_set_value[index1],word_set_value[index2]] +1\r\n #all_occurances[word_set_value[index1]] =all_occurances[word_set_value[index1]]+ 1\r\n else:\r\n matix[word_set_value[index1],word_set_value[index2]] = 1\r\n #all_occurances[word_set_value[index1]] = 1\r\n\r\n if word_set_value[index1] in all_occurances:\r\n all_occurances[word_set_value[index1]] =all_occurances[word_set_value[index1]]+ 1\r\n else:\r\n all_occurances[word_set_value[index1]] = 1\r\n\r\n\r\n\r\n\r\n #print(matix)\r\n 
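# the loop below turns raw pair counts into conditional frequencies CM[t1, t2] = count(t1, t2) / count(t1)\r\n 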
#print(all_occurances)\r\n\r\n cooccurrence_matrix={}\r\n for word_pair_key in matix:\r\n #print(word_pair_key[0])\r\n cooccurrence_matrix[str(word_pair_key[0]),str(word_pair_key[1])] = matix[str(word_pair_key[0]),str(word_pair_key[1])] / all_occurances[str(word_pair_key[0])]\r\n cooccurrence_matrix[str(word_pair_key[0]),str(word_pair_key[0])]=0#1\r\n cooccurrence_matrix[str(word_pair_key[1]),str(word_pair_key[1])]=0#1\r\n\r\n\r\n csvFile = open(cooccurrence_matrix_file, 'w')\r\n for key, value in cooccurrence_matrix.items():\r\n csvFile.write(key[0]+','+key[1]+','+str( value)+'\\n')\r\n\r\n mydict = {}\r\n csvFile = open(cooccurrence_matrix_file, 'r')\r\n for line in csvFile:\r\n splitted = line.split(',')\r\n mydict[(splitted[0],splitted[1])] = float(splitted[2].replace('\\n',''))\r\n\r\n\r\n #with open(cooccurrence_matrix_file, newline='') as csvfile:\r\n # spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n # for row in spamreader:\r\n # print(', '.join(row))\r\n #with open(, 'r') as csvfile:\r\n # CM_reader = csv.reader(csvfile, delimiter='', quotechar='|')\r\n # for row in CM_reader:\r\n # if len(row)>0:\r\n # print(row)\r\n\r\n\r\n\r\n\r\n #print(cooccurrence_matrix)\r\n #print(mydict)\r\n\r\n#Folder_address = 'E:/0GWU/Internship/Group10/Meetup Dataset/CSV_REX/'\r\n#Folder_address = 'E:/0GWU/Internship/Group10/Meetup Dataset/fake_dataset/'\r\n#cooccurrence(Folder_address+'user_tag.csv',Folder_address+'group_tag.csv.EDITED.csv',0,1,Folder_address+'CM.csv')\r\n\r\n","sub_path":"Codes - 5-20-2016/cooccurrence.py","file_name":"cooccurrence.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543462541","text":"#!/usr/bin/env python3\r\n\r\n# Copyright 2017 Juan Luis Álvarez Martínez\r\n# \r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n# \r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n# \r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# Convert the dataset CSV files into usable C headers.\r\n\r\nimport sys\r\n\r\nUSAGE = '''\\\r\nUsage: csv-to-header.py VAR_NAME INPUT_FILE OUTPUT_HEADER \\\r\n'''\r\n\r\nHEADER_BOILERPLATE = '''/*\r\n ------------------------------------------\r\n --- Autogenerated file, do not modify! 
---\r\n ------------------------------------------\r\n*/\r\n'''\r\n\r\n# ------------\r\n# --- Main ---\r\n# ------------\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 4:\r\n print(USAGE)\r\n exit()\r\n \r\n var_name = sys.argv[1]\r\n in_path = sys.argv[2]\r\n out_path = sys.argv[3]\r\n \r\n in_text = ''\r\n \r\n with open(in_path, 'rb') as f:\r\n in_text = str(f.read(), 'utf-8')\r\n f.close()\r\n \r\n #convert \\r\\n to \\n\r\n in_text = in_text.replace('\\r\\n', '\\n')\r\n \r\n #Split by line\r\n in_lines = in_text.split('\\n')\r\n \r\n #process lines\r\n out_lines = []\r\n for line in in_lines:\r\n if line.startswith('#'):\r\n #Line is a comment\r\n out_lines.append(\" /* %s */\" % line[1:])\r\n elif len(line) == 0:\r\n pass\r\n else:\r\n #Lines are numbers\r\n #Trim trailing comma, add leading 0x\r\n line = ' 0x' + line[:-1]\r\n \r\n #Replace all commas with ', 0x'\r\n line = line.replace(',', ', 0x')\r\n \r\n #Re-add the trailing comma\r\n out_lines.append(line + ',')\r\n \r\n #Write out header, lines, tail\r\n with open(out_path, 'wb') as f:\r\n f.write(bytes(HEADER_BOILERPLATE, 'utf-8'))\r\n f.write( bytes('#ifndef __' + var_name.upper() + '\\n', 'utf-8') )\r\n f.write( bytes('#define __' + var_name.upper() + '\\n', 'utf-8') )\r\n f.write( bytes('#include ' + '\\n', 'utf-8') )\r\n f.write( bytes('static const uint16_t ' + var_name + '[] = {' + '\\n', 'utf-8') )\r\n \r\n for line in out_lines:\r\n f.write(bytes(line + '\\n', 'utf-8'))\r\n \r\n f.write( bytes('};\\n', 'utf-8') )\r\n f.write( bytes('#endif\\n', 'utf-8') )\r\n f.close()\r\n ","sub_path":"scripts/csv-to-header.py","file_name":"csv-to-header.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324993039","text":"\r\nBasicMaterials = ['ALB', 'APD', 'CE', 'CF', 'CTVA', 'DD', 'ECL', 'EMN', 'FCX',\r\n 'FMC', 'IFF', 'LIN', 'MLM', 'MOS', 'NEM', 'PPG', 'SHW', 'DOW',\r\n 'LYB', 'NUE', 'VMC']\r\n\r\nCommunicationServices = ['ATVI', 'CHTR', 'CMCSA', 'CTL', 'DIS', 'DISCA', 'DISCK',\r\n 'EA', 'FB', 'FOX', 'GOOG', 'GOOGL', 'NFLX', 'T', 'TMUS',\r\n 'TTWO', 'TWTR', 'DISH', 'FOXA', 'IPG', 'LYV', 'NWS', \r\n 'NWSA', 'OMC', 'VZ', 'VIAC']\r\n\r\n\r\nConsumerCyclical = ['AAP', 'AMCR', 'AMZN', 'APTV', 'AZO', 'BBY', 'BLL', 'BKNG',\r\n 'BWA', 'CCL', 'CMG', 'CPRI', 'DHI', 'EBAY', 'EXPE', 'F', \r\n 'FBHS', 'GM', 'GPC', 'GPS', 'HAS', 'HBI', 'HD', 'HLT', \r\n 'HOG', 'HRB', 'LEG', 'LEN', 'LKQ', 'JWN', 'MCD', 'MGM', \r\n 'MHK', 'NCLH', 'NKE', 'PVH', 'ROL', 'ROST', 'SBUX', 'SEE',\r\n 'TIF', 'TJX', 'TPR', 'TSCO', 'UA', 'UAA', 'WRK', 'WYNN', \r\n 'YUM', 'DRI', 'IP', 'KMX', 'KSS', 'LB', 'LOW', 'LVS', 'M',\r\n 'MAR', 'NVR', 'ORLY', 'RCL', 'WHR', 'VFC', 'ULTA', 'RL', \r\n 'PHM', 'PKG']\r\n\r\n\r\nConsumerDefensive = ['ADM', 'CAG', 'CHD', 'CL', 'CLX', 'COST', 'COTY', 'CPB', \r\n 'DG', 'EL', 'HRL', 'HSY', 'K', 'KHC', 'MDLZ', 'MKC', \r\n 'MNST', 'MO', 'PM', 'STZ', 'SYY', 'TAP', 'TGT', 'TSN', \r\n 'WMT', 'GIS', 'KO', 'KR', 'KMB', 'LW', 'NWL', 'SJM', \r\n 'PEP', 'PG']\r\n\r\n\r\nEnergy = ['APA', 'BKR', 'COG', 'COP', 'CVX', 'CXO', 'DVN', 'FANG', 'HAL', \r\n 'HES', 'HFC', 'HP', 'MPC', 'MRO', 'NBL', 'NOV', 'PSX', 'XOM', \r\n 'FTI', 'EOG', 'KMI', 'OXY', 'PXD', 'WMB', 'VLO', 'SLB']\r\n\r\nFinancialServices = ['ADS', 'AJG', 'AON', 'BEN', 'BLK', 'CBOE', 'CME',\r\n 'ICE', 'MCO', 'MMC', 'MSCI', 'NDAQ', \r\n 'TROW', 'WU', 'IVZ', 'MA', 'PYPL', 'SPGI',\r\n 'V', 'WLTW']\r\n\r\nHealthcare = ['A', 'ABBV', 'ABC', 'ABMD', 'ABT', 'AGN', 'ALGN', 'ALXN', \r\n 'AMGN', 'ANTM', 
'BAX', 'BDX', 'BMY', 'BSX', 'BIIB', 'CAH',\r\n 'CERN', 'CI', 'CNC', 'COO', 'CVS', 'DGX', 'DHR', 'HCA', 'HOLX',\r\n 'HSIC', 'HUM', 'IDXX', 'ILMN', 'INCY', 'LH', 'LLY', 'JNJ', 'MCK',\r\n 'MDT', 'MRK', 'MTD', 'MYL', 'PRGO', 'SYK', 'TFX', 'TMO', 'XRAY',\r\n 'ZBH', 'ZTS', 'DVA', 'GILD', 'EW', 'IQV', 'ISRG', 'STE', 'VAR', \r\n 'VRTX', 'WAT', 'WBA', 'UHS', 'UNH', 'RMD', 'PFE', 'PKI']\r\n\r\nIndustrials = ['IR', 'AAL', 'ADP', 'ALK', 'ALLE', 'AME', 'AOS', 'ARNC', 'AVY',\r\n 'BA', 'CAT', 'CHRW', 'CMI', 'CPRT', 'CSX', 'CTAS', 'DAL', 'DE',\r\n 'EFX', 'EXPD', 'FAST', 'FDX', 'FLS', 'GPN', 'GWW', 'HII', 'HON',\r\n 'IEX', 'INFO', 'LHX', 'LMT', 'J', 'JBHT', 'JCI', 'MAS', 'MMM', \r\n 'NLSN', 'NOC', 'PNR', 'PWR', 'ROK', 'ROP', 'RSG', 'RTN', 'SWK',\r\n 'TDG', 'TT', 'TXT', 'UAL', 'XYL', 'DOV', 'GD', 'GE', 'ETN', \r\n 'EMR', 'ITW', 'KSU', 'LUV', 'NSC', 'PAYX', 'PCAR', 'UPS', 'URI',\r\n 'UTX', 'VRSK', 'WAB', 'WM', 'UNP', 'SNA', 'RHI', 'PH']\r\n\r\nRealEstate = ['AVB', 'CBRE', 'EXR', 'HST', 'PSA', 'SBAC', 'WY', 'EQR', \r\n 'SPG', 'VTR', 'SLG']\r\n\r\nTechnology = ['AAPL', 'ACN', 'ADBE', 'ADI', 'ADSK', 'AKAM', 'AMAT', 'AMD', \r\n 'ANET', 'ANSS', 'APH', 'AVGO', 'BR', 'CDNS', 'CDW', 'CRM', \r\n 'CSCO', 'CTSH', 'CTXS', 'DXC', 'FFIV', 'FIS', 'FISV', 'FLIR',\r\n 'FLT', 'GRMN', 'HPE', 'HPQ', 'IBM', 'INTC', 'INTU', 'LDOS', \r\n 'JKHY', 'JNPR', 'KEYS', 'MCHP', 'MSFT', 'MSI', 'MU', 'MXIM', \r\n 'NLOK', 'NOW', 'STX', 'SWKS', 'TEL', 'TXN', 'XLNX', 'XRX', \r\n 'ZBRA', 'FTV', 'GLW', 'FTNT', 'IPGP', 'IT', 'KLAC', 'LRCX',\r\n 'NTAP', 'NVDA', 'PAYC', 'ORCL', 'QCOM', 'QRVO', 'VRSN', 'WDC']\r\n","sub_path":"symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"517026012","text":"import numpy as np\nimport tensorflow as tf\nimport os, sys, time\nimport math,copy\nfrom multiprocessing import Process\n\nclass disktrain(object):\n def __init__(self,diskdir):\n self.diskdir = diskdir\n self.diskfile_list = self.os_csv_path(self.diskdir)\n \n def os_csv_path(self, rootdir):#tell a dir path ,get all the files' name and put in a list\n file_list = []\n list_dir = os.listdir(rootdir)\n for i in range(0,len(list_dir)):\n path = os.path.join(rootdir,list_dir[i])\n if os.path.isfile(path) and (\".csv\" in path):\n file_list.append(path)\n if os.path.isdir(path):\n ret_list = self.os_csv_path(path)\n file_list = file_list + ret_list\n return file_list\n\n\n def csv_2_tensor(self):\n filename_queue = tf.train.string_input_producer(self.diskfile_list)\n reader = tf.TextLineReader()\n key, value = reader.read(filename_queue)\n record_defaults = [[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0]]\n col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13 = tf.decode_csv(value, record_defaults=record_defaults)\n features = tf.reshape(tf.transpose(tf.stack([col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11])),[11,1])\n label = tf.reshape(tf.transpose(tf.stack([col12/10])),[1,1])\n return features, label\n \n\nif __name__ == '__main__':\n\n W = tf.Variable(tf.random_normal(shape=[1, 11]))\n b = tf.Variable(tf.random_normal(shape=[1, 1]))\n\n def inference(x):\n y = tf.subtract(tf.matmul(W,x),b)\n return y\n\n def loss(y, y_):\n l2_norm = tf.reduce_sum(tf.square(W))\n alpha = tf.constant([0.01])\n classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(y, y_))))\n loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))\n 
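# soft-margin SVM objective: mean hinge loss plus an L2 penalty on the weights\n 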
return loss\n\n def train(loss):\n my_opt = tf.train.GradientDescentOptimizer(0.0001)\n train_step = my_opt.minimize(loss) \n return train_step\n \n init = tf.initialize_all_variables()\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n a = disktrain(\"/ssd/disk_11_csv\")\n features, label = a.csv_2_tensor()\n total_loss = loss(inference(features), label)\n train_op = train(total_loss)\n coordinator = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord = coordinator)\n i = 1\n best_loss = 3\n best_wb = None\n circle_i = -1\n while i:\n try:\n sess.run([train_op])\n if best_loss > sess.run(total_loss):\n best_loss = sess.run(total_loss)\n best_wb = sess.run([W, b])\n circle_i = i\n if label.eval()[0] > 0:\n print('file num',i)\n print('label', float(label.eval()[0]))\n print('loss:', sess.run(total_loss))\n print('wb:', sess.run([W, b]))\n i = i + 1\n if not(i%10000): \n print('file num',i)\n print('label', label.eval()[0])\n print('loss:', sess.run(total_loss))\n print('wb:', sess.run([W, b]))\n except:\n i = 0\n print('############################')\n print('loss:', best_loss)\n print('wb:', best_wb)\n print('circle_i', circle_i)\n print('############################')\n coordinator.request_stop()\n coordinator.join(threads)\n","sub_path":"predict/disk/skr/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"383628779","text":"import os\r\n\r\n\r\nfrom flask import Flask\r\n\r\nfrom extensions import db,moment,csrf,bootstrap,login_manager,register_template_global,avatars\r\nfrom views.index.main import index_main_bp\r\nfrom views.index.admin import index_admin_bp\r\nfrom views.index.teachers import index_teachers_bp\r\nfrom views.index.students import index_stu_bp\r\nfrom views.index.auth import index_auth_bp\r\n\r\nfrom config import config\r\n\r\ndef create_app():\r\n app = Flask(__name__)\r\n\r\n app.config.from_object(config['development'])\r\n\r\n register_extensions(app)\r\n register_blueprints(app)\r\n register_errorhandlers(app)\r\n register_template_global(app)\r\n\r\n return app\r\n\r\ndef register_extensions(app):\r\n db.init_app(app)\r\n moment.init_app(app)\r\n csrf.init_app(app)\r\n bootstrap.init_app(app)\r\n login_manager.init_app(app)\r\n avatars.init_app(app)\r\n\r\n\r\ndef register_blueprints(app):\r\n # register blueprints\r\n app.register_blueprint(index_main_bp)\r\n app.register_blueprint(index_admin_bp,url_prefix='/admin')\r\n app.register_blueprint(index_teachers_bp,url_prefix='/teacher')\r\n app.register_blueprint(index_stu_bp,url_prefix='/student')\r\n app.register_blueprint(index_auth_bp)\r\n\r\ndef register_errorhandlers(app):\r\n pass\r\n\r\n\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"89559333","text":"import os\nimport sys\n\ndonor_names = [\"John Smith\", \"Jane Doe\", \"Alan Smithee\", \"Tom D.A. Harry\", \"Joe Shmoe\"]\ndonation_amounts = [[18774.48, 8264.47, 7558.71], [281918.99, 8242.13], [181.97, 955.16], [67.10, 500.98], [200.01]]\n\ndonor_db = {name: donation for name, donation in zip(donor_names, donation_amounts)}\n# donor_db = {\"John Smith\": [18774.48, 8264.47, 7558.71], \"Jane Doe\": [281918.99, 8242.13],\n# \"Alan Smithee\": [181.97, 955.16], \"Tom D.A. 
Harry\": [67.10, 500.98], \"Joe Shmoe\": [200.01]}\n\n\ndef thank_you():\n \"\"\"Module with three functions:\n 1) Append donation to record (if existing donor) or create a new record in database (if not an existing donor.\n 2) Print thank you letter after updating database record.\n 3) List all current donors in database.\"\"\"\n user_input = input('Enter a donor\\'s full name, or type \\'list\\' for a full list. ' +\n 'Type \\'e\\' to exit and return to the main menu.\\n> ').title()\n if user_input.lower() == 'list':\n # Sadly this comprehension doesn't save any lines of code from my original implementation.\n donor_list = [k for k in donor_db]\n print(donor_list)\n thank_you()\n elif user_input.lower() == 'e':\n mailroom()\n else:\n try:\n donation = float(input(\"Please enter a donation amount: \"))\n except ValueError:\n print(\"Error: donations can only be entered as numbers and decimals.\")\n print(\"Returning to previous menu...\")\n thank_you()\n # This seemed like an obvious spot to convert to list/dict comprehension, but the if-else statements made it\n # tricky to do so. I tried a few different ways and could not get it to work. Still not sure if there's a good\n # way to work in else statements to comprehensions--that doesn't seem to be what they're for.\n # I would love to know if there's an elegant way to do this, though.\n donor_list = []\n for k in donor_db:\n donor_list.append(k)\n if user_input in donor_list and k == user_input:\n donor_db[k].append(donation)\n print(\"Existing donor found.\")\n print(\"Appending the amount of {0} to {1}'s file...\".format(donation, user_input))\n print(\"Printing thank you email...\")\n print(\"---------------------------\")\n create_letter(0, user_input, donation)\n else:\n donor_db[user_input] = [donation]\n print(\"New donor detected. Creating record for {0}...\".format(user_input))\n print(\"Printing thank you email...\")\n print(\"---------------------------\")\n create_letter(1, user_input, donation)\n\n\ndef report():\n \"\"\"Generate and print report showing all donors, total dollar amount given, number of donations, and average.\"\"\"\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n for k in donor_db:\n num_gifts = len(donor_db[k])\n total_given = sum(donor_db[k])\n average_gifts = total_given / num_gifts\n print(f'{k: <26}| ${total_given:>10.2f} |{num_gifts:^11}| ${average_gifts:>11.2f}')\n print('\\nReturning to main menu...')\n return\n\n\ndef quit_program():\n \"\"\"Quit Mailroom program.\"\"\"\n print(\"Exiting...\")\n sys.exit()\n\n\ndef create_letter(donor_status, donor_name, donation_amt):\n \"\"\"Return and print formatted letters, depending on options selected. 
Not intended to be used by itself.\"\"\"\n if donor_status == 0:\n letter_text = \"\"\"\n Dear {0},\n \n Thank you for your very kind donation of ${1}, and for your continuing support.\n \n Your generous contribution will be put to very good use.\n \n Sincerely,\n -The Team\n \"\"\".format(donor_name, donation_amt)\n print(letter_text)\n print(\"---------------------------\")\n print(\"Returning to thank you letter menu...\")\n thank_you()\n elif donor_status == 1:\n letter_text = \"\"\"\n Dear {0},\n\n Thank you for your very kind donation of ${1}.\n\n Your generous contribution will be put to very good use.\n\n Sincerely,\n -The Team\n \"\"\".format(donor_name, donation_amt)\n print(letter_text)\n print(\"---------------------------\")\n print(\"Returning to thank you letter menu...\")\n thank_you()\n elif donor_status == 2:\n return(\"\"\"\n Dear {0},\n\n Thank you for your very kind contribution(s) totaling ${1}.\n\n We would like you to know that your generous donation(s) will be put to very good use.\n\n Sincerely,\n -The Team\n \"\"\".format(donor_name, donation_amt))\n\n\ndef thank_all():\n \"\"\"Write letters generated by create_letter to text files, saving them to same directory as script.\"\"\"\n current_dir = os.getcwd()\n print(\"Saving letters to {0}...\".format(current_dir))\n\n for k, v in donor_db.items():\n letter = create_letter(2, k, sum(v))\n with open('{:s}.txt'.format(k), 'w') as f:\n f.write(letter)\n print(\"---------------------------\")\n print(\"Letters saved to text files in directory. Returning to main menu...\")\n mailroom()\n\n\ndef mailroom():\n \"\"\"Generate main menu options and activate other functions.\"\"\"\n while True:\n selection = input('MAILROOM v0.3\\n------------------------\\nChoose an option:\\n1) Send a thank you' +\n '\\n2) Create a report\\n3) Send letters to everyone\\n4) Quit\\n> ')\n menu_dict = {'1': thank_you, '2': report, '3': thank_all, '4': quit_program}\n try:\n menu_dict.get(selection)()\n except TypeError:\n print(\"Invalid value. 
Enter a number from 1-4.\")\n pass\n\n\nif __name__ == \"__main__\":\n mailroom()\n","sub_path":"students/JonathanMauk/lesson05/mailroom03.py","file_name":"mailroom03.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"65971543","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport unicodedata\n\nfrom rdflib import Namespace, URIRef, Literal, RDF, ConjunctiveGraph\n\nfrom settings import pub_base_uri, uri_person, uri_pub\n\n#GLOBAL VARS\nDC = Namespace(\"http://purl.org/dc/terms/\")\nRDFS = Namespace(\"http://www.w3.org/2000/01/rdf-schema#\")\nSWRC = Namespace(\"http://swrc.ontoware.org/ontology#\")\nAIISO = Namespace(\"http://purl.org/vocab/aiiso/schema#\")\nTEACH = Namespace(\"http://linkedscience.org/teach/ns#\")\n#END GLOBAL VARS\n\n# Create the RDF Graph\ngraph = ConjunctiveGraph()\ngraph.bind(\"dc\", DC)\ngraph.bind(\"rdfs\", RDFS)\ngraph.bind(\"swrc\", SWRC)\ngraph.bind(\"aiiso\", AIISO)\ngraph.bind(\"teach\", TEACH)\n# End create RDF Graph\n\n\ndef remove_accents(s):\n '''\n Quits accents and language specific characters of a string\n '''\n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n\n\ndef htmlize_string(string):\n '''\n Make a HTML valid string (quits spaces, commas, dots and accents or language specific characters)\n '''\n return remove_accents(string.replace(\",\", \"\").replace(\".\", \"\").replace(\" \", \"\"))\n\n\ndef rdfize_output_common(pub_dict, puburi):\n '''\n Adds common fields for outputs un the RDF Graph.\n '''\n pub_uriref = URIRef(pub_base_uri +\"/\"+puburi+\"/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, DC.year, Literal(pub_dict[u\"Any\"])))\n graph.add((pub_uriref, DC.title, Literal(pub_dict[u\"Títol\"])))\n\n graph.add((pub_uriref, RDF.type, SWRC.Publication))\n\n if pub_dict.has_key(u\"Autors\"):\n graph.add((pub_uriref, SWRC.authors, Literal(\"; \".join(pub_dict[u\"Autors\"]))))\n for autor in pub_dict[u\"Autors\"]:\n autor_uriref = URIRef(pub_base_uri +\"/\"+ uri_person +\"/\"+htmlize_string(autor))\n graph.add((autor_uriref, RDF.type, SWRC.Person))\n graph.add((pub_uriref, DC.author, autor_uriref))\n graph.add((autor_uriref, RDFS.label, Literal(autor)))\n graph.add((autor_uriref, DC.identifier, Literal(htmlize_string(autor))))\n\n\ndef rdfize_pages(pub_dict, puburi):\n '''\n Adds pages fields in RDF Graph\n '''\n pub_uriref = URIRef(pub_base_uri +\"/\"+puburi+\"/\"+pub_dict[\"Id. GREC\"])\n if pub_dict[u\"Pàgina inicial\"] != \"\" or pub_dict[u\"Pàgina final\"] != \"\":\n graph.add((pub_uriref, SWRC.pages, Literal(pub_dict[u\"Pàgina inicial\"] +\"-\"+ pub_dict[u\"Pàgina final\"])))\n if pub_dict[\"Volum\"] != \"\":\n graph.add((pub_uriref, SWRC.volume, Literal(pub_dict[\"Volum\"])))\n\n\ndef rdfize_journal_article(pub_dict):\n '''\n Add common article fields into the RDF graph.\n '''\n if \"docent\" in pub_dict[\"Clau\"].split(\" \") or \"docents\" in pub_dict[\"Clau\"].split(\" \"): return\n rdfize_output_common(pub_dict, \"article\")\n pub_uriref = URIRef(pub_base_uri +\"/article/\"+pub_dict[\"Id. 
GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.Article))\n rdfize_pages(pub_dict, \"article\")\n\n if pub_dict[\"ISSN\"] != \"\":\n journal_uriref = URIRef(pub_base_uri +\"/journal/\"+pub_dict[\"ISSN\"])\n graph.add((pub_uriref, SWRC.isPartOf, journal_uriref))\n graph.add((journal_uriref, RDF.type, SWRC.Journal))\n graph.add((journal_uriref, RDFS.label, Literal(pub_dict[\"Revista\"])))\n graph.add((journal_uriref, SWRC.ISSN, Literal(pub_dict[\"ISSN\"])))\n\n\ndef rdfize_book_article(pub_dict):\n '''\n Add common inbook fields into the RDF graph.\n '''\n if \"docent\" in pub_dict[\"Clau\"].split(\" \") or \"docents\" in pub_dict[\"Clau\"].split(\" \"): return\n rdfize_output_common(pub_dict, \"inbook\")\n pub_uriref = URIRef(pub_base_uri +\"/inbook/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.InBook))\n rdfize_pages(pub_dict, \"inbook\")\n\n if pub_dict[\"ISBN\"] != \"\":\n book_uriref = URIRef(pub_base_uri +\"/book/\"+pub_dict[\"ISBN\"])\n graph.add((pub_uriref, SWRC.isPartOf, book_uriref))\n graph.add((book_uriref, RDF.type, SWRC.Book))\n if pub_dict[u\"Referència\"] != \"\":\n graph.add((book_uriref, RDFS.label, Literal(pub_dict[u\"Referència\"])))\n graph.add((book_uriref, SWRC.ISBN, Literal(pub_dict[\"ISBN\"])))\n if pub_dict[u\"Editorial\"] != \"\": \n graph.add((book_uriref, SWRC.editor, Literal(pub_dict[u\"Editorial\"])))\n\n\ndef rdfize_thesis(pub_dict):\n '''\n Add common PhD Thesis fields into the RDF graph.\n '''\n if pub_dict[u\"Clau\"] != \"Tesi Doctoral\" and pub_dict[u\"Clau\"] != \"Tesi Doctoral Europea\": return\n rdfize_output_common(pub_dict, \"phdthesis\")\n pub_uriref = URIRef(pub_base_uri +\"/phdthesis/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.PhDThesis))\n for autor in pub_dict[u\"Autor\"]:\n autor_uriref = URIRef(pub_base_uri +\"/\"+ uri_person +\"/\"+htmlize_string(autor))\n graph.add((autor_uriref, RDF.type, SWRC.Person))\n graph.add((pub_uriref, DC.author, autor_uriref))\n graph.add((autor_uriref, RDFS.label, Literal(autor)))\n\n for director in pub_dict[u\"Director\"]:\n director_uriref = URIRef(pub_base_uri +\"/\"+ uri_person +\"/\"+htmlize_string(director))\n graph.add((director_uriref, RDF.type, SWRC.Person))\n graph.add((pub_uriref, SWRC.supervisor, director_uriref))\n graph.add((director_uriref, RDFS.label, Literal(director)))\n\n graph.add((pub_uriref, SWRC.school, Literal(pub_dict[u\"Facultat\"])))\n graph.add((pub_uriref, DC.University, Literal(pub_dict[u\"Universitat\"])))\n\n\ndef rdfize_congress_paper(pub_dict):\n '''\n Add common InProceedings fields into the RDF graph.\n '''\n if u\"comité\" in pub_dict[u\"Tipus de participació\"].split(\" \") or u\"Presidència\" in pub_dict[u\"Tipus de participació\"].split(\" \"): return False;\n rdfize_output_common(pub_dict, \"inproceedings\")\n pub_uriref = URIRef(pub_base_uri +\"/inproceedings/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.InProceedings))\n graph.add((pub_uriref, SWRC.atEvent, Literal(pub_dict[u\"Congrés\"])))\n\n\ndef rdfize_patent(pub_dict):\n '''\n Add common patent fields into the RDF graph.\n '''\n rdfize_output_common(pub_dict, \"patent\")\n pub_uriref = URIRef(pub_base_uri +\"/patent/\"+pub_dict[\"Id. 
GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.Patent))\n graph.add((pub_uriref, SWRC.location, Literal(pub_dict[u\"Països\"])))\n graph.add((pub_uriref, SWRC.organization, Literal(pub_dict[\"Organismes\"])))\n\n\ndef rdfize_input_common(pub_dict):\n '''\n Add common fields for inputs into the RDF graph.\n '''\n pub_uriref = URIRef(pub_base_uri +\"/\"+ uri_pub +\"/\"+pub_dict[\"Id. GREC\"])\n\n director_uriref = URIRef(pub_base_uri +\"/\"+ uri_person +\"/\"+htmlize_string(pub_dict[\"Investigador principal\"]))\n graph.add((pub_uriref, SWRC.head, director_uriref))\n graph.add((director_uriref, RDF.type, SWRC.Person))\n graph.add((director_uriref, RDFS.label, Literal(pub_dict[\"Investigador principal\"])))\n graph.add((director_uriref, DC.identifier, Literal(htmlize_string(pub_dict[\"Investigador principal\"]))))\n\n graph.add((pub_uriref, DC.title, Literal(pub_dict[u\"Títol\"])))\n\n graph.add((pub_uriref, DC.isPartOf, Literal(pub_dict[u\"Convocatòria\"])))\n\n graph.add((pub_uriref, SWRC.financedBy, Literal(pub_dict[u\"Organisme\"])))\n\n graph.add((pub_uriref, SWRC.carriedOutBy, Literal(pub_dict[u\"Institució\"])))\n\n if pub_dict[\"Data d'inici\"] != \"\":\n data = \"-\".join(pub_dict[\"Data d'inici\"].split(\"/\")[::-1])\n graph.add((pub_uriref, DC.beginDate, Literal(data)))\n graph.add((pub_uriref, DC.year, Literal(data.split(\"-\")[0])))\n\n if pub_dict[\"Data Fi\"] != \"\":\n data = \"-\".join(pub_dict[\"Data Fi\"].split(\"/\")[::-1])\n graph.add((pub_uriref, DC.endDate, Literal(data)))\n\n if pub_dict[\"Data\"] != \"\":\n data = \"-\".join(pub_dict[\"Data\"].split(\"/\")[::-1])\n graph.add((pub_uriref, DC.date, Literal(data)))\n\n if pub_dict[\"Investigadors secundaris\"] != []:\n graph.add((pub_uriref, SWRC.authors, Literal(pub_dict[\"Investigador principal\"]+\"; \"+\"; \".join(pub_dict[\"Investigadors secundaris\"]))))\n for researcher in pub_dict[\"Investigadors secundaris\"]:\n researcher_uriref = URIRef(pub_base_uri +\"/\"+ uri_person +\"/\"+htmlize_string(researcher))\n graph.add((pub_uriref, SWRC.member, researcher_uriref))\n graph.add((researcher_uriref, RDF.type, SWRC.Person))\n graph.add((researcher_uriref, RDFS.label, Literal(researcher)))\n graph.add((researcher_uriref, SWRC.note, Literal(htmlize_string(researcher))))\n\n\ndef rdfize_research_project(pub_dict):\n '''\n Add common research project fields into the RDF graph.\n '''\n rdfize_input_common(pub_dict)\n pub_uriref = URIRef(pub_base_uri +\"/\"+ uri_pub +\"/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.Project))\n graph.add((pub_uriref, DC.identifier, Literal(pub_dict[\"Codi oficial\"])))\n graph.add((pub_uriref, DC.isPartOf, Literal(pub_dict[u\"Programa\"])))\n\n\ndef rdfize_european_project(pub_dict):\n '''\n Add common European project fields into the RDF graph.\n '''\n rdfize_input_common(pub_dict)\n pub_uriref = URIRef(pub_base_uri +\"/\"+ uri_pub +\"/\"+pub_dict[\"Id. GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.Project))\n graph.add((pub_uriref, DC.identifier, Literal(pub_dict[\"Codi UE\"])))\n graph.add((pub_uriref, DC.isPartOf, Literal(pub_dict[u\"Programa\"])))\n\n\ndef rdfize_contract(pub_dict):\n '''\n Add common contract fields into the RDF graph.\n '''\n rdfize_input_common(pub_dict)\n pub_uriref = URIRef(pub_base_uri +\"/\"+ uri_pub +\"/\"+pub_dict[\"Id. 
GREC\"])\n\n graph.add((pub_uriref, RDF.type, SWRC.Contract))\n graph.add((pub_uriref, DC.identifier, Literal(pub_dict[\"Codi oficial\"])))\n\ndef rdfize_pub_list(pub_list):\n '''Translate the publication list structure to a RDF Graph structure'''\n for pub_dict in pub_list:\n if pub_dict.has_key(u\"ISSN\"):\n rdfize_journal_article(pub_dict)\n elif pub_dict.has_key(u\"ISBN\"):\n rdfize_book_article(pub_dict)\n elif pub_dict.has_key(u\"Qualificació\"):\n rdfize_thesis(pub_dict)\n elif pub_dict.has_key(u\"Congrés\"):\n rdfize_congress_paper(pub_dict)\n elif pub_dict.has_key(u\"Unesco\"):\n rdfize_research_project(pub_dict)\n elif pub_dict.has_key(u\"Codi UE\"):\n rdfize_european_project(pub_dict)\n elif pub_dict.has_key(u\"Número de registre\"):\n rdfize_patent(pub_dict)\n elif pub_dict.has_key(u\"Codi oficial\"):\n rdfize_contract(pub_dict)\n\n return graph.serialize(format=\"pretty-xml\")","sub_path":"grec_harvester/harvest_rdfizer.py","file_name":"harvest_rdfizer.py","file_ext":"py","file_size_in_byte":10557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212901898","text":"from Game import TicTacToe\n\n\nclass Hub: # connects to players\n # def __init__(self, p1, p2, game):\n def __init__(self, p1, p2):\n # print(\"Starting new match\")\n game = TicTacToe() # temporary !!!!!!!!!!!!!!!!!!!!\n # self.p1 = p1(self) temp\n\n # self.p1 = HumanInterface(self)\n # self.p2 = HumanInterface(self)\n\n self.p1 = p1\n self.p2 = p2\n\n self.board = [[0 for x in range(game.get_width())] for y in range(game.get_length())]\n\n self.turn = 2 # this will swap to 1 on the first turn\n while True:\n # switching turns\n if self.turn == 2:\n self.turn = 1\n else:\n self.turn = 2\n\n if self.turn == 1:\n move = self.p1.prompt(self.board)\n self.board[move[0]][move[1]] = 1\n else:\n move = self.p2.prompt(self.board)\n self.board[move[0]][move[1]] = 2\n\n # self.print_board()\n\n if game.check_win(self.board):\n # print(\"Player \", self.turn, \" is the winner\")\n if self.turn == 1:\n self.p1.gameover(0)\n self.p2.gameover(1)\n else:\n self.p1.gameover(1)\n self.p2.gameover(0)\n break\n\n if game.check_full(self.board):\n # print(\"Tie game\")\n self.p1.gameover(2)\n self.p2.gameover(2)\n break\n\n def print_board(self):\n for x in range(0, 3):\n print(\"%s|%s|%s\" % (self.board[x][0], self.board[x][1], self.board[x][2]))\n print('\\n')\n # input()\n\n\nclass HumanInterface: # IO for humans\n @staticmethod\n def is_flippable(): # AI require flipped X and O, humans do not\n return False\n\n def prompt(self, board):\n for x in range(0, 3):\n print(\"%s|%s|%s\" % (self.to_symbol(board[x][0]), self.to_symbol(board[x][1]), self.to_symbol(board[x][2])))\n move = [0, 0]\n\n move[0] = int(input(\"Enter Row: \"))\n move[1] = int(input(\"Enter Column: \"))\n\n while not TicTacToe.check_valid(move, board): # check if move is valid\n move[0] = int(input(\"Enter Row: \"))\n move[1] = int(input(\"Enter Column: \"))\n return move\n\n @staticmethod\n def to_symbol(num):\n if num == 0:\n return \" \"\n if num == 1:\n return \"X\"\n if num == 2:\n return \"O\"\n\n def gameover(self, outcome):\n # 0 lose, 1 win, 2 tie\n print ('')\n\nclass CompInterface:\n def __init__(self, org):\n self.org = org\n\n @staticmethod\n def is_flippable(): # AI require flipped X and O, humans do not\n return True\n\n def prompt(self, board):\n # begin propagation through the assigned organism\n return self.org.begin_propagate(board)\n\n\n @staticmethod\n def to_symbol(num):\n if num == 0:\n 
return \" \"\n if num == 1:\n return \"X\"\n if num == 2:\n return \"O\"\n\n def gameover(self, outcome):\n # 0 win, 1 lose, 2 tie\n self.org.receive_outcome(outcome)\n","sub_path":"GeneticTrainer/Match.py","file_name":"Match.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"55204881","text":"\"\"\"\nChannel join control, extended by other hooks.\n\nConfig:\n hooks ((str, str list) dict):\n Mapping of controlling hooks to a list of channels they manage.\n exclude ((str, str list) dict):\n Mapping of plugs to user IDs who should be ignored during checks.\n joins (bool):\n ``True`` to check each join as it happens.\n startup (bool):\n ``True`` to run a full check of all named channels on load.\n passive (bool):\n ``True`` to log violations without actually following through with removals.\n\nThis hook implements its own protocol for test purposes, by rejecting all joins and members of a\nchannel. To make full use of it, other hooks with support for channel access can determine if a\nuser satisfies membership of an external group or application.\n\"\"\"\n\nimport asyncio\nfrom collections import defaultdict\nimport logging\n\nimport immp\n\n\nlog = logging.getLogger(__name__)\n\n\nclass AccessPredicate:\n \"\"\"\n Interface for hooks to provide channel access control from a backing source.\n \"\"\"\n\n async def channel_access(self, channel, user):\n \"\"\"\n Verify if a user is allowed access to a channel.\n\n Args:\n channel (.Channel):\n Target channel.\n user (.User):\n Incoming user to be verified.\n\n Returns:\n bool:\n ``True`` if the user is to be granted access.\n \"\"\"\n raise NotImplementedError\n\n\nclass ChannelAccessHook(immp.Hook, AccessPredicate):\n \"\"\"\n Hook for controlling membership of, and joins to, secure channels.\n \"\"\"\n\n schema = immp.Schema({immp.Optional(\"hooks\", dict): {str: [str]},\n immp.Optional(\"exclude\", dict): {str: [str]},\n immp.Optional(\"joins\", True): bool,\n immp.Optional(\"startup\", False): bool,\n immp.Optional(\"passive\", False): bool})\n\n @property\n def hooks(self):\n mapping = {}\n for name, channels in self.config[\"hooks\"].items():\n try:\n hook = self.host.hooks[name]\n except KeyError:\n raise immp.ConfigError(\"Hook '{}' not registered to host\".format(name))\n if not isinstance(hook, AccessPredicate):\n raise immp.HookError(\"Hook '{}' does not implement AccessPredicate\".format(name))\n try:\n channels = tuple(self.host.channels[label] for label in channels)\n except KeyError as e:\n raise immp.ConfigError(\"Channel '{}' not registered to host\".format(e.args[0]))\n mapping[hook] = channels\n return mapping\n\n @property\n def channels(self):\n inverse = defaultdict(list)\n for hook, channels in self.hooks.items():\n for channel in channels:\n inverse[channel].append(hook)\n return inverse\n\n async def channel_access(self, channel, user):\n # Example predicate to block all access.\n return False\n\n async def _predicate(self, hook, channel, user):\n if not isinstance(hook, AccessPredicate):\n raise immp.HookError(\"Hook '{}' does not implement AccessPredicate\".format(hook.name))\n allow = await hook.channel_access(channel, user)\n if not allow:\n log.debug(\"Hook %r disallows %r in %r\", hook.name, user, channel)\n if not self.config[\"passive\"]:\n await channel.remove(user)\n return allow\n\n async def _verify(self, channel, user):\n if user.id in self.config[\"exclude\"].get(user.plug.name, []):\n log.debug(\"Skipping 
excluded user %r in channel %r\", user, channel)\n return\n elif await user.is_system():\n log.debug(\"Skipping system user %r in channel %r\", user, channel)\n return\n try:\n hooks = self.channels[channel]\n except KeyError:\n return\n for hook in hooks:\n if not await self._predicate(hook, channel, user):\n break\n\n async def _startup_check(self):\n log.debug(\"Running startup access checks\")\n for channel in self.channels:\n members = await channel.members()\n if not members:\n continue\n for user in members:\n await self._verify(channel, user)\n log.debug(\"Finished startup access checks\")\n\n async def start(self):\n await super().start()\n if self.config[\"startup\"]:\n asyncio.ensure_future(self._startup_check())\n\n async def on_receive(self, sent, source, primary):\n await super().on_receive(sent, source, primary)\n if not self.config[\"joins\"] or not primary or sent != source or not source.joined:\n return\n for user in source.joined:\n await self._verify(sent.channel, user)\n","sub_path":"immp/hook/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"121083859","text":"\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport sys\nimport os\n\np = (Path(__file__) / \"..\" / \"..\").resolve()\nsys.path.append(str(p))\n\nfrom deeppavlov.core.commands.train import train_evaluate_model_from_config\nfrom deeppavlov.core.commands.infer import interact_model, predict_on_stream\nfrom deeppavlov.core.common.log import get_logger\nfrom deeppavlov.download import deep_download\nfrom utils.telegram_utils.telegram_ui import interact_model_by_telegram\nfrom utils.server_utils.server import start_model_server\nfrom utils.pip_wrapper import install_from_config\n\n\nlog = get_logger(__name__)\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"mode\", help=\"select a mode, train or interact\", type=str,\n choices={'train', 'evaluate', 'interact', 'predict', 'interactbot', 'riseapi', 'download',\n 'install'})\nparser.add_argument(\"config_path\", help=\"path to a pipeline json config\", type=str)\nparser.add_argument(\"-t\", \"--token\", help=\"telegram bot token\", type=str)\nparser.add_argument(\"-b\", \"--batch-size\", dest=\"batch_size\", default=1, help=\"inference batch size\", type=int)\nparser.add_argument(\"-f\", \"--input-file\", dest=\"file_path\", default=None, help=\"Path to the input file\", type=str)\nparser.add_argument(\"-d\", \"--download\", action=\"store_true\", help=\"download model components\")\n\n\ndef find_config(pipeline_config_path: str):\n if not Path(pipeline_config_path).is_file():\n configs = [c for c in Path(__file__).parent.glob(f'configs/**/{pipeline_config_path}.json')\n if str(c.with_suffix('')).endswith(pipeline_config_path)] # a simple way to not allow * and ?\n if configs:\n log.info(f\"Interpreting 
'{pipeline_config_path}' as '{configs[0]}'\")\n pipeline_config_path = str(configs[0])\n return pipeline_config_path\n\n\ndef main():\n args = parser.parse_args()\n pipeline_config_path = find_config(args.config_path)\n if args.download or args.mode == 'download':\n deep_download(['-c', pipeline_config_path])\n token = args.token or os.getenv('TELEGRAM_TOKEN')\n\n if args.mode == 'train':\n train_evaluate_model_from_config(pipeline_config_path)\n elif args.mode == 'evaluate':\n train_evaluate_model_from_config(pipeline_config_path, to_train=False, to_validate=False)\n elif args.mode == 'interact':\n interact_model(pipeline_config_path)\n elif args.mode == 'interactbot':\n if not token:\n log.error('Token required: initiate -t param or TELEGRAM_BOT env var with Telegram bot token')\n else:\n interact_model_by_telegram(pipeline_config_path, token)\n elif args.mode == 'riseapi':\n start_model_server(pipeline_config_path)\n elif args.mode == 'predict':\n predict_on_stream(pipeline_config_path, args.batch_size, args.file_path)\n elif args.mode == 'install':\n install_from_config(pipeline_config_path)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"deeppavlov/deep.py","file_name":"deep.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"86461126","text":"\"\"\"\nUsage: merge-pdfs [ ...]\n\nMerges multiple pdfs into a single pdf.\nThe resulting pdf is called 'merged.pdf' (unless that\nfilename is taken, in which case a number is appended\nfor uniqueness).\nThe script requires pdftk to be present on the machine\n(and in the path).\n\"\"\"\nimport sys\nimport os.path\nfrom subprocess import call\n\nif len(sys.argv) < 3:\n print(\"Not enough files given.\")\n print(\"Usage: merge-pdfs [ ...]\")\n sys.exit()\n\nfilenames = sys.argv[1:]\ncorrect_filenames = []\nfor f in filenames:\n if os.path.isfile(f):\n correct_filenames.append(os.path.abspath(f))\n else:\n print(\"Invalid file: \" + f)\nfilenames = correct_filenames\n\nprint(\"Files:\")\nfor f in filenames:\n print(\" \" + f)\n\nif len(filenames) < 2:\n print(\"Not enough valid files.\")\n sys.exit(0)\n\nfolder = os.path.dirname(filenames[0])\ntarget = os.path.join(folder, \"merged.pdf\")\ni = 0\nwhile os.path.exists(target):\n i = i + 1\n target = os.path.join(folder, \"merged-\" + str(i) + \".pdf\")\n\ncall([\"pdftk\"] + filenames + [\"cat\", \"output\", target])\n\n\n","sub_path":"scripts/merge-pdfs.py","file_name":"merge-pdfs.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"620433420","text":"import os\n\nimport setuptools\n\nimport datetime\n\n\nname = \"DEHB\"\npackage_name = \"dehb\"\nauthor = \"Neeratyoy, Noor, Janis, Frank\"\nauthor_email = \"mallik@cs.uni-freiburg.de\"\ndescription = \"Evolutionary Hyperband for Scalable, Robust and Efficient Hyperparameter Optimization\"\nurl = \"https://github.com/automl/DEHB\"\nproject_urls = {\n \"Documentation\": \"https://automl.github.io/DEHB/\",\n \"Source Code\": \"https://github.com/automl/DEHB\",\n}\ncopyright = f\"Copyright {datetime.date.today().strftime('%Y')}, Neeratyoy, Noor, Frank\"\nversion = \"0.0.7\"\n\nHERE = os.path.dirname(os.path.realpath(__file__))\n\n\ndef read_file(filepath: str) -> str:\n \"\"\"\n Read in a files contents\n\n Parameters\n ----------\n filepath : str\n The name of the file.\n\n Returns\n -------\n str\n The contents of the file.\n \"\"\"\n\n with 
open(filepath, \"r\", encoding=\"utf-8\") as fh:\n return fh.read()\n\n\nextras_require = {\n \"dev\": [\n # Test\n \"pytest>=4.6\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-timeout\",\n # Docs\n \"mkdocs-material\",\n \"mkdocstrings\",\n # Others\n \"ruff\",\n \"black\",\n \"pre-commit\",\n ]\n}\n\nsetuptools.setup(\n name=package_name,\n author=author,\n author_email=author_email,\n description=description,\n long_description=read_file(os.path.join(HERE, \"README.md\")),\n long_description_content_type=\"text/markdown\",\n license=\"Apache-2.0\",\n url=url,\n project_urls=project_urls,\n version=version,\n packages=setuptools.find_packages(\"src\", exclude=[\"tests\"]),\n package_dir={\"\": \"src\"},\n python_requires=\">=3.8\",\n install_requires=read_file(os.path.join(HERE, \"requirements.txt\")).split(\"\\n\"),\n extras_require=extras_require,\n test_suite=\"pytest\",\n platforms=[\"Linux\"],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Natural Language :: English\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15178935","text":"\n\n#calss header\nclass _CANNY():\n\tdef __init__(self,): \n\t\tself.name = \"CANNY\"\n\t\tself.definitions = [u'thinking quickly and cleverly, especially in business or financial matters: ', u'good or pleasant: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_canny.py","file_name":"_canny.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"463164512","text":"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nAnalysis of Census Income dataset using Keras\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom sklearn.preprocessing import StandardScaler\n\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\n\n\nfrom sklearn.metrics import (confusion_matrix, precision_recall_curve, auc,\n roc_curve, recall_score, classification_report, f1_score,\n precision_recall_fscore_support)\n\nfrom sklearn.model_selection import RandomizedSearchCV\nimport tensorflow as tf\nimport keras as kr\nfrom time import time \n\n\nfrom keras.models import Model,Sequential, load_model\nfrom keras.layers import Input,Dense,Flatten,Dropout,merge,Reshape,Conv2D,MaxPooling2D,UpSampling2D,Conv2DTranspose\nfrom keras.layers.normalization import BatchNormalization\nfrom 
keras.wrappers.scikit_learn import KerasClassifier\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\n\nfrom keras.optimizers import Adadelta, RMSprop,SGD,Adam\nfrom keras import regularizers\nfrom keras.utils import to_categorical\nfrom keras.regularizers import L1L2\n\n# Creating a Census Dataframe\n\nCensus = pd.read_csv(r'C:\\Users\\Bobby\\Desktop\\AUT Datasets\\AdultIncome.csv') \nprint(Census)\n\n# Explore the Data\n\nCensus.info() # 284807 (Rows) and 31 (Columns)\n\n# Describe the Data\n\nCensus.describe()\nCensus.columns\n\n#Code to check for shape of data\n \n#Code to check for any missing values\n\nCensus.isnull().any()\nCensus.isnull().values.any()\nCensus.isnull().sum() \n\n# Rename the Target Variable Column to be a binary variable (<=50k = 0 (Class 0) and > 50k = 1 (Class 1))\n# Census['income']=Census['income'].map({'<=50K': 0, '>50K': 1, '<=50K.': 0, '>50K.': 1})\n# There were 24720 (76%) instances of <=50K (Class 0) and 7841 (24%) instances of >50K (Class 1) \n\nCensus.income.replace(['<=50K', '>50K'], [0, 1], inplace=True) \nCensus_count = Census['income'].value_counts() \nprint(Census_count) \n \n\n# Plot of Class variable distribution\n\nfig = sns.barplot(x = [0,1], y = Census_count, data = Census, color = 'blue')\nplt.ylabel('Frequency of Class variable')\nplt.xlabel ('Class variable')\nplt.show(fig)\n\n\n# Identify Numeric variables (7 variables including Class (Income))\nnumeric_var = ['age','fnlwgt','education.num','capital.gain','capital.loss','hours.per.week','income']\n\n# Identify Categorical variables (8 variables))\ncateg_var = ['workclass','education','marital.status', 'occupation', 'relationship', 'race', 'sex', 'native']\n\n\n#Converting Categorical variables into Quantitative variables\n\n# Replace '?' in occupation with 0\n\n\nCensus['occupation'] = Census['occupation'].map({'?': 0, 'Farming-fishing': 1, 'Tech-support': 2, \n 'Adm-clerical': 3, 'Handlers-cleaners': 4, 'Prof-specialty': 5,\n 'Machine-op-inspct': 6, 'Exec-managerial': 7, \n 'Priv-house-serv': 8, 'Craft-repair': 9, 'Sales': 10, \n 'Transport-moving': 11, 'Armed-Forces': 12, 'Other-service': 13, \n 'Protective-serv': 14}).astype(int)\n\n\nCensus['sex'] = Census['sex'].map({'Male': 0, 'Female': 1}).astype(int)\n\nCensus['race'] = Census['race'].map({'Black': 0, 'Asian-Pac-Islander': 1, 'Other': 2, 'White': 3, \n 'Amer-Indian-Eskimo': 4}).astype(int)\n\nCensus[\"marital.status\"] = Census[\"marital.status\"].replace(['Never-married','Divorced','Separated','Widowed'], 'Single')\nCensus[\"marital.status\"] = Census[\"marital.status\"].replace(['Married-civ-spouse','Married-spouse-absent','Married-AF-spouse'], 'Married')\nCensus[\"marital.status\"] = Census[\"marital.status\"].map({\"Married\":1, \"Single\":0})\nCensus[\"marital.status\"] = Census[\"marital.status\"].astype(int)\n\n\n# Fill missing Categorial Values\n\nCensus[\"workclass\"] = Census[\"workclass\"].fillna(\"X\")\nCensus[\"native.country\"] = Census[\"native.country\"].fillna(\"United-States\")\n\n\n# Drop data\n\nCensus.drop(labels=[\"workclass\",\"education\",\"relationship\",\"race\",\"native.country\"], axis = 1, inplace = True)\nprint(Census.head())\n\nCensus.head(100)\n\n#################################################### Creating Train/Test Split Data ######################################\n\n# Create 80/20 (Train/Test) split with Random State Values across both the classes (Fraud and Non-Fraud)\n# Creating Input Features (X) and Target variable (y)\n\nX_train, X_test = train_test_split(Census, test_size = 0.20, 
random_state = 123)\n\n# Create Training Set on < 50K Class (Class 0)\n\nX_train = X_train[X_train.income == 0]\ny_train = X_train['income']\nX_train = X_train.drop(['income'], axis=1)\n\n# Create Test set on > 50K Class (Class 1)\n\ny_test = X_test['income']\nX_test = X_test.drop(['income'], axis=1)\n\n# Converting to Values\n\nX_train = X_train.values\nX_test = X_test.values\n\n# Print Shape of Training and Test sets\n\nprint(X_train.shape, X_test.shape) # (19810, 9) (6513, 9)\nprint(y_train.shape, y_test.shape) # (19810,) (6513,)\n\n\n################################# Variables ##################################\n# Building Neuron Layers with 100, 50, 50 and 100 respectively \n\nn_cols = X_train.shape[1] # Number of Columns \nencoding_dim = 100\n\n########################################### Keras Code ######################################\n\n# Input Shape to use the first hidden layer\ninput_layer = Input(shape = (n_cols, ))\n\n# Build the model\n\nmodel = Sequential()\n\n#Creating the first hidden layer (Encoder)\n\n# model.add(Dense(100, activation = 'relu', input_shape = (n_cols, )))\n\nencoder = Dense(encoding_dim, activation = 'relu')(input_layer)\nencoder1 = Dense(50, activation=\"relu\")(encoder)\n\n# Creating decoder\n\ndecoder = Dense(50, activation='relu')(encoder1)\ndecoder = Dense(n_cols, activation='relu')(decoder)\n\n# Creating the output \n#Encode = Model(inputs=input_layer, outputs=encoder1) \nAutoencoder = Model(inputs=input_layer, outputs=decoder) \n\n# Building epochs and batch_size\n\nnb_epoch = 100\nbatch_size = 256\n\n# Compile the Model\nAutoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\n\n\n# Fit the model \n\n\ncheckpointer = ModelCheckpoint(filepath=\"model.h5\", verbose=0, save_best_only=True)\n\ntensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True,write_images=True)\n\n# Implies X_train is both the input and output, which is required for reconstruction \n \nhistory = Autoencoder.fit(X_train, X_train,\n epochs=nb_epoch,\n batch_size=batch_size,\n shuffle=True,\n validation_data=(X_test, X_test),\n verbose=1,\n callbacks=[checkpointer, tensorboard]).history\n \n# Saving the model\n\nAutoencoder = load_model('model.h5')\n\n\n# Plot of the Model\n\nepochs = range(nb_epoch)\nplt.figure()\nplt.plot(epochs, history['loss'], 'b', label = 'Training Loss')\nplt.plot(epochs, history['val_loss'], 'r', label = 'Validation Loss')\nplt.title('Training and Validation loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['Training Loss', 'Test Loss']) \nplt.show()\n\n\n \n # Make Predictions\n\npredictions = Autoencoder.predict(X_test)\n\n# Mean Squared Error (MSE)\n\nmse = np.mean(np.power(X_test - predictions, 2), axis=1)\nerror_df = pd.DataFrame({'reconstruction_error': mse, 'true_class': y_test}) \n\n\nerror_df.describe() \n\n \n## Plot ROC Curve\nfpr, tpr, thresholds = roc_curve(error_df.true_class, error_df.reconstruction_error)\nroc_auc = auc(fpr, tpr)\n\nplt.title('Receiver Operating Characteristic')\nplt.plot(fpr, tpr, label='AUC = %0.4f'% roc_auc)\nplt.legend(loc='lower right')\nplt.plot([0,1],[0,1],'r--')\nplt.xlim([-0.001, 1])\nplt.ylim([0, 1.001])\nplt.ylabel('True Positive Rate')\nplt.xlabel('False Positive Rate')\nplt.show();\n#\n### Plot Confusion Matrix\n##\n#precision, recall, th = precision_recall_curve(error_df.true_class, error_df.reconstruction_error)\n#plt.plot(recall, precision, 'b', label='Precision-Recall curve')\n#plt.title('Recall vs 
Precision')\n#plt.xlabel('Recall')\n#plt.ylabel('Precision')\n#plt.show()\n \n \n\n# Select the Encoder half (of the AutoEncoder) for ClassificationEncoder for Full Classification\n\nnum_classes = 2 \n\n# Applying Logistic Regression\n\nEncode = Model(inputs=input_layer, outputs=encoder1) \nOuput = Dense(num_classes, activation='softmax', kernel_regularizer=L1L2(l1=0.0, l2=0.1))(Encode.output)\nEncoder_Class = Model(Encode.input,Ouput)\n\n# Freezing the weights of the Encoder model\n\n\nfor l1, l2 in zip(Encode.layers[:2],Autoencoder.layers[0:2]):\n l1.set_weights(l2.get_weights())\n \n \n# Get weights for Autoencoder \n\nAutoencoder.get_weights()[0][1] \n\n# Get weights for Endoder layer\nEncode.get_weights()[0][1] \n\n\n# Only Training the fully connected part (by Freezing the Autoencoder weights)\n\nfor layer in Encode.layers[0:2]:\n layer.trainable = False\n\n\n\n\n# Compile the Classification Model\n \nEncoder_Class.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy']) \n\n\n# Model Summary\n \n \nEncoder_Class.summary() \n\n# Training the Classification Model\nstart = time()\nEncoder_Class.fit(X_train, y_train, epochs=100, batch_size=128, shuffle=True, validation_data=(X_test, y_test))\n\nend = time()\nprint (\"Trained model in {:.4f} seconds\".format(end - start)) # Trained model in 2.1083 seconds\n#Saving the Model\n\nEncoder_Class.save_weights('Classification_complete.h5') \n\n \n# Re-training the model by making trainable layer to True\n\nfor layer in Encode.layers[0:2]:\n layer.trainable = True\n \n# Re-compiling the model after re-training \n\n\nEncoder_Class.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy']) \n \n# Re-Training the Classification Model\n\nstart = time()\nClassification = Encoder_Class.fit(X_train, y_train, epochs=100, batch_size=128, shuffle=True, validation_data=(X_test, y_test),callbacks=[checkpointer, tensorboard]).history\nend = time()\nprint (\"Trained model in {:.4f} seconds\".format(end - start)) # Trained model in 2.1083 seconds\n\n\n\n# Plot the loss between training and Validation data\n\n#Accuracy = Classification.history['acc']\n#Val_accuracy = Classification.history['val_acc']\n#loss = Classification.history['loss']\n#val_loss = Classification.history['val_loss']\n#epochs = range(len(Accuracy))\n#plt.plot(epochs, Accuracy, 'bo', label='Training accuracy')\n#plt.plot(epochs, Val_accuracy, 'b', label='Validation accuracy')\n#plt.title('Training and validation accuracy')\n#plt.legend()\n#plt.figure()\n#plt.plot(epochs, loss, 'bo', label='Training loss')\n#plt.plot(epochs, val_loss, 'b', label='Validation loss')\n#plt.title('Training and validation loss')\n#plt.legend()\n#plt.show() \n\n# Alternative way to show and tell\n\nAccuracy = Classification['acc']\nVal_accuracy = Classification['val_acc']\nloss = Classification['loss']\nval_loss = Classification['val_loss']\nepochs = range(len(Accuracy))\nplt.plot(epochs, Accuracy, 'bo', label='Training accuracy')\nplt.plot(epochs, Val_accuracy, 'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.legend()\nplt.figure()\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.show() \n \n# Model Evaluation on Test Set\n\nTest_eval = Encoder_Class.evaluate(X_test, y_test, verbose = 0)\nprint('Test loss:', Test_eval[0])\nprint('Test accuracy:', Test_eval[1])\n\n\n\n \nthreshold = 20000\n\nLABELS 
= [\"<=50k\", \">50K\"] \npredicted_classes = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]\nconf_matrix = confusion_matrix(error_df.true_class, predicted_classes)\nplt.figure(figsize=(3, 3))\nsns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt=\"d\");\nplt.title(\"Confusion matrix\")\nplt.ylabel('True class')\nplt.xlabel('Predicted class')\nplt.show()\n\n# Classification for Full Class\n\nfrom sklearn.metrics import classification_report\ntarget_names = [\"Class {}\".format(i) for i in range(num_classes)] ## target_names = ['class 0', 'class 1']\nprint(classification_report(y_test, predicted_classes, target_names = target_names))\n\n","sub_path":"CensusIncome/Censuslncome(Keras).py","file_name":"Censuslncome(Keras).py","file_ext":"py","file_size_in_byte":12595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"89274546","text":"#!/usr/bin/env python\n\nimport argparse\n\nfrom pathlib import Path\nfrom subprocess import call\n\n\ndef set_up_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('inputdir', help=\"input directory containing multiple PDFs\")\n parser.add_argument(\"-o\", \"--output-directory\", type=str, help=\"output directory for the table-extract artefacts\")\n return parser\n\n\ndef select_pdfs(input_dir):\n return sorted(Path(input_dir).glob('*.pdf'))\n\n\ndef file_name_without_ext(path):\n file_name = path.resolve().stem\n return file_name.replace(\" \", \"_\").replace(\"'\", \"\").replace(\",\", \"\")\\\n .replace(\"(\", \"\").replace(\")\", \"\")\n\n\ndef create_output_directory(pdf_path):\n file_name = file_name_without_ext(pdf_path)\n out_directory = Path(OUT_DIR) / file_name\n out_directory.mkdir(parents=True, exist_ok=True)\n return out_directory\n\n\ndef handle_paper(pdf_path):\n out_directory = str(create_output_directory(pdf_path))\n basename_pdf = file_name_without_ext(pdf_path)\n # qsub_call = ['qsub',\n # '-N', f'table-extract-{basename_pdf[:20]}',\n # '/home/konzack/scripts/table-extract-pdf.sh']\n func_call = ['./process-pdf.sh', out_directory, str(pdf_path)]\n #qsub_call.extend([out_directory, str(pdf_path)])\n #print(\" \".join(qsub_call))\n call(func_call)\n #call(qsub_call)\n\n\nOUT_DIR = \"./output\"\n\n\ndef main():\n parser = set_up_argparser()\n args = parser.parse_args()\n\n global OUT_DIR\n if args.output_directory:\n OUT_DIR = args.output_directory\n\n pdfs = select_pdfs(args.inputdir)\n\n for pdf_path in pdfs:\n handle_paper(pdf_path)\n\n # p = Pool(4)\n # p.map(handle_paper, pdfs)\n #\n # for i, p in enumerate(pdfs):\n # if str(p.stem).startswith(\"Andresen\"):\n # print i\n #\n # print pdfs[3]\n # handle_paper(pdfs[3])\n\n #call(['./preprocess.sh', 'output/henry_et_al._2007', '/Users/mk21womu/Dropbox/Habitat loss meta-analysis/good_datasets/references/henry et al. 
2007.pdf'])\n    #extract_tables('output/henry_et_al._2007')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"process_multiple_files.py","file_name":"process_multiple_files.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"122631224","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nlin_reg = LinearRegression()\nboston = pd.read_csv('boston_housing.csv')\n\nboston_targets = boston['MEDIAN VALUE']\n\n# Returns predictions for boston_targets based on inputted cols\ndef bostonPredict(cols):\n    boston_inputs = boston[cols]\n    lin_reg.fit(boston_inputs, boston_targets)\n    return lin_reg.predict(boston_inputs)\n\ndef bostonOtherColumns():\n    otherColumns = ['CRIME RATE', 'LARGE LOT', 'INDUSTRY', 'RIVER', 'NOX', 'PRIOR 1940', 'EMP DISTANCE', 'HWY ACCESS', 'PROP TAX RATE', 'STU TEACH RATIO', 'AFR AMER']\n    boston_outputs_base = bostonPredict(['LOW STATUS', 'LOW STATUS^2', 'ROOMS', 'ROOMS^2'])\n    boston_mse_base = mean_squared_error(boston_targets, boston_outputs_base)\n    print('Base: ' + str(boston_mse_base))\n    for other in otherColumns:\n        boston_outputs_other = bostonPredict(['LOW STATUS', 'LOW STATUS^2', 'ROOMS', 'ROOMS^2', other])\n        boston_mse_other = mean_squared_error(boston_targets, boston_outputs_other)\n        percentImprovement = (boston_mse_base - boston_mse_other) / boston_mse_base\n        if percentImprovement > 0.03:\n            print(other + ': ' + str(boston_mse_other))\n\n# 1\nboston_outputs_1 = bostonPredict(['LOW STATUS'])\n\n# 2\nboston_outputs_2 = bostonPredict(['ROOMS'])\n\n# 3\nboston_outputs_3 = bostonPredict(['LOW STATUS', 'ROOMS'])\n\n# 4\nboston[ ['LOW STATUS^2'] ] = boston[ ['LOW STATUS'] ] ** 2\nboston_outputs_4 = bostonPredict(['LOW STATUS', 'LOW STATUS^2'])\n\n# 5\nboston[ ['ROOMS^2'] ] = boston[ ['ROOMS'] ] ** 2\nboston_outputs_5 = bostonPredict(['ROOMS', 'ROOMS^2'])\n\n# 6\nboston_outputs_6 = bostonPredict(['LOW STATUS', 'LOW STATUS^2', 'ROOMS', 'ROOMS^2'])\n\n# 7 interaction feature: LOW STATUS * ROOMS\nboston[ ['LOWROOMS'] ] = boston[ ['LOW STATUS'] ].values * boston[ ['ROOMS'] ].values\nboston_outputs_7 = bostonPredict(['LOW STATUS', 'LOW STATUS^2', 'ROOMS', 'ROOMS^2', 'LOWROOMS'])\n\n# 8\nbostonOtherColumns()\n\n# 9 (Optimized columns does not actually work)\noptimizeColumns = ['LOW STATUS', 'ROOMS', 'INDUSTRY', 'NOX', 'PROP TAX RATE', 'STU TEACH RATIO']\ncolumns = ['CRIME RATE', 'LARGE LOT', 'INDUSTRY', 'RIVER', 'NOX', 'ROOMS', 'PRIOR 1940', 'EMP DISTANCE', 'HWY ACCESS', 'PROP TAX RATE', 'STU TEACH RATIO', 'AFR AMER', 'LOW STATUS']\nlots = []\nfor column in columns:\n    for i in range(1, 641):\n        boston[ [column + '^' + str(i)] ] = boston[ [column] ] ** (i / 256)\n        lots.append(column + '^' + str(i))\n    print(column)\nboston_outputs_9 = bostonPredict(lots)\nboston_mse_base = mean_squared_error(boston_targets, boston_outputs_9)\nprint('Total Error: ' + str(boston_mse_base * len(boston)))","sub_path":"kilo.py","file_name":"kilo.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"358098580","text":"# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# 
http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBenchmarking tool for different commits\n\"\"\"\n# pylint: disable=subprocess-run-check\nimport argparse\nimport locale\nimport os\nimport subprocess\nfrom pathlib import Path\n\nfrom benchmark import col\n\n\nclass cd:\n \"\"\"Context manager for changing the current working directory\"\"\"\n\n def __init__(self, newPath):\n self.newPath = newPath\n self.savedPath = None\n\n def __enter__(self):\n self.savedPath = Path.cwd()\n os.chdir(str(self.newPath))\n\n def __exit__(self, etype, value, traceback):\n os.chdir(str(self.savedPath))\n\n\ndef get_current_git_toplevel():\n \"\"\"Finds the current git toplevel.\n\n Returns:\n Union[Path,NoneType]: The current git toplevel's path or None.\n \"\"\"\n res = subprocess.run([\"git\", \"rev-parse\", \"--show-toplevel\", \"-q\"], stdout=subprocess.PIPE)\n\n if res.returncode == 0:\n return Path(res.stdout.decode(locale.getpreferredencoding()).strip())\n\n return None\n\n\ndef cli():\n \"\"\"Parse the command line arguments, perform the requested action.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"PennyLane benchmarking tool for revisions\")\n parser.add_argument(\n \"-r\",\n \"--revisions\",\n type=lambda x: x.split(\",\"),\n help='Comma-separated list of revisions to run the benchmark on. Use \"here\" for the current git toplevel.',\n )\n\n # Only parse revisions, other args will go to the benchmarking script\n args, unknown_args = parser.parse_known_args()\n\n revisions_directory = Path.home() / \".pennylane\" / \"benchmarks\" / \"revisions\"\n\n toplevel = get_current_git_toplevel()\n\n if revisions_directory.exists():\n with cd(revisions_directory):\n # Make really sure we don't reset the current git\n if toplevel == Path.cwd():\n raise Exception(\n \"Git accidently ended up in the current directory. Stopping to not cause any harm.\"\n )\n\n subprocess.run([\"git\", \"fetch\", \"origin\", \"-q\"], check=True)\n subprocess.run([\"git\", \"reset\", \"--hard\", \"origin/master\", \"-q\"], check=True)\n else:\n revisions_directory.mkdir(parents=True)\n\n subprocess.run(\n [\n \"git\",\n \"clone\",\n \"https://www.github.com/xanaduai/pennylane\",\n str(revisions_directory),\n ],\n check=True,\n )\n\n toplevel = get_current_git_toplevel()\n\n for revision in args.revisions:\n print(\">>> Running benchmark for revision {}\".format(col(revision, \"red\")))\n\n if revision == \"here\":\n if not toplevel:\n print(\n col(\">>> Wasn't able to determine the current git toplevel, skipping...\", \"red\")\n )\n\n continue\n\n pl_directory = toplevel\n else:\n pl_directory = revisions_directory\n\n with cd(pl_directory):\n # Make really sure we don't reset the current git\n if toplevel == Path.cwd():\n raise Exception(\n \"Git accidently ended up in the current directory. 
Stopping to not cause any harm.\"\n )\n\n subprocess.run([\"git\", \"fetch\", \"origin\", \"-q\"], check=True)\n subprocess.run([\"git\", \"reset\", \"--hard\", revision, \"-q\"], check=True)\n\n benchmark_file_path = Path(__file__).parent / \"benchmark.py\"\n benchmark_env = os.environ.copy()\n benchmark_env[\"PYTHONPATH\"] = str(pl_directory) + \";\" + benchmark_env[\"PATH\"]\n\n subprocess.run(\n [\"python3\", str(benchmark_file_path)] + unknown_args + [\"--noinfo\"],\n env=benchmark_env,\n check=True,\n )\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"benchmark/benchmark_revisions.py","file_name":"benchmark_revisions.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"209869886","text":"\nimport requests\nimport json\n\n\ndef quote_of_the_day():\n \"\"\"get the quote of the day from web api as nicely formatted string\n\n Returns:\n str -- nicely formatted quote\n \"\"\"\n # request data from link\n link = \"https://favqs.com/api/qotd\"\n response_obj = requests.get(link)\n\n # get and check status code\n status = response_obj.status_code\n if status != 200:\n return \"Could not retrieve a quote of the day.\"\n\n # get the data in json\n data = response_obj.json()\n\n # parse for information\n author = data['quote']['author']\n quote = data['quote']['body']\n\n return '{author} said: \"{quote}\"'.format(author=author, quote=quote)\n\n\nif __name__ == \"__main__\":\n print(quote_of_the_day())\n","sub_path":"2019/12_Web_scraping/quote_of_the_day.py","file_name":"quote_of_the_day.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411750560","text":"from numpy import power, einsum, linalg, cov, linspace, random, sqrt, dot, zeros, subtract, nditer, unravel_index, arange, outer, exp\nfrom math import pi\nimport scipy\nfrom sys import stdout\nfrom time import time\nimport numpy as np\n\ndef _incremental_index_verbose(m):\n \n digits = len(str(m))\n progress = '\\r [ {s:{d}} / {m} ] {s:3.0f}% - ? 
it/s'\n    progress = progress.format(m=m, d=digits, s=0)\n    stdout.write(progress)\n    beginning = time()\n    for i in range(m):\n        yield i\n        it_per_sec = (i+1) / (time() - beginning)\n        progress = '\r [ {i:{d}} / {m} ]'.format(i=i+1, d=digits, m=m)\n        progress += ' {p:3.0f}%'.format(p=100*(i+1)/m)\n        progress += ' - {it_per_sec:4.5f} it/s'.format(it_per_sec=it_per_sec)\n        stdout.write(progress)\n    \n\nclass SOM:\n    \n    def __init__(self, dim_x, dim_y, n_var, learning_rate, sigma, f_neighborhood='gaussian'):\n        \n        # x-axis size of map\n        self.dim_x = dim_x\n        # y-axis size of map\n        self.dim_y = dim_y\n        # number of variables\n        self.n_var = n_var\n        # learning rate\n        self.learning_rate = learning_rate\n        # sigma\n        self.sigma = sigma\n        # incremental iteration\n        self.inc_iter = 0\n        \n        self.dist_map = zeros((dim_x, dim_y))\n        \n        if f_neighborhood == 'gaussian':\n            self.neighborhood = self.f_gaussian\n        elif f_neighborhood == 'mexican':\n            self.neighborhood = self.f_mexican\n        elif f_neighborhood == 'bubble':\n            self.neighborhood = self.f_bubble\n        else:\n            self.neighborhood = self.f_triangle\n        \n    def f_gaussian(self, x, y, sigma):\n        d = 2*pi*sigma*sigma\n        ax = exp(-power(arange(self.dim_x)-x, 2)/d)\n        ay = exp(-power(arange(self.dim_y)-y, 2)/d)\n        return outer(ax, ay) \n\n    def f_mexican(self, x, y, sigma):\n        xx, yy = np.meshgrid(arange(self.dim_x), arange(self.dim_y), indexing='ij')\n        p = power(xx-x, 2) + power(yy-y, 2)\n        d = 2*pi*sigma*sigma\n        return exp(-p/d)*(1-2/d*p)\n    \n    def f_bubble(self, x, y, sigma):\n        ax = (arange(self.dim_x) > (x-sigma)) & (arange(self.dim_x) < (x+sigma))\n        ay = (arange(self.dim_y) > (y-sigma)) & (arange(self.dim_y) < (y+sigma))\n        return outer(ax, ay)*1.\n\n    def f_triangle(self, x, y, sigma):\n        triangle_x = (-abs(x - arange(self.dim_x))) + sigma\n        triangle_y = (-abs(y - arange(self.dim_y))) + sigma\n        triangle_x[triangle_x < 0] = 0.\n        triangle_y[triangle_y < 0] = 0.\n        return outer(triangle_x, triangle_y)\n    \n    # initialize weights\n    def random_init_w(self):\n        weights = random.rand(self.dim_x, self.dim_y, self.n_var)\n        norm = np.apply_along_axis(lambda x: sqrt(dot(x, x.T)), 2, weights)\n        self._weights = (1/norm[:, :, np.newaxis])*weights\n    \n    def pca_init_w(self, data):\n        pc_length, pc = np.linalg.eig(np.cov(np.transpose(data)))\n        pc_order = np.argsort(pc_length)\n        self._weights = np.zeros((self.dim_x, self.dim_y, self.n_var))\n        \n        for i, c1 in enumerate(linspace(-1, 1, self.dim_x)):\n            for j, c2 in enumerate(linspace(-1, 1, self.dim_y)):\n                self._weights[i, j] = c1*pc[pc_order[0]] + c2*pc[pc_order[1]]\n        \n\n    def execute_train(self, data, n_iter_data, init_method='pca'):\n        \n        if init_method == 'pca':\n            self.pca_init_w(data)\n        else:\n            self.random_init_w()\n        \n        n_iter = data.shape[0]*n_iter_data\n        \n        arr_data_indx = arange(data.shape[0])\n        \n        self.list_dist_convg = []\n        self.list_quantization_error = []\n        self.list_topgraphic_error = []\n        \n        n_run = _incremental_index_verbose(n_iter)\n        \n        for i_ter in n_run:\n            \n            if (i_ter % data.shape[0]) == 0:\n                np.random.shuffle(arr_data_indx)\n            \n            i_dx = i_ter % data.shape[0]\n            i_dx = arr_data_indx[i_dx]\n            self.update(data[i_dx], self.winner(data[i_dx]), self.inc_iter, n_iter)\n            self.inc_iter += 1\n            \n            \n            if (i_ter % data.shape[0]) == 0:\n                self.list_dist_convg.append(self.dist_convg(data, self._weights, 0.05))\n                self.list_quantization_error.append(self.quantization_error(data))\n                self.list_topgraphic_error.append(self.topgraphic_error(data, self.inc_iter, n_iter))\n        \n        \n    # build up the distance matrix between x and weights\n    def _activate(self, x):\n        s = subtract(x, self._weights) # 
x - w\n it = nditer(self.dist_map, flags=['multi_index'])\n \n while not it.finished:\n self.dist_map[it.multi_index] = sqrt(dot(s[it.multi_index], s[it.multi_index].T))\n it.iternext()\n \n # find out the coordinate of a winner for x\n def winner(self, x):\n self._activate(x)\n return unravel_index(self.dist_map.argmin(),self.dist_map.shape)\n \n # update step\n def update(self, x, win, i_iter, n_iter):\n # calculate the learning rate and sigma for this step\n eta = self.f_decay(self.learning_rate, i_iter, n_iter)\n sig = self.f_decay(self.sigma, i_iter, n_iter)\n # improves the performances\n g = self.neighborhood(win[0], win[1], sig)*eta\n # w_new = eta * neighborhood_function * (x-w)\n self._weights += einsum('ij, ijk->ijk', g, x-self._weights)\n \n # decay function for convergence\n def f_decay(self, learning_rate, i_iter, n_iter):\n return learning_rate / (1+i_iter/(n_iter/2))\n \n # performance: topographic error\n def i_topograohic_error(self, x, eta, sig):\n \n self._activate(x)\n x = np.c_[np.unravel_index(np.argsort(self.dist_map.flatten())[:2], self.dist_map.shape)]\n \n g = self.neighborhood(x[0][0], x[0][1], sig)\n return g[x[1][0], x[1][1]]\n def topgraphic_error(self, data, i_iter, n_iter):\n \n eta = self.f_decay(self.learning_rate, i_iter, n_iter)\n sig = self.f_decay(self.sigma, i_iter, n_iter)\n \n return np.mean(np.apply_along_axis(self.i_topograohic_error, 1, data, eta=eta, sig=sig))\n \n # performance: quantization error\n def quantization_error(self, data):\n \n n_sample = data.shape[0]\n error = 0\n for x in data:\n x_dist = x-self._weights[self.winner(x)]\n error += sqrt(dot(x_dist, x_dist.T))\n \n return error/n_sample\n \n \n # performance: distribution convergence\n def F_test(self, X, Y):\n df1 = len(X) - 1\n df2 = len(Y) - 1\n F = np.var(X)/np.var(Y)\n p_value = scipy.stats.f.cdf(F, df1, df2)\n return p_value\n def t_test(self, X, Y):\n p_value = scipy.stats.ttest_ind(X, Y, equal_var=False)\n return p_value.pvalue\n def dist_convg(self, X, Y, significant_level):\n a = [(self.t_test(X[:, i], Y[:, :, i].flatten()), self.F_test(X[:, i], Y[:, :, i].flatten())) for i in range(X.shape[1])]\n t, f = tuple(zip(*a))\n \n return np.mean((np.array(t)>= significant_level) & (np.array(f)>= significant_level))\n \n def distance_map(self):\n \n um = zeros((self._weights.shape[0], self._weights.shape[1]))\n it = nditer(um, flags=['multi_index'])\n while not it.finished:\n for ii in range(it.multi_index[0]-1, it.multi_index[0]+2):\n for jj in range(it.multi_index[1]-1, it.multi_index[1]+2):\n if (ii >= 0 and ii < self._weights.shape[0] and\n jj >= 0 and jj < self._weights.shape[1]):\n w_1 = self._weights[ii, jj, :]\n w_2 = self._weights[it.multi_index]\n w = w_1 - w_2\n um[it.multi_index] += sqrt(dot(w, w.T))\n it.iternext()\n um = um/um.max()\n return um\n \n","sub_path":"som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"376457088","text":"import logging\nfrom ..core import Menu\nfrom . import User\nfrom . import Group\nfrom . import Permission\nfrom . 
import Ldap\n\nclass Security(Menu):\n    def __init__(self, scr, path):\n        Menu.__init__(self, scr, path, \"Migrate Security\")\n        self.log = logging.getLogger(__name__)\n        self.log.debug(\"Initializing Security Menu.\")\n        self.opthead = [\n            self.mkopt('u', \"Users Migration Setup\", self.submenu(User)),\n            self.mkopt('g', \"Groups Migration Setup\", self.submenu(Group)),\n            self.mkopt('p', \"Permissions Migration Setup\", self.submenu(Permission))]\n        self.opttail = [\n            None,\n            self.mkopt('h', \"Help\", '?'),\n            self.mkopt('q', \"Back\", None, hdoc=False)]\n        path = self.path[:]\n        path.extend([\"LDAP Migration Setup\", \"Migrate LDAP\"])\n        self.ldapmenu = self.mkopt('l', \"LDAP Migration Setup\", self.submenu(Ldap), path=path)\n        self.opts = []\n        self.log.debug(\"Security Menu initialized.\")\n\n    def initialize(self):\n        path = \"Security Migration Setup\", \"LDAP Migration Setup\", \"available\"\n        hasldap = self.scr.state[path].data\n        self.log.debug(\"Readying Security Menu for display (ldap=%s).\", hasldap)\n        self.opts = self.opthead[:]\n        if hasldap: self.opts.append(self.ldapmenu)\n        self.opts.extend(self.opttail)\n        self.log.debug(\"Security Menu ready for display.\")\n","sub_path":"nex2art/menu/Security.py","file_name":"Security.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"118963807","text":"# my solution\n\n# Molecular biologists call a single strand of DNA sense (or positive (+)) \n# if an RNA version of the same sequence is translated or translatable into protein. \n# https://en.wikipedia.org/wiki/Sense_(molecular_biology)\n\n# When referring to DNA transcription, \n# the coding strand is the DNA strand which has the same base sequence as the RNA transcript produced \n# (although with thymine replaced by uracil).\n# https://en.wikipedia.org/wiki/Coding_strand\n\n# 3'CGCTATAGCGTTT 5' DNA antisense (-) strand (template/noncoding) \n# 5'GCGATATCGCAAA 3' DNA sense (+) strand (nontemplate/coding) \n# 5'GCGAUAUCGCAAA 3' mRNA Sense transcript\n# 3'CGCUAUAGCGUUU 5' mRNA Antisense transcript\n\n# for example\n# 5'-GATGGAACTTGACTACGTAAATT-3' sense (+), coding, nontemplate strand\n# 3'-CTACCTTGAACTGATGCATTTAA-5' antisense (-), noncoding, template strand (transcribed strand)\n\nf = open('002TranscribingDNAintoRNA.txt')\ndna = f.readline().strip()\nf.close()\n\n# my solution\ndef TranscribingDNAintoRNA(dna):\n\tres = ''\n\tfor nuc in dna:\n\t\tif nuc == 'T':\n\t\t\tres += 'U'\n\t\telse:\n\t\t\tres += nuc\n\treturn res\n\n# best solution\ndef TranscribingDNAintoRNA2(dna):\n\treturn dna.replace(\"T\", \"U\")\n\nprint (TranscribingDNAintoRNA(dna))\nprint (TranscribingDNAintoRNA2(dna))\n\n\n# The problem itself is not difficult at all, but it is only easy once you understand the central dogma.\n# Simply replacing T with U is the easiest approach, but it is not ideal since it has nothing to do with the central dogma.\n# The key point is that the coding strand differs from the mRNA only by T vs. U.\n","sub_path":"stronghold/002TranscribingDNAintoRNA.py","file_name":"002TranscribingDNAintoRNA.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142283615","text":"import pandas as pd\nimport numpy as np\nimport os\n\n#set number of lags\namt_lags = 48 # 48 * 5 = 240 minute (4 hour) total lag.\n\nfile = 'Own_Classifier'\ntweet_polarity = 'tweet'\n\n#for all files to analyze\nfor n in range(1,55):\n    os.chdir('C:/Users/ikdem/PycharmProjects/Thesis_Analysis/Thesis_Files')\n    df = pd.read_excel(file+str(n)+'.xlsx')\n    df2 = pd.DataFrame()\n    lags = []\n    # for the lags set previously\n    
for lag in range(0,amt_lags+1):\n l1 = []\n l2 = []\n # for the length of the stock price\n for i in range(0, len(df['Stock Price Normalized'])):\n # if there is a stock price change (markets are open) and the lag is not non-existent, append the polarity measure and stock price to a list\n if str(df['Stock Price Normalized'][i]) != '0.0' and i-lag > 0:\n l2.append(float(df['Stock Price Normalized'][i]))\n if tweet_polarity == 'tweet':\n l1.append(float(df['numTweets Normalized'][i - lag]))\n if tweet_polarity == 'polarity':\n l1.append(float(df['Polarity Normalized'][i - lag]))\n # calculate the correlation PER LAG and append it to a seperate list\n corr = np.corrcoef(l1, l2)[0, 1]\n\n lags.append(float(corr))\n # save everything to a seperate excel file that just contains the lags of the corresponding files.\n df2['SP500 lagged correlations'] = lags\n os.chdir('C:/Users/ikdem/PycharmProjects/Thesis_Analysis/Thesis_Files/Correlations_4_hour_lag')\n df2.to_excel(file + str(n) + '_laggedcorrelations_' + tweet_polarity + '.xlsx', sheet_name='sheet1', index=False)\n\n\n","sub_path":"Thesis_Python/Lagged_Correlation.py","file_name":"Lagged_Correlation.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508566900","text":"import math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neural_network import MLPRegressor\n\na_heavy_side = -4\nb_heavy_side = 4\na_poly_2 = -5\nb_poly_2 = 5\na_homo = 0.01\nb_homo = 4\na_sin = - math.pi\nb_sin = math.pi\ngen_data_counts = 100\ntest_percentage = 0.5\n\n\ndef rand_heavy_side(n: int):\n X = []\n Y = []\n for i in range(0, n):\n x = random.uniform(a_heavy_side, b_heavy_side)\n y = math.floor(1 + np.sign(x) * 0.5)\n if not [x] in X:\n X.append([x])\n Y.append(y)\n else:\n i -= 1\n return X, Y\n\n\n# y = x\ndef rand_homo(n: int):\n X = []\n Y = []\n for i in range(0, n):\n x = random.uniform(a_homo, b_homo)\n y = 1 / x\n if not [x] in X:\n X.append([x])\n Y.append(y)\n else:\n i -= 1\n return X, Y\n\n\n# x = 0\ndef rand_poly_2(n: int):\n X = []\n Y = []\n for i in range(0, n):\n x = random.uniform(a_poly_2, b_poly_2)\n y = x ** 2 + 2\n if not [x] in X:\n X.append([x])\n Y.append(y)\n else:\n i -= 1\n return X, Y\n\n\n# y = 0\ndef rand_sin(n: int):\n X = []\n Y = []\n for i in range(0, n):\n x = random.uniform(a_sin, b_sin)\n y = math.sin(2 * x)\n if not [x] in X:\n X.append([x])\n Y.append(y)\n else:\n i -= 1\n return X, Y\n\n\ndef test_heavy_side():\n X_train, Y_train = rand_heavy_side(gen_data_counts)\n X_test, Y_test = rand_heavy_side(int(gen_data_counts * test_percentage))\n mlp = MLPRegressor(hidden_layer_sizes=(4,), activation='relu', solver='lbfgs')\n mlp.fit(X_train, Y_train)\n predict = mlp.predict(X_test)\n draw_scatter(X_train, Y_train, X_test, Y_test, predict)\n\n\ndef test_homo():\n X_train, Y_train = rand_homo(gen_data_counts)\n X_test, Y_test = rand_homo(int(gen_data_counts * test_percentage))\n mlp = MLPRegressor(hidden_layer_sizes=(4, 4), activation='relu', solver='lbfgs')\n mlp.fit(X_train, Y_train)\n predict = mlp.predict(X_test)\n draw_scatter(X_train, Y_train, X_test, Y_test, predict)\n\n\ndef test_poly_2():\n X_train, Y_train = rand_poly_2(gen_data_counts)\n X_test, Y_test = rand_poly_2(int(gen_data_counts * test_percentage))\n mlp = MLPRegressor(hidden_layer_sizes=(8,), activation='relu', solver='lbfgs')\n mlp.fit(X_train, Y_train)\n predict = mlp.predict(X_test)\n draw_scatter(X_train, Y_train, X_test, 
Y_test, predict)\n\n\ndef test_sin():\n    X_train, Y_train = rand_sin(gen_data_counts)\n    X_test, Y_test = rand_sin(int(gen_data_counts * test_percentage))\n    mlp = MLPRegressor(hidden_layer_sizes=(16, 16), activation='relu', solver='lbfgs')\n    mlp.fit(X_train, Y_train)\n    predict = mlp.predict(X_test)\n    draw_scatter(X_train, Y_train, X_test, Y_test, predict)\n\n\ndef draw_scatter(X_train, Y_train, X_test, Y_test, predict):\n    plt.scatter(X_train, Y_train, c='r')\n    plt.scatter(X_test, Y_test, c='b')\n    plt.scatter(X_test, predict, c='g')\n    plt.show()\n\n\ntest_heavy_side()\ntest_homo()\ntest_poly_2()\ntest_sin()\n","sub_path":"sec_1.py","file_name":"sec_1.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"7465999","text":"currency_from = input()\ncurrency_to = input()\namount_from = input()\ndef exchange(currency_from, currency_to, amount_from):\n    \"\"\"Returns: amount of currency received in the given exchange.\n\n    In this exchange, the user is changing amount_from money in \n    currency currency_from to the currency currency_to. The value \n    returned represents the amount in currency currency_to.\n\n    The value returned has type float.\n\n    Parameter currency_from: the currency on hand\n    Precondition: currency_from is a string for a valid currency code\n\n    Parameter currency_to: the currency to convert to\n    Precondition: currency_to is a string for a valid currency code\n\n    Parameter amount_from: amount of currency to convert\n    Precondition: amount_from is a string representing a float\"\"\"\n    from urllib.request import urlopen\n\n    doc = urlopen(\"http://cs1110.cs.cornell.edu/2016fa/a1server.php?from=\"+currency_from+\"&to=\"+currency_to+\"&amt=\"+amount_from)\n    docstr = doc.read()\n    doc.close()\n    jstr = docstr.decode('ascii')\n    print(jstr)\n    jstr1 = jstr.split(\" : \")\n    if jstr1[-2] == ' false,\"error\" ':\n        value = \"Source currency code is invalid.\"\n    else:\n        jstr2 = jstr1[2].split('\"')\n        jstr3 = jstr2[1].split()\n        value = float(jstr3[0])\n    return value\n\nprint(exchange(currency_from, currency_to, amount_from))\ndef test_exchange():\n    assert exchange(\"USD\", \"EUR\", \"2.5\") == 2.1689225\n","sub_path":"pyassign2/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224342144","text":"import numpy, scipy, sys, pandas, scipy.stats\nimport matplotlib.pyplot as pyplot\nsys.path.append(\"/Users/pwangel/Gene_Analysis\")\nfrom ga_utils import *\n\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nfrom plotly.graph_objs import *\n\ndir = '/Users/pwangel/Downloads/'\n\n# Read in data\n\ndata = pandas.read_csv(dir+'/gene_count_frags.txt', sep='\\t')\n\ncounts_per_sample = data.sum()\n\nCPM_data = numpy.log2(data/counts_per_sample*1.e6+1).replace([-numpy.inf, numpy.inf], numpy.nan)\nraw_proportion = numpy.power(2.0, CPM_data)/1.e6\n\n# Read in top correlated genes\ntop_50_correlations = numpy.load('/Users/pwangel/Data/correlation_arrays/top_50_gene_pairs.npy')\n\ngene_a = data.loc['ENSG00000088325'].values\ngene_b = data.loc['ENSG00000131747'].values\n\ngene_a = data.loc['ENSG00000134690'].values\ngene_b = data.loc['ENSG00000178999'].values\n\nmin_res = numpy.log2(1/counts_per_sample.max()*1.e6/16.0)\nCPM_array, del_CPM= numpy.linspace(start=min_res, stop = CPM_data.max().max(), num=int(1.e3), retstep=True)\nCPM_array = 0.5*CPM_array[1:]+0.5*CPM_array[:-1]\n\nprob_dist = 
numpy.zeros(shape=(len(CPM_array), len(CPM_array))) \n\nfor i_gene_count, j_gene_count, total_counts in zip(gene_a, gene_b, counts_per_sample):\n\n i_prob_dist = mcmc.find_prob_distribution(i_gene_count, total_counts, CPM_array, del_CPM)\n j_prob_dist = mcmc.find_prob_distribution(j_gene_count, total_counts, CPM_array, del_CPM)\n\n prob_dist += numpy.outer(i_prob_dist, j_prob_dist)\n\nprob_dist = prob_dist/prob_dist.sum()/del_CPM/del_CPM\n\ndata_to_plot = [Heatmap(z=prob_dist.transpose(), x=CPM_array, y=CPM_array, colorbar=dict(x=-.1))]\nfig = Figure(data=data_to_plot)\nplot(fig, auto_open=True) \n\nchain = mcmc.run_smooth_mcmc(prob_dist, CPM_array)\n\nresults = chain[:,500:,:].reshape((-1,6))","sub_path":"SC/mcmc_correlations.py","file_name":"mcmc_correlations.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"96926489","text":"import os\n\nfrom perfrunner.helpers import local\nfrom perfrunner.helpers.cbmonitor import timeit, with_stats\nfrom perfrunner.tests import PerfTest\n\n\nclass BackupRestoreTest(PerfTest):\n\n def extract_tools(self):\n local.extract_cb(filename='couchbase.rpm')\n\n def flush_buckets(self):\n for i in range(self.test_config.cluster.num_buckets):\n bucket = 'bucket-{}'.format(i + 1)\n self.rest.flush_bucket(self.master_node, bucket)\n\n def backup(self, mode=None):\n local.backup(\n master_node=self.master_node,\n cluster_spec=self.cluster_spec,\n wrapper=self.rest.is_community(self.master_node),\n mode=mode,\n compression=self.test_config.backup_settings.compression,\n )\n\n def compact(self):\n snapshots = local.get_backup_snapshots(self.cluster_spec)\n local.compact(self.cluster_spec,\n snapshots,\n self.rest.is_community(self.master_node)\n )\n\n def restore(self):\n local.drop_caches()\n\n local.restore(cluster_spec=self.cluster_spec,\n master_node=self.master_node,\n wrapper=self.rest.is_community(self.master_node))\n\n def run(self):\n self.extract_tools()\n\n self.load()\n self.wait_for_persistence()\n\n\nclass BackupTest(BackupRestoreTest):\n\n @with_stats\n @timeit\n def backup(self, mode=None):\n super().backup(mode)\n\n def _report_kpi(self, time_elapsed):\n edition = self.rest.is_community(self.master_node) and 'CE' or 'EE'\n backup_size = local.calc_backup_size(self.cluster_spec)\n\n self.reporter.post(\n *self.metrics.bnr_throughput(time_elapsed,\n edition,\n tool='backup')\n )\n\n self.reporter.post(\n *self.metrics.backup_size(backup_size, edition)\n )\n\n def run(self):\n super().run()\n\n time_elapsed = self.backup()\n\n self.report_kpi(time_elapsed)\n\n\nclass BackupSizeTest(BackupTest):\n\n def _report_kpi(self, *args):\n edition = self.rest.is_community(self.master_node) and 'CE' or 'EE'\n backup_size = local.calc_backup_size(self.cluster_spec)\n\n self.reporter.post(\n *self.metrics.backup_size(backup_size, edition)\n )\n\n\nclass BackupTestWithCompact(BackupTest):\n\n @with_stats\n @timeit\n def backup(self, mode=None):\n super().backup(mode)\n super().compact()\n\n\nclass BackupUnderLoadTest(BackupTest):\n\n def run(self):\n super(BackupTest, self).run()\n\n self.hot_load()\n\n self.access_bg()\n\n time_elapsed = self.backup()\n\n self.report_kpi(time_elapsed)\n\n\nclass MergeTest(BackupRestoreTest):\n\n @with_stats\n @timeit\n def merge(self):\n snapshots = local.get_backup_snapshots(self.cluster_spec)\n\n local.drop_caches()\n\n local.cbbackupmgr_merge(self.cluster_spec, snapshots)\n\n def _report_kpi(self, time_elapsed):\n 
self.reporter.post(\n *self.metrics.merge_throughput(time_elapsed)\n )\n\n def run(self):\n self.extract_tools()\n\n self.load()\n self.wait_for_persistence()\n self.backup() # 1st snapshot\n\n self.flush_buckets()\n\n self.load()\n self.wait_for_persistence()\n self.backup(mode=True) # 2nd snapshot\n\n time_elapsed = self.merge()\n\n self.report_kpi(time_elapsed)\n\n\nclass RestoreTest(BackupRestoreTest):\n\n @with_stats\n @timeit\n def restore(self):\n super().restore()\n\n def _report_kpi(self, time_elapsed):\n edition = self.rest.is_community(self.master_node) and 'CE' or 'EE'\n\n self.reporter.post(\n *self.metrics.bnr_throughput(time_elapsed,\n edition,\n tool='restore')\n )\n\n def run(self):\n super().run()\n\n self.backup()\n\n self.flush_buckets()\n\n time_elapsed = self.restore()\n\n self.report_kpi(time_elapsed)\n\n\nclass ExportImportTest(BackupRestoreTest):\n\n def export(self):\n local.cbexport(master_node=self.master_node,\n cluster_spec=self.cluster_spec,\n bucket=self.test_config.buckets[0],\n data_format=self.test_config.export_settings.format)\n\n def import_data(self):\n import_file = self.test_config.export_settings.import_file\n if import_file is None:\n import_file = 'data.{}'.format(self.test_config.export_settings.type)\n import_file = os.path.join(self.cluster_spec.backup, import_file)\n if self.test_config.export_settings.format != 'sample':\n import_file = 'file://{}'.format(import_file)\n\n local.drop_caches()\n\n local.cbimport(master_node=self.master_node,\n cluster_spec=self.cluster_spec,\n data_type=self.test_config.export_settings.type,\n data_format=self.test_config.export_settings.format,\n bucket=self.test_config.buckets[0],\n import_file=import_file)\n\n def _report_kpi(self, time_elapsed: float):\n self.reporter.post(\n *self.metrics.import_and_export_throughput(time_elapsed)\n )\n\n\nclass ExportTest(ExportImportTest):\n\n @with_stats\n @timeit\n def export(self):\n super().export()\n\n def run(self):\n super().run()\n\n time_elapsed = self.export()\n\n self.report_kpi(time_elapsed)\n\n\nclass ImportTest(ExportImportTest):\n\n @with_stats\n @timeit\n def import_data(self):\n super().import_data()\n\n def run(self):\n super().run()\n\n self.export()\n\n self.flush_buckets()\n\n time_elapsed = self.import_data()\n\n self.report_kpi(time_elapsed)\n\n\nclass ImportSampleDataTest(ImportTest):\n\n def _report_kpi(self, time_elapsed: float):\n self.reporter.post(\n *self.metrics.import_file_throughput(time_elapsed)\n )\n\n def run(self):\n self.extract_tools()\n\n time_elapsed = self.import_data()\n\n self.report_kpi(time_elapsed)\n","sub_path":"perfrunner/tests/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"285295725","text":"#!/usr/bin/env python\n\n# Script to convert a directory of js-files coming from NEMA to the corresponding \n# lab-files. 
It should be called from the command line with the following parameters:\n# \n# \t\ta plain text file containing the base names (without extension)\n# \t\t\t\tof the files to convert, separated by newlines\n# \t\tthe directory with the js-files\n# \t\tthe directory in which the resulting lab-files should be saved\n# \n# Author: Johan Pauwels (johan.pauwels@gmail.com)\n# Last changed: 2010-08-08\n# License: GPL\n# \n# Made for Python 2.6\n\nimport json\nimport re\nimport os\nimport sys\nimport os.path\n\ndef convert_json_file_to_lab_files(base_name, js_dir, lab_dir):\n\t\"\"\"Convert NEMA js-file to lab-files\n\n\tConverts the file .js located in to a set of\n\tlab-files in . For each of the data series present in \n\tthe js-file, a subdirectory will be created. The resulting lab-files\n\tare thus written to //.lab, \n\te.g. /path/to/lab_dir/Ground-truth/chordschordmrx09000000.lab\n\n\t\"\"\"\n\n\t# Read js-file and store into single-line string\n\tjs_path = os.path.join(js_dir, base_name+'.js')\n\twith open(js_path) as js_file:\n\t\tjs_content = js_file.read()\n\t\tjs_content = js_content.replace('\\n','')\n\t\t\n\t\t# Use regexp to isolate json\n\t\tdata_exp = re.compile('var .*_data = ([^;]*);')\n\t\tnames_exp = re.compile('var .*_seriesNames = ([^;]*);')\n\t\tdata_match = data_exp.search(js_content)\n\t\tnames_match = names_exp.search(js_content)\n\t\tnames = names_match.group(1)\n\t\tdata = data_match.group(1)\n\t\t\n\t\t# Convert into proper json by quoting strings\n\t\tdata = data.replace('o:','\"o\":')\n\t\tdata = data.replace('f:','\"f\":')\n\t\tdata = data.replace('l:','\"l\":')\n\t\tdata = data.replace('a:','\"a\":')\n\t\t\n\t\t# Replace problematic labels\n\t\tdata = data.replace('\\t',' ')\n\t\tdata = data.replace('\\\\','\\\\\\\\')\n\t\t\n\t\t# Parse json\n\t\tn = json.loads(names)\n\t\td = json.loads(data)\n\t\t\n\t\t# Write lab file for each series\n\t\tfor j in range(len(n)):\n\t\t\tname_dir = os.path.join(lab_dir, n[j])\n\t\t\tif not os.path.exists(name_dir):\n\t\t\t\tos.makedirs(name_dir)\n\t\t\tlab_path = os.path.join(name_dir, base_name+'.lab')\n\t\t\twith open(lab_path, 'w') as lab_file:\n\t\t\t\tfor k in range(len(d[j])):\n\t\t\t\t\tlab_file.write('{o:.7f}\\t{f:.7f}\\t'.format(**d[j][k]))\n\t\t\t\t\t# Hack to work around parsing error in NEMA chord parser\n\t\t\t\t\tif n[j] != 'Ground-truth' and d[j][k]['l'] == 'F#:7sus4':\n#\t\t\t\t\t\tprint('Malformed silence detected in {0} from {o:.7f} to {f:.7f}'.format(n[j], **d[j][k]))\n\t\t\t\t\t\tlab_file.write('N\\n')\n\t\t\t\t\telse:\n\t\t\t\t\t\tlab_file.write('{l}\\n'.format(**d[j][k]))\n\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 4:\n\t\tprint('Usage: ' + os.path.basename(sys.argv[0]) + ' list js_dir lab_dir')\n\t\tsys.exit()\n\t\t\n\t# Get paths from command line\n\tlist_path = sys.argv[1]\n\tjs_dir = sys.argv[2]\n\tlab_dir = sys.argv[3]\n\t\n\t# Open list\n\twith open(list_path) as list:\n\t\tfor i in list:\n\t\t\ti = i.strip()\n\t\t\t# Convert every file in list\n\t\t\tprint('Processing file ' + i)\n\t\t\tconvert_json_file_to_lab_files(i, js_dir, lab_dir)","sub_path":"convert_json_labels_to_lab.py","file_name":"convert_json_labels_to_lab.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224835332","text":"import numpy\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.convolutional import Conv1D\nfrom 
keras.layers.convolutional import MaxPooling1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\nfrom data_structs import Vocabulary\nimport json,sys\nimport pandas as pd\n\ndef get_data():\n voc = Vocabulary(init_from_file=\"Voc\")\n df=pd.read_csv('../../result/{}'.format(sys.argv[1]))\n smiles=df[\"Ligand SMILES\"].tolist()\n label=df[\"label\"].tolist()\n token_list = [voc.tokenize(mol) for mol in smiles]\n encode_list = [voc.encode(token) for token in token_list]\n print(set(voc.aa))\n # exit()\n with open(\"data.json\",\"w\") as f:\n json.dump({\"encode_list\":encode_list, \"label\":label},f) \n return encode_list, label\ntry:\n with open(\"data.json\",\"r\") as f:\n r=json.load(f)\n encode_list=r[\"encode_list\"] \n label=r[\"label\"] \nexcept:\n encode_list, label = get_data()\n\n# fix random seed for reproducibility\nnumpy.random.seed(7)\n# load the dataset but only keep the top n words, zero the rest\ntop_words = 68 \nX_train = encode_list\ny_train = label\n\nX_test = encode_list[5000:]\ny_test = label[5000:]\n#exit()\n# truncate and pad input sequences\nmax_review_length = len(max(encode_list,key=len))\nprint(max_review_length)\n\nX_train = sequence.pad_sequences(X_train, maxlen=max_review_length)\nX_test = sequence.pad_sequences(X_test, maxlen=max_review_length)\n# create the model\nembedding_vecor_length = 32\nmodel = Sequential()\nmodel.add(Embedding(top_words, embedding_vecor_length, input_length=max_review_length))\nmodel.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(LSTM(100))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\nmodel.fit(X_train, y_train, epochs=30, batch_size=64)\n# Final evaluation of the model\n#scores = model.evaluate(X_test, y_test, verbose=0)\nscores = model.evaluate(X_train, y_train, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"587536309","text":"from ...generics import fs_utils\nfrom . 
import scielo_id_gen\nimport xml.etree.ElementTree as ET\n\n\ndef add_scielo_id_to_received_documents(\n received_documents, registered_documents, file_paths):\n \"\"\"Atualiza scielo_id dos documentos recebidos.\"\"\"\n for name, received in received_documents.items():\n if not received.scielo_id:\n add_scielo_id(\n received,\n registered_documents.get(name),\n file_paths.get(name),\n )\n\n\ndef add_scielo_id(received, registered, file_path):\n \"\"\"Atualiza received.registered_scielo_id com o valor do\n registered.scielo_id ou gerando um novo scielo_id.\"\"\"\n if registered and registered.scielo_id:\n received.registered_scielo_id = registered.scielo_id\n else:\n received.registered_scielo_id = scielo_id_gen.generate_scielo_pid()\n xml = ET.parse(file_path)\n node = xml.find(\".//article-meta\")\n if node is not None:\n article_id = ET.Element(\"article-id\")\n article_id.set(\"specific-use\", \"scielo\")\n article_id.set(\"pub-type-id\", \"publisher-id\")\n article_id.text = received.registered_scielo_id\n node.insert(0, article_id)\n new_content = ET.tostring(xml.find(\".\")).decode(\"utf-8\")\n fs_utils.write_file(file_path, new_content)\n","sub_path":"src/scielo/bin/xml/app_modules/app/data/scielo_id_manager.py","file_name":"scielo_id_manager.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"528266581","text":"from distutils.command.build_py import build_py as _build_py\nfrom distutils.core import setup\n\nCORRFITTER_VERSION = '8.0.3'\n\nclass build_py(_build_py):\n def run(self):\n \"\"\" Append version number to corrfitter.py \"\"\"\n with open('src/corrfitter.py', 'a') as cffile:\n cffile.write(\"\\n__version__ = '%s'\\n\" % CORRFITTER_VERSION)\n _build_py.run(self)\n\n# pypi\nwith open('README.rst', 'r') as file:\n long_description = file.read()\n\nsetup(name='corrfitter',\n version=CORRFITTER_VERSION,\n description='Utilities for fitting correlators in lattice QCD.',\n author='G. 
Peter Lepage, Cornell University',\n author_email='g.p.lepage@cornell.edu',\n license='GPLv3+',\n packages={''},\n package_dir={'':'src'},\n cmdclass={'build_py': build_py},\n requires=[\"lsqfit (>=11.2)\", 'numpy (>=1.7)', 'gvar (>=9.1)'],\n install_requires=['lsqfit>=11.2', 'gvar>=9.1', 'numpy>=1.7'],\n platforms=\"Any\",\n url=\"https://github.com/gplepage/corrfitter.git\",\n long_description=long_description,\n classifiers = [ #\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics'\n ]\n\n)","sub_path":"pypi_install_script/corrfitter-8.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"284361469","text":"import sys\nsys.path.insert(0, '..')\nsys.path.insert(0, '../../textmodel')\n\nfrom textmodel import TextModel\nfrom wxtextview import WXTextView\n\nimport wx\n\n\nmodel = TextModel(u'Hello World!')\nmodel.set_properties(6, 11, fontsize=14)\nmodel.set_properties(6, 11, bgcolor='yellow')\n\napp = wx.App()\nframe = wx.Frame(None)\nview = WXTextView(frame)\nview.model = model\n\nview.index = 5\nview.selection = 0, 5\n\nframe.Show()\napp.MainLoop()\n","sub_path":"wxtextview/demo/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"460860387","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import http, _\nfrom odoo.addons.portal.controllers.portal import CustomerPortal, pager as portal_pager\nfrom odoo.exceptions import AccessError\nfrom odoo.http import request\nfrom odoo.tools import consteq\n\n\nclass PortalAccount(CustomerPortal):\n \n # def _get_maintenance_data_domain(self):\n # partner = request.env.user.partner_id\n # domain = [\n # ('maintenance_type', 'in', ['Corrective']),\n # ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]),\n # ('srage_id', 'in', ['new', 'inprogress'])\n # ]\n # return domain\n\n def _prepare_portal_layout_values(self):\n values = super(PortalAccount, self)._prepare_portal_layout_values()\n partner = request.env.user.partner_id\n\n mcorrective_count = request.env['maintenance.request'].search_count([\n ('partner_group_name', '=', partner.grade_id.id),\n ])\n # mcorrective_count = request.env['maintenance.request'].search_count([])\n\n # my_mcorrective_count = 1\n values['mcorrective_count'] = mcorrective_count\n return values\n\n\n\n\n\n # def _prepare_portal_layout_values(self):\n # values = super(PortalMaintances, self)._prepare_portal_layout_values()\n # maintenance_count = request.env['maintenance.request'].search_count(self._get_maintenance_data_domain())\n # values['maintenance_count'] = maintenance_count\n # return values\n\n # ------------------------------------------------------------\n # My Maintenances\n # ------------------------------------------------------------\n\n def _mcorrective_check_access(self, maintenance_ids, access_token=None):\n mcorrective = request.env['maintenance.request'].browse([maintenance_ids])\n mcorrective_sudo = mcorrective.sudo()\n try:\n mcorrective.check_access_rights('read')\n mcorrective.check_access_rule('read')\n except AccessError:\n if not access_token or not consteq(mcorrective_sudo.access_token, access_token):\n raise\n return mcorrective_sudo\n\n def _mcorrective_get_page_view_values(self, mcorrective, access_token, **kwargs):\n values = {\n 'page_name': 'maintenances',\n 'mcorrective': mcorrective,\n }\n if access_token:\n values['no_breadcrumbs'] = True\n values['access_token'] = access_token\n\n if kwargs.get('error'):\n values['error'] = kwargs['error']\n if kwargs.get('warning'):\n values['warning'] = kwargs['warning']\n if kwargs.get('success'):\n values['success'] = kwargs['success']\n\n history = request.session.get('my_corrective_history', [])\n values.update(get_records_pager(history, mcorrective))\n return values\n\n\n\n\n\n\n\n @http.route(['/my/maintenances', '/my/maintenances/page/'], type='http', auth=\"user\", website=True)\n def portal_my_maintenances(self, page=1, date_begin=None, date_end=None, sortby=None, **kw):\n values = self._prepare_portal_layout_values()\n partner = request.env.user.partner_id\n MaintenanceCorrectiveObj = request.env['maintenance.request']\n\n\n domain = [\n ('partner_group_name', '=', partner.grade_id.id),\n ]\n\n\n\n searchbar_sortings = {\n #'date': {'label': _('Invoice Date'), 'order': 'date_invoice desc'},\n #'duedate': {'label': _('Due Date'), 'order': 'date_due desc'},\n #'name': {'label': _('Reference'), 'order': 'name desc'},\n #'state': {'label': _('Status'), 'order': 'state'},\n }\n # default sort by order\n if not sortby:\n sortby = 'name'\n #order = searchbar_sortings[sortby]['order']\n\n archive_groups = self._get_archive_groups('maintenance.request', domain)\n\n\n if date_begin and date_end:\n domain += [('create_date', '>', date_begin), 
('create_date', '<=', date_end)]\n\n # count for pager\n\n mcorrective_count = MaintenanceCorrectiveObj.search_count(domain)\n # pager\n pager = portal_pager(\n url=\"/my/maintenances\",\n url_args={'date_begin': date_begin, 'date_end': date_end, 'sortby': sortby},\n total=mcorrective_count,\n page=page,\n step=self._items_per_page\n )\n # content according to pager and archive selected\n mcorrective = MaintenanceCorrectiveObj.search(domain, limit=self._items_per_page, offset=pager['offset'])\n request.session['my_mcorrective_history'] = mcorrective.ids[:100]\n values.update({\n 'date': date_begin,\n 'mcorrective_obj': mcorrective.sudo(),\n 'page_name': 'maintenances',\n 'pager': pager,\n 'archive_groups': archive_groups,\n 'default_url': '/my/maintenances',\n 'searchbar_sortings': searchbar_sortings,\n 'sortby': sortby,\n })\n\n return request.render(\"maintenance.portal_my_maintenances\", values)\n\n @http.route(['/my/maintenances/'], type='http', auth=\"public\", website=True)\n def portal_my_mcorrective_detail(self, maintenance_ids, access_token=None, **kw):\n try:\n mcorrective_sudo = self._mcorrective_check_access(maintenance_ids, access_token)\n except AccessError:\n return request.redirect('/my')\n\n values = self._mcorrective_get_page_view_values(mcorrective_sudo, access_token, **kw)\n return request.render(\"maintenance.portal_my_corrective_page\", values)\n\n @http.route(['/my/maintenances/pdf/'], type='http', auth=\"public\", website=True)\n def portal_my_invoice_report(self, maintenance_ids, access_token=None, **kw):\n try:\n mcorrective_sudo = self._mcorrective_check_access(maintenance_ids, access_token)\n except AccessError:\n return request.redirect('/my')\n\n # print report as sudo, since it require access to taxes, payment term, ... and portal\n # does not have those access rights.\n #pdf = request.env.ref('account.account_invoices').sudo().render_qweb_pdf([invoice_sudo.id])[0]\n #pdfhttpheaders = [\n # ('Content-Type', 'application/pdf'),\n # ('Content-Length', len(pdf)),\n #]\n #return request.make_response(pdf, headers=pdfhttpheaders)\n\n # ------------------------------------------------------------\n # My Home\n # ------------------------------------------------------------\n\n # def details_form_validate(self, data):\n # error, error_message = super(PortalMaintances, self).details_form_validate(data)\n # # prevent VAT/name change if invoices exist\n # partner = request.env['res.users'].browse(request.uid).partner_id\n # invoices = request.env['maintenance.request'].sudo().search_count([])\n # if invoices:\n # if 'vat' in data and (data['vat'] or False) != (partner.vat or False):\n # error['vat'] = 'error'\n # error_message.append(_('Changing VAT number is not allowed once invoices have been issued for your account. Please contact us directly for this operation.'))\n # if 'name' in data and (data['name'] or False) != (partner.name or False):\n # error['name'] = 'error'\n # error_message.append(_('Changing your name is not allowed once invoices have been issued for your account. 
Please contact us directly for this operation.'))\n # return error, error_message\n","sub_path":"_data/maintanance-old-181017/controllers/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415890775","text":"from app import app\n\nimport unittest\nimport json\n\nclass CitiesTestCase(unittest.TestCase):\n\n def test_index(self):\n tester = app.test_client(self)\n response = tester.get('/cities.json', content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, json.dumps(['Amsterdam', 'San Francisco', 'Berlin', 'New York', 'Chennai']))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"398459373","text":"\n# A very simple Flask Hello World app for you to get started with...\n\nfrom flask import Flask, redirect, render_template, request, url_for\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom imgurpython import *\nimport datetime\nimport facebook\nimport re\n\nclient_id = '3dc6a9946ed98c1'\nclient_secret = '7b5b3c6a0592297c21bcc77b7c293d9fb9fd3f0e'\n\nclient = ImgurClient(client_id, client_secret)\n\ngraph = facebook.GraphAPI(access_token='92b767b89e24ff74358f84a8185ba374', version='2.7')\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\nSQLALCHEMY_DATABASE_URI = \"mysql+mysqlconnector://junseishin:NotMyPassword@junseishin.mysql.pythonanywhere-services.com/junseishin$blog\".format(\n username=\"\",\n password=\"\",\n hostname=\"\",\n databasename=\"\",\n)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = SQLALCHEMY_DATABASE_URI\napp.config[\"SQLALCHEMY_POOL_RECYCLE\"] = 299\n\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n\n __tablename__ = \"accounts\"\n\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(4096))\n password = db.Column(db.String(4096))\n type = db.Column(db.String(4096))\n\nclass Post(db.Model):\n\n __tablename__ = \"posts\"\n\n id = db.Column(db.Integer, primary_key=True)\n month = db.Column(db.String(4096))\n year = db.Column(db.Integer)\n date = db.Column(db.Integer)\n title = db.Column(db.String(4096))\n type = db.Column(db.String(4096))\n content = db.Column(db.TEXT)\n\nmonth_abr=[\"\",\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/gallery')\ndef gallery():\n return render_template(\"gallery.html\")\n\n@app.route('/fullwidth')\ndef fullwidth():\n return render_template(\"full-width.html\")\n\n@app.route('/sbleft')\ndef sbleft():\n return render_template(\"sidebar-left.html\")\n\n@app.route('/sbright')\ndef sbright():\n return render_template(\"sidebar-right.html\")\n\n@app.route('/basicgrid')\ndef basicgrid():\n return render_template(\"basic-grid.html\")\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return \"nope\"\n\n#BLOG METHODS\n\n@app.route('/blog')\ndef blog():\n posts=Post.query.order_by(-Post.id)\n years, months = sideYM(posts);\n return render_template(\"blog.html\",nav=0,\n posts=posts.limit(5),years=years,months=months)\n\n@app.route('/blog/')\ndef posty(year=None):\n posts=Post.query.filter_by(year=year).order_by(-Post.id)\n years, months = sideYM(posts);\n return render_template(\"blog.html\",nav=1,\n 
posts=posts,years=years,months=months)\n\n@app.route('/blog//')\ndef postm(year=None,month=None):\n posts=Post.query.filter_by(year=year, month=month).order_by(-Post.id)\n years, months = sideYM(posts);\n return render_template(\"blog.html\",nav=2,\n posts=posts,years=years,months=months)\n\n@app.route('/blog///')\ndef post(year=None,month=None,id=None):\n posts=Post.query.filter_by(year=year, month=month, id=id).order_by(-Post.id)\n years, months = sideYM(posts);\n return render_template(\"blog.html\",nav=3,\n posts=posts, years=years, months=months)\n\n@app.route('/blog/createpost', methods=['GET', 'POST'])\ndef createPost():\n error = None\n preview = None\n posts=Post.query.order_by(-Post.id)\n years, months = sideYM(posts);\n if request.method == 'POST':\n if request.form['submit']==\"Submit\":\n if User.query.filter_by(username=request.form['username'],\n password=request.form['password'],type='admin').count()==0:\n error = 'Invalid Credentials. Please try again.'\n else:\n now=datetime.datetime.now()\n title=request.form['title']\n content=request.form['content']\n post = Post(year=now.year,month=month_abr[now.month],date=now.day,\n type=\"HTML\",title=title,content=content)\n db.session.add(post)\n db.session.commit()\n #attachment = {\n # 'name': 'Who? Blog',\n # 'link': url_for('.post', year=post.year, month=post.month, id=post.id),\n # 'caption': title,\n # 'description': re.search(r\"

TL;DR:(.+?)\", content).group(0)\n #}\n #graph.put_wall_post(message=re.search(r\"(.+?)
    \", content).group(0)[:100]+\"...\", attachment=attachment)\n return redirect(url_for('blog'))\n elif request.form['submit']==\"Preview\":\n preview = request.form['content']\n return render_template('blog.html',nav=-1,\n posts=posts, years=years, months=months,\n error=error, preview = preview)\n\ndef sideYM(posts):\n years = []\n for post in Post.query.distinct(Post.year).order_by(-Post.id):\n if post.year not in years:\n years.append(post.year)\n months = []\n if posts == None or posts.count()<1:\n for post in Post.query.filter_by(year=datetime.datetime.now().year).distinct(Post.month).order_by(-Post.id):\n if post.month not in months:\n months.append(post.month)\n else:\n for post in Post.query.filter_by(year=posts.first().year).distinct(Post.month).order_by(-Post.id):\n if post.month not in months:\n months.append(post.month)\n return years, months\n\n\n#Photos\n\n@app.route('/photos')\ndef photos():\n return render_template(\"photos.html\",pictures=pictures,albums=albums)\n\n@app.route('/photos/')\ndef photosID():\n return render_template(\"photos.html\",pictures=pictures,albums=albums)","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211669228","text":"import collections\n\n\ndef build_model(train_x, train_y):\n train_y = encode_labels(train_y)\n model = Sequential()\n model.add(Dense(256, input_dim=len(train_x.columns), activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(4, activation=\"softmax\"))\n model.compile(\n # optimizer=keras.optimizers.RMSprop(lr=1e-2, decay=1e-8),\n optimizer=keras.optimizers.Adagrad(\n lr=0.001, beta_1=0.9, beta_2=0.999),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n model.fit(train_x.as_matrix(), train_y.as_matrix(),\n epochs=100, verbose=1, batch_size=32)\n return model\n\n\ndef encode_labels(label_list):\n res = []\n vals = [1, 2, 3, 4]\n for i in label_list:\n ret = {i: 1}\n for x in [x for x in vals if x != i]:\n ret[x] = 0\n res.append(ret)\n return pd.DataFrame(res, columns=vals)\n\n\ndef model_eval(model, val_data, val_labels):\n scores = model.predict(model_val.as_matrix())\n print('Expected: ' + str(dict(collections.Counter(model_val_label.tolist()))))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"217485609","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport datetime\nimport math\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset, DataLoader\n\ntorch.set_printoptions(precision=5)\n\n\nprint(f\"Time: {datetime.datetime.now()}\")\n\ndef export_result(model_name, trained_model, file_loc, output_name):\n\n # import test data\n print('Import test data (x):')\n sh_hrtf_test = pd.read_csv(file_loc + 'SHed_hrtf_dB.txt', header=None)\n print(sh_hrtf_test.head(5))\n print(sh_hrtf_test.shape)\n print('Import test data (y):')\n hrtf_test = pd.read_csv(file_loc + 'hrtf_dB.txt', header=None)\n print(hrtf_test.head(5))\n print(hrtf_test.shape)\n\n print('x data:')\n x_test = sh_hrtf_test.values.astype('float')\n x_test = x_test.reshape(np.size(x_test,0), 1, 2, -1)\n print('shape of x_test:')\n 
print(x_test.shape)\n\n print('---------------')\n print('y data:')\n y_test = hrtf_test.values.astype('float')\n print('shape of y_hold:')\n print(y_test.shape)\n\n print('===============')\n\n # standardise data\n means = trained_model['data_mean_hrtf']\n stds = trained_model['data_std_hrtf']\n x_test = (x_test - means) / stds\n\n class Load_Dataset(Dataset):\n\n def __init__(self, input_hrtf, output_hrtf):\n self.input_hrtf = torch.tensor(input_hrtf, dtype=torch.float)\n self.output_hrtf = torch.tensor(output_hrtf, dtype=torch.float)\n\n def __len__(self):\n return len(self.output_hrtf)\n\n def __getitem__(self, index):\n return self.input_hrtf[index], self.output_hrtf[index]\n\n # load data\n testset = Load_Dataset(x_test, y_test)\n test_data_num = np.size(y_test,0)\n testloader = torch.utils.data.DataLoader(testset, batch_size=test_data_num, shuffle=False)\n\n test_dataiter = iter(testloader)\n test_input, test_label = test_dataiter.next()\n\n\n left_net = trained_model['left_model']\n print(left_net)\n right_net = trained_model['right_model']\n print(right_net)\n\n left_net.load_state_dict(trained_model['left_model_state_dict'])\n right_net.load_state_dict(trained_model['right_model_state_dict'])\n criterion = nn.MSELoss()\n\n left_net.eval()\n right_net.eval()\n\n left_test_loss = 0.0\n right_test_loss = 0.0\n accuracy = 0.0\n test_losses = []\n\n for test_input, test_label in testloader:\n with torch.no_grad():\n left_net.eval()\n right_net.eval()\n\n left_output = left_net(test_input)\n left_loss = criterion(left_output, test_label[:, 0:256])\n left_test_loss += left_loss.item()\n\n right_output = right_net(test_input)\n right_loss = criterion(right_output, test_label[:, 256:512])\n right_test_loss += right_loss.item()\n\n # left_output = np.log10(left_output) * 20\n # right_output = np.log10(right_output) * 20\n\n np.savetxt(model_name + output_name + '.csv', np.concatenate((left_output.numpy(), right_output.numpy()), axis=1), delimiter=',')\n\n print(f\"Left Test loss: {left_test_loss / len(testloader)}\")\n print(f\"Right Test loss: {left_test_loss / len(testloader)}\")\n print(f\"Left Error mean: {torch.mean(torch.abs(left_output - test_label[:, 0:256]))}\")\n print(f\"Right Error mean: {torch.mean(torch.abs(left_output - test_label[:, 256:512]))}\")\n print(f\"Left Error std: {torch.std(torch.abs(left_output - test_label[:, 0:256]))}\")\n print(f\"Right Error std: {torch.std(torch.abs(left_output - test_label[:, 256:512]))}\")\n\n # plt.hist(output.numpy() - val_label.numpy())\n # plt.show()\n\n idx = 3\n plt.figure()\n plt.subplot(211)\n plt.plot(left_output.numpy()[idx, :], label='predict')\n plt.plot(test_label.numpy()[idx, 0:256], label='target')\n plt.legend(frameon=False)\n plt.title('hrtf left')\n plt.xscale(\"log\")\n plt.ylim(-50, 10)\n plt.subplot(212)\n plt.plot(right_output.numpy()[idx, :], label='predict')\n plt.plot(test_label.numpy()[idx, 256:512], label='target')\n plt.legend(frameon=False)\n plt.title('hrtf right')\n plt.xscale(\"log\")\n plt.ylim(-50, 10)\n plt.show()\n\n# setup model\nconv1_node = 16\nconv2_node = 32\nconv3_node = 64\nconv4_node = 128\nconv5_node = 256\nconv6_node = 512\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=conv1_node, kernel_size=(1, 5))\n self.conv1_bn = nn.BatchNorm2d(conv1_node)\n self.conv2 = nn.Conv2d(in_channels=conv1_node, out_channels=conv2_node, kernel_size=(1, 5))\n self.conv2_bn = nn.BatchNorm2d(conv2_node)\n self.conv3 = 
nn.Conv2d(in_channels=conv2_node, out_channels=conv3_node, kernel_size=(1, 5))\n self.conv3_bn = nn.BatchNorm2d(conv3_node)\n self.conv4 = nn.Conv2d(in_channels=conv3_node, out_channels=conv4_node, kernel_size=(1, 5))\n self.conv4_bn = nn.BatchNorm2d(conv4_node)\n self.conv5 = nn.Conv2d(in_channels=conv4_node, out_channels=conv5_node, kernel_size=(1, 5))\n self.conv5_bn = nn.BatchNorm2d(conv5_node)\n self.conv6 = nn.Conv2d(in_channels=conv5_node, out_channels=conv6_node, kernel_size=(2, 5))\n self.conv6_bn = nn.BatchNorm2d(conv6_node)\n self.t_conv1 = nn.ConvTranspose2d(in_channels=conv6_node, out_channels=conv5_node, kernel_size=(1, 5))\n self.conv7_bn = nn.BatchNorm2d(conv5_node)\n self.t_conv2 = nn.ConvTranspose2d(in_channels=conv5_node, out_channels=conv4_node, kernel_size=(1, 5))\n self.conv8_bn = nn.BatchNorm2d(conv4_node)\n self.t_conv3 = nn.ConvTranspose2d(in_channels=conv4_node, out_channels=conv3_node, kernel_size=(1, 5))\n self.conv9_bn = nn.BatchNorm2d(conv3_node)\n self.t_conv4 = nn.ConvTranspose2d(in_channels=conv3_node, out_channels=conv2_node, kernel_size=(1, 5))\n self.conv10_bn = nn.BatchNorm2d(conv2_node)\n self.t_conv5 = nn.ConvTranspose2d(in_channels=conv2_node, out_channels=conv1_node, kernel_size=(1, 5))\n self.conv11_bn = nn.BatchNorm2d(conv1_node)\n self.t_conv6 = nn.ConvTranspose2d(in_channels=conv1_node, out_channels=1, kernel_size=(1, 5))\n\n self.hrtf1 = nn.Linear(512, 512)\n self.hrtf2 = nn.Linear(512, 1024)\n self.hrtf3 = nn.Linear(1024, 512)\n self.hrtf4 = nn.Linear(512, 256)\n self.hrtf5 = nn.Linear(256, 256)\n\n self.merge = nn.Linear(512, 256)\n\n # Dropout module with 0.2 drop probability\n # self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n x1 = F.relu(self.conv1_bn(self.conv1(x)))\n x1 = F.relu(self.conv2_bn(self.conv2(x1)))\n x1 = F.relu(self.conv3_bn(self.conv3(x1)))\n x1 = F.relu(self.conv4_bn(self.conv4(x1)))\n x1 = F.relu(self.conv5_bn(self.conv5(x1)))\n x1 = F.relu(self.conv6_bn(self.conv6(x1)))\n x1 = F.relu(self.t_conv1(x1))\n x1 = F.relu(self.t_conv2(x1))\n x1 = F.relu(self.t_conv3(x1))\n x1 = F.relu(self.t_conv4(x1))\n x1 = F.relu(self.t_conv5(x1))\n x1 = self.t_conv6(x1)\n x1 = torch.reshape(x1, (-1, 256))\n\n x2 = torch.reshape(x, (-1, 512))\n x2 = F.leaky_relu_(self.hrtf1(x2), 0.01)\n x2 = F.leaky_relu_(self.hrtf2(x2), 0.01)\n x2 = F.leaky_relu_(self.hrtf3(x2), 0.01)\n x2 = F.leaky_relu_(self.hrtf4(x2), 0.01)\n x2 = F.leaky_relu_(self.hrtf5(x2), 0.01)\n\n ## NN with dropout\n # x2 = torch.reshape(x, (-1, 512))\n # x2 = F.leaky_relu_(self.hrtf1(x2), 0.01)\n # x2 = F.leaky_relu_(self.dropout(self.hrtf2(x2)), 0.01)\n # x2 = F.leaky_relu_(self.dropout(self.hrtf3(x2)), 0.01)\n # x2 = F.leaky_relu_(self.dropout(self.hrtf4(x2)), 0.01)\n # x2 = F.leaky_relu_(self.hrtf5(x2), 0.01)\n\n x3 = torch.cat((x1, x2), dim=1)\n x = self.merge(x3)\n return x\n\nmodel_name = 'training_HRTF_08++_25_sparse'\n# trained_model = torch.load('Models/' + model_name + '.pt')\ntrained_model = torch.load('Models/' + model_name + '.pt', map_location=torch.device('cpu'))\nleft_net = trained_model['left_model']\nright_net = trained_model['right_model']\n\nprint(trained_model.keys())\n\nfolder_loc = 'C:/Users/.../Downloads/HRTF_Restoration_01/Training_data/Time_aligned/'\n\noutput_name = '_bern_out'\nfile_loc = folder_loc + 'SH_HRTFs_1st_order_512_sparse_in_bern_oct_3/'\nexport_result(model_name, trained_model, file_loc, output_name)\n\noutput_name = '_sub18_out'\nfile_loc = folder_loc + 'SH_HRTFs_1st_order_512_sparse_in_sub_18_oct_3/'\nexport_result(model_name, 
trained_model, file_loc, output_name)\n\noutput_name = '_sub19_out'\nfile_loc = folder_loc + 'SH_HRTFs_1st_order_512_sparse_in_sub_19_oct_3/'\nexport_result(model_name, trained_model, file_loc, output_name)\n\noutput_name = '_sub20_out'\nfile_loc = folder_loc + 'SH_HRTFs_1st_order_512_sparse_in_sub_20_oct_3/'\nexport_result(model_name, trained_model, file_loc, output_name)\n","sub_path":"HRTF_Restoration_01/Python_scripts/Validate/export_result_bigger.py","file_name":"export_result_bigger.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"301211698","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nFrom synteny blocks, reconstruct ancestral order by interleaving the genes in\nbetween the anchors. This is the bottom-up method used first in Bowers (2003),\nand in Tang (2010), to reconstruct pre-alpha and pre-rho order, respectively.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport logging\n\nfrom math import sqrt\nfrom six.moves import zip_longest\n\nfrom jcvi.compara.synteny import AnchorFile, check_beds\nfrom jcvi.formats.bed import Bed\nfrom jcvi.utils.grouper import Grouper\nfrom jcvi.apps.base import OptionParser, ActionDispatcher\n\ndef main():\n\n actions = (\n ('mergechrom', 'merge synteny blocks on the same chrom'),\n ('pairs', 'convert anchorsfile to pairsfile'),\n ('fillrbh', 'fill syntelog block with RBH orthologs'),\n )\n p = ActionDispatcher(actions)\n p.dispatch(globals())\n\ndef pairs(args):\n \"\"\"\n %prog pairs anchorsfile prefix\n\n Convert anchorsfile to pairsfile.\n \"\"\"\n p = OptionParser(pairs.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n anchorfile, prefix = args\n outfile = prefix + \".pairs\"\n fw = open(outfile, \"w\")\n\n af = AnchorFile(anchorfile)\n blocks = af.blocks\n pad = len(str(len(blocks)))\n npairs = 0\n for i, block in enumerate(blocks):\n block_id = \"{0}{1:0{2}d}\".format(prefix, i + 1, pad)\n lines = []\n for q, s, score in block:\n npairs += 1\n score = score.replace('L', '')\n lines.append(\"\\t\".join((q, s, score, block_id)))\n print(\"\\n\".join(sorted(lines)), file=fw)\n\n fw.close()\n logging.debug(\"A total of {0} pairs written to `{1}`.\".\n format(npairs, outfile))\n\ndef get_collinear(block):\n # block contains (gene a, gene b, score)\n asc_score, asc_chain = print_chain(block)\n desc_score, desc_chain = print_chain(block, ascending=False)\n return asc_chain if asc_score > desc_score else desc_chain\n\ndef print_chain(block, ascending=True):\n\n scope = 50 # reduce search complexity\n if not ascending:\n block = [(a, -b, c) for (a, b, c) in block]\n\n block.sort()\n bsize = len(block)\n fromm = [-1] * bsize\n scores = [score_convert(c) for (a, b, c) in block]\n\n for i, (a, b, c) in enumerate(block):\n for j in range(i + 1, i + scope):\n if j >= bsize:\n break\n\n d, e, f = block[j]\n\n # Ensure strictly collinear\n if d == a or b >= e:\n continue\n\n this_score = scores[i] + score_convert(f)\n if this_score > scores[j]:\n fromm[j] = i\n scores[j] = this_score\n\n scoresfromm = list(zip(scores, fromm))\n maxchain = max(scoresfromm)\n chainscore, chainend = maxchain\n solution = [scoresfromm.index(maxchain), chainend]\n last = chainend\n while True:\n _last = fromm[last]\n if _last == -1:\n break\n last = _last\n solution.append(last)\n\n solution.reverse()\n solution = [block[x] for x in solution]\n if not ascending:\n solution = [(a, -b, c) for (a, 
b, c) in solution]\n return chainscore, solution\n\ndef mergechrom(args):\n \"\"\"\n %prog mergechrom a.b.anchors\n\n merge synteny blocks on the same chromosome\n \"\"\"\n p = OptionParser(mergechrom.__doc__)\n p.set_beds()\n\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n anchorfile, = args\n qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)\n\n af = AnchorFile(anchorfile)\n newanchorfile = anchorfile.rsplit(\".\", 1)[0] + \".mergechrom.anchors\"\n fw = open(newanchorfile, \"w\")\n\n qchrom_dic = dict((b.accn,b.seqid) for b in qbed)\n schrom_dic = dict((b.accn,b.seqid) for b in sbed)\n block_dic = dict()\n blocks = af.blocks\n for (i,block) in enumerate(blocks):\n q, s, score = block[0]\n qchrom, schrom = qchrom_dic[q], schrom_dic[s]\n k = \"%s_%s\" % (qchrom, schrom)\n if k not in block_dic: block_dic[k] = []\n block_dic[k].append(i)\n\n for (k, idxs) in block_dic.items():\n print(\"#\" * 3, file=fw)\n for i in idxs:\n for q, s, score in blocks[i]:\n print(\"\\t\".join((q, s, str(score))), file=fw)\n\n fw.close()\n print(\"%d blocks merged to %d\" % (len(blocks), len(block_dic.keys())))\n\ndef fillrbh(args):\n from jcvi.formats.base import DictFile\n\n p = OptionParser(fillrbh.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n blocksfile, rbhfile, orthofile = args\n\n # Generate mapping both ways\n adict = DictFile(rbhfile)\n bdict = DictFile(rbhfile, keypos=1, valuepos=0)\n adict.update(bdict)\n\n fp = open(blocksfile)\n fw = open(orthofile, \"w\")\n nrecruited = 0\n for row in fp:\n a, b = row.split()\n c = '.'\n if b == '.':\n if a in adict:\n b = adict[a]\n nrecruited += 1\n c = 'rbh'\n else:\n c = 'syntelog'\n print(\"\\t\".join((a, b, c)), file=fw)\n\n logging.debug(\"Recruited {0} pairs from RBH.\".format(nrecruited))\n fp.close()\n fw.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/wgc/reconstruct.py","file_name":"reconstruct.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324323004","text":"import json\nimport os.path\nimport math\n\ndef readData(name):\n if not os.path.isfile(name): \n print(\"File path {} does not exist...\".format(name))\n return False\n else:\n with open(name, 'r', encoding='utf8') as f: \n j = json.load(f)\n f.close()\n return j\n \ninput = readData('dec_in.json')\nprint(input)\n\nA1 = input[\"A1\"]\nB1 = input[\"B1\"]\nC1 = input[\"C1\"]\nD1 = input[\"D1\"]\nE1 = input[\"E1\"]\nF1 = input[\"F1\"]\nA2 = input[\"A2\"]\nB2 = input[\"B2\"]\nC2 = input[\"C2\"]\nD2 = input[\"D2\"]\nE2 = input[\"E2\"]\nF2 = input[\"F2\"]\nm2 = input[\"m2\"]\nXc2= input[\"Xc2\"]\nYc2= input[\"Yc2\"]\n\nCm1 = input[\"Cm1\"]\nCe1 = input[\"Ce1\"]\nR1 = input[\"R1\"]\nL1 = input[\"L1\"] \nCm2 = input[\"Cm2\"]\nCe2 = input[\"Ce2\"]\nR2 = input[\"R2\"] \nL2 = input[\"L2\"] \n\nK1 = Cm1*Ce1/R1 \nK2 = Cm2*Ce2/R2 \n\ndef F_(beta):\n return F2*math.cos(2*beta)-(A2-B2)/2*math.sin(2*beta)\ndef D_(beta):\n return D2*math.cos(beta)+ E2*math.sin(beta)\ndef E_(beta):\n return E2*math.cos(beta)+ D2*math.sin(beta)\n\nfrom numpy import pi, linspace, array, dot, sin, cos, diag, concatenate, zeros\nfrom numpy.linalg import inv\n\n#A(q, params)\ndef D(q, params):\n alpha, beta = q\n b, g = params\n \n d11 = B1 + (A2+B2)/2 -(A2-B2)/2*math.cos(2*beta)-F2*math.sin(2*beta)\n d12 = -D2*math.cos(beta) - E2*math.sin(beta)\n d21 = -D2*math.cos(beta) - E2*math.sin(beta)\n d22 = 
C2\n return array([[d11,d12],[d21,d22]])\n\n#F(q, dq, params)\ndef c_term(q, dq, params):\n alpha, beta = q\n dalpha, dbeta = dq\n\n c1 = -2*dalpha*dbeta*((F1+F_(beta))*math.cos(alpha) + (D1+D_(beta))*math.sin(alpha)) - E_(beta)*dbeta**2 \n c2 = F_(beta)*dalpha**2\n return array([c1, c2])\n\n#Q*(q, params)\ndef g_term(q, params):\n alpha, beta = q\n\n g1 = 0\n g2 = m2*g*(Xc2*math.cos(beta)-Yc2*math.sin(beta))\n return array([g1, g2])\n\ndef Q_d(q, dq, params):\n \n dalpha, dbeta = dq\n Q_d_1 = b[0]*dalpha\n Q_d_2 = b[1]*dbeta\n return array([Q_d_1, Q_d_2])\n\ndef h(q, dq, params):\n return c_term(q, dq, params) + g_term(q, params)\n\ndef sysode(x, t, control, params, control_params):\n q, dq = x[:2], x[2:4]\n\n D_c = D(q, params)\n h_c = h(q, dq, params)\n Q_d_c = Q_d(q, dq, params)\n\n # Calculate control\n u = control(x, t, control_params)\n ddq = dot(inv(D_c), u - Q_d_c - h_c )\n\n dx1 = dq\n dx2 = ddq\n dx = dx1, dx2\n\n return concatenate(dx)\n \n# Manipulator parameters\nb = 0.002, 0.002\ng = 9.81\nparams = b, g\n\ndef control(x, t, control_params):\n q, dq = x[:2], x[2:4]\n gains = control_params['gains']\n K1, K2 = gains\n \n q_d = pi/2, pi/4\n q_e = q_d - q\n\n u = dot(K1, q_e) + dot(K2, - dq)\n return u \n\ncontrol_params = {}\nKp = diag([0, 0])\nKd = diag([0, 0])\ncontrol_params['gains'] = Kp, Kd\ncontrol_params['q_d'] = [pi/4, pi/3] \n\n\nfrom scipy.integrate import odeint\n\n# Integration\nt0 = 0 # Initial time \ntf = 3 # Final time\nN = 2E3 # Numbers of points in time span\nt = linspace(t0, tf, int(N)) # Create time span\nx0 = [0, 0, 0, 0] # Set initial state \nsol = odeint(sysode, x0, t, \n args=(control, params, control_params,)) # Integrate system\nq, dq = sol[:,:2], sol[:,2:4]\nalpha_1, alpha_2 = q[:,0], q[:,1]\n\n\nfrom matplotlib.pyplot import *\nplot(t, alpha_1,'r', linewidth=2.0, label = 'Joint 1')\nplot(t, alpha_2,'b', linewidth=2.0, label = 'Joint 2')\n# plot(t, cos(2*pi*t),'black', linestyle = '--', alpha = 0.5, linewidth=2.0, label = 'Joint 1 des')\n# plot(t, sin(2*pi*t),'black', linestyle = '--', alpha = 0.5, linewidth=2.0, label = 'Joint 2 des')\n\n# hlines(cos(2*pi*t), sin(2*pi*t), t0, tf,color = 'black', linestyle = '--', alpha = 0.7)\nhlines(0, t0, tf,color = 'black', linestyle = '--', alpha = 0.7)\nhlines(pi/2, t0, tf,color = 'black', linestyle = '--', alpha = 0.7)\n# plot(t, alpha_1_exct,'r--', linewidth=2.0, alpha = 0.6)\n# plot(t, alpha_2_exct,'b--', linewidth=2.0, alpha = 0.6)\ngrid(color='black', linestyle='--', linewidth=1.0, alpha = 0.7)\ngrid(True)\nxlim([0, tf])\nlegend()\nylabel(r'Angles $q$ (rad)')\nxlabel(r'Time $t$ (s)')\nshow()","sub_path":"scripts/simen.py","file_name":"simen.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141006123","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on November 12, 2019\n@author: Quentin Lutz \n\"\"\"\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom typing import Optional, Union, Iterable\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sknetwork.utils.check import check_n_jobs, is_symmetric\n\n\ndef distance(adjacency: sparse.csr_matrix, sources: Optional[Union[int, Iterable]] = None, method: str = 'D',\n return_predecessors: bool = False, unweighted: bool = False, n_jobs: Optional[int] = None):\n \"\"\"Compute distances between nodes.\n\n * Graphs\n * Digraphs\n\n Based on SciPy (scipy.sparse.csgraph.shortest_path)\n\n Parameters\n ----------\n adjacency 
:\n The adjacency matrix of the graph\n sources :\n If specified, only compute the paths for the points at the given indices. Will not work with ``method =='FW'``.\n method :\n The method to be used.\n\n * ``'D'`` (Dijkstra),\n * ``'BF'`` (Bellman-Ford),\n * ``'J'`` (Johnson).\n return_predecessors :\n If ``True``, the size predecessor matrix is returned\n unweighted :\n If ``True``, the weights of the edges are ignored\n n_jobs :\n If an integer value is given, denotes the number of workers to use (-1 means the maximum number will be used).\n If ``None``, no parallel computations are made.\n\n Returns\n -------\n dist_matrix : np.ndarray\n The matrix of distances between graph nodes. ``dist_matrix[i,j]`` gives the shortest\n distance from point ``i`` to point ``j`` along the graph.\n If no path exists between nodes ``i`` and ``j``, then ``dist_matrix[i, j] = np.inf``.\n predecessors : np.ndarray, optional\n Returned only if ``return_predecessors == True``. The matrix of predecessors, which can be used to reconstruct\n the shortest paths. Row i of the predecessor matrix contains information on the shortest paths from point ``i``:\n each entry ``predecessors[i, j]`` gives the index of the previous node in the path from point ``i`` to point\n ``j``. If no path exists between nodes ``i`` and ``j``, then ``predecessors[i, j] = -9999``.\n\n Examples\n --------\n >>> from sknetwork.data import cyclic_digraph\n >>> adjacency = cyclic_digraph(3)\n >>> distance(adjacency, sources=0)\n array([0., 1., 2.])\n >>> distance(adjacency, sources=0, return_predecessors=True)\n (array([0., 1., 2.]), array([-9999, 0, 1]))\n \"\"\"\n n_jobs = check_n_jobs(n_jobs)\n if method == 'FW' and n_jobs != 1:\n raise ValueError('The Floyd-Warshall algorithm cannot be used with parallel computations.')\n if sources is None:\n sources = np.arange(adjacency.shape[0])\n elif np.issubdtype(type(sources), np.integer):\n sources = np.array([sources])\n n = len(sources)\n directed = not is_symmetric(adjacency)\n local_function = partial(sparse.csgraph.shortest_path,\n adjacency, method, directed, return_predecessors, unweighted, False)\n if n_jobs == 1 or n == 1:\n res = sparse.csgraph.shortest_path(adjacency, method, directed, return_predecessors,\n unweighted, False, sources)\n else:\n with Pool(n_jobs) as pool:\n res = np.array(pool.map(local_function, sources))\n if return_predecessors:\n if n == 1:\n return res[0].ravel(), res[1].astype(int).ravel()\n else:\n return res[0], res[1].astype(int)\n else:\n if n == 1:\n return res.ravel()\n else:\n return res\n\n\ndef shortest_path(adjacency: sparse.csr_matrix, sources: Union[int, Iterable], targets: Union[int, Iterable],\n method: str = 'D', unweighted: bool = False, n_jobs: Optional[int] = None):\n \"\"\"Compute the shortest paths in the graph.\n\n * Graphs\n * Digraphs\n\n Parameters\n ----------\n adjacency :\n The adjacency matrix of the graph\n sources : int or iterable\n Sources nodes.\n targets : int or iterable\n Target nodes.\n method :\n The method to be used.\n\n * ``'D'`` (Dijkstra),\n * ``'BF'`` (Bellman-Ford),\n * ``'J'`` (Johnson).\n unweighted :\n If ``True``, the weights of the edges are ignored\n n_jobs :\n If an integer value is given, denotes the number of workers to use (-1 means the maximum number will be used).\n If ``None``, no parallel computations are made.\n\n Returns\n -------\n paths : list\n If single source and single target, return a list containing the nodes on the path from source to target.\n If multiple sources or multiple targets, return a 
list of paths as lists.\n An empty list means that the path does not exist.\n\n Examples\n --------\n >>> from sknetwork.data import linear_digraph\n >>> adjacency = linear_digraph(3)\n >>> shortest_path(adjacency, 0, 2)\n [0, 1, 2]\n >>> shortest_path(adjacency, 2, 0)\n []\n >>> shortest_path(adjacency, 0, [1, 2])\n [[0, 1], [0, 1, 2]]\n >>> shortest_path(adjacency, [0, 1], 2)\n [[0, 1, 2], [1, 2]]\n \"\"\"\n if np.issubdtype(type(sources), np.integer):\n sources = [sources]\n if np.issubdtype(type(targets), np.integer):\n targets = [targets]\n\n if len(sources) == 1:\n source2target = True\n source = sources[0]\n elif len(targets) == 1:\n source2target = False\n source = targets[0]\n targets = sources\n else:\n raise ValueError(\n 'This request is ambiguous. Either use one source and multiple targets or multiple sources and one target.')\n\n if source2target:\n dists, preds = distance(adjacency, source, method, True, unweighted, n_jobs)\n else:\n dists, preds = distance(adjacency.T, source, method, True, unweighted, n_jobs)\n\n paths = []\n for target in targets:\n if dists[target] == np.inf:\n path = []\n else:\n path = [target]\n node = target\n while node != source:\n node = preds[node]\n path.append(node)\n if source2target:\n path.reverse()\n paths.append(path)\n if len(paths) == 1:\n paths = paths[0]\n return paths\n","sub_path":"sknetwork/path/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439710083","text":"#!/usr/bin/env python3\n\nimport shutil, os\n\nos.chdir('/Users/DY/Core/Python Programming/Programming Files/2017_Summer/new_sbin/')\n\nfor filename in os.listdir('.'):\n if os.path.isdir(os.path.join(os.getcwd(),filename)):\n for folder, subdirs, files in os.walk(filename):\n for f in files:\n os.unlink((os.path.join(os.getcwd(),filename,f)))\n os.rmdir(filename)\n","sub_path":"2017_Summer/ch9_del_dir.py","file_name":"ch9_del_dir.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"593961280","text":"import pandas as pd\nimport gtfs_kit as gk\nimport shapely.geometry as sg\nimport geopandas as gpd\nimport pytest\n\nfrom .context import make_gtfs, DATA_DIR\nfrom make_gtfs import *\n\n\n# Load test ProtoFeed\npfeed = read_protofeed(DATA_DIR / \"auckland\")\npfeed_l = read_protofeed(DATA_DIR / \"auckland_light\")\npfeed_w = read_protofeed(DATA_DIR / \"auckland_wonky\")\n\n\ndef test_get_duration():\n ts1 = \"01:01:01\"\n ts2 = \"01:05:01\"\n get = get_duration(ts1, ts2, units=\"min\")\n expect = 4\n assert get == expect\n\n\ndef test_make_stop_points():\n lines = gpd.read_file(DATA_DIR / \"auckland\" / \"shapes.geojson\").to_crs(\"epsg:2193\")\n lines_looping = lines.iloc[:1]\n lines_nonlooping = lines.iloc[1:]\n\n offset = 5\n side = \"left\"\n points = make_stop_points(lines_nonlooping, \"shape_id\", offset=0, side=side)\n assert set(points.columns) == {\n \"shape_id\",\n \"point_id\",\n \"shape_dist_traveled\",\n \"geometry\",\n }\n for __, group in points.groupby(\"shape_id\"):\n assert group.shape[0] == 2\n\n n = 5\n points = make_stop_points(lines_nonlooping, \"shape_id\", offset, side, n=n)\n for __, group in points.groupby(\"shape_id\"):\n assert group.shape[0] == n\n\n # Points should be the correct distance away.\n assert np.allclose(points.distance(lines_nonlooping.geometry.iat[0]), offset)\n\n points = 
make_stop_points(lines_looping, \"shape_id\", offset, side, n=n)\n for __, group in points.groupby(\"shape_id\"):\n assert group.shape[0] == n - 1\n\n points = make_stop_points(lines, \"shape_id\", offset, side, spacing=200)\n for __, group in points.groupby(\"shape_id\"):\n assert group.shape[0] >= 2\n\n\ndef test_build_routes():\n for p in [pfeed, pfeed_w]:\n routes = build_routes(pfeed)\n\n # Should have correct shape\n assert routes.shape[0] == pfeed.frequencies.drop_duplicates(\"route_short_name\").shape[0]\n assert set(routes.columns) == {\n \"route_id\",\n \"route_type\",\n \"route_short_name\",\n \"route_long_name\",\n }\n\n\ndef test_build_shapes():\n shapes = build_shapes(pfeed)\n\n # Should be a data frame\n assert isinstance(shapes, pd.DataFrame)\n\n # Should have correct shape\n count = 0\n for direction in pfeed.shapes_extra.values():\n if direction == 0:\n count += 1\n else:\n count += direction\n expect_nshapes = count\n expect_ncols = 4\n assert shapes.groupby(\"shape_id\").ngroups == expect_nshapes\n assert shapes.shape[1] == expect_ncols\n\n\ndef test_build_stops():\n # Test with null ``pfeed.stops``\n pfeed_stopless = pfeed.copy()\n pfeed_stopless.stops = None\n\n # Test with non-null ``pfeed.stops``\n stops = build_stops(pfeed)\n assert stops.shape == pfeed.stops.shape\n assert set(stops.columns) == set(pfeed.stops.columns)\n\n shapes = build_shapes(pfeed_stopless)\n stops = build_stops(pfeed_stopless, shapes, spacing=400)\n assert set(stops.columns) == {\"stop_id\", \"stop_name\", \"stop_lon\", \"stop_lat\"}\n nshapes = shapes.shape_id.nunique()\n assert stops.shape[0] >= nshapes\n\n n = 4\n stops = build_stops(pfeed_stopless, shapes, n=4)\n # Should have correct shape\n nshapes = shapes.shape_id.nunique()\n assert stops.shape[0] <= n * nshapes\n\n\ndef test_build_trips():\n routes = build_routes(pfeed)\n __, service_by_window = build_calendar_etc(pfeed)\n shapes = build_shapes(pfeed)\n trips = build_trips(pfeed, routes, service_by_window)\n\n # Should be a data frame\n assert isinstance(trips, pd.DataFrame)\n\n # Should have correct shape\n f = pd.merge(routes[[\"route_id\", \"route_short_name\"]], pfeed.frequencies)\n f = pd.merge(f, pfeed.service_windows)\n shapes = set(shapes[\"shape_id\"].unique())\n expect_ntrips = 0\n for index, row in f.iterrows():\n # Get number of trips corresponding to this row\n # and add it to the total\n frequency = row[\"frequency\"]\n if not frequency:\n continue\n start, end = row[[\"start_time\", \"end_time\"]].values\n duration = get_duration(start, end, \"h\")\n direction = row[\"direction\"]\n if direction == 0:\n trip_mult = 1\n else:\n trip_mult = direction\n expect_ntrips += int(duration * frequency) * trip_mult\n expect_ncols = 5\n assert trips.shape == (expect_ntrips, expect_ncols)\n\n\ndef test_buffer_side():\n s = sg.LineString([[0, 0], [1, 0]])\n buff = 5\n # Buffers should have correct area and orientation\n for side in [\"left\", \"right\", \"both\"]:\n b = buffer_side(s, side, buff)\n p = b.representative_point()\n if side == \"left\":\n assert b.area >= buff\n assert p.coords[0][1] > 0\n elif side == \"right\":\n assert b.area >= buff\n assert p.coords[0][1] < 0\n else:\n assert b.area >= 2 * buff\n\n\ndef test_get_stops_nearby():\n geom = sg.LineString([[0, 0], [2, 0]])\n stops = gpd.GeoDataFrame(\n [[\"a\", sg.Point([1, 1])], [\"b\", sg.Point([1, -1])]],\n columns=[\"stop_code\", \"geometry\"],\n )\n for side in [\"left\", \"right\", \"both\"]:\n n = get_stops_nearby(stops, geom, side, 1)\n if side == \"left\":\n 
assert n.shape[0] == 1\n assert n.stop_code.iat[0] == \"a\"\n elif side == \"right\":\n assert n.shape[0] == 1\n assert n.stop_code.iat[0] == \"b\"\n else:\n assert n.shape[0] == 2\n assert set(n.stop_code.values) == {\"a\", \"b\"}\n\n\ndef test_compute_shape_point_speeds():\n shapes = build_shapes(pfeed)\n route_type = pfeed.route_types()[0]\n g = compute_shape_point_speeds(shapes, pfeed.speed_zones, route_type)\n assert isinstance(g, gpd.GeoDataFrame)\n assert set(g.columns) == {\n \"shape_id\",\n \"shape_pt_sequence\",\n \"shape_dist_traveled\",\n \"geometry\",\n \"route_type\",\n \"speed_zone_id\",\n \"speed\",\n }\n assert g.crs == WGS84\n\n # Should have correct length\n assert g.shape[0] >= shapes.shape[0]\n\n # Speed zones present should make sense\n sz = pfeed.speed_zones.loc[lambda x: x.route_type == route_type]\n assert set(g.speed_zone_id) <= set(sz.speed_zone_id)\n\n\n@pytest.mark.slow\ndef test_build_stop_times_for_trip():\n stops = build_stops(pfeed)\n stops_g = gk.geometrize_stops_0(stops, use_utm=True)\n shapes = build_shapes(pfeed)\n shapes_gi = gk.geometrize_shapes_0(shapes, use_utm=True).set_index(\"shape_id\")\n trip_id = \"bingo\"\n shape_id = shapes_gi.index[0]\n\n # Generic case\n linestring = shapes_gi.loc[shape_id].geometry\n stops_g_nearby = get_stops_nearby(stops_g, linestring, \"left\")\n route_type = 3\n sz = pfeed.speed_zones.to_crs(pfeed.utm_crs)\n shape_point_speeds = compute_shape_point_speeds(shapes, sz, route_type)\n default_speed = 2\n start_time = 0\n f = build_stop_times_for_trip(\n trip_id,\n stops_g_nearby,\n shape_id,\n linestring,\n sz,\n route_type,\n shape_point_speeds,\n default_speed,\n start_time,\n )\n assert set(f.columns) == {\n \"trip_id\",\n \"stop_id\",\n \"stop_sequence\",\n \"arrival_time\",\n \"departure_time\",\n \"shape_dist_traveled\",\n }\n\n # Should have correct length\n assert f.shape[0] == stops_g_nearby.shape[0]\n\n # Average speed of trip should be reasonable\n def compute_avg_speed(f):\n return (\n 3.6\n * (f.shape_dist_traveled.iat[-1] - f.shape_dist_traveled.iat[0])\n / (f.arrival_time.iat[-1] - f.arrival_time.iat[0])\n )\n\n sz = pfeed.speed_zones.loc[lambda x: x.route_type == route_type]\n avg_speed = compute_avg_speed(f)\n assert (\n min(sz.speed.min(), default_speed)\n <= avg_speed\n <= max(sz.speed.max(), default_speed)\n )\n\n # Edge case with one speed zone encompassing the trip and infinite speed\n sz = gpd.GeoDataFrame(\n [{\"speed\": np.inf, \"route_type\": route_type}],\n geometry=[sg.box(*linestring.bounds).buffer(10)],\n crs=stops_g.crs,\n )\n shape_point_speeds = compute_shape_point_speeds(shapes, sz, route_type)\n default_speed = 2\n\n f = build_stop_times_for_trip(\n trip_id,\n stops_g_nearby,\n shape_id,\n linestring,\n sz,\n route_type,\n shape_point_speeds,\n default_speed,\n start_time,\n )\n\n # Average speed should be correct\n avg_speed = compute_avg_speed(f)\n assert np.allclose(avg_speed, default_speed)\n\n # Edge case with one speed zone encompassing the trip\n sz = gpd.GeoDataFrame(\n [{\"speed\": 100, \"route_type\": route_type}],\n geometry=[sg.box(*linestring.bounds).buffer(10)],\n crs=stops_g.crs,\n )\n shape_point_speeds = compute_shape_point_speeds(shapes, sz, route_type)\n\n f = build_stop_times_for_trip(\n trip_id,\n stops_g_nearby,\n shape_id,\n linestring,\n sz,\n route_type,\n shape_point_speeds,\n default_speed,\n start_time,\n )\n\n # Average speed should be correct\n avg_speed = compute_avg_speed(f)\n assert np.allclose(avg_speed, 100)\n\n\n@pytest.mark.slow\ndef 
test_build_stop_times():\n # Test stopless version first\n pfeed_stopless = pfeed.copy()\n pfeed_stopless.stops = None\n routes = build_routes(pfeed_stopless)\n shapes = build_shapes(pfeed_stopless)\n __, service_by_window = build_calendar_etc(pfeed_stopless)\n stops = build_stops(pfeed_stopless, shapes)\n trips = build_trips(pfeed_stopless, routes, service_by_window)\n stop_times = build_stop_times(pfeed_stopless, routes, shapes, stops, trips)\n\n assert isinstance(stop_times, pd.DataFrame)\n\n # Should have correct shape.\n # Number of stop times is at most twice the number of trips,\n # because each trip has at most two stops\n assert stop_times.shape[0] <= 2 * trips.shape[0]\n assert stop_times.shape[1] == 6\n\n # Test with stops\n routes = build_routes(pfeed)\n shapes = build_shapes(pfeed)\n stops = build_stops(pfeed)\n __, service_by_window = build_calendar_etc(pfeed)\n trips = build_trips(pfeed, routes, service_by_window)\n stop_times = build_stop_times(pfeed, routes, shapes, stops, trips)\n\n # Should be a data frame\n assert isinstance(stop_times, pd.DataFrame)\n\n # Should have correct shape.\n # Number of stop times is at least twice the number of trips,\n # because each trip has two stops\n assert stop_times.shape[0] >= 2 * trips.shape[0]\n assert stop_times.shape[1] == 6\n\n # Test with stops and tiny buffer so that no stop times are built\n stop_times = build_stop_times(pfeed, routes, shapes, stops, trips, buffer=0)\n\n # Should be a data frame\n assert isinstance(stop_times, pd.DataFrame)\n\n # Should be empty\n assert stop_times.empty\n\n\n@pytest.mark.slow\ndef test_build_feed():\n feed = build_feed(pfeed)\n\n # Should be a GTFSTK Feed\n assert isinstance(feed, gk.Feed)\n\n # Should have correct tables\n names = [\"agency\", \"calendar\", \"routes\", \"shapes\", \"stops\", \"stop_times\", \"trips\"]\n for name in names:\n assert hasattr(feed, name)\n\n # Should be a valid feed\n v = feed.validate()\n print(v)\n assert \"error\" not in v.type.values\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":11273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"583191571","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\nfrom tqdm import tqdm\n \nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport itertools\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\n\n# with open('article_texts.txt', 'r', encoding='utf-8') as f:\n# texts = f.readlines()\n# with open('targets.txt', 'r', encoding='utf-8') as f:\n# targets = [float(i) for i in f.readlines()]\n \n# sns.distplot([len(i) for i in texts], bins=1000)\n\n# texts, targets = zip(*((text, target) for text, target in zip(texts, targets) if len(text)>1000))\n\n# X_train, X_test, y_train, y_test = train_test_split(\n# ... 
texts, targets, test_size=0.2, random_state=42)\n\n# del texts, targets\n\n# with open('X_train.txt', 'w', encoding='utf-8') as f:\n# for text in tqdm(X_train):\n# f.write(text + '\\n')\n \n# with open('X_test.txt', 'w', encoding='utf-8') as f:\n# for text in tqdm(X_test):\n# f.write(text + '\\n')\n \n# with open('y_train.txt', 'w', encoding='utf-8') as f:\n# for target in tqdm(y_train):\n# f.write(\"{:2.1f}\".format(target) + '\\n')\n \n# with open('y_test.txt', 'w', encoding='utf-8') as f:\n# for target in tqdm(y_test):\n# f.write(\"{:2.1f}\".format(target) + '\\n')\n \n\n# https://www.kaggle.com/alxmamaev/how-to-easy-preprocess-russian-text\nimport nltk\nnltk.download(\"stopwords\")\n\nfrom nltk.corpus import stopwords\nfrom pymystem3 import Mystem\nfrom string import punctuation\n\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression\n\nfrom sklearn.pipeline import Pipeline\nimport pickle\nimport time\n\n#Create lemmatizer and stopwords list\nmystem = Mystem() \nrussian_stopwords = stopwords.words(\"russian\")\ntokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n\ndef preprocess_text(text):\n tokens = mystem.lemmatize(text.lower())\n tokens = [token for token in tokens if token not in russian_stopwords\\\n and token != '\\n' \\\n and token.strip() not in punctuation\n ]\n text = \" \".join(tokens)\n return text\n\ndef preprosses_corpus(corpus):\n corpus_clean = []\n for text in tqdm(corpus):\n corpus_clean.append(preprocess_text(text))\n return corpus_clean\n\nwith open('X_train_clean.txt', 'r', encoding='utf-8') as f:\n X_train_clean = f.readlines()\nwith open('X_train.txt', 'r', encoding='utf-8') as f:\n X_train = f.readlines()\nwith open('y_train.txt', 'r', encoding='utf-8') as f:\n y_train = [float(i) for i in f.readlines()]\nwith open('X_test_clean.txt', 'r', encoding='utf-8') as f:\n X_test_clean = f.readlines()\nwith open('X_test.txt', 'r', encoding='utf-8') as f:\n X_test = f.readlines()\nwith open('y_test.txt', 'r', encoding='utf-8') as f:\n y_test = [float(i) for i in f.readlines()]\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\ncount_vect = CountVectorizer(max_features=10000)\nX_train_counts = count_vect.fit_transform(X_train_clean)\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntf_transformer = TfidfTransformer(use_idf=True)#.fit(X_train_counts)\nX_train_tf = tf_transformer.transform(X_train_counts)\n\n\n\nmodel = SVR(verbose=1)\n# model = RandomForestRegressor(verbose=1)\n# model = LinearRegression()\n# model = CatBoostRegressor(verbose=1,task_type=\"GPU\",devices='0:1')\n\n# model = ensemble.GradientBoostingRegressor(verbose=1)\n# model.fit(X_train_tf[:n_samples], y_train[:n_samples])\n\nv\ntext_regression = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('model', model)])\n\n\n\nstart_time = time.time()\ntext_regression.fit(X_train_clean, y_train)\nprint((time.time() - start_time))\n\npreds = text_regression.predict(X_test_clean)\nmse = mean_squared_error(y_test, preds)\nsqrtmse = np.sqrt(mse)\nprint(sqrtmse)\n\n\nplt.scatter(y_test, preds, alpha=0.1)\nplt.plot([0,10],[0,10], c='r', alpha=0.1)\n\n\npickle.dump(text_regression, open('text_regression_10000_idf_on', 'wb'))\ntext_regression = pickle.load(open('text_regression_10000', 'rb'))\n# # pickle.dump(text_regression, open('pipeline', 'wb'))\n\n# plt.figure(figsize=(8,8))\n# plt.scatter(y_test, preds, alpha=0.1)\n# plt.plot([0,10],[0,10], c='r', alpha=0.2)\n\n# with 
open('X_train_clean.txt', 'w', encoding='utf-8') as f:\n# for text in tqdm(X_train_clean):\n# f.write(text)\n \n# with open('X_test_clean.txt', 'w', encoding='utf-8') as f:\n# for text in tqdm(X_test_clean):\n# f.write(text)\n \n \nseen = set()\nuniq = [x for x in X_train_clean if x in seen or seen.add(x)]\n \ni = np.random.randint(0,len(X_train))\nprint(X_train_clean[i][:20])\nprint(X_train[i][:20])","sub_path":"src/np1_shallow_training.py","file_name":"np1_shallow_training.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"58309259","text":"import time\nfrom selenium import webdriver\nimport uuid\nimport urls\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\ndef helperGet(id, max_delay=10):\n try:\n element = WebDriverWait(browser, max_delay).until(\n EC.presence_of_element_located((By.ID, id))\n )\n return browser.find_element_by_id(id)\n except TimeoutException:\n print(\"Loading took too much time for id :\" + id)\n\ndef login( username, pwd ):\n helperGet( 'login_page_link' ).click()\n \n\n helperGet( 'username_field' ).send_keys(username)\n helperGet( 'password_field' ).send_keys(pwd)\n helperGet( 'login_submit' ).click()\n \n\ndef post(text):\n helperGet('text').send_keys(text)\n time.sleep(4) \n helperGet('post_btn').click()\n time.sleep(6) \n \n\n\nurl = urls.host_url()\nbrowser = webdriver.Chrome('/usr/local/bin/chromedriver')\n\nbrowser.get(url)\n\n\nusernames = []\nuserpwds = []\n\nfor i in range(1000, 1002):\n testid = str(uuid.uuid4())\n testid = testid[0:7]\n username = testid\n userpwd = '1234qwer'\n\n usernames.append(username)\n userpwds.append(userpwd)\n\n helperGet( 'signup_page_link' ).click()\n \n\n helperGet( 'username_field' ).send_keys(username)\n helperGet( 'email_field' ).send_keys(username+\"@test.com\")\n helperGet( 'password_field' ).send_keys(userpwd)\n helperGet( 'password_confirm_field' ).send_keys(userpwd)\n helperGet( 'signup_submit' ).click()\n \n\nlogin( usernames[0], userpwds[0] )\n\nfor i in range(0, 2):\n post('test posting' + str(i))\n\n# delete test start\n\ntext_0 = helperGet('post_0_text').text\n\n# click delete & click no\nhelperGet( 'deleteBtn0' ).click()\ntime.sleep(5)\nhelperGet( 'no' ).click()\ntime.sleep(5)\nassert text_0 == helperGet('post_0_text').text\n\n# click delete & click yes\nhelperGet( 'deleteBtn0' ).click()\ntime.sleep(5)\nhelperGet( 'yes' ).click()\ntime.sleep(5)\nassert text_0 != helperGet('post_0_text').text\n\n# delete test end\n\n# revise test start\n\ntext_0 = helperGet( 'post_0_text' ).text\n\n# click revise & click cancel\nhelperGet( 'reviseBtn0' ).click()\ntime.sleep(5)\nhelperGet( 'cancel' ).click()\ntime.sleep(5)\n\nassert text_0 == helperGet('post_0_text').text\n\n# click revise & click confirm\nhelperGet( 'reviseBtn0' ).click()\ntime.sleep(5)\nhelperGet( 'revise_text' ).send_keys( 'revised' )\ntime.sleep(5)\nhelperGet( 'confirm' ).click()\ntime.sleep(10)\nassert text_0 != 
helperGet('post_0_text').text\n\nbrowser.quit()\n\n\n","sub_path":"progress/progress3/frontendTest/deleteputtest.py","file_name":"deleteputtest.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297969706","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.mail import send_mail\nfrom django.utils.translation import ugettext_lazy as _\n\nclass UserManager(BaseUserManager):\n def _create_user(self, username, email_address, password, **extra_fields):\n\n now = timezone.localtime(timezone.now())\n\n if not email_address and not username:\n raise ValueError(\"A username or email is required to create an account\")\n\n email_address = self.normalize_email(email_address)\n username = self.model.normalize_username(username)\n\n user = self.model(username=username, email_address=email_address, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, username, email_address, password=None, **extra_fields):\n\n return self._create_user(username, email_address, password, **extra_fields)\n\n def create_superuser(self, username, email_address, password, **extra_fields):\n email_address = self.normalize_email(email_address)\n username = self.model.normalize_username(username)\n \n return self._create_user(username, email_address, password, **extra_fields)\n\nclass AccountType(models.Model):\n account_type = models.CharField(\n unique=True,\n max_length=20,\n )\n\n def __str__(self):\n return \"%s\" % (self.account_type)\n\n\nclass CustomUser(AbstractBaseUser): \n username = models.CharField(\n unique=True,\n max_length=20,\n )\n email_address = models.EmailField(\n verbose_name='email address',\n max_length=50,\n unique=True,\n )\n account_type = models.ForeignKey(\n AccountType,\n on_delete=models.CASCADE,\n related_name=\"user_account_type\",\n null = True,\n )\n is_active = models.BooleanField(\n default=True\n )\n date_joined = models.DateTimeField(\n auto_now_add=True\n )\n objects = UserManager()\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = [\"email_address\",]\n \n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email_address])\n\n def __str__(self):\n if not self.username:\n return \"%s\" % (self.email_address)\n else:\n return \"%s\" % (self.username)\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n \n \n def getPosition(self):\n committees = self.committees.filter(isDeleted=False)\n\n if committees.first():\n return committees.first().position.id\n\n elif self.account_type.id == 1:\n return 'ADMIN'\n return ''\n\n @property\n def is_staff(self):\n return True\n\ndef user_directory_path(instance, filename):\n return 'user_{0}/{1}'.format(instance.user.id, filename)\n\nclass UserProfile(models.Model):\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n related_name=\"profile\"\n )\n name = models.CharField(\n max_length=255,\n )\n age = models.IntegerField(\n blank = True,\n null = True,\n default = 0,\n )\n birthdate = models.DateField(\n blank = True,\n null = True,\n )\n birthplace = models.CharField(\n 
blank = True,\n null = True,\n max_length=255,\n )\n gender = models.ForeignKey(\n 'settings.GenderType',\n on_delete=models.CASCADE,\n related_name=\"gender_user\",\n blank = True,\n null = True,\n )\n profile_picture = models.FileField(\n null = True,\n blank=True,\n upload_to=user_directory_path\n )\n digital_signature = models.ImageField(\n blank=True,\n null = True,\n upload_to=user_directory_path\n )\n\n def __str__(self):\n return \"%s - %s\" % (self.user, self.name)\n\nclass UserApps(models.Model):\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n related_name=\"appAccess\"\n )\n installedApps = models.ManyToManyField(\n 'settings.AppName',\n blank=True\n )\n\n def __str__(self):\n return \"%s - %s\" % (self.user, self.installedApps)\n\n\nclass UserLogs(models.Model):\n action_time = models.DateTimeField(\n auto_now_add=True,\n )\n user = models.ForeignKey(\n CustomUser,\n on_delete=models.CASCADE,\n related_name=\"userLogs\"\n )\n action_type = models.CharField(\n max_length=255,\n blank=True,\n null = True,\n )\n content_type = models.ForeignKey(\n ContentType, \n on_delete=models.CASCADE,\n related_name=\"account_content_type\",\n blank=True,\n null = True,\n )\n object_id = models.PositiveIntegerField(\n blank=True,\n null = True,\n )\n content_object = GenericForeignKey(\n 'content_type', \n 'object_id',\n )\n object_type = models.CharField(\n max_length=255,\n blank=True,\n null = True,\n )\n apiLink = models.CharField(\n max_length=255,\n blank=True,\n null = True,\n )\n valueToDisplay = models.CharField(\n max_length=255,\n blank=True,\n null = True,\n )\n\n def __str__(self):\n return \"%s - %s %s %s\" % (self.user,self.action_type, self.content_type, self.object_id)\n\nclass LogDetails(models.Model):\n logDetails = models.ForeignKey(\n UserLogs,\n on_delete=models.CASCADE,\n related_name=\"logDetails\"\n )\n action = models.CharField(\n max_length=255,\n )\n\n def __str__(self):\n return \"%s - %s\" % (self.logDetails,self.action)","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287190462","text":"#file : operate.py\n#date : 17/02/11\n#author : mi-na\n#rational : this file provides game operate functions\n\n\n#my module\nimport src.params as params\nfrom src.Field import *\nfrom src.Player import *\nimport src.parse_input as pi\nfrom src.climinal import do_climinal_turn\nfrom src.Logger import *\n\n\n#public module\nimport os\n\n\n#interfaces\n#turn routine\ndef routine(turn_num, players_data, map_data, logger):\n new_players_data = []\n\n new_climinal, climinal_action = do_climinal_turn(turn_num, players_data, map_data, logger)\n new_players_data.append(new_climinal)\n #this check is needed for surrounded situation\n players_data[0] = new_climinal\n if check_game_result(players_data) == \"WIN\":\n return \"WIN\", players_data, logger\n logger.updateCliminal(turn_num, players_data, climinal_action)\n\n logger.dump(turn_num)\n print(\"turn \" + turn_num.__str__())\n for p in players_data:\n p.printPlayer()\n\n #XXX: execute outer(police) program\n input()\n os.system(params.shell_command())\n\n next_action_str = \"\"\n with open('resource/nextAction.txt', 'r') as f:\n next_action_str = f.read()\n\n try:\n next_action = pi.parse(next_action_str)\n for (i, arg) in enumerate(next_action):\n new_players_data.append(do_police_turn(players_data[1+i], arg, map_data))\n logger.updatePolice(turn_num, 
new_players_data, next_action)\n except:\n print(\"ERROR[INVALID INPUT]\")\n return \"LOSE\", [], logger\n\n #check current game status\n status_code = check_game_result(new_players_data)\n if turn_num == params.max_turn() and status_code == \"NEXT\":\n status_code = \"LOSE\"\n return status_code, new_players_data, logger\n\n\n#helper\n# [Player] -> String\ndef check_game_result(players_data):\n status_code = \"NEXT\"\n climinal_pos = players_data[0].pos\n for i in range(params.police_amount()):\n if players_data[i+1].pos == climinal_pos:\n status_code = \"WIN\"\n return status_code\n\n#TODO?: uses too many bare raise statements\n# Player -> {\"command\": String, \"destination\": Int} -> Map -> Player\ndef do_police_turn(police_data, arg, map_data):\n current_node = map_data.getNode(police_data.pos)\n com = arg['command']\n dest = arg['destination']\n if com == \"\":\n if dest in current_node.next_nodes:\n police_data.useTransportation(\"\")\n police_data.move(dest)\n else:\n raise\n elif com == \"STATION\":\n if dest in current_node.next_stations:\n police_data.useTransportation(\"STATION\")\n police_data.move(dest)\n else:\n raise\n elif com == \"AIRPORT\":\n if dest in current_node.next_airports:\n police_data.useTransportation(\"AIRPORT\")\n police_data.move(dest)\n else:\n raise\n else:\n raise\n return police_data\n\ndef dump_status(turn_num, players_data):\n if turn_num in params.shown_turn():\n players_data[0].printPlayer()\n for i in range(params.police_amount()):\n players_data[i+1].printPlayer()\n","sub_path":"src/operate.py","file_name":"operate.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"180244669","text":"# coding: utf-8\n\n__author__ = 'cleardusk'\n\nimport sys\n\nsys.path.append('../')\n\nimport cv2\nimport numpy as np\n\nfrom sim3dr.Sim3DR import RenderPipeline\nfrom sim3dr.utils.functions import plot_image\nfrom .tddfa_util import _to_ctype\n\ncfg = {\n 'intensity_ambient': 0.3,\n 'color_ambient': (1, 1, 1),\n 'intensity_directional': 0.6,\n 'color_directional': (1, 1, 1),\n 'intensity_specular': 0.1,\n 'specular_exp': 5,\n 'light_pos': (0, 0, 5),\n 'view_pos': (0, 0, 5)\n}\n\nrender_app = RenderPipeline(**cfg)\n\n\ndef render(ver_lst, tri, wfp):\n overlap = np.zeros((720, 1280, 3), dtype=np.uint8)\n for ver_ in ver_lst:\n ver = _to_ctype(ver_.T) # transpose\n res = render_app(ver, tri, overlap)\n \n if wfp is not None:\n cv2.imwrite(wfp, res)\n \n return res\n","sub_path":"utils/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110297042","text":"from common.plugin_service import PluginService\nfrom common.util.constant import STATUS_SUCCESS, STATUS_FAIL\nfrom common.util.timeutil import get_time_offset, str_to_dt, dt_to_str\nfrom telemetry import log\nimport copy\n\n\nclass DemoService(PluginService):\n def __init__(self):\n super().__init__()\n\n def do_verify(self, subscription, parameters):\n # Check series set permission\n for data in parameters['seriesSets']:\n meta = self.tsanaclient.get_metric_meta(parameters['apiKey'], data['metricId'])\n\n if meta is None:\n return STATUS_FAIL, 'You have no permission to read Metric {}'.format(data['metricId'])\n\n return STATUS_SUCCESS\n\n def do_inference(self, subscription, model_id, model_dir, parameters):\n log.info('Start to inference {}'.format('Demo'))\n try:\n amplifier = 
parameters['instance']['params']['amplifier']\n end_time = str_to_dt(parameters['endTime'])\n if 'startTime' in parameters:\n start_time = str_to_dt(parameters['startTime'])\n else:\n start_time = end_time\n\n series = self.tsanaclient.get_timeseries(parameters['apiKey'], parameters['seriesSets'], start_time, end_time)\n\n copied = copy.deepcopy(series)\n\n for data in copied:\n data.value = data.value * amplifier\n\n self.tsanaclient.save_inference_result(parameters, copied)\n\n return STATUS_SUCCESS, ''\n except Exception as e:\n log.error('Exception thrown by inference: ' + repr(e))\n return STATUS_FAIL, 'Exception thrown by inference: ' + repr(e)\n\n","sub_path":"sample/demo_modeless/demo_service.py","file_name":"demo_service.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562546494","text":"import numpy as np\nimport cv2\nimport glob\nimport pickle\n\ncamera_cal_fname = \"camera_cal/camera_cal.p\"\n\n\ndef calibrate():\n print('Starting camera calibration')\n images = []\n\n img, gray, img_corners, img_original = None, None, None, None\n\n objpoints, imgpoints = [], []\n\n image_files = glob.glob('camera_cal/calibration*.jpg')\n\n for fname in image_files:\n\n nx, ny = 9, 6\n\n if 'calibration1.jpg' in fname:\n nx, ny = 9, 5\n if 'calibration4.jpg' in fname:\n nx, ny = 6, 5\n if 'calibration5.jpg' in fname:\n nx, ny = 7, 6\n\n img = cv2.imread(fname)\n\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n # Save example image and corresponding identified corners\n if 'calibration2.jpg' in fname:\n img_original = img.copy()\n img_corners = img.copy()\n img_corners = cv2.drawChessboardCorners(img_corners, (nx, ny), corners, ret)\n cv2.imwrite('output_images/chessboard_original.jpg', img_original)\n cv2.imwrite('output_images/chessboard_corners.jpg', img_corners)\n\n objp = np.zeros((nx*ny, 3), np.float32)\n objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)\n\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n images.append(img)\n else:\n print('Did not find corners', fname, (nx, ny))\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n camera_calibration = {'mtx': mtx, 'dist': dist}\n pickle.dump(camera_calibration, open(camera_cal_fname, \"wb\"))\n\n print('Camera calibrated using {0} images'.format(np.array(images).shape[0]))\n\n\ndef get_camera_calibration_matrix():\n try:\n camera_calibration = pickle.load(open(camera_cal_fname, 'rb'))\n except FileNotFoundError:\n calibrate()\n camera_calibration = pickle.load(open(camera_cal_fname, 'rb'))\n\n mtx = camera_calibration['mtx']\n dist = camera_calibration['dist']\n return mtx, dist\n\n\ndef main():\n calibrate()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"camera_calibrator.py","file_name":"camera_calibrator.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169423749","text":"# Copyright 2022 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed 
under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom ..test_stage import TestStage\nfrom ..util import UNPIN_ENV, Shard, StageSpec, adjust_workers\n\nif TYPE_CHECKING:\n from ....util.types import ArgList, EnvDict\n from ... import FeatureType\n from ...config import Config\n from ...test_system import TestSystem\n\n\nclass OMP(TestStage):\n \"\"\"A test stage for exercising OpenMP features.\n\n Parameters\n ----------\n config: Config\n Test runner configuration\n\n system: TestSystem\n Process execution wrapper\n\n \"\"\"\n\n kind: FeatureType = \"openmp\"\n\n args: ArgList = []\n\n def __init__(self, config: Config, system: TestSystem) -> None:\n self._init(config, system)\n\n def env(self, config: Config, system: TestSystem) -> EnvDict:\n return dict(UNPIN_ENV)\n\n def shard_args(self, shard: Shard, config: Config) -> ArgList:\n return [\n \"--omps\",\n str(config.omps),\n \"--ompthreads\",\n str(config.ompthreads),\n ]\n\n def compute_spec(self, config: Config, system: TestSystem) -> StageSpec:\n omps, threads = config.omps, config.ompthreads\n procs = omps * threads + config.utility\n workers = adjust_workers(\n len(system.cpus) // procs, config.requested_workers\n )\n\n # return a dummy set of shards just for the runner to iterate over\n shards = [Shard([(i,)]) for i in range(workers)]\n return StageSpec(workers, shards)\n","sub_path":"legate/tester/stages/_osx/omp.py","file_name":"omp.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"533760244","text":"def createHistogram(converted_img):\n gray_level = []\n frequency = []\n histogram = {}\n for i in range(len(converted_img)):\n if converted_img[i] not in gray_level:\n gray_level.append(converted_img[i])\n gray_level.sort()\n for i in range(len(gray_level)):\n frequency.append(converted_img.count(gray_level[i]))\n histogram = dict(zip(gray_level, frequency))\n return histogram\n\n\ndef solve4eqaultion(A, b):\n n = len(A)\n M = A\n\n i = 0\n for x in M:\n x.append(b[i])\n i += 1\n\n for k in range(n):\n for i in range(k, n):\n if abs(M[i][k]) > abs(M[k][k]):\n M[k], M[i] = M[i], M[k]\n else:\n pass\n\n for j in range(k+1, n):\n q = float(M[j][k]) / M[k][k]\n for m in range(k, n+1):\n M[j][m] -= q * M[k][m]\n\n x = [0 for i in range(n)]\n\n x[n-1] = float(M[n-1][n])/M[n-1][n-1]\n for i in range(n-1, -1, -1):\n z = 0\n for j in range(i+1, n):\n z = z + float(M[i][j])*x[j]\n x[i] = float(M[i][n] - z)/M[i][i]\n return x\n\n\ndef Bilinear(old_image, pixelXP, pixelYP):\n # point\n x = int(pixelXP//1)\n xplus = int((pixelXP+1)//1)\n\n y = int(pixelYP//1)\n yplus = int((pixelYP+1)//1)\n\n xScale = pixelXP - x\n yscale = pixelYP - y\n # read color from old image\n a = old_image[xplus][y] - old_image[x][y]\n b = old_image[x][yplus] - old_image[x][y]\n c = old_image[xplus][yplus] + old_image[x][y] - \\\n old_image[x][yplus] - old_image[xplus][y]\n d = old_image[x][y]\n # caculate\n color = (a * xScale) + (b * yscale) + (c * xScale * yscale) + d\n\n color = int(round(color))\n return color\n","sub_path":"etc_function.py","file_name":"etc_function.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"579122811","text":"class Solution:\n # @param {string} s\n # @return {boolean}\n def isValid(self, str):\n s = [] # 注意自己设变量不要和引用参数重复\n\n for i in str:\n if i == '(' or i == '[' or i == '{':\n s.append(i)\n elif i == ')' or i == ']' or i == '}':\n if not s: # 为空加入右括号,肯定是一shi\n return False\n if abs(ord(s.pop())-ord(i)) > 2: # 利用ascii码缩短了代码判断\n return False\n\n if s:\n return False\n else:\n return True","sub_path":"Stack/Valid Parentheses.py","file_name":"Valid Parentheses.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141914364","text":"#!/usr/bin/env python\n\nimport struct, sys, time, socket\n\n# 0. Usage and Argument initialization\nif ( len( sys.argv ) != 5 ):\n\tsys.exit( \"Usage: \" + sys.argv[0] + \" \" )\n\nemu_hostname = sys.argv[1]\nemu_port = int( sys.argv[2] )\nemu = ( emu_hostname, emu_port )\n\nsender_port = int( sys.argv[3] )\nfilename = open( sys.argv[4], \"r\" )\nseqnum_log = open( \"seqnum.log\", \"w\" )\nack_log = open( \"ack.log\", \"w\" )\n\nsenderSocket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\nsenderSocket.bind( ( '', sender_port ) )\nsenderSocket.setblocking( 0 ) # Our socket must not block on recvfrom()!\n\ntimeoutpd = 0.050 # 50ms, change as needed\ntimer_running = False # Flag variable :( \n\neot_ready = False # Flag variable 2 :((\neot = '' # Just forward declaring it\n\nempty_case = True # Flag variable 3 :(((\n\nwin_max = 10\nbase = 0\nnextseqnum = 0\npacketbuffer = [] # Will hold up to 32 packets for potential retransmission\nfor i in range(0, 32):\n\tpacketbuffer.append(i) # Allows for direct assignment into packetbuffer[ index ]\n# for \n\n\n# 1. Begin the loop of creating & sending packets, receiving ACKs, and timeouts\nwhile True:\n\ttry: # Check if you received an ACK packet\n\t\tack, addr = senderSocket.recvfrom( 512 )\n\t\tptype, pseqnum, plength, pdata = struct.unpack( '!iii500s', ack )\n\t\tack_log.write( str( pseqnum ) + '\\n' ) # Log the packet's ACK seqnum\n\t\tbase = (pseqnum + 1) % 32 # Update sender window's base\n\n\t\tif ( base == nextseqnum ):\n\t\t\ttimer_running = False # Stop the timer\n\t\t\tif ( eot_ready == True ): # If this was the final ACK:\n\t\t\t\tsenderSocket.sendto( eot, emu ) # Send EOT packet\n\t\t\t\tseqnum_log.write( str( nextseqnum ) + '\\n' ) # Log the packet seqnum\n\t\t\t\tbreak # Move to section 2. 
and wait for rcvr EOT.\n\t\t\t# if\n\t\telse:\n\t\t\ttimer = time.time()\n\t\t\ttimer_running = True # Restart the timer\n\t\t# if\n\t\tcontinue # Do not check timeout/room-in-window ifs\n\texcept socket.error: # If there was nothing to receive,\n\t\t# Continue to check the timeout if and the room-in-window elif.\n\t\tpass\n\t# try\n\n\tif ( timer_running and time.time() - timer > timeoutpd ): # If timeout:\n\t\ttimer = time.time() # Restart the timer\n\n\t\t# Resend all packets in the window:\n\t\ttempbase = base\n\t\twhile ( tempbase != nextseqnum ):\n\t\t\tsenderSocket.sendto( packetbuffer[ tempbase ], emu )\n\t\t\tseqnum_log.write( str( tempbase ) + '\\n' ) # Log the resent seqnum\n\t\t\ttempbase = (tempbase + 1) % 32\n\t\t# while\n\telif ( nextseqnum < base + win_max ): # If there is room in the sender window:\n\t\tfiledata = filename.read( 500 )\n\n\t\tif len( filedata ) == 0: # If you've read everything in the file:\n\t\t\teot = struct.pack( '!iii500s', 2, nextseqnum, len( filedata ), filedata )\n\t\t\teot_ready = True # Prepare to send EOT upon receipt of final ACK (above)\n\n\t\t\tif ( empty_case ): # ONLY TO SOLVE THE EMPTY FILE CASE\n\t\t\t\t# In this case, send the EOT immediately.\n\t\t\t\tsenderSocket.sendto( eot, emu )\n\t\t\t\tseqnum_log.write( str( nextseqnum ) + '\\n' ) # Log the packet seqnum\n\t\t\t\tbreak\n\t\telse:\n\t\t\tempty_case = False\n\n\t\t\tpacket = struct.pack( '!iii500s', 1, nextseqnum, len( filedata ), filedata )\n\t\t\tsenderSocket.sendto( packet, emu ) # Send data packet\n\t\t\tseqnum_log.write( str( nextseqnum ) + '\\n' ) # Log the packet's seqnum\n\t\t\tpacketbuffer[ nextseqnum ] = packet\n\t\t\tif ( base == nextseqnum ):\n\t\t\t\ttimer = time.time() # Restart the timer\n\t\t\t\ttimer_running = True\n\t\t\t# if\n\t\t\tnextseqnum = (nextseqnum + 1) % 32\n\t\t# if\n\t# if\n# while\n\n\n# 2. Wait to receive EOT packet from emulator\nsenderSocket.setblocking( 1 ) # NOW set our socket to block on recvfrom()\neot, addr = senderSocket.recvfrom( 512 ) # Receive the EOT packet.\n# As EOT packets are never lost and we have received all ACKs,\n# this is guaranteed to be EOT.\nptype, pseqnum, plength, pdata = struct.unpack( '!iii500s', eot )\n\n\n# 3. 
Cleanup\nsenderSocket.close()\nfilename.close()\nseqnum_log.close()\nack_log.close()\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233383607","text":"def buildCrossWordMap(crossword):\n CrossWordMap = {}\n empty = '-'\n cw_len = len(crossword)\n columns = [''.join(['+' for i in range(cw_len)]) for i in range(cw_len)]\n for i,line in enumerate(crossword):\n if empty in line:\n x = x_beg = line.find(empty)\n x_len = 1\n for ic,c in enumerate(line[x_beg+1:]):\n if c == empty:\n if x_len == 0:\n x = x_beg + ic + 1\n x_len += 1\n else:\n if x_len == 1:\n x_len = 0\n if x_len > 1:\n direction = 0\n CrossWordMap[(i, x, direction)] = x_len\n\n for j,c in enumerate(line):\n if c == empty:\n columns[j] = columns[j][:i] + empty + columns[j][i+1:]\n\n for j, col in enumerate(columns):\n if empty in col:\n y = y_beg = col.find(empty)\n y_len = 1\n for ic,c in enumerate(col[y_beg+1:]):\n if c == empty:\n if y_len == 0:\n y = y_beg + ic + 1\n y_len += 1\n else:\n if y_len == 1:\n y_len = 0\n if y_len > 1:\n direction = 1\n CrossWordMap[(y, j, direction)] = y_len\n\n return (CrossWordMap, columns)\n\ndef crossWordPuzzle(crossword, words):\n CrossWordMap, columns = buildCrossWordMap(crossword)\n w_prop = {}\n for word in words:\n w_prop[word] = len(word)\n\n cw_map = sorted(CrossWordMap.items(), reverse=True, key=lambda x:x[1])\n w_map = sorted(w_prop.items(), reverse=True, key=lambda x:x[1])\n\n letter_map = {}\n for x,y,d in CrossWordMap.keys():\n l = CrossWordMap[(x,y,d)]\n if d == 0:\n for i,p in enumerate(range(x,x+l)):\n if (p,y) in letter_map.keys():\n letter_map[(p,y)].append((i,d)) \n else:\n letter_map[(p,y)] = [(i,d)] \n else:\n for j,p in enumerate(range(y,y+l)):\n if (x,p) in letter_map.keys():\n letter_map[(x,p)].append((j,d))\n else:\n letter_map[(x,p)] = [(j,d)]\n # To find common letter location with more than one frequency\n for x,y in letter_map.keys():\n if len(letter_map[(x,y)]) > 1:\n print(x,y,letter_map[(x,y)])\n\n cwAssigned = {}\n\n for w in w_map:\n # assign words based on their length to the available slot length\n for cw in cw_map:\n if cw[1] == w[1]:\n if cw[0] in cwAssigned.keys():\n cwAssigned[cw[0]].append(w[0])\n else:\n cwAssigned[cw[0]] = [w[0]]\n\n return cwAssigned\n\n\n\n\n\n","sub_path":"graph/CrossWord.py","file_name":"CrossWord.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"343138416","text":"#!/usr/bin/env python\n\"\"\"\nInfo about our project comes here\n\"\"\"\n\n__author__ = \"Timo Verbist\"\n__email__ = \"timo.verbist@student.kdg.be\"\n__status__ = \"finished\"\n\n# import\n\n\ndef main():\n while True:\n word = str(input(\"Give a word please.\\n\"))\n wordLength = len(word)\n print(\"uw woord achterstevoren is: \", end=\"\")\n for i in range(wordLength, 0, -1):\n print(word[i-1], end=\"\")\n\n print(\"\\n\\n\")\n\n\nif __name__ == '__main__': # code to execute if called from command-line\n main()\n","sub_path":"2) woord_achterstevoren.py","file_name":"2) woord_achterstevoren.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244804438","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import 
Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.servers.basehttp import FileWrapper\nfrom django.db.models import Q\nfrom django.forms import HiddenInput\nfrom django.shortcuts import HttpResponseRedirect, Http404, render, HttpResponse\nfrom django.utils.safestring import mark_safe\nfrom django.views.generic import ListView\nfrom mimetypes import guess_type\nfrom zdesk import Zendesk, get_id_from_url\nimport os\n\nfrom .models import Task, Post\nfrom subscriptions.models import Subscription\nfrom .forms import TaskForm, PostForm\n\n\ndef getzdesk(u):\n return Zendesk(str(u.zendesk_host.replace('http://', 'https://')), str(\n u.zendesk_email), str(u.zendesk_password))\n\n\ndef zdeskupload(zendesk, f):\n return zendesk.upload_create(\n f.name, f.read(), mime_type='application/binary',\n complete_response=True)['content']['upload']['token']\n\n\n@login_required\ndef search(request):\n try:\n q = request.GET.get('q', '')\n except:\n q = None\n if q:\n k = q.split()\n if len(k) >= 2:\n tasks = []\n for item in k:\n all_tasks = Task.objects.filter(\n Q(title__icontains=item) |\n Q(description__icontains=item)).distinct()\n for task in all_tasks:\n if task not in tasks:\n tasks.append(task)\n else:\n tasks = Task.objects.filter(\n Q(title__icontains=q) |\n Q(description__icontains=q)).distinct()\n\n user_profile = request.user\n user_subscription = Subscription.objects.get(subscriber=request.user)\n user_plan = user_subscription.plan\n avail_tasks = user_profile.get_available_tasks()\n used_tasks = user_profile.used_tasks\n time_saved = user_profile.get_time_saved()\n template = 'tasks/search_results.html'\n context = {'query': q, 'tasks': tasks, 'avail_tasks': avail_tasks,\n 'used_tasks': used_tasks, 'time_saved': time_saved, 'user_plan': user_plan}\n\n else:\n template = 'tasks/tasks_home.html'\n context = {}\n return render(request, template, context)\n\n\n@login_required\ndef tasks_home(request):\n try:\n user_subscription = Subscription.objects.get(subscriber=request.user)\n except:\n return HttpResponseRedirect('/plans')\n\n all_tasks = Task.objects.filter(lister=request.user)\n user_profile = request.user\n user_subscription = Subscription.objects.get(subscriber=request.user)\n user_plan = user_subscription.plan\n avail_tasks = user_profile.get_available_tasks()\n used_tasks = user_profile.used_tasks\n time_saved = user_profile.get_time_saved()\n\n paginator = Paginator(all_tasks, 3) # Show 3 tasks per page\n page = request.GET.get('page')\n\n try:\n tasks = paginator.page(page)\n except PageNotAnInteger: # If page is not an integer, deliver first page.\n tasks = paginator.page(1)\n # If page is out of range (e.g. 
9999), deliver last page of results.\n except EmptyPage:\n tasks = paginator.page(paginator.num_pages)\n\n context = {'user': request.user, 'tasks': tasks, 'avail_tasks': avail_tasks,\n 'used_tasks': used_tasks, 'time_saved': time_saved, 'user_plan': user_plan}\n template = 'tasks/tasks_home.html'\n return render(request, template, context)\n\n\n@login_required\ndef task_detail(request, pk):\n alert = 'Detalles de la tarea'\n alert_type = 'alert-info'\n current_task = Task.objects.get(id=pk)\n zendesk = getzdesk(request.user)\n ztask = zendesk.ticket_show(current_task.zendeskid)\n if current_task.active and ztask['ticket']['status'] == 'solved':\n current_task.active = False\n duration = 0\n for c in zendesk.ticket_comments(current_task.zendeskid)['comments']:\n duration += c.get('data', {}).get('call_duration', 0)\n post, created = Post.objects.get_or_create(\n id=c['id'] % 1000000, task=current_task, content=c['body'],\n defaults=dict(updated=c['created_at']))\n if created:\n for a in c['attachments']:\n post.zattachment_set.create(\n url=a['content_url'], name=a['file_name'])\n current_task.calls_duration = duration\n current_task.save()\n id = current_task.id\n posts = Post.objects.filter(task_id=id)\n form = PostForm(request.POST, request.FILES, instance=Post(\n poster=request.user, task=current_task))\n if request.method == 'POST':\n if form.is_valid():\n post = form.save(commit=False)\n current_task.active = True\n current_task.save()\n comment = dict(body=post.content)\n if post.attachment:\n comment['uploads'] = [zdeskupload(zendesk, post.attachment)]\n zendesk.ticket_update(current_task.zendeskid, dict(ticket=dict(\n comment=comment)))\n return HttpResponseRedirect('/tasks/%s' % (id))\n else:\n raise Http404\n else:\n form = PostForm(initial={'poster': request.user, 'task': current_task})\n form.fields['poster'].widget = HiddenInput()\n\n context = {'user': request.user, 'task': current_task, 'posts':\n posts, 'form': form, 'alert': alert, 'alert_type': alert_type}\n template = 'tasks/task_detail.html'\n return render(request, template, context)\n\n\n@login_required\ndef add_task(request, name, l1):\n alert = 'Nueva Tarea'\n alert_type = 'alert-info'\n user_profile = request.user\n task = None\n messages.add_message(\n request, messages.INFO, 'Gracias! te ayudaremos con esto y estaremos en contacto')\n\n if request.method == 'POST' and str(request.user) == str(name):\n form = TaskForm(request.POST, request.FILES)\n if form.is_valid():\n task = form.save(commit=False)\n task.lister = request.user\n task.categ_l1 = l1\n user_profile.used_tasks += 1\n user_profile.save()\n u = request.user\n new_ticket = {\n 'ticket': {\n 'requester_name': str(u),\n 'requester_email': u.email,\n 'subject': task.title,\n 'description': task.description,\n #'set_tags': task.tags,\n 'ticket_field_entries': [\n {\n 'ticket_field_id': 1,\n 'value': 'venti'\n },\n {\n 'ticket_field_id': 2,\n 'value': '$10'\n }\n ]\n }\n }\n zendesk = getzdesk(u)\n if task.attachment:\n new_ticket['ticket']['comment'] = dict(\n uploads=[zdeskupload(zendesk, task.attachment)])\n result = zendesk.ticket_create(data=new_ticket)\n task.zendeskid = id = get_id_from_url(result)\n task.zendeskurl = url = u.zendesk_host + '/agent/tickets/' + id\n task.attachment = None\n task.save()\n messages.success(request, mark_safe(\n 'Zendesk ticket created at
<a href=\"%s\">%s</a>' % (\n url, url)))\n return HttpResponseRedirect('/tasks/%s' % (task.id))\n else:\n form = TaskForm(initial={'categ_l1': l1})\n # att_form=TaskDocumentForm()\n context = {'user': name, 'form': form, 'alert': alert,\n 'alert_type': alert_type, 'task': task}\n template = 'tasks/add_task.html'\n return render(request, template, context)\n\n\nclass Task_List(ListView):\n\n def get_queryset(self):\n tasks = Task.objects.filter(lister=self.request.user)\n return tasks\n\n\ndef download_file(request, pk, filename):\n post = Post.objects.get(id=pk)\n post_file = str(post.attachment)\n file_path = os.path.join(settings.PROTECTED_UPLOADS, post_file)\n wrapper = FileWrapper(open(file_path, 'rb'))\n response = HttpResponse(wrapper, content_type=guess_type(post_file)[0])\n response['Content-Disposition'] = 'attachment;filename=%s' % filename\n response['Content-Type'] = ''\n response['X-SendFile'] = file_path\n return response\n","sub_path":"tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"353987785","text":"import re\nimport requests\nimport json\n\nurl = \"http://36kr.com/\"\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"}\nresponse = requests.get(url, headers=headers)\nhtml_str = response.content.decode()\nret = re.findall(\"