diff --git "a/1899.jsonl" "b/1899.jsonl" new file mode 100644--- /dev/null +++ "b/1899.jsonl" @@ -0,0 +1,629 @@ +{"seq_id":"271942685","text":"# Benchmark 1 (3D unstructured grid)\n#\n# compares the dumux solution to the analytical solution (Figure 2abc Vanderborght et al 2005)\n#\n# D. Leitner, 2018\n#\n\nimport os\nimport matplotlib.pyplot as plt\nfrom analytic_b1 import *\nfrom vtk_tools import *\nimport van_genuchten as vg\n\n# fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n\n# go to the right place\npath = os.path.dirname(os.path.realpath(__file__))\nos.chdir(path)\nos.chdir(\"../../../build-cmake/rosi_benchmarking/soil_richards\")\n\n# run dumux\nnp_ = 1 # number of processors\n# if np_ == 1:\n# os.system(\"./richardsUG input/b1a_ug.input\")\n# os.system(\"./richardsUG input/b1b_ug.input\")\n# os.system(\"./richardsUG input/b1c_ug.input\")\n# else:\n# os.system(\"mpirun -n \" + str(np_) + \" ./richardsUG input/b1a_ug.input -Grid.Overlap 1\")\n# os.system(\"mpirun -n \" + str(np_) + \" ./richardsUG input/b1b_ug.input -Grid.Overlap 1\")\n# os.system(\"mpirun -n \" + str(np_) + \" ./richardsUG input/b1c_ug.input -Grid.Overlap 1\")\n\n# Figure 2a\ns_, p_, z1_ = read3D_vtp(\"benchmarkUG_1a-00001\", np_)\nh1_ = vg.pa2head(p_)\nax1.plot(h1_, (z1_ - 2) * 100, \"r+\")\n\n# Figure 2b\ns_, p_, z2_ = read3D_vtp(\"benchmarkUG_1b-00001\", np_)\nh2_ = vg.pa2head(p_)\nax2.plot(h2_, (z2_ - 2) * 100, \"r+\")\n\n# Figure 2c\ns_, p_, z3_ = read3D_vtp(\"benchmarkUG_1c-00001\", np_)\nh3_ = vg.pa2head(p_)\nax3.plot(h3_, (z3_ - 2) * 100, \"r+\")\n\n# np.savetxt(\"dumuxUG_b1\", np.vstack((z1_ - 2, h1_, z2_ - 2, h2_, z3_ - 2, h3_)), delimiter = \",\")\n\nprint(len(p_))\n\nplt.show()\n\n","sub_path":"rosi_benchmarking/soil_richards/python/dumux_ug_b1.py","file_name":"dumux_ug_b1.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"89141897","text":"import random\n\nwith open ('access.log', 'w+') as fileIO:\n\n\tfor x in xrange(0,500):\n\t\tday = str(random.randint(1, 30))\n\t\thour = str(random.randint(0, 23))\n\t\tminute = str(random.randint(0, 59))\n\t\tsecond = str(random.randint(0, 59))\n\t\turl = random.choice(['index', 'contact', 'about'])\n\t\tparam = random.choice(['?user=matthias','?id=5','?token=54be68nk90po',])\n\n\t\tday = '0' + day if int(day) < 10 else day\n\t\thour = '0' + hour if int(hour) < 10 else hour\n\t\tminute = '0' + minute if int(minute) < 10 else minute\n\t\tsecond = '0' + second if int(second) < 10 else second\n\n\t\tfileIO.write('::1 - - [' + day + '/Apr/2017:' + hour + ':' + minute + ':' + second + ' +0200] \"GET /test/' + url + '.html' + param + '\" 200 202575 \"http://localhost:8080/test/' + random.choice(['index', 'contact', 'about']) + '.html\" \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36\"\\n')\n\n\t\tfileIO.write('::1 - - [' + day + '/Apr/2017:' + hour + ':' + minute + ':' + second + ' +0200] \"GET /favicon.ico HTTP/1.1\" 200 202575 \"http://localhost:8080/test/' + url + '.html\" \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36\"\\n')","sub_path":"0. 
Helper/logGenerator.py","file_name":"logGenerator.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"180602194","text":"#!/bin/python3\n\n#Okek-Obeb Bulan kod\n##\n# Ekok = a * b / ebob(a,b)\n# Ebobu bulmak daha kolay.\n#\n# \n\nkucuksayi = 0\ni = 1\nebob = 1\n\nsayi1 = int(input(\"Birinci sayıyı giriniz: \"))\nsayi2 = int(input(\"İkinci sayıyı giriniz: \"))\n\nif(sayi1 >= sayi2):\n\tkucuksayi = sayi2\nelse:\n\tkucuksayi = sayi1\n\n\nwhile(i <= kucuksayi):\n\tif((sayi1 % i == 0) and (sayi2 % i == 0)):\n\t\tif(ebob < i):\n\t\t\tebob = i\n\n\ti += 1\n\n\nprint(\"Ebob : {}\".format(ebob))\nprint(\"Ekok : {}\".format(sayi1 * sayi2 / ebob))","sub_path":"Okek-Obeb-Python3.py","file_name":"Okek-Obeb-Python3.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"193779853","text":"\"\"\"\nModule containing the main algorithms for solving the N-Queens puzzle.\n\nUsage\n_____\nOne solution: solving for any solution to the N-Queens problem, call\nfind_solution. This method uses backtracking to solve the N-Queens problem.\nThe method takes in an int to determine board size and will print the first\nsolution found to the console or will print no solution found.\n\nEx. find_solution(4) will write the first solution found to the console.\n\nAll solutions: solving for all solutions to the N-Queens problem, call\nall_solutions. This method also uses backtracking to solve the N-Queens\nproblem. The method takes in an int to determine board size and will print all\nthe solutions found to the console or will print no solution found.\n\nEx. all_solutions(4) will print every valid solution to the console.\n\nMethods\n_______\n:method all_solutions(size: int): solves for every solution of the N-Queens\n puzzle of the input size and prints them all (or none found) to console.\n:method find_solution(size: int): solves for a single solution of the N-Queens\n puzzle of the input size and prints the solution (or none found) to the\n console.\n:method solve_all: actual algorithm for finding all the solutions (everything\n from all_solutions but the console printing). Takes a board, the current\n col and a list of completed boards as input and recursively backtracks to\n find the solutions. Returns list of completed solutions.\n\n\"\"\"\n\nfrom copy import deepcopy\nfrom board import Board\n\ndef all_solutions(size: int) -> None:\n \"\"\"\n Main function for finding every solution given a board size of size.\n Will print all completed board states to console if solution is found.\n Will print \"not found\" if there is no solution for a board of size size.\n :param size: int representing size of board to find a solution for\n \"\"\"\n print(f\"Results for board of size {size}:\")\n board = Board(size)\n results = solve_all(col=0, board=board, results=[])\n if results == []:\n print(f\"No valid arrangement for board of size {size}\\n\")\n for result in results:\n for row in result:\n print(row)\n print()\n\ndef solve_all(board: Board, col: int, results: list) -> list:\n \"\"\"\n Recursive function that searches board using BFS to arrange board into\n all completed states. 
Will return list of all valid solutions.\n :param board: Board object to search\n :param col: int representing current column to try\n \"\"\"\n #Completed board found\n if col >= board.size:\n results.append(deepcopy(board.board))\n return results\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n results = solve_all(col=col+1, board=board, results=results)\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return results\n\ndef find_solution(size: int) -> None:\n \"\"\"\n Main function for finding a single solution given a board size of size.\n Will print completed board state to console if solution is found.\n Will print \"not found\" if there is no solution for a board of size size.\n :param size: int representing size of board to find a solution for\n \"\"\"\n board = Board(size)\n if not solve_one(col=0, board=board):\n print(f\"No valid arrangement for board of size {size}\")\n return\n board.print_state()\n\ndef solve_one(board: Board, col: int) -> bool:\n \"\"\"\n Recursive function that searches board using BFS to arrange board into\n completed state. Will return True if board state is valid, False otherwise.\n :param board: Board object to search\n :param col: int representing current column to try\n \"\"\"\n #Completed board found\n if col >= board.size:\n return True\n for row in range(board.size):\n #check if position is valid\n if check_constraints(board=board, row=row, col=col):\n #update board and continue BFS\n board.mark_tile(row=row, col=col)\n if solve_one(col=col+1, board=board):\n return True\n board.unmark_tile(row=row, col=col)\n #no valid solutions for current board position\n return False\n\ndef check_constraints(board: Board, row: int, col: int) -> bool:\n \"\"\"\n Check constraints to see if a queen can be placed on tile.\n :param board: Board object to search\n :param row: int representing row to search\n :param col: int representing current column reached\n \"\"\"\n if not row_constraint(board=board, row=row, col=col):\n return False\n if not upper_diagonal_constraint(board=board, row=row, col=col):\n return False\n if not lower_diagonal_constraint(board, row, col):\n return False\n return True\n\ndef row_constraint(board: Board, row: int, col: int) -> bool:\n \"\"\"\n Function checking to make sure no queens placed on the same row of the board.\n :param board: Board object to search\n :param row: int representing row to search\n :param col: int representing current column reached\n \"\"\"\n for i in range(col):\n if board.is_queen(row=row, col=i):\n return False\n return True\n\ndef upper_diagonal_constraint(board: Board, row: int, col: int) -> bool:\n \"\"\"\n Function checking to make sure no queens placed on the diagonal to the\n upper left of the current position.\n :param board: Board object to search\n :param row: int representing row to search\n :param col: int representing current column reached\n \"\"\"\n #move to first tile in diagonal\n row -= 1\n col -= 1\n #while still in board, test and move to next position return false if fails\n while row >= 0 and col >= 0:\n if board.is_queen(row=row, col=col):\n return False\n row -= 1\n col -= 1\n return True\n\ndef lower_diagonal_constraint(board: Board, row: int, col: int) -> bool:\n \"\"\"\n Function checking to make sure no queens placed on the diagonal to the\n lower left of the current position.\n :param board: Board object to 
search\n :param row: int representing row to search\n :param col: int representing current column reached\n \"\"\"\n #move to first tile in diagonal\n row += 1\n col -= 1\n #while still in board, test and move to next position return false if fails\n while row < board.size and col >= 0:\n if board.is_queen(row=row, col=col):\n return False\n row += 1\n col -= 1\n return True\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"246944777","text":"import webview\nimport threading\n\n'''\nThis example demonstrates how to change a window title.\n'''\n\ndef change_url():\n webview.set_title('New title')\n\n\nif __name__ == '__main__':\n t = threading.Thread(target=change_url)\n t.start()\n\n webview.create_window('Simple browser', 'http://www.flowrl.com', width=800, height=600, resizable=True)\n\n","sub_path":"examples/window_title_change.py","file_name":"window_title_change.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"428672965","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 14:18:45 2020\n\n@author: jwitherspoon\n\"\"\"\n\nimport json\nfrom country_codes import get_country_code\nfrom pygal.maps.world import World\nfrom pygal.style import RotateStyle\n\nfilename = 'population_data.json'\nwith open(filename,encoding=\"utf8\") as f_obj:\n pop_data = json.load(f_obj)\n \n\n#Build a dictionary of population data\n \ncc_population = {}\n \n\n#Print the 2010 population for each country \nfor pop_dict in pop_data:\n if pop_dict['Year'] == 2010:\n country_name = pop_dict['Country Name']\n population = int(float(pop_dict['Value']))\n code = get_country_code(country_name)\n \n if code:\n print(code+ \": \"+str(population))\n else:\n print ('ERROR - '+country_name)\n\n if code:\n cc_population[code]= population\n\n#Group the countries into 3 population levels\ncc_pops1,cc_pops2,cc_pops3,cc_pops4 = {},{},{},{}\nfor cc, pops in cc_population.items():\n if pops < 10000000:\n cc_pops1[cc] = pops\n elif 10000001 <= pops <= 100000000:\n cc_pops2[cc] = pops \n elif 100000001 <= pops < 1000000000:\n cc_pops3[cc] = pops\n else:\n cc_pops4[cc] = pops\n\n#Plot the map \nwm_style = RotateStyle('#990099')\nwm = World(style=wm_style)\nwm.title = 'World population in 2010, by Country'\nwm.add('0-10m',cc_pops1)\nwm.add('10m-100m',cc_pops2)\nwm.add('100m-1bn',cc_pops3)\nwm.add('>1bn',cc_pops4)\nwm.render_to_file('world_population_b.svg') \n\n","sub_path":"data_visualization/Python16_1AllCountries.py","file_name":"Python16_1AllCountries.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"636926556","text":"\"\"\"\r\nFazer um programa para ler o nome e idade de duas pessoas.\r\nAo final mostrar uma mensagem com os nomes e a idade média\r\nentre essas pessoas, com uma casa decimal, conforme exemplo.\r\n\"\"\"\r\n\r\nprint(\"Dados da primeira pessoa\")\r\nnome1 = input(\"Nome: \")\r\nidade1 = int(input(\"Idade: \"))\r\nprint(\"Dados da segunda pessoa\")\r\nnome2 = input(\"Nome: \")\r\nidade2 = int(input(\"Idade: \"))\r\n\r\nmedia = (idade1 + idade2)/2\r\nprint(f\"A idade média de {nome1} e {nome2} é de {media:.1f} 
anos\")\r\n","sub_path":"estrutura-sequencial/problema_idades.py","file_name":"problema_idades.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"471846250","text":"import subprocess\nimport sys\nimport numpy as np\nimport pandas as pd\nimport psycopg2\nimport psycopg2 as pg\nfrom psycopg2 import OperationalError\n\n\ndef show_psycopg2_exception(err):\n # get details about the exception\n err_type, err_obj, traceback = sys.exc_info()\n # get the line number when exception occured\n line_n = traceback.tb_lineno\n # print the connect() error\n print(\"\\npsycopg2 ERROR:\", err, \"on line number:\", line_n)\n print(\"psycopg2 traceback:\", traceback, \"-- type:\", err_type)\n # psycopg2 extensions.Diagnostics object attribute\n print(\"\\nextensions.Diagnostics:\", err.diag)\n # print the pgcode and pgerror exceptions\n print(\"pgerror:\", err.pgerror)\n print(\"pgcode:\", err.pgcode, \"\\n\")\n\n\ndef execute_many(conn, datafrm, table):\n # Creating a list of tupples from the dataframe values\n tpls = [tuple(x) for x in datafrm.to_numpy()]\n\n # dataframe columns with Comma-separated\n cols = ','.join(list(datafrm.columns))\n\n # SQL query to execute\n sql = \"INSERT INTO %s(%s) VALUES(%%s,%%s,%%s,%%s,%%s,%%s,%%s)\" % (table, cols)\n cursor = conn.cursor()\n try:\n cursor.executemany(sql, tpls)\n conn.commit()\n print(\"Data inserted using execute_many() successfully...\")\n except (Exception, psycopg2.DatabaseError) as err:\n # pass exception to function\n show_psycopg2_exception(err)\n cursor.close()\n\n\nconnection = pg.connect(\"host=group3-1-i.comp.nus.edu.sg dbname=healthrecord_encrypted \"\n \"port= 5435 user=postgres password=mysecretpassword\")\nconnection.autocommit = True\n\n\nif connection != None:\n\n try:\n cursor = connection.cursor();\n # Dropping table if exists\n cursor.execute(\"DROP TABLE IF EXISTS public_data;\")\n\n sql = '''CREATE TABLE public_data(\n age varchar NOT NULL, \n test_result varchar NOT NULL,\n gender varchar NOT NULL,\n vaccination_status varchar NOT NULL,\n area varchar NOT NULL,\n race varchar NOT NULL,\n vaccine_type varchar NOT NULL\n )'''\n\n # Creating a table\n cursor.execute(sql);\n print(\"public_data table is created successfully................\")\n aheaders = [\"area\", \"race\", \"vaccine_type\"]\n headers = [\"age\", \"vaccine_type\", \"test_result\", \"area\", \"gender\", \"race\", \"vaccination_status\"]\n anonymized = pd.read_table(\"anonymized.data\", sep=';',\n names=aheaders)\n adult_table = pd.read_table(\"adult.data\", sep=',',\n names=headers)\n sub_df = adult_table.drop(columns=[\"area\", \"race\", \"vaccine_type\"])\n\n anonymized['C'] = np.arange(len(anonymized))\n sub_df['C'] = np.arange(len(sub_df))\n df = pd.merge(sub_df, anonymized, on='C',how=\"inner\")\n df = df.drop('C', axis=1)\n\n # Run the execute_many method\n execute_many(connection, df, 'public_data')\n\n # Closing the cursor & connection\n cursor.close()\n connection.close()\n except OperationalError as err:\n # pass exception to function\n show_psycopg2_exception(err)\n # set the connection to 'None' in case of error\n conn = None\n","sub_path":"K-anonymity-auto-Transformation-main/data/database_uploader.py","file_name":"database_uploader.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"280003276","text":"import sys\ninput = sys.stdin.readline\nimport math\nimport 
pygame\nimport time\nimport random\n\npygame.init()\n\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (255,0,0)\nblue = (0,0,255)\nyellow = (155,155,102)\ngreen = (0,255,0)\n\nwidth = 800\nheight = 600\ndis = pygame.display.set_mode((width,height))\npygame.display.set_caption('Snake game')\n\nclock = pygame.time.Clock()\n\nblock_len = 10\nsnake_speed = 20\n\nfont_style = pygame.font.SysFont(None,40)\nscore_font = pygame.font.SysFont(None,50)\n\ndef your_score(score):\n\tval = score_font.render('Your Score: '+ str(score), True, yellow)\n\tdis.blit(val, [0,0])\n\ndef our_snake(block_len,snake_list):\n\tfor x in snake_list:\n\t\tpygame.draw.rect(dis,black,[x[0], x[1], block_len, block_len])\n\ndef message(msg,color):\n\tm = font_style.render(msg, True, color)\n\tdis.blit(m, [width / 6, height / 3])\n\ndef game_loop():\n\tgame_over = False\n\tgame_close = False\n\n\tx1 = width/2\n\ty1 = height/2\n\n\tx1_change = 0\n\ty1_change = 0\n\n\tsnake_list = []\n\tLength_snake = 1\n\n\tfoodx = round(random.randrange(0, width)/10.0)*10.0\n\tfoody = round(random.randrange(0, height)/10.0)*10.0\n\n\n\n\twhile not game_over:\n\n\t\twhile game_close == True:\n\t\t\tdis.fill(white)\n\t\t\tmessage('You Lost! Press Q-Quit or C-Play Again', red)\n\t\t\tyour_score(Length_snake - 1)\n\t\t\tpygame.display.update()\n\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_q:\n\t\t\t\t\t\tgame_over = True\n\t\t\t\t\t\tgame_close = False\n\t\t\t\t\tif event.key == pygame.K_c:\n\t\t\t\t\t\tgame_loop()\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tgame_over = True\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\tx1_change = -block_len\n\t\t\t\t\ty1_change = 0\n\t\t\t\telif event.key == pygame.K_RIGHT:\n\t\t\t\t\tx1_change = block_len\n\t\t\t\t\ty1_change = 0\n\t\t\t\telif event.key == pygame.K_UP:\n\t\t\t\t\tx1_change = 0\n\t\t\t\t\ty1_change = -block_len\n\t\t\t\telif event.key == pygame.K_DOWN:\n\t\t\t\t\tx1_change = 0\n\t\t\t\t\ty1_change = block_len\n\t\tif x1 >= width or x1 < 0 or y1 >= height or y1< 0:\n\t\t\tgame_close = True\n\n\t\tx1 += x1_change\n\t\ty1 += y1_change\n\t\tdis.fill(white)\n\t\tpygame.draw.rect(dis,green,[foodx,foody,block_len,block_len])\n\t\t\n\t\t# pygame.draw.rect(dis,black,[x1,y1,block_len,block_len])\n\t\tsnake_head = []\n\t\tsnake_head.append(x1)\n\t\tsnake_head.append(y1)\n\t\tsnake_list.append(snake_head)\n\t\tif len(snake_list) > Length_snake:\n\t\t\tdel snake_list[0]\n\n\t\tfor x in snake_list[:-1]:\n\t\t\tif x == snake_head:\n\t\t\t\tgame_close = True\n\n\t\tour_snake(block_len,snake_list)\n\t\tyour_score(Length_snake - 1)\n\t\tpygame.display.update()\n\n\t\tif x1 == foodx and y1 == foody:\n\t\t\tfoodx = round(random.randrange(0,width)/10.0)*10.0\n\t\t\tfoody = round(random.randrange(0,height)/10.0)*10.0\n\t\t\tLength_snake += 1\n\t\t\t# print('Got Food!')\n\t\tclock.tick(snake_speed+10)\n\n# message(\"You lost\", red)\n# pygame.display.update()\n# pygame.time.wait(1000)\n# time.sleep(20)\n\tpygame.quit()\n\tquit()\n\ngame_loop()","sub_path":"snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"24780171","text":"from proj_3_train import ProjModel, ProjData\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nimport pytorch_lightning as pl\nimport os\nimport pandas as pd\nfrom transformers import 
AutoTokenizer\nimport torch\nfrom torch.utils.data import TensorDataset,DataLoader\nimport logging\nfrom datetime import datetime\nimport sys\n\n\n\ndef get_neg_dataset(path=\"/bigtemp/rm5tx/nlp_project/2016-06_all.csv\"):\n print(\"reading neg\")\n data = pd.read_csv(path, dtype={'body':str},skiprows=[1,500000*1])\n data.rename(columns={\"body\":\"data\"}, inplace=True)\n #data[\"label\"] = 0\n\n return data\n\ndef main():\n out_path=\"/bigtemp/rm5tx/nlp_project/2016-06_all_predicted_all.csv\"\n model = ProjModel.load_from_checkpoint(checkpoint_path=os.path.expanduser(\"~/saved_models/last.ckpt\"))\n \n DATA_PATH = os.path.expanduser(\"~/data_cache/\")\n \n logging.basicConfig(filename='log_inference.log', filemode='w', format='%(message)s', level=logging.INFO)\n mylogger = logging.getLogger('Admin_Client')\n mylogger.info('hello')\n \n data = ProjData(max_len=128)\n data.load(DATA_PATH)\n \n neg_data = get_neg_dataset()\n neg_data.dropna(subset=['data'],inplace=True)\n print(neg_data.shape)\n \n neg_data['data'] = neg_data['data'].map(data.preprocess)\n #print(neg_data['data'].tolist())\n \n #data.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True, do_lower_case=True)\n mylogger.info('masking')\n X = neg_data['data']\n X_input_ids = data.tokenize_datasets(X, data.tokenizer)\n X_input_ids = data.trunc_n_pad(X_input_ids)\n X_masks = data.create_attention_masks(X_input_ids)\n #for i in range(len(X_input_ids)):\n # print(len(X_input_ids[i]))\n # print(len(X_masks[i]))\n #print(X_input_ids[0])\n #print(X_masks[0])\n inputs = torch.tensor(X_input_ids)\n masks = torch.tensor(X_masks)\n print(torch.cuda.is_available())\n labels = []\n device = torch.device('cuda:2')\n masked_input = TensorDataset(inputs, masks)\n dataloader = DataLoader(masked_input, batch_size=32)\n model.eval()\n model = model.to(device)\n mylogger.info('Model initializing')\n print(\"number of batches \",len(dataloader))\n \n start = datetime.now()\n for idx, batch in enumerate(dataloader):\n if idx%1000==0:\n mylogger.info(str(idx)+\" \"+str(datetime.now()-start))\n print(str(idx)+\" \"+str(datetime.now()-start))\n b_input, b_mask = batch\n b_input = b_input.to(device)\n b_mask = b_mask.to(device)\n #print(b_input.shape)\n #print(b_mask.shape)\n #print(model(b_input,b_mask).shape)\n labels.extend(model(b_input,b_mask).tolist())\n #print(labels)\n #model.eval()\n #print(type(model))\n #print(model(inputs,masks).shape)\n \n #sents = neg_data['data'].tolist()\n #sents = [\"random sentence\", \"pretty flowers\", \"idiot\", \"fuck you cunt nigger\"]\n #xs,masks = data.process(sents)\n #for sent in sents:\n # x, mask = data.process(sent)\n #print(sent,' ',model(x, mask).item())\n # labels.append(model(x, mask).item())\n \n neg_data[\"label\"] = pd.Series(labels)\n #print(neg_data[[\"data\",\"label\",\"author\"]])\n neg_data.to_csv(out_path)\n logging.shutdown() \n\nif __name__ == '__main__':\n main()\n\n","sub_path":"proj_4_eval.py","file_name":"proj_4_eval.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"112322217","text":"import threading\nimport subprocess\nimport json\nimport sys\nimport traceback\nimport io\n\nfrom gi.repository import GLib\n\nclass MapController:\n def __init__(self, click_callback=None):\n self.process = None\n self.click_callback = click_callback\n\n self.ensure_process()\n\n\n def send_command(self, **kwargs):\n proc = self.ensure_process()\n json.dump(kwargs, 
self.process_stdin)\n print(file=self.process_stdin, flush=True)\n\n def send_command_if_open(self, **kwargs):\n with self.start_lock:\n if self.process:\n self.send_command(**kwargs)\n\n def ensure_process(self):\n if not self.process:\n self.process = subprocess.Popen(\n [sys.executable, '-m', 'map'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n self.process_stdin = io.TextIOWrapper(self.process.stdin)\n self.process_stdout = io.TextIOWrapper(self.process.stdout)\n\n self.start_lock = threading.Lock()\n self.start_lock.acquire()\n\n thread = threading.Thread(target=self.input_thread)\n thread.daemon = True\n thread.start()\n\n self.start_lock.acquire()\n self.start_lock.release()\n\n return self.process\n\n def input_thread(self):\n self.ensure_process()\n self.start_lock.release()\n try:\n while True:\n try:\n line = self.process_stdout.readline()\n if not line:\n return\n except (BrokenPipeError, EOFError):\n return\n try:\n data = json.loads(line)\n cmd = data['cmd']\n if cmd == 'point_selected':\n if self.click_callback:\n GLib.idle_add(self.click_callback, data['row'])\n else:\n print('controller: ignoring line:', data)\n except Exception:\n traceback.print_exc()\n finally:\n self.process = None\n","sub_path":"map_controller.py","file_name":"map_controller.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"390326932","text":"from skyscanner.skyscanner import Flights\nflights_service = Flights('cs562624873439782462964823415992')\nresult = flights_service.get_result(\n country='UK',\n currency='GBP',\n locale='en-GB',\n originplace='SIN-sky',\n destinationplace='KUL-sky',\n outbounddate='2017-05-28',\n inbounddate='2017-05-31',\n adults=1).parsed\n\nprint(result)","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"331201961","text":"from django.conf.urls import include, url\nfrom .views import GetAndPostMarks, DoesEverythinMarks\n\n\nurlpatterns =[\n\n url(r'^bookmarks/$', GetAndPostMarks.as_view(), name='get_post_create_api'),\n url(r'^bookmarks/(?P\\d+)$',\n DoesEverythinMarks.as_view(),\n name='put_update_delete_api'),\n\n ]","sub_path":"url_shortener/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"1394654","text":"# Runs the dialogue system using the command line for input/output\n\nfrom dialogue import init_dialogue, dialogue_input, DialogueOption, bold_print\n\n\nif __name__ == '__main__':\n\n persist_dialogue = True\n # Initialise dialogue + other components\n ret = init_dialogue()\n bold_print(ret[\"response\"])\n\n # While not in end_state, keep running\n while persist_dialogue:\n\n #user_input = sr.get_input_string() # returns string\n user_input = input(\"> \")\n \n ret = dialogue_input(user_input)\n dialogue_id = ret['id']\n\n response = None \n\n if dialogue_id == DialogueOption.EXIT:\n persist_dialogue = False\n\n elif dialogue_id == DialogueOption.DA_RESPONSE:\n response = ret['response']\n\n elif dialogue_id == DialogueOption.BOOK_CONFIRMED:\n response = ret['response']\n\n elif dialogue_id == DialogueOption.QA_RESPONSE:\n response = \"answer\"\n\n bold_print(response)\n 
","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"477819073","text":"import click\nfrom click_option_group import optgroup\n\nimport jax\nfrom jax import random, numpy as np, value_and_grad, jit, tree_util\nfrom optax import chain, clip_by_global_norm, scale_by_adam, scale, apply_updates, add_decayed_weights, masked\n\nfrom clap.models import CLAP\n\n# data\n\nfrom torch.utils.data import DataLoader\nfrom clap.datasets import pair_text_spectrogram_dataset_collate_fn, PairTextSpectrogramDataset\n\n@click.command()\n@optgroup.group('Model settings')\n@optgroup.option('--text_vocab', default = 256, type = int)\n@optgroup.option('--text_dim', default = 512, type = int)\n@optgroup.option('--text_depth', default = 1, type = int)\n@optgroup.option('--text_heads', default = 8, type = int)\n@optgroup.option('--audio_dim', default = 512, type = int)\n@optgroup.option('--audio_depth', default = 1, type = int)\n@optgroup.option('--audio_heads', default = 8, type = int)\n@optgroup.group('Training settings')\n@optgroup.option('--data_folder', default = './data', type = str)\n@optgroup.option('--batch_size', default = 16, type = int)\n@optgroup.option('--epochs', default = 100, type = int)\n@optgroup.option('--learning_rate', default = 3e-4, type = float)\n@optgroup.option('--weight_decay', default = 1e-1, type = float)\n@optgroup.option('--seed', default = 0, type = int)\n@optgroup.option('--max_norm', default = 0.5, type = float)\ndef train(\n *,\n data_folder,\n batch_size,\n epochs,\n learning_rate,\n weight_decay,\n seed,\n max_norm,\n text_vocab,\n text_dim,\n text_depth,\n text_heads,\n audio_dim,\n audio_depth,\n audio_heads\n):\n # rng\n\n rng_key = random.PRNGKey(seed)\n\n # data\n\n dataset = PairTextSpectrogramDataset(data_folder)\n dl = DataLoader(dataset, batch_size = batch_size, collate_fn = pair_text_spectrogram_dataset_collate_fn, drop_last = True, shuffle = True)\n\n # model \n\n model = CLAP(\n text_vocab = text_vocab,\n text_dim = text_dim,\n text_depth = text_depth,\n text_heads = text_heads,\n audio_dim = audio_dim,\n audio_depth = audio_depth,\n audio_heads = audio_heads\n )\n\n # optimizer\n\n exclude_bias = lambda params: tree_util.tree_map(lambda x: x.ndim != 1, params)\n\n optim = chain(\n clip_by_global_norm(max_norm),\n scale_by_adam(eps=1e-4),\n add_decayed_weights(weight_decay, exclude_bias),\n scale(-learning_rate)\n )\n\n # init\n\n audio, audio_mask, text, text_mask = next(iter(dl))\n\n params = model.init(rng_key, text, audio, text_mask, audio_mask)\n optim_state = optim.init(params)\n\n # loss function, for use with value_and_grad\n\n @jit\n @value_and_grad\n def loss_fn(params, text, audio, text_mask, audio_mask):\n return model.apply(params, text, audio, text_mask, audio_mask)\n\n # train loop\n\n for _ in range(epochs):\n for audio, audio_mask, text, text_mask in dl:\n loss, grads = loss_fn(params, text, audio, text_mask, audio_mask)\n updates, optim_state = optim.update(grads, optim_state, params)\n params = apply_updates(params, updates)\n print(f'loss: {loss}')\n\n # finished\n\nif __name__ == \"__main__\":\n train()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"482067986","text":"from django.conf.urls import url\nfrom order.views import OrderPlaceView, OrderCommitView, 
OrderPayView, OrderCheckView, OrderCommentView\n\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'dailyfresh.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^place/$', OrderPlaceView.as_view(), name='place'),\n url(r'^commit/$', OrderCommitView.as_view(), name=\"commit\"),\n url(r'^pay/$', OrderPayView.as_view(), name='pay'),\n url(r'^check/$', OrderCheckView.as_view(), name='check'),\n url(r'^comment/(?P\\d+)$', OrderCommentView.as_view(), name='comment'),\n]\n","sub_path":"apps/order/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"284177351","text":"from datetime import datetime\n\n\ndef rfc3339_converter(time):\n \"\"\"\n Convert time to RFC3339 format\n :param time: time as datetime format\n :return: RFC3339 format time\n \"\"\"\n return time.isoformat(\"T\") + \"Z\"\n\n\ndef time_as_string(time):\n \"\"\"\n Convert time to a string connected by underscore\n :param time: time as datetime format\n :return: a string\n \"\"\"\n return time.strftime(\"%Y_%m_%d_%H_%M\")\n\n\ndef set_query_period():\n \"\"\"\n Set the query period manually\n :return: Start and end time of the period\n \"\"\"\n while True:\n start_time_input = raw_input(\"Input the start date and hour in YYYY-MM-DD-HH format:\\n\")\n end_time_input = raw_input(\"Input the end date and hour in YYYY-MM-DD-HH format:\\n\")\n try: # strptime throws an exception if the input doesn't match the pattern\n start_time = datetime.strptime(start_time_input, \"%Y-%m-%d-%H\")\n end_time = datetime.strptime(end_time_input, \"%Y-%m-%d-%H\")\n except ValueError:\n print(\"The format doesn't match, try again!\\n\")\n continue\n if start_time >= end_time:\n print(\"Start time must be earlier than end time.\\n\")\n continue\n else:\n break\n return start_time, end_time\n","sub_path":"Allocation_optimizer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"520063394","text":"from django.conf.urls import include, url\nfrom django.contrib.auth import views as auth_views\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . import models\nfrom . 
import views\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom .views import (\n\tShowListID,\n\tShowsList,\n\tShowNew,\n\tShowNewV2,\n\tPlacesList,\n PlacesListV2,\n PlaceIDV2,\n\tPlaceShows,\n HomePageView,\n ShowsListV2,\n ShowListIDV2,\n PlaceNewV2,\n PlaceEditV2,\n ShowSoldTickets,\n ListTickets,\n ShowReport,\n ShowReportV2,\n ShowStopV2\n)\n\nurlpatterns = format_suffix_patterns(\n [\n # Spectacles\n url(r'^$', HomePageView.as_view(), name='home'),\n url(r'^logout/$', auth_views.logout, name='logout'),\n url(r'^login/$', auth_views.login, {'template_name': 'admin/login.html'}, name='login'),\n url('listtickets/', ListTickets.as_view(), name='listickets'),\n url('shows/', ShowsList.as_view(), name='showslist'),\n url('showsv2/', ShowsListV2.as_view(), name='showslistv2'),\n url('showid/(?P.+)$', ShowListID.as_view(), name='showid'),\n url('showidv2/(?P.+)$', ShowListIDV2.as_view(), name='showidv2'),\n url('showsoldtickets/(?P.+)$', ShowSoldTickets.as_view(), name='showsoldtickets'),\n url('placeid/(?P.+)$', PlaceShows.as_view(), name='placeid'),\n url('shownew/', ShowNew.as_view(), name='shownew'),\n url('placenewv2/', PlaceNewV2.as_view(), name='placenewv2'),\n url('placeeditv2/(?P.+)$', PlaceEditV2.as_view(), name='placeeditv2'),\n url('shownewv2/', ShowNewV2.as_view(), name='shownewv2'),\n url('placesv2/', PlacesListV2.as_view(), name='placeslistv2'), \n url('placeidv2/(?P.+)$', PlaceIDV2.as_view(), name='placeidv2'),\n url('places/', PlacesList.as_view(), name='placeslist'),\n url('showreport/', ShowReport.as_view(), name='showreport'),\n url('showreport2/', ShowReportV2.as_view(), name='showreportv2'),\n url('showstopv2/(?P.+)$', ShowStopV2.as_view(), name='showstopv2'),\n ],\n)\n","sub_path":"app/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"317789768","text":"# coding: utf-8\nimport os\nimport dj_database_url\nfrom datetime import timedelta\n\ngettext = lambda s: s\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"v@q!h8+81aim%n=$5vbz3$+)k!na@n(&o6$nq2kdb3ek2$(gzl\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.getenv(\"DEBUG\", \"True\") == \"True\"\n\nTEMPLATE_DEBUG = DEBUG\nTHUMBNAIL_DEBUG = DEBUG\n\nINTERNAL_IPS = [\"127.0.0.1\"]\n\nALLOWED_HOSTS = [\"localhost\", \"myplaystation.ru\"]\n\n# Application definition\n\nROOT_URLCONF = \"myplaystations.urls\"\nWSGI_APPLICATION = \"myplaystations.wsgi.application\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = \"ru\"\n\nTIME_ZONE = \"Europe/Moscow\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media_root\")\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static_root\")\n\nSITE_ID = 1\n\nTEMPLATE_LOADERS = \\\n(\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n \"django.template.loaders.eggs.Loader\"\n)\n\nMIDDLEWARE_CLASSES = \\\n(\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n 
\"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.doc.XViewMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = \\\n(\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.csrf\",\n \"django.core.context_processors.tz\",\n \"sekizai.context_processors.sekizai\",\n \"django.core.context_processors.static\",\n \"base.context_processors.footer_news\",\n)\n\nINSTALLED_APPS = \\\n(\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.admin\",\n \"django.contrib.sites\",\n \"django.contrib.sitemaps\",\n \"django.contrib.staticfiles\",\n \"django.contrib.messages\",\n \"smuggler\",\n \"sekizai\",\n \"reversion\",\n \"sorl.thumbnail\",\n \"ckeditor\",\n \"raven.contrib.django.raven_compat\",\n \"gunicorn\",\n \"storages\",\n \"django_filters\",\n \"taggit\",\n \"django_activeurl\",\n \"spurl\",\n \"haystack\",\n \"djcelery\",\n \"celery_haystack\",\n \"myplaystations\",\n \"base\",\n \"base.account\",\n)\n\nif not DEBUG:\n RAVEN_CONFIG = \\\n {\n \"dsn\": \"http://b43aa6b8a8a94762a9ad418da85277ba:a0cdb1eb487d468a956433a8e47ffa64@sentry.psprices.com/6\",\n }\n\nSTATICFILES_STORAGE = \"whitenoise.django.GzipManifestStaticFilesStorage\"\nDEFAULT_FILE_STORAGE = \"storages.backends.s3boto.S3BotoStorage\"\nAWS_PRELOAD_METADATA = True\nAWS_IS_GZIPPED = True\nAWS_S3_HOST = \"s3-eu-west-1.amazonaws.com\"\nAWS_ACCESS_KEY_ID = os.getenv(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\nAWS_STORAGE_BUCKET_NAME = os.getenv(\"AWS_STORAGE_BUCKET_NAME\")\nAWS_S3_CUSTOM_DOMAIN = os.getenv(\"AWS_S3_CUSTOM_DOMAIN\")\n\nDATABASES = {\n 'default': dj_database_url.config()\n}\n\nCKEDITOR_IMAGE_BACKEND = \"pillow\"\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\n# CKEDITOR_JQUERY_URL = \"//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js\"\nCKEDITOR_CONFIGS = \\\n{\n \"default\":\n {\n \"toolbar\": \"CMS\",\n \"extraPlugins\": \"link,iframe,colorbutton,autogrow,youtube\",\n }\n}\n\nHAYSTACK_CONNECTIONS = \\\n{\n \"default\":\n {\n \"ENGINE\": \"haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine\",\n \"URL\": os.getenv(\"ELASTICSEARCH_URL\", \"http://127.0.0.1:9200/\"),\n \"INDEX_NAME\": \"haystack\",\n }\n}\nHAYSTACK_SIGNAL_PROCESSOR = \"celery_haystack.signals.CelerySignalProcessor\"\n\nfrom urlparse import urlparse\nREDIS_URL = urlparse(os.getenv(\"REDIS_URL\", \"redis://127.0.0.1:6379\")).netloc\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\nCELERYBEAT_SCHEDULER = \"djcelery.schedulers.DatabaseScheduler\"\nBROKER_URL = \"redis://{}\".format(REDIS_URL)\nCELERYBEAT_SCHEDULE = \\\n{\n u\"Обновить игры PS+\":\n {\n \"task\": \"base.tasks.get_ps_plus_free_games\",\n \"schedule\": timedelta(hours=2),\n 
},\n}\n","sub_path":"myplaystations/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"346608780","text":"import numpy as np\nfrom random import*\nfrom multiprocessing import Pool\nfrom time import time\n\n\nclass Algorithem:\n def __init__(self,Gensize,Popsize,reward,mutation):\n self.gensize=Gensize\n self.popsize=Popsize\n self.genes=[]\n self.rewards=[]\n self.reward=reward\n self.mutation=mutation\n\n def generation(self,prev=\"None\",best=None,mut=0):\n start = time()\n\n # print(__name__)\n if prev==\"None\":\n self.genes=[]\n self.rewards=[]\n for i in range(self.popsize):\n self.genes.append(np.random.rand(self.gensize)*2-1)\n\n self.rewards=self.reward(self.genes)\n newbest=np.argmin(self.rewards)\n else:\n self.genes=[]\n self.rewards=[]\n for i in range(self.popsize):\n new=[]\n num=np.random.randint(0,len(prev[0])-1)\n other=best\n while other==best:\n other=randint(0,len(prev)-1)\n a=prev[best]\n b=prev[other]\n for n in range(len(prev[best])):\n if n>=num:\n new.append(b[n])\n else:\n new.append(a[n])\n #mutation\n new=self.mutation(new,mut)\n\n self.genes.append(new)\n\n\n start=time()\n self.rewards=self.reward( self.genes)\n end=time()\n print(\"TIME\",end-start)\n\n self.genes[np.argmax(self.rewards)]=prev[best] #eletism?\n newbest=np.argmin(self.rewards)\n\n topscore=min(self.rewards)\n lowscore=max(self.rewards)\n return self.genes,newbest,topscore, lowscore\n\n","sub_path":"Breakout/GenAlg/GeneticAlgorithemFast.py","file_name":"GeneticAlgorithemFast.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"462529758","text":"import ssl\nimport json\nimport sys\nimport asyncio\nimport logging\nimport core.state as state\nfrom core.events import NEW_SESSION, SESSION_STAGED, SESSION_CHECKIN, JOB_RESULT\nfrom core.listener import Listener\nfrom core.session import Session\nfrom core.utils import get_ipaddress, gen_random_string, check_valid_guid\nfrom logging import Formatter\nfrom io import BytesIO\nfrom zipfile import ZipFile, ZIP_DEFLATED\nfrom base64 import b64encode\nfrom pprint import pprint\nfrom quart import Quart, Blueprint, request, jsonify, Response\nfrom quart.logging import default_handler, serving_handler\n\n\nclass STListener(Listener):\n def __init__(self):\n Listener.__init__(self)\n self.name = 'http2'\n self.author = '@byt3bl33d3r'\n self.description = 'HTTP/2 listener'\n\n self.options = {\n # format:\n # value_name : {description, required, default_value}\n\n 'Name': {\n 'Description' : 'Name for the listener.',\n 'Required' : True,\n 'Value' : 'http/2'\n },\n #'Host': {\n # 'Description' : 'Hostname/IP for staging.',\n # 'Required' : True,\n # 'Value' : f\"https://{get_ipaddress()}\"\n #},\n 'BindIP': {\n 'Description' : 'The IPv4/IPv6 address to bind to.',\n 'Required' : True,\n 'Value' : get_ipaddress()\n },\n 'Port': {\n 'Description' : 'Port for the listener.',\n 'Required' : True,\n 'Value' : 443\n },\n 'Cert': {\n 'Description' : 'SSL Certificate file',\n 'Required' : False,\n 'Value' : 'data/cert.pem'\n },\n 'Key': {\n 'Description' : 'SSL Key file',\n 'Required' : False,\n 'Value' : 'data/key.pem'\n }\n }\n\n def run(self):\n ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ssl_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION\n ssl_context.set_ciphers('ECDHE+AESGCM')\n 
ssl_context.load_cert_chain(certfile=self['Cert'], keyfile=self['Key'])\n ssl_context.set_alpn_protocols(['h2']) # Only http/2\n\n \"\"\"\n While we could use the standard decorators to register these routes, \n using add_url_rule() allows us to create diffrent endpoint names\n programmatically and pass the classes self object to the routes\n \"\"\"\n\n loop = asyncio.get_event_loop()\n\n http_blueprint = Blueprint(__name__, 'http')\n http_blueprint.before_request(self.check_if_naughty)\n http_blueprint.after_request(self.make_normal)\n\n http_blueprint.add_url_rule('/stage.zip', 'stage', self.stage, methods=['GET'])\n http_blueprint.add_url_rule('/', 'first_checkin', self.first_checkin, methods=['POST'])\n http_blueprint.add_url_rule('//jobs', 'jobs', self.jobs, methods=['GET'])\n http_blueprint.add_url_rule('//jobs/', 'job_result', self.job_result, methods=['POST'])\n\n # Add a catch all route\n http_blueprint.add_url_rule('/', 'unknown_path', self.unknown_path, defaults={'path': ''})\n http_blueprint.add_url_rule('/', 'unknown_path', self.unknown_path, methods=['GET', 'POST'])\n\n self.app = Quart(__name__)\n\n logging.getLogger('quart.app').setLevel(logging.DEBUG if state.args['--debug'] else logging.ERROR)\n logging.getLogger('quart.serving').setLevel(logging.DEBUG if state.args['--debug'] else logging.ERROR)\n\n self.app.register_blueprint(http_blueprint)\n self.app.run(host=self['BindIP'],\n port=self['Port'],\n debug=False,\n ssl=ssl_context,\n use_reloader=False,\n access_log_format='%(h)s %(p)s - - %(t)s statusline: \"%(r)s\" statuscode: %(s)s responselen: %(b)s protocol: %(H)s',\n loop=loop)\n\n async def check_if_naughty(self):\n try:\n headers = request.headers['User-Agent'].lower()\n if 'curl' in headers or 'httpie' in headers:\n return jsonify({}), 404\n except KeyError:\n pass\n\n async def make_normal(self, response):\n #response.headers[\"server\"] = \"Apache/2.4.35\"\n return response\n\n async def stage(self):\n with open('data/stage.zip', 'rb') as stage_file:\n stage_file = BytesIO(stage_file.read())\n with ZipFile(stage_file, 'a', compression=ZIP_DEFLATED, compresslevel=9) as zip_file:\n zip_file.write(\"data/stage.py\", arcname=\"Main.py\")\n\n self.dispatch_event(SESSION_STAGED, f'Sending stage ({sys.getsizeof(stage_file)} bytes) -> {request.remote_addr} ...')\n return Response(stage_file.getvalue(), content_type='application/zip')\n\n @check_valid_guid\n async def first_checkin(self, GUID):\n data = json.loads(await request.data)\n self.dispatch_event(NEW_SESSION, Session(GUID, request.remote_addr, data))\n return jsonify({}), 200\n\n @check_valid_guid\n async def jobs(self, GUID):\n self.app.logger.debug(f\"Session {GUID} ({request.remote_addr}) checked in\")\n job = self.dispatch_event(SESSION_CHECKIN, (GUID, request.remote_addr))\n if job:\n return jsonify(job), 200\n\n self.app.logger.debug(f\"No jobs to give {GUID}\")\n return jsonify({}), 200\n\n @check_valid_guid\n async def job_result(self, GUID, job_id):\n self.app.logger.debug(f\"Session {GUID} posted results of job {job_id}\")\n data = json.loads(await request.data)\n self.dispatch_event(JOB_RESULT, (GUID, data))\n\n return jsonify({}), 200\n\n async def unknown_path(self, path):\n self.app.logger.error(f\"Unknown path: {path}\")\n return jsonify({}), 404\n","sub_path":"Server/listeners/http2.py","file_name":"http2.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"1549482","text":"# -*- coding:utf-8 
-*-\n'''\n__author__ = 'XD'\n__mtime__ = 2019/1/25\n__project__ = 教学评估解析\nFix the Problem, Not the Blame.\n'''\nimport logging\nimport os\nimport logging.config\nimport yaml\n\n\nclass Logger:\n '''根据配置文件生成logger'''\n def __init__(self, default_path='log/conf.yaml', default_level=logging.INFO):\n path = default_path\n if os.path.exists(path):\n with open(path, 'r', encoding='utf-8') as f:\n config = yaml.load(f)\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n self.logger = logging.getLogger('main.core')","sub_path":"log/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184318007","text":"def laceStrings(s1, s2):\n \"\"\"\n s1 and s2 are strings.\n\n Returns a new str with elements of s1 and s2 interlaced,\n beginning with s1. If strings are not of same length, \n then the extra elements should appear at the end.\n \"\"\"\n # Get longest string\n longest = s1\n shortest = s2\n lacedString = ''\n\n if len(s2) > len(s1):\n \tlongest = s2\n \tshortest = s1\n\n for i in range(len(shortest)):\n \tlacedString += s1[i] + s2[i]\n\n lacedString += longest[len(shortest):]\n\n return lacedString\n\nprint(laceStrings(\"abc\", \"def\") == \"adbecf\")\n\nprint(laceStrings('', \"def\") == \"def\")\n\nprint(laceStrings('abcxyz', \"def\") == \"adbecfxyz\")\n\nprint(laceStrings('abc', \"defxyz\") == \"adbecfxyz\")\n\n","sub_path":"psets/endterm/laceStrings.py","file_name":"laceStrings.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"367302266","text":"import functools\n\n\nheaders = {\n'Accept': '*/*',\n'Accept-Encoding': 'gzip,deflate',\n'Accept-Language': 'en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4',\n'Connection': 'keep-alive',\n'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36',\n'Referer': 'http://ha.122.gov.cn/views/examplanpub.html'\n}\n\n# This decorator can be applied to\ndef with_logging(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n print('LOG: Running job \"%s\"' % func.__name__)\n result = func(*args, **kwargs)\n print('LOG: Job \"%s\" completed' % func.__name__)\n return result\n return wrapper","sub_path":"module_4_tools/reuse_requests.py","file_name":"reuse_requests.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292128244","text":"\"\"\"This module is used to parse raw monitor data from TestSys to bunch of\ndictionaries and lists\"\"\"\n\nimport re\nimport tsweb\n\ncommand_regex = re.compile(r\"\"\"\n(? 
0:\n teams[team][3] += 1 #solved counter\n #Set global statistic counters\n accepted_counters[problem] += 1\n rejected_counters[problem] += attempts-1\n else:\n rejected_counters[problem] += attempts\n teams[team][4] += result[1] #scores counter\n results.append(result)\n if result != (0, 0, 0, '', 0):\n active_team = True\n if active_team:\n TeamsResults[team] = results\n active_teams.append((team, teams[team][2]))\n\n active_teams.sort(key = lambda x: x[1])\n\n #Sort teams by id (third key)\n teams_order = sorted(teams)\n #Then by scores (secondary key)\n teams_order = sorted(teams_order, key=lambda x: teams[x][4])\n #And by attempts (primary key)\n teams_order = sorted(teams_order, key=lambda x: teams[x][3], reverse=1)\n\n #Assign ranks to teams\n rank = 1\n for team in teams_order:\n teams[team][5] = rank\n #If team has solved some task, next team's rank will increase,\n #otherwise it'll be same\n if teams[team][3]:\n rank += 1\n #active_teams.append((team, teams[team][2]))\n\n config['teams_list'] = [((i,)+tuple(teams[i])) for i in teams_order]\n config['active_teams'] = active_teams\n config['results'] = TeamsResults\n config['problem_list'] = (problems)\n config['accepts'] = accepted_counters\n config['rejects'] = rejected_counters\n config['total_accepts'] = reduce(\n lambda x,y: x+y, [accepted_counters[i] for i in accepted_counters])\n config['total_rejects'] = reduce(\n lambda x,y: x+y, [rejected_counters[i] for i in rejected_counters])\n\n if len(submissions) > 0:\n succ = filter(\n lambda x: True if x['result'] == 'OK' or x['result'] == 'OC' else False,\n sorted(submissions, key=lambda y: y['time'], reverse=1))\n if succ==[]:\n config['last_success'] = None\n else:\n config['last_success'] = succ[0]\n config['last_submission'] = sorted(submissions,\n\t key=lambda x: x['time'], reverse=1)[0]\n else:\n\t config['last_success'] = None\n\t config['last_submission'] = None\n except ParsingError as e:\n config = {'error': e.message}\n return config\n","sub_path":"flask/tsweb/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"59363108","text":"import numpy.core.multiarray\nfrom picamera import PiCamera\nfrom time import sleep\nfrom cv2 import aruco\nimport cv2\nimport io\nimport numpy as np\nimport threading\nimport math\nimport time\n\n# Aruco dictionary used\naruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n\n# Constants for linear regression\nm = 0.0892\nb = 0.4012\n\n# Markers are 3 in, or 76.2 mm\nmarker_width = 76.2\n\nclass Finder:\n def __init__(self):\n # Init function sets up camera\n self.camera = PiCamera()\n # Make resolution smaller to increase speed, larger to increase accuracy\n # Note: different resolutions require different camera calibration\n self.camera.resolution = (800, 600)\n #self.camera.resolution = (1920, 1080)\n self.camera.exposure_mode = 'off'\n self.camera.shutter_speed = 10000\n self.camera.awb_mode = 'off'\n self.marker_detection = {}\n \n # Camera calibration for 800x600 images\n # self.camera_matrix = np.load('mat800x600.npy')\n # self.dist_coeffs = np.load('dist800x600.npy')\n\n # absolute paths\n self.camera_matrix = np.load('src/Finder/mat800x600.npy')\n self.dist_coeffs = np.load('src/Finder/dist800x600.npy')\n \n self.markers = {}\n self.run_thread = True\n \n self.thread = threading.Thread(target=self.detection_thread)\n self.did_detect = False\n \n def start(self):\n # Easy method to start marker 
detection\n self.thread.start()\n \n def stop(self):\n # Easy method to stop marker detection\n self.run_thread = False\n self.thread.join()\n \n def detection_thread(self):\n # Thread runs until stopped\n while self.run_thread:\n self.find_markers()\n #print(self.markers)\n\n def find_markers(self):\n # Image is captured to bit stream and decoded\n img_stream = io.BytesIO()\n self.camera.capture(img_stream, 'jpeg')\n img_stream.seek(0)\n img_bytes = np.asarray(bytearray(img_stream.read()), dtype=np.uint8)\n img = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)\n \n # Image is converted to grayscale first\n # gray = cv2.cvtColor(half, cv2.COLOR_BGR2GRAY)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Thresholding is used to convert to a binary image\n # Otsu's method maximizes contrast based on the input image\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n \n # Alternative: Histogram equalization can be used\n # thresh = cv2.equalizeHist(gray)\n\n # Use built-in detection method\n detection = aruco.detectMarkers(thresh, aruco_dict)\n rvecs, tvecs, wvecs = aruco.estimatePoseSingleMarkers(detection[0], 76.2, self.camera_matrix, self.dist_coeffs)\n\n # Run if markers are detected\n if detection[1] is not None:\n self.did_detect = True\n for i in range(len(detection[1])):\n marker_id = detection[1][i][0]\n \n # Select built-in \n x, y, z = tvecs[i][0]\n distance = m*z + b;\n angle_h = math.atan(x/z)\n angle_v = math.atan(y/z)\n \n # Angle Correction\n angle_h = angle_h*1.1136 - 0.0109\n\n # Updates marker entries in dictionary\n self.markers[marker_id] = (distance, angle_h, angle_v, time.time())\n # self.markers[marker_id] = (z, angle_h, angle_v, time.time())\n else:\n self.did_detect = False\n \n","sub_path":"demo1/src/Finder/Finder.py","file_name":"Finder.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"554116537","text":"from itertools import chain\nfrom easydict import EasyDict\nimport requests\n\nfrom soccermetrics import SoccermetricsRestException\nfrom soccermetrics import __api_version__ as API_VERSION\n\nclass Resource(object):\n \"\"\"\n Represents a REST resource. 
Sets high-level endpoint for API.\n\n :param base_uri: Base URI of API.\n :type base_uri: string\n :param auth: Authentication credential.\n :type auth: tuple\n \"\"\"\n def __init__(self, base_uri, auth):\n self.base_uri = base_uri\n self.auth = auth\n\n self.endpoint = \"/%s\" % API_VERSION\n\n def get(self, uid=None, **kwargs):\n \"\"\"\n Retrieves a representation of REST resource.\n\n The response is an object with the following attributes:\n\n +------------+-----------------------+\n | Attribute | Description |\n +============+=======================+\n | headers | Response headers |\n +------------+-----------------------+\n | meta | Response meta-data |\n +------------+-----------------------+\n | data | Response data |\n +------------+-----------------------+\n\n :param uid: Unique ID of API resource representation.\n :type uid: integer\n :param kwargs: Collection of query parameters.\n :type kwargs: dict\n :returns: Resource representation.\n :rtype: ``EasyDict`` object.\n \"\"\"\n uri = \"%s%s/%d\" % (self.base_uri, self.endpoint, uid) if uid else \\\n \"%s%s\" % (self.base_uri, self.endpoint)\n\n full_param_dict = dict(kwargs, **self.auth)\n\n try:\n resp = requests.get(uri,params=full_param_dict)\n if resp.status_code == 200:\n return Response(self.base_uri, self.auth, resp)\n else:\n data = resp.json()\n raise SoccermetricsRestException(resp.status_code,data['uri'],data['message'])\n except requests.exceptions.RequestException as e:\n raise SoccermetricsRestException(500, uri, msg=e)\n\n def head(self):\n \"\"\"\n Retrieves header data of REST resource.\n\n The response is an object with the following attribute:\n\n +------------+-----------------------+\n | Attribute | Description |\n +============+=======================+\n | headers | Response headers |\n +------------+-----------------------+\n\n :returns: Header data.\n :rtype: ``EasyDict`` object.\n \"\"\"\n uri = \"%s%s\" % (self.base_uri, self.endpoint)\n\n resp = requests.head(uri,params=self.auth)\n\n if resp.status_code < 400:\n return EasyDict(dict(headers=resp.headers))\n else:\n raise SoccermetricsRestException(resp.status_code,resp.url)\n\n def options(self):\n \"\"\"\n Retrieves documentation of REST resource.\n\n If the status code is 200 (OK), returns the documentation. 
Otherwise,\n returns an error.\n\n The response is an object with the following attributes:\n\n +------------+------------------------+\n | Attribute | Description |\n +============+========================+\n | headers | Response headers |\n +------------+------------------------+\n | data | Resource documentation |\n +------------+------------------------+\n\n Link resources are not included in the documentation.\n\n :returns: Resource documentation data.\n :rtype: ``EasyDict`` object.\n \"\"\"\n uri = \"%s%s\" % (self.base_uri, self.endpoint)\n\n resp = requests.options(uri,params=self.auth)\n\n if resp.status_code == 200:\n return Response(self.base_uri, self.auth, resp)\n else:\n raise SoccermetricsRestException(resp.status_code,resp.url)\n\n\nclass Link(Resource):\n \"\"\"\n Access to linked resources, can also access any API endpoint.\n\n Derived from :class:`Resource`.\n \"\"\"\n def __init__(self, base_uri, auth):\n \"\"\"\n Constructor of Link class.\n\n :param base_uri: Base URI of API.\n :type base_uri: string\n :param auth: Authentication credential.\n :type auth: tuple\n \"\"\"\n super(Link, self).__init__(base_uri, auth)\n\n def get(self, uri, **kwargs):\n \"\"\"\n Returns a representation of the REST resource.\n\n Derived from :func:`Resource.get`.\n\n :param uri: URI of REST resource, relative to base URI.\n :type uri: string\n :param kwargs: Collection of query parameters.\n :type kwargs: dict\n :returns: Resource representation.\n :rtype: ``EasyDict`` object.\n \"\"\"\n self.endpoint = uri\n return super(Link, self).get(**kwargs)\n\n def head(self, uri):\n \"\"\"\n Retrieves header data of REST resource.\n\n Derived from :func:`Resource.head`.\n\n :param uri: URI of REST resource, relative to base URI.\n :type uri: string\n :returns: Header data.\n :rtype: ``EasyDict`` object.\n \"\"\"\n self.endpoint = uri\n return super(Link, self).head()\n\n def options(self, uri):\n \"\"\"\n Retrieves documentation of REST resource representation.\n\n Derived from :func:`Resource.options`.\n\n :param uri: URI of REST resource, relative to base URI.\n :type uri: string\n :returns: Resource documentation data.\n :rtype: ``EasyDict`` object.\n \"\"\"\n self.endpoint = uri\n return super(Link, self).options()\n\n\nclass Response(Resource):\n \"\"\"\n Represents a REST API response object.\n\n Derived from :class:`Resource`.\n \"\"\"\n def __init__(self, base_uri, auth, resp):\n \"\"\"\n Constructor of Response class.\n\n :param base_uri: Base URI of API.\n :type base_uri: string\n :param auth: Authentication credential.\n :type auth: tuple\n :param resp: response object from API request\n :type resp: JSON\n \"\"\"\n super(Response, self).__init__(base_uri, auth)\n jresp = resp.json()\n self._meta = EasyDict(jresp['meta'])\n self.status = resp.status_code\n self.headers = EasyDict(resp.headers)\n self.data = [EasyDict(rec) for rec in jresp['result']]\n\n def _iter(self):\n \"\"\"Custom iterator to retrieve all data from API response\"\"\"\n resp = self\n while True:\n yield (resp.data)\n if not resp._meta or not resp._meta.next:\n raise StopIteration\n else:\n resp = resp.next()\n\n @property\n def page(self):\n \"\"\"Current page property\"\"\"\n return self._meta.page if self._meta else 0\n\n @property\n def pages(self):\n \"\"\"Total pages property\"\"\"\n return self._meta.total_pages if self._meta else 0\n\n @property\n def records_page(self):\n \"\"\"Records per page property\"\"\"\n return self._meta.records if self._meta else 0\n\n @property\n def records(self):\n \"\"\"Total 
records property\"\"\"\n return self._meta.total_records if self._meta else 0\n\n def first(self):\n \"\"\"Go to first page of API response\"\"\"\n if self._meta:\n self.endpoint = self._meta.first\n return super(Response, self).get()\n else:\n return None\n\n def next(self):\n \"\"\"Go to next page of API response\"\"\"\n if self._meta and self._meta.next:\n self.endpoint = self._meta.next\n return super(Response, self).get()\n return None\n\n def prev(self):\n \"\"\"Go to previous page of API response\"\"\"\n if self._meta and self._meta.prev:\n self.endpoint = self._meta.prev\n return super(Response, self).get()\n return None\n\n def last(self):\n \"\"\"Go to last page of API response\"\"\"\n if self._meta:\n self.endpoint = self._meta.last\n return super(Response, self).get()\n return None\n\n def all(self):\n \"\"\"Retrieve all data of API response\"\"\"\n rec = []\n for page in self._iter():\n rec.extend(page)\n return rec","sub_path":"soccermetrics/rest/resources/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"328368133","text":"import pygame\nimport random\nimport board_data\nimport helpers\n\nsuns = pygame.sprite.Group()\n\nbalance = 100 # starting bank balance\n\n\ndef balanceBgDisplay(screen):\n balanceBg = pygame.Surface((200, 28))\n balanceBg.fill((255, 255, 255))\n screen.blit(balanceBg, (190, 575))\n\n\ndef balanceDisplay(screen, balanceValue):\n black = (0, 0, 0)\n font = pygame.font.SysFont(\"Times New Roman\", 24)\n balanceLabel = font.render(\"Your balance:\", True, black)\n balanceValue = font.render(str(balance), True, black)\n screen.blit(balanceLabel, (200, 575))\n screen.blit(balanceValue, (340, 575))\n\n\nclass Sun(pygame.sprite.Sprite):\n sunSize = 50\n spawnOffset = 20\n\n def __init__(self):\n print(\"Sun Sprite\")\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('gfx/misc/sun.png')\n self.image = pygame.transform.scale(self.image, (self.sunSize, self.sunSize))\n self.rect = self.image.get_rect()\n self.rect.center = (\n random.randint(board_data.Xmin + self.spawnOffset, board_data.Xmax - self.spawnOffset),\n random.randint(board_data.Ymin + self.spawnOffset, board_data.Ymax - self.spawnOffset)\n )\n t0 = pygame.time.get_ticks() + 5000\n t1 = t0 + 10000 # 10 s\n helpers.generationTime = random.randint(t0, t1)\n\n\ndef spawn(screen):\n sunSize = 50\n print(\"sun\")\n im = pygame.image.load('gfx/misc/sun.png')\n im = pygame.transform.scale(im, (sunSize, sunSize))\n suns.append(im)\n screen.blit(im, (\n random.randint(board_data.Xmin, board_data.Xmax - sunSize),\n random.randint(board_data.Ymin, board_data.Ymax - sunSize)\n ))\n t0 = pygame.time.get_ticks() + 5000\n t1 = t0 + 10000 # 10 s\n helpers.generationTime = random.randint(t0, t1)\n","sub_path":"suns.py","file_name":"suns.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"283506321","text":"from datetime import datetime\nfrom decimal import Decimal\nfrom functools import partial\n\nfrom mexbtcapi import concepts\nfrom mexbtcapi.concepts.currencies import BTC\nfrom mexbtcapi.concepts.market import Market as BaseMarket\nimport mtgox as low_level\n\n\nclass MtgoxTicker(concepts.market.Ticker):\n TIME_PERIOD = 24 * 60 * 60\n\n\nclass Market(BaseMarket):\n MARKET_NAME = \"MtGox\"\n\n def __init__(self, currency):\n BaseMarket.__init__(self, self.MARKET_NAME, BTC, 
currency)\n # to convert low level data\n self.multiplier = low_level.multiplier[currency.name]\n self.xchg_factory = partial(concepts.currency.ExchangeRate,\n BTC, currency)\n\n def getTicker(self):\n time = datetime.now()\n data = low_level.ticker(self.c2.name)\n\n data2 = [(Decimal(data[name]['value_int']) / self.multiplier)\n for name in ('high', 'low', 'avg', 'last', 'sell', 'buy')]\n hi, lo, av, la, se, bu = map(self.xchg_factory, data2)\n\n volume = long(data['vol']['value_int'])\n ticket = MtgoxTicker(market=self, time=time, high=hi, low=lo,\n average=av, last=la, sell=se, buy=bu,\n volume=volume)\n return ticket\n","sub_path":"mexbtcapi/api/mtgox/http_v1/high_level.py","file_name":"high_level.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"352655144","text":"import re, os, shutil, csv\n\nimport pandas as pd\n\nfrom sqlalchemy import and_\nfrom flask import current_app\n\nfrom app.libs.upload import df2dict, df2list\nfrom pathlib import Path\nfrom app.models import db\nfrom app.models.record_config import CancerTypes\nfrom app.models.run_info import RunInfo, SeqInfo\nfrom app.models.sample_v import (SampleInfoV, ApplyInfo, TreatInfoV, PathologyInfo, Operation, PatientInfoV,\n FamilyInfoV)\nfrom app.models.mutation import Mutation, Mutations\nfrom app.models.report import Report\n\n\ndef df_to_dict(df):\n '''\n :param df: pd.Dataframe\n :return: list\n '''\n result = []\n for i in df.index:\n dic_row = {}\n df_row = df.loc[i].copy()\n for k in df.columns:\n dic_row[k] = str(df_row[k])\n result.append(dic_row)\n return result\n\n\ndef del_db(db, snvs):\n if snvs:\n try:\n for snv in snvs:\n db.session.delete(snv)\n except:\n db.session.delete(snvs)\n\n\ndef model_to_dict(snvs, list_m):\n if snvs:\n for snv in snvs:\n list_m.append(snv.to_dict())\n return None\n\n\ndef first_check(snvs, list_m, list_c=None):\n if snvs:\n for snv in snvs:\n if list_c:\n if snv.status in list_c:\n drugs = snv.drug\n dic_snv = snv.to_dict()\n # if 'fu_type' not in dic_snv.keys():\n # dic_snv['fu_type'] = dic_snv['mu_name_usual']\n l_drug = []\n if drugs:\n for drug in drugs:\n l_drug.append(drug.to_dict())\n dic_snv['drugs'] = l_drug\n list_m.append(dic_snv)\n else:\n dic_snv = snv.to_dict()\n drugs = snv.drug\n l_drug = []\n if drugs:\n for drug in drugs:\n l_drug.append(drug.to_dict())\n dic_snv['drugs'] = l_drug\n list_m.append(dic_snv)\n return None\n\n\ndef get_rep_item(item):\n pat = '(.*)([0-9]{2,3})(.*)'\n m = re.match(pat, item)\n if m:\n item = m.group(2)\n return item\n\n\ndef set_gene_list(list_gene, N):\n list_row = []\n dic_h = {2: 'r', 1: 'm', 0: 'l'}\n for gene_l in list_gene:\n dic_row = {}\n for i in range(N):\n if gene_l:\n tem = gene_l.pop()\n dic_row[dic_h.get(i)] = tem\n else:\n dic_row[dic_h.get(i)] = {'gene': '', 'transcript': ''}\n list_row.append(dic_row)\n return list_row\n\n\ndef dict2df(list_dic):\n dic_out = {}\n for k in (k for k in list_dic[0].keys()):\n dic_out[k] = []\n for dic in list_dic:\n dic_out[k].append(dic[k])\n df = pd.DataFrame(dic_out)\n return df\n\n\ndef generate_detail(dict_d):\n pass\n\n\ndef md_create(df, dic_in, disease):\n mutation = dic_in['okr_mu']\n if mutation == 'exon 14 skipping' and 'MET' in dic_in['gene']:\n dic_in['gene'] = 'MET'\n elif mutation == 'vIII' and 'EGFR' in dic_in['gene']:\n dic_in['gene'] = 'EGFR'\n elif mutation == 'fusion':\n dic_in['gene'] = dic_in['gene'].split('-')[-1]\n gene = dic_in['gene']\n #print(mutation)\n df_mutation = 
df[df['disease'].str.contains(disease) &\n df['gene_name'].str.contains(gene) & (df['protein_alteration'] == mutation)]\n\n dic_out = df2dict(df_mutation)\n #print(dic_out)\n\n return dic_out\n\n\ndef get_okr(df, disease, gene, mutation, drug_effect, grade=None):\n levels = ['NCCN', 'Clinical + III', 'Clinical + II/III', 'Clinical + II', 'Clinical + I/II', 'Clinical + I']\n if grade:\n for i in ['FDA', 'ESMO', 'EMA']:\n levels.insert(1, i)\n df['drug_effect'].replace('.', 'indicated', inplace=True)\n df_mutation = df[df['disease'].str.contains(disease) &\n df['gene_name'].str.contains(gene) & (df['protein_alteration'] == mutation)]\n df_list = []\n\n tem = set()\n if not df_mutation.empty:\n for level in levels:\n if level in ['NCCN', 'FDA', 'ESMO', 'EMA']:\n df_le = df_mutation[df_mutation['evidence_level'].str.contains(level)].copy()\n else:\n df_le = df_mutation[df_mutation['evidence_level'] == level].copy()\n if not df_le.empty:\n if tem:\n for k in drug_effect - tem:\n df_drug = df_le[df_le['drug_effect'] == k].copy()\n if not df_drug.empty:\n df_list.append(df_drug)\n tem.add(k)\n else:\n for k in drug_effect:\n df_drug = df_le[df_le['drug_effect'] == k].copy()\n if not df_drug.empty:\n tem.add(k)\n df_list.append(df_drug)\n\n return df_list, drug_effect - tem\n\n\ndef get_mutation_parent():\n file_parent = os.path.join(os.getcwd(), 'app/static/pre_report/template_config/variant_class_parent.csv')\n df_p = pd.read_csv(file_parent, delimiter=',', keep_default_na=False)\n dic_p = {}\n for k, p in df_p[['kids', 'parents']].values:\n if k in dic_p.keys():\n dic_p[k].add(p)\n else:\n if p:\n dic_p[k] = {p}\n else:\n dic_p[k] = ''\n return dic_p\n\n\ndef get_parent_variant(mutation, dic_mu, par_l):\n '''\n :param mutation: 突变\n :param dic_mu: 父子突变对应表\n :param par_l: 所有有关系的突变\n :return: par_l\n '''\n par = dic_mu.get(mutation)\n for k in dic_mu.keys():\n if k.startswith(mutation):\n par = dic_mu.get(k)\n # print(dic_mu['RAS amplification'])\n # print(par)\n # if mutation in dic_mu.keys():\n # par = dic_mu.get(mutation)\n # else:\n # par = {}\n # for k in dic_mu.keys():\n # if k.startswith(mutation):\n # par = dic_mu.get(mutation)\n if par:\n # par_l.append(mutation)\n for mu in par:\n par_l.append(mu)\n # break\n get_parent_variant(mu, dic_mu, par_l)\n return par_l\n\n\ndef okr_create(df, disease, gene, mutation, drug_effect):\n '''\n :param df: okr表格生成的dataframe\n :param disease: 肿瘤类型\n :param gene: 基因名称(融合的选其中一个)\n :param mutation: 突变(融合直接写fusion 例如:EML4-ALK fusion;gene:ALK; mutation: fusion)\n :param drug_effect: {'indicated', 'contraindicated', 'resistance', 'not_recommended'}\n :return: dict_out: okr详细信息 type: dict\n '''\n dic_p = get_mutation_parent()\n df_list, le_key = get_okr(df, disease, gene, mutation, drug_effect)\n if 'fusion' in mutation and gene in mutation:\n par_l = get_parent_variant(mutation, dic_p, [])\n else:\n par_l = get_parent_variant('{} {}'.format(gene, mutation), dic_p, [])\n\n list_mu = set()\n for mu in par_l:\n for p in set(df['protein_alteration'].values):\n if p in mu:\n list_mu.add(p)\n if df_list:\n if le_key:\n le_list, _ = get_okr(df, 'Unspecified Solid Tumor', gene, mutation, le_key)\n df_list.extend(le_list)\n for p in list_mu:\n le_list, _ = get_okr(df, disease, gene, p, le_key)\n df_list.extend(le_list)\n df_out = pd.concat(df_list, sort=False)\n dic_out = df2dict(df_out)\n else:\n dic_out = ''\n return dic_out\n\n\ndef okr_create_n(dic_in, df, disease, drug_effect):\n mutation = dic_in['okr_mu']\n if mutation == 'exon 14 skipping':\n dic_in['gene'] = 
'MET'\n elif mutation == 'fusion':\n dic_in['gene'] = dic_in['gene'].split('-')[-1]\n gene = dic_in['gene']\n dic_out = okr_create(df, disease, gene, mutation, drug_effect)\n return dic_out\n\n\ndef grade_mutation(df, disease, gene, mutation, drug_effect):\n df_list, le_key = get_okr(df, disease, gene, mutation, drug_effect, 1)\n dic_p = get_mutation_parent()\n if ('fusion' in mutation) and (gene in mutation):\n par_l = get_parent_variant(mutation, dic_p, [])\n else:\n par_l = get_parent_variant('{} {}'.format(gene, mutation), dic_p, [])\n\n list_mu = set()\n for mu in par_l:\n for p in set(df['protein_alteration'].values):\n if p in mu:\n list_mu.add(p)\n levels = set()\n grade = ''\n if df_list:\n if le_key:\n for p in list_mu:\n le_list, _ = get_okr(df, disease, gene, p, le_key, 1)\n df_list.extend(le_list)\n df_out = pd.concat(df_list, sort=False)\n evidence_level = set(df_out['evidence_level'].values)\n for level in evidence_level:\n level = level.split(' ')[0]\n levels.add(level)\n for k in ['NCCN', 'FDA', 'ESMO', 'EMA']:\n if k in levels:\n grade = 'I类'\n if not grade:\n if 'Clinical' in levels:\n grade = 'II类'\n else:\n grade = 'III类'\n else:\n grade = 'III类'\n return grade\n\n\ndef get_grade(dic_in, df, disease, drug_effect):\n # type = dic_in['type']\n # if type == 'Fusion':\n # mutation = 'fusion'\n # dic_in['gene'] = dic_in['gene'].split('-')[-1]\n # elif type == 'CNV':\n # mutation = 'amplification'\n # elif type == 'DEL':\n # mutation = 'exon {} deletion'.format(dic_in['exon'].strip('exon'))\n # else:\n # mutation = dic_in['pHGVS_1'].split('.')[1]\n mutation = dic_in['okr_mu']\n if mutation == 'exon 14 skipping' and 'MET' in dic_in['gene']:\n dic_in['gene'] = 'MET'\n elif mutation == 'fusion':\n dic_in['gene'] = dic_in['gene'].split('-')[-1]\n gene = dic_in['gene']\n print(gene, mutation)\n grade = grade_mutation(df, disease, gene, mutation, drug_effect)\n return grade\n\n\ndef get_drug(list_dic):\n list_nccn = []\n list_clinical = []\n for dic_out in list_dic.values():\n for level in ['NMPA','NCCN', 'Clinical + III', 'Clinical + II/III']:\n if level in dic_out['evidence_level']:\n for row in dic_out['drug'].split(','):\n list_nccn.append(\n {'drug': row, 'level': level, 'drug_effect': dic_out['drug_effect'], 'id': dic_out['id']})\n for level in ['Clinical + II', 'Clinical + I/II', 'Clinical + I']:\n if level in dic_out['evidence_level']:\n for row in dic_out['drug'].split(','):\n list_clinical.append(\n {'drug': row, 'level': level, 'drug_effect': dic_out['drug_effect'], 'id': dic_out['id']})\n if list_nccn:\n drug = list_nccn\n elif list_clinical:\n drug = list_clinical if len(list_clinical) < 6 else list_clinical[:5]\n else:\n drug = []\n drug_set = set()\n effect_set = set()\n out_drug = []\n\n rep_eff = {'indicated': '敏感', 'contraindicated': '禁忌',\n 'resistance': '耐药', 'not_recommended': '不推荐'}\n\n if drug:\n for row in drug:\n effect_set.add(row['drug_effect'])\n if 'indicated' in effect_set and 'not_recommended' in effect_set:\n for row in drug:\n if 'not_recommended' in row['drug_effect']:\n drug.remove(row)\n for row in drug:\n drug_set.add(row['drug'])\n for row in drug:\n if row['drug'] in drug_set:\n effect = row['drug_effect']\n row['drug_effect'] = convert_str(effect, rep_eff)\n out_drug.append(row)\n drug_set.remove(row['drug'])\n return out_drug\n\n\ndef convert_str(row, rep):\n rep = dict((re.escape(k), v) for k, v in rep.items())\n pat = re.compile('|'.join(rep.keys()))\n out = pat.sub(lambda n: rep[re.escape(n.group(0))], row)\n return out\n\n\ndef 
save_reesult(seq, username, sam):\n run = seq.run_info\n run_name = run.name\n # print(run_name)\n flag = True\n path_result = current_app.config['RESULT_DIR']\n dir_res = current_app.config['RES_REPORT']\n dir_report = os.path.join(dir_res, 'report')\n mg_id = sam.sample_id\n req_mg = sam.apply_info.req_mg\n dir_report_mg = os.path.join(dir_report, mg_id)\n if not os.path.exists(dir_report_mg):\n os.mkdir(dir_report_mg)\n result_f = ''\n msg = ''\n dict_result = {}\n for path_run in os.listdir(path_result):\n if not run_name in path_run:\n continue\n for root, paths, files in os.walk(os.path.join(path_result, path_run)):\n for file in files:\n if seq.sample_name in root:\n if seq.sample_name in file and file.endswith('.results.xls'):\n result_f = (os.path.join(root, file))\n shutil.copy2(os.path.join(root, file), dir_report_mg)\n if 'cn_results.png' in file:\n shutil.copy2(os.path.join(root, file), dir_report_mg)\n if result_f:\n dfs = pd.read_excel(result_f, sheet_name=None, keep_default_na=False, engine='xlrd')\n\n for name, df in dfs.items():\n dict_result[name] = df2list(df)\n else:\n msg = '文件不存在'\n flag = False\n # print(result_f)\n list_mu = (dict_result.get('filter'))\n report_code = '{}_{}'.format(seq.sample_mg, seq.report_item)\n report = Report.query.filter(and_(Report.run_name == run_name,\n Report.req_mg == seq.sample_mg,\n Report.report_item == seq.report_item)).first()\n if report:\n if report.mutation:\n mutations = report.mutation\n # print(mutations)\n for mu in mutations.mutation:\n # print(mu.id)\n mutations.mutation.remove(mu)\n drugs = mu.drug\n del_db(db, drugs)\n db.session.delete(mu)\n # report.mutation.remove(mutations)\n db.session.delete(mutations)\n else:\n report = Report(run_name=run_name, req_mg=seq.sample_mg, report_item=seq.report_item)\n # report.stage = '突变审核'\n db.session.add(report)\n db.session.commit()\n mutations = Mutations()\n # print(report.id)\n report = Report.query.filter(and_(Report.run_name == run_name,\n Report.req_mg == seq.sample_mg,\n Report.report_item == seq.report_item)).first()\n report.report_user = username\n report.stage = '突变审核'\n report.mutation = mutations\n sam = seq.sample_info_v\n apply = sam.apply_info\n cnacer_t = CancerTypes.query.filter(CancerTypes.name == seq.cancer).first()\n apply.cancer = cnacer_t.okr_name.title()\n # print(apply.cancer)\n report.sample_info_v = sam\n if list_mu:\n for row in list_mu:\n af = row.get('变异丰度')\n try:\n af = float(af)\n if af < 1:\n af = format(af, '.1%')\n except:\n pass\n\n mutation = Mutation(type=row.get('变异类型'), gene=row.get('基因'), transcript=row.get('转录本'),\n exon=row.get('外显子'), cHGVS=row.get('编码改变'), pHGVS_3=row.get('氨基酸改变'),\n pHGVS_1=row.get('氨基酸改变-简写'), chr_start_end=row.get('基因座'),\n function_types=row.get('功能影响'), mu_af=af,\n depth=row.get('深度'), ID_v=row.get('ID'), hotspot=row.get('Hotspot'),\n okr_mu=row.get('OKR注释类型'), mu_type=row.get('报告类型'))\n mutations.mutation.append(mutation)\n\n msg = '{} {}的结果保存成功'.format(run_name, seq.sample_name)\n seq.status = '结果已保存'\n db.session.commit()\n else:\n if msg:\n pass\n else:\n msg = '{} {}未检测到变异'.format(run_name, seq.sample_name)\n seq.status = '结果已保存'\n db.session.commit()\n\n return msg, flag\n\n\ndef save_reesult_lims(seq, username, sam):\n run = seq.run_info\n run_name = run.name\n flag = True\n path_result = current_app.config['RESULT_DIR']\n dir_res = current_app.config['RES_REPORT']\n dir_report = os.path.join(dir_res, 'report')\n mg_id = seq.sample_name\n # req_mg = sam.apply_info.req_mg\n dir_report_mg = 
os.path.join(dir_report, mg_id)\n if not os.path.exists(dir_report_mg):\n os.mkdir(dir_report_mg)\n result_f = ''\n msg = ''\n dict_result = {}\n for path_run in os.listdir(path_result):\n if not run_name in path_run:\n continue\n for root, paths, files in os.walk(os.path.join(path_result, path_run)):\n for file in files:\n if seq.sample_name in root:\n if seq.sample_name in file and file.endswith('.results.xls'): # 结果文件拷贝到 static/res_report/report/mgid目录下\n result_f = (os.path.join(root, file))\n shutil.copy2(os.path.join(root, file), dir_report_mg)\n if 'cn_results.png' in file: # 拷贝CNV结果\n shutil.copy2(os.path.join(root, file), dir_report_mg)\n if result_f:\n dfs = pd.read_excel(result_f, sheet_name=None, keep_default_na=False, engine='xlrd')\n for name, df in dfs.items():\n dict_result[name] = df2list(df)\n else:\n msg = '文件不存在'\n flag = False\n list_mu = (dict_result.get('filter'))\n # report_code = '{}_{}'.format(seq.sample_mg, seq.report_item)\n report = Report.query.filter(and_(Report.run_name == run_name,\n Report.req_mg == seq.sample_mg,\n Report.report_item == seq.report_item)).first()\n if report:\n if report.mutation:\n mutations = report.mutation # 报告突变信息\n for mu in mutations.mutation:\n mutations.mutation.remove(mu)\n drugs = mu.drug\n del_db(db, drugs)\n db.session.delete(mu)\n db.session.delete(mutations)\n else:\n report = Report(run_name=run_name, req_mg=seq.sample_mg, report_item=seq.report_item)\n db.session.add(report)\n db.session.commit()\n mutations = Mutations()\n report = Report.query.filter(and_(Report.run_name == run_name,\n Report.req_mg == seq.sample_mg,\n Report.report_item == seq.report_item)).first()\n report.report_user = username\n report.stage = '突变审核'\n report.mutation = mutations\n # Note: 样本信息和申请单信息不在关联并且不在本地保存, 从lims获取\n # sam = seq.sample_info_v # 样本信息\n # apply = sam.apply_info # 申请单信息\n # cnacer_t = CancerTypes.query.filter(CancerTypes.name == seq.cancer).first()\n # apply.cancer = cnacer_t.okr_name.title()\n # report.sample_info_v = sam\n if list_mu:\n for row in list_mu:\n af = row.get('变异丰度')\n try:\n af = float(af)\n if af < 1:\n af = format(af, '.1%')\n except:\n pass\n\n mutation = Mutation(type=row.get('变异类型'), gene=row.get('基因'), transcript=row.get('转录本'),\n exon=row.get('外显子'), cHGVS=row.get('编码改变'), pHGVS_3=row.get('氨基酸改变'),\n pHGVS_1=row.get('氨基酸改变-简写'), chr_start_end=row.get('基因座'),\n function_types=row.get('功能影响'), mu_af=af,\n depth=row.get('深度'), ID_v=row.get('ID'), hotspot=row.get('Hotspot'),\n okr_mu=row.get('OKR注释类型'), mu_type=row.get('报告类型'))\n mutations.mutation.append(mutation)\n\n msg = '{} {}的结果保存成功'.format(run_name, seq.sample_name)\n seq.status = '结果已保存'\n db.session.commit()\n else:\n if msg:\n pass\n else:\n msg = '{} {}未检测到变异'.format(run_name, seq.sample_name)\n seq.status = '结果已保存'\n db.session.commit()\n\n return msg, flag\n\n\ndef get_qc_raw(seq):\n run = seq.run_info\n run_name = run.name\n # print(run_name)\n path_result = current_app.config['RESULT_DIR']\n result_f = ''\n msg = ''\n dict_result = {}\n for path_run in os.listdir(path_result):\n if not run_name in path_run:\n continue\n for root, paths, files in os.walk(os.path.join(path_result, path_run)):\n for file in files:\n if seq.sample_name in file and file.endswith('.results.xls'):\n result_f = (os.path.join(root, file))\n if result_f:\n dfs = pd.read_excel(result_f, sheet_name=None, keep_default_na=False)\n\n for name, df in dfs.items():\n dict_result[name] = df2list(df)\n else:\n msg = '文件不存在'\n dic_out = {'qc': dict_result.get('QC'), 'filter': 
dict_result.get('filter'),\n 'raw': dict_result.get('Mutation.raw'), 'w_list': dict_result.get('白名单')}\n\n return dic_out\n\n\ndef get_raw_file(seq):\n run = seq.run_info\n run_name = run.name\n # print(run_name)\n path_result = current_app.config['RESULT_DIR']\n # print(path_result)\n msg = ''\n dict_result = {}\n for path_run in os.listdir(path_result):\n if not run_name in path_run:\n continue\n for root, paths, files in os.walk(os.path.join(path_result, path_run)):\n for file in files:\n if seq.sample_name in file:\n if file == '{}_result.xlsx'.format(seq.sample_name):\n dict_result['lims_result'] = Path(os.path.join(root, file)).as_posix()\n if file == '{}_QC.xlsx'.format(seq.sample_name):\n dict_result['lims_qc'] = Path(os.path.join(root, file)).as_posix()\n\n if file == '{}.results.xls'.format(seq.sample_name):\n dict_result['result'] = Path(os.path.join(root, file)).as_posix()\n if file == '{}.bam'.format(seq.sample_name):\n dict_result['bam'] = Path(os.path.realpath(os.path.join(root, file))).as_posix()\n if file == '{}.bam.bai'.format(seq.sample_name):\n dict_result['bai'] = Path(os.path.realpath(os.path.join(root, file))).as_posix()\n return dict_result\n\n\ndef get_result_file(seq, key):\n run = seq.run_info\n run_name = run.name\n path_result = current_app.config['RESULT_DIR']\n result_f = ''\n msg = ''\n dict_result = {}\n for path_run in os.listdir(path_result):\n if not run_name in path_run:\n continue\n for root, paths, files in os.walk(os.path.join(path_result, path_run)):\n for file in files:\n if seq.sample_name in file and file.endswith(key):\n result_f = (os.path.join(root, file))\n return result_f\n\n\ndef get_okr_vcf(result_file, list_mu, vcf_file):\n filter_mu = set()\n if list_mu:\n for row in list_mu:\n if row['type'] in ['SNV', 'INS', 'DEL', 'COMPLEX']:\n filter_mu.add('{}:{}'.format(row['gene'], row['cHGVS']))\n elif row['type'] == 'CNV':\n filter_mu.add(row['gene'])\n else:\n filter_mu.add(row['exon'])\n list_w = []\n with open(result_file, 'r')as f_r:\n f = csv.reader(f_r, delimiter='\\t')\n for row in f:\n if row[0].startswith('#'):\n list_w.append(row)\n continue\n if filter_mu:\n if row[2] in filter_mu:\n list_w.append(row)\n f_r.close()\n with open(vcf_file, 'w')as f_w:\n for row in list_w:\n f_w.write('\\t'.join(row) + '\\n')\n\n\ndef get6row(list_okr):\n list_out = []\n for level in ['VI', 'III', 'II', 'I']:\n for row in list_okr:\n if row['grade'] == level:\n list_out.append(row)\n list_out = ((list_out, list_out[:5])[len(list_out) > 6])\n return list_out\n\n\ndef get_clincl(dic_summary):\n list_okrs = []\n for row in dic_summary:\n list_okr = []\n for okr in row['okr']:\n if okr.get('Clinical Trials') != '无证据' and okr.get('Clinical Trials') != '禁忌症':\n okr['drug_effect'] = '敏感'\n list_okr.append(\n {'drug': okr.get('相应治疗'), 'level': 'Clinical + {}'.format(okr.get('分期')),\n 'drug_effect': okr['drug_effect'], 'grade': okr.get('分期')})\n list_okrs.append({'mutation': row['mutation'], 'okr': get6row(list_okr)})\n return list_okrs\n","sub_path":"app/libs/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":25060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"59276381","text":"import hashlib \nprint(\"Prem Bhajaj: 60004188001\")\ndataF = \"data.txt\"\nverifyF = \"test.txt\"\n\ndef readF(name):\n f = open(name, \"r\")\n data = f.read()\n f.close()\n return data\n\nalgoChoice = input(\"Algorithm: MD5(1) / SHA1(2): \")\nflowChoice = input(\"Digest: Generate(a) / Verify(b): \")\ndata = 
\"\"\nresult = None\nhexHash = \"\"\n\nif flowChoice == 'a':\n data = readF(dataF)\nelse:\n hexHash = input(\"Enter hash of data: \")\n data = readF(verifyF)\n\nif algoChoice == \"1\":\n result = hashlib.md5(data.encode())\nelse:\n result = hashlib.sha1(data.encode())\n\nif flowChoice == 'a':\n print(\"The generated hash is:\",result.hexdigest())\nelse:\n if hexHash == result.hexdigest():\n print(\"The file is intact\")\n else:\n print(\"Tampered FIle\")","sub_path":"CSS/Experiment 5/messageDigest.py","file_name":"messageDigest.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"454137539","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport argparse\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.lda import LDA\nimport numpy as np\nimport multiprocessing\nimport logging\nfrom sklearn.cross_validation import StratifiedKFold \nfrom sklearn.metrics import auc\nfrom sklearn.metrics import roc_curve as rc\nfrom sklearn.grid_search import GridSearchCV\n\nimport pandas as pd\nimport seaborn as sb\nimport matplotlib.pyplot as plt\n\nlogging.basicConfig(format=\"[%(module)s:%(levelname)s]:%(message)s\")\nlogger = logging.getLogger(__name__)\n\n\nCLASSIFIER_PARAMETER = {\n 'Nearest Neighbors': (KNeighborsClassifier(3),\n [{'n_neighbors': [1, 5, 10, 20]}]),\n 'Linear SVM': (SVC(kernel='linear', C=1, probability=True),\n [{'kernel': ['linear'],'C': [0.1, 0.25, 0.5, 1]}]),\n 'RBF SVM': (SVC(gamma=2, C=1, probability=True),\n [{'kernel': ['rbf'],\n 'gamma': np.arange(0.1, 1, 0.1).tolist() + list(range(1, 10)),\n 'C': np.logspace(-2, 2, 5).tolist()}]),\n 'Decision Tree': (DecisionTreeClassifier(max_depth=None,\n max_features='auto'),\n []),\n 'Random Forest': (RandomForestClassifier(max_depth=None,\n n_estimators=10,\n max_features='auto'),\n [{'n_estimators': list(range(5,20))}]),\n 'Logistic Regression': (LogisticRegression(),\n [{'C': np.logspace(0.1, 3, 7).tolist()}]),\n 'Naive Bayes': (GaussianNB(), []),\n}\n\n\ndef run(data, label, PROCESSORS):\n\n kf = StratifiedKFold(label, n_folds=10)\n\n results = {}\n for key in CLASSIFIER_PARAMETER:\n results[key] = []\n\n \n for fold, (train_index, test_index) in enumerate(kf):\n data_train, data_test = (data[train_index, :],\n data[test_index, :])\n label_train, label_test = (label[train_index],\n label[test_index])\n\n for key, value in CLASSIFIER_PARAMETER.items():\n logger.info('Starting classifier: {} fold: {}'.format(key, fold))\n clf, parameters = value\n if parameters:\n clf_grid = GridSearchCV(clf, parameters, n_jobs=PROCESSORS, cv=10)\n clf_grid.fit(data_train, label_train)\n prediction = clf_grid.predict_proba(data_test)[:,1]\n train_prediction = clf_grid.predict_proba(data_train)[:,1]\n else:\n clf.fit(data_train, label_train)\n prediction = clf.predict_proba(data_test)[:,1]\n train_prediction = clf.predict_proba(data_train)[:,1]\n \n fpr, tpr, thresholds = rc(label_test, prediction)\n results[key].append(auc(fpr, tpr))\n fpr, tpr, thresholds = rc(label_train, train_prediction)\n train_auc = auc(fpr, tpr)\n logger.info('{} {} [train AUC: {}/test AUC: {}]'.format(key, fold, train_auc, results[key][-1]))\n\n return(results)\n\ndef plot(results, data_file):\n fig = plt.figure(figsize=[10,6])\n ds = 
pd.DataFrame(results)\n ds_long =pd.melt(ds)\n sb.barplot(x='variable', y='value', data=ds_long, palette='Paired')\n plt.xticks(rotation=30)\n plt.title('Classification AUC Mean +- SD')\n plt.xlabel('')\n fig.subplots_adjust(bottom=0.2)\n plt.savefig(data_file + '.pdf')\n\n \n\ndef make_argument_parser():\n '''\n Creates an ArgumentParser to read the options for this script from\n sys.argv\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument('data_directory',\n help='Directory where the data files live.')\n parser.add_argument('data', default='data.npy',\n help='Data file name')\n parser.add_argument('label', default='labels.npy',\n help='label file name')\n parser.add_argument('--level', default='info',\n help='Logging level')\n \n return parser\n \nif __name__=='__main__':\n\n \n parser = make_argument_parser()\n args = parser.parse_args()\n\n if args.level=='info':\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.DEBUG)\n\n data = np.load(args.data_directory + args.data)\n label = np.load(args.data_directory + args.label)\n\n PROCESSORS = int(multiprocessing.cpu_count() / 2)\n logger.info('Starting classification with {} workers'.format(PROCESSORS))\n results = run(data, label, PROCESSORS)\n \n plot(results, args.data_directory + args.data )\n \n","sub_path":"poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"345112630","text":"from model.encoder import *\nfrom model.decoder import *\nimport torch.nn as nn\nfrom model.utils import *\nimport torch.nn.functional as F\nimport torch\n\n\nclass LabelSmoothingLoss(nn.Module): # label_smoothing\n def __init__(self, classes, smoothing=0.0, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = classes\n self.dim = dim\n\n def forward(self, pred, target):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n # true_dist = pred.data.clone()\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (self.cls - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n\n\nclass MultiModal_Model(nn.Module):\n def __init__(self, args):\n super(MultiModal_Model, self).__init__()\n self.args = args\n self.image_encoder = None\n self.text_encoder = None\n self.mixed_encoder = None\n self.decoder = None\n if args.multi_type == \"separate\":\n self.image_encoder, self.image_transform = choose_image_encoder(args)\n self.text_encoder = choose_text_encoder(args)\n self.decoder = Image_and_Text_Decoder(args, Image_Feature_Size[args.image_enc],\n Text_Feature_Size[args.text_enc], args.label_num)\n elif args.multi_type == \"together\":\n self.mixed_encoder, self.image_transform = choose_multi_encoder(args)\n else:\n raise ValueError(\"unsupported multi type\")\n\n def forward(self, **inputs):\n if self.image_encoder is not None:\n # inputs -> bz x num_labels(logits)\n return self.decoder(self.image_encoder(inputs[\"input_image\"]), self.text_encoder(**inputs))\n else:\n loss, logits = self.mixed_encoder(inputs[\"input_image\"], inputs[\"input_token_ids\"],\n inputs['combined_label'])\n return logits\n\n def compute_loss(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n loss_fn = LabelSmoothingLoss(self.args.label_num, self.args.label_smoothing)\n loss = loss_fn(logits, 
inputs['combined_label'])\n return loss, pred, logits\n\n def predict(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n prob = F.softmax(logits)\n return pred, prob\n\n\nclass Image_Model(nn.Module):\n def __init__(self, args):\n super(Image_Model, self).__init__()\n self.args = args\n self.encoder, self.image_transform = choose_image_encoder(args)\n self.decoder = Image_Decoder(Image_Feature_Size[args.image_enc], args.label_num)\n\n def forward(self, **inputs):\n # bz x 3 x 224 x 224 -> bz x args.label_num\n return self.decoder(self.encoder(inputs[\"input_image\"]))\n\n def compute_loss(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n loss_fn = LabelSmoothingLoss(self.args.label_num, self.args.label_smoothing)\n loss = loss_fn(logits, inputs['image_label'])\n return loss, pred, logits\n\n def predict(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n return pred, logits\n\n\nclass Text_Model(nn.Module):\n def __init__(self, args):\n super(Text_Model, self).__init__()\n self.args = args\n self.encoder = BERT_Text_Encoder()\n self.decoder = Text_Decoder(label_num=args.label_num)\n\n def forward(self, **inputs):\n # inputs -> bz x args.label_num\n return self.decoder(self.encoder(**inputs))\n\n def compute_loss(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n loss_fn = LabelSmoothingLoss(self.args.label_num, self.args.label_smoothing)\n loss = loss_fn(logits, inputs['text_label'])\n return loss, pred, logits\n\n def predict(self, **inputs):\n logits = self.forward(**inputs)\n pred = torch.argmax(logits, dim=-1)\n return pred, logits\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"72989508","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch as th\nfrom torch.distributions import MultivariateNormal\nfrom torch.distributions import kl_divergence\nimport torch.distributions as D\n\n\nclass LatentEvolveRNNAgent(nn.Module):\n def __init__(self, input_shape, args):\n super(LatentEvolveRNNAgent, self).__init__()\n self.args = args\n self.input_shape = input_shape\n self.n_agents = args.n_agents\n self.latent_dim = args.latent_dim\n self.hidden_dim = args.rnn_hidden_dim\n self.bs = 0\n\n # pi_param = th.rand(args.n_agents)\n # pi_param = pi_param / pi_param.sum()\n # self.pi_param = nn.Parameter(pi_param)\n\n # mu_param = th.randn(args.n_agents, args.latent_dim)\n # mu_param = mu_param / mu_param.norm(dim=0)\n # self.mu_param = nn.Parameter(mu_param)\n\n #self.embed_fc = nn.Linear(args.n_agents, args.latent_dim * 2)\n self.inference_fc1 = nn.Linear(args.rnn_hidden_dim + input_shape - args.n_agents, args.latent_dim * 4)\n self.inference_fc2 = nn.Linear(args.latent_dim * 4, args.latent_dim)\n\n self.latent = th.rand(args.n_agents, args.latent_dim) # (n,mu)\n\n # self.latent_fc1 = nn.Linear(args.latent_dim, args.latent_dim * 4)\n # self.latent_fc2 = nn.Linear(args.latent_dim * 4, args.latent_dim * 4)\n # self.latent_fc3 = nn.Linear(args.latent_dim, args.latent_dim)\n\n self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)\n self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)\n # self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)\n\n # self.fc1_w_nn=nn.Linear(args.latent_dim,input_shape*args.rnn_hidden_dim)\n # 
self.fc1_b_nn=nn.Linear(args.latent_dim,args.rnn_hidden_dim)\n\n # self.rnn_ih_w_nn=nn.Linear(args.latent_dim,args.rnn_hidden_dim*args.rnn_hidden_dim)\n # self.rnn_ih_b_nn=nn.Linear(args.latent_dim,args.rnn_hidden_dim)\n # self.rnn_hh_w_nn=nn.Linear(args.latent_dim,args.rnn_hidden_dim*args.rnn_hidden_dim)\n # self.rnn_hh_b_nn=nn.Linear(args.latent_dim,args.rnn_hidden_dim)\n\n self.fc2_w_nn = nn.Linear(args.latent_dim, args.rnn_hidden_dim * args.n_actions, bias=False)\n self.fc2_b_nn = nn.Linear(args.latent_dim, args.n_actions,bias=False)\n\n def init_latent(self, bs):\n self.bs = bs\n loss = 0\n\n return loss, self.latent.detach()\n\n def forward(self, inputs, hidden_state):\n inputs = inputs.reshape(-1, self.input_shape)\n h_in = hidden_state.reshape(-1, self.hidden_dim)\n\n #self.latent = self.embed_fc(inputs[:self.n_agents, - self.n_agents:]) # (n,2*latent_dim)==(n,mu+log var)\n #self.latent[:, -self.latent_dim:] = th.exp(self.latent[:, -self.latent_dim:]) # var\n #latent_embed = self.latent.unsqueeze(0).expand(self.bs, self.n_agents, self.latent_dim * 2).reshape(\n # self.bs * self.n_agents, self.latent_dim * 2)\n\n latent_infer = F.relu(self.inference_fc1(th.cat([h_in.detach(), inputs[:, :-self.n_agents]], dim=1)))\n latent_infer = self.inference_fc2(latent_infer) # (bs*n,latent_dim)\n #latent_infer[:, -self.latent_dim:] = th.exp(latent_infer[:, -self.latent_dim:])\n\n # sample\n gaussian_embed = D.Normal(latent_embed[:, :self.latent_dim], (latent_embed[:, self.latent_dim:])**(1/2))\n gaussian_infer = D.Normal(latent_infer[:, :self.latent_dim], (latent_infer[:, self.latent_dim:])**(1/2))\n\n #loss = gaussian_embed.entropy().sum() + kl_divergence(gaussian_embed, gaussian_infer).sum() # CE = H + KL\n #loss = loss / (self.bs*self.n_agents)\n loss=0\n # handcrafted reparameterization\n # (1,n*latent_dim) (1,n*latent_dim)==>(bs,n*latent*dim)\n # latent_embed = self.latent[:,:self.latent_dim].reshape(1,-1)+self.latent[:,-self.latent_dim:].reshape(1,-1)*th.randn(self.bs,self.n_agents*self.latent_dim)\n # latent_embed = latent_embed.reshape(-1,self.latent_dim) #(bs*n,latent_dim)\n # latent_infer = latent_infer[:, :self.latent_dim] + latent_infer[:, -self.latent_dim:] * th.randn_like(latent_infer[:, -self.latent_dim:])\n # loss= (latent_embed-latent_infer).norm(dim=1).sum()/(self.bs*self.n_agents)\n\n\n #latent = F.relu(self.latent_fc1(latent))\n #latent = (self.latent_fc2(latent))\n\n # latent=latent.reshape(-1,self.args.latent_dim)\n\n # fc1_w=F.relu(self.fc1_w_nn(latent))\n # fc1_b=F.relu((self.fc1_b_nn(latent)))\n # fc1_w=fc1_w.reshape(-1,self.input_shape,self.args.rnn_hidden_dim)\n # fc1_b=fc1_b.reshape(-1,1,self.args.rnn_hidden_dim)\n\n # rnn_ih_w=F.relu(self.rnn_ih_w_nn(latent))\n # rnn_ih_b=F.relu(self.rnn_ih_b_nn(latent))\n # rnn_hh_w=F.relu(self.rnn_hh_w_nn(latent))\n # rnn_hh_b=F.relu(self.rnn_hh_b_nn(latent))\n # rnn_ih_w=rnn_ih_w.reshape(-1,self.args.rnn_hidden_dim,self.args.rnn_hidden_dim)\n # rnn_ih_b=rnn_ih_b.reshape(-1,1,self.args.rnn_hidden_dim)\n # rnn_hh_w = rnn_hh_w.reshape(-1, self.args.rnn_hidden_dim, self.args.rnn_hidden_dim)\n # rnn_hh_b = rnn_hh_b.reshape(-1, 1, self.args.rnn_hidden_dim)\n\n fc2_w = self.fc2_w_nn(latent)\n fc2_b = self.fc2_b_nn(latent)\n fc2_w = fc2_w.reshape(-1, self.args.rnn_hidden_dim, self.args.n_actions)\n fc2_b = fc2_b.reshape((-1, 1, self.args.n_actions))\n\n # x=F.relu(th.bmm(inputs,fc1_w)+fc1_b) #(bs*n,(obs+act+id)) at time t\n x = F.relu(self.fc1(inputs)) # (bs*n,(obs+act+id)) at time t\n\n # gi=th.bmm(x,rnn_ih_w)+rnn_ih_b\n # 
gh=th.bmm(h_in,rnn_hh_w)+rnn_hh_b\n # i_r,i_i,i_n=gi.chunk(3,2)\n # h_r,h_i,h_n=gh.chunk(3,2)\n\n # resetgate=th.sigmoid(i_r+h_r)\n # inputgate=th.sigmoid(i_i+h_i)\n # newgate=th.tanh(i_n+resetgate*h_n)\n # h=newgate+inputgate*(h_in-newgate)\n # h=th.tanh(gi+gh)\n\n # x=x.reshape(-1,self.args.rnn_hidden_dim)\n h = self.rnn(x, h_in)\n h = h.reshape(-1, 1, self.args.rnn_hidden_dim)\n\n q = th.bmm(h, fc2_w) + fc2_b\n\n # h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim) # (bs,n,dim) ==> (bs*n, dim)\n # h = self.rnn(x, h_in)\n # q = self.fc2(h)\n return q.view(-1, self.args.n_actions), h.view(-1, self.args.rnn_hidden_dim), loss\n # (bs*n,n_actions), (bs*n,hidden_dim), (bs*n,latent_dim)\n","sub_path":"src/modules/agents/latent_evolve_rnn_agent.py","file_name":"latent_evolve_rnn_agent.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"271318914","text":"from django.contrib.auth.models import User\nfrom django.db import models, transaction\nfrom django.db.models import Q\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount.models import SocialAccount\nimport hashlib\nfrom django.utils import timezone\nfrom django.core.exceptions import ValidationError\nimport datetime\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom model_utils.managers import InheritanceManager\n\ndef utc_to_local(utc_dt):\n\treturn utc_dt.replace(tzinfo=timezone.utc).astimezone(timezone.get_current_timezone())\n\t\nclass UserProfile(models.Model):\n\tuser = models.OneToOneField(User, related_name='profile')\n\n\tdef __unicode__(self):\n\t\t#return \"{}'s profile\".format(self.user.username)\n\t\treturn format(self.user.username)\n\n\tclass Meta:\n\t\tdb_table = 'user_profile'\n\n\tdef account_verified(self):\n\t\tif self.user.is_authenticated:\n\t\t\tresult = EmailAddress.objects.filter(email=self.user.email)\n\t\t\tif len(result):\n\t\t\t\treturn result[0].verified\n\t\treturn False\n\n\tdef profile_image_url(self):\n\t\tfb_uid = SocialAccount.objects.filter(user_id=self.user.id, provider='facebook')\n\t\n\t\tif len(fb_uid):\n\t\t\treturn \"http://graph.facebook.com/{}/picture?width=40&height=40\".format(fb_uid[0].uid)\n\t\n\t\treturn \"http://www.gravatar.com/avatar/{}?s=40\".format(hashlib.md5(self.user.email).hexdigest())\n\t\n\t@staticmethod\n\tdef default_user_profile():\n\t\ttry:\n\t\t\tdefault_user_profile = UserProfile.objects.get(user__username='default')\n\t\texcept ObjectDoesNotExist:\n\t\t\tdefault_user = User.objects.create(username='default', first_name='default', last_name='default')\n\t\t\tdefault_user.save()\n\t\t\tdefault_user_profile = UserProfile.objects.create(user=default_user)\n\t\t\tdefault_user_profile.save()\n\t\treturn default_user_profile\n\t\nUser.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])\n\nclass Account(models.Model):\n\tnumber = models.CharField(max_length=200, unique = True, verbose_name=\"Account Number\")\n\tfirst_name = models.CharField(max_length=25)\n\tlast_name = models.CharField(max_length=25)\n\tholder = models.ForeignKey(UserProfile, \n\t\tverbose_name=\"Account Holder\", \n\t\trelated_name='account',\n\t\tnull=True, \n\t\tblank=True,)\n\tholder_verification_key = models.CharField(max_length = 80, verbose_name='Acount Verification key')\n\textra = models.TextField(max_length=2000, verbose_name=\"Account Information\",null=True, blank=True)\n\tactive = models.BooleanField(default=True)\n\t\n\t@property \n\tdef 
account_holder_email(self):\n\t\tif self.holder:\n\t\t\treturn (self.holder.user.email)\n\t\telse:\n\t\t\treturn ('')\n\t\t\n\t@property \n\tdef account_holder_full_name(self):\n\t\tif self.first_name and self.last_name:\n\t\t\tfull_name = \"%s, %s\" %(self.last_name, self.first_name)\n\t\t\treturn (full_name)\n\t\telse:\n\t\t\treturn ('')\n\t\t\t\n\t\t\t\n\t@property\n\tdef account_holder_last_logon(self):\n\t\tif self.holder:\n\t\t\treturn (self.holder.user.last_login)\n\t\telse:\n\t\t\treturn ('')\n\t\t\t\n\t@property\n\tdef org(self):\n\t\ttry:\n\t\t\to = Org.objects.get(accountownership__account=self)\n\t\t\tif o:\n\t\t\t\treturn o.name\n\t\texcept ObjectDoesNotExist:\n\t\t\tpass\n\t\t\t\n\tdef is_active(self):\n\t\treturn self.active\n\t\t\n\t@staticmethod\n\tdef default_account():\n\t\ttry:\n\t\t\tdefault_account = Account.objects.get(number = 'default')\n\t\texcept ObjectDoesNotExist:\n\t\t\tdefault_account = Account.objects.create(\n\t\t\t\tnumber='default',\n\t\t\t\tfirst_name=\"default acct first\",\n\t\t\t\tlast_name = \"default acct last\",\n\t\t\t\tholder = UserProfile.default_user_profile(),\n\t\t\t\tholder_verification_key = \"eSzX1q@a1mz2yO\\R&3Q\", # not guessable\n\t\t\t\t)\n\t\t\tdefault_account.save()\n\t\treturn default_account\n\t\t\t\n\tdef __unicode__(self):\n\t\treturn (\"%s:%s:%s\"%(self.number, str(self.holder), str(self.active)))\n\t\t\nclass Org(models.Model): # Organization that holds accounts\n\tclass Meta:\n\t \tverbose_name = 'Organization'\n\t \tverbose_name_plural = 'Organizations'\n\tname = models.CharField(max_length=\"200\", unique = True)\n\taccounts = models.ManyToManyField(Account, through=\"AccountOwnership\")\n\thome_url = models.URLField(default='http://example.com')\n\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\t\n\nclass AccountOwnership(models.Model):\n\torg = models.ForeignKey(Org)\n\taccount = models.ForeignKey(Account, unique = True)\n\tverified = models.BooleanField(default=False)\n\t\n\t\n\t@property\n\tdef account_number(self):\n\t\tif self.account:\n\t\t\treturn self.account.number\n\t\t\t\n\t@property\n\tdef account_holder(self):\n\t\tif self.account:\n\t\t\tif self.account.holder:\n\t\t\t\treturn self.account.holder.user.get_full_name()\n\t\treturn 'none'\n\t\t\n\tdef __unicode__(self):\n\t\treturn (str(self.org) + \" \" + str(self.account) + \" \" + str(self.verified))\n\n\"\"\"\n\t----- MDM meter readings section -----\n\tThe following models relate to meter readings, events, statuses\n\"\"\"\n\nclass UOM(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'UOM (Unit of Measure)'\n\t\tverbose_name_plural = 'UOMs (Units of Measure)'\n\tname = models.CharField(max_length=20)\n\tdescription = models.TextField(max_length=2000, null=True, blank=True)\n\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\nclass MeterType(models.Model):\n\tELECTRIC = 'E'\n\tGAS = 'G'\n\tWATER = 'W'\n\tMETER_COMMODITY_CHOICES = (\n\t\t(ELECTRIC, 'Electric'),\n\t\t(WATER, 'Water'),\n\t\t(GAS, 'Gas'),\n\t)\n\tname = models.CharField(max_length=80, unique = True)\n\tdescription = models.TextField(max_length=2000, null=True, blank=True)\n\tnumber_of_channels = models.IntegerField(default=1)\n\tdefault_consumption_channel = models.IntegerField(default=1)\n\tdisconnectable = models.BooleanField(default=False)\n\tmeter_commodity = models.CharField(max_length = 1, choices = METER_COMMODITY_CHOICES, default = ELECTRIC)\n\t\n\tdef clean(self):\n\t\tif self.default_consumption_channel > self.number_of_channels:\n\t\t\traise ValidationError('Default 
Consumption Channel cannot be more than Number of Channels')\n\n\t\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\t\t\nclass MeterProfile(models.Model):\n\tname = models.CharField(max_length=50, unique = True)\n\tdescription = models.TextField(max_length=2000, null = True, blank = True)\n\tmeter_type = models.ForeignKey(MeterType)\n\tuoms = models.ManyToManyField(UOM, through='UomToChannelMap')\t\n\n\tdef clean(self):\n\t\t\"\"\" This needs to move to form validation because it only catches the problem after save\n\t\tif self.profile:\n\t\t\tif self.number_of_channels > self.profile.filter(meterprofile__channel_number__isnull=False).count():\n\t\t\t\traise ValidationError('Number of Channels cannot be greater than UOMs with channel assignments')\n\t\t\tif self.profile.filter(meterprofile__channel_number__isnull=False).count() <> self.profile.filter(meterprofile__channel_number__isnull=False).values(\"meterprofile__channel_number\").annotate(n=models.Count(\"pk\")).count():\n\t\t\t\traise ValidationError('Channel number assignments must be uniqe')\n\t\t\"\"\"\n\t\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\t\t\nclass UomToChannelMap(models.Model):\n\tmeter_profile = models.ForeignKey(MeterProfile)\n\tuom = models.ForeignKey(UOM)\n\tchannel_number = models.PositiveSmallIntegerField(null = True, blank = True)\n\n#from django.contrib.gis.db import models # postgis is not supported in Heroku Dev environments\n\nclass Zipcode(models.Model): # Make Zipcode separate to support future GIS operations\n\tclass Meta:\n\t\tverbose_name = 'Zip'\n\t\tverbose_name_plural = 'Zip codes'\n\tcode = models.CharField(max_length=5, verbose_name = \"Zip Code\", unique = True)\n\t#poly = models.PolygonField()\n\t#objects = models.GeoManager()\n\n\tdef __unicode__(self):\n\t\treturn (unicode(self.code))\n\t\t\t\nclass Address(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Site Address'\n\t\tverbose_name_plural = 'Site Addresses'\n\tnum = models.IntegerField(verbose_name = \"Street Number\")\n\tstreet = models.CharField(max_length=100)\n\tcity = models.CharField(max_length=100)\n\tstate = models.CharField(max_length=2)\n\tzipcode = models.ForeignKey(Zipcode)\n\t#objects = models.GeoManager()\n\t\n\t@property\n\tdef address(self):\n\t\treturn (\"%d %s, %s, %s, %s\" %(self.num, self.street, self.city, self.state, str(self.zipcode)))\n\t\n\tdef __unicode__(self):\n\t\treturn (str(self.num) + \" \" + self.street + \" \" + self.city + \" \" + self.state + \" \" + str(self.zipcode))\n\t\nclass MeterSite(models.Model):\n\tname = models.CharField(max_length=120, unique = True)\n\tparent_site = models.ForeignKey('self', null=True, blank=True) # not null for grouping\n\taccount = models.ForeignKey(Account)\n\tlongitude = models.DecimalField(max_digits=12, decimal_places=6, null=True, blank=True)\n\tlatitude = models.DecimalField(max_digits=12, decimal_places=6, null=True, blank=True)\n\taddress = models.ForeignKey(Address, null=True, blank=True)\n\t\n\t@property\n\tdef long_lat(self):\n\t\tif (self.longitude and self.latitude):\n\t\t\treturn (str(self.longitude) + \", \" + str(self.latitude))\n\t\telse:\n\t\t\treturn ('empty')\n\t\t\t\n\t@property\n\tdef account_number(self):\n\t\tif self.account:\n\t\t\treturn self.account.number\n\t@property\n\tdef account_holder(self):\n\t\tif self.account:\n\t\t\treturn self.account.account_holder_full_name\n\t\telse:\n\t\t\treturn 'empty'\n\t@property\n\tdef children_sites(self):\n\t\tchild_sites = MeterSite.objects.filter(parent_site=self)\n\t\tif 
len(child_sites):\n\t\t\tnames = []\n\t\t\tfor child in child_sites:\n\t\t\t\tnames.append(str(child.name))\n\t\t\treturn names\n\t\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\nclass SpecialDate(models.Model):\n\tname = models.CharField(max_length=80, unique = True)\n\tdescription = models.TextField(max_length=2000, blank=True)\n\tday_of_week = models.IntegerField(null=True, blank=True)\n\tdate = models.DateField(null=True, blank=True)\n\tholiday = models.BooleanField(default=False)\n\tno_peak = models.BooleanField(default=True)\n\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\t\t\t\nclass MeterCalendar(models.Model):\n\tname = models.CharField(max_length=80, default=\"default\", unique = True)\n\tdescription = models.TextField(max_length=2000, blank=True)\n\teffective_date = models.DateField()\n\tend_date = models.DateField(null=True, blank=True)\n\tspecial_dates = models.ManyToManyField(SpecialDate, through=\"CalendarSpecialDates\")\n\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\t\t\nclass CalendarSpecialDates(models.Model):\n\tmeter_calendar = models.ForeignKey(MeterCalendar)\n\tspecial_date = models.ForeignKey(SpecialDate)\n\t\n\tdef __unicode__(self):\n\t\treturn (str(self.meter_calendar) + \" \" + str(self.special_date))\n\t\t\nclass Meter(models.Model):\n\tname = models.CharField(max_length = 80, unique = True)\n\tserial_number = models.CharField(max_length = 80)\n\tcommunication_address = models.CharField(max_length = 80, default = \"\", blank = True)\n\tmeter_type = models.ForeignKey(MeterType)\n\tprofile = models.ForeignKey(MeterProfile)\n\tpremise = models.ForeignKey(MeterSite)\n\tcalendar = models.ForeignKey(MeterCalendar)\n\tinstall_date = models.DateField(null = True, blank = True, default = datetime.date.today)\n\tremove_date = models.DateField(null = True, blank = True)\n\tspecial_identifier = models.CharField(max_length = 20, null = True, blank = True)\n\t\n\t@property\n\tdef account_owner(self):\n\t\treturn self.premise.account_holder\n\t\t\n\t@property\n\tdef account(self):\n\t\treturn self.premise.account_number\n\t\t\n\t@property\n\tdef last_register_read_datetime(self):\n\t\trr = None\n\t\ttry:\n\t\t\trr = RegisterReading.objects.filter(meter=self).latest('datetime')\n\t\texcept ObjectDoesNotExist:\n\t\t\tpass\n\t\tif rr:\n\t\t\treturn rr.datetime \n\t\telse:\n\t\t\treturn None\n\t@property\n\tdef last_interval_read_datetime(self):\n\t\trr = None\n\t\tir = None\n\t\ttry:\n\t\t\tir = IntervalReading.objects.filter(meter=self).latest('first_interval_datetime')\n\t\texcept ObjectDoesNotExist:\n\t\t\tpass\n\t\tif ir:\n\t\t\treturn ir.first_interval_datetime \n\t\telse:\n\t\t\treturn None\n\t\n\t@property\n\tdef org(self):\n\t\treturn Org.objects.filter(accounts__metersite=self.premise)\n\t\t\n\tdef __unicode__(self):\n\t\treturn (self.name)\n\t\t\t\n# Time sequenced reading header information\nclass IntervalReading(models.Model):\n\tDELIVERED = 0\n\tRECEIVED = 1\n\tNET = 2\n\tSUM = 3\n\tDIRECTION_CHOICES = (\n\t\t(DELIVERED, 'Delivered'),\n\t\t(RECEIVED, 'Recieved'),\n\t\t(NET, 'Net'),\n\t\t(SUM, 'Sum'),\n\t)\n\n\tinsert_datetime = models.DateTimeField(auto_now_add = True)\t\n\tmeter = models.ForeignKey(Meter)\n\tchannel_number = models.SmallIntegerField(default = 1)\n\tuom = models.ForeignKey(UOM)\t\n\tdirection = models.PositiveSmallIntegerField(choices = DIRECTION_CHOICES, default = DELIVERED)\n\tcalculation_constant = models.FloatField(default = 1.0)\n\tinterval = models.SmallIntegerField() # Interval in minutes\n\tinterval_count = 
models.SmallIntegerField() # Number of intervals in Reading\n\tfirst_interval_datetime = models.DateTimeField() # Subsequent interal timestamps determined based on this\n\tsnapshot_register = models.ForeignKey('RegisterReading', null = True, blank = True) # Register reading (if any) for validation\n\tvalidated = models.BooleanField(default = False)\n\textra = models.CharField(max_length = 12, null = True, blank = True)\t\t\n\n\t@property\n\tdef interval_readings(self):\n\t\tif self.readings:\n\t\t\treturn self.readings.all().order_by('sequence')\n\t\telse:\n\t\t\treturn []\n\t\t\t\n\t@property\n\tdef has_flags(self):\n\t\tif len(IntervalData.objects.filter(reading=self.pk,flags__gt=0)):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\t@property\n\tdef flag_count(self):\n\t\treturn len(IntervalData.objects.filter(reading=self.pk,flags__gt=0))\n\t\t\t\n\tdef has_invalid_count(self):\n\t\tintervals = IntervalData.objects.filter(reading=self)\n\t\treturn (not (len(intervals) == self.interval_count))\n\t\n\t@property\n\tdef valid_interval_count(self):\n\t\t# We can't just subtract flag_count from intervals because some may be missing\n\t\tgood = [i for i in IntervalData.objects.filter(reading=self) if i.flags==0]\n\t\treturn len(good)\n\t\t\t\n\tdef direction_as_string(self):\n\t\treturn (self.DIRECTION_CHOICES[self.direction])[1]\n\t\t\n\tdef __unicode__(self):\n\t\treturn (\"%s [%s] %s\" %(self.meter.name, \n\t\t\tutc_to_local(self.first_interval_datetime).strftime('%Y-%m-%d %H:%M:%S-%Z'),\n\t\t\tstr(self.uom)))\n\n# Time sequenced raw values\t\t\nclass IntervalData(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Interval Data'\n\t\tverbose_name_plural = 'Intervals (LP) Data'\n\n\t# Flags are represented by one or more set 'bits' \n\tNONE = int('0', 2) # 0 from binary string\n\tMISSING = int('1', 2) # 1\n\tQUALITY = int('10', 2) # 2\n\tDATA_QUALITY = int('11',2) #3\n\tFULL_POWER_OUTAGE = int('100',2) # 4\n\tPARTIAL_POWER_OUTAGE = int('1000',2) # 8\n\tTIME_CHANGED = int('10000',2) # 16\n\t\n\t# Not all bit combinations are represented by admin choices\n\tINTERVAL_FLAG_CHOICES = (\n\t\t(NONE, 'No Flag'),\n\t\t(MISSING, 'Missing'),\n\t\t(QUALITY, 'General Quality Issue'),\n\t\t(DATA_QUALITY, 'Data Quality Issue'),\n\t\t((PARTIAL_POWER_OUTAGE), 'Partial Power Outage'),\n\t\t((FULL_POWER_OUTAGE), 'Full Power Outage'),\n\t\t((TIME_CHANGED), 'Time Changed')\n\t)\n\tsequence = models.PositiveSmallIntegerField()\n\traw_value = models.DecimalField(max_digits = 12, decimal_places=5, null = True, blank = True)\n\tflags = models.PositiveIntegerField(default = NONE, choices = INTERVAL_FLAG_CHOICES)\n\treading = models.ForeignKey(IntervalReading, related_name='readings')\n\t\n\tobjects = InheritanceManager()\n\t\n\t@property\n\tdef meter_name(self):\n\t\treturn (self.reading.meter.name)\n\t\t\n\tdef _value(self):\n\t\treturn (self.raw_value)\n\tvalue = property(_value)\n\t\n\tdef _date_time(self):\n\t\tdelta = datetime.timedelta(minutes=(self.reading.interval * (self.sequence-1)))\n\t\treturn (self.reading.first_interval_datetime + delta)\n\tdate_time = property(_date_time)\n\t\t\n\t@staticmethod\n\tdef flag_as_string(flag_value):\n\t\treturn [item for item in IntervalData.INTERVAL_FLAG_CHOICES if item[0]==flag_value][0][1]\n\t\t\n\tdef interval_flag_as_string(self):\n\t\treturn [item for item in self.INTERVAL_FLAG_CHOICES if item[0]==self.flags][0][1]\n\t\n\tdef flags_as_bit_string(self):\n\t\treturn \"{0:b}\".format(self.flags)\n\t\n\t@property\t\n\tdef flags_as_list(self):\n\t\tl = []\n\t\tif 
(self.MISSING | self.flags):\n\t\t\tl.append('missing')\n\t\tif (self.FULL_POWER_OUTAGE | self.flags):\n\t\t\tl.append('full power outage')\n\t\tif (self.PARTIAL_POWER_OUTAGE | self.flags):\n\t\t\tl.append('partial power outage')\n\t\tif (self.TIME_CHANGED | self.flags):\n\t\t\tl.append('time changed')\n\t\tif (self.QUALITY | self.flags):\n\t\t\tl.append('quality')\n\t\tif len(l) == 0:\n\t\t\tl.append('none')\n\t\treturn l\n\t\t\n\tdef set_flag_bit(self, bit_number):\n\t\tself.flags = self.flags | value\n\t\t\t\n\tdef as_tuple(self):\n\t\tt=[]\n\t\tt.append(self.reading.uom.name)\n\t\tt.append(self.reading.channel_number)\n\t\tt.append(self.raw_value)\n\t\tt.append(self.reading.calculation_constant)\n\t\tt.append(utc_to_local(self.date_time).strftime('%Y-%m-%d %H:%M:%S'))\n\t\tt.append(self.sequence)\n\t\tt.append(self.reading.meter.name)\n\t\tt.append(self.reading.direction_as_string())\n\t\tt.append(self.flags)\n\t\tt.append(self.reading.validated)\n\t\tt.append(self.reading.extra)\n\t\tif self.sequence == self.reading.interval_count:\n\t\t\tif self.reading.snapshot_register:\n\t\t\t\tt.append(RegisterData.objects.get(reading=self.reading.snapshot_register).raw_value)\n\t\telse:\n\t\t\tt.append(\"0\")\n\t\treturn t\n\t\n\t@transaction.atomic\n\tdef update_as_edited(self, edited_value, user):\n\t\tnew_data_interval = EditedIntervalData(\n\t\t\tsequence = self.sequence,\n\t\t\traw_value = self.raw_value,\n\t\t\tflags = self.flags,\n\t\t\treading = self.reading,\n\t\t\tedited_value = edited_value,\n\t\t\tedited_by = user)\n\t\tself.delete()\n\t\tnew_data_interval.save()\n\t\treturn new_data_interval\n\t\t\n\t@transaction.atomic\n\tdef update_as_estimated(self, estimated_value, rule):\n\t\tnew_data_interval = EstimatedIntervalData(\n\t\t\tsequence = self.sequence,\n\t\t\traw_value = self.raw_value,\n\t\t\tflags = self.flags,\n\t\t\treading = self.reading,\n\t\t\testimated_value = estimated_value,\n\t\t\tvee_rule = rule)\n\t\tself.delete()\n\t\tnew_data_interval.save()\n\t\treturn new_data_interval\n\n\t@staticmethod\t\n\tdef as_tuple_header_row():\n\t\treturn [\"uom\", \"channel number\",\"reading value\", \"calculation constant\", \"datetime\", \"sequence\", \"meter name\", \"direction\", \"flags\", \"validated\", \"extra\", \"snapshot register\"]\n\n\t\t\n\tdef __unicode__(self):\n\t\treturn (str(self.sequence) + \", \" + str(self.value) + \", \" + self.interval_flag_as_string())\n\n# Stores interval values that are edited during VEE\nclass EditedIntervalData(IntervalData):\n\tclass Meta:\n\t\tverbose_name = 'VEE Edited Interval Data'\n\t\tverbose_name_plural = 'VEE Intervals (EDITS ONLY!)'\t\n\tedited_value = models.DecimalField(max_digits = 12, decimal_places=5, null = True, blank = True)\n\tedited_by = models.CharField(max_length=70)\n\tcreated_datetime = models.DateTimeField(auto_now_add=True, default = timezone.now())\n\t\n\tdef _value(self):\n\t\treturn (self.edited_value)\n\tvalue = property(_value)\n\t\n\tdef __unicode__(self):\n\t\treturn (str(self.sequence) + \", \" + str(self.value) + \", \" + self.edited_by)\n\n# Stores interval values that are Estimated during VEE\nclass EstimatedIntervalData(IntervalData):\n\tclass Meta:\n\t\tverbose_name = 'VEE Estimated Interval Data'\n\t\tverbose_name_plural = 'VEE Intervals (estimated)'\t\n\t\t\n\t# Estimation Rules \n\tPOINT_TO_POINT_LINEAR_INTERPOLATION = 0\n\tAVERAGE_OF_REFERENCE_DAYS = 1\n\tESTIMATION_RULES = (\n\t\t(POINT_TO_POINT_LINEAR_INTERPOLATION, 'linear interpolation'),\n\t\t(AVERAGE_OF_REFERENCE_DAYS, 'average reference 
days'),\n\t)\n\n\testimated_value = models.DecimalField(max_digits = 12, decimal_places=5, null = True, blank = True)\n\tvee_rule = models.PositiveSmallIntegerField(choices = ESTIMATION_RULES)\n\tcreated_datetime = models.DateTimeField(auto_now_add=True)\n\n\tdef _value(self):\n\t\treturn (self.estimated_value)\n\tvalue = property(_value)\n\t\n\tdef __unicode__(self):\n\t\treturn \"%s, %s, rule:%s\"%(str(self.sequence), str(self.value), str(self.vee_rule))\n\t\t\n# Register read header\t\t\t\t\nclass RegisterReading(models.Model):\n\tinsert_datetime = models.DateTimeField(auto_now_add = True)\t\n\tdatetime = models.DateTimeField()\n\tmeter = models.ForeignKey(Meter)\n\tpurpose = models.CharField(max_length = 12, null = True, blank = True)\n\tvalidated = models.BooleanField(default = False)\n\textra = models.CharField(max_length = 12, null = True, blank = True)\t\t\n\t\n\t@property\n\tdef register_readings(self):\n\t\tif self.readings:\n\t\t\tl = []\n\t\t\tfor r in self.readings.all().order_by('period'):\n\t\t\t\tl.append((str(r.uom.name),r.direction_as_string(),r.period_as_string()))\n\t\t\treturn l\n\t\telse:\n\t\t\treturn []\n\t\t\t\n\tdef __unicode__(self):\n\t\treturn (\"%s [%s] %s:%s-%s\"%(\n\t\t\tstr(self.meter),\n\t\t\tutc_to_local(self.datetime).strftime('%Y-%m-%d %H:%M:%S-%Z'),\n\t\t\tself.purpose,\n\t\t\tself.extra,\n\t\t\tself.register_readings\n\t\t\t))\n\t\nclass RegisterData(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Register Data'\n\t\tverbose_name_plural = 'Register Readings Data'\t\n\n\tCURRENT = 0\n\tPREVIOUS = 1\n\tPREVIOUS_SEASON = 2 \n\tSEASON_CHANGE = 3\n\tREGISTER_PERIOD_CHOICES = (\n\t\t(CURRENT, 'Current Period'),\n\t\t(PREVIOUS, 'Previous Period'),\n\t\t(PREVIOUS_SEASON, 'Previous Season'),\n\t\t(SEASON_CHANGE, 'Season Change'),\n\t)\n\t\n\tDELIVERED = 0\n\tRECEIVED = 1\n\tNET = 2\n\tSUM = 3\n\tDIRECTION_CHOICES = (\n\t\t(DELIVERED, 'Delivered'),\n\t\t(RECEIVED, 'Recieved'),\n\t\t(NET, 'Net'),\n\t\t(SUM, 'Sum'),\n\t)\n\tuom = models.ForeignKey(UOM)\n\tdirection = models.PositiveSmallIntegerField(choices = DIRECTION_CHOICES, default = DELIVERED)\n\tperiod = models.PositiveSmallIntegerField(choices = REGISTER_PERIOD_CHOICES, default = CURRENT)\n\traw_value = models.DecimalField(max_digits = 12, decimal_places=5, null = True, blank = True)\n\treading = models.ForeignKey(RegisterReading, related_name='readings')\n\n\tdef raw_value_no_decimal(self):\n\t\tif self.raw_value:\n\t\t\treturn (round(self.raw_value,0))\n\n\tdef direction_as_string(self):\n\t\treturn (self.DIRECTION_CHOICES[self.direction])[1]\n\t\t\t\n\tdef period_as_string(self):\n\t\treturn (self.REGISTER_PERIOD_CHOICES[self.period])[1]\n\t\t\n\tdef as_tuple(self):\n\t\tt=[]\n\t\tt.append(self.uom.name)\n\t\tt.append(self.raw_value)\n\t\tt.append(utc_to_local(self.reading.datetime).strftime('%Y-%m-%d %H:%M:%S'))\n\t\tt.append(self.reading.meter.name)\n\t\tt.append(self.direction_as_string())\n\t\tt.append(self.period_as_string())\n\t\tt.append(self.reading.purpose)\n\t\tt.append(self.reading.validated)\n\t\tt.append(self.reading.extra)\n\t\treturn t\n\n\t@staticmethod\t\n\tdef as_tuple_header_row():\n\t\treturn [\"uom\", \"reading value\", \"datetime\", \"meter name\", \"direction\", \"period\", \"purpose\", \"validated\", \"extra\"]\n\t\t\n\t# Return DIRECTION_CHOICES with string, value\n\t@staticmethod\n\tdef directions_reversed():\n\t\treversed_list = []\n\t\tfor c in RegisterData.DIRECTION_CHOICES:\n\t\t\treversed_list.append((c[1],c[0]))\n\t\treturn reversed_list\n\t\t\n\t# Return 
DIRECTION_CHOICES with string, value\n\t@staticmethod\n\tdef periods_reversed():\n\t\treversed_list = []\n\t\tfor c in RegisterData.REGISTER_PERIOD_CHOICES:\n\t\t\treversed_list.append((c[1],c[0]))\n\t\treturn reversed_list\n\t\t\n\tdef __unicode__(self):\n\t\treturn (str(self.uom) + \", \" + \n\t\t\tstr(self.raw_value) + \", \" + \n\t\t\tutc_to_local(self.reading.datetime).strftime('%Y-%m-%d %H:%M:%S-%Z') + \", \" + \n\t\t\tself.direction_as_string() + \", \" + \n\t\t\tstr(self.period_as_string()))\n\t\t\n\n\"\"\"\n\tStill to-do... events\n\"\"\"\n","sub_path":"mdm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":22451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"619861999","text":"import appdaemon.plugins.hass.hassapi as hass\nimport datetime, time\n\n\n#\n# Hellow World App\n#\n# Args:\n#\n\nclass xiaomi_vacWorld(hass.Hass):\n\n def initialize(self):\n self.log(\"Starting xiaomi_vac Service\")\n self.run_daily(self.reset, datetime.time(0, 0, 0))\n self.mct = 20*60 # minimum clean time 20min\n self.listen_state(self.presents,\"binary_sensor.someone_is_home\")\n self.vacs = [\"vacuum.xiaomi_vacuum_cleaner\",\"vacuum.xiaomi_vacuum_cleaner_2\"]\n self.cleaning_started = []\n self.tct = [] # total cleaning time\n self.is_cleaning = [] # status\n \n for vac in self.vacs:\n self.listen_state(self.cleaning,vac)\n self.cleaning_started.append(time.time())\n self.tct.append(0)\n self.is_cleaning.append(False)\n\n def g_tct(self,id):\n if(self.is_cleaning[id]):\n return self.tct[id] + time.time() - self.cleaning_started[id]\n else:\n return self.tct[id]\n \n\n def cleaning(self, entity, attribute, old, new,kwargs):\n if(not(new==old)):\n self.log(\"new vacuum (\"+str(entity)+\") status: \"+new+\". old was \"+old+\". 
tct: \"+str(self.g_tct(self.vacs.index(entity))))\n if(new==\"cleaning\"):\n self.is_cleaning[self.vacs.index(entity)] = True\n self.cleaning_started[self.vacs.index(entity)] = time.time()\n elif(self.is_cleaning[self.vacs.index(entity)]):\n self.is_cleaning[self.vacs.index(entity)] = False\n self.tct[self.vacs.index(entity)] += time.time() - self.cleaning_started[self.vacs.index(entity)]\n self.log(\"cleaning stopped, total time: \"+str(self.g_tct(self.vacs.index(entity))))\n if(self.tct[self.vacs.index(entity)] > self.mct):\n self.set_state(\"input_boolean.cleaning_done_today_\"+str(self.vacs.index(entity)),state=\"on\")\n \n def presents(self, entity, attribute, old, new,kwargs):\n if(new==\"on\"): #someon is approaching home\n self.log(\"someone is home, stop vacuuming.\")\n for vac in self.vacs:\n self.log(\"Vac: \"+vac+\" tct: \"+str(self.g_tct(self.vacs.index(vac))))\n self.call_service(\"vacuum/return_to_base\", entity_id=vac)\n else:\n self.log(\"Home alone.\")\n for vac in self.vacs:\n self.log(\"Vac: \"+vac+\" tct: \"+str(self.g_tct(self.vacs.index(vac))))\n if(self.tct[self.vacs.index(vac)]>20*60):\n self.log(\"tct >20 min cleaned already, enough for today\")\n elif(self.get_state(\"input_boolean.autostart_cleaning\") == \"on\"):\n self.log(\"start cleaning\")\n self.call_service(\"vacuum/start\", entity_id=vac)\n else:\n self.log(\"no cleaning, autostart off\")\n \n\n def reset(self, entity=\"\", attribute=\"\", old=\"\", new=\"\", kwargs=\"\"):\n for vac in self.vacs:\n self.tct[self.vacs.index(vac)] = 0\n self.set_state(\"input_boolean.cleaning_done_today_\"+str(self.vacs.index(entity)),state=\"off\")\n\n\n\n","sub_path":"HA/appdaemon/apps/xiaomi_vac.py","file_name":"xiaomi_vac.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"57072353","text":"from flask import render_template, request\nfrom werkzeug import secure_filename\nfrom webgui import web_gui\nimport os\n\nfrom training_stats import half_hour_test, gpxfile \n\n@web_gui.route('/')\ndef index():\n ''' Render landing page '''\n return render_template('index.html',\n title='Home')\n\n\n@web_gui.route('/upload', methods=['POST'])\ndef upload():\n ''' Serve upload file request '''\n gpx = request.files['file']\n \n def is_gpx(filename):\n return filename.rsplit('.', 1)[1].lower() == 'gpx'\n \n if gpx and is_gpx(gpx.filename):\n filename = secure_filename(gpx.filename)\n saved_file = os.path.join(web_gui.config['USER_GPX_FOLDER'], filename)\n gpx.save(saved_file)\n lactate_thr, _, _ = half_hour_test.calculate_lactate_threshold(gpxfile.get_hr_measurements(saved_file))\n return render_template('training.html',\n filename=filename,\n lactate=lactate_thr,\n other='')\n","sub_path":"webgui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"154104163","text":"class Solution:\n def findShortestSubArray(self, nums: List[int]) -> int:\n\n record = defaultdict(int)\n for num in nums:\n record[num] += 1\n\n # sort the repeated numbers in reverse\n history = None\n if len(record.items()) > 0:\n history = [(key, val) for key, val in record.items()]\n history.sort(key=lambda x: x[1])\n history.reverse()\n\n # collect the most repeated numbers\n if len(history) > 0:\n\n for i in range(1, len(history)):\n if history[i][1] != history[0][1]:\n history = history[0:i]\n break\n\n history = [hist[0] for hist in 
history]\n\n # find the shortest subset possible for the repeated numbers\n shortest = list()\n for hist in history:\n length = list()\n for i in range(len(nums)):\n if nums[i] == hist:\n length.append(i)\n\n if len(length) > 1:\n shortest.append(length[-1] - length[0])\n\n # get the shortest length\n shortest.sort()\n\n if len(shortest) > 0:\n return shortest[0] + 1\n else:\n return 1","sub_path":"easy/Degree_of_an_Array/source/brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"150307920","text":"#!/usr/bin/env python3\n# whohungup.py\n# whohungup.c reimplemented in Python3\n# Tests if a TCP connection is left open remotely after a banner is received.\n# Benjamin Perrott\n\nimport sys\nimport socket\nfrom select import poll\n\nRECV_SZ = 1024\nPOLL_MS = 200\nSO_TIMEOUT_S = 2\n\npollfile = '/usr/include/bits/poll.h'\nerr = None # hold socket.errors\n\ndef usage():\n sys.stderr.write('Usage: ' + sys.argv[0] + ' \\n')\n exit(1)\n\ndef fatal(funcmsg):\n sys.stderr.write('[-] Fatal error encountered while ' + \n funcmsg + ': ' + str(err) + '\\n')\n sys.stderr.write('[-] Cannot continue.\\n')\n exit(1)\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n usage()\n\n sock = None\n # setdefaulttimeout works on new socket objects, not existing ones.\n # Thus we move this to before socket creation.\n try:\n socket.setdefaulttimeout(SO_TIMEOUT_S)\n except socket.error as err:\n fatal('setting socket options')\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n except socket.error as err:\n fatal('creating socket') \n\n try:\n host = sys.argv[1]\n port = sys.argv[2]\n sock.connect((host, int(port)))\n except socket.error as err:\n fatal('connecting to host')\n\n try:\n banner = str(sock.recv(RECV_SZ), 'utf-8')\n if len(banner) == 0:\n print('[.] No banner received from port ' + port + ' on ' + host)\n else:\n print('[!] Got response from port ' + port + ':\\n' + banner) \n except socket.error as err:\n if err.__class__.__name__ == 'timeout':\n print('[.] No banner received from port ' + port + ' on ' + host)\n else:\n fatal('receiving banner')\n\n # poll to see if the connection has been remotely closed.\n # POLLRDHUP = 'The remote side of the connection hung up.'\n # req Linux 2.6.17 \n\n # AttributeError: module 'select' has no attribute 'POLLRDHUP'\n # Hacky: grabbing macro value from file \n POLLRDHUP = None\n with open(pollfile, 'r') as pollfile_f:\n pollfile_buf = pollfile_f.readlines()\n for line in pollfile_buf:\n if line.find('POLLRDHUP') != -1:\n POLLRDHUP = line.split()[3]\n break\n if POLLRDHUP == None:\n fatal('determining value for POLLRDHUP') \n\n try:\n p = poll() # create polling object\n p.register(sock, int(POLLRDHUP, 16)) # check only for POLLRDHUP events\n res = p.poll(POLL_MS) # returns list of tuples in format (fd, event)\n if len(res) == 0: # If the list is empty, no POLLRDHUP registered\n print(\"[!] The connection remains OPEN.\") \n else:\n print(\"[.] The connection was remotely closed.\")\n except socket.error as err:\n fatal('polling for hang-up')\n\n sock.close()\n","sub_path":"whohungup.py","file_name":"whohungup.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"651351172","text":"'''\nCreated on Jun 03, 2010\nby\n@author: Anne Pajon (ap12)\nCopyright (c) 2009 Wellcome Trust Sanger Institute. 
All rights reserved.\n\n\nTakes a tab delimited file of format:\ngenus||species_strain||assembly_id||contig||scaffold||run\nand generate QC configuration files\n\nUsage:\n> python ~ap12/genlibpy/genepy/pathtrack/generate_qc_conf.py -a /lustre/scratch103/pathogen/pathpipe/tmp/metadata/metahit/assembly.index -r /lustre/scratch103/pathogen/pathpipe/ -c metahit\n\n'''\n\nfrom genepy import logsetup\nfrom optparse import OptionParser\nfrom genepy import util\nimport sys, os\nimport constants\n\n### ---------------------------------------------------------------------------\n### Logging configuration\n### ---------------------------------------------------------------------------\nimport logging\nlog = logging.getLogger('genepy.pathtrack')\n\n### ---------------------------------------------------------------------------\ndef main():\n usage = \"usage: %prog [Options]\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--assembly\", metavar=\"FILE\", help=\"FILE containing the list of all contigs and scaffolds to import\", action=\"store\", type=\"string\", dest=\"assembly\")\n parser.add_option(\"-r\", \"--root\", metavar=\"PATH\", help=\"PATH to the root of the hierarchy\", action=\"store\", type=\"string\", dest=\"root\")\n parser.add_option(\"-c\", \"--category\", metavar=\"CATEGORY\", help=\"name of the category from %s\" % constants.CATEGORY, action=\"store\", choices=constants.CATEGORY, dest=\"category\")\n \n (options, args) = parser.parse_args()\n\n if not (options.assembly and options.root and options.category):\n parser.print_help()\n sys.exit()\n\n # check root path\n if not os.path.exists(options.root):\n log.error(\"%s path do not exist\" % options.root)\n log.error(\"Create root path first, then run pipeline before importing assembly files.\")\n sys.exit()\n \n # check log directory exists\n out_log = \"%s/log/%s\" % (options.root, options.category)\n util.checkDir(out_log)\n\n # open qc_pipeline.conf\n pipeline_qc = open('%s/conf/%s/qc_pipeline.conf' % (options.root, options.category), 'w')\n\n # check input assembly file and read it - one line per run (lane)\n util.checkFile(options.assembly)\n assembly_lines = open(options.assembly, \"r\").readlines()\n # compare project name - could have more than one run per project\n previous_project = \"\"\n for line in assembly_lines:\n if line[0] == '!':\n continue\n if not line.count('||') == 6:\n continue\n line = line.strip()\n values = line.split('||')\n project = values[0]\n genus = values[1]\n species = values[2]\n assembly_id = values[3]\n contig_file = values[4]\n scaffold_file = values[5]\n run = values[6]\n\n # check if new project\n if project != previous_project:\n # check if files are in place in the hierarchy\n species_path = \"%s/%s/seq-pipelines/%s/%s\" % (options.root, options.category, genus, species)\n assembly_path = \"%s/ASSEMBLY\" % species_path\n assembly_id_path = \"%s/%s\" % (assembly_path, assembly_id)\n scaffold_file_hierarchy = \"%s/Scaffolds.fna\" % assembly_id_path\n util.checkFile(scaffold_file_hierarchy)\n util.checkFile(\"%s.fai\" % scaffold_file_hierarchy)\n util.checkFile(\"%s.refstats\" % scaffold_file_hierarchy)\n util.checkFile(\"%s.bwt\" % scaffold_file_hierarchy)\n\n # create one qc conf file specific per project\n qc_conf_filename = '%s/conf/%s/%s_qc.conf' % (options.root, options.category, project)\n qc_conf = open(qc_conf_filename, 'w')\n qc_conf.write(constants.QC_CONF_TEMPLATE % {'root':options.root,\n 'category':options.category,\n 'db':constants.DATABASE[options.category],\n 
'db_host':os.getenv('VRTRACK_HOST'),\n 'db_port':os.getenv('VRTRACK_PORT'),\n 'db_rw_user':os.getenv('VRTRACK_RW_USER'),\n 'db_password':os.getenv('VRTRACK_PASSWORD'),\n 'project':project,\n 'ref':scaffold_file_hierarchy})\n qc_conf.close()\n\n log.info(\"QC conf file %s has been generated.\" % qc_conf_filename)\n\n # update qc_pipeline.conf\n pipeline_qc.write(\"__VRTrack_QC__\\t%s\\n\" % (qc_conf_filename))\n\n # update previous project name\n previous_project = project\n\n pipeline_qc.close()\n \n\n\n### ---------------------------------------------------------------------------\nif __name__ == '__main__':\n main()\n\n","sub_path":"scripts/pathtrack/legacy/generate_qc_conf.py","file_name":"generate_qc_conf.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"255510905","text":"import setuptools\n#from cmake_setuptools import *\n\nimport os\nimport re\nimport sys\nimport sysconfig\nimport platform\nimport subprocess\n\n#from distutils.version import LooseVersion\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\n \"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(\n os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = [\\\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\\\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n build_args=[]\n\n\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,\n cwd=self.build_temp)\n subprocess.check_call(['cmake', '--build', '.'] + build_args,\n cwd=self.build_temp)\n print() # Add an empty line for cleaner output\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name=\"MultiScaleOT\", # Replace with your own username\n version=\"0.3.2\",\n author=\"Bernhard Schmitzer\",\n author_email=\"bernhard.schmitzer@tum.de\",\n description=\"A package with coarse-to-fine solvers for optimal transport problems.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/bernhard-schmitzer/MultiScaleOT\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\"\n ],\n python_requires='>=3.4',\n zip_safe=False,\n #packages=[\"MultiScaleOT\"],\n #package_dir={'MultiScaleOT': '.'},\n #package_data={'MultiScaleOT': ['example-data/*']},\n #include_package_data=True,\n ext_modules=[CMakeExtension('MultiScaleOT',\\\n sourcedir=\"./src\")],\n cmdclass={'build_ext': CMakeBuild}\n )\n\n\n\n","sub_path":"pypi_install_script/MultiScaleOT-0.3.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"263247998","text":"# -*- coding=utf-8 -*-\n\"\"\"\n 验证码识���:\n - 转RGB色彩模式\n - 
获取每个点的RGB值,根据条件进行颜色(红色)替换\n - pytesseract识别,长度不为4时重新执行\n\"\"\"\nimport io\n\nimport click\nimport pytesseract\nfrom PIL import Image\n\nfrom webrequests import WebRequest\n\nfrom nsfc import util\n\n\ndef get_captcha(session, captcha_url):\n \n resp = WebRequest.get_response(captcha_url, session=session)\n im = Image.open(io.BytesIO(resp.content))\n\n im = im.convert('RGB')\n\n pixdata = im.load()\n weight, height = im.size\n for x in range(weight):\n for y in range(height):\n rgb = pixdata[x, y]\n if (rgb[0] - rgb[1] > 73) and (rgb[0] - rgb[2] > 73):\n pixdata[x, y] = (0, 0, 0)\n else:\n pixdata[x, y] = (255, 255, 255)\n\n captcha = pytesseract.image_to_string(im).strip()\n\n if len(captcha) != 4:\n return get_captcha(session, captcha_url)\n\n payload = util.query_payload(tryCode=captcha)\n funding_url = 'http://output.nsfc.gov.cn/baseQuery/data/supportQueryResultsData'\n resp = WebRequest.get_response(funding_url, method='POST', session=session, json=payload)\n\n if resp.json()['message'] != '验证码错误':\n click.secho('right captcha: {}'.format(captcha), fg='green', bold=True)\n return captcha\n \n click.secho('wrong captcha: {}'.format(captcha), fg='yellow')\n return get_captcha(session, captcha_url)\n","sub_path":"nsfc/util/parse_captcha.py","file_name":"parse_captcha.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"438776081","text":"import re\n# Find regexes that match the following. (e.g. find a single regex that matches\n# both `antelope` and `antelopes`.)\nline1 = 'antelope'\nline2 = 'antelopes'\nline3 = 'antelop'\n\nprint(\"\\nRegex 1:\")\nmatch1 = re.match('antelopes?', line1)\nmatch2 = re.match('antelopes?', line2)\nmatch3 = re.match('antelopes?', line3)\nprint (f\"testing {line1}: {match1.group()}\" )\nprint (f\"testing {line2}: {match2.group()}\" )\nprint (f\"testing {line3}: {match3}\" )\n\n# * Single regex that matches either of these:\n\n# antelope rocks out\n \n# antelopes rock out\n\nline1 = 'antelope rocks out'\nline2 = 'antelopes rock out'\nline3 = 'antelopes rock '\n\nprint(\"\\nRegex 2:\")\nmatch1 = re.match('antelopes? rocks? out', line1)\nmatch2 = re.match('antelopes? rocks? out', line2)\nmatch3 = re.match('antelopes? rocks? out', line3)\nprint (f\"testing {line1}: {match1.group()}\" )\nprint (f\"testing {line2}: {match2.group()}\" )\nprint (f\"testing {line3}: {match3}\" )\n\n# * Regex that matches either of:\n\n# goat\n \n# moat\n\n# but not:\n\n# boat\n\nline1 = 'goat'\nline2 = 'moat'\nline3 = 'boat'\n\nprint(\"\\nRegex 3:\")\nmatch1 = re.match('[gm]oat', line1)\nmatch2 = re.match('[gm]oat', line2)\nmatch3 = re.match('[gm]oat', line3)\nprint (f\"testing {line1}: {match1.group()}\" )\nprint (f\"testing {line2}: {match2.group()}\" )\nprint (f\"testing {line3}: {match3}\" )\n\n# * Regex that matches dates in YYYY-MM-DD format. (Year can be 1-4 digits, and\n# month and day can each be 1-2 digits). 
This does not need to verify the date\n# is correct (e.g 3333-33-33 can match).\n\n# 2000-10-12\n \n# 1999-1-20\n \n# 1999-01-20\n \n# 812-2-10\n\nline1 = '2000-10-12'\nline2 = '1999-1-20'\nline3 = '1999-01-20'\nline4 = '812-2-10'\nline5 = '2ooo-10-12'\n\nprint(\"\\nRegex 4:\")\nmatch1 = re.match('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}', line1)\nmatch2 = re.match('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}', line2)\nmatch3 = re.match('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}', line3)\nmatch4 = re.match('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}', line4)\nmatch5 = re.match('[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,2}', line5)\nprint (f\"testing {line1}: {match1.group()}\" )\nprint (f\"testing {line2}: {match2.group()}\" )\nprint (f\"testing {line3}: {match3.group()}\" )\nprint (f\"testing {line4}: {match4.group()}\" )\nprint (f\"testing {line5}: {match5}\" )\n\n","sub_path":"theory/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"444164322","text":"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport os\n\nfrom setuptools import setup, find_packages\n\npackage = 'seleniuk'\n\nhere = os.path.dirname(os.path.abspath(__file__))\nversion = next((line.split('=')[1].strip().replace(\"'\", '') for line in\n open(os.path.join(here, package, '__init__.py'))\n if line.startswith('__version__ = ')), '0.0.dev0')\nemail = next((line.split('=')[1].strip() for line in\n open(os.path.join(here, '.env'))\n if line.startswith('email=')), '')\nreadme = os.path.join(here, 'README.md')\n\n\ndef _requires_from_file(filename):\n return open(filename).read().splitlines()\n\n\nsetup(\n name=package,\n version=version,\n url='https://github.com/ukwksk/' + package,\n author='ukwksk',\n author_email=email,\n maintainer='ukwksk',\n maintainer_email=email,\n description='Selenium Wrapper',\n long_description=readme,\n packages=find_packages(),\n install_requires=_requires_from_file('requirements.txt'),\n license=\"MIT\",\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"75754140","text":"from django.shortcuts import render\nfrom crediario.clientes.models import Cliente, Configloja, APIConfigLoja\nfrom crediario.clientes.forms import ClienteBuscaForm\nfrom django.contrib import messages\nfrom django.http import JsonResponse\nfrom django.conf import settings\n\n\ndef vw_clientes(request):\n data = {}\n context = {}\n conf = Configloja.objects.get(cd_chave='ZIM 1')\n data['cd_regiao'], data['sg_loja'] = conf.no_conf.split(':')[:2]\n data['cd_regiao'] = int(data['cd_regiao'])\n form = ClienteBuscaForm(initial=data)\n \n if request.method == 'POST':\n form = ClienteBuscaForm(initial=data, data=request.POST)\n\n if form.is_valid():\n\n clientes = Cliente.objects.none()\n\n if form.cleaned_data['tipo'] == '0': # buscar pelo código\n cd_chave = '{0}{1}'.format(\n str(form.cleaned_data['cd_regiao']).rjust(2),\n form.cleaned_data['valor'].strip().rjust(8))\n clientes = Cliente.objects.filter(cd_chave = cd_chave)\n else: # buscar pelo nome\n clientes = Cliente.objects.filter(cd_nomcod__istartswith=form.cleaned_data['valor'])\n\n context['clientes'] = clientes\n\n if clientes.count() == 0:\n messages.add_message(request, messages.INFO, 
'Cliente inexistente')\n\n\n context['form'] = form\n\n return render(request, \n 'clientes/change_list.html',\n context\n )\n\n\ndef vw_api_imagem_cliente(request, codigo):\n conf = APIConfigLoja()\n chave = '{0}{1}'.format(\n str(conf.cd_regiao).rjust(2),\n str(codigo).rjust(8))\n\n cliente = Cliente.objects.filter(cd_chave=chave)\n\n if cliente.count == 0:\n return JsonResponse([], safe=False)\n\n imagem = cliente[0].get_foto()\n\n return JsonResponse(imagem if imagem else [], safe=False)\n ","sub_path":"django/crediario/clientes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"585931252","text":"\"\"\" Tests of Selectors \"\"\"\nimport pytest\n\nfrom ctapipe.core.qualityquery import QualityQuery, QualityCriteriaError\nfrom ctapipe.core.traits import List\n\n\ndef test_selector():\n \"\"\" test the functionality of an example Selector subclass\"\"\"\n\n class ExampleQualityQuery(QualityQuery):\n quality_criteria = List(\n default_value=[\n (\"high_enough\", \"lambda x: x > 3\"),\n (\"a_value_not_too_high\", \"lambda x: x < 100\"),\n (\"smallish\", \"lambda x: x < np.sqrt(100)\"),\n ],\n ).tag(config=True)\n\n query = ExampleQualityQuery()\n\n criteria1 = query(0) # pass smallish\n assert len(criteria1) == 3\n assert (criteria1 == [False, True, True]).all()\n\n criteria2 = query(20) # pass high_enough + not_too_high\n assert (criteria2 == [True, True, False]).all()\n\n criteria3 = query(200) # pass high_enough, fail not_too_high\n assert (criteria3 == [True, False, False]).all()\n\n criteria4 = query(8) # pass all\n assert (criteria4 == True).all()\n\n tab = query.to_table()\n html = query._repr_html_()\n assert isinstance(html, str)\n\n assert tab[\"criteria\"][0] == \"TOTAL\"\n assert tab[\"criteria\"][1] == \"high_enough\"\n assert tab[\"criteria\"][2] == \"a_value_not_too_high\"\n assert tab[\"criteria\"][3] == \"smallish\"\n\n assert tab[\"counts\"][0] == 4\n assert tab[\"counts\"][1] == 3\n assert tab[\"counts\"][2] == 3\n assert tab[\"counts\"][3] == 2\n\n # 0 0 0\n # 1 1 0\n # 1 0 0\n # 1 1 1\n assert tab[\"cumulative_counts\"][0] == 4\n assert tab[\"cumulative_counts\"][1] == 3\n assert tab[\"cumulative_counts\"][2] == 2\n assert tab[\"cumulative_counts\"][3] == 1\n\n # check that the order is preserved\n assert query.criteria_names[1] == \"high_enough\"\n assert query.criteria_names[2] == \"a_value_not_too_high\"\n assert query.criteria_names[3] == \"smallish\"\n\n # check we can get back the correct function string:\n assert query.selection_function_strings[1] == \"lambda x: x > 3\"\n\n assert len(query) == 4 # 4 events counted\n\n\ndef test_bad_selector():\n \"\"\" ensure failure if a selector function is not a function or can't be evaluated\"\"\"\n\n with pytest.raises(QualityCriteriaError):\n s = QualityQuery(\n quality_criteria=[\n (\"high_enough\", \"lambda x: x > 3\"),\n (\"not_good\", \"3\"),\n (\"smallish\", \"lambda x: x < 10\"),\n ]\n )\n assert s\n\n with pytest.raises(QualityCriteriaError):\n s = QualityQuery(\n quality_criteria=[\n (\"high_enough\", \"lambda x: x > 3\"),\n (\"not_good\", \"x == 3\"),\n (\"smallish\", \"lambda x: x < 10\"),\n ]\n )\n assert s\n\n # ensure we can't run arbitrary code.\n # try to construct something that is not in the\n # ALLOWED_GLOBALS list, but which is imported in selector.py\n # and see if it works in a function\n with pytest.raises(NameError):\n s = QualityQuery(quality_criteria=[(\"dangerous\", 
\"lambda x: Component()\")])\n s(10)\n","sub_path":"ctapipe/core/tests/test_qualityquery.py","file_name":"test_qualityquery.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"441516298","text":"import os\nimport glob\nimport subprocess as sp\nimport ColorTextWriter\n\nclass Bt2Aligner:\n\n def __init__(self, home_dir, input_dir, threads, bt2_index, bt2_parameter, extensions):\n self.home_dir = home_dir\n self.input_dir = input_dir\n self.threads = threads\n self.bt2_index = bt2_index\n self.bt2_parameter = bt2_parameter\n self.extensions = extensions\n\n def aligner(self):\n\n outdir = os.path.join(self.home_dir, 'bt2_aligned')\n if not os.path.isdir(outdir): os.mkdir(outdir)\n\n fastq_list = sorted(glob.glob(self.input_dir + '*tagdustout.fq'))\n\n ctw = ColorTextWriter.ColorTextWriter()\n\n print(ctw.CBEIGE + ctw.CBOLD + 'Running Bowtie2 Aligner ...' + ctw.CEND + '\\n')\n\n for i in fastq_list:\n print('\\n' + ctw.CBEIGE + ctw.CBOLD + 'Mapping: ' + ctw.CBLUE + os.path.basename(i) + ctw.CBEIGE + ctw.CBOLD + ' ...' + ctw.CEND + '\\n')\n\n output_file = outdir + '/' + os.path.basename(i).split('tagdustout.fq')[0] + 'aligned' + self.extensions[4]\n\n command = [\n 'bowtie2',\n '-p', self.threads, '-x', self.bt2_index + 'bt2_index',\n self.bt2_parameter, '-q', i,\n '-S', output_file,\n '2>', output_file.split(self.extensions[4])[0] + self.extensions[3]\n ]\n\n command = ' '.join(command)\n sp.check_call(command, shell=True)\n\n print('\\n' + ctw.CRED + ctw.CBOLD + 'Sequence Alignment Completed!!!' + ctw.CEND + '\\n')","sub_path":"Modules/Bt2Aligner.py","file_name":"Bt2Aligner.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"10343987","text":"#!/usr/bin/env python3\n\ntry:\n from sys import argv\n import ldap3\n\n LDAPDIRS = [\n ('ldaps://ldappv.rwth-aachen.de', 'ou=People,dc=rwth-aachen,dc=de')\n ]\n\n FILTER = '(mail=*)'\n ATTRS = ['cn', 'mail']\n\n print('Searching … ', end='', flush=True)\n entries = []\n for d in LDAPDIRS:\n with ldap3.Connection(d[0], auto_bind=True) as conn:\n print(d[0] + ' … ', end='', flush=True)\n flt = '(&' + FILTER + \\\n '(|(mail=' + argv[1] + '*)(cn=' + argv[1] + '*)))'\n conn.search(d[1], flt, attributes=ATTRS)\n entries.extend(conn.entries)\n\n if len(entries) == 0:\n print('No entries found!')\n exit(1)\n\n print(str(len(entries)) + ' entries found!')\n for i in entries:\n for m in i.mail.values:\n print(m + '\\t' + i.cn[0] + '\\t' + i.entry_dn)\n\nexcept Exception as e:\n print(\"Error: \" + type(e).__name__ + \": \" + str(e))\n exit(1)\n","sub_path":"mutt-addressbook.py","file_name":"mutt-addressbook.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"109082304","text":"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport random\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom threading import Thread, Lock\r\nimport tensorflow as tf\r\nimport logging_utils\r\nimport time\r\nfrom emulator import get_num_actions\r\nimport importlib\r\nfrom q_network import *\r\nfrom policy_v_network import *\r\nfrom value_based_actor_learner import *\r\nfrom policy_based_actor_learner import *\r\nimport math\r\n\r\nlogger = logging_utils.getLogger('main')\r\n\r\ndef generate_epsilon():\r\n \"\"\" 
Generate lower limit for decaying epsilon. \"\"\"\r\n epsilon = {'limits': [0.1, 0.01, 0.5], 'probs': [0.4, 0.3, 0.3]}\r\n return np.random.choice(epsilon['limits'], p=epsilon['probs']) \r\n\r\ndef check_or_create_checkpoint_dir(checkpoint_dir):\r\n \"\"\" Create checkpoint directory if it does not exist \"\"\"\r\n if not os.path.exists(checkpoint_dir):\r\n try:\r\n os.makedirs(checkpoint_dir)\r\n except OSError:\r\n pass\r\n\r\ndef restore_vars(saver, sess, game_name, actor_learner_type, \r\n actor_learner_max_local_steps):\r\n \"\"\" Restore saved net, global score and step, and epsilons OR \r\n create checkpoint directory for later storage. \"\"\"\r\n sess.run(tf.initialize_all_variables())\r\n \r\n checkpoint_dir = 'checkpoints/' + game_name + '/' + \\\r\n {'0': 'Q/', '1': 'sar''sa/', '2': 'a3c/'}[str(actor_learner_type)] + \\\r\n str(actor_learner_max_local_steps) + '_step' + '/'\r\n \r\n check_or_create_checkpoint_dir(checkpoint_dir)\r\n path = tf.train.latest_checkpoint(checkpoint_dir)\r\n if path is None:\r\n return False\r\n else:\r\n saver.restore(sess, path)\r\n return True\r\n\r\ndef save_vars(saver, sess, game_name, actor_learner_type, \r\n actor_learner_max_local_steps, global_step, coord):\r\n \"\"\" Checkpoint shared net params, global score and step, and epsilons. \"\"\"\r\n checkpoint_dir = 'checkpoints/' + game_name + '/' + \\\r\n {'0': 'Q/', '1': 'sarsa/', '2': 'a3c/'}[str(actor_learner_type)] + \\\r\n str(actor_learner_max_local_steps) + '_step' + '/'\r\n \r\n check_or_create_checkpoint_dir(checkpoint_dir)\r\n while not coord.should_stop():\r\n step = sess.run(global_step)\r\n if step % 10000 == 0:\r\n saver.save(sess, checkpoint_dir + 'net-score-step-epsilons', \r\n global_step=step)\r\n\r\ndef get_learning_rate(low, high):\r\n \"\"\" Return LogUniform(low, high) learning rate. \"\"\"\r\n lr = math.exp(random.uniform(math.log(low), math.log(high)))\r\n return lr\r\n\r\ndef main(optimizer_conf, emulator_conf, alg_conf):\r\n \"\"\" Set up the graph, the agents, and run the agents in parallel. 
\"\"\"\r\n num_actions = len(get_num_actions(emulator_conf[\"rom_path\"], \r\n emulator_conf[\"game\"]))\r\n local_replicas = alg_conf[\"local_replicas\"]\r\n num_actor_learners = alg_conf[\"num_actor_learners\"]\r\n actor_learner_type = alg_conf['actor_learner_type']\r\n actor_learner_max_local_steps = alg_conf['max_local_steps']\r\n rescale_rewards = alg_conf['rescale_rewards']\r\n\r\n if actor_learner_type == 0:\r\n Net = QNetwork\r\n if actor_learner_max_local_steps > 1:\r\n Learner = NStepQLearner\r\n else:\r\n Learner = OneStepQLearner\r\n elif actor_learner_type == 1:\r\n Net = QNetwork\r\n if actor_learner_max_local_steps > 1:\r\n print(\"n-step SARSA not implemented!\")\r\n sys.exit()\r\n else:\r\n Learner = OneStepSARSALearner\r\n elif actor_learner_type == 2:\r\n Net = PolicyVNetwork\r\n Learner = A3CLearner\r\n\r\n with tf.Graph().as_default():\r\n sess = tf.Session()\r\n # Instantiate global step on the graph for \r\n # checkpointing before passing to Saver()\r\n global_step = tf.Variable(0, name='global_step', trainable=False)\r\n increase_global_step_op = global_step.assign_add(1)\r\n\r\n conf_shared_nw = {'name': \"shared_network\",\r\n 'optimizer_conf': optimizer_conf,\r\n 'shared_network': None,\r\n 'local_replicas': local_replicas,\r\n 'num_act': num_actions,\r\n 'actor_learner_type': actor_learner_type,\r\n 'global_step': global_step}\r\n shared_network = Net(conf_shared_nw)\r\n\r\n if actor_learner_type == 0 or actor_learner_type == 1:\r\n conf_target_nw = {'name': \"target_network\",\r\n 'optimizer_conf': optimizer_conf,\r\n 'shared_network': shared_network,\r\n 'local_replicas': local_replicas,\r\n 'num_act': num_actions,\r\n 'actor_learner_type': actor_learner_type,\r\n 'global_step': global_step}\r\n target_network = Net(conf_target_nw)\r\n elif actor_learner_type == 2:\r\n target_network = None\r\n\r\n network_replicas = []\r\n for i in xrange(num_actor_learners):\r\n if local_replicas:\r\n conf_shared_nw_replica = {'name': \"local_replica_network_{}\".format(i),\r\n 'optimizer_conf': optimizer_conf,\r\n 'shared_network': shared_network,\r\n 'local_replicas': local_replicas,\r\n 'num_act': num_actions,\r\n 'actor_learner_type': actor_learner_type,\r\n 'global_step': global_step}\r\n network_replicas.append(Net(conf_shared_nw_replica))\r\n else:\r\n network_replicas.append(shared_network)\r\n\r\n # Instantiate score and epsilon variables on the graph for \r\n # checkpointing before passing to Saver()\r\n # Global score\r\n global_score_placeholder = tf.placeholder(tf.int64)\r\n global_score = tf.Variable(tf.cast(0, tf.int64), \r\n name='max_global_score', trainable=False)\r\n update_global_score_op = global_score.assign(global_score_placeholder)\r\n global_score_summary = tf.scalar_summary(\"Global score\", global_score)\r\n \r\n # Thread scores\r\n thread_score_placeholders = [tf.placeholder(tf.int64) \r\n for i in xrange(num_actor_learners)]\r\n thread_scores = [tf.Variable(tf.cast(0, tf.int64), \r\n name='max_thread_' + str(i) + '_score', trainable=False) \r\n for i in xrange(num_actor_learners)]\r\n update_thread_score_ops = [\r\n thread_scores[i].assign(thread_score_placeholders[i]) \r\n for i in xrange(num_actor_learners)]\r\n\r\n # Exploration epsilons\r\n thread_epsilon_placeholders = [tf.placeholder(tf.float32) \r\n for i in xrange(num_actor_learners)]\r\n thread_epsilons = [tf.Variable(tf.cast(1.0, tf.float32), \r\n name='thread_' + str(i) + '_epsilon', trainable=False) \r\n for i in xrange(num_actor_learners)]\r\n thread_epsilon_limits = 
[tf.Variable(tf.cast(generate_epsilon(), \r\n tf.float32), name='thread_' + str(i) + '_epsilon_limits', \r\n trainable=False) for i in xrange(num_actor_learners)]\r\n update_thread_epsilon_ops = [\r\n thread_epsilons[i].assign(thread_epsilon_placeholders[i]) \r\n for i in xrange(num_actor_learners)]\r\n\r\n if actor_learner_type == 0 or actor_learner_type == 1:\r\n var_list = shared_network.params + target_network.params \r\n var_list.extend(thread_epsilons)\r\n var_list.extend(thread_epsilon_limits)\r\n elif actor_learner_type == 2:\r\n var_list = shared_network.actor_params + \\\r\n shared_network.critic_params\r\n\r\n var_list.append(global_step)\r\n var_list.append(global_score)\r\n var_list.extend(thread_scores)\r\n\r\n # Reward rescaling\r\n if rescale_rewards:\r\n thread_max_reward_placeholders = [tf.placeholder(tf.float32)\r\n for i in xrange(num_actor_learners)]\r\n thread_max_rewards = [tf.Variable(tf.cast(1.0, tf.float32),\r\n name='thread_' + str(i) + '_r_max', trainable=False)\r\n for i in xrange(num_actor_learners)]\r\n update_thread_max_reward_ops = [\r\n thread_max_rewards[i].assign(thread_max_reward_placeholders[i])\r\n for i in xrange(num_actor_learners)]\r\n var_list.extend(thread_max_rewards)\r\n \r\n saver = tf.train.Saver(var_list=var_list, max_to_keep=10, \r\n keep_checkpoint_every_n_hours=2)\r\n\r\n # Initialise if checkpoint does not exist\r\n restore_vars(saver, sess, emulator_conf[\"game\"], actor_learner_type, \r\n actor_learner_max_local_steps) \r\n \r\n # Merge summaries and initialize writer \r\n summary_op = tf.merge_all_summaries() \r\n summary_writer = tf.train.SummaryWriter(\r\n \"/tmp/summary_logs/{}\".format(int(time.time())), sess.graph_def)\r\n \r\n # Thread coordinator\r\n coord = tf.train.Coordinator()\r\n \r\n # Checkpoint thread\r\n saving = Thread(target=save_vars, \r\n args=(saver, sess, emulator_conf[\"game\"], actor_learner_type, \r\n actor_learner_max_local_steps, global_step, coord))\r\n\r\n summary_conf = {'summary_op': summary_op, \r\n 'summary_writer': summary_writer,\r\n 'global_score_summary': global_score_summary}\r\n\r\n alg_conf['shared_network'] = shared_network\r\n alg_conf['target_network'] = target_network\r\n alg_conf['global_step'] = global_step\r\n alg_conf['increase_global_step_op'] = increase_global_step_op\r\n alg_conf['global_score'] = global_score\r\n alg_conf['global_score_placeholder'] = global_score_placeholder\r\n alg_conf['update_global_score_op'] = update_global_score_op\r\n\r\n alg_conf['lock'] = Lock()\r\n\r\n visualize = emulator_conf[\"visualize\"]\r\n if (visualize == 2): emulator_conf[\"visualize\"] = 0 \r\n actor_learners = []\r\n for i in xrange(num_actor_learners):\r\n if (visualize == 2) and (i == num_actor_learners - 1):\r\n emulator_conf[\"visualize\"] = 1\r\n \r\n alg_conf['actor_id'] = i\r\n alg_conf['epsilon'] = thread_epsilons[i]\r\n alg_conf['epsilon_limit'] = thread_epsilon_limits[i]\r\n alg_conf['epsilon_placeholder'] = thread_epsilon_placeholders[i]\r\n alg_conf['update_thread_epsilon_op'] = update_thread_epsilon_ops[i]\r\n alg_conf['thread_score'] = thread_scores[i]\r\n alg_conf['thread_score_placeholder'] = thread_score_placeholders[i]\r\n alg_conf['update_thread_score_op'] = update_thread_score_ops[i]\r\n\r\n if rescale_rewards:\r\n alg_conf['thread_max_reward'] = thread_max_rewards[i]\r\n alg_conf['thread_max_reward_placeholder'] = \\\r\n thread_max_reward_placeholders[i]\r\n alg_conf['update_thread_max_reward_op'] = \\\r\n update_thread_max_reward_ops[i]\r\n \r\n alg_conf['local_network'] = 
network_replicas[i]\r\n\r\n actor_learners.append(Learner(sess, optimizer_conf, emulator_conf, \r\n alg_conf, summary_conf))\r\n\r\n saving.start()\r\n\r\n for t in actor_learners:\r\n t.start()\r\n\r\n for t in actor_learners:\r\n t.join()\r\n logger.debug('All training threads finished')\r\n\r\n coord.request_stop()\r\n logger.debug('All threads stopped')\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n # Visualize can take 3 values:\r\n # 0: no visualization of emulator\r\n # 1: all emulators, for all actors, are visualized\r\n # 2: only 1 emulator (for one of the actors) is visualized\r\n \r\n _exit = False\r\n optimizer_conf = {}\r\n emulator_conf = {}\r\n alg_conf = {}\r\n if len(sys.argv) == 23:\r\n emulator_conf[\"game\"] = sys.argv[1]\r\n emulator_conf[\"rom_path\"] = sys.argv[2] # \"../atari_roms\"\r\n emulator_conf[\"visualize\"] = int(sys.argv[3])\r\n optimizer_conf[\"type\"] = sys.argv[4]\r\n optimizer_conf[\"mode\"] = sys.argv[5]\r\n optimizer_conf[\"base_learning_rate\"] = float(sys.argv[6])\r\n optimizer_conf[\"clip_delta\"] = float(sys.argv[7])\r\n optimizer_conf['lr_decay_step'] = int(sys.argv[8])\r\n optimizer_conf['lr_decay_rate'] = float(sys.argv[9])\r\n optimizer_conf['lr_staircase'] = {'True': True, 'False': False}[sys.argv[10]]\r\n optimizer_conf['entropy_regularisation_strength'] = float(sys.argv[11])\r\n optimizer_conf['clip_norm'] = float(sys.argv[12])\r\n optimizer_conf['clip_norm_type'] = sys.argv[13]\r\n alg_conf['actor_learner_type'] = {'Q': 0, 'sarsa': 1, 'a3c': 2}[sys.argv[14]]\r\n alg_conf['num_actor_learners'] = int(sys.argv[15])\r\n alg_conf['gamma'] = float(sys.argv[16])\r\n alg_conf['q_target_update_steps'] = int(sys.argv[17])\r\n alg_conf['grads_update_steps'] = int(sys.argv[18])\r\n alg_conf['max_global_steps'] = int(sys.argv[19])\r\n alg_conf['max_epsilon_annealing_steps'] = int(sys.argv[20])\r\n alg_conf['local_replicas'] = {'True': True, 'False': False}[sys.argv[21]]\r\n alg_conf['max_local_steps'] = int(sys.argv[22])\r\n alg_conf['rescale_rewards'] = {'True': True, 'False': False}[sys.argv[23]]\r\n elif len(sys.argv) == 4:\r\n emulator_conf[\"game\"] = sys.argv[1]\r\n emulator_conf[\"rom_path\"] = sys.argv[2] # \"../atari_roms\"\r\n emulator_conf[\"visualize\"] = int(sys.argv[3])\r\n optimizer_conf[\"type\"] = \"rmsprop\"\r\n optimizer_conf[\"mode\"] = \"shared\"\r\n optimizer_conf[\"base_learning_rate\"] = get_learning_rate(10**-4, 10**-2)\r\n optimizer_conf[\"clip_delta\"] = 1.0\r\n optimizer_conf['lr_decay_step'] = 100000\r\n optimizer_conf['lr_decay_rate'] = 0.95\r\n optimizer_conf['lr_staircase'] = False\r\n optimizer_conf['entropy_regularisation_strength'] = 0.01\r\n optimizer_conf['clip_norm'] = 40.0 # Max gradient norm for clipping\r\n optimizer_conf['clip_norm_type'] = 'global' # global/local/ignore\r\n alg_conf['actor_learner_type'] = {'Q': 0, 'sarsa': 1, 'a3c': 2}['Q']\r\n alg_conf['num_actor_learners'] = 16\r\n alg_conf['gamma'] = 0.99\r\n alg_conf['q_target_update_steps'] = 10000 # 40000 frames / 4\r\n alg_conf['grads_update_steps'] = 5\r\n alg_conf['max_global_steps'] = 2147483647\r\n alg_conf['max_epsilon_annealing_steps'] = 1000000 # 4 million frames / 4\r\n alg_conf['local_replicas'] = False # Must be True for n-step and a3c\r\n alg_conf['max_local_steps'] = 1 # The n in n-step\r\n alg_conf['rescale_rewards'] = False\r\n else:\r\n print(\"You must provide at least 3 arguments! 
Try:\\n\" \\\r\n \"python main.py ../atari_roms/ 1\")\r\n _exit = True\r\n \r\n if not _exit:\r\n main(optimizer_conf, emulator_conf, alg_conf)\r\n","sub_path":"algorithms/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"329411918","text":"import kivy\nimport urllib\nkivy.require('1.7.1')\n\nfrom kivy.app import App\nfrom kivy.network.urlrequest import UrlRequest\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.properties import ListProperty, BooleanProperty, StringProperty, ObjectProperty\nfrom kivy.uix.stacklayout import StackLayout\nfrom kivy.core.audio import SoundLoader\n\n\nclass LoginScreen(GridLayout):\n\n def zaloguj_callback(self, instance):\n if self.adres.text and self.nazwa.text:\n a = 'http://' + self.adres_domofonu.text + '/login'\n params = urllib.urlencode({'name': self.nazwa.text, 'address': self.adres.text})\n try:\n urllib.urlopen(a, data = params)\n except:\n pass\n\n def wyloguj_callback(self, instance):\n a = 'http://' + self.adres_domofonu.text + '/logout'\n params = urllib.urlencode({'name': self.nazwa.text, 'address': self.adres.text})\n try:\n urllib.urlopen(a, data = params)\n except:\n pass\n\n\n def __init__(self, **kwargs):\n super(LoginScreen, self).__init__(**kwargs)\n self.cols = 2\n\n self.add_widget(Label(text='Nazwa'))\n\n self.nazwa = TextInput(multiline=False, font_size=40)\n self.add_widget(self.nazwa)\n\n self.add_widget(Label(text='Adres'))\n\n self.adres = TextInput(multiline=False, font_size=40)\n self.add_widget(self.adres)\n\n self.add_widget(Label(text='Adres domofonu'))\n\n self.adres_domofonu = TextInput(multiline=False, font_size=40)\n self.add_widget(self.adres_domofonu)\n\n self.zaloguj_button = Button(text='Zaloguj')\n self.zaloguj_button.bind(on_press=self.zaloguj_callback)\n self.add_widget(self.zaloguj_button)\n\n self.wyloguj_button = Button(text='Wyloguj')\n self.wyloguj_button.bind(on_press=self.wyloguj_callback)\n self.add_widget(self.wyloguj_button)\n\n\n\nclass MyApp(App):\n\n def build(self):\n return LoginScreen()\n\n\nif __name__ == '__main__':\n MyApp().run()\n","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"608315288","text":"# Challenge 041\r\n\r\n\"\"\" Ask the user to enter their name and a number. If the number is less than 10, then display\r\ntheir name that number of times; otherwise display the message \"Too high\" three times. 
\"\"\"\r\n\r\nname = input('Enter your name:\\n>> ')\r\nnumber = int(input('Enter a number:\\n>> '))\r\n\r\nif number < 10:\r\n for i in range(0, number):\r\n print(number)\r\n\r\nelse:\r\n for i in range(1, 4):\r\n print(\"Too high\")\r\n","sub_path":"ex041.py","file_name":"ex041.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"275929712","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, ValidationError\nfrom datetime import datetime\nfrom odoo.addons import decimal_precision as dp\n\n\n\nclass InterCompanyTransferLine(models.Model):\n \n _name = \"inter.company.transfer.line.ept\"\n _description = \"Internal Company Transfer Line\"\n \n \n @api.depends( 'inter_transfer_id.picking_ids.state')\n def _get_delivered_qty(self):\n for line in self:\n if line.inter_transfer_id.state in ['processed']:\n qty_delivered = 0.0\n for picking_id in line.inter_transfer_id.picking_ids:\n if picking_id.state != 'cancel':\n for move_line in picking_id.move_ids_without_package:\n if line.product_id == move_line.product_id:\n if picking_id.picking_type_id.code == 'incoming':\n qty_delivered += move_line.product_id.uom_id._compute_quantity(move_line.quantity_done, move_line.product_id.uom_id)\n line.qty_delivered = qty_delivered\n else:\n line.qty_delivered = 0.0\n \n \n quantity = fields.Float(string=\"Quantity\", default=1.0)\n qty_delivered = fields.Float(compute='_get_delivered_qty', string='Delivered Quantity', store=True, readonly=True, digits=dp.get_precision('Product Unit of Measure'))\n price = fields.Float(string='Price', compute=\"default_price_get\", store=True)\n net_price = fields.Float(string='Net Price', compute=\"default_price_get\", store=True)\n subtotal = fields.Float(string='Sub-Total', compute=\"_compute_subtotal\")\n subtotal_with_taxes = fields.Float(string='Sub-Total (w/taxes)', compute=\"_compute_subtotal\")\n product_id = fields.Many2one('product.product', string='Product')\n inter_transfer_id = fields.Many2one('inter.company.transfer.ept')\n product_uom_id = fields.Many2one('uom.uom', string=\"Unidad de Medida\", related='product_id.uom_id')\n x_uom_id = fields.Many2one('uom.uom', string=\"Unidad de Medida\")\n\n\n @api.depends('product_id', 'inter_transfer_id')\n def default_price_get(self):\n \"\"\"\n Get the Product Price\n \"\"\"\n for record in self:\n if record.product_id and record.inter_transfer_id.state:\n # Always get the product data (price) from root company (Grupo Abarrotero Guerrerense)\n if record.inter_transfer_id.state in 'draft':\n product_id = record.with_context(force_company=1).product_id\n\n record.price = product_id.base_imponible_costo\n record.net_price = product_id.standard_price\n else:\n record.price = record.net_price = 0.0\n\n @api.depends('quantity', 'price')\n def _compute_subtotal(self):\n for line in self:\n line.subtotal = line.quantity * line.price\n line.subtotal_with_taxes = line.quantity * line.net_price\n","sub_path":"addons/intercompany_transaction_ept/models/inter_company_transfer_line_ept.py","file_name":"inter_company_transfer_line_ept.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"580216665","text":"import serial, time, datetime\nimport RPi.GPIO as GPIO\nimport serial.tools.list_ports\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.OUT)\nGPIO.output(4, True) \ntime.sleep(2)\nGPIO.output(4, 
False)\ntime.sleep(1)\nGPIO.output(4, True)\ntime.sleep(10)\n\nports=list(serial.tools.list_ports.comports())\nport_no = ports[0][0]\nprint(\"intentando abrir %s\" %port_no)\narduino = serial.Serial(port_no, 9600,timeout=1)\nprint(\"Puerto abierto y leido\")\n \nfrom twython import Twython\nfrom auth import (\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\n\ntwitter = Twython(\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\n\nwhile True:\n x=datetime.datetime.now()\n try:\n read_serial=arduino.readline()\n print(\"\\nLeí bien\")\n numero = int(read_serial)\n print(str(numero))\n print(\"la hora es,\", x)\n\n message1=\"Que temperatura tan agradable %s:%s:%s\" %(x.hour, x.minute, x.second)\n message2=\"Tengo calor %s:%s:%s\" %(x.hour, x.minute, x.second)\n message3=\"Me estoy asando. En Mordor se estaria mejor %s:%s:%s\" %(x.hour, x.minute, x.second)\n message4=\"Que fresquito hace %s:%s:%s\" %(x.hour, x.minute, x.second)\n message5=\"GUANTES Y BUFANDAS A MI, ME CONGELO %s:%s:%s\" %(x.hour, x.minute, x.second)\n message6=\"Me ahogo, mi amo pretende asesinarme @policia %s:%s:%s\" %(x.hour, x.minute, x.second)\n message7=\"Necesitaria un poco de agua %s:%s:%s\" %(x.hour, x.minute, x.second)\n message8=\"Estoy seca, no estoy preparada para vivir en el desierto. %s:%s:%s\" %(x.hour, x.minute, x.second)\n message9=\"Estoy en condiciones perfectas de la muerte para hacer la fotosintesis, nos leemos luego. %s:%s:%s\" %(x.hour, x.minute, x.second)\n\n if numero==1 :\n print(\"Tuiteando \", message1)\n twitter.update_status(status=message1) \n elif numero==2: \n twitter.update_status(status=message2)\n elif numero==3 :\n twitter.update_status(status=message3) \n elif numero==4 :\n twitter.update_status(status=message4)\n elif numero==5 :\n twitter.update_status(status=message5) \n elif numero==6 :\n twitter.update_status(status=message6)\n elif numero==7 : \n twitter.update_status(status=message7) \n elif numero==8 :\n twitter.update_status(status=message8)\n elif numero==9 :\n print(\"Intentando tuitear msg 9\");\n twitter.update_status(status=message9)\n print(\"Tuiteando \", message9)\n else:\n print(\"ERROR\")\n \n print(\"Tuit enviado\")\n except:\n print(\"Error conectándome a la planta\")\n # arduino.close()\n","sub_path":"tuit.py","file_name":"tuit.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"525093699","text":"#!/usr/bin/env python\n#\n# Copyright 2013 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__author__ = 'Simone Campagna'\n\nfrom .field import *\nfrom .scalar import *\n\nclass FreeList(Sequence):\n def append(self, item):\n item = self.validate_value(item)\n self.new_item().set(item)\n\n def new_item(self):\n self._items.append(self.create_new_item(len(self._items)))\n item = self._items[-1]\n self.send_update()\n return item\n\n def delete(self, index):\n del self._items[index]\n 
self.send_update()\n\n def tostream(self, stream):\n stream.write(\"{0}\\n\".format(len(self)))\n for item in self._items:\n self._stream_put(stream, item)\n \n def frominput(self, istream, caller_trace=None, name=None):\n size_caller_trace = CallerTrace(caller_trace, CallerInfo(self, name + '.'))\n size = istream.get_value(int, size_caller_trace)\n caller_trace = CallerTrace(caller_trace, CallerInfo(self, name))\n self._suspend_update()\n del self._items[:]\n for i in xrange(size):\n new_item = self._new_item()\n new_item.frominput(istream, caller_trace, self._get_item_name(i, size))\n self._resume_update()\n\n @classmethod\n @abc.abstractmethod\n def _dumpclassarraytype(cls):\n return '[*]'\n\nclass StrFreeList(FreeList):\n __type__ = Str\n\nclass IntFreeList(FreeList):\n __type__ = Int\n\nclass FloatFreeList(FreeList):\n __type__ = Float\n\n","sub_path":"structparser/free_list.py","file_name":"free_list.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"423135339","text":"import string\nuser_input = str(input('Enter A Word To See If It Is A Palindrome? > '))\n\n#1. Reverse Word and Check To See If Palindrome\ndef check_pal(word):\n reversed_word = list(reversed(word))\n join_word = ''.join(reversed_word)\n return (join_word == word)\n\n#Call Function To Reverse Word and Check if Palindrome\nprint(f'Your Word is a Palindrome? {check_pal(user_input)}')\n\n\n","sub_path":"code/chad/python/lab_17/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"247224839","text":"from django.urls import path\nfrom .views import EditLiquorHome,EditLiquorSales,LiquorHomepage, LiquorSalespage,LiquorContacts,DeleteSaleItem\n\n\nurlpatterns = [\n\n path(\"Edithome/\",EditLiquorHome,name=\"editliquorhome\"),\n path(\"Editsales/\",EditLiquorSales,name=\"editliquorsales\"),\n path(\"Deletesale\",DeleteSaleItem,name=\"deletesale\"),\n\n path(\"\",LiquorHomepage,name=\"liquorhome\"),\n path(\"/home/\",LiquorHomepage,name=\"liquorhome\"),\n path(\"/sales/\",LiquorSalespage,name=\"liquorsales\"),\n path(\"/contacts/\",LiquorContacts,name=\"liquorcontacts\"),\n\n\n\n\n\n\n\n\n]\n","sub_path":"LiquorStore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"73000223","text":"import requests\nimport bs4\nimport re\nimport os\nimport os.path\nimport lxml\nimport time\nfrom os import path\nimport sys\nimport time\nimport datetime\n\ndef get_soup(current_link, s):\n res = requests.get(current_link)\n result = re.search(s, res.text)\n\n if result or s == 'skip':\n return bs4.BeautifulSoup(res.text, 'lxml')\n else:\n return None\n\n\n\ndef is_ecommerce(shit):\n \"\"\"test_list = ['price', 'tk', 'taka', 'order', 'delivery', 'bkash', 'rocket', 'nagad', 'courier',\n 'প্রোডাক্ট', 'ডেলিভারি', 'অর্ডার', 'বিকাশ', 'দাম', 'টাকা']\"\"\"\n\n test_list = ['price', 'order', 'delivery', 'bkash', 'rocket', 'nagad', 'courier', 'Price', 'Order', 'Delivery',\n 'Bkash', 'Rocket', 'Nagad', 'Courier', 'ডেলিভারি', 'অর্ডার']\n condition = 0\n\n for item in test_list:\n if re.search(r'\\b%s\\b' % item, shit.text):\n #print(\"---\" + item + \"---\")\n # print(shit.text)\n condition += 1\n break\n\n return condition\n\n\ndef is_jobsite(shit):\n test_list = [\"Hiring\", \"hiring\", \"job\", \"Job\", 
\"employee\", \"Employee\", \"Employer\", \"chakri\", \"Chakri\", \"Internship\",\n \"internship\", \"চাকরি\", \"Salary\", \"salary\", \"চাকরির বিজ্ঞপ্তি\", \"job circular\", \"Job circular\",\n \"Job Circular\", \"নিয়োগ বিজ্ঞপ্তি\", \"Job Opportunity\", \"Job opportunity\", \"job opportunity\"]\n # condition = '7,957' in shit.text\n # condition = any(ele in shit.text.lower() for ele in test_list)\n # condition = False\n condition = 0\n\n for item in test_list:\n if re.search(r'\\b%s\\b' % item, shit.text):\n # print(\"---\" + item + \"---\")\n # print(shit.text)\n condition += 1\n break\n return condition\n\n\ndef getExistence(link):\n keyword = link.split('.com/')[1]\n link = \"https://www.facebook.com/pg/\" + keyword + \"/posts/\"\n soup = get_soup(link, 'skip')\n if soup is not None:\n # flag = False\n total = 0\n # for shit in soup.find_all('div', class_=True):\n for shit in soup.find_all('div'):\n condition = is_ecommerce(shit)\n condition2 = is_jobsite(shit)\n total = max(total, condition)\n if condition2 != 0:\n print(\"Its a Job Site\")\n return 0\n\n if condition != 0:\n print(\"Yaa Its a E-commerce Site.\")\n return 1\n\n print(\"Its neither e-commerce nor job site\")\n return 0\n\n\nstartGlobal = 8455\nif __name__ == '__main__':\n with open(\"onlyEcommercePageLinks.txt\", \"a\", 2, encoding=\"utf-8\")as onlyEcommercePageLinks:\n Input = open(\"linksGotFromYusuf.txt\", \"r\", encoding='utf-8')\n TemporaryInput = open(\"onlyEcommercePageLinks.txt\", \"r\", encoding='utf-8')\n List = Input.readlines()\n TemporaryList = TemporaryInput.readlines()\n start = startGlobal\n end = len(List)\n # Set.update(List)\n # if len(List) > start + 1000:\n # end = start + 1000\n List = List[start: end]\n temp = startGlobal\n success_count = 0\n for oneLine in List:\n temp += 1\n print(str(temp) + \".\" + oneLine)\n if oneLine.strip() in TemporaryList or oneLine in TemporaryList:\n continue\n oneLine = oneLine.split(\"\\n\")[0]\n\n c = getExistence(oneLine)\n if c != 1:\n continue\n success_count += c\n print(success_count, \"(Success)/ \" + str(temp-startGlobal) + \"(Total)\")\n sys.stdout.flush()\n onlyEcommercePageLinks.write(str(oneLine) + \"\\n\")\n onlyEcommercePageLinks.flush()\n os.fsync(onlyEcommercePageLinks.fileno())\n\n TemporaryInput.close()\n Input.close()\n onlyEcommercePageLinks.close()\n\n","sub_path":"filter_ecommerce_links.py","file_name":"filter_ecommerce_links.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"179757092","text":"# -*- Python -*-\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Jiao Lin\n# California Institute of Technology\n# (C) 2006-2011 All Rights Reserved\n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\nimport luban\n\nfrom ....DemoPanelActor import Actor as base\nclass Actor(base):\n\n title='A form'\n description = [\n \"This is a form with a few typical form controls.\",\n \"You can change the values of these form controls, \",\n \"and then submit the form.\",\n ]\n rank = 0\n \n\n def createDemoPanel(self, **kwds):\n # form\n form = luban.e.form(title='test form', id='test-form')\n\n # fields\n # .. check boxes\n checkbox1 = form.checkbox(\n name='boolvar1', label='check box 1', checked=False)\n\n checkbox2 = form.checkbox(\n name='boolvar2', label='check box 2', checked=True)\n\n # .. 
radio box\n choices = ['one', 'two', 'three']\n # entries are a list of (value, description)\n # in simple cases, value could be equal to description\n entries = [(v,v) for v in choices]\n rad = form.radio(\n id='radiofield', label='radio', name='radiovar',\n entries=entries, selection='two',\n )\n\n # .. selector\n choices = ['alpha', 'beta', 'gamma']\n # entries are a list of (value, description)\n # in simple cases, value could be equal to description\n entries = [(v,v) for v in choices]\n sel = form.selector(\n label='selector', name='selectorvar',\n entries=entries,\n selection='gamma')\n\n # .. text field\n textfield = form.text(label='textfield', name='textvar')\n \n # .. text field for an integer\n intfield = form.text(label='input an integer', name='integervar', value=\"abc\")\n \n # .. text area\n textarea = form.textarea(label='textarea', name='textareavar')\n \n # .. password field\n pwfield = form.password(label='password', name='password')\n\n # submit button\n submit = form.submitbutton(label='submit')\n\n # action when form is submitted\n # since the event handler \"onsubmit\" is reponsible for the submssion\n # event, \"luban.event.data\" refers to the form data.\n form.onsubmit = luban.a.load(\n actor = self.name,\n routine = 'process',\n kwds = luban.event.data,\n )\n\n return form\n\n \n @luban.decorators.formprocesser('test-form')\n def process(self, integervar: luban.decorators.int=None, **kwds):\n \"\"\"\n This event handler process the form inputs.\n See how the input error is handled by the combination of \n input validator in function annotation and \n input-error-action generation in function decorator.\n \"\"\"\n kwds['integervar'] = integervar\n msg = \"submitted: %s\" % (kwds,)\n alert = luban.a.alert(msg)\n \n doc = luban.e.document(title=\"submitted values\")\n for k,v in kwds.items():\n doc.paragraph(text='* %s=%s' % (k,v))\n continue\n refresh = luban.a.select(id='test-form').replaceBy(newelement=doc)\n \n return [alert, refresh]\n\n\n# End of file \n","sub_path":"timber/aokuang.timber/aokuang.timber/actors/form/pyv3_full.py","file_name":"pyv3_full.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"435240822","text":"import numpy as np\nimport glob\nfrom PIL import Image\n\n\nROOT = '/media/DATA/UnrealLandingDataset/AirSimCollectData/'\nOUT_DIR = ROOT + '/results/'\n\ndef get_seg2rgb_map(cmap_file, number_of_classes):\n f = open(cmap_file, 'r')\n all_rows = f.read().splitlines()\n seg2rgb_map = {}\n for row in all_rows:\n seg = row.split('\\t')[0]\n rgb = list(map(lambda x: int(x)/255, row.split('\\t')[1][1:-1].split(',')))\n rgb.append(1)\n seg2rgb_map[int(seg)] = rgb\n\n cmap_list = list(seg2rgb_map.values())[:number_of_classes]\n return cmap_list, seg2rgb_map\n\ndef seg2rgb(seg, seg2rgb_map):\n '''\n :Params::\n seg: integer image with size (W,H)\n seg2rgb_map: a dictionary mappign interger classes to RGB values\n :Return::\n seg_rgb: an RGB image showing segmentation, (W,H,3)\n '''\n seg_rgb = np.array(seg.shape[0], seg.shape[1], 3)\n all_classes = np.unique(seg)\n for i in all_classes:\n seg_rgb[np.where(seg == i)] = seg2rgb_map[i]\n \n return seg_rgb\n\ndef save_pred_results(segmentation, output_dir, cmap_list):\n cmap = mpl.colors.LinearSegmentedColormap.from_list('unreal roof cmap', cmap_list, number_of_classes)\n # define the bins and normalize\n bounds = np.linspace(0,number_of_classes,number_of_classes+1)\n norm = mpl.colors.BoundaryNorm(bounds, 
number_of_classes)\n\n\n fig, (ax1, ax2) = plt.subplots(figsize=(16, 8), nrows=2, ncols=1)\n\n ax2.imshow(segmentation,cmap=cmap, norm=norm)\n ","sub_path":"utils/seg2rgb.py","file_name":"seg2rgb.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"69525109","text":"import threading\nimport time\nimport queue\n\nclass Worker(threading.Thread):\n def __init__(self, num, queue):\n threading.Thread.__init__(self)\n self.num = num\n self.queue = queue\n\n def run(self):\n while self.queue.qsize() > 0:\n job = self.queue.get()\n print('Worker %d: %s' % (self.num, job))\n if (self.num == 1):\n time.sleep(1)\n else:\n time.sleep(0.3)\n\n\nmy_queue = queue.Queue()\nfor i in range(10):\n my_queue.put('Data %d' % i)\n\nworker1 = Worker(1, my_queue)\nworker2 = Worker(2, my_queue)\n\nworker1.start()\nworker2.start()","sub_path":"yuanta_python3-master/lesson13/Demo6_queue.py","file_name":"Demo6_queue.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"485967673","text":"# -*- coding:utf-8 -*-\n\nimport datetime\nfrom math import sqrt, fabs, ceil, floor\nimport os\nimport numpy as np\n\n\nclass Diamond4(object):\n diamond = 4\n\n def __init__(self, data_frame, data_parameters=None):\n\n if data_parameters is None and os.path.isfile(data_frame):\n with open(data_frame, 'r') as f:\n data_raw = [word for line in f.readlines() if line[:-1].strip()\n for word in line.split()] # 去除空行读入,将原文件分割成一维字符串数组\n\n self.doc = data_raw[2] # .decode('gbk') # 说明字符串\n\n # 日期时间处理\n (year, # 年\n self.month, # 月\n self.day, # 日\n self.hour, # 时\n self.valid_period, # 时效\n self.level) = data_raw[3:9] # 层次\n\n if len(year) == 2:\n year = ('20' + year) if int(year) < 49 else ('19' + year)\n elif len(year) == 4:\n pass\n else:\n raise Exception('year parameter error!')\n\n self.year = year\n\n # 注意start_time和valid_time没有统一规定,要看具体情况\n self.start_time = datetime.datetime(int(year), int(self.month), int(self.day), int(self.hour))\n self.valid_time = self.start_time + datetime.timedelta(hours=int(self.valid_period))\n\n (self.size_lon, # 经度(x方向)格距, 一般为正\n self.size_lat, # 纬度(y方向)格距,有正负号\n self.lon_start, # 起始经度\n self.lon_end, # 终止经度\n self.lat_start, # 起始纬度\n self.lat_end) = (float(i) for i in data_raw[9:15]) # 终止纬度\n\n (self.cols, # 纬向(x方向)格点数目,即列数\n self.rows) = (int(i) for i in data_raw[15:17]) # 经向(y方向)格点数目,即行数\n\n (self.contour_interval, # 等值线间隔\n self.contour_start, # 等值线起始值\n self.contour_end, # 等值线终止值\n self.smooth, # 平滑系数\n self.bold_line) = (float(i) for i in data_raw[17:22]) # 加粗线值\n\n # 数据部分,以一维数组表示\n self.data = [float(i) for i in data_raw[22:]]\n\n # 将数据的一些属性参数集合到一个字典中\n self.parameters = {'doc': self.doc, 'year': self.year, 'month': self.month, 'day': self.day,\n 'hour': self.hour, 'valid_period': self.valid_period, 'level': self.level,\n 'start_time': self.start_time, 'valid_time': self.valid_time,\n 'size_lon': self.size_lon,\n 'size_lat': self.size_lat, 'lon_start': self.lon_start, 'lon_end': self.lon_end,\n 'lat_start': self.lat_start, 'lat_end': self.lat_end, 'cols': self.cols,\n 'rows': self.rows,\n 'contour_interval': self.contour_interval, 'contour_start': self.contour_start,\n 'contour_end': self.contour_end, 'smooth': self.smooth, 'bold_line': self.bold_line}\n del data_raw\n\n elif data_parameters is not None and isinstance(data_frame, (np.ndarray, list)):\n\n self.data = data_frame if isinstance(data_frame, list) 
else data_frame.flatten().tolist()\n self.parameters = data_parameters\n for key in data_parameters:\n exec('self.' + key + '=' + repr(data_parameters[key]))\n\n else:\n raise Exception('input parameters error!')\n\n def __getitem__(self, index):\n if isinstance(index, tuple) and len(index) == 2:\n return self.value(index[0], index[1])\n else:\n raise Exception('indexing error!')\n\n def __sub__(self, other):\n if self.rows == other.rows and self.cols == self.cols:\n return [x - y for x, y in zip(self.data, other.data)]\n\n def value(self, row, col):\n '''将格点数据看成self.cols*self.nums_lat的二维数组,返回第row行,第col列的值,\n row和col必须为整数,从0开始计数,坐标原点在左上角'''\n if row < 0 or row >= self.rows or col < 0 or col >= self.cols:\n raise Exception('out of data spatial range')\n return self.data[row * self.cols + col]\n\n def IDW(self, lon_lat_s, power=2):\n \"\"\"\n 反距离加权法提取站点数据\n :param lon_lat_s: 以[(lon1,lat1),(lon2,lat2),……]形式传入的一系列站点位置,经纬度必须是弧度形式\n :param power:\n :return: 对应站点位置的插值结果列表\n \"\"\"\n extracted_values = []\n for lon, lat in lon_lat_s:\n # 根据目标位置经纬度计算其周围四个格点在二维数组中的起始和终止行列号\n col_beg = int(fabs((lon - self.lon_start) / self.size_lon))\n row_beg = int(fabs((lat - self.lat_start) / self.size_lat))\n col_end = col_beg + 1\n row_end = row_beg + 1\n\n # 计算包围目标位置的经纬度范围,即起始和终止行列号的对应经纬度,行号与纬度对应,列号与经度对应\n lon_beg = self.lon_start + self.size_lon * col_beg\n lon_end = self.lon_start + self.size_lon * col_end\n lat_beg = self.lat_start + self.size_lat * row_beg\n lat_end = self.lat_start + self.size_lat * row_end\n\n # 根据目标位置与周围四个格点的经纬度距离计算权重\n w1 = 1.0 / (sqrt((lon_beg - lon) ** 2 + (lat_beg - lat) ** 2)) ** power\n w2 = 1.0 / (sqrt((lon_beg - lon) ** 2 + (lat_end - lat) ** 2)) ** power\n w3 = 1.0 / (sqrt((lon_end - lon) ** 2 + (lat_beg - lat) ** 2)) ** power\n w4 = 1.0 / (sqrt((lon_end - lon) ** 2 + (lat_end - lat) ** 2)) ** power\n\n # 目标位置周围四个格点的值\n d1 = self.value(row_beg, col_beg)\n d2 = self.value(row_end, col_beg)\n d3 = self.value(row_beg, col_end)\n d4 = self.value(row_end, col_end)\n\n # 根据反距离加权计算最终值,注意权重与格点要一一对应\n z = (d1 * w1 + d2 * w2 + d3 * w3 + d4 * w4) / (w1 + w2 + w3 + w4)\n\n extracted_values.append(z)\n\n return extracted_values\n\n def to_esri_ascii(self, out_name):\n with open(out_name, 'w') as f:\n y_start = self.lat_end if self.size_lat < 0 else self.lat_start\n header = 'NCOLS %d\\nNROWS %d\\nXLLCENTER %f\\nYLLCENTER %f\\nCELLSIZE %f\\nNODATA_VALUE 9999.0\\n' % (\n self.cols, self.rows, self.lon_start, y_start, self.size_lon)\n f.write(header)\n\n if self.size_lat < 0:\n f.write(' '.join(map(str, self.data)))\n else:\n for i in range(self.rows - 1, -1, -1):\n f.write(' '.join(map(str, self.data[i * self.cols:(i + 1) * self.cols])))\n f.write('\\n') # 必须加换行符,因为' '.join最后还多了一个空格,arcgis不能根据列数自动计算\n try:\n import arcpy\n except ImportError:\n print(\"\"\"warning: you have no Esri's arcpy module, using to_esri_ascii method,\n you can still get the result, but without the associate coordinate information.\n you can use Esri's software like Arcmap to import the result and add the coordinate which is\n WGS1984\"\"\")\n else:\n # 定义坐标系//define the coordinate\n sr = arcpy.SpatialReference('WGS 1984')\n arcpy.DefineProjection_management(out_name, sr)\n\n def to_file(self, out_name, formatted=False):\n with open(out_name, 'w') as f:\n f.write('diamond 4 ' + self.doc + '\\n')\n f.write(' '.join([self.year, self.month, self.day, self.hour, self.valid_period, self.level, '\\n']))\n\n f.write(' '.join(\n ['%.2f' % i for i in\n [self.size_lon, self.size_lat, self.lon_start, self.lon_end, self.lat_start, 
self.lat_end]] +\n ['%d' % i for i in [self.cols, self.rows]] +\n ['%.2f' % i for i in\n [self.contour_interval, self.contour_start, self.contour_end, self.smooth, self.bold_line]] +\n ['\\n']))\n\n if not formatted:\n f.write(' '.join(['%.2f' % i for i in self.data]))\n\n def to_numpy(self):\n return np.array(self.data).reshape(self.rows, self.cols)\n\n def max(self):\n return max(self.data)\n\n def min(self):\n return min(self.data)\n\n def calc_stats(self):\n pass\n\n def extract_station_value(self, lon_lat_s, method):\n '提取站点数据'\n pass\n\n\nif __name__ == \"__main__\":\n # d0 = Diamond4('D:/000')\n # d1 = Diamond4('D:/024')\n # d1_0 = Diamond4(d1-d0, d1.parameters)\n # d1_0.contour_start = floor(d1_0.min())\n # d1_0.contour_end = ceil(d1_0.max())\n # d1_0.to_file('D:/d1_0.txt')\n d = Diamond4('D:/2T')\n d.contour_start = floor(d.min())\n d.contour_end = ceil(d.max())\n d.to_file('D:/2t.txt')\n","sub_path":"Diamond4.py","file_name":"Diamond4.py","file_ext":"py","file_size_in_byte":9466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"609814771","text":"from analyzer import Analyzer\nfrom manager.manager import Manager\nfrom runner.jvm_bench import Bencher\nfrom runner.stats_collect import JProcess\nfrom tuner import pso\nfrom util import *\nfrom settings import *\n\nimport sys\n\ntarget = 'SPECjvm2008.jar'\nworkload = 'sunflow'\n\n\ndef test1():\n bencher = Bencher(vm_type='hotspot')\n target = 'SPECjvm2008.jar'\n\n run_id = 1\n analyzer = Analyzer()\n for i_options in impact_options.keys():\n # bencher.run(target)\n # print(i_options)\n values = impact_options.get(i_options)\n for value in values:\n jproc = JProcess('spec')\n print(run_id)\n run_id += 1\n bencher.reset_options()\n bencher.add_jvm_option(i_options, value)\n bencher.add_option_str(workload)\n\n bencher.add_option_str('-ikv')\n # bencher.add_option_str('-crf')\n # bencher.add_option_str('false')\n child = bencher.run(target)\n jproc.set_pid(child.pid)\n jproc.fill_jstat_timeout('-gc', output=workload, header=bencher.get_args(target))\n analyzer.manipulate_spec_file()\n # child.wait()\n\n print('PID:{}'.format(child.pid))\n\n analyzer.save()\n\n # result_files=get_all_results(base_dir='E:\\\\code\\\\PythonProjects\\\\jvm')\n ops_data = []\n # for result in result_files:\n # print(result)\n # ops=Analyzer.read_html(result)\n # ops_data.append(ops)\n # print(ops_data)\n\n\ndef test2():\n npso = pso.PSO(workload=workload)\n npso.init_pop()\n npso.fit()\n npso.print_args()\n\n\ndef test3():\n _max, _min, _ave, arg_str = resolve_stats_file('tuner_root\\\\jstat\\\\sunflow\\\\2018-04-11_0.txt')\n nfeature = np.column_stack(_ave)\n nopts = np.column_stack(np.array([1.00e+00, 7.23e+02, 9.56e-01, 3.02e+04, 1.75e+01, 4.67e+01, 5.98e+05,\n 5.92e+01, 8.97e+05, 1.30e+01, 8.02e+00, 2.89e+00, 6.29e+05, 8.11e+00,\n 7.56e+05, 4.59e+01, 8.78e+01 - 2.25e+01, 0.00e+00, 0.00e+00]))\n optimizer = Manager()\n optimizer.update(nfeature, nopts)\n optimizer.save()\n\n\ndef test4():\n optimizer = Manager()\n print(optimizer.features)\n\n\nif __name__ == '__main__':\n\n # for workload in workloads:\n # with open(os.path.join(base_stdout,workload+'.out'),'w') as f:\n # sys.stdout=f\n # optimizer = Manager()\n # # argc=len(sys.argv)\n # # workload=sys.argv[1] if argc>1 else workload\n # optimizer.optimize(target=target, workload=workload)\n optimizer = Manager()\n argc=len(sys.argv)\n workload=sys.argv[1] if argc>1 else workload\n optimizer.optimize(target=target, 
workload=workload)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"117795011","text":"import Utils\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\n\ndef run(model, data, learning_rate, epochs, batch_parts, c, show_graph, save_model, save_model_as):\n \n randseed = 999\n np.random.seed(randseed)\n torch.manual_seed(randseed)\n \n # Change BACK TO MYPARAMETERS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n optimizer = torch.optim.Adam(model.myparameters,\n lr=learning_rate) # , weight_decay = 0.0001) #, momentum=0.9, nesterov = True) #,weight_decay = 0.99)\n losses = np.zeros(epochs)\n \n #print(\"Start Learning...\")\n \n \n whole_batch_size = len(data[\"train\"])\n batch_size = int(whole_batch_size / batch_parts)\n \n\n for step in range(epochs):\n print(\"Epoch \", step, \" out of \", epochs, \" epochs\")\n dataloader = DataLoader(data[\"train\"], batch_size= batch_size,shuffle=True)\n iter_loss = 0.\n #model.normalize_embs()\n for batch in range (batch_parts):\n \n samples = next(iter(dataloader)).long()\n \n X = Utils.gen_samples(samples,data[\"n_e\"], batch_size, c)\n optimizer.zero_grad()\n #model.normalize_embs()\n \n pred = model.forward(X)\n \n pred_zeros = model.other_rel_to_zero(pred, X)\n loss1 = model.ranking_loss(pred_zeros, c, batch_size)\n #print(loss)\n #print(pred_zeros)\n loss2 = model.ranking_loss_without_neg_sample(pred[:batch_size],X[:batch_size]) # loss scalar for output\n #loss2 = model.martin_loss(X,pred) # loss scalar for output\n loss = loss1 + loss2\n #loss = model.log_likelyhood_loss(pred, X)\n #loss = loss * data[\"n_r\"] # turn on for normal one\n iter_loss += loss\n loss.backward()\n #print(loss.item())\n optimizer.step()\n \n #print(\"loss\", loss.item())\n #print(\"o\",model.o)\n #print(\"grad\",model.o.grad)\n #print(pred)\n losses[step] = iter_loss/ batch_parts\n print(loss.item())\n\n print(\"last Loss: \" , loss.item(), \"learning rate: \", learning_rate)\n print()\n # ----------------------- save model --------------------------\n if (save_model):\n \n torch.save(model, save_model_as)\n print(\"saved model: \")\n print(save_model_as)\n #----------------------- matplot graph ----------------------\n if(show_graph):\n\n #one = np.ones(epochs)\n l = np.zeros(epochs)\n for i in range(len(losses)):\n l[i] += losses[i]\n t = np.arange(0,epochs)\n print(\"losses: \" , losses)\n plt.plot(t, l)\n #plt.plot(t,one)\n #plt.savefig(\"plot.png\")\n plt.show()\n return model","sub_path":"xWx_Embedding/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184200809","text":"from django.db import models\nfrom encrypted_id.models import EncryptedIDModel\n\n# Create your models here.\n\n\nclass Notes(EncryptedIDModel):\n\n class Meta(object):\n verbose_name = \"Note\"\n verbose_name_plural = \"Notes\"\n ordering = ['-datetime']\n\n note = models.TextField(blank=False)\n\n datetime = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"{}\".format(self.datetime)\n","sub_path":"note_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"241938839","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: CEF PNM Team\n# License: TBD\n# Copyright (c) 2012\n\n#from __future__ import print_function\n\"\"\"\n\nmodule __OhmicConduction__:\n========================================================================\n\n\"\"\"\n\nimport scipy as sp\nfrom .__LinearSolver__ import LinearSolver\n\nclass OhmicConduction(LinearSolver):\n r\"\"\"\n\n OhmicConduction - Class to run an algorithm for electron conduction on constructed networks\n\n It returns voltage gradient inside the network.\n\n \"\"\"\n\n def __init__(self,**kwargs):\n r\"\"\"\n Initializing the class\n \"\"\"\n super(OhmicConduction,self).__init__(**kwargs)\n self._logger.info(\"Create Ohmic Conduction Algorithm Object\")\n\n def _setup(self,\n loglevel=10,\n electronic_conductance='electronic_conductance',\n occupancy='occupancy',\n voltage='voltage',\n **params):\n r\"\"\"\n\n This function executes the essential mathods for building matrices for Linear solution\n \"\"\"\n self._fluid = params['active_fluid']\n try: self._fluid = self.find_object_by_name(self._fluid) \n except: pass #Accept object\n self._X_name = voltage\n self._boundary_conditions_setup()\n g = self._fluid.get_throat_data(prop=electronic_conductance)\n s = self._fluid.get_throat_data(prop=occupancy)\n self._conductance = g*s+g*(-s)/1e3\n\n\n def _do_inner_iteration_stage(self):\n v = self._do_one_inner_iteration()\n self.set_pore_data(prop=self._X_name,data= v)\n self._logger.info('Solving process finished successfully!')\n \n def update(self):\n \n v = self.get_pore_data(prop=self._X_name)\n self._net.set_pore_data(phase=self._fluid,prop=self._X_name,data=v)\n self._logger.info('Results of ('+self.name+') algorithm have been updated successfully.')\n ","sub_path":"OpenPNM/Algorithms/__OhmicConduction__.py","file_name":"__OhmicConduction__.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"54267486","text":"import sys\nimport copy\nimport rospy\nimport time\nimport moveit_commander\nfrom geometry_msgs.msg import PoseStamped\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nfrom std_msgs import Float32MultiArray\nimport tf2_ros\nimport tf2_kdl\nfrom tf2_kdl import transform_to_kdl as to_kdl\n\ndef p_as_t(p):\n t = tf2_ros.TransformStamped()\n t.transform.translation = p.pose.position\n t.transform.rotation = p.pose.orientation\n return t\n\ndef t_as_p(t):\n p = PoseStamped()\n p.pose.position = t.transform.translation\n p.pose.orientation = t.transform.rotation\n return p\n\ndef from_kdl(frame):\n t = tf2_ros.TransformStamped()\n for attr in ['x', 'y', 'z']:\n setattr(t.transform.translation, attr, getattr(frame.p, attr)())\n q = frame.M.GetQuaternion()\n for attr, ind in zip(['x', 'y', 'z', 'w'], range(4)):\n setattr(t.transform.rotation, attr, q[ind])\n return t\n\ndef get_pose_transformed(transform, pose):\n hb = to_kdl(p_as_t(pose))\n pk = tf2_kdl.do_transform_frame(hb, transform)\n return t_as_p(from_kdl(pk))\n\n\ndef callback(data):\n\n if len(data.markers) == 0:\n return\n\n pose = data.markers[0].pose\n sign = -1 if data.markers[0].id == 8 else 1\n \n pose_target = group.get_current_pose().pose\n\n print(\"manip: \", pose_target)\n\n try:\n trans = tfBuffer.lookup_transform('base_link', 'camera_link', rospy.Time())\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n return\n\n print('transform: ', trans)\n pose = get_pose_transformed(trans, pose).pose\n\n 
print(\"ar_tracker: \", pose)\n\n pose_target.position.x += sign * (pose.position.x - pose_target.position.x) / 2.0\n pose_target.position.y += sign * (pose.position.y - pose_target.position.y) / 2.0\n pose_target.position.z += sign * (pose.position.z - pose_target.position.z) / 2.0\n\n group.set_pose_target(pose_target)\n plan = group.plan()\n group.execute(plan)\n time.sleep(1)\n\nif __name__ == '__main__':\n\n print(sys.argv)\n \n moveit_commander.roscpp_initialize(sys.argv)\n rospy.init_node('supernode', anonymous=True)\n\n tfBuffer = tf2_ros.Buffer()\n listener = tf2_ros.TransformListener(tfBuffer)\n\n robot = moveit_commander.RobotCommander()\n group = moveit_commander.MoveGroupCommander(\"manipulator\")\n\n rospy.Subscriber(\"/ar_pose_marker\", AlvarMarkers, callback, queue_size=1)\n rospy.spin()\n\n","sub_path":"move_it.py","file_name":"move_it.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"290498161","text":"import time\r\nimport random\r\nfrom matplotlib import pyplot as py\r\n\r\ndef tma(choice_list, no, c):\r\n #Output array\r\n output=[]\r\n mFree=[]\r\n t0 = time.clock()\r\n print(\"Case\",c,\":\", choice_list)\r\n\r\n #Initialize the mathing list for male and female\r\n for j in range(0,no):\r\n output.append(0)\r\n mFree.append(0)\r\n\r\n\r\n freeCount = no\r\n\r\n \r\n #Loop until all mens are matched\r\n while freeCount>0:\r\n for k in range(0,no):\r\n # If already paired exit\r\n if mFree[k] == 1:\r\n break\r\n\r\n\r\n # Loop through each womens\r\n for j in range(0,no):\r\n choice = choice_list[k][j]\r\n\r\n # If female is not previously engaged assign it with the requesting man\r\n if output[choice-no-1] == 0:\r\n output[choice-no-1] = k+1\r\n mFree[k] = 1\r\n freeCount-=1\r\n break\r\n # If female is already engaged\r\n else:\r\n currentPartner = output[choice-no-1]\r\n print(\"Women {} currently engaged with {}\".format(choice, currentPartner))\r\n # Find the position of requesting male in womens preference list\r\n pos1 = 0\r\n # Position of currently engaged partner\r\n pos2 = 0\r\n #print(choice_list[4].index(k+1))\r\n for n in range(no,no*2):\r\n if (k+1) == choice_list[n]:\r\n pos1 = choice_list[n].index(k+1)\r\n output[choice-no-1] = k+1\r\n mFree[k] = 1\r\n mFree[currentPartner-1] = 0\r\n break\r\n\r\n \r\n \r\n \r\n\r\n\r\n # Compare the position of both the proposal in women's preference list\r\n #print(\"Pos1 {0} Pos2 {1}\".format(pos1,pos2))\r\n #if pos1 > pos2:\r\n \r\n\r\n g = no+1\r\n for l in output:\r\n print(\"Women {} paired with {}\".format(g,l))\r\n g+=1\r\n\r\n # Return Execution time\r\n return time.clock()-t0\r\n \r\n\r\n \r\n \r\n\r\n \r\n\r\n# Driver function\r\nif __name__ == \"__main__\":\r\n t0=0\r\n cases = 4\r\n choice = 0\r\n x = []\r\n y = []\r\n\r\n print(\"1. TMA\")\r\n print(\"2. 
TMA analysis\")\r\n choice = int(input(\">\"))\r\n\r\n if choice == 1:\r\n choice_list = []\r\n for i in range(0, 8):\r\n if i < 4:\r\n choice_list.append(random.sample(range(5,9),4))\r\n else:\r\n choice_list.append(random.sample(range(1,5), 4))\r\n\r\n time_taken = tma(choice_list, 4, 1)\r\n print(\"Execution time: \",time_taken)\r\n\r\n else:\r\n for n in range(1, 20, 2):\r\n # cases = cases * 2\r\n x.append(cases)\r\n choice_list = []\r\n for i in range(0, cases * 2):\r\n if i < (cases):\r\n choice_list.append(random.sample(range(cases + 1, (cases * 2) + 1), cases))\r\n\r\n else:\r\n choice_list.append(random.sample(range(1, cases + 1), cases))\r\n\r\n t0 += 1\r\n y.append(tma(choice_list, cases, t0))\r\n print(\"=\" * 100)\r\n cases += 2\r\n\r\n\r\n py.plot(x, y, 'g')\r\n py.ylabel('Time (in seconds)')\r\n py.xlabel('No of pairs')\r\n py.show()\r\n\r\n \r\n","sub_path":"Algorithm Assignments M.tech 1st sem/Assignment2/tma_algorithm.py","file_name":"tma_algorithm.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"611802202","text":"from urllib.request import Request, urlopen, build_opener, install_opener\nfrom urllib.parse import urlsplit, quote, urlunsplit\nfrom urllib.error import URLError\nfrom sockshandler import SocksiPyHandler\nfrom bs4 import BeautifulSoup\nimport os\nimport socks\nimport re\nimport time\nimport socket\nimport shutil\n\n\ndef correct_name(name):\n name = name.replace('\\\\', '')\n name = name.replace('/', '')\n name = name.replace(':', '')\n name = name.replace('*', '')\n name = name.replace('?', '')\n name = name.replace('\\\"', '')\n name = name.replace('<', '')\n name = name.replace('>', '')\n name = name.replace('|', '')\n return name\n\n\ndef encode_url(url):\n url = urlsplit(url)\n url = list(url)\n url[2] = quote(url[2])\n url = urlunsplit(url)\n return url\n\n\nclass Spider:\n def __init__(self, path, proxy=False):\n if proxy:\n handler = SocksiPyHandler(socks.SOCKS5, '127.0.0.1', 1080)\n opener = build_opener(handler)\n install_opener(opener)\n self.path = path\n self.tmp_path = os.path.join(path, 'tmp')\n self.retry = 0\n if not os.path.exists(path):\n os.mkdir(path)\n if not os.path.exists(self.tmp_path):\n os.mkdir(self.tmp_path)\n\n def save_image(self, url, path):\n try:\n request = Request(encode_url(url))\n request.add_header('User-Agent',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')\n response = urlopen(request, timeout=2)\n with open(path, 'wb') as f:\n f.write(response.read())\n self.retry = 0\n except socket.timeout:\n print('超时:%s' % url)\n if self.retry < 5:\n self.retry += 1\n self.save_image(url, path)\n except Exception as e:\n print(e)\n print('保存错误,URL:' + url)\n if self.retry < 5:\n self.retry += 1\n self.save_image(url, path)\n\n def save_pics(self, name, pic_list):\n print('开始保存:' + name)\n pic_path = os.path.join(self.path, name)\n tmp_name_path = os.path.join(self.tmp_path, name)\n if not os.path.exists(tmp_name_path):\n # 在临时文件夹创建目录\n os.mkdir(tmp_name_path)\n i = 1\n for pic_item in pic_list:\n if 'picsgonewild' in pic_item:\n print('跳过:%s' % name)\n return\n print('正在保存第 %d 张 URL:%s' % (i, pic_item))\n tail = pic_item.split('.').pop();\n if tail not in ('jpg', 'jpeg', 'png', 'bmp', 'gif'):\n tail = 'jpg'\n self.save_image(pic_item, os.path.join(tmp_name_path, str(i) + '.' 
+ tail))\n # time.sleep(0.2)\n i += 1\n if os.path.exists(pic_path):\n shutil.rmtree(pic_path, ignore_errors=True)\n shutil.move(tmp_name_path, pic_path)\n print('保存完毕')\n\n def get_page_code(self, url):\n request = Request(url)\n request.add_header('User-Agent',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')\n try:\n response = urlopen(request, timeout=2)\n page_code = response.read()\n self.retry = 0\n return page_code.decode('utf-8')\n except URLError as e:\n print('网络错误:%s' % url)\n if self.retry < 5:\n self.retry += 1\n self.get_page_code(url)\n except UnicodeDecodeError as e:\n print(e)\n try:\n return page_code.decode('gbk')\n except UnicodeDecodeError as ee:\n print(ee)\n except socket.timeout:\n print('超时:%s' % url)\n if self.retry < 5:\n self.retry += 1\n self.get_page_code(url)\n except Exception as e:\n print('其他错误 URL:' + url)\n if self.retry < 5:\n self.retry += 1\n self.get_page_code(url)\n\n @staticmethod\n def get_pic_list(post_code):\n soup = BeautifulSoup(post_code, 'html.parser')\n div = soup.find('div', attrs={'class': 't t2'})\n images = []\n if div:\n input_items = div.find_all('input', attrs={'type': 'image'})\n for input_item in input_items:\n if input_item.get('src'):\n images.append(input_item['src'])\n return images\n\n def get_post_list(self, page_index):\n url = 'http://www.t66y.com/thread0806.php?fid=16&search=&page=' + str(page_index)\n page_code = self.get_page_code(url)\n if page_code is None:\n return\n soup = BeautifulSoup(page_code, 'html.parser')\n trs = soup.find_all('tr', attrs={'align': 'center', 'class': 'tr3 t_one'})\n for tr in trs:\n tag_a = tr.find('td', attrs={'style': 'text-align:left;padding-left:8px'}).h3.a\n post_title = correct_name(tag_a.string)\n pattern = re.compile('\\[\\d+P\\]')\n if pattern.search(post_title):\n print('开始保存,第%d页' % page_index)\n pic_path = os.path.join(self.path, post_title)\n if os.path.exists(pic_path):\n print('文件夹 %s 已存在,跳过' % post_title)\n continue\n post_url = 'http://www.t66y.com/' + tag_a['href']\n # 获取帖子地址\n post_code = self.get_page_code(post_url)\n if post_code:\n # 获取图片列表\n pic_list = self.get_pic_list(post_code)\n if len(pic_list) > 2:\n self.save_pics(post_title, pic_list)\n\n def start(self, start_index, end_index):\n for index in range(start_index, end_index + 1):\n self.get_post_list(index)\n print('所有文件保存完成')\n\n\nspider = Spider('D:\\\\cl', proxy=True)\nspider.start(1, 10)\n","sub_path":"t66y.py","file_name":"t66y.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"401353968","text":"from flask import jsonify, Flask, render_template\n\nfrom connector import SensorConnector\n\n\napp = Flask(__name__)\nconnector = SensorConnector()\n\n\n@app.route('/')\ndef main():\n labels = ['PM 2.5 [ug/m^3]', 'PM 2.5 [%]', 'PM 10 [ug/m^3]', 'PM 10 [%]']\n result = connector.get_value()\n pm_25 = result['PM 2.5']\n pm_10 = result['PM 10']\n values = [pm_25, pm_25/.25, pm_10, pm_10/.50]\n return render_template('index.html', labels=labels, values=values)\n\n@app.route('/')\ndef get_value():\n return jsonify(connector.get_value())\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"606935528","text":"import numpy as np\nfrom smartGridClass import 
smartGrid\nfrom collections import deque\n\n# sg is the current smartGrid of the node\n# score is the smartGrid's score\nclass sgNode:\n def __init__(self, sg, score, previous, stepToHere, moveCount):\n assert (isinstance(sg, smartGrid));\n assert (type(score) is int or score is None);\n assert (isinstance(previous, sgNode) or previous is None);\n assert (type(stepToHere) is str or stepToHere is None);\n assert (type(moveCount) is int and moveCount >= 0);\n\n self.sg = sg;\n self.score = score;\n self.prevNode = previous;\n self.stepToHere = stepToHere;\n self.moveCount = moveCount;\n\n def isFirstMove(self):\n # edge case where this is the root node\n # this means there is no previous node\n # return False because this means the first move hasnt been made yet\n if (self.prevNode is None):\n assert (self.score is None);\n assert (self.moveCount == 0);\n assert (self.stepToHere is None);\n return False;\n # the previous node exists\n # if the previous node is the root node, one move has been made\n # return True\n elif (self.prevNode.prevNode is None):\n assert (self.moveCount == 1);\n return True;\n # more than one move made\n # return False\n else:\n assert (self.moveCount > 1);\n return False;\n\n def isBaseNode(self):\n if (self.prevNode is None):\n #assert (self.score is None);\n assert (self.moveCount == 0);\n assert (self.stepToHere is None);\n return True;\n else:\n assert (self.moveCount >= 1);\n return False;\n","sub_path":"nodeClass.py","file_name":"nodeClass.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"11102394","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport formfield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('vintage', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='archivedpage',\n name='xpaths',\n field=formfield.fields.ModelFormField(null=True, blank=True),\n ),\n ]\n","sub_path":"vintage/migrations/0002_archivedpage_xpaths.py","file_name":"0002_archivedpage_xpaths.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"133862386","text":"\"\"\"\nstory.py\nThe basic stories that are used by the news stand in the game. These are not the\nsame as memories.\n\"\"\"\nfrom .story import Story\nfrom . 
import newarrivalcopy as COPY\n\nclass NewArrival(Story):\n\n\tdef __init__(self, subject, date):\n\t\tself.subject = subject\n\t\tsuper(NewArrival, self).__init__(date)\t\t\n\n\tdef generateTitle(self):\n\t\tname = self.subject.name\n\t\ttitle = COPY.getWho()\n\t\ttitle = title.format(name[0],name[1],name[2])\n\t\treturn title\n\n\tdef generateText(self):\n\t\tfirstLine = \"firstLine\"\n\t\tmiddleLine = \"middleLine\"\n\t\tlastLine = \"lastLine\"\n\n\t\treturn firstLine + \" \" + middleLine + \" \" + lastLine\n","sub_path":"stories/newarrival.py","file_name":"newarrival.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"343203843","text":"from nj.models import *\nfrom nj import utils\n\ndef add(request):\n member = Member.objects.get(id=int(request['member_id']))\n work = Work.objects.create(member=member, title=request['title'], post=request['post'], start=utils.str_to_date(request['start']), end=utils.str_to_date(request['end']))\n try:\n work.title = Dep.objects.get(id=int(work.title)).title\n except:\n pass\n return {\n 'success': True,\n 'work': work.to_json()\n }\n\ndef getDeps(request):\n deps = []\n for dep in Dep.objects.all():\n deps.append(dep.to_json())\n return {\n 'deps': deps\n }","sub_path":"nj/tabs/old/add_work/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"367035305","text":"import brickpi3\nimport time\n#import statistics\n\nBP = brickpi3.BrickPi3()\ngain_value = 20\nspeed_vals = [0] * 5\nBP.set_motor_limits(BP.PORT_A, 70, 200) #Power limit 70%, speed limit 200 dps)\n\n#Set sensor type\nBP.set_sensor_type(BP.PORT_1, BP.SENSOR_TYPE.NXT_ULTRASONIC)\ntime.sleep(2)\ndef find_speed(speed_vals, new_speed):\n if len(speed_vals) > 5:\n speed_vals.pop()\n speed_vals.insert(1,new_speed)\n\n #Find median\n return sorted(speed_vals) [2]\n\ntry:\n while True:\n #Measure the distance to object in front\n distance = BP.get_sensor(BP.PORT_1) #Reading in cm\n movement_distance = 30 - distance\n control_speed = find_speed(speed_vals,gain_value*movement_distance)\n print(\"Measured distance: %s, Difference: %s, Control speed: %s\" % (distance, movement_distance, control_speed))\n\n #Use velocity control\n BP.set_motor_dps(BP.PORT_A, control_speed)\n BP.set_motor_dps(BP.PORT_B, control_speed)\n time.sleep(0.3)\nexcept KeyboardInterrupt:\n BP.reset_all()\n","sub_path":"prac-files/assignment_2/stalker_boi.py","file_name":"stalker_boi.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"313758810","text":"import pygame\nimport time\n\nwin_size = 500\npygame.init()\nclock = pygame.time.Clock()\nwin = pygame.display.set_mode((win_size, win_size))\npygame.display.set_caption('snake war game!')\nrow = 20\ngap = win_size // row\nline_coordinate = gap\nend = win_size - line_coordinate\nx_change = 0\ny_change = 0\n\n\ndef draw_snake(array, length, array2, length2):\n global line_coordinate, win, image\n win.blit(image, (0, 0))\n blue = (0, 204, 204)\n blue_body = (51, 255, 153)\n red = (204, 0, 0)\n red_body = (255, 51, 51)\n if len(array) != 0 and len(array2) != 0:\n for i in range(length):\n if i == 0:\n pygame.draw.rect(win, red, (array[i][0] + 2, array[i][1] + 1, line_coordinate - 2, line_coordinate - 2))\n else:\n pygame.draw.rect(win, red_body,\n (array[i][0] + 2, array[i][1] + 
1, line_coordinate - 2, line_coordinate - 2))\n for i in range(length2):\n if i == 0:\n pygame.draw.rect(win, blue,\n (array2[i][0] + 2, array2[i][1] + 1, line_coordinate - 2, line_coordinate - 2))\n else:\n pygame.draw.rect(win, blue_body,\n (array2[i][0] + 2, array2[i][1] + 1, line_coordinate - 2, line_coordinate - 2))\n clock.tick(15)\n pygame.display.update()\n\n\ndef render_message(player, player2, message_number):\n global winner, starting_pos2, starting_pos1\n font = pygame.font.SysFont('comicsansms', 27)\n if message_number == 1:\n message = font.render('{} player entered outside boundary'.format(player), True, (255, 251, 5))\n message1 = font.render('{} player WON'.format(player2), True, (255, 251, 5))\n pygame.draw.rect(win, (204, 0, 204), (14, 225, 480, 35))\n pygame.draw.rect(win, (204, 0, 204), (155, 265, 220, 35))\n win.blit(message, (22, 225))\n win.blit(message1, (160, 260))\n elif message_number == 2:\n message = font.render('{} player bit himself'.format(player), True, (255, 251, 5))\n message1 = font.render('{} player WON'.format(player2), True, (255, 251, 5))\n pygame.draw.rect(win, (204, 0, 204), (95, 223, 315, 35))\n pygame.draw.rect(win, (204, 0, 204), (155, 265, 225, 35))\n win.blit(message, (100, 225))\n win.blit(message1, (160, 260))\n elif message_number == 3:\n message = font.render('{} have eaten each others head'.format(player), True, (255, 251, 5))\n message1 = font.render('ITS A DRAW', True, (255, 251, 5))\n pygame.draw.rect(win, (204, 0, 204), (2, 232, 500, 35))\n pygame.draw.rect(win, (204, 0, 204), (167, 265, 177, 35))\n win.blit(message, (5, 225))\n win.blit(message1, (170, 260))\n starting_pos2.clear()\n starting_pos2.clear()\n elif message_number == 4:\n message = font.render('{} player WON'.format(player), True, (255, 251, 5))\n pygame.draw.rect(win, (204, 0, 204), (157, 232, 230, 35))\n win.blit(message, (160, 228))\n pygame.display.update()\n time.sleep(5)\n winner = True\n\n\ndef kill(array1, array2):\n global player1_length, player2_length\n head1 = array1[0]\n head2 = array2[0]\n if head1 == head2:\n render_message('red - blue', 'none', 3)\n elif head1 in array2:\n player2_length -= 1\n elif head2 in array1:\n player1_length -= 1\n elif head1 in array1[1:]:\n render_message('red', 'blue', 2)\n elif head2 in array2[1:]:\n render_message('blue', 'red', 2)\n\n\ndef win_match(array1, array2):\n if len(array1) == 1:\n render_message('blue', 'red', 4)\n if len(array2) == 1:\n render_message('red', 'blue', 4)\n\n\ndef play():\n global line_coordinate, starting_pos1, starting_pos2, player1_loose, player2_loose, winner\n player1_x_coordinate = starting_pos1[0][0]\n player1_y_coordinate = starting_pos1[0][1]\n player2_x_coordinate = starting_pos2[0][0]\n player2_y_coordinate = starting_pos2[0][1]\n player1_x_change = 0\n player1_y_change = 0\n player2_x_change = 0\n player2_y_change = 0\n player1_direction_x = False\n player1_direction_y = True\n player2_direction_x = False\n player2_direction_y = True\n start1 = False\n start2 = False\n # to draw the grid line structure of the screen\n while not winner:\n for events in pygame.event.get():\n if events.type == pygame.QUIT:\n winner = True\n pygame.quit()\n quit()\n if events.type == pygame.KEYDOWN:\n if player1_direction_y:\n if events.key == pygame.K_s:\n start1 = True\n player1_y_change = line_coordinate\n player1_x_change = 0\n player1_direction_x = True\n player1_direction_y = False\n if events.key == pygame.K_w:\n start1 = True\n player1_y_change = -line_coordinate\n player1_x_change = 0\n 
player1_direction_x = True\n player1_direction_y = False\n elif player1_direction_x:\n if events.key == pygame.K_d:\n player1_x_change = line_coordinate\n player1_y_change = 0\n player1_direction_x = False\n player1_direction_y = True\n if events.key == pygame.K_a:\n player1_x_change = -line_coordinate\n player1_y_change = 0\n player1_direction_x = False\n player1_direction_y = True\n\n if player2_direction_y:\n if events.key == pygame.K_DOWN:\n start2 = True\n player2_y_change = line_coordinate\n player2_x_change = 0\n player2_direction_x = True\n player2_direction_y = False\n if events.key == pygame.K_UP:\n start2 = True\n player2_y_change = -line_coordinate\n player2_x_change = 0\n player2_direction_x = True\n player2_direction_y = False\n elif player2_direction_x:\n if events.key == pygame.K_RIGHT:\n player2_x_change = line_coordinate\n player2_y_change = 0\n player2_direction_x = False\n player2_direction_y = True\n if events.key == pygame.K_LEFT:\n player2_x_change = -line_coordinate\n player2_y_change = 0\n player2_direction_x = False\n player2_direction_y = True\n\n if start1 and start2:\n kill(starting_pos1, starting_pos2)\n win_match(starting_pos1, starting_pos2)\n player1_x_coordinate += player1_x_change\n player1_y_coordinate += player1_y_change\n player2_x_coordinate += player2_x_change\n player2_y_coordinate += player2_y_change\n\n if 0 > player1_x_coordinate or player1_x_coordinate > end or 0 > player1_y_coordinate or \\\n player1_y_coordinate > end:\n render_message('red', 'blue', 1)\n starting_pos1.clear()\n if 0 > player2_x_coordinate or player2_x_coordinate > end or 0 > player2_y_coordinate or \\\n player2_y_coordinate > end:\n render_message('blue', 'red', 1)\n starting_pos2.clear()\n\n if start1:\n starting_pos1.insert(0, [player1_x_coordinate, player1_y_coordinate])\n starting_pos1.pop()\n if start2:\n starting_pos2.insert(0, [player2_x_coordinate, player2_y_coordinate])\n starting_pos2.pop()\n draw_snake(starting_pos1, player1_length, starting_pos2, player2_length)\n pygame.display.update()\n\n\nstarting_pos1 = [[line_coordinate * 6, line_coordinate * 10],\n [line_coordinate * 5, line_coordinate * 10],\n [line_coordinate * 4, line_coordinate * 10],\n [line_coordinate * 3, line_coordinate * 10],\n [line_coordinate * 2, line_coordinate * 10],\n [line_coordinate * 1, line_coordinate * 10]]\nstarting_pos2 = [[line_coordinate * 14, line_coordinate * 10],\n [line_coordinate * 15, line_coordinate * 10],\n [line_coordinate * 16, line_coordinate * 10],\n [line_coordinate * 17, line_coordinate * 10],\n [line_coordinate * 18, line_coordinate * 10],\n [line_coordinate * 19, line_coordinate * 10]]\nimage = pygame.image.load('grid.png')\nplayer1_length = 6\nplayer2_length = 6\nwinner = False\nplayer1_loose = False\nplayer2_loose = False\nplay()\n","sub_path":"snake war.py","file_name":"snake war.py","file_ext":"py","file_size_in_byte":8955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"336090283","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 10 18:51:35 2017\n\n@author: Claire\n\"\"\"\n#%%\nfrom landlab.plot.imshow import imshow_grid\n#%%\n# Plot grid of full watershed to verify that it was properly imported.\nplt.figure()\nelev_grid=imshow_grid(mg0, 'topographic__elevation', \n limits=(0,np.max(z0[np.where(z0>0)])), \\\n var_name='Elevation', var_units='m', cmap='terrain', \\\n plot_name=params.dem_name+'-Elevation (m)')\n#%% TO-DO: MIGRATE TO A FIGURES SCRIPT\n# Plot watershed boundary 
conditions\nplt.figure()\nimshow_grid(mg, mg.status_at_node, allow_colorbar=False, color_for_closed='blue', plot_name='Watershed Boundary Conditions')\nplt.savefig('bc.png')\n\n# Plot grid of the extracted area of watershed that will be modeled and save them\nos.chdir(params.output_folder+'figures/')\n\nplt.figure()\nimshow_grid(mg, 'topographic__elevation', var_name='Elevation', var_units='m', cmap='terrain', plot_name=params.dem_name+'-Elevation (m)')\nplt.savefig('elevation.png')\n\nplt.figure()\nimshow_grid(mg, 'trib_mask', limits=(np.min(trib_mask[np.where(trib_mask>0)]), np.max(trib_mask)), cmap='cool', var_name='Stream Tributary Number', plot_name=params.dem_name+'-Tributary Masks')\nplt.savefig('trib_mask.png')\n\nplt.figure()\nimshow_grid(mg, 'slope', limits=(0, np.max(slp)), cmap='bwr', plot_name=params.dem_name+'-Slope (m/m)')\nplt.savefig('slope.png')\n\nplt.figure()\nimshow_grid(mg, 'cont_area', limits=(0, np.max(ca)), cmap='PiYG',plot_name=params.dem_name+'-Contributing Area (m2)')\nplt.savefig('cont_area.png')\n\nplt.figure()\nimshow_grid(mg, 'strm_mask', limits=(np.min(strm_mask[np.where(strm_mask>0)]), np.max(strm_mask)), cmap='cool', var_name='Stream Number',plot_name=params.dem_name+'-Stream Number')\nplt.savefig('strm_mask.png')\n\n#%% TO-DO: MIGRATE TO A FIGURES SCRIPT\n# Show the cells of the Elwha River mainstem\nmg.status_at_node[:]=4 # close all nodes\nmg.status_at_node[prof_ids_river[0]]=0 # open nodes in stream link 1 tributary area\nimshow_grid(mg, mg.status_at_node, color_for_closed='white', cmap='Blues', plot_name='Elwha River - Nodes Along the Mainstem')\n\n\n#%% TO-DO: MIGRATE TO A FIGURES SCRIPT\n# Plot outputs if desired (need to uncomment)- post flow routing\nplt.figure()\nimshow_grid(mg, 'drainage_area', cmap='GnBu') # Landlab contributing area\nimshow_grid(mg, 'flow__upstream_node_order', cmap='YlGnBu')\nimshow_grid(mg, 'flow__sink_flag', cmap='YlGnBu')\nplt.xlim(12000, 20000)\nplt.ylim(32000, 37000)\nplt.show()\n\n#%% TO-DO: MIGRATE TO A FIGURES SCRIPT \n# Use Landlab function to analyze the Elwha River and plot it!\n# INPUT threshold for stream (can use strm_trshd_grp4 and strm_trshd_grp5 which were named above)\nthreshold=1000000\n\n# Run analysis\nplt.figure()\nprof_ids_river, dist_upstr_river = analyze_channel_network_and_plot(mg, elevations='topographic__elevation', \\\n drainage_area='drainage_area',\\\n flow_receiver='flow__receiver_node', \\\n links_to_flow_receiver='flow__link_to_receiver_node', \\\n number_of_channels=1, starting_nodes=None, threshold=threshold)\n\nslp_river=slp[prof_ids_river[0]]\nmean_slp_river=np.nanmean(slp_river)\n\n# this creates a plot which is the same as: plt.plot(dist_upstr_river[0], z[prof_ids_river[0]])\nplt.xlabel('Distance upstream of Lake Mills (m)')\nplt.ylabel('Elevation (m)')\nplt.title('Elevation Profile of Elwha River Upstream of Lake Mills')\nplt.savefig('elev_prof')\n\n#%% TO-DO: MIGRATE TO A FIGURES SCRIPT\nplt.plot(dist_upstr_river[0], z[prof_ids_river[0]])\n\nlower_river=len(dist_upstr_river[0])-180\nplt.plot(dist_upstr_river[0][60:lower_river], z[prof_ids_river[0][60:lower_river]]) # LM gage section\n\n#plt.plot(dist_upstr_river[0][60:180], z[prof_ids_river[0][60:180]]) # LM gage section\n\nplt.xlabel('Distance upstream of Lake Mills (m)')\nplt.ylabel('Elevation (m)')\nplt.title('Elevation Profile of Elwha River Upstream of Lake Mills')\n\nslope_LM=mg.at_node['slope'][prof_ids_river[0][60:180]]\nprint ('Lake Mills gage 
slope=',np.nanmean(slope_LM))\n\nslope_lower=mg.at_node['slope'][prof_ids_river[0][60:lower_river]]\nprint ('lower river slope=',np.nanmean(slope_lower))\n\nslope_upper=mg.at_node['slope'][prof_ids_river[0][lower_river::]]\nprint ('upper river slope=',np.nanmean(slope_upper))\n\nslope_overall=mg.at_node['slope'][prof_ids_river[0][60::]]\nprint ('overall slope=',np.nanmean(slope_overall))\n#%%\n#Plot Q vs A and error\n\nprint(\"r-squared:\", QvA_rsq)\n\nplt.loglog(trib_total_area_m2,Q_strms_50EP.values[0],'bo')\nplt.loglog(trib_total_area_m2,Q_reg,'k-')\nplt.xlabel('Contributing Area (m2)')\nplt.ylabel('Streamflow, 50% Exceedance Probability (m3/s)')\nplt.title('Streamflow (50% Exceedance Probability) vs Contributing Area\\n (from DHSVM outputs)')\n\n#%% Plots\nplt.figure()\nplt.plot(network.index,network.strm_mean_slp,label='Landlab')\nplt.plot(network.index,network.segment_slope,'g',label='DHSVM')\nplt.plot(network.index,network.strm_mean_slp-network.segment_slope,'r--',label='Landlab-DHSVM')\nplt.legend(loc=0)\nplt.title('Comparison of Stream Slopes Derived from DHSVM and Landlab')\nplt.xlabel('Stream Number')\nplt.ylabel('Slope (m/m)')\n\n# Can see that the Landlab average slope is much higher (~2x) the DHSVM slope,\n# but follows the same general pattern. Need to look into why this is. Compare \n# how each of them are computing slope. Will use DHSVM slope for now since more conservative??\n\n#%%\nprecip_obs_start_date=precip_obs_daily.index[0]\nprecip_obs_end_date=precip_obs_daily.index[-1]\n#%%\nos.chdir('D:\\GitHub\\Elwha_Landlab')\n# SNOTEL- Buckinghorse - Obs precip\nBuckhorse_station_name='Buckinghorse'\nBuckhorse_station_elev=int(4870/3.281) #m 1484 m\n\nBuckhorse_raw=pd.read_table('Buckinghorse_SNOTEL.txt', header=58, delimiter=',')\nBuckhorse_dates=pd.to_datetime(Buckhorse_raw.Date)\nBuckhorse_daily=pd.DataFrame({'Tmax_C': (Buckhorse_raw['Air Temperature Maximum (degF)']-32)/1.8,\n 'Tmin_C': (Buckhorse_raw['Air Temperature Minimum (degF)']-32)/1.8,\n 'Tavg_C': (Buckhorse_raw['Air Temperature Average (degF)']-32)/1.8,\n 'Precip_mm': (Buckhorse_raw['Precipitation Increment (in)']*25.4)})\nBuckhorse_daily.set_index(Buckhorse_dates, inplace=True)\nBuckhorse_start_date=Buckhorse_daily.index[0]\nBuckhorse_end_date=Buckhorse_daily.index[-1]\n\n# Read in Livneh Station closest to Waterhole- 2013 met data\nos.chdir('D:/GoogleDrive/Watershed Dynamics Group/Projects/Elwha/JimsHolyPipeline/livneh2013/Daily_MET_1915_2011')\nliv2013_start_date=datetime.date(1915,1,1)\nliv2013_end_date=datetime.date(2011,12,31)\nliv2013_met_daily_dates=pd.date_range(liv2013_start_date,liv2013_end_date)\nliv2013_sta24=pd.read_table('Meteorology_Livneh_CONUSExt_v.1.2_2013_47.90625_-123.40625', delim_whitespace=True, header=None)\nliv2013_sta24.index=liv2013_met_daily_dates\nliv2013_sta24.columns=['precip_mm','tmax_c', 'tmin_c', 'wind_m_s'] # from http://dhsvm.pnnl.gov/input/input_metstation_file.stm\n\n\n# DHSVM- Modeled Streamflow\nstorm_flow_CCtop=outflow_saved.loc[storm_start_date:storm_end_date,'CarlsonCanyonTop']\nstorm_flow_CCbot=outflow_saved.loc[storm_start_date:storm_end_date,'CarlsonCanyonBottom']\nstorm_flow_LM=outflow_all.loc[storm_start_date:storm_end_date,'147']\n\n#%%\n# Plot modeled vs observed streamflow\nfig1, ax1=plt.subplots(1,1,figsize=(8,6))\nplt.xticks(rotation=40)\n\n# Streamflow- Modeled and Observed\n#lns1=ax1.plot(LM_data.index[4605:4640], LM_data.loc[storm_start_date:storm_end_date,'Q_m3s'], \n# 'k-', label='Observed - Q, LM gage',linewidth=2)\nlns1=ax1.plot(LM_data.index, 
LM_data.Q_m3s, 'k-', label='Observed - Q, LM gage',linewidth=2)\nlns2=ax1.plot(storm_flow_LM.index, storm_flow_LM, 'r-', label='Modeled - Q, LM gage',linewidth=2)\n#lns6=ax1.plot(storm_flow_CCtop.index, storm_flow_CCtop,'g-', label='Modeled- Q, CC-top',linewidth=2)\n#lns7=ax1.plot(storm_flow_CCbot.index, storm_flow_CCbot,'r-', label='Modeled- Q, CC-bottom',linewidth=2)\n\nax2=ax1.twinx()\nlns3=ax2.plot(Wtrhole_daily.index, Wtrhole_daily.Precip_mm, 'b-',label= 'Obs- P, elev=1527 m',linewidth=2)\n#lns5=ax2.plot(liv2013BC_sta24.index[163520::], 3*1000*liv2013BC_sta24.precip_m_per_3hr[163520::], 'g-',label= 'Livneh- sta elev=1475 m')\nlns5=ax2.plot(liv2013_sta24.index, liv2013_sta24.precip_mm, 'g*-',label= 'Livneh- sta elev=1475 m')\nlns6=ax2.plot(precip_daily_m.index, 1000*precip_daily_m.precip_m, 'c-',label= 'Modeled- P, basin')\n\nax2.set_ylim(0,200)\nax2.invert_yaxis()\n\nplt.xlim(storm_start_date, storm_end_date)\nax1.set_xlabel ('Date',fontsize=14)\nax1.set_ylabel ('Streamflow (cms)',fontsize=14)\nax2.set_ylabel ('Precipitation (mm)',fontsize=14)\nlns=lns1+lns2+lns3+lns5+lns6\nlabs = [l.get_label() for l in lns]\nby_label = OrderedDict(zip(labs, lns))\nplt.legend(by_label.values(), by_label.keys(), loc=7) #bbox_to_anchor=(0.8, -0.2), ncol=2\nplt.title('Precipitation and Streamflow: Modeled versus Observations',fontsize=16)\nos.chdir(homedir)\n\n# Plot streamflow due to storm on all links\nplt.figure()\n\nfor i in outflow.columns:\n plt.plot(storm_flow_ref.index, outflow.loc[params.storm_start_date:params.storm_end_date,i])\n #plt.plot(outflow_all.index, outflow_all.loc[:,str(i)])\nplt.xticks(rotation=40)\nplt.xlabel ('Date',fontsize=14)\nplt.ylabel ('Streamflow (cms)',fontsize=14)\nplt.title('Streamflow During Storm')\n\n#%%\n# Plot Gumbel Distribution\n# Plot\nfig, ax = plt.subplots(1, 1, figsize=[6,4]) \n# Gumbel distribution of generic data\nx_test=np.arange(np.min(forcing_annual_max_sort),np.max(forcing_annual_max_sort))[::-1]\nx_test0=(x_test-loc)/scale\ny_test=np.exp(-(np.exp(-x_test0)))\nax.plot(x_test,y_test,'r-',linewidth=5, alpha=0.6, label='Gumbel Distribution\\n(epsilon= %.0f,\\n alpha= %.0f)' %(loc, scale))\n\n# Plot data vs plotting position\nplt.plot(forcing_annual_max_sort[::-1], pcntl,'bo', markeredgecolor='k', markeredgewidth=0.5, markersize=4,label='Data (Gringorten \\nPlotting Position)')\n\nmax_p=forcing_daily[params.storm_start_date:params.storm_end_date].max(axis=0)\nmax_p_index=forcing_daily[params.storm_start_date:params.storm_end_date].idxmax(axis=0)\nplt.plot(max_p, np.exp(-(np.exp(-(max_p-loc)/scale))), 'y*', markeredgecolor='k', markersize=10, markeredgewidth=0.5, label='Modeled 2006 storm,\\n P= 74.3 mm, CDF=0.70')\n\nplt.xlabel('Maximum Daily Precipitation (mm)')\nplt.ylabel('Cumulative Distribution Function')\nplt.title('Elwha Watershed Annual Daily Maximum Precipitation\\n Years 1950-2013')\nplt.legend()\n\n#%%\n# Plot Storm\nfig1, ax1=plt.subplots(1,1,figsize=(6,4))\nax1.plot(storm_flow_ref.index, storm_flow_ref, 'b--',label= 'Modeled- Q, Outlet')\nax2=ax1.twinx()\nax2.plot(pd.date_range(params.storm_start_date,params.storm_end_date), storm_precip, 'c-', label= 'Modeled- P, basin')\n\nax1.set_ybound(lower=None, upper=400)\nax2.set_ybound(lower=None, upper=200)\nax2.invert_yaxis()\nax2.set_ylabel ('Precipitation [mm]',fontsize=16)\n\nax1.set_xbound(lower=params.storm_start_date, upper=params.storm_end_date)\nax1.set_xlabel ('Storm Duration [days]',fontsize=16)\nax1.set_ylabel ('Streamflow [cms]',fontsize=16)\nplt.title('Outlet Hydrograph and Basin-wide 
Hyetograph\\nfor 2006 storm',fontsize=20)\nax1.tick_params(labelsize=14)\nax2.tick_params(labelsize=14)\n#ax1.legend()\n#ax2.legend()\n\n# Plot unit hydrograph on all links\n#option1=storm_flow_UH.index\n#option2=storm_flow_UH.sort_values(by=storm_flow_UH.columns[20], axis=0, ascending=False, inplace=False).index[0:70:4]\n#option3=[147, 165, 184, 197, 358, 251, 262, 288, 384, 388, 368, 382, 169, 373, 304, 337, 345, 189, 130, 311, 305, 135, 192, 385]\n\nfig1, ax1=plt.subplots(1,1,figsize=(6,4))\nfor i in storm_flow_UH.index:\n ax1.plot(days_flow_UH, storm_flow_UH.loc[i,:])\n #ax1.plot(np.arange(0,len(storm_Q_UH.columns))/8, storm_Q_UH.loc[i,:])\n #plt.semilogy(storm_Q_UH.columns, storm_Q_UH.loc[i,:])\n\nax2=ax1.twinx()\nax2.plot(days_precip_UH, storm_precip_UH.values, 'b--',label= 'Modeled- P, basin')\n\nax1.set_ybound(lower=None, upper=5)\nax2.set_ybound(lower=None, upper=5)\nax2.invert_yaxis()\nax2.set_ylabel ('Unit Precipitation [mm]',fontsize=16)\nax1.set_xlabel ('Unit Storm Duration [days]',fontsize=16)\nax1.set_ylabel ('Unit Streamflow [cms/mm]',fontsize=16)\nplt.title('Unit Hydrograph/Hyetograph',fontsize=20)\nax1.tick_params(labelsize=14)\nax2.tick_params(labelsize=14)\n\nfig1, ax1=plt.subplots(1,1,figsize=(6,4))\nfor i in storm_flow_UH.index:\n ax1.plot(days_flow_UH, storm_flow_UH.loc[i,:])\n #ax1.plot(np.arange(0,len(storm_Q_UH.columns))/8, storm_Q_UH.loc[i,:])\n #plt.semilogy(storm_Q_UH.columns, storm_Q_UH.loc[i,:])\n\nax2=ax1.twinx()\nax2.plot(days_precip_UH, storm_precip_UH.values, 'b--',label= 'Modeled- P, basin')\n\nax1.set_ybound(lower=None, upper=5)\nax2.set_ybound(lower=None, upper=5)\nax2.invert_yaxis()\nax2.set_ylabel ('Unit Precipitation [mm/mm*]',fontsize=16)\nax1.set_xlabel ('Unit Storm Duration [days/days*]',fontsize=16)\nax1.set_ylabel ('Unit Streamflow [cms/mm*]',fontsize=16)\nplt.title('Unit Hydrograph/Hyetograph',fontsize=20)\nax1.tick_params(labelsize=14)\nax2.tick_params(labelsize=14)","sub_path":"current_version/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":13042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"594457362","text":"import json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import Feature\n\n\nclass DunkinUSSpider(scrapy.Spider):\n name = \"dunkin_us\"\n item_attributes = {\n \"brand\": \"Dunkin'\",\n \"brand_wikidata\": \"Q847743\",\n }\n allowed_domains = [\"dunkindonuts.com\"]\n start_urls = [\n \"https://locations.dunkindonuts.com/en\",\n ]\n\n def parse(self, response):\n for href in response.xpath('//*[@class=\"Directory-content\"]//@href').extract():\n yield scrapy.Request(response.urljoin(href))\n\n if response.css('[itemtype=\"https://schema.org/FastFoodRestaurant\"]'):\n yield from self.parse_store(response)\n\n def parse_store(self, response):\n coords = json.loads(response.xpath('//script[@class=\"js-map-data\"]/text()').get())\n hours = json.loads(response.xpath('//script[@class=\"js-hours-config\"]/text()').get())\n opening_hours = OpeningHours()\n for row in hours[\"hours\"]:\n day = row[\"day\"][:2].capitalize()\n for i in row[\"intervals\"]:\n start_hour, start_minute = divmod(i[\"start\"], 100)\n end_hour, end_minute = divmod(i[\"end\"], 100)\n start_time = f\"{start_hour:02}:{start_minute:02}\"\n end_time = f\"{end_hour:02}:{end_minute:02}\"\n opening_hours.add_range(day, start_time, end_time)\n\n address = response.css(\"[itemprop=address]\")\n properties = {\n \"ref\": response.url.rsplit(\"/\", 1)[1],\n 
\"lat\": coords[\"latitude\"],\n \"lon\": coords[\"longitude\"],\n \"website\": response.url,\n \"street_address\": address.xpath('.//*[@itemprop=\"streetAddress\"]/@content').get(),\n \"city\": address.xpath('.//*[@itemprop=\"addressLocality\"]/@content').get(),\n \"state\": address.xpath('.//*[@itemprop=\"addressRegion\"]/text()').get(),\n \"postcode\": address.xpath('.//*[@itemprop=\"postalCode\"]/text()').get(),\n \"phone\": response.xpath('//*[@itemprop=\"telephone\"]/text()').get(),\n \"opening_hours\": opening_hours.as_opening_hours(),\n }\n yield Feature(**properties)\n","sub_path":"locations/spiders/dunkin_us.py","file_name":"dunkin_us.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"188684487","text":"import json\nimport gzip\nimport re\n\nwith gzip.open('jawiki-country.json.gz') as rf:\n for line in rf:\n obj = json.loads(line)\n if obj['title'] == 'イギリス':\n text = obj['text']\n break\nbasic_info_string = re.search(r'{{基礎情報.*?(.*)^}}$', text,\n flags=(re.MULTILINE | re.DOTALL))\nbasic_infos = re.findall(r'^\\|(.*?) *\\= *(.*?) *(?:(?=\\n\\|)|(?=\\n$))',\n basic_info_string.group(1), flags=(re.MULTILINE | re.DOTALL))\n\nbasic_info_dict = {}\nfor key, value in basic_infos:\n no_emphasis_value = re.sub(r\"'{2,}\", \"\", value)\n no_inner_link_value = re.sub(\n r\"\\[\\[(?:[^\\|]*\\|)??(([^\\|]*?)|({{.*?}}))\\]\\]\", r\"\\1\", no_emphasis_value)\n basic_info_dict[key] = no_inner_link_value\n print(\"'\" + key + \"', \" + \"'\" + no_inner_link_value + \"'\")\n","sub_path":"3/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"635848307","text":"# common_lib.py\n\nimport os\nimport sys\nimport time\n\n\n# common library\nclass CommonLib:\n def __init__(self):\n self.version = 1.0\n self.title = 'common library'\n self.errors = []\n\n # e.g. 
for pyinstaller\n    # add absolute path to resource\n    @staticmethod\n    def get_resource_path(relative_path):\n        try:\n            # pyinstaller creates a temp folder\n            # and stores path in _MEIPASS\n            base_path = sys._MEIPASS\n        except Exception:\n            # absolute path for base dir\n            base_path = os.path.abspath(\".\")\n        return os.path.join(base_path, relative_path)\n\n    @staticmethod\n    def get_format_current_date_hu():\n        return time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    @staticmethod\n    def get_month_name(index, lang='hu'):\n        if lang == 'hu':\n            months = ('január', 'február', 'március', 'április', 'május', 'június',\n                      'július', 'augusztus', 'szeptember', 'október', 'november', 'december')\n        else:\n            months = ('January', 'February', 'March', 'April', 'May', 'June',\n                      'July', 'August', 'September', 'October', 'November', 'December')\n        return months[index]\n\n    @staticmethod\n    def get_day_name(index, lang='hu'):\n        if lang == 'hu':\n            days = ('hétfő', 'kedd', 'szerda', 'csütörtök', 'péntek', 'szombat', 'vasárnap')\n        else:\n            days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\n        return days[index]\n\n    @staticmethod\n    def list_files(path):\n        files = []\n        for name in os.listdir(path):\n            if os.path.isfile(os.path.join(path, name)):\n                files.append(name)\n        return files\n\n# end\n","sub_path":"common_lib.py","file_name":"common_lib.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"576788881","text":"# Uses python3\nimport sys\n\ndef gcd(a, b):\n    x,y, u,v = 0,1, 1,0\n    while a != 0:\n        q, r = b//a, b%a\n        m, n = x-u*q, y-v*q\n        b,a, x,y, u,v = a,r, u,v, m,n\n    gcd = b\n    return gcd\n\ndef lcm(a, b):\n    return int(a*b / gcd(a, b))\n\nif __name__ == '__main__':\n    # input = sys.stdin.read()\n    a, b = map(int, input().split())\n    print(lcm(a, b))\n\n","sub_path":"1. 
Algorithmic Toolbox/Week 2/lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"602791668","text":"# Import dependencies\nimport csv\nimport os\n\n# Assign file location with the csv library\npoll_data=os.path.join(\"election_data.csv\")\n\nwith open(poll_data) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n #skip the header\n header= next(csvreader)\n\n #Declare Variables\n total_vt = 0\n khan_vt = 0\n correy_vt = 0\n li_vt = 0\n otooley_vt = 0\n\n\n\n\n for row in csvreader:\n\n # Count the unique Voter ID's and store in variable called total_votes\n total_vt +=1\n if row[2] == \"Khan\":\n khan_vt+=1\n elif row[2] == \"Correy\":\n correy_vt +=1\n elif row[2] == \"Li\":\n li_vt +=1\n elif row[2] == \"O'Tooley\":\n otooley_vt +=1\n\nkhan_perc = '{0:.3f}'.format((khan_vt/total_vt) * 100)\ncorr_perc = '{0:.3f}'.format((correy_vt/total_vt) * 100)\nli_perc = '{0:.3f}'.format((li_vt/total_vt) *100)\notool_perc = '{0:.3f}'.format((otooley_vt/total_vt) * 100)\n\n\nprint(\"Election Results\")\nprint(\"Total Votes:\" + str(total_vt)) \nprint(\"Khan: \" + str(khan_vt) + \" \" + str(khan_perc) + \"%\")\nprint(\"Correy: \" + str(correy_vt) + \" \" + str(corr_perc) + \"%\")\nprint(\"Li: \" + str(li_vt) + \" \" + str(li_perc) + \"%\")\nprint(\"O'Tooley: \" + str(otooley_vt) + \" \" + str(otool_perc) + \"%\")\n\n\n\ncsvfile = open (\"newpoll.txt\", 'w')\ncsvfile.write(\"Election Results\\n\")\ncsvfile.write(\"Total Votes:\" + str(total_vt) + \"\\n\")\ncsvfile.write(\"Khan: \" + str(khan_vt) + \" \" + str(khan_perc) + \"%\\n\")\ncsvfile.write(\"Correy: \" + str(correy_vt) + \" \" + str(corr_perc) + \"%\\n\")\ncsvfile.write(\"Li: \" + str(li_vt) + \" \" + str(li_perc) + \"%\\n\")\ncsvfile.write(\"O'Tooley: \" + str(otooley_vt) + \" \" + str(otool_perc) + \"%\\n\")\ncsvfile.close()\n","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"384346654","text":"'''\n api.py\n Mash Ibtesum, October 23, 2018\n Simple API to retrive data from the schools databse\n'''\n\nimport psycopg2\nimport sys\nimport flask\nimport json\nimport ast\nfrom config import *\nfrom flask import request\nfrom urllib.parse import urlparse, parse_qs\n\napp = flask.Flask(__name__, static_folder='static')\n\n@app.after_request\ndef after_request(response):\n '''\n Allows cross domain origins by adding the appropriate headers to the responses\n '''\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n return response\n\n@app.route('/')\ndef hello():\n '''\n Generic response for the home page\n '''\n print(\"hello\")\n return('Youve reached the home of the APIIII')\n\n@app.route('/schools/',methods=['GET'])\ndef schools():\n '''\n Filter the list of schools using the query parameters. 
At any moment, multiple\n parameters are going to be used.\n\n RESPONSE: a list of 20 schools with the following details :\n * name\n * location\n * acceptance rate\n * SAT\n * ACT\n * Yearly Tuition\n * Diversity\n * Mid Career Income\n * SERIAL/ 8 digit unique id (OPEID)\n\n GET Parameters :\n Parameters | Required | Valid Options | DEFAULT | DESCRIPTION\n ------------------------------------------------------------------------------\n ownership y public(1), private(2), all (TXT) all\n degree y 2(1), 4(2), grad(3), all (TXT) all 2 year or 4 year colleges\n majors n see MAJOR_LIST (TXT) all\n region_id n see REGION_ID_LIST (INT) all\n SAT_AVG y 200 - 1600 (INT ARR) [800-1600] AVG SAT score\n ACTCMMID y 0-36 (INT ARR) [15-34] ACT midpoint\n COSTT4_A y 0 - 100,000 (INT ARR) [0 - 100,000] Avg Cost of attendace\n MD_EARN_WNE_P9 y 0 - 300,000 (INT ARR) [0 - 300,000] Median earning after 9 yrs\n ADM_RATE y 0 - 1 (INT ARR) [0 - 1] Admission rate in decimals\n page y 1 - 1000 (INT) 1 Number of page to display\n '''\n # TODO: (Optional) implement majors as a search query\n try:\n response = getSchools(\n ast.literal_eval(request.args.get('adm_rate')),\n ast.literal_eval(request.args.get('sat_avg')),\n ast.literal_eval(request.args.get('region_id')),\n ast.literal_eval(request.args.get('ACTCMMID'.lower())),\n ast.literal_eval(request.args.get('md_earn_wne_p10')),\n ast.literal_eval(request.args.get('COSTT4_A'.lower())),\n ast.literal_eval(request.args.get('owner')),\n ast.literal_eval(request.args.get('degree'))\n )\n return(json.dumps(response, indent=4))\n except Exception as e:\n print(e)\n return(\"Wrong query. Check the API doc because the example is too long\")\n\n\n@app.route('/school/',methods=['GET'])\ndef school():\n '''\n Returns detailed profile of a singular college provided the 8 digit OPID\n '''\n try:\n response = getSchool(int(request.args.get('opeid')))\n return(json.dumps(response, indent=4))\n except Exception as e:\n print(e)\n return(\"Wrong query. Check the console log. \\nExcepted structure : /school/?opeid=[insertOpeidHere]\")\n\n@app.route('/schools/name/')\ndef schoolsByName(name):\n '''\n Filter the list of schools using the name parameter. Ignores other parameter. The result is similar to /schools\n '''\n try:\n response = getSchoolsByName(str(name))\n return(json.dumps(response, indent=4))\n except Exception as e:\n print(e)\n return(\"Wrong query. Check the console log. \\nExcepted structure : /schools/name/?name=[insertNameHere]\")\n\n@app.route('/states/', methods=['GET'])\ndef states():\n try:\n return(json.dumps(getStates(), indent=4))\n except Exception as e:\n print(e)\n return(\"Contact your sys admin\")\n\n# TODO: implement /major and /regions and also enum.\n# TODO: Add sort (?) Looks good as is\n\n\n@app.route('/regions/', methods=['GET'])\ndef regions():\n try:\n return(json.dumps(getRegions(), indent=4))\n except Exception as e:\n print(e)\n return(\"Contact your sys admin\")\n\ndef getConnectection():\n '''\n Returns a connection to the database described\n in the config module. Returns None if the\n connection attempt fails.\n '''\n try:\n connection = psycopg2.connect(database=database, user=user, password=password)\n except Exception as e:\n print(e)\n exit()\n return connection\n\ndef getSchools(adm_rate, sat_avg, region_id, ACTCMMID, md_earn_wne_p10, COSTT4_A, ownership, degree):\n '''\n Is called by /schools/ endpoint. 
Uses psycopg2 to run the command appropriate sql\n query and returns the result as an array of dicts.\n '''\n try:\n connection = getConnectection()\n cursor = connection.cursor()\n query = '''\n SELECT name, CITY, state, OPEID, ACTCMMID, ADM_RATE, SAT_AVG, UGDS_WHITE, COSTT4_A, MD_EARN_WNE_P10, insturl, degree, owner\n FROM schools\n WHERE sat_avg >= {}\n AND sat_avg <={}\n AND md_earn_wne_p10 >= {}\n AND md_earn_wne_p10 <= {}\n AND ACTCMMID >= {}\n AND ACTCMMID <={}\n AND COSTT4_A >= {}\n AND COSTT4_A <={}\n AND adm_rate >= {}\n AND adm_rate <= {}\n '''.format(sat_avg[0],sat_avg[1],\n md_earn_wne_p10[0], md_earn_wne_p10[1],ACTCMMID[0], ACTCMMID[1],\n COSTT4_A[0],COSTT4_A[1], adm_rate[0],adm_rate[1])\n if region_id:\n query+= \"\\n AND region_id = \" + str(region_id)\n if degree:\n query+= \"\\n AND degree <= \" + str(degree)\n if ownership:\n query+= \"\\n AND owner = \" + str(ownership)\n query += \"\\nORDER BY adm_rate ASC\"\n cursor.execute(query)\n answer = []\n # header = [field[0] for field in cursor.description]\n header = ['name', 'city', 'state', 'opeid', 'actcmmid', 'adm_rate', 'sat_avg', 'ugds_white', 'costt4_a', 'md_earn_wne_p10', 'insturl', 'degree', 'owner']\n body = []\n\n for row in cursor:\n body.append(row)\n\n # Generates the dics using the provided headers.\n for school in body:\n school_dict = {}\n for i in range(0, len(header)):\n school_dict[header[i]] = school[i]\n answer.append(school_dict)\n connection.close()\n return answer\n\n except Exception as e:\n print(e)\n connection.close()\n return None\n\ndef getSchoolsByName(name):\n '''\n Is called by /schools/name endpoint. Uses psycopg2 to run the command appropriate sql\n query and returns the result as an array of dicts.\n '''\n try:\n connection = getConnectection()\n cursor = connection.cursor()\n query ='''\n SELECT name, CITY, state, OPEID, ACTCMMID, ADM_RATE, SAT_AVG, UGDS_WHITE, COSTT4_A, MD_EARN_WNE_P10, insturl, degree, owner\n FROM schools\n WHERE name ilike '%{}%'\n '''.format(name)\n\n cursor.execute(query)\n answer = []\n # header = [field[0] for field in cursor.description]\n header = ['name', 'city', 'state', 'opeid', 'actcmmid', 'adm_rate', 'sat_avg', 'ugds_white', 'costt4_a', 'md_earn_wne_p10', 'insturl', 'degree', 'owner']\n body = []\n\n for row in cursor:\n body.append(row)\n\n for school in body:\n school_dict = {}\n for i in range(0, len(header)):\n school_dict[header[i]] = school[i]\n answer.append(school_dict)\n connection.close()\n return answer\n\n except Exception as e:\n connection.close()\n print(e)\n return None\n\ndef getStates():\n '''\n Is called by /staets endpoint. Uses psycopg2 to run the command appropriate sql\n query and returns the result as a specially formatted dictionary designed to cater\n to Semantic UI Framework's dropdown.\n '''\n try:\n connection = getConnectection()\n cursor = connection.cursor()\n query ='''\n SELECT name, abbr\n FROM states\n '''\n\n cursor.execute(query)\n answer = {}\n answer[\"sucess\"] = True\n answer[\"results\"] = []\n # header = [field[0] for field in cursor.description]\n\n for row in cursor:\n td = {}\n td[\"value\"]= row[1]\n td[\"name\"]= row[0]\n td[\"text\"]= row[0]\n answer[\"results\"].append(td)\n connection.close()\n return answer\n\n except Exception as e:\n print(e)\n connection.close()\n return None\n\n\ndef getRegions():\n '''\n Is called by /staets endpoint. 
Uses psycopg2 to run the command appropriate sql\n query and returns the result as a specially formatted dictionary designed to cater\n to Semantic UI Framework's dropdown.\n '''\n try:\n connection = getConnectection()\n cursor = connection.cursor()\n query ='''\n SELECT id, name\n FROM region\n '''\n\n cursor.execute(query)\n answer = {}\n answer[\"sucess\"] = True\n answer[\"results\"] = []\n # header = [field[0] for field in cursor.description]\n\n for row in cursor:\n td = {}\n td[\"value\"]= row[0]\n td[\"name\"]= row[1]\n td[\"text\"]= row[1]\n answer[\"results\"].append(td)\n connection.close()\n return answer\n\n except Exception as e:\n print(e)\n connection.close()\n return None\n\ndef getSchool(opeid):\n '''\n Is called by /school endpoint. Uses psycopg2 to run the command appropriate sql\n query and returns the result as a dict.\n '''\n try:\n connection = getConnectection()\n cursor = connection.cursor()\n query ='''\n SELECT *\n FROM schools\n WHERE opeid = {}\n '''.format(opeid)\n cursor.execute(query)\n header = [field[0] for field in cursor.description]\n body = [row for row in cursor]\n # Generates the dict, matching the header with the data\n for school in body:\n school_dict = {}\n for i in range(0, len(header)):\n school_dict[header[i]] = school[i]\n connection.close()\n return school_dict\n\n except Exception as e:\n connection.close()\n print(e)\n return None\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('Usage: {0} host port'.format(sys.argv[0]))\n print(' Example: {0} perlman.mathcs.carleton.edu 5101'.format(sys.argv[0]))\n exit()\n host = sys.argv[1]\n port = int(sys.argv[2])\n app.run(host=host, port=port, debug=True)\n","sub_path":"webapp/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"314071721","text":"from DijkstraForwardSearch import maze_environment\nfrom math import inf, sqrt\nfrom heapq import heappop, heappush\n\n\ndef dijkstras_shortest_path(initial_position, destination, graph, adj):\n \"\"\" Searches for a minimal cost path through a graph using Dijkstra's algorithm.\n\n Args:\n initial_position: The initial cell from which the path extends.\n destination: The end location for the path.\n graph: A loaded level, containing walls, spaces, and waypoints.\n adj: An adjacency function returning cells adjacent to a given cell as well as their respective edge costs.\n\n Returns:\n If a path exits, return a list containing all cells from initial_position to destination.\n Otherwise, return None.\n\n \"\"\"\n paths = {initial_position: []} # maps cells to previous cells on path\n pathcosts = {initial_position: 0} # maps cells to their pathcosts (found so far)\n queue = []\n heappush(queue, (0, initial_position)) # maintain a priority queue of cells\n \n while queue:\n priority, cell = heappop(queue)\n if cell == destination:\n return path_to_cell(cell, paths)\n \n # investigate children\n for (child, step_cost) in adj(graph, cell):\n # calculate cost along this path to child\n cost_to_child = priority + transition_cost(graph, cell, child)\n if child not in pathcosts or cost_to_child < pathcosts[child]:\n pathcosts[child] = cost_to_child # update the cost\n paths[child] = cell # set the backpointer\n heappush(queue, (cost_to_child, child)) # put the child on the priority queue\n \n return False\n\ndef path_to_cell(cell, paths):\n if cell == []:\n return []\n return path_to_cell(paths[cell], paths) + [cell]\n 
\n\n\n\ndef navigation_edges(level, cell):\n \"\"\" Provides a list of adjacent cells and their respective costs from the given cell.\n\n Args:\n level: A loaded level, containing walls, spaces, and waypoints.\n cell: A target location.\n\n Returns:\n A list of tuples containing an adjacent cell's coordinates and the cost of the edge joining it and the\n originating cell.\n\n E.g. from (0,0):\n [((0,1), 1),\n ((1,0), 1),\n ((1,1), 1.4142135623730951),\n ... ]\n \"\"\"\n res = []\n for delta in [(x, y) for x in [-1,0,1] for y in [-1,0,1] if not (x==0 and y==0)]:\n new = (cell[0] + delta[0], cell[1] + delta[1])\n if new in level['spaces']:\n res.append((new, transition_cost(level, new, cell)))\n return res\n\ndef transition_cost(level, cell, cell2):\n distance = sqrt((cell2[0] - cell[0])**2 + (cell2[1] - cell[1])**2)\n average_cost = (level['spaces'][cell] + level['spaces'][cell2])/2\n return distance * average_cost\n\n\ndef test_route(filename, src_waypoint, dst_waypoint):\n \"\"\" Loads a level, searches for a path between the given waypoints, and displays the result.\n\n Args:\n filename: The name of the text file containing the level.\n src_waypoint: The character associated with the initial waypoint.\n dst_waypoint: The character associated with the destination waypoint.\n\n \"\"\"\n\n # Load and display the level.\n level = load_level(filename)\n show_level(level)\n\n # Retrieve the source and destination coordinates from the level.\n src = level['waypoints'][src_waypoint]\n dst = level['waypoints'][dst_waypoint]\n\n # Search for and display the path from src to dst.\n path = dijkstras_shortest_path(src, dst, level, navigation_edges)\n if path:\n show_level(level, path)\n else:\n print(\"No path possible!\")\n\n\nif __name__ == '__main__':\n filename, src_waypoint, dst_waypoint = 'example.txt', 'a','e'\n\n # Use this function call to find the route between two waypoints.\n test_route(filename, src_waypoint, dst_waypoint)\n\n","sub_path":"CMPM146/P1_Navmesh/src/DijkstraForwardSearch/Dijkstra_forward_search.py","file_name":"Dijkstra_forward_search.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"420988430","text":"\"\"\"\nGlassware example AMP book p77. From edx course MIT 15.053x\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom cvxopt import matrix, solvers\n\nvarNames = [\n '6-oz glasses',\n '10-oz glasses',\n 'champgne glasses'\n ]\n\nconstraintNames = [\n 'production capacity',\n 'storage capacity',\n 'demand limit 6-oz glasses',\n 'nonNeg 6-oz glasses',\n 'nonNeg 10-oz glasses',\n 'nonNeg Champagne glasses'\n ]\n\nA = matrix([#constraints (s.t.)\n [6., 10., 1.,-1., 0., 0.],#6-oz\n [5., 20., 0., 0.,-1., 0.],#10-oz\n [8., 10., 0., 0., 0.,-1.]#champagne\n ])\n\nb = matrix([60., 150., 8., 0., 0., 0.])#rhs\n\nc = -matrix([5., 4.5, 6.])# objective\n\n\nsol=solvers.lp(c,A,b,\n #solver = 'glpk'\n )\n\nprint('\\nOptimal values (solution)')\ndfSol = pd.DataFrame(\n list(zip(varNames, list(sol['x']))),\n columns = ['varName', 'optSolValue'])\nprint(dfSol)\n\nprint('\\ndual values (shadow prices and reduced costs')\ndfDualVals = pd.DataFrame(\n list(zip(constraintNames, list(sol['z']), (['shadow price']*3 + ['reduced cost']*3))),\n columns = ['constraint', 'dual value', 'description'])\nprint(dfDualVals)\n\nprint(\"\\nsol['s']. 
Not sure how to call it\")\ndfS = pd.DataFrame(\n list(zip(constraintNames, list(sol['s']))),\n columns = ['constraint', 's'])\nprint(dfS)\n\n#NOTE I don't know what to call 's' but I can calculate as follows\ns_sr = np.array(b) - np.dot(np.array(A), np.array(sol['x']))\ns_sr = s_sr*(s_sr > 1e-6)\n\ns_cvxopt = np.array(sol['s'])*(np.array(sol['s']) > 1e-6)\n\n# s_sr and s_cvxopt are identical within numerical error.\n\n# I don't know how to get the allowable increase and decrase of the rhs or the\n# objective function coefficients.\n\n","sub_path":"glasswareLpAPMexample_cvxopt.py","file_name":"glasswareLpAPMexample_cvxopt.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"325630284","text":"import socket\n\nprime = 23\nbase = 5\nsecret = 15\nfinal = False\n\ndef D_H(base, secret):\n return str(int(base) ** secret % prime)\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(('localhost', 6669))\nserver.listen(2)\n\nwhile not final:\n connec, addr = server.accept()\n print(\"Connected from \" + str(addr) + \"\\n\")\n \n other = connec.recv(1024).decode()\n\n print(\"Sending: \" + D_H(base, secret))\n connec.send(D_H(base, secret).encode())\n print(\"Recieved: \" + other + \"\\n\")\n\n print(\"Sending: \" + D_H(other, secret))\n connec.send(D_H(other, secret).encode())\n final = connec.recv(1024).decode()\n \nprint(\"Recieved: \" + final + \"\\n\")\nserver.close()\n","sub_path":"Diffie–Hellman/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"436697915","text":"\"\"\"empty message\n\nRevision ID: ef6f3ad870b9\nRevises: 53900d8e18c7\nCreate Date: 2020-11-05 15:48:24.313268\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef6f3ad870b9'\ndown_revision = '53900d8e18c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('orders', schema=None) as batch_op:\n batch_op.add_column(sa.Column('date_now', sa.DateTime(), nullable=True))\n batch_op.drop_column('date')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    with op.batch_alter_table('orders', schema=None) as batch_op:\n        batch_op.add_column(sa.Column('date', sa.VARCHAR(), nullable=True))\n        batch_op.drop_column('date_now')\n\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/ef6f3ad870b9_.py","file_name":"ef6f3ad870b9_.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"134643843","text":"import hydrogenic as hy\nimport manyelec as me\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.integrate import quad\nimport time\n\n\ndef timer(func):\n    def wrapper(*args, **kwargs):\n        t1 = time.time()\n        output = func(*args,**kwargs)\n        t2 = time.time()\n        print(\"{} ran in {:.4f} seconds\".format(func.__name__,t2-t1))\n        return output\n    return wrapper\n\ndef lam(i):\n    alpha = 0.01\n    beta = 1.9\n    return alpha*beta**i\n\ndef rho(D, x, y, z):\n    \"\"\"returns charge density at (x,y,z) for given density matrix\"\"\"\n    total = 0\n    for i in range(D.shape[0]):\n        for j in range(D.shape[0]):\n            total += D[i,j]*hy.s_type(i,x,y,z)*hy.s_type(j,x,y,z)\n    return total\n\n@timer\ndef kohn_exchange(i, j, D):\n    \"\"\"calculates the Kohn-Sham exchange matrix elements\"\"\"\n    return -3**(4/3)*(4*lam(i)*lam(j))**(3/4)/np.pi**(5/6)*quad(lambda r: r**2*np.exp(-(lam(i)+lam(j))*r**2)*rho(D,r,0,0)**(1/3),0,5)[0]\n\n@timer\ndef build_G(J,D):\n    \"\"\"returns the repulsion matrix given the coulomb direct and density matrices\"\"\"\n    size = D.shape[0]\n    G = np.zeros((size,size))\n    for i in range(size):\n        for j in range(i,size):\n            G[i,j] -= 0.5*kohn_exchange(i,j,D)\n            for k in range(size):\n                for l in range(size):\n                    G[i,j] += J[i,j,k,l]*D[k,l]\n            G[j,i] = G[i,j]\n    return G\n\n\nif __name__ == '__main__':\n    start_time = time.time()\n\n    # define the atom\n    electrons = 2\n    Z = 2\n\n    # set parameters for the computation\n    dim = 10\n    MAX_ITER = 50\n    tolerance = 1e-10\n\n    # calculate the matrix elements that do not change throughout iteration\n    S, T, V, J, _ = me.build_STVJK(dim, Z)\n\n    D_old = np.zeros((dim,dim))\n    E_old = np.inf\n\n    for counter in range(MAX_ITER):\n\n        # calculate F and solve eigenvalue equation\n        print(\"building G on iter {}\".format(counter+1))\n        G = build_G(J,D_old)\n        print(\"done\")\n        F = T + V + G\n        eigenvals, eigenvecs = linalg.eigh(F, S, turbo=True, check_finite=False)\n\n        #calculate the new density matrix and atomic energy\n        D_new = me.calc_new_density(electrons, eigenvecs)\n        E_new, E_kin, E_pot, E_rep = me.calc_new_energy(T, V, G, D_new)\n\n        # plt.matshow(D_new)\n        # plt.colorbar()\n        # plt.show()\n\n        #calculate changes to density matrix and energy\n        delta_D = me.calc_delta_density(D_old, D_new)\n        delta_E = np.abs((E_old-E_new)/((E_new)+1))\n\n        # check if self-consistent field reached\n        if delta_E < tolerance and delta_D < tolerance: break\n\n        # print status to terminal and iterate density matrix and energy\n        print('E_new = {:.6f}\\t\\tdelta_E = {:.4e}\\t\\tdelta_D = {:.4e}'.format(E_new, delta_E, delta_D))\n        E_old = E_new\n        D_old = D_new\n\n        # check if maximum number of iterations has been reached\n        if counter == MAX_ITER-1:\n            print(\"--- {} seconds ---\".format(time.time() - start_time))\n            raise RuntimeError(\"Maximum number of iterations reached\")\n\n    # print results to terminal\n    print(\"\\n-------------------------------------------------\")\n    print('Done in {} iterations!'.format(counter+1))\n    print(\"Completion time: {:.2f} sec\".format(time.time() - start_time))\n    print('Atomic ground state energy: {:.6f} 
a.u.'.format(E_new))\n print(\"Electron kinetic energy:{:.6f} a.u.\".format(E_kin))\n print(\"Electron-nucleus energy:{:.6f} a.u.\".format(E_pot))\n print(\"Electron_electron repulsion:{:.6f} a.u.\".format(E_rep))\n print(\"-------------------------------------------------\\n\")\n","sub_path":"kohn.py","file_name":"kohn.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"434463870","text":"#Autor: Aline Paulette Villegas Berdejo\n#Este programa contiene un menú donde se puede elegir calcular divisiones o encontrar el número mayor\n\n\ndef dividir(dividendo, divisor): #Calcula el cociente y el residuo de una divisón\n cociente = 0\n if dividendo < divisor:\n cociente = 0\n residuo = dividendo\n while dividendo >= divisor:\n dividendo = dividendo - divisor\n cociente = cociente +1\n residuo = dividendo\n return cociente,residuo\n\n\ndef encontrarMayor(): #Encuentra el número mayor dentro de una serie de números\n numero = int(input(\"Teclea el número [-1 para salir]: \"))\n if numero != -1:\n if numero < -1:\n print(\"Error, no puedes ingresar números negativos si no es -1\")\n else:\n mayor = numero\n else:\n print(\"No hay valor mayor\")\n while numero > -1:\n numero = int(input(\"Teclea un número [-1 para salir]: \"))\n if mayor < numero:\n mayor = numero\n elif numero < -1:\n print(\"Error, no puedes ingresar números negativos si no es -1\")\n elif numero == -1:\n print(\"El mayor es: \", mayor)\n\n\ndef main(): #Ejecuta las funciones y el menú\n print(\"Misión 07. Ciclos while \\nAutor: Aline Villegas Berdejo \\nMatrícula: A01375818\")\n print(\"1. Calcular divisiones \\n2. Encontrar el mayor \\n3.Salir \")\n opcion= int(input(\"Teclea tu opción: \"))\n while opcion != 3:\n if opcion == 1:\n print(\"\\nCalculando divisones\")\n dividendo= int(input(\"Dividendo: \"))\n divisor = int(input(\"Divisor: \"))\n division = dividir(dividendo, divisor)\n print(dividendo , \"/\" , divisor , \"=\" , division[0] , \", sobra\" , division[1])\n print(\"\\nMisión 07. Ciclos while \\nAutor: Aline Villegas Berdejo \\nMatrícula: A01375818\")\n print(\"1. Calcular divisiones \\n2. Encontrar el mayor \\n3.Salir \")\n opcion = int(input(\"Teclea tu opción: \"))\n\n elif opcion == 2:\n print(\"\\nTeclea una serie de números para encontrar el mayor. \")\n encontrarMayor()\n print(\"\\nMisión 07. Ciclos while \\nAutor: Aline Villegas Berdejo \\nMatrícula: A01375818\")\n print(\"1. Calcular divisiones \\n2. Encontrar el mayor \\n3.Salir \")\n opcion = int(input(\"Teclea tu opción: \"))\n\n else:\n print(\"ERROR, teclea 1, 2 ó 3\")\n print(\"\\nMisión 07. Ciclos while \\nAutor: Aline Villegas Berdejo \\nMatrícula: A01375818\")\n print(\"1. Calcular divisiones \\n2. Encontrar el mayor \\n3.Salir \")\n opcion = int(input(\"Teclea tu opción: \"))\n\n if opcion == 3:\n print(\"\\nGracias por usar este programa, regresa pronto.\")\n\n\nmain() #Llama a la función principal","sub_path":"Mision07.py","file_name":"Mision07.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"72263018","text":"\"\"\"Create GeoDataFrames of place boundaries.\"\"\"\n\nimport logging as lg\n\nimport geopandas as gpd\n\nfrom . import downloader\nfrom . import projection\nfrom . import settings\nfrom . 
import utils\n\n\ndef gdf_from_place(query, which_result=1, buffer_dist=None):\n \"\"\"\n Create a GeoDataFrame from a single place name query.\n\n Geocode the query with Nominatim then turn it into a GeoDataFrame with\n a geometry column.\n\n Parameters\n ----------\n query : string or dict\n query string or structured query dict to geocode/download\n which_result : int\n max number of results to return and which to process upon receipt\n buffer_dist : float\n distance to buffer around the place geometry, in meters\n\n Returns\n -------\n gdf : geopandas.GeoDataFrame\n \"\"\"\n # ensure query type\n if not isinstance(query, (str, dict)):\n raise ValueError(\"query must be a dict or a string\")\n\n # get the data from OSM\n data = downloader._osm_polygon_download(query, limit=which_result)\n if len(data) >= which_result:\n\n # extract data elements from the JSON response\n result = data[which_result - 1]\n bbox_south, bbox_north, bbox_west, bbox_east = [float(x) for x in result[\"boundingbox\"]]\n geometry = result[\"geojson\"]\n place = result[\"display_name\"]\n features = [\n {\n \"type\": \"Feature\",\n \"geometry\": geometry,\n \"properties\": {\n \"place_name\": place,\n \"bbox_north\": bbox_north,\n \"bbox_south\": bbox_south,\n \"bbox_east\": bbox_east,\n \"bbox_west\": bbox_west,\n },\n }\n ]\n\n # if we got an unexpected geometry type (like a point), log a warning\n if geometry[\"type\"] not in [\"Polygon\", \"MultiPolygon\"]:\n utils.log(f'OSM returned a {geometry[\"type\"]} as the geometry', level=lg.WARNING)\n\n # create the GeoDataFrame, name it, and set its original CRS to default_crs\n gdf = gpd.GeoDataFrame.from_features(features)\n gdf.crs = settings.default_crs\n\n # if buffer_dist was passed in, project the geometry to UTM, buffer it\n # in meters, then project it back to lat-lng\n if buffer_dist is not None:\n gdf_utm = projection.project_gdf(gdf)\n gdf_utm[\"geometry\"] = gdf_utm[\"geometry\"].buffer(buffer_dist)\n gdf = projection.project_gdf(gdf_utm, to_latlong=True)\n utils.log(f\"Buffered GeoDataFrame to {buffer_dist} meters\")\n\n # return the gdf\n utils.log(f'Created GeoDataFrame with {len(gdf)} row for query \"{query}\"')\n return gdf\n else:\n # if no data returned (or fewer results than which_result)\n utils.log(\n f'OSM returned no results (or fewer than which_result) for query \"{query}\"',\n level=lg.WARNING,\n )\n return gpd.GeoDataFrame()\n\n\ndef gdf_from_places(queries, which_results=None, buffer_dist=None):\n \"\"\"\n Create a GeoDataFrame from a list of place name queries.\n\n Geocode the queries with Nominatim then turn result into GeoDataFrame with\n a geometry column.\n\n Parameters\n ----------\n queries : list\n list of query strings or structured query dicts to geocode/download,\n one at a time\n which_results : list\n if not None, a list of max number of results to return and which to\n process upon receipt, for each query in queries\n buffer_dist : float\n distance to buffer around the place geometry, in meters\n\n Returns\n -------\n gdf : geopandas.GeoDataFrame\n \"\"\"\n # create an empty GeoDataFrame then append each result as a new row,\n # checking for the presence of which_results\n gdf = gpd.GeoDataFrame()\n if which_results is not None:\n\n if len(queries) != len(which_results):\n raise ValueError(\"which_results length must equal queries length\")\n\n for query, which_result in zip(queries, which_results):\n gdf_tmp = gdf_from_place(query, buffer_dist=buffer_dist, which_result=which_result)\n gdf = gdf.append(gdf_tmp)\n else:\n for 
query in queries:\n gdf = gdf.append(gdf_from_place(query, buffer_dist=buffer_dist))\n\n # reset the index\n gdf = gdf.reset_index(drop=True)\n\n # set the original CRS of the GeoDataFrame to default_crs, and return it\n gdf.crs = settings.default_crs\n utils.log(f\"Finished creating GeoDataFrame with {len(gdf)} rows from {len(queries)} queries\")\n return gdf\n","sub_path":"osmnx/boundaries.py","file_name":"boundaries.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"75792739","text":"####################################\n#file_name: plotting_zone.py #\n#author: Riccardo La Grassa #\n#data created: 16/11/2016 #\n#data last modified: #\n#Python interpreter: 3.5.2 #\n#mail: riccardo2468@gmail.com #\n####################################\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nltk import FreqDist\n\n\ndef autolabel_barh(rects):\n # attach some text labels\n for rect in rects:\n width = rect.get_width()\n plt.text(width+0.25,rect.get_y() + rect.get_height() / 2.1, '%d' % int(width), fontsize=6,fontweight='bold',\n bbox=dict(facecolor='lime', boxstyle='round', alpha=0.25))\n\ndef autolabel_barv(rects,color_box):\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width() / 2., 0.3+height, '%d' % int(height), fontsize=7, ha='center', va='bottom',bbox = dict(boxstyle='round', facecolor=color_box, alpha=0.25))\n\n\ndef analysis_word(w,str_title,n_fig):\n my_colors1 = [(1.0 / (1.0 + np.math.log2(x)), 0.0, x / len(w)) for x in range(1, len(w) + 1)]\n fig = plt.figure(n_fig, facecolor='white', edgecolor='k', figsize=(7, 9))\n #fig, ((ax1, ax2)) = plt.subplots(nrows=2)\n fig.suptitle(str_title)\n labelsW = [i[0] for i in w]\n valuesW = [i[1] for i in w]\n indexesW = [w for w in range(0,len(labelsW))]\n width = 1\n bar_w=plt.barh(indexesW, valuesW, width, color=my_colors1, align='center')\n plt.yticks(indexesW, labelsW, fontsize='10')\n autolabel_barh(bar_w)\n\n\ndef analysis_frequency(dateX,dateY,legend_labelX,legend_labelY):\n\n fig1 = plt.figure(1, facecolor='white', edgecolor='red', figsize=(13, 6))\n fig1.suptitle('Comparison numbers tweet')\n\n fdist1 = FreqDist(dateX)\n most_common1 = fdist1.most_common(len(dateX))\n most_common1.sort()\n labelsX = [i[0] for i in most_common1]\n valuesX = [i[1] for i in most_common1]\n\n fdist2 = FreqDist(dateY)\n most_common2 = fdist2.most_common(len(dateY))\n most_common2.sort()\n labelsY = [i[0] for i in most_common2]\n valuesY = [i[1] for i in most_common2]\n\n indexesX = np.arange(len(labelsX))\n width = 0.4\n\n my_colors = [(0.0, 0.0, x / len(labelsX)) for x in range(1, len(labelsX) + 1)]\n\n bar_X=plt.bar(indexesX-0.3, valuesX, width, color=my_colors, align='center', label=legend_labelX)\n autolabel_barv(bar_X,'blue')\n\n indexesY = np.arange(len(labelsY))\n bar_Y=plt.bar(indexesY, valuesY, width, fc=(1, 0, 0, 0.4), align='center', label=legend_labelY)\n autolabel_barv(bar_Y,'red')\n plt.xticks(indexesX, labelsX, rotation='vertical', fontsize='7')\n plt.legend(loc='upper right')\n\n ###########################################################################\n ###################plot frequencies####################\n fig2 = plt.figure(2, facecolor='white', edgecolor='red', figsize=(13, 5))\n fig2.suptitle('Plot tweetX frequencies of '+legend_labelX)\n plt.xticks(indexesX, labelsX, rotation='vertical', fontsize='7')\n plt.plot(indexesX, valuesX, color='b')\n\n fig3 = 
plt.figure(3, facecolor='white', edgecolor='red', figsize=(13, 5))\n fig3.suptitle('Plot tweetY frequencies of '+legend_labelY)\n plt.xticks(indexesY, labelsY, rotation='vertical', fontsize='7')\n plt.plot(indexesY, valuesY, color='r')","sub_path":"plotting_zone.py","file_name":"plotting_zone.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"449205007","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\"\"\" \npos tagger for building a LSTM based pos tagging model.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport os\n\n\nfrom pos import reader\n\ndef data_type():\n return tf.float32\n\nclass POSTagger(object):\n \"\"\"The pos Tagger Model.\"\"\"\n\n def __init__(self, is_training, config):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n target_num = config.target_num # target output number\n \n self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n \n # Check if Model is Training\n self.is_training = is_training\n \n lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)\n if is_training and config.keep_prob < 1:\n lstm_cell = tf.contrib.rnn.DropoutWrapper(\n lstm_cell, output_keep_prob=config.keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)\n \n self._initial_state = cell.zero_state(batch_size, data_type())\n \n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, self._input_data)\n \n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n \n outputs = []\n state = self._initial_state\n with tf.variable_scope(\"pos_lstm\"):\n for time_step in range(num_steps):\n if time_step > 0:\n tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n outputs.append(cell_output)\n \n output = tf.reshape(tf.concat(outputs, 1), [-1, size])\n softmax_w = tf.get_variable(\n \"softmax_w\", [size, target_num], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [target_num], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n logits = [logits],\n targets = [tf.reshape(self._targets, [-1])],\n weights = [tf.ones([batch_size * num_steps], dtype=data_type())])\n \n # Fetch Reults in session.run()\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n self._logits = logits\n \n # Set Optimizer and learning rate\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(zip(grads, tvars))\n\n self._new_lr = tf.placeholder(\n data_type(), shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n self.saver = tf.train.Saver(tf.global_variables())\n \n def assign_lr(self, session, lr_value):\n 
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n \n @property\n def input_data(self):\n return self._input_data\n \n @property\n def targets(self):\n return self._targets\n \n @property\n def initial_state(self):\n return self._initial_state\n \n @property\n def cost(self):\n return self._cost\n \n @property\n def final_state(self):\n return self._final_state\n \n @property\n def logits(self):\n return self._logits\n \n @property\n def lr(self):\n return self._lr\n \n @property\n def train_op(self):\n return self._train_op\n\ndef run_epoch(session, model, word_data, tag_data, eval_op, pos_train_dir, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n epoch_size = ((len(word_data) // model.batch_size) - 1) // model.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n for step, (x, y) in enumerate(reader.iterator(word_data, tag_data, model.batch_size,\n model.num_steps)):\n fetches = [model.cost, model.final_state, eval_op]\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n cost, state, _ = session.run(fetches, feed_dict)\n costs += cost\n iters += model.num_steps\n \n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * model.batch_size / (time.time() - start_time)))\n \n # Save Model to CheckPoint when is_training is True\n if model.is_training:\n if step % (epoch_size // 10) == 10:\n checkpoint_path = os.path.join(pos_train_dir, \"lstm\", \"lstm.ckpt\")\n model.saver.save(session, checkpoint_path)\n print(\"Model Saved... at time step \" + str(step))\n \n return np.exp(costs / iters)","sub_path":"transwarpnlp/pos/pos_model.py","file_name":"pos_model.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"375470351","text":"import re\nfrom collections import namedtuple\n\nDiagramTransformation = namedtuple('DiagramTransformation', ['model_name', 'model_type'])\nDiagramLine = namedtuple('DiagramLine', ['a_name', 'a_port', 'b_name', 'b_port'])\n\nJINJA_EXPRESSION_REGEX = re.compile(r'{{\\s*(.*?)\\s*}}')\n\n\ndef parse_diagram_command(str_cmd):\n \"\"\"Returns a diagram command or None if it's not a diagram command\"\"\"\n cmd_args_list = str_cmd.split('.')\n if len(cmd_args_list) <= 1:\n return None\n\n context = cmd_args_list.pop(0)\n if context != 'diagram':\n return None\n\n command = cmd_args_list.pop(0)\n if command == 'transformation':\n if len(cmd_args_list) != 2:\n raise Exception(f'Invalid diagram templating command: \"transformation\" expects 2 arguments but got {cmd_args_list}')\n return DiagramTransformation(*cmd_args_list)\n elif command == 'line':\n if len(cmd_args_list) != 4:\n raise Exception(f'Invalid diagram templating command: \"line\" expects 4 arguments but got {cmd_args_list}')\n return DiagramLine(*cmd_args_list)\n else:\n raise Exception(f'Invalid diagram templating command \"{command}\"')\n\n\ndef parse_diagram_commands(template_contents):\n \"\"\"Returns a list of diagram commands parsed from the template modelica contents\n i.e. 
it will find any instances of {{ diagram..* }} in the template\n\n :param template_contents: str, modelica template code\n :return: list[DiagramCommand]\n \"\"\"\n matches = JINJA_EXPRESSION_REGEX.finditer(template_contents)\n\n commands = []\n for match in matches:\n group = match.group(1)\n diagram_command = parse_diagram_command(group)\n if diagram_command is None:\n continue\n\n commands.append(diagram_command)\n\n return commands\n","sub_path":"geojson_modelica_translator/model_connectors/couplings/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"13532871","text":"import unittest\nimport Agent1\n\nclass TestConnectingMethod(unittest.TestCase):\n# def test_payloadsend(self):\n # self.assertEqual(True, taskCompleted)\n\n\n\n def check_retrieve_payload(self):\n url='https://jsonplaceholder.typicode.com'\n param='/posts/1'\n response = urllib.request.urlopen(url+param)\n payload = response.read()\n post = retrievePayload()\n self.assertEqual(post,payload)\n\n\nif __name__=='__main__':\n unittest.main()\n\n\n\n\n","sub_path":"TestsAgent1.py","file_name":"TestsAgent1.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"321053496","text":"import os\nimport pytest\nimport numpy as np\n\nfrom .. import Wav2VecSpeechEncoder\n\n\n@pytest.mark.skipif('JINA_TEST_PRETRAINED' not in os.environ, reason='skip the pretrained test if not set')\ndef test_encoding_results():\n target_output_dim = 512\n batch_size = 10\n signal_length = 1024\n test_data = np.random.randn(batch_size, signal_length).astype('f')\n encoder = Wav2VecSpeechEncoder(model_path='/tmp/wav2vec_large.pt', input_sample_rate=16000)\n encoded_data = encoder.encode(test_data)\n assert encoded_data.shape[0] == batch_size\n assert encoded_data.shape[1] % target_output_dim == 0\n","sub_path":"encoders/audio/Wav2VecSpeechEncoder/tests/test_wav2vecspeechencoder.py","file_name":"test_wav2vecspeechencoder.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"43206264","text":"import numpy as np\nfrom math import sqrt\nfrom sklearn import datasets\nfrom collections import Counter\n\nfrom .model_selection import train_test_split\nfrom .metrics import accuracy_score\n\nclass KNNClassifier():\n def __init__(self, k):\n self.k = k\n self._X_train = None\n self._y_train = None\n\n def fit(self, X_train, y_train):\n self._X_train = X_train\n self._y_train = y_train\n return self\n\n def predict(self, X_predict):\n \"\"\"给定待预测数据集X_predict,返回表示X_predict的结果向量\"\"\"\n assert self._X_train is not None and self._y_train is not None, \\\n \"must fit before predict!\"\n assert X_predict.shape[1] == self._X_train.shape[1], \\\n \"the feature number of X_predict must be equal to X_train\"\n y_predict = [self._predict(x) for x in X_predict]\n return np.array(y_predict)\n\n def _predict(self, x):\n \"\"\"给定单个待预测数据x,返回x的预测结果值\"\"\"\n assert x.shape[0] == self._X_train.shape[1], \\\n \"the feature number of x must be equal to X_train\"\n\n distances = [sqrt(np.sum((x_train - x)**2)) for x_train in self._X_train]\n nearest = np.argsort(distances)\n topK_y = [self._y_train[i] for i in nearest[:self.k]]\n votes = Counter(topK_y)\n\n return votes.most_common(1)[0][0]\n\n def score(self, X_test, y_test):\n y_predict = self.predict(X_test)\n return 
accuracy_score(y_test, y_predict)\n\n# def main():\n# # raw_data_X = [[3.4, 2.3],\n# # [3.1, 1.8],\n# # [1.3, 3.4],\n# # [3.6, 4.7],\n# # [2.3, 2.9],\n# # [7.4, 4.7],\n# # [5.7, 3.5],\n# # [9.2, 2.5],\n# # [7.8, 3.4],\n# # [7.9, 0.8],\n# # ]\n# # raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n# # X_train = np.array(raw_data_X)\n# # y_train = np.array(raw_data_y)\n# #\n# # my_kNN_clf = KNNClassifier(k=3)\n# # my_kNN_clf.fit(X_train, y_train)\n# # a = my_kNN_clf.predict(np.array([8.1, 7.4]).reshape(1, -1))\n# # print(a)\n\n# iris = datasets.load_iris()\n# X = iris.data\n# y = iris.target\n# X_train, y_train, X_test, y_test = train_test_split(X, y, seed=123)\n\n# my_knn_clf = KNNClassifier(k=3)\n# my_knn_clf.fit(X_train, y_train)\n# my_knn_clf.score(X_train, X_test)\n\n# if __name__ == \"__main__\":\n# main()","sub_path":"ML-Base-MOOC/code/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"265331003","text":"import firebase_admin\nfrom firebase_admin import credentials, firestore\nimport time\n\nclass Customer(object):\n def __init__(self, face_id, age, ethnicity, gender, visits, probabilities, inLine, last_updated):\n self.fac_id = face_id\n self.ethnicity = ethnicity\n self.gender = gender\n self.visits = visits\n self.probabilities = probabilities\n self.inLine = inLine\n self.last_updated = last_updated\n\ncred = credentials.Certificate(\"serviceAccountKey.json\")\nfirebase_admin.initialize_app(cred)\n\n\norder_list = [{'value': 2, 'name': \"Grilled Chicken Sandwich\"}, {'value': 4, 'name': \"Spicy Chicken Sandwich\"},{'value': 1, 'name': \"Nuggets\"}, {'value': 1, 'name': \"Waffle Potato Fries\"}]\n\ndb = firestore.client()\ndoc_ref = db.collection(u'Expo_Customers').document(u'fb2a3461-db5b-4c44-8bde-e521c1298560')\n'''doc_ref.set({\n u'face_id': u'Joel',\n u'age': 23,\n u'ethnicity': u'white',\n u'gender': u'male',\n u'visits': 6,\n u'probabilities': order_list,\n u'inLine': False,\n u'last_updated': int(round(time.time() * 1000))\n})'''\ndoc_ref.update({u'inLine': False})\n\ncol_ref = db.collection(u'Expo_Customers').order_by(u'last_updated').get()\nnames = []\n\nfor doc in col_ref:\n #print(doc.to_dict()['face_id'])\n names = names + [doc.to_dict()['face_id']]\n\nprint(names)\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"117175037","text":"from pydarknet import Detector, Image\n\ndef get_vacant_spots_from_image(image):\n net = Detector(bytes(\"cfg/yolov3.cfg\", encoding=\"utf-8\"), bytes(\"weights/yolov3.weights\", encoding=\"utf-8\"), 0, bytes(\"cfg/coco.data\",encoding=\"utf-8\"))\n\n img = Image(image)\n\n results = net.detect(img)\n print(results)\n\n for cat, score, bounds in results:\n x, y, w, h = bounds\n \n return \"\"","sub_path":"camera_gateway_api/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"158692530","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport argparse\nimport time\n# import cv2\nimport sys\n# import imutils\nimport RPi.GPIO as GPIO\nimport time\n\n# Project Imports\nimport puppypi_config\nimport puppypi_util\nimport puppypi_servo\n# import puppypi_video\n# import puppypi_aws\nimport puppypi_button\n\n\ndef main():\n 
parser = argparse.ArgumentParser(description='Face processing puppy.')\n\n\n parser.add_argument(\"-x\", \"--servo_x\", type=int, help=\"servo x setting\")\n parser.add_argument(\"-y\", \"--servo_y\", type=int, help=\"servo y setting\")\n parser.add_argument(\"--videofile\", help=\"pre recorded video file\")\n parser.add_argument(\"--aws\", help=\"test AWS\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\n parser.add_argument(\"-b\", \"--button\", help=\"wait for external button press\", action=\"store_true\")\n parser.add_argument(\"-db\", \"--debugbutton\", help=\"print a debug message on PCB button press\", action=\"store_true\")\n parser.add_argument(\"--noservo\", help=\"surpress the servo\", action=\"store_true\")\n parser.add_argument(\"--livevideo\", help=\"live video\", action=\"store_true\")\n parser.add_argument(\"--servodemo\", help=\"demonstrate the servo\", action=\"store_true\")\n parser.add_argument(\"--showvideoframe\", help=\"Display a video frame via XWindows\", action=\"store_true\")\n parser.add_argument(\"--novideo\", help=\"Surpress a video frame via XWindows\", action=\"store_true\")\n \n args = parser.parse_args()\n\n puppypi_config.verbosemode= args.verbose\n puppypi_config.showvideoframe= args.showvideoframe\n puppypi_config.novideo= args.novideo\n\n if args.noservo:\n puppypi_config.servousage = False\n puppypi_util.printmsg(\"Servo turned off\")\n\n if (args.livevideo):\n puppypi_servo.servo_on()\n puppypi_video.process_livevideo()\n puppypi_servo.servo_off()\n\n elif (args.button):\n puppypi_button.do_button()\n\n elif (args.aws):\n puppypi_aws.mainAWS(args.aws)\n\n elif args.videofile:\n puppypi_config.servousage = False\n puppypi_video.process_video(args.videofile)\n \n elif (args.servo_x >0 and args.servo_y >0):\n puppypi_servo.servo_on()\n puppypi_servo.servo_xy(args.servo_x, args.servo_y)\n puppypi_servo.servo_off()\n \n elif args.servodemo:\n puppypi_util.printmsg(\"Servo Demo\")\n puppypi_servo.servo_on()\n puppypi_servo.servo_demo()\n puppypi_servo.servo_off()\n\n elif (args.debugbutton):\n puppypi_button.do_button_debug()\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n\n finally:\n puppypi_servo.servo_off()\n puppypi_util.printmsg (\"Cleanup and exit\")\n\n\n","sub_path":"servo/puppypi.py","file_name":"puppypi.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"585881695","text":"import cv2\nfrom facenet_pytorch import MTCNN\nimport torch\nfrom datetime import datetime\nimport os\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndef capture_face():\n IMG_PATH = 'C:/Users/Gen Bodmas/PycharmProjects/CIFAR10/test_images'\n count = 30\n usr_name = input(\"Input ur name: \")\n USR_PATH = os.path.join(IMG_PATH, usr_name)\n leap = 1\n\n mtcnn = MTCNN(thresholds=[0.7, 0.7, 0.8], keep_all=True, device=device)\n\n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n while cap.isOpened() and count:\n isSuccess, frame = cap.read()\n if mtcnn(frame) is not None and leap % 2:\n path = str(\n USR_PATH + '/{}.jpg'.format(str(datetime.now())[:-7].replace(\":\", \"-\").replace(\" \", \"-\") + str(count)))\n face_img = mtcnn(frame, save_path=path)\n count -= 1\n leap += 1\n text = \"Please Hold Still, capturing in progress\"\n cv2.putText(frame, text, (7, 70), cv2.FONT_HERSHEY_DUPLEX, 1, (100, 255, 0), 3, cv2.LINE_AA)\n 
cv2.imshow('Face Detection', frame)\n if cv2.waitKey(1) & 0xFF == 27:\n break\n cap.release()\n cv2.destroyAllWindows()\n","sub_path":"face_capturing.py","file_name":"face_capturing.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"157279940","text":"import pygame\nimport pytmx\nimport pyscroll\n\nfrom inv import open_inv\nfrom player import Player\nfrom charge_world import Charge_world as swap\n\n\n\nclass Game:\n\n def __init__(self):\n\n self.charge_world = swap\n\n # creation de la fenêtre\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n pygame.display.set_caption(\"Pygamon - Aventure\")\n\n # charger la carte\n self.map = 'house'\n tmxData = pytmx.util_pygame.load_pygame('carte.tmx')\n mapData = pyscroll.data.TiledMapData(tmxData)\n mapLayer = pyscroll.orthographic.BufferedRenderer(mapData, self.screen.get_size())\n mapLayer.zoom = 3.5\n\n # generer un joueur\n playerPosition = tmxData.get_object_by_name('player')\n self.player = Player(playerPosition.x, playerPosition.y)\n\n # definir une liste qui va stocker les rectangles de collisions\n self.walls = []\n\n for obj in tmxData.objects:\n if obj.type == 'collision':\n self.walls.append(pygame.Rect(obj.x, obj.y, obj.width, obj.height))\n\n # dessiner le groupe de calque\n self.group = pyscroll.PyscrollGroup(map_layer=mapLayer, default_layer=5)\n self.group.add(self.player)\n\n # definir le rectangle de collision pour entrer dans la maison\n enter_house = tmxData.get_object_by_name('enter_house_blue')\n self.enter_house_rect = pygame.Rect(enter_house.x, enter_house.y, enter_house.width, enter_house.height)\n\n enter_world = tmxData.get_object_by_name('switch_world_ouest_top')\n self.enter_world_rect = pygame.Rect(enter_world.x, enter_world.y, enter_world.width, enter_world.height)\n\n def handleInput(self):\n pressed = pygame.key.get_pressed()\n\n if pressed[pygame.K_z]:\n self.player.move_up()\n #self.player.change_animation('up')\n elif pressed[pygame.K_s]:\n self.player.move_down()\n elif pressed[pygame.K_d]:\n self.player.move_right()\n #self.player.change_animation('right')\n elif pressed[pygame.K_q]:\n self.player.move_left()\n #self.player.change_animation('left')\n\n\n def update(self):\n self.group.update()\n\n # verifier l'entrer dans la maison\n if self.map == 'house' and self.player.feet.colliderect(self.enter_house_rect):\n self.charge_world.swap_world(self, 'house_blue.tmx', 4, self.player, 'exit_house_blue', 'spawn_house_blue', 'world')\n\n # verifier la sortie dans la maison\n if self.map == 'world' and self.player.feet.colliderect(self.enter_house_rect):\n self.charge_world.swap_world(self, 'carte.tmx', 3.5, self.player, 'enter_house_blue', 'enter_house_exit', 'house')\n\n if self.map == 'carte_Ouest' and self.player.feet.colliderect(self.enter_house_rect):\n self.charge_world.swap_world(self, 'carte_Ouest.tmx', 3.5, self.player, 'switch_world_middle_top', 'spawn_world_ouest_top', 'world')\n print('Changez monde')\n\n # verification de la collision\n for sprite in self.group.sprites():\n if sprite.feet.collidelist(self.walls) > -1:\n sprite.move_back()\n\n def run(self):\n\n clock = pygame.time.Clock()\n\n # boucle du jeu\n running = True\n\n while running:\n\n self.player.save_location()\n self.handleInput()\n self.update()\n self.group.center(self.player.rect)\n self.group.draw(self.screen)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = 
False\n\n clock.tick(60)\n\n pygame.quit()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"507629281","text":"''' Write a program that reads a text file (you don't have to error check the filename), keeps a \ncount of the individual words in the file using a dictionary and finally converts the dictionary to \na list of tuples and prints out a sorted version of the list. The main program is given, do not change it.\n \nNote that the counts are not case-sensitive, that is, 'Word' is the same as 'word' or 'wORd'.\n\nAlso, note that your program should account for if a punctuation (like a comma) appears at the end of a word. '''\n\nimport string\n\ndef get_word_list (file_stream):\n word_list = []\n for line in file_stream:\n line = line.strip()\n line_list = line.split()\n for word in line_list:\n word = word.strip()\n word = word.strip(string.punctuation)\n word = word.lower()\n word_list.append(word)\n return word_list\n \n\n\ndef word_list_to_counts (word_list):\n word_count_dict = {}\n for word in word_list:\n if word in word_count_dict:\n word_count_dict[word] += 1\n else:\n word_count_dict[word] = 1\n return word_count_dict\n\ndef dict_to_tuple (word_count_dict):\n dict_list = []\n for item in word_count_dict.items():\n dict_list.append(item)\n return dict_list\n\ndef main():\n filename = input(\"Name of file: \")\n # Get a file stream\n fstream = open(filename)\n # Get a list of words from the stream\n word_list = get_word_list(fstream) \n fstream.close()\n # Transform the list to a dictionary of word-count pairs\n word_count_dict = word_list_to_counts(word_list)\n # Finally, makes a list of tuples from the dictionary\n word_count_tuples = dict_to_tuple(word_count_dict)\n print(sorted(word_count_tuples))\n \nmain()","sub_path":"Assignment/Assignment 13/word_counts.py","file_name":"word_counts.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"346043434","text":"import click\nfrom http.client import HTTPConnection\nfrom urllib.request import urlopen\nimport json\nfrom git import Repo\nimport os\n\n@click.command()\n@click.argument('name')\n@click.option('--output', '-o', default=None, help='Output directory')\n@click.option('--search', '-S', is_flag=True, default=False, help='Search boilerplate')\n\ndef handle(name, output, search):\n if(search):\n searchPackage(name)\n else:\n installPackage(name, output)\n\ndef installPackage(name,output):\n response = urlopen('http://maid.localhost/'+name+'.php')\n data = response.read()\n package = json.loads(data.decode(\"utf-8\"))\n print(\"Downloading '\"+package['name']+\"'...\")\n if(output == None):\n directory = os.getcwd()+\"/\"+package['name']\n else:\n directory = os.getcwd()+\"/\"+output\n Repo.clone_from(package['git'], directory)\n\ndef searchPackage(name):\n response = urlopen('http://maid.localhost/search/'+name+'.php')\n data = response.read()\n packages = json.loads(data.decode(\"utf-8\"))\n print(\"Searching '\"+name+\"'...\")\n for package in packages['packages']:\n print(\"Package:\", package)\n\nif __name__ == '__main__':\n handle()\n","sub_path":"maid.py","file_name":"maid.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"405374855","text":"from flask_sqlalchemy import 
UnmappedClassError\n\nfrom . import db\nfrom .tables import User, Comment\nfrom .helpers import add_new_record\n\n\ndef get_comments(author_id):\n try:\n user = User.query.filter_by(id=author_id).first()\n comments = user.comments.order_by(Comment.timestamp).all()\n except AttributeError as e:\n raise ValueError('待定') from e\n else:\n return comments\n\n\ndef post_comment(author_id, post_id, comment_id, body):\n try:\n new_comment = Comment(\n author_id=author_id, post_id=post_id,\n comment_id=comment_id, body=body)\n except Exception:\n raise\n else:\n add_new_record(new_comment)\n return new_comment.id\n\n\ndef delete_comment(comment_id):\n try:\n comment_to_delete = Comment.query.filtery_by(id=comment_id).first()\n db.session.delete(comment_to_delete)\n db.session.commit()\n except UnmappedClassError as e:\n raise ValueError('待定') from e\n except Exception:\n raise\n","sub_path":"BBS/model/comment_service.py","file_name":"comment_service.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326153636","text":"from PIL import Image\nfrom io import BytesIO\nimport io\nimport datetime\nimport uuid\nfrom config import get_conf\n\n\n# 压缩图片\ndef pied_piper(file_read):\n im = Image.open(BytesIO(file_read))\n # print('格式', im.format, ',分辨率', im.size, ',色彩', im.mode)\n w = im.size[0] # 宽\n h = im.size[1] # 高\n new_w = 130\n new_h = new_w / (w / h)\n im.thumbnail((new_w, new_h))\n imgByteArr = io.BytesIO()\n im.save(imgByteArr, format='png')\n # im.save('thumb\\\\' + ImgName, 'JPEG', quality=90)\n return imgByteArr.getvalue()\n\n\n# 生成新的路径\ndef create_photo_path(format):\n date = now_date()\n year = date[\"year\"]\n month = date[\"month\"]\n day = date[\"day\"]\n uid = str(uuid.uuid4())\n suid = ''.join(uid.split('-'))\n conf = get_conf()\n env = conf[\"env\"]\n if env == \"dev\" or env == \"test\":\n prefix = \"test/\"\n elif env == \"pd\":\n prefix = \"image/\"\n path = prefix + year + \"-\" + month + \"/\" + year + \"-\" + month + \"-\" + day + \"/\" + suid + \".\" + format\n return path\n\n\n# 获取当前年月日\ndef now_date():\n year = datetime.datetime.now().year\n month = datetime.datetime.now().month\n day = datetime.datetime.now().day\n date = {\"year\": str(year), \"month\": str(month), \"day\": str(day)}\n return date\n","sub_path":"user_center/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"408106200","text":"import pygame, sys, time, math, statistics\nimport RPi.GPIO as GPIO\nfrom pygame.locals import *\n\ndef getDistance():\n\t# set Trigger to LOW\n\tGPIO.output(GPIO_TRIGGER, False)\n\t# set Trigger to HIGH\n\tGPIO.output(GPIO_TRIGGER, True)\n\t\n\t# set Trigger after 10 us to LOW\n\ttime.sleep(0.00001)\n\tGPIO.output(GPIO_TRIGGER, False)\n\t\n\tStartTime = time.time()\n\tStopTime = time.time()\n\t\n\t# save StartTime\n\twhile GPIO.input(GPIO_ECHO) == 0:\n\t\tStartTime = time.time()\n\t\n\t# save time of arrival\n\twhile GPIO.input(GPIO_ECHO) == 1:\n\t\tStopTime = time.time()\n\t\n\t# time difference between start and arrival\n\tTimeElapsed = StopTime - StartTime\n\t# multiply with the sonic speed (34300 cm/s)\n\t# and divide by 2, because there and back\n\tdistance = (TimeElapsed * 34300) / 2\n\t\n\treturn distance\n\ndef getAverageDistance(amountOfDistancesToAverage):\n\tdistances = [0] * amountOfDistancesToAverage\n\tfor index in 
range(0,amountOfDistancesToAverage):\n\t\tdistances[index] = getDistance()\n\t\ttime.sleep(0.001)\n\t\n\taverage = statistics.mean(distances)\n\n\treturn average\n\ndef panicLevel(distance, speed):\n\treturn (speed/(20*distance)) + 10*(speed-distance)\n\nGPIO_ECHO = 17\nGPIO_TRIGGER = 4\nGPIO_LED = 19\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(GPIO_LED,GPIO.OUT)\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\n\np = GPIO.PWM(GPIO_LED,100)\ndc = 0\np.start(dc)\n\npygame.init()\nheight = 800\nwidth = 600\nmidHeight = int(height/2)\nmidWidth = int(width/2)\nmaxMeasureableDistance = 150\nif midHeight >= midWidth:\n\tmaxCircleSize = midWidth\nelse:\n\tmaxCircleSize = midHeight\nminCircleSize = 20\ncenter = (midHeight,midWidth)\nsize=(height,width)\nDISPLAYSURF = pygame.display.set_mode(size,pygame.RESIZABLE)\n\npygame.display.set_caption('Panic scale')\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255,0,0)\nsizeOfCircle = 20\n#measures = [0] * 100\nincrease = +2\npygame.draw.circle(DISPLAYSURF, RED, (100, 50), sizeOfCircle, 0)\npreviousMeasure = getDistance()\npreviousTime = time.time()\n#print(\"Calibrating...\")\n#index = 0\n#for x in range(0,100):\n#\tmeasures[x] = getDistance()\n#print(\"Calibration finished!\")\n\nwhile True: # main game loop\n\tmeasure = getAverageDistance(10)\n\tnewTime = time.time()\n\tspeed = math.fabs((measure - previousMeasure)/(newTime - previousTime))\n\tif speed < 1000:\n\t\tsizeOfCircle = int(panicLevel(measure,speed)/10) + minCircleSize #25 is highest value that panicLevel can return\n\t\tdc = (1-(measure/maxMeasureableDistance))*100 #the closer, the brighter the led\n\t\tif dc > 80:\n\t\t\tdc = 80\n\t\telif dc < 0:\n\t\t\tdc = 0\n\t\tif sizeOfCircle > maxCircleSize:\n\t\t\tsizeOfCircle = maxCircleSize\n\t\telif sizeOfCircle < minCircleSize:\n\t\t\tsizeOfCircle = minCircleSize\n\t\tDISPLAYSURF.fill(WHITE)\n\t\tpreviousTime = newTime\n\t\tpreviousMeasure = measure\n\t\tpygame.draw.circle(DISPLAYSURF, RED, center, sizeOfCircle, 0)\n\t\tpygame.display.update()\n\t\tp.ChangeDutyCycle(dc)\n\t\tprint(\"Distance: \" + str(round(measure,2)) + \" cm; Speed: \"+ str(round(speed,2)) + \" cm/s\")\n\t\n\tfor event in pygame.event.get():\n\t\tif (event.type is KEYDOWN and event.key == K_f):\n\t\t\tif DISPLAYSURF.get_flags() & FULLSCREEN:\n\t\t\t\tpygame.display.set_mode((size))\n\t\t\telse:\n\t\t\t\tpygame.display.set_mode(size, FULLSCREEN)\n\t\telif (event.type is KEYDOWN and event.key == K_q) or event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\ttime.sleep(0.01)","sub_path":"Police project with Mila/distanceWithCircleSize.py","file_name":"distanceWithCircleSize.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"66415254","text":"import os\nimport datetime\n\ntypelist = [\"SAVINGS\",\"SHARE\",\"BOND\",\"OTHER\"]\n\nSAVINGS = 0\nSHARE = 1\nBOND = 2\nOTHER = 3\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass investment:\n InvestmentName = None\n InvestmentType = None\n ExpectedAER = None\n\n def __init__(self, InvName, InvType, ExpAER, InitVal, MonthIn):\n \tself.InvestmentName = InvName\n \tself.InvestmentType = InvType\n \tself.ExpectedAER = float(ExpAER)\n \tself.InitialValue = float(InitVal)\n \tself.MonthlyDeposit = float(MonthIn)\n\n\n def printDeets(self):\n \tprint(\"--- \"+ self.InvestmentName + \" ---\")\n \tprint(typelist[self.InvestmentType])\n \tprint(\"AER: \" + self.ExpectedAER + \"%\")\n \tprint(\"Initial Value: \" + 
self.InitialValue)\n \tprint(\"+\" + self.MonthlyDeposit + \" per Month\")\n \tprint(\"\")\n\n def calcToDate(self, months):\n \tTempVal = self.InitialValue\n \tfor i in range(months):\n \t\tTempVal = TempVal*(1 +(self.ExpectedAER/1200))\n \t\tTempVal += self.MonthlyDeposit\n \tprint(\"in \" + str(diffMonths) + \" months time \" + self.InvestmentName + \" will be worth: £\" + str(round(TempVal, 2)) + \" (AER compounded monthly earning £\" + str(round(TempVal - (self.InitialValue+(months*self.MonthlyDeposit)), 2)) + \" interest)\")\n \treturn TempVal\n\n def __str__(self):\n \treturn str(self.__class__) + \": \" + str(self.__dict__)\n\n\n\n\n\n\n# ~~~ MAIN ~~~ #\n\nInvestmentList = []\n\nos.system('color a')\nprint(\"/*****************************************************************/\")\nprint(\"\")\nprint(\" InvestCalc v0.0000002 JRM\")\nprint(\"\")\nprint(\" Commands:\")\nprint(\" add - create an investment\")\nprint(\" print - print all investments\")\nprint(\" forecast - calculate investment value in the future\")\nprint(\"\")\nprint(\"/*****************************************************************/\")\n\nwhile 1:\n\tprint(\">>>\", end='')\n\tinputter = input()\n\n\tif inputter == \"\":\n\t\tpass\n\n\telif inputter == \"add\":\n\t\tInvName = input('Name: ')\n\t\tprint(\"Investment Type: SAVINGS\")\n\t\tExpAER = input('Expected AER: ')\n\t\tInitVal = input('Initial Value: ')\n\t\tMonthIn = input('Monthly Deposit: ')\n\t\tInvestmentList.append(investment(InvName, SAVINGS, ExpAER, InitVal, MonthIn))\n\t\tprint(\"\")\n\t\t\n\telif inputter == \"print\":\n\t\tprint(\"\")\n\t\tfor i in InvestmentList:\n\t\t\ti.printDeets()\n\n\telif inputter == \"forecast\":\n\t\tendDateInput = input('Enter End Date(mm/yy): ')\n\t\tif len(endDateInput) == 5 and endDateInput[2] == \"/\":\n\t\t\tendDate = datetime.datetime(int(endDateInput[3:5]) + 2000, int(endDateInput[0:2]), 1)\n\t\t\ttodayDate = datetime.datetime.today()\n\t\t\tdiffMonths = endDate.month - todayDate.month + 12*(endDate.year - todayDate.year)\n\t\t\ttotalVal = 0\n\t\t\tfor i in InvestmentList:\n\t\t\t\ttotalVal+= i.calcToDate(diffMonths)\n\t\t\tprint(\"\")\n\t\t\tprint(\"Total: \" + str(round(totalVal, 2)))\n\n\t\telse:\n\t\t\tprint(\"Error\")\n\n\n\telse:\n\t\tprint(\"Error: Command not found\")\n\n\n","sub_path":"InvestCalc.py","file_name":"InvestCalc.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"577118703","text":"from sklearn.model_selection import train_test_split\nimport torch\nimport numpy\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\nRANDOM_SEED = 42\n\ndataset = numpy.loadtxt(\"data/pima-indians-diabetes.csv\", delimiter=\",\")\n# split into input (X) and output (Y) variables\nX = dataset[:,0:8]\nY = dataset[:,8].astype(int)\n\ntrain_X, test_X, train_y, test_y = train_test_split(X, Y, test_size=0.40, random_state=RANDOM_SEED)\n\nx_size = 8\nh_size = 12\nh1_size = 12\ny_size = 2\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(x_size, h_size).double()\n self.fc2 = nn.Linear(h_size, h1_size).double()\n self.fc3 = nn.Linear(h1_size, y_size).double()\n\n # self.W1 = Parameter(init.uniform(torch.Tensor(h_size, x_size), 0, 1).double())\n # self.b1 = Parameter(init.uniform(torch.Tensor(h_size), 0, 1).double())\n #\n # self.W2 = 
Parameter(init.uniform(torch.Tensor(h1_size,h_size), 0, 1).double())\n # self.b2 = Parameter(init.uniform(torch.Tensor(h1_size), 0, 1).double())\n #\n # self.W3 = Parameter(init.uniform(torch.Tensor(y_size,h1_size), 0, 1).double())\n # self.b3 = Parameter(init.uniform(torch.Tensor(y_size), 0, 1).double())\n\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = F.relu(self.fc2(x))\n x = F.dropout(x, training=self.training)\n y_hat = self.fc3(x)\n return y_hat\n\n # x = F.relu(torch.mm(self.W1, x.t()))\n # x = F.dropout(x, training=self.training)\n # x = F.relu(torch.mm(self.W2, x.t()) + self.b2)\n # x = F.dropout(x, training=self.training)\n # y_hat = torch.mm(self.W3, x.t()) + self.b3\n # return y_hat\n\n\nmodel = Net()\n\nm = nn.LogSoftmax()\nloss = nn.NLLLoss()\n\n# initialising weights\nparams = list(model.parameters())\n\nfor param in params:\n param.data.uniform_(-1,1)\n\noptimizer = optim.RMSprop(model.parameters(), lr=0.001, weight_decay=10)\n\ndef train():\n model.train()\n for epoch in range(10):\n for i in range(1000):\n\n b = i%len(train_X)\n data, target = Variable(torch.from_numpy(train_X[b:b+5])), Variable(torch.from_numpy(train_y[b:b+5]))\n optimizer.zero_grad()\n y_hat = model(data)\n output = loss(m(y_hat), target)\n output.backward()\n optimizer.step()\n\n correct = 0\n # checking training accuracy\n if i%50 == 0:\n d, t = Variable(torch.from_numpy(train_X)), Variable(torch.from_numpy(train_y))\n y_hat = model(d)\n pred = y_hat.data.max(1)[1]\n correct += pred.eq(t.data).cpu().sum()\n print (float(correct)/len(train_X))\n\ndef test():\n model.eval()\n correct = 0\n # checking test accuracy\n d, t = Variable(torch.from_numpy(test_X), volatile=True), Variable(torch.from_numpy(test_y))\n y_hat = model(d)\n pred = y_hat.data.max(1)[1]\n correct += pred.eq(t.data).cpu().sum()\n print ('Test accuracy')\n print (float(correct)/len(test_X))\n\ndef main():\n train()\n test()\n\nif __name__ == '__main__':\n main()\n","sub_path":"simple_feed_forward_nn.py","file_name":"simple_feed_forward_nn.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"169940090","text":"# coding: utf-8\n\"\"\"\n flask_weixin\n ~~~~~~~~~~~~\n\n Weixin implementation in Flask.\n\n :copyright: (c) 2013 by Hsiaoming Yang.\n :license: BSD, see LICENSE for more detail.\n\"\"\"\n\nimport time\nimport hashlib\n\ntry:\n from lxml import etree\nexcept ImportError:\n from xml.etree import cElementTree as etree\nexcept ImportError:\n from xml.etree import ElementTree as etree\n\n\n__all__ = ('Weixin',)\n__version__ = '0.1.0'\n__author__ = 'Hsiaoming Yang '\n\n\nclass Weixin(object):\n \"\"\"Interface for mp.weixin.qq.com\n\n http://mp.weixin.qq.com/wiki/index.php\n \"\"\"\n\n def __init__(self, app=None):\n self.token = None\n self._registry = {}\n\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n if hasattr(app, 'config'):\n config = app.config\n else:\n # flask-weixin can be used without flask\n config = app\n\n self.token = config.get('WEIXIN_TOKEN', None)\n self.sender = config.get('WEIXIN_SENDER', None)\n self.expires_in = config.get('WEIXIN_EXPIRES_IN', 0)\n\n def validate(self, signature, timestamp, nonce):\n \"\"\"Validate request signature.\n\n :param signature: A string signature parameter sent by weixin.\n :param timestamp: A int timestamp parameter sent by weixin.\n :param nonce: A int nonce parameter sent by weixin.\n \"\"\"\n if not self.token:\n raise 
RuntimeError('WEIXIN_TOKEN is missing')\n\n if self.expires_in:\n try:\n timestamp = int(timestamp)\n except:\n # fake timestamp\n return False\n\n delta = time.time() - timestamp\n if delta < 0:\n # this is a fake timestamp\n return False\n\n if delta > self.expires_in:\n # expired timestamp\n return False\n\n values = [self.token, str(timestamp), str(nonce)]\n s = ''.join(sorted(values))\n hsh = hashlib.sha1(s.encode('utf-8')).hexdigest()\n return signature == hsh\n\n def parse(self, content):\n \"\"\"Parse xml body sent by weixin.\n\n :param content: A text of xml body.\n \"\"\"\n dct = {}\n root = etree.fromstring(content)\n for child in root:\n dct[child.tag] = child.text\n\n ret = {}\n ret['id'] = dct.get('MsgId')\n ret['timestamp'] = int(dct.get('CreateTime', 0))\n ret['receiver'] = dct.get('ToUserName')\n ret['sender'] = dct.get('FromUserName')\n ret['type'] = type = dct.get('MsgType')\n\n if type == 'text':\n ret['content'] = dct.get('Content')\n return ret\n\n if type == 'image':\n ret['picurl'] = dct.get('PicUrl')\n return ret\n\n if type == 'location':\n ret['location_x'] = dct.get('Location_X')\n ret['location_y'] = dct.get('Location_Y')\n ret['scale'] = int(dct.get('Scale', 0))\n ret['label'] = dct.get('Label')\n return ret\n\n if type == 'link':\n ret['title'] = dct.get('Title')\n ret['description'] = dct.get('Description')\n ret['url'] = dct.get('url')\n return ret\n\n return ret\n\n def reply(self, username, type='text', sender=None, **kwargs):\n \"\"\"Create the reply text for weixin.\n\n The reply varies per reply type. The acceptable types are `text`,\n `music` and `news`. Each type accepts different parameters, but\n they share some common parameters:\n\n * username: the receiver's username\n * type: the reply type, aka text, music and news\n * sender: sender is optional if you have a default value\n\n Text reply requires an additional parameter of `content`.\n\n Music reply requires 4 more parameters:\n\n * title: A string for music title\n * description: A string for music description\n * music_url: A link of the music\n * hq_music_url: A link of the high quality music\n\n News reply requires an additional parameter of `articles`, which\n is a list/tuple of articles, each one is a dict:\n\n * title: A string for article title\n * description: A string for article description\n * picurl: A link for article cover image\n * url: A link for article url\n \"\"\"\n if not sender:\n sender = self.sender\n\n if not sender:\n raise RuntimeError('WEIXIN_SENDER is missing')\n\n if type == 'text':\n content = kwargs.get('content', '')\n return text_reply(username, sender, content)\n\n if type == 'music':\n values = {}\n for k in ('title', 'description', 'music_url', 'hq_music_url'):\n values[k] = kwargs.get(k)\n return music_reply(username, sender, **values)\n\n if type == 'news':\n items = kwargs.get('articles', [])\n return news_reply(username, sender, *items)\n\n return None\n\n def register(self, key, func=None):\n \"\"\"Register a command helper function.\n\n You can register the function::\n\n def print_help(**kwargs):\n username = kwargs.get('sender')\n sender = kwargs.get('receiver')\n return weixin.reply(\n username, sender=sender, content='text reply'\n )\n\n weixin.register('help', print_help)\n\n It is also accessible as a decorator::\n\n @weixin.register('help')\n def print_help(*args, **kwargs):\n username = kwargs.get('sender')\n sender = kwargs.get('receiver')\n return weixin.reply(\n username, sender=sender, content='text reply'\n )\n \"\"\"\n if func:\n 
self._registry[key] = func\n return\n\n return self.__call__(key)\n\n def __call__(self, key):\n \"\"\"Register a reply function.\n\n Only available as a decorator::\n\n @weixin('help')\n def print_help(*args, **kwargs):\n username = kwargs.get('sender')\n sender = kwargs.get('receiver')\n return weixin.reply(\n username, sender=sender, content='text reply'\n )\n \"\"\"\n def wrapper(func):\n self._registry[key] = func\n\n return wrapper\n\n def view_func(self):\n \"\"\"Default view function for Flask app.\n\n This is a simple implementation for view func, you can add it to\n your Flask app::\n\n weixin = Weixin(app)\n app.add_url_rule('/', view_func=weixin.view_func)\n \"\"\"\n from flask import request, Response\n\n signature = request.args.get('signature')\n timestamp = request.args.get('timestamp')\n nonce = request.args.get('nonce')\n if not self.validate(signature, timestamp, nonce):\n return 'signature failed', 400\n\n if request.method == 'GET':\n echostr = request.args.get('echostr')\n return echostr\n\n try:\n ret = self.parse(request.data)\n except:\n return 'invalid', 400\n\n if 'type' not in ret:\n # not a valid message\n return 'invalid', 400\n\n if ret['type'] == 'text' and ret['content'] in self._registry:\n func = self._registry[ret['content']]\n elif '*' in self._registry:\n func = self._registry['*']\n else:\n func = 'failed'\n\n if callable(func):\n text = func(**ret)\n else:\n # plain text\n text = self.reply(\n username=ret['sender'],\n sender=ret['receiver'],\n content=func,\n )\n return Response(text, content_type='text/xml; charset=utf-8')\n\n view_func.methods = ['GET', 'POST']\n\n\ndef text_reply(username, sender, content):\n shared = _shared_reply(username, sender, 'text')\n template = '%s'\n return template % (shared, content)\n\n\ndef music_reply(username, sender, **kwargs):\n kwargs['shared'] = _shared_reply(username, sender, 'music')\n\n template = (\n ''\n '%(shared)s'\n ''\n '<![CDATA[%(title)s]]>'\n ''\n ''\n ''\n ''\n ''\n )\n return template % kwargs\n\n\ndef news_reply(username, sender, *items):\n item_template = (\n ''\n '<![CDATA[%(title)s]]>'\n ''\n ''\n ''\n ''\n )\n articles = map(lambda o: item_template % o, items)\n\n template = (\n ''\n '%(shared)s'\n '%(count)d'\n '%(articles)s'\n ''\n )\n dct = {\n 'shared': _shared_reply(username, sender, 'news'),\n 'count': len(items),\n 'articles': ''.join(articles)\n }\n return template % dct\n\n\ndef _shared_reply(username, sender, type):\n dct = {\n 'username': username,\n 'sender': sender,\n 'type': type,\n 'timestamp': int(time.time()),\n }\n template = (\n ''\n ''\n '%(timestamp)d'\n ''\n )\n return template % dct\n","sub_path":"site-packages/Flask_Weixin-0.1.0-py2.7.egg/flask_weixin.py","file_name":"flask_weixin.py","file_ext":"py","file_size_in_byte":9817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"16740875","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import Request\nfrom urllib import parse\nfrom ArticleSpider.items import JobBoleArticleItem\nfrom ArticleSpider.utils.common import *\n\n\nclass JobboleSpider(scrapy.Spider):\n name = 'jobbole'\n allowed_domains = ['blog.jobbole.com']\n start_urls = ['http://blog.jobbole.com/all-posts/']\n\n def parse(self, response):\n # 获取文章url\n post_nodes = response.css(\"#archive .floated-thumb .post-thumb a\")\n for post_node in post_nodes:\n # 文章图片url\n font_image_url = post_node.css(\"img::attr(src)\").extract_first()\n font_image_url = parse.urljoin(response.url, 
font_image_url)\n # 文章url\n post_url = post_node.css(\"::attr(href)\").extract_first()\n post_url = parse.urljoin(response.url, post_url)\n yield Request(url=post_url, meta={\"front_image_url\": font_image_url}, callback=self.parse_detail)\n\n # 获取下一页url\n next_url = response.css(\".next::attr(href)\").extract_first()\n if next_url:\n yield Request(url=next_url, callback=self.parse)\n\n def parse_detail(self, response):\n # 标题\n title = response.css(\".entry-header h1::text\").extract_first()\n # 创建时间\n create_date = response.css(\".entry-meta-hide-on-mobile::text\").extract_first().strip()[:10]\n create_date = date_convert(create_date)\n # 点赞\n praise_nums = response.css(\".vote-post-up h10::text\").extract_first()\n praise_nums = int(praise_nums) if praise_nums else 0\n # 收藏\n fav_nums = response.css(\".bookmark-btn::text\").extract_first().strip()[:-2].strip()\n fav_nums = int(fav_nums) if fav_nums else 0\n # 评论\n comment_nums = response.css(\"[href='#article-comment'] span::text\").extract_first().strip()[:-2].strip()\n comment_nums = int(comment_nums) if comment_nums else 0\n # 标签\n tag_list = response.css(\".entry-meta-hide-on-mobile a::text\").extract()\n tag_list = [element for element in tag_list if not element.strip().endswith(\"评论\")]\n tags = \",\".join(tag_list)\n # 内容\n content = response.css(\".entry\").extract_first()\n # 文章图片url\n front_image_url = response.meta.get(\"front_image_url\", \"\")\n # 文章url\n url = response.url\n # 文章 url md5\n url_object_id = get_md5(response.url)\n\n # 封装item\n article_item = JobBoleArticleItem()\n article_item[\"url\"] = url\n article_item[\"url_object_id\"] = url_object_id\n article_item[\"front_image_url\"] = [front_image_url]\n article_item[\"title\"] = title\n article_item[\"create_date\"] = create_date\n article_item[\"praise_nums\"] = praise_nums\n article_item[\"fav_nums\"] = fav_nums\n article_item[\"comment_nums\"] = comment_nums\n article_item[\"tags\"] = tags\n article_item[\"content\"] = content\n yield article_item\n","sub_path":"ArticleSpider/spiders/jobbole.py","file_name":"jobbole.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"425406655","text":"from django.conf import settings\nfrom django.urls import path, include\nfrom django.contrib import admin\nfrom drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView\n\n\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView\n)\n\nfrom .views import (home, ReactTest1,\n LoginRequiredAPI,\n ErrorAPI,\n ConnectedUserInfos)\n\n\nurlpatterns = [\n path(\"home/\", home, name=\"home\"),\n # path('admin/', admin.site.urls),\n path('users/', include('users.urls')),\n # djoser\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.jwt')),\n\n # drf-spectacular\n\n path('api/schema/', SpectacularAPIView.as_view(), name='schema'),\n path('api/schema/swagger-ui/',\n SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n path('api/schema/redoc/',\n SpectacularRedocView.as_view(url_name='schema'), name='redoc'),\n # Private Box\n\n path('box/', include('Box.urls')),\n\n\n # test endpoints\n\n path('api/is_connected/', LoginRequiredAPI.as_view(), name=\"auth\"),\n path('api/error_api/', ErrorAPI.as_view(), name=\"erroe-api\"),\n path('api/user_infos/', ConnectedUserInfos.as_view(), name=\"co-user-info\"),\n\n\n ###\n path('api/token/', TokenObtainPairView.as_view(), 
name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += [path('silk/', include('silk.urls', namespace='silk'))]\n","sub_path":"ALearnBox/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"134513710","text":"__author__ = 'kristydahl'\n\nimport arcpy\nimport os\nimport glob\n\narcpy.env.overwriteOutput = True\n\ndef depth_map(location,cats,year, projection):\n location_path = os.path.join('C:/Users/kristydahl/Desktop/GIS_data/military_bases',location)\n map_path = os.path.join(location_path,'map_docs')\n testmap = os.path.join(location_path, 'testmap_custom.mxd') # toggle portrait/landscape here\n print(testmap)\n\n outmap = os.path.join(map_path, 'testing_automated_styles_labels_c5_2100_h.mxd')# edited for hard file path\n gdb = str(location + '.gdb')\n workspace = os.path.join(location_path,gdb)\n print(workspace)\n\n arcpy.env.workspace = workspace\n mxd = arcpy.mapping.MapDocument(str(testmap))\n df = arcpy.mapping.ListDataFrames(mxd,\"Layers\")[0]\n #legend = arcpy.mapping.ListLayoutElements(mxd,\"LEGEND_ELEMENT\")[0]\n\n for cat in cats:\n print(cat)\n print(year)\n print(projection)\n fcs = arcpy.ListFeatureClasses('polygon_for_depth*c%s_high*_%s_%s*' %(cat,year,projection))\n print(fcs)\n for fc in fcs:\n fc_with_full_path = os.path.join(workspace, fc)\n print(fc_with_full_path)\n lyr_name = str('Category ' + cat + ' Depth')\n raster_lyr = arcpy.MakeFeatureLayer_management(fc_with_full_path,lyr_name)\n lyr = arcpy.mapping.Layer(lyr_name)\n arcpy.ApplySymbologyFromLayer_management(lyr, 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/depth_gradations_fullrange2.lyr')\n\n # legend.autoAdd = True\n # legend.title='Depth of inundation (ft)'\n #\n # while legend.isOverflowing:\n # legend.elementHeight = legend.elementHeight + 0.1\n\n arcpy.mapping.AddLayer(df,lyr,\"BOTTOM\")\n print('Added layer to map')\n mxd.saveACopy(outmap)\n del mxd, lyr\n\n# This is working. Next steps:\n# 1. Read definitionQuery from txt file\n# 2. 
Incorporate into full script\ndef add_common_layers():\n map_path = 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/map_docs/testing_automated_styles_labels2012_ih.mxd'\n outname = 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/map_docs/testing_automated_styles_labels2012_ih_added_labels.mxd'\n print(map_path)\n mxd = arcpy.mapping.MapDocument(map_path)\n df = df = arcpy.mapping.ListDataFrames(mxd,\"Layers\")[0]\n lyr = arcpy.mapping.Layer('C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/general_map_elements/places/places.shp')\n print(lyr)\n statement_file = 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/general_map_elements/places/places_def_query.txt'\n statement = open(statement_file,\"r\").readlines()\n print(statement[0])\n #lyr = arcpy.mapping.Layer('places_lyr')\n #lyr.name = 'test_places'\n lyr.definitionQuery = statement[0] #'\"name\" IN' + \"('Grover Island','Kingsland','Saint Marys')\"\n\n lyr.showLabels = True\n\n lyr.saveACopy('C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/general_map_elements/places/selected_places.lyr')\n\n #arcpy.ApplySymbologyFromLayer_management(lyr, 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/places_labels_style.lyr')\n sourceLayer = arcpy.mapping.Layer('C:/Users/kristydahl/Desktop/GIS_data/military_bases/places_labels_style.lyr')\n sourceLayer.showLabels = True\n lyr = arcpy.mapping.Layer('C:/Users/kristydahl/Desktop/GIS_data/military_bases/kings_bay/general_map_elements/places/selected_places.lyr')\n arcpy.mapping.UpdateLayer(df,lyr,sourceLayer, True)\n arcpy.ApplySymbologyFromLayer_management(lyr, 'C:/Users/kristydahl/Desktop/GIS_data/military_bases/places_labels_style.lyr')\n arcpy.mapping.AddLayer(df,lyr,'TOP')\n print('Added layer')\n\n\n mxd.saveACopy(outname)\n del mxd, lyr\n\n\n","sub_path":"testing_automated_labels_styles.py","file_name":"testing_automated_labels_styles.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"555335062","text":"from ajna_commons.flask.log import logger\nfrom bhadrasana.models.ovrmanager import get_recintos_api\nfrom flask import render_template, request, flash\nfrom flask_login import login_required\nfrom virasana.forms.filtros import FormFiltroAPIRecintos\nfrom virasana.usecases.apirecintos_manager import get_eventos, Missao\n\n\ndef configure(app):\n \"\"\"Configura rotas para evento.\"\"\"\n\n def lista_eventos_html(mongodb, session, form: FormFiltroAPIRecintos):\n # start = datetime.combine(form.start.data, datetime.min.time())\n # end = datetime.combine(form.end.data, datetime.max.time())\n form.validate()\n eventos, count_missao = get_eventos(mongodb, session, form.start.data, form.end.data, form.placa.data,\n form.numeroConteiner.data, form.cpfMotorista.data,\n form.motoristas_de_risco.data,\n form.codigoRecinto.data, form.tempo_permanencia.data,\n Missao().get_descricao_missao(int(form.missao.data)))\n return eventos, count_missao\n\n @app.route('/eventos_redirect', methods=['GET'])\n @login_required\n def eventos_redirect():\n mongodb = app.config['mongodb']\n session = app.config['db_session']\n lista_eventos = []\n count_missao = {}\n form = FormFiltroAPIRecintos(request.args)\n try:\n lista_eventos, count_missao = lista_eventos_html(mongodb, session, form)\n except Exception as err:\n flash(err)\n logger.error(err, exc_info=True)\n return render_template('eventos.html',\n lista_eventos=lista_eventos,\n 
count_missao=count_missao,\n oform=form)\n\n @app.route('/eventos', methods=['GET', 'POST'])\n @login_required\n def eventos():\n mongodb = app.config['mongodb']\n session = app.config['db_session']\n lista_eventos = []\n count_missao = {}\n recintos = get_recintos_api(session)\n missoes = Missao().get_tipos_missao()\n oform = FormFiltroAPIRecintos(request.values, recintos=recintos, missoes=missoes)\n try:\n if request.method == 'POST':\n lista_eventos, count_missao = lista_eventos_html(mongodb, session, oform)\n except Exception as err:\n flash(err)\n logger.error(err, exc_info=True)\n return render_template('eventos.html',\n lista_eventos=lista_eventos,\n count_missao=count_missao,\n oform=oform)\n","sub_path":"virasana/routes/apirecintos_app.py","file_name":"apirecintos_app.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"200930998","text":"# -*- coding: utf-8 -*-\nimport json\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass NcpSpider(scrapy.Spider):\n name = \"ncp\"\n item_attributes = { 'brand': \"NCP\" }\n allowed_domains = [\"www.ncp.co.uk\"]\n start_urls = (\"https://www.ncp.co.uk/parking-solutions/cities/\",)\n\n def parse(self, response):\n cities = response.xpath('//div[@class=\"city-tabs\"]//ul/li/a/@href').extract()\n for city in cities:\n yield response.follow(city, self.parse_city)\n\n def parse_city(self, response):\n carparks = response.xpath(\n '//table[@class=\"airportListing large-stacktable\"]/tbody/tr/td[1]/a/@href'\n ).extract()\n for carpark in carparks:\n yield response.follow(carpark, self.parse_carpark)\n\n def parse_carpark(self, response):\n js = response.xpath(\n '//article[@class=\"content\"]//script[1]/text()'\n ).extract_first()\n data = self.parse_js(js)\n carpark = data[\"carparks\"][0]\n location = data[\"location\"]\n properties = {\n \"ref\": carpark[\"carParkCode\"],\n \"name\": carpark[\"carParkTitle\"],\n \"addr_full\": self.get_address(carpark),\n \"city\": carpark[\"addressLine4\"],\n \"country\": \"United Kingdom\",\n \"postcode\": f\"{carpark['postcodePart1']} {carpark['postcodePart2']}\",\n \"lat\": location[\"coords\"][\"lat\"],\n \"lon\": location[\"coords\"][\"lng\"],\n \"phone\": carpark[\"telephoneNumber\"],\n \"opening_hours\": carpark[\"openHours\"].strip(),\n \"website\": response.url,\n }\n yield GeojsonPointItem(**properties)\n\n def parse_js(self, js_string):\n \"\"\"\n hammer a known snippet of javascript of the form\n\n ```\n var detailsItem = {\n 'element' : \"map-canvas-84401-23832-61805\",\n 'carparks' : [{\"metaID\":466...}],\n 'location' : {\"title\":\"Bradford Hall Ings\"...}},\n 'autocarpark' : null\n };\n\n detailsArr.push(detailsItem);\n ```\n\n until we can parse it as json, then parse it\n \"\"\"\n js = (\n js_string.replace(\"var detailsItem = \", \"\")\n .replace(\"detailsArr.push(detailsItem);\", \"\")\n .strip()\n .rstrip(\";\")\n )\n expected_keys = [\"element\", \"carparks\", \"location\", \"autocarpark\"]\n for key in expected_keys:\n js = js.replace(f\"'{key}'\", f'\"{key}\"')\n return json.loads(js)\n\n def get_address(self, record):\n return \", \".join(\n [\n line\n for line in [\n record[\"addressLine1\"],\n record[\"addressLine2\"],\n record[\"addressLine3\"],\n ]\n if line\n ]\n )\n","sub_path":"locations/spiders/ncp.py","file_name":"ncp.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"276270535","text":"from typing import Callable, List, Tuple\nimport numpy as np\n\nclass Optimizer:\n \"\"\"\n Base class for optimizer\n \"\"\"\n\n def __init__(\n self, lr: float = 0.01, final_lr: float = 0.0, decay_type=\"exponential\"\n ):\n self.lr = lr\n self.final_lr = final_lr\n self.decay_type = decay_type\n self.first = True\n self.max_epochs = 100\n\n def _setup_decay(self) -> None:\n if not self.decay_type:\n return\n elif self.decay_type == \"exponential\":\n self.decay_per_epoch = np.power(\n self.final_lr / self.lr, 1.0 / (self.max_epochs - 1)\n )\n elif self.decay_type == \"linear\":\n self.decay_per_epoch = (self.lr - self.final_lr) / (self.max_epochs - 1)\n\n print(\"Decay per epoch\", self.decay_per_epoch)\n\n def _decay_lr(self) -> None:\n if not self.decay_per_epoch:\n return\n elif self.decay_type == \"exponential\":\n self.lr *= self.decay_per_epoch\n elif self.decay_type == \"linear\":\n self.lr -= self.decay_per_epoch\n\n def step(self) -> None:\n pass\n\n\nclass SGD(Optimizer):\n \"\"\"\n Stochastic gradient descent optimizer\n \"\"\"\n\n def __init__(self, lr: float = 0.01):\n super().__init__(lr)\n\n def step(self):\n for (param, param_grad) in zip(self.net.params(), self.net.param_grads()):\n param -= self.lr * param_grad\n\n\nclass SGDMomentun(Optimizer):\n \"\"\"\n SGD with momentun.\n \"\"\"\n\n def __init__(self, lr: float = 0.01, momentun: float = 0.9, *args, **kwargs):\n super().__init__(lr, *args, **kwargs)\n self.momentun = momentun\n\n def step(self) -> None:\n if self.first:\n self.velocities = [np.zeros_like(param) for param in self.net.params()]\n self.first = False\n\n for (param, param_grad, velocity) in zip(\n self.net.params(), self.net.param_grads(), self.velocities\n ):\n self._update_rule(param=param, grad=param_grad, velocity=velocity)\n\n def _update_rule(self, **kwargs) -> None:\n # Update velocity\n kwargs[\"velocity\"] *= self.momentun\n kwargs[\"velocity\"] += self.lr * kwargs[\"grad\"]\n kwargs[\"param\"] -= kwargs[\"velocity\"]\n","sub_path":"chap5/scratch_dl/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"549116188","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @File : send_email.py\n# @Author: Jasmine\n# @Date : 2019/9/25\n# @Desc :\n\n# 1、发送邮件:\n # 定义邮件内容\n # 添加附件\n\n\n# coding:utf-8\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n\ndef send_mail(smtpserver,sender,psw,receiver,port,report_file):\n # 发送最新的测试报告内容\n with open(report_file, \"rb\") as f:\n mail_body = f.read()\n\n # 定义邮件内容\n msg = MIMEMultipart()\n body = MIMEMultipart(mail_body,_subtype='html',_charset='utf-8')\n msg[\"from\"] = sender # 发件人\n msg[\"to\"] = receiver # 收件人\n msg[\"subject\"] = \"自动化测试报告 \" # 主题\n msg.attach(body)\n\n # 添加附件\n att = MIMEText(open(report_file,'rb').read(), \"base64\", \"utf-8\")\n att[\"Content-Type\"] = \"application/octet-stream\"\n att[\"Content-Disposition\"] = 'attachment; filename=\"test_report.html\"'\n msg.attach(att)\n\n try:\n smtp = smtplib.SMTP_SSL(smtpserver, port)\n except:\n smtp = smtplib.SMTP()\n smtp.connect(smtpserver,port) # 连服务器\n\n # 用户名和密码\n smtp.login(sender, psw)\n smtp.sendmail(sender, receiver, msg.as_string()) # 发送\n 
smtp.quit()\n\n\n\n\n\n\n\n","sub_path":"common/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"251611548","text":"class Solution(object):\n def binary_search(self,num):\n start = 0\n end = len(self.LIS) - 1\n\n if self.LIS[0] >= num:\n return 0\n\n while end>start+1:\n middle = (start+end)//2\n if self.LIS[middle] > num:\n end = middle\n elif self.LIS[middle] < num:\n start = middle\n else:\n return middle\n\n return end\n\n\n def lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 0:\n return 0\n\n\n self.LIS = [nums[0]]\n\n for i in range(1,len(nums)):\n num = nums[i]\n\n if num > self.LIS[-1]:\n self.LIS.append(num)\n else:\n index = self.binary_search(num)\n\n self.LIS[index] = num\n return len(self.LIS)\n","sub_path":"300.py","file_name":"300.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"247117150","text":"import pytest, pdb\nfrom bson.objectid import ObjectId\nfrom rulesets.models import Account, Ruleset, Session\nfrom tests.fixtures import client\n\nclass DestroyRequestable():\n \"\"\"This class is just a superclass to easily make requests to delete rulesets.\"\"\"\n @pytest.fixture\n def delete(self, client):\n url = f\"/rulesets/{str(self.id)}\"\n query_string = {'session_id': pytest.session.token}\n return lambda: client.delete(url, query_string = query_string)\n\n@pytest.mark.describe('Destroy - nominal case')\nclass TestDestroyNominalCase(DestroyRequestable):\n\n def setup_method(self):\n self.ruleset = Ruleset.objects.create(title = 'test title', description = 'test description')\n self.id = self.ruleset._id\n\n def teardown_method(self):\n Ruleset.objects.delete()\n\n @pytest.mark.it('Returns a 200 (OK) Status code')\n def test_status_code(self, delete):\n assert delete().status_code == 200\n\n @pytest.mark.it('Returns the correct body')\n def test_message(self, delete):\n assert delete().get_json() == {'message': 'deleted'}\n\n @pytest.mark.it('Correctly deletes the ruleset from the database')\n def test_deletion(self, delete):\n response = delete()\n assert Ruleset.objects.raw({'_id': self.ruleset._id}).count() == 0\n\n@pytest.mark.describe('Destroy without giving session ID')\nclass TestDestroyWithoutSessionId():\n\n @pytest.mark.it('Returns a 400 (Bad Request) status code')\n def test_status_code(self, client):\n assert client.delete('/rulesets/test').status_code == 400\n\n @pytest.mark.it('Returns the correct error body')\n def test_response_body(self, client):\n response_body = client.delete('/rulesets/test').get_json()\n assert response_body == {\n 'status': 400,\n 'field': 'session_id',\n 'error': 'required'\n }\n\n@pytest.mark.describe('Destroy with empty session ID')\nclass TestDestroyWithEmptySessionId():\n\n @pytest.mark.it('Returns a 400 (Bad Request) status code')\n def test_status_code(self, client):\n response = client.delete('/rulesets/test', query_string={'session_id': None})\n assert response.status_code == 400\n\n @pytest.mark.it('Returns the correct error body')\n def test_response_body(self, client):\n response = client.delete('/rulesets/test', query_string={'session_id': None})\n assert response.get_json() == {\n 'status': 400,\n 'field': 'session_id',\n 'error': 'required'\n }\n\n@pytest.mark.describe('Destroy with unknown session ID')\nclass 
TestDestroyWithUnknownSessionId():\n\n @pytest.mark.it('Returns a 404 (Not Found) status code')\n def test_status_code(self, client):\n response = client.delete('/rulesets/test', query_string={'session_id': str(ObjectId())})\n assert response.status_code == 404\n\n @pytest.mark.it('Returns the correct error body')\n def test_response_body(self, client):\n response = client.delete('/rulesets/test', query_string={'session_id': str(ObjectId())})\n assert response.get_json() == {\n 'status': 404,\n 'field': 'session_id',\n 'error': 'unknown'\n }\n\n@pytest.mark.describe('Destroy with unknown ruleset ID')\nclass TestDestroyWithUnknowId(DestroyRequestable):\n\n @classmethod\n def setup_class(self):\n self.id = ObjectId()\n\n @pytest.mark.it('Returns a 404 (Not Found) status code')\n def test_status_code(self, delete):\n assert delete().status_code == 404\n\n @pytest.mark.it('Returns the correct body')\n def test_response_body(self, delete):\n assert delete().get_json() == {\n 'status': 404,\n 'field': 'ruleset_id',\n 'error': 'unknown'\n }","sub_path":"tests/test_destroy.py","file_name":"test_destroy.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"532749983","text":"from mathutils import Vector, Quaternion, Matrix\nimport bpy\nfrom . import quote\nfrom .curve import Curve\n\n# Handles animating TRS properties for glTF nodes. In Blender, this can be\n# either an object or a bone.\n\n\ndef add_node_trs_animation(op, anim_id, node_id, samplers):\n if op.id_to_vnode[node_id]['type'] == 'BONE':\n bone_trs(op, anim_id, node_id, samplers)\n else:\n object_trs(op, anim_id, node_id, samplers)\n\n\n# Convert from glTF coordinates to Blender.\ndef convert_translation(t):\n return Vector([t[0], -t[2], t[1]])\n\n\ndef convert_rotation(r):\n return Quaternion([r[3], r[0], -r[2], r[1]])\n\n\ndef convert_scale(s):\n return Vector([s[0], s[2], s[1]])\n\n\ndef object_trs(op, animation_id, node_id, samplers):\n # Create action\n animation = op.gltf['animations'][animation_id]\n blender_object = op.id_to_vnode[node_id]['blender_object']\n name = \"%s@%s\" % (\n animation.get('name', 'animations[%d]' % animation_id),\n blender_object.name,\n )\n action = bpy.data.actions.new(name)\n action.use_fake_user = True\n\n # Play the first animation by default\n if animation_id == 0:\n blender_object.animation_data_create().action = action\n\n if 'translation' in samplers:\n curve = Curve.for_sampler(op, samplers['translation'])\n fcurves = curve.make_fcurves(\n op, action, 'location',\n transform=convert_translation)\n\n group = action.groups.new('Location')\n for fcurve in fcurves:\n fcurve.group = group\n\n if 'rotation' in samplers:\n curve = Curve.for_sampler(op, samplers['rotation'])\n curve.shorten_quaternion_paths()\n fcurves = curve.make_fcurves(\n op, action, 'rotation_quaternion',\n transform=convert_rotation)\n\n group = action.groups.new('Rotation')\n for fcurve in fcurves:\n fcurve.group = group\n\n if 'scale' in samplers:\n curve = Curve.for_sampler(op, samplers['scale'])\n fcurves = curve.make_fcurves(\n op, action, 'scale',\n transform=convert_scale)\n\n group = action.groups.new('Scale')\n for fcurve in fcurves:\n fcurve.group = group\n\n\ndef bone_trs(op, anim_id, node_id, samplers):\n # Unlike an object, a bone doesn't get its own action; there is one action\n # for the whole armature. 
To handle this, we store a cache of the action for\n # each animation in the armature's vnode and create one when we first\n # animate a bone in that armature.\n bone_vnode = op.id_to_vnode[node_id]\n armature_vnode = bone_vnode['armature_vnode']\n action_cache = armature_vnode.setdefault('action_cache', {})\n if anim_id not in action_cache:\n name = \"%s@%s\" % (\n op.gltf['animations'][anim_id].get('name', 'animations[%d]' % anim_id),\n armature_vnode['blender_armature'].name,\n )\n action = bpy.data.actions.new(name)\n action_cache[anim_id] = action\n action.use_fake_user = True\n\n # Play the first animation by default\n if anim_id == 0:\n bl_object = armature_vnode['blender_object']\n bl_object.animation_data_create().action = action\n\n action = action_cache[anim_id]\n\n # See vforest.py for the notation and assumptions used here.\n #\n # In glTF, the ordinates of an animation curve say what the final position\n # of the node should be\n #\n # T(b) = sample_gltf_curve()\n #\n # But in Blender, you animate a \"pose bone\", and the final position is\n # computed relative to the rest position as\n #\n # P'(b) = sample_blender_curve()\n # T'(b) = E'(b) P'(b)\n #\n # where the primed varaibles have had a coordinate change to modify the bind\n # pose (again, see vforest.py). Calculating the value we need for P'(b) from\n # the value we have for T(b)\n #\n # P'(b) =\n # E'(b)^{-1} T'(b) =\n # E'(b)^{-1} C(pb)^{-1} T(b) C(b) =\n # {remember that E' do not contain a scale and the C do not contain a translation}\n # Rot[er^{-1}] Trans[-et] Scale[post_s] Rot[post_r] Trans[t] Rot[r] Scale[s] Scale[pre_s] Rot[pre_r] =\n # {lift the translations up}\n # Trans[Rot[er^{-1}](-et) + Rot[er^{-1}] Scale[post_s] Rot[post_r] t] ...\n #\n # Defining pt = (the expression inside the Trans there)\n #\n # Trans[pt] Rot[er^{-1}] Scale[post_s] Rot[post_r] Rot[r] Scale[s] Scale[pre_s] Rot[pre_r] =\n # {by fiat, Scale[post_s] and Scale[pre_s] commute with rotations}\n # Trans[pt] Rot[er^{-1}] Rot[post_r] Rot[r] Scale[post_s] Scale[s] Rot[pre_r] Scale[pre_s] =\n # {using Scale[s] Rot[pre_r] = Rot[pre_r] Scale[s'] where s' is s permuted}\n # Trans[pt] Rot[er^{-1} * post_r * r] Scale[post_s] Rot[pre_r] Scale[s'] Scale[pre_s] =\n # Trans[pt] Rot[er^{-1} * post_r * r * pre_r] Scale[post_s * s' * pre_s] =\n # Trans[pt] Rot[pr] Scale[ps]\n #\n # As we promised, pt depends only on t, pr depends only on r, and ps depends\n # only on s (ignoring constants), so each curve only has its ordinates\n # changed and they are still independent and don't need to be resampled.\n\n et, er = bone_vnode['bone_tr']\n inv_er, inv_et = er.conjugated(), -et\n parent_pre_r = bone_vnode['parent'].get('bone_pre_rotation', Quaternion((1, 0, 0, 0)))\n post_r = parent_pre_r.conjugated()\n pre_r = bone_vnode.get('bone_pre_rotation', Quaternion((1, 0, 0, 0)))\n parent_pre_s = bone_vnode['parent'].get('bone_pre_scale', Vector((1, 1, 1)))\n post_s = Vector((1/c for c in parent_pre_s))\n pre_s = bone_vnode.get('bone_pre_scale', Vector((1, 1, 1)))\n\n if 'translation' in samplers:\n # pt = Rot[er^{-1}](-et) + Rot[er^{-1}] Scale[post_s] Rot[post_r] t\n # = c + m t\n inv_er_mat = inv_er.to_matrix().to_4x4()\n post_s_mat = Matrix.Identity(4)\n for i in range(0, 3):\n post_s_mat[i][i] = post_s[i]\n c = inv_er_mat * inv_et\n m = inv_er_mat * post_s_mat * post_r.to_matrix().to_4x4()\n\n def transform_translation(t): return c + m * convert_translation(t)\n\n # In order to transform the tangents for cubic interpolation, we need to\n # know how the derivative 
transforms too. The other transforms are\n # linear, so their derivatives change the same way they do, but\n # transform_translation is affine, so its derivative changes by its\n # underlying linear map.\n def transform_velocity(t): return m * convert_translation(t)\n\n if 'rotation' in samplers:\n # pt = er^{-1} * post_r * r * pre_r\n # = d * r * pre_r\n d = inv_er * post_r\n\n def transform_rotation(r): return d * convert_rotation(r) * pre_r\n\n if 'scale' in samplers:\n # ps = post_s * s' * pre_s\n perm = bone_vnode['bone_pre_perm']\n\n def transform_scale(s):\n s = convert_scale(s)\n s = Vector((s[perm[0]], s[perm[1]], s[perm[2]]))\n return Vector((post_s[i] * s[i] * pre_s[i] for i in range(0, 3)))\n\n bone_name = bone_vnode['blender_name']\n base_path = 'pose.bones[%s]' % quote(bone_name)\n\n fcurves = []\n\n if 'translation' in samplers:\n curve = Curve.for_sampler(op, samplers['translation'])\n fcurves += curve.make_fcurves(\n op, action, base_path + '.location',\n transform=transform_translation,\n tangent_transform=transform_velocity)\n\n if 'rotation' in samplers:\n curve = Curve.for_sampler(op, samplers['rotation'])\n # NOTE: it doesn't matter that we're shortening before we transform\n # because transform_rotation preserves the dot product\n curve.shorten_quaternion_paths()\n fcurves += curve.make_fcurves(\n op, action, base_path + '.rotation_quaternion',\n transform=transform_rotation)\n\n if 'scale' in samplers:\n curve = Curve.for_sampler(op, samplers['scale'])\n fcurves += curve.make_fcurves(\n op, action, base_path + '.scale',\n transform=transform_scale)\n\n group = action.groups.new(bone_name)\n for fcurve in fcurves:\n fcurve.group = group\n","sub_path":"addons/io_scene_gltf/animation/node_trs.py","file_name":"node_trs.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"128187984","text":"import librbc as sh\r\ndef dbf(n):\r\n s = 0\r\n for v in range(len(n)):\r\n s = s + (float(n[v])/(2**(v+1)))\r\n return str(s)[1:]\r\ndef frc(n, size = 20):\r\n fb = []\r\n while not len(fb) == size or not n == 0.0:\r\n n = n * 2\r\n fb.append(str(int(n)))\r\n n = n - int(n)\r\n c = ''.join(fb)\r\n return c\r\ndef generate(num):\r\n number = {k:v for k,v in num.items() if not k in 'd'}\r\n if not num['d'] == '0.0':\r\n number['b'] = bin(int(num['d'].split('.')[0])).split('0b')[1] + '.' + frc(float('0.' + num['d'].split('.')[1]))\r\n base = list(number.keys())\r\n for i in base:\r\n if not number[i] == '0.0':\r\n number = sh.converter(number[i],i)\r\n if num['d'] == '0.0':\r\n num['d'] = str(int(number['b'].split('.')[0],2)) + dbf(number['b'].split('.')[1])\r\n #Beautify decimal START\r\n while num['d'][-1] == '0' and len(num['d']) > 1:\r\n num['d'] = num['d'][:-1]\r\n num['d'] = num['d'] + ('0' if num['d'][-1] in '.' else '.0' if not '.' 
in num['d'] else '')\r\n #Beautify decimal END\r\n number.update({'d' : num['d']})\r\n return number\r\n\r\n","sub_path":"libdbc.py","file_name":"libdbc.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"179139289","text":"class Board(object):\n def __init__(self):\n self.current_state = [[0 for _ in range(3)] for _ in range(3)]\n self.current_player = 'X'\n\n def verify_move(self, pos, player):\n # Returns True if the correct player sends av valid move\n # False if not\n x, y = pos\n if player == self.current_player:\n if x >= 0 and x < 3 and y >= 0 and y < 3:\n if not self.current_state[y][x]:\n return True\n\n return False\n\n def update(self, pos, player):\n # Takes pos as (x, y) coordinates\n # player as a string 'X' or 'O'\n # Returns 0 if the move is accepted\n # -1 if the move is invalid\n if self.verify_move(pos, player):\n x, y = pos\n self.current_state[y][x] = player\n self.current_player = 'X' if player == 'O' else 'O'\n return 0\n return -1\n\n def is_win(self):\n # Returns 0 if the game continues\n # 1 if the game is a draw\n # the winner if there is a winner\n state = self.current_state\n for i in range(3):\n r0 = state[i][0]\n c0 = state[0][i]\n if r0:\n if state[i][1] and state[i][1] == r0:\n if state[i][2] and state[i][2] == r0:\n return r0\n if c0:\n if state[1][i] and state[1][i] == c0:\n if state[2][i] and state[2][i] == c0:\n return c0\n\n d0 = state[0][0]\n d1 = state[0][2]\n if d0:\n if state[1][1] and state[1][1] == d0:\n if state[2][2] and state[2][2] == d0:\n return d0\n if d1:\n if state[1][1] and state[1][1] == d1:\n if state[2][0] and state[2][0] == d1:\n return d1\n\n for row in state:\n for col in row:\n if not col:\n return 0\n\n return 1\n\n def output_board(self):\n # Yields a tuple with\n # 0: an (x, y) coordinate\n # 1: the move occupying that coordinate\n for y in range(3):\n for x in range(3):\n yield (x, y), self.current_state[y][x]\n\n\nif __name__ == '__main__':\n def loop(p):\n board = [[0 for _ in range(3)] for _ in range(3)]\n for (x, y), s in b.output_board():\n board[y][x] = s if s else '-'\n\n for row in board:\n for col in row:\n print(col, end=' ')\n print()\n\n pos = [int(i) for i in input('> ').split(',')]\n while not b.update(pos, p):\n pos = [int(i) for i in input('> ').split(',')]\n\n b = Board()\n p = 'X'\n loop(p)\n while not b.is_win():\n p = 'X' if p == 'O' else 'O'\n loop(p)\n\n print()\n if type(b.is_win()).__name__ == 'str':\n print('Winner is:', b.is_win())\n else:\n print(\"It's a draw!\")\n input()\n","sub_path":"code/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"210598014","text":"import math\r\n\r\ndef closestpair(nums):\r\n minimum=float('-inf')\r\n distance=0\r\n for i in range(0,len(nums)):\r\n xi=int(nums[i][0])\r\n yi=int(nums[i][1])\r\n #print(xi,yi)\r\n for j in range(i+1, len(nums)):\r\n xj=int(nums[j][0])\r\n yj=int(nums[j][1])\r\n #print(xj,yj)\r\n distance=math.pow((xi-xj),2) + math.pow((yi-yj),2)\r\n distance= math.sqrt(distance)\r\n #print(xj,yj,distance)\r\n if(distance Dict[str, dict]:\n schema_by_type = {}\n schema_dir = join(dirname(__file__), 'schema')\n for file in listdir(schema_dir):\n if fnmatch(file, '*.json'):\n entity_type = splitext(file)[0]\n file_path = join(schema_dir, file)\n with open(file_path) as schema_file:\n schema_by_type[entity_type] = 
json.load(schema_file)\n return schema_by_type\n\n @staticmethod\n def __create_validator_payload(schema: dict, entity_attributes: dict):\n return {\n \"schema\": schema,\n \"object\": entity_attributes\n }\n\n @staticmethod\n def __add_errors_to_entity(entity: Entity, schema_errors: dict):\n for schema_error in schema_errors:\n attribute_name = str(schema_error['dataPath']).strip('.')\n stripped_errors = []\n for error in schema_error['errors']:\n error.replace('\"', '\\'')\n if error == 'should NOT be valid':\n error = JsonValidator.__improve_not_be_valid_message(entity.identifier.entity_type, attribute_name)\n if error != 'should match some schema in anyOf':\n stripped_errors.append(error)\n entity.add_errors(attribute_name, stripped_errors)\n\n @staticmethod\n def __improve_not_be_valid_message(entity_type, attribute_name):\n return f'{entity_type} should have required property: \\'{attribute_name}\\''\n","sub_path":"submission_validator/validation/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"222853722","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nfrom keras.datasets import reuters\nfrom keras.models import Graph\nfrom keras.optimizers import SGD\nfrom keras.layers.core import Dense, Dropout, Activation, Merge\nfrom keras.layers.advanced_activations import HierarchicalSoftmax\nfrom keras.regularizers import l2\nfrom keras.utils import np_utils\nfrom keras.preprocessing.text import Tokenizer\n\nimport time\n'''\n Train and evaluate a simple MLP on the Reuters newswire topic classification task.\n GPU run command:\n THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python examples/reuters_mlp.py\n CPU run command:\n python examples/reuters_mlp.py\n'''\n\nmax_words = 5000\nbatch_size = 100\nnb_epoch = 1000\n\nprint(\"Loading data...\")\n(X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=max_words, test_split=0.2)\nprint(len(X_train), 'train sequences')\nprint(len(X_test), 'test sequences')\n\nnb_classes = np.max(y_train)+1\nprint(nb_classes, 'classes')\n\ntrue_labels = np.asarray([[y] for y in y_train], dtype='int8')\n\nprint(\"Vectorizing sequence data...\")\ntokenizer = Tokenizer(nb_words=max_words)\nX_train = tokenizer.sequences_to_matrix(X_train, mode=\"binary\")\nX_test = tokenizer.sequences_to_matrix(X_test, mode=\"binary\")\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\n\nprint(\"Convert class vector to binary class matrix (for use with categorical_crossentropy)\")\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\nprint('Y_train shape:', Y_train.shape)\nprint('Y_test shape:', Y_test.shape)\n\nprint(true_labels.shape)\n\nprint(\"Building model...\")\nm = Graph()\nm.add_input(name='input', ndim=2)\nm.add_input(name='target_labels', ndim=2)\n\n\ndense_output_size = 512 \n\n# standard hidden layer:\nm.add_node(Dense(max_words, dense_output_size, activation='relu'), name='dense', input='input')\n#m.add_node(Dropout(0.5), name='dropout', input='dense')\n\n# add Hierarchical Softmax:\nm.add_node(HierarchicalSoftmax(input_dim=dense_output_size, output_dim=nb_classes,\n activation='relu', W_regularizer=l2(.01),\n train_mode='single_target', test_mode='single_target'),\n name='HierarchicalSoftmax', inputs=['dense', 'target_labels'], 
merge_mode='concat')\n\nm.add_output(name='output', input='HierarchicalSoftmax')\n\nm.compile('RMSprop', {'output': 'categorical_crossentropy'})\n\nfor i in range(250):\n history = m.fit({'input': X_train, 'target_labels': true_labels, 'output': Y_train},\n validation_data=None, validation_split=None, nb_epoch=1,\n batch_size=batch_size, verbose=1, shuffle=True)\n start = time.time()\n predictions = m.predict({'input': X_train, 'target_labels': true_labels},\n batch_size = batch_size)\n end = time.time()\n print(end - start)\n predictions = np_utils.categorical_probas_to_classes(predictions['output'])\n accuracy = np_utils.accuracy(predictions, y_train)\n print(accuracy)\n\n","sub_path":"examples/hierarchical_reuters_mlp.py","file_name":"hierarchical_reuters_mlp.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"553983537","text":"import os\nimport logging\n\nclass PblCommand:\n name = ''\n help = ''\n\n def run(args):\n pass\n\n def configure_subparser(self, parser):\n parser.add_argument('--sdk', help='Path to Pebble SDK (ie: ~/pebble-dev/PebbleSDK-2.X/)')\n parser.add_argument('--debug', action='store_true',\n help='Enable debugging output')\n\n def sdk_path(self, args):\n \"\"\"\n Tries to guess the location of the Pebble SDK\n \"\"\"\n env_sdk_path = os.getenv('PEBBLE_SDK_PATH')\n if args.sdk:\n sdk_path = args.sdk\n elif env_sdk_path:\n sdk_path = env_sdk_path\n logging.info(\"Overriding Pebble SDK Path with '%s'\", sdk_path)\n else:\n sdk_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))\n if not sdk_path:\n raise Exception(\"SDK path undefined!\")\n if not os.path.exists(sdk_path):\n raise Exception(\"SDK path '{}' doesn't exist!\".format(sdk_path))\n return sdk_path\n\n def add_arm_tools_to_path(self, args):\n os.environ['PATH'] = \"{}:{}\".format(\n os.environ['PATH'],\n os.path.join(self.sdk_path(args), \"arm-cs-tools\", \"bin\"))\n","sub_path":"pebble-dev/PebbleSDK-3.0-dp8/Pebble/common/phonesim/libpebble/pebble/PblCommand.py","file_name":"PblCommand.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"154214061","text":"def main():\r\n\tfname = 'C-small-attempt1'\r\n\toutfile = open(fname + '.out','w')\r\n\tinfile = open(fname + '.in','r')\r\n\tcases_str = infile.readline()\r\n\tcases_str.strip()\r\n\tcases_count = int(cases_str)\r\n\tfor case_num in range(cases_count):\r\n\t\tcase_line = infile.readline()\r\n\t\tcase_line = case_line.strip()\r\n\t\tcase_list = case_line.split(' ')\r\n\t\truns_per_day = case_list[0]\r\n\t\truns_per_day = int(runs_per_day)\r\n\t\tcoaster_size = case_list[1]\r\n\t\tcoaster_size = int(coaster_size)\r\n\t\tnum_groups = case_list[2]\r\n\t\tnum_groups = int(num_groups)\r\n\t\tg_line = infile.readline()\r\n\t\tg_line = g_line.strip()\r\n\t\tg_str_list = g_line.split(' ')\r\n\t\tg_list = []\r\n\t\tfor some_value in g_str_list:\r\n\t\t\tg_list.append(int(some_value))\r\n\t\t# precompute for optimization\r\n\t\tg_next_position = []\r\n\t\tg_number_riders = []\r\n\t\tloop_detect = []\r\n\t\tfor i in range(num_groups):\r\n\t\t\tg_next_position.append(0)\r\n\t\t\tg_number_riders.append(0)\r\n\t\t\tloop_detect.append(False)\r\n\t\tfor i in range(num_groups):\r\n\t\t\tpeople_in_coaster = 0\r\n\t\t\tloading_coaster = True\r\n\t\t\tposition = i\r\n\t\t\tprev_pos = -1\r\n\t\t\tfirst_pos = -1 #not set\r\n\t\t\twhile 
loading_coaster:\r\n\t\t\t\tif position == first_pos:\r\n\t\t\t\t\tloading_coaster = False\r\n\t\t\t\t\tnxt_pos = prev_pos\r\n\t\t\t\telse:\r\n\t\t\t\t\tif first_pos == -1:\r\n\t\t\t\t\t\tfirst_pos = position\r\n\t\t\t\t\tgroup_size = g_list[position]\r\n\t\t\t\t\tif (people_in_coaster + group_size) <= coaster_size:\r\n\t\t\t\t\t\tpeople_in_coaster = people_in_coaster + group_size\r\n\t\t\t\t\t\tprev_pos = -1\r\n\t\t\t\t\t\tposition = position + 1\r\n\t\t\t\t\t\tif position == num_groups:\r\n\t\t\t\t\t\t\tposition = 0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tloading_coaster = False\r\n\t\t\t\t\t\tnxt_pos = position\r\n\t\t\tg_number_riders[i] = people_in_coaster\r\n\t\t\tg_next_position[i] = nxt_pos\r\n\t\tposition = 0\r\n\t\teuros = 0\r\n\t\tthis_run = 0\r\n\t\tloop_dtc_mode = 99\r\n\t\twhile this_run < runs_per_day:\r\n\t\t\tif loop_dtc_mode == 1:\r\n\t\t\t\t\tif position == loop_start_pos:\r\n\t\t\t\t\t\tloop_total_runs = this_run - loop_start_run\r\n\t\t\t\t\t\tloop_total_euros = euros - loop_start_amount\r\n\t\t\t\t\t\tloop_dtc_mode = 2\r\n\t\t\t\t\t\truns_left = runs_per_day - this_run\r\n\t\t\t\t\t\tmult_fact = runs_left // loop_total_runs\r\n\t\t\t\t\t\teuros = euros + (loop_total_euros * mult_fact)\r\n\t\t\t\t\t\tthis_run = this_run + (loop_total_runs * mult_fact)\r\n\t\t\tif loop_dtc_mode == 0:\r\n\t\t\t\tif loop_detect[position]:\r\n\t\t\t\t\tloop_start_run = this_run\r\n\t\t\t\t\tloop_start_pos = position\r\n\t\t\t\t\tloop_start_amount = euros\r\n\t\t\t\t\tloop_dtc_mode = 1\r\n\t\t\tif this_run < runs_per_day:\r\n\t\t\t\t# ^^ necessary because breaking out of loop detect can cause us to add extra on the end\r\n\t\t\t\teuros = euros + g_number_riders[position]\r\n\t\t\tloop_detect[position] = True\r\n\t\t\tposition = g_next_position[position]\r\n\t\t\tthis_run = this_run + 1\r\n\t\tthis_case = case_num + 1\r\n\t\toutfile.write('Case #' + str(this_case) + ': ' + str(euros) + \"\\n\")\r\n\tinfile.close()\r\n\toutfile.close()\r\n\r\nmain()\r\n","sub_path":"solutions_python/Problem_55/188.py","file_name":"188.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"35458301","text":"import httpx\ntry:\n from src.common import logger\nexcept ImportError:\n from loguru import logger\n\n\nAPI = 'http://music.163.com/api/search/get/web'\n\n\nclass NetEase:\n \"\"\"\n 网易云搜索\n \"\"\"\n def __init__(self) -> None:\n self.header = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'music.163.com',\n 'Referer': 'http://music.163.com/search/',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'\n }\n\n self.cookies = {\n 'appver': '1.5.2'\n }\n\n async def search(self, s: str='', type: int=1, offset: int=0, num: int=5) -> httpx.Response:\n \"\"\"\n 搜索\n 关于type:\n 歌曲 1\n 专辑 10\n 歌手 100\n 歌单 1000\n 用户 1002\n mv 1004\n 歌词 1006\n 主播电台 1009\n Args:\n s (str): 搜索关键字\n type (int): 搜索类型\n offset (int): 偏移量(分页用)\n num (int): 搜索数量\n Returns:\n httpx.Response: [description]\n \"\"\"\n datas = {\n 's': s,\n 'type': type,\n 'offset': offset,\n 'limit': num\n }\n async with httpx.AsyncClient() as client:\n result = await client.post(API, data=datas, timeout=30)\n return result\n\n\nasync def search_163(keyword: str, result_num: int=5):\n n = NetEase()\n song_list = []\n 
data = await n.search(keyword, num=result_num)\n if data and data.status_code == httpx.codes.OK:\n try:\n for item in data.json()['result']['songs'][:result_num]:\n song_list.append(\n {\n 'name': item['name'],\n 'id': item['id'],\n 'artists': ' '.join(\n [artist['name'] for artist in item['artists']]\n ),\n 'type': '163'\n }\n )\n return song_list\n except Exception as e:\n logger.error(f'获取网易云歌曲失败, 返回数据data={data}, 错误信息error={e}')\n return []\n return song_list\n\n\nif __name__ == \"__main__\":\n import asyncio\n r = asyncio.run(search_163('凋叶棕', 5))\n if r:\n for song in r:\n print(song)","sub_path":"src/plugins/music/netease.py","file_name":"netease.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"583824026","text":"# coding=UTF8\nfrom django.conf import settings\nfrom django.http import Http404\n\nfrom apps.core.helpers import Cached\nfrom apps.hosting.v1_1.views import HostingView\nfrom apps.instances.models import Instance\n\n\nclass HostingMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n host = request.META.get('HTTP_HOST')\n if not host or request.META.get('HTTP_HOST_TYPE') != 'hosting':\n return self.get_response(request)\n\n host = host.split(':', 1)[0]\n is_custom_domain = not host.endswith(settings.HOSTING_DOMAIN)\n\n if is_custom_domain:\n try:\n instance = Cached(Instance, kwargs={'domains__contains': [host], 'location': settings.LOCATION}).get()\n except Instance.DoesNotExist:\n raise Http404()\n else:\n instance = host.split('.')[0]\n # Check if we're dealing with: --\n domain_data = instance.rsplit('--', 1)\n if len(domain_data) == 2:\n host, instance = domain_data\n else:\n host = '_default'\n\n kwargs = {\n 'domain': host,\n 'instance': instance\n }\n\n return HostingView.as_view()(request, **kwargs)\n","sub_path":"apps/hosting/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"109027919","text":"from collections import defaultdict\nimport math\nfrom zzz.tutorial02.tutorial02 import Ngram\n\nPATH = '/Users/zz_zhang/勉強会/NLPチュートリアル/NLPtutorial2020/test/'\nTRAIN_FILENAME = '05-train-input.txt'\nTEST_FILENAME = '05-test-input.txt'\nMODEL_NAME = 'tutorial05.model'\n\nN = 100000\n\nclass HMM:\n def __init__(self):\n self.tag_bigram = Ngram(2)\n self.word_tag_bigram = Ngram(2)\n\n def fit(self, text, tags):\n self.tag_bigram.fit('\\n'.join([' '.join(line) for line in tags]))\n self.word_tag_bigram.fit_tag(text, tags)\n\n # for (gram, prob) in self.tag_bigram.prob.items():\n # print(gram, prob)\n # print()\n # for (gram, prob) in self.word_tag_bigram.prob.items():\n # print(gram, prob)\n\n def save_model(self, filename):\n with open(filename, 'w') as file:\n file.write('T\\n') # Start writing Transformation probability (P(tag2|tag1))\n for (gram, prob) in self.tag_bigram.prob.items():\n if ' ' in gram:\n file.write(gram + ' ' + str(prob) + '\\n')\n\n file.write('E\\n') # Start writing generation probability (P(word|tag))\n for (gram, prob) in self.word_tag_bigram.prob.items():\n file.write(gram + ' ' + str(prob) + '\\n')\n\n def load_model(self, filename):\n with open(filename) as file:\n cache = ''.join([line for line in file])\n mode = None\n for line in cache:\n if line == 'T\\n':\n mode = 'T'\n elif line == 'E\\n':\n mode = 'E'\n else:\n if mode == 'T':\n gram1, gram2, 
prob = line.split(' ')[:-1] # estimate '\\n'\n self.tag_bigram.prob[gram1 + ' ' + gram2] = float(prob)\n elif mode == 'E':\n gram1, gram2, prob = line.split(' ')[:-1]\n self.word_tag_bigram[gram1 + ' ' + gram2] = float(prob)\n\n def predict(self, text: str, linear_lambda = [0.05, 0.95]):\n result = []\n cache = filter(lambda x: x != '', text.split('\\n'))\n tags = list(set([gram.split(' ')[-1] for (gram, _) in self.word_tag_bigram.prob.items()]))\n best_score = defaultdict(lambda: 0)\n for line in cache:\n words = [''] + line.split(' ') + ['<\\\\s>']\n route = defaultdict(lambda :'')\n for index in range(1, len(words)):\n word = words[index]\n pre_word = words[index - 1]\n\n if pre_word == '':\n for tag in tags:\n pt = self.tag_bigram.prob[' ' + tag] * linear_lambda[1] + linear_lambda[0] / len(tags)\n pe = self.word_tag_bigram.prob[word + ' ' + tag] * linear_lambda[1] + linear_lambda[0] / N\n best_score[str(index) + ' ' + tag] = -math.log(pt, 2) - math.log(pe, 2)\n route[str(index) + ' ' + tag] = ' ' + tag\n elif word != '<\\\\s>':\n for tag in tags:\n best_score[str(index) + ' ' + tag] = 0x3fffffff\n pe = self.word_tag_bigram.prob[word + ' ' + tag] * linear_lambda[1] + linear_lambda[0] / N\n for pre_tag in tags:\n pt = self.tag_bigram.prob[pre_tag + ' ' + tag] * linear_lambda[1] + linear_lambda[0] / len(\n tags)\n if best_score[str(index) + ' ' + tag] > best_score[str(index - 1) + ' ' + pre_tag] - math.log(pt, 2) - math.log(pe, 2):\n best_score[str(index) + ' ' + tag] = best_score[str(index - 1) + ' ' + pre_tag] - math.log(pt, 2) - math.log(pe, 2)\n route[str(index) + ' ' + tag] = route[str(index - 1) + ' ' + pre_tag] + ' ' + tag\n else:\n best_score[str(index) + ' ' + word] = 0x3fffffff\n pt = self.tag_bigram.prob[tag + '<\\\\s>'] * linear_lambda[1] + linear_lambda[0] / len(tags)\n\n for tag in tags:\n if best_score[str(index) + ' ' + word] > best_score[str(index - 1) + ' ' + tag] - math.log(pt, 2):\n best_score[str(index) + ' ' + word] = best_score[str(index - 1) + ' ' + tag] - math.log(pt, 2)\n route[str(index) + ' ' + word] = route[str(index - 1) + ' ' + tag] + ' ' + word\n\n # print(route[str(len(words) - 1) + ' ' + '<\\\\s>'])\n res = route[str(len(words) - 1) + ' ' + '<\\\\s>']\n result.append(res.replace(' ', '').replace(' <\\\\s>', ''))\n return result\n\ndef split_word_tag(text: str):\n words = []\n tags = []\n for line in text.split('\\n'):\n if len(line) > 0:\n word = list(map(lambda x: x.split('_')[0], line.split(' ')))\n tag = list(map(lambda x: x.split('_')[1], line.split(' ')))\n words.append(word)\n tags.append(tag)\n return words, tags\n\n\nif __name__ == '__main__':\n hmm = HMM()\n with open(PATH + TRAIN_FILENAME) as file:\n text = ''.join([line for line in file])\n words, tags = split_word_tag(text)\n\n\n hmm.fit(words, tags)\n # for (gram, prob) in hmm.tag_bigram.prob.items():\n # print(gram, prob)\n # for (gram, prob) in hmm.word_tag_bigram.prob.items():\n # print(gram, prob)\n hmm.save_model(MODEL_NAME)\n\n # print()\n hmm.load_model(MODEL_NAME)\n # for (gram, prob) in hmm.tag_bigram.prob.items():\n # print(gram, prob)\n # for (gram, prob) in hmm.word_tag_bigram.prob.items():\n # print(gram, prob)\n with open(PATH + TEST_FILENAME) as file:\n text = ''.join([line for line in file])\n tags = hmm.predict(text)\n print(tags)\n","sub_path":"zzz/tutorial04/tutorial04.py","file_name":"tutorial04.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"162417270","text":"\n# gap_len 
= 5\n# gap_per = 1/1000\nimport random\nimport sys\nimport math\n\ngenomessetFile = sys.argv[1]\noutFile = sys.argv[2]\ngap_len = int(sys.argv[3])\ngap_freq = int(sys.argv[4])\ngap_per=1.0/gap_freq\n\nwf = open(outFile,\"w\")\n\nwith open(genomessetFile) as f:\n g=f.read()\n glist = list(g)\n total_bp = len(g)\n gap_in = int(1/gap_per)\n\n ranlist_ind=random.sample(range(0, total_bp), math.floor(total_bp*gap_per*1.0))\n #print(ranlist_ind)\n\n for j in ranlist_ind:\n for m in range(gap_len):\n if(j+m= total_bp:\n # break\n g=\"\".join(glist)\n\n wf.write(g)\n wf.close()\n print(\"total_bp=\",total_bp)\n print(\"gap_len=\",gap_len)\n print(\"gap_per=\",gap_per)\n print(\"N_count = \", g.count('N') )\n","sub_path":"simu_rand.py","file_name":"simu_rand.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417952189","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom logging import StreamHandler, INFO\n\nfrom flask import Flask\nfrom flask.ext.bouncer import Bouncer\n\nfrom database import init_db\nfrom api import create_api\nfrom api.auth import create_bouncer\n\n\nAPI_VERSION = \"v1\"\n\n\ndef create_app(db_url):\n app = Flask(__name__)\n (app.db_session, app.db_metadata, app.db_engine) = init_db(db_url)\n app.debug = os.environ.get('DEBUG') == 'True'\n\n @app.teardown_request\n def shutdown_session(exception=None):\n app.db_session.remove()\n\n if not app.debug:\n app.logger.addHandler(StreamHandler())\n app.logger.setLevel(INFO)\n\n create_api(app, API_VERSION)\n create_bouncer(app)\n return app\n\n\napp = create_app(os.environ.get('BOOKING_DATABASE_URL', 'sqlite:////tmp/flod_booking.db'))\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 1337))\n app = create_app(os.environ.get('BOOKING_DATABASE_URL', 'sqlite:////tmp/flod_booking.db'))\n app.run(host='0.0.0.0', port=port, debug=True)\n","sub_path":"flod_booking/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"426027734","text":"from random import random\n\nclass Ant:\n def __init__(self):\n self.nodes = []\n \n def get_path(self, entrance, exits, alpha):\n self.nodes.append(entrance)\n while self.nodes[-1] not in exits:\n totalPheromone = 0\n \n if self.nodes[-1]!=entrance:\n if len(self.nodes[-1].connected_nodes)>1:\n routes = [node for node in self.nodes[-1].connected_nodes if node != self.nodes[-2]]\n else:\n routes = self.nodes[-1].connected_nodes\n else:\n routes = self.nodes[-1].connected_nodes\n \n for connected_node in routes:\n totalPheromone += (connected_node.pheromone**alpha)\n \n cumuPheromone = []\n cumu = 0\n for i in routes:\n prob = (i.pheromone**alpha)/totalPheromone\n cumu += prob\n cumuPheromone.append(cumu)\n \n randomNum = random()\n for i in range(len(cumuPheromone)):\n if randomNum <= cumuPheromone[i]:\n nodeIndex = i\n break\n \n self.nodes.append(routes[nodeIndex])\n \n newNodes = [self.nodes[0]]\n for i in self.nodes:\n for check in newNodes:\n index = newNodes.index(check)\n if i == check:\n del newNodes[index:]\n break\n newNodes.append(i)\n self.nodes = newNodes\n \n def get_path_length(self):\n return len(self.nodes)\n\n def reset(self):\n self.nodes = [] \n\n","sub_path":"ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"501043060","text":"import pandas as pd\r\nimport cx_Oracle\r\n# from WindPy import *\r\nfrom datetime import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\nplt.rcParams['font.sans-serif'] = ['Arial Unicode MS']\r\n# plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\r\nplt.rcParams['axes.unicode_minus']=False #用来正常显示负号\r\n\r\n# def get_index(): #获取指数信息\r\n# conn = cx_Oracle.connect('wind/wind@172.16.50.232/dfcf')\r\n# cursor = conn.cursor()\r\n# # date = pd.read_csv('hq/day_hq.csv').iloc[-1]['date'].replace('-', '')\r\n# cursor.execute(\"select F1_1289 as index_name, F2_1289 as ID, F4_1289 as index_intro, F5_1289 as style, F9_1289 as quanzhong, F10_1289 as weighted_method FROM TB_OBJECT_1289\")\r\n# hq = pd.DataFrame(cursor.fetchall(), columns=['指数名称', '证券ID', '指数简介', '[内部]指数风格', '权重类型', '[内部]加权方式'])\r\n# hq.to_csv('hq/指数.csv',header=True,index=False,encoding='utf_8_sig')\r\n\r\n\r\ndef get_index_price(): #获取中证500收盘价\r\n now = datetime.now().strftime(\"%Y-%m-%d\")\r\n w.start()\r\n price = w.wsd(\"000905.SH\", \"close\", \"2009-01-01\", now, \"PriceAdj=B\")\r\n df = pd.DataFrame({'日期': price.Times,'收盘价': price.Data[0]})\r\n df.to_csv('hq/中证500收盘价/中证500日收盘价2009到现在.csv', mode='a', header=True,index=False,encoding='utf_8_sig')\r\n w.stop()\r\n\r\ndef update_day_hq():#获取后复权行情数据\r\n conn = cx_Oracle.connect('wind/wind@172.16.50.232/dfcf')\r\n cursor = conn.cursor()\r\n # date = pd.read_csv('hq/day_hq.csv').iloc[-1]['date'].replace('-', '')\r\n date1 = '20090101'\r\n date2 = '20191231'\r\n cursor.execute(\"select F2_1425 as tdate, F16_1090 as code, F3_1425 as last_close, F4_1425 as open, F5_1425 as high, F6_1425 as low, F7_1425 as close, (case when F8_1425=0 then F7_1425 else ROUND(F9_1425/F8_1425*10*F10_1425,2) end) as vwap, F9_1425 as amount FROM TB_OBJECT_1090, TB_OBJECT_1425 where F4_1090='A' and F2_1090=F1_1425 and F2_1425>{0} and F2_1425<{1}\".format(date1,date2))\r\n hq= pd.DataFrame(cursor.fetchall(),columns=['date', 'code', 'last_close', 'open', 'high', 'low', 'close', 'vwap', 'amount'])\r\n hq['date']=pd.to_datetime(hq['date'])\r\n hq['last_close'] = hq['last_close'].astype(float).round(2)\r\n hq['open']=hq['open'].astype(float).round(2)\r\n hq['high'] = hq['high'].astype(float).round(2)\r\n hq['low'] = hq['low'].astype(float).round(2)\r\n hq['close']=hq['close'].astype(float).round(2)\r\n hq['vwap'] = hq['vwap'].astype(float).round(2)\r\n hq['amount'] = hq['amount'].astype(float).round(2)\r\n #hq.sort_values(['date', 'code']).to_csv('hq/day_hq.csv', index=False)\r\n hq.sort_values(['code','date']).to_csv('hq/day_hq.csv',mode='a',header=False, index=False)\r\n\r\ndef get_estimate_info(year): #获取分析师预测资料\r\n conn = cx_Oracle.connect('wind/wind@172.16.50.232/dfcf')\r\n cursor = conn.cursor()\r\n date = '{0}1231'.format(year)\r\n startdate = '{0}1031'.format(year)\r\n enddate = '{0}1231'.format(year)\r\n cursor.execute(\"select F5_1571 as es_date, F4_1571 as report_period, F16_1090 as tradecode, F8_1571 as estimate_profit, F2_1571 as org_name, F3_1571 as person from TB_OBJECT_1571, TB_OBJECT_1090 where F1_1571 = OB_REVISIONS_1090 and F4_1090='A' and F4_1571 = {0} and F5_1571 > {1} and F5_1571 < {2}\".format(date, startdate, enddate))\r\n df = pd.DataFrame(cursor.fetchall(), columns=['预测日期','报告期','交易代码','预测净利润', '预测机构','分析师姓名'])\r\n df['预测净利润'] = df['预测净利润'] * 10000\r\n df.dropna().sort_values(['交易代码','预测日期']).to_csv('业绩超预期/预测净利润/预测净利润(未处理){0}.csv'.format(year), header=True, index=False, encoding=\"utf_8_sig\")\r\n\r\ndef get_profit(year): 
#获取净利润资料\r\n conn = cx_Oracle.connect('wind/wind@172.16.50.232/dfcf')\r\n cursor = conn.cursor()\r\n date = '{0}1231'.format(year)\r\n cursor.execute(\"select F3_1854 as release_date, F2_1854 as report_period, F16_1090 as tradecode, F4_1854 as category, F61_1854 as actual_profit from TB_OBJECT_1854, TB_OBJECT_1090 where F1_1854 = OB_REVISIONS_1090 and F4_1090='A' and F2_1854 = {0}\".format(date))\r\n df = pd.DataFrame(cursor.fetchall(), columns=['实际净利润公告日期','实际净利润报告期','交易代码','报表类型','净利润'])\r\n df['实际净利润公告日期']=pd.to_datetime(df['实际净利润公告日期'])\r\n df = df.sort_values(['交易代码'])\r\n df.to_csv('业绩超预期/实际净利润/实际净利润(未处理){0}.csv'.format(year), header=True, index=False, encoding=\"utf_8_sig\")\r\n\r\ndef get_unique(dataframe):\r\n unique_code = []\r\n for code in dataframe['交易代码']: # 找到唯一股票代码成为列表\r\n if code not in unique_code:\r\n unique_code.append(code)\r\n return unique_code\r\n\r\ndef process_estimate_average(year): #找到平均预测净利润\r\n average = []\r\n data = pd.read_csv('业绩超预期/预测净利润/预测净利润(未处理){0}.csv'.format(year), header=0)\r\n unique = get_unique(data)\r\n for i in range(len(unique)):\r\n sub_data = data.loc[data[\"交易代码\"] == unique[i]] #以交易代码选取股票\r\n data_after_dup = sub_data.drop_duplicates(subset = ['分析师姓名'], keep='last', inplace=False) #去掉重复分析师预测\r\n # data_after_dup.to_csv('业绩超预期/test.csv', mode='a', header = True, encoding='utf_8_sig')\r\n average.append(data_after_dup['预测净利润'].mean())\r\n new_data = [unique,average]\r\n labels = ['交易代码', '平均预测净利润']\r\n df = pd.DataFrame.from_records(new_data,labels).T\r\n df.to_csv('业绩超预期/预测净利润/预测净利润(已处理){0}.csv'.format(year), header = True, index=False, encoding='utf_8_sig')\r\n return df\r\n\r\n\r\ndef process_profit_file(year): #选择合并报表作为净利润\r\n df = pd.read_csv('业绩超预期/实际净利润/实际净利润(未处理){0}.csv'.format(year), header=0)\r\n df = df.loc[df['报表类型'] == '合并报表']\r\n df.to_csv('业绩超预期/实际净利润/实际净利润(已处理){0}.csv'.format(year), header=True, index=False, encoding='utf_8_sig')\r\n return df\r\n\r\ndef get_excess(year): #找到业绩超预期公司股\r\n df1=process_estimate_average(year)\r\n df2=process_profit_file(year)\r\n df3 = pd.merge(df1,df2,on='交易代码',how='inner') #合并预测与实际数据\r\n a = df3.pop('平均预测净利润')\r\n df3.insert(5,'平均预测净利润',a)\r\n # df3.to_csv('业绩超预期/合并表/合并表{0}.csv'.format(year), header=True, index=False, encoding='utf_8_sig')\r\n df4 = df3.loc[df3[\"净利润\"] > df3[\"平均预测净利润\"]]\r\n a = df4.pop('交易代码')\r\n a = a.astype(int)\r\n df4.insert(0,'交易代码', a)\r\n df4.to_csv('业绩超预期/业绩超预期/业绩超预期{0}.csv'.format(year),header=True,index=False, encoding='utf_8_sig')\r\n\r\ndef get_graph_info(year,option): #获取每股对应后80天股价信息\r\n df = pd.read_csv('hq/day_hq.csv', #股价\r\n names=['date', 'code', 'last_close', 'open', 'high', 'low', 'close', 'vwap', 'amount'])\r\n df = df.loc[:,('date','code','last_close','close')]\r\n df2 = pd.read_csv('业绩超预期/业绩超预期/业绩超预期{0}.csv'.format(year),header='infer',encoding='utf_8_sig')\r\n df2 = df2.loc[:,('交易代码','实际净利润公告日期','实际净利润报告期','净利润','平均预测净利润')]\r\n start_time = \"{0}0101\".format(year)\r\n end_time = \"{0}1231\".format(year+1)\r\n start_time = pd.Timestamp(start_time).strftime(\"%Y-%m-%d\")\r\n end_time = pd.Timestamp(end_time).strftime(\"%Y-%m-%d\")\r\n df = df.loc[(df['date']>=start_time)&(df['date']<=end_time)]\r\n unique_code=[]\r\n unique_date=[]\r\n delete = []\r\n data_value = []\r\n size = []\r\n for i in range(len(df2)):\r\n code = df2.loc[i, '交易代码']\r\n date = df2.loc[i, '实际净利润公告日期']\r\n if code in df['code'].values:\r\n original_date = pd.Timestamp(df2.loc[i, '实际净利润公告日期'])\r\n after_130_date = (original_date + pd.Timedelta(days=130)).strftime('%Y-%m-%d')\r\n 
original_date = original_date.strftime('%Y-%m-%d')\r\n # print('origindate: {0} enddate: {1}'.format(original_date, after_130_date))\r\n sample = df.loc[(df['code']==code)&(df['date']=original_date)]\r\n sample = sample.iloc[:80,:]\r\n if (len(sample) != 80):\r\n delete.append((code,date))\r\n continue\r\n if (option == '胜率'):\r\n sample.insert(len(df.iloc[0,:]), 'win_ratio', sample['close'] / sample['last_close'].values)\r\n num = sample['win_ratio'].values\r\n elif (option == '盈亏比'):\r\n sample.insert(len(df.iloc[0,:]), 'revenue', (sample['close'] - sample['last_close']).values)\r\n num = sample['revenue'].values\r\n # print(data_value)\r\n elif (option == '累计收益'):\r\n # print(sub_df)\r\n sample.insert(len(df.iloc[0,:]), 'cum_return', (sample['close'] / sample.iloc[0, 3] - 1).values)\r\n num = sample['cum_return'].values\r\n else:\r\n print('无正确选择')\r\n unique_code.append(code)\r\n unique_date.append(date)\r\n size.append(len(num))\r\n data_value.append(num)\r\n # sample.to_csv('高管增持/每年增持股票股价情况/股价{0}.csv'.format(year),mode='a',index=False,header=False,encoding='utf_8_sig')\r\n max_size = max(size)\r\n column_1 = ['+' + str(i) for i in range(max_size)]\r\n multi_index = pd.MultiIndex.from_arrays([unique_date, unique_code], names=['实际净利润公告日期', '交易代码'])\r\n dataframe = pd.DataFrame(data_value, index=multi_index, columns=column_1)\r\n # print(dataframe)\r\n # return\r\n average = []\r\n if (option == '胜率'):\r\n for i in range(max_size):\r\n num = len(dataframe.loc[dataframe['+{0}'.format(i)] > 1, '+{0}'.format(i)])\r\n total_num = len(dataframe['+{0}'.format(i)].dropna())\r\n average.append(num / total_num)\r\n dataframe.loc['平均'] = average\r\n dataframe.to_csv('业绩超预期/胜率情况/胜率{0}.csv'.format(year), encoding='utf_8_sig')\r\n elif (option == '盈亏比'):\r\n for i in range(max_size):\r\n positive = dataframe.loc[dataframe['+{0}'.format(i)] > 0, '+{0}'.format(i)].values\r\n negative = dataframe.loc[dataframe['+{0}'.format(i)] < 0, '+{0}'.format(i)].values\r\n num = len(positive) / len(negative)\r\n average.append(num)\r\n dataframe.loc['盈亏比'] = average\r\n dataframe.to_csv('业绩超预期/盈亏比/盈亏比{0}.csv'.format(year), encoding='utf_8_sig')\r\n elif (option == '累计收益'):\r\n for i in range(max_size):\r\n num = dataframe['+{0}'.format(i)].dropna().values\r\n average.append(np.mean(num))\r\n dataframe.loc['平均'] = average\r\n dataframe.to_csv('业绩超预期/累计收益/平均累计收益{0}.csv'.format(year), encoding='utf_8_sig')\r\n\r\n\r\n\r\ndef get_cum_return(year): #获取超额累积\r\n df = pd.read_csv('hq/中证500收盘价/中证500日收盘价2009到现在.csv', header='infer')\r\n df['日期']=pd.to_datetime(df['日期'])\r\n df2 = pd.read_csv('业绩超预期/累计收益/平均累计收益{0}.csv'.format(year))\r\n unique_date = [df2.iloc[i, 0][2:12] for i in range(len(df2.iloc[:, 0]))]\r\n unique_code = [df2.iloc[i, 0][15:-1] for i in range(len(df2.iloc[:, 0]))]\r\n unique_date.pop()\r\n unique_code.pop()\r\n multi_index = pd.MultiIndex.from_arrays([unique_date, unique_code], names=['实际净利润公告日期', '交易代码'])\r\n size = []\r\n data_value = []\r\n for i in range(len(df2) - 1):\r\n xlength = len(df2.iloc[i, 1:].dropna())\r\n startdate = pd.Timestamp(unique_date[i]).strftime(\"%Y-%m-%d\")\r\n sub_df = df.loc[df['日期'] >= startdate]\r\n sub_df = sub_df.iloc[:xlength, :]\r\n index_c_return = [sub_df.iloc[j, 1] / sub_df.iloc[0, 1] - 1 for j in range(len(sub_df))]\r\n stock_c_return = df2.iloc[i, 1:].dropna().values\r\n x_data = stock_c_return - index_c_return\r\n size.append(len(x_data))\r\n data_value.append(x_data)\r\n dataframe = pd.DataFrame(data_value, index=multi_index,\r\n columns=['+{0}'.format(i) for i in 
range(max(size))])\r\n average = []\r\n for i in range(max(size)):\r\n num = np.mean(dataframe['+{0}'.format(i)].dropna().values)\r\n average.append(num)\r\n dataframe.loc['平均'] = average\r\n dataframe.to_csv('业绩超预期/超额累计收益/超额累计收益{0}.csv'.format(year), header=True, index=True, encoding='utf_8_sig')\r\n\r\ndef process_info(year,option): #获取超额胜率和超额盈亏比\r\n df = pd.read_csv('业绩超预期/超额累计收益/超额累计收益{0}.csv'.format(year), index_col=0, encoding='utf_8_sig')\r\n sub_df = df.iloc[:-1,:]\r\n if (option == '超额盈亏比'):\r\n win_loss = []\r\n list = [ (len(sub_df.loc[sub_df['+{0}'.format(i)]>0])\r\n /len(sub_df.loc[sub_df['+{0}'.format(i)]<0])) for i in range(1,len(df.iloc[0,:]),1) ]\r\n win_loss.append(list)\r\n dataframe = pd.DataFrame(win_loss)\r\n dataframe.to_csv('业绩超预期/超额盈亏比/超额盈亏比{0}.csv'.format(year))\r\n elif (option == '超额胜率'):\r\n win_rate=[]\r\n list = [(len(sub_df.loc[sub_df['+{0}'.format(i)] > 0])\r\n / len(sub_df['+{0}'.format(i)].dropna())) for i in range(1, len(df.iloc[0, :]), 1)]\r\n win_rate.append(list)\r\n dataframe=pd.DataFrame(win_rate)\r\n dataframe.to_csv('业绩超预期/超额胜率/超额胜率{0}.csv'.format(year))\r\n\r\ndef ten_year_graph(option): #画十年图\r\n data=[]\r\n for i in range(2009, 2019, 1):\r\n if (option=='胜率'):\r\n dataframe = pd.read_csv('业绩超预期/胜率情况/胜率{0}.csv'.format(i),index_col=0)\r\n elif (option=='累计收益'):\r\n dataframe = pd.read_csv('业绩超预期/累计收益/平均累计收益{0}.csv'.format(i),index_col=0)\r\n elif (option=='盈亏比'):\r\n dataframe = pd.read_csv('业绩超预期/盈亏比/盈亏比{0}.csv'.format(i), index_col=0)\r\n elif (option=='超额累计收益'):\r\n dataframe = pd.read_csv('业绩超预期/超额累计收益/超额累计收益{0}.csv'.format(i), index_col=0)\r\n elif (option=='超额胜率'):\r\n dataframe = pd.read_csv('业绩超预期/超额胜率/超额胜率{0}.csv'.format(i), index_col=0)\r\n elif (option=='超额盈亏比'):\r\n dataframe = pd.read_csv('业绩超预期/超额盈亏比/超额盈亏比{0}.csv'.format(i), index_col=0)\r\n subdata=dataframe.iloc[-1, :]\r\n # subdata.plot(grid=True,figsize=(25,15), label=i, linewidth=2)\r\n data.append(subdata.values)\r\n # plt.legend()\r\n # plt.title(option)\r\n # plt.show()\r\n max_size=max([len(data[i]) for i in range(len(data))])\r\n column_1 = ['+' + str(i) for i in range(max_size)]\r\n dataframe = pd.DataFrame(data, columns=column_1)\r\n average=[]\r\n for i in range(max_size):\r\n num = np.mean(dataframe['+{0}'.format(i)].dropna().values)\r\n average.append(num)\r\n dataframe.loc['平均'] = average\r\n # print(dataframe)\r\n dataframe.iloc[-1, :].plot(grid=True, figsize=(25, 15), label='{0}'.format(option),linewidth=2)\r\n plt.legend()\r\n plt.title('{0}(10年平均)'.format(option))\r\n # plt.show()\r\n\r\ndef read_file():\r\n path = os.path.expanduser(r\"~/Desktop/classes/管理层持股增变化情况.csv\")\r\n df = pd.read_csv(path)\r\n print(df)\r\n\r\n\r\n#main:\r\n# get_index_price()\r\n# update_day_业绩超预期()\r\n\r\n# for i in range(2009,2019,1):\r\n # get_graph_info(i,'累计收益')\r\n # get_graph_info(i, '胜率')\r\n # get_graph_info(i, '盈亏比')\r\n\r\n# for i in range(2009,2019,1):\r\n# get2D_csv(2009,'胜率')\r\n# for i in range(2009,2019,1):\r\n# get_cum_return(i)\r\n# process_info(i,'超额胜率')\r\n# process_info(i, '超额盈亏比')\r\n\r\n# plt.figure(figsize=(20,20), dpi=80)\r\n# plt.figure(1)\r\n# plt.subplot(231)\r\n# ten_year_graph('累计收益')\r\n# plt.subplot(232)\r\n# ten_year_graph('胜率')\r\n# plt.subplot(233)\r\n# ten_year_graph('盈亏比')\r\n# plt.subplot(234)\r\n# ten_year_graph('超额累计收益')\r\n# plt.subplot(235)\r\n# ten_year_graph('超额胜率')\r\n# plt.subplot(236)\r\n# ten_year_graph('超额盈亏比')\r\n# plt.show()\r\n\r\n# 
read_file()","sub_path":"量化/事件驱动-业绩超预期.py","file_name":"事件驱动-业绩超预期.py","file_ext":"py","file_size_in_byte":16787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"491668475","text":"# coding=utf-8\n\"\"\"\n################################################################################\n# Non standard potential Hamiltonian analysis #\n# ============================================================================ #\n# Finds eigenvalues & spectrum of a Hamiltonian with a non standard potential #\n# (e.g. x**2.5), using exact diagonalization and imaginary time evolution. #\n# Then simulates the time evolution of the ground state wavefunction displaced #\n# with respect to the equilibrium position. #\n################################################################################\n\"\"\"\nimport matplotlib\nmatplotlib.use(\"TkAgg\") # fix things for macOS\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nclass Simulation:\n \"\"\"\n A Simulation represents the numerical framework in which we are operating:\n the space (in x and k), the Hamiltonian, and all the relative helpers (e.g.\n normalization, energy evaluation, etc.).\n\n Attributes:\n K: The kinetic energy operator (in the x basis)\n K_k: The kinetic energy operator (in the k basis)\n V: The potential energy operator (in the x basis)\n H: The total Hamiltonian (K + V)\n\n N: The dimension of the space (number of samples in x)\n x: The x space\n dx: The x space step size\n k: The k space\n dk: The k space step size\n \"\"\"\n\n def __init__(self, N=128, bounds=[-5.0, 5.0], exp=2):\n \"\"\"\n Initialize a Simulation.\n\n Args:\n N: The space dimension (number of samples)\n bounds: An array of two values representing the x boundaries\n (optional, default is [-5, +5])\n exp: The exponent of the potential (optional, default is 2 i.e. 
the\n harmonic oscillator potential)\n \"\"\"\n self.N = N\n self.dx = (bounds[1]-bounds[0])/N\n self.x = np.linspace(bounds[0]+self.dx/2, bounds[1]-self.dx/2, N)\n\n self.dk = 2*np.pi/(N*self.dx)\n self.k = np.fft.fftfreq(N, 1./N)*self.dk\n\n # Kinetic energy\n self.K_k = np.diag(0.5 * self.k**2) # in the momentum basis (k)\n U = np.sqrt(self.dx) * np.array([\n np.conj(self.plane_wave(m)) for m in self.k/self.dk\n ])\n K = np.dot(U.T.conj(), np.dot(self.K_k, U)) # in the space basis (x)\n\n # Potential energy\n V = np.diag(0.5*np.abs(self.x)**exp) # in the space basis (x)\n\n # Hamiltonian\n self.K = K\n self.V = V\n self.H = K + V\n\n def plane_wave(self, m):\n \"\"\"\n Generates a plane wave with coefficient m.\n\n Args:\n m: The plane wave coefficient\n \"\"\"\n return self.normalize(np.exp(1j*self.dk*m*self.x))\n\n def normalize(self, wf):\n \"\"\"\n Normalizes a wavefunction in x basis.\n \"\"\"\n return wf/np.sqrt(np.vdot(wf, wf)*self.dx)\n\n def time_evolver(self, wf, dt, iterations=1):\n \"\"\"\n Applies the time evolution operator to a given wavefunction using the\n second order Trotter splitting.\n\n Args:\n wf: The wavefunction (in x basis)\n dt: The time interval (note: it may be an imaginary time)\n iterations: the number of iterations (optional, default is 1)\n \"\"\"\n wf_x = wf * np.exp(-0.5j*np.diag(self.V)*dt)\n wf_k = np.fft.fft(wf_x)\n wf_k = wf_k * np.exp(-1j*np.diag(self.K_k)*dt)\n wf_x = np.fft.ifft(wf_k)\n\n for i in range(iterations-1):\n # This is an optimization: we evolve the wavefunction for a whole\n # step dt in V, then the last half step is done outside the loop.\n wf_x = wf_x * np.exp(-1j*np.diag(self.V)*dt)\n\n wf_k = np.fft.fft(wf_x)\n wf_k = wf_k * np.exp(-1j*np.diag(self.K_k)*dt)\n\n wf_x = np.fft.ifft(wf_k)\n\n # Evolve for the last half step, and we’re done!\n return wf_x * np.exp(-0.5j*np.diag(self.V)*dt)\n\n def kinetic_energy(self, wf):\n \"\"\"\n Evaluates the kinetic energy of a wavefunction.\n\n Args:\n wf: The wavefunction (in x basis)\n \"\"\"\n wf_k = np.fft.fft(wf)\n\n return 0.5 * self.dx/self.N * np.sum(np.abs(wf_k)**2 * self.k**2)\n\n def potential_energy(self, wf):\n \"\"\"\n Evaluates the potential energy of a wavefunction.\n\n Args:\n wf: The wavefunction (in x basis)\n \"\"\"\n return self.dx * np.dot(np.abs(wf)**2, np.diag(self.V))\n\n def energy(self, wf):\n \"\"\"\n Evaluates the total energy of a wavefunction (kinetic + potential).\n\n Args:\n wf: The wavefunction (in x basis)\n \"\"\"\n return self.kinetic_energy(wf) + self.potential_energy(wf)\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Find the eigenvalues with N = 50 by means of exact diagonalization. ~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\nexponent = 2.3 # the potential exponent\nsim = Simulation(N=50, exp=exponent)\n\neigvalues = np.linalg.eigvalsh(sim.H)\n\nprint(\"Plotting eigenvalues for simulation with N = 50\")\nprint(\"(close the plot to continue)\")\nplot_title = \"Eigenvalues (N = 50)\"\nplt.figure(1).canvas.set_window_title(plot_title)\nplt.title(plot_title)\nplt.plot(eigvalues, \"+\")\nplt.show()\n\nprint(\"Done.\")\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Find the value for N such that the first 10 eigenvalues do not change their ~\n~ value more than 1e-3. 
~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\ntolerance = 1e-3\neigvalues = np.linalg.eigvalsh(Simulation(N=10, exp=exponent).H)[:10]\n\nprint(\"Looking for convergence N with tolerance t = %.1e\" % tolerance)\nfor n in range(11, 500):\n sim = Simulation(N=n, exp=exponent)\n new_eigvalues = np.linalg.eigvalsh(sim.H)[:10]\n error = np.abs(new_eigvalues - eigvalues)\n\n if (np.all(error < tolerance)):\n print(\"Convergence found for N = %i.\" % n)\n break\n\n eigvalues = new_eigvalues\n\nprint(\"Done.\")\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Plot the probability density for the first two eigenvectors. ~ ~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\nn = max(n, 256) # set minimum value of N to 256 so we get a decent smoothness\nprint(\"Plotting eigenstates with N = %i\" % n)\nprint(\"(close the plot to continue)\")\nsim = Simulation(N=n, exp=exponent)\n_, eigvectors = np.linalg.eigh(sim.H)\n\np0 = sim.normalize(np.abs(eigvectors[:, 0]))**2\np1 = sim.normalize(np.abs(eigvectors[:, 1]))**2\n\nylimit = np.max([p0, p1]) * 1.1\nplot_title = \"Eigenstates (N = %i)\" % n\nplt.figure(2).canvas.set_window_title(plot_title)\nplt.suptitle(plot_title)\n\nplt.subplot(211)\nplt.ylabel(\"State 0\")\nplt.plot(sim.x, p0)\nplt.ylim(0, ylimit)\n\nplt.subplot(212)\nplt.ylabel(\"State 1\")\nplt.plot(sim.x, p1)\nplt.ylim(0, ylimit)\n\nplt.show()\n\nprint(\"Done.\")\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Find the first two states by imaginary time evolution and plot a comparison ~\n~ with the states found previously. ~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\ndt = -0.001j # imaginary time interval\nnsteps = 5000 # number of steps to evolve the wavefunction\n\nprint(\"Computing the states using dt = %.1e i, %i steps\" % (dt.imag, nsteps))\nprint(\"Please wait for an imaginary time…\")\n\n# Start from a symmetric wavefunction to get the ground state (we know it will\n# have no nodes, so it must be symmetric).\npsi_gs = np.ones(sim.N)\nfor i in range(nsteps):\n psi_gs = sim.normalize(sim.time_evolver(psi_gs, dt))\n\n# Start from an asymmetric wavefunction to get the first asymmetric state\npsi_1 = np.copy(sim.x)\nfor i in range(nsteps):\n psi_1 = sim.normalize(sim.time_evolver(psi_1, dt))\n\nprint(\"Plotting eigenstates\\n(close the plot to continue)\")\n\nplot_title = \"Eigenstates comparison\"\nplt.figure(3).canvas.set_window_title(plot_title)\nplt.suptitle(plot_title)\n\nplt.subplot(211)\nplt.ylabel(\"Ground state\")\nplt.plot(sim.x, np.abs(psi_gs)**2, label=\"Imaginary time\")\nplt.plot(sim.x, p0, label=\"Exact diagonalization\")\nplt.legend()\n\nplt.subplot(212)\nplt.ylabel(\"First asymmetric state\")\nplt.plot(sim.x, np.abs(psi_1)**2, label=\"Imaginary time\")\nplt.plot(sim.x, p1, label=\"Exact diagonalization\")\nplt.legend()\n\nplt.show()\n\nprint(\"Done.\\n\")\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Real time evolution of the shifted ground state wavefunction. 
~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\ndt = 0.001 # real time interval\npsi = np.roll(psi_gs, sim.N/10) # shifted wavefunction\n\nclass WavefunctionAnimation:\n def __init__(self, sim, psi, dt, steps=20):\n self.sim, self.psi, self.dt, self.steps = sim, psi, dt, steps\n self.fig, self.ax = plt.subplots()\n self.psi_line, = self.ax.plot(self.sim.x, np.abs(self.psi)**2)\n\n # Plot the current time\n self.time_text = self.ax.text(0.80, 0.95, \"Time = 0\",\n transform=self.ax.transAxes, bbox=dict(\n facecolor=\"#ffdd00\", alpha=0.5,\n capstyle=\"round\"\n ))\n\n # Plot the potential rescaled\n self.ax.plot(sim.x, np.diag(sim.V)/np.max(np.diag(sim.V)), \"--\")\n\n def __call__(self, frame_num):\n self.psi = sim.time_evolver(self.psi, self.dt, self.steps)\n self.psi_line.set_ydata(np.abs(self.psi)**2)\n self.time_text.set_text(\"Time = %6.2f\" % (frame_num*self.steps*dt))\n\n return self.psi_line, self.time_text\n\n# Run the animation\nwa = WavefunctionAnimation(sim, psi, dt, steps=50)\n\nprint(\"Starting animation\\n(close the animation to continue)\")\n_ = FuncAnimation(wa.fig, wa, frames=1000, interval=20, blit=True)\nplt.show()\n\nprint(\"Done.\")\n\nprint('''\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n~ Evaluate average energy and position and the time period of the average ~\n~ position oscillations. ~\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n''')\nsteps = 1000\nstepsize = 100\npos = np.zeros(steps)\nenergy = np.zeros(steps)\n\nprint(\"Calculating expectation values of energy and position\")\nprint(\"Be patient, this will take some time…\")\n\nfor i in range(steps):\n # Computing the time evolution for `stepsize` iterations, otherwise it would\n # take eons to do the computations for a reasonable time. 
However, we will\n # lose some high frequency components (if any).\n psi = sim.time_evolver(psi, dt, iterations=stepsize)\n\n pos[i] = np.sum(np.dot(np.abs(psi)**2, sim.x)*sim.dx)\n energy[i] = sim.energy(psi)\n\nprint(\"Plotting average position and energy\\n(close the plot to continue)\")\n\ntime = np.arange(steps*stepsize*dt, step=(stepsize*dt))\n\nplot_title = \"Average position and energy\"\nplt.figure(5).canvas.set_window_title(plot_title)\nplt.title(plot_title)\nplt.xlabel(\"Time\")\nplt.plot(time, pos, label=\"Position\")\nplt.plot(time, energy, label=\"Energy\")\n\nplt.legend()\nplt.show()\n\nprint(\"Calculating the period as the frequency peak in the position average\")\nfreq = np.abs(np.fft.rfft(pos))\nfreqspace = np.fft.rfftfreq(steps, stepsize*dt)\n\nfreq_peak = np.abs(freqspace[np.argmax(freq)])\n\nprint(\"The frequency peak is: %g\\nTime period: %g\" % (freq_peak, 1/freq_peak))\n\nprint(\"Plotting the spectrum\\n(close the plot to continue)\")\nplot_title = \"Frequency spectrum\"\n\nplt.figure(6).canvas.set_window_title(plot_title)\nplt.title(plot_title)\nplt.plot(freqspace, freq)\nplt.xlabel(\"Frequency\")\nplt.show()\n\nprint('''\n\n\n _ _ _ _\n /\\ | | | | | | |\n / \\ | | | __| | ___ _ __ ___| |\n / /\\ \\ | | | / _` |/ _ \\| '_ \\ / _ \\ |\n / ____ \\| | | | (_| | (_) | | | | __/_|\n /_/ \\_\\_|_| \\__,_|\\___/|_| |_|\\___(_)\n\n\n''')\n","sub_path":"non_standard_hamiltonian.py","file_name":"non_standard_hamiltonian.py","file_ext":"py","file_size_in_byte":12363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"614778081","text":"import random\nimport numpy as np\n\n\ndef printIntro():\n print(\"BlackJack AI December 2018\")\n print(\"https://github.com/brs80/blackjack.git\\n\")\n print(\"You are given 500 chips to start.\")\n\n\ndef printWager(player):\n print(\"Chips remaining: {}\".format(player.getMoney()))\n print(\"(1) 5 chips\")\n print(\"(2) 10 chips\")\n print(\"(3) 50 chips\")\n print(\"(4) 100 chips\")\n print(\"(5) other\")\n wager = input(\"Place your wager-> \")\n int(wager)\n if int(wager) > player.getMoney():\n print(\"wager too high\")\n printWager(player)\n if int(wager) < 0:\n print(\"wager too low\")\n printWager(player)\n return wager\n\n\ndef startGame():\n deck = Deck()\n player = Player()\n dealer = Dealer()\n while player.getMoney() > 0:\n player.draw(deck)\n dealer.draw(deck)\n printTable(player, dealer)\n wager = printWager(player)\n player.setMoney(wager)\n\n\ndef printTable(player, dealer):\n print(\"\\nPlayer Hand:\")\n player.showHand()\n print(\"\\nDealer Hand:\")\n dealer.showHand()\n\n\ndef hand_total(hand):\n total = 0\n ace_found = False\n soft = False\n for card in hand:\n if card.value >= 10:\n total += 10\n else:\n total += card.value\n if card.value == 1:\n ace_found = True\n if total < 12 and ace_found:\n total += 10\n soft = True\n return total, ace_found\n\n\nclass Card(object):\n def __init__(self, suit, value):\n self.suit = suit\n self.value = value\n\n def show(self):\n if self.value == 1:\n print(\"{} of {}\".format('A', self.suit))\n elif self.value == 11:\n print(\"{} of {}\".format('J', self.suit))\n elif self.value == 12:\n print(\"{} of {}\".format('Q', self.suit))\n elif self.value == 13:\n print(\"{} of {}\".format('K', self.suit))\n else:\n print(\"{} of {}\".format(self.value, self.suit))\n\n\nclass DeckEmptyError(Exception):\n pass\n\n\nclass Deck(object):\n def __init__(self):\n self.cards = []\n self.discardCount = [0 for i in range(14)]\n self.build()\n\n 
def build(self):\n self.cards = []\n self.discardCount = [0 for i in range(14)]\n for suit in (\"Spades\", \"Clubs\", \"Diamonds\", \"Hearts\"):\n for value in range(1, 14):\n self.cards.append(Card(suit, value))\n random.shuffle(self.cards)\n\n def show(self):\n for card in self.cards:\n card.show()\n\n def addtoDiscard(self, card):\n self.discardCount[card.value] += 1\n\n def cardsRemaining(self):\n return len(self.cards)\n\n def drawCard(self):\n if not self.cardsRemaining():\n raise DeckEmptyError\n return self.cards.pop()\n\n\nclass CardHolder(object):\n def __init__(self):\n self.hand = []\n\n def draw(self, deck):\n for num in range(1, 3):\n self.hand.append(deck.drawCard())\n\n def hit(self, deck):\n self.hand.append(deck.drawCard())\n\n def discardHand(self, deck):\n for c in self.hand:\n deck.addtoDiscard(c)\n self.hand = []\n\n def handScore(self):\n return hand_total(self.hand)\n\n def showHand(self):\n total = hand_total(self.hand)\n for card in self.hand:\n card.show()\n print(\"Score: {}\\nAce in hand: {}\".format(total[0], total[1]))\n\n\nclass Player(CardHolder, object):\n def getMoney(self):\n return self.money\n\n def setMoney(self, wager):\n self.money = self.money - int(wager)\n\n\nclass Dealer(CardHolder, object):\n def gameHandValue(self):\n return self.hand[0].value\n\n\nclass Game(object):\n def __init__(self):\n self.dealer = Dealer()\n self.player = Player()\n self.deck = Deck()\n self.n_actions = 2\n self.n_features = 17\n self.wins = 0\n self.games = 0\n\n def showState(self):\n self.dealer.showHand()\n self.player.showHand()\n\n def new_round(self):\n self.player.discardHand(self.deck)\n self.dealer.discardHand(self.deck)\n self.dealer.draw(self.deck)\n self.player.draw(self.deck)\n h, s = self.player.handScore()\n return (np.append(np.array([h/21, s, self.dealer.gameHandValue()/21]),\n [x/4 for x in self.deck.discardCount]))\n\n def shuffle(self):\n winRate = self.wins/self.games\n self.games = 0\n self.wins = 0\n self.deck.build()\n return winRate\n\n def step(self, action):\n # hit\n done = False\n reward = 0\n if action == 0:\n self.player.hit(self.deck)\n if self.player.handScore()[0] > 21:\n reward = -1\n done = True\n self.games += 1\n\n # stay\n elif action == 1:\n done = True\n self.games += 1\n score = self.player.handScore()[0]\n while True:\n if self.dealer.handScore()[0] >= 17:\n break\n self.dealer.hit(self.deck)\n d_score = self.dealer.handScore()[0]\n if score > d_score or d_score > 21:\n reward = 1\n self.wins += 1\n elif score == d_score:\n reward = 0\n else:\n reward = -1\n else:\n raise ValueError('Illegal action passed')\n\n h, s = self.player.handScore()\n s_ = (np.append(np.array([h/21, s, self.dealer.gameHandValue()/21]),\n [x/4 for x in self.deck.discardCount]))\n return s_, reward, done\n\n\nif __name__ == '__main__':\n NewGame = Game()\n NewGame.reset()\n while True:\n a = int(input())\n s_, reward, done = NewGame.step(a)\n print(s_)\n print(reward, done)\n if done:\n print(\"round over drawing new hand\")\n input()\n NewGame.new_round()\n","sub_path":"game/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"525257843","text":"#%% [markdown]\n## Random walks\n\n#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#%%\nnwalks = 5000\nnsteps = 1000\ndraws = np.random.randint(0,2, size=(nwalks, nsteps))\nsteps = np.where(draws > 0, 1, -1)\nwalks = steps.cumsum(1)\nwalks\n\n#%%\n## Crossing point 
with p = 30\nnp.min((np.abs(walks) >= 30).argmax(axis=1))\n\n#%%\n","sub_path":"python_sources/IntroNumpy.py","file_name":"IntroNumpy.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"32081877","text":"import os\nimport sys\nimport re\n\n#req_sam = []\n#with open(\"/home/rewatit/rem_methylation.txt\", \"r\") as samples:\n# for s in samples:\n# s = s.rstrip()\n# req_sam.append(s)\n\nF = os.listdir(\"/projects/users/rewatit/rna_seq_analysis\")\nprint(F)\nfor r in F:\n if os.path.isdir(\"/projects/users/rewatit/rna_seq_analysis/\" + r):\n os.chdir(\"/projects/users/rewatit/rna_seq_analysis/\" + r)\n allfiles = os.listdir(\".\")\n for a in allfiles:\n if a.endswith(\".sorted.bam\"):\n val = a.split(\"_\")[0]\n os.system(\"coverageBed -split -abam \" + a + \" -b /home/rewatit/Rattus_norvegicus_exonic_parts.gff | awk 'BEGIN{OFS=\\\"\\t\\\"} {print $1,$4,$5,$5-$4+1,$9,$10}' | sort -k 5 > \" + val + \"_exonic_parts.inclusion\")\n #os.system(\"sed 's/,/\\t/g' \" + a + \" | grep -v description | awk '{OFS=\\\"\\t\\\"}{print $1,$2+$13, $3-$14,$4,$5,$6}' > \" + val + \"_intron.bed\")\n #os.system(\"intersectBed -wao -f 1.0 -s -a /home/rewatit/Rattus_norvegicus_exonic_parts.gff -b \" + a + \" | awk 'BEGIN{OFS=\\\"\\t\\\"}{$16 == 0? s[$9] += 0:s[$9] += $14}END{for (i in s) {print i,s[i]}}' | sort -k 1 > \" + val + \"_exonic_parts.exclusion\")\n #exc = a.split(\".\")[0] + \".exclusion\"\n #os.system(\"readLength=100\")\n #os.system(\"paste \" + a + \" \" + exc + \" | awk -v \\\"len=100\\\" 'BEGIN{OFS=\\\"\\t\\\"; print \\\"exon_ID\\\" , \\\"length\\\" , \\\"inclusion\\\" , \\\"exclusion\\\" , \\\"PSI\\\"}{NIR=$6/($4+len-1) ; NER=$8/(len-1)}{print $5,$4,$6,$8,(NIR+NER<=0)? 
\\\"NA\\\":NIR / (NIR + NER)}' > exonic_parts.psi\")\n\n os.chdir(\"/projects/users/rewatit/rna_seq_analysis\")\n","sub_path":"psi_script.py","file_name":"psi_script.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"353860128","text":"from django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom shops import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom shops.views import account, shop_furniture, order, report, edit_furniture, add_furniture\nfrom shops import apis\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name='home'),\n path('login', views.login_view, name=\"login\"),\n path('logout', views.logout_view, name=\"logout\"),\n path('sign_up', views.sign_up , name=\"sign-up\"),\n\n\n path('account', views.account, name='shop-account'),\n path('furniture', views.shop_furniture, name='shop-furniture'),\n path('furniture/add/', views.add_furniture, name='shop-add-furniture'),\n re_path(r'^furniture/edit/(?P\\d+)/$', views.edit_furniture, name='shop-edit-furniture'), \n path('order', views.order, name='shop-order'),\n path('report', views.report, name='shop-report'),\n\n re_path(r'^api/social/', include('rest_framework_social_oauth2.urls')),\n\n re_path(r'^api/shop/order/notification/(?P.+)/$', apis.shop_order_notification),\n\n\n # API for Customers\n path('api/customer/shops/', apis.customer_get_shops),\n re_path(r'^api/customer/furnitures/(?P\\d+)/$', apis.customer_get_furnitures),\n path('api/customer/order/add/', apis.customer_add_order),\n path('api/customer/order/latest/', apis.customer_get_latest_order),\n path('api/customer/driver/location/', apis.customer_driver_location),\n\n # API for Drivers\n path('api/driver/orders/ready/', apis.driver_get_ready_orders),\n path('api/driver/order/pick/', apis.driver_pick_order),\n path('api/driver/order/latest/', apis.driver_get_latest_order),\n path('api/driver/order/complete/', apis.driver_complete_order),\n path('api/driver/revenue/', apis.driver_get_revenue),\n path('api/driver/location/update/', apis.driver_update_location),\n \n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"furniture_shop_system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"589954132","text":"#! 
/usr/bin/env python\n# -*- coding: Latin-1 -*-\n\n# Petit exercice utilisant la librairie graphique Tkinter\n\nfrom Tkinter import *\n\n# définition des gestionnaires\n# d'événements :\n\ndef move():\n \"déplacement de la balle\"\n global x1, y1, dx, dy, flag\n x1, y1 = x1 +dx, y1 + dy\n if x1 >210:\n x1, dx, dy = 210, 0, 15\n if y1 >210:\n y1, dx, dy = 210, -15, 0\n if x1 <10:\n x1, dx, dy = 10, 0, -15\n if y1 <10:\n y1, dx, dy = 10, 15, 0\n can1.coords(oval1,x1,y1,x1+30,y1+30)\n if flag >0: \n fen1.after(50,move)\t\t# boucler après 50 millisecondes\n\ndef stop_it():\n \"arret de l'animation\"\n global flag \n flag =0\n\ndef start_it():\n \"démarrage de l'animation\"\n global flag\n if flag ==0:\t# pour éviter que le bouton ne puisse lancer plusieurs boucles \n flag =1\n move()\n\n#========== Programme principal =============\n\n# les variables suivantes seront utilisées de manière globale :\nx1, y1 = 10, 10\t\t# coordonnées initiales\ndx, dy = 15, 0\t\t# 'pas' du déplacement\nflag =0\t\t\t # commutateur\n\n# Création du widget principal (\"parent\") :\nfen1 = Tk()\nfen1.title(\"Exercice d'animation avec Tkinter\")\n# création des widgets \"enfants\" :\ncan1 = Canvas(fen1,bg='dark grey',height=250, width=250)\ncan1.pack(side=LEFT, padx =5, pady =5)\noval1 = can1.create_oval(x1, y1, x1+30, y1+30, width=2, fill='red')\nbou1 = Button(fen1,text='Quitter', width =8, command=fen1.quit)\nbou1.pack(side=BOTTOM)\nbou2 = Button(fen1, text='Démarrer', width =8, command=start_it)\nbou2.pack()\nbou3 = Button(fen1, text='Arrêter', width =8, command=stop_it)\nbou3.pack()\n# démarrage du réceptionnaire d'évènements (boucle principale) :\nfen1.mainloop()\n","sub_path":"python/oreilly/cours_python/chap08/anima_auto.py","file_name":"anima_auto.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400674648","text":"import numpy as np\n\nclass NeuralNetwork:\n def __init__(self, layers, alpha=0.1):\n #Initalize the list of weights martices\n #Store the network architecture and learning rateself.\n\n #Layers: [2,2,1] imply 2 - 2 -1 architecture.\n\n self.W =[]\n self.layers = layers\n self.alpha = alpha\n\n # Initalize weights\n # arange(start, stop), so 0, 1, ..., len(layers) -2\n for i in np.arange(0, len(layers) - 2):\n #Rnadom weights\n\n # np.random.randn(x, y) Return 2D matrix x by y\n # + 1 for the bias\n w = np.random.randn(layers[i] + 1, layers[i + 1] + 1)\n\n # Scale w by dividing by sqrt of # of nodes\n # This normalizes the varianceself.\n\n # append adds to the end of the array.\n self.W.append(w / np.sqrt(layers[i]))\n\n # We stopped at len(layers) - 2 because the last two layers are a special case.\n # Output do not need bias.\n w = np.random.randn(layers[-2] + 1, layers[-1])\n self.W.append(w / np.sqrt(layers[-2]))\n\n def __repr__(self):\n # print out 2-2-1\n return \"NeuralNetwork: {}\".format(\n \"-\".join(str(i) for i in self.layers))\n\n def sigmoid(self, x):\n return 1.0/(1 + np.exp(-x))\n\n\n # For back propergation, activation must be differentiable\n def sigmoid_deriv(self, x):\n return x * (1-x)\n\n # We train out function here.\n # X is training data\n # y is the corresponding calss label\n def fit(self, X, y, epochs = 1000, displayUpdate = 100):\n #np.c_(x,y) concatenates side by side\n #X.shape[0] => n X.shape[1] => m\n # add a column of 1's; np.ones(n, m(optional))\n\n X = np.c_[X, np.ones(X.shape[0])]\n\n for epoch in np.arange(0, epochs):\n #loop over each data point and training\n\n 
# x (element of X), target (element of y)\n # zip(x,y) return tuples: (x1, y1),(x2, y2) ...\n for (x, target) in zip(X, y):\n # Make prediction, backcompute with this.\n self.fit_partial(x, target)\n\n # Display message\n\n if epoch == 0 or (epoch + 1) % displayUpdate == 0:\n loss = self.calculate_loss(X, y)\n print(\"[INFO] epoch={}, loss={:.7f}\".format(\n epoch + 1, loss))\n\n #The heart of backpropagation\n # 2 parameters => indivdual data point & class label\n def fit_partial(self, x, y):\n # Atleast_2d return an atleast 2d array.\n # A will be responsible for storing output activations.\n A = [np.atleast_2d(x)]\n\n # Feed Forward\n # 0, 1 ..., n layers\n for layer in np.arange(0, len(self.W)):\n # Feed foward the activation at current layer.\n\n net = A[layer].dot(self.W[layer])\n\n\n out = self.sigmoid(net)\n\n # Add to the list of activations.\n A.append(out)\n\n # Back Propagation\n # First, compute the difference.\n\n # -1 index points to the last layer, aka output layer.\n error = A[-1] - y\n\n # We build our list of derivatives.\n D = [error * self.sigmoid_deriv(A[-1])]\n\n # Ignore the last 2 layers as we taken account of these already.\n for layer in np.arange(len(A) - 2, 0, -1):\n\n # Delta of current layer is last layer dotted with weight matrix\n delta = D[-1].dot(self.W[layer].T)\n\n # d Error / d Out * d Out / d net\n delta = delta * self.sigmoid_deriv(A[layer])\n D.append(delta)\n\n # We need to reverse the deltas.\n D = D[::-1]\n\n # Weight Update\n\n for layer in np.arange(0, len(self.W)):\n\n # Dot product of layer activation and deltas.\n\n self.W[layer] += -self.alpha * A[layer].T.dot(D[layer])\n\n\n def predict(self, X, addBias = True):\n # Initalize as input features.\n p = np.atleast_2d(X)\n\n if addBias:\n p = np.c_[p, np.ones((p.shape[0]))]\n\n for layer in np.arange(0, len(self.W)):\n p = self.sigmoid(np.dot(p, self.W[layer]))\n\n return p\n\n def calculate_loss(self, X, targets):\n targets = np.atleast_2d(targets)\n\n predictions = self.predict(X, addBias=False)\n loss = 0.5 * np.sum((predictions - targets) ** 2)\n\n return loss\n","sub_path":"MNIST-CIFAR-10_practice/utilities/neuralnetwork.py","file_name":"neuralnetwork.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"396725472","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom listings.models import *\nfrom realtors.models import *\n# Create your views here.\ndef index(request):\n listings=Listing.objects.order_by('-list_date').filter(is_published=True)[:3]\n return render(request,\"pages/index.html\",{'listings':listings})\n\ndef about(request):\n realtors=Realtor.objects.order_by('-hire_date')\n mvp_realtors=Realtor.objects.all().get(is_mvp=True)\n print(mvp_realtors)\n context={\n 'realtors':realtors,\n 'mvp_realtors':mvp_realtors\n }\n return render(request,\"pages/about.html\",context)","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"393879947","text":"import setuptools\n\nwith open('README.md') as f:\n README = f.read()\n\nsetuptools.setup(\n author=\"Thomas Dewitte\",\n author_email=\"thomasdewittecontact@gmail.com\",\n\n name='bitcoin_value',\n version='1.3.3',\n license=\"MIT\",\n url='https://github.com/dewittethomas/bitcoin-value',\n python_requires='>= 3.5',\n \n description='Gets the value of one bitcoin',\n 
long_description=README,\n long_description_content_type=\"text/markdown\",\n\n package_dir={\"bitcoin_value\": \"bitcoin_value\"},\n install_requires=[\"requests>=2.22.0\"],\n \n packages=setuptools.find_packages(),\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3'\n ]\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"119895287","text":"\"\"\"\nDado uma string com uma frase informada pelo usuário (incluindo espaços em branco), conte:\n\n1. quantos espaços em branco existem na frase.\n2. quantas vezes aparecem as vogais a, e, i, o, u.\n\"\"\"\n\nvogais = ['a', 'e', 'i', 'o', 'u']\n\nfrase = input('Frase: ').lower()\nespacos = frase.count(' ')\n\nfor v in vogais:\n print(f'A vogal \\\"{v}\\\" apareceu {frase.count(v)} vezes na frase.')\nprint(f'Na frase há {espacos} espacos.')\n","sub_path":"06_Exercicios_com_Strings/07-ContarVogaisEEspacos.py","file_name":"07-ContarVogaisEEspacos.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"468246896","text":"import pandas as pd\nimport yaml\nimport os\nimport csv\n\nroot = os.environ.get('INPUT_FOLDER', './')\ncounter = 0\ndf_list = []\nlist_of_columns = {}\nlist_of_anonymized_columns = {}\nfreq_of_anonymization_types = {}\nfor fn in os.listdir(root):\n with open(os.path.join(root, fn), 'r') as f:\n # print(f.name)\n df = pd.io.json.json_normalize(yaml.load(f))\n src_fields = df[\"source.fields\"][0]\n for col in src_fields:\n if col[\"name\"] in list_of_columns:\n list_of_columns[col[\"name\"]] += 1\n else:\n list_of_columns[col[\"name\"]] = 1\n if \"anonymize\" in col:\n if col[\"name\"] in list_of_anonymized_columns:\n list_of_anonymized_columns[col[\"name\"]] += 1\n else:\n list_of_anonymized_columns[col[\"name\"]] = 1\n if col[\"anonymize\"] in freq_of_anonymization_types:\n freq_of_anonymization_types[col[\"anonymize\"]] += 1\n else:\n freq_of_anonymization_types[col[\"anonymize\"]] = 1\n df_list.append(df)\n","sub_path":"process_yml.py","file_name":"process_yml.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"371605322","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('register/', views.registerPage, name=\"register\"),\n\tpath('login/', views.loginPage, name=\"login\"), \n\tpath('logout/', views.logoutUser, name=\"logout\"),\n path('', views.home, name=\"home\"),\n path('doctor//', views.doctor, name=\"doctor\"),\n path('about/', views.about, name=\"about\"),\n path('create_patient', views.createPatient, name=\"create_patient\"),\n path('update_patient//', views.updatePatient, name=\"update_patient\"),\n path('delete_patient//', views.deletePatient, name=\"delete_patient\"),\n path('create_location', views.createLocation, name=\"create_location\"),\n path('update_location//', views.updateLocation, name=\"update_location\"),\n path('delete_location//', views.deleteLocation, name=\"delete_location\"),\n\n]","sub_path":"covidtracefinal/maps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400946807","text":"from ..DiffusionModel import DiffusionModel\nimport networkx as nx\nimport numpy as np\n\n__author__ = \"Giulio Rossetti\"\n__email__ = \"giulio.rossetti@gmail.com\"\n\n\nclass ProfileModel(DiffusionModel):\n \"\"\"\n Implement the Profile model of Milli et al.\n Model Parameters:\n (1) nodes profiles\n \"\"\"\n\n def __init__(self, graph):\n super(self.__class__, self).__init__(graph)\n self.available_statuses = {\n \"Susceptible\": 0,\n \"Infected\": 1\n }\n\n self.parameters = {\n \"model\": {},\n \"nodes\": {\n \"profile\": {\n \"descr\": \"Node profile\",\n \"range\": [0, 1],\n \"optional\": True,\n \"default\": 0.1\n }\n },\n \"edges\": {},\n }\n\n self.name = \"Profile\"\n\n def iteration(self):\n \"\"\"\n\n \"\"\"\n self.clean_initial_status(self.available_statuses.values())\n actual_status = {node: nstatus for node, nstatus in self.status.iteritems()}\n\n if self.actual_iteration == 0:\n self.actual_iteration += 1\n return 0, actual_status\n\n for u in self.graph.nodes():\n if actual_status[u] == 1:\n continue\n\n neighbors = self.graph.neighbors(u)\n if isinstance(self.graph, nx.DiGraph):\n neighbors = self.graph.predecessors(u)\n\n infected = 0\n for v in neighbors:\n infected += self.status[v]\n\n if infected > 0:\n eventp = np.random.random_sample()\n if eventp >= self.params['nodes']['profile'][u]:\n actual_status[u] = 1\n\n delta = self.status_delta(actual_status)\n self.status = actual_status\n self.actual_iteration += 1\n\n return self.actual_iteration - 1, delta\n","sub_path":"ndlib/models/epidemics/ProfileModel.py","file_name":"ProfileModel.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"534719630","text":"import os\nimport time\nfrom py4design import py3dmodel, buildingformeval\n#==============================================================================\ntime1 = time.perf_counter()\nflr2flr = 3.7 #m\n#make the floor plate\npyptlist = [[4,4,0], [-4,4,0], [-4,-4,0], [4,-4,0]]\nfloor_plate = py3dmodel.construct.make_polygon(pyptlist)\nextrude = py3dmodel.construct.extrude(floor_plate, [0,0,1],flr2flr)\nface_list = py3dmodel.fetch.topo_explorer(extrude, \"face\")\n\nwin_wall1 = (0.0,1.0,0.0)\nwin_wall2 = (0.0,-1.0,0.0)\n\n#get the external facing walls\nshade_occface_list = []\nroof_occface_list = []\next_list = []\nfor f in face_list:\n n = py3dmodel.calculate.face_normal(f)\n if n == win_wall1 or n == win_wall2:\n ext_list.append(f)\n \n elif n == (0.0, 0.0, 1.0):\n 
roof_occface_list.append(f)\n else:\n shade_occface_list.append(f)\n \n#construct the windows\ncut_wall_list = []\nwin_list = []\n\nwin1 = py3dmodel.construct.make_rectangle(1.5, 4)\nwin1 = py3dmodel.modify.reverse_face(win1)\nmpt_win1 = py3dmodel.calculate.face_midpt(win1)\nn_win1 = py3dmodel.calculate.face_normal(win1)\n\nwin2 = py3dmodel.construct.make_rectangle(2, 1.5)\nwin2 = py3dmodel.modify.reverse_face(win2)\nmpt_win2 = py3dmodel.calculate.face_midpt(win2)\nn_win2 = py3dmodel.calculate.face_normal(win2)\n\nmoved_pt1 = py3dmodel.modify.move_pt(mpt_win2, (0,1,0), 2.5 )\nmoved_pt2 = py3dmodel.modify.move_pt(mpt_win2, (0,-1,0), 2.5 )\nwin3 = py3dmodel.modify.move(mpt_win2, moved_pt1, win2)\nwin4 = py3dmodel.modify.move(mpt_win2, moved_pt2, win2)\ncmpd = py3dmodel.construct.make_compound([win2,win3,win4])\n\nn_win2 = py3dmodel.calculate.face_normal(win2)\n\nfor ext in ext_list:\n n = py3dmodel.calculate.face_normal(ext)\n mpt = py3dmodel.calculate.face_midpt(ext)\n ax1 = py3dmodel.construct.make_gp_ax3(mpt, n)\n if n == win_wall1:\n #create the windows\n ax2 = py3dmodel.construct.make_gp_ax3(mpt_win1, n_win1)\n mapped_win1 = py3dmodel.modify.map_cs(ax1, ax2, win1)\n mapped_win1 = py3dmodel.fetch.topo2topotype(mapped_win1)\n cut_wall = py3dmodel.construct.boolean_difference(ext, mapped_win1)\n meshed_wall = py3dmodel.construct.simple_mesh(cut_wall)\n cut_wall_list.extend(meshed_wall)\n win_list.append(mapped_win1)\n #create the corridor \n ext_corr = py3dmodel.construct.extrude(ext, n, 1.5)\n corrs = py3dmodel.fetch.topo_explorer(ext_corr, \"face\")\n for cor in corrs:\n c_n = py3dmodel.calculate.face_normal(cor)\n if c_n == (0,0,1):\n shade_occface_list.append(cor)\n elif c_n == (0,0,-1):\n shade_occface_list.append(cor)\n ext_par = py3dmodel.construct.extrude(cor, (0,0,1), 1.2)\n pars = py3dmodel.fetch.topo_explorer(ext_par, \"face\")\n for par in pars:\n p_n = py3dmodel.calculate.face_normal(par)\n if p_n == (1.0, 0.0,0.0):\n shade_occface_list.append(par)\n \n elif n == win_wall2:\n ax2 = py3dmodel.construct.make_gp_ax3(mpt_win2, n_win2)\n mapped_win2 = py3dmodel.modify.map_cs(ax1, ax2, cmpd) \n cut_wall = py3dmodel.construct.boolean_difference(ext, mapped_win2)\n meshed_wall = py3dmodel.construct.simple_mesh(cut_wall)\n cut_wall_list.extend(meshed_wall)\n \n mapped_win2 = py3dmodel.fetch.topo_explorer(mapped_win2, \"face\")\n win_list.extend(mapped_win2)\n #create shade for each window\n for w in mapped_win2:\n w_n = py3dmodel.calculate.face_normal(w)\n ext_win = py3dmodel.construct.extrude(w, w_n, 1)\n shades = py3dmodel.fetch.topo_explorer(ext_win, \"face\")\n for shade in shades:\n s_n = py3dmodel.calculate.face_normal(shade)\n if s_n == (0,0,1):\n shade_occface_list.append(shade)\n \n\ntime2 = time.perf_counter()\n# print(\"CONSTRUCTED MODEL\", (time2-time1)/60.0, \"mins\")\n# py3dmodel.utility.visualise([cut_wall_list, win_list, shade_occface_list, roof_occface_list], [\"WHITE\", \"BLUE\", \"WHITE\", \"WHITE\"]) \n \nprint(\"CALCULATING LOADS...\")\n\n#calculate ettv \ncurrent_path = os.path.dirname(__file__)\nparent_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))\nweatherfilepath = os.path.join(parent_path, \"example_files\", \"weatherfile\", \"SGP_Singapore.486980_IWEC.epw\" )\nshp_attribs_list = []\n\nfor wall in cut_wall_list:\n shp_attribs = buildingformeval.create_opaque_srf_shape_attribute(wall,2.9,\"wall\" )\n shp_attribs_list.append(shp_attribs)\n\nwin_area = 0\nfor window in win_list:\n area = py3dmodel.calculate.face_area(window)\n win_area = 
win_area + area\n shp_attribs = buildingformeval.create_glazing_shape_attribute(window, 2.8, 0.8,\"window\")\n shp_attribs_list.append(shp_attribs)\n\nfor shade in shade_occface_list:\n shp_attribs = buildingformeval.create_shading_srf_shape_attribute(shade, \"shade\")\n shp_attribs_list.append(shp_attribs)\n \nfor footprint in [floor_plate]:\n shp_attribs = buildingformeval.create_shading_srf_shape_attribute(footprint, \"footprint\")\n shp_attribs_list.append(shp_attribs)\n \nfor roof in roof_occface_list:\n shp_attribs = buildingformeval.create_opaque_srf_shape_attribute(roof,0.5,\"roof\" )\n shp_attribs_list.append(shp_attribs)\n \n#calculate sensible load\n#result_dictionary = buildingformeval.calc_ettv(shp_attribs_list,weatherfilepath)\n#ettv = result_dictionary[\"ettv\"]\n#facade_area = result_dictionary[\"facade_area\"]\n\noutdoor_temp = 32.8 #C\ndewpoint = 26.3 #C\nindoor_temp = 25.0 #C\nocc_sens_load = 115.0 #W/person\nocc_lat_load = 50.0 #W/person\nequip_load = 5.0 #W/m2\nlighting_load = 15.0 #W/m2\nppl_density = 2.0 #m2/person\nshgc = 0.7 #solar heat gain coefficient\nceiling_height = 3.5\n\nfloor_area = py3dmodel.calculate.face_area(floor_plate)\n#calculate sensible load for air-tight env\nlatent_load1 = buildingformeval.calc_latent_load(floor_area, \n area_per_person = ppl_density, \n watts_per_person = occ_lat_load)\n\nequip_lighting_load = buildingformeval.calc_sensible_load(0, 0, floor_area, \n 0, 0, \n equip_load_per_area = equip_load, \n occ_load_per_person = occ_sens_load, \n light_load_per_area = lighting_load, \n area_per_person = ppl_density)\n\nenv_load = buildingformeval.cal_envelope_conductance_load(shp_attribs_list, outdoor_temp, indoor_temp)\n\n#need to calculate the solar gain from window\nsolar_gain_nat = buildingformeval.calc_solar_gain_rad(shp_attribs_list, weatherfilepath, mode = \"max\")\nsolar_gain_cond = solar_gain_nat * shgc\nsensible_load1 = env_load + equip_lighting_load + solar_gain_cond\nplenum_height = buildingformeval.calc_mech_space(sensible_load1+latent_load1, \n indoor_temp, outdoor_temp, \n duct_air_vel = 15)\n\nprint(\"PLENUM\", plenum_height)\nsystem_d1 = buildingformeval.central_all_air_system(sensible_load1,latent_load1, \n floor_area, \n supply_temp_c = 8.0, \n rej_temp_c = outdoor_temp,\n chiller_efficiency = 0.4)\n\nsystem_d2 = buildingformeval.central_all_air_system(sensible_load1, latent_load1, \n floor_area, \n supply_temp_c = 8.0, \n rej_temp_c = outdoor_temp,\n chiller_efficiency = 0.6)\n\n#calculate sensible load for condensation free panels\nsensible_load2 = buildingformeval.calc_sensible_load(0, 0, floor_area, 0, 0, \n equip_load_per_area = 0.0, \n occ_load_per_person = occ_sens_load, \n light_load_per_area = 0.0, \n area_per_person = ppl_density)\nach_list = [50]\nfor ach in ach_list:\n #calculate sensible load for natural ventilation\n ach_dict = buildingformeval.calc_ach_4_equip(floor_area, flr2flr, equip_load, \n lighting_load, solar_gain_nat, \n ['air_velocity', win_area],\n mx_ach = ach,\n air_temp_c = outdoor_temp)\n \n temp_increase = ach_dict['temp_increase']\n mem_air_temp = outdoor_temp + temp_increase\n \n system_d3 = buildingformeval.con_free_panels_w_fans(sensible_load2, floor_area, \n out_temp_c = outdoor_temp, \n interior_temp_c = mem_air_temp, \n dewpt_temp_c = dewpoint, \n m2_per_fan= 15, fan_power = 75, \n percent_ceiling = 0.73, \n chiller_efficiency = 0.4)\n \n system_d4 = buildingformeval.con_free_panels_w_fans(sensible_load2, floor_area, \n out_temp_c = outdoor_temp, \n interior_temp_c = mem_air_temp, \n 
dewpt_temp_c = dewpoint, \n m2_per_fan= 15, fan_power = 75, \n percent_ceiling = 0.73, \n chiller_efficiency = 0.6)\n \n print(\"*******************************************************************\")\n # print(\"ACH PARAMETERS\", ach_dict)\n # print('Temp increase', temp_increase)\n # print(\"Interior Air Temp\", mem_air_temp)\n # print(\"SOLAR GAIN\", solar_gain_nat)\n \n print(\"LATENT LOAD\", latent_load1)\n print(\"SENSIBLE LOAD FOR AIR-BASED\", sensible_load1, \"SENSIBLE LOAD FOR RADIANT-BASED\", sensible_load2)\n print(\"*******************************************************************\")\n print(\"DECENTRALISED ALL-AIR-ENERGY\", system_d1[\"energy_consumed_hr\"], \"COP =\", system_d1[\"overall_cop\"])\n print(\"*******************************************************************\")\n print(\"CENTRALISED ALL-AIR-ENERGY\", system_d2[\"energy_consumed_hr\"], \"COP =\", system_d2[\"overall_cop\"])\n print(\"*******************************************************************\")\n print(\"DECENTRALISED CONDENSATION-FREE-ENERGY=\", system_d3[\"energy_consumed_hr\"], \"COP =\", system_d3[\"sensible_cop\"] ,\"PANEL SUPPLY TEMP=\", system_d3[\"supply_temperature_for_panels\"] - 273.15)\n print(\"*******************************************************************\")\n print(\"CENTRALISED CONDENSATION-FREE-ENERGY=\", system_d4[\"energy_consumed_hr\"], \"COP =\", system_d4[\"sensible_cop\"] ,\"PANEL SUPPLY TEMP=\", system_d4[\"supply_temperature_for_panels\"] - 273.15)\n print('Energy Demand', round(system_d4[\"energy_consumed_hr\"]),'W',\n '\\nPANEL CAPACITY', system_d4['panel_capacity'],\n '\\nFAN CAPACITY', system_d4['fan_capacity'],\n '\\nEVA CAPACITY', system_d4['eva_capacity'],\n \"\\nPANEL SUPPLY TEMP =\", system_d4[\"supply_temperature_for_panels\"] - 273.15)\n \n #print \"HOW MANY TIMES LOWER\", system_d1[\"energy_consumed_hr\"]/ system_d2 [\"energy_consumed_hr\"]\n print(\"*******************************************************************\")","sub_path":"example_scripts/evalbldgform/air2srf_example2.py","file_name":"air2srf_example2.py","file_ext":"py","file_size_in_byte":12132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"18212722","text":"import sys\nimport torch\n#import torch.beckends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd import Variable\nimport time\nimport copy\nimport utils\nimport predict\nimport numpy as np\nfrom Unet import UNet\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef train(model, optimizer, criterion, dataloaders, rstimg_path, num_epochs=10, lr=0.1, val_percent =0.05, cp=True, gpu=False):\n\n print('Let\\'s start training the model!! \\n')\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict()) #state_dict = Returns a dictionary containing a whole state of the module. 
(웨이트, 바이어스 등등 .key()로 뭐 저장되어있는지 확인가능)\n best_loss = 10000000.0\n best_acc = 0.0\n\n # loss 값 저장 - 나중에 그래프 그릴려고\n loss_val = {'train':[], 'val':[], 'best_loss_for_val':[]}\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs-1))\n print('-'*10)\n\n for phase in ['train', 'val']:\n\n running_loss = 0.0\n running_corrects = 0\n isShown = False\n\n\n for original, inputs in dataloaders[phase]:\n\n inputs = inputs.to(device)\n original = original.to(device)\n #print(inputs.size())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n #forward\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n\n #샘플이미지 출력\n if phase == 'val' and isShown == False:\n isShown = True\n utils.resultShow(inputs, outputs, original, rstimg_path, num_images=3, num_rows=1, title='{}'.format(epoch))\n\n #predict.visualize_model(model, dataloaders, num_images=6) #함수테스트용으로 넣은 것\n loss = criterion(outputs, original)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n #statistics\n running_loss += loss.item() #* inputs.size()[0]\n #print('{} Loss from mini batch: {:.4f}'.format(phase, running_loss))\n #running_corrects += torch.sum(np.abs(outputs - labels.data)**2)\n\n epoch_loss = running_loss / len(dataloaders[phase])\n #epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n #train, val 각각의 epoch loss 저장\n loss_val[phase].append(epoch_loss)\n print('{} Loss: {:.4f}'. format(phase, epoch_loss))\n\n #deep copy the model\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n\n #best loss 저장\n loss_val['best_loss_for_val'].append(best_loss)\n\n\n #오버피팅이 감지되면 train을 멈추게 만들 것\n\n\n #-----------------------------------\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Loss: {:4f}'.format(best_loss))\n #print('Best val acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n\n #그래프 그려봄\n utils.drawLossGraph(loss_val, rstimg_path)\n return model, loss\n\n #print('making it..')\n\n\n\n\n\n\n","sub_path":"180529_unet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"617944952","text":"#! 
/usr/bin/python3\n\n### REQUIRES python 3 !!!!\n\n## Run: ./sample.py\n## Reads from stdin and writes to stdout\n## For example:\n## ./sample.py test_out.txt\n\nimport pyfreeling\nimport traceback\nimport sys, os\nimport conll2tree\nimport generaroraciones\nfrom subprocess import Popen, PIPE\n\nruta_archivos = sys.argv[1]\n\n## ----------------------------------------------\n## ------------- MAIN PROGRAM ---------------\n## ----------------------------------------------\nprint(\"Se muere la acer....\")\n\nargOE_script='/home/pablo/repos/Linguakit/linguakit'\n\n## Check whether we know where to find FreeLing data files\nif \"FREELINGDIR\" not in os.environ :\n if sys.platform == \"win32\" or sys.platform == \"win64\" : os.environ[\"FREELINGDIR\"] = \"C:\\\\Program Files\"\n else : os.environ[\"FREELINGDIR\"] = \"/usr/local\"\n print(\"FREELINGDIR environment variable not defined, trying \", os.environ[\"FREELINGDIR\"], file=sys.stderr)\n\nif not os.path.exists(os.environ[\"FREELINGDIR\"]+\"/share/freeling\") :\n print(\"Folder\",os.environ[\"FREELINGDIR\"]+\"/share/freeling\",\n \"not found.\\nPlease set FREELINGDIR environment variable to FreeLing installation directory\",\n file=sys.stderr)\n sys.exit(1)\n\n\n# Location of FreeLing configuration files.\nDATA = os.environ[\"FREELINGDIR\"]+\"/share/freeling/\";\n\n# Init locales\npyfreeling.util_init_locale(\"default\");\n\n# create language detector. Used just to show it. Results are printed\n# but ignored (after, it is assumed language is LANG)\nla=pyfreeling.lang_ident(DATA+\"common/lang_ident/ident-few.dat\");\n\n# create options set for maco analyzer. Default values are Ok, except for data files.\nLANG=\"es\";\nop= pyfreeling.maco_options(LANG);\nop.set_data_files( \"\", \n DATA + \"common/punct.dat\",\n DATA + LANG + \"/dicc.src\",\n DATA + LANG + \"/afixos.dat\",\n \"\",\n DATA + LANG + \"/locucions.dat\", \n DATA + LANG + \"/np.dat\",\n DATA + LANG + \"/quantities.dat\",\n DATA + LANG + \"/probabilitats.dat\");\n\n# create analyzers\ntk=pyfreeling.tokenizer(DATA+LANG+\"/tokenizer.dat\");\nsp=pyfreeling.splitter(DATA+LANG+\"/splitter.dat\");\nsid=sp.open_session();\nmf=pyfreeling.maco(op);\n\n# activate mmorpho odules to be used in next call\nmf.set_active_options (False, # UserMap \n True, # NumbersDetection, \n True, # PunctuationDetection, \n True, # DatesDetection, \n True, # DictionarySearch, \n True, # AffixAnalysis, \n False, # CompoundAnalysis, \n True, # RetokContractions,\n True, # MultiwordsDetection, \n True, # NERecognition, \n True, # QuantitiesDetection, \n True); # ProbabilityAssignment \n\n# create tagger, sense anotator, and parsers\ntg=pyfreeling.hmm_tagger(DATA+LANG+\"/tagger.dat\",True,2);\nsen=pyfreeling.senses(DATA+LANG+\"/senses.dat\");\nwsd = pyfreeling.ukb(DATA+LANG+\"/ukb.dat\");\nchunker= pyfreeling.chart_parser(DATA+LANG+\"/chunker/grammar-chunk.dat\");\nparser = pyfreeling.dep_lstm(DATA+LANG+\"/dep_lstm/params-es.dat\");\n\nfor filepath in os.listdir(ruta_archivos):\n file = os.path.join(ruta_archivos, filepath)\n\n process = Popen([argOE_script, 'rel', 'es', file], stdout=PIPE)\n (output, err) = process.communicate()\n exit_code = process.wait()\n print(output.decode('utf-8'))\n\n content = open(file, 'r').read()\n \n l = tk.tokenize(content)\n ls = sp.split(l)\n ls = mf.analyze(ls)\n ls = tg.analyze(ls)\n ls = sen.analyze(ls)\n ls = wsd.analyze(ls)\n ls = chunker.analyze(ls)\n ls = parser.analyze(ls)\n \n out = pyfreeling.output_conll()\n res = out.PrintResults(ls)\n #print(res)\n conll_sentences = 
res.split('\\n\\n')\n conll_sentences.pop()\n contador = 0\n contador_corre = 0\n for conll_sentence in conll_sentences:\n try:\n dep_tree = conll2tree.conll2tree(conll_sentence).children[0][1]\n #dep_tree.display(0)\n sentenceListArgOE=generaroraciones.main(dep_tree)\n #print(sentenceListArgOE)\n contador_corre += 1\n except:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n traceback.print_exc()\n #print(exc_type, fname, exc_tb.tb_lineno)\n #print(conll_sentence)\n #print(sys.exc_info())\n exit()\n #print(\"##########################################################\")\n #print(contador)\n #print(contador_corre)\n #print(\"##########################################################\")\n\n \n# clean up \nsp.close_session(sid);\n \n","sub_path":"python3/sample.py.bkp.py","file_name":"sample.py.bkp.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"64546675","text":"\"\"\"You have three stacks of cylinders where each cylinder has the same diameter, but they may vary in height. \nYou can change the height of a stack by removing and discarding its topmost cylinder any number of times.\n\nFind the maximum possible height of the stacks such that all of the stacks are exactly the same height. \nThis means you must remove zero or more cylinders from the top of zero or more of the three stacks until they are all the same height, then return the height.\n\nEg : \n h1=[1,2,1,1]\n h2=[1,1,2]\n h3=[1,1]\n \n There are 4,3 and 2 cylinders in the three stacks, with their heights in the three arrays.\n Remove the top 2 cylinders from h1 (heights = [1, 2]) and from h2 (heights = [1, 1]) so that the three stacks all are 2 units tall. Return 2 as the answer.\"\"\"\n\n#hackerrank : https://www.hackerrank.com/challenges/equal-stacks/problem\n\n#python code :\n\ndef equalStacks(h1, h2, h3):\n # Write your code here\n s1, s2, s3 = map(sum, (h1, h2, h3))\n while h1 and h2 and h3:\n m = min(s1, s2, s3)\n while s1 > m: s1 -= h1.pop(0)\n while s2 > m: s2 -= h2.pop(0)\n while s3 > m: s3 -= h3.pop(0)\n if s1 == s2 == s3: return s1\n return 0\n","sub_path":"Equal Stack.py","file_name":"Equal Stack.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"422117017","text":"from django.shortcuts import render, HttpResponse\nfrom django.views import View\n\n# Create your views here.\nclass PolicyExtractorService(View):\n '''\n 扫描营业执照\n\n '''\n def get(self, request):\n\n return HttpResponse(\"GET request from PolicyExtractorService\")\n\n def post(self, request):\n\n '''\n 首先导入一个图片的地址.\n\n :param request:\n :return:\n '''\n # 首先是pic地址\n pic = request.POST.get('url') # POST必须大写\n import pytesseract\n import cv2\n import matplotlib.pyplot as plt\n import matplotlib\n matplotlib.use(\"Agg\")\n import dlib#这个windows装很麻烦,需要cmake ,linux直接pip\n import matplotlib.patches as mpatches\n from skimage import io, draw, transform, color # pip install scikit_image来安装\n import numpy as np\n import pandas as pd\n import re\n\n pic=cv2.imread(pic,0)#后面加0表示变成1个通道的灰度图.\n\n '''\n cv2\n 里面几个重要函数:\n 1.cv2.imread(pic,0)\n 2.\n '''\n\n\n\n\n\n\n\n\n\n # gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY) # 灰度处理\n # cv2.imshow('gray', gray)\n #先做二值,加强了鲁棒性. 
表示超过50亮度的都算作255.也就是只有足够黑的点才算做字体.\n\n retval, imagebin = cv2.threshold(pic, 30, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n ## 将照片去除\n\n img_bilateralFilter = cv2.bilateralFilter(imagebin, 40, 100, 100) # 高斯双边滤波\n\n text = pytesseract.image_to_string(img_bilateralFilter, lang='chi_sim')\n\n #还是ocr框架识别率不行.\n\n #opencv文档\n #http://www.opencv.org.cn/opencvdoc/2.3.2/html/search.html?q=imread&check_keywords=yes&area=default\n\n\n\n\n\n\n\n\n\n\n return HttpResponse(\"POST request from PolicyExtractorService\")\n\n\n\n\n\n\n\n\n\n\n detector = dlib.get_frontal_face_detector()\n #dlib :https://www.cnblogs.com/as3asddd/p/7257820.html\n # image = io.imread(pic)\n\n # help(detector)\n # 下面一行的2是一个上采样参数,表示把图片先放大多少倍.越大图片越精细.图片输入过小就需要上采样放大细节.\n # dets = detector(image, 2) # 使用detector进行人脸检测 dets为返回的结果\n\n\n\n image = cv2.imread(pic)\n dets = detector(image, 2) # 使用detector进行人脸检测 dets为返回的结果\n\n for i, face in enumerate(dets):\n # 在图片中标注人脸,并显示\n left = face.left()\n top = face.top()\n right = face.right()\n bottom = face.bottom()\n cv2.rectangle(image, (left, bottom), (right, top), (0, 255, 0), 2)#把矩形画到image这个图片参数上.\n # rect = mpatches.Rectangle((left, bottom), right - left, top - bottom,\n # fill=False, edgecolor='red', linewidth=1)\n # rect = mpatches.Rectangle((12, 12), 25, 52,\n # fill=False, edgecolor='red', linewidth=1)\n\n '''\n 画图还是认准cv2,matplotlib bug太多.\n '''\n\n\n predictor = dlib.shape_predictor(\"../shape_predictor_5_face_landmarks.dat\")\n # 因为代码是在new目录下运行的manager所以这里是.. 表示上级目录里面找.\n # predictor = dlib.shape_predictor(\"/data/zb/shape_predictor_5_face_landmarks.dat\")\n # http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n\n #画关键点\n\n detected_landmarks = predictor(image, dets[0]).parts()\n\n\n landmarks = np.array([[p.x, p.y] for p in detected_landmarks])\n\n\n for i in landmarks:\n cv2.circle(image, (i[0],i[1]),1,(0,0,255))\n cv2.imwrite('22!!!!!!!!!!.png', image)\n\n # '''\n # 写一个for循环,���图片进行旋转,判断,因为dlib识别不了旋转角度大的图片.\n #\n # '''\n # jiaodu=np.arange(0,360,10)\n\n # imgInfo = image.shape\n # height = imgInfo[0]\n # width = imgInfo[1]\n # deep = imgInfo[2]\n # cv2.imwrite(\"dsfasdfs.png\",image)\n # for i in jiaodu:\n # matRotate = cv2.getRotationMatrix2D((height * 0.5, width * 0.5), i, 1) # mat rotate 1 center 2 angle 3 缩放系数\n #\n # dst = cv2.warpAffine(image, matRotate, (height, width))\n # #cv2输出的后缀名一定要写.\n # cv2.imwrite(str(i)+\".png\",dst)\n\n\n\n\n #dlib:api http://dlib.net/\n\n ## 将眼睛位置可视化\n # plt.figure()\n # ax = plt.subplot(111)\n # ax.imshow(image)\n # plt.axis(\"off\")\n # plt.plot(landmarks[0:4,0],landmarks[0:4,1],'ro')\n # for ii in np.arange(4):\n # plt.text(landmarks[ii,0]-10,landmarks[ii,1]-15,ii)\n # plt.show()\n\n ## 计算眼睛的倾斜角度,逆时针角度\n import numpy as np\n\n #注意arctan有2个值.一个jiao一个jiao+180,需要用鼻子位置来判断.\n def twopointcor(point1, point2):\n \"\"\"point1 = (x1,y1),point2 = (x2,y2)\"\"\"\n deltxy = point2 - point1\n corner = np.arctan(deltxy[1] / deltxy[0]) * 180 / np.pi\n return corner\n\n ## 计算多个角度求均值\n corner10 = twopointcor(landmarks[1, :], landmarks[3, :])\n corner23 = twopointcor(landmarks[1, :], landmarks[2, :])\n # corner20 = twopointcor(landmarks[2, :], landmarks[0, :])\n corner = np.mean([corner10, corner23])\n\n\n\n\n\n\n ## 计算图像的身份证倾斜的角度\n def IDcorner(landmarks):\n \"\"\"landmarks:检测的人脸5个特征点\n 经过测试使用第0个和第2个特征点计算角度较合适\n \"\"\"\n corner20 = twopointcor(landmarks[2, :], landmarks[0, :])\n corner = np.mean([corner20])\n return corner\n\n corner = IDcorner(landmarks)\n\n\n\n ## 将照片转正\n def rotateIdcard(image):\n \"image :需要处理的图像\"\n ## 
使用dlib.get_frontal_face_detector识别人脸\n detector = dlib.get_frontal_face_detector()\n dets = detector(image, 2) # 使用detector进行人脸检测 dets为返回的结果\n ## 检测人脸的眼睛所在位置\n predictor = dlib.shape_predictor(\"../shape_predictor_5_face_landmarks.dat\")\n detected_landmarks = predictor(image, dets[0]).parts()\n landmarks = np.array([[p.x, p.y] for p in detected_landmarks])\n corner = IDcorner(landmarks)\n ## 旋转后的图像\n image2 = transform.rotate(image, corner, clip=False)\n image2 = np.uint8(image2 * 255)\n ## 旋转后人脸位置\n det = detector(image2, 2)\n return image2, det\n\n ## 转正身份证:\n image = io.imread(pic)\n image2, dets = rotateIdcard(image)\n\n\n\n ## 可视化修正后的结果\n cv2.imwrite(\"dsfadsf.png\",image2)\n\n # 在图片中标注人脸,并显示\n left = dets[0].left()\n top = dets[0].top()\n right = dets[0].right()\n bottom = dets[0].bottom()\n rect = mpatches.Rectangle((left, bottom), (right - left), (top - bottom),\n fill=False, edgecolor='red', linewidth=1)\n\n ## 照片的位置(不怎么精确)\n width = right - left\n high = top - bottom\n left2 = np.uint(left - 0.5 * width)\n bottom2 = np.uint(bottom + 0.5 * width)\n rect = mpatches.Rectangle((left2, bottom2), 1.8 * width, 2.2 * high,\n fill=False, edgecolor='blue', linewidth=1)\n\n\n ## 身份证上人的照片\n top2 = np.uint(bottom2 + 2.2 * high) #稍微把图片放大一点,然后扣除\n right2 = np.uint(left2 + 1.8 * width)\n image3 = image2[top2:bottom2, left2:right2, :]\n import time\n cv2.imwrite(\"身份证扣出来的图片\"+str(time.time())+\".png\",image3)\n # plt.imshow(image3)\n\n\n\n gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) # 灰度处理\n # cv2.imshow('gray', gray)\n #先做二值,加强了鲁棒性. 表示超过50亮度的都算作255.也就是只有足够黑的点才算做字体.\n retval, imagebin = cv2.threshold(gray, 120, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n ## 将照片去除\n imagebin[0:bottom2, left2:-1] = 255\n img_bilateralFilter = cv2.bilateralFilter(imagebin, 40, 100, 100) # 高斯双边滤波\n\n '''\n 看看图片\n '''\n cv2.imwrite(\"剩余的\"+str(time.time())+\".png\",img_bilateralFilter)\n\n text = pytesseract.image_to_string(img_bilateralFilter, lang='chi_sim')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return HttpResponse(\"POST request from PolicyExtractorService\")\n","sub_path":"jiexi2-master加很多工具/apps/ocr2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"92812257","text":"import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nfrom rationale_net.models.factory import RegisterModel\nfrom rationale_net.models.abstract_encoder import AbstractEncoder\nfrom rationale_net.models.transformer_modules import PositionalEncoder, EncoderLayer, get_clones\n\n@RegisterModel('transformer')\nclass TransformerEncoder(AbstractEncoder):\n\n def __init__(self, args, embeddings):\n super().__init__(args, args.d_model*args.max_word_length, embeddings)\n \n self.N = self.args.N\n d_model = self.args.d_model\n embedding_size = self.args.embedding_size\n \n self.pe = PositionalEncoder(self.args.cuda, self.args.max_word_length, embedding_size)\n self.layers = get_clones(EncoderLayer(args), self.N)\n self.norm = nn.LayerNorm((self.args.max_word_length, d_model))\n\n\n def forward(self, x_char=None, x_word=None, mask=None):\n x = super().forward(x_char, x_word)\n \n x = self.pe(x)\n\n for i in range(self.N):\n x = self.layers[i](x, mask)\n hidden = self.norm(x)\n\n return self.output(hidden) 
\n","sub_path":"rationale_net/models/transformer_encoder.py","file_name":"transformer_encoder.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"291488379","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))\nfrom SimUasWrapper import SimUasWrapper\nfrom Uas import Uas\nfrom Loggers import Loggers\nimport threading\nimport time\n\nclass SimUasWrapperSpec(unittest.TestCase):\n\n def test_sim_uas_wrapper_spawn(self):\n\n uas0 = Uas(0, None)\n Loggers.initialise([\n uas0\n ])\n uas0.boot()\n # su1 = SimUasWrapper(0)\n uas0.connect_mavlink()\n i = 0\n while i < 100:\n try:\n # print('%s:%s %s:%s:%s' % (su1.command.command, su1.command.process.returncode, su1.command.output, su1.command.error, su1.command.status))\n Loggers.sim_vehicle_warning(0, uas0.sim_uas.command.command)\n Loggers.sim_vehicle_warning(0, uas0.sim_uas.command.status)\n Loggers.sim_vehicle_warning(0, uas0.sim_uas.command.output)\n Loggers.sim_vehicle_error(0, uas0.sim_uas.command.error)\n\n # Mavlink\n uas0.get_mavlink_messages()\n\n except Exception as e:\n print(str(e))\n time.sleep(1)\n i +=1 \n\n self.assertEqual(1, 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/SimUasWrapperTestCase.py","file_name":"SimUasWrapperTestCase.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19358671","text":"\n\nfrom xai.brain.wordbase.nouns._phalanx import _PHALANX\n\n#calss header\nclass _PHALANXES(_PHALANX, ):\n\tdef __init__(self,): \n\t\t_PHALANX.__init__(self)\n\t\tself.name = \"PHALANXES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"phalanx\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_phalanxes.py","file_name":"_phalanxes.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"504856339","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nfrom __future__ import print_function\n\nfrom unittest import TestCase\nfrom datetime import datetime\n\nimport os\nimport shutil\nimport six\nimport subprocess as sp\nimport tempfile\nimport zipfile\n\n\nclass ZipFile(zipfile.ZipFile):\n def _extract_member(self, member, targetpath, pwd):\n if not isinstance(member, zipfile.ZipInfo):\n member = self.getinfo(member)\n ret_val = super()._extract_member(member, targetpath, pwd)\n attr = member.external_attr >> 16\n os.chmod(ret_val, attr)\n return ret_val\n\n\nclass GraknServer(object):\n DISTRIBUTION_LOCATION = 'external/graknlabs_grakn_core/grakn-core-all-mac.zip'\n DISTRIBUTION_ROOT_DIR = 'grakn-core-all-mac'\n\n def __init__(self):\n self.__unpacked_dir = None\n\n def __enter__(self):\n if not self.__unpacked_dir:\n self._unpack()\n sp.check_call([\n 'grakn', 'server', 'start'\n ], cwd=os.path.join(self.__unpacked_dir, GraknServer.DISTRIBUTION_ROOT_DIR))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n sp.check_call([\n 'grakn', 'server', 'stop'\n ], cwd=os.path.join(self.__unpacked_dir, GraknServer.DISTRIBUTION_ROOT_DIR))\n shutil.rmtree(self.__unpacked_dir)\n\n def _unpack(self):\n self.__unpacked_dir = tempfile.mkdtemp(prefix='grakn')\n with ZipFile(GraknServer.DISTRIBUTION_LOCATION) as zf:\n zf.extractall(self.__unpacked_dir)\n\n\nclass test_Base(TestCase):\n \"\"\" Sets up DB for use in tests \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(test_Base, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(test_Base, cls).tearDownClass()\n","sub_path":"tests/integration/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"87408488","text":"import hashlib\nimport json\n\nfrom time import time\nfrom uuid import uuid4\nfrom flask import Flask\nfrom flask import jsonify\nfrom textwrap import dedent\nfrom blockchain import Blockchain\n\napp = Flask(__name__)\n\nnode_identifier = str(uuid4()).replace('-','')\n\nblockchain = Blockchain()\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n #\"We will mine a new block.\"\n\n last_block = blockchain.last_block\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof, )\n\n #Award a coin for mining\n\n blockchain.new_transaction(\n sender=\"0\",\n recipient=node_identifier,\n amount=1,\n )\n\n #Add the new block to chain\n\n previous_hash = blockchain.hash(last_block)\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message' :\"New block Forged\",\n 'index':block['index'],\n 'transactions' :block['transactions'],\n 'proof' :block['proof'],\n 'previous_hash': block['previous_hash'],\n\n }\n\n return jsonify(response), 200\n\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef new_transaction():\n # \"We will add a new transaction.\"\n values = request.get_json()\n\n # Check if the required fields are in the posted data\n required = ['sender', 'recipient', 'amount']\n\n if not all(k in values for k in required):\n return \"Missing Values\", 400\n\n #Create a new transaction\n index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])\n\n response = {'message': f'Transaction will be added to Block {index}'}\n\n return jsonify(response), 201\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': 
len(blockchain.chain),\n }\n\n return jsonify(response), 200\n\nif __name__==\"__main__\":\n app.run(host='127.0.0.1', port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"237391717","text":"import json\nfrom Inv import ItemEffects\nfrom Obj import Look\n\nimport sys, os\n\n# Disable printing\ndef blockPrint():\n sys.stdout = open(os.devnull, 'w')\n\n# Restore printing\ndef enablePrint():\n sys.stdout = sys.__stdout__\n\n\ndef reset_inventory():\n with open(\"Inv/Bag reset.json\", \"r\") as file: # Prep json for reading\n Bag = json.load(file) # Read json file convert to dictionary\n write_inventory(Bag)\n\ndef read_inventory(): # returns dictionary of Bag\n try:\n with open(\"Inv/Bag.json\", \"r\") as file: # Prep json for reading\n Bag = json.load(file) # Read json file convert to dictionary\n except: # Nothing in inventory: empty file\n Bag = {}\n\n return Bag\n\ndef write_inventory(Bag): # write to inv\n with open(\"Inv/Bag.json\", \"w\") as file: # Prep json for writing\n json.dump(Bag, file, indent=4, sort_keys=True) # Rewrite the inventory back with new items\n\ndef print_inventory(): #prints the items in inv (inv cmd)\n Bag = read_inventory()\n print(\"Currently in your bag you hold: \")\n for key in Bag.keys():\n print(\" \" + key)\n\ndef add_item_to_inventory(itemDict): # input dictionary from Look (not directly from input) for pick up cmd\n Bag = read_inventory()\n Bag[itemDict[\"name\"]] = itemDict # add a new key and value\n write_inventory(Bag)\n\ndef find_item_name_inventory(item): # find if an item exists in inv from input\n s = \"\"\n item = s.join(item).lower()\n Bag = read_inventory()\n for key in Bag.keys():\n if item.find(key) != -1:\n return key\n print(\"You see no such item in your inventory\")\n return False\n\ndef examine_item_in_inventory(itemName): #part x item cmd and Look examine function\n Bag = read_inventory()\n print(Bag[itemName][\"examine\"])\n\ndef wear_item(item, cur_place):\n blockPrint() # avoid printing error msg for looking in inv and places\n itemName = find_item_name_inventory(item)\n\n if itemName == False:\n itemName, cur_place = Look.find_item_name(item, cur_place) # check if item exists in that place\n enablePrint() # allow print\n if itemName == False: # can't find item\n print(\"I cannot find that item in your inventory or here\")\n return cur_place\n else:\n # item in places\n Look.pick_up_item(itemName, cur_place) # make sure to pick up item if in places since we work in inventory from now\n enablePrint() # allow print again in case it didn't go thru loop\n\n Bag = read_inventory()\n try: # check if can be used\n wearable = Bag[itemName][\"wear\"]\n except:\n print(\"This item cannot be worn\")\n return cur_place\n try: # see if there is special dialogue\n print(Bag[itemName][\"wearText\"][cur_place])\n except:\n print(\"You wear the \" + itemName)\n write_inventory(Bag)\n cur_place = ItemEffects.special_check(itemName, cur_place) # update cur place based on special effects of item\n return cur_place\n\n\ndef use_item(item, cur_place): # use or break item cmd (for now only break)\n itemName = find_item_name_inventory(item) # check if item exists in inv\n Bag = read_inventory()\n\n if itemName == False: # can't find item\n print(\"Pick up items first, to use items they must be in your inventory. 
\")\n return cur_place\n else:\n try: # check if can be used anywhere\n Bag[itemName][\"usePlaces\"]\n except:\n print(\"This item cannot be used\")\n return cur_place\n if cur_place not in Bag[itemName][\"usePlaces\"]:\n print(f\"You see nowhere to use the {itemName} in the {cur_place}\")\n return cur_place\n else: # successfully used\n print(Bag[itemName][\"usedText\"][cur_place])\n write_inventory(Bag)\n cur_place = ItemEffects.special_check(itemName, cur_place) # check for certain things like unlocking locations\n return cur_place\n\ndef eat_item(item): # eat item cmd\n if item != []:\n Bag = read_inventory()\n itemName = find_item_name_inventory(item) # check if exists\n\n if itemName == False:\n print(\"The item must be in your inventory for you to consume it\")\n return\n else:\n try:\n print(Bag[itemName][\"eat\"])\n except:\n print(\"I don't think the \" + itemName + \" would agree with you\")\n else:\n print(\"Indicate what you are eating. \")\n\n\n\n","sub_path":"Inv/Inventory.py","file_name":"Inventory.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"285407173","text":"import sys\nfrom collections import Counter\nimport math\n\ndef get_file_names():\n train_files = open(sys.argv[1],'r',encoding=\"latin-1\")\n train_files_list = []\n for t in train_files:\n train_files_list.append(t.replace('\\n',''))\n con_train_list = [ name for name in train_files_list if name[0].lower() == 'c']\n lib_train_list = [ name for name in train_files_list if name[0].lower() == 'l']\n train_files.close()\n return con_train_list,lib_train_list, len(con_train_list), len(lib_train_list)\n\ndef smoothening(vocab, n, nk):\n numerator = float(nk+1)\n denominator = float(n+vocab)\n return_value = (numerator/denominator)\n return return_value\n\ndef main():\n #get file names in the train dataset\n c_files, l_files, c_file_count, l_file_count = get_file_names()\n # Count and list words in con and lib dataset separately\n c_Counter = Counter()\n l_Counter = Counter()\n c_text = []\n l_text = []\n #con dataset\n for c_file in c_files:\n c_data = open(c_file,'r',encoding=\"latin-1\")\n for c in c_data:\n c_text.append(c.replace('\\n','').lower())\n c_data.close()\n c_Counter.update(c_text)\n #lib dataset\n for l_file in l_files:\n l_data = open(l_file,'r',encoding=\"latin-1\")\n for l in l_data:\n l_text.append(l.replace('\\n','').lower())\n l_data.close()\n l_Counter.update(l_text)\n # get vocab, con and lib count\n con_count = len(c_text)\n lib_count = len(l_text)\n vocab_text = set(c_text + l_text)\n vocab_count = len(vocab_text)\n #print('con: ' + str(con_count) + ' || lib: ' + str(lib_count) + ' || vocab: ' + str(vocab_count))\n\n lib_top_20 = l_Counter.most_common(20)\n for l in lib_top_20:\n print(l[0], '%.4f' % smoothening(vocab_count, lib_count, int(l[1])))\n print()\n\n con_top_20 = c_Counter.most_common(20)\n for c in con_top_20:\n print(c[0], '%.4f' % smoothening(vocab_count, con_count, int(c[1])))\n\n\nif __name__=='__main__':\n main()\n","sub_path":"NaiveBayes/topwords.py","file_name":"topwords.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211360867","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport signal\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom user.models import User\nfrom chat.models import Room\n\nsignal.signal(signal.SIGINT, 
signal.SIG_DFL)\n\n\nclass Command(BaseCommand):\n args = 'none'\n help = ''\n\n def handle(self, *args, **options):\n\n rooms = Room.objects.filter(\n users__isnull=False,\n chat_type=Room.PRIVATE_BOT,\n app_id='o')\n\n a_rooms = []\n for room in rooms:\n for user in room.users:\n try:\n User.objects.get(id=user).delete()\n except:\n print(\"invalid User\", user)\n a_rooms.append(room.id)\n\n print(\"deleting\", len(a_rooms),\" olo rooms\")\n Room.objects.filter(id__in=a_rooms).delete()\n","sub_path":"olo/management/commands/delete_olo_rooms.py","file_name":"delete_olo_rooms.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"39818709","text":"__author__ = 'tuerke'\n\nfrom .. import data\n\ndef copy_geom(reference_image, images, attributes=None):\n\tif(attributes == None):\n\t\tattributes=['indexOrigin','voxelSize','voxelGap','rowVec','columnVec','sliceVec']\n\tfor image in images:\n\t\tfor attribute in attributes:\n\t\t\timage.setProperty(attribute,reference_image.getProperty(attribute))","sub_path":"isis/tools/copy_geom.py","file_name":"copy_geom.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"591585230","text":"start=int(input(\"enter start km: \"))\r\nend=int(input(\"Enter end km: \"))\r\npeak=int(input())\r\ndistance=end-start\r\nif(distance>5):\r\n distance=distance-5\r\n fare=100+(distance*8)\r\n print(fare)\r\n if(peak==1):\r\n fare1=(fare+(0.25*fare))\r\n print(fare1)\r\nelse:\r\n print(\"Fare=100\")\r\n","sub_path":"ZEN Class/1/uberprob.py","file_name":"uberprob.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"322986826","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.shortcuts import render\r\n\r\ndef home(request):\r\n return render(request, 'home.html')\r\n\r\n#counting words\r\ndic = {}\r\ndef result(request):\r\n given_sentence = request.GET['fulltext']\r\n splited_sentence = given_sentence.split()\r\n length = len(splited_sentence)\r\n\r\n for i in splited_sentence:\r\n if i in dic:\r\n #dic에 splited_sentence[i]의 value 값 + 1\r\n dic[i] += 1\r\n else:\r\n #dic에 splited_sentence[i]에 key & value 쌍 추가\r\n dic[i] = 1\r\n return render(request, 'result.html', {\r\n 'length': length,\r\n 'text': given_sentence,\r\n 'result': dic\r\n })\r\n\r\n\r\n","sub_path":"django_hw1/hwhwhw/hwhwhwapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"12792301","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup, find_packages\n\n\ndescription = \"\"\"\nElizabeth Arkham Asylum for the Criminally Insane\n\"\"\"\n\nsetup(\n name='arkham',\n version='___version___',\n description=description,\n long_description=description,\n author='the S.H.I.E.L.D TEAM',\n author_email='waptech@sohu-inc.com',\n url='https://github.com/mSOHU/arkham',\n packages=find_packages(),\n install_requires=[\n 'pyyaml',\n 'pika==0.10.1-dev0',\n 'gevent==1.1rc1',\n ],\n entry_points={\n 'console_scripts': [\n 'arc=arkham.consumer:consumer_entry',\n 'ark-consumer=arkham.consumer:consumer_entry',\n 'ark-rpc=arkham.rpc:rpc_entry',\n ]\n 
},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"399942707","text":"import importlib.util\nimport tempfile\nimport uuid\n\nfrom datamodel_code_generator import (\n BaseModel,\n CustomRootType,\n DataModelField,\n PythonVersion,\n)\nfrom datamodel_code_generator.parser.openapi import OpenAPIParser\n\nfrom openapi_to_fastapi.logger import logger\n\n\ndef generate_model_from_schema(schema: str) -> str:\n \"\"\"\n Given an OpenAPI schema, generate pydantic models from everything defined\n in the \"components/schemas\" section\n\n :param schema: Content of an OpenAPI spec, plain text\n :return: Importable python code with generated models\n \"\"\"\n parser = OpenAPIParser(\n BaseModel,\n CustomRootType,\n DataModelField,\n base_class=\"pydantic.BaseModel\",\n custom_template_dir=None,\n extra_template_data=None,\n target_python_version=PythonVersion.PY_37,\n text=schema,\n dump_resolve_reference_action=None,\n validation=True,\n field_constraints=False,\n snake_case_field=False,\n strip_default_none=False,\n aliases=None,\n )\n\n result = parser.parse()\n return str(result)\n\n\ndef load_models(schema: str, name: str = \"\", cleanup: bool = True):\n \"\"\"\n Generate pydantic models from OpenAPI spec and return a python module,\n which contains all the models from the \"components/schemas\" section.\n This function will create a dedicated python file in OS's temporary dir\n and imports it\n :param schema: OpenAPI spec, plain text\n :param name: Prefix for a module name, optional\n :param cleanup: Whether to remove a file with models afterwards\n :return: Module with pydantic models\n \"\"\"\n prefix = name.replace(\"/\", \"\").replace(\" \", \"\").replace(\"\\\\\", \"\") + \"_\"\n with tempfile.NamedTemporaryFile(\n prefix=prefix, mode=\"w\", suffix=\".py\", delete=cleanup\n ) as tmp_file:\n model_py = generate_model_from_schema(schema)\n tmp_file.write(model_py)\n if not cleanup:\n logger.info(\"Generated module %s: %s\", name, tmp_file.name)\n tmp_file.flush()\n module_name = f\"oas_models_{uuid.uuid4()}\"\n spec = importlib.util.spec_from_file_location(module_name, tmp_file.name)\n if spec.loader:\n return spec.loader.load_module(module_name)\n else:\n raise ValueError(f\"Failed to load module {module_name}\")\n","sub_path":"openapi_to_fastapi/model_generator.py","file_name":"model_generator.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"122175836","text":"import re\nimport json\nfrom caliper.server.parser_process import parser_log\n\ndef stress_ng_parser(content,outfp):\n result = 0\n if re.search(r'stress-ng: info:\\s+\\[[0-9]+\\]\\scpu\\s+\\d+\\s+(\\d+\\.\\d+).*',content):\n real_time = re.search(r'stress-ng: info:\\s+\\[[0-9]+\\]\\scpu\\s+\\d+\\s+(\\d+\\.\\d+).*',content)\n result = real_time.group(1)\n outfp.write(content)\n return result \n\ndef stressng(filePath, outfp):\n\n cases = parser_log.parseData(filePath)\n result = []\n for case in cases:\n caseDict = {}\n titleGroup = re.search('\\[test:([\\s\\S]+)stress-', case)\n if titleGroup != None:\n caseDict[parser_log.TOP] = titleGroup.group(0)\n\n caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)\n\n my_regex = '%s([\\s\\S]+)\\[status\\]:' % (\"stress-\")\n center = re.search(my_regex, case)\n table_contents = []\n if center != None:\n tableDict = {}\n data = 
center.group(1).strip()\n topstr_group = re.search(\"stress[\\s\\S]+?\\[[\\d]+\\]([\\s\\S]+\\n)stress-ng: info: \\[\\d+\\] stressor\", data)\n if topstr_group is not None:\n topstr = topstr_group.groups()[0]\n top = re.sub(\"stress([\\s\\S]+?)\\[[\\d]+\\] \", \"\", topstr.strip())\n tableDict[parser_log.CENTER_TOP] = top\n\n lines = data.splitlines()\n isTop = True\n table = []\n td = ['stressor', 'bogo ops', 'real time(secs)', 'usr time(secs)', 'sys time(secs)',\n 'bogo ops/s(real time)', 'bogo ops/s(usr+sys time)']\n table.append(td)\n for line in lines:\n values = []\n if not isTop:\n value = re.sub(\"stress([\\s\\S]+?)\\[[\\d]+\\]\", \"\", line)\n cells = value.split(\" \")\n for table_title in cells:\n title = table_title.strip()\n if title != '':\n values.append(title)\n\n if line.endswith(\"time)\"):\n isTop = False\n if len(values) != 0:\n table.append(values)\n tableDict[parser_log.TABLE] = table\n table_contents.append(tableDict)\n caseDict[parser_log.TABLES] = table_contents\n result.append(caseDict)\n result = json.dumps(result)\n outfp.write(result)\n return result\n\nif __name__ == \"__main__\":\n infile = \"stressng_output.log\"\n outfile = \"stressng_json.txt\"\n outfp = open(outfile, \"a+\")\n stressng(infile, outfp)\n # parser1(content, outfp)\n outfp.close()\n\n","sub_path":"handlers/stressng_parser.py","file_name":"stressng_parser.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"209042466","text":"from keys import *\nimport botometer\nimport numpy as np\nfrom keywords import political_words\n\n\n# setup\nrapidapi_key = botometer_rapid_api_key # now it's called rapidapi key\ntwitter_app_auth = {\n 'consumer_key': consumer_key,\n 'consumer_secret': consumer_secret,\n 'access_token': access_token,\n 'access_token_secret': access_token_secret,\n}\nbom = botometer.Botometer(wait_on_ratelimit=True,\n rapidapi_key=rapidapi_key,\n **twitter_app_auth)\n\n\ndef isBot(userid):\n result = bom.check_account(userid)\n display_scores = result[\"display_scores\"]\n display_scores_mean = np.array(list(display_scores.values())).mean()\n if display_scores_mean >= 4.0:\n print(\"Propbably a bot.\")\n return True\n return False\n\n\ndef isEnglish(tweet):\n if tweet.lang == 'en':\n return True\n return False\n\n\ndef checkUserVerified(tweet):\n return tweet.user.verified\n\ndef isPolitical(tweet):\n text = tweet.text\n if any(val in text for val in political_words):\n print(\"Political text\")\n return True\n return False\n","sub_path":"tweet_analysis.py","file_name":"tweet_analysis.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"239181525","text":"from django.db import connection\n\n\nCONTENT_TYPES = ('text/plain', 'text/html')\n\n\nclass SqlRequests(object):\n def process_response(self, request, response):\n if request.META['CONTENT_TYPE'] not in CONTENT_TYPES:\n return response\n\n queries = connection.queries\n time = sum([float(query['time']) for query in queries])\n count = len(queries)\n\n res = '''
\n\n\n\n Queries time: %(time)s sec. | Queries count: %(count)s\n\n\n\n
\\n''' % {'time': time, 'count': count}\n response.content = response.content.replace('', res)\n return response\n","sub_path":"core/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"605497124","text":"#!/usr/bin/env python3\n\"\"\" Project Euler | Problem 26\n\"\"\"\nimport time\nstart_time = time.time()\n\n\ndef powers_of_two_less_than(n):\n i = 1\n while 2**i < n:\n yield 2**i\n i += 1\n\n\ndef not_cycles(n):\n\n twos = powers_of_two_less_than(n)\n for i in twos:\n j = 0\n while i * 5**j < n:\n yield i * 5**j \n j += 1\n\n k = 1\n while 5**k < n:\n yield 5**k\n k += 1\n\n\ndef cycles_less_than(n):\n for i in range(2, n):\n if i not in not_cycles(n):\n yield i\n\n\ndef divide_one_by(n):\n \"\"\"\n divide 1 by n (1 / n)\n \"\"\"\n\n ratios = []\n dividend = 10\n\n while dividend < n:\n ratios.append(0)\n dividend *= 10\n ratios.append(n / dividend)\n \n while 1:\n\n dividend = 10 * (dividend % n)\n while dividend < n:\n ratios.append(0)\n dividend *= 10\n\n ratio = n / dividend\n if ratio in ratios:\n end_cycle = len(ratios)\n start_cycle = ratios.index(ratio)\n break\n\n ratios.append(ratio)\n\n return end_cycle - start_cycle\n\n\nmax_cycle = 1\nD = 1\n\nfor d in cycles_less_than(1000):\n cycle = divide_one_by(d)\n if cycle > max_cycle:\n max_cycle = cycle\n D = d\n\n\nanswer = D\nend_time = time.time()\nrun_time = end_time - start_time\n\n\nprint(\"-------------------------------------------\")\nprint(\"| Solution to Project Euler problem 26 |\" )\nprint(\"-------------------------------------------\")\nprint(\"Question: Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.\" )\nprint(\"Answer: d = {:d}\".format(answer) )\nprint( \"Wall time: {:3.5f} seconds\".format(run_time))\n","sub_path":"26/problem26.py","file_name":"problem26.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"560532695","text":"from typing import Optional, List\nfrom forex_calendar.constants import Event\nfrom dataclasses import astuple, asdict, fields\nimport json\n\nimport datetime as dt\nimport csv\n\n\ndef json_serial(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n\n if isinstance(obj, (dt.datetime, dt.date, dt.time)):\n return obj.isoformat()\n\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\ndef _auto_file_name(results: List[Event]) -> str:\n filename = f\"forex_calendar_{results[0].date:%Y%m%d}_{results[-1].date:%Y%m%d}\"\n return filename\n\n\ndef save_as_csv(results: List[Event], filename: Optional[str] = None, mode=\"w\"):\n if filename is None:\n filename = f\"{_auto_file_name(results)}.csv\"\n\n output = [astuple(r) for r in results]\n with open(filename, mode) as f:\n csvwriter = csv.writer(f)\n if mode == \"w\":\n header = [field.name for field in fields(results[0])]\n csvwriter.writerow(header)\n\n output = [astuple(r) for r in results]\n csvwriter.writerows(output)\n\n return\n\n\ndef save_as_json(\n results: List[Event], filename: Optional[str] = None, mode=\"w\"\n) -> None:\n if filename is None:\n filename = f\"{_auto_file_name(results)}.json\"\n\n output = json.dumps([asdict(r) for r in results], default=json_serial)\n with open(filename, mode) as f:\n f.write(output)\n\n 
return\n","sub_path":"forex_calendar/save_results.py","file_name":"save_results.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"566615668","text":"#brings in time\nimport time\n#to make sensors work\nimport setup\nimport RoboPiLib as RPL\n\n#which pin the motor is in\nmotorL = 0\nmotorR = 2\n#motor speeds\nx = 2000\ny = 1000\n#it runs when the pin is not reading anything\nwhile RPL.digitalRead(16) == 1:\n #to run motors at regular speed\n RPL.servoWrite(motorL, x)\n RPL.servoWrite(motorR, y)\n if RPL.digitalRead(16) == 0:\n break\n#it stops when the sensor senses something\nwhile RPL.digitalRead(16) == 0:\n #so the robot only runs 1.5 seconds\n now = time.time()\n future = time.time() + 0.5\n #to run motors slower\n while time.time() < future:\n RPL.servoWrite(motorL, 1600)\n RPL.servoWrite(motorR, 600)\n #function to stop the motors\n if time.time() >= future:\n RPL.servoWrite(motorL, 0)\n RPL.servoWrite(motorR, 0)\n","sub_path":"slowdown.py","file_name":"slowdown.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"311941633","text":"#! /usr/bin/python3\nimport re\nimport os\n## Need this line so Atom can run it\nos.chdir('/home/andres/Programs/python/covid/app/scripts')\npath = 'data/base1.html' #html file location\nnew_path = 'data/base_new.html'\npatterns = ['lib/']\n \nfor pattern in patterns:\n pat1 = 'href=\\\"'+ pattern\n pat2 = 'src=\\\"'+pattern\n\n with open(path) as txt, open(new_path, 'w') as out:\n print(str(txt))\n k = 0\n for oline in txt:\n print(str(k) + \" \" + oline)\n if k < 141:\n k += 1\n else: break\n if re.search(pat1, oline) or re.search(pat2, oline): # Don't recall why I put this.\n\n print (pat1 + pat2)\n if re.search(pat1, oline):\n print(\"Original line for Href: \" + str(oline))\n line = oline.split(' ')\n ref = line[3].split(pattern)\n new_ref1 = \"{{ url_for('static', filename='\" + pattern + ref[1].strip('\\\"') + \"') }}\"\n ref[1] = new_ref1\n line[3] = ref[0] + ref[1] + \"\\\"\"\n new_line = ' '.join(line)\n print(\"Flask formated line: \" + new_line)\n #txt.write(new_line)\n out.write(new_line)\n\n elif re.search(pat2, oline):\n print(\"Original line for src: \" + str(oline))\n line = oline.split('\\\"')\n #print(str(line))\n for i in range(len(line)):\n if re.search(pattern, line[i]):\n #print (\"Element found\")\n break\n\n ref = line[i].split(pattern)\n #print(ref)\n new_ref1 = \"{{ url_for('static', filename='\" + pattern + ref[1].strip('\\\"') + \"') }}\"\n ref[1] = new_ref1\n line[i] = ref[0] + ref[1]\n new_line = '\\\"'.join(line)\n print(\"Flask formated line: \" + new_line)\n #txt.write(new_line)\n out.write(new_line)\n\n else:\n new_line = str(oline)\n #txt.write(new_line)\n out.write(new_line)\n\n## Need to copy file to original path befor a can replace next patter else I'll changes for curretn pattern <- Did it manually for now\n # with open(path, 'w') as t:\n # t.write(out)\n","sub_path":"scripts/change_style_sources.py","file_name":"change_style_sources.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"168476465","text":"# -*- coding: utf-8 -*-\n\nimport numpy\nimport pandas\n\nfrom indicator import dynamical_system, second_stage, force_index, ad, macd, skdj, rsi, asi, cci\nfrom indicator.high_low import compute_high_low\n\n\ndef 
filter_cross(deviation_series, hist, quote, will):\n deviation = deviation_series[deviation_series > 0]\n for i in range(len(deviation) - 1, 0, -2):\n index_date_2nd = deviation.index[i]\n index = numpy.where(quote.index == index_date_2nd)[0][0]\n if index == len(quote.index) - 2:\n continue\n\n index_date_1st = deviation.index[i - 1]\n c = numpy.count_nonzero(will * hist[index_date_1st: index_date_2nd] > 0)\n if c < 3:\n deviation_series[index_date_1st] = numpy.nan\n deviation_series[index_date_2nd] = numpy.nan\n continue\n\n return deviation_series\n\n\ndef market_deviation(quote, period, ind_vals, will=1):\n if 'max_period' not in quote.columns:\n column = 'close' # 'high'\n quote = compute_high_low(quote, column=column, compute_high=True)\n if 'min_period' not in quote.columns:\n column = 'close' # 'low'\n quote = compute_high_low(quote, column=column, compute_high=False)\n\n column = 'low' if will == 1 else 'high'\n ind_vals_adj = ind_vals.rolling(10).min() if will == 1 else ind_vals.rolling(10).max()\n val_period_series = quote['{}_period'.format('min' if will == 1 else 'max')]\n val_period_series = val_period_series[val_period_series.notna()]\n\n deviation_series = pandas.Series(numpy.nan, index=quote.index)\n for i in range(0, len(val_period_series) - 1, 2):\n date1 = val_period_series.index[i]\n date2 = val_period_series.index[i + 1]\n\n ind_val1 = ind_vals_adj.loc[date1]\n ind_val2 = ind_vals_adj.loc[date2]\n\n if will * val_period_series[i] > will * val_period_series[i + 1] and will * ind_val1 < will * ind_val2:\n deviation_series.at[date1] = quote[column].loc[date1]\n deviation_series.at[date2] = quote[column].loc[date2]\n\n return deviation_series\n\n\ndef market_deviation_asi(quote, period, will):\n quote = asi.compute_asi(quote, period)\n\n column_name = 'asi_bull_market_deviation' if will == 1 else 'asi_bear_market_deviation'\n\n deviation_series = market_deviation(quote, period, quote['asi'], will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef market_deviation_macd(quote, period, will):\n quote = macd.compute_macd(quote)\n # 价格新低\n # print(quote['close'])\n # MACD 没有新低\n\n column_name = 'macd_bull_market_deviation' if will == 1 else 'macd_bear_market_deviation'\n\n hist = quote['macd_histogram']\n deviation_series = market_deviation(quote, period, hist, will)\n\n deviation_series = filter_cross(deviation_series, hist, quote, will)\n\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef market_deviation_force_index(quote, period, will):\n # import ipdb;\n # ipdb.set_trace()\n # n = 13 if is_long_period(period) else 2\n n = 13 * 5 if period == 'day' else 13\n quote = force_index.force_index(quote, n=n)\n\n column_name = 'force_index_bull_market_deviation' if will == 1 else 'force_index_bear_market_deviation'\n hist = quote['force_index']\n deviation_series = market_deviation(quote, period, hist, will)\n deviation_series = filter_cross(deviation_series, hist, quote, will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef market_deviation_volume_ad(quote, period, will):\n quote = ad.compute_ad(quote)\n\n column_name = 'volume_ad_bull_market_deviation' if will == 1 else 'volume_ad_bear_market_deviation'\n hist = quote['adosc']\n deviation_series = market_deviation(quote, period, hist, will)\n deviation_series = filter_cross(deviation_series, hist, quote, will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef 
market_deviation_cci(quote, period, will):\n quote = cci.compute_cci(quote, period)\n\n column_name = 'cci_bull_market_deviation' if will == 1 else 'cci_bear_market_deviation'\n\n hist = quote['cci']\n hist = hist.mask((hist < 100) & (hist > -100), numpy.nan)\n deviation_series = market_deviation(quote, period, hist, will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef market_deviation_skdj(quote, period, will):\n quote = skdj.compute_skdj(quote)\n\n column_name = 'skdj_bull_market_deviation' if will == 1 else 'skdj_bear_market_deviation'\n\n hist = quote['d']\n hist = hist.mask((hist < 20) & (hist > 80), numpy.nan)\n deviation_series = market_deviation(quote, period, hist, will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\ndef market_deviation_rsi(quote, period, will):\n quote = rsi.compute_rsi(quote, period)\n\n column_name = 'rsi_bull_market_deviation' if will == 1 else 'rsi_bear_market_deviation'\n\n hist = quote['rsi']\n hist = hist.mask((hist < 30) & (hist > 70), numpy.nan)\n deviation_series = market_deviation(quote, period, hist, will)\n quote.insert(len(quote.columns), column_name, deviation_series)\n\n return quote\n\n\nindicator_func = {\n 'asi': market_deviation_asi,\n 'force_index': market_deviation_force_index,\n 'macd': market_deviation_macd,\n 'volume_ad': market_deviation_volume_ad,\n 'cci': market_deviation_cci,\n 'skdj': market_deviation_skdj,\n 'rsi': market_deviation_rsi\n}\n\n\n# @computed(column_name='macd_bull_market_deviation')\ndef compute_index(quote, period, column):\n if column in quote.columns:\n return quote\n\n quote = dynamical_system.dynamical_system_dual_period(quote, period=period)\n quote = second_stage.second_stage(quote, period)\n\n func = indicator_func[column[:column.index('_b')]]\n for will in [1, -1]:\n quote = func(quote, period, will)\n\n return quote\n","sub_path":"indicator/market_deviation_mat.py","file_name":"market_deviation_mat.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"206557364","text":"'''\n File name : main.py\n Author : Fredrik Dahlin\n Date created : 11/1/2016\n Date last modified : 11/1/2016\n Python Version : 3.4\n'''\nimport priors\nimport utility\nimport pyclone_binomial\n\n\n\n# Load data\ndata, sample_ids = utility.loadData()\n\n# Define prior < AB | BB | NoZygosity | TCN | PCN >\nprior = \"TCN\"\n\n# Get possible states for each mutation\nmutations = priors.getMutations(prior, data)\n\n#data, sample_ids, tumour_content, trace_dir, num_iters, alpha, alpha_priors\n\nerror_rate = 0.001\n\ntumour_content = {}\nfor id in sample_ids:\n\ttumour_content[id] = 1.0\n\ntrace_dir = 'trace'\n\nnum_iters = 10000\n\nalpha = 1\n\nalpha_priors = {\n\t'shape': 1.0,\n\t'rate': 0.001\n}\n\npyclone_binomial.run_pyclone_binomial_analysis(mutations, sample_ids, tumour_content, trace_dir, num_iters, alpha, alpha_priors)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"42102378","text":"\"\"\"\nProvided by Norm Wood, 6/18/14\n\nExamples of use:\n\nScalar variable\n###############\n\nimport pyhdf.HDF\nimport CloudSat_tools.read_var\n\nf_ptr = pyhdf.HDF.HDF(filename, pyhdf.HDF.HC.READ)\nvar = CloudSat_tools.read_var.get_0D_var(f_ptr, varname)\n\n\n1D variable\n###########\n\nimport pyhdf.HDF\nimport 
CloudSat_tools.read_var\n\nf_ptr = pyhdf.HDF.HDF(filename, pyhdf.HDF.HC.READ)\nvar = CloudSat_tools.read_var.get_1D_var(f_ptr, varname)\n\n2D variable\n###########\n\nimport pyhdf.SD\nimport CloudSat_tools.read_var\n\nf_ptr = pyhdf.SD.SD(filename, pyhdf.SD.SDC.READ)\nvar = CloudSat_tools.read_var.get_2D_var(f_ptr, varname)\n\n\"\"\"\n\nimport numpy\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=DeprecationWarning)\n import pyhdf.HDF\n import pyhdf.VS\n import pyhdf.SD\n\ndef get_0D_var(file_VS_ptr, varname):\n vs = file_VS_ptr.vstart()\n var = vs.attach(varname)\n tmp = var.read(1)\n var_value = tmp[0][0]\n return(var_value)\n\ndef get_1D_var(file_VS_ptr, varname):\n vs = file_VS_ptr.vstart()\n var = vs.attach(varname)\n var_info = var.inquire()\n var_nRecs = var_info[0]\n tmp = var.read(var_nRecs)\n var_values = numpy.array(tmp)\n var.detach()\n return(var_values)\n \ndef get_1D_vars(file_VS_ptr):\n\tvs = file_VS_ptr.vstart()\n\tvariables = []\n\tfor var in vs.vdatainfo():\n\t\tvariables.append(var[0])\n\treturn variables\n\ndef get_2D_var(file_SD_ptr, varname, scale_default = None, offset_default = None):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=DeprecationWarning)\n var = file_SD_ptr.select(varname)\n try:\n scale_factor = var.attributes()['factor']\n except KeyError:\n if scale_default != None:\n scale_factor = scale_default\n else:\n scale_factor = 1.\n try:\n offset = var.attributes()['offset']\n except KeyError:\n if offset_default != None:\n offset = offset_default\n else:\n offset = 0.\n var_values = (var[:]-offset)/scale_factor\n return var_values\n","sub_path":"cloudsat_tools.py","file_name":"cloudsat_tools.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"444830637","text":"import re\nfrom zubot import register_class, PLUGINS, bot_name, get_client\n\ndef clean(text):\n return '\\n'.join([s.lstrip() for s in text.split('\\n')])\n\n@register_class\nclass HelpPlugin:\n def process_message(self, reply, msg):\n if not msg['content'].startswith('help'):\n return\n\n content = 'Hi, I am %s. 
My email is %s' % (bot_name, get_client().email)\n for plugin in PLUGINS:\n if hasattr(plugin, 'helptext'):\n content += clean(plugin.helptext) + '\\n'\n\n content += 'source code: https://github.com/showell/zubot-python/'\n\n reply(content)\n","sub_path":"plugins/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"305972267","text":"from torch.nn import functional as F\n\nfrom dassl.engine import TRAINER_REGISTRY, TrainerX\nfrom dassl.metrics import compute_accuracy\nfrom dassl.modeling.ops import random_mixstyle, crossdomain_mixstyle\n\n\n@TRAINER_REGISTRY.register()\nclass Vanilla2(TrainerX):\n \"\"\"Vanilla baseline.\n\n Slightly modified for mixstyle.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__(cfg)\n mix = cfg.TRAINER.VANILLA2.MIX\n\n if mix == 'random':\n self.model.apply(random_mixstyle)\n\n elif mix == 'crossdomain':\n self.model.apply(crossdomain_mixstyle)\n\n else:\n raise NotImplementedError\n\n def forward_backward(self, batch):\n input, label = self.parse_batch_train(batch)\n output = self.model(input)\n loss = F.cross_entropy(output, label)\n self.model_backward_and_update(loss)\n\n loss_summary = {\n 'loss': loss.item(),\n 'acc': compute_accuracy(output, label)[0].item()\n }\n\n if (self.batch_idx + 1) == self.num_batches:\n self.update_lr()\n\n return loss_summary\n\n def parse_batch_train(self, batch):\n input = batch['img']\n label = batch['label']\n input = input.to(self.device)\n label = label.to(self.device)\n return input, label\n","sub_path":"imcls/trainers/vanilla2.py","file_name":"vanilla2.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"75791489","text":"import sys, os, time, shutil, json\nimport configparser\nimport argparse\nfrom os.path import abspath\nmain_path = os.path.dirname(os.path.abspath(__file__)) # Retrieve toolbox path\npackage_path = os.path.join(main_path,'libs') # Generate package path\nsys.path.insert(0,package_path) # Insert package path into $PYTHONPATH\nfrom multiprocessing import freeze_support, set_start_method #some stuff for multi-processing support\nfrom libs.RSdatamanager import filemanager as fm\n\n#---------------------------------------------------------------------------------------------------#\ndef main(datapath, **kwargs):\n\n from libs.RSdatamanager.Sentinel2.S2L2A import L2Atile, getTileList \n from libs.ToolboxModules import featurext as m1\n from libs.ToolboxModules import featurets as m2\n from libs.ToolboxModules import trendanalysis as m3\n from libs.ToolboxModules import LandCoverTraining as m4 \n from libs.ToolboxModules import LCclassificationAndCD as m5\n\n #PREPARE SOME TOOLBOX PARAMETERS\n tilenames = kwargs['options'].get('tilenames', None)\n years = kwargs['options'].get('years', None)\n maindir = kwargs['options'].get('maindir', None)\n outpath = kwargs['options'].get('outpath', None)\n deltemp = kwargs['options'].get('deltemp', True)\n\n module1 = kwargs['module1'].get('run', False)\n module2 = kwargs['module2'].get('run', False)\n module3 = kwargs['module3'].get('run', False)\n module4 = kwargs['module4'].get('run', False)\n module5 = kwargs['module5'].get('run', False)\n\n if (module1 or module2):\n #READ DATASETS\n tiledict = getTileList(datapath)\n keys = tiledict.keys()\n\n for k in keys:\n if k in tilenames:\n tileDatapath = tiledict[k]\n print(\"Reading Tile-%s.\" %(k))\n 
tile = L2Atile(maindir, tileDatapath)\n\n for y in years:\n #UPDATE OPTIONS\n name = k + '_' + y\n update = {\n 'year': y,\n 'savepath': fm.check_folder(outpath, name)\n }\n\n if module1:\n #MODULE 1\n t_mod1 = time.time()\n options = kwargs.get('module1',{})\n options.update( update )\n m1.manager(tile, **options)\n t_mod1 = (time.time() - t_mod1)/60\n print(\"MOD1 TIME = %imin \" %( int(t_mod1) ))\n\n elif module2:\n #MODULE 2\n t_mod2 = time.time()\n options = kwargs.get('module2',{})\n options.update( update )\n m2.manager(k, **options)\n t_mod2 = (time.time() - t_mod2)/60\n print(\"MOD2 TIME = %imin \" %( int(t_mod2) ))\n\n #DELETE TILE-TEMPPATH CONTENT\n if deltemp:\n flag = shutil.rmtree(tile.temppath())\n if flag==None:\n print(\"Temporary File Content of Tile-%s has been successfully removed!\" %(k))\n\n elif module3:\n for k in tilenames:\n #MODULE 3\n t_mod3 = time.time()\n options = kwargs.get('module3',{})\n m3.manager(k, **options)\n t_mod3 = (time.time() - t_mod3)/60\n print(\"MOD3 TIME = %imin \" %( int(t_mod3) ))\n\n elif module4:\n for k in tilenames:\n #MODULE 4\n t_mod4 = time.time()\n options = kwargs.get('module4',{})\n m4.manager(k, **options)\n t_mod4 = (time.time() - t_mod4)/60\n print(\"MOD4 TIME = %imin \" %( int(t_mod4) ))\n\n elif module5:\n for k in tilenames:\n #MODULE 5\n t_mod5 = time.time()\n options = kwargs.get('module5',{})\n m5.manager(k, **options)\n t_mod5 = (time.time() - t_mod5)/60\n print(\"MOD5 TIME = %imin \" %( int(t_mod5) ))\n \n#---------------------------------------------------------------------------------------------------#\nif (__name__ == '__main__'):\n #MULTIPROCESSING INITIALIZATION\n freeze_support() #needed for windows\n set_start_method('spawn') # because the VSCode debugger (ptvsd) is not fork-safe\n\n #READ COMMAND ARGUMENTS\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-c', '--config', required=True, metavar='config.ini')\n parser.add_argument('-m1', '--module1', action='store_true', help=\"run module 1\")\n parser.add_argument('-m2', '--module2', action='store_true', help=\"run module 2\")\n parser.add_argument('-m3', '--module3', action='store_true', help=\"run module 3\")\n parser.add_argument('-m4', '--module4', action='store_true', help=\"run module 4\")\n parser.add_argument('-m5', '--module5', action='store_true', help=\"run module 5\")\n\n args = parser.parse_args()\n\n configfile = abspath(args.config)\n module1 = args.module1\n module2 = args.module2\n module3 = args.module3\n module4 = args.module4\n module5 = args.module5\n\n #READ INITIALIZATION FILE AND SETUP OPTIONS\n config = configparser.ConfigParser()\n config.read(configfile)\n\n datapath = fm.formatPath(config['Paths']['data_path'])\n\n options = {\n 'tilenames': config['Data']['tilenames'].split(','),\n 'years': config['Data']['years'].split(','),\n 'maindir': fm.formatPath(config['Paths']['main_dir']),\n 'outpath': fm.check_folder(config['Paths']['output_path']),\n 'info': True,\n 'deltemp': False\n }\n \n m1options = {}\n m1options.update(options)\n m1options['run'] = module1\n\n m2options = {}\n m2options.update(options)\n m2options['run'] = module2\n m2options['blocksize'] = int(config['Module2']['blocksize'])\n m2options['mappath'] = fm.formatPath(config['Paths']['LC_path'])\n\n m3options = {}\n m3options.update(options)\n m3options['run'] = module3\n m3options['batchsize'] = int(config['Module3']['batchsize'])\n m3options['frequency'] = int(config['Module3']['frequency'])\n\n m4options = {}\n m4options.update(options)\n m4options['run'] 
= module4\n m4options['blocksize'] = int(config['Module4']['blocksize'])\n m4options['n_classes'] = int(config['Module4']['n_classes'])\n m4options['multiprocessing'] = config.getboolean('Module4', 'multiprocessing') \n m4options['weekly'] = config.getboolean('Module4', 'weekly')\n m4options['singlefeaturedtw'] = config.getboolean('Module4', 'singlefeaturedtw')\n m4options['featureselection'] = config.getboolean('Module4', 'featureselection')\n m4options['multifeatureDTW'] = config.getboolean('Module4', 'multifeatureDTW')\n m4options['similarity'] = config.getboolean('Module4', 'similarity')\n m4options['classprototypes'] = config.getboolean('Module4', 'classprototypes')\n m4options['DTW_max_samp'] = int(config['Module4']['DTW_max_samp'])\n m4options['simi_high'] = int(config['Module4']['simi_high'])\n m4options['simi_decr'] = float(config['Module4']['simi_decr'])\n\n m5options = {}\n m5options.update(options)\n m5options['run'] = module5\n m5options['blocksize'] = int(config['Module5']['blocksize'])\n m5options['n_classes'] = int(config['Module5']['n_classes'])\n m5options['DTW_max_samp'] = int(config['Module5']['DTW_max_samp'])\n m5options['MAX_CD'] = int(config['Module5']['MAX_CD'])\n\n #CALL MAIN FUNCTION\n main(\tdatapath = datapath,\n options = options,\n\t\t\tmodule1 = m1options,\n module2 = m2options,\n module3 = m3options,\n module4 = m4options,\n module5 = m5options\n\t\t)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"402210069","text":"# from PyQt5.QtWidgets import QMainWindow, QFileDialog, QDialog\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom PyQt5.QtWidgets import QMainWindow, QFileDialog, QDialog\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtCore\nfrom libImage import do_all\nimport threading\nfrom time import sleep\n\n\nclass UiSaveDialog(object):\n def setup_ui(self, save_dialog):\n save_dialog.setObjectName(\"save_dialog\")\n save_dialog.resize(311, 293)\n self.gridLayout = QtWidgets.QGridLayout(save_dialog)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.widget = QtWidgets.QWidget(save_dialog)\n self.widget.setObjectName(\"widget\")\n self.gridLayout_2 = QtWidgets.QGridLayout(self.widget)\n self.gridLayout_2.setContentsMargins(0, 0, 0, 0)\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.separate = QtWidgets.QCheckBox(self.widget)\n self.separate.setObjectName(\"separate\")\n self.gridLayout_2.addWidget(self.separate, 5, 0, 1, 1)\n self.skelet1 = QtWidgets.QCheckBox(self.widget)\n self.skelet1.setObjectName(\"skelet1\")\n self.gridLayout_2.addWidget(self.skelet1, 1, 0, 1, 1)\n self.binarization = QtWidgets.QCheckBox(self.widget)\n self.binarization.setObjectName(\"binarization\")\n self.gridLayout_2.addWidget(self.binarization, 0, 0, 1, 1)\n self.key2 = QtWidgets.QCheckBox(self.widget)\n self.key2.setObjectName(\"key2\")\n self.gridLayout_2.addWidget(self.key2, 4, 0, 1, 1)\n self.skelet2 = QtWidgets.QCheckBox(self.widget)\n self.skelet2.setObjectName(\"skelet2\")\n self.gridLayout_2.addWidget(self.skelet2, 2, 0, 1, 1)\n self.key1 = QtWidgets.QCheckBox(self.widget)\n self.key1.setObjectName(\"key1\")\n self.gridLayout_2.addWidget(self.key1, 3, 0, 1, 1)\n self.edges = QtWidgets.QCheckBox(self.widget)\n self.edges.setObjectName(\"edges\")\n self.gridLayout_2.addWidget(self.edges, 6, 0, 1, 1)\n self.gridLayout.addWidget(self.widget, 0, 0, 1, 1)\n self.buttonBox = QtWidgets.QDialogButtonBox(save_dialog)\n 
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)\n\n self.retranslate_ui(save_dialog)\n QtCore.QMetaObject.connectSlotsByName(save_dialog)\n\n def retranslate_ui(self, save_dialog):\n _translate = QtCore.QCoreApplication.translate\n save_dialog.setWindowTitle(_translate(\"save_dialog\", \"Сохранять после\"))\n self.separate.setText(_translate(\"save_dialog\", \"выделения точек изгиба\"))\n self.skelet1.setText(_translate(\"save_dialog\", \"1 скелетизации\"))\n self.binarization.setText(_translate(\"save_dialog\", \"бинаризации\"))\n self.key2.setText(_translate(\"save_dialog\", \"объединения ключевых точек\"))\n self.skelet2.setText(_translate(\"save_dialog\", \"2 скелетизации\"))\n self.key1.setText(_translate(\"save_dialog\", \"выделения ключевых точек\"))\n self.edges.setText(_translate(\"save_dialog\", \"выделения рёбер\"))\n\n\nclass UiMainWindow(object):\n def setup_ui(self, main_window):\n main_window.setObjectName(\"main_window\")\n main_window.setEnabled(True)\n main_window.resize(320, 240)\n self.centralwidget = QtWidgets.QWidget(main_window)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.progress = QtWidgets.QProgressBar(self.centralwidget)\n self.progress.setEnabled(False)\n self.progress.setProperty(\"value\", 0)\n self.progress.setObjectName(\"progress\")\n self.gridLayout.addWidget(self.progress, 3, 0, 1, 2)\n self.img = QtWidgets.QLabel(self.centralwidget)\n self.img.setText(\"\")\n self.img.setAlignment(QtCore.Qt.AlignCenter)\n self.img.setObjectName(\"img\")\n self.gridLayout.addWidget(self.img, 1, 0, 1, 2)\n self.filename = QtWidgets.QLabel(self.centralwidget)\n self.filename.setObjectName(\"filename\")\n self.gridLayout.addWidget(self.filename, 0, 0, 1, 1)\n self.filebtn = QtWidgets.QPushButton(self.centralwidget)\n self.filebtn.setObjectName(\"filebtn\")\n self.gridLayout.addWidget(self.filebtn, 0, 1, 1, 1)\n self.output = QtWidgets.QLineEdit(self.centralwidget)\n self.output.setReadOnly(True)\n self.output.setObjectName(\"output\")\n self.gridLayout.addWidget(self.output, 4, 0, 1, 2)\n self.startbtn = QtWidgets.QPushButton(self.centralwidget)\n self.startbtn.setEnabled(False)\n self.startbtn.setObjectName(\"startbtn\")\n self.gridLayout.addWidget(self.startbtn, 2, 0, 1, 1)\n self.setbtn = QtWidgets.QPushButton(self.centralwidget)\n self.setbtn.setObjectName(\"setbtn\")\n self.gridLayout.addWidget(self.setbtn, 2, 1, 1, 1)\n main_window.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(main_window)\n self.statusbar.setObjectName(\"statusbar\")\n main_window.setStatusBar(self.statusbar)\n\n self.retranslate_ui(main_window)\n QtCore.QMetaObject.connectSlotsByName(main_window)\n\n def retranslate_ui(self, main_window):\n _translate = QtCore.QCoreApplication.translate\n main_window.setWindowTitle(_translate(\"main_window\", \"Распознаватель изображений\"))\n self.filename.setText(_translate(\"main_window\", \"Выберите файл\"))\n self.filebtn.setText(_translate(\"main_window\", \"Обзор\"))\n self.output.setText(_translate(\"main_window\", \"Здесь будет распознанный текст\"))\n self.startbtn.setText(_translate(\"main_window\", \"Начать\"))\n self.setbtn.setText(_translate(\"main_window\", \"Настройки\"))\n\n\nclass ends():\n sig = QtCore.pyqtSignal()\n\n\n# noinspection 
PyUnresolvedReferences\nclass Ui(QMainWindow, UiMainWindow):\n valarr = {\"binarization\": False, \"skelet1\": False, \"skelet2\": False, \"key1\": False, \"key2\": False,\n \"separate\": False, \"edges\": True}\n endsignal = QtCore.pyqtSignal()\n\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n self.setup_ui(self)\n self.filebtn.clicked.connect(self.file_select)\n self.setbtn.clicked.connect(self.settings)\n self.startbtn.clicked.connect(self.start_processing)\n self.setWindowIcon(QIcon(\"icon.png\"))\n # self.endsignal = QtCore.pyqtSignal()\n self.endsignal.connect(self.end_processing)\n self.show()\n\n def file_select(self):\n self.progress.setEnabled(False)\n self.progress.setValue(0)\n file = QFileDialog.getOpenFileName(self, \"Выбрать файл\", \"/home/ilya/картинки/\", \"Изображение (*.png)\")[0]\n if file == \"\":\n self.startbtn.setEnabled(False)\n self.img.setPixmap(QPixmap())\n self.filename.setText(\"Выберите файл\")\n else:\n pixmap = QPixmap(file)\n pixmap = pixmap.scaled(int((pixmap.width() / pixmap.height()) * 250), 250)\n print(type(pixmap))\n self.img.setPixmap(pixmap)\n self.filename.setText(file)\n self.startbtn.setEnabled(True)\n\n def settings(self):\n dlg = Setd(self.valarr.copy())\n if dlg.exec_() == 1:\n self.valarr = dlg.get_values()\n print(self.valarr)\n\n def start_processing(self):\n self.progress.setValue(0)\n self.progress.setEnabled(True)\n self.startbtn.setEnabled(False)\n self.setbtn.setEnabled(False)\n self.filebtn.setEnabled(False)\n self.thr = threading.Thread(target=do_all, args=(self.filename.text(), self, self.valarr))\n self.thr.start()\n\n def end_processing(self):\n while self.thr.is_alive():\n self.thr.join()\n self.startbtn.setEnabled(True)\n self.setbtn.setEnabled(True)\n self.filebtn.setEnabled(True)\n\n def send_end(self):\n self.endsignal.emit()\n\n# noinspection PyUnresolvedReferences\nclass Setd(QDialog, UiSaveDialog):\n def __init__(self, valarr, parent=None):\n QDialog.__init__(self, parent)\n self.setup_ui(self)\n self.valarr = valarr\n self.binarization.setChecked(self.valarr[\"binarization\"])\n self.skelet1.setChecked(self.valarr[\"skelet1\"])\n self.skelet2.setChecked(self.valarr[\"skelet2\"])\n self.key1.setChecked(self.valarr[\"key1\"])\n self.key2.setChecked(self.valarr[\"key2\"])\n self.separate.setChecked(self.valarr[\"separate\"])\n self.edges.setChecked(self.valarr[\"edges\"])\n\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n self.binarization.stateChanged.connect(self.change_value)\n self.skelet1.stateChanged.connect(self.change_value)\n self.skelet2.stateChanged.connect(self.change_value)\n self.key1.stateChanged.connect(self.change_value)\n self.key2.stateChanged.connect(self.change_value)\n self.separate.stateChanged.connect(self.change_value)\n self.edges.stateChanged.connect(self.change_value)\n\n def get_values(self):\n return self.valarr\n\n def change_value(self, state):\n name = self.sender()\n self.valarr[name.objectName()] = bool(state)\n","sub_path":"imageGUI/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":9608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"455016184","text":"import pickle\nimport dataPrep\n\ndef saveDictFromRaw(file, output, min=2, max=18, reduction=None): #sort phrases by length and store them in different dictionaries, encapsuled in one lib\n #importing and preparing data\n df = open(file, encoding='utf-8') #relative path from main\n data = 
dataPrep.parseDialogs(df)\n df.close()\n data = dataPrep.parseUtterances(data)\n data = dataPrep.parsePhrase2(data)\n\n if reduction != None:\n data = data[:reduction]\n\n dictionary = []\n for j in range(min, max):\n tab = []\n for i in range(len(data)):\n if len(data[i].split())==j:\n tab.append(data[i])\n dictionary.append(tab)\n\n del data\n\n with open(output, 'wb') as fp:\n pickle.dump(dictionary, fp)\n\ndef dictToSorted(file, output): #sort every dict by alphabetical order\n data = pickle.load(open(file, 'rb'))\n\n sorted_data = []\n for dict in data:\n sorted_data.append(sorted(dict))\n\n with open(output, 'wb') as fp:\n pickle.dump(sorted_data, fp)\n\ndef cleanSortedData(file, output): #delete all double phrases in the sorted dict\n data = pickle.load(open(file, 'rb'))\n\n prev_phrase = \"\"\n dat = []\n for dict in data:\n dictionary = []\n for phrase in dict:\n if prev_phrase != phrase:\n dictionary.append(phrase)\n prev_phrase = phrase\n dat.append(dictionary)\n del data\n\n with open(output, 'wb') as fp:\n pickle.dump(dat, fp)\n return dat\n\n############ MAIN ############\nRAW_FILE = '../../DataBase/dialog/dialogues_text.txt'\nEDITED_FILE = \"./Data/ParsedData.txt\"\nSORTED_FILE = \"./Data/SortedData.txt\"\nCLEAN_FILE = \"./Data/CleanData.txt\"\n\nsaveDictFromRaw(RAW_FILE, output=EDITED_FILE)\ndictToSorted(EDITED_FILE, SORTED_FILE)\ncleanSortedData(SORTED_FILE, CLEAN_FILE)\n\ndata = pickle.load(open(CLEAN_FILE, 'rb'))\n","sub_path":"EtudeStatGPT/dataSort.py","file_name":"dataSort.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"233871689","text":"import sys\n#input='E:/evolutinoarypattern/RMB2_B10.freebayes_snps/RMB2_B10.res.2.csv'\n#pInput='E:/evolutinoarypattern/RMB2_B10.freebayes_snps/RMB2_B10.pVal.csv'\n#output='E:/evolutinoarypattern/RMB2_B10.freebayes_snps/RMB2_B10.res.ks.csv'\nimport argparse\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"-i\", \"--input\", help=\"KS-distances input file name\")\narg_parser.add_argument(\"-p\", \"--pvalueInput\", help=\"P-values file name\")\narg_parser.add_argument(\"-o\", \"--output\", help=\"An output file name\")\narg_parser.add_argument(\"-t\", \"--timepoints\", help=\"Number of timepoint\")\n\nargs = arg_parser.parse_args()\n\ninput= args.input\npInput= args.pvalueInput\noutput= args.output\n\ntimePoint = int(args.timepoints) \n\niFile = open(input,'r')\npFile = open(pInput, 'r')\noFile = open(output, 'w')\n\npVals = pFile.readlines()\n\noFile.write('chr\\tloc\\ttimepoint\\tmajor\\tminor\\ttotal_depth\\tminor_depth\\tp0\\tpValue,')\nfor i in range(timePoint - 1):\n oFile.write(str(i))\n oFile.write('\\t')\noFile.write(str(timePoint-1))\noFile.write('\\n')\np=0\niFile.readline()\n\nwhile (True):\n line = []\n\n time = iFile.readline()\n tmp = time.strip().split('\\t')\n\n if(time == ''): break\n\n line = tmp[0:5]\n line.append(tmp[8])\n line.append(tmp[9])\n line.append(tmp[11])\n try:\n line.append(pVals[p].rstrip())\n except IndexError:\n continue\n line.append(tmp[10])\n\n p = p + 1\n\n for i in range(0, timePoint-1):\n time = iFile.readline()\n if(time == ''): break\n tmp = time.strip().split('\\t')\n\n line.append(tmp[10])\n\n oFile.write('\\t'.join(line))\n oFile.write('\\n')\n\n\n","sub_path":"Monte-Carlo_Based_KS-Test/makeKSOutput.py","file_name":"makeKSOutput.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"505647921","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division # Python 2 users only\nfrom django.shortcuts import render, HttpResponse\nfrom django.http import JsonResponse, HttpResponseBadRequest\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import SmartemUser\nimport base64\nimport sys\nimport json\nimport nltk, re, pprint\nfrom nltk import word_tokenize\nfrom collections import Counter\nfrom django.contrib.auth.decorators import login_required\n\nfrom ml import gmail as g\n\nc = re.compile(\"(^>.*$)|(.*<\\/html>)\", re.MULTILINE)\nc2 = re.compile(\"(^20.*$)|(^On.*>)\", re.MULTILINE)\nnonPunct = re.compile('.*[א-תA-Za-z].*') # must contain a letter or digit\n\n# Create your views here.\n@csrf_exempt\ndef append(request):\n received_json_data = json.loads(request.body)\n age = int(received_json_data[\"age\"])\n grade = int(received_json_data[\"grade\"])\n guess = int(received_json_data[\"guess\"])\n datadic = received_json_data[\"data\"]\n text = \"\"\n for t in datadic:\n text += \" \"\n text += re.sub(c2,\"\",re.sub(c,\"\",base64.urlsafe_b64decode(str(datadic[t]))))\n\n text2 = text.decode('utf-8')\n tokens = word_tokenize(text2)\n filtered = [w for w in tokens if nonPunct.match(w.encode('utf-8'))]\n counts = Counter(filtered)\n\n try:\n s = SmartemUser.objects.create(user_age=age,grade=grade,data=counts, guess=guess)\n except Exception as e:\n print(e)\n\n return HttpResponse(\"success\")\n\n\n@csrf_exempt\ndef api(request):\n if request.method == 'PUT':\n data = json.loads(request.body)\n result = g.predict_api(data['data'])\n\n return JsonResponse({\"score\":int(result[0])})\n\n return HttpResponseBadRequest()\n\n\ndef stats(request):\n if not request.user.is_authenticated:\n return HttpResponse(\"Permission denied.\")\n\n objects = SmartemUser.objects.only(\"user_age\",\"grade\")\n c_data = [['Grade']]\n a_data = [['Age']]\n\n errors = []\n\n for o in objects:\n c_data.append([o.grade])\n a_data.append([o.user_age])\n errors.append(abs(o.guess - o.grade))\n\n peoplesum = SmartemUser.objects.count()\n error = sum(errors) / float(peoplesum)\n\n return render(request,'web/stats.html',{'c_data' : c_data, 'a_data':a_data,'peoplesum':peoplesum, 'error':error})","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"213322372","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom google.cloud.network_management import gapic_version as package_version\n\n__version__ = package_version.__version__\n\n\nfrom google.cloud.network_management_v1.services.reachability_service.async_client import (\n ReachabilityServiceAsyncClient,\n)\nfrom google.cloud.network_management_v1.services.reachability_service.client import (\n ReachabilityServiceClient,\n)\nfrom google.cloud.network_management_v1.types.connectivity_test import (\n ConnectivityTest,\n 
Endpoint,\n LatencyDistribution,\n LatencyPercentile,\n ProbingDetails,\n ReachabilityDetails,\n)\nfrom google.cloud.network_management_v1.types.reachability import (\n CreateConnectivityTestRequest,\n DeleteConnectivityTestRequest,\n GetConnectivityTestRequest,\n ListConnectivityTestsRequest,\n ListConnectivityTestsResponse,\n OperationMetadata,\n RerunConnectivityTestRequest,\n UpdateConnectivityTestRequest,\n)\nfrom google.cloud.network_management_v1.types.trace import (\n AbortInfo,\n AppEngineVersionInfo,\n CloudFunctionInfo,\n CloudRunRevisionInfo,\n CloudSQLInstanceInfo,\n DeliverInfo,\n DropInfo,\n EndpointInfo,\n FirewallInfo,\n ForwardInfo,\n ForwardingRuleInfo,\n GKEMasterInfo,\n GoogleServiceInfo,\n InstanceInfo,\n LoadBalancerBackend,\n LoadBalancerInfo,\n LoadBalancerType,\n NetworkInfo,\n RouteInfo,\n Step,\n Trace,\n VpcConnectorInfo,\n VpnGatewayInfo,\n VpnTunnelInfo,\n)\n\n__all__ = (\n \"ReachabilityServiceClient\",\n \"ReachabilityServiceAsyncClient\",\n \"ConnectivityTest\",\n \"Endpoint\",\n \"LatencyDistribution\",\n \"LatencyPercentile\",\n \"ProbingDetails\",\n \"ReachabilityDetails\",\n \"CreateConnectivityTestRequest\",\n \"DeleteConnectivityTestRequest\",\n \"GetConnectivityTestRequest\",\n \"ListConnectivityTestsRequest\",\n \"ListConnectivityTestsResponse\",\n \"OperationMetadata\",\n \"RerunConnectivityTestRequest\",\n \"UpdateConnectivityTestRequest\",\n \"AbortInfo\",\n \"AppEngineVersionInfo\",\n \"CloudFunctionInfo\",\n \"CloudRunRevisionInfo\",\n \"CloudSQLInstanceInfo\",\n \"DeliverInfo\",\n \"DropInfo\",\n \"EndpointInfo\",\n \"FirewallInfo\",\n \"ForwardInfo\",\n \"ForwardingRuleInfo\",\n \"GKEMasterInfo\",\n \"GoogleServiceInfo\",\n \"InstanceInfo\",\n \"LoadBalancerBackend\",\n \"LoadBalancerInfo\",\n \"NetworkInfo\",\n \"RouteInfo\",\n \"Step\",\n \"Trace\",\n \"VpcConnectorInfo\",\n \"VpnGatewayInfo\",\n \"VpnTunnelInfo\",\n \"LoadBalancerType\",\n)\n","sub_path":"packages/google-cloud-network-management/google/cloud/network_management/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129355856","text":"import json\nimport numpy as np\nimport os\nimport csv\nimport matplotlib.pyplot as plt\n\ndef smooth(y, box_pts):\n box = np.ones(box_pts)/box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth\n\nPATH = '../Spasenov_data/temp_record_4/temp_record_4.json'\n\nwith open(PATH) as f:\n data = json.load(f)\n\nx_left_cheek_r = []\nx_right_cheek_r = []\nx_forehead_r = []\n\nx_left_cheek_g = []\nx_right_cheek_g = []\nx_forehead_g = []\n\nx_left_cheek_b = []\nx_right_cheek_b = []\nx_forehead_b = []\n\nRED = 0\nGREEN = 1\nBLUE = 2\n#import pdb; pdb.set_trace()\ndict = {}\nfor time in data:\n for segment in data[time]:\n if segment == 'left_cheek':\n x_left_cheek_r.append(data[time][segment][0][RED])\n x_left_cheek_g.append(data[time][segment][0][GREEN])\n x_left_cheek_b.append(data[time][segment][0][BLUE])\n elif segment == 'right_cheek':\n x_right_cheek_r.append(data[time][segment][0][RED])\n x_right_cheek_g.append(data[time][segment][0][GREEN])\n x_right_cheek_b.append(data[time][segment][0][BLUE])\n else:\n x_forehead_r.append(data[time][segment][0][RED])\n x_forehead_g.append(data[time][segment][0][GREEN])\n x_forehead_b.append(data[time][segment][0][BLUE])\nx = range(1600)\n\nx_left_cheek_r = smooth(x_left_cheek_r, 11)\nx_left_cheek_g = smooth(x_left_cheek_g, 11)\nx_left_cheek_b = smooth(x_left_cheek_b, 
11)\nfig = plt.figure()\n\nplt.subplot(3, 1, 1)\nplt.xlim((0, 1600))\nplt.title('Left cheek (red)')\nplt.xlabel('Frames')\nplt.ylabel('Intensity')\nplt.grid(True)\nplt.plot(x, x_left_cheek_r[100:1700], 'r')\n\nplt.subplot(3, 1, 2)\nplt.xlim((0, 1600))\nplt.title('Left cheek (green)')\nplt.xlabel('Frames')\nplt.ylabel('Intensity')\nplt.grid(True)\nplt.plot(x, x_left_cheek_g[100:1700], 'g')\n\nplt.subplot(3, 1, 3)\nplt.xlim((0, 1600))\nplt.title('Left cheek (blue)')\nplt.plot(x, x_left_cheek_b[100:1700], 'b')\nplt.xlabel('Frames')\nplt.ylabel('Intensity')\nplt.grid(True)\nplt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.5,\n wspace=0.35)\nplt.show()\n","sub_path":"ekg_project/color_dist/chanels_dynamics.py","file_name":"chanels_dynamics.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"309033943","text":"from typing import Dict, Any, Optional\nimport requests\nfrom spectacles.utils import details_from_http_error\n\n\nclass SpectaclesException(Exception):\n exit_code = 100\n\n def __init__(self, name: str, title: str, detail: str):\n self.type: str = \"/errors/\" + name\n self.title = title\n self.detail = detail\n\n def __repr__(self) -> str:\n return self.title\n\n def __str__(self) -> str:\n return self.title + \" \" + self.detail\n\n\nclass LookMlNotFound(SpectaclesException):\n ...\n\n\nclass LookerApiError(SpectaclesException):\n \"\"\"Exception raised when an error is returned by the Looker API.\n\n Args:\n name: A lowercase, hyphenated, unique ID for the error type.\n title: A short, human-readable summary of the problem.\n status: The HTTP status code returned by the Looker API.\n detail: A human-readable explanation with any helpful tips for\n solving the issue.\n response: The response object returned by the Looker API.\n \"\"\"\n\n exit_code = 101\n\n def __init__(\n self,\n name: str,\n title: str,\n status: int,\n detail: str,\n response: requests.Response,\n ):\n request: requests.PreparedRequest = response.request\n super().__init__(\"looker-api-errors/\" + name, title, detail)\n self.status = status\n self.looker_api_response: Optional[Dict[str, Any]] = details_from_http_error(\n response\n )\n self.request = {\"url\": request.url, \"method\": request.method}\n\n\nclass GenericValidationError(SpectaclesException):\n exit_code = 102\n\n def __init__(self):\n super().__init__(\n name=\"validation-error\",\n title=\"A validation error occurred.\",\n detail=\"Spectacles encountered an error while running validation tests.\",\n )\n\n\nclass ValidationError(GenericValidationError):\n def __init__(\n self,\n model: str,\n explore: str,\n test: Optional[str],\n message: str,\n metadata: Dict[str, Any],\n ):\n self.model = model\n self.explore = explore\n self.test = test\n self.message = message\n self.metadata = metadata\n super().__init__()\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return self.message\n\n\nclass SqlError(ValidationError):\n def __init__(\n self,\n model: str,\n explore: str,\n dimension: Optional[str],\n sql: str,\n message: str,\n line_number: Optional[int] = None,\n explore_url: Optional[str] = None,\n lookml_url: Optional[str] = None,\n ):\n metadata = {\n \"dimension\": dimension,\n \"line_number\": line_number,\n \"explore_url\": explore_url,\n \"lookml_url\": lookml_url,\n }\n super().__init__(\n model=model, 
explore=explore, test=sql, message=message, metadata=metadata\n )\n\n\nclass DataTestError(ValidationError):\n def __init__(\n self, model: str, explore: str, message: str, test_name: str, lookml_url: str\n ):\n metadata = {\"test_name\": test_name, \"lookml_url\": lookml_url}\n super().__init__(\n model=model, explore=explore, test=None, message=message, metadata=metadata\n )\n","sub_path":"spectacles/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"403747130","text":"import liwc\nimport all_features\nimport csv\nimport re\nfrom nltk.tokenize import word_tokenize\nfrom wordsegment import segment\n\nquora_data_file_path = \"Quora_data/quora_answer_label200.csv\"\nquora_data_output_path = \"Quora_data/quora_answer_with_features200.csv\"\nclassification_data_path = \"Quora_data/quora_classification_data200.csv\"\t\t\t\npersonality_categories = [\"O\",\"C\",\"E\",\"A\",\"N\"]\t\t\t\nDEBUG = False\n\nfrom enchant.tokenize import get_tokenizer, HTMLChunker\ndef custom_word_tokenize(text):\n\ttokenizer = get_tokenizer(\"en_US\")\n\twords = []\n\tfor w in tokenizer(text):\n\t\twords.append(w[0])\n\treturn words\n\ndef compute_scores_for_quora_ans(quora_ans, liwc_categories, liwc_trie):\n\tliwc_scores = dict()\n\tfor category in liwc_categories:\n\t\tliwc_scores[category] = 0\n\n\t# Segment the words Present in the quora_ans\n\twords_in_quora_ans = custom_word_tokenize(quora_ans)\n\t# print (quora_ans)\n\t# words_in_quora_ans = segment(quora_ans)\n\t# Traverse the text word by word and count the words in each category\n\tif DEBUG:\n\t\tprint(words_in_quora_ans)\n\tfor word in words_in_quora_ans:\n\t\t# check if word prefix present in trie\n\t\tvalue = liwc_trie.longest_prefix(word)\n\t\tif value[0] is None:\n\t\t\tcontinue\n\t\telif value[0] == word:\n\t\t\t# exact match thus increase counts\n\t\t\tif value[1][0] == \"*\":\t\t\t\n\t\t\t\tfor i in range(1,len(value[1])):\n\t\t\t\t\tliwc_scores[value[1][i]] += 1\n\t\t\telse:\n\t\t\t\tfor category in value[1]:\n\t\t\t\t\tliwc_scores[category] += 1\n\t\telif value[1][0] == \"*\":\n\t\t\t# star match thus increase counts\n\t\t\tfor i in range(1,len(value[1])):\n\t\t\t\tliwc_scores[value[1][i]] += 1\n\t\n\tscores = []\n\tnumber_words = len(words_in_quora_ans)\n\tfor category in liwc_categories:\n\t\tscores.append(liwc_scores[category]/number_words)\n\treturn scores\n\ndef create_classification_data(all_data, all_feature_labels):\n\tdataset_rows = []\n\twith open(classification_data_path, 'w') as csvoutput:\n\t\twriter = csv.writer(csvoutput)\n\t\tattribute_list = personality_categories + all_feature_labels\n\t\tl = len(attribute_list)\n\t\tfor row in all_data:\n\t\t\tr = row[2:]\n\t\t\tif len(r)!=l:\n\t\t\t\tprint(\"len not equal \",(len(r) - l))\n\t\t\telse:\n\t\t\t\tdataset_rows.append(r)\n\t\twriter.writerows(dataset_rows)\n\ndef generate_scored_quora_data():\n\tliwc_categories = liwc.get_list_of_liwc_categories()\n\tliwc_trie = liwc.create_trie_data_structure()\n\t\n\tfeature_labels = all_features.get_all_feature_labels()\n\n\thead_row = []\n\titeration = 0\n\twith open(quora_data_file_path, 'r', encoding='utf-8') as csvinput:\n\t\twith open(quora_data_output_path, 'w') as csvoutput:\n\t\t\twriter = csv.writer(csvoutput)\n\t\t\treader = csv.reader(csvinput)\n\t\t\tprint(\"Printing Here!!\")\n\t\t\tprint(reader)\n\t\t\thead_row = next(reader)\n\t\t\tall_data = 
[]\n\t\t\thead_row.extend(liwc_categories)\n\t\t\thead_row.extend(feature_labels)\n\t\t\tall_data.append(head_row)\n\t\t\tfor row in reader:\n\t\t\t\t# compute LIWC Counts for individual categories\n\t\t\t\tscores = compute_scores_for_quora_ans(row[1], liwc_categories, liwc_trie)\n\t\t\t\tfeatures = all_features.get_all_features(row[1])\n\t\t\t\tdummy_row = row\n\t\t\t\tdummy_row.extend(scores)\n\t\t\t\tdummy_row.extend(features)\n\t\t\t\tall_data.append(dummy_row)\n\t\t\t\t# if DEBUG:\n\t\t\t\t\t# print(dummy_row)\n\t\t\t\tprint(\"quora_ans\", iteration)\n\t\t\t\titeration += 1\n\t\t\tall_feature_labels = []\n\t\t\tall_feature_labels.extend(liwc_categories)\n\t\t\tall_feature_labels.extend(feature_labels)\n\t\t\tcreate_classification_data(all_data, all_feature_labels)\n\t\t\twriter.writerows(all_data)\n\ndef get_feature_vector(text):\n\tliwc_categories = liwc.get_list_of_liwc_categories()\n\tliwc_trie = liwc.create_trie_data_structure()\n\t\n\tfeature_labels = all_features.get_all_feature_labels()\n\n\tscores = compute_scores_for_quora_ans(text, liwc_categories, liwc_trie)\n\tfeatures = all_features.get_all_features(text)\n\tscores.extend(features)\n\treturn scores\n\ndef check_word_tokenizer():\n\tliwc_categories = liwc.get_list_of_liwc_categories()\n\tliwc_trie = liwc.create_trie_data_structure()\n\tprint(liwc_categories)\n\twith open(quora_data_file_path, 'r', encoding='utf-8') as csvinput:\n\t\treader = csv.reader(csvinput)\n\t\thead_row = next(reader)\n\t\tfor row in reader:\n\t\t\ttext = row[1]\n\t\t\tprint(custom_word_tokenize(text))\n\t\t\tscores = compute_scores_for_quora_ans(text, liwc_categories, liwc_trie)\n\t\t\t\n\t\t\tidx = 0\n\t\t\tfor category in liwc_categories:\n\t\t\t\tprint(category,scores[idx])\n\t\t\t\tidx+=1\n\t\t\tsleep(10)\n\ndef check_pos_tagging():\n\twith open(quora_data_file_path, 'r', encoding='utf-8') as csvinput:\n\t\treader = csv.reader(csvinput)\n\t\thead_row = next(reader)\n\t\tfor row in reader:\n\t\t\ttext = row[1]\n\t\t\twords_custom = custom_word_tokenize(text)\n\t\t\twords_nltk = nltk.word_tokenize(text)\n\t\t\tpos_tag_custom = nltk.pos_tag(words_custom)\n\t\t\tprint(words_nltk)\n\t\t\tprint(pos_tag_custom)\n\t\t\tsleep(10)\n\n\ndef main():\n\tall_features.init()\n\tgenerate_scored_quora_data()\n\t# check_word_tokenizer()\n\t# check_pos_tagging()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"demo/quora_count.py","file_name":"quora_count.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"366245630","text":"from mysetup import *\n\nh.load_file(\"labels.hoc\") # has variables needed by network\n\nCTYP = h.CTYP # cell types\nCTYPi = CTYP.count # number of cell types\nCTYPi = int(h.CTYPi)\n\nE = int(h.E2)\nI = int(h.I2)\n\nSTYP = h.STYP # synapse types\nSTYPi = int(h.STYPi)\n\nAM2 = int(h.AM2)\nGA2 = int(h.GA2)\nNM2 = int(h.NM2)\nAM = int(h.AM)\nGA = int(h.GA)\nNM = int(h.NM)\n\nnumc = h.numc # numc -- number of cells\n\nh(\"double pmat[CTYPi][CTYPi]\")\npmat = h.pmat\n\nix = h.ix\nixe = h.ixe\n\nh(\"double div[CTYPi][CTYPi]\")\ndiv = h.div\n\nh(\"double wmat[CTYPi][CTYPi][STYPi]\")\nwmat = h.wmat\n\nh(\"double wd0[CTYPi][CTYPi][STYPi]\")\nwd0 = h.wd0\n\nh(\"double delm[CTYPi][CTYPi]\")\ndelm = h.delm\n\nh(\"double deld[CTYPi][CTYPi]\")\ndeld = h.deld\n\nh(\"double conv[CTYPi][CTYPi]\")\nconv = h.conv\n\nh(\"double synloc[CTYPi][CTYPi]\")\nsynloc = h.synloc\n\nh(\"double syty1[CTYPi][CTYPi]\")\nsyty1 = h.syty1\n\nh(\"double 
syty2[CTYPi][CTYPi]\")\nsyty2 = h.syty2\n\n# ice - return True iff ct is an inhibitory cell\nice = h.ice\n\nSOMA = h.SOMA\nDEND = h.DEND\nAXON = h.AXON\n\nh(\"objref cells\")\nh(\"cells=new List()\")\ncells = h.cells # list of cells\n\n","sub_path":"sam_code/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"482727812","text":"List=[[1],[2],[3],[4],[5]]\r\nfor i in List:\r\n X=List.pop(0)\r\n for i in X:\r\n X=X.pop(0)\r\n print(X)\r\n List.append(X)\r\n print(List)\r\n\r\n\r\n\r\n\"\"\"Space Compexity: In the script above, the function accepts a list of integers and returns a list with the corresponding squares of integers.\r\nThe algorithm has to allocate memory for the same number of items as in the input list.\r\nTherefore, the space complexity of the algorithm becomes O(n).\"\"\"\r\n\r\n\"\"\"Time Complexity: O(n'2)In the script above, you can see that we have an outer loop that iterates through all the items in the input list and then a nested inner loop,\r\nwhich again iterates through all the items in the input list.\r\nThe total number of steps performed is n * n, where n is the number of items in the input array.\"\"\"\r\n","sub_path":"Question8.py","file_name":"Question8.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"578969535","text":"import re\nfrom flask import (\n Blueprint, redirect, render_template,\n request, flash, session, url_for, abort\n)\nfrom ..models.user import User\nfrom .auth import login_required\n\n\nblueprint = Blueprint('profile', __name__, url_prefix='/profile')\n\n\n@blueprint.route('/', methods=['GET'])\n@login_required\ndef index():\n uni = session['uni']\n user = User.find_by_uni(uni)\n if user:\n return render_template(\n 'profile/index.html', user=user, uni=uni,\n path=[('#', user.username)], curr_tab='Profile'\n )\n else:\n abort(404)\n\n\n@blueprint.route('/view/', methods=['GET'])\n@login_required\ndef view(uni):\n user = User.find_by_uni(uni)\n uni = session['uni']\n if not user:\n abort(404)\n if user.uni == uni:\n return redirect(url_for('profile.index'))\n elif user:\n return render_template(\n 'profile/index.html', user=user, uni=uni,\n path=[('#', user.username)]\n )\n else:\n abort(404)\n\n\n@blueprint.route('/edit', methods=['GET', 'POST'])\n@login_required\ndef edit():\n uni = session['uni']\n user = User.find_by_uni(uni)\n if request.method == 'POST':\n user.email = request.form['email']\n user.personal_des = request.form['personal_des']\n user.username = request.form['username']\n user.major = request.form['major']\n error = False\n if not user.email:\n flash('Email is required.')\n error = True\n elif not re.fullmatch(r'[^@]*@[^@]*', user.email):\n flash('Invalid email address.')\n error = True\n if error:\n return redirect('')\n else:\n user.save(update=True)\n return redirect(url_for('profile.index'))\n return render_template(\n 'profile/edit.html', user=user,\n path=[\n ('/profile', user.username),\n ('#', 'Edit Profile')\n ],\n curr_tab='Profile'\n )\n","sub_path":"server/core/controllers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"11853072","text":"# Copyright (c) 2010-2011 OpenStack, LLC.\n# Copyright (c) Nexenta Systems Inc.\n#\n# Licensed under the Apache License, 
Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom lfs import LFS, LFSDefault, LFSStatus\nfrom swift.common.utils import readconf\n\nzfs_supported = True if os.uname()[0] == 'SunOS' else False\n\nif zfs_supported:\n from lfszfs import LFSZFS\n\ndef get_lfs(conf, srvdir):\n fs = conf.get('fs', 'xfs')\n if conf.has_key('__file__'):\n fs_conf = readconf(conf['__file__'], fs)\n conf = dict(conf, **fs_conf)\n if fs == 'xfs':\n return LFSDefault(conf, srvdir)\n elif zfs_supported and fs == 'zfs':\n return LFSZFS(conf, srvdir)\n else:\n raise 'Cannot load LFS. Invalid FS : %s' %fs\n\n","sub_path":"swift/common/lfs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"456030913","text":"import json\nimport numpy as np\nfrom needle.cases import NeedleTestCase\n\nimport os.path\nimport sys\nTHREEPYDIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir))\nif THREEPYDIR not in sys.path:\n sys.path.insert(0, THREEPYDIR)\nfrom pyserver.flask_app import app, request, Markup, render_template, main\nfrom three import *\n\n\n@app.route('/test/cannon')\ndef _test_cannon():\n scene = Scene()\n scene.add(PointLight(color=0xffffff, intensity=1, distance=100,\n position=[-2, 20, 4]))\n scene.add(Mesh(geometry=SphereBufferGeometry(radius=0.25),\n material=MeshPhongMaterial(color=0xff0000, shading=FlatShading),\n cannonData={'mass': 1, 'shapes': ['Sphere']},\n position=[0, 2, -4]))\n scene.add(Mesh(geometry=BoxGeometry(width=1, height=1, depth=1),\n material=MeshPhongMaterial(color=0x00ff00, shading=FlatShading),\n cannonData={'mass': 1, 'shapes': ['Box']},\n position=[-2, 3, -4]))\n scene.add(Mesh(geometry=CylinderGeometry(radiusTop=0.5, radiusBottom=0.5, height=1, radialSegments=8),\n material=MeshPhongMaterial(color=0x0000ff, shading=FlatShading),\n position=[2, 4, -6],\n cannonData={'mass': 1, 'shapes': ['Cylinder']}))\n scene.add(Mesh(geometry=PlaneBufferGeometry(width=8, height=8),\n material=MeshBasicMaterial(color=0x5555ff),\n position=[0, -2, -4],\n rotation=[-np.pi/2, 0, 0],\n cannonData={'mass': 0, 'shapes': ['Plane']}))\n return render_template('index.html',\n json_config=Markup(r\"\"\"\"\"\" % (json.dumps({'controls': request.args.get('controls')}, indent=2),\n json.dumps(scene.export(), indent=2))))\n\n\nclass CANNONTest(NeedleTestCase):\n def test_screenshot(self):\n self.driver.get('127.0.0.1:5000/test/cannon')\n self.assertScreenshot('canvas', 'cannon_screenshot')\n\n\nif __name__ == \"__main__\":\n #app.run(host='0.0.0.0')\n import logging\n logging.basicConfig(level=(logging.DEBUG if app.debug else logging.INFO),\n format=\"%(levelname)s %(name)s %(funcName)s %(lineno)d: %(message)s\")\n app.config['TESTING'] = True\n main()\n","sub_path":"test/test_cannon.py","file_name":"test_cannon.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"481984394","text":"import 
sys\n\nfrom discord.ext import commands\nfrom utils import checks\n\n\nclass AdminCommands(commands.Cog, name=\"Administration\"):\n \"\"\"A cog where all the server admin commands live\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='disconnect-vc', hidden=True)\n async def disconnect_vc(self, ctx):\n for vc in self.bot.voice_clients:\n if vc.guild == ctx.message.guild:\n return await vc.disconnect()\n\n @commands.command(hidden=True)\n @checks.requires_staff_role()\n async def kill(self, ctx):\n await \\\n ctx.send(\"goodbye :(\")\n sys.exit()\n\n @commands.command(name=\"throw_error\")\n @checks.requires_staff_role()\n async def throw_error(self, ctx):\n raise Exception\n\n\ndef setup(bot):\n bot.add_cog(AdminCommands(bot))\n","sub_path":"src/cogs/admin-commands.py","file_name":"admin-commands.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"313842890","text":"\n\n\n\nimport numpy as np\nimport cPickle as pickle\nfrom scipy import integrate\nimport math\nimport os\n\nfrom astropy.cosmology import WMAP9 as cosmo\nimport astropy.units as u\n\nimport SFHs\nimport dust\n\nthis_dir, this_filename = os.path.split(__file__)\n\nclass SED_creator():\n\n\n\n def __init__(self, model, filters = []):\n \n self.model = model\n \n self.SPS_model = self.model['SPS_model']\n self.SFH_model = self.model['SFH_model']\n self.dust_model = self.model['dust_model']\n \n \n # ----- read in SPS models\n \n self.SPS = pickle.load(open(this_dir + '/SPS/SEDs/'+self.SPS_model+'.p','r'))\n \n self.SPS['metallicities'] = np.log10(self.SPS['metallicities']) # --- use logarithmic metallicities\n \n \n # ---- Define parameters and define default prior limits (can update for each object)\n \n\n \n self.parameters = {}\n \n self.parameters['z'] = {}\n self.parameters['z']['uniform_prior_limits'] = [0.,20.]\n \n self.parameters['log10M*'] = {}\n self.parameters['log10M*']['uniform_prior_limits'] = [5.,13.]\n \n \n self.parameters['log10age'] = {}\n self.parameters['log10age']['uniform_prior_limits'] = [1., np.log10(self.SPS['ages'][-1])]\n \n self.parameters['log10Z'] = {}\n self.parameters['log10Z']['uniform_prior_limits'] = [self.SPS['metallicities'][0], self.SPS['metallicities'][-1]]\n \n \n if self.SFH_model == 'exp': \n \n self.parameters['tau'] = {}\n self.parameters['tau']['uniform_prior_limits'] = [-10000., 10000.]\n \n if self.dust_model == 'Calzetti': \n \n self.parameters['tau_V'] = {} \n self.parameters['tau_V']['uniform_prior_limits'] = [0.,5.]\n \n \n \n \n self.parameters['escape_fraction'] = {}\n self.parameters['escape_fraction']['uniform_prior_limits'] = [0.,1.]\n \n \n\n self.filters = filters\n\n self.ages = np.arange(0.,15000.,1.) \n\n\n \n\n \n \n \n # ----- read in fraction of material remaining \n \n # some fraction of material turned into stars gets returned to the ISM (through winds, SN, etc.). 
The fraction returned depends on the age and metallicity of the stellar population.\n\n R = pickle.load(open(this_dir + '/SPS/RemainingFraction/'+self.SPS_model+'.p','r'))\n \n self.Remaining = [ np.interp(self.ages, R['ages'], R['total'][log10Z]) for log10Z in R['metallicities'] ]\n \n # ----- read in filter transmission functions\n \n \n # -----------------\n # Read in filter transmission curves\n\n\n self.T = {}\n\n for f in filters: \n \n self.T[f] = {}\n \n d = np.loadtxt(this_dir + '/filters/'+'/'.join(f.split('.'))+'.txt').T\n \n self.T[f]['lam'] = d[0]/1E4\n self.T[f]['T'] = d[1]\n \n self.T[f]['meanwv'] = np.exp(integrate.trapz(np.log(self.T[f]['lam'])*self.T[f]['T']/self.T[f]['lam'],x=self.T[f]['lam'])/integrate.trapz(self.T[f]['T']/self.T[f]['lam'],x=self.T[f]['lam']))\n self.T[f]['pivwv'] = np.sqrt(integrate.trapz(self.T[f]['lam'] * self.T[f]['T'], x = self.T[f]['lam'])/integrate.trapz( self.T[f]['T'] / self.T[f]['lam'],x=self.T[f]['lam'])) \n self.T[f]['bandw'] = self.T[f]['meanwv'] * np.sqrt(integrate.trapz((np.log(self.T[f]['lam']/self.T[f]['meanwv'])**2)*self.T[f]['T']/self.T[f]['lam'],x=self.T[f]['lam']))/np.sqrt(integrate.trapz(self.T[f]['T']/self.T[f]['lam'],x=self.T[f]['lam']))\n \n \n self.filters_pivots = [self.T[f]['pivwv'] for f in self.filters]\n \n \n\n \n \n \n \n \n def create_SFH(self, p):\n \n p['age'] = int(10**p['log10age'])\n \n self.SFH_unnormalised = getattr(SFHs, self.SFH_model)(self.ages, p)\n \n # ----- find metallicities which bracket the given value\n \n idx = (np.abs(self.SPS['metallicities'] - p['log10Z'])).argmin()\n\n if p['log10Z'] > self.SPS['metallicities'][idx]: \n Zlow = idx\n Zhigh = idx + 1\n else:\n Zlow = idx - 1\n Zhigh = idx \n \n \n \n scale = (p['log10Z'] - self.SPS['metallicities'][Zlow])/(self.SPS['metallicities'][Zhigh] - self.SPS['metallicities'][Zlow])\n \n \n self.SFH_unnormalised_Remaining = self.SFH_unnormalised * ( (1.0 - scale)*self.Remaining[Zlow] + scale*self.Remaining[Zhigh])\n \n \n \n total_SF_Remaining = np.sum(self.SFH_unnormalised_Remaining)\n\n self.SFH = self.SFH_unnormalised/total_SF_Remaining # ---- renormalise the SFH so it integrates to 1.0 after recycling is included\n\n \n \n def create_SED(self, p):\n \n p['age'] = int(10**p['log10age'])\n \n ef = p['escape_fraction'] # ---- escape fraction\n \n self.create_SFH(p) # ---- must first create SFH.\n \n \n # ----- find metallicities which bracket the given value\n \n idx = (np.abs(self.SPS['metallicities'] - p['log10Z'])).argmin()\n\n if p['log10Z'] > self.SPS['metallicities'][idx]: \n Zlow = idx\n Zhigh = idx + 1\n else:\n Zlow = idx - 1\n Zhigh = idx \n \n scale = (p['log10Z'] - self.SPS['metallicities'][Zlow])/(self.SPS['metallicities'][Zhigh] - self.SPS['metallicities'][Zlow])\n \n \n # ----- create intrinsic SED\n \n SPS_ages = self.SPS['ages']\n\n SPS_ages_midpoints = 10**((np.log10(SPS_ages[1:]) + np.log10(SPS_ages[:-1])) / 2.)\n\n SPS_ages_midpoints = SPS_ages_midpoints.astype(int)\n\n\n previous_SPS_age_midpoint = 0.\n\n self.lam = self.SPS['lam']\n\n\n self.Lnu_incident_Zlow = np.zeros(self.lam.shape) # --- the pure stellar SED\n self.Lnu_transmitted_Zlow = np.zeros(self.lam.shape) # --- the pure stellar SED which is actually transmitted\n self.Lnu_nebular_Zlow = np.zeros(self.lam.shape) # --- the nebular SED\n\n self.Lnu_incident_Zhigh = np.zeros(self.lam.shape) # --- the pure stellar SED\n self.Lnu_transmitted_Zhigh = np.zeros(self.lam.shape) # --- the pure stellar SED which is actually transmitted\n self.Lnu_nebular_Zhigh = np.zeros(self.lam.shape) 
# --- the nebular SED\n\n\n for i, SPS_age, SPS_age_midpoint in zip(np.arange(0,len(SPS_ages)), SPS_ages[:-1],SPS_ages_midpoints):\n\n SF = np.sum(self.SFH[previous_SPS_age_midpoint:SPS_age_midpoint])\n \n self.Lnu_incident_Zlow += SF * self.SPS['incident'][Zlow][i]\n self.Lnu_transmitted_Zlow += SF * self.SPS['transmitted'][Zlow][i]\n self.Lnu_nebular_Zlow += SF * self.SPS['nebular'][Zlow][i]\n \n self.Lnu_incident_Zhigh += SF * self.SPS['incident'][Zhigh][i]\n self.Lnu_transmitted_Zhigh += SF * self.SPS['transmitted'][Zhigh][i]\n self.Lnu_nebular_Zhigh += SF * self.SPS['nebular'][Zhigh][i]\n \n previous_SPS_age_midpoint = SPS_age_midpoint\n\n\n self.Lnu_incident = (1. - scale)*self.Lnu_incident_Zlow + scale*self.Lnu_incident_Zhigh\n self.Lnu_transmitted = (1. - scale)*self.Lnu_transmitted_Zlow + scale*self.Lnu_transmitted_Zhigh\n self.Lnu_nebular = (1. - scale)*self.Lnu_nebular_Zlow + scale*self.Lnu_nebular_Zhigh\n\n self.Lnu_intrinsic = (1. - ef)*self.Lnu_nebular + (1. - ef)*self.Lnu_transmitted + ef*self.Lnu_incident\n\n\n if self.dust_model == 'Calzetti':\n \n T_nebular, T_stellar = dust.Calzetti(self.lam, p)\n \n self.Lnu = T_nebular*(1. - ef)*self.Lnu_nebular + T_stellar*(1. - ef)*self.Lnu_transmitted + T_stellar*ef*self.Lnu_incident\n \n if 'log10M*' in p.keys():\n \n self.Lnu = self.Lnu * 10**p['log10M*']\n \n \n \n \n def calculate_fluxes(self, p):\n\n z = p['z']\n\n self.create_SED(p)\n \n self.BBLnu = {} # --- broad-band luminosities\n self.BBfnu = {} # --- broad-band fluxes\n \n self.lamz = self.lam * (1. + z) # redshift intrinsic SED\n \n D_l = cosmo.luminosity_distance(z).to('cm').value\n \n self.fnu = ((1.+z)/(4.*math.pi*D_l**2))*self.Lnu\n \n \n for f in self.filters:\n \n nT = np.interp(self.lamz, self.T[f]['lam'], self.T[f]['T'])\n \n self.BBLnu[f] = integrate.trapz((1./self.lamz)*self.Lnu * nT, x = self.lamz) / integrate.trapz((1./self.lamz) * nT, x = self.lamz)\n \n self.BBfnu[f] = integrate.trapz((1./self.lamz)*self.fnu * nT, x = self.lamz) / integrate.trapz((1./self.lamz) * nT, x = self.lamz)\n \n self.BBfnu[f] *= 1E32 # convert to nJy\n \n \n # print f, self.L[f]\n\n self.BBLnus = np.array([self.BBLnu[f] for f in self.filters])\n self.BBfnus = np.array([self.BBfnu[f] for f in self.filters])\n\n\n def calculate_magnitudes(self): # assumes calculate_fluxes() has already been run.\n \n self.m = {}\n self.M = {}\n \n d = (10. 
* u.pc).to('cm').value\n \n for f in self.filters:\n \n self.M[f] = -2.5*np.log10(self.BBLnu[f]/(4.*math.pi*d**2)) - 48.6\n \n self.m[f] = -2.5*np.log10(self.BBfnu[f]/(3631.*1E9))\n \n\n\n\n\n\n","sub_path":"FSAT_burst/SED_creator.py","file_name":"SED_creator.py","file_ext":"py","file_size_in_byte":9550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"66792559","text":"#!/usr/bin/env python3\n\nimport sys\n\ninput = open(sys.argv[1]) if len(sys.argv) == 2 else sys.stdin\n\ndirs = [(1,0), (0,-1), (-1,0), (0,1)]\n\nx, y = 0, 0\ndx, dy = 10, 1\n\nfor line in input.read().splitlines():\n c, n = line[0], int(line[1:])\n if c == 'N':\n dy += n\n elif c == 'S':\n dy -= n\n elif c == 'E':\n dx += n\n elif c == 'W':\n dx -= n\n elif c == 'F':\n x += n*dx\n y += n*dy\n elif c == 'R':\n n %= 360\n if n == 90:\n dx, dy = dy, -dx\n elif n == 180:\n dx, dy = -dx, -dy\n elif n == 270:\n dx, dy = -dy, dx\n elif c == 'L':\n n %= 360\n if n == 90:\n dx, dy = -dy, dx\n elif n == 180:\n dx, dy = -dx, -dy\n elif n == 270:\n dx, dy = dy, -dx\n\nprint(abs(x) + abs(y))\n","sub_path":"2020/12b.py","file_name":"12b.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"367794886","text":"def get_seq_int(x): # x es un objeto cualquiera\r\n s=repr(x) # s es la cadena que representa x\r\n seq=[] # seq es una lista de enteros\r\n for c in s:\r\n seq.append(ord(c))\r\n return seq\r\n\r\nx=\"Esto es una cadena ejemplo, con caracteres diversos: -+*#_/&%\"\r\ns=get_seq_int(x)\r\nprint(s)\r\n","sub_path":"1ºCurso/Fundamentos-de-la-programacion/2_Tablas asociativas y diccionarios en Python/Implementaciones en Python/get_seq_int.py","file_name":"get_seq_int.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"596487930","text":"from readdata import *\nfrom localsearch import *\nfrom CSP import *\nfrom readdata import *\nfrom course_setup import *\n\n\n# Important: prefers classes with less pre_reqs!\ndef iterative_less_conflicts_have_to_take():\n course_lst = []\n for course in specified_courses:\n if specified_courses[course][3] == 1:\n course_lst.append(course)\n for course in course_lst:\n\t\t# print course\n if get_prereqs(course) != ['None']:\n course_lst += get_prereqs(course)\n\n if honor_flag == 1:\n additional_lst = random.sample(list(courses), 10 - len(course_lst))\n else:\n additional_lst = random.sample(list(courses), 12 - len(course_lst))\n\n total_list = course_lst + additional_lst\n\n while count_courselist_totalviolations(total_list)[0] != 0:\n\t\t# delete biggest violation course from course_lst\n (number_of_violations, violation_list) = count_courselist_totalviolations(total_list)\n\n maxi_index = 0\n maxi = -99999.\n for i in range(len(course_lst), len(total_list)):\n \tif violation_list[i] > maxi:\n \t\tmaxi = violation_list[i]\n \t\tmaxi_index = i\n\n deleted_class = total_list[maxi_index]\n\n if deleted_class not in additional_lst:\n \ttotal_list.remove(deleted_class)\n \tadditional_lst = [x for x in total_list if x not in course_lst]\n else:\n additional_lst.remove(deleted_class)\n total_list.remove(deleted_class)\n\t\t# loop through courses not in course_lst such that adding it doesn't add violation or violation decreases\n # add first one found\n course_names = courses.keys()[:]\n random.shuffle(course_names)\n for course in course_names:\n if course != 
deleted_class and course not in total_list:\n additional_lst.append(course)\n total_list.append(course)\n if count_courselist_totalviolations(total_list)[0] < number_of_violations:\n break\n else:\n additional_lst.remove(course)\n total_list.remove(course)\n # next iteration of while loop\n # print count_courselist_totalviolations(total_list)[0]\n return total_list\n\n\ndef set_CSP_initial():\n CSP_initial_lst = CSP_initial[:]\n\n course_lst = []\n for course in specified_courses:\n if specified_courses[course][3] == 1:\n course_lst.append(course)\n for course in course_lst:\n\t\t# print course\n if get_prereqs(course) != ['None']:\n course_lst += get_prereqs(course)\n\n for i in range(len(reqs)):\n \tfor course in course_lst:\n \t\tif course in CSP_initial_lst:\n \t\t\tcontinue\n \t\tif reqs[i] in get_requirements(course):\n \t\t\t# print reqs[i], course\n \t\t\tCSP_initial_lst[i] = course\n \t\t\tbreak\n \n return CSP_initial_lst\n\n# print set_CSP_initial()\n\n","sub_path":"have_to_take.py","file_name":"have_to_take.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"30804286","text":"#! /usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport os\nimport json\nimport signal\nimport tempfile\nfrom contextlib import closing\nfrom datetime import datetime\n\nimport cdms2\nimport cwt\nimport django\nimport redis\nimport requests\nimport zmq\nfrom celery import shared_task\nfrom celery.signals import celeryd_init\nfrom celery.utils.log import get_task_logger\nfrom cwt.wps_lib import metadata\nfrom cwt.wps_lib import operations\nfrom OpenSSL import crypto\n\nfrom wps import models\nfrom wps import settings\nfrom wps import wps_xml\nfrom wps.auth import oauth2\nfrom wps.auth import openid\nfrom wps.processes import get_process\nfrom wps.processes import CWTBaseTask\n\nlogger = get_task_logger('wps.tasks')\n\nURN_AUTHORIZE = 'urn:esg:security:oauth:endpoint:authorize'\nURN_RESOURCE = 'urn:esg:security:oauth:endpoint:resource'\n\nclass WPSTaskError(Exception):\n pass\n\ndef create_job(server, status=None, result=None):\n \"\"\" Creates a Job entry. \"\"\"\n if status is None:\n status = metadata.ProcessStarted()\n\n job = models.Job(server=server)\n\n job.save()\n\n job.status_set.create(status=str(status))\n \n return job\n\ndef create_status_location(host, job_id, port=None):\n \"\"\" Format status location. \"\"\"\n loc = 'http://{0}'.format(host)\n\n if port is not None:\n loc = '{0}:{1}'.format(loc, port)\n\n loc = '{0}/wps/job/{1}'.format(loc, job_id)\n\n return loc\n\ndef create_socket(host, port, socket_type):\n \"\"\" Create a ZMQ socket. \"\"\"\n context = zmq.Context.instance()\n\n socket = context.socket(socket_type)\n\n socket.connect('tcp://{0}:{1}'.format(host, port))\n\n return socket\n\ndef default_server():\n \"\"\" Retreives the default server. 
\"\"\"\n try:\n return models.Server.objects.get(host='default')\n except models.Server.DoesNotExist:\n raise WPSTaskError('Default server does not exist')\n\ndef monitor_handler(**kwargs):\n \"\"\" Monitor CDAS2 queue.\n\n Create a monitor for each CDAS2 instance.\n \"\"\"\n logger.info('celeryd_init, starting monitors')\n\n instances = models.Instance.objects.all()\n\n try:\n for i in instances:\n monitor_cdas.delay(i.id)\n except django.db.utils.ProgrammingError:\n logger.info('Database does not appear to be setup, not starting monitors')\n\n@shared_task\ndef handle_response(data):\n \"\"\" Handle CDAS2 responses.\n\n Convert the CDAS2 response to the appropriate WPS operation response.\n \"\"\"\n job_id, _, response = data.split('!')\n\n logger.info('Handling CDAS2 response for job %s', job_id)\n\n try:\n job = models.Job.objects.get(pk=job_id)\n except models.Job.DoesNotExist:\n # Really should never hist this point\n logger.exception('Job %s does not exist', job_id)\n\n return\n\n error = wps_xml.check_cdas2_error(response)\n\n if error is not None:\n job.failed(error)\n\n return\n\n cap = 'capabilities' in data\n\n desc = 'processDescription' in data\n\n if cap or desc:\n try:\n if cap:\n local_procs = models.Process.objects.filter(backend='local')\n\n result = wps_xml.capabilities_response(response, local_procs)\n else:\n result = wps_xml.describe_process_response_from_cdas2(response)\n except Exception as e:\n logger.exception('Failed to convert CDAS2 response: %s', e.message)\n\n job.failed(e.message)\n\n return\n\n if cap:\n job.server.capabilities = result.xml()\n\n job.server.save()\n\n identifiers = [x.identifier for x in result.process_offerings]\n\n logger.info('Queueing DescribeProcess for following proccesses: %s', identifiers)\n\n describe.delay(job.server.id, identifiers)\n else:\n result = wps_xml.describe_process_response_from_cdas2(response)\n \n server = default_server()\n\n process = models.Process(\n identifier=result.process_description[0].identifier,\n backend='CDAS2',\n description=result.xml())\n \n process.save()\n\n server.processes.add(process)\n\n job.succeeded()\n else:\n output = wps_xml.cdas2_output(response)\n\n job.succeeded(output)\n\n@shared_task\ndef monitor_cdas(instance_id):\n \"\"\" Monitor CDAS2 queue.\n\n Start a handler task for each CDAS2 message that pops off the queue.\n \"\"\"\n try:\n instance = models.Instance.objects.get(pk=instance_id)\n except models.Instance.DoesNotExist:\n logger.info('Instance id \"%s\" does not exist', instance_id)\n\n return\n\n logger.info('Monitoring CDAS instance at %s:%s', instance.host, instance.response)\n\n with closing(create_socket(instance.host, instance.response, zmq.PULL)) as response:\n while True:\n data = response.recv()\n\n handle_response.delay(data)\n\n@shared_task\ndef capabilities(server_id):\n \"\"\" Handles GetCapabilities request. 
\"\"\"\n try:\n server = models.Server.objects.get(pk=server_id)\n except models.Server.DoesNotExist:\n logger.info('Server id \"%s\" does not exist', server_id)\n\n return\n\n logger.info('Gathering \"%s\" capabilities', server.host)\n\n instances = models.Instance.objects.all()\n\n if len(instances) > 0:\n logger.info('Querying CDAS2 instance capabilities')\n\n instance = instances[0]\n\n job = create_job(server)\n\n with closing(create_socket(instance.host, instance.request, zmq.PUSH)) as request:\n request.send(str('{0}!getCapabilities!WPS'.format(job.id)))\n else:\n logger.info('Server has not CDAS2 instances')\n\n@shared_task\ndef describe(server_id, identifiers):\n \"\"\" Handles a DescribeProcess request. \"\"\"\n try:\n # TODO might want a better way of choosing\n instance = models.Instance.objects.all()\n except models.Instance.DoesNotExist:\n logger.info('Instance id \"%s\" does not exist', instance_id)\n\n return\n\n try:\n server = models.Server.objects.get(pk=server_id)\n except models.Instance.DoesNotExist:\n logger.info('Default server does not exist yet')\n\n return\n\n if len(instance) == 0:\n logger.info('No CDAS2 instance to run describe process for %s', identifier)\n\n return\n\n with closing(create_socket(instance[0].host, instance[0].request, zmq.PUSH)) as request:\n for identifier in identifiers:\n job = create_job(server)\n\n request.send(str('{0}!describeProcess!{1}'.format(job.id, identifier)))\n\n@shared_task(bind=True, base=CWTBaseTask)\ndef check_auth(self, **kwargs):\n self.set_user_creds(**kwargs)\n\n user_id = kwargs.get('user_id')\n\n try:\n user = models.User.objects.get(pk=user_id)\n except models.User.DoesNotExist:\n raise Exception('Could not find user')\n\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, user.auth.cert)\n\n fmt = '%Y%m%d%H%M%SZ'\n\n before = datetime.strptime(cert.get_notBefore(), fmt)\n\n after = datetime.strptime(cert.get_notAfter(), fmt)\n\n now = datetime.now()\n\n if (now >= before and now <= after):\n logger.info('Certificate is still valid')\n\n return\n\n logger.info('Certificate has expired, renewing')\n\n if user.auth.type == 'myproxyclient':\n raise Exception('Please relog into MyProxyClient from your user account page.')\n \n oid = openid.OpenID.parse(user.auth.openid)\n\n access = oid.find(URN_AUTHORIZE)\n\n resource = oid.find(URN_RESOURCE)\n\n cert, key, new_token = oauth2.get_certificate(user.auth.token, access.uri, resource.uri)\n\n logger.info('Recieved new token {}, updating certificate'.format(new_token))\n\n user.auth.token = new_token\n\n user.auth.cert = ''.join([cert, key])\n\n user.auth.save()\n\n \n","sub_path":"compute/wps/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"616042935","text":"from typing import Tuple\nimport simplejpeg\nimport cv2\nimport numpy as np\nfrom redisAI import modelRunnerAddOutput, createModelRunner, createTensorFromBlob, modelRunnerAddInput, modelRunnerRun #type: ignore\n\ndef compute_resize_scale(image_shape, min_side=800, max_side=1333):\n (rows, cols, _) = image_shape\n smallest_side = min(rows, cols)\n # rescale the image so the smallest side is min_side\n scale = min_side / smallest_side\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n if largest_side * scale > max_side:\n scale = max_side / largest_side\n return scale\n\ndef resize_image(img, 
min_side=800, max_side=1333):\n # compute scale to resize the image\n scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)\n # resize the image with the computed scale\n img = cv2.resize(img, None, fx=scale, fy=scale)\n return np.expand_dims(img, axis=0), scale\n\n\ndef preprocess(np_img) -> Tuple[np.ndarray, int]:\n np_img = np_img.astype(np.float32)\n #np_img -= [103.939, 116.779, 123.68]\n return resize_image(np_img)\n\ndef run(np_img):\n np_img, _ = preprocess(np_img)\n print(np_img.shape)\n print(np_img.dtype)\n model_key = \"mymodel\"\n model_runner = createModelRunner(model_key)\n for i in range(3):\n print(i)\n modelRunnerAddOutput(model_runner, f\"output{i}\")\n input_tensor = createTensorFromBlob(\"FLOAT\", list(np_img.shape), bytearray(np_img.tobytes()))\n modelRunnerAddInput(model_runner, \"input\", input_tensor)\n #The following call leads to crash\n outputs = modelRunnerRun(model_runner)\n print(len(outputs))\n\ndef decode(data) -> np.ndarray:\n return simplejpeg.decode_jpeg(data)\n\ngb = GearsBuilder(\"KeysReader\", \"image\")\ngb.map(lambda elem: decode(elem[\"value\"]))\ngb.map(run)\ngb.run()","sub_path":"gear.py","file_name":"gear.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"319829753","text":"import datetime\nimport os\n\nfrom flask import render_template, send_from_directory, request, flash, redirect, send_file\nfrom flask_restful import abort\nfrom flask_script import Manager\nfrom flask_whooshalchemy import whoosh_index\nfrom werkzeug.utils import secure_filename\nfrom transliterate import translit, detect_language\n\nfrom api import create_app\nfrom api.models import db, Author, File, Tag, FileAuthor, TagFile\nfrom api.utils.parser.parser import FolderParser\n\n\n\napp = create_app('development')\napp.config['STATIC_FOLDER'] = 'static'\napp.template_folder = 'templates'\napp.config['MAX_CONTENT_LENGTH'] = 25 * 1024 * 1024 # max file size 25 megabytes\napp.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'data')\ndb.init_app(app)\nmanager = Manager(app)\nwith app.app_context():\n whoosh_index(app, File)\n whoosh_index(app, Author)\n whoosh_index(app, Tag)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/css/')\ndef send_css(path):\n root_dir = os.path.dirname(os.getcwd())\n return send_from_directory(os.path.join(root_dir, 'tef_library_api', 'api', 'static', 'css'), path)\n\n\n@app.route('/js/')\ndef send_js(path):\n root_dir = os.path.dirname(os.getcwd())\n return send_from_directory(os.path.join(root_dir, 'tef_library_api', 'api', 'static', 'js'), path)\n\n\n@app.route('/img/')\ndef send_img(path):\n root_dir = os.path.dirname(os.getcwd())\n return send_from_directory(os.path.join(root_dir, 'tef_library_api', 'api', 'static', 'img'), path)\n\n\n@app.route('/file/', methods=['GET',])\ndef download_file(id):\n root_dir = os.path.dirname(os.getcwd())\n file = File.query.filter_by(id=id).first()\n if file.is_folder:\n abort(400, message=\"There is folder with sended id, try another id\")\n print(file.source_name)\n return send_file(os.path.join(app.config['UPLOAD_FOLDER'], file.full_path, secure_filename(file.source_name)),\n as_attachment=True,\n attachment_filename=file.source_name)\n\n\n@app.route('/file', methods=['POST',])\ndef upload_file():\n\n try:\n post_form = dict(request.form)\n print(post_form)\n if 'file' not in request.files or 'name' not in post_form or 'author' not in post_form:\n 
abort(400)\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n abort(400)\n if file and allowed_file(file.filename):\n saved_file = File()\n saved_file.name = post_form['name']\n if detect_language(file.filename) == 'ru':\n saved_file.source_name = secure_filename(translit(file.filename, reversed=True))\n filename = saved_file.source_name\n print(filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'new', filename))\n saved_file.file_size = os.stat(os.path.join(app.config['UPLOAD_FOLDER'], 'new', filename)).st_size\n if 'year' in post_form:\n saved_file.year = post_form['year']\n\n\n\n\n abort(401)\n #file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n except Exception as e:\n print(\"error: {0}\".format(e))\n abort(400, message=\"error: {0}\".format(e))\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config['SUPPORTED_EXTENSIONS']\n\n\n@app.route('/reparse')\ndef reparse():\n fp = FolderParser(os.path.join(os.getcwd(), 'data'))\n fp.walk()\n fp.rewalk()\n return \"Reparsed\"\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551347797","text":"from django.test import TestCase\n\nfrom webstore.cash.models import Cash\nfrom webstore.product.models import (\n Product,\n Price,\n)\n\n\nclass TestProductModel(TestCase):\n\n def test_product_has_unique_slug_created(self):\n test_name = 'the machine that goes \\'ping\\''\n product_A = Product.objects.create(\n name=test_name,\n )\n product_B = Product.objects.create(\n name=test_name,\n )\n self.assertNotEqual(\n product_A.slug, product_B.slug,\n msg='each product should have an unique slug assigned',\n )\n\n def test_changing_name_does_not_change_slug(self):\n product = Product(\n name='Red Leicester',\n )\n product.save()\n\n slug = product.slug\n product.name = 'Norwegian Jarlsberg'\n product.save()\n self.assertEqual(\n product.slug, slug,\n msg='product slug should stay the same after name update',\n )\n\n def test_get_absolute_url(self):\n product = Product.objects.create(name='The Holy Grail')\n self.assertEqual(\n first='/product/' + product.slug + '/',\n second=product.get_absolute_url(),\n )\n\n def test_get_latest_price(self):\n product = Product.objects.create(\n name='test product',\n )\n prices = Price.objects.bulk_create([\n Price(value=12.0101, valid_from='2018-01-01', product=product),\n Price(value=45.345, valid_from='2017-04-05', product=product),\n Price(value=78.9999, valid_from='2016-08-12', product=product),\n ])\n self.assertIsInstance(product.get_price, Cash)\n self.assertEqual(\n product.get_price,\n Cash('12.0101'),\n )\n\n def test_behaviour_if_no_price_is_set(self):\n product = Product.objects.create(\n name='test product',\n )\n self.assertEqual(\n product.get_price,\n None,\n )\n\n\nclass TestPriceModel(TestCase):\n\n def test_price_type(self):\n product = Product.objects.create(\n name='test product',\n )\n price = Price(\n value=12.0101,\n valid_from='2018-01-01',\n product=product\n )\n self.assertIsInstance(price.value, Cash)\n","sub_path":"webstore/product/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"620891442","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nfrom zope.interface import implements\n\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.app.portlets.portlets import base\n\nfrom zope import schema\nfrom zope.formlib import form\n\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFCore.utils import getToolByName\n\ntry:\n from Products.LinguaPlone.interfaces import ITranslatable\n LINGUAPLONE_SUPPORT = True\nexcept ImportError:\n # Linguaplone not installed\n LINGUAPLONE_SUPPORT = False\n\nfrom plone.app.vocabularies.catalog import SearchableTextSourceBinder\nfrom plone.app.form.widgets.uberselectionwidget import UberSelectionWidget\nfrom plone.app.controlpanel.widgets import MultiCheckBoxVocabularyWidget\nfrom plone.memoize import instance\n\nfrom unice.portlet.mot import MotPortletMessageFactory as _\nfrom zope.i18nmessageid import MessageFactory\n__ = MessageFactory(\"plone\")\n\nfrom Acquisition import aq_inner\nfrom collective.contentleadimage.config import IMAGE_FIELD_NAME\nfrom collective.contentleadimage.config import IMAGE_CAPTION_FIELD_NAME\nfrom zope.component import getUtility\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom collective.contentleadimage.leadimageprefs import ILeadImagePrefsForm\n\nclass IMotPortlet(IPortletDataProvider):\n\n portlet_title = schema.TextLine(\n title=_(u'Titre du portlet dans le manager'),\n description=_('help_portlet_title',\n default=u'Titre affiché dans l\\'ecran \"@@manage-portlets\". '\n 'Laisser vide pour \"Mot portlet\".'),\n required=False,\n )\n\n custom_header = schema.TextLine(\n title=_(u\"Titre du portlet\"),\n description=_('help_custom_header',\n default=u\"Laisser vide pour afficher le titre le l'élément sélectionné\"),\n required=False,\n )\n\n mot = schema.Choice(title=_(u\"Elément à afficher\"),\n required=True,\n source=SearchableTextSourceBinder(\n {},\n default_query='path:'\n )\n )\n\n extra_id = schema.TextLine(\n title=_(u'Identifiant CSS à ajouter au portlet'),\n description=_('help_extra_id',\n default=u\"\"),\n default=u'',\n required=False,\n )\n extra_css = schema.TextLine(\n title=_(u'Classes CSS à ajouter au portlet'),\n description=_('help_extra_css',\n default=u\"\"),\n default=u'',\n required=False,\n )\n\n omit_header = schema.Bool(\n title=_(u\"Masquer le header du portlet\"),\n description=_('help_omit_header',\n default=u\"\"),\n required=True,\n default=False)\n\n\nclass Assignment(base.Assignment):\n implements(IMotPortlet)\n\n portlet_title = u''\n mot = None\n extra_css = u''\n extra_id = u''\n custom_header = u\"\"\n omit_header = False\n\n def __init__(self, portlet_title=u'', mot=None, extra_css=u'', extra_id=u'', custom_header=None, omit_header=None):\n self.portlet_title = portlet_title\n self.mot = mot\n self.custom_header = custom_header\n self.omit_header = omit_header\n self.extra_css = extra_css\n self.extra_id = extra_id\n\n @property\n def title(self):\n msg = __(u\"Mot portlet\")\n return self.portlet_title or msg\n\n\nclass Renderer(base.Renderer):\n render = ViewPageTemplateFile('motportlet.pt')\n\n @instance.memoizedproperty\n def mot(self):\n if not self.data.mot:\n return None\n\n portal_path = getToolByName(self.context, 'portal_url').getPortalPath()\n item = self.context.restrictedTraverse(\n str(portal_path + self.data.mot),\n None\n )\n\n return item\n\n def header(self):\n return self.data.custom_header or self.mot.Title()\n\n\n\n @property\n def prefs(self):\n portal = 
getUtility(IPloneSiteRoot)\n return ILeadImagePrefsForm(portal)\n\n def contentLeadImage(self, css_class=''):\n context = aq_inner(self.mot)\n field = context.getField(IMAGE_FIELD_NAME)\n titlef = context.getField(IMAGE_CAPTION_FIELD_NAME)\n if titlef is not None:\n title = titlef.get(context)\n else:\n title = ''\n if field is not None:\n if field.get_size(context) != 0:\n scale = self.prefs.desc_scale_name\n return field.tag(context, scale=scale, css_class=css_class, title=title)\n return ''\n\n\n\nclass AddForm(base.AddForm):\n form_fields = form.Fields(IMotPortlet)\n form_fields['mot'].custom_widget = UberSelectionWidget\n\n def create(self, data):\n return Assignment(**data)\n\n\nclass EditForm(base.EditForm):\n form_fields = form.Fields(IMotPortlet)\n form_fields['mot'].custom_widget = UberSelectionWidget\n","sub_path":"Plone/zinstance/src/unice.portlet.mot/unice/portlet/mot/motportlet.py","file_name":"motportlet.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"461539327","text":"# graphSigmoid.py\n# 시그모이드 함수를 그려 보자\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef sigmoid(weight, x, b=0, asc=True):\n if asc is True:\n return 1/(1 + np.exp(-(weight * x + b)))\n else:\n return 1/(1 + np.exp((weight * x + b)))\n\n# np.arange() : 파이썬 range()와 유사\nx = np.arange(-5.0, 5.1, 0.1)\n\n\n# weight가 커질 수록 계단함수에 가까워지고, bias가 커질수록 중심이 왼쪽으로 이동\nweight, bias = 1, 0\ny1 = sigmoid(weight, x)\nmylabel = f'y={weight}*x +{bias}'\nplt.plot(x, y1, color='g', label=mylabel)\n\nweight, bias = 5, 0\ny2 = sigmoid(weight, x, bias)\nmylabel = f'y={weight}*x +{bias}'\nplt.plot(x, y2, color='b', label=mylabel)\n\nweight, bias = 5, 3\ny3 = sigmoid(weight, x, bias)\nmylabel = f'y={weight}*x +{bias}'\nplt.plot(x, y3, color='r', label=mylabel)\n\n# 반전시킨 경우\nweight, bias = 5, 3\ny4 = sigmoid(weight, x, bias, False)\nmylabel = f'y={weight}*x +{bias}'\nplt.plot(x, y4, color='y', label=mylabel)\n\n\nplt.axhline(y=0, color='black', linewidth=1, linestyle='dashed')\nplt.axhline(y=1, color='black', linewidth=1, linestyle='dashed')\n\nplt.title('sigmoid function')\nplt.ylim(-0.1, 1.1)\nplt.legend(loc='best')\n\nfilename = 'sigmoid_function.png'\nplt.savefig(filename)\nprint(filename + ' 파일 저장됨')\n\nprint('finished')\n","sub_path":"Machine Learning/수업 자료/3주차_기계학습 알고리즘/12일차_선형회귀분석_로지스틱 회귀 분석/graphSigmoid.py","file_name":"graphSigmoid.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"484292068","text":"from qgis.core import (QgsProject, NULL, qgsfunction)\n\n@qgsfunction(args='auto', group='Custom')\ndef qr(evento, taller, cc, feature, parent):\n if evento and taller and cc:\n import os, qrcode\n base_dir = QgsProject.instance().fileInfo().path()\n qr_dir = os.path.join(base_dir, \"qr\")\n filename = \"{}-{}-{}\".format(evento, taller, cc.replace('.',).replace(\"'\",))\n url = \"http://qgisusers.co/media/{}.pdf\".format(filename)\n if not os.path.exists(qr_dir):\n\t os.makedirs(qr_dir)\n\n filepath = os.path.join(qr_dir, \"qr-{}.png\".format(filename))\n qr = qrcode.QRCode(box_size=10,border=2)\n qr.add_data(url)\n qr.make(fit=True)\n img = qr.make_image()\n img.save(filepath)\n\n return filepath\n else:\n return NULL\n","sub_path":"code/function_editor.py","file_name":"function_editor.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"517596310","text":"import os\n\npath = \"/home/bridgelabz/Desktop/AllBasicProgram\"\nfiles = []\n# r=root,d=directories, f =files\nfor r, d, f in os.walk(path):\n for file in f:\n if '.py' in file:\n files.append(os.path.join(r, file))\nfor f in files:\n print(f)\n ","sub_path":"BasicPython/Directory.py","file_name":"Directory.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"593520805","text":"# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\nfrom typing import Union, Tuple, Optional, Sequence, overload\nimport numpy as np\nfrom .atoms import AtomArray, AtomArrayStack\n\n@overload\ndef hbond(\n atoms: AtomArray,\n selection1: Optional[np.dnarray] = None,\n selection2: Optional[np.dnarray] = None,\n selection1_type: str = \"both\",\n cutoff_dist: float = 2.5,\n cutoff_angle: float = 120,\n donor_elements: Sequence = ('O', 'N', 'S'),\n acceptor_elements: Sequence = ('O', 'N', 'S'),\n) -> np.ndarray: ...\n@overload\ndef hbond(\n atoms: AtomArrayStack,\n donor_selection: Optional[np.dnarray] = None,\n acceptor_selection: Optional[np.dnarray] = None,\n cutoff_dist: float = 2.5,\n cutoff_angle: float = 120,\n donor_elements: Sequence = ('O', 'N', 'S'),\n acceptor_elements: Sequence = ('O', 'N', 'S')\n) -> Tuple[np.ndarray, np.ndarray]: ...\n\ndef hbond_frequency(mask: np.ndarray) -> np.ndarray: ...","sub_path":"src/biotite/structure/hbond.pyi","file_name":"hbond.pyi","file_ext":"pyi","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"200859707","text":"from __future__ import print_function\nimport os, sys\nimport shutil\n\ndef countLine(inputFile):\n counter = 0\n with open(inputFile, 'r') as fileHandler:\n for line in fileHandler:\n counter += 1\n return counter\n\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\ndef checkExistenceExit(fileName):\n if not os.path.exists(fileName):\n eprint('path: {} doesnt exist'.format(fileName))\n exit(2)\n\ndef readTriplesFromFile(fileName):\n checkExistenceExit(fileName)\n tripleList = []\n with open(fileName, 'r') as fileHandler:\n for line in fileHandler:\n arr = line.strip().split()\n head, tail, relation = arr[0], arr[1], arr[2]\n\n tripleList.append([head, tail, relation])\n return tripleList\n\ndef readListFromFile(fileName):\n checkExistenceExit(fileName)\n fileList = []\n with open(fileName, 'r') as fileHandler:\n for line in fileHandler:\n arr = line.strip().split()\n key = arr[0]\n val = arr[1]\n if len(arr) > 2:\n val = arr[1:]\n fileList.append([key, val])\n return fileList\n\ndef readDictFromFile(fileName):\n \"\"\"\n Assume \n Key Val\n \"\"\"\n checkExistenceExit(fileName)\n\n fileDict = {}\n with open(fileName, 'r') as fileHandler:\n for line in fileHandler:\n arr = line.strip().split()\n key = arr[0]\n val = arr[1]\n if len(arr) > 2:\n val = arr[1:]\n\n fileDict[key] = val\n\n return fileDict\n\ndef writeListToFile(path, inputList):\n with open(path, 'w') as fileHandler:\n for row in inputList:\n each_line = ''\n for elem in row:\n each_line += '{} '.format(elem)\n each_line = each_line.rstrip()\n print(each_line, file=fileHandler)\ndef writeDictToFile(inputDict, fileName):\n\n with open(fileName, 'w') as fileHandler:\n for key, value in inputDict.items():\n print('{} {}'.format(key, value), file=fileHandler)\n\ndef 
removeAndMake(path):\n if os.path.exists(path):\n print('path {} eixsts, remove it ...'.format(path))\n shutil.rmtree(path)\n\n os.mkdir(path)\n\n","sub_path":"network-embedding/tense-benchmarks/preprocessing/bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"135301090","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import *\r\nimport os\r\n\r\n\r\ndesired_width = 320\r\npd.set_option('display.width', desired_width)\r\nnp.set_printoptions(linewidth=desired_width)\r\npd.set_option(\"display.max_columns\", 100)\r\npd.set_option(\"display.max_rows\", 60)\r\n\r\nfiles=[file for file in os.listdir('./Fuel')] #list all CSV files in Sales data folder\r\n\r\nFueldata=pd.DataFrame()\r\n\r\nfor file in files: #merges files into one dataframe\r\n df = pd.read_csv('./Fuel/' + file)\r\n Fueldata = pd.concat([Fueldata, df])\r\n\r\nprint(Fueldata)\r\nFueldata.to_csv('Fueldataall.csv', index=False)\r\n\r\n\r\n##Fliter on columns\r\n\r\ndf=pd.read_csv('Fueldataall.csv', usecols=[0, 4, 7, 9, 10, 13, 15, 16, 17]) ##reads csv file and picks out specific columns\r\n\r\nprint(df.head())\r\n\r\n##Need to add Month, Week Number and Weekday Colummns\r\n\r\n\r\ndf.columns=['Date', 'Depot', 'CardNo', 'Reg','Odometer', 'Los','FuelType','Quantity','UnitPrice'] ##changes column names\r\n\r\nprint(df.head())\r\n\r\ndf.to_csv('Fuelfiltered.csv', index=False)\r\n\r\n\r\nfueldf=pd.read_csv('Fuelfiltered.csv')\r\nprint(fueldf)\r\n\r\nprint(fueldf.isnull().values.any()) #Checks for NAN values\r\nprint(fueldf['Depot'].value_counts())\r\nprint(fueldf.groupby(['Depot']).sum())\r\nfueldf['Date']=pd.to_datetime(fueldf['Date'])\r\nprint(fueldf.dtypes)\r\nfueldf['Month']=fueldf.Date.dt.month\r\nfueldf['Week']=fueldf.Date.dt.week #adds column of what week number according to date column\r\nfueldf['Weekday']=fueldf.Date.dt.weekday_name #adds column of what weekday according to date column\r\nprint(fueldf.head())\r\n\r\nfueldf['SalesValue']=fueldf['Quantity']*fueldf['UnitPrice'] #adds column Sales Value, error because\r\nprint(fueldf.head())\r\n\r\nfueldf.to_csv('Fueldated.csv', index=False)\r\n\r\n\r\nprint(Fueldata.head())\r\n\r\n\r\n","sub_path":"Fuel Analysis.py","file_name":"Fuel Analysis.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"642954962","text":"import telebot\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\nbot = telebot.TeleBot('867393222:AAH2tuVofErol73h53c24A2pcMxtcd7k6N0')\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start_message(message):\r\n bot.send_message(message.chat.id, 'Привет! 
Я - бот, если ты напишешь название мелодии, я найду ее для тебя на сайте noosphere.ru/melody')\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef send_text(message):\r\n bot.send_message(message.chat.id, 'Сейчас найду!')\r\n #--------------------------------------------------\r\n finder = message.text\r\n page = requests.get('https://noosphere.ru/melody?query=' + finder + '#catalog')\r\n\r\n soup = BeautifulSoup(page.text, 'html.parser')\r\n if len(str(soup.find(class_='catalog')).split(\"a href\")) > 1:\r\n downloadLink = 'https://noosphere.ru/' + str(soup.find(class_='catalog')).split(\"a href\")[1].split('\"')[1]\r\n\r\n page2 = requests.get(downloadLink)\r\n soup2 = BeautifulSoup(page2.text, 'html.parser')\r\n downloadLink2 = str(soup2.find(class_='col-md-9 video')).split(\"!--\")[1].split(\"<\")[1].split('\"')[1]\r\n\r\n req = requests.get(downloadLink2, stream=True)\r\n if req.status_code == requests.codes.ok:\r\n with open('music' + '.mp3', 'wb') as a:\r\n a.write(req.content)\r\n\r\n bot.send_audio(message.chat.id, open('music.mp3', 'rb'))\r\n bot.send_message(message.chat.id, \"Вот ваша песня по запросу \" + \"'\" + finder + \"'\")\r\n else:\r\n bot.send_message(message.chat.id, \"Извините, я не нашел такой песни\")\r\n # --------------------------------------------------\r\n\r\nbot.polling()","sub_path":"tgba.py","file_name":"tgba.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63154335","text":"'''Уровень 2\n\nРеализуйте в боте команду /wordcount которая считает слова в присланной фразе. Например на запрос /wordcount Привет как дела бот должен ответить: 3 слова. Не забудь��е:\n\n Добавить проверки на пустую строку\n Как можно обмануть бота, какие еще проверки нужны?\n\n'''\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport re\n\nPROXY = {\n 'proxy_url': 'socks5://t1.learn.python.ru:1080',\n 'urllib3_proxy_kwargs': {\n 'username': 'learn',\n 'password': 'python'\n }\n}\ndef str_is_valid(user_message):\n if len(user_message) == 0:\n return False\n for word in user_message:\n return bool(re.match(r'[\\d\\wа-яА-Я]', word))\n return True\n\ndef word_counter(bot, update):\n user_text = update.message.text\n user_text = user_text.replace(r'/wordcount', '').strip()\n user_text = re.sub(r\"[.,:!?]\", \" \", user_text)\n user_text = user_text.split()\n if str_is_valid(user_text):\n update.message.reply_text(f'Вы отправили {len(user_text)} слов(а).')\n else:\n update.message.reply_text('No Valid')\n\ndef main():\n mybot = Updater(\"1038068418:AAESf5kWPyUwSdRUrlJdICk_ynHphYVgLEo\", request_kwargs=PROXY)\n dp = mybot.dispatcher\n dp.add_handler(CommandHandler('wordcount', word_counter))\n mybot.start_polling()\n mybot.idle()\n\nif __name__ == \"__main__\":\n main()","sub_path":"wordcountbot.py","file_name":"wordcountbot.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"12306496","text":"import json\nimport requests\n#for alpha vantage API\n\ndef readAPIKey(filepath):\n\twith open(\"../backend/APIKey.txt\", 'r') as f:\n\t\treturn f.read().replace(\"\\n\",\"\")\n\tf.close()\n\ndef callAPI(function, symbol):\n\tapiKey = readAPIKey(\"APIKey.txt\")\n\tparam = {\"function\": function, \"symbol\": symbol, \"apikey\": apiKey}\n\tresponse = requests.get(url='https://www.alphavantage.co/query?', params=param)\n\tdata = json.loads(response.text)\n\treturn 
data\n\n\n","sub_path":"mysite/stocks/callAPI.py","file_name":"callAPI.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"313824461","text":"#!/usr/local/bin/python3\n\n\nfrom flask import Flask, send_file, request, make_response\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return send_file(\"butler.png\", mimetype='image/png')\n\n\n@app.route('/xss', methods =['GET'])\ndef XSS1():\n param = request.args.get('param', 'not set')\n\n html = open('xss.html').read()\n # check param\n param = 'hello'\n resp = make_response(html.replace('{{ param }}', param))\n return resp\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080)\n\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211320548","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import redirect,reverse\nfrom django.utils.deprecation import MiddlewareMixin\nimport re\n\nfrom App.models import User\n\n\nclass UserLoginMiddleware(MiddlewareMixin):\n def process_request(self,request):\n\n # 是否登录也没关系的列表(首页,商品列表,商品详情)\n path_list1 = ['/','/ZOL/',\n # r'^/ZOL/goodsearch/(.+\\/){6}',\n # r'^/ZOL/gooddetail/\\d+/',\n '/ZOL/goodsearch/',\n '/ZOL/gooddetail/'\n ]\n\n # 以下的必须登录才能操作\n # render(用于显示数据)\n path_list2 = ['/ZOL/cart/',\n # r'/ZOL/postorder/[-\\w]+/',\n '/ZOL/postorder/',\n '/ZOL/receiveinfo/',\n '/ZOL/api/',\n '/ZOL/pay/',\n '/ZOL/notify/',\n '/ZOL/result/',\n '/ZOL/order/',\n '/ZOL/littleskip/',\n ]\n\n # ajax(用于处理数据)\n path_list3 = [\n '/ZOL/addcart/',\n '/ZOL/numadd/',\n '/ZOL/numreduce/',\n '/ZOL/gooddel/',\n '/ZOL/selectchange/',\n '/ZOL/allselect/',\n '/ZOL/addorder/',\n '/ZOL/orderaddreceive/',\n '/ZOL/buynow/',\n ]\n\n path_list = path_list2 + path_list3\n\n\n # 若使用正则后面匹配的情况过多,但头部的都一样,因而取头部进行匹配\n if request.path not in ['/','/ZOL/']:\n req_path = '/'.join(request.path.split('/')[:3]) + '/'\n else:\n req_path = request.path\n\n\n # 使用正则匹配,略麻烦\n # if request.path == path_list1[0] or request.path == path_list1[1] or \\\n # re.match(path_list1[2],request.path) or re.match(path_list1[3],request.path):\n # try:\n # user_id = request.session.get('user_id')\n # user = User.objects.get(id=user_id)\n # request.user = user\n # except:\n # pass\n\n if req_path in path_list1:\n try:\n user_id = request.session.get('user_id')\n user = User.objects.get(id=user_id)\n request.user = user\n except:\n pass\n\n elif req_path in path_list:\n # 是否登录\n user_id = request.session.get('user_id')\n if not user_id:\n if req_path in path_list2:\n return redirect(reverse('ZOL:login'))\n else:\n data = {\n 'status': 0,\n 'msg': '您尚未登录, 请先登录!'\n }\n return JsonResponse(data)\n else:\n try:\n user_id = request.session.get('user_id')\n user = User.objects.get(id=user_id)\n request.user = user\n except:\n if req_path in path_list1:\n return redirect(reverse('ZOL:login'))\n else:\n data = {\n 'status': -2,\n 'msg': '用户不存在!'\n }\n\n return JsonResponse(data)\n","sub_path":"middlewares/UserMiddleware.py","file_name":"UserMiddleware.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"234932719","text":"from AgentRun import *\nfrom AgentNet import *\nfrom AgentZoo import *\n\n\n\ndef train__car_racing(gpu_id=None, random_seed=0):\n print('pixel-level state')\n 
rl_agent = (AgentModPPO, AgentInterPPO)[1] # choose DRl algorithm.\n args = Arguments(rl_agent=rl_agent, gpu_id=gpu_id)\n args.if_break_early = True\n args.eval_times2 = 2\n args.eval_times2 = 3\n\n args.env_name = \"CarRacing-v0\"\n args.random_seed = 1943 + random_seed\n args.break_step = int(5e5 * 4) # (2e5) 5e5, used time 25000s\n args.reward_scale = 2 ** -2 # (-1) 80 ~ 900 (1001)\n args.max_memo = 2 ** 11\n args.batch_size = 2 ** 7\n args.repeat_times = 2 ** 4\n args.net_dim = 2 ** 7\n args.max_step = 2 ** 10\n args.show_gap = 2 ** 8 # for Recorder\n args.init_for_training()\n train_agent_mp(args) # train_agent(**vars(args))\n\n\nif __name__ == '__main__':\n # test_conv2d()\n # test_car_racing()\n train__car_racing(random_seed=321)\n","sub_path":"BetaWarning/beta3.py","file_name":"beta3.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"210034897","text":"from typing import List, Optional\nfrom datetime import date\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom src.DataRepository import DataRepository, Universes\nfrom src.util.Features import Features\nfrom src.util.Tickers import Tickers\n\n\nclass Window:\n def __init__(self, window_start: date, trading_win_len: int):\n \"\"\"\n @param window_start: datetime object to indicate the starting date for the window\n @param trading_win_len: the length of the window as integer\n \"\"\"\n # we need to use datetime for start_date as this is how dates are parsed automatically\n\n self.data: DataFrame = pd.read_csv('/Users/vp/PycharmProjects/statarb/resources/all_data.csv', header=[0, 1, 2],\n index_col=0, parse_dates=True)\n self.data.index = self.data.index.values.astype('M8[D]').astype('O') # to turn index into date format\n\n self.window_start: date = window_start\n self.trading_win_len: int = trading_win_len\n self.window_end: date = self.__get_nth_working_day_ahead(self.window_start, self.trading_win_len)\n self.all_tickers = set([i[1] for i in self.data.columns]) # since data has MultiIndex columns,\n\n self.__update_current_window()\n\n def __update_current_window(self):\n \"\"\"\n @return: updates the current_window and removes the dead_tickers\n \"\"\"\n final_day = self.__get_nth_working_day_ahead(target=self.window_end, n=1)\n self.current_window = self.data.loc[self.window_start: final_day]\n self.remove_dead_tickers()\n\n def check_date_equality(self, d1: date, d2: date) -> bool:\n \"\"\"\n the function returns True if the day, month, year are the same\n \"\"\"\n return (d1.day == d2.day and\n d1.month == d2.month and\n d1.year == d2.year)\n\n # Yimiao, please see the functions below\n def __get_nth_working_day_ahead(self, target: date, n: int) -> date:\n # here we want to be able to identify the index of the self.today date (i) and return the date which\n # corresponds to the index of i+n\n # we need to search for the index of today's date in self.data\n idx = np.where(self.data.index == target)[0]\n last_day = self.data.index[-1]\n return min(self.data.iloc[idx + n].index[0], last_day) # so that we do not run out of data\n\n def roll_forward_one_day(self):\n \"\"\"\n @return: updates the current_window and shifts the data by 1 day forward\n \"\"\"\n # here we want to increment the self.today date by calling __get_nth_wortking_day_ahead() with n = 1\n next_starting_day = self.__get_nth_working_day_ahead(target=self.window_start, n=1)\n self.window_start = next_starting_day\n\n # do we also need to 
update the last day?\n self.__update_current_window()\n\n def get_data(self,\n universe: Optional[List[Universes]] = None,\n tickers: Optional[List[Tickers]] = None,\n features: Optional[List[Features]] = None) -> DataFrame:\n \"\"\"\n @param universe: Universes.SNP or Universes.ETFs\n @param tickers: List of tickers to get data for, if not specified returns data for all tickers\n @param features: List of features to get data for, if not specified returns data for all features\n @return:DataFrame\n \"\"\"\n if universe is None:\n if tickers is None and features is None:\n return self.current_window\n\n elif tickers is None:\n return self.current_window.loc[:, pd.IndexSlice[:, :, features]]\n\n elif features is None:\n return self.current_window.loc[:, pd.IndexSlice[:, tickers, :]]\n\n else:\n if tickers is None and features is None:\n return self.current_window[universe]\n\n if tickers is None:\n return self.current_window.loc[:, pd.IndexSlice[universe, :, features]]\n\n if features is None:\n return self.current_window.loc[:, pd.IndexSlice[universe, tickers, :]]\n\n def remove_dead_tickers(self):\n \"\"\"\n @return: updates the value of current_window to only contain the data\n for tickers where there are no missing values\n \"\"\"\n dead_tickers = set()\n for ticker in self.all_tickers:\n column = self.current_window.loc[:, pd.IndexSlice[:, ticker]]\n if any(column.isna().sum()):\n dead_tickers.add(ticker)\n alive_tickers = self.all_tickers - dead_tickers\n self.current_window = self.current_window.loc[:, pd.IndexSlice[:, alive_tickers]]\n\n\nif __name__ == '__main__':\n backtest_start = date(2008, 1, 2) # must be a trading day\n trading_window_length = 60\n window = Window(backtest_start, trading_window_length)\n print(window.current_window.index.values)\n","sub_path":"Window_test.py","file_name":"Window_test.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"489350354","text":"# Server program\n# UDP VERSION\n\nfrom socket import *\nfrom struct import *\n\n# Set the socket parameters\nhost = \"143.54.13.40\"\nport = 3000\nbuf = 1024\naddr = (host,port)\n\n# Create socket and bind to address\nUDPSock = socket(AF_INET,SOCK_DGRAM)\nUDPSock.bind(addr)\n\n# Receive messages\nwhile 1:\n\tdata,addr = UDPSock.recvfrom(buf)\n\tif not data:\n\t\tprint (\"Client has exited!\")\n\t\tbreak\n\telse:\n\t\tprint (\"\\nTimestamp '\", unpack('l', data[0:8]),\"'\")\n\t\tprint (\"\\nName '\", unpack('8c', data[8:16]),\"'\")\n\t\tprint (\"\\nx '\", unpack('d', data[16:24]),\"'\")\n\t\tprint (\"\\ny '\", unpack('d', data[24:32]),\"'\")\n\n# Close socket\nUDPSock.close()\n","sub_path":"UDP_Python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"83215857","text":"#!/usr/bin/env python3\n#!-*- coding:utf-8 -*-\nimport datetime\nimport json\nimport random\nimport sqlite3\nfrom time import sleep\nimport sys\n\nfrom twython import Twython, TwythonError\n\nfrom TwitterCrawler.Util import Util\nfrom TwitterCrawler.Auth import AuthApp\nfrom TwitterCrawler.Config import DBPath, TIME_PATTERN, MAX_STATUS_ID, UPDATE_INTERVAL\n\n__author__ = 'Wengling Chen'\n\nclass Crawler:\n def __init__(self):\n pass\n\n # # Force update all status\n @staticmethod\n def getAllTimeline(username, authMode=2):\n if Util.__checkInterval__(UPDATE_INTERVAL, 'UPDATE'):\n username = str(username).strip()\n conn = 
sqlite3.connect(DBPath)\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n user_id = Crawler.__lookupUserId__(conn, username)\n if user_id is None:\n Crawler.getUserInfo(username, DBConn=conn)\n user_id = Crawler.__lookupUserId__(conn, username)\n if user_id is None:\n print('User screen_name does not exist!')\n conn.close()\n return 1\n\n print('Authorizing application..')\n try:\n if authMode == 1:\n if not AuthApp.checkTokenExist(oauthMode=1):\n AuthApp.authApp()\n twHandler = Twython(AuthApp.getAppKey(), AuthApp.getAppSecret(), AuthApp.getOauthToken(),\n AuthApp.getOauthTokenSecret())\n elif authMode == 2:\n if not AuthApp.checkTokenExist(oauthMode=0):\n AuthApp.authApp2()\n twHandler = Twython(AuthApp.getAppKey(), access_token=AuthApp.getAppAccessToken())\n except TwythonError as e:\n print(e.args[0])\n conn.close()\n return -1\n\n min_id_get = MAX_STATUS_ID\n sleep(5.1)\n\n for i in range(0, 16):\n min_id_get, max_id_get, total_count_get = Crawler.getTimelineById(twHandler, user_id, max_id=min_id_get - 1, count=200, overwrite=1)\n if total_count_get is None:\n print('An error has occured!')\n break\n if total_count_get == 0:\n if i == 0:\n print(\"No Tweet found.\")\n break\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n Util.__updateLastTime__(conn, 'UPDATE')\n conn.close()\n return 0\n\n @staticmethod\n def getNewTimeline(username, authMode=2):\n if Util.__checkInterval__(UPDATE_INTERVAL, 'UPDATE'):\n username = str(username).strip()\n conn = sqlite3.connect(DBPath)\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n user_id = Crawler.__lookupUserId__(conn, username)\n if user_id is None:\n Crawler.getUserInfo(username, DBConn=conn)\n user_id = Crawler.__lookupUserId__(conn, username)\n if user_id is None:\n print('User screen_name does not exist!')\n conn.close()\n return 1\n\n print('Authorizing application..')\n try:\n if authMode == 1:\n if not AuthApp.checkTokenExist(oauthMode=1):\n AuthApp.authApp()\n twHandler = Twython(AuthApp.getAppKey(), AuthApp.getAppSecret(), AuthApp.getOauthToken(),\n AuthApp.getOauthTokenSecret())\n elif authMode == 2:\n if not AuthApp.checkTokenExist(oauthMode=0):\n AuthApp.authApp2()\n twHandler = Twython(AuthApp.getAppKey(), access_token=AuthApp.getAppAccessToken())\n except TwythonError as e:\n print(e.args[0])\n conn.close()\n return -1\n\n min_id, max_id, total_count = Crawler.__checkStatus__(conn, user_id)\n min_id_get = MAX_STATUS_ID\n sleep(5.1)\n\n for i in range(0, 16):\n min_id_get, max_id_get, total_count_get = Crawler.getTimelineById(twHandler, user_id=user_id, since_id=max_id, max_id=min_id_get - 1, count=200, overwrite=0)\n if total_count_get is None:\n print('An error has occured!')\n break\n if total_count_get == 0:\n if i == 0:\n print(\"No Tweet found.\")\n break\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n Util.__updateLastTime__(conn, 'UPDATE')\n conn.close()\n return 0\n\n @staticmethod\n def getHomeTimeline(count_to_get=800, overwrite=1):\n conn = sqlite3.connect(DBPath)\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n if Util.__checkInterval__(UPDATE_INTERVAL, 'UPDATE'):\n if count_to_get > 800:\n count_to_get = 800\n\n print('Authorizing application..')\n try:\n if not AuthApp.checkTokenExist(oauthMode=1):\n AuthApp.authApp()\n twHandler = Twython(AuthApp.getAppKey(), AuthApp.getAppSecret(), AuthApp.getOauthToken(),\n AuthApp.getOauthTokenSecret())\n except TwythonError as e:\n print(e.args[0])\n conn.close()\n return -1\n\n min_id_get = MAX_STATUS_ID\n max_id_get = 0\n 
total_count_get = 0\n run = 1\n sleep(5.1)\n\n while count_to_get > 0:\n print(count_to_get)\n print(min_id_get)\n if count_to_get // 200 > 0:\n this_count = 200\n else:\n this_count = count_to_get % 200\n try:\n resultsList = twHandler.get_home_timeline(max_id=min_id_get - 1, count=this_count)\n except TwythonError as e:\n print(e.args[0])\n continue\n\n\n for results in resultsList:\n if run == 1:\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n min_id_get, max_id_get, total_count_get, run = Crawler.db_write_tweet(results, twHandler, conn, cur, min_id_get, max_id_get, total_count_get, overwrite, update_user=True)\n count_to_get -= total_count_get\n\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n\n conn.commit()\n Util.__updateLastTime__(conn, 'UPDATE')\n conn.close()\n return 0\n\n @staticmethod\n def getTimelineById(twHandler, user_id, since_id=None, max_id=None, count=10, overwrite=1):\n min_id_get = MAX_STATUS_ID\n max_id_get = 0\n total_count_get = 0\n sleep_f = 1\n\n # if Util.__checkInterval__(5, 'NET'):\n if True:\n if type(user_id) is not int:\n print('Invalid user_id!')\n return None, None, None\n\n conn = sqlite3.connect(DBPath)\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n\n print(\"Getting statuses...\")\n try:\n if since_id is None and max_id is None:\n resultsList = twHandler.get_user_timeline(user_id=user_id, count=count, trim_user=1, include_rts=1)\n elif since_id is None:\n resultsList = twHandler.get_user_timeline(user_id=user_id, count=count, max_id=max_id, trim_user=1, include_rts=1)\n elif max_id is None:\n resultsList = twHandler.get_user_timeline(user_id=user_id, since_id=since_id, count=count, trim_user=1, include_rts=1)\n else:\n resultsList = twHandler.get_user_timeline(user_id=user_id, since_id=since_id, count=count, max_id=max_id, trim_user=1, include_rts=1)\n except TwythonError as e:\n print(e.args[0])\n conn.close()\n return None, None, None\n\n for results in resultsList:\n\n min_id_get, max_id_get, total_count_get, run= Crawler.db_write_tweet(results, twHandler, conn, cur, min_id_get, max_id_get, total_count_get, overwrite)\n\n conn.commit()\n # Util.__updateLastTime__(conn, 'NET')\n conn.close()\n return min_id_get, max_id_get, total_count_get\n\n @staticmethod\n def db_write_tweet(results, twHandler, conn, cur, min_id_get, max_id_get, total_count_get, overwrite, update_user=False, rt_count=0):\n cmdValue = '('\n paraValue = '('\n attrList = results.keys()\n contentList = []\n run = 0\n for attr in attrList:\n\n # # Skip some attributes\n if attr == \"id\" or attr == \"in_reply_to_user_id\" or attr == \"in_reply_to_status_id\":\n continue\n\n content = results.get(attr)\n\n ## Rename id_str to status_id_str\n if attr == \"id_str\":\n attr = \"status_id_str\"\n status_id = int(results.get('id_str'))\n if status_id < min_id_get:\n min_id_get = status_id\n if status_id > max_id_get:\n max_id_get = status_id\n\n ## Get user_id_str from 'user' Object\n if attr == \"user\":\n attr = \"user_id_str\"\n content = results.get('user').get('id_str')\n screen_name = results.get('user').get('screen_name')\n if update_user:\n # print(results.get('user').get('name'))\n run = Crawler.getUserInfo(screen_name, conn)\n\n ## Parse dict to string by json\n if attr == \"entities\" or attr == 'extended_entities' or attr == 'place' or attr == 'coordinates' or attr == 'geo':\n content = json.dumps(content, ensure_ascii=True)\n\n if attr == \"scopes\":\n print(results.get('id_str'))\n print(content)\n sys.exit(1)\n continue\n\n if attr == \"retweeted_status\":\n if 
(content is not None):\n retweeted_status_id = content.get('id_str')\n if retweeted_status_id is not None and rt_count < 5:\n Crawler.getTweetById(retweeted_status_id, twHandler, conn, cur, rt_count)\n content = json.dumps(content, ensure_ascii=True)\n\n cmdValue = cmdValue + str(attr) + \",\"\n paraValue = paraValue + \"?,\"\n contentList.append(content)\n if overwrite == 0:\n cmdLine = \"INSERT OR IGNORE INTO UserStatus \" + cmdValue[:-1] + \") VALUES \" + paraValue[:-1] + \");\"\n elif overwrite == 1:\n cmdLine = \"INSERT OR REPLACE INTO UserStatus \" + cmdValue[:-1] + \") VALUES \" + paraValue[:-1] + \");\"\n try:\n cur.execute(cmdLine, tuple(contentList))\n except Exception as e:\n print(e.args[0])\n print(cmdLine)\n print(contentList)\n total_count_get += 1\n return min_id_get, max_id_get, total_count_get, run\n\n @staticmethod\n def getTweetById(id, twHandler, conn, cur, rt_count):\n # TODO Uncompleted method\n # print(\"Getting retweeted id: \" + id)\n # if Util.__checkInterval__(5, 'NET'):\n if True:\n try:\n id = int(id)\n except ValueError:\n print(\"invalid status id\")\n return None\n except Exception as e:\n print(\"An error happens when converting status id:\")\n print(e.args[0])\n return None\n if twHandler is None:\n print(\"No network connection!\")\n return\n\n rt_count += 1\n i = 0\n for i in range(0, 5):\n try:\n result = twHandler.show_status(id=id)\n except TwythonError as e:\n if i == 4 or \"404 (Not Found)\" in e.args[0]:\n return\n\n # print(result)\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n\n dum_min_id_get, dum_max_id_get, dum_total_count_get, run = Crawler.db_write_tweet(result, twHandler, conn, cur, 0, 1, 0, True, True, rt_count)\n if run == 1:\n sleep(random.uniform(-0.4, 1.6) + 5.5)\n\n return\n # Util.__updateLastTime__(None, 'NET')\n\n @staticmethod\n def getUserInfo(username, DBConn=None, force=False):\n if type(username) is not str:\n print('Invalid screen_name!')\n return -1\n # if Util.__checkInterval__(5, 'NET'):\n if True:\n if not AuthApp.checkTokenExist(oauthMode=0):\n AuthApp.authApp2()\n\n self_connect_set = False\n if DBConn is None:\n self_connect_set = True\n conn = sqlite3.connect(DBPath)\n else:\n conn = DBConn\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n Crawler.__checkUserData__(conn, username)\n cur.execute(\"SELECT last_update_time FROM UserData WHERE screen_name = ?\", (username,))\n ret = cur.fetchall()\n\n if len(ret) == 0:\n force = True\n elif ret[0][0] is None or ret[0][0] == '':\n force = True\n elif (datetime.datetime.strptime(Util.utcNowStr(), TIME_PATTERN) \\\n - datetime.datetime.strptime(ret[0][0], TIME_PATTERN)).total_seconds() // 3600 >= 24:\n force = True\n\n if (not force) and Crawler.__checkUserData__(conn, username):\n if self_connect_set:\n conn.close()\n return 0\n\n try:\n print(\"Getting User Information...\")\n twHandler = Twython(AuthApp.getAppKey(), access_token=AuthApp.getAppAccessToken())\n results = twHandler.show_user(screen_name=username)\n except TwythonError as e:\n print(e.args[0])\n if self_connect_set:\n conn.close()\n return -1\n attrList = results.keys()\n cmdValue = '('\n paraValue = '('\n contentList = []\n for attr in attrList:\n\n ## Skip some attributes\n if attr == \"profile_background_image_url_https\" or attr == \"id\" \\\n or attr == \"profile_image_url_https\":\n continue\n\n content = results.get(attr)\n\n ## Rename id_str to user_id_str\n if attr == \"id_str\":\n attr = \"user_id_str\"\n\n ## Parse dict to string by json\n if attr == \"status\" or attr == 
'entities':\n content = json.dumps(content, ensure_ascii=True)\n\n cmdValue = cmdValue + str(attr) + \",\"\n paraValue = paraValue + \"?,\"\n contentList.append(content)\n\n ## Expand short URL to full URL using entities in return JSON Object\n if attr == \"entities\":\n attr = \"expanded_url\"\n if results.get(\"entities\").get(\"url\") is not None:\n urls = results.get(\"entities\").get(\"url\").get(\"urls\")\n for url in urls:\n content = url.get('expanded_url')\n cmdValue = cmdValue + str(attr) + \",\"\n paraValue = paraValue + \"?,\"\n contentList.append(content)\n\n ## Add update time to query\n cmdValue = cmdValue + \"last_update_time,\"\n paraValue = paraValue + \"?,\"\n contentList.append(Util.utcNowStr())\n\n cmdLine = \"INSERT OR REPLACE INTO UserData \" + cmdValue[:-1] + \") VALUES \" + paraValue[:-1] + \");\"\n\n cur.execute(cmdLine, tuple(contentList))\n conn.commit()\n # Util.__updateLastTime__(conn, 'NET')\n if self_connect_set:\n conn.close()\n return 1\n return -1\n\n @staticmethod\n def __lookupUserId__(DBconn, username):\n cur = DBconn.cursor()\n cur.execute(\"SELECT user_id_str FROM UserData WHERE screen_name=?;\", (username,))\n ret = cur.fetchall()\n\n if len(ret) == 0:\n return None\n try:\n user_id = int(ret[0][0])\n except ValueError:\n print(\"invalid user id\")\n return None\n except Exception as e:\n print(\"An error happens when mapping username to id:\")\n print(e.args[0])\n return None\n else:\n return user_id\n\n @staticmethod\n def __checkStatus__(DBconn, user_id):\n cur = DBconn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON;\")\n\n cur.execute(\"SELECT status_id_str FROM UserStatus WHERE user_id_str = ?;\", (str(user_id),))\n ret = cur.fetchall()\n total_count = len(ret)\n if total_count == 0:\n return 0, 400000000000000000, 0\n\n cur.execute(\"SELECT min(status_id_str) FROM UserStatus WHERE user_id_str = ?;\", (str(user_id),))\n ret = cur.fetchall()\n min_id = ret[0][0]\n\n cur.execute(\"SELECT max(status_id_str) FROM UserStatus WHERE user_id_str = ?;\", (str(user_id),))\n ret = cur.fetchall()\n max_id = ret[0][0]\n\n return min_id, max_id, total_count\n\n @staticmethod\n def __minFromFetch__(ret):\n min_id = MAX_STATUS_ID\n for returnValue in ret:\n if int(returnValue[0]) < min_id:\n min_id = int(returnValue[0])\n return min_id\n\n @staticmethod\n def __maxFromFetch__(ret):\n max_id = 0\n for returnValue in ret:\n if int(returnValue[0]) > max_id:\n max_id = int(returnValue[0])\n return max_id\n\n @staticmethod\n def __checkUserData__(DBconn, username):\n cur = DBconn.cursor()\n # cur.execute(\"PRAGMA foreign_keys = ON;\")\n cur.execute(\"SELECT name FROM main.sqlite_master WHERE type='table' AND name='UserData';\")\n ret = cur.fetchall()\n if len(ret) == 0:\n cur.execute(\"CREATE TABLE UserData(\"\n \"profile_background_image_url TEXT,\"\n \"status TEXT,\"\n \"followers_count INT NOT NULL,\"\n \"name TEXT NOT NULL,\"\n \"url TEXT,\"\n \"expanded_url TEXT,\"\n \"favourites_count INT NOT NULL,\"\n \"profile_image_url TEXT,\"\n \"is_translation_enabled BOOLEAN NOT NULL,\"\n \"profile_location TEXT,\"\n \"time_zone TEXT,\"\n \"contributors_enabled BOOLEAN NOT NULL,\"\n \"listed_count INT NOT NULL,\"\n \"entities TEXT NOT NULL,\"\n \"user_id_str TEXT PRIMARY KEY NOT NULL,\"\n \"notifications TEXT,\"\n \"profile_background_tile BOOLEAN NOT NULL,\"\n \"profile_background_color TEXT NOT NULL,\"\n \"verified BOOLEAN NOT NULL,\"\n \"profile_text_color TEXT NOT NULL,\"\n \"utc_offset INT,\"\n \"following TEXT,\"\n \"statuses_count INT NOT NULL,\"\n 
\"geo_enabled BOOLEAN NOT NULL,\"\n \"created_at TEXT NOT NULL,\"\n \"profile_link_color TEXT NOT NULL,\"\n \"profile_sidebar_border_color TEXT NOT NULL,\"\n \"profile_sidebar_fill_color TEXT NOT NULL,\"\n \"follow_request_sent TEXT,\"\n \"is_translator BOOLEAN NOT NULL,\"\n \"friends_count INT NOT NULL,\"\n \"default_profile_image BOOLEAN NOT NULL,\"\n \"location TEXT,\"\n \"default_profile BOOLEAN NOT NULL,\"\n \"profile_banner_url TEXT,\"\n \"protected BOOLEAN NOT NULL,\"\n \"profile_use_background_image BOOLEAN NOT NULL,\"\n \"lang TEXT NOT NULL,\"\n \"screen_name TEXT NOT NULL,\"\n \"description TEXT,\"\n \"last_update_time TEXT NOT NULL\"\n \");\")\n DBconn.commit()\n\n cur.execute(\"SELECT name FROM main.sqlite_master WHERE type='table' AND name='UserStatus';\")\n ret = cur.fetchall()\n if len(ret) == 0:\n cur.execute(\"CREATE TABLE UserStatus(\"\n \"contributors TEXT,\"\n \"text TEXT NOT NULL,\"\n \"in_reply_to_screen_name TEXT,\"\n \"favorite_count INT NOT NULL,\"\n \"retweet_count INT NOT NULL,\"\n \"in_reply_to_status_id_str TEXT,\"\n \"place TEXT,\"\n \"favorited BOOLEAN NOT NULL,\"\n \"user_id_str TEXT NOT NULL,\"\n \"retweeted_status TEXT,\"\n \"source TEXT,\"\n \"in_reply_to_user_id_str TEXT,\"\n \"geo TEXT,\"\n \"coordinates TEXT,\"\n \"status_id_str TEXT PRIMARY KEY NOT NULL,\"\n \"lang TEXT NOT NULL,\"\n \"retweeted BOOLEAN NOT NULL,\"\n \"possibly_sensitive BOOLEAN,\"\n \"created_at TEXT NOT NULL,\"\n \"truncated BOOLEAN NOT NULL,\"\n \"entities TEXT NOT NULL,\"\n \"extended_entities TEXT,\"\n \"FOREIGN KEY (user_id_str) REFERENCES UserData(user_id_str) \"\n \");\")\n DBconn.commit()\n cur.execute(\"SELECT name FROM sqlite_master WHERE type = 'trigger' AND name='delete_user';\")\n ret = cur.fetchall()\n if len(ret) == 0:\n cur.execute(\"CREATE TRIGGER delete_user AFTER DELETE ON UserData \"\n \"FOR EACH ROW BEGIN \"\n \"DELETE FROM UserStatus WHERE UserStatus.user_id_str = OLD.user_id_str;\"\n \"END;\")\n DBconn.commit()\n\n cur.execute(\"SELECT screen_name FROM UserData WHERE screen_name = ?;\", (username,))\n ret = cur.fetchall()\n if len(ret) != 0:\n return True\n return False\n\n","sub_path":"TwitterCrawler/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":23758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"90551939","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport tempfile\n\nimport mock\nfrom oslo_config import cfg\nimport six\n\nfrom ironic.common import images\nfrom ironic.common import swift\nfrom ironic.conductor import task_manager\nfrom ironic.drivers.modules import deploy_utils\nfrom ironic.drivers.modules import virtual_media_base\nfrom ironic.drivers import utils as driver_utils\nfrom ironic.tests.unit.db import base as db_base\nfrom ironic.tests.unit.db import utils as db_utils\nfrom ironic.tests.unit.objects import utils as object_utils\n\nif six.PY3:\n import io\n file = io.BytesIO\n\nINFO_DICT = db_utils.get_test_redfish_info()\n\nCONF = cfg.CONF\n\n\nclass VirtualMediaCommonMethodsTestCase(db_base.DbTestCase):\n\n def setUp(self):\n super(VirtualMediaCommonMethodsTestCase, self).setUp()\n self.config(enabled_hardware_types=['ilo', 'fake-hardware'],\n enabled_boot_interfaces=['ilo-pxe', 'ilo-virtual-media',\n 'fake'],\n enabled_bios_interfaces=['ilo', 'no-bios'],\n enabled_power_interfaces=['ilo', 'fake'],\n enabled_management_interfaces=['ilo', 'fake'],\n enabled_inspect_interfaces=['ilo', 'fake', 'no-inspect'],\n enabled_console_interfaces=['ilo', 'fake', 'no-console'],\n enabled_vendor_interfaces=['ilo', 'fake', 'no-vendor'])\n self.node = object_utils.create_test_node(\n self.context, boot_interface='ilo-virtual-media',\n deploy_interface='direct')\n\n def test_get_iso_image_name(self):\n boot_iso_actual = virtual_media_base.get_iso_image_name(self.node)\n boot_iso_expected = \"boot-%s\" % self.node.uuid\n self.assertEqual(boot_iso_expected, boot_iso_actual)\n\n @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,\n autospec=True)\n @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)\n @mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)\n @mock.patch.object(virtual_media_base, 'get_iso_image_name',\n spec_set=True, autospec=True)\n @mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,\n autospec=True)\n def test__prepare_iso_image_uefi(self, capability_mock,\n iso_image_name_mock, swift_api_mock,\n create_boot_iso_mock, tempfile_mock):\n CONF.ilo.swift_ilo_container = 'ilo-cont'\n CONF.ilo.use_web_server_for_images = False\n\n swift_obj_mock = swift_api_mock.return_value\n fileobj_mock = mock.MagicMock(spec=file)\n fileobj_mock.name = 'tmpfile'\n mock_file_handle = mock.MagicMock(spec=file)\n mock_file_handle.__enter__.return_value = fileobj_mock\n tempfile_mock.return_value = mock_file_handle\n iso_image_name_mock.return_value = 'abcdef'\n create_boot_iso_mock.return_value = '/path/to/boot-iso'\n capability_mock.return_value = 'uefi'\n\n with task_manager.acquire(self.context, self.node.uuid,\n shared=False) as task:\n boot_iso_actual = virtual_media_base.prepare_iso_image(\n task, 'kernel_uuid', 'ramdisk_uuid',\n deploy_iso_href='deploy_iso_uuid',\n bootloader_href='bootloader_uuid',\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n timeout=None,\n container=CONF.ilo.swift_ilo_container,\n use_web_server=CONF.ilo.use_web_server_for_images)\n iso_image_name_mock.assert_called_once_with(task.node)\n create_boot_iso_mock.assert_called_once_with(\n task.context, 'tmpfile', 'kernel_uuid', 'ramdisk_uuid',\n deploy_iso_href='deploy_iso_uuid',\n esp_image_href='bootloader_uuid',\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n boot_mode='uefi')\n swift_obj_mock.create_object.assert_called_once_with(\n 'ilo-cont', 'abcdef', 'tmpfile', 
None)\n boot_iso_expected = 'swift:abcdef'\n self.assertEqual(boot_iso_expected, boot_iso_actual)\n\n @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,\n autospec=True)\n @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)\n @mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)\n @mock.patch.object(virtual_media_base, 'get_iso_image_name',\n spec_set=True, autospec=True)\n @mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,\n autospec=True)\n def test__prepare_iso_image_bios(self, capability_mock,\n iso_image_name_mock, swift_api_mock,\n create_boot_iso_mock, tempfile_mock):\n CONF.ilo.swift_ilo_container = 'ilo-cont'\n CONF.ilo.use_web_server_for_images = False\n\n swift_obj_mock = swift_api_mock.return_value\n fileobj_mock = mock.MagicMock(spec=file)\n fileobj_mock.name = 'tmpfile'\n mock_file_handle = mock.MagicMock(spec=file)\n mock_file_handle.__enter__.return_value = fileobj_mock\n tempfile_mock.return_value = mock_file_handle\n iso_image_name_mock.return_value = 'abcdef'\n create_boot_iso_mock.return_value = '/path/to/boot-iso'\n capability_mock.return_value = 'bios'\n\n with task_manager.acquire(self.context, self.node.uuid,\n shared=False) as task:\n boot_iso_actual = virtual_media_base.prepare_iso_image(\n task, 'kernel_uuid', 'ramdisk_uuid',\n deploy_iso_href='deploy_iso_uuid',\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n timeout=None,\n container=CONF.ilo.swift_ilo_container,\n use_web_server=CONF.ilo.use_web_server_for_images)\n iso_image_name_mock.assert_called_once_with(task.node)\n create_boot_iso_mock.assert_called_once_with(\n task.context, 'tmpfile', 'kernel_uuid', 'ramdisk_uuid',\n deploy_iso_href='deploy_iso_uuid',\n esp_image_href=None,\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n boot_mode='bios')\n swift_obj_mock.create_object.assert_called_once_with(\n 'ilo-cont', 'abcdef', 'tmpfile', None)\n boot_iso_expected = 'swift:abcdef'\n self.assertEqual(boot_iso_expected, boot_iso_actual)\n\n @mock.patch.object(deploy_utils, 'copy_image_to_web_server', spec_set=True,\n autospec=True)\n @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,\n autospec=True)\n @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)\n @mock.patch.object(virtual_media_base, 'get_iso_image_name',\n spec_set=True, autospec=True)\n @mock.patch.object(driver_utils, 'get_node_capability', spec_set=True,\n autospec=True)\n def test__prepare_iso_image_use_webserver(self, capability_mock,\n iso_image_name_mock,\n create_boot_iso_mock,\n tempfile_mock, copy_file_mock):\n CONF.ilo.use_web_server_for_images = True\n CONF.deploy.http_url = \"http://10.10.1.30/httpboot\"\n CONF.deploy.http_root = \"/httpboot\"\n CONF.pxe.pxe_append_params = 'kernel-params'\n\n fileobj_mock = mock.MagicMock(spec=file)\n fileobj_mock.name = 'tmpfile'\n mock_file_handle = mock.MagicMock(spec=file)\n mock_file_handle.__enter__.return_value = fileobj_mock\n tempfile_mock.return_value = mock_file_handle\n\n ramdisk_href = \"http://10.10.1.30/httpboot/ramdisk\"\n kernel_href = \"http://10.10.1.30/httpboot/kernel\"\n iso_image_name_mock.return_value = 'new_boot_iso'\n create_boot_iso_mock.return_value = '/path/to/boot-iso'\n capability_mock.return_value = 'uefi'\n copy_file_mock.return_value = \"http://10.10.1.30/httpboot/new_boot_iso\"\n\n with task_manager.acquire(self.context, self.node.uuid,\n shared=False) as task:\n driver_internal_info = task.node.driver_internal_info\n 
driver_internal_info['boot_iso_created_in_web_server'] = True\n boot_iso_actual = virtual_media_base.prepare_iso_image(\n task, kernel_href, ramdisk_href,\n deploy_iso_href='deploy_iso_uuid',\n bootloader_href='bootloader_uuid',\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n use_web_server=CONF.ilo.use_web_server_for_images)\n iso_image_name_mock.assert_called_once_with(task.node)\n create_boot_iso_mock.assert_called_once_with(\n task.context, 'tmpfile', kernel_href, ramdisk_href,\n deploy_iso_href='deploy_iso_uuid',\n esp_image_href='bootloader_uuid',\n root_uuid='root-uuid',\n kernel_params='kernel-params',\n boot_mode='uefi')\n boot_iso_expected = 'http://10.10.1.30/httpboot/new_boot_iso'\n self.assertEqual(boot_iso_expected, boot_iso_actual)\n copy_file_mock.assert_called_once_with(fileobj_mock.name,\n 'new_boot_iso')\n\n @mock.patch.object(virtual_media_base, 'prepare_iso_image', spec_set=True,\n autospec=True)\n def test_prepare_deploy_iso(self, prepare_iso_mock):\n driver_info = {'deploy_kernel': 'kernel', 'deploy_ramdisk': 'ramdisk',\n 'bootloader': 'bootloader'}\n CONF.pxe.pxe_append_params = 'kernel-params'\n timeout = None\n container = 'container'\n prepare_iso_mock.return_value = (\n 'swift:boot-b5451849-e088-4a4c-aa5f-4d97b3371dec')\n\n with task_manager.acquire(self.context, self.node.uuid,\n shared=False) as task:\n deploy_iso_actual = virtual_media_base.prepare_deploy_iso(\n task, {}, 'deploy', driver_info, use_web_server=False,\n container=container)\n prepare_iso_mock.assert_called_once_with(\n task, 'kernel', 'ramdisk', bootloader_href='bootloader',\n kernel_params=CONF.pxe.pxe_append_params, timeout=timeout,\n use_web_server=False, container='container')\n deploy_iso_expected = (\n 'swift:boot-b5451849-e088-4a4c-aa5f-4d97b3371dec')\n self.assertEqual(deploy_iso_expected, deploy_iso_actual)\n","sub_path":"ironic/tests/unit/drivers/modules/test_virtual_media_base.py","file_name":"test_virtual_media_base.py","file_ext":"py","file_size_in_byte":11487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"86213996","text":"from flask.ext.testing import TestCase\n\nfrom checkpoint2.api import app, db\nfrom checkpoint2.config import TestingConfig\n\n\nclass BaseTestCase(TestCase):\n \"\"\"A base test case for User and Bucketlist test classes.\"\"\"\n\n def create_app(self):\n \"\"\"Set up the app for testing. 
Returns an `app` instance.\"\"\"\n app.config.from_object(TestingConfig)\n self.client = app.test_client()\n return app\n\n def setUp(self):\n \"\"\"Run instructions before the each test is executed.\"\"\"\n db.create_all()\n self.create_user = self.client.post('/auth/register', data=dict(\n username='username', password='password', email='email@email.com'))\n get_token = self.client.post('/auth/login', data=dict(\n username='username', password='password'))\n self.token = get_token.json['token']\n self.bl1 = self.client.post('/bucketlists/', data=dict(\n name='First Bucketlist'), headers={'token': self.token})\n self.bl2 = self.client.post('/bucketlists/', data=dict(\n name='Second Bucketlist'), headers={'token': self.token})\n self.bl3 = self.client.post('/bucketlists/', data=dict(\n name='Third Bucketlist'), headers={'token': self.token})\n self.bl4 = self.client.post('/bucketlists/', data=dict(\n name='Forth Bucketlist'), headers={'token': self.token})\n self.bl5 = self.client.post('/bucketlists/', data=dict(\n name='Fifth Bucketlist'), headers={'token': self.token})\n\n self.bli1 = self.client.post(\n '/bucketlists/{0}/items/'.format(self.bl1.json['id']),\n data=dict(name='First Bucketlist Item Name', done='0'),\n headers={'token': self.token})\n self.bli2 = self.client.post(\n '/bucketlists/{0}/items/'.format(self.bl1.json['id']),\n data=dict(name='Second Bucketlist Item Name', done='1'),\n headers={'token': self.token})\n\n resp = self.client.get('/bucketlists/', headers={'token': self.token})\n self.initial_count = len(resp.json)\n\n self.exp_token = 'eyJhbGciOiJIUzI1NiIsImV4cCI6MTQ1MDY4OTA0MSwiaWF0IjoxND\\\n UwNjg4NDQxfQ.WyIxIiwiJDYkcm91bmRzPTY0Mjg1NyRGcUZsSUxBeDh6UEhPWDNhJDd2L0\\\n 5tUy9TajhKQVRSRVRGaUlYZVhjaE9aZ0JLbDVTREh3czg1LkhBT20xNi9BTW9kSFluZlhmM\\\n zk5MXFWWVpDclNsVzRBcHkuSTdFdlAuOWtEQncvIl0.IPzsof8lZr1vGPgxG-pDUo7RO5nO\\\n aLKkXaa-lIs0c_4'\n\n self.invalid_token = 'eqwJhbGciOiIUzI1NiIsImV4cCI6MTQ1MDY4OTA0MSwiaWF0IjoxND\\\n UwNjg4NDQxfQ.WyIxIiwiJDY98w1bmRzPTY0Mjg1NyRGcUZsSUxBeDh6UEhPWDNhJDd2L0\\\n 5tUy9TajhKQVRSRVRGaUlYZVhjaE9aZ0JLbDVTREh3czg1LkhBT20xNi9BTW9kSFluZlhmM\\\n zk5MXFWWVpDclNsVzRBcHkuSTdFdlAuOWtEQncvIl0.IPzsof8lZr1vGPgxG-pDUo7RO5nO\\\n aLKkXaa-lIs0c_4'\n\n def tearDown(self):\n \"\"\"Run instructions after each test is executed.\"\"\"\n db.session.remove()\n db.drop_all()\n","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"507178295","text":"def solution(healths, items):\n answer = []\n visit = [True for i in items]\n\n for i in range(len(items)):\n items[i].append(i + 1)\n\n healths = sorted(healths)\n items = sorted(items)\n answer = []\n\n for health in healths:\n for i in range(len(items)):\n if health - items[i][1] >= 100 and visit[i]:\n answer.append(items[i][2])\n visit[i] = False\n answer = sorted(answer)\n return answer\n\n\n\n","sub_path":"LGCNS/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"562109224","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase, override_settings\n\nfrom polls.models import Question, Choice\nfrom django.utils import timezone\nimport datetime\n\ndef create_question(question_text, days):\n \"\"\"\n Creates a question with the given `question_text` and published the\n given number of `days` offset to now (negative for 
questions published\n in the past, positive for questions that have yet to be published).\n \"\"\"\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text,\n pub_date=time)\n\ndef create_choice(choice_text, question):\n return Choice.objects.create(choice_text=choice_text, question=question)\n\n@override_settings(ROOT_URLCONF='tests.view_tests.urls', USE_I18N=True, USE_L10N=False, LANGUAGE_CODE='en')\nclass QuestionViewTests(TestCase):\n def test_index_view_with_no_questions(self):\n \"\"\"\n If no questions exist, an appropriate message should be displayed.\n \"\"\"\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n def test_index_view_with_a_past_question(self):\n \"\"\"\n Questions with a pub_date in the past should be displayed on the\n index page.\n \"\"\"\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['']\n )\n\n def test_index_view_with_a_future_question(self):\n \"\"\"\n Questions with a pub_date in the future should not be displayed on\n the index page.\n \"\"\"\n create_question(question_text=\"Future question.\", days=30)\n response = self.client.get(reverse('polls:index'))\n self.assertContains(response, \"No polls are available.\",\n status_code=200)\n self.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n def test_index_view_with_future_question_and_past_question(self):\n \"\"\"\n Even if both past and future questions exist, only past questions\n should be displayed.\n \"\"\"\n create_question(question_text=\"Past question.\", days=-30)\n create_question(question_text=\"Future question.\", days=30)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['']\n )\n\n def test_index_view_with_two_past_questions(self):\n \"\"\"\n The questions index page may display multiple questions.\n \"\"\"\n create_question(question_text=\"Past question 1.\", days=-30)\n create_question(question_text=\"Past question 2.\", days=-5)\n response = self.client.get(reverse('polls:index'))\n self.assertQuerysetEqual(\n response.context['latest_question_list'],\n ['', '']\n )\n\n@override_settings(ROOT_URLCONF='tests.view_tests.urls', USE_I18N=True, USE_L10N=False, LANGUAGE_CODE='en')\nclass QuestionIndexDetailTests(TestCase):\n def test_detail_view_with_a_future_question(self):\n \"\"\"\n The detail view of a question with a pub_date in the future should\n return a 404 not found.\n \"\"\"\n future_question = create_question(question_text='Future question.',\n days=5)\n response = self.client.get(reverse('polls:detail',\n args=(future_question.id,)))\n self.assertEqual(response.status_code, 404)\n\n def test_detail_view_with_a_past_question(self):\n \"\"\"\n The detail view of a question with a pub_date in the past should\n display the question's text.\n \"\"\"\n past_question = create_question(question_text='Past Question.',\n days=-5)\n response = self.client.get(reverse('polls:detail',\n args=(past_question.id,)))\n self.assertContains(response, past_question.question_text,\n status_code=200)\n\n@override_settings(ROOT_URLCONF='tests.view_tests.urls')\nclass VoteViewTests(TestCase):\n def test_success_vote(self):\n 
question = create_question(question_text='question.',\n days=0)\n choice1 = create_choice(choice_text='choice1.', question=question)\n choice2 = create_choice(choice_text='choice2.', question=question)\n response = self.client.post(reverse('polls:vote', args=(question.id,)), {'choice': str(choice1.id), })\n self.assertEquals(response.status_code, 302)\n def test_failure_vote(self):\n question = create_question(question_text='question.',\n days=0)\n choice1 = create_choice(choice_text='choice1.', question=question)\n response = self.client.post(reverse('polls:vote', args=(question.id,)), {'choice': str(choice1.id+10), })\n self.assertEquals(response.status_code, 200)\n self.assertContains(response, 'You didn't select a choice.', status_code=200)\n","sub_path":"tests/view_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108914994","text":"__author__ = 'riteshk'\r\n\r\nimport sys, getopt\r\n\r\n\"\"\"\r\n The FASTA index file (.fai) as created from samtools faidx, creates a text file with 5 columns. These columns are\r\n accession, length, offset of the first base of the given accession, length of fasta lines and line blen (guess that\r\n is length of fasta lines + 1). this file needs to be read line by line and a corresponding BED3 file can be created.\r\n\"\"\"\r\n\r\n\r\ndef create_bed3(fai_file, bed3_file):\r\n f_in = open(fai_file, 'r')\r\n f_out = open(bed3_file, 'w')\r\n\r\n for line in f_in:\r\n fields = line.split('\\t')\r\n if len(fields) == 5:\r\n accession = fields[0]\r\n size = int(fields[1]) - 1\r\n f_out.write(accession + \"\\t\" + str(0) + \"\\t\" + str(size) + \"\\n\")\r\n\r\n f_in.close()\r\n f_out.close()\r\n\r\n\r\ndef main(argv):\r\n inputfile = ''\r\n outputfile = ''\r\n try:\r\n opts, args = getopt.getopt(argv, \"hi:o:\", [\"ifile=\", \"ofile=\"])\r\n except getopt.GetoptError:\r\n print ('CreateBED3fromFastaIndex.py -i -o ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print ('CreateBED3fromFastaIndex.py -i -o ')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--ifile\"):\r\n inputfile = arg\r\n elif opt in (\"-o\", \"--ofile\"):\r\n outputfile = arg\r\n\r\n print ('Input file is ', inputfile)\r\n print ('Output file is ', outputfile)\r\n create_bed3(inputfile, outputfile)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n","sub_path":"wheat/Scripts/CreateBED3fromFastaIndex.py","file_name":"CreateBED3fromFastaIndex.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"278945968","text":"import numpy as np\nclass investment():\n '''\n This class contains functions that return values of daily return\n '''\n def __init__(self):\n value_to_invest = 1000\n self.value_to_invest = value_to_invest\n\n def daily_ret(self,position,num_trials):\n position_value = self.value_to_invest/position # set position value\n cumu_ret = []\n for i in range(num_trials):\n value = float(position_value*sum(np.random.choice([0,2],size=position,p=[0.49,0.51]))) #cumu_value for ith trial\n cumu_ret.append(value) # get a list of cumu return with length num_trials\n daily_ret = [(value/self.value_to_invest)-1 for value in cumu_ret] # get a list of daily return with length num_trials\n return 
daily_ret\n","sub_path":"mz775/investment/investment.py","file_name":"investment.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"486306864","text":"import time\nfrom threading import Thread\nfrom serial import *\nimport serial.tools.list_ports\nfrom kivy.logger import Logger\n\nclass SerialAdapter:\n __stopReading = False\n __thread = None\n \n def __init__(self, settings):\n Logger.info('Serial: Initializing')\n self.settings = settings\n\n self.values = []\n self.ser = Serial(\n baudrate=int(settings.get('com','baudrate')), \n bytesize=SerialAdapter.BYTESIZE[settings.get('com','bytesize')], \n parity=SerialAdapter.PARITY[settings.get('com','parity')], \n stopbits=SerialAdapter.STOPBITS[settings.get('com','stopbits')],\n timeout=0.1,\n xonxoff=0,\n rtscts=0,\n interCharTimeout=None\n )\n self.ser.setPort(settings.get('com','port'))\n\n settings.addCallback(self.onValueChanged, 'com')\n\n\n def open(self):\n try:\n if not self.ser.isOpen() or self.__thread == None:\n Logger.info('Serial: Attempting to open port: %s', self.ser.port)\n if not self.ser.isOpen():\n self.ser.open()\n if self.__thread == None:\n self.startReading()\n\n except SerialException as e:\n Logger.error('Serial: Failed to open port: %s', self.ser.port)\n Logger.error('Serial: %s', e.args[0])\n self.close()\n self.stopReading()\n\n\n def onValueChanged(self, section, key, value):\n if self.ser.isOpen():\n self.close()\n \n if key == 'port':\n self.ser.setPort(value)\n elif key == 'baudrate':\n self.ser.baudrate = value\n elif key == 'bytesize':\n self.ser.bytesize = SerialAdapter.BYTESIZE[value]\n elif key == 'parity':\n self.ser.parity = SerialAdapter.PARITY[value]\n elif key == 'stopbits':\n self.ser.stopbits = SerialAdapter.STOPBITS[value]\n\n self.open()\n\n\n def close(self):\n Logger.info('Serial: Closing port: %s', self.ser.port)\n self.stopReading()\n if self.ser.isOpen():\n self.ser.close()\n\n\n def isOpen(self):\n return self.ser.isOpen()\n\n\n def startReading(self):\n if self.ser.isOpen():\n Logger.debug('Serial: Start reading')\n self.__thread = Thread(target=self.receiving, args=())\n self.__thread.start()\n\n\n def stopReading(self):\n if self.__thread != None:\n Logger.debug('Serial: Stop reading')\n self.__stopReading = True\n self.__thread.join()\n self.__thread = None\n self.__stopReading = False\n\n\n def receiving(self):\n self.ser.flushInput()\n while not self.__stopReading:\n rcv = self.ser.readline()\n rcv = rcv.decode(\"utf-8\")\n rcv = rcv.replace('\\r', '').replace('\\n', '')\n if rcv != \"\":\n self.values.append(int(rcv))\n\n def getAll(self):\n temp = self.values.copy()\n self.values.clear()\n return temp\n\n\n def write(self, data):\n Logger.trace('Serial: Writing: %d', data)\n self.ser.write(data)\n\n\n def getPorts():\n return list(serial.tools.list_ports.comports())\n\n\n def getPortNames():\n return [p.device for p in SerialAdapter.getPorts()]\n\n\n PARITY = {\n 'None': PARITY_NONE,\n 'Even': PARITY_EVEN,\n 'Odd': PARITY_ODD,\n 'Mark': PARITY_MARK,\n 'Space': PARITY_SPACE\n }\n\n STOPBITS = {\n '1': STOPBITS_ONE,\n '1.5': STOPBITS_ONE_POINT_FIVE,\n '2': STOPBITS_TWO\n }\n\n BYTESIZE = {\n '5': FIVEBITS,\n '6': SIXBITS,\n '7': SEVENBITS,\n '8': EIGHTBITS\n }","sub_path":"SerialAdapter.py","file_name":"SerialAdapter.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"581357226","text":"# 
Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# [START contactcenterinsights_create_conversation]\nfrom google.cloud import contact_center_insights_v1\n\n\ndef create_conversation(\n project_id: str,\n transcript_uri: str = \"gs://cloud-samples-data/ccai/chat_sample.json\",\n audio_uri: str = \"gs://cloud-samples-data/ccai/voice_6912.txt\",\n) -> contact_center_insights_v1.Conversation:\n \"\"\"Creates a conversation.\n\n Args:\n project_id:\n The project identifier. For example, 'my-project'.\n transcript_uri:\n The Cloud Storage URI that points to a file that contains the\n conversation transcript. Format is 'gs://{bucket_name}/{file.json}'.\n For example, 'gs://cloud-samples-data/ccai/chat_sample.json'.\n audio_uri:\n The Cloud Storage URI that points to a file that contains the\n conversation audio. Format is 'gs://{bucket_name}/{file.json}'.\n For example, 'gs://cloud-samples-data/ccai/voice_6912.txt'.\n\n Returns:\n A conversation.\n \"\"\"\n # Construct a parent resource.\n parent = (\n contact_center_insights_v1.ContactCenterInsightsClient.common_location_path(\n project_id, \"us-central1\"\n )\n )\n\n # Construct a conversation.\n conversation = contact_center_insights_v1.Conversation()\n conversation.data_source.gcs_source.transcript_uri = transcript_uri\n conversation.data_source.gcs_source.audio_uri = audio_uri\n conversation.medium = contact_center_insights_v1.Conversation.Medium.CHAT\n\n # Call the Insights client to create a conversation.\n insights_client = contact_center_insights_v1.ContactCenterInsightsClient()\n conversation = insights_client.create_conversation(\n parent=parent, conversation=conversation\n )\n\n print(f\"Created {conversation.name}\")\n return conversation\n\n\n# [END contactcenterinsights_create_conversation]\n","sub_path":"contact-center-insights/snippets/create_conversation.py","file_name":"create_conversation.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"539951918","text":"import math,time,serial\nimport RPi.GPIO as GPIO\nser = serial.Serial(port='/dev/ttyS0',baudrate = 19200)\n\n####################################################################################################\n\nGPIO.setmode(GPIO.BCM)\nTRIG1 = 23\nTRIG2 = 17\nECHO1 = 24\nECHO2 = 27\nprint (\"Distance Measurement In Progress\")\nGPIO.setup(TRIG1,GPIO.OUT)\nGPIO.setup(ECHO1,GPIO.IN)\nGPIO.setup(TRIG2,GPIO.OUT)\nGPIO.setup(ECHO2,GPIO.IN)\nGPIO.output(TRIG1,False)\nGPIO.output(TRIG2,False)\npulse_end1 = 0\npulse_end2 = 0\nprint (\"Waiting For Sensor To Settle\")\ndef get_ult(obstacle_right,obstacle_left):\n\ttime.sleep(1)\n\twhile True:\n\t\tGPIO.output(TRIG1, True)\n\t\tGPIO.output(TRIG2, True)\n\t\ttime.sleep(0.00001)\n\t\tGPIO.output(TRIG1, False)\n\t\tGPIO.output(TRIG2, False)\n\t\twhile GPIO.input(ECHO1)==0 and GPIO.input(ECHO2)==0:\n\t\t\tpulse_start1 = time.time()\n\t\t\tpulse_start2 = time.time()\n\t\twhile GPIO.input(ECHO1)==1 or 
GPIO.input(ECHO2)==1:\n\t\t\tif GPIO.input(ECHO1)==1:\n\t\t\t\tpulse_end1 = time.time() \n\t\t\tif GPIO.input(ECHO2)==1:\n\t\t\t\tpulse_end2 = time.time()\n\t\tpulse_duration1 = pulse_end1 - pulse_start1\n\t\tpulse_duration2 = pulse_end2 - pulse_start2\n\t\tobstacle_right = pulse_duration1 * 17150\n\t\tobstacle_left = pulse_duration2 * 17150\n\t\tobstacle_left = round(obstacle_left, 2)\n\t\tobstacle_right = round(obstacle_right, 2)\n\t\tprint(obstacle_right,obstacle_left)\n\t\tif(obstacle_left<0):\n\t\t\tprint(\"Waiting for Values\")\n\t\tif (obstacle_right>50 and obstacle_left>50):\n\t\t\tstraight()\t\n\t\tif (obstacle_right>50 and obstacle_left<50):\n\t\t\tclockwise()\t\n\t\tif (obstacle_left>50 and obstacle_right<50):\n\t\t\tanticlockwise()\t\n\t\tif(obstacle_right<50 and obstacle_left<50):\n\t\t\tif(obstacle_right<25 and obstacle_left<25):\n\t\t\t\tbrute_stop()\n\t\t\telse: \t\n\t\t\t\tbackward()\n\n #time.sleep(0.7589)\n####################################################################################################\ndef straight():\n\tstm_send='m4x4999y0000'\n\tprint ('Going straight')\n\tser.write(stm_send.encode())\ndef anticlockwise():\n\tstm_send='m4x0000y4999'\n\tprint('Rotating anticlockwise')\n\tser.write(stm_send.encode())\ndef clockwise():\n\tstm_send='m4x9999y4999'\n\tprint('Rotating clockwise')\n\tser.write(stm_send.encode())\ndef backward():\n\tstm_send='m4x4999y9999'\t\n\tprint('Going backward')\n\tser.write(stm_send.encode())\ndef brute_stop():\n\tstm_send='m4x4999y4999'\n\tprint('Brute Stop')\n\tser.write(stm_send.encode())\ndef obstacle_avoid(obstacle_right,obstacle_left):#TAKING DISTANCE IN CENTIMETERS\n\tif(obstacle_left<0):\n\t\tprint(\"Waiting for Values\")\n\tif (obstacle_right>50 and obstacle_left>50):\n\t\tstraight()\t\n\tif (obstacle_right>50 and obstacle_left<50):\n\t\tclockwise()\t\n\tif (obstacle_left>50 and obstacle_right<50):\n\t\tanticlockwise()\t\n\tif(obstacle_right<50 and obstacle_left<50):\n\t\tif(obstacle_right<25 and obstacle_left<25):\n\t\t brute_stop()\n\t\telse: \t\n\t\t\tbackward()\n\tget_ult(obs)\t\t\n\n\t\t\t\n\nif __name__=='__main__':\n\tobstacle_left=-1111111.0\n\tobstacle_right=-1111111.0\n\tget_ult(obstacle_right,obstacle_left)\n\n\n\n\n\nGPIO.cleanup()\n\n\n\n\n\n\n","sub_path":"auto/u3.py","file_name":"u3.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"182116610","text":"import os\n\n# Configuration file of staticmapservice\n\nHEADERS = {\"User-Agent\": \"staticmapservice/0.0.1\"}\n\nTILE_SERVER = os.environ[\"TILE_SERVER\"]\nIS_TMS = False # True if you use a TMS instead of OSM XYZ tiles\n\n# Default values can be overwritten in each request\nDEFAULT_WIDTH = \"300\"\nDEFAULT_HEIGHT = \"200\"\nDEFAULT_ZOOM = \"10\"\n\n# Maximum values can't be overwritten\nMAX_WIDTH = os.environ.get(\"MAX_WIDTH\", \"1280\")\nMAX_HEIGHT = os.environ.get(\"MAX_HEIGHT\", \"1280\")\nMAX_ZOOM = \"19\"\nMAX_PNV = \"30\" # Map won't contain more points, nodes and vertices than this value\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"614120506","text":"import requests\nimport json\nfrom json_tricks import dumps\nimport os\nfrom django.conf import settings\n#from .serializers import AveriaSerializer\nfrom core.elasticsearch.serializers import ElasticSearchSerializer\nfrom ApiADA.loggers import logging\nfrom 
core.exceptions.customexceptions import ApiException\nfrom core.elasticsearch.elasticsearch_query import CustomElasticSearchQuery\nfrom ApiADA.constantes import Constantes\nimport traceback\nfrom core.elasticsearch.averias_historical.models import AveriaHistorical\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nlog = logging.getLogger(__name__)\n\nclass Averia():\n\n \n def searchAveriaAnalysisDate(searchanalysisDate):\n log.info('Start:searchAveriaAnalysisDate')\n\n sql={\n \"action\": \"/averias_backlog/averia\",\n \"body\": {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"analysis_info.analysis_date\": {\n \"lte\": searchanalysisDate.strftime(Constantes.DATETIME_FORMAT)\n }\n }\n }\n ]\n }\n }, \n \"_source\": [\"case.id_number\", \"site.site_id\", \"analysis_contract.contract.s_id\", \"analysis_info.analysis_date\", \"analysis_info.analysis_details\", \"analysis_info.analysis_type\", \"analysis_info.tags\", \"analysis_info.analysis_details.date\", \"analysis_info.analysis_details.type\", \"analysis_info.analysis_details.user\", \"analysis_info.analysis_details.value\"]\n\n }\n }\n\n output=CustomElasticSearchQuery.executeSearchDDL(sql) \n\n log.info('End:searchAveriaAnalysisDate')\n return output\n\n def setAveria(averia):\n \n log.info('Start:setAveria')\n serializer=ElasticSearchSerializer(data=averia)\n if (not serializer.is_valid()):\n raise ApiException('Data for averia %s is not valid to be inserted in Elastic' % averia[\"case\"].id_number)\n\n body=serializer.validated_data\n headers = {'Content-type': 'application/json'}\n action = \"/averias_backlog/averia/%s\" % averia[\"case\"].id_number\n try:\n url='https://'+ settings.ELK_SERVER + \":\" + str(settings.ELK_PORT) + action\n response=requests.put(url=url, headers=headers, data=json.dumps(body), cert=(os.path.join(settings.CERTS_FOLDER,settings.ELK_CERT),os.path.join(settings.CERTS_FOLDER,settings.ELK_CERT_KEY) ), verify=os.path.join(settings.CERTS_FOLDER,settings.ELK_ROOT_CA))\n if not response.ok:\n raise ApiException(\"Error executing the insert/update in ElasticSearch server for averia %s Response %s\" % (averia[\"case\"].id_number, response.content.decode(\"utf-8\")))\n except ApiException as ae: \n raise ae\n except Exception as e:\n log.error('Exception:'+type(e).__name__ +\" \" +str(e))\n log.error(traceback.format_exc())\n raise ApiException(\"Error executing the insert/update in ElasticSearch server for averia %s Error %s\" % (averia[\"case\"].id_number, str(e)))\n\n log.info('End:setAveria')\n\n\n def moveToHistorical(averia):\n log.info('Start:moveToHistorical')\n \n sql= {\n \"protocol\": \"POST\",\n \"action\": \"/averias_backlog/averia/_search\",\n \"body\": \n {\n \"query\": \n {\n \"bool\":\n {\n \"must\": [\n {\n \"term\": \n {\n \"case.id_number\": averia\n }\n }\n ]\n }\n }\n }\n }\n \n averiaInfo=CustomElasticSearchQuery.executeDDL(sql)\n if ((averiaInfo) and (\"hits\" in averiaInfo) and (\"hits\" in averiaInfo[\"hits\"])):\n for averiaDelete in averiaInfo[\"hits\"][\"hits\"]:\n AveriaHistorical.setAveriaHistorical(averiaDelete[\"_source\"])\n Averia.deleteAveria(averiaDelete[\"_id\"])\n\n log.info('End:moveToHistorical')\n\n\n\n def deleteAveria(idNumber):\n log.info('Start:deleteAveria')\n\n #Insertamos el registro en el nuevo índice\n sql={\n \"protocol\": \"DELETE\",\n \"action\": \"/averias_backlog/averia/%s\" % idNumber,\n \"body\": {}\n }\n\n CustomElasticSearchQuery.executeDDL(sql)\n\n log.info('End:deleteAveria')\n\n def getAveria(idNumber):\n 
log.info('Start:getAveria')\n\n #Insertamos el registro en el nuevo índice\n sql={\n \"protocol\": \"GET\",\n \"action\": \"/averias_backlog/averia/%s/_source\" % idNumber,\n \"body\": {}\n }\n\n\n averia_info=CustomElasticSearchQuery.executeDDL(sql)\n if (\"error\" in averia_info):\n if ((\"reason\" in averia_info[\"error\"]) and (\"Document not found\" in averia_info[\"error\"][\"reason\"])):\n raise ObjectDoesNotExist(averia_info[\"error\"][\"reason\"])\n else:\n raise ApiException(averia_info[\"error\"])\n return averia_info\n log.info('End:getAveria')","sub_path":"core/elasticsearch/averias/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"257826416","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport ctypes\nimport ctypes.util\ntry:\n # If possible, use setuptools\n from setuptools import setup\n from setuptools.extension import Extension\n from setuptools.command.build_ext import build_ext as distutils_build_ext\n from setuptools.command.install import install\nexcept ImportError:\n from distutils.core import setup\n from distutils.extension import Extension\n from distutils.command.build_ext import build_ext as distutils_build_ext\n from distutils.command.install import install\nfrom distutils.cmd import Command\nfrom distutils.errors import CCompilerError, DistutilsExecError, \\\n DistutilsPlatformError\nfrom distutils.sysconfig import get_config_var\nimport errno\nimport glob\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\n\n\n# Get the version from the shapely module\nversion = None\nwith open('shapely/__init__.py', 'r') as fp:\n for line in fp:\n if \"__version__\" in line:\n exec(line.replace('_', ''))\n break\nif version is None:\n raise ValueError(\"Could not determine Shapely's version\")\n\n# Handle UTF-8 encoding of certain text files.\nopen_kwds = {}\nif sys.version_info > (3,):\n open_kwds['encoding'] = 'utf-8'\n\nwith open('VERSION.txt', 'w', **open_kwds) as fp:\n fp.write(version)\n\nwith open('README.rst', 'r', **open_kwds) as fp:\n readme = fp.read()\n\nwith open('CREDITS.txt', 'r', **open_kwds) as fp:\n credits = fp.read()\n\nwith open('CHANGES.txt', 'r', **open_kwds) as fp:\n changes = fp.read()\n\nlong_description = readme + '\\n\\n' + credits + '\\n\\n' + changes\n\n# Fail installation if we can't find a GEOS shared library with the right\n# version. We ship it with Shapely for Windows, so no need to check on that\n# platform. Code below copied from shapely/geos.py.\nclass InstallCommand(install):\n\n def run(self):\n def load_dll(libname, fallbacks=None):\n lib = ctypes.util.find_library(libname)\n if lib is not None:\n try:\n return ctypes.CDLL(lib)\n except OSError:\n pass\n if fallbacks is not None:\n for name in fallbacks:\n try:\n return ctypes.CDLL(name)\n except OSError:\n # move on to the next fallback\n pass\n # No shared library was loaded. 
Raise OSError.\n raise OSError(\n \"Could not find library %s or load any of its variants %s\" % (\n libname, fallbacks or []))\n\n if sys.platform.startswith('linux'):\n _lgeos = load_dll(\n 'geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])\n elif sys.platform == 'darwin':\n if hasattr(sys, 'frozen'):\n # .app file from py2app\n alt_paths = [os.path.join(os.environ['RESOURCEPATH'],\n '..', 'Frameworks', 'libgeos_c.dylib')]\n else:\n alt_paths = [\n # The Framework build from Kyng Chaos:\n \"/Library/Frameworks/GEOS.framework/Versions/Current/GEOS\",\n # macports\n '/opt/local/lib/libgeos_c.dylib',\n ]\n _lgeos = load_dll('geos_c', fallbacks=alt_paths)\n elif sys.platform == 'sunos5':\n _lgeos = load_dll(\n 'geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])\n else: # other *nix systems\n _lgeos = load_dll(\n 'geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])\n\n GEOSversion = _lgeos.GEOSversion\n GEOSversion.restype = ctypes.c_char_p\n GEOSversion.argtypes = []\n geos_version_string = GEOSversion()\n if sys.version_info[0] >= 3:\n geos_version_string = geos_version_string.decode('ascii')\n res = re.findall(r'(\\d+)\\.(\\d+)\\.(\\d+)', geos_version_string)\n assert len(res) == 2, res\n geos_version = tuple(int(x) for x in res[0])\n shapely_version = tuple(int(x) for x in version.split('.'))\n\n if shapely_version >= (1, 3):\n if geos_version >= (3, 3):\n install.run(self)\n else:\n print(\n \"Shapely >= 1.3 requires GEOS >= 3.3. \"\n \"Install GEOS 3.3+ and reinstall Shapely.\")\n sys.exit(1)\n\nsetup_args = dict(\n name = 'Shapely',\n version = version,\n requires = ['Python (>=2.6)', 'libgeos_c (>=3.1)'],\n description = 'Geometric objects, predicates, and operations',\n license = 'BSD',\n keywords = 'geometry topology gis',\n author = 'Sean Gillies',\n author_email = 'sean.gillies@gmail.com',\n maintainer = 'Sean Gillies',\n maintainer_email = 'sean.gillies@gmail.com',\n url = 'https://github.com/Toblerity/Shapely',\n long_description = long_description,\n packages = [\n 'shapely',\n 'shapely.geometry',\n 'shapely.algorithms',\n 'shapely.examples',\n 'shapely.speedups',\n 'shapely.vectorized',\n ],\n cmdclass = {'install': InstallCommand},\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: GIS',\n ],\n data_files = [('shapely', ['shapely/_geos.pxi'])]\n)\n\n# Add DLLs to Windows packages.\nif sys.platform == 'win32':\n try:\n os.mkdir('shapely/DLLs')\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise\n if '(AMD64)' in sys.version:\n for dll in glob.glob('DLLs_AMD64_VC9/*.dll'):\n shutil.copy(dll, 'shapely/DLLs')\n elif sys.version_info[0:2] == (2, 5):\n for dll in glob.glob('DLLs_x86_VC7/*.dll'):\n shutil.copy(dll, 'shapely/DLLs')\n else:\n for dll in glob.glob('DLLs_x86_VC9/*.dll'):\n shutil.copy(dll, 'shapely/DLLs')\n setup_args.update(\n package_data={'shapely': ['shapely/DLLs/*.dll']},\n include_package_data=True,\n )\n\n\n# Optional compilation of speedups\n# setuptools stuff from Bob Ippolito's simplejson project\nif sys.platform == 'win32' and sys.version_info > (2, 6):\n # 2.6's distutils.msvc9compiler can raise an IOError when failing to\n # find the compiler\n ext_errors = (CCompilerError, DistutilsExecError, 
DistutilsPlatformError,\n IOError)\nelse:\n ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)\n\n\nclass BuildFailed(Exception):\n pass\n\n\ndef construct_build_ext(build_ext):\n class WrappedBuildExt(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except DistutilsPlatformError as x:\n raise BuildFailed(x)\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except ext_errors as x:\n raise BuildFailed(x)\n return WrappedBuildExt\n\n\nif (hasattr(platform, 'python_implementation')\n and platform.python_implementation() == 'PyPy'):\n # python_implementation is only available since 2.6\n ext_modules = []\n libraries = []\nelif sys.platform == 'win32':\n libraries = ['geos']\nelse:\n libraries = ['geos_c']\n\n\nif os.path.exists(\"MANIFEST.in\"):\n pyx_file = \"shapely/speedups/_speedups.pyx\"\n c_file = \"shapely/speedups/_speedups.c\"\n\n force_cython = False\n if 'sdist' in sys.argv:\n force_cython = True\n\n try:\n if (force_cython or not os.path.exists(c_file)\n or os.path.getmtime(pyx_file) > os.path.getmtime(c_file)):\n print(\"Updating C extension with Cython.\", file=sys.stderr)\n subprocess.check_call([\"cython\", \"shapely/speedups/_speedups.pyx\"])\n except (subprocess.CalledProcessError, OSError):\n print(\"Warning: Could not (re)create C extension with Cython.\",\n file=sys.stderr)\n if force_cython:\n raise\n if not os.path.exists(\"shapely/speedups/_speedups.c\"):\n print(\"Warning: speedup extension not found\", file=sys.stderr)\n\next_modules = [\n Extension(\n \"shapely.speedups._speedups\",\n [\"shapely/speedups/_speedups.c\"],\n libraries=libraries,\n include_dirs=[get_config_var('INCLUDEDIR')],),\n]\n\ntry:\n import numpy as np\n from Cython.Distutils import build_ext as cython_build_ext\n from distutils.extension import Extension as DistutilsExtension\n\n cmd_classes = setup_args.setdefault('cmdclass', {})\n if 'build_ext' in cmd_classes:\n raise ValueError('We need to put the Cython build_ext in '\n 'cmd_classes, but it is already defined.')\n cmd_classes['build_ext'] = cython_build_ext\n\n ext_modules.append(DistutilsExtension(\"shapely.vectorized._vectorized\",\n sources=[\"shapely/vectorized/_vectorized.pyx\"],\n libraries=libraries + [np.get_include()],\n include_dirs=[get_config_var('INCLUDEDIR'),\n np.get_include()],\n ))\nexcept ImportError:\n print(\"Numpy or Cython not available, shapely.vectorized submodule not \"\n \"being built.\")\n\n\ntry:\n # try building with speedups\n existing_build_ext = setup_args['cmdclass'].get('build_ext', distutils_build_ext)\n setup_args['cmdclass']['build_ext'] = construct_build_ext(existing_build_ext)\n setup(\n ext_modules=ext_modules,\n **setup_args\n )\nexcept BuildFailed as ex:\n BUILD_EXT_WARNING = \"Warning: The C extension could not be compiled, \" \\\n \"speedups are not enabled.\"\n print(ex)\n print(BUILD_EXT_WARNING)\n print(\"Failure information, if any, is above.\")\n print(\"I'm retrying the build without the C extension now.\")\n\n setup(**setup_args)\n\n print(BUILD_EXT_WARNING)\n print(\"Plain-Python installation succeeded.\")\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":10523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27900667","text":"# Copyright 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright [2014] Hewlett-Packard Development Company, L.P.\n# limitations under the License.\n\"\"\"\nTests for cue endpoint object.\n\"\"\"\nfrom oslo_db import exception as oslo_exception\n\nfrom cue.api.controllers.v1 import cluster\nfrom cue.db import api as db_api\nfrom cue.db.sqlalchemy import models\nfrom cue import objects\nfrom cue.tests.functional import base\nfrom cue.tests.functional import utils as func_utils\n\n\nclass EndpointObjectsTests(base.FunctionalTestCase):\n dbapi = db_api.get_instance()\n\n def create_object_endpoint(self, node_id, **kw):\n \"\"\"Create an endpoint object for the given node.\"\"\"\n endpoint_dict = func_utils.get_test_endpoint_dict(node_id=node_id,\n **kw)\n api_endpoint = objects.Endpoint(**endpoint_dict)\n self.validate_endpoint_values(endpoint_dict, api_endpoint)\n api_endpoint.create(self.context)\n\n new_endpoint = self.dbapi.get_endpoints_in_node(self.context,\n api_endpoint.node_id)\n return new_endpoint[0]\n\n def validate_endpoint_values(self, endpoint_ref, endpoint_cmp):\n \"\"\"Validate Endpoint Object fields.\"\"\"\n if not (isinstance(endpoint_ref, cluster.EndPoint) or\n isinstance(endpoint_cmp, cluster.EndPoint)):\n self.assertEqual(endpoint_ref.id if hasattr(endpoint_ref, \"id\")\n else endpoint_ref[\"id\"],\n endpoint_cmp.id if hasattr(endpoint_cmp, \"id\")\n else endpoint_cmp[\"id\"],\n \"Invalid endpoint id value\")\n self.assertEqual(endpoint_ref.node_id if hasattr(endpoint_ref,\n \"node_id\") else endpoint_ref[\"node_id\"],\n endpoint_cmp.node_id if hasattr(endpoint_cmp,\n \"node_id\") else endpoint_cmp[\"node_id\"],\n \"Invalid endpoint node_id value\")\n self.assertEqual(endpoint_ref.uri if hasattr(endpoint_ref, \"uri\") else\n endpoint_ref[\"uri\"],\n endpoint_cmp.uri if hasattr(endpoint_cmp, \"uri\") else\n endpoint_cmp[\"uri\"],\n \"Invalid endpoint uri value\")\n self.assertEqual(endpoint_ref.type if hasattr(endpoint_ref, \"type\")\n else endpoint_ref[\"type\"],\n endpoint_cmp.type if hasattr(endpoint_cmp, \"type\")\n else endpoint_cmp[\"type\"],\n \"Invalid endpoint type value\")\n\n def test_endpoint_object_generation(self):\n \"\"\"Test Endpoint Object generation from a cluster dictionary object.\"\"\"\n endpoint_dict = func_utils.get_test_endpoint_dict()\n endpoint_object = objects.Endpoint(**endpoint_dict)\n self.validate_endpoint_values(endpoint_dict, endpoint_object)\n\n def test_endpoint_api_to_object_to_api(self):\n \"\"\"Tests Endpoint api object conversion to Endpoint object and back\n\n to api object.\n \"\"\"\n endpoint_dict = func_utils.get_test_endpoint_dict()\n api_endpoint = cluster.EndPoint(**endpoint_dict)\n object_endpoint = objects.Endpoint(**endpoint_dict)\n self.validate_endpoint_values(api_endpoint, object_endpoint)\n api_endpoint_2 = cluster.EndPoint(**object_endpoint.as_dict())\n self.validate_endpoint_values(api_endpoint, api_endpoint_2)\n\n def test_endpoint_db_to_object_to_db(self):\n \"\"\"Tests Endpoint db object conversion to Endpoint object and back\n\n to db object.\n \"\"\"\n endpoint_dict = func_utils.get_test_endpoint_dict()\n 
db_endpoint_object = models.Endpoint()\n db_endpoint_object.update(endpoint_dict)\n object_endpoint = objects.Endpoint._from_db_object(objects.Endpoint(),\n db_endpoint_object)\n self.validate_endpoint_values(db_endpoint_object, object_endpoint)\n\n endpoint_changes = object_endpoint.obj_get_changes()\n db_endpoint_object_2 = models.Endpoint()\n db_endpoint_object_2.update(endpoint_changes)\n self.validate_endpoint_values(db_endpoint_object, db_endpoint_object_2)\n\n def test_create_endpoint(self):\n \"\"\"Tests create endpoint from Endpoint objects API.\"\"\"\n new_cluster = func_utils.create_object_cluster(self.context)\n cluster_nodes = self.dbapi.get_nodes_in_cluster(self.context,\n new_cluster.id)\n endpoint_dict = func_utils.get_test_endpoint_dict(\n node_id=cluster_nodes[0].id)\n endpoint = objects.Endpoint(**endpoint_dict)\n self.validate_endpoint_values(endpoint_dict, endpoint)\n endpoint.create(self.context)\n new_endpoint = self.dbapi.get_endpoints_in_node(self.context,\n endpoint.node_id)\n self.validate_endpoint_values(endpoint, new_endpoint[0])\n\n def test_create_endpoint_for_nonexistent_node(self):\n \"\"\"Tests create endpoint for a nonexistent node from Endpoint\n\n objects API.\n \"\"\"\n api_endpoint_dict = func_utils.get_test_endpoint_dict()\n api_endpoint = objects.Endpoint(**api_endpoint_dict)\n self.validate_endpoint_values(api_endpoint_dict, api_endpoint)\n self.assertRaises(oslo_exception.DBReferenceError, api_endpoint.create,\n self.context)\n\n def test_update_endpoint_by_node_id(self):\n \"\"\"Tests update endpoint by node id from Endpoint objects API.\"\"\"\n new_cluster = func_utils.create_object_cluster(self.context, size=1)\n cluster_node = self.dbapi.get_nodes_in_cluster(self.context,\n new_cluster.id)\n cluster_node_id = cluster_node[0].id\n new_endpoint = self.create_object_endpoint(\n cluster_node_id, uri='10.0.0.1:5672', type='AMQP')\n endpoint_values = {\n 'uri': '10.0.0.2:5672',\n 'type': 'XMPP'\n }\n objects.Endpoint.update_by_node_id(self.context, new_endpoint.node_id,\n endpoint_values)\n endpoints = self.dbapi.get_endpoints_in_node(self.context,\n new_endpoint.node_id)\n for endpoint in endpoints:\n self.assertEqual('XMPP', endpoint.type)\n self.assertEqual('10.0.0.2:5672', endpoint.uri)\n\n def test_get_endpoints_by_node_id(self):\n \"\"\"Tests get endpoint by node id from Endpoint objects API.\"\"\"\n new_cluster = func_utils.create_object_cluster(self.context, size=1)\n cluster_node = self.dbapi.get_nodes_in_cluster(self.context,\n new_cluster.id)\n node_id = cluster_node[0].id\n new_endpoint = self.create_object_endpoint(node_id)\n endpoint_list = objects.Endpoint.get_endpoints_by_node_id(\n self.context, new_endpoint.node_id)\n for endpoint in endpoint_list:\n self.validate_endpoint_values(endpoint, new_endpoint)","sub_path":"cue/tests/functional/objects/test_endpoint.py","file_name":"test_endpoint.py","file_ext":"py","file_size_in_byte":7817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63406477","text":"import autocti as ac\r\n\r\n\r\ndef test__fit_quantities_same_as_calculated_individually(\r\n dataset_1d_7, mask_1d_7_unmasked\r\n):\r\n masked_dataset_7 = dataset_1d_7.apply_mask(mask=mask_1d_7_unmasked)\r\n\r\n post_cti_data = ac.Array1D.full(\r\n fill_value=1.0,\r\n shape_native=masked_dataset_7.data.shape_native,\r\n pixel_scales=1.0,\r\n ).native\r\n\r\n fit = ac.FitDataset1D(dataset=masked_dataset_7, post_cti_data=post_cti_data)\r\n\r\n residual_map = 
ac.util.fit.residual_map_with_mask_from(\r\n data=masked_dataset_7.data, mask=mask_1d_7_unmasked, model_data=post_cti_data\r\n )\r\n\r\n assert (fit.residual_map == residual_map).all()\r\n\r\n chi_squared_map = ac.util.fit.chi_squared_map_with_mask_from(\r\n residual_map=residual_map,\r\n noise_map=masked_dataset_7.noise_map,\r\n mask=mask_1d_7_unmasked,\r\n )\r\n\r\n assert (fit.chi_squared_map == chi_squared_map).all()\r\n\r\n chi_squared = ac.util.fit.chi_squared_with_mask_from(\r\n chi_squared_map=chi_squared_map, mask=mask_1d_7_unmasked\r\n )\r\n\r\n noise_normalization = ac.util.fit.noise_normalization_with_mask_from(\r\n noise_map=masked_dataset_7.noise_map, mask=mask_1d_7_unmasked\r\n )\r\n\r\n log_likelihood = ac.util.fit.log_likelihood_from(\r\n chi_squared=chi_squared, noise_normalization=noise_normalization\r\n )\r\n\r\n assert fit.log_likelihood == log_likelihood\r\n","sub_path":"test_autocti/dataset_1d/test_fit.py","file_name":"test_fit.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326625258","text":"import requests\r\nfrom xlsxwriter import Workbook\r\nimport xlrd\r\nfrom xlutils.copy import copy\r\nfrom pprint import pprint\r\nfrom random import randint\r\nfrom time import sleep\r\nimport openpyxl as op\r\nfrom parsel import Selector\r\n\r\n\r\ndef read_excel():\r\n liste = op.load_workbook('google_play_title_final.xlsx')\r\n sheet = liste.get_sheet_by_name('urls')\r\n r = sheet.max_row\r\n urls=[]\r\n for i in range(1, r+1):\r\n urls.append(sheet.cell(row=i, column=1).value)\r\n return urls\r\n\r\n\r\ndef movie_details():\r\n urls=read_excel()\r\n movie_failed=[]\r\n url_count=0\r\n # urls=['https://play.google.com/store/movies/details/West_of_Redemption?id=suooXOCwjHU']\r\n for url in urls[50:100]:\r\n url_count+=1\r\n if url_count%100==0:\r\n sleep(randint(400,500)/10)\r\n \r\n if url_count%1000==0:\r\n sleep(randint(3000,3500)/10)\r\n\r\n try:\r\n user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36'\r\n headers = {'User-Agent': user_agent}\r\n page=requests.get(url,headers=headers)\r\n sleep(randint(30,40)/10)\r\n print(url)\r\n # import pdb; pdb.set_trace()\r\n response = Selector(text=page.content.decode('utf-8'))\r\n title=response.xpath('//h1[@itemprop=\"name\"]/span/text()').extract_first()\r\n date=response.xpath('//div[@class=\"C4ms4d\"]/span/text()').extract_first()\r\n genre=response.xpath('//div[@class=\"C4ms4d\"]/a/text()').extract_first()\r\n buy_price=response.xpath('//button[contains(@aria-label,\"Buy\")]/text()').extract_first()\r\n rent_price=response.xpath('//button[contains(@aria-label,\"Rent\")]/text()').extract_first()\r\n synopsis=response.xpath('//div[@itemprop=\"description\"]/content/text()').extract_first()\r\n print(synopsis,\"s\")\r\n cast=response.xpath('//span[@itemprop=\"actor\"]/a/span/text()').extract()\r\n producer=response.xpath('//span[@itemprop=\"producer\"]/a/span/text()').extract()\r\n director=response.xpath('//span[@itemprop=\"director\"]/a/span/text()').extract()\r\n writers=response.xpath('//span[@itemprop=\"writer\"]/a/span/text()').extract()\r\n language=response.xpath('//div[text()=\"Audio language\"]/following-sibling::div/span/text()').extract()\r\n ratings=response.xpath('//div[text()=\"Rating\"]/following-sibling::span//span/text()').extract()\r\n\r\n check_collection=response.xpath('//h2[text()=\"In this bundle\"]').extract()\r\n if 
check_collection:\r\n # import pdb; pdb.set_trace()\r\n collection_urls=response.xpath('//div[div[h2[text()=\"In this bundle\"]]]/following-sibling::div//a[@class=\"JC71ub\"]/@href').extract()\r\n titles=response.xpath('//div[div[h2[text()=\"In this bundle\"]]]/following-sibling::div//*[@title!=\"\"]/@title').extract()\r\n for ititle in titles:\r\n output=[title,make_unicode(ititle),date,genre,buy_price,rent_price,make_unicode(synopsis),list_join(cast),list_join(producer),list_join(director),list_join(writers),list_join(language),ratings,url]\r\n write_collection_excel(output)\r\n else:\r\n duration=response.xpath('//div[@class=\"C4ms4d\"]/span/span/text()').extract_first()\r\n output=[make_unicode(title),date,duration,genre,buy_price,rent_price,make_unicode(synopsis),list_join(cast),list_join(producer),list_join(director),list_join(writers),list_join(language),ratings,url]\r\n write_excel(output)\r\n except Exception as e:\r\n movie_failed.append(url)\r\n failed_write(movie_failed) \r\n print(e)\r\n print(\"exception in loading the movie\" + str(url))\r\n\r\ndef make_unicode(input):\r\n try:\r\n input = input.decode('utf-8')\r\n return input\r\n except:\r\n return input\r\n\r\ndef list_join(value):\r\n output=\" | \".join(value)\r\n return output\r\n\r\ndef write_excel(output):\r\n rb=xlrd.open_workbook(\"movie_details.xls\") \r\n wb=copy(rb)\r\n # wb=Workbook(\"sports_urls.xls\")\r\n w_sheet=wb.get_sheet(0)\r\n ir=len(w_sheet._Worksheet__rows)\r\n\r\n for j,item in enumerate(output):\r\n print(item)\r\n w_sheet.write(ir,j,item)\r\n\r\n wb.save('movie_details.xls')\r\n\r\ndef write_collection_excel(output):\r\n rb=xlrd.open_workbook(\"movie_details_collection.xls\") \r\n wb=copy(rb)\r\n # wb=Workbook(\"sports_urls.xls\")\r\n w_sheet=wb.get_sheet(0)\r\n ir=len(w_sheet._Worksheet__rows)\r\n\r\n for j,item in enumerate(output):\r\n w_sheet.write(ir,j,item)\r\n\r\n wb.save('movie_details_collection.xls')\r\n\r\ndef failed_write(failed_list):\r\n wb=op.Workbook()\r\n dest_file_name=\"movie_failed.xlsx\"\r\n ws=wb.active\r\n ws.title = \"urls\"\r\n ws.append(failed_list)\r\n wb.save(filename=dest_file_name)\r\n\r\nmovie_details()","sub_path":"parsel_google_play.py","file_name":"parsel_google_play.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"290437561","text":"import gui\nfrom gui import screens, popups\nfrom gui.decorators import cb_with_args\nimport gui.common\nimport lvgl as lv\n\nimport utime as time\nimport os, gc, sys\nimport ujson as json\n# hex and base64 encoding\nfrom ubinascii import hexlify, unhexlify, a2b_base64, b2a_base64\n\nfrom bitcoin import bip39, bip32, psbt, script\nfrom bitcoin.networks import NETWORKS\nfrom keystore import KeyStore\n\nfrom qrscanner import QRScanner\nfrom usbhost import USBHost\nfrom rng import get_random_bytes\n\nfrom pin import Secret, Key\nfrom platform import simulator, storage_root, USB_ENABLED, DEV_ENABLED\nfrom ucryptolib import aes\nfrom hashlib import hmac_sha512\n\nfrom io import BytesIO\n\nreckless_fname = \"%s/%s\" % (storage_root, \"reckless.json\")\n\nqr_scanner = QRScanner()\nusb_host = USBHost()\n\n# entropy that will be converted to mnemonic\nentropy = None\n# network we are using\nnetwork = None\n# our key storage\nkeystore = KeyStore(storage_root=storage_root)\n\nDEFAULT_XPUBS = []\nALL_XPUBS = []\n\nSUPPORTED_SCRIPTS = {\n \"p2wpkh\": \"Native Segwit\",\n \"p2sh-p2wpkh\": \"Nested Segwit\",\n \"p2wsh-sortedmulti\": \"Native Segwit 
Multisig\",\n \"p2sh-p2wsh-sortedmulti\": \"Nested Segwit Multisig\",\n}\n\ndef catchit(fn):\n \"\"\" Catches an error in the function and \n displays error screen with exception \"\"\"\n # Maybe there is a better way... \n def cb(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n b = BytesIO()\n sys.print_exception(e, b)\n gui.error(\"Something bad happened...\\n\\n%s\" % b.getvalue().decode())\n return cb\n\n@catchit\ndef cancel_scan():\n print(\"Cancel scan!\")\n qr_scanner.stop()\n show_main()\n\n@catchit\ndef del_wallet(w):\n keystore.delete_wallet(w)\n\n@catchit\ndef select_wallet(w):\n popups.show_wallet(w, delete_cb=del_wallet)\n\n@catchit\ndef new_wallet_confirm(name, descriptor):\n # print(\"creating wallet %s:\" % name,descriptor)\n keystore.create_wallet(name, descriptor)\n\n@catchit\ndef parse_new_wallet(s):\n show_main()\n gui.update(30)\n\n # wallet format:\n # name&descriptor\n arr = s.split(\"&\")\n if len(arr) != 2:\n gui.error(\"Invalid wallet format\")\n return\n w = keystore.check_new_wallet(*arr)\n keys_str = []\n for key in w.keys:\n k = (\"%r\" % key).replace(\"]\", \"]\\n\")\n if keystore.owns_key(key):\n keys_str.append(\"#7ED321 My key: # %s\" % k)\n else:\n keys_str.append(\"#F5A623 External key: # %s\" % k)\n keys = \"\\n\\n\".join(keys_str)\n if w.script_type not in SUPPORTED_SCRIPTS.keys():\n raise ValueError(\"Script type \\\"%s\\\" is not supported\" % w.script_type)\n sc = w.script_type\n msg = \"Policy: %s\\nScript: %s\\n%s\\n\\n%s\" % (w.policy, SUPPORTED_SCRIPTS[w.script_type], sc, keys)\n\n scr = popups.prompt(\"Add wallet \\\"%s\\\"?\" % arr[0], msg, ok=cb_with_args(new_wallet_confirm, name=arr[0], descriptor=arr[1]))\n scr.message.set_recolor(True)\n\n@catchit\ndef add_new_wallet():\n screens.show_progress(\"Scan wallet to add\",\n \"Scanning.. 
Click \\\"Cancel\\\" to stop.\",\n callback=cancel_scan)\n gui.update(30)\n qr_scanner.start_scan(parse_new_wallet)\n\n@catchit\ndef wallets_menu():\n buttons = []\n def wrapper(w):\n def cb():\n select_wallet(w)\n return cb\n for wallet in keystore.wallets:\n buttons.append((wallet.name, wrapper(wallet)))\n buttons.append((lv.SYMBOL.PLUS+\" Add new wallet (scan)\", add_new_wallet))\n gui.create_menu(buttons=buttons, cb_back=show_main, title=\"Select the wallet\")\n\n@catchit\ndef show_xpub(name, derivation, xpub=None):\n xpubs_menu()\n gui.update(30)\n try:\n if xpub is None:\n xpub = keystore.get_xpub(derivation)\n prefix = \"[%s]\" % bip32.path_to_str(bip32.parse_path(derivation), fingerprint=keystore.fingerprint)\n except:\n gui.error(\"Derivation path \\\"%s\\\" doesn't look right...\" % derivation)\n return\n xpub_str = xpub.to_base58(network[\"xpub\"])\n slip132 = xpub.to_base58()\n if slip132 == xpub_str:\n slip132 = None\n popups.show_xpub(name, xpub_str, slip132=slip132, prefix=prefix)\n\n@catchit\ndef get_custom_xpub_path():\n def cb(derivation):\n show_xpub(\"Custom path key\", derivation)\n screens.ask_for_derivation(cb, xpubs_menu)\n\n@catchit\ndef more_xpubs_menu():\n def selector(name, derivation):\n def cb():\n show_xpub(\"Master \"+name, derivation)\n return cb\n buttons = []\n for name, derivation in ALL_XPUBS:\n buttons.append((name, selector(name, derivation)))\n buttons.append((\"Enter custom derivation\", get_custom_xpub_path))\n gui.create_menu(buttons=buttons, cb_back=xpubs_menu, title=\"Select the master key\")\n\n@catchit\ndef xpubs_menu():\n def selector(name, derivation):\n def cb():\n show_xpub(\"Master \"+name, derivation)\n return cb\n buttons = []\n for name, derivation in DEFAULT_XPUBS:\n buttons.append((name, selector(name, derivation)))\n buttons.append((\"Show more keys\", more_xpubs_menu))\n buttons.append((\"Enter custom derivation\", get_custom_xpub_path))\n gui.create_menu(buttons=buttons, cb_back=show_main, title=\"Select the master key\")\n\n@catchit\ndef sign_psbt(wallet=None, tx=None, success_callback=None):\n keystore.sign(tx)\n # remove everything but partial sigs\n # to reduce QR code size\n tx.unknown = {}\n tx.xpubs = {}\n for i in range(len(tx.inputs)):\n tx.inputs[i].unknown = {}\n tx.inputs[i].non_witness_utxo = None\n tx.inputs[i].witness_utxo = None\n tx.inputs[i].sighash_type = None\n tx.inputs[i].bip32_derivations = {}\n tx.inputs[i].witness_script = None\n tx.inputs[i].redeem_script = None\n for i in range(len(tx.outputs)):\n tx.outputs[i].unknown = {}\n tx.outputs[i].bip32_derivations = {}\n tx.outputs[i].witness_script = None\n tx.outputs[i].redeem_script = None\n b64_tx = b2a_base64(tx.serialize()).decode('utf-8')\n if b64_tx[-1:] == \"\\n\":\n b64_tx = b64_tx[:-1]\n popups.qr_alert(\"Signed transaction:\", b64_tx, \"Scan it with your software wallet\", width=520)\n if success_callback is not None:\n success_callback(b64_tx)\n\n@catchit\ndef parse_transaction(b64_tx, success_callback=None, error_callback=None):\n # we will go to main afterwards\n show_main()\n try:\n raw = a2b_base64(b64_tx)\n tx = psbt.PSBT.parse(raw)\n except:\n gui.error(\"Failed at transaction parsing\")\n if error_callback is not None:\n error_callback(\"invalid argument\")\n return\n # blue wallet trick - if the fingerprint is 0 we use our fingerprint\n for scope in [tx.inputs, tx.outputs]:\n for el in scope:\n for der in el.bip32_derivations:\n if el.bip32_derivations[der].fingerprint == b'\\x00\\x00\\x00\\x00':\n el.bip32_derivations[der].fingerprint = 
keystore.fingerprint\n try:\n data = keystore.check_psbt(tx)\n except Exception as e:\n gui.error(\"Problem with the transaction: %r\" % e)\n if error_callback is not None:\n error_callback(\"invalid argument\")\n return\n title = \"Spending %u\\nfrom %s\" % (data[\"spending\"], data[\"wallet\"].name)\n popups.prompt_tx(title, data,\n ok=cb_with_args(sign_psbt, wallet=data[\"wallet\"], tx=tx, success_callback=success_callback), \n cancel=cb_with_args(error_callback, \"user cancel\")\n )\n\n@catchit\ndef scan_transaction():\n screens.show_progress(\"Scan transaction to sign\",\n \"Scanning.. Click \\\"Cancel\\\" to stop.\",\n callback=cancel_scan)\n gui.update(30)\n qr_scanner.start_scan(parse_transaction)\n\n@catchit\ndef verify_address(s):\n # we will go to main afterwards\n show_main()\n # verifies address in the form [bitcoin:]addr?index=i\n s = s.replace(\"bitcoin:\", \"\")\n arr = s.split(\"?\")\n index = None\n addr = None\n # check that ?index= is there\n if len(arr) > 1:\n addr = arr[0]\n meta_arr = arr[1].split(\"&\")\n # search for `index=`\n for meta in meta_arr:\n if meta.startswith(\"index=\"):\n try:\n index = int(meta.split(\"=\")[1])\n except:\n gui.error(\"Index is not an integer...\")\n return\n if index is None or addr is None:\n # where we will go next\n gui.error(\"No derivation index in the address metadata - can't verify.\")\n return\n for w in keystore.wallets:\n if w.address(index) == addr:\n popups.qr_alert(\"Address #%d from wallet\\n\\\"%s\\\"\" % (index+1, w.name),\n \"bitcoin:%s\"%addr, message_text=addr)\n return\n gui.error(\"Address doesn't belong to any wallet. Wrong device or network?\")\n\n@catchit\ndef scan_address():\n screens.show_progress(\"Scan address to verify\",\n \"Scanning.. Click \\\"Cancel\\\" to stop.\",\n callback=cancel_scan)\n gui.update(30)\n qr_scanner.start_scan(verify_address)\n\n@catchit\ndef set_network_xpubs(net):\n while len(DEFAULT_XPUBS) > 0:\n DEFAULT_XPUBS.pop()\n DEFAULT_XPUBS.append((\"Single key\", \"m/84h/%dh/0h\" % net[\"bip32\"]))\n DEFAULT_XPUBS.append((\"Multisig\", \"m/48h/%dh/0h/2h\" % net[\"bip32\"]))\n\n while len(ALL_XPUBS) > 0:\n ALL_XPUBS.pop()\n ALL_XPUBS.append((\"Single Native Segwit\\nm/84h/%dh/0h\" % net[\"bip32\"], \"m/84h/%dh/0h\" % net[\"bip32\"]))\n ALL_XPUBS.append((\"Single Nested Segwit\\nm/49h/%dh/0h\" % net[\"bip32\"], \"m/49h/%dh/0h\" % net[\"bip32\"]))\n ALL_XPUBS.append((\"Multisig Native Segwit\\nm/48h/%dh/0h/2h\" % net[\"bip32\"], \"m/48h/%dh/0h/2h\" % net[\"bip32\"]))\n ALL_XPUBS.append((\"Multisig Nested Segwit\\nm/48h/%dh/0h/1h\" % net[\"bip32\"], \"m/48h/%dh/0h/1h\" % net[\"bip32\"]))\n\n@catchit\ndef select_network(name):\n global network\n if name in NETWORKS:\n network = NETWORKS[name]\n if keystore.is_initialized:\n set_network_xpubs(network)\n # load existing wallets for this network\n keystore.load_wallets(name)\n # create a default wallet if it doesn't exist\n if len(keystore.wallets) == 0:\n # create a wallet descriptor\n # this is not exactly compatible with Bitcoin Core though.\n # '_' means 0/* or 1/* - standard receive and change \n # derivation patterns\n derivation = DEFAULT_XPUBS[0][1]\n xpub = keystore.get_xpub(derivation).to_base58()\n fingerprint = hexlify(keystore.fingerprint).decode('utf-8')\n prefix = \"[%s%s]\" % (fingerprint, derivation[1:])\n descriptor = \"wpkh(%s%s/_)\" % (prefix, xpub)\n keystore.create_wallet(\"Default\", descriptor)\n else:\n raise RuntimeError(\"Unknown network\")\n\n@catchit\ndef network_menu():\n def selector(name):\n def cb():\n try:\n 
select_network(name)\n show_main()\n except Exception as e:\n gui.error(\"%r\" % e)\n return cb\n # could be done with iterator\n # but order is unknown then\n gui.create_menu(buttons=[\n (\"Mainnet\", selector(\"main\")),\n (\"Testnet\", selector(\"test\")),\n (\"Regtest\", selector(\"regtest\")),\n (\"Signet\", selector(\"signet\"))\n ], title=\"Select the network\")\n\n@catchit\ndef show_mnemonic():\n # print(bip39.mnemonic_from_bytes(entropy))\n popups.show_mnemonic(bip39.mnemonic_from_bytes(entropy))\n\n@catchit\ndef save_entropy():\n gui.prompt(\"Security\", \"Do you want to encrypt your key?\", save_entropy_encrypted, save_entropy_plain)\n\n@catchit\ndef entropy_decrypt(entropy_encrypted):\n # 2 - MODE_CBC\n crypto = aes(Key.key, 2, Key.iv)\n data = crypto.decrypt(entropy_encrypted)\n l = data[0]\n if l > 32:\n raise RuntimeError(\"Failed to decrypt entropy - data is corrupted\")\n return data[1:l+1]\n\n@catchit\ndef entropy_encrypt(entropy_plain):\n # 2 - MODE_CBC\n crypto = aes(Key.key, 2, Key.iv)\n # encrypted data should be mod 16 (blocksize)\n pad_len = 16-((len(entropy_plain)+1) % 16)\n data = bytes([len(entropy_plain)])+entropy_plain+bytes(pad_len)\n return crypto.encrypt(data);\n\n@catchit\ndef save_entropy_encrypted():\n try:\n Key.iv = get_random_bytes(16)\n entropy_encrypted = entropy_encrypt(entropy)\n hmac_entropy_encrypted = hmac_sha512(Key.key, entropy_encrypted)\n obj = {\n \"entropy\": hexlify(entropy_encrypted).decode('utf-8'),\n \"iv\": hexlify(Key.iv).decode('utf-8'),\n \"hmac\": hexlify(hmac_entropy_encrypted).decode('utf-8')\n }\n with open(reckless_fname, \"w\") as f:\n f.write(json.dumps(obj))\n with open(reckless_fname, \"r\") as f:\n d = json.loads(f.read())\n if \"entropy\" in d and d[\"entropy\"] == hexlify(entropy_encrypted).decode('utf-8') and \\\n unhexlify(d[\"hmac\"]) == hmac_entropy_encrypted and entropy == entropy_decrypt(entropy_encrypted):\n gui.alert(\"Success!\", \"Your encrypted key is saved in the memory now\")\n else:\n gui.error(\"Something went wrong\")\n except Exception as e:\n gui.error(\"Fail: %r\" % e)\n\n@catchit\ndef save_entropy_plain():\n obj = {\"entropy\": hexlify(entropy).decode('utf-8')}\n with open(reckless_fname, \"w\") as f:\n f.write(json.dumps(obj))\n with open(reckless_fname, \"r\") as f:\n d = json.loads(f.read())\n if \"entropy\" in d and d[\"entropy\"] == hexlify(entropy).decode('utf-8'):\n gui.alert(\"Success!\", \"Your key is saved in the memory now\")\n else:\n gui.error(\"Something went wrong\")\n\n@catchit\ndef delete_entropy():\n try:\n os.remove(reckless_fname)\n gui.alert(\"Success!\", \"Your key is deleted\")\n except:\n gui.error(\"Failed to delete the key\")\n\n@catchit\ndef save_settings(config):\n try:\n if USB_ENABLED and not config[\"usb\"]:\n os.remove(\"%s/%s\" % (storage_root, \"USB_ENABLED\"))\n if not USB_ENABLED and config[\"usb\"]:\n with open(\"%s/%s\" % (storage_root, \"USB_ENABLED\"), \"w\") as f:\n f.write(\"dummy\") # should be hmac instead\n if DEV_ENABLED and not config[\"developer\"]:\n os.remove(\"%s/%s\" % (storage_root, \"DEV_ENABLED\"))\n if not DEV_ENABLED and config[\"developer\"]:\n with open(\"%s/%s\" % (storage_root, \"DEV_ENABLED\"), \"w\") as f:\n f.write(\"dummy\") # should be hmac instead\n time.sleep_ms(100)\n if simulator:\n # meh... 
kinda doesn't work on unixport\n sys.exit()\n else:\n import pyb\n pyb.hard_reset()\n except Exception as e:\n gui.error(\"Failed to update settings!\\n%r\" % e)\n print(config)\n\n@catchit\ndef settings_menu():\n gui.create_menu(buttons=[\n (\"Show recovery phrase\", show_mnemonic),\n (\"Save key to memory\", save_entropy),\n (\"Delete key from memory\", delete_entropy),\n (\"Security settings\", \n cb_with_args(popups.show_settings, \n {\"usb\": USB_ENABLED, \"developer\": DEV_ENABLED}, \n save_settings)),\n ], cb_back=show_main,title=\"Careful. Think twice.\")\n\n@catchit\ndef show_main():\n gui.create_menu(buttons=[\n (\"Wallets\", wallets_menu),\n (\"Master public keys\", xpubs_menu),\n (\"Sign transaction\", scan_transaction),\n (\"Verify address\", scan_address),\n (\"Use another password\", ask_for_password),\n (\"Switch network (%s)\" % network[\"name\"], network_menu),\n (\"Settings\", settings_menu)\n ])\n\n@catchit\ndef get_new_mnemonic(words=12):\n entropy_len = words*4//3\n global entropy\n entropy = get_random_bytes(entropy_len)\n return bip39.mnemonic_from_bytes(entropy)\n\n@catchit\ndef gen_new_key(words=12):\n mnemonic = get_new_mnemonic(words)\n screens.new_mnemonic(mnemonic,\n cb_continue=ask_for_password,\n cb_back=show_init,\n cb_update=get_new_mnemonic)\n\n@catchit\ndef recover_key():\n screens.ask_for_mnemonic(cb_continue=mnemonic_entered,\n cb_back=show_init,\n check_mnemonic=bip39.mnemonic_is_valid,\n words_lookup=bip39.find_candidates)\n\n@catchit\ndef mnemonic_entered(mnemonic):\n global entropy\n entropy = bip39.mnemonic_to_bytes(mnemonic.strip())\n ask_for_password()\n\n@catchit\ndef load_key():\n global entropy\n with open(reckless_fname, \"r\") as f:\n d = json.loads(f.read())\n entropy = unhexlify(d[\"entropy\"])\n if \"hmac\" in d:\n hmac_calc = hmac_sha512(Key.key, entropy)\n if unhexlify(d[\"hmac\"]) != hmac_calc:\n raise ValueError('Hmac does not match!')\n Key.iv = unhexlify(d[\"iv\"])\n entropy = entropy_decrypt(entropy)\n if entropy is not None:\n ask_for_password()\n else:\n gui.error(\"Failed to load your recovery phrase.\")\n\n@catchit\ndef show_init():\n buttons = [\n (\"Generate new key\", gen_new_key),\n (\"Enter recovery phrase\", recover_key)\n ]\n # check if reckless.json file exists\n # os.path is not implemented in micropython :(\n try:\n with open(reckless_fname,\"r\") as f:\n c = f.read()\n if len(c) == 0:\n raise RuntimeError(\"%s file is empty\" % reckless_fname)\n # if ok - add an extra button\n buttons.append((\"Load key from memory\", load_key))\n except:\n pass\n screens.create_menu(buttons=buttons)\n\n@catchit\ndef ask_for_password():\n screens.ask_for_password(init_keys)\n\n@catchit\ndef init_keys(password):\n mnemonic = bip39.mnemonic_from_bytes(entropy)\n seed = bip39.mnemonic_to_seed(mnemonic, password)\n keystore.load_seed(seed)\n # choose testnet by default\n select_network(\"test\")\n gc.collect()\n show_main()\n if usb_host.callback is None:\n usb_host.callback = host_callback\n\n# process all usb commands\n@catchit\ndef host_callback(data):\n # close all existing popups\n popups.close_all_popups()\n\n if data==\"fingerprint\":\n usb_host.respond(hexlify(keystore.fingerprint).decode('ascii'))\n return\n\n if data.startswith(\"xpub \"):\n path = data[5:].strip(\" /\\r\\n\")\n try:\n if path == \"m\":\n hd = keystore.root.to_public()\n else:\n hd = keystore.get_xpub(path)\n xpub = hd.to_base58(network[\"xpub\"])\n usb_host.respond(xpub)\n\n show_xpub(\"Master key requested from host:\", path, xpub)\n except Exception as e:\n 
print(e)\n usb_host.respond(\"error: bad derivation path '%s'\" % path)\n return\n\n if data.startswith(\"sign \"):\n def success_cb(signed_tx):\n usb_host.respond(signed_tx)\n def error_cb(error):\n usb_host.respond(\"error: %s\" % error)\n parse_transaction(data[5:], success_callback=success_cb, error_callback=error_cb)\n return\n\n if data.startswith(\"showaddr \"):\n arr = data.split(\" \")\n path = arr[-1].strip()\n addrtype = \"wpkh\"\n if len(arr) > 2:\n addrtype = arr[-2].strip()\n # TODO: detect wallet this address belongs to\n try:\n key = keystore.get_xpub(path)\n if addrtype == \"wpkh\":\n sc = script.p2wpkh(key)\n elif addrtype == \"pkh\":\n sc = script.p2pkh(key)\n elif addrtype == \"sh-wpkh\":\n sc = script.p2sh(script.p2wpkh(key))\n else:\n raise RuntimeError()\n addr=sc.address(network)\n usb_host.respond(addr)\n popups.qr_alert(\"Address with path %s\\n(requested by host)\" % (path),\n \"bitcoin:\"+addr, message_text=addr)\n\n except Exception as e:\n print(e)\n usb_host.respond(\"error: invalid argument\")\n return\n\n if data.startswith(\"importwallet \"):\n parse_new_wallet(data[13:])\n\n # TODO: \n # - signmessage \n # - showdescraddr \n\ndef update(dt=30):\n gui.update(dt)\n qr_scanner.update()\n usb_host.update()\n\ndef ioloop():\n while True:\n time.sleep_ms(30)\n update(30)\n\ndef main(blocking=True):\n # FIXME: check for all ports (unix, js, stm)\n # what is available in os module\n # maybe we can check it without try-except\n try:\n os.mkdir(storage_root)\n except:\n pass\n # schedules display autoupdates if blocking=False\n # it may cause GUI crashing when out of memory\n # but perfect to debug\n gui.init(blocking)\n ret = Secret.load_secret()\n if ret == False:\n Secret.generate_secret()\n screens.ask_pin(not ret, show_init)\n if blocking:\n ioloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"428920905","text":"from datetime import timedelta, date\nimport subprocess\nimport os\nimport time\n\nchannels = ['WDR2', 'WDR3']\n# , 'WDR4', 'BR_Heimat', 'Bayern+', 'Bayern_2_Sued', 'HR1',\n# 'HR2', 'HR3', 'HR4', 'You_FM', 'MDR_Klassik', 'MDR_Jump', 'MDR_Sputnik', 'NDR_90.3',\n# 'NDR2', 'NDR_Spez', 'NDR_Blue', 'NDR1', 'B888', 'Fritz', 'Radio_Eins', 'Br_Klassik',\n# 'Bayern_1', 'Bayern_3']\n\n\n# pool = multiprocessing.Pool(len(channels))\n\ndef parallel_starter():\n pass\n\ndef runner(directory):\n run_time = 1\n for channel in channels:\n # call = ['/usr/local/bin/python3', '/Users/Raul/Dropbox/Documents/Uni/Bachelorarbeit/AudioRecorder/runner.py', channel, directory, str(time)]\n # file = channel+'-rec-log.txt'\n # log = os.path.join(directory, \"Data\", \"Logs\", file)\n # log = open(log, 'a') # so that data written to it will be appended\n # c = subprocess.Popen(call, stdout=log, stderr=log)\n # exit_code = p.poll()\n run_channel(directory, channel)\n # while True:\n # exit_code = run_channel(directory, channel)\n # while exit_code == None:\n # time.sleep(.10)\n # print(exit_code, channel)\n # exit_code = None\n\n\ndef run_channel(directory, channel):\n run_time = 1\n call = ['/usr/local/bin/python3', '/Users/Raul/Dropbox/Documents/Uni/Bachelorarbeit/AudioRecorder/runner.py', channel, directory, str(run_time)]\n file = channel+'-rec-log.txt'\n log = os.path.join(directory, \"Data\", \"Logs\", file)\n log = open(log, 'a') # so that data written to it will be appended\n p = 
subprocess.Popen(call, stdout=log, stderr=log)\n exit_code = p.poll()\n while exit_code == None:\n time.sleep(.5)\n exit_code = p.poll()\n run_channel(directory, channel)\n\n\ndef main():\n # directory = '/Volumes/Untitled/Ba/'\n directory=\"Test\"\n runner(directory)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"runner_controller.py","file_name":"runner_controller.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"432254082","text":"# Copyright (c) 2014 Carlos Valiente\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Apache Libcloud compute driver implementation for Vagrant.\"\"\"\n\nimport itertools\nimport logging\nimport os\nimport pwd\nimport re\nimport subprocess\nimport time\n\nfrom libcloud.common.types import LibcloudError\nfrom libcloud.compute import base\nfrom libcloud.compute.types import NodeState\n\nfrom libcloudvagrant import virtualbox\nfrom libcloudvagrant.catalogue import VagrantCatalogue\nfrom libcloudvagrant.types import (\n VagrantImage,\n VagrantNetwork,\n VagrantNode,\n VagrantNodeSize,\n VagrantVolume,\n)\n\n\n__all__ = [\n \"VAGRANT\",\n \"VagrantDriver\",\n]\n\n\nVAGRANT = \"vagrant\"\n\n_HOME = pwd.getpwuid(os.getuid()).pw_dir\n\n\nclass VagrantDriver(base.NodeDriver):\n\n \"\"\"Apache Libcloud driver implementation.\n\n Docstrings here document how this driver diverges from the base class.\n\n \"\"\"\n\n type = VAGRANT\n name = \"Vagrant\"\n website = \"http://www.vagrantup.com/\"\n\n log = logging.getLogger(\"libcloudvagrant\")\n\n def __init__(self):\n super(VagrantDriver, self).__init__(key=None)\n\n def attach_volume(self, node, volume, device=None):\n \"\"\"Attaches volume to node.\n\n At the moment only SATA devices of the form ``/dev/sd[a-z]`` are\n accepted as values to parameter ``device``.\n\n :param node: Node to attach volume to.\n :type node: :class:`VagrantNode`\n\n :param volume: Volume to attach.\n :type volume: :class:`VagrantVolume`\n\n :param device: Where the device is exposed, e.g. 
'/dev/sdb'\n :type device: ``str``\n\n :rytpe: ``bool``\n\n \"\"\"\n if volume.attached_to:\n self.log.warn(\"Volume %s already attached to %s\",\n volume.name, volume.attached_to)\n return False\n\n if node.state not in (NodeState.STOPPED, NodeState.UNKNOWN):\n self.log.warn(\"Cannot attach volumes to running nodes\")\n return False\n\n try:\n with self._catalogue as c:\n node_uuid = c.virtualbox_uuid(node)\n virtualbox.attach_volume(node_uuid, volume.path, device)\n volume.attached_to = node.name\n c.update_volume(volume)\n except:\n self.log.warn(\"Error attaching %s to %s\", volume, node,\n exc_info=True)\n return False\n self.log.info(\"Volume '%s' attached to node '%s'\",\n volume.name, node.name)\n return True\n\n def create_node(self, name, size, image, networks=None, **kwargs):\n \"\"\"Create a new node instance. This instance will be started\n automatically.\n\n Requires the following arguments:\n\n :param name: String with a name for this new node\n :type name: ``str``\n\n :param size: The size of resources allocated to this node.\n :type size: :class:`VagrantSize`\n\n :param image: OS Image to boot on node.\n :type image: :class:`VagrantImage`\n\n Accepts the following non-standard arguments:\n\n :param networks: The networks to connect this node to.\n :type networks: ``list`` of :class:`VagrantNetwork`\n\n All other arguments are ignored.\n\n \"\"\"\n if networks is None:\n networks = []\n\n self.log.info(\"Creating node '%s' ..\", name)\n\n with self._catalogue as c:\n public_ips = [n.allocate_address().to_dict()\n for n in networks if n.public]\n private_ips = [n.allocate_address().to_dict()\n for n in networks if not n.public]\n size = size.to_dict()\n image = image.to_dict()\n for n in networks:\n c.update_network(n)\n node = VagrantNode(name=name,\n public_ips=public_ips,\n private_ips=private_ips,\n driver=self,\n size=size,\n image=image)\n self.log.debug(\"create_node(%s): Created object: %s\", name, node)\n c.add_node(node)\n c.save() # Explicit save, so that the next command succeeds\n self.ex_start_node(node)\n self.log.info(\".. 
Node '%s' created\", name)\n return node\n\n def create_volume(self, size, name, **kwargs):\n \"\"\"Create a new volume.\n\n :param size: Size of volume in gigabytes (required)\n :type size: ``int``\n\n :param name: Name of the volume to be created\n :type name: ``str``\n\n All other arguments are ignored.\n\n :return: The newly created volume.\n :rtype: :class:`VagrantVolume`\n\n \"\"\"\n with self._catalogue as c:\n path = c.volume_path(\"%s.vdi\" % (name,))\n virtualbox.create_volume(path=path, size=size * 1024)\n volume = VagrantVolume(name=name,\n size=size,\n extra={\n \"attached_to\": None,\n \"path\": path,\n },\n driver=self)\n c.add_volume(volume)\n self.log.info(\"Volume '%s' created\", name)\n return volume\n\n def delete_image(self, image):\n \"\"\"Deletes a node image from a provider.\n\n :param image: Node image object.\n :type image: :class:`VagrantImage`\n\n :return: ``True`` if delete_image was successful, ``False`` otherwise.\n :rtype: ``bool``\n\n \"\"\"\n try:\n self._vagrant(\"box remove --force --provider virtualbox\", image.id)\n return True\n except:\n self.log.warn(\"Cannot remove image %s\", image, exc_info=True)\n return False\n\n def deploy_node(self, **kwargs):\n \"\"\"Create a new node, and start deployment.\n\n This method calls ``Node.deploy_node()`` with the default Vagrant SSH\n connection parameters and credentials.\n\n \"\"\"\n ssh_config = self._vagrant_ssh_config()\n kwargs[\"ssh_username\"] = ssh_config[\"user\"]\n kwargs[\"ssh_port\"] = ssh_config[\"port\"]\n kwargs[\"ssh_key\"] = ssh_config[\"key\"]\n kwargs[\"ssh_interface\"] = ssh_config[\"host\"]\n self.log.debug(\"Deploy args: %s\", kwargs)\n super(VagrantDriver, self).deploy_node(**kwargs)\n\n def detach_volume(self, volume):\n \"\"\"Detaches a volume from a node.\n\n :param volume: Volume to be detached\n :type volume: :class:`VagrantVolume`\n\n :rtype: ``bool`\n\n \"\"\"\n node = volume.attached_to\n if not node:\n return True\n try:\n with self._catalogue as c:\n node_uuid = c.virtualbox_uuid(node)\n virtualbox.detach_volume(node_uuid, volume.path)\n volume.attached_to = None\n c.update_volume(volume)\n return True\n except:\n self.log.warn(\"Cannot detach volume %s\", volume.name,\n exc_info=True)\n return False\n\n def destroy_node(self, node):\n \"\"\"Destroy a node.\n\n Volumes attached to this node and networks this node is connected to\n are not destroyed.\n\n :param node: The node to be destroyed\n :type node: :class:`VagrantNode`\n\n :return: True if the destroy was successful, False otherwise.\n :rtype: ``bool``\n\n \"\"\"\n try:\n with self._catalogue as c:\n self._vagrant(\"destroy --force\", node.name)\n for ip in node._public_ips + node._private_ips:\n self.log.debug(\"destroy_node(): Deallocating address %s\",\n ip)\n n = c.find_network(ip.network_name)\n n.deallocate_address(ip.address)\n c.update_network(n)\n for v in c.get_volumes():\n if v.attached_to == node.name:\n self.log.debug(\"destroy_node(): Detaching %s\", v)\n self.detach_volume(v)\n c.remove_node(node)\n return True\n except:\n self.log.warn(\"Cannot destroy %s\", node.name, exc_info=True)\n return False\n\n def destroy_volume(self, volume):\n \"\"\"Destroys a storage volume.\n\n :param volume: Volume to be destroyed\n :type volume: :class:`VagrantVolume`\n\n :return: True if the destroy was successful, False otherwise.\n :rtype: ``bool``\n\n \"\"\"\n if volume.attached_to:\n self.log.warn(\"Cannot destroy volume %s: It is attached to %s\",\n volume.name, volume.attached_to)\n return False\n with self._catalogue 
as c:\n c.remove_volume(volume)\n try:\n os.unlink(volume.path)\n except IOError:\n self.log.warn(\"Cannot unlink %s\", volume.path, exc_info=True)\n return True\n\n def get_image(self, image_id):\n \"\"\"Returns a Vagrant image object.\n\n :param image_id: Image to retrieve (like ``hashicorp/precise64``).\n :type image_id: ``str``\n\n :return: Image instance on success.\n :rtype :class:`VagrantImage`\n\n \"\"\"\n def find_image():\n for image in self.list_images():\n if image.id == image_id:\n return image\n\n image = find_image()\n if image:\n return image\n\n self.log.info(\"Fetching image '%s' ..\", image_id)\n self._vagrant(\"box add --provider virtualbox\", image_id)\n self.log.info(\".. Done fetching image '%s'\", image_id)\n\n return find_image()\n\n def list_images(self, location=None):\n \"\"\"Lists registered images\n\n :return: A list of registered images\n :rtype: ``list`` of :class:`VagrantImage`\n\n \"\"\"\n images = []\n cur = None\n for line in self._vagrant(\"box list\").strip().split(\"\\n\"):\n self.log.debug(\"Scanning [%s]\", line)\n if not line:\n continue\n bits = line.split(\",\")\n key = bits[2]\n data = bits[3]\n if key == \"box-name\":\n if cur is not None:\n images.append(cur)\n cur = {\"box-name\": data}\n elif key == \"box-provider\":\n cur[\"box-provider\"] = data\n if cur:\n images.append(cur)\n\n return [VagrantImage(name=i[\"box-name\"], driver=self)\n for i in images if i[\"box-provider\"] == \"virtualbox\"]\n\n def list_nodes(self):\n \"\"\"Lists all registered nodes.\n\n :return: A list of node objects\n :rtype: ``list`` of :class:`VagrantNode`\n\n \"\"\"\n with self._catalogue as catalogue:\n nodes = catalogue.get_nodes()\n self.log.debug(\"Catalogue nodes: %s\", nodes)\n return nodes\n\n def list_sizes(self, location=None):\n \"\"\"Returns the single size object defined.\n\n The default size object instructs ``libcloud`` to create a node with\n the same amount of memory and number of CPUs as those of the Vagrant\n image it is created from.\n\n The ``location`` argument is ignored.\n\n :return: A list of one single size object\n :rtype: ``list`` of :class:`VagrantSize`\n\n \"\"\"\n return [VagrantNodeSize(name=\"default\",\n ram=0,\n driver=self,\n extra={\"cpu\": 0})]\n\n def list_volumes(self):\n \"\"\"Lists all registered storage volumes.\n\n :rtype: ``list`` of :class:`VagrantVolume`\n\n \"\"\"\n with self._catalogue as c:\n ret = c.get_volumes()\n self.log.debug(\"list_volumes(): Returning %s\", ret)\n return ret\n\n def reboot_node(self, node):\n \"\"\"Reboot a node.\n\n :param node: The node to be rebooted\n :type node: :class:`VagrantNode`\n\n :return: ``True`` if the reboot was successful, otherwise ``False``\n :rtype: ``bool``\n\n \"\"\"\n try:\n self._vagrant(\"reload --no-provision\", node.name)\n return True\n except:\n self.log.debug(\"Cannot reload %s\", node.name, exc_info=True)\n\n def wait_until_running(self, nodes, wait_period=3, timeout=600,\n ssh_interface=\"public_ips\", force_ipv4=True):\n \"\"\"Block until the provided nodes are considered running.\n\n Unlike its overridden version, this method does not require any public\n or private IP address to be assigned to the host, since Vagrant's NAT\n interface is always available when a node's state is\n ``NodeState.RUNNING``.\n\n :param nodes: List of nodes to wait for.\n :type nodes: ``list`` of :class:`VagrantNode`\n\n :param wait_period: How many seconds to wait between each loop\n iteration. 
(default is 3)\n :type wait_period: ``int``\n\n :param timeout: How many seconds to wait before giving up.\n (default is 600)\n :type timeout: ``int``\n\n :param ssh_interface: Ignored parameter.\n :type ssh_interface: ``str``\n\n :param force_ipv4: Ignored parameter\n :type force_ipv4: ``bool``\n\n :return: ``[(VagrantNode, ip_addresses)]`` list of tuple of\n VagrantNode instance and list of IP addresses of their\n Vagrant NAT interfaces.\n :rtype: ``list`` of ``tuple``\n\n \"\"\"\n start = time.time()\n end = start + timeout\n\n uuids = set([node.uuid for node in nodes])\n\n self.log.debug(\"wait_until_running(): Waiting for %s (%s)\", nodes,\n uuids)\n while time.time() < end:\n running = [n for n in self.list_nodes()\n if (n.uuid in uuids and\n n.state == NodeState.RUNNING)]\n self.log.debug(\"wait_until_running(): Running nodes: %s\", running)\n if len(running) == len(uuids):\n host = self._vagrant_ssh_config()[\"host\"]\n ret = list(zip(running, itertools.repeat([host])))\n self.log.debug(\"wait_until_running(): Returning %s\", ret)\n return ret\n else:\n time.sleep(wait_period)\n\n raise LibcloudError(value='Timed out after %s seconds' % (timeout,),\n driver=self)\n\n def ex_create_network(self, name, cidr, public=False):\n \"\"\"Creates a Vagrant network.\n\n This is an extension method.\n\n :param name: Name of the network\n :type name: ``str``\n\n :param cidr: Address and netmask of the network\n :type cidr: ``str``\n\n :param public: Whether this is a public or a private network (default:\n private network)\n :type public\" ``Bool``\n\n :return: A Vagrant network object\n :rtype: :class:`VagrantNetwork`\n\n \"\"\"\n self.log.debug(\"ex_create_network(%s, %s, %s): Entering\",\n name, cidr, public)\n with self._catalogue as c:\n network = VagrantNetwork(name, cidr, public, allocated=[])\n c.add_network(network)\n return network\n\n def ex_destroy_network(self, network):\n \"\"\"Destroys a Vagrant network object.\n\n Networks with addresses in use by nodes cannot be destroyed.\n\n This is an extension method.\n\n :param network: The Vagrant network to destroy\n :type network: :class:`VagrantNetwork`\n\n :return: ``True`` on success, ``False`` otherwise\n :rtype: ``Bool``\n\n \"\"\"\n try:\n with self._catalogue as c:\n c.remove_network(network)\n return True\n except:\n self.log.warn(\"Cannot destroy network %s\", network, exc_info=True)\n return False\n\n def ex_get_node_state(self, node):\n \"\"\"Returns the state of the given node.\n\n This is an extension method.\n\n :param node: The node object\n :type node: :class:`VagrantNode`\n\n :rtype: :class:`NodeState`\n\n \"\"\"\n try:\n with self._catalogue as c:\n node_uuid = c.virtualbox_uuid(node)\n return virtualbox.get_node_state(node_uuid)\n except:\n self.log.warn(\"Cannot get node state for '%s'\", node.name,\n exc_info=True)\n return NodeState.UNKNOWN\n\n def ex_start_node(self, node):\n \"\"\"Starts a node.\n\n This is an extension method.\n\n :param node: The node object\n :type node: :class:`VagrantNode`\n\n \"\"\"\n self.log.info(\"Starting node '%s' ..\", node.name)\n self._vagrant(\"up --provider virtualbox\", node.name)\n self.log.info(\".. Node '%s' started\", node.name)\n\n def ex_stop_node(self, node):\n \"\"\"Stops a node.\n\n This is an extension method.\n\n :param node: The node object\n :type node: :class:`VagrantNode`\n\n \"\"\"\n self.log.info(\"Stopping node '%s' ..\", node.name)\n with self._catalogue as c:\n node_uuid = c.virtualbox_uuid(node)\n virtualbox.stop_node(node_uuid)\n self.log.info(\".. 
Node '%s' stopped\", node.name)\n\n def _vagrant(self, *args):\n \"\"\"Executes the ``vagrant`` command in machine-readable output format.\n\n Raises and error if the exit status is non-zero.\n\n :param args: Parameters to ``vagrant``\n :type args: ``list``\n\n :return: The combined standard output and standard error of the\n command.\n\n :rtype: ``str``.\n\n \"\"\"\n cmdline = [\"vagrant --machine-readable\"]\n cmdline.extend(args)\n cmdline = \" \".join(str(arg) for arg in cmdline)\n self.log.debug(\"Executing %s (cwd: %s)\",\n cmdline, self._dot_libcloudvagrant)\n p = subprocess.Popen(cmdline, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self._dot_libcloudvagrant)\n stdout, _ = p.communicate()\n if p.returncode:\n raise LibcloudError(stdout, driver=self)\n self.log.debug(stdout)\n return stdout\n\n @property\n def _dot_libcloudvagrant(self):\n \"\"\"Path to the Vagrant catalogue directory.\n\n \"\"\"\n dname = os.path.join(_HOME, \".libcloudvagrant\")\n if not os.access(dname, os.F_OK):\n os.mkdir(dname)\n return dname\n\n @property\n def _catalogue(self):\n \"\"\"Vagrant catalogue instance.\n\n \"\"\"\n return VagrantCatalogue(self._dot_libcloudvagrant, self)\n\n def _vagrant_ssh_config(self):\n ret = {}\n ssh_config = self._vagrant(\"ssh-config\")\n m = re.search(\"HostName (.+)$\", ssh_config, re.MULTILINE)\n if m:\n ret[\"host\"] = m.group(1)\n m = re.search(\"User (.+)$\", ssh_config, re.MULTILINE)\n if m:\n ret[\"user\"] = m.group(1)\n m = re.search(\"Port (.+)$\", ssh_config, re.MULTILINE)\n if m:\n ret[\"port\"] = int(m.group(1))\n m = re.search(\"IdentityFile (.+)$\", ssh_config, re.MULTILINE)\n if m:\n ret[\"key\"] = m.group(1)\n return ret\n","sub_path":"libcloudvagrant/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":20474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"589264735","text":"import requests\nimport time\nfrom packages.helper.string_modifier import StringModifier\nfrom packages.helper.timer import Timer\nfrom fake_useragent import UserAgent\nclass DataReader:\n\ttimeout_time = 0.5\n\t\n\tdef __init__(self,type,category,req_page):\n\t\tself.category = category\n\t\tself.ua = UserAgent()\n\t\tself.headers = {'User-Agent': self.ua.chrome}\n\t\tself.type = type\n\t\tself.page = 0\n\t\tself._link = f\"https://forum.gamer.com.tw/ajax/rank.php?c={type}&page={self.page}\"\n\t\tself.req_page = req_page\n\t\tself._datas = []\n\n\tdef start_request(self):\n\t\t'''\n\t\tperform req_page times HTTP request, and store the result in datas variable,\n\t\tit will check any data exist to avoid duplicate\n\t\t'''\n\t\tfor current_page in range(1,self.req_page+1):\n\t\t\tself.link = current_page\n\t\t\tresponse = requests.get(self.link,headers=self.headers).json() #get the list\n\t\t\tprint(f\"{self.category} in page {current_page}\")\n\t\t\ttry:\n\t\t\t\tfor game in response:\n\t\t\t\t\tself.datas.append(self.get_data_to_write(game))\n\t\t\texcept Exception as e:\n\t\t\t\t#some page return [] as there no any more subpages, or the format returned is different\n\t\t\t\tprint(e)\n\t\t\t\traise\n\t\t\t\n\t\t\ttime.sleep(self.timeout_time)\n\t\t\t\n\tdef get_data_to_write(self,game):\n\t\t'''\n\t\tthe start_request method will invoke this method to get the dictonary object to write\n\t\t'''\n\t\tto_write = {} \n\t\ttry:\t\t\t\n\t\t\tto_write[\"name\"] = StringModifier.remove_end_space(game[\"title\"])\n\t\t\tto_write[\"gameID\"] = game[\"bsn\"]\n\t\t\tto_write[\"rank\"] = 
game[\"ranking\"]\n\t\t\tto_write[\"population\"] = int(game[\"hot\"])\n\t\t\tto_write[\"newThread\"] = int(game[\"article\"])\n\t\texcept Exception:\n\t\t\traise KeyError(\"No valid key found in the json file\")\n\t\treturn to_write\n\n\t\t\n\n\t@property\n\tdef datas(self):\n\t\treturn self._datas\n\n\t@property\n\tdef link(self):\n\t\treturn self._link\n\t\n\t@link.setter\n\tdef link(self,page):\n\t\tself._link = f\"https://forum.gamer.com.tw/ajax/rank.php?c={self.type}&page={page}\"\n\n\nclass SyncDataReader(DataReader):\n\tdef __init__(self,type,category,req_page):\n\t\tsuper().__init__(type,category,req_page)\n\n\tdef get_data_to_write(self, game):\n\t\tpass\n\n\n","sub_path":"packages/data_reader/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"355728308","text":"def main():\n global module\n module = AnsibleModule(argument_spec=dict(project_path=dict(required=True, type='path'), binary_path=dict(type='path'), state=dict(default='present', choices=['present', 'absent', 'planned']), variables=dict(type='dict'), variables_file=dict(type='path'), plan_file=dict(type='path'), state_file=dict(type='path'), targets=dict(type='list', default=[]), lock=dict(type='bool', default=True), lock_timeout=dict(type='int'), force_init=dict(type='bool', default=False)), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True)\n project_path = module.params.get('project_path')\n bin_path = module.params.get('binary_path')\n state = module.params.get('state')\n variables = (module.params.get('variables') or {\n \n })\n variables_file = module.params.get('variables_file')\n plan_file = module.params.get('plan_file')\n state_file = module.params.get('state_file')\n force_init = module.params.get('force_init')\n if (bin_path is not None):\n command = [bin_path]\n else:\n command = [module.get_bin_path('terraform')]\n if force_init:\n init_plugins(command[0], project_path)\n variables_args = []\n for (k, v) in variables.items():\n variables_args.extend(['-var', '{0}={1}'.format(k, v)])\n if variables_file:\n variables_args.extend(['-var-file', variables_file])\n preflight_validation(command[0], project_path, variables_args)\n if (state == 'present'):\n command.extend(APPLY_ARGS)\n elif (state == 'absent'):\n command.extend(DESTROY_ARGS)\n if (module.params.get('lock') is not None):\n if module.params.get('lock'):\n command.append('-lock=true')\n else:\n command.append('-lock=true')\n if (module.params.get('lock_timeout') is not None):\n command.append(('-lock-timeout=%ds' % module.params.get('lock_timeout')))\n for t in (module.params.get('targets') or []):\n command.extend(['-target', t])\n (needs_application, changed) = (True, True)\n if (state == 'planned'):\n (plan_file, needs_application) = build_plan(command[0], project_path, variables_args, state_file)\n if (state == 'absent'):\n needs_application = True\n command.extend(variables_args)\n elif (plan_file and os.path.exists(plan_file)):\n command.append(plan_file)\n elif (plan_file and (not os.path.exists(plan_file))):\n module.fail_json(msg='Could not find plan_file \"{0}\", check the path and try again.'.format(plan_file))\n else:\n (plan_file, needs_application) = build_plan(command[0], project_path, variables_args, state_file)\n command.append(plan_file)\n if (needs_application and (not module.check_mode) and (not (state == 'planned'))):\n (rc, out, err) = module.run_command(command, 
cwd=project_path)\n if ((state == 'absent') and ('Resources: 0' in out)):\n changed = False\n if (rc != 0):\n module.fail_json(msg='Failure when executing Terraform command. Exited {0}.\\nstdout: {1}\\nstderr: {2}'.format(rc, out, err), command=' '.join(command))\n else:\n changed = False\n (out, err) = ('', '')\n outputs_command = ([command[0], 'output', '-no-color', '-json'] + _state_args(state_file))\n (rc, outputs_text, outputs_err) = module.run_command(outputs_command, cwd=project_path)\n if (rc == 1):\n module.warn('Could not get Terraform outputs. This usually means none have been defined.\\nstdout: {0}\\nstderr: {1}'.format(outputs_text, outputs_err))\n outputs = {\n \n }\n elif (rc != 0):\n module.fail_json(msg='Failure when getting Terraform outputs. Exited {0}.\\nstdout: {1}\\nstderr: {2}'.format(rc, outputs_text, outputs_err), command=' '.join(outputs_command))\n else:\n outputs = json.loads(outputs_text)\n module.exit_json(changed=changed, state=state, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))","sub_path":"Data Set/bug-fixing-5/3bf6c50fa95abcb1ab586f2fce53a99c39854c69-
-fix.py","file_name":"3bf6c50fa95abcb1ab586f2fce53a99c39854c69-
-fix.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"601825790","text":"class Solution:\n def lengthOfLastWord(self, s: str) -> int:\n right = len(s) - 1\n while right >= 0 and s[right] == ' ':\n right -= 1\n\n left = right\n while left >= 0 and s[left] != ' ':\n left -= 1\n\n return right - left\n\n\nif __name__ == '__main__':\n solution = Solution()\n s = 'a '\n print(solution.lengthOfLastWord(s))\n","sub_path":"t58.py","file_name":"t58.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"85680295","text":"A1, B1, C1 = int(input()), int(input()), int(input())\nA2, B2, C2 = int(input()), int(input()), int(input())\nif A1 >= B1 and A1 >= C1:\n H1 = A1\nelif B1 >= A1 and B1 >= C1:\n H1 = B1\nelse:\n H1 = C1\n\nif A1 <= B1 and A1 <= C1:\n M1 = A1\nelif B1 <= A1 and B1 <= C1:\n M1 = B1\nelse:\n M1 = C1\n\nL1 = A1 + B1 + C1 - M1 - H1\n\nif A2 >= B2 and A2 >= C2:\n H2 = A2\nelif B2 >= A2 and B2 >= C2:\n H2 = B2\nelse:\n H2 = C2\n\nif A2 <= B2 and A2 <= C2:\n M2 = A2\nelif B2 <= A2 and B2 <= C2:\n M2 = B2\nelse:\n M2 = C2\n\nL2 = A2 + B2 + C2 - M2 - H2\n\nif H1 == H2 and L1 == L2 and M1 == M2:\n print('Boxes are equal')\nelif H1 >= H2 and L1 >= L2 and M1 >= M2:\n print('The first box is larger than the second one')\nelif H2 >= H1 and L2 >= L1 and M2 >= M1:\n print('The first box is smaller than the second one')\nelse:\n print('Boxes are incomparable')\n","sub_path":"Week2/Week2_18.py","file_name":"Week2_18.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"77916961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 29 10:49:33 2020\n\n@author: remi\n\"\"\"\n\n\nimport numpy as np\nimport scipy.sparse as sparse\nimport pandas as pd\nimport json\nimport time\nfrom sklearn import linear_model\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import metrics\nimport copy\nfrom sklearn.model_selection import GridSearchCV\nimport networkx as nx\nimport community\n\n\nt1=time.time()\ndef get_number_link(id, df_know_infos, df_train_edges, page_types, link_depth):\n temp_df=df_train_edges[df_train_edges['id_2'].isin([id])]['id_1'].append(df_train_edges[df_train_edges['id_1'].isin([id])]['id_2'])\n for iter in range(link_depth):\n temp_df=df_train_edges[df_train_edges['id_2'].isin(temp_df)]['id_1'].append(df_train_edges[df_train_edges['id_1'].isin(temp_df)]['id_2'])\n know_links=df_know_infos[df_know_infos['id'].isin(temp_df)]\n dict={}\n nb_link=know_links.shape[0]\n dict.update({'nb_link': nb_link})\n if nb_link>0:\n for page_type in page_types:\n dict.update({page_type :know_links[know_links['page_type']==page_type]['id'].count()/nb_link})\n else:\n for page_type in page_types:\n dict.update({page_type : 0})\n \n return dict\n\ndef get_features_ratios(id,features):\n my_dict={}\n my_feature=features.get(str(id))\n my_dict.update({'mean' : np.mean(my_feature)})\n my_dict.update({'std' : np.std(my_feature)})\n my_dict.update({'sharpe' : np.mean(my_feature)/np.std(my_feature)})\n my_dict.update({'nb_obs' : len(my_feature)})\n my_dict.update({'sum' : sum(my_feature)})\n return my_dict\n\ndef create_X(df_target,page_types, df_know_infos, df_edges, features,X_ids,partition_dict, df_new_infos=None, new_infos=False ):\n \n nb_page_type=len(page_types)\n nb_ids=len(X_ids)\n if new_infos:\n 
X=np.zeros((nb_ids, (nb_page_type+1)*8+6))\n else:\n X=np.zeros((nb_ids, (nb_page_type+1)*4+6))\n for iter in range(nb_ids):\n id=X_ids[iter]\n direct_link=get_number_link(id, df_know_infos, df_edges, page_types, 0)\n second_link=get_number_link(id, df_know_infos, df_edges, page_types, 1)\n third_link=get_number_link(id, df_know_infos, df_edges, page_types, 2)\n fourth_link=get_number_link(id, df_know_infos, df_edges, page_types, 3)\n ids_community=partition_dict.get(id)\n if new_infos:\n direct_link_estimates=get_number_link(id, df_new_infos, df_edges, page_types, 0)\n second_link_estimates=get_number_link(id, df_new_infos, df_edges, page_types, 1)\n third_link_estimates=get_number_link(id, df_new_infos, df_edges, page_types, 2)\n fourth_link_estimates=get_number_link(id, df_new_infos, df_edges, page_types, 3)\n id_features=get_features_ratios(id, features)\n if new_infos:\n row_values=[ids_community]+list(direct_link.values())+list(second_link.values())+list(third_link.values())+list(fourth_link.values())+list(direct_link_estimates.values())+list(second_link_estimates.values())+list(third_link_estimates.values())+list(fourth_link_estimates.values())+list(id_features.values())\n else:\n row_values=[ids_community]+list(direct_link.values())+list(second_link.values())+list(third_link.values())+list(fourth_link.values())+list(id_features.values())\n X[iter,:]=row_values\n return X\n\n\ndef find_custom_model(Y, X, CVD_parameters,parameters_lasso, page_types):\n new_ratios=np.zeros((len(Y),len(page_types)))\n X_centered=copy.deepcopy(X)\n X_centered=X_centered-X_centered.mean(axis=0)\n model_dict={}\n count=0\n for page_type in page_types:\n new_Y=np.array(list(map(lambda x: 1 if x==page_type else 0, Y)))\n new_Y_mean=np.mean(new_Y)\n new_Y=new_Y-new_Y_mean\n best_lasso=GridSearchCV(linear_model.Lasso(max_iter=20000, tol=0.01, fit_intercept=False, selection='random'), parameters_lasso, n_jobs=4, verbose=0).fit(X_centered, new_Y).best_estimator_\n #best_lasso=linear_model.Lasso(max_iter=20000, tol=0.01, selection='random').fit(X_centered, new_Y)\n new_ratios[:,count]=best_lasso.predict(X_centered)+new_Y_mean\n model_dict.update({page_type: [best_lasso, new_Y_mean]})\n count+=1;\n clf = GridSearchCV(DecisionTreeClassifier(), CVD_parameters, n_jobs=4, verbose=0).fit(X=new_ratios, y=Y)\n tree_model = clf.best_estimator_\n predictions=tree_model.predict(new_ratios)\n print (clf.best_score_, clf.best_params_) \n model_dict.update({'decision_tree': tree_model})\n return model_dict, predictions \n \ndef use_custom_model_for_prediction(X, model_dict, page_types):\n new_ratios=np.zeros((np.shape(X)[0],len(page_types)))\n X_centered=copy.deepcopy(X)\n X_centered=X_centered-X_centered.mean(axis=0)\n count=0\n for page_type in page_types:\n new_Y_mean=model_dict.get(page_type)[1]\n new_ratios[:,count]=model_dict.get(page_type)[0].predict(X_centered)+new_Y_mean\n count+=1;\n predictions=model_dict.get('decision_tree').predict(new_ratios)\n return predictions\n\n\nparameters = {'max_depth':range(1,23), 'criterion' :['gini', 'entropy'],'splitter': ['best','random'], 'min_samples_split' : range(3,6),'min_samples_leaf': range(1,5), 'min_impurity_decrease': [i/10 for i in range(40)] }\nparameters_lasso={'alpha':[i/5 for i in range(10)]}\nCVD_parameters = {'max_depth':range(1,8), 'criterion' :['gini', 'entropy'],'splitter': ['best','random'], 'min_samples_split' : range(3,6),'min_samples_leaf': range(1,5), 'min_impurity_decrease': [i/10 for i in range(40)] 
}\n\n\ndf_edges=pd.read_csv(\"musae_facebook_edges.csv\")\ndf_target=pd.read_csv(\"musae_facebook_target.csv\")\nfeatures=json.load(open(\"musae_facebook_features.json\"))\nnb_pages=df_target.shape[0] \npage_types=df_target['page_type'].unique()\n\ndf_know_infos=df_target[df_target['id']0.05*nb_pages else False)==True]\n\ndf_test_edges=df_edges\ndf_test_target=df_target[df_target['id'].map(lambda x: True if x>0.6*nb_pages else False)==True]\n\nY_train=df_train_target['page_type'].values\nY_test=df_test_target['page_type'].values\n\n\nX_ids=df_target['id'].values\nG = nx.Graph()\nG.add_nodes_from(X_ids)\nids_1 = df_edges['id_1'].values\nids_2 = df_edges['id_2'].values\nfor iter in range(len(ids_1)):\n G.add_edge(ids_1[iter],ids_2[iter])\npartition_dict = community.best_partition(G)\n\n\n\nnb_iter=4\ninitial_known_informations=copy.deepcopy(df_know_infos)\nmy_models=[]\nfor iter in range(nb_iter):\n print(\"starting iteration \"+str(iter)+\" on training part\")\n X_ids_train=df_train_target['id'].values\n \n if iter==0:\n X_train=create_X(df_train_target, page_types, df_know_infos, df_edges,features,X_ids_train,partition_dict)\n else:\n X_train=create_X(df_train_target, page_types, df_know_infos, df_edges,features,X_ids_train,partition_dict, pd.DataFrame({'id':X_ids_train, 'page_type':new_informations}), True)\n \n model_dict, new_informations=find_custom_model(Y_train, X_train, CVD_parameters,parameters_lasso, page_types)\n\n my_models.append(model_dict)\n\n \n \ndf_know_infos=copy.deepcopy(initial_known_informations)\nprint(\"starting on test set\")\ncount=0\nfor model_dict in my_models:\n X_ids_test=df_test_target['id'].values\n if count==0:\n X_test=create_X(df_test_target, page_types, df_know_infos, df_edges,features,X_ids_test,partition_dict)\n else:\n X_test=create_X(df_test_target, page_types, df_know_infos, df_edges,features,X_ids_test,partition_dict,pd.DataFrame({'id':X_ids_test, 'page_type':new_informations}) ,True)\n new_informations=use_custom_model_for_prediction(X_test, model_dict, page_types)\n\n print(\"current classification report:\")\n classification_report=metrics.classification_report(Y_test,new_informations)\n print(classification_report)\n count+=1","sub_path":"Projet Explo Codes/version passée et de test/main_projet_MEGAMIX.py","file_name":"main_projet_MEGAMIX.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"421558572","text":"\n\"\"\"\nExecute the below code in a cell by cell and see the output. 
There are Assignments to do it by your own, so complete that as well\nUse the Spyder run and execute these .py files\n\"\"\"\n\n\nclass Developer:\n # class variables for all instances\n bonus = \"Yes\"\n def __init__(self, fname, lname, company):\n # instance variables unique to each instance\n self.fname = fname\n self.lname = lname\n self.email_id = fname + lname + \"@\" + company + \".com\"\n self.company = company\n self.fullname = self.fname + \" \" + self.lname\n\n def show_details(self):\n return self.fullname, self.email_id, Developer.bonus\n\nclass Manager(Developer):\n def __init__(self, fname, lname, company, salary):\n super().__init__(fname, lname, company)\n self.salary = salary\n self.bonus = salary * 0.2\n\n def total_salary(self):\n self.total_salary = self.salary + self.bonus\n return self.total_salary\n\nuser1 = Manager(\"john\", \"williams\", \"ABC\", 70000)\nuser2 = Manager(\"dave\", \"smith\", \"ABC\", 125000)\nuser3 = Manager(\"bob\", \"mario\", \"ABC\", 80000)\n\nprint(user1.fullname)\nprint(user2.fullname)\nprint(user3.show_details())\n\n\n# Assignment - 10\n\n","sub_path":"10_inheritence.py","file_name":"10_inheritence.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144816112","text":"\"\"\"`main` is the top level module for your Flask application.\"\"\"\n# Import the Flask Framework\nfrom flask import Flask\nfrom flask import request, jsonify\n\nfrom googleapiclient.discovery import build\n\napp = Flask(__name__)\n# Note: We don't need to call run() since our application is embedded within\n# the App Engine WSGI application server.\n\n\n@app.route('/')\ndef hello():\n \"\"\"Return a friendly HTTP greeting.\"\"\"\n return app.send_static_file('index.html')\n\ndef _analyze(text, type='analyzeSentiment'):\n body = {\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text,\n },\n 'encoding_type': 'UTF32'\n }\n\n service = build('language', 'v1')\n\n meth = getattr(service.documents(), type)\n request = meth(body=body)\n response = request.execute()\n\n return response\n\n\n@app.route('/analyze', methods=['POST'])\ndef analyzer():\n q = request.get_json()\n t = q['text']\n\n return jsonify(_analyze(t))\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Return a custom 404 error.\"\"\"\n return 'Sorry, Nothing at this URL.', 404\n\n\n@app.errorhandler(500)\ndef application_error(e):\n \"\"\"Return a custom 500 error.\"\"\"\n return 'Sorry, unexpected error: {}'.format(e), 500\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"368740755","text":"#!/usr/bin/env python3 \n\nimport glob\nimport os,sys\nimport numpy as np \n\nfrom imageLibs import *\n\nfrom sklearn import datasets\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nimport seaborn as sns\n\nfrom tqdm import tqdm\nimport cv2\nimport math\nimport pickle\n\ncropCoords=\"170,250,1920,1034\"\ncropCoords=\"150,10,1650,910\" # Peng - cow\n# cropCoords=\"150,10,900,910\" # Peng - faceUp\n# cropCoords=\"900,10,1650,910\" # Peng - bodyDown\n\n# eidDict structure :\n# eidDict[eid] = dict \n# eid: the eid string\n# dict: a dict of : \n# dict['imgPath'] : array of imgPath string\n# dict['fvs'] : array of fv vector corresponding to imgPath array\n#\n\n\n# # Those are the cow that changed their eid.\n# eidMap={\n# 
'982000159018957':'982123714829369',\n# '982123479335814':'982123714829372',\n# '982123520571387':'982123714829374',\n# '982000189575899':'982123714829371'}\neidMap={\n \"982123553042212\" : \"982123750660987\",\n \"982123714450303\" : \"982123750661001\"\n}\n#eidMap={}\n# # Those are images that are excluded from the analysis\n# excludeImage =[\n# 'sideDataset.20190401-20190831/982091014657402/shed8003993.20190415_172000.mp4.pts00115_866.png', # bleached\n# 'sideDataset.20190401-20190831/982123531951989/shed8003993.20190414_144000.mp4.pts00462_733.png', # bad crop\n# 'end'\n# ]\n\n# excludeImage = [\n# 'topSide.20200801-20200820.pm.222/982123553042029/20200811_152000.png','sideDataset.20200801-20200820.pm/982123553042029/shed8003993.20200811_152000.mp4.pts00046_666.png',\n# 'topSide.20200801-20200820.pm.222/982123714450308/20200814_151000.png','cowId.topDownBennett.20200801-20200820/982123714450308/QBH-PortalTopDow.shed8003993.20200814_151000.mp4.pts00126_721.png',\n# 'topSide.20200801-20200820.pm.222/982123714450243/20200803_144000.png','cowId.topDownBennett.20200801-20200820/982123714450243/QBH-PortalTopDow.shed8003993.20200803_144000.mp4.pts00060_516.png',\n# 'topSide.20200801-20200820.pm.222/982123518698409/20200812_142000.png',\n# 'topSide.20200801-20200820.pm.222/982123533468200/20200819_151000.png',\n# 'topSide.20200801-20200820.pm.222/982123714829351/20200810_145000.png',\n# 'topSide.20200801-20200820.pm.222/982123553042012/20200806_143000.png', \n# ]\nexcludeImage = []\n# def loadBlackCowDict():\n# fn='blackCow.annotation.txt'\n# f=open(fn,\"r\") \n# res=dict() \n# for lineNumber,line in enumerate(f):\n# arr=line.split(\",\")\n# eidStr=arr[0].split(\".\")[0] \n# if (eidStr in eidMap):\n# eid=eidMap[eidStr]\n# else:\n# eid=eidStr\n \n# label=arr[5].strip()\n# if (label == '10000'):\n# status='black'\n# else:\n# status='NotBlack'\n \n# if (eid in res):\n# print(\"Warning: {} appear more than once in {}. Overwriting status ...\".format(eid,fn))\n \n# res[eid] = status\n# return res\n\n\n\ndef statPerCow(eitDict, mciDf,numTop=5):\n data=dict()\n\n excluded=[]\n\n # Error in Top counting\n for img,d2df in eitDict.items():\n if img in excludeImage:\n excluded.append(img)\n continue\n eid=getEid(img) \n if eid not in data:\n data[eid]={'numImg':0, 'eitCount':0,'mciCount':0, 'numImgCheck':0}\n\n data[eid]['numImg'] += 1\n imgs=list(d2df[:numTop]['imgPath'])\n errorCount=0\n for path in imgs:\n cow=getEid(path)\n if (cow != eid):\n errorCount += 1\n \n if (errorCount >0):\n data[eid]['eitCount'] += 1\n\n # Matching-cluster Index counting\n for index, row in mciDf.iterrows():\n if row['path'] in excludeImage:\n continue\n\n \n eid=getEid(row['path'])\n if (eid not in data):\n print(\"Eid {} not in data ?? 
\".format(eid))\n print(crashHere)\n\n data[eid]['numImgCheck'] += 1\n if (row['rank'] > 0):\n data[eid]['mciCount'] += 1\n\n #res={'eid':[],'numImg':[], 'eitCount':[],'mciCount':[],'blackCow':[],'eitPercent':[],'mciPercent':[]}\n res={'eid':[],'numImg':[], 'eitCount':[],'mciCount':[],'eitPercent':[],'mciPercent':[]}\n for key,value in data.items():\n if (value['numImgCheck'] != value['numImg']):\n print(\"WARNING: {} have different number of image: {} vs {} ??\".format(key,value['numImg'],value['numImgCheck']))\n\n if value['numImg'] < numTop:\n continue\n\n res['eid'].append(key)\n res['numImg'].append(value['numImg'])\n res['eitCount'].append(value['eitCount'])\n res['mciCount'].append(value['mciCount'])\n #res['blackCow'].append(blackCowDict[key])\n res['eitPercent'].append(value['eitCount']/value['numImg']*100)\n res['mciPercent'].append(value['mciCount']/value['numImg']*100)\n\n print(\"{} img excluded.\".format(len(excluded)))\n\n return excluded,pd.DataFrame(res)\n\n\ndef getEid(path):\n eid=path.split('/')[1]\n if (eid in eidMap):\n eid=eidMap[eid]\n return eid\n\n\ndef summaryData(dataDirPath):\n dirs=glob.glob(dataDirPath+\"/*\")\n\n cows=[]\n count=[]\n for dir in dirs:\n if not os.path.isdir(dir):\n continue\n imgs=[]\n imgs.extend(glob.glob(dir+\"/*.jpg\"))\n imgs.extend(glob.glob(dir+\"/*.png\"))\n eid=os.path.basename(dir)\n count.append(len(imgs))\n cows.append(eid)\n #trainStat.append([eid,len(jpgs)])\n \n print(\"Number of cow: {}\".format(len(cows)))\n a=np.array(count)\n quantilesAt=[0,0.25,0.5,0.75,1]\n print(\"Quantile at {}\".format(quantilesAt))\n q=np.quantile(a,quantilesAt)\n print(q)\n print(\"Total number of images: {}\".format(sum(count)))\n plt.hist(count)\n plt.title(\"Number of image per cow\")\n plt.xlabel(\"Number of image per cow\")\n plt.ylabel(\"Number of cow\")\n print()\n return (cows,count)\n #print(trainStat)\n\n# # def trainStat():\n# # summaryData(trainDirPath)\n\n\n\ndef loadFv(fvPath,exclude=excludeImage,toInclude=None):\n with open(fvPath,\"r\") as f:\n lines = f.readlines()\n dfDict={'eids':[],'fvs':[],'paths':[]}\n eidDict=dict()\n for line in lines:\n line=line.strip()\n if line.startswith(\"#\"):\n continue \n arr=line.split(\":\")\n if (len(arr) != 2): \n continue\n eidstr=arr[0]\n if (eidstr in exclude):\n continue \n if toInclude is not None and eidstr not in toInclude:\n continue \n eid=getEid(eidstr)\n fvstr=arr[1]\n fv=[float(x) for x in fvstr.split(\",\")]\n dfDict['eids'].append(eid)\n dfDict['fvs'].append(fv)\n dfDict['paths'].append(eidstr)\n \n if (eid not in eidDict):\n eidDict[eid] = {'imgPath':[], 'fvs':[]}\n eidDict[eid]['imgPath'].append(eidstr)\n eidDict[eid]['fvs'].append(fv)\n\n return (eidDict,dfDict)\n \n\n# def toDict(eids,fvs):\n# res=dict()\n# for i in range(len(eids)):\n# eid=eids[i]\n# if eid not in res:\n# res[eid]={'fvs':[]}\n\n# res[eid]['fvs'].append(fvs[i])\n\n# return res\n\n\ndef selectTopCow(eidDict,top):\n #eidDict=toDict(eids,fvs)\n \n # Count them \n eidVec=[]\n countVec=[]\n for eid in eidDict:\n eidVec.append(eid)\n countVec.append(len(eidDict[eid]['fvs']))\n \n # Sort \n eidVec=np.array(eidVec)\n countVec=np.array(countVec)\n ind = np.argsort(countVec)[::-1]\n eidVec = eidVec[ind]\n\n eidTop=[]\n fvTop=[]\n\n for eid in eidVec[:top]:\n fv=eidDict[eid]['fvs']\n fvTop.extend(fv)\n eidTop.extend([eid]*len(fv))\n \n return eidTop,fvTop\n \n\ndef plotTSNE(eids,fvs,showLegend=True):\n\n X_embedded = TSNE(n_components=2).fit_transform(fvs)\n label=np.array(eids)\n plt.figure(figsize=(12, 10))\n for i, t in 
enumerate(set(eids)):\n #print(\"{} - {}\".format(i,t))\n idx = label == t\n plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t,alpha=0.7) \n if showLegend:\n plt.legend(bbox_to_anchor=(1, 1))\n plt.show()\n\n# def tsneCow(eidList,eidDict=None): \n# if eidDict is None:\n# if (tsneCow.eidDict is None):\n# print(\"Loading fvs from disk ...\")\n# eidDict,dfDict=loadFv(\"sideDataset.20190401-20190430.fv\")\n# tsneCow.eidDict = eidDict\n# else:\n# eidDict = tsneCow.eidDict\n\n\n# eids=[]\n# fvs=[]\n# for cow in eidList: \n# fvs.extend(eidDict[cow]['fvs'])\n# eids.extend([cow]*len(eidDict[cow]['fvs']))\n\n# print(\"Generating TSNE for {} data points\".format(len(fvs)))\n# plotTSNE(eids,fvs)\n \n\n# tsneCow.eidDict=None\n\ndef plotByEids(eidDict,eids):\n eidWorst=[]\n fvWorst=[] \n for eid in eids:\n fv=eidDict[eid]['fvs']\n fvWorst.extend(fv)\n eidWorst.extend([eid]*len(fv)) \n plotTSNE(eidWorst,fvWorst)\n return eidWorst,fvWorst\n\ndef plotWorstCluster(eidDict,d2Table,numWorst):\n d2Table=d2Table.sort_values(by=['d2']).reset_index(drop=True)\n d2Table['index'] = d2Table.index\n\n w=d2Table.head(numWorst)\n cows=list(w['cowA'])\n cows.extend(list(w['cowB']))\n\n\n #Uniq\n cows=list(set(cows))\n\n eidWorst=[]\n fvWorst=[]\n #eidDict=toDict(eids,fvs)\n for eid in cows:\n fv=eidDict[eid]['fvs']\n fvWorst.extend(fv)\n eidWorst.extend([eid]*len(fv))\n \n plotTSNE(eidWorst,fvWorst)\n return w,eidWorst,fvWorst\n\n\n\ndef computeClusterCentroid(eidDict):\n #eidDict=toDict(eids,fvs)\n for cow in eidDict:\n arr = np.array(eidDict[cow]['fvs'])\n centroid = arr.mean(axis=0)\n eidDict[cow]['centroid']=centroid\n\n return eidDict\n\ndef distance2(e1, e2):\n e1=np.array(e1)\n e2=np.array(e2)\n d2 = np.sum(np.square(e1 - e2))\n # sim = np.dot(f1, f2.T)\n return d2\n\ndef computeDistancePairs(data):\n cows=list(data.keys())\n #cows=cows[:100]\n d2Table={'d2':[],'cowA':[],'cowB':[],\n # 'numImgCowA':[],\n # 'numImgCowB':[],\n # 'totImg':[]\n }\n\n pbar=tqdm(total=len(cows)*len(cows)/2)\n count=0\n\n for i in range(len(cows)-1):\n for j in range(i+1,len(cows)):\n cowA=cows[i]\n cowB=cows[j]\n d2=distance2(data[cowA]['centroid'],data[cowB]['centroid'])\n d2Table['d2'].append(d2)\n d2Table['cowA'].append(cowA)\n d2Table['cowB'].append(cowB)\n #numA=len(data[cowA]['fvs'])\n #numB=len(data[cowB]['fvs'])\n #d2Table['numImgCowA'].append(numA)\n #d2Table['numImgCowB'].append(numB)\n #d2Table['totImg'].append(numA+numB)\n # distances.append(d2)\n # d2Table.extend([d2,cowA,cowB])\n # src.append(cowA)\n # dest.append(cowB)\n pbar.update(1)\n count+=1\n pbar.close()\n\n return pd.DataFrame(d2Table)\n\ndef drawCows(eidDict,eids,imgPerCow,cropx0y0x1y1=cropCoords):\n img=None \n for eid in eids:\n if eid not in eidDict:\n continue\n line=drawCowHori(eidDict,eid,imgPerCow,0,cropx0y0x1y1=cropx0y0x1y1)\n img=stitch(img,line,True,0)\n return img\n \ndef drawCowHori(eidDict,eid,numCow,margin=0,cropx0y0x1y1=cropCoords):\n cowList=eidDict[eid]['imgPath']\n img=None\n x0, y0, x1, y1 = map(int, cropx0y0x1y1.split(\",\"))\n width=x1-x0\n height=y1-y0 \n for i in range(numCow):\n if i >= len(cowList) :\n imgA=np.zeros((height,width,3), np.uint8)\n else:\n imgA=cropCenter(cowList[i],cropx0y0x1y1=cropx0y0x1y1)\n img=stitch(img,imgA,False,margin)\n return img \n\ndef drawPair(eidDict,eidA,eidB,numCow,oriVert=True,margin=0):\n\n cowAList=eidDict[eidA]['imgPath']\n cowBList=eidDict[eidB]['imgPath']\n cowAList.sort()\n cowBList.sort()\n img=None\n for i in range(numCow):\n if i >= len(cowAList) :\n imgA=np.zeros((784,1750,3), 
np.uint8)\n else:\n imgA=cropCenter(cowAList[i],cropx0y0x1y1=cropCoords)\n\n if i >= len(cowBList) :\n imgB=np.zeros((784,1750,3), np.uint8)\n else:\n imgB=cropCenter(cowBList[i],cropx0y0x1y1=cropCoords)\n\n pair=stitch(imgA,imgB,not oriVert,margin)\n #print(\"{} vs {}\".format(cowAList[i],cowBList[i]))\n\n img=stitch(img,pair,oriVert,0)\n\n #plt.imshow(img)\n return img\n\n# #def drawNearCows(eidDict,)\n\ndef showWorstPairs(eidDict,worstDf):\n numCol=3\n margin=10\n color=(255,255,255)\n numRow=math.ceil(len(worstDf)/numCol)\n fig, axs = plt.subplots(numRow,numCol,figsize=(18,30))\n\n for index, row in worstDf.iterrows():\n bgr=drawPair(eidDict,row['cowA'],row['cowB'],5)\n plotRow=int(index/numCol)\n plotCol=index % numCol \n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n axs[plotRow, plotCol].imshow(rgb)\n axs[plotRow, plotCol].set_title('d={:.3}'.format(row['d2']))\n axs[plotRow, plotCol].axis('off')\n #show(img,'d={}'.format(row['d2']))\n\n # Save the full figure...\n fn='worstCluster.png'\n fig.savefig(fn)\n print('{} generated'.format(os.path.realpath(fn)))\n\n\ndef showWorstPairsHori(eidDict,worstDf):\n numCol=3\n margin=10\n color=(255,255,255)\n numRow=math.ceil(len(worstDf))\n fig, axs = plt.subplots(numRow,figsize=(20,20),dpi=400)\n\n for index, row in worstDf.iterrows():\n bgr=drawPair(eidDict,row['cowA'],row['cowB'],5,oriVert=False)\n #plotRow=int(index/numCol)\n #plotCol=index % numCol \n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n axs[index].imshow(rgb)\n axs[index].set_title('d={:.3}'.format(row['d2']))\n axs[index].axis('off')\n #show(img,'d={}'.format(row['d2']))\n\n # Save the full figure...\n fn='worstCluster.png'\n fig.savefig(fn,bbox_inches='tight')\n print('{} generated'.format(os.path.realpath(fn)))\n\n\n# def buildDistanceTable(eidDict):\n\n# # Flattern the eidDict\n# eids=[]\n# fvs=[]\n# imgPaths=[]\n\n# for eid in eidDict:\n# eids.extend([eid]* len(eidDict[eid]['imgPath']))\n# fvs.extend(eidDict[eid]['fvs'])\n# imgPaths.extend(eidDict[eid]['imgPath'])\n\n# # Calculate every pair distances ...\n# d2Dict=dict()\n# tot=int(len(eids)*len(eids)/2)\n# pbar = tqdm(total=tot)\n# counter=0\n\n# for i in range(len(eids)-1):\n# d2Table={'imgPath':[], 'd2': [], 'eid':[]}\n# for j in range(i+1,len(eids)): \n# dest=imgPaths[j] \n# d2=distance2(fvs[i],fvs[j])\n# d2Table['imgPath'].append(imgPaths[j])\n# d2Table['d2'].append(d2)\n# d2Table['eid'].append(eids[j])\n\n# # Add the symmetry :\n# if (dest not in d2Dict):\n# d2Dict[dest] = pd.DataFrame({\n# 'imgPath':[imgPaths[i]],\n# 'd2':[d2],\n# 'eid':[eids[i]]\n# })\n# else:\n# d2Dict[dest] = d2Dict[dest].append({\n# 'imgPath':imgPaths[i],\n# 'd2':d2,\n# 'eid':eids[i]\n# },ignore_index=True)\n# #counter += 1\n# pbar.update(1)\n# d2Dict[imgPaths[i]]=pd.DataFrame(d2Table)\n# #if (counter >= 1000):\n# # break \n# pbar.close()\n# return d2Dict\n\n\ndef savePkl(df,fn):\n print('Saving to {} ...'.format(fn))\n with open(fn, 'wb') as f:\n pickle.dump(df, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef loadPkl(fn):\n with open(fn, 'rb') as f:\n return pickle.load(f)\n\n\ndef dfSort(df,byAttribute,ascending=True):\n return df.sort_values(by=[byAttribute],ascending=ascending).reset_index(drop=True)\n\n# def calOneDistance(imgPaths,fvs,eids,indexMap,index):\n# i,j=indexMap[index]\n# d2=distance2(fvs[i],fvs[j])\n# return (imgPaths[i],imgPaths[j],d2)\n\n# if __name__ == '__main__':\n# #trainStat()\n# #print()\n# #testStat()\n# #compare()\n# eidDict,eids,fvs=loadFv(\"sideDataset.20190401-20190430.fv\") \n# # 
img=drawPair(eidDict,'982000159018957','982123531951905',5,oriVert=True,margin=5)\n# # show(img,'d=2222')\n\n# indexMap=genMap(10)\n# calOneDistance(imgPaths,fvs,eids,indexMap,40)\n\n \n\n# # data=computeClusterCentroid(eids,fvs)\n# # d2Table=computeDistancePairs(data)\n \n# # worstDf,eidBad,fvBad=plotWorstCluster(eids,fvs,d2Table,10)\n\n# # showWorstPairs(worstDf)\n\n\n# d2All = buildDistanceTable(eidDict)\n# saveD2All(d2All)\n\n\n","sub_path":"analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":16504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"396452673","text":"from conn import mysql_pool, redis_pool, get_rabbitmq_conn\nfrom config import IS_NAIVE, QUERY_SQL, QUERY_INTERVAL, QUEUE_NAME\nfrom typing import List, Dict, Optional\nimport redis\nimport json\nimport pika\n\n\ndef query(author_name: str) -> Dict[str, object]:\n \"\"\"\n The entry point for query, which will perform the query directly via MySQL\n or through Redis, RabbitMQ and MySQL.\n \"\"\"\n response = {\n \"query\": author_name,\n \"status\": True,\n \"waiting\": 0,\n \"result\": []\n }\n # Naive version: query directly from MySQL\n if IS_NAIVE:\n response[\"result\"] = query_from_mysql(author_name)\n return response\n # Powerful version: Redis -> RabbitMQ -> MySQL\n cached = query_from_redis(author_name)\n if cached is not None: # cache hit in Redis\n response[\"result\"] = cached\n return response\n # cache miss, then add the query into RabbitMQ\n add_to_rabbitmq(author_name)\n # let the client wait for QUERY_INTERVAL seconds and then retry\n response[\"status\"], response[\"waiting\"] = False, QUERY_INTERVAL\n return response\n\n\ndef query_from_mysql(author_name: str) -> List[Dict[str, str]]:\n connection = mysql_pool.connection()\n cursor = connection.cursor()\n result = list()\n try:\n cursor.execute(QUERY_SQL, (\"%\" + author_name + \"%\",))\n items = cursor.fetchall()\n for author, aff in items:\n result.append({\n \"author\": author,\n \"affiliation\": aff,\n })\n except Exception as e:\n connection.rollback()\n print(e)\n finally:\n cursor.close()\n connection.close()\n return result\n\n\ndef query_from_redis(author_name: str) -> Optional[List[Dict[str, str]]]:\n connection = redis.Redis(connection_pool=redis_pool)\n cached = connection.get(author_name)\n if cached is None:\n return cached\n return json.loads(cached)\n\n\ndef test_and_incr_in_redis(flag: str) -> int:\n connection = redis.Redis(connection_pool=redis_pool)\n return connection.incr(flag)\n\n\ndef add_to_rabbitmq(author_name: str) -> None:\n flag_key = \"Waiting: \" + author_name\n # atomically check whether author_name has already been added to the queue\n if test_and_incr_in_redis(flag_key) != 1:\n print(\"Already in queue or redis: \", author_name)\n return\n rabbitmq_conn = get_rabbitmq_conn()\n rabbitmq_channel = rabbitmq_conn.channel()\n rabbitmq_channel.queue_declare(queue=QUEUE_NAME, durable=True)\n rabbitmq_channel.basic_publish(\n exchange=\"\",\n routing_key=QUEUE_NAME,\n body=author_name,\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n rabbitmq_conn.close()\n print(\"Add to the queue: \", author_name)\n\n\ndef add_to_redis(key: str, data: List[Dict[str, str]], ttl: int) -> bool:\n connection = redis.Redis(connection_pool=redis_pool)\n print(\"Add to redis: \", key, data, ttl)\n return connection.set(key, json.dumps(data), 
ex=ttl)\n","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"215952286","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.tools.translate import _\nfrom openerp.osv import osv\n\nclass supplement(models.Model):\n _name = 'tender.supplement'\n _inherit = ['mail.thread']\n\n buyer_id = fields.Many2one('res.users', readonly=True)\n supplement_no = fields.Char(readonly=True)\n supplement_line_ids = fields.One2many('tender.line', 'supplement_id')\n text = fields.Text()\n state = fields.Selection([('draft', 'Draft'),\n ('confirmed', 'Confirmed'),\n ('manager_confirmed', 'Manager Confirmed'),\n ('done', 'Done')],\n string='Status', index=True, readonly=True, default='draft',\n track_visibility='onchange', copy=False)\n\n @api.depends('supplement_line_ids')\n def _amount_all(self):\n for supplement_id in self:\n amount_total = 0.0\n for line in supplement_id.supplement_line_ids:\n amount_total += line.subtotal\n supplement_id.update({\n 'amount_total': self.currency_id.round(amount_total),\n #'amount_total': amount_untaxed + amount_tax,\n })\n amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')\n\n def compute_default_value(self):\n return self.sudo().env['res.currency'].search([['name', '=', 'CNY']])\n currency_id = fields.Many2one(\"res.currency\", default=compute_default_value)\n\n @api.one\n def to_draft(self):\n self.write({'state': 'draft'})\n @api.one\n def to_confirmed(self):\n self.write({'state': 'confirmed'})\n @api.one\n def to_manager_confirmed(self):\n self.write({'state': 'manager_confirmed'})\n @api.one\n def to_done(self):\n self.generate_order()\n self.write({'state': 'done'})\n for supplement_line_id in self.supplement_line_ids:\n supplement_line_id.write({'state': 'done'})\n supplement_line_id.requisition_id.change_state()\n supplement_line_id.allocated_id.change_state()\n @api.one\n def unlink(self):\n if self.state == 'draft':\n for supplement_line_id in self.supplement_line_ids:\n supplement_line_id.write({'state': 'allocated'})\n supplement_line_id.requisition_id.change_state()\n supplement_line_id.allocated_id.change_state()\n super(supplement, self).unlink()\n else:\n raise osv.except_osv(_('Error'), _(\"state != draft\"))\n @api.one\n def write(self, vals):\n if self.state == 'draft' or self.state == False:\n return super(supplement, self).write(vals)\n else:\n for field in ['supplement_line_ids']:\n if field in vals:\n raise osv.except_osv(_('Error'), _(\"state != draft\"))\n return super(supplement, self).write(vals)\n\n @api.multi\n def _track_subtype(self, init_values):\n self.ensure_one()\n if 'state' in init_values and self.state == 'confirmed':\n return 'tender.mt_supplement_confirmed'\n elif 'state' in init_values and self.state == 'manager_confirmed':\n return 'tender.mt_supplement_manager_confirmed'\n elif 'state' in init_values and self.state == 'done':\n return 'tender.mt_supplement_done'\n return super(supplement, self)._track_subtype(init_values)\n @api.model\n def create(self, vals):\n vals['supplement_no'] = self.env['ir.sequence'].get('supplement_no') or '/'\n result = super(supplement, self).create(vals)\n #添加关注\n all_type = self.env['mail.message.subtype'].search(['|', ('res_model', '=', self._name), ('default', '=', True)]).ids\n result.message_subscribe_users(user_ids=result.buyer_id.ids, 
subtype_ids=all_type)\n #发送消息\n result.message_post(_('draft'), subtype='mail.mt_comment')\n z = {}\n zz = {}\n #完成发给结算员\n group_id = self.env.ref('tender.group_tender_settlement').ids\n no_group_id = self.env.ref('tender.group_tender_purchaser').ids\n settlement_user_ids = self.env['res.users'].search([['groups_id', '=', group_id], ['groups_id', '!=', no_group_id]]).ids\n z[str(self.env.ref('tender.mt_supplement_done').id)] = settlement_user_ids\n #确认后发给minister\n group_id = self.env.ref('tender.group_tender_minister').ids\n no_group_id = self.env.ref('tender.group_tender_manager').ids\n minister_user_ids = self.env['res.users'].search([['groups_id', '=', group_id], ['groups_id', '!=', no_group_id]]).ids\n z[str(self.env.ref('tender.mt_supplement_confirmed').id)] = minister_user_ids\n #确认后发给manager\n group_id = self.env.ref('tender.group_tender_manager').ids\n manager_user_ids = self.env['res.users'].search([['groups_id', '=', group_id]]).ids\n z[str(self.env.ref('tender.mt_supplement_manager_confirmed').id)] = manager_user_ids\n user_ids = []\n for (a, b) in z.iteritems():\n user_ids += b\n user_ids = list(set(user_ids))\n for u in user_ids:\n if u != result.buyer_id.id:\n zz[u] = []\n for (a, b) in z.iteritems():\n if u in b:\n zz[u].append(int(a))\n for (uid, tid) in zz.iteritems():\n result.message_subscribe_users(user_ids=[uid], subtype_ids=tid)\n return result\n @api.one\n def generate_order(self):\n partner_ids = []\n for supplement_line_id in self.supplement_line_ids:\n if supplement_line_id.supplier_id.id not in partner_ids:\n partner_ids.append(supplement_line_id.supplier_id.id)\n for partner_id in self.env['tender.partner'].search([['id', 'in', partner_ids]]):\n for pk_corp in list(set(self.supplement_line_ids.mapped('pk_corp'))):\n order_id = self.sudo().env['tender.order'].create({'supplement_id': self.id,\n 'operid': self.buyer_id.id,\n 'pk_corp': pk_corp,\n 'supplier_id': partner_id.id}).id\n for supplement_line_id in self.env['tender.line'].search([['supplier_id', 'in', [partner_id.id]],\n ['supplement_id', '=', self.id]]):\n if supplement_line_id.pk_corp == pk_corp:\n self.sudo().env['tender.order_line'].create({'order_id': order_id,\n 'price': supplement_line_id.price,\n 'name': supplement_line_id.name,\n 'invcode': supplement_line_id.invcode,\n 'invspec': supplement_line_id.invspec,\n 'line_id': supplement_line_id.id,\n 'buyer_received_date': supplement_line_id.buyer_received_date,\n 'num': supplement_line_id.npraynum,\n 'measname': supplement_line_id.measname,\n 'tax': supplement_line_id.tax,\n 'npraynum': supplement_line_id.npraynum})\n return {\"type\": \"ir.actions.act_window\",\n \"res_model\": \"tender.order\",\n \"views\": [[False, \"tree\"], [False, \"form\"]],\n \"domain\": [[\"tender_id\", \"=\", self.id]]}\n","sub_path":"models/supplement.py","file_name":"supplement.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"260189280","text":"from datetime import datetime\nimport hashlib\nimport hmac\nimport sys\nimport os\nfrom time import sleep\nimport time\nimport urllib\n\n\nimport pandas as pd\nfrom pytz import timezone\nimport requests\n\n# 将repostory的目录i,作为根目录,添加到系统环境中。\nVNPY_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))\nif VNPY_ROOT not in sys.path:\n sys.path.append(VNPY_ROOT)\n print(f'append {VNPY_ROOT} into sys.path')\n\nfrom vnpy.gateway.binances.binances_gateway import (\n CHINA_TZ,\n D_REST_HOST,\n D_TESTNET_RESTT_HOST,\n 
F_REST_HOST,\n F_TESTNET_RESTT_HOST,\n Security\n)\nfrom vnpy.gateway.binance.binance_gateway import REST_HOST\nfrom vnpy.event.engine import EventEngine\nfrom vnpy.trader.utility import load_json, round_to\nfrom vnpy.api.rest import Request\nfrom vnpy.api.rest.rest_client import RestClient\n\nclass BianceRest(RestClient):\n def __init__(self):\n super().__init__()\n\n self.key: str = \"\"\n self.secret: str = \"\"\n\n self.order_count: int = 1_000_000\n self.time_offset = 0\n\n def sign(self, request: Request) -> Request:\n \"\"\"\n Generate BINANCE signature.\n \"\"\"\n security = request.data[\"security\"]\n if security == Security.NONE:\n request.data = None\n return request\n\n if request.params:\n path = request.path + \"?\" + urllib.parse.urlencode(request.params)\n else:\n request.params = dict()\n path = request.path\n\n if security == Security.SIGNED:\n timestamp = int(time.time() * 1000)\n\n if self.time_offset > 0:\n timestamp -= abs(self.time_offset)\n elif self.time_offset < 0:\n timestamp += abs(self.time_offset)\n\n request.params[\"timestamp\"] = timestamp\n\n query = urllib.parse.urlencode(sorted(request.params.items()))\n signature = hmac.new(self.secret, query.encode(\n \"utf-8\"), hashlib.sha256).hexdigest()\n\n query += \"&signature={}\".format(signature)\n path = request.path + \"?\" + query\n\n request.path = path\n request.params = {}\n request.data = {}\n\n # Add headers\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n \"X-MBX-APIKEY\": self.key\n }\n\n if security in [Security.SIGNED, Security.API_KEY]:\n request.headers = headers\n\n return request\n\n def on_query_time(self, data: dict, request: Request) -> None:\n \"\"\"\"\"\"\n local_time = int(time.time() * 1000)\n server_time = int(data[\"serverTime\"])\n self.time_offset = local_time - server_time\n\n def on_default_error(\n self, exception_type: type, exception_value: Exception, tb, request: Request\n ):\n \"\"\"\n Callback when cancelling order failed on server.\n \"\"\"\n # Record exception if not ConnectionError\n if not issubclass(exception_type, ConnectionError) and not issubclass(exception_type, ConnectionResetError):\n self.on_error(exception_type, exception_value, tb, request)\n\n\nclass Spot(BianceRest):\n def __init__(self):\n super().__init__()\n\n def connect(\n self,\n key: str,\n secret: str,\n session_number: int,\n proxy_host: str,\n proxy_port: int\n ):\n \"\"\"\n Initialize connection to REST server.\n \"\"\"\n self.key = key\n self.secret = secret.encode()\n self.proxy_port = proxy_port\n self.proxy_host = proxy_host\n\n self.connect_time = (\n int(datetime.now(CHINA_TZ).strftime(\"%y%m%d%H%M%S\")) * self.order_count\n )\n\n self.init(\"https://api.binance.com\", proxy_host, proxy_port)\n self.start(session_number)\n\n def query_time(self):\n \"\"\"\"\"\"\n data = {\n \"security\": Security.NONE\n }\n path = \"/api/v1/time\"\n\n return self.add_request(\n \"GET\",\n path,\n callback=self.on_query_time,\n data=data\n )\n\n def query_account(self, symbol: str = \"USDT\") -> Request:\n \"\"\"\"\"\"\n data = {\"security\": Security.SIGNED}\n\n path = \"/api/v3/account\"\n\n resp = self.request(\n method=\"GET\",\n path=path,\n data=data\n )\n if isinstance(resp, requests.Response):\n data = resp.json()\n for asset in data[\"balances\"]:\n if asset['asset'] == symbol:\n return float(asset[\"free\"])\n else:\n return self.query_account()\n\n\n def transfer(self, amount) -> Request:\n \"\"\"\n 从USDT期货帐号转到现货帐号\n \"\"\"\n data = 
{\"security\": Security.SIGNED}\n\n path = \"/sapi/v1/futures/transfer\"\n\n params = {\n \"asset\": \"USDT\",\n \"amount\": amount,\n \"type\": 2\n }\n\n resp = self.request(\n method=\"POST\",\n path=path,\n data=data,\n params=params\n )\n if isinstance(resp, requests.Response):\n if resp.status_code // 100 != 2:\n msg = f\"转帐失败,状态码:{resp.status_code},信息:{resp.text}\"\n print(msg)\n return False\n else:\n data = resp.json()\n return data['tranId']\n else:\n return self.transfer(amount)\n\n \n def swap(self, symbol, amount):\n \"\"\"\n 兑换\n \"\"\"\n data = {\"security\": Security.SIGNED}\n\n path = \"/api/v3/order\"\n orderid = \"NKD8FYX4-\" + str(self.connect_time)\n\n params = {\n \"symbol\": symbol + \"USDT\",\n \"side\": \"BUY\",\n \"type\": \"MARKET\",\n \"quantity\": amount,\n \"newClientOrderId\": orderid,\n \"newOrderRespType\": \"ACK\"\n }\n\n resp = self.request(\n method=\"POST\",\n path=path,\n data=data,\n params=params\n )\n if isinstance(resp, requests.Response):\n if resp.status_code // 100 != 2:\n msg = f\"兑换失败,状态码:{resp.status_code},信息:{resp.text}\"\n print(msg)\n return False\n else:\n data = resp.json()\n return data\n else:\n return self.swap(symbol, amount)\n\n def quota(self, symbol):\n \"\"\"\n 获取报价\n \"\"\"\n data = {\"security\": Security.NONE}\n\n path = \"/api/v3/ticker/price\"\n\n params = {\n \"symbol\": symbol + \"USDT\",\n }\n\n resp = self.request(\n method=\"GET\",\n path=path,\n data=data,\n params=params\n )\n if isinstance(resp, requests.Response):\n if resp.status_code // 100 != 2:\n msg = f\"获取报价失败,状态码:{resp.status_code},信息:{resp.text}\"\n print(msg)\n return False\n else:\n data = resp.json()\n return float(data['price'])\n else:\n return self.quota(symbol)\n\n def query_contract(self, symbol):\n \"\"\"\n 查询合约最小交易量\n \"\"\"\n data = {\"security\": Security.NONE}\n\n path = \"/api/v3/exchangeInfo\"\n\n resp = self.request(\n method=\"GET\",\n path=path,\n data=data,\n )\n if isinstance(resp, requests.Response):\n if resp.status_code // 100 != 2:\n msg = f\"获取报价失败,状态码:{resp.status_code},信息:{resp.text}\"\n print(msg)\n return False\n else:\n data = resp.json()\n for d in data[\"symbols\"]:\n base_currency = d[\"baseAsset\"]\n quote_currency = d[\"quoteAsset\"]\n if quote_currency == \"USDT\" and base_currency == symbol:\n min_volume = 1\n\n for f in d[\"filters\"]:\n if f[\"filterType\"] == \"LOT_SIZE\":\n min_volume = float(f[\"stepSize\"])\n return min_volume\n else:\n return self.quota(symbol)\n\n def purchase(self, symbol: str, amount) -> Request:\n \"\"\"\n 申购币安宝\n \"\"\"\n data = {\"security\": Security.SIGNED}\n\n path = \"/sapi/v1/lending/daily/purchase\"\n\n params = {\n \"productId\": symbol,\n \"amount\": amount,\n }\n\n resp = self.request(\n method=\"POST\",\n path=path,\n data=data,\n params=params\n )\n if isinstance(resp, requests.Response):\n if resp.status_code // 100 != 2:\n msg = f\"购买币安宝失败,状态码:{resp.status_code},信息:{resp.text}\"\n print(msg)\n return False\n else:\n data = resp.json()\n return data['purchaseId']\n else:\n return self.purchase(symbol, amount)\n\n \n\nclass Future(BianceRest):\n def __init__(self):\n super().__init__()\n\n def connect(\n self,\n usdt_base: bool,\n key: str,\n secret: str,\n session_number: int,\n server: str,\n proxy_host: str,\n proxy_port: int\n ) -> None:\n \"\"\"\n Initialize connection to REST server.\n \"\"\"\n self.usdt_base = usdt_base\n self.key = key\n self.secret = secret.encode()\n self.proxy_port = proxy_port\n self.proxy_host = proxy_host\n self.server = server\n\n self.connect_time = 
(\n int(datetime.now().strftime(\"%y%m%d%H%M%S\")) * self.order_count\n )\n\n if self.server == \"REAL\":\n if self.usdt_base:\n self.init(F_REST_HOST, proxy_host, proxy_port)\n else:\n self.init(D_REST_HOST, proxy_host, proxy_port)\n else:\n if self.usdt_base:\n self.init(F_TESTNET_RESTT_HOST, proxy_host, proxy_port)\n else:\n self.init(D_TESTNET_RESTT_HOST, proxy_host, proxy_port)\n\n self.start(session_number)\n\n self.query_time()\n\n def query_time(self) -> Request:\n \"\"\"\"\"\"\n data = {\n \"security\": Security.NONE\n }\n\n if self.usdt_base:\n path = \"/fapi/v1/time\"\n else:\n path = \"/dapi/v1/time\"\n\n return self.add_request(\n \"GET\",\n path,\n callback=self.on_query_time,\n on_error=self.on_default_error,\n data=data\n )\n \n def query_account(self) -> Request:\n \"\"\"\"\"\"\n data = {\"security\": Security.SIGNED}\n\n if self.usdt_base:\n path = \"/fapi/v2/account\"\n else:\n path = \"/dapi/v1/account\"\n\n resp = self.request(\n method=\"GET\",\n path=path,\n data=data\n )\n if isinstance(resp, requests.Response):\n data = resp.json()\n for asset in data[\"assets\"]:\n if asset['asset'] == \"USDT\":\n return float(asset[\"walletBalance\"])\n else:\n return self.query_account()\n\n\nif __name__ == \"__main__\":\n setting = load_json(\"connect_binances.json\")\n move_money_setting = load_json(\"move_money.json\")\n key = setting[\"key\"]\n secret = setting[\"secret\"]\n session_number = setting[\"会话数\"]\n server = setting[\"服务器\"]\n proxy_host = setting[\"代理地址\"]\n proxy_port = setting[\"代理端口\"]\n future_account = Future()\n future_account.connect(True, key, secret, session_number, server,\n proxy_host, proxy_port)\n spot_accout = Spot()\n spot_accout.connect(key, secret, session_number,\n proxy_host, proxy_port)\n sleep(10)\n usdt_wallet_balance = future_account.query_account()\n \n move_money = move_money_setting.get(\"money\", 15000)\n exchange = move_money_setting.get(\"exchange\", \"BTC\")\n min_volume = move_money_setting.get(\"min_volume\", 15)\n\n if usdt_wallet_balance > move_money + 0.1:\n diff = usdt_wallet_balance - move_money\n ret = spot_accout.transfer(diff)\n if ret:\n print(f\"成功从USDT合约帐号转换 {diff} USDT到现货帐号\")\n \n spot_wallet_balance = spot_accout.query_account(\"USDT\")\n if spot_wallet_balance > min_volume + 0.1 and exchange != \"USDT\":\n last_price = spot_accout.quota(exchange)\n amount = spot_wallet_balance / last_price\n min_volume_exchange = spot_accout.query_contract(exchange)\n amount = round_to(amount, min_volume_exchange)\n ret = spot_accout.swap(exchange, amount)\n if ret:\n print(f\"成功转移{spot_wallet_balance} USD为 {exchange}\")\n\n exchange_wallet_balance = spot_accout.query_account(exchange)\n if exchange_wallet_balance > move_money_setting['min_binance']:\n ret = spot_accout.purchase(exchange, exchange_wallet_balance)\n if ret:\n print(f\"将 {exchange_wallet_balance} {exchange} 申购币安宝\")\n\n pass","sub_path":"prod/job/move_money.py","file_name":"move_money.py","file_ext":"py","file_size_in_byte":13333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"454934198","text":"import os\r\nimport glob\r\nimport nibabel as nib\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom core import utils, losses\r\nimport tensorflow as tf\r\n\r\n\r\nconfig = tf.ConfigProto(allow_soft_placement=True)\r\nconfig.gpu_options.allow_growth = True\r\n\r\n\r\ndef load_image(name, dtype=np.float32):\r\n img = nib.load(name)\r\n return np.asarray(img.get_fdata(), dtype), img.affine, img.header\r\n\r\n\r\ndef 
process_image(data):\r\n data_norm = stats.zscore(data, axis=None, ddof=1)\r\n return np.expand_dims(np.expand_dims(data_norm, -1), 0)\r\n\r\n\r\ndef process_label(data, intensities=(0, 205)):\r\n n_class = len(intensities)\r\n label = np.zeros((np.hstack((data.shape, n_class))), dtype=np.float32)\r\n\r\n for k in range(1, n_class):\r\n label[..., k] = (data == intensities[k])\r\n\r\n label[..., 0] = np.logical_not(np.sum(label[..., 1:], axis=-1))\r\n return np.expand_dims(label, 0)\r\n\r\n\r\n# if __name__ == '__main__':\r\n#\r\n# for name in label_names:\r\n# os.system(r'zxhvolumelabelop %s %s -genprob 3 3 3' % (name, name.replace('_label', '_distance_prob')))\r\n\r\n\r\nif __name__ == '__main__':\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n\r\n data_path = '../../../../../../dataset/training_ct_20_commonspace2'\r\n os.chdir(data_path)\r\n print(os.getcwd())\r\n\r\n label_names = glob.glob('*label.nii.gz')\r\n image_suffix = 'image.nii.gz'\r\n label_suffix = 'label.nii.gz'\r\n\r\n import time\r\n\r\n save_path = './ncc_images'\r\n if not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\n\r\n label_intensities = (0, 205, 420, 500, 550, 600, 820, 850)\r\n\r\n image_tensor = tf.placeholder(tf.float32, [1, 112, 96, 112, 1])\r\n label_tensor = tf.placeholder(tf.float32, [1, 112, 96, 112, len(label_intensities)])\r\n\r\n # compute gradient image of intensity and label data\r\n label_grad = utils.compute_gradnorm_from_volume(label_tensor)\r\n image_grad = tf.reduce_sum(utils.compute_gradnorm_from_volume(image_tensor), axis=-1, keepdims=True)\r\n\r\n # compute local normalized cross-correlation maps from gradient images\r\n NCC = losses.CrossCorrelation(win=5, kernel='ones')\r\n ncc_tensor = tf.exp(tf.concat([NCC.ncc(image_grad, label_grad[..., i, None])\r\n for i in range(len(label_intensities))], axis=-1))\r\n\r\n with tf.Session(config=config) as sess:\r\n for name in label_names:\r\n print(name)\r\n time_start = time.time()\r\n label_name = os.path.basename(name)\r\n label, affine, header = load_image(label_name)\r\n image = load_image(label_name.replace(label_suffix, image_suffix))[0]\r\n\r\n # pre-processing for image and label\r\n image_data = process_image(image)\r\n label_data = process_label(label, intensities=label_intensities)\r\n\r\n # produce probability maps\r\n # prob_data = utils.get_prob_from_label(tf.constant(label_data), sigma=1.)\r\n\r\n # produce masks from probability maps\r\n # mask_data = utils.compute_mask_from_prob(prob_data).eval()\r\n # print(\"Mask percentage: %.4f\" % (np.sum(mask_data) / np.prod(mask_data.shape)))\r\n\r\n\r\n # evaluate tensors\r\n # prob_grad = prob_grad_tensor.eval()\r\n # image_grad = image_grad_tensor.eval()\r\n ncc = sess.run(ncc_tensor, feed_dict={image_tensor: image_data, label_tensor: label_data})\r\n print(\"NCC percentage: %.4f\" % (np.sum(ncc > 1) / np.prod(ncc.shape)))\r\n\r\n # save into nifty files\r\n # prob = nib.Nifti1Image((prob_data.eval().squeeze(0)*1000).astype(np.uint16),\r\n # affine=affine, header=header)\r\n # nib.save(prob, os.path.join(save_path, label_name.replace(label_suffix, 'prob.nii.gz')))\r\n\r\n # mask = nib.Nifti1Image(mask_data.squeeze(0).astype(np.uint16), affine=affine, header=header)\r\n # nib.save(mask, os.path.join(save_path, label_name.replace(label_suffix, 'mask.nii.gz')))\r\n\r\n ncc_nii = nib.Nifti1Image((ncc.squeeze(0)), affine=affine, header=header)\r\n nib.save(ncc_nii, os.path.join(save_path, label_name.replace(label_suffix, 'ncc_exp.nii.gz')))\r\n\r\n # img_grad_nii = 
nib.Nifti1Image((image_grad.squeeze((0, -1))*1000).astype(np.uint16), affine=affine, header=header)\r\n # nib.save(img_grad_nii, os.path.join(save_path, 'img_grad_' + label_name))\r\n\r\n # prob_grad_nii = nib.Nifti1Image((prob_grad.squeeze((0, -1))*1000).astype(np.uint16), affine=affine, header=header)\r\n # nib.save(prob_grad_nii, os.path.join(save_path, 'prob_grad_' + label_name))\r\n\r\n time_end = time.time()\r\n print(\"Elapsing time: %s\" % (time_end - time_start))\r\n","sub_path":"src_3d/help/produce_ncc_images.py","file_name":"produce_ncc_images.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"362665398","text":"import datetime\nfrom django.db.models import Q\nfrom django.http.response import JsonResponse\nfrom django.shortcuts import render, redirect, reverse\nfrom django.views.generic import View, ListView, DetailView\nfrom re_shop_system import settings\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger\nfrom libs.paginators import page_range_func\nfrom .models import Contract\nfrom ..oper.models import Operator\nfrom ..shop.models import Shop\nfrom libs.date_split import month_split, MONTH, SEASON, YEAR\nfrom ..rental.models import Rental\n# Create your views here.\n\nclass ContractList(View):\n\n def get(self, request):\n qdic = request.GET\n# < QueryDict: {'start': ['2017-05-20'], 'end': ['2019-05-20'], 'statu_ids': ['1'], 'search': ['张飞']} >\n dic = qdic.dict()\n# {'start': '2017-05-20', 'end': '2019-05-20', 'statu_ids': '1', 'search': '张飞'}\n fmt = '%Y-%m-%d'\n today = datetime.date.today()\n for contract in Contract.objects.all():\n if contract.end_time < datetime.date.today():\n contract.contract_status=2\n contract.save()\n # 将合同到期的记录状态更新为2\n # Contract.objects.filter(end_time__lt=today).update(contract_status=2)\n # 显示未过期合同\n # __lt__(self, other)\t小于 __gt__(self, other) 大于\n contracts = Contract.objects.filter(end_time__gt=today)\n # {0: '选择合同状态', 1: '合同期', 2: '合同结束', 3: '合同未开始', 4: '毁约'}\n dic_status = dict(Contract.CONTRACT_STATUS)\n # 将状态信息传递到前端\n for contract in contracts:\n status_id = contract.contract_status\n contract.contract_status = dic_status[status_id]\n # contract.save() # ValueError\n start_time = request.GET.get('start')\n end_time = request.GET.get('end')\n search = request.GET.get('search')\n statu_id = int(request.GET.get('statu_ids',0))\n if search:\n contracts = contracts.filter(\n Q(shop_name__contains=search) |\n Q(shop__shop_address__contains=search) |\n Q(operator__operator_name__contains=search)\n )\n if start_time:\n contracts = contracts.filter(start_time__gte=start_time)\n if end_time:\n contracts = contracts.filter(end_time__lte=end_time)\n if statu_id:\n contracts = contracts.filter(contract_status=statu_id)\n\n # 分页显示\n per_page = settings.PER_PAGE\n paginator = Paginator(contracts, per_page)\n '''\n paginator.object_list =>\n < QuerySet[ < Contract: < QuerySet[ < Shop: 长沙市 - 芙蓉区 >] > -雌雄子母剑 - ] >>, \n < Contract: ] > -方天画戟 - < QuerySet[ < Operator: 吕布ct: ] > -丈八蛇矛 - < QuerySet[ < Operator: 张飞 - 13456789444 >] >>, \n < Contract: ] > -青龙偃月刀 - ] >> hop: 长沙市 - 芙蓉区 >] > -歌舞升平 - ] >>] >\n '''\n max_pages = settings.MAX_PAGES\n try:\n current_page = request.GET.get('page',1)\n # < Page 1 of 1 >\n '''\n [ < Contract: < QuerySet[ < Shop: 长沙市 - 芙蓉区 - 长沙火车站 >] > -雌雄子母剑 - ] >>, \n < Contract: ] > -方天画戟 - < QuerySet[ < Operator: 吕布 ] > -丈八蛇矛 - ] >>, \n < Contract: ] > -青龙偃月刀 - < QuerySet[ < Operator: 关云长 - 
13456789666 >] >>, < C长沙市 - 芙蓉区 - 马王堆 >] > -歌舞升平 - < QuerySet[ < Operator: 董卓 - 13456789222 >] >>]\n '''\n current_paginator = paginator.page(current_page)\n except (InvalidPage, EmptyPage, PageNotAnInteger) as ex:\n current_page = 1\n current_paginator = paginator.page(current_page)\n\n # range(1, 3)\n page_range = page_range_func(paginator, current_page, max_pages)\n info = {\n 'page_range':page_range,\n 'current_paginator':current_paginator,\n 'paginator':paginator,\n 'dic_status': dic_status,\n 'contracts': contracts,\n 'query_dict':request.GET,\n }\n return render(request, 'contract_list.html',info)\n\n\nclass ContractAdd(View):\n\n def get(self, request):\n # fmt = '%Y-%m-%d'\n # contract = Contract.objects.get(pk=1)\n # start = contract.start_time.strftime(fmt)\n # print(start)\n # print(type(start))\n today = datetime.date.today()\n # 从未出租过\n shops = list(Shop.objects.filter(contract_set=None))\n # 出租过,但合同已到期或合同状态\n shops2 = Shop.objects.exclude(contract_set=None)\n for item in shops2:\n # contract_status => 1 => 正常出租\n if not item.contract_set.filter(end_time__gt=today, contract_status=1):\n # Shop实例对象有append方法 但是QuerySet对象没有\n shops.append(item)\n operators = Operator.objects.all()\n pay_type = dict(Contract.PAY_TYPE)\n info = {'pay_type':pay_type, 'operators':operators, 'shops':shops}\n return render(request, 'contract_add.html',info)\n\n def post(self, request):\n # request.POST\n # < QueryDict: {'shop_ids[]': ['7'], 'shop_name': ['犬夜叉'], 'operator_ids[]': ['5', '7'], 'start': ['2019-06-01'],\n # 'end': ['2020-06-01'], 'pay_type': ['2'], 'contract_rent': ['6000'], 'shop_addr': ['']} >\n shop_ids = request.POST.getlist('shop_ids[]',[])\n shop_name = request.POST.get('shop_name')\n # print(shop_name) #犬夜叉\n operator_ids = request.POST.getlist('operator_ids[]',[])\n # print(operator_ids) # ['5', '7']\n fmt = '%Y-%m-%d'\n today = datetime.date.today().strftime(fmt)\n start = request.POST.get('start', today)\n # print(type(start)) # \n end = request.POST.get('end', today) # str\n # 将str_type转换为datetime_type\n datetime_start = datetime.datetime.strptime(start, fmt)\n # datetime_start.strftime(fmt) => 2019-06-01 \n # print(datetime_start) # 2019-06-01 00:00:00\n\n # print(type(datetime_start)) # \n datetime_end = datetime.datetime.strptime(end,fmt)\n pay_type = int(request.POST.get('pay_type', 0))\n contract_status = 1\n pay_amount = 0\n # print(pay_type) # 2\n contract_rent = int(request.POST.get('contract_rent'))\n contract_reminder_day = request.POST.get('contract_reminder_day')\n if not contract_reminder_day.strip(): contract_reminder_day = datetime_start.day\n if shop_name and pay_type and contract_rent:\n contract = Contract(\n start_time=start,\n end_time=end,\n shop_name=shop_name,\n contract_rent=contract_rent,\n pay_type=pay_type,\n contract_reminder_day = contract_reminder_day,\n contract_status=contract_status,\n )\n contract.save()\n\n if shop_ids:\n for shop_id in shop_ids:\n shop = Shop.objects.get(pk=shop_id)\n contract.shop.add(shop) # 可以把add看作是列表中的append方法,从而不会覆盖掉之前的变量\n contract.save()\n if operator_ids:\n for operator_id in operator_ids:\n operator = Operator.objects.get(pk=operator_id)\n contract.operator.add(operator)\n contract.save()\n contract_id = contract.contract_id\n print(type(contract.start_time))\n # 根据支付方式生成交租单\n contract_pay_type_dict = {1: {\"pay_type\": MONTH, \"pay_money\": 1}, 3: {\"pay_type\": SEASON, \"pay_money\": 3},\n 12: {\"pay_type\": YEAR, \"pay_money\": 12}}\n\n rental_cycles = month_split(datetime_start, datetime_end, step=pay_type)\n 
for start_date, end_date in rental_cycles:\n # print(start_date.strftime(fmt), end_date.strftime(fmt)) #2017-05-20 2017-08-19 str\n Rental.objects.create(\n start_date=start_date,\n pay_amount = pay_amount,\n end_date=end_date,\n should_amount=contract_pay_type_dict[pay_type]['pay_money']*contract_rent,\n arrears=contract_pay_type_dict[pay_type]['pay_money']*contract_rent-pay_amount,\n pay_date = today,\n contract_id=contract_id,\n )\n info = {\n 'code': 200,\n 'msg': '合同添加成功!',\n }\n else:\n info = {\n 'code':400,\n 'msg':'合同添加失败!',\n }\n return JsonResponse(info)\n\n\nclass ContractEdit(View):\n\n def get(self, request, id):\n contract = Contract.objects.get(pk=id)\n operators = Operator.objects.all()\n shops = list(contract.shop.all())\n # 未出租的商铺\n shops_no_contract = Shop.objects.filter(contract_set=None)\n print(shops_no_contract)\n for shop_no_contract in shops_no_contract:\n shops.append(shop_no_contract)\n # 合同已经结束或者未开始的商铺\n cons = Contract.objects.exclude(contract_status=1)\n contract_status_choice = dict(Contract.CONTRACT_STATUS)\n for con in cons:\n shops_contract_over = con.shop.all()\n for shop_contract_over in shops_contract_over:\n shops.append(shop_contract_over)\n shops_list = []\n for shop in shops:\n if shop not in shops_list:\n shops_list.append(shop)\n # print(shops_list)\n pay_type = dict(Contract.PAY_TYPE)\n info = {\n 'contract':contract,\n 'pay_type':pay_type,\n 'shops_list':shops_list,\n 'operators':operators,\n 'contract_status_choice':contract_status_choice,\n }\n return render(request, 'contract_edit.html', info)\n\n def post(self, request, id):\n try:\n contract = Contract.objects.get(pk=id)\n fmt = '%Y-%m-%d'\n today = datetime.date.today().strftime(fmt) # str\n shop_ids = request.POST.getlist('shop_ids[]', []) # 门面id\n operator_ids = request.POST.getlist('operator_ids[]', []) # 经营人id\n shop_name = request.POST.get('shop_name', contract.shop_name)\n start = request.POST.get('start', today) # \n # contract.start_time => \n contract_start_time = contract.start_time.strftime(fmt)\n end = request.POST.get('end', today)\n contract_end_time = contract.end_time.strftime(fmt)\n datetime_start = datetime.datetime.strptime(start,fmt) # \n print(contract_start_time == datetime_start.strftime(fmt))\n datetime_end = datetime.datetime.strptime(end, fmt)\n pay_type = int(request.POST.get('pay_type',0))\n contract_pay_type = contract.pay_type\n contract_rent = int(request.POST.get('contract_rent')) # 月租金\n contract_reminder_day = request.POST.get('contract_reminder_day')\n contract_status = int(request.POST.get('contract_status', 0)) # 1\n if not contract_reminder_day.strip(): contract_reminder_day = datetime_start.day\n if pay_type == contract_pay_type and \\\n datetime_start.strftime(fmt) == contract_start_time and \\\n datetime_end.strftime(fmt) == contract_end_time:\n modify_flag = 0\n print('modify_flag = 0')\n else:\n modify_flag = 1\n print('modify_flag = 1')\n if shop_name and pay_type:\n contract.pay_type = pay_type\n contract.shop_name = shop_name\n contract.contract_reminder_day = contract_reminder_day\n contract.start_time = start\n contract.end_time = end\n contract.contract_status = contract_status\n contract.save()\n if shop_ids:\n print('shop_ids clear...')\n contract.shop.clear()\n for shop_id in shop_ids:\n shop = Shop.objects.get(pk=shop_id)\n contract.shop.add(shop)\n contract.save()\n if operator_ids:\n print('operator_ids clear...')\n contract.operator.clear()\n for operator_id in operator_ids:\n operator = Operator.objects.get(pk=operator_id)\n 
contract.operator.add(operator)\n contract.save()\n else:\n raise ValueError('数据不完整')\n\n # 删除旧合同,添加新合同\n contract_id = contract.contract_id\n pay_type_dict = {\n 1:{'pay_type':1, 'pay_money':1},\n 3:{'pay_type':3, 'pay_money':3},\n 12:{'pay_type':12, 'pay_money':12},\n }\n if modify_flag:\n print('this is modify_flag...')\n rentals = Rental.objects.filter(contract_id=contract_id).order_by('start_date')\n Rental.objects.filter(contract_id=contract_id).delete()\n cycle_should_amount = contract_rent * pay_type_dict[pay_type]['pay_money'] # 周期应付租金\n # 租金表单内所有实际支付金额汇总\n pay_amount_sum = 0\n for rental in rentals:\n pay_amount = rental.pay_amount\n pay_amount_sum += pay_amount\n pay_cycle = month_split(datetime_start, datetime_end, step=pay_type)\n for start_date,end_date in pay_cycle:\n # 有错 UnboundLocalError: local variable 'pay_amount_sum' referenced before assignment\n # => pay_amount_sum = 0 放在for循环外面\n cycle_pay_amount = pay_amount_sum - cycle_should_amount # 周期支付租金\n if cycle_pay_amount <= 0:\n cycle_pay_amount = pay_amount_sum\n else:\n cycle_pay_amount = cycle_should_amount\n cycle_arrears = cycle_should_amount - cycle_pay_amount # 欠款只有0与大于0的情况\n\n Rental.objects.create(\n contract_id = contract_id,\n start_date = start_date.strftime(fmt),\n end_date = end_date.strftime(fmt),\n pay_date = today,\n pay_amount = cycle_pay_amount,\n should_amount = cycle_should_amount,\n arrears = cycle_arrears,\n )\n\n info = {\n 'code':200,\n 'msg':'修改成功!',\n }\n except Exception as ex:\n info = {\n 'code':400,\n 'msg':'数据不完整',\n }\n return JsonResponse(info)","sub_path":"Shop_system/apps/contract/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"371694167","text":"from collections import Counter\ndef popular_words(text: str, words: list) -> dict:\n c = dict(Counter(text.split()))\n c = {k.lower(): v for k, v in c.items()}\n o = {}\n for i in words:\n if i in c.keys():\n o[i] = c.get(i)\n else:\n o[i] = 0\n return o\n\n\n\nif __name__ == '__main__':\n\n # These \"asserts\" are used for self-checking and not for an auto-testing\n print(popular_words('''\nWhen I was One\nI had just begun\nWhen I was Two\nI was nearly new\n''', ['i', 'was', 'three', 'near']), {\n 'i': 4,\n 'was': 3,\n 'three': 0,\n 'near': 0\n })\n","sub_path":"python/popular-words.py","file_name":"popular-words.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"486740638","text":"from django.shortcuts import render,HttpResponse,redirect\nfrom .models import *\nfrom django.contrib import messages\nimport datetime\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.contrib.auth import authenticate, login, logout\n\n# Create your views here.\n\n#定义分页器\n#\ndef fenye(request,list,yeshu):\n paginator = Paginator(list, yeshu)\n pag_num = paginator.num_pages\n curuent_page_num = int(request.GET.get('page', 1)) # 获取当前页数,默认为1\n curuent_page = paginator.page(curuent_page_num)\n if pag_num < 11: # 判断当前页是否小于11个\n pag_range = paginator.page_range\n elif pag_num > 11:\n if curuent_page_num < 6:\n pag_range = range(1, 11)\n elif curuent_page_num > (paginator.num_pages) - 5:\n pag_range = range(pag_num - 9, pag_num + 1)\n else:\n pag_range = range(curuent_page_num - 5, curuent_page_num + 5) # 当前页+5大于最大页数时\n stat={\"pagintor\" : 
paginator,\"current_Page\":curuent_page,\"current_Page_num\":curuent_page_num,\"pag_range\":pag_range}\n return stat\n\n\n\n#首页的操作\n#显示提醒事项以及显示维护员和设备的数量\n\ndef index(request):\n\n\n username = request.session.get('username', '')\n if not username:\n messages.warning(request,'请登录!')\n return render(request,'adlogin.html')\n\n\n #获取维护员以及设备数量\n stulen=Stu.objects.all()\n stu_len=stulen.count()\n equlen=Equinfo.objects.all()\n equ_count=equlen.count()\n # print(equ_count)\n #获取提醒事项\n remind=Reminder.objects.order_by(\"-id\")#倒序取出提醒事项\n recent=Recent.objects.order_by('-id')#倒序取出近期登陆情况\n\n rec = Paginator(recent,5)\n t_num = rec.num_pages\n c_page_num = int(request.GET.get('tpage', 1)) # 获取当前页数,默认为1\n c_page = rec.page(c_page_num)\n if t_num < 11: # 判断当前页是否小于11个\n t_range = rec.page_range\n elif t_num > 11:\n if c_page_num < 6:\n t_range = range(1, 11)\n elif c_page_num > (rec.num_pages) - 5:\n t_range = range(t_num - 9, t_num + 1)\n else:\n t_range = range(c_page_num - 5, c_page_num + 5) # 当前页+5大于最大页数时\n #分页\n stat=fenye(request,remind,5)\n stat['username']=username\n stat['user_count']=stu_len\n stat['equ_count']=equ_count\n stat['recent']=rec\n stat['c_Page']=c_page\n stat['c_Page_num']=c_page_num\n stat['t_range']=t_range\n\n return render(request, 'index.html', stat)\n\n###################学生管理####################\n\n\n\ndef stuman(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n\n # stu_list=Stu.objects.order_by(\"-id\")#倒序取出\n stu_list=Stu.objects.all().order_by('id')\n stu=fenye(request,stu_list,6)\n stu['username']=username\n return render(request,\"stuman.html\",stu)\n\n\ndef studel(request):\n stuid=request.GET.get('nid')\n print(stuid)\n Stu.objects.filter(id=stuid).delete()\n return redirect(('/index/stuman'))\n\ndef stuch(request):\n if request.method=='GET':\n return render(request,'stuman.html')\n else:\n userID=request.POST.get('usrID')\n pwd = request.POST.get('pwd')\n nid = request.POST.get('userid')\n userna = request.POST.get('userna')\n ret={'status':True,'errormsg':None}\n print(nid)\n print(userna)\n print(pwd)\n try:\n stu=Stu.objects.filter(id=nid).update(stuser=userID,stuname=userna,stpwd=pwd)\n except Exception as e:\n ret['status']=False\n ret['errormsg']=e\n return HttpResponse(str(ret))\n\ndef addstu(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n if request.method == 'GET':\n return render(request, 'addstu.html', {'username': username})\n else:\n stat = {'msg': None, 'username': username}\n uflag = 0\n usrid=request.POST.get('userID')\n u = request.POST.get('userna')\n p = request.POST.get('userpwd')\n pp = request.POST.get('userpwdd')\n if p == pp:\n if not u:\n uflag = 1\n elif \" \" in u or \" \" in p or \" \" in usrid:\n uflag = 2\n else:\n stat['msg'] = '两次密码不一致!'\n return render(request, 'addstu.html', stat)\n\n if uflag == 1:\n stat['msg'] = '用户名不能为空!'\n return render(request, 'addstu.html', stat)\n elif uflag == 2:\n stat['msg'] = '字段不能包含空格!'\n return render(request, 'addstu.html', stat)\n # d=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n # print(usrid+u+p+pp)\n stu=Stu.objects.create(stuname=u,stpwd=p,stuser=usrid)\n messages.success(request,'添加成功!')\n return redirect('/index/stuman')\n\ndef stusearch(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return 
render(request, 'adlogin.html')\n name=request.GET.get('user')\n stu_list=Stu.objects.filter(stuname=name)\n stat=fenye(request,stu_list,6)\n stat['username']=username\n return render(request, 'stuman.html', stat)\n\n\n################管理员操作####################\n\n\ndef adminman(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n # pass\n admin_list = Admin.objects.all().order_by('id')\n adm=fenye(request,admin_list,6)\n adm['username']=username\n return render(request,\"adminman.html\",adm)\n # return redirect('/adminman/')\n\n\n\ndef adel(request):\n # return redirect('/index/adminman')\n adid=request.GET.get('nid')\n print(adid)\n Admin.objects.filter(id=adid).delete()\n # return render(request,'adminman.html')\n return redirect('/index/adminman')\n\n\ndef adch(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n if request.method=='GET':\n return render(request,'adminman.html')\n else:\n pwd = request.POST.get('pwd')\n nid = request.POST.get('userid')\n userna = request.POST.get('userna')\n ret={'status':True,'errormsg':None}\n print(nid)\n print(userna)\n print(pwd)\n try:\n Adch = Admin.objects.filter(id=nid).update(aduser=userna,adpwd=pwd)\n except Exception as e:\n ret['status']=False\n ret['errormsg']='处理异常,请重试!'\n messages.success(request,'修改成功!')\n return HttpResponse(str(ret))\n # return HttpResponse(str(ret))\n # return render(request,'adminman.html')\n\n\ndef adsearch(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n name=request.GET.get('user')\n ad_list=Admin.objects.filter(aduser=name)\n stat=fenye(request,ad_list,6)\n stat['username']=username\n\n return render(request, 'adminman.html', stat)\n\n\n\n\ndef addadmin(request):\n username = request.session.get('username', '')\n if not username:\n messages.warning(request, '请登录!')\n return render(request, 'adlogin.html')\n if request.method=='GET':\n return render(request,'addadmin.html',{'username':username})\n else:\n stat={'msg':None,'username':username}\n uflag=0\n nid=request.POST.get('userid')\n u=request.POST.get('userna')\n p=request.POST.get('userpwd')\n pp=request.POST.get('userpwdd')\n a=request.POST.get('select')\n if p==pp:\n if not u:\n uflag=1\n elif \" \" in u or \" \" in p:\n uflag=2\n elif not nid:\n uflag=3\n else:\n stat['msg']='两次密码不一致!'\n return render(request,'addadmin.html',stat)\n\n if uflag==1:\n stat['msg'] = '用户名不能为空!'\n return render(request, 'addadmin.html', stat)\n elif uflag==2:\n stat['msg']='用户名或密码不能包含空格!'\n return render(request,'addadmin.html',stat)\n elif uflag==3:\n stat['msg'] = '用户工号不能为空!'\n return render(request, 'addadmin.html', stat)\n try:\n admin=Admin.objects.create(adid=nid,aduser=u,adpwd=p,authority=a)\n messages.success(request, '添加成功!')\n return redirect('/index/adminman')\n except Exception as e:\n return render(request,'addadmin.html',{'msg':'处理异常,请重试!'})\n\n","sub_path":"EquMainSys/equipment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"297499337","text":"import pygame\nimport random\nimport os\nxpos = 440\nypos = 140\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % 
(xpos,ypos)\n\n\npygame.init()\n\npygame.display.set_caption(\"Snek!\")\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,255)\nblack = (0,0,0)\nwin_width = 400\nwin_height = 400\nfps = 12\n\nwin = pygame.display.set_mode((win_width,win_height))\nbackground_image = pygame.image.load('background.png').convert()\nbackground_image = pygame.transform.scale(background_image, (win_width,win_height))\nclock = pygame.time.Clock()\n\nclass Snake:\n\n def __init__(self, width, height, x, y, vel):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.vel = vel\n self.hitbox = (self.x, self.y, self.width, self.height)\n\n def draw(self, win):\n snake_sprite = pygame.image.load('snake.png').convert()\n win.blit(snake_sprite, (self.x,self.y))\n self.hitbox = (self.x, self.y, self.width, self.height)\n \n def get_rect(self):\n return pygame.Rect(self.hitbox)\n\n def get_x(self):\n return self.x\n \n def get_y(self):\n return self.y\n\nclass Tail:\n \n def __init__(self, width, height, x, y, tailposition):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.tailposition = tailposition\n self.index = 0\n self.hitbox = (self.x, self.y, self.width, self.height)\n\n def draw(self, win):\n snake_sprite = pygame.image.load('snake.png').convert()\n self.x = self.get_x()\n self.y = self.get_y()\n win.blit(snake_sprite, (self.x,self.y))\n self.hitbox = (self.x, self.y, self.width, self.height)\n \n def get_rect(self):\n return pygame.Rect(self.hitbox)\n\n def get_x(self):\n self.index = -1*(self.tailposition+1)\n self.x = positionx[self.index]\n return self.x\n def get_y(self):\n self.index = -1*(self.tailposition+1)\n self.y = positiony[self.index]\n return self.y\n \nclass Treat:\n def __init__(self, width, height, x, y):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.hitbox = (self.x, self.y, self.width, self.height)\n\n def draw(self, win):\n treat_sprite = pygame.image.load('treat.png').convert()\n win.blit(treat_sprite, (self.x,self.y))\n self.hitbox = (self.x, self.y, self.width, self.height)\n\n def get_rect(self):\n\n return pygame.Rect(self.hitbox)\n\n\nsnake = Snake(10, 10, 200, 200, 10)\ntailobjectlist = [snake]\npositionx = [snake.x]\npositiony = [snake.y]\npositionoflasttail = []\npositionoflasttail = []\nscore = []\ntreats=[]\ntail = False\nnumberoftreats = True\nmoveright = True\nmoveleft = False\nmoveup = False\nmovedown = False\ngameover = False\n\ndef gameover():\n global gameover\n gameover = True\n snake.vel = 0\n redrawGameWindow()\n## gameFont = pygame.font.SysFont('fixedsys', 60)\n## gameoversurface = gameFont.render('Game Over.', False, (0,0,0))\n #win.blit(gameoversurface,(5,5))\n\ndef redrawGameWindow():\n win.blit(background_image, (0,0))\n global gameover\n \n if gameover == True:\n gameFont = pygame.font.SysFont('fixedsys', 60)\n gameoversurface = gameFont.render('Game Over.', False, (255,0,0))\n win.blit(gameoversurface,(100,160))\n \n snake.draw(win)\n treat.draw(win)\n if len(tailobjectlist) > 0:\n for i in tailobjectlist:\n i.draw(win)\n \n pygame.display.update()\n\nrun = True\nwhile run:\n clock.tick(fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n while numberoftreats:\n width = 10\n height = 10\n x = random.randint(0, ((win_width-width)//10))*10\n y = random.randint(0, ((win_height-height)//10))*10\n treat = Treat(width, height, x, y)\n treats.append(treat)\n numberoftreats = False\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and snake.x > 0:\n 
moveright = False\n moveleft = True\n moveup = False\n movedown = False\n if keys[pygame.K_RIGHT] and snake.x < win_width-snake.width:\n moveright = True\n moveleft = False\n moveup = False\n movedown = False\n if keys[pygame.K_UP] and snake.y > 0:\n moveright = False\n moveleft = False\n moveup = True\n movedown = False\n if keys[pygame.K_DOWN] and snake.y < win_height-snake.height:\n moveright = False\n moveleft = False\n moveup = False\n movedown = True\n\n if moveright:\n snake.x += snake.vel\n positionx.append(snake.x)\n positiony.append(snake.y)\n if moveleft:\n snake.x -= snake.vel\n positionx.append(snake.x)\n positiony.append(snake.y)\n if moveup:\n snake.y-=snake.vel\n positionx.append(snake.x)\n positiony.append(snake.y)\n if movedown:\n snake.y += snake.vel\n positionx.append(snake.x)\n positiony.append(snake.y)\n \n \n if snake.get_rect().colliderect(treat.get_rect()):\n treats.clear()\n score.append(1)\n numberoftreats = True\n tailposition = (len(tailobjectlist))\n index = -1*(len(tailobjectlist)+1) \n if len(tailobjectlist)==0:\n tail= True\n tail = Tail(10, 10, positionx[index], positiony[index], (tailposition))\n tailobjectlist.append(tail)\n tailposition+=1\n else:\n tail = Tail(10, 10, positionx[index], positiony[index], tailposition)\n tailobjectlist.append(tail)\n tailposition+=1\n\n if (len(tailobjectlist))>0:\n for i in tailobjectlist[1:-1]:\n if snake.get_rect().colliderect(i.get_rect()):\n gameover()\n if snake.x >= win_width or snake.x < 0:\n gameover()\n if snake.y >= win_height or snake.y <0:\n gameover()\n \n \n redrawGameWindow()\n\n\npygame.quit()\n","sub_path":"worm clone.py","file_name":"worm clone.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"432553790","text":"\"\"\"\nCopyright © 2020 Forescout Technologies, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport json\nimport urllib.request\n\n# Extract the token\nbearer_token = params[\"connect_authorization_token\"]\n\n# Setup list of tables we are going iterate through\nreadtables = [\"connect_cherwell_fs_staging_table\"]\n\n# API URL\nurl_call = params[\"connect_cherwell_base_url\"]\nurl_path = \"/api/V1/getbusinessobjectsummary/busobname/\"\n\n# URL Header Type Information\nurl_content_type = params[\"connect_cherwell_content_type\"]\nurl_accept_type = params[\"connect_cherwell_accept_type\"]\n\n# -------------------------------------------------------------------------------------------------\n\n# Setup the header information to information from Cherwell\nheader_info = {\n 'Content-Type': url_content_type,\n 'Accept': url_accept_type,\n 'Authorization': 'Bearer ' + bearer_token\n}\n\n# initialize the payload variable and variables that are used to store\n# the object info for the tables we are going to reference\npayload_data = ''\n\nfsimporttable_busObjID = ''\nfsimporttable_Name = ''\n\n# ----- Process table information\nfor x in readtables:\n # Grab the table name based on the parameter\n tablename = params[x]\n\n # Build the entire url\n target_url = url_call + url_path + tablename\n\n # Issue the REST API call and retrieve the information\n request_response = urllib.request.Request(target_url, headers=header_info,\n data=bytes(payload_data, encoding=\"utf-8\"), method='GET')\n resp = urllib.request.urlopen(request_response, context=ssl_context)\n\n # Read the json responses\n request_response = json.loads(resp.read())\n\n # Process the output based on which paramter is being pulled back\n # Extract the ObjID and the Display Name\n fsimporttable_busObjID = request_response[0]['busObId']\n fsimporttable_Name = request_response[0]['name']\n\n# Server URL update needed to grab the objects schema id\n# this is required to build the payload for the submission\n# of data as the busObj name and ID are required to process\nurl_path = \"/api/V1/getbusinessobjectschema/busobid/\" + fsimporttable_busObjID\n\n# No payload to send but field is still used to send\npayload_data = ''\nvariables = {}\n\n# Build the entire url to grab the business object schema\ntarget_url = url_call + url_path\n\n# Issue the REST API call and retrieve the information\nrequest_response = urllib.request.Request(target_url, headers=header_info, data=bytes(payload_data, encoding=\"utf-8\"),\n method='GET')\nresp = urllib.request.urlopen(request_response, context=ssl_context)\n\ncmdb_payload = {}\ncmdb_payload[\"busObId\"] = fsimporttable_busObjID\n\n# Read the json responses\nrequest_response1 = json.loads(resp.read())\n\n# Need to read and store the field definitions\ntable_fields = request_response1['fieldDefinitions']\ntable_field_limited = [\"U_ClassificationMethod\", \"U_Comment\", \"U_Company\", \"U_ComplianceStatus\", \"U_Department\",\n \"U_DeviceInterfaces\", \"U_DHCPDeviceClass\", \"U_DHCPDeviceOS\", \"U_DHCPDomainName\",\n \"U_DHCPHostname\", \"U_DHCPOptionsFingerPrint\", \"U_DHCPServerAddress\", \"U_DHCPVendorClass\",\n \"U_DisplayName\", \"U_DistinguishedName\", \"U_DNSName\", \"U_ExternalDrives\", \"U_Function\",\n \"U_HostIsOnline\", \"U_HotFixInstalled\", \"U_IntranetWSUSServer\", \"U_IPAddress\", \"U_IPv6Address\",\n 
\"U_IPv6LinkLocalAddress\", \"U_LDAPUserName\", \"U_LinuxHostname\", \"U_LinuxManagable\",\n \"U_LinuxManageableSSHDirect\", \"U_LinuxUser\", \"U_LinuxVersion\", \"U_MACAddress\",\n \"U_MicrosoftAppsInstalled\", \"U_MobilePhone\", \"U_NetBIOSDomain\", \"U_NetBIOSHostname\",\n \"U_NetBIOSMembershipType\", \"U_NetworkAdapters\", \"U_NetworkFunction\", \"U_NICVendor\",\n \"U_NICVendorValue\", \"U_NumberOfHostsOnPort\", \"U_NumberOfIPAddresses\", \"U_OpenPorts\",\n \"U_OSFingerprint\", \"U_Phone\", \"U_RunningConfig\", \"U_RunningConfigTime\",\n \"U_SecureConnectorDeploymentType\", \"U_SecureConnectorSysTrayDisplay\", \"U_SecureConnectorVersion\",\n \"U_ServiceBanner\", \"U_SignedInStatus\", \"U_StreetAddress\", \"U_SwitchHostname\", \"U_SwitchIP\",\n \"U_SwitchIPandPortName\", \"U_SwitchLocation\", \"U_SwitchPortAction\", \"U_SwitchPortAlias\",\n \"U_SwitchPortConfigurations\", \"U_SwitchPortConnect\", \"U_SwtichPortName\",\n \"U_SwitchPortPoEConnectedDevice\", \"U_SwitchPortPoEPowerConsumption\", \"U_SwitchPortVLAN\",\n \"U_SwtichPortVLANGroup\", \"U_SwitchPortVLANName\", \"U_SwitchPortVoiceDevice\",\n \"U_SwitchPortVoiceVLAN\", \"U_SwitchVendor\", \"U_SwitchVirtualInterface\", \"U_SwitchVoIPPortn\",\n \"U_SystemDescription\", \"U_Title\", \"U_User\", \"U_UserGivenName\", \"U_VirtualMachineHardware\",\n \"U_VirtualMachinePowerState\", \"U_WindowsAntiSpywareInstalled\", \"U_WindowsAntiVirusInstalled\",\n \"U_WindowsAntiVirusRunning\", \"U_WindowsAntiVirusUpdate\", \"U_WindowsApplicationsInstalled\",\n \"U_WindowsCloudStorageInstalled\", \"U_WindowsHardDriveEncryption\",\n \"U_WindowsHardDriveEncryptionState\", \"U_WindowsInstantMessagingInstalled\", \"U_WindowsBehindNAT\",\n \"U_WindowsLoggedOn\", \"U_WindowsManageableDomain\", \"U_WindowsManageableDomainCurrent\",\n \"U_WindowsManageableLocal\", \"U_WindowsManageableSecureConnector\", \"U_WindowsPeerToPeerInstalled\",\n \"U_WindowsPeerToPeerRunning\", \"U_WindowsPersonalFirewall\", \"U_WindowsUpdateAgentInstalled\",\n \"U_WindowsUpdatesInstalledRebootRequired\", \"U_WindowsVersion\", \"U_WindowsVersionCPEFormat\",\n \"U_WindowsVersionFineTuned\", \"U_WLANAPLocation\", \"U_WLANAPName\", \"U_WLANAssociationStatus\",\n \"U_WLANAuthenticationMethod\", \"U_WLANBSSID\", \"U_WLANClientConnectivityStatus\",\n \"U_WLANClientRole\", \"U_WLANClientUserAgent\", \"U_WLANClientUserName\", \"U_WLANClientVLAN\",\n \"U_WLANCIPIP\", \"U_WLANDetectedClientType\", \"U_WLANManagingController\", \"U_WLANNetworkFunction\",\n \"U_WLANSSID\", \"U_CMDBRecID\"]\n\n# Map the field definitions to their individual object id's\nfield_name_to_field_busobjid_map = {}\nfor row in table_fields:\n field_name = row['name']\n field_id = row['fieldId']\n\n # Lets limit the payload to a minor set of fields. To adjust this add the field\n # to the table_field_limited list in order to include it\n if field_name in table_field_limited:\n field_name_to_field_busobjid_map[field_name] = field_id\n\ncmdb_fields = []\ndirty = True\n\n# Setup the payload to submit the data to the staging table\nfor field in field_name_to_field_busobjid_map:\n cmdb_field = {}\n fieldname = str(field)\n\n if fieldname.startswith(\"U_\") and fieldname != \"U_\":\n try:\n ct_fieldname = \"cherwell_\" + fieldname\n logging.debug(ct_fieldname)\n parameter_field = params[ct_fieldname]\n # check to make sure there is a passed value. If not do not include\n # in the payload. 
This is used to prevent sending of empty fields\n\n if parameter_field != \"Irresolvable\" and parameter_field != \"\":\n cmdb_field[\"value\"] = parameter_field\n cmdb_field[\"dirty\"] = dirty\n cmdb_field[\"name\"] = fieldname\n cmdb_field[\"fieldId\"] = field_name_to_field_busobjid_map[fieldname]\n cmdb_fields.append(cmdb_field)\n except:\n errormessage = \"Error for field: \" + ct_fieldname\n logging.debug(errormessage)\n\n# Put together the final parts of the payload\ncmdb_payload[\"fields\"] = cmdb_fields\ncmdb_payload[\"persist\"] = dirty\n\n# Set up the URL for the REST API to save the object\ntarget_url = url_call + '/api/V1/savebusinessobject'\n\n# Convert the payload to json in order to convert the values\n# to the correct format for json\npayload_data = json.dumps(cmdb_payload)\n\nlogging.debug(\"Payload for submission\")\nlogging.debug(payload_data)\n\n# Issue the request for the save\nrequest_response = urllib.request.Request(target_url, headers=header_info, data=bytes(payload_data, encoding=\"utf-8\"),\n method='POST')\n\n# Retrieve the response from the submission\nresponse = {}\nresp = urllib.request.urlopen(request_response, context=ssl_context)\n\n# For actions, the response object must have a field named \"succeeded\" to denote if the action suceeded or not.\n# The field \"troubleshooting\" is optional to display user defined messages in CounterACT for actions.\n\nif resp.getcode() == 200:\n response[\"succeeded\"] = True\n request_response = json.loads(resp.read())\n id = request_response['busObRecId']\n logging.debug(\"(UPDATE) The record id created was {}\".format(id))\n response[\"RecID\"] = id\nelse:\n response[\"succeeded\"] = False\n response[\"troubleshooting\"] = \"Failed action. Response code: {}\".format(resp.getcode())\n\nlogging.debug(\"Update to CMDB STAGE TABLE result: \")\nlogging.debug(response)\n","sub_path":"Cherwell/Cherwell 1.0.2/cherwell_update_asset_in_cmdb.py","file_name":"cherwell_update_asset_in_cmdb.py","file_ext":"py","file_size_in_byte":10224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"18297479","text":"from selenium import webdriver\nfrom time import sleep\n\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass bot:\n def __init__(self):\n self.driver = webdriver.Chrome(\"C:\\chromedriver_win32\\chromedriver.exe\")\n self.driver.get(\"https://play.typeracer.com/\")\n self.driver.maximize_window()\n sleep(5)\n self.driver.find_element_by_xpath('//*[@id=\"qcCmpButtons\"]/button[2]').click()\n sleep(5)\n self.driver.find_element_by_xpath(\n '//*[@id=\"dUI\"]/table/tbody/tr[2]/td[2]/div/div[1]/div/table/tbody/tr[3]/td/table/tbody/tr/td[2]/table/tbody/tr[1]/td/a').click()\n sleep(4)\n while True:\n try:\n sleep(0.5)\n text = self.driver.find_element_by_xpath(\n '//*[@id=\"gwt-uid-15\"]/table/tbody/tr[2]/td/table/tbody/tr[1]/td/table/tbody/tr[1]/td/div/div/span[1]').get_attribute(\n \"innerHTML\")\n print(text)\n self.driver.find_element_by_xpath('//*[@id=\"gwt-uid-15\"]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/input').send_keys(text)\n self.driver.find_element_by_xpath('//*[@id=\"gwt-uid-15\"]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/input').send_keys(Keys.SPACE)\n except:\n print('DONE')\n break\n\nbot()","sub_path":"bots/Selenium/tipingHack.py","file_name":"tipingHack.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"256177131","text":"'''\nCombine predictions of models using 
different feature sets.\nAuthor: Linhua (Alex) Wang\nDate: 12/27/2018\n'''\nfrom os.path import exists,abspath,isdir,dirname\nfrom sys import argv\nfrom os import listdir,environ\nfrom common import load_properties\nimport pandas as pd\nimport numpy as np\n\ndata_folder = abspath(argv[1])\n\nfns = listdir(data_folder)\nfns = [fn for fn in fns if fn != 'analysis']\nfns = [data_folder + '/' + fn for fn in fns]\nfeature_folders = [fn for fn in fns if isdir(fn)]\n\nfoldValues = range(int(argv[2]))\n\nprediction_dfs = []\nvalidation_dfs = []\n\nfor value in foldValues:\n\tprediction_dfs = []\n\tvalidation_dfs = []\n\tfor folder in feature_folders:\n\t\tfeature_name = folder.split('/')[-1]\n\t\tprediction_df = pd.read_csv(folder + '/predictions-%d.csv.gz' %value,compression='gzip')\n\t\tprediction_df.set_index(['id','label'],inplace=True)\n\t\tprediction_df.columns = ['%s.%s' %(feature_name,col) for col in prediction_df.columns]\n\n\t\tvalidation_df = pd.read_csv(folder + '/validation-%d.csv.gz' %value,compression='gzip')\n\t\tvalidation_df.set_index(['id','label'],inplace=True)\n\t\tvalidation_df.columns = ['%s.%s' %(feature_name,col) for col in validation_df.columns]\n\n\t\tprediction_dfs.append(prediction_df)\n\t\tvalidation_dfs.append(validation_df)\n\n\tprediction_dfs = pd.concat(prediction_dfs,axis=1)\n\tvalidation_dfs = pd.concat(validation_dfs,axis=1)\n\n\tprediction_dfs.to_csv(data_folder + '/predictions-%d.csv.gz' %value,compression='gzip')\n\tvalidation_dfs.to_csv(data_folder + '/validation-%d.csv.gz' %value,compression='gzip')\n","sub_path":"combine_feature_predicts.py","file_name":"combine_feature_predicts.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"177399643","text":"import time\nfrom collections import MutableMapping\n\nimport numpy as np\nfrom event_model import compose_run\n\nDTYPE_MAP = {\n np.ndarray: \"array\",\n int: \"number\",\n float: \"number\",\n np.float: \"number\",\n np.float32: \"number\",\n np.float64: \"number\",\n}\n\n\ndef get_dtype(xx):\n return DTYPE_MAP.get(type(xx), type(xx).__name__)\n\n\n_GLOBAL_SCAN_ID = 0\n\n\nclass CreateDocs(object):\n def __init__(self, data_keys, data_key_md=None, **kwargs):\n if data_key_md is None:\n data_key_md = {}\n if isinstance(data_keys, str):\n data_keys = (data_keys,)\n self.data_key_md = data_key_md\n self.descriptor_uid = None\n self.md = kwargs\n self.data_keys = data_keys\n self.start_uid = None\n\n self.desc_fac = None\n self.resc_fac = None\n self.stop_factory = None\n self.ev_fac = None\n self.evp_fac = None\n\n def start_doc(self, x):\n global _GLOBAL_SCAN_ID\n _GLOBAL_SCAN_ID += 1\n self.md.update(scan_id=_GLOBAL_SCAN_ID)\n bundle = compose_run(metadata=self.md, validate=False)\n new_start_doc, self.desc_fac, self.resc_fac, self.stop_factory = bundle\n self.start_uid = new_start_doc[\"uid\"]\n return new_start_doc\n\n def descriptor(self, x):\n\n # XXX: handle multiple descriptors?\n\n # If data_keys is none then we are working with a dict\n if self.data_keys is None:\n self.data_keys = tuple([k for k in x])\n\n # If the incoming data is a dict extract the data as a tuple\n if isinstance(x, MutableMapping):\n x = tuple([x[k] for k in self.data_keys])\n if not isinstance(x, tuple):\n tx = tuple([x])\n # XXX: need to do something where the data is a tuple!\n elif len(self.data_keys) == 1:\n tx = tuple([x])\n else:\n tx = x\n\n new_descriptor, self.ev_fac, self.evp_fac = self.desc_fac(\n name=\"primary\",\n 
data_keys={\n k: {\n \"source\": \"analysis\",\n # XXX: how to deal with this when xx is a future?\n \"dtype\": get_dtype(xx),\n \"shape\": getattr(xx, \"shape\", []),\n **self.data_key_md.get(k, {}),\n }\n for k, xx in zip(self.data_keys, tx)\n },\n hints={\"analyzer\": {\"fields\": sorted(list(self.data_keys))}},\n object_keys={k: [k] for k in self.data_keys},\n validate=False,\n )\n return new_descriptor\n\n def event(self, x):\n if isinstance(x, MutableMapping):\n x = tuple([x[k] for k in self.data_keys])\n if not isinstance(x, tuple) or (\n len(self.data_keys) == 1 and len(x) > 1\n ):\n tx = tuple([x])\n else:\n tx = x\n\n return self.ev_fac(\n timestamps={k: time.time() for k in self.data_keys},\n filled={k: True for k in self.data_keys},\n data={k: v for k, v in zip(self.data_keys, tx)},\n validate=False,\n )\n\n def stop(self, x):\n return self.stop_factory()\n\n def create_doc(self, name, x):\n # This is because ``start`` is a valid method for ``Stream``\n if name == \"start\":\n _name = \"start_doc\"\n else:\n _name = name\n return name, getattr(self, _name)(x)\n","sub_path":"shed/doc_gen.py","file_name":"doc_gen.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551681686","text":"import math as math\n\nimport dask\nimport dask.array as da\nimport numba\nimport numpy as np\nfrom replay_trajectory_classification.bins import atleast_2d\n\nSQRT_2PI = np.sqrt(2.0 * np.pi)\n\n\n@numba.vectorize(['float64(float64, float64, float64)'], nopython=True,\n cache=True)\ndef gaussian_pdf(x, mean, sigma):\n '''Compute the value of a Gaussian probability density function at x with\n given mean and sigma.'''\n return math.exp(-0.5 * ((x - mean) / sigma)**2) / (sigma * SQRT_2PI)\n\n\ndef estimate_position_distance(place_bin_centers, positions, position_std):\n return np.prod(\n gaussian_pdf(\n np.expand_dims(place_bin_centers, axis=0),\n np.expand_dims(positions, axis=1),\n position_std),\n axis=-1\n )\n\n\ndef estimate_position_density(place_bin_centers, positions, position_std):\n return np.mean(estimate_position_distance(\n place_bin_centers, positions, position_std), axis=0)\n\n\ndef estimate_log_intensity(density, occupancy, mean_rate):\n return np.log(mean_rate) + np.log(density) - np.log(occupancy)\n\n\ndef estimate_intensity(density, occupancy, mean_rate):\n '''\n\n Parameters\n ----------\n density : ndarray, shape (n_bins,)\n occupancy : ndarray, shape (n_bins,)\n mean_rate : float\n\n Returns\n -------\n intensity : ndarray, shape (n_bins,)\n\n '''\n return np.exp(estimate_log_intensity(density, occupancy, mean_rate))\n\n\ndef normal_pdf_integer_lookup(x, mean, std=20, max_value=3000):\n \"\"\"Fast density evaluation for integers by precomputing a hash table of\n values.\n\n Parameters\n ----------\n x : int\n mean : int\n std : float\n max_value : int\n\n Returns\n -------\n probability_density : int\n\n \"\"\"\n normal_density = gaussian_pdf(np.arange(-max_value, max_value), 0, std)\n\n return normal_density[(x - mean) + max_value]\n\n\ndef estimate_log_joint_mark_intensity(decoding_marks,\n encoding_marks,\n mark_std,\n place_bin_centers,\n encoding_positions,\n position_std,\n occupancy,\n mean_rate,\n max_mark_value=6000,\n set_diag_zero=False,\n position_distance=None):\n \"\"\"\n\n Parameters\n ----------\n decoding_marks : ndarray, shape (n_decoding_spikes, n_features)\n encoding_marks : ndarray, shape (n_encoding_spikes, n_features)\n mark_std : float or ndarray, shape 
(n_features,)\n place_bin_centers : ndarray, shape (n_position_bins, n_position_dims)\n encoding_positions : ndarray, shape (n_decoding_spikes, n_position_dims)\n position_std : float or ndarray, shape (n_position_dims,)\n occupancy : ndarray, shape (n_position_bins,)\n mean_rate : float\n is_track_interior : None or ndarray, shape (n_position_bins,)\n max_mark_value : int\n set_diag_zero : bool\n\n Returns\n -------\n log_joint_mark_intensity : ndarray, shape (n_decoding_spikes, n_position_bins)\n\n \"\"\"\n # mark_distance: ndarray, shape (n_decoding_spikes, n_encoding_spikes)\n mark_distance = np.prod(\n normal_pdf_integer_lookup(\n np.expand_dims(decoding_marks, axis=1),\n np.expand_dims(encoding_marks, axis=0),\n std=mark_std,\n max_value=max_mark_value\n ),\n axis=-1\n )\n\n if set_diag_zero:\n diag_ind = np.diag_indices_from(mark_distance)\n mark_distance[diag_ind] = 0.0\n\n n_encoding_spikes = encoding_marks.shape[0]\n\n if position_distance is None:\n position_distance = estimate_position_distance(\n place_bin_centers, encoding_positions, position_std)\n\n return estimate_log_intensity(\n mark_distance @ position_distance / n_encoding_spikes,\n occupancy,\n mean_rate)\n\n\ndef fit_multiunit_likelihood_integer_pass_position(position,\n multiunits,\n place_bin_centers,\n mark_std,\n position_std,\n is_track_interior=None,\n **kwargs):\n '''\n\n Parameters\n ----------\n position : ndarray, shape (n_time, n_position_dims)\n multiunits : ndarray, shape (n_time, n_marks, n_electrodes)\n place_bin_centers : ndarray, shape ( n_bins, n_position_dims)\n model : sklearn model\n model_kwargs : dict\n occupancy_model : sklearn model\n occupancy_kwargs : dict\n is_track_interior : None or ndarray, shape (n_bins,)\n\n Returns\n -------\n joint_pdf_models : list of sklearn models, shape (n_electrodes,)\n ground_process_intensities : list of ndarray, shape (n_electrodes,)\n occupancy : ndarray, (n_bins, n_position_dims)\n mean_rates : ndarray, (n_electrodes,)\n\n '''\n if is_track_interior is None:\n is_track_interior = np.ones((place_bin_centers.shape[0],),\n dtype=np.bool)\n position = atleast_2d(position)\n place_bin_centers = atleast_2d(place_bin_centers)\n\n not_nan_position = np.all(~np.isnan(position), axis=1)\n\n occupancy = np.zeros((place_bin_centers.shape[0],))\n occupancy[is_track_interior] = estimate_position_density(\n place_bin_centers[is_track_interior],\n position[not_nan_position],\n position_std)\n\n mean_rates = []\n ground_process_intensities = []\n encoding_marks = []\n encoding_positions = []\n position_at_spike_distances = []\n\n for multiunit in np.moveaxis(multiunits, -1, 0):\n\n # ground process intensity\n is_spike = np.any(~np.isnan(multiunit), axis=1)\n mean_rates.append(is_spike.mean())\n marginal_density = np.zeros((place_bin_centers.shape[0],))\n position_at_spike_distance = estimate_position_distance(\n place_bin_centers[is_track_interior],\n position[is_spike & not_nan_position],\n position_std)\n\n if is_spike.sum() > 0:\n marginal_density[is_track_interior] = np.mean(\n position_at_spike_distance, axis=0)\n\n position_at_spike_distances.append(position_at_spike_distance)\n ground_process_intensities.append(\n estimate_intensity(marginal_density, occupancy, mean_rates[-1])\n + np.spacing(1))\n\n encoding_marks.append(\n multiunit[is_spike & not_nan_position].astype(int))\n encoding_positions.append(position[is_spike & not_nan_position])\n\n summed_ground_process_intensity = np.sum(\n np.stack(ground_process_intensities, axis=0), axis=0, keepdims=True)\n\n 
return {\n 'encoding_marks': encoding_marks,\n 'encoding_positions': encoding_positions,\n 'summed_ground_process_intensity': summed_ground_process_intensity,\n 'position_at_spike_distances': position_at_spike_distances,\n 'occupancy': occupancy,\n 'mean_rates': mean_rates,\n 'mark_std': mark_std,\n 'position_std': position_std,\n **kwargs,\n }\n\n\ndef estimate_multiunit_likelihood_integer_pass_position(multiunits,\n encoding_marks,\n mark_std,\n place_bin_centers,\n encoding_positions,\n position_std,\n position_at_spike_distances,\n occupancy,\n mean_rates,\n summed_ground_process_intensity,\n max_mark_value=6000,\n set_diag_zero=False,\n is_track_interior=None,\n time_bin_size=1,\n chunks=None):\n '''\n\n Parameters\n ----------\n multiunits : ndarray, shape (n_time, n_marks, n_electrodes)\n place_bin_centers : ndarray, (n_bins, n_position_dims)\n joint_pdf_models : list of sklearn models, shape (n_electrodes,)\n ground_process_intensities : list of ndarray, shape (n_electrodes,)\n occupancy : ndarray, (n_bins, n_position_dims)\n mean_rates : ndarray, (n_electrodes,)\n\n Returns\n -------\n log_likelihood : (n_time, n_bins)\n\n '''\n\n if is_track_interior is None:\n is_track_interior = np.ones((place_bin_centers.shape[0],),\n dtype=np.bool)\n\n n_time = multiunits.shape[0]\n log_likelihood = (-time_bin_size * summed_ground_process_intensity *\n np.ones((n_time, 1)))\n\n multiunits = np.moveaxis(multiunits, -1, 0)\n log_joint_mark_intensities = []\n\n for multiunit, enc_marks, enc_pos, mean_rate, pos_at_spike_dist in zip(\n multiunits, encoding_marks, encoding_positions, mean_rates,\n position_at_spike_distances):\n is_spike = np.any(~np.isnan(multiunit), axis=1)\n decoding_marks = da.from_array(\n multiunit[is_spike].astype(np.int))\n log_joint_mark_intensities.append(\n decoding_marks.map_blocks(\n estimate_log_joint_mark_intensity,\n enc_marks,\n mark_std,\n place_bin_centers[is_track_interior],\n enc_pos,\n position_std,\n occupancy[is_track_interior],\n mean_rate,\n position_distance=da.from_array(pos_at_spike_dist),\n max_mark_value=max_mark_value,\n set_diag_zero=set_diag_zero,\n chunks=chunks\n ))\n\n for log_joint_mark_intensity, multiunit in zip(\n dask.compute(*log_joint_mark_intensities), multiunits):\n is_spike = np.any(~np.isnan(multiunit), axis=1)\n log_likelihood[np.ix_(is_spike, is_track_interior)] += (\n log_joint_mark_intensity + np.spacing(1))\n\n log_likelihood[:, ~is_track_interior] = np.nan\n\n return log_likelihood\n","sub_path":"replay_trajectory_classification/multiunit_likelihood_integer_pass_position.py","file_name":"multiunit_likelihood_integer_pass_position.py","file_ext":"py","file_size_in_byte":10577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"240566161","text":"import pygame\nimport constants\n\nclass TextClass(pygame.sprite.Sprite):\n '''Skapar objekt vi fyller med text'''\n\n def __init__(self):\n super().__init__()\n \n self.font = pygame.font.SysFont(\"04b19\", 50)\n self.image = self.font.render(\"\",True,constants.BLACK)\n self.rect = self.image.get_rect()\n self.posX = 0\n self.posY = 0\n def update(self,t):\n self.image = self.font.render(t,True,constants.BLACK)\n self.rect = self.image.get_rect()\n self.rect.x = self.posX\n self.rect.y = self.posY\n def die(self):\n self.kill()\n","sub_path":"textClass.py","file_name":"textClass.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"252351605","text":"import re\n\na = 'C|C++|Java|C#|Python|Javascript'\n\n\n# print(a.index('Python') > -1)\n# print('Python' in a)\n\nr = re.findall('PHP', a)\nprint(r)\n# 规则\nif len(r) > 0:\n print('字符串中包含PHP')\nelse:\n print('No')\n","sub_path":"10_reg_json/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"409102639","text":"'''\nCreated on 16 okt. 2016\n\n@author: Adrian\n'''\nfrom django.conf.urls import url\nfrom . import views\n\napp_name = 'docs'\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name = 'Index'),\n url(r'^tutor/', views.tutor, name='Tutor'),\n]","sub_path":"docs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"468156124","text":"def format_el_room_type_id(el_id):\n el_id = str(int(el_id))\n\n return max(4 - len(el_id), 0) * '0' + el_id\n\n\n# print(format_el_room_type_id('01'))\n# print(format_el_room_type_id('0100'))\n# print(format_el_room_type_id('000100'))\n\nA = [0, 60, 80, 105, 115, 130, 150]\nB = [0, 65, 85, 110, 140, 160, 175]\nC = [0, 75, 100, 120, 135, 150, 180]\nlength = len(A)\n\n\n# 找到最大的数的下标\ndef max_info(nums):\n a, b = -1, 0\n for i, v in enumerate(nums):\n if v > b:\n b = v\n a = i\n return a, b\n\n\ndef investment_problem():\n am = length * [0]\n bm = length * [0]\n cm = length * [0]\n # 贪心法, 从价值密度上看\n for i in range(1, length):\n am[i] = A[i] / i\n bm[i] = B[i] / i\n cm[i] = C[i] / i\n # abc = [am, bm, cm]\n # for j in range(1, length):\n # x, y = max_info([am[j], bm[j], cm[j]])\n\n\n# 最优解 1,4,1\ndef investment(n, m):\n # 初始化,在只有一个项目的情况下\n for x in range(m):\n # 投资第一个项目\n dp[0][x] = ABC[0][x]\n status[0][x] = x\n\n # 投资前i个项目\n for i in range(1, n):\n\n # 前i个项目总投入的钱数j\n for j in range(m):\n\n # 投资当前项目i的钱数\n for k in range(j):\n\n tmp = ABC[i][k] + dp[i - 1][j - k]\n if tmp > dp[i][j]:\n # 更新当前的最优解\n dp[i][j] = tmp\n # 更新标志函数\n status[i][j] = k\n return dp[n - 1][m - 1]\n\n\ndef print_result(n, m):\n inv = [0] * n\n inv[n - 1] = status[n - 1][m - 1]\n for i in range(n - 2, -1, -1):\n t = 0\n for j in range(n - 1, i, -1):\n t += inv[j]\n inv[i] = status[i][m - t]\n\n for x in range(n):\n # print(\"Invest\", inv[x], \"for project\", x + 1)\n pass\n\n\ndef dp_blocks(i, j, k):\n if i == j:\n d[i][j][k] = (length[j] + k) ** 2\n return d[i][j][k]\n\n # 备忘录状态查询\n if d[i][j][k] > 0:\n return d[i][j][k]\n\n # 暂时先取第一种方式的结果作为d[i][j][len]的值\n d[i][j][k] = dp_blocks(i, j - 1, 0) + (length[j] + k) * (length[j] + k)\n\n # 按照第二种方式计算,并根据情况更新d[i][j][len]的值\n for p in range(i, j):\n if color[p] == color[j]:\n tmp = dp_blocks(i, p, length[j] + k) + dp_blocks(p + 1, j - 1, 0)\n if tmp > d[i][j][k]:\n d[i][j][k] = tmp\n\n return d[i][j][k]\n\n\nif __name__ == '__main__':\n color = [0, 1, 2, 3, 1]\n length = [0, 1, 4, 3, 1]\n\n # 初始化一个足够大的区域\n d = [[[0] * 16] * 16] * 16\n # print(d)\n\n score = dp_blocks(1, 4, 0)\n print(score)\n\n\n\n\n\n\n\n\n ############\n n = 3\n m = 6\n ABC = [A, B, C]\n dp = [[0] * m] * n\n status = [[0] * m] * n\n\n benefit = investment(n=n, m=m)\n # print(\"Total benefit :\", benefit)\n print_result(3, 6)\n","sub_path":"test/20190415.py","file_name":"20190415.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"23266266","text":"from celery import Celery\nfrom django.core.mail import 
send_mail\nfrom django.conf import settings\nimport django\nimport os\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')\ndjango.setup()\n# 创建一个Celery实例,第一个参数字符串随便填,第二个参数指定中间人\napp = Celery(\"celery_tasks.tasks\",broker=\"redis://127.0.0.1:6379/1\")\n\n\n# 发送邮件函数\n@app.task\ndef send_register_active_email(to_email,username,token):\n subject = \"天天生鲜欢迎您\"\n message = \"\"\n sender = settings.EMAIL_FROM\n recever = [to_email, ]\n html_message = \"
%s欢迎您注册天天生鲜,账号激活地址:
http://127.0.0.1:8000/user/active/%s/\" % (\n username, token, token)\n send_mail(subject, message, sender, recever, html_message=html_message)","sub_path":"dailyfresh/celery_tasks/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"542467347","text":"from unittest2 import TestCase\nimport os\n\nfrom pyramid import testing\nfrom mozsvc.config import load_into_settings\nfrom mozsvc.plugin import load_and_register\nfrom sqlalchemy.exc import IntegrityError\n\nfrom tokenserver import logger\nfrom tokenserver.assignment import INodeAssignment\n\n\nclass TestLDAPNode(TestCase):\n\n def setUp(self):\n super(TestCase, self).setUp()\n\n # get the options from the config\n self.config = testing.setUp()\n self.ini = os.path.join(os.path.dirname(__file__),\n 'test_sql.ini')\n settings = {}\n load_into_settings(self.ini, settings)\n self.config.add_settings(settings)\n\n # instantiate the backend to test\n self.config.include(\"tokenserver\")\n load_and_register(\"tokenserver\", self.config)\n self.backend = self.config.registry.getUtility(INodeAssignment)\n\n # adding a node with 100 slots\n self.backend._safe_execute(\n \"\"\"insert into nodes (`node`, `service`, `available`,\n `capacity`, `current_load`, `downed`, `backoff`)\n values (\"phx12\", \"sync\", 100, 100, 0, 0, 0)\"\"\")\n self._sqlite = self.backend._engine.driver == 'pysqlite'\n\n def tearDown(self):\n if self._sqlite:\n filename = self.backend.sqluri.split('sqlite://')[-1]\n if os.path.exists(filename):\n os.remove(filename)\n else:\n self.backend._safe_execute('delete from nodes')\n self.backend._safe_execute('delete from user_nodes')\n\n def test_get_node(self):\n\n unassigned = None, None\n self.assertEquals(unassigned,\n self.backend.get_node(\"tarek@mozilla.com\", \"sync\"))\n\n res = self.backend.allocate_node(\"tarek@mozilla.com\", \"sync\")\n\n if self._sqlite:\n wanted = (1, u'phx12')\n else:\n wanted = (0, u'phx12')\n\n self.assertEqual(res, wanted)\n self.assertEqual(wanted,\n self.backend.get_node(\"tarek@mozilla.com\", \"sync\"))\n","sub_path":"tokenserver/tests/test_backend_sql.py","file_name":"test_backend_sql.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"648752177","text":"\"\"\"Module for the categorization and caging of a donor.\"\"\"\nimport copy\nimport logging\n\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom application.exceptions.exception_ultsys_user import UltsysUserNotFoundError\nfrom application.flask_essentials import database\nfrom application.flask_essentials import redis_queue\nfrom application.helpers.build_models import build_model_exists\nfrom application.helpers.build_models import build_model_new\nfrom application.helpers.general_helper_functions import flatten_user_dict\nfrom application.helpers.general_helper_functions import munge_address\nfrom application.helpers.general_helper_functions import validate_user_payload\nfrom application.helpers.model_serialization import from_json\nfrom application.helpers.ultsys_user import find_ultsys_user\nfrom application.models.caged_donor import CagedDonorModel\nfrom application.models.gift import GiftModel\nfrom application.models.queued_donor import QueuedDonorModel\nfrom application.schemas.caged_donor import CagedDonorSchema\n\n\ndef categorize_donor( donor_dict ):\n \"\"\"The main function that queries the database and 
matches each user against the donor to get a category.\n\n donor_dict = {\n \"id\": None,\n \"user_first_name\": user_first_name,\n \"user_last_name\": user_last_name,\n \"user_zipcode\": user_zipcode,\n \"user_address\": user_address,\n \"user_email_address\": user_email_address,\n \"user_phone_number\": user_phone_number\n }\n\n A query is made to the database for all users with the donor's last name. Then a loop is made over all the users\n returned and matches made against the fields used for caging:\n category = [ first_name, last_name, zipcode, street_address, email, phone_number ]\n A complete match would look like [ 1, 1, 1, 1, 1, 1, 1 ], and in this case this would indicate the donor exists.\n\n The first three items in the list [ first_name, last_name, zipcode ] are the base characteristics. The last\n three [ street_address, email, phone_number ] are the discriminators. Given the matches for a particular user\n the category matrix is passed to the function:\n\n category_weight( category_test_matrix )\n\n which is a simple, and yet a flexible/configurable function, for determining the category of a donor. For example,\n if the category matrix looks like [ 1, 1, 1, 0, 0, 0 ] the weighting function uses the base fields to determine\n what the discriminators should sum to to assign a category. In this case the sum is 0 and would suggest that, from\n extensive studies on caging across the Ultsys user database, that the donor should be caged. A full explanation\n with supporting data is given on the project Wiki.\n\n The matrix can be extended to include weighting to each field if needed. Currently, the weighting is strict and\n requires a match on all fields for the user to be categorized as existing. An alternative may be to match either\n on the email address or phone number and street.\n\n :param donor_dict: The donor dictionary from the front-end.\n :return: A category: new, cage, or exists.\n \"\"\"\n\n category_definitions = { 0: 'new', 1: 'cage', 2: 'exists', 3: 'caged' }\n\n # Check to see if the donor has a user ID.\n if 'id' in donor_dict and donor_dict[ 'id' ]:\n # Function is_user returns category_weight = 2 if found ( exists ), along with user[ 1 ] = [ user_id ]\n # If category_weight = 1 it might be because it found duplicate users: = [ user_id1, user_id2 ]\n is_user = check_if_user( donor_dict )\n return category_definitions[ is_user[ 0 ] ], is_user[ 1 ]\n\n # Check to see if the donor has a registered email and if so pull user ID.\n if 'user_email_address' in donor_dict and donor_dict[ 'user_email_address' ]:\n query_parameters = {\n 'search_terms': { 'email': { 'eq': donor_dict[ 'user_email_address' ] } },\n 'sort_terms': [ ]\n }\n users_with_given_email = find_ultsys_user( query_parameters )\n if users_with_given_email:\n ultsys_user = users_with_given_email[ 0 ]\n return category_definitions[ 2 ], [ ultsys_user[ 'ID' ] ]\n\n # Check to see if the donor has already been caged.\n if check_if_caged( donor_dict ) == 3:\n return category_definitions[ check_if_caged( donor_dict ) ], []\n\n # If they don't already exist and are not previously caged: cage the donor.\n query_parameters = {\n 'search_terms': { 'lastname': { 'eq': donor_dict[ 'user_last_name' ] } },\n 'sort_terms': []\n }\n users_by_last_name = find_ultsys_user( query_parameters )\n\n # If no last names exist this is a new donor.\n if not users_by_last_name:\n return category_definitions[ 0 ], []\n\n donor_street = munge_address( donor_dict[ 'user_address' ] )\n\n user_ids = []\n exists_user_ids = []\n 
maximum_weight = 0\n for user in users_by_last_name:\n # The identifier in Drupal is uppercase.\n if user[ 'ID' ] not in user_ids:\n # Capture the user so that it isn't considered more than once.\n user_ids.append( user[ 'ID' ] )\n\n # Initialize the category matrix to no match on any fields: [ 0, 0, 0, 0, 0, 0 ].\n category_match_matrix = [ 0 ] * 6\n\n # Set a match on last name since query matches here.\n category_match_matrix[ 1 ] = 1\n\n # Do some basic transformations to the address: set lowercase, remove whitespace and punctuation.\n user_street = munge_address( user[ 'address' ] )\n\n # Find matches across match matrix.\n if donor_dict[ 'user_first_name' ].lower() == user[ 'firstname' ].lower():\n category_match_matrix[ 0 ] = 1\n if donor_dict[ 'user_zipcode' ] == user[ 'zip' ] and \\\n donor_dict[ 'user_zipcode' ] != 0:\n category_match_matrix[ 2 ] = 1\n if donor_street == user_street and donor_street != '':\n category_match_matrix[ 3 ] = 1\n if donor_dict[ 'user_email_address' ].lower() == user[ 'email' ].lower() and \\\n donor_dict[ 'user_email_address' ] != '':\n category_match_matrix[ 4 ] = 1\n if donor_dict[ 'user_phone_number' ] == user[ 'phone' ] and \\\n donor_dict[ 'user_phone_number' ] != '0':\n category_match_matrix[ 5 ] = 1\n\n # After matching the user then categorize them as new ( 0 ), cage ( 1 ) or exists ( 2 ).\n weight = category_weight( category_match_matrix )\n\n # Keep track of the maximum weight found.\n maximum_weight = track_maximum_weight( weight, maximum_weight, exists_user_ids, user[ 'ID' ] )\n\n return category_definitions[ maximum_weight ], exists_user_ids\n\n\ndef track_maximum_weight( weight, maximum_weight, exists_user_ids, user_id ):\n \"\"\"Function to track the maximum weight.\n\n :param weight: The current weight coming from the category_match_matrix.\n :param maximum_weight: The maximum weight found over all iterated users.\n :param exists_user_ids: The user ID's that have been found where donor is the user.\n :param user_id: The current user ID in the iteration.\n :return: Maximum weight\n \"\"\"\n if weight > maximum_weight:\n maximum_weight = weight\n\n # Add to ID's and downgrade maximum weight if more than one user exactly matches the donor.\n if weight == 2:\n exists_user_ids.append( user_id )\n if len( exists_user_ids ) > 1:\n maximum_weight = 1\n\n return maximum_weight\n\n\ndef category_weight( category_test_matrix ):\n \"\"\"A simple function for determining the category of a donor.\n\n The category_test_matrix list [ first_name, last_name, zipcode, street_address, email, phone_number ] is passed\n in, and then spliced into:\n base fields: [ first_name, last_name, zipcode ]\n discriminator fields: [ street_address, email, phone_number ]\n\n :param category_test_matrix: The list [ first_name, last_name, zipcode, street_address, email, phone_number ]\n :return: weight which is an integer 0, 1, or 2\n \"\"\"\n\n # Base fields are: [ first, last, zip ]\n # Discriminators: [ street, email, phone ]\n weight = 0\n\n # Use slice operator to grab parts of the category matrix. 
Some useful shortcuts:\n # The notation category_test_matrix[ 3: ] takes from the 3rd element to the end.\n # category_test_matrix[ :-2 ] on [ 1, 1, 1, 0, 0, 0 ] returns [ 1, 1, 1, 0 ], or dropping last 2 elements.\n base_fields = category_test_matrix[ 0:3 ]\n discriminators = category_test_matrix[ 3: ]\n sum_discriminators = sum( discriminators )\n if base_fields == [ 1, 1, 1 ] and sum_discriminators >= 1:\n if sum_discriminators == 3:\n weight = 2\n else:\n weight = 1\n elif base_fields == [ 1, 1, 1 ] and sum_discriminators == 0:\n weight = 1\n elif base_fields == [ 0, 1, 1 ] and sum_discriminators >= 1:\n weight = 1\n elif base_fields == [ 0, 1, 0 ] and sum_discriminators >= 1:\n weight = 1\n\n return weight\n\n\ndef check_if_caged( donor_dict ):\n \"\"\"See if the donor was previously caged.\n\n :param donor_dict = {\n \"id\": None,\n \"user_first_name\": user_first_name,\n \"user_last_name\": user_last_name,\n \"user_zipcode\": user_zipcode,\n \"user_address\": user_address,\n \"user_email_address\": user_email_address,\n \"user_phone_number\": user_phone_number\n }\n :return: 3 for caged and 0 for not caged.\n \"\"\"\n\n street = munge_address( donor_dict[ 'user_address' ] )\n\n caged_donor = CagedDonorModel.query \\\n .filter_by( user_first_name=donor_dict[ 'user_first_name' ] ) \\\n .filter_by( user_last_name=donor_dict[ 'user_last_name' ] ) \\\n .filter_by( user_zipcode=donor_dict[ 'user_zipcode' ] )\n for caged_query in caged_donor.all():\n caged_address = munge_address( caged_query.user_address )\n if caged_address.strip() == street and street != '':\n return 3\n return 0\n\n\ndef check_if_user( donor_dict ):\n \"\"\"See if the donor exists.\n\n :param donor_dict = {\n \"id\": None,\n \"user_first_name\": user_first_name,\n \"user_last_name\": user_last_name,\n \"user_zipcode\": user_zipcode,\n \"user_address\": user_address,\n \"user_email_address\": user_email_address,\n \"user_phone_number\": user_phone_number\n }\n :return: 2 for caged and 0 for not caged.\n \"\"\"\n\n # A user ID is said to exist and so if one isn't returned there is a problem.\n query_parameters = {\n 'search_terms': { 'ID': { 'eq': donor_dict[ 'id' ] } },\n 'sort_terms': []\n }\n user_by_id = find_ultsys_user( query_parameters )\n if user_by_id:\n # We are returning a category here: ( category_weight, [ user_id ] )\n return 2, [ user_by_id[ 0 ][ 'ID' ] ]\n raise UltsysUserNotFoundError\n\n\n@redis_queue.job\ndef redis_queue_caging( user, transactions, app_config_name ):\n \"\"\"A function for queueing a caging operation and updating models with caged donor or Ultsys user.\n\n Here is what the user looks like:\n\n user: {\n \"id\": null,\n \"user_address\": {\n \"user_first_name\": \"Aaron\",\n \"user_last_name\": \"Peters\",\n \"user_zipcode\": \"22202\",\n \"user_address\": \"1400 Crystal City Dr\",\n \"user_city\": \"Arlington\",\n \"user_state\": \"VA\",\n \"user_email_address\": \"apeters@numbersusa.com\",\n \"user_phone_number\": \"7038168820\"\n },\n \"billing_address\": {\n \"billing_first_name\": \"Aaron\",\n \"billing_last_name\": \"Peters\",\n \"billing_zipcode\": \"22202\",\n \"billing_address\": \"1400 Crystal City Dr\",\n \"billing_city\": \"Arlington\",\n \"billing_state\": \"VA\",\n \"billing_email_address\": \"apeters@numbersusa.com\",\n \"billing_phone_number\": \"7038168820\"\n }\n 'payment_method_nonce': 'tokencc_bc_string',\n 'category': 'queued',\n 'customer_id': '476914249',\n 'gift_id': 3,\n 'searchable_id': UUID( 'd1aeac47-17ce-46ca-9d45-3f540f7a1d85' ),\n 'queued_donor_id': 3\n 
}\n\n :param user: The user dictionary\n :param transactions: The list of transactions. If this is a Braintree sale, for example, there will be one\n transaction in the list. On the other hand if this is an administrative sale where the method used is\n a check or money order there will be 2 transactions.\n :param app_config_name: The configuration ( PROD, DEV, TEST ) that the app is running.\n :return:\n \"\"\"\n\n # This is getting pushed onto the queue outside an application context: create it here.\n from application.app import create_app # pylint: disable=cyclic-import\n app = create_app( app_config_name ) # pylint: disable=C0103\n\n with app.app_context():\n # Categorize the user: new, cage, caged, exists.\n # The variable category is a tuple:\n # category[ 0 ]: the category of the donor.\n # category[ 1 ]: if category is 'exists' this will hold an ID like [ 1234 ].\n # If category[ 0 ] is 'cage' it might be donor matched 2 or more users: category[ 1 ] = [ 1234, 5678 ].\n # If category[ 0 ] is 'exists' then len( category[ 1 ] ) == 1.\n\n # This is a fix to a mismatch between what the back-end expects and what the front-end is passing.\n # The fix is used at the donate controller to correct the mismatch. It is also used at the reprocess\n # queued donor to create the user dictionary expected for caging. Finally, here we use it because\n # A donor who never got into the queued donor table will be re-queued and can be reprocessed at this point.\n user = validate_user_payload( user )\n\n donor_dict = copy.deepcopy( user )\n donor_dict = flatten_user_dict( donor_dict )\n category = categorize_donor( donor_dict )\n logging.debug( '***** category: %s', category )\n\n gross_gift_amount = str( transactions[ 0 ][ 'gross_gift_amount' ] )\n\n if category[ 0 ] == 'exists':\n ultsys_user_id = category[ 1 ][ 0 ]\n user[ 'id' ] = ultsys_user_id\n build_model_exists( user, gross_gift_amount )\n gift_id = user[ 'gift_id' ]\n gift_model = GiftModel.query.filter_by( id=gift_id ).one_or_none()\n gift_model.user_id = ultsys_user_id\n QueuedDonorModel.query.filter_by( id=user[ 'queued_donor_id' ] ).delete()\n elif category[ 0 ] == 'cage' or category[ 0 ] == 'caged':\n gift_id = user[ 'gift_id' ]\n gift_model = GiftModel.query.filter_by( id=gift_id ).one_or_none()\n gift_model.user_id = -1\n caged_donor_dict = user[ 'user_address' ]\n caged_donor_dict[ 'gift_searchable_id' ] = gift_model.searchable_id\n caged_donor_dict[ 'campaign_id' ] = user[ 'campaign_id' ]\n caged_donor_dict[ 'customer_id' ] = user[ 'customer_id' ]\n caged_donor_model = from_json( CagedDonorSchema(), caged_donor_dict, create=True )\n caged_donor_model.data.gift_id = gift_id\n database.session.add( caged_donor_model.data )\n QueuedDonorModel.query.filter_by( id=user[ 'queued_donor_id' ] ).delete()\n elif category[ 0 ] == 'new':\n ultsys_user_id = build_model_new( user, gross_gift_amount )\n user[ 'id' ] = ultsys_user_id\n gift_id = user[ 'gift_id' ]\n gift_model = GiftModel.query.filter_by( id=gift_id ).one_or_none()\n gift_model.user_id = ultsys_user_id\n QueuedDonorModel.query.filter_by( id=user[ 'queued_donor_id' ] ).delete()\n\n try:\n database.session.commit()\n except SQLAlchemyError as error:\n database.session.rollback()\n raise error\n","sub_path":"caging.py","file_name":"caging.py","file_ext":"py","file_size_in_byte":15827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"236854075","text":"from sys import stdin\nfrom pandas import read_table\n\n\ndef 
read_indata(input_file, noheader, sep=\"\\t\"):\n\n \"\"\"Reads a sep-delimited file and returns a dataframe.\n\n Utility function to account for the fact that there are three types\n of possible delimited files you want to handle: those with a full header,\n those with no header and those lacking a header in the index column.\n \"\"\"\n\n infile = stdin if input_file == \"-\" else input_file\n\n header_row = None if noheader else 0\n\n df = read_table(infile, header=header_row, dtype=str,\n sep=sep)\n\n df = _turn_index_into_regular_column_if_it_contains_data(df)\n\n return df\n\n\ndef _turn_index_into_regular_column_if_it_contains_data(df):\n\n if not all(df.index == range(len(df))):\n df = df.reset_index()\n\n return df\n","sub_path":"ebs/read_indata.py","file_name":"read_indata.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"286918116","text":"import sys\nfrom os import listdir\n\nemotionDict = {ord('W') - ord('A'): 0, ord('L') - ord('A'): 1, ord('E') - ord('A'): 2, ord('A') - ord('A'): 3, ord('F') - ord('A'): 4, ord('T') - ord('A'): 5, ord('N') - ord('A'): 6}\nemotions = [\"anger\", \"boredom\", \"disgust\", \"anxiety/fear\", \"happiness\", \"sadness\", \"neutral version\"]\n\noriPath = [\"10fold\", \"5fold\"]\nfor path in oriPath:\n for f in listdir(path):\n testLabel = []\n resultLabel = []\n confusionMatrix = [[0] * 7 for _ in range(7)]\n oriName = path + \"/\" + f\n resultName = \"result/\" + path + \"/\" + \"Result_\" + f\n\n # read file\n with open(oriName, \"r\") as fin:\n lines = fin.readlines()\n for line in lines:\n testLabel.append(line.split(\" \")[0])\n with open(resultName, \"r\") as fin:\n fin.readline() # skip line 1\n lines = fin.readlines()\n for line in lines:\n resultLabel.append(line.split(\" \")[0])\n # caculate the confusion martix\n for i in range(len(testLabel)):\n x = testLabel[i]\n y = resultLabel[i]\n x_ = emotionDict[int(x)]\n y_ = emotionDict[int(y)]\n confusionMatrix[x_][y_] += 1\n # ouptut format\n outputTxt = \" |\"\n for emotion in emotions:\n outputTxt += \"%15s|\" % (emotion)\n outputTxt += \"\\n\"\n for i in range(len(confusionMatrix)):\n sum_ = sum(confusionMatrix[i])\n outputTxt += \"%15s(%3d/%3d)|\" % (emotions[i], confusionMatrix[i][i], sum_)\n for j in confusionMatrix[i]:\n accuracy = float(j) / sum_ if sum_ != 0 else 0\n text = \" %3d (%.2f)\" % (j, accuracy)\n outputTxt += text + \"|\"\n outputTxt += (\"\\n\")\n with open(\"confusion matrix/\" + path + \"/\" + f, \"w\") as fout:\n fout.write(outputTxt)","sub_path":"script/confusionMatrix.py","file_name":"confusionMatrix.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"269005764","text":"from sqlalchemy import Column, Integer, String, ForeignKey, Float, BLOB\nfrom sqlalchemy.orm import relationship\n\nfrom src.database.db import Base\n\n\nclass Products(Base):\n __tablename__ = 'products'\n id = Column(Integer, primary_key=True, autoincrement=True)\n product_name = Column(String(50))\n image1 = Column(BLOB)\n image2 = Column(BLOB)\n image3 = Column(BLOB)\n price = Column(Float)\n product_des = Column(String(150),nullable=True)\n brand_id = Column(Integer,ForeignKey(\"brands.id\"))\n brand_rel = relationship(\"Brands\")\n def __init__(self,\n product_name=None,\n image1=None,\n image2=None,\n image3=None,\n price =None,\n product_des= None,\n brand_id = None,):\n self.product_name = 
product_name\n self.image1 = image1\n self.image2 = image2\n self.image3 = image3\n self.price= price\n self.product_des = product_des\n self.brand_id = brand_id\n\n\n def toDict(self):\n u = {\"product_id\":self.id,\"product_name\":self.product_name,\"image1\": str(self.image1), \"image2\": str(self.image2), \"image3\": str(self.image3),\n \"price\":self.price,\"product_des\":self.product_des,\"brand_id\":self.brand_id}\n if(self.brand_rel):\n u[\"brand_name\"] = self.brand_rel.brand_name\n return u\n","sub_path":"src/models/Products.py","file_name":"Products.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"22105812","text":"# pylint: disable=missing-docstring\r\ntry:\r\n import simplejson as json\r\nexcept ImportError:\r\n import json\r\nimport os\r\nimport subprocess\r\nimport random\r\nfrom distutils import spawn\r\nimport pickle\r\nimport argparse\r\nimport shutil\r\nimport time\r\nfrom abc import ABCMeta, abstractmethod\r\nimport ipaddress\r\n\r\nclass TestLinkUtilization():\r\n def __init__(self):\r\n self.ip4net1 = ipaddress.IPv4Network('10.10.1.0/24')\r\n self.ip4net2 = ipaddress.IPv4Network('10.10.2.0/24')\r\n self.ip4net3 = ipaddress.IPv4Network('10.10.3.0/24')\r\n self.num_host = 3\r\n self.num_addrs = 75\r\n self.num_cases = 300\r\n self.case_list = [(0, 0)]*(self.num_cases)\r\n self.ip_list = []\r\n for i in range(1, self.num_addrs+1):\r\n self.ip_list.append(self.ip4net1[i])\r\n for i in range(1, self.num_addrs+1):\r\n self.ip_list.append(self.ip4net2[i])\r\n for i in range(1, self.num_addrs+1):\r\n self.ip_list.append(self.ip4net3[i])\r\n\r\n def _gen_node_pairs(self):\r\n count = 0\r\n total_attempts = 0\r\n while count < self.num_cases:\r\n total_attempts += 1\r\n case = (random.choice(range(self.num_addrs*self.num_host)),\r\n random.choice(range(self.num_addrs*self.num_host)))\r\n while case[0] == case[1]:\r\n case = (random.choice(range(self.num_addrs*self.num_host)),\r\n random.choice(range(self.num_addrs*self.num_host)))\r\n if case not in self.case_list:\r\n self.case_list[count] = case\r\n count += 1\r\n\r\n def _stor_host_cases(self):\r\n fl1 = open(\"chm-cases\", \"w\")\r\n fl2 = open(\"clab-cases\", \"w\")\r\n fl3 = open(\"banter-cases\", \"w\")\r\n for case in self.case_list:\r\n ip_case = (self.ip_list[case[0]], self.ip_list[case[1]])\r\n if case[0] < self.num_addrs:\r\n inst = (case[0] + 1) % (self.num_addrs+1)\r\n fl1.write(\"{0:0>3d} {1} {2}\\n\".format(inst, str(ip_case[0]), str(ip_case[1])))\r\n elif case[0] < 2*self.num_addrs:\r\n inst = (case[0] - self.num_addrs+1) % (self.num_addrs+1)\r\n fl2.write(\"{0:0>3d} {1} {2}\\n\".format(inst, str(ip_case[0]), str(ip_case[1])))\r\n elif case[0] < 3*self.num_addrs:\r\n inst = (case[0] - (2*self.num_addrs)+1) % (self.num_addrs+1)\r\n fl3.write(\"{0:0>3d} {1} {2}\\n\".format(inst, str(ip_case[0]), str(ip_case[1])))\r\n fl1.close()\r\n fl2.close()\r\n fl3.close()\r\n \r\n #def _stor_host_cases(self):\r\n # fl1 = open(\"chm-cases\", \"w\")\r\n # fl2 = open(\"banter-cases\", \"w\")\r\n # for case in self.case_list:\r\n # ip_case = (self.ip_list[case[0]], self.ip_list[case[1]])\r\n # if case[0] < self.num_addrs:\r\n # inst = (case[0] + 1) % (self.num_addrs+1)\r\n # fl1.write(\"{0:0>3d} {1} {2}\\n\".format(inst, str(ip_case[0]), str(ip_case[1])))\r\n # elif case[0] < 2*self.num_addrs:\r\n # inst = (case[0] - self.num_addrs+1) % (self.num_addrs+1)\r\n # fl2.write(\"{0:0>3d} {1} {2}\\n\".format(inst, str(ip_case[0]), 
str(ip_case[1])))\r\n # fl1.close()\r\n # fl2.close()\r\n\r\n def create_input_files(self):\r\n self._gen_node_pairs()\r\n self._stor_host_cases()\r\n\r\n def create_result_report(self):\r\n base_dir = \"test-link-utilization\"\r\n for algo in [\"STP\", \"BF\"]:\r\n lu_dir = \"{0}/{1}\".format(base_dir, algo)\r\n fln = \"{0}/{1}/{1}-{2}\".format(base_dir, algo, \"exp-results.csv\")\r\n fl4 = open(fln, \"w\")\r\n for hostname in [\"banter\", \"chm\", \"clab\"]:\r\n cases_fn = \"{0}/{1}/{1}{2}\".format(lu_dir, hostname, \"-cases\")\r\n ping_fn = \"{0}/{1}/{2}\".format(lu_dir, hostname, \"ping-results.log\")\r\n iperf_fn = \"{0}/{1}/{2}\".format(lu_dir, hostname, \"iperf-results.log\")\r\n case_fle = open(cases_fn, \"r\")\r\n ping_fle = open(ping_fn, \"r\")\r\n iperf_fle = open(iperf_fn, \"r\")\r\n case_lines = case_fle.readlines()\r\n iperf_lines = iperf_fle.readlines()\r\n ping_lines = ping_fle.readlines()\r\n pos = 0\r\n iperf_pos = 0\r\n for cln in case_lines:\r\n ipaddrs = cln.strip().rsplit(\" \", 3)\r\n lat = ping_lines[((pos+1)*7)].strip().rsplit(\" \", 5)\r\n if lat[0] != \"rtt\":\r\n latv = \"0,0,0,0\"\r\n bndwv = \"0\"\r\n else:\r\n latv = lat[3].replace(\"/\", \",\")\r\n bndw = iperf_lines[iperf_pos].strip().rsplit(\",\")\r\n bndwv = bndw[8]\r\n iperf_pos += 1\r\n result = \"{0},{1},{2},{3}\\n\".format(ipaddrs[1], ipaddrs[2], bndwv, latv)\r\n fl4.write(result)\r\n pos += 1\r\n case_fle.close()\r\n ping_fle.close()\r\n iperf_fle.close()\r\n fl4.close()\r\n\r\nclass Testbed():\r\n __metaclass__ = ABCMeta\r\n\r\n LAUNCH_WAIT = 60\r\n BATCH_SZ = 5\r\n VIRT = NotImplemented\r\n APT = spawn.find_executable(\"apt-get\")\r\n CONTAINER = NotImplemented\r\n BF_VIRT_IMG = \"kcratie/edge-vpn:20.7\"\r\n\r\n def __init__(self, exp_dir=None):\r\n parser = argparse.ArgumentParser(description=\"Configures and runs EdgeVPN Testbed\")\r\n parser.add_argument(\"--clean\", action=\"store_true\", default=False, dest=\"clean\",\r\n help=\"Removes all generated files and directories\")\r\n parser.add_argument(\"--configure\", action=\"store_true\", default=False, dest=\"configure\",\r\n help=\"Generates the config files and directories\")\r\n parser.add_argument(\"-v\", action=\"store_true\", default=False, dest=\"verbose\",\r\n help=\"Print testbed activity info\")\r\n parser.add_argument(\"--range\", action=\"store\", dest=\"range\",\r\n help=\"Specifies the testbed start and end range in format #,#\")\r\n parser.add_argument(\"--run\", action=\"store_true\", default=False, dest=\"run\",\r\n help=\"Runs the currently configured testbed\")\r\n parser.add_argument(\"--end\", action=\"store_true\", default=False, dest=\"end\",\r\n help=\"End the currently running testbed\")\r\n parser.add_argument(\"--info\", action=\"store_true\", default=False, dest=\"info\",\r\n help=\"Displays the current testbed configuration\")\r\n parser.add_argument(\"--setup\", action=\"store_true\", default=False, dest=\"setup\",\r\n help=\"Installs software requirements. 
Requires run as root.\")\r\n parser.add_argument(\"--pull\", action=\"store_true\", default=False, dest=\"pull\",\r\n help=\"Pulls the {} image from docker hub\"\r\n .format(Testbed.BF_VIRT_IMG))\r\n parser.add_argument(\"--lxd\", action=\"store_true\", default=False, dest=\"lxd\",\r\n help=\"Uses LXC containers\")\r\n parser.add_argument(\"--dkr\", action=\"store_true\", default=False, dest=\"dkr\",\r\n help=\"Use docker containers\")\r\n parser.add_argument(\"--ping\", action=\"store\", dest=\"ping\",\r\n help=\"Ping the specified address from each container\")\r\n parser.add_argument(\"--arp\", action=\"store\", dest=\"arp\",\r\n help=\"arPing the specified address from each container\")\r\n parser.add_argument(\"--edgev\", action=\"store\", dest=\"edgev\",\r\n help=\"Perform the specified service action: stop/start/restart\")\r\n parser.add_argument(\"--churn\", action=\"store\", dest=\"churn\",\r\n help=\"Restarts the specified amount of nodes in the overlay,\"\r\n \"one every interval\")\r\n parser.add_argument(\"--test\", action=\"store\", dest=\"test\",\r\n help=\"Performs latency and bandwidth test between random pairs of \"\r\n \"nodes. Ex test=\")\r\n\r\n self.args = parser.parse_args()\r\n self.exp_dir = exp_dir\r\n if not self.exp_dir:\r\n self.exp_dir = os.path.abspath(\".\")\r\n self.template_file = \"{0}/template-config.json\".format(self.exp_dir)\r\n self.template_bf_file = \"{0}/template-bf-config.json\".format(self.exp_dir)\r\n self.config_dir = \"{0}/config\".format(self.exp_dir)\r\n self.cores_dir = \"{0}/cores\".format(self.exp_dir)\r\n self.logs_dir = \"{0}/log\".format(self.exp_dir)\r\n self.data_dir = \"{0}/test-link-utilization\".format(self.exp_dir)\r\n self.config_file_base = \"{0}/config-\".format(self.config_dir)\r\n self.seq_file = \"{0}/startup.list\".format(self.exp_dir)\r\n self.range_file = \"{0}/range_file\".format(self.exp_dir)\r\n\r\n if self.args.range:\r\n rng = self.args.range.rsplit(\",\", 2)\r\n self.range_end = int(rng[1])\r\n self.range_start = int(rng[0])\r\n elif not self.args.range and os.path.isfile(\"range_file\"):\r\n with open(self.range_file) as rng_fle:\r\n rng = rng_fle.read().strip().rsplit(\",\", 2)\r\n self.range_end = int(rng[1])\r\n self.range_start = int(rng[0])\r\n else:\r\n raise RuntimeError(\"Range unspecified\")\r\n self.total_inst = self.range_end - self.range_start\r\n self.seq_list = None #[range(self.range_end, self.range_start)]\r\n self.load_seq_list()\r\n\r\n @classmethod\r\n def runshell(cls, cmd):\r\n \"\"\" Run a shell command. if fails, raise an exception. 
\"\"\"\r\n if cmd[0] is None:\r\n raise ValueError(\"No executable specified to run\")\r\n resp = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n return resp\r\n\r\n @property\r\n @abstractmethod\r\n def gen_config(self, range_start, range_end):\r\n pass\r\n\r\n @property\r\n @abstractmethod\r\n def start_instance(self, instance):\r\n pass\r\n\r\n @property\r\n @abstractmethod\r\n def end(self):\r\n pass\r\n\r\n def clean_config(self):\r\n if os.path.isdir(self.config_dir):\r\n shutil.rmtree(self.config_dir)\r\n if self.args.verbose:\r\n print(\"Removed dir {}\".format(self.config_dir))\r\n if os.path.isfile(self.seq_file):\r\n os.remove(self.seq_file)\r\n if self.args.verbose:\r\n print(\"Removed file {}\".format(self.seq_file))\r\n\r\n def make_clean(self):\r\n self.clean_config()\r\n if os.path.isdir(self.logs_dir):\r\n shutil.rmtree(self.logs_dir)\r\n if self.args.verbose:\r\n print(\"Removed dir {}\".format(self.logs_dir))\r\n if os.path.isdir(self.cores_dir):\r\n shutil.rmtree(self.cores_dir)\r\n if self.args.verbose:\r\n print(\"Removed dir {}\".format(self.cores_dir))\r\n\r\n def configure(self):\r\n with open(self.range_file, \"w\") as rng_fle:\r\n rng_fle.write(self.args.range)\r\n self.gen_config(self.range_start, self.range_end)\r\n self.save_seq_list()\r\n\r\n def save_seq_list(self):\r\n with open(self.seq_file, \"wb\") as seq_fle:\r\n pickle.dump(self.seq_list, seq_fle)\r\n seq_fle.flush()\r\n if self.args.verbose:\r\n print(\"Instance sequence saved with {0} entries\\n{1}\"\r\n .format(self.total_inst, self.seq_list))\r\n\r\n def load_seq_list(self):\r\n if os.path.isfile(self.seq_file):\r\n with open(self.seq_file, \"rb\") as seq_fle:\r\n self.seq_list = pickle.load(seq_fle)\r\n if self.args.verbose:\r\n print(\"Sequence list loaded from existing file - {0} entries\\n{1}\".\r\n format(len(self.seq_list), self.seq_list))\r\n else:\r\n self.seq_list = list(range(self.range_start, self.range_end))\r\n random.shuffle(self.seq_list)\r\n\r\n def start_range(self, num, wait):\r\n cnt = 0\r\n sequence = self.seq_list[self.range_start-1:self.range_end]\r\n for inst in sequence:\r\n self.start_instance(inst)\r\n cnt += 1\r\n if cnt % num == 0 and cnt < len(sequence):\r\n #if self.args.verbose:\r\n print(\"{0}/{1} container(s) instantiated\".format(cnt, len(sequence)))\r\n time.sleep(wait)\r\n print(\"{0} container(s) instantiated\".format(cnt))\r\n\r\n def run(self):\r\n self.start_range(Testbed.BATCH_SZ, Testbed.LAUNCH_WAIT)\r\n\r\n def display_current_config(self):\r\n print(\"----Testbed Configuration----\")\r\n print(\"{0} instances range {1}-{2}\".format(self.total_inst, self.range_start,\r\n self.range_end))\r\n print(\"Config dir {0}\".format(self.config_dir))\r\n print(\"Config base filename {0}\".format(self.config_file_base))\r\n print(\"Log dir {0}\".format(self.logs_dir))\r\n print(\"Contianer image {0}\".format(Testbed.BF_VIRT_IMG))\r\n print(\"\".format())\r\n\r\n def setup_system(self):\r\n setup_cmds = [[\"./setup-system.sh\"]]\r\n for cmd_list in setup_cmds:\r\n if self.args.verbose:\r\n print(cmd_list)\r\n resp = Testbed.runshell(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\") if resp.returncode == 0 else\r\n resp.stderr.decode(\"utf-8\"))\r\n\r\n @abstractmethod\r\n def run_container_cmd(self, cmd_line, instance_num):\r\n pass\r\n\r\n def churn(self, param):\r\n params = param.rsplit(\",\", 2)\r\n iters = int(params[0])\r\n inval = int(params[1])\r\n self._churn(iters, inval)\r\n\r\n def _churn(self, churn_count=0, interval=30):\r\n if 
churn_count == 0:\r\n churn_count = self.total_inst\r\n cnt = 0\r\n restarted_nds = set()\r\n while cnt < churn_count:\r\n inst = random.choice(range(self.range_start, self.range_end))\r\n print(\"Stopping node\", inst)\r\n self.run_container_cmd([\"systemctl\", \"stop\", \"edgev\"], inst)\r\n if self.args.verbose:\r\n print(\"Waiting\", interval, \"seconds\")\r\n time.sleep(interval)\r\n print(\"Resuming node\", inst)\r\n self.run_container_cmd([\"systemctl\", \"start\", \"edgev\"], inst)\r\n restarted_nds.add(inst)\r\n cnt += 1\r\n if self.args.verbose:\r\n print(\"Waiting\", interval, \"seconds\")\r\n time.sleep(interval)\r\n if self.args.verbose:\r\n print(\"{0} nodes restarted\\n{1}\".format(cnt, str(restarted_nds)))\r\n\r\n def run_test(self):\r\n test = None\r\n if self.args.test == \"lui\":\r\n test = TestLinkUtilization()\r\n test.create_input_files()\r\n if self.args.test == \"lur\":\r\n test = TestLinkUtilization()\r\n test.create_result_report()\r\n print(\"Link Utilization test case completed\")\r\n\r\nclass DockerTestbed(Testbed):\r\n VIRT = spawn.find_executable(\"docker\")\r\n CONTAINER = \"edgev-dkr{0}\"\r\n\r\n def __init__(self, exp_dir=None):\r\n super().__init__(exp_dir=exp_dir)\r\n self.network_name = \"dkrnet\"\r\n\r\n #def configure(self):\r\n # super().configure()\r\n # self.pull_image()\r\n\r\n def create_network(self):\r\n #netid=docker network ls | grep dkrnet | awk 'BEGIN { FS=\" \"} {print $2}'\r\n #docker network create dkrnet\r\n pass\r\n\r\n def gen_config(self, range_start, range_end):\r\n with open(self.template_file) as cfg_tmpl:\r\n template = json.load(cfg_tmpl)\r\n olid = template[\"CFx\"].get(\"Overlays\", None)\r\n olid = olid[0]\r\n node_id = template[\"CFx\"].get(\"NodeId\", \"a000###feb6040628e5fb7e70b04f###\")\r\n node_name = template[\"OverlayVisualizer\"].get(\"NodeName\", \"dkr###\")\r\n netwk = template[\"BridgeController\"][\"Overlays\"][olid][\"NetDevice\"][\"AppBridge\"].get(\"NetworkAddress\", \"10.10.1.0/24\")\r\n netwk = ipaddress.IPv4Network(netwk)\r\n for val in range(range_start, range_end):\r\n rng_str = \"{0:03}\".format(val)\r\n cfg_file = \"{0}{1}.json\".format(self.config_file_base, rng_str)\r\n node_id = \"{0}{1}{2}{1}{3}\".format(node_id[:4], rng_str, node_id[7:29], node_id[32:])\r\n node_name = \"{0}{1}\".format(node_name[:3], rng_str)\r\n node_ip = str(netwk[val])\r\n template[\"CFx\"][\"NodeId\"] = node_id\r\n template[\"OverlayVisualizer\"][\"NodeName\"] = node_name\r\n template[\"BridgeController\"][\"Overlays\"][olid][\"NetDevice\"][\"AppBridge\"][\"IP4\"] = node_ip\r\n template[\"BridgeController\"][\"Overlays\"][olid][\"NetDevice\"][\"AppBridge\"][\"PrefixLen\"] = netwk.prefixlen\r\n os.makedirs(self.config_dir, exist_ok=True)\r\n with open(cfg_file, \"w\") as cfg_fle:\r\n json.dump(template, cfg_fle, indent=2)\r\n cfg_fle.flush()\r\n if self.args.verbose:\r\n print(\"{0} config file(s) generated\".format(range_end-range_start))\r\n #self.gen_bf_config(olid)\r\n\r\n def gen_bf_config(self, olid):\r\n with open(self.template_bf_file) as cfg_tmpl:\r\n template = json.load(cfg_tmpl)\r\n template[\"OverlayId\"] = olid\r\n template[\"BridgeName\"] = \"edgevbr{0}\".format(olid)\r\n cfg_file = \"{0}{1}.json\".format(self.config_file_base, \"bf-cfg\")\r\n with open(cfg_file, \"w\") as cfg_fle:\r\n json.dump(template, cfg_fle, indent=2)\r\n cfg_fle.flush()\r\n\r\n if self.args.verbose:\r\n print(\"BoundedFlood config file(s) generated\")\r\n\r\n def start_instance(self, instance):\r\n instance = \"{0:03}\".format(instance)\r\n 
container = DockerTestbed.CONTAINER.format(instance)\r\n log_dir = \"{0}/dkr{1}\".format(self.logs_dir, instance)\r\n os.makedirs(log_dir, exist_ok=True)\r\n\r\n cfg_file = \"{0}{1}.json\".format(self.config_file_base, instance)\r\n if not os.path.isfile(cfg_file):\r\n self.gen_config(instance, instance+1)\r\n\r\n mount_cfg = \"{0}:/etc/opt/edge-vpn/config.json\".format(cfg_file)\r\n mount_log = \"{0}/:/var/log/edge-vpn/\".format(log_dir)\r\n mount_data = \"{0}/:/var/edge-vpn/\".format(self.data_dir)\r\n args = [\"--rm\", \"--privileged\"]\r\n opts = \"-d\"\r\n img = Testbed.BF_VIRT_IMG\r\n cmd = \"/sbin/init\"\r\n cmd_list = [DockerTestbed.VIRT, \"run\", opts, \"-v\", mount_cfg, \"-v\", mount_log,\r\n args[0], args[1], \"--name\", container, \"--network\", self.network_name,\r\n img, cmd]\r\n if self.args.verbose:\r\n print(cmd_list)\r\n resp = Testbed.runshell(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\") if resp.returncode == 0 else resp.stderr.decode(\"utf-8\"))\r\n\r\n def run_container_cmd(self, cmd_line, instance_num):\r\n #report = dict(fail_count=0, fail_node=[])\r\n cmd_list = [DockerTestbed.VIRT, \"exec\", \"-it\"]\r\n inst = \"{0:03}\".format(instance_num)\r\n container = DockerTestbed.CONTAINER.format(inst)\r\n cmd_list.append(container)\r\n cmd_list += cmd_line\r\n resp = Testbed.runshell(cmd_list)\r\n if self.args.verbose:\r\n print(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\"))\r\n #if resp.returncode != 0:\r\n # report[\"fail_count\"] += 1\r\n # report[\"fail_node\"].append(\"node-{0}\".format(inst))\r\n #rpt_msg = \"{0}: {1}/{2} failed\\n{3}\".format(cmd_line, report[\"fail_count\"],\r\n # self.range_end - self.range_start,\r\n # report[\"fail_node\"])\r\n #print(rpt_msg)\r\n\r\n def run_cmd_on_range(self, cmd_line, delay=0):\r\n report = dict(fail_count=0, fail_node=[])\r\n for inst in self.seq_list[self.range_start-1:self.range_end]:\r\n cmd_list = [DockerTestbed.VIRT, \"exec\", \"-it\"]\r\n inst = \"{0:03}\".format(inst)\r\n container = DockerTestbed.CONTAINER.format(inst)\r\n cmd_list.append(container)\r\n cmd_list += cmd_line\r\n resp = Testbed.runshell(cmd_list)\r\n if self.args.verbose:\r\n print(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\"))\r\n if resp.returncode != 0:\r\n report[\"fail_count\"] += 1\r\n report[\"fail_node\"].append(\"node-{0}\".format(inst))\r\n if delay > 0:\r\n time.sleep(delay)\r\n rpt_msg = \"{0}: {1}/{2} failed\\n{3}\".format(cmd_line, report[\"fail_count\"],\r\n self.range_end - self.range_start,\r\n report[\"fail_node\"])\r\n print(rpt_msg)\r\n\r\n\r\n def pull_image(self):\r\n cmd_list = [DockerTestbed.VIRT, \"pull\", Testbed.BF_VIRT_IMG]\r\n resp = Testbed.runshell(cmd_list)\r\n if self.args.verbose:\r\n print(resp)\r\n\r\n def stop_range(self):\r\n cnt = 0\r\n cmd_list = [DockerTestbed.VIRT, \"kill\"]\r\n sequence = self.seq_list[self.range_start-1:self.range_end]\r\n for inst in sequence:\r\n cnt += 1\r\n inst = \"{0:03}\".format(inst)\r\n container = DockerTestbed.CONTAINER.format(inst)\r\n cmd_list.append(container)\r\n if self.args.verbose:\r\n print(cmd_list)\r\n resp = Testbed.runshell(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\") if resp.returncode == 0 else\r\n resp.stderr.decode(\"utf-8\"))\r\n print(\"{0} Docker container(s) terminated\".format(cnt))\r\n\r\n def end(self):\r\n self.run_cmd_on_range([\"systemctl\", \"stop\", \"edgev\"])\r\n self.stop_range()\r\n\r\n def run_ping(self, target_address):\r\n report = dict(fail_count=0, fail_node=[])\r\n for inst in range(self.range_start, self.range_end):\r\n 
cmd_list = [DockerTestbed.VIRT, \"exec\", \"-it\"]\r\n inst = \"{0:03}\".format(inst)\r\n container = DockerTestbed.CONTAINER.format(inst)\r\n cmd_list.append(container)\r\n cmd_list += [\"ping\", \"-c1\"]\r\n cmd_list.append(target_address)\r\n resp = Testbed.runshell(cmd_list)\r\n if self.args.verbose:\r\n print(cmd_list)\r\n print(\"ping \", target_address, \"\\n\", resp.stdout.decode(\"utf-8\"))\r\n if resp.returncode != 0:\r\n report[\"fail_count\"] += 1\r\n report[\"fail_node\"].append(\"node-{0}\".format(inst))\r\n rpt_msg = \"ping {0}: {1}/{2} failed\\n{3}\".format(target_address, report[\"fail_count\"],\r\n self.range_end - self.range_start,\r\n report[\"fail_node\"])\r\n print(rpt_msg)\r\n\r\n def run_arp(self, target_address):\r\n for inst in range(self.range_start, self.range_end):\r\n cmd_list = [DockerTestbed.VIRT, \"exec\", \"-it\"]\r\n inst = \"{0:03}\".format(inst)\r\n container = DockerTestbed.CONTAINER.format(inst)\r\n cmd_list.append(container)\r\n cmd_list += [\"arping\", \"-C1\"]\r\n cmd_list.append(target_address)\r\n if self.args.verbose:\r\n print(cmd_list)\r\n resp = Testbed.runshell(cmd_list)\r\n print(resp.stdout.decode(\"utf-8\") if resp.returncode == 0 else\r\n resp.stderr.decode(\"utf-8\"))\r\n\r\n def run_svc_ctl(self, svc_ctl):\r\n if svc_ctl == \"stop\":\r\n self.run_cmd_on_range([\"systemctl\", \"stop\", \"edgev\"])\r\n elif svc_ctl == \"start\":\r\n self.run_cmd_on_range([\"systemctl\", \"start\", \"edgev\"], 10)\r\n elif svc_ctl == \"restart\":\r\n self.run_cmd_on_range([\"systemctl\", \"restart\", \"edgev\"], 1)\r\n else:\r\n print(\"Invalid service control specified, only accepts start/stop/restart\")\r\n\r\ndef main(): # pylint: disable=too-many-return-statements\r\n exp = DockerTestbed()\r\n\r\n\r\n if exp.args.run and exp.args.end:\r\n print(\"Error! 
Both run and end were specified.\")\r\n return\r\n\r\n if exp.args.info:\r\n exp.display_current_config()\r\n return\r\n\r\n if exp.args.setup:\r\n exp.setup_system()\r\n return\r\n\r\n if exp.args.pull:\r\n exp.pull_image()\r\n return\r\n\r\n if exp.args.clean:\r\n exp.make_clean()\r\n return\r\n\r\n if exp.range_end - exp.range_start <= 0:\r\n print(\"Invalid range, please fix RANGE_START={0} RANGE_END={1}\".\r\n format(exp.range_start, exp.range_end))\r\n return\r\n\r\n if exp.args.configure:\r\n exp.configure()\r\n\r\n if exp.args.run:\r\n exp.run()\r\n return\r\n\r\n if exp.args.end:\r\n exp.end()\r\n return\r\n\r\n if exp.args.ping:\r\n exp.run_ping(exp.args.ping)\r\n return\r\n\r\n if exp.args.arp:\r\n exp.run_arp(exp.args.arp)\r\n return\r\n\r\n if exp.args.edgev:\r\n exp.run_svc_ctl(exp.args.edgev)\r\n return\r\n\r\n if exp.args.churn:\r\n exp.churn(exp.args.churn)\r\n return\r\n\r\n if exp.args.test:\r\n exp.run_test()\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Testbed/testbed.py","file_name":"testbed.py","file_ext":"py","file_size_in_byte":25342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"411874088","text":"import pyprimesieve\n\n\ndef successiveNonPrimeIntGenerator(startInt, sequenceLength):\n intList = []\n while len(intList) <= sequenceLength:\n for i in range(2, startInt):\n if (startInt % i) == 0:\n intList.append(startInt)\n startInt += 1\n break\n \n else:\n startInt +=1\n \n return intList\n\nif __name__ == \"__main__\":\n\tstartInt = raw_input(\"Please enter a starting number: \")\n\tsequenceLength = raw_input(\"Please enter how many non-prime integers you want: \")\n\tsuccessiveNonPrimeIntGenerator(int(startInt), int(sequenceLength))\n","sub_path":"integerGeneration.py","file_name":"integerGeneration.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"67116706","text":"\n\nfrom xai.brain.wordbase.nouns._bandit import _BANDIT\n\n#calss header\nclass _BANDITTI(_BANDIT, ):\n\tdef __init__(self,): \n\t\t_BANDIT.__init__(self)\n\t\tself.name = \"BANDITTI\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bandit\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_banditti.py","file_name":"_banditti.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"619555605","text":"from __future__ import print_function\nimport h5py\nimport os\nfrom gunpowder import *\n\n\ndef prepare_h5source(ds_dir, volume_name, label_file=(\"groundtruth_seg.h5\", \"main\"), raw_file=(\"im_uint8.h5\", \"main\"),\n add_dummy_mask=True):\n h5_filepath = \".{}.h5\".format(volume_name)\n if add_dummy_mask:\n with h5py.File(os.path.join(ds_dir, volume_name, label_file[0]), \"r\") as f_labels:\n mask_shape = f_labels[label_file[1]].shape\n with h5py.File(h5_filepath, \"w\") as h5:\n h5[\"volumes/raw\"] = h5py.ExternalLink(os.path.join(ds_dir, volume_name, raw_file[0]), raw_file[1])\n h5[\"volumes/labels/neuron_ids\"] = h5py.ExternalLink(os.path.join(ds_dir, volume_name, label_file[0]),\n label_file[1])\n datasets = {VolumeTypes.RAW: 'volumes/raw',\n VolumeTypes.GT_LABELS: 'volumes/labels/neuron_ids'}\n if add_dummy_mask:\n h5.create_dataset(\n name=\"volumes/labels/mask\",\n dtype=\"uint8\",\n shape=mask_shape,\n fillvalue=1,\n )\n datasets[VolumeTypes.GT_MASK]='volumes/labels/mask'\n\n h5source = 
gunpowder.Hdf5Source(\n h5_filepath,\n datasets=datasets,\n #resolutions=(8, 8, 8),\n )\n return h5source\n","sub_path":"training/gunpowder_wrappers.py","file_name":"gunpowder_wrappers.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"541549713","text":"__author__ = 'fearstruck'\n\nimport unittest\n\n\nclass MyTestCase(unittest.TestCase):\n def create_data_set():\n group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])\n labels = ['A', 'A', 'B', 'B']\n return group, labels\n\n\n def test_something(self):\n self.assertEqual(True, False)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"ml/tests/test_knn.py","file_name":"test_knn.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"466481219","text":"# https://adventofcode.com/2019/day/1\n\nimport os\nfrom pprint import pprint\n\nSCRIPT_DIR = os.path.dirname(__file__)\nINPUT_FILENAME = 'inputs.txt'\n\ndef get_inputs(dir, filename):\n file = os.path.join(dir, filename)\n inputs = []\n\n with open(file, 'r') as f:\n inputs = f.read().splitlines()\n\n return inputs\n\ndef process(inputs):\n outputs = inputs.copy()\n return outputs\n\ndef main():\n inputs = get_inputs(SCRIPT_DIR, INPUT_FILENAME)\n answer = process(inputs)\n print(f'answer:', answer)\n\nmain()\n","sub_path":"2019/day-0/part-1/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"199150334","text":"import tkinter, math\nr = 500\ncanvas = tkinter.Canvas(width = r, height = r)\ncanvas.pack()\n\nKORYTNACKA = [r/2,r/2,0]\n\ndef uhol(x):\n return x*2*math.pi/360\n\ndef nastavPoziciu(x,y):\n KORYTNACKA[0] = x\n KORYTNACKA[1] = y\n\ndef posunUhol(x):\n KORYTNACKA[2] += x\n\ndef chod(x):\n a1 = KORYTNACKA[0]\n b1 = KORYTNACKA[1]\n\n a2 = a1 + x*math.cos(KORYTNACKA[2])\n b2 = b1 + x*math.sin(KORYTNACKA[2])\n\n canvas.create_line(a1,b1,a2,b2)\n nastavPoziciu(a2,b2)\n\ndef krok(x):\n if(x > r):\n return 0\n chod(x)\n posunUhol(uhol(90))\n krok(x+4)\n\nkrok(0)\n\ncanvas.mainloop()","sub_path":"grafika_rekurzia/src/program3b.py","file_name":"program3b.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"460790882","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom keras.optimizers import Adagrad\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.layers.recurrent import GRU\nfrom theano.compile import mode\nfrom lib import utility as util\nimport pre_processing as pp\n\n__author__ = 'yossiadi'\n\nnp.random.seed(1539) # for reproducibility\n\n# parameters\nmaxlen = 20 # cut frames after this number\nbatch_size = 16\nepochs = 500\ninput_dim = 3\noutput_dim = 2\nlayer_size = 20\n\nmodel_path = \"models/paralysis.net.model\"\ntest_path = \"db/test.save\"\n\nprint(\"Loading data...\")\nX_test, y_test = util.get_pickle_data(test_path)\nprint(\"Pre-processing the features...\")\nX_test = pp.cut_and_pad(X_test, max_len=maxlen, num_of_features=input_dim)\ny_test = pp.convert_y(y_test)\n\n# build the model\nmodel = Sequential()\nmodel.add(GRU(input_dim=input_dim, output_dim=layer_size))\nmodel.add(Dense(input_dim=layer_size, 
output_dim=output_dim))\nmodel.add(Activation('softmax'))\nmodel.load_weights(model_path)\n\noptimizer = Adagrad()\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=optimizer)\n\n# get predictions\ny_hat = model.predict_classes(X_test, batch_size=batch_size)\n\nprint(\"Target: %s\" % y_test[:, 1])\nprint(\"Y Hat: %s\" % y_hat)\n\nprint(\"\")\nprint(\"Test set: %s\" % test_path)\nprint(\"Accuracy on the test set: %s\" % (float(np.sum(y_hat == y_test[:, 1]))/len(y_hat)))\n\n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"43929150","text":"import os\nimport subprocess\nimport cv2\n\nface_csc = cv2.CascadeClassifier('C:/projects/opencv-python/opencv/data/haarcascades/haarcascade_lowerbody.xml')\ncam = cv2.VideoCapture(0)\nhat = cv2.imread('mid_jeans.jpg')\n\ndef put_hat(hat,fc,x,y,w,h):\n \n face_width = w\n face_height = h\n \n hat_width = face_width\n hat_height = int(face_height)+10\n \n hat = cv2.resize(hat,(hat_width,hat_height))\n \n for i in range(hat_height):\n for j in range(hat_width):\n for k in range(3):\n if hat[i][j][k]<235:\n fc[y+i-int(0.25*face_height)][x+j][k] = hat[i][j][k]\n return fc\n\nwhile(True):\n tf, img = cam.read() \n \n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n \n faces = face_csc.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(40,40)\n )\n \n for (x, y, w, h) in faces:\n cv2.rectangle(gray, (x,y), (x+w, y+h), (0,0,0), 5)\n img = put_hat(hat,img,x,y,w,h)\n \n cv2.imshow('video', img)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()\n","sub_path":"jeans-filter.py","file_name":"jeans-filter.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"197252234","text":"import sys\nimport random\nfrom PyQt5 import QtCore, QtWidgets, QtGui\n\nclass MyApp(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.v_box = QtWidgets.QVBoxLayout()\n\n self.ed_1 = QtWidgets.QTextEdit()\n\n self.btn = QtWidgets.QPushButton(\"Press me to make fun~\")\n\n self.frm_empty = QtWidgets.QFrame()\n\n self.set_up()\n\n def set_up(self):\n self.setFont(QtGui.QFont(\"Arial\", 12))\n self.setWindowTitle(\"Hello Word!\")\n icons = {\n 0: QtGui.QIcon(\"icon\\\\chameleon.ico\"),\n 1: QtGui.QIcon(\"icon\\\\aol_mail.ico\"),\n 2: QtGui.QIcon(\"icon\\\\emotion_darth_wader.ico\")\n }\n\n self.setWindowIcon(icons.get((random.randint(0, 9) % 3)))\n\n self.frm_empty.resize(500, 350)\n\n self.ed_1.installEventFilter(self)\n self.frm_empty.setMouseTracking(True)\n self.frm_empty.installEventFilter(self)\n self.btn.installEventFilter(self)\n\n self.v_box.addWidget(self.ed_1)\n self.v_box.addWidget(self.frm_empty)\n self.v_box.addWidget(self.btn)\n\n # self.frm_right.setMouseTracking(True)\n # self.frm_right.installEventFilter(self)\n\n # self.frm_left.setMouseTracking(True)\n # self.frm_left.installEventFilter(self)\n\n self.btn.clicked.connect(self.btn_click)\n\n self.setLayout(self.v_box)\n\n def eventFilter(self, obj, event):\n if (type(obj) == QtWidgets.QTextEdit) and \\\n (event.type() == QtCore.QEvent.KeyPress):\n print(event.key())\n\n elif event.type() == QtCore.QEvent.MouseMove:\n print(type(obj))\n\n return False\n\n def showEvent(self, event):\n self.resize(800, 600)\n center = QtWidgets.QApplication.desktop().availableGeometry()\n x = (center.width() - 
self.width()) / 2\n y = (center.height() - self.height()) / 2\n self.move(x, y)\n\n def btn_click(self):\n QtWidgets.QMessageBox.information(self, \"Say Hi\", \"Hello World\", QtWidgets.QMessageBox.Ok)\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = MyApp()\n window.show()\n sys.exit(app.exec_())","sub_path":"PyQtPractice/TryEventFiter.py","file_name":"TryEventFiter.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"399795830","text":"import json\nimport io\nfrom unipath import Path\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import TestCase\nfrom django.conf import settings\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom rest_framework.reverse import reverse\nfrom rest_framework.test import APIRequestFactory\n\nfrom .api import SubmissionDetails\nfrom .models import Job, Submission\nfrom .model_factories import *\n\n\nclass JobListTests(APITestCase):\n\n def test_return_of_available_job_types(self):\n j1 = JobFactory.create(name=\"job1\")\n j2 = JobFactory.create(name=\"job2\")\n response = self.client.get(reverse('job',)+\".json\")\n response.render()\n self.assertEqual(response.status_code, 200)\n test_data = '{\"count\":2,\"next\":null,\"previous\":null,\"results\":[{\"pk\":1,\"name\":\"job1\"},{\"pk\":2,\"name\":\"job2\"}]}'\n self.assertEqual(response.content.decode(\"utf-8\"), test_data)\n\n\nclass SubmissionDetailTests(APITestCase):\n\n file = ''\n data = {}\n factory = APIRequestFactory()\n\n def setUp(self):\n self.file = SimpleUploadedFile('file1.txt',\n bytes('these are the file contents!',\n 'utf-8'))\n self.data = {'input_data': self.file,\n 'job': 'job1',\n 'submission_name': 'test',\n 'email': 'a@b.com'}\n j1 = JobFactory.create(name=\"job1\")\n\n def test_submission_detail_is_returned(self):\n s1 = SubmissionFactory.create()\n response = self.client.get(reverse('submissionDetail',\n args=[s1.pk, ]) + \".json\")\n self.assertEqual(response.status_code, 200)\n test_data = '{\"submission_name\":\"submission_0\",\"UUID\":\"'+s1.UUID+'\"}'\n self.assertEqual(response.content.decode(\"utf-8\"), test_data)\n\n def test_valid_submission_post_creates_entry(self):\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_rejection_with_bad_email(self):\n self.data['email'] = 'b'\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_rejection_with_bad_job_id(self):\n self.data['job'] = 'job34'\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)\n\n def test_rejection_with_blank_submission_name(self):\n self.data['submission_name'] = \"\"\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_rejection_without_submission_name(self):\n 
del(self.data['submission_name'])\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_rejection_without_email(self):\n del(self.data['email'])\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_rejection_without_job(self):\n del(self.data['job'])\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_rejection_without_input_data(self):\n del(self.data['input_data'])\n request = self.factory.post(reverse('submission'), self.data,\n format='multipart')\n view = SubmissionDetails.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n","sub_path":"analytics_automated/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"416639865","text":"# -*- coding = utf-8 -*-\n# Created by qiu on 16-1-10\n#\n\nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\ndoc = request.urlopen(url='http://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html')\nsoup = BeautifulSoup(doc, 'lxml')\n\nfor link in soup.find_all('a'):\n result = link.get('href')\n if result.startswith(('http', 'https')):\n print(result)\n\n\n \n\n\n\n\n","sub_path":"No_0009/findLink.py","file_name":"findLink.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"478346239","text":"#!/usr/bin/python\n# Place image to powerpoint file\n\nimport argparse\nfrom pptx import Presentation\nfrom pptx.util import Cm\nfrom pptx.util import Pt\nimport os.path\nimport pandas as pd\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"presentation file\")\nparser.add_argument(\"--excel\", help=\"image information excel\")\nargs = parser.parse_args()\n\ndf = pd.read_excel(args.excel)\nprs = Presentation(args.input)\nprevious_page = 0\nfor i in range(0,df.iloc[:,0].count()):\n\tif previous_page != df.ix[i,\"page\"]:\n\t\tslide = prs.slides.add_slide(prs.slide_layouts[2])\n\t\ttitle = slide.shapes.title\n\t\ttitle.text = df.ix[i,\"title\"]\n\t\tprevious_page = df.ix[i,\"page\"]\n\t\n\timage_path = df.ix[i,\"image_path\"]\n\tif(os.path.exists(image_path)):\n\t\tpic = slide.shapes.add_picture(image_path, Cm(df.ix[i,\"x\"]), Cm(df.ix[i,\"y\"]), width=Cm(df.ix[i,\"width\"]))\n\nprs.save(args.input)\n\n","sub_path":"ppt_addimage.py","file_name":"ppt_addimage.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"153663237","text":"#\n# NOTE: This is a copy of haversine/haversine.py, 2023-02-28. 
It is intended\n# to create a baseline for performance regression tests.\n#\n# To establish a new baseline, replace this file with the current one.\n#\n\nfrom enum import Enum\nfrom math import pi\nfrom typing import Union, Tuple\nimport math\n\n\n# mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius\n_AVG_EARTH_RADIUS_KM = 6371.0088\n\n\nclass Unit(str, Enum):\n \"\"\"\n Enumeration of supported units.\n The full list can be checked by iterating over the class; e.g.\n the expression `tuple(Unit)`.\n \"\"\"\n\n KILOMETERS = 'km'\n METERS = 'm'\n MILES = 'mi'\n NAUTICAL_MILES = 'nmi'\n FEET = 'ft'\n INCHES = 'in'\n RADIANS = 'rad'\n DEGREES = 'deg'\n\n\nclass Direction(float, Enum):\n \"\"\"\n Enumeration of supported directions.\n The full list can be checked by iterating over the class; e.g.\n the expression `tuple(Direction)`.\n Angles expressed in radians.\n \"\"\"\n\n NORTH = 0.0\n NORTHEAST = pi * 0.25\n EAST = pi * 0.5\n SOUTHEAST = pi * 0.75\n SOUTH = pi\n SOUTHWEST = pi * 1.25\n WEST = pi * 1.5\n NORTHWEST = pi * 1.75\n\n\n# Unit values taken from http://www.unitconversion.org/unit_converter/length.html\n_CONVERSIONS = {\n Unit.KILOMETERS: 1.0,\n Unit.METERS: 1000.0,\n Unit.MILES: 0.621371192,\n Unit.NAUTICAL_MILES: 0.539956803,\n Unit.FEET: 3280.839895013,\n Unit.INCHES: 39370.078740158,\n Unit.RADIANS: 1/_AVG_EARTH_RADIUS_KM,\n Unit.DEGREES: (1/_AVG_EARTH_RADIUS_KM)*(180.0/pi)\n}\n\n\ndef get_avg_earth_radius(unit):\n return _AVG_EARTH_RADIUS_KM * _CONVERSIONS[unit]\n\n\ndef _normalize(lat: float, lon: float) -> Tuple[float, float]:\n \"\"\"\n Normalize point to [-90, 90] latitude and [-180, 180] longitude.\n \"\"\"\n lat = (lat + 90) % 360 - 90\n if lat > 90:\n lat = 180 - lat\n lon += 180\n lon = (lon + 180) % 360 - 180\n return lat, lon\n\n\ndef _normalize_vector(lat: \"numpy.ndarray\", lon: \"numpy.ndarray\") -> Tuple[\"numpy.ndarray\", \"numpy.ndarray\"]:\n \"\"\"\n Normalize points to [-90, 90] latitude and [-180, 180] longitude.\n \"\"\"\n lat = (lat + 90) % 360 - 90\n lon = (lon + 180) % 360 - 180\n wrap = lat > 90\n if numpy.any(wrap):\n lat[wrap] = 180 - lat[wrap]\n lon[wrap] = lon[wrap] % 360 - 180\n return lat, lon\n\n\ndef _ensure_lat_lon(lat: float, lon: float):\n \"\"\"\n Ensure that the given latitude and longitude have proper values. An exception is raised if they are not.\n \"\"\"\n if lat < -90 or lat > 90:\n raise ValueError(f\"Latitude {lat} is out of range [-90, 90]\")\n if lon < -180 or lon > 180:\n raise ValueError(f\"Longitude {lon} is out of range [-180, 180]\")\n\n\ndef _ensure_lat_lon_vector(lat: \"numpy.ndarray\", lon: \"numpy.ndarray\"):\n \"\"\"\n Ensure that the given latitude and longitude have proper values. An exception is raised if they are not.\n \"\"\"\n if numpy.abs(lat).max() > 90:\n raise ValueError(\"Latitude(s) out of range [-90, 90]\")\n if numpy.abs(lon).max() > 180:\n raise ValueError(\"Longitude(s) out of range [-180, 180]\")\n\n\ndef _explode_args(f):\n return lambda ops: f(**ops.__dict__)\n\n\n@_explode_args\ndef _create_haversine_kernel(*, asin=None, arcsin=None, cos, radians, sin, sqrt, **_):\n asin = asin or arcsin\n\n def _haversine_kernel(lat1, lng1, lat2, lng2):\n \"\"\"\n Compute the haversine distance on unit sphere. 
Inputs are in degrees,\n either scalars (with ops==math) or arrays (with ops==numpy).\n \"\"\"\n lat1 = radians(lat1)\n lng1 = radians(lng1)\n lat2 = radians(lat2)\n lng2 = radians(lng2)\n lat = lat2 - lat1\n lng = lng2 - lng1\n d = (sin(lat * 0.5) ** 2\n + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2)\n # Note: 2 * atan2(sqrt(d), sqrt(1-d)) is more accurate at\n # large distance (d is close to 1), but also slower.\n return 2 * asin(sqrt(d))\n return _haversine_kernel\n\n\n\n@_explode_args\ndef _create_inverse_haversine_kernel(*, asin=None, arcsin=None, atan2=None, arctan2=None, cos, degrees, radians, sin, sqrt, **_):\n asin = asin or arcsin\n atan2 = atan2 or arctan2\n\n def _inverse_haversine_kernel(lat, lng, direction, d):\n \"\"\"\n Compute the inverse haversine on unit sphere. lat/lng are in degrees,\n direction in radians; all inputs are either scalars (with ops==math) or\n arrays (with ops==numpy).\n \"\"\"\n lat = radians(lat)\n lng = radians(lng)\n cos_d, sin_d = cos(d), sin(d)\n cos_lat, sin_lat = cos(lat), sin(lat)\n sin_d_cos_lat = sin_d * cos_lat\n return_lat = asin(cos_d * sin_lat + sin_d_cos_lat * cos(direction))\n return_lng = lng + atan2(sin(direction) * sin_d_cos_lat,\n cos_d - sin_lat * sin(return_lat))\n return degrees(return_lat), degrees(return_lng)\n return _inverse_haversine_kernel\n\n\n_haversine_kernel = _create_haversine_kernel(math)\n_inverse_haversine_kernel = _create_inverse_haversine_kernel(math)\n\ntry:\n import numpy\n has_numpy = True\n _haversine_kernel_vector = _create_haversine_kernel(numpy)\n _inverse_haversine_kernel_vector = _create_inverse_haversine_kernel(numpy)\nexcept ModuleNotFoundError:\n # Import error will be reported in haversine_vector() / inverse_haversine_vector()\n has_numpy = False\n\ntry:\n import numba # type: ignore\n if has_numpy:\n _haversine_kernel_vector = numba.vectorize(fastmath=True)(_haversine_kernel)\n # Tuple output is not supported for numba.vectorize. Just jit the numpy version.\n _inverse_haversine_kernel_vector = numba.njit(fastmath=True)(_inverse_haversine_kernel_vector)\n _haversine_kernel = numba.njit(_haversine_kernel)\n _inverse_haversine_kernel = numba.njit(_inverse_haversine_kernel)\nexcept ModuleNotFoundError:\n pass\n\n\ndef haversine(point1, point2, unit=Unit.KILOMETERS, normalize=False, check=True):\n \"\"\" Calculate the great-circle distance between two points on the Earth surface.\n\n Takes two 2-tuples, containing the latitude and longitude of each point in decimal degrees,\n and, optionally, a unit of length.\n\n :param point1: first point; tuple of (latitude, longitude) in decimal degrees\n :param point2: second point; tuple of (latitude, longitude) in decimal degrees\n :param unit: a member of haversine.Unit, or, equivalently, a string containing the\n initials of its corresponding unit of measurement (i.e. miles = mi)\n default 'km' (kilometers).\n :param normalize: if True, normalize the points to [-90, 90] latitude and [-180, 180] longitude.\n :param check: if True, check that points are normalized.\n\n Example: ``haversine((45.7597, 4.8422), (48.8567, 2.3508), unit=Unit.METERS)``\n\n Precondition: ``unit`` is a supported unit (supported units are listed in the `Unit` enum)\n\n :return: the distance between the two points in the requested unit, as a float.\n\n The default returned unit is kilometers. The default unit can be changed by\n setting the unit parameter to a member of ``haversine.Unit``\n (e.g. 
``haversine.Unit.INCHES``), or, equivalently, to a string containing the\n corresponding abbreviation (e.g. 'in'). All available units can be found in the ``Unit`` enum.\n \"\"\"\n\n # unpack latitude/longitude\n lat1, lng1 = point1\n lat2, lng2 = point2\n\n # normalize points or ensure they are proper lat/lon, i.e., in [-90, 90] and [-180, 180]\n if normalize:\n lat1, lng1 = _normalize(lat1, lng1)\n lat2, lng2 = _normalize(lat2, lng2)\n elif check:\n _ensure_lat_lon(lat1, lng1)\n _ensure_lat_lon(lat2, lng2)\n\n return get_avg_earth_radius(unit) * _haversine_kernel(lat1, lng1, lat2, lng2)\n\n\ndef haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize=False, check=True):\n '''\n The exact same function as \"haversine\", except that this\n version replaces math functions with numpy functions.\n This may make it slightly slower for computing the haversine\n distance between two points, but is much faster for computing\n the distance between two vectors of points due to vectorization.\n '''\n if not has_numpy:\n raise RuntimeError('Error, unable to import Numpy, '\n 'consider using haversine instead of haversine_vector.')\n\n # ensure arrays are numpy ndarrays\n if not isinstance(array1, numpy.ndarray):\n array1 = numpy.array(array1)\n if not isinstance(array2, numpy.ndarray):\n array2 = numpy.array(array2)\n\n # ensure will be able to iterate over rows by adding dimension if needed\n if array1.ndim == 1:\n array1 = numpy.expand_dims(array1, 0)\n if array2.ndim == 1:\n array2 = numpy.expand_dims(array2, 0)\n\n # Asserts that both arrays have same dimensions if not in combination mode\n if not comb:\n if array1.shape != array2.shape:\n raise IndexError(\n \"When not in combination mode, arrays must be of same size. If mode is required, use comb=True as argument.\")\n\n # unpack latitude/longitude\n lat1, lng1 = array1[:, 0], array1[:, 1]\n lat2, lng2 = array2[:, 0], array2[:, 1]\n\n # normalize points or ensure they are proper lat/lon, i.e., in [-90, 90] and [-180, 180]\n if normalize:\n lat1, lng1 = _normalize_vector(lat1, lng1)\n lat2, lng2 = _normalize_vector(lat2, lng2)\n elif check:\n _ensure_lat_lon_vector(lat1, lng1)\n _ensure_lat_lon_vector(lat2, lng2)\n\n # If in combination mode, turn coordinates of array1 into column vectors for broadcasting\n if comb:\n lat1 = numpy.expand_dims(lat1, axis=0)\n lng1 = numpy.expand_dims(lng1, axis=0)\n lat2 = numpy.expand_dims(lat2, axis=1)\n lng2 = numpy.expand_dims(lng2, axis=1)\n\n return get_avg_earth_radius(unit) * _haversine_kernel_vector(lat1, lng1, lat2, lng2)\n\n\ndef inverse_haversine(point, distance, direction: Union[Direction, float], unit=Unit.KILOMETERS):\n lat, lng = point\n r = get_avg_earth_radius(unit)\n return _inverse_haversine_kernel(lat, lng, direction, distance/r)\n\n\ndef inverse_haversine_vector(array, distance, direction, unit=Unit.KILOMETERS):\n if not has_numpy:\n raise RuntimeError('Error, unable to import Numpy, '\n 'consider using inverse_haversine instead of inverse_haversine_vector.')\n\n # ensure arrays are numpy ndarrays\n array, distance, direction = map(numpy.asarray, (array, distance, direction))\n\n # ensure will be able to iterate over rows by adding dimension if needed\n if array.ndim == 1:\n array = numpy.expand_dims(array, 0)\n\n # Asserts that arrays are correctly sized\n if array.ndim != 2 or array.shape[1] != 2 or array.shape[0] != len(distance) or array.shape[0] != len(direction):\n raise IndexError(\"Arrays must be of same size.\")\n\n # unpack latitude/longitude\n lat, lng = 
array[:, 0], array[:, 1]\n\n r = get_avg_earth_radius(unit)\n return _inverse_haversine_kernel_vector(lat, lng, direction, distance/r)\n","sub_path":"tests/haversine_baseline.py","file_name":"haversine_baseline.py","file_ext":"py","file_size_in_byte":11147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"275551206","text":"#https://www.hackerrank.com/challenges/find-substring/problem\nimport re\nn = int(input())\ntexto = ' '.join([input() for _ in range(n)])\ni = int(input())\nlista = ''\nfor r in range(i):\n word = input()\n pat = rf'(?<=\\w){word}(?=\\w)'\n match = re.findall(pat,texto)\n print(len(match))\n","sub_path":"regex/applications/find_sub_word.py","file_name":"find_sub_word.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"242910936","text":"'''\nTo download a conents of url using urllib\non to local machine\n'''\n\nimport urllib.error\nimport urllib.request\n\ntry:\n\n url = urllib.request.urlopen(\"https://www.python.org/\") # the method that takes the url\n\n content = url.read()\n\n url.close()\n f = open(\"python.html\", \"wb\")\n f.write(content) # it contains complete html page without style\n f.close()\n\nexcept urllib.error.HTTPError:\n print(\"URL not found\")\n","sub_path":"Python Programs/PythonAdvanced/Networking/Downlading/DownloadingAHTML.py","file_name":"DownloadingAHTML.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"214755100","text":"from selenium import webdriver\r\nimport time\r\n\r\nwhile True:\r\n import requests\r\n f = open(r'I:\\python visual studio code\\ctf\\flag.txt', 'w+')\r\n f.write(str(''))\r\n f = open(r'I:\\python visual studio code\\ctf\\cookie.txt', 'r')\r\n g = f.readlines()\r\n for i in g:\r\n url = 'http://172.16.223.'+str(i[:3])+'/protected/include/core/db/con.php'\r\n flag = requests.get(url).text\r\n file = open(r'I:\\python visual studio code\\ctf\\flag.txt', 'a+')\r\n file.write(str(i[:3])+':'+str(flag)+'\\n')\r\n file.close()\r\n print(str(i[:3])+'执行完毕')\r\n dr = webdriver.Firefox()\r\n dr.get('https://172.16.223.202/match/WAR16/login')\r\n time.sleep(2)\r\n username = dr.find_element_by_id('username')\r\n username.send_keys('1603030134')\r\n password = dr.find_element_by_id('password')\r\n password.send_keys('980828')\r\n login_btn = dr.find_element_by_id('btn_submit')\r\n login_btn.click()\r\n time.sleep(2)\r\n jinru = dr.find_element_by_id('btn-reg')\r\n jinru.click()\r\n time.sleep(2)\r\n pxbaji = dr.find_element_by_class_name('media-text')\r\n pxbaji.click()\r\n time.sleep(2)\r\n flag = dr.find_element_by_id('ipt-flag')\r\n file = open(r'I:\\python visual studio code\\ctf\\flag.txt', 'r+')\r\n pfile = file.readlines()\r\n ticks = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n for i in pfile:\r\n flag.send_keys(str(i[4:-1]))\r\n tijiao = dr.find_element_by_id('btn_submit')\r\n tijiao.click()\r\n flag.clear()\r\n time.sleep(2)\r\n print(str(i[:3]), '提交成功')\r\n print('本轮flag提交完成-----》等待下轮开始--->当前时间为:'+ticks)\r\n time.sleep(2000)\r\n","sub_path":"(taishi)python visual studio code/ctf/自动提交.py","file_name":"自动提交.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"519328036","text":"# pylint:disable=unused-variable\n# pylint:disable=unused-argument\n# 
pylint:disable=redefined-outer-name\n# pylint:disable=protected-access\n# pylint:disable=not-an-iterable\n# pylint:disable=no-value-for-parameter\n\nimport asyncio\nfrom datetime import datetime\nfrom random import randint\nfrom typing import List, Optional\n\nimport pytest\nimport simcore_service_catalog.api.dependencies.director\nfrom fastapi import FastAPI\nfrom models_library.services import (\n ServiceAccessRightsAtDB,\n ServiceDockerData,\n ServiceType,\n)\nfrom pydantic.types import PositiveInt\nfrom simcore_service_catalog.api.routes import services\nfrom simcore_service_catalog.db.repositories.groups import GroupsRepository\nfrom simcore_service_catalog.models.domain.group import GroupAtDB, GroupType\nfrom simcore_service_catalog.models.schemas.services import ServiceOut\nfrom starlette.testclient import TestClient\nfrom yarl import URL\n\npytest_simcore_core_services_selection = [\n \"postgres\",\n]\npytest_simcore_ops_services_selection = [\n \"adminer\",\n]\n\n\n@pytest.fixture(scope=\"session\")\ndef user_id() -> int:\n return randint(1, 10000)\n\n\n@pytest.fixture(scope=\"session\")\ndef user_groups(user_id: int) -> List[GroupAtDB]:\n return [\n GroupAtDB(\n gid=user_id,\n name=\"my primary group\",\n description=\"it is primary\",\n type=GroupType.PRIMARY,\n ),\n GroupAtDB(\n gid=randint(10001, 15000),\n name=\"all group\",\n description=\"it is everyone\",\n type=GroupType.EVERYONE,\n ),\n GroupAtDB(\n gid=randint(15001, 20000),\n name=\"standard group\",\n description=\"it is standard\",\n type=GroupType.STANDARD,\n ),\n ]\n\n\n@pytest.fixture(scope=\"session\")\ndef registry_services() -> List[ServiceDockerData]:\n NUMBER_OF_SERVICES = 5\n return [\n ServiceDockerData(\n key=\"simcore/services/comp/my_comp_service\",\n version=f\"{v}.{randint(0,20)}.{randint(0,20)}\",\n type=ServiceType.COMPUTATIONAL,\n name=f\"my service {v}\",\n description=\"a sleeping service version {v}\",\n authors=[{\"name\": \"me\", \"email\": \"me@myself.com\"}],\n contact=\"me.myself@you.com\",\n inputs=[],\n outputs=[],\n )\n for v in range(NUMBER_OF_SERVICES)\n ]\n\n\n@pytest.fixture(scope=\"session\")\ndef db_services(\n registry_services: List[ServiceOut], user_groups: List[GroupAtDB]\n) -> List[ServiceAccessRightsAtDB]:\n return [\n ServiceAccessRightsAtDB(\n key=s.key,\n version=s.version,\n gid=user_groups[0].gid,\n execute_access=True,\n product_name=\"osparc\",\n )\n for s in registry_services\n ]\n\n\n@pytest.fixture()\nasync def director_mockup(\n loop, monkeypatch, registry_services: List[ServiceOut], app: FastAPI\n):\n async def return_list_services(user_id: int) -> List[ServiceOut]:\n return registry_services\n\n monkeypatch.setattr(services, \"list_services\", return_list_services)\n\n class FakeDirector:\n @staticmethod\n async def get(url: str):\n if url == \"/services\":\n return [s.dict(by_alias=True) for s in registry_services]\n if \"/service_extras/\" in url:\n return {\n \"build_date\": f\"{datetime.utcnow().isoformat(timespec='seconds')}Z\"\n }\n\n def fake_director_api(*args, **kwargs):\n return FakeDirector()\n\n monkeypatch.setattr(\n simcore_service_catalog.api.dependencies.director,\n \"get_director_api\",\n fake_director_api,\n )\n\n # Check mock\n from simcore_service_catalog.api.dependencies.director import get_director_api\n\n assert isinstance(get_director_api(), FakeDirector)\n yield\n\n\n@pytest.fixture()\nasync def db_mockup(\n loop,\n monkeypatch,\n app: FastAPI,\n user_groups: List[GroupAtDB],\n db_services: List[ServiceAccessRightsAtDB],\n):\n async def 
return_list_user_groups(self, user_id: int) -> List[GroupAtDB]:\n        return user_groups\n\n    async def return_gid_from_email(*args, **kwargs) -> Optional[PositiveInt]:\n        return user_groups[0].gid\n\n    monkeypatch.setattr(GroupsRepository, \"list_user_groups\", return_list_user_groups)\n    monkeypatch.setattr(\n        GroupsRepository, \"get_user_gid_from_email\", return_gid_from_email\n    )\n\n\nasync def test_director_mockup(\n    director_mockup, app: FastAPI, registry_services: List[ServiceOut], user_id: int\n):\n    assert await services.list_services(user_id) == registry_services\n\n\n@pytest.mark.skip(\n    reason=\"Not ready, dependency injection does not work, using monkeypatch. still issue with setting up database\"\n)\nasync def test_list_services(\n    director_mockup, db_mockup, app: FastAPI, client: TestClient, user_id: int\n):\n    await asyncio.sleep(10)\n\n    url = URL(\"/v0/services\").with_query(user_id=user_id)\n    response = client.get(str(url))\n    assert response.status_code == 200\n    data = response.json()\n","sub_path":"services/catalog/tests/unit/with_dbs/test_entrypoint_services.py","file_name":"test_entrypoint_services.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"255497881","text":"from typing import List\n\n\nclass Solution:\n    def numIslands(self, grid: List[List[str]]) -> int:\n        if not grid:\n            return 0\n\n        count = 0\n        m, n = len(grid), len(grid[0])\n        visited = [[False for _ in range(n)] for _ in range(m)]\n\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == '1' and not visited[i][j]:\n                    count += 1\n\n                    self._dfs(grid, i, j, visited)\n\n        return count\n\n    def _dfs(self, grid, row, column, visited):\n        m, n = len(grid), len(grid[0])\n\n        if row < 0 or row >= m or column < 0 or column >= n \\\n                or grid[row][column] != '1' or visited[row][column]:\n            return\n\n        visited[row][column] = True\n\n        self._dfs(grid, row - 1, column, visited)\n        self._dfs(grid, row + 1, column, visited)\n        self._dfs(grid, row, column - 1, visited)\n        self._dfs(grid, row, column + 1, visited)\n","sub_path":"leetcode/algorithms/p0200_number_of_islands_1.py","file_name":"p0200_number_of_islands_1.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"39696530","text":"# -*- coding: utf-8 -*-\nimport xlwt\nimport json\nimport sys\n\n\n#sys.setdefaultencoding('utf-8')\n\nfile = xlwt.Workbook(encoding='utf-8')\n\n# overwrite_default => True\ntable = file.add_sheet('Number.txt', cell_overwrite_ok=False)\n\ntxt = open('number.txt').read()\njson_txt = json.loads(txt)\n\nfor x in range(len(json_txt)):\n    for y in range(len(json_txt[x])):\n        table.write(x, y, json_txt[x][y])\n\nfile.save('numbers.xls')\n\n\n\n","sub_path":"0830/xlwtDemo.py","file_name":"xlwtDemo.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"509125095","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n# НУЖЕН БИЗНЕС-АКК\n\ndef get_tasks_off_all_pages(page_count):\n    website = 'https://www.fl.ru/projects/?page={}&kind=5'  # FL.ru https://freelance.ru/projects/\n    all_pages = []\n    page = 1\n    while True:\n        print(page)\n        response = requests.get(website.format(page))\n        soup = BeautifulSoup(response.text, 'lxml')\n        if not soup.find('div', id='projects-list') or page == page_count:\n            return all_pages\n        else:\n            all_pages.append(soup)\n        page += 1\n\n\ndef parse_tasks(page):\n    
parsed_tasks = []\n tasks = page.find_all('div', class_='b-post')\n for task in tasks:\n task_data = dict()\n task_data['task_name'] = task.find('a', class_='b-post__link').text\n try:\n task_data['task_value'] = task.find('div', class_='b-post__price_bold').text\n except AttributeError:\n task_data['task_value'] = None\n task_data['task_summary'] = task.find('p').text # FL.ru (чё-то странное, потом)\n task_data['task_date'] = task.find('a').find_previous('div').text\n task_data['task_link'] = 'https://www.fl.ru/projects{}'.format(task.a['href'])\n parsed_tasks.append(task_data)\n return parsed_tasks\n\n\ndef run(page_count):\n parsed_tasks = []\n all_pages = get_tasks_off_all_pages(page_count)\n for page in all_pages:\n parsed_tasks.extend(parse_tasks(page))\n print(parsed_tasks)\n return parsed_tasks\n\n\nif __name__ == '__main__':\n run()\n\n","sub_path":"unstable/freelance_ru.py","file_name":"freelance_ru.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"103851623","text":"# Solution for Electronics Shop\n\n\ndef get_money_spent(b, keyboards, drives):\n keyboards = sorted(keyboards, reverse=True)\n drives = sorted(drives)\n money_spent = -1\n\n for k in keyboards:\n for d in drives:\n sum_kd = k + d\n if sum_kd > money_spent:\n if sum_kd <= b:\n money_spent = sum_kd\n else:\n break\n\n return money_spent\n\n\nif __name__ == '__main__':\n b = int(input().split()[0]) # b = budget, n and m are not required\n keyboards = list(map(int, input().split()))\n drives = list(map(int, input().split()))\n\n money = get_money_spent(b, keyboards, drives)\n print(money)\n","sub_path":"Python/Algorithms/Implementation/Electronics Shop.py","file_name":"Electronics Shop.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"403540498","text":"'''\nModified in 4/4/2021 1:18\nCollect the efficiency result based on Google Net and DeepFool.\nModification:\n Adding scaling factor when modifying the CNN FC layer.\n Checking the DeepFool performance\n\n'''\n\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.optim as optim\nimport torch.utils.data as data_utils\nfrom torch.autograd import Variable\nimport math\nimport torchvision.models as models\nfrom PIL import Image\nfrom deepfool import deepfool\nimport os\nimport csv\nfrom ModelModify import ModifyModel,ModifyModelVGG, ModifyModelScale,ModifyModelVGGScale\nfrom DeepFoolC import deepfoolC\nfrom DeepFoolB import deepfoolB\nimport HeatMapForgradientOrPerturbation as HM\n#from HeatMapForgradientOrPerturbation import heatmap\nimport cv2\nfrom scipy.misc import imread, imsave, imresize\n\nScale=20\n\n\nnet = models.vgg19(pretrained=True).cuda()\n#net2 = models.resnet18(pretrained=True)\n# Switch to evaluation mode\nnet.eval()\n'''\nnet2 = models.resnet34(pretrained=True)\n#net2 = models.resnet18(pretrained=True)\n# Switch to evaluation mode\nnet2.eval()\n'''\n\nnet2 = models.vgg19(pretrained=True)\nnet2= ModifyModelVGGScale(net2,Scale)\nnet2.cuda()\nnet2.eval()\n\n\n#\nAT=\"DeepFool\"\nCSVfilenameTime ='VGG19'+'_'+ AT +\"_\"+str(Scale)+\"_MethodB\"+'_Result.csv'\nfileobjT = open(CSVfilenameTime, 'w', newline='') # \n# fileobj.write('\\xEF\\xBB\\xBF')#\n# \nwriterT = csv.writer(fileobjT) # csv.writer(fileobj)writer 
writer\nValueTime=['Original ATT,GT','Original ATT, ATT','On Fake ATT, GT','On Fake ATT,ATT','On Fake ATT, Def','ACC','ACC_ALL','DL2R','DL2G','DL2B','DLIR','DLIG','DLIB','AL2R','AL2G','AL2B','ALIR','ALIG','ALIB']\nwriterT.writerow(ValueTime)\nCountT=0 #deepfool\nCountTotal=0 #\nCountDF_EFF=0 #deepfool \nCountDF_EFF_Def=0 #DeepFool\n\n\nFolder='D:/workspace/imagenet2012B/test/'\nFileName='ILSVRC2012_test'\nAppend='.JPEG' #00099990\nError=[]\nfor i in range(1,100000):\n Index=str(i+1)\n K=len(Index)\n IndexFull='_'\n for j in range(8-K):\n IndexFull=IndexFull+str(0)\n IndexFull=IndexFull+Index\n FNAME=Folder+FileName+IndexFull+Append\n #im_orig = Image.open('test_im2.jpg')\n\n CC = cv2.imread(FNAME)\n #print(im_orig.size)\n a, b, c = CC.shape\n #print(CC.shape, c)\n\n image = imread(FNAME)\n if (len(image.shape) < 3):\n #print('gray')\n continue\n if c!=3:\n continue\n\n CountTotal=CountTotal+1\n\n #im_orig = Image.open('test_im2.jpg')\n #im_orig = Image.open('ILSVRC2012_test_00000002.JPEG')\n mean = [ 0.485, 0.456, 0.406 ]\n std = [ 0.229, 0.224, 0.225 ]\n #im_origB = Image.open('ILSVRC2012_test_00000002.JPEG')\n im_orig = Image.open(FNAME)\n im_origB = Image.open(FNAME)\n\n # Remove the mean\n im = transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean = mean,\n std = std)])(im_orig)\n imB = transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean = mean,\n std = std)])(im_origB)\n #r, loop_i, label_orig, label_pert, pert_image = deepfool(im, net)\n '''\n f_image = net.forward(Variable(im[None, :, :, :], requires_grad=True)).data.cpu().numpy().flatten()\n I = (np.array(f_image)).flatten().argsort()[::-1]\n Originallabel = I[0]\n '''\n r, loop_i, label_orig, label_pert, Originallabel,Protected,pert_image,TheGradient = deepfoolC(im, net2)\n rB, loop_iB, label_origB, label_pertB, pert_imageB,TheGradientB = deepfoolB(imB, net)\n print(\"original: \", Originallabel)\n print(\"original: \", Protected)\n #summary result\n print(\"Original Attack Result: \", label_pertB, \" Original Label in original Attack: \",label_origB)\n print(\" Attack Result In Fake: \", label_pert, \" Original Label in Attack With Fake: \", Originallabel,\" Protected By Fake: \",Protected )\n Acc=0\n AccB=0\n if label_pertB!=label_origB:\n print(\"DeepFool Works!\")\n CountDF_EFF=CountDF_EFF+1\n if label_origB==Protected:\n CountDF_EFF_Def=CountDF_EFF_Def+1\n Acc=1\n if label_origB == Protected:\n CountT=CountT+1\n AccB=1\n print(\"Efficiency: ===>\", int(CountT*100/CountTotal))\n #L2 and Linfinity\n\n # get the perturbation and the gradient based on the defence one\n RA, RB, RC = HM.get2Dfrom3D(224, 224, r) # perturbation\n # get the perturbation and the gradient based on the original version\n BRA, BRB, BRC = HM.get2Dfrom3D(224, 224, rB) # perturbation\n\n #defence\n L2RD=HM.L2NormValue(RA)\n L2GD = HM.L2NormValue(RB)\n L2BD = HM.L2NormValue(RC)\n\n LIRD = HM.L_Inf(RA)\n LIGD = HM.L_Inf(RB)\n LIBD = HM.L_Inf(RC)\n\n #original\n\n L2RA = HM.L2NormValue(BRA)\n L2GA = HM.L2NormValue(BRB)\n L2BA = HM.L2NormValue(BRC)\n\n LIRA = HM.L_Inf(BRA)\n LIGA = HM.L_Inf(BRB)\n LIBA = HM.L_Inf(BRC)\n '''\n ValueTime = ['Original ATT,GT', 'Original ATT, ATT', 'On Fake ATT, GT', 'On Fake ATT,ATT', 'On Fake ATT, Def',\n 'ACC', 'ACC_ALL', 'DL2R', 'DL2G', 'DL2B', 'DLIR', 'DLIG', 'DLIB', 'AL2R', 'AL2G', 'AL2B', 'ALIR',\n 'ALIG', 'ALIB']'''\n ValueTime = [label_origB, label_pertB, 
Originallabel,label_pert, Protected,\n Acc, AccB, L2RD, L2GD, L2BD, LIRD, LIGD, LIBD,\n L2RA, L2GA, L2BA, LIRA, LIGA, LIBA]\n writerT.writerow(ValueTime)\n\n\n\nprint(\"Final Result: \",CountT,CountTotal,CountDF_EFF,CountDF_EFF_Def)\nValueTime=[CountT,CountTotal,CountDF_EFF,CountDF_EFF_Def,int(CountT*100/CountTotal)]\nwriterT.writerow(ValueTime)\nexit()\n\ndef clip_tensor(A, minv, maxv):\n A = torch.max(A, minv*torch.ones(A.shape))\n A = torch.min(A, maxv*torch.ones(A.shape))\n return A\n\nclip = lambda x: clip_tensor(x, 0, 255)\n'''\ntf = transforms.Compose([transforms.Normalize(mean=[0, 0, 0], std=map(lambda x: 1 / x, std)),\n transforms.Normalize(mean=map(lambda x: -x, mean), std=[1, 1, 1]),\n transforms.Lambda(clip),\n transforms.ToPILImage(),\n transforms.CenterCrop(224)])\n'''\ntf = transforms.Compose([transforms.Normalize(mean=[0, 0, 0], std=list(map(lambda x: 1 / x, std))),\n transforms.Normalize(mean=list(map(lambda x: -x, mean)), std=[1, 1, 1]),\n transforms.Lambda(clip),\n transforms.ToPILImage(),\n transforms.CenterCrop(224)])\n\nplt.figure()\n\nplt.xticks([])\nplt.yticks([])\n\nplt.imshow(tf(pert_image.cpu()[0]))\nstr_label_pert=\"Perturbed based on the fake CNN\"\nplt.title(str_label_pert)\nplt.show()\n\n\n# get the perturbation and the gradient based on the defence one\nGA,GB,GC=HM.get2Dfrom3D(224,224,TheGradient) #gradient\nRA,RB,RC=HM.get2Dfrom3D(224,224,r) # perturbation\n# get the perturbation and the gradient based on the original version\nBGA,BGB,BGC=HM.get2Dfrom3D(224,224,TheGradientB) #gradient\nBRA,BRB,BRC=HM.get2Dfrom3D(224,224,rB) # perturbation\n\n#\ntitle=\"Perturbation Compare A, Positive 4\"\nHM.CVShowCompareFB(RA,BRA,title)\ntitle=\"Perturbation Compare A, Negtive 4\"\nHM.CVShowCompareGB(RA,BRA,title)\nprint(\"L2\")\nprint(HM.L2NormValue(RA))\nprint(HM.L2NormValue(BRA))\ntitle=\"Perturbation Compare A, Positive 0\"\nHM.CVShowCompareF(RA,BRA,title)\ntitle=\"Perturbation Compare A, Negtive 0\"\nHM.CVShowCompareG(RA,BRA,title)\n\ntitle=\"Perturbation Compare A, Positive 2\"\nHM.CVShowCompareFC(RA,BRA,title)\ntitle=\"Perturbation Compare A, Negtive 2\"\nHM.CVShowCompareGC(RA,BRA,title)\n\n\ntitle=\"Perturbation Compare B, Positive\"\nHM.CVShowCompareF(RB,BRB,title)\ntitle=\"Perturbation Compare B, Negtive\"\nHM.CVShowCompareG(RB,BRB,title)\n\ntitle=\"Perturbation Compare C, Positive\"\nHM.CVShowCompareF(RC,BRC,title)\ntitle=\"Perturbation Compare C, Negtive\"\nHM.CVShowCompareG(RC,BRC,title)\n\ntitle=\"Gradient Compare A, Positive\"\nHM.CVShowCompareF(GA,BGA,title)\ntitle=\"Gradient Compare A, Negtive\"\nHM.CVShowCompareG(GA,BGA,title)\n\n\ntitle=\"Gradient Compare B, Positive\"\nHM.CVShowCompareF(GB,BGB,title)\ntitle=\"Gradient Compare B, Negtive\"\nHM.CVShowCompareG(GB,BGB,title)\n\n\ntitle=\"Gradient Compare C, Positive\"\nHM.CVShowCompareF(GC,BGC,title)\ntitle=\"Gradient Compare C, Negtive\"\nHM.CVShowCompareG(GC,BGC,title)\n\n\n\n#exit()\n\n#show the heatmap of the perturbation\n\n\nRA,RB,RC=HM.get2Dfrom3D(224,224,r)\nTitle=\"Perturbation Orginal A\"\nim, cbar = HM.heatmap(RA,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\nTitle=\"Perturbation B\"\nim, cbar = HM.heatmap(RB,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\nTitle=\"Perturbation C\"\nim, cbar = HM.heatmap(RC,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\n\n\nRA,RB,RC=HM.get2Dfrom3D(224,224,rB)\nTitle=\"Perturbation by Faked GradientA\"\nim, cbar = HM.heatmap(RA,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\nTitle=\"Perturbation B\"\nim, cbar = HM.heatmap(RB,Title,cmap=\"YlGn\", 
cbarlabel=\"Perturbation\")\nTitle=\"Perturbation C\"\nim, cbar = HM.heatmap(RC,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\n\nGA,GB,GC=HM.get2Dfrom3D(224,224,TheGradient)\n\nTitle=\"Gradient A\"\nim, cbar = HM.heatmap(GA,Title,cmap=\"YlGn\", cbarlabel=\"Gradient\")\nTitle=\"Gradient B\"\nim, cbar = HM.heatmap(GB,Title,cmap=\"YlGn\", cbarlabel=\"Perturbation\")\nTitle=\"Gradient C\"\nim, cbar = HM.heatmap(GC,Title,cmap=\"YlGn\", cbarlabel=\"Gradient\")\n","sub_path":"Code/MM21_FakeGradient_DeepFool_VGG.py","file_name":"MM21_FakeGradient_DeepFool_VGG.py","file_ext":"py","file_size_in_byte":9438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"316128651","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport cv2\nimport sys\nimport os\nfrom scipy import ndimage\nimport random\nfrom PIL import Image\n###########################################################################################\ntraining_index = './train.txt'\nnewlabel_index ='./newlabel.txt'\nclassnum=17\nmaxImageNum=1360\n#############################################################################################\n\ndef read_traing_list():\n\ttrain_image_dir = []\n\ttrain_label_dir = []\n\treader = open(training_index)\n\twhile 1:\n\t\tline = reader.readline()\n\t\ttmp = line.split(\" \")\n\t\tif not line:\n\t\t\tbreak\n\t\ttrain_image_dir.append(tmp[0])\n\t\ttrain_label_dir.append(tmp[1][0:-1])\n\t# print train_image_dir[1:maxImageNum]\n\t# print train_label_dir[1:maxImageNum]\n\treader.close()\n\treturn train_image_dir, train_label_dir\ndef distort_image():\n\ttrain_image_dir, train_label_dir = read_traing_list()\n\tlabel_reader = open(newlabel_index,\"w\")\n\tfor idx in range(len(train_image_dir)):\n\t\tprint(\"idx:\",idx)\n\t# ########## flip_left_rightand rotate ###################\n\t\timage_path = \"17flowers/jpg/\"+str(train_label_dir[idx])+\"/\"+str(train_image_dir[idx])\n\t\tim=Image.open(image_path)\n\t# \twidth,height=im.size\n\t# \t# print (image_path, height,width)\n\t# \tbox_left_up = (0, 0, 227, 227)\n\t# \tbox_right_up=(width-227,0,width,227)\n\t# \tbox_left_down=(0,height-227,227,height)\n\t# \tbox_right_down=(width-227,height-227,width,height)\n\t# \tcenter_x=width/2\n\t# \tcenter_y=height/2\n\t# \tbox_center=(center_x-227/2,center_y-227/2,center_x+227/2,center_y+227/2)\n\t# \tbox_left_up_img = im.crop(box_left_up)\n\t# \tbox_right_up_img = im.crop(box_right_up)\n\t# \tbox_left_down_img = im.crop(box_left_down)\n\t# \tbox_right_down_img = im.crop(box_right_down)\n\t# \tbox_center_img = im.crop(box_center)\n\t# \tbox_left_up_path = image_path[:-4]+\"_left_up.png\"\n\t# \tbox_right_up_path = image_path[:-4]+\"_right_up.png\"\n\t# \tbox_left_down_path = image_path[:-4]+\"_left_down.png\"\n\t# \tbox_right_down_path = image_path[:-4]+\"_right_down.png\"\n\t# \tbox_center_path = image_path[:-4]+\"_center.png\"\n\t# \tbox_left_up_img.save(box_left_up_path)\n\t# \tbox_right_up_img.save(box_right_up_path)\n\t# \tbox_left_down_img.save(box_left_down_path)\n\t# \tbox_right_down_img.save(box_right_down_path)\n\t# \tbox_center_img.save(box_center_path)\n\t# \tlabel_reader.write(box_left_up_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# \tlabel_reader.write(box_right_up_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# \tlabel_reader.write(box_left_down_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# \tlabel_reader.write(box_right_down_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# \tlabel_reader.write(box_center_path+\" 
\"+str(train_label_dir[idx])+\"\\n\")\n\t# \tos.system(\"rm \")\n\t# label_reader.close()\n\t\t# raw_input()\n\t\t# resize_img = im.resize((227, 227)) \n\t# \t#rotate_45 = im.rotate(45) \n\t# \tflip_horizon = im.transpose(Image.FLIP_LEFT_RIGHT) \n\t# \t#flip_vertical = im.transpose(Image.FLIP_TOP_BOTTOM)\n\t# \t#rotate_90 = im.transpose(Image.ROTATE_90) \n\t# \t#rotate_180 = im.transpose(Image.ROTATE_180) \n\t# \t#rotate_270 = im.transpose(Image.ROTATE_270) \n\t# \tflip_resize_path = image_path[:-4]+\"_0.png\"\n\t# \tresize_img.save(flip_resize_path)\n\t# \tlabel_reader.write(flip_resize_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# label_reader.close()\n\t############## change brightness #############\n\t# \timage_path = \"17flowers/jpg/\"+str(train_label_dir[idx])+\"/\"+str(train_image_dir[idx])\n\t# \timage_tmp = cv2.imread(image_path,cv2.IMREAD_COLOR) \n\t# \tbrightness_image=image_tmp\n\t# \tfor i in range(image_tmp.shape[0]):\n # \t\t\tfor j in range(image_tmp.shape[1]):\n # \t\t \t\tbrightness_image[i,j] = (255-image_tmp[i,j,0],255-image_tmp[i,j,1],255-image_tmp[i,j,2])\n # \t\tbrightness_image_path = image_path[:-4]+\"_4.png\"\n # \t\tcv2.imwrite(brightness_image_path,brightness_image) \n\t# \tlabel_reader.write(brightness_image_path+\" \"+str(train_label_dir[idx])+\"\\n\")\n\t# label_reader.close()\nif __name__==\"__main__\":\n distort_image()","sub_path":"data_argument.py","file_name":"data_argument.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"93155950","text":"#train.py\n#you can use python train.py --config ***/default = ../config/config.json\n#some imitate from https://github.com/baidu-research/NCRF/blob/master/wsi/bin/train.py\nimport tensorflow as tf\nimport dcrf\nimport data\nimport json\nimport logging\nimport argparse\nimport time\nimport pickle\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--config',default = '../config/config.json')\nargs = parser.parse_args()\nconfig = args.config\nf = open(config,'r')\nconfig = json.load(f)\nf.close()\n\n\ntrain_dataset = data.GetDataset(config['train'],config['batch_size']) #define dataset loader\nvalid_dataset = data.GetDataset(config['valid'],config['batch_size'])\n# for data,label in train_dataset:\n# print(data.numpy(),label.numpy())\n# print(train_dataset)\nmodel = dcrf.DCRF() #define our inference model\noptimizer = tf.keras.optimizers.SGD(learning_rate = config['lr'],\n momentum = config['momentum']) #define the optimizer\n\n# @tf.function() \ndef train(summary,summary_writer):\n# train_dataset = data.GetDataset('../Dataset/',4) \n time_now = time.time()\n# global train_dataset\n for i,(datas,labels) in enumerate(train_dataset):\n# print(datas)\n# sess = tf.compat.v1.Session()\n# sess.run(datas)\n# datas,labels = next(iter(train_dataset))\n# print(datas.numpy())\n with tf.GradientTape() as Tape:\n inference = model(datas)\n print(inference)\n probs = tf.sigmoid(inference)\n #here we get (batch_size,patch_size,1) represents the probility\n loss = -tf.reduce_sum(tf.math.log(probs))\n predict = tf.cast((probs>=0.5),tf.int32)\n grad = Tape.gradient(loss,model.trainable_variables)\n #print(model.trainable_variables[0])\n print(grad[0])\n #grad[0] = tf.cast(grad[0],tf.int32)\n optimizer.apply_gradients(zip(grad,model.trainable_variables))\n labels = tf.tile(tf.reshape(labels,(-1,1)),(1,inference.shape[1]))\n acc = tf.reduce_sum(tf.cast(tf.equal(labels,predict),tf.float32)) / 
(labels.shape[0]*labels.shape[1])\n# auc = tf.metrics.AUC()\n# auc.update_state(labels,predict)\n f = open(config['inference'] + 'epoch%d_batch_%d.npy'%(summary['epoch']+1,i+1),'wb')\n pickle.dump(probs.numpy(),f)\n f.close()\n time_spent = time.time() - time_now\n time_now = time.time()\n logging.info(\n '{}, Epoch : {}, Step : {}, Training Loss : {:.5f}, '\n 'Training Acc : {:.3f}, Run Time : {:.2f}'\n .format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\"), summary['epoch'] + 1,\n summary['step'] + 1, loss.numpy(), acc.numpy(), time_spent))\n\n \n summary['step'] += 1\n if summary['step'] % config['log_every'] == 0:\n with summary_writer.as_default():\n tf.summary.scalar('train/loss', loss.numpy(), summary['step'])\n tf.summary.scalar('train/acc', acc.numpy(), summary['step'])\n# tf.summary.scalar('\n\n summary['epoch'] += 1\n return summary\n\n# @tf.function()\ndef valid(summary):\n loss_sum = 0\n acc_sum = 0\n for i,(datas,labels) in enumerate(valid_dataset):\n #when do valid,we should return the loss and acc until one epoch ends\n inference = model(datas)\n #print(inference.shape)\n probs = tf.sigmoid(inference)\n predict = tf.cast((probs>=0.5),tf.int32)\n print(predict.shape)\n loss = -tf.reduce_sum(tf.math.log(probs))\n #labels = tf.tile(tf.reshape(labels,(-1,1)),(1,inference.shape[0]))\n acc = tf.reduce_sum(tf.cast(tf.equal(labels,predict),tf.float32)) / (labels.shape[0]*labels.shape[1])\n loss_sum += loss.numpy()\n acc_sum += acc.numpy()\n \n summary['loss'] = loss_sum / (i+1)\n summary['acc'] = acc_sum / (i+1)\n return summary\n\ndef main():\n #we should calculate the acc and auc and froc of the model,each epoch we do valid on validation set\n logging.basicConfig(level=logging.INFO)\n try:\n train_log_dir = config['train_log']\n train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n# test_summary_writer = tf.summary.create_file_writer(test_log_dir)\n summary_train = {'epoch': 0, 'step': 0}\n summary_valid = {'loss': float('inf'), 'acc': 0}\n \n for eps in range(config['epochs']):\n #f = open(config['inference'] + 'epoch%d.npy'%eps,'wb')\n summary_train = train(summary_train,train_summary_writer)\n #f.close()\n model.save_weights(config['weight_path'])\n time_now = time.time()\n summary_valid = valid(summary_valid)\n time_spent = time.time() - time_now\n\n logging.info(\n '{}, Epoch : {}, Step : {}, Validation Loss : {:.5f}, '\n 'Validation Acc : {:.3f}, Run Time : {:.2f}'\n .format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\"), summary_train['epoch'],\n summary_train['step'], summary_valid['loss'],\n summary_valid['acc'], time_spent))\n with train_summary_writer.as_default():\n tf.summary.scalar('valid/loss', summary_valid['loss'], step=eps)\n tf.summary.scalar('valid/accuracy', summary_valid['acc'], step=eps)\n except KeyboardInterrupt:\n model.save_weights(config['weight_path']) #if ctrl + c ,save model_weights\n\nif __name__ == '__main__':\n main()\n","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"47128122","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'soundcloud.views.index'),\n url(r'^json/load_tracks$', 'soundcloud.views.load_tracks_json', name='load_tracks'),\n url(r'^json/track$', 'soundcloud.views.get_track_json', name='get_track'),\n url(r'^json/like$', 'soundcloud.views.like_dislike_json', {'liked': True,}, 
name='like'),\n url(r'^json/dislike$', 'soundcloud.views.like_dislike_json', {'liked': False,}, name='dislike'),\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"soundcloud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"392465498","text":"import os\nimport requests\nimport argparse\nfrom dotenv import load_dotenv\n\nACCESS_TOKEN = os.getenv('ACCESS_TOKEN')\nACCESS_TOKEN = '4075e57d5492c7c0fcb87ddd5b555e03e4da4342'\n\n\ndef check_url_exists(url):\n requests.get(url).raise_for_status()\n\n\ndef normalize_bitlink(url):\n \"\"\"Возвращает склеенный domain + id от битлинка.\"\"\"\n url_domain = url.split('/')[-2]\n url_id = url.split('/')[-1]\n return f'{url_domain}/{url_id}'\n\n\ndef create_short_link(token, long_url):\n check_url_exists(long_url)\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n json_settings = {\n \"long_url\": f\"{long_url}\"\n }\n response = requests.post(\"https://api-ssl.bitly.com/v4/bitlinks\", json=json_settings, headers=headers)\n return response.json()['link']\n\n\ndef count_clicks_on_link(token, short_url):\n url_for_response = normalize_bitlink(short_url)\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n params = {\n 'unit': 'day',\n 'units': '-1'\n }\n response = requests.get(f\"https://api-ssl.bitly.com/v4/bitlinks/{url_for_response}/clicks/summary\", headers=headers,\n params=params)\n response.raise_for_status()\n return response.json()['total_clicks']\n\n\ndef is_bitlink(url, token):\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n url_for_response = normalize_bitlink(url)\n response = requests.get(f'https://api-ssl.bitly.com/v4/bitlinks/{url_for_response}', headers=headers)\n try:\n response.raise_for_status()\n return True\n except requests.exceptions.HTTPError:\n return False\n\n\nif __name__ == '__main__':\n load_dotenv()\n parser = argparse.ArgumentParser(\n description='Создаёт короткую ссылку или возвращает количество переходов по короткой ссылке')\n parser.add_argument('url', help='Которкая или длинная ссылка')\n args = parser.parse_args()\n url = args.url\n\n if is_bitlink(url, ACCESS_TOKEN):\n try:\n total_clicks = count_clicks_on_link(ACCESS_TOKEN, url)\n print('Количество переходов по ссылке', url, '-', total_clicks)\n except requests.exceptions.HTTPError:\n print(\"Введите ссылку ещё раз\")\n else:\n try:\n short_url = create_short_link(ACCESS_TOKEN, url)\n print('Ваша короткая ссылка -', short_url)\n except requests.exceptions.HTTPError:\n print(\"Введите ссылку ещё раз\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"562310","text":"\"\"\" Assignment Dataset Class\n Author: Nicholas Noochla-or\n Date: 7/22/2019\n\n Enhancements in this release:\n - Create an object of type TempDataset(). 
\n - Load temperature data into that object\n - Name our dataset and ask for its name\n - Ask for the number of temperature samples in the dataset\n - Ask for the number of temperature samples within a certain range\n - Ask for the average temperature for a particular day of the week and hour of the day\n - Get the minimum, average and maximum temperature as a tuple\n - Find out how many objects of TempDataset() have been created\n\n\"\"\"\n\n\ndef main():\n\n current_set = TempDataset()\n\n print(\"First test of get_num_objects: \", end='')\n\n if TempDataset.get_num_objects() == 1:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n second_set = TempDataset()\n\n print(\"Second test of get_num_objects: \", end='')\n\n if TempDataset.get_num_objects() == 2:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing get_name and set_name: \")\n print(\"- Default Name:\", end='')\n\n if current_set.get_name() == \"Unnamed\":\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"- Try setting a name too short: \", end='')\n\n if current_set.set_name(\"to\"):\n print(\"Fail\")\n elif not current_set.get_name() == \"Unnamed\":\n print(\"Fail\")\n else:\n print(\"Success\")\n\n print(\"- Try setting a name too long: \", end='')\n\n if current_set.set_name(\"supercalifragilisticexpialidocious\"):\n print(\"Fail\")\n elif not current_set.get_name() == \"Unnamed\":\n print(\"Fail\")\n else:\n print(\"Success\")\n\n print(\"- Try setting a name just right: \", end='')\n\n if not current_set.set_name(\"New Name\"):\n print(\"Fail\")\n elif current_set.get_name() == \"New Name\":\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"- Make sure we didn't touch the other object: \", end='')\n if second_set.get_name() == \"Unnamed\":\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing get_avg_temperature_day_time: \", end='')\n if current_set.get_avg_temperature_day_time(None, 0, 0) is None:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing get_num_temps: \", end='')\n if current_set.get_num_temps(None, 0, 0) is None:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing get_loaded_temps: \", end='')\n if current_set.get_loaded_temps() is None:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing get_summary_statistics: \", end='')\n if current_set.get_summary_statistics(None) is None:\n print(\"Success\")\n else:\n print(\"Fail\")\n\n print(\"Testing process_file: \", end='')\n if current_set.process_file(None) is False:\n print(\"Success\")\n else:\n print(\"Fail\")\n\nclass TempDataset:\n \"\"\"\n a class used to represent Temperature data\n\n Attributes\n _name: string\n name of the data set object\n _data_set: \n tuple of data\n \"\"\"\n\n __counter = int(0)\n \n def __init__(self):\n self._name = \"Unnamed\"\n self._data_set = None\n TempDataset.__counter += 1\n\n \n def process_file(self, filename):\n return False\n\n def get_summary_statistics(self, active_sensors):\n if(self._data_set == None):\n return None\n else:\n return (0, 0, 0)\n\n\n def get_avg_temperature_day_time(self, active_sensors, day, time):\n if(self._data_set == None):\n return None\n else:\n return 0\n\n def get_num_temps(self, active_sensors, lower_bound, upper_bound):\n if(self._data_set == None):\n return None\n else:\n return 0\n\n def get_loaded_temps(self):\n if(self._data_set == None):\n return None\n else:\n return 0\n\n # @property\n def get_name(self):\n return self._name\n \n # @get_name.setter\n def set_name(self, new_name):\n 
if(len(new_name) < 3 or len(new_name) > 21):\n return False\n else:\n self._name = new_name\n return True\n\n @classmethod\n def get_num_objects(cls):\n return cls.__counter\n \n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\n\nFirst test of get_num_objects: Success\nSecond test of get_num_objects: Success\nTesting get_name and set_name:\n- Default Name:Success\n- Try setting a name too short: Success\n- Try setting a name too long: Success\n- Try setting a name just right: Fail\n- Try setting a name just right: Success\n- Make sure we didn't touch the other object: Success\nTesting get_avg_temperature_day_time: Success\nTesting get_num_temps: Success\nTesting get_loaded_temps: Success\nTesting get_summary_statistics: Success\nTesting process_file: Success\n\n\"\"\"","sub_path":"cs21A/003/003-DataSetClass.py","file_name":"003-DataSetClass.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"73216131","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.profiles import ResourceType\n\n\ndef exists(cmd, client, timeout=None):\n from azure.core.exceptions import HttpResponseError\n try:\n client.get_file_system_properties(timeout=timeout)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)\n _dont_fail_on_exist(ex, StorageErrorCode.container_not_found)\n return False\n","sub_path":"src/azure-cli/azure/cli/command_modules/storage/operations/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"92996357","text":"#! /usr/bin/env python3\n\n#\n# This Helferlein searches recursively for a string in all files of a directory\n#\n\nimport os\nimport sys\n\n\ndef main():\n args = str.join(\" \", sys.argv[1:])\n os.system(\"grep -rnw . 
-e \\\"\" + args + \"\\\" --color=always\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"568893791","text":"import plotly.express as px\nimport config.auth as config\n\n\ndef get_fig_map(df):\n \"\"\"\n get the fig map layout\n :param df:\n :return:\n \"\"\"\n fig = px.scatter_mapbox(df,\n lon='lon',\n lat='lat',\n size='session_count',\n color='session_count',\n title='test title',\n hover_name='site_name',\n hover_data=['site_code', 'site_name', 'session_count'],\n color_continuous_scale=px.colors.carto.Temps\n )\n\n fig.update_layout(\n mapbox={'accesstoken': config.MAP_BOX_TOKEN, 'center': {'lat': 48.853499, 'lon': 2.3493147}, 'zoom': 11},\n margin={'l': 0, 'r': 0, 't': 0, 'b': 0})\n return fig\n","sub_path":"utils/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"338829171","text":"# -*- coding: utf8 -*-\n# ---------------------------------------------------------------------------\n# Introdução a Programação de Computadores - IPC\n# Universidade do Estado do Amazonas - UEA\n# Prof. Jucimar Jr\n#\n# Adham Lucas da Silva Oliveira 1715310059\n# Gabriel Barroso da Silva Lima 1715310011\n# Guilherme Silva de Oliveira 1715310034\n# Natália Cavalcante Xavier 1715310021\n# Tiago Ferreira Aranha\t 1715310047\n#\n# Dada uma seqüência de n números, imprimi-la na ordem inversa à da leitura. \n#---------------------------------------------------------------------------\n\nn = int(input(\"digite a quantidade de números: \"))\nlista = []\n\nfor i in range (0, n):\n \n numero = int(input(\"digite um número: \"))\n lista.append(numero)\n \nprint(lista[::-1])","sub_path":"lista06/lista06_lista03_questao01.py","file_name":"lista06_lista03_questao01.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"40631222","text":"import multiprocessing as mp \nimport os \nimport time\n\na = 1\n\ndef th1():\n global a\n a = 1000\n print(\"th1 a\",a)\n print(os.getppid(),\"----\",os.getpid())\n print(\"吃饭早饭\")\n time.sleep(1)\n print(\"吃饭午饭\")\n time.sleep(2)\n print(\"吃饭晚饭\")\n time.sleep(3)\n\ndef th2():\n print(os.getppid(),\"----\",os.getpid())\n print(\"睡觉午觉\")\n time.sleep(2)\n print(\"睡觉\")\n time.sleep(3)\n print(\"th2 a:\",a)\n\ndef th3():\n print(os.getppid(),\"----\",os.getpid())\n print(\"打豆豆\")\n time.sleep(2)\n print(\"打豆豆\")\n time.sleep(2)\n\n#创建三个新的进程,关联上面三个事件\n#生产进程对象分别表示这三个进程\np1 = mp.Process(target = th1)\np2 = mp.Process(target = th2)\np3 = mp.Process(target = th3)\n\n#通过生成的进程对象启动子进程\n#子进程有父进程的代码段 只不过只执行对应的函数\np1.start()\np2.start()\np3.start()\n\nprint(\"Parent PID\",os.getpid())\n\n#阻塞等待回收进程\np1.join()\np2.join()\np3.join()\n\nprint(\"++++++++++++++++++++++++\")\n# th1()\n# th2()\n# th3()","sub_path":"AB/python网络/day5/process1.py","file_name":"process1.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"367975530","text":"from configparser import ConfigParser, Error\nfrom .. 
import BaseCredentialStorage, lazy_property\n\n\nclass IniCredentialStorage(BaseCredentialStorage):\n\n def __init__(self, filename):\n self.filename = filename\n\n @lazy_property\n def parser(self):\n parser = ConfigParser(allow_no_value=True)\n try:\n f = open(self.filename, \"r\")\n except IOError:\n return parser\n\n with f:\n try:\n parser.readfp(f)\n except Error:\n pass\n\n return parser\n\n def load(self, name):\n if not self.parser.has_section(name):\n self.parser.add_section(name)\n return dict(self.parser[name].items())\n\n def save(self, name, credential):\n self.parser[name] = credential\n with open(self.filename, 'w') as f:\n self.parser.write(f)\n","sub_path":"ix/credential/storages/ini.py","file_name":"ini.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"546110773","text":"import torch.nn as nn\nimport torchtext\nimport torch.optim as optim\nimport time\nfrom torchtext.data import get_tokenizer\nfrom torchtext.experimental.datasets import IMDB\nfrom torch.utils.data import random_split, DataLoader\nfrom nlp.w2v.Network.w2vDense import Sentiment\nfrom nlp.w2v.utils import *\n\n# Prepare Data\nvocab = IMDB(data_select='train')[0].get_vocab()\nglove_vocab = torchtext.vocab.Vocab(counter=vocab.freqs,\n max_size=MAX_VOCAB_SIZE,\n min_freq=MIN_FREQ,\n vectors=torchtext.vocab.GloVe(name='6B', dim=W2V_DIM))\n\ntokenizer = get_tokenizer('spacy', language='en_core_web_sm')\ntrain_set, test_set = IMDB(tokenizer=tokenizer, vocab=glove_vocab)\nvocab = train_set.get_vocab()\n\ngenerator = torch.Generator().manual_seed(SEED)\ntrain_num = int(len(train_set) * 0.8)\nvalid_num = len(train_set) - train_num\ntrain_set, valid_set = random_split(train_set, lengths=[train_num, valid_num], generator=generator)\npad_id = vocab['']\n\n# Zero Padding\ndef pad_trim(data):\n ''' Pads or trims the batch of input data.\n\n Arguments:\n data (torch.Tensor): input batch\n Returns:\n new_input (torch.Tensor): padded/trimmed input\n labels (torch.Tensor): batch of output target labels\n '''\n data = list(zip(*data))\n # Extract target output labels\n labels = torch.tensor(data[0]).float().to(DEVICE)\n # Extract input data\n inputs = data[1]\n\n # Extract only the part of the input up to the MAX_SEQ_LEN point\n # if input sample contains more than MAX_SEQ_LEN. 
If not then\n # select entire sample and append until the length of the\n # sequence is MAX_SEQ_LEN\n new_input = torch.stack([torch.cat((input[:MAX_SEQ_LEN],\n torch.tensor([pad_id] * max(0, MAX_SEQ_LEN - len(input))).long()))\n for input in inputs])\n\n return new_input, labels\n\ntrain_loader = DataLoader(train_set, batch_size=BATCH_SIZE, collate_fn=pad_trim)\nvalid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE, collate_fn=pad_trim)\ntest_loader = DataLoader(test_set, batch_size=BATCH_SIZE, collate_fn=pad_trim)\n\n# Define Model\nmodel = Sentiment(vocab).to(DEVICE)\ncriterion = nn.BCELoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\n# Training\nepochs = 10\nfor e in range(epochs):\n running_loss = 0\n start = time.time()\n for input, label in train_loader:\n if input.shape[0] != BATCH_SIZE:\n continue\n\n input, label = input.to(DEVICE), label.to(DEVICE)\n\n optimizer.zero_grad()\n output = model(input).squeeze(1)\n loss = criterion(output, label)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n else:\n test_loss = 0\n eval_acc = 0\n with torch.no_grad():\n for input, labels in valid_loader:\n if input.shape[0] != BATCH_SIZE:\n continue\n input, labels = input.to(DEVICE), labels.to(DEVICE)\n output = model(input).squeeze(1)\n\n test_loss += criterion(output, labels)\n eval_acc += calculate_acc(output, labels)\n\n print(\"==============================================\")\n print(f\"Device = {DEVICE}; Elapsed time {(time.time() - start):.3f} seconds\")\n print(f'Training loss: {running_loss / len(train_loader)}')\n print(f'Validation loss: {test_loss / len(valid_loader)}')\n print(f'Correct Rate: {eval_acc / len(valid_loader)}')\n\n# Test\ntest_loss = 0\neval_acc = 0\nstart = time.time()\nwith torch.no_grad():\n for input, labels in test_loader:\n if input.shape[0] != BATCH_SIZE:\n continue\n input, labels = input.to(DEVICE), labels.to(DEVICE)\n output = model(input).squeeze(1)\n test_loss += criterion(output, labels)\n eval_acc += calculate_acc(output, labels)\nprint(\"==============================================\")\nprint(f\"Device = {DEVICE}; Elapsed time {(time.time() - start):.3f} seconds\")\nprint(f'Test loss: {test_loss / len(test_loader)}')\nprint(f'Correct Rate: {eval_acc / len(test_loader)}')","sub_path":"nlp/w2v/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"496479216","text":"import tensorflow as tf\nimport argparse\nimport os\nfrom word_rnn import WordRNN\nfrom data_utils import build_word_dict, build_word_dataset, batch_iter, download_dbpedia\nimport time\n\nNUM_CLASS = 2\nBATCH_SIZE = 256\nNUM_EPOCHS = 40\nMAX_DOCUMENT_LEN = 100\nnum_train = 5816\nnum_test = 415\n\ndef train(train_x, train_y, test_x, test_y, vocabulary_size, args):\n with tf.Session() as sess:\n model = WordRNN(vocabulary_size, MAX_DOCUMENT_LEN, NUM_CLASS) # vocabulary_size: 268970\n\n # Define training procedure\n global_step = tf.Variable(0, trainable=False)\n params = tf.trainable_variables()\n gradients = tf.gradients(model.loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n optimizer = tf.train.AdamOptimizer(model.lr)\n train_op = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)\n\n # optimizer = tf.train.AdamOptimizer(model.lr)\n # train_op = optimizer.apply_gradients(zip(gradients, params), global_step=global_step)\n\n # # Summary\n # loss_summary = 
tf.summary.scalar(\"loss\", model.loss)\n # summary_op = tf.summary.merge_all()\n # summary_writer = tf.summary.FileWriter(args.summary_dir, sess.graph)\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n # Load variables from pre-trained model\n if not args.pre_trained == \"none\":\n pre_trained_variables = [v for v in tf.global_variables()\n if (v.name.startswith(\"embedding\") or v.name.startswith(\"birnn\")) and \"Adam\" not in v.name]\n saver = tf.train.Saver(pre_trained_variables)\n ckpt = tf.train.get_checkpoint_state(os.path.join(args.restore_path, \"model\"))\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"11111111111--restore weights from {}\".format(ckpt.model_checkpoint_path))\n\n def train_step(batch_x, batch_y):\n feed_dict = {\n model.x: batch_x,\n model.y: batch_y,\n model.keep_prob: 0.8,\n }\n _, step, loss = sess.run([train_op, global_step, model.loss], feed_dict=feed_dict)\n # summary_writer.add_summary(summaries, step)\n return loss\n\n def test_accuracy(test_x, test_y):\n test_batches = batch_iter(test_x, test_y, BATCH_SIZE, 1)\n sum_accuracy, cnt = 0., 0\n for test_batch_x, test_batch_y in test_batches:\n accuracy = sess.run(model.accuracy, feed_dict={model.x: test_batch_x, model.y: test_batch_y, model.keep_prob: 1.0})\n sum_accuracy += accuracy\n cnt += 1\n return sum_accuracy / cnt\n\n # Training loop\n batches = batch_iter(train_x, train_y, BATCH_SIZE, NUM_EPOCHS)\n\n st = time.time()\n steps_per_epoch = int(num_train / BATCH_SIZE)\n for batch_x, batch_y in batches:\n\n step = tf.train.global_step(sess, global_step)\n num_epoch = int(step / steps_per_epoch)\n curr_lr = sess.run(model.lr)\n\n # def get_lr(curr_lr, init_lr):\n # import numpy as np\n # t_total = NUM_EPOCHS * steps_per_epoch\n # curr_lr = float(curr_lr)\n # lr = 0.5 * init_lr * (1 + np.cos(np.pi * curr_lr / t_total))\n # return lr\n # model.lr.load(get_lr(curr_lr, 0.01), session=sess)\n\n # if step == 10:\n # model.lr.load(0.01, session=sess)\n #\n # if step == 20:\n # model.lr.load(0.001, session=sess)\n\n loss = train_step(batch_x, batch_y)\n\n # if step % 1 == 0:\n if step % 10 == 0:\n test_acc = test_accuracy(test_x, test_y)\n train_acc = test_accuracy(train_x, train_y)\n\n mode = \"w\" if step == 0 else \"a\"\n with open(args.summary_dir + \"-accuracy.txt\", mode) as f:\n print(\"{},{},{},{},{}\".format(num_epoch, step, test_acc, train_acc, loss), file=f)\n\n print(\"epoch: {}, step: {}, loss: {}, steps_per_epoch: {}, batch size: {}\".\n format(num_epoch, step, loss, steps_per_epoch, BATCH_SIZE))\n print(\"test_accuracy: {}, train_accuracy: {}, learning rate: {}\".format(test_acc, train_acc, curr_lr))\n print(\"time of one epoch: {}\\n\".format(time.time()-st))\n st = time.time()\n\n\nif __name__ == \"__main__\":\n stt = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--pre_trained\", type=str, default=\"auto_encoder\", help=\"none or auto_encoder\")\n parser.add_argument(\"--summary_dir\", type=str, default=\"summary_classifier\", help=\"summary dir.\")\n parser.add_argument(\"--restore_path\", type=str, default=\"save_model_auto_encoder_all_delete_5000\")\n args = parser.parse_args()\n\n os.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\n print(\"\\n Building dictionary..\")\n word_dict = build_word_dict()\n print(\"Preprocessing dataset..\")\n train_x, train_y = build_word_dataset(\"train\", word_dict, MAX_DOCUMENT_LEN)\n test_x, test_y = build_word_dataset(\"test\", word_dict, MAX_DOCUMENT_LEN)\n assert len(train_x) == len(train_y)\n 
assert len(test_x) == len(test_y)\n print(\"length of train_x: {}, length of test_x: {}\".format(len(train_x), len(test_x)))\n print(\"length of word_dict: {}\".format(len(word_dict)))\n train(train_x, train_y, test_x, test_y, len(word_dict), args)\n\n print(\"total time: {}\".format(time.time() - stt))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"503458245","text":"# O que o usuário pode fazer no programa:\n#\n# 1- Baixar/Atualizar a base de dados de uma ação;\n# python3 start.py --update\n# 2- Executar o programa inteiro;\n# python3 start.py \n# 3- Executar o programa carregando os modelos ja treinados;\n# python3 start.py --load-models\n# 6- Gerar avaliações de uma previsão;\n# vai ser uma opção\n# 7- Gerar os gráficos da previsão e das avaliações;\n# vai ser uma opção\n# 8- Listar os tickers disponíveis para uso\n# python3 start.py --stocks\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os, sys\nimport Scripts.DataManager as dm\nfrom os import system, name\n\ntickers = {\n \"Inter\": \"BIDI4.SA\",\n \"Petrobras\": \"PETR4.SA\",\n \"Vale\": \"VALE3.SA\",\n \"Itau\": \"ITUB4.SA\",\n \"Ambev\": \"ABEV3.SA\",\n \"Sinqia\": \"SQIA3.SA\",\n \"Bovespa\": \"BOVA11.SA\"\n}\n\ndef main():\n params = sys.argv[1:]\n selectedTicker = \"\"\n estados = {\n \"update\": False,\n \"load-models\": False,\n }\n\n if len(params) < 1:\n clearScreen()\n print(\"Comando incorreto, por favor insira o comando seguindo a sequência:\")\n print(\"\\npython3 start.py [opções]\")\n print(\"\\n\\nPara ver a lista de ações disponíveis digite 'python3 start.py --stocks' sem as aspas.\")\n return\n\n if params[0] == \"--stocks\":\n listaTickers()\n return\n elif params[0] not in tickers.keys():\n clearScreen()\n print(\"Ação inválida\")\n print(\"\\n\\nPara ver a lista de ações disponíveis digite 'python3 start.py --stocks' sem as aspas.\")\n return\n else:\n selectedTicker = tickers[params[0]]\n\n for i in range(len(params)):\n if params[i] == \"--load-models\":\n estados['load-models'] = True\n elif params[i] == \"--update\":\n estados['update'] = True\n\n dm.run(ticker = selectedTicker, update = estados['update'], loadModels = estados['load-models'], path = os.getcwd())\n\n\ndef listaTickers():\n clearScreen()\n print(\"Lista de ações disponíveis:\")\n for tickerKey in tickers.keys():\n print(f\"\\t- {tickerKey}\")\n\n\ndef clearScreen():\n # for windows\n if name == 'nt':\n _ = system('cls')\n\n # for mac and linux(here, os.name is 'posix')\n else:\n _ = system('clear')\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"435199971","text":"import re\r\nfrom termcolor import colored\r\n\r\n\r\ndef is_kirill(text: str):\r\n search = re.search(\"\"\"[а-яА-ЯёЁ]+\"\"\", text) or re.search(\"\"\"[^0-9][._][0-9]+\"\"\", text)\r\n return bool(search)\r\n\r\n\r\ndef find_symbol(text: str):\r\n _symb_var = []\r\n symbols = ['…', ',', '!=', '?', '<>', '>', '<',\r\n '[', ']', '(', ')', '*', '+', '-',\r\n '=', '{', '}', ':', ';', '.', '&',\r\n '--', '++', ':=']\r\n for i in symbols:\r\n if i in text:\r\n j = text.count(i)\r\n while j:\r\n _symb_var.append(i)\r\n j -= 1\r\n return _symb_var\r\n\r\n\r\ndef is_service(text: str):\r\n service_words = ['int', 'void', 'float', 'while', 'do',\r\n 
'if', 'then', 'else', 'switch', 'case',\r\n 'break', 'default', 'repeat', 'begin',\r\n 'end', 'until', 'sin', 'cos']\r\n\r\n if text in service_words:\r\n return True\r\n\r\n\r\ndef compile_text(text: str):\r\n copy_text = text\r\n _text_serv = []\r\n if is_kirill(text):\r\n return print(colored('\\nYour input = {t}\\n\\n'\r\n 'Something wrong in your input. Check it.'.format(t=text), 'red'))\r\n else:\r\n try:\r\n _text_num = re.findall('[+-]?[0-9]+.?[0-9]+?', text)\r\n for i in _text_num:\r\n text = text.replace(i, '')\r\n except: _text_num = ['']\r\n\r\n _text_symb = find_symbol(text)\r\n for i in _text_symb:\r\n text = text.replace(i, ' ')\r\n\r\n text = text.split(' ')\r\n\r\n for i in text:\r\n if is_service(i):\r\n _text_serv.append(i)\r\n\r\n for i in _text_serv:\r\n text.remove(i)\r\n\r\n while (' ' in text) | ('' in text):\r\n try:\r\n text.remove('')\r\n except: pass\r\n\r\n try:\r\n text.remove(' ')\r\n except: pass\r\n\r\n _text_var = set(text)\r\n _text_symb = set(_text_symb)\r\n\r\n for i in _text_var:\r\n if re.findall('[0-9]', i) or re.findall('[.,!@#$%^&*(+=]', i):\r\n return print(colored('\\nYour input = {t}\\n\\n'\r\n 'Something wrong in your input. Check it.'.format(t=copy_text), 'red'))\r\n\r\n return print('Your input = {inp}\\n\\n'\r\n 'Into tour input:\\n'\r\n 'Service words = {serv}\\n'\r\n 'Variables = {var}\\n'\r\n 'Nums = {nums}\\n'\r\n 'Symbols = {symb}'\r\n .format(inp=copy_text, serv=', '.join(_text_serv), var=', '.join(_text_var),\r\n nums=', '.join(_text_num), symb=' '.join(_text_symb)))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n string = 'int main (void) {float b, a[13]; int n;…}'\r\n #string = 'repeat begin И:=b+a[n]; n:=n-1 end until n=0;'\r\n #string = 'switch(c){case 0: b=2*a[n]; break; default: b=d;}'\r\n #string = 'if c then b:=sin(2*a) else b:=a;'\r\n compile_text(string)\r\n\r\n","sub_path":"V семестр/Системне програмування - 2/Бровченко/sys_prog/lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"22969437","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom functools import reduce\nimport json\nimport sys,time\nfrom threading import Thread\ndef connected_to_internet(url='http://www.google.com/', timeout=5):\n try:\n _ = requests.get(url, timeout=timeout)\n return True\n except requests.ConnectionError:\n return False\nif(connected_to_internet()):\n pass \nelse:\n print(\"Please Check Your Internet Connection and Try again(....)\")\n time.sleep(2)\n exit(0) \n\nbaseurl=\"https://psna.ecoleaide.com/home.htm\"\nglobal users,error_occur\nusers=0\nerror_occur=0\nfilen=open(\"it.txt\",'a+')\n\ndef loop(name):\n global users\n # name=\"k\"\n sess=requests.Session()\n req=sess.get(baseurl) \n una='17it49' \n payload=dict(username='PSNA_p'+una,password=name)\n # print(payload)\n req1=sess.post(data=payload,url=baseurl)\n bs_inst=BeautifulSoup(req1.text,'lxml')\n# names=bs_inst.find(\"select\").text.lstrip().rstrip() \n try:\n fail=bs_inst.find('div',{'class':'portal-error'}).text.rstrip().lstrip()\n if fail==\"Invalid username or password\":\n error_occur+=1\n \n except:\n try:\n names=bs_inst.find('a',{'class':'text-md block'}).text.lstrip().rstrip()\n # filen.write(\" <-------->\"+str(names)+\"<--------->\"+name+\"\\n\")\n mail=bs_inst.find_all('div',{'class':'p-a'})\n si=[]\n for i in mail:\n si.append(i.find_all(text=True))\n \n # filen.write(\" <-------->\"+str(names)+\"<--------->\"+'=='+name+\"\\n\")\n 
filen.write(\" <-------->\"+str(names)+\"<--------->\"+'=='+name+\"\\n\")\n # print(\" <-------->\"+str(names)+\"<--------->\"+name+\"\\n\") \n # print(i)\n # contact=bs_inst.find('span',{'class':'w-36 rounded sub-1 bold'})\n \n \n \n users+=1 \n # print(contact)\n except Exception as e:\n # print(e)\n pass\n# loop()\ndef t1():\n for i in range(1,7):\n for j in range(1,32):\n if i<10 or j<10:\n if i<10 and j<10:\n loop('0'+str(j)+'0'+str(i)+\"1999\")\n elif j<10:\n loop(\"0\"+str(j)+str(i)+\"1999\")\n else:\n loop(str(j)+'0'+str(i)+\"1999\")\ndef t2():\n for i in range(1,5):\n for j in range(1,32):\n if i<10 or j<10:\n if i<10 and j<10:\n loop('0'+str(j)+'0'+str(i)+\"1999\")\n elif j<10:\n loop(\"0\"+str(j)+str(i)+\"1999\")\n else:\n loop(str(j)+'0'+str(i)+\"1999\")\n \n\n # print(\"try \"+str(i))\n else :\n loop(str(j)+str(i)+\"1999\")\ndef t3():\n for i in range(5,9):\n for j in range(1,32):\n if i<10 or j<10:\n if i<10 and j<10:\n loop('0'+str(j)+'0'+str(i)+\"1999\")\n elif j<10:\n loop(\"0\"+str(j)+str(i)+\"1999\")\n else:\n loop(str(j)+'0'+str(i)+\"1999\")\n \n\n # print(\"try \"+str(i))\n else :\n loop(str(j)+str(i)+\"1999\")\ndef t4():\n for i in range(9,13):\n for j in range(1,32):\n if i<10 or j<10:\n if i<10 and j<10:\n loop('0'+str(j)+'0'+str(i)+\"1999\")\n elif j<10:\n loop(\"0\"+str(j)+str(i)+\"1999\")\n else:\n loop(str(j)+'0'+str(i)+\"1999\")\n \n\n # print(\"try \"+str(i))\n else :\n loop(str(j)+str(i)+\"1999\")\nthreads=[]\nfor thread in [t1,t2,t3,t4]:\n threads.append(Thread(target=thread))\n threads[-1].start()\n\n\n# print(error_occur)\n# loop()\n\n","sub_path":"brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"559389354","text":"#! 
/usr/bin/env python\n\nimport sys\nimport PyQt5\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\nimport rospy, rospkg\nfrom mavros_msgs.srv import CommandBool, SetMode\nfrom mavros_msgs.msg import State\nfrom geometry_msgs.msg import Twist, TwistStamped\nfrom sensor_msgs.msg import BatteryState\n\nrospack = rospkg.RosPack()\nvehicle_monitor = rospack.get_path('vehicle_monitor')\nmonitor_UI = vehicle_monitor +'/vehicle_monitor.ui'\n\nclass MainDialog(QDialog):\n def __init__(self, parent=None, flags=Qt.WindowStaysOnTopHint):\n super().__init__(parent=parent, flags=flags)\n uic.loadUi(monitor_UI, self)\n\n self.arm_pushButton.clicked.connect(lambda state, data = True : self.arming(state, data))\n self.disarm_pushButton.clicked.connect(lambda state, data = False : self.arming(state, data))\n self.offboard_pushButton.clicked.connect(lambda state, mode = 'offboard' : self.setMode(state, mode))\n self.manual_pushButton.clicked.connect(lambda state, mode = 'stabilized' : self.setMode(state, mode))\n \n self.timer = QTimer()\n self.timer.setInterval(100)\n self.timer.timeout.connect(self.stateDisplay)\n self.timer.start()\n self._battery = 0\n self._tar_linear_x = 0\n self._tar_angular_z = 0\n self._cur_linear_x = 0\n self._cur_angular_z = 0\n\n rospy.wait_for_service('mavros/cmd/arming')\n rospy.wait_for_service('mavros/set_mode')\n # rospy.wait_for_message('mavros/state', State)\n # rospy.wait_for_message('mavros/battery', BatteryState)\n # rospy.wait_for_message('mavros/setpoit_velocity/cmd_vel_unstamped', Twist)\n rospy.Subscriber('mavros/state', State, self.stateSub)\n rospy.Subscriber('mavros/battery', BatteryState, self.batterySub)\n rospy.Subscriber('mavros/setpoint_velocity/cmd_vel_unstamped', Twist, self.tarVelocitySub)\n rospy.Subscriber('mavros/local_position/velocity_body', TwistStamped, self.curVelocitySub)\n \n def arming(self, state, data):\n try:\n arming_client = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)\n respose = arming_client(data)\n except rospy.ServiceException as e:\n alert = QMessageBox()\n alert.setText(\"Service call failed : \" + e)\n alert.exec_()\n \n def setMode(self, state, mode):\n try:\n set_mode_client = rospy.ServiceProxy('mavros/set_mode', SetMode)\n res = set_mode_client(0, mode)\n except rospy.ServiceException as e:\n alert = QMessageBox()\n alert.setText(\"Service call failed : \" + e)\n alert.exec_()\n\n def stateDisplay(self):\n connected = 'Connected : ' + str(self._connected)\n armed = 'Armed : ' + str(self._armed)\n mode = 'Mode : ' + self._mode\n battery = 'Battery : ' + str(round(self._battery, 0)) + '%'\n cur_velocity_linear = 'Linear : ' + str(round(self._cur_linear_x, 2)) + ' m/s'\n cur_velocity_angular = 'Angula : ' + str(round(self._cur_angular_z, 2)) + ' rad/s'\n cur_velocity = 'Current Velocity\\n ' + cur_velocity_linear + '\\n ' + cur_velocity_angular\n tar_velocity_linear = 'Linear : ' + str(round(self._tar_linear_x, 2)) + ' m/s'\n tar_velocity_angular = 'Angula : ' + str(round(self._tar_angular_z, 2)) + ' rad/s'\n tar_velocity = 'Target Velocity\\n ' + tar_velocity_linear + '\\n ' + tar_velocity_angular\n self.textBrowser.setText(connected + '\\n' + armed + '\\n' + mode + '\\n' + battery + '\\n\\n' + cur_velocity + '\\n' + tar_velocity)\n\n def stateSub(self, msg):\n self._connected = msg.connected\n self._armed = msg.armed\n self._mode = msg.mode\n \n def batterySub(self, msg):\n self._battery = msg.percentage\n self._battery *= 100\n\n def tarVelocitySub(self, 
msg):\n self._tar_linear_x = msg.linear.x\n self._tar_angular_z = msg.angular.z\n \n def curVelocitySub(self, msg):\n self._cur_linear_x = msg.twist.linear.x\n self._cur_angular_z = msg.twist.angular.z\n\n\nrospy.init_node(\"vehicle_monitor\")\napp = QApplication(sys.argv)\nmain_dialog = MainDialog()\nmain_dialog.show()\napp.exec_()\nrospy.spin()","sub_path":"script/vehicle_monitor.py","file_name":"vehicle_monitor.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"266233489","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAlphabet maps objects to integer ids. It provides two way mapping from the index to the objects.\n\"\"\"\nimport json\nimport os\n\n\nclass Alphabet:\n def __init__(self, name, label=False):\n self.__name = name\n self.PADDING = \"\"\n self.UNKNOWN = \"\"\n self.label = label\n self.instance2index = {}\n self.instances = []\n\n # Index 0 is occupied by default, all else following.\n self.next_index = 0\n self.add(self.PADDING)\n\n # for word we reserve 0 as default value for padding\n if not self.label:\n self.add(self.UNKNOWN)\n\n\n def add(self, instance):\n if instance not in self.instance2index:\n self.instances.append(instance)\n self.instance2index[instance] = self.next_index\n self.next_index += 1\n\n def get_index(self, instance):\n try:\n return self.instance2index[instance]\n except KeyError:\n return self.instance2index[self.UNKNOWN]\n\n def get_instance(self, index):\n try:\n return self.instances[index]\n except IndexError:\n print('WARNING:Alphabet get_instance ,unknown instance, return the first label.')\n return self.instances[0]\n\n def size(self):\n # if self.label:\n # return len(self.instances)\n # else:\n return len(self.instances)\n\n def iteritems(self):\n return self.instance2index.items()\n","sub_path":"utils/alphabet.py","file_name":"alphabet.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"20801983","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\nfrom alipay.aop.api.domain.InvoiceBillResponseDTO import InvoiceBillResponseDTO\n\n\nclass InvoiceBillResponsePageDTO(object):\n\n def __init__(self):\n self._current_page = None\n self._datas = None\n self._page_size = None\n self._total_count = None\n\n @property\n def current_page(self):\n return self._current_page\n\n @current_page.setter\n def current_page(self, value):\n self._current_page = value\n @property\n def datas(self):\n return self._datas\n\n @datas.setter\n def datas(self, value):\n if isinstance(value, InvoiceBillResponseDTO):\n self._datas = value\n else:\n self._datas = InvoiceBillResponseDTO.from_alipay_dict(value)\n @property\n def page_size(self):\n return self._page_size\n\n @page_size.setter\n def page_size(self, value):\n self._page_size = value\n @property\n def total_count(self):\n return self._total_count\n\n @total_count.setter\n def total_count(self, value):\n self._total_count = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.current_page:\n if hasattr(self.current_page, 'to_alipay_dict'):\n params['current_page'] = self.current_page.to_alipay_dict()\n else:\n params['current_page'] = self.current_page\n if self.datas:\n if hasattr(self.datas, 'to_alipay_dict'):\n params['datas'] = self.datas.to_alipay_dict()\n else:\n params['datas'] = self.datas\n if self.page_size:\n if 
hasattr(self.page_size, 'to_alipay_dict'):\n params['page_size'] = self.page_size.to_alipay_dict()\n else:\n params['page_size'] = self.page_size\n if self.total_count:\n if hasattr(self.total_count, 'to_alipay_dict'):\n params['total_count'] = self.total_count.to_alipay_dict()\n else:\n params['total_count'] = self.total_count\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = InvoiceBillResponsePageDTO()\n if 'current_page' in d:\n o.current_page = d['current_page']\n if 'datas' in d:\n o.datas = d['datas']\n if 'page_size' in d:\n o.page_size = d['page_size']\n if 'total_count' in d:\n o.total_count = d['total_count']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/InvoiceBillResponsePageDTO.py","file_name":"InvoiceBillResponsePageDTO.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"296399038","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 01 14:55:26 2016\n\n@author: sknowles\n\"\"\"\n\nimport suds\nimport matplotlib.pyplot as plt\n\nrwProj = suds.SUDS.RWProject(r'\\\\data-server\\server\\Race\\2016\\11_27Abu\\Documents\\Strategy\\11_27Abu Race.rwp')\nrwProj.Load()\nsession = rwProj.SessionData\n\n# Plot all fuel corrected laptimes against time of day\nfor driver in session.Drivers:\n for outing in driver.Outings:\n session_times = [lap.SessionTime.TotalMinutes for lap in outing]\n fc_laptimes = [lap.FuelCorrectedLapTime.TotalSeconds for lap in outing]\n plt.plot(session_times, fc_laptimes)\n\nplt.ylim(100, 120)\nplt.show()","sub_path":"SUDS v0.1.5/suds/RWProject_Example.py","file_name":"RWProject_Example.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"95831518","text":"import argparse\nimport importlib\nimport numpy as np\nimport os\nimport os.path as path\nimport pickle\nfrom tqdm import tqdm\n\nimport yaml\nimport scipy.sparse as sparse\nfrom scipy.special import logsumexp\n\nfrom data.utils import load_pose_data, read_global, read_local_raw\nfrom geometry import SE2\nfrom ours.mapping import RefMap\nfrom settings import DATA_DIR, RESULTS_DIR\nfrom utils import pose_err\n\nself_dirpath = os.path.dirname(os.path.abspath(__file__))\n\n# off-map thresholds, within this tolerance to be considered on-map\non_xy_thres = 5.\non_rot_thres = 30.\n\n\ndef fw_update(logalpha, log_trans_mat, log_lhood):\n \"\"\"\n Numerically stable version of update using logsumexp. Assumes\n all inputs are in log-space already.\n \"\"\"\n result = np.empty(log_trans_mat.shape[0])\n # ensure csc format so indices are extracted properly\n mat = log_trans_mat.tocsc()\n indptr = mat.indptr\n for i in range(mat.shape[0]):\n indptr = mat.indptr\n indices = mat.indices[indptr[i]:indptr[i+1]]\n # extract relevant column vector entries\n col = logalpha[indices]\n result[i] = logsumexp(col + mat.data[indptr[i]:indptr[i+1]])\n return result + log_lhood\n\n\ndef bw_update(logbeta, log_trans_mat, log_lhood):\n \"\"\"\n Numerically stable version of update using logsumexp. 
Assumes\n all inputs are in log-space already.\n \"\"\"\n result = np.empty(log_trans_mat.shape[1])\n # ensure csr format so indices are extracted properly\n mat = log_trans_mat.tocsr()\n indptr = mat.indptr\n for i in range(mat.shape[1]):\n indptr = mat.indptr\n indices = mat.indices[indptr[i]:indptr[i+1]]\n # extract relevant column vector entries\n beta_row = logbeta[indices]\n lhood_row = log_lhood[indices]\n result[i] = logsumexp(beta_row + lhood_row +\n mat.data[indptr[i]:indptr[i+1]])\n return result\n\n\ndef forward_backward(log_prior, log_trans_mats, log_lhoods):\n T = len(log_lhoods)\n N = len(log_prior)\n # forward recursion\n logalpha = np.empty((T, len(prior)))\n for t in tqdm(range(T), desc='fw step'):\n if t == 0:\n logalpha[t, :] = log_prior + log_lhoods[t]\n else:\n logalpha[t, :] = fw_update(logalpha[t-1], log_trans_mats[t-1],\n log_lhoods[t])\n log_marginal = logsumexp(logalpha[-1]) # p(X) marginal data lhood\n # backward recursion\n logbeta = np.zeros((T, len(prior)))\n for t in tqdm(reversed(range(T-1)), desc='bw step', total=T-1):\n logbeta[t] = bw_update(logbeta[t+1], log_trans_mats[t], log_lhoods[t+1])\n # forward beliefs\n fw_beliefs = np.empty((T, N))\n for t in range(T):\n fw_beliefs[t] = np.exp(logalpha[t] - logsumexp(logalpha[t]))\n # backward beliefs\n bw_beliefs = np.empty((T, N))\n bw_beliefs = np.exp(logalpha + logbeta - log_marginal)\n return fw_beliefs, bw_beliefs\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=(\"Run loop closure experiments for our method or comparisons\"))\n parser.add_argument(\"-rt\", \"--reference-traverse\", type=str, default=\"overcast1\",\n help=\"reference traverse name, e.g. overcast, night\")\n parser.add_argument(\"-qt\", \"--query-traverses\", type=str, nargs=\"+\", required=True,\n help=\"query traverse name, e.g. dusk, night\")\n parser.add_argument(\"-rf\", \"--reference-filename\", type=str,\n default='xy_1_t_10_wd_4.pickle',\n help=\"filename containing reference map object\")\n parser.add_argument(\"-qf\", \"--query-filename\", type=str, default='xy_2_t_15.csv',\n help=\"filename containing subsampled query traverse poses\")\n parser.add_argument(\"-p\", \"--params\", type=str, default=\"\",\n help=\"filename containing model parameters\")\n parser.add_argument(\"-m\", \"--methods\", nargs=\"+\", type=str,\n choices=[\"ours\", \"xu20\", \"stenborg20\", \"baseline\", \"noverif\", \"nooff\"],\n default=[\"ours\", \"xu20\", \"stenborg20\", \"noverif\", \"nooff\"])\n args = parser.parse_args()\n\n ref_traverse = args.reference_traverse\n r_fname = args.reference_filename\n q_fname = args.query_filename\n\n # load map\n map_dir = path.join(DATA_DIR, ref_traverse, 'saved_maps')\n fpath = path.join(map_dir, r_fname)\n with open(fpath, \"rb\") as f:\n refMap = pickle.load(f)\n\n pbarq = tqdm(args.query_traverses)\n for query in pbarq:\n pbarq.set_description(query)\n # load query sequence\n tstampsQ, gtQ, muQ, SigmaQ = load_pose_data(query, q_fname)\n query_global = read_global(query, tstampsQ)\n\n pbarm = tqdm(args.methods)\n for method in pbarm:\n pbarm.set_description(method)\n\n # for each method (ours/comparisons), import assoc. 
module\n\n if method == \"ours\":\n from ours.localization import LocalizationFull as Localization\n from ours.localization import convergedFull as converged\n elif method == \"noverif\":\n from ours.localization import LocalizationNoVerif as Localization\n from ours.localization import convergedNoVerif as converged\n elif method == \"nooff\":\n from ours.localization import LocalizationNoOff as Localization\n from ours.localization import convergedNoOff as converged\n else:\n localization = importlib.import_module(\n f\"comparison_methods.{method}.localization\")\n Localization = localization.Localization\n from ours.localization import convergedNoOff as converged\n\n # import params\n\n if args.params:\n params_file = args.params\n else:\n param_fname = \"ours\" if method in ['noverif', 'nooff'] else method\n params_file = param_fname + \".yaml\"\n\n # read in parameters\n\n params_path = path.abspath(path.join(self_dirpath, \"..\", \"params\"))\n with open(path.join(params_path, params_file), 'r') as f:\n params = yaml.safe_load(f)\n\n # create description of experiment if not specified\n\n description = \\\n f\"{ref_traverse}_{r_fname[:-7]}_{query}_{q_fname[:-4]}_{method}\"\n\n # identify on-map status of query\n\n gtQSE2 = SE2(gtQ)\n refgtSE2 = SE2(refMap.gt_poses)\n\n def min_pose_err(poseSE2):\n xy_errs, rot_errs = (poseSE2 / refgtSE2).magnitude()\n wgtd = xy_errs + 10. * rot_errs\n best = np.argmin(wgtd)\n return xy_errs[best], rot_errs[best]\n qgt_err = np.asarray(list(map(min_pose_err, gtQSE2))).T\n qxy, qrot = qgt_err\n\n # night-rain traverses uses GPS, orientation wrong so override\n\n on_xy_thres1 = on_xy_thres if query != 'night-rain' else 10.\n on_rot_thres1 = on_rot_thres if query != 'night-rain' else 360.\n q_on_map = np.logical_and(qxy < on_xy_thres1,\n qrot * 180. / np.pi < on_rot_thres1)\n\n import matplotlib.pyplot as plt\n ref_l = 4045\n ref_u = 4092\n\n refPoses = refMap.gt_poses[ref_l:ref_u, :]\n start_ind = 108\n end_ind = 118\n end_ind1 = 132\n fig, ax = plt.subplots()\n ax.plot(gtQ[start_ind:end_ind, 1], gtQ[start_ind:end_ind, 0],\n color='red', zorder=10, linewidth=3)\n ax.plot(gtQ[end_ind:end_ind1, 1], gtQ[end_ind:end_ind1, 0],\n color='red', zorder=10, linestyle='dashed', linewidth=3)\n ax.scatter([gtQ[start_ind, 1]], [gtQ[start_ind, 0]],\n color='red', zorder=10, s=100)\n ax.scatter([gtQ[end_ind-1, 1]], [gtQ[end_ind-1, 0]],\n color='red', zorder=10, s=100)\n ax.scatter([gtQ[end_ind1-1, 1]], [gtQ[end_ind1-1, 0]],\n color='red', zorder=10, s=100)\n ax.plot(refPoses[:, 1], refPoses[:, 0], color='blue', marker='o')\n ax.set_xlim(-210, -150)\n ax.set_ylim(-440, -380)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis('off')\n fig.tight_layout()\n # save results\n scores = []\n checks = []\n xy_errs = []\n rot_errs = []\n ref_inds = []\n on_map_stats = []\n off_probs = []\n\n # save intermediate\n meas_lhoods = []\n transition_matrices = []\n prior = None\n\n # setup localization object\n loc = Localization(params, refMap)\n for i in tqdm(range(start_ind, end_ind1)):\n\n qLoc = read_local_raw(query, tstampsQ[i])\n qGlb = query_global[i]\n qmu, qSigma = muQ[i], SigmaQ[i]\n # usually at t=0 there is a meas. 
update with no motion\n # separate initialization performed\n if i == start_ind:\n prior = np.log(loc.belief)\n lhood = loc.init(qmu, qSigma, qGlb, qLoc)\n meas_lhoods.append(np.log(lhood))\n else:\n # update state estimate\n trans_mat = loc._update_motion(qmu, qSigma)\n lhood = loc._update_meas(qGlb, qLoc)\n trans_mat.data = np.log(trans_mat.data)\n meas_lhoods.append(np.log(lhood))\n transition_matrices.append(trans_mat)\n\n fw_beliefs, bw_beliefs = forward_backward(\n prior, transition_matrices, meas_lhoods)\n\n # check convergence of forward beliefs\n\n T = len(fw_beliefs)\n\n scores_fw = np.empty(T, dtype=float)\n checks_fw = np.empty(T, dtype=bool)\n ref_inds_fw = np.empty(T, dtype=int)\n xy_err_fw = np.empty(T, dtype=float)\n rot_err_fw = np.empty(T, dtype=float)\n off_prob_fw = np.empty(T, dtype=float)\n\n scores_bw = np.empty(T, dtype=float)\n checks_bw = np.empty(T, dtype=bool)\n ref_inds_bw = np.empty(T, dtype=int)\n xy_err_bw = np.empty(T, dtype=float)\n rot_err_bw = np.empty(T, dtype=float)\n off_prob_bw = np.empty(T, dtype=float)\n\n for t in tqdm(range(T), desc='converge', leave=False):\n qLoc = read_local_raw(query, tstampsQ[i])\n # check convergence save forward pass stats\n ind_prop, check, score = converged(\n loc, query_global[t], qLoc, fw_beliefs[t])\n xy_err, rot_err = pose_err(gtQ[t], refMap.gt_poses[ind_prop],\n degrees=True)\n scores_fw[t] = score\n checks_fw[t] = check\n ref_inds_fw[t] = ind_prop\n xy_err_fw[t] = xy_err\n rot_err_fw[t] = rot_err\n off_prob = fw_beliefs[t, -1] if method in [\"ours\", \"noverif\"] else 0.\n off_prob_fw[t] = off_prob\n # check convergence save backward pass stats\n ind_prop, check, score = converged(\n loc, query_global[t], qLoc, bw_beliefs[t])\n xy_err, rot_err = pose_err(gtQ[t], refMap.gt_poses[ind_prop],\n degrees=True)\n scores_bw[t] = score\n checks_bw[t] = check\n ref_inds_bw[t] = ind_prop\n xy_err_bw[t] = xy_err\n rot_err_bw[t] = rot_err\n off_prob = bw_beliefs[t, -1] if method in [\"ours\", \"noverif\"] else 0.\n off_prob_bw[t] = off_prob\n\n results = {\"forward\": {\"scores\": scores_fw,\n \"checks\": checks_fw,\n \"ref_inds\": ref_inds_fw,\n \"xy_err\": xy_err_fw,\n \"rot_err\": rot_err_fw,\n \"off_probs\": off_prob_fw,\n \"on_status\": q_on_map},\n \"backward\": {\"scores\": scores_bw,\n \"checks\": checks_bw,\n \"ref_inds\": ref_inds_bw,\n \"xy_err\": xy_err_bw,\n \"rot_err\": rot_err_bw,\n \"off_probs\": off_prob_bw,\n \"on_status\": q_on_map}}\n\n # add belief plot\n fig1, ax1 = plt.subplots(figsize=(5, 2))\n xvals = np.arange(ref_u - ref_l)\n yvals = fw_beliefs[end_ind-start_ind, ref_l:ref_u]\n ax1.bar(xvals, yvals)\n ax1.set_ylim(0., 0.7)\n xt = ax1.get_xticks()\n oval = xt[-1] + 10\n ax1.bar([oval], [fw_beliefs[end_ind-start_ind, -1]], width=5)\n ax1.set_yticks([])\n ax1.xaxis.set_tick_params(labelsize=22)\n xt = np.append(xt, xt[-1] + 10)\n xt = xt.astype(int)\n xtl = xt.tolist()\n xtl[-1] = \"O\"\n ax1.set_xticks(xt)\n ax1.set_xticklabels(xtl)\n fig1.tight_layout()\n\n fig2, ax2 = plt.subplots(figsize=(5, 2))\n xvals = np.arange(ref_u - ref_l)\n yvals = fw_beliefs[end_ind+3-start_ind, ref_l:ref_u]\n yvals = np.hstack((np.zeros(20), yvals[:-20]))\n ax2.bar(xvals, yvals)\n ax2.set_ylim(0., 0.7)\n ax2.set_yticks([])\n xt = ax2.get_xticks()\n oval = xt[-1] + 10\n ax2.bar([oval], [fw_beliefs[-1, -1]], width=5)\n xt = np.append(xt, xt[-1] + 10)\n xt = xt.astype(int)\n xtl = xt.tolist()\n xtl[-1] = \"O\"\n ax2.set_xticks(xt)\n ax2.set_xticklabels(xtl)\n ax2.xaxis.set_tick_params(labelsize=22)\n fig2.tight_layout()\n\n 
xlim = ax2.get_xlim()\n\n fig3, ax3 = plt.subplots(figsize=(5, 2))\n xvals = np.arange(int(xlim[0]), int(xlim[1])-5)\n yvals = np.ones(len(xvals)) / len(xvals)\n ax3.bar(xvals, yvals)\n ax3.set_xlim(*xlim)\n ax3.set_ylim(0., 0.7)\n ax3.set_yticks([])\n xt = ax2.get_xticks()\n ax3.bar([oval], [0.3], width=5)\n ax3.set_xticks(xt)\n ax3.set_xticklabels(xtl)\n ax3.xaxis.set_tick_params(labelsize=22)\n fig3.tight_layout()\n plt.show()\n\n","sub_path":"topometricloc/results/run_loop_closure2.py","file_name":"run_loop_closure2.py","file_ext":"py","file_size_in_byte":14734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"239898414","text":"\"\"\" Checks base functionality of the articles engine \"\"\"\nimport unittest\nimport base_service as baseServiceTests\n\n\nclass ArticleInfoTest(baseServiceTests.ServiceTest):\n \"\"\" Checks common service functionality. \"\"\"\n\n def setUp(self):\n \"\"\" Initializes test with service specific data \"\"\"\n self._url_ = 'http://localhost:8080/articles'\n self._data_ = { 'services': 'Test Service',\n 'title': 'Test Restaurant',\n 'description': 'It is just a test content.',\n 'cut': 'It may be same as description or may be not.',\n 'text': 'This is text about the restaurant. But it is also a mock.',\n 'author': 'Anonymous' }\n\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/tests/articles.py","file_name":"articles.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"79519160","text":"#!/usr/bin/python\n#** (C) Copyright 2013, Applied Physical Sciences Corp., A General Dynamics Company\n#**\n#** Gravity is free software; you can redistribute it and/or modify\n#** it under the terms of the GNU Lesser General Public License as published by\n#** the Free Software Foundation; either version 3 of the License, or\n#** (at your option) any later version.\n#**\n#** This program is distributed in the hope that it will be useful,\n#** but WITHOUT ANY WARRANTY; without even the implied warranty of\n#** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#** GNU Lesser General Public License for more details.\n#**\n#** You should have received a copy of the GNU Lesser General Public\n#** License along with this program;\n#** If not, see .\n#**\n\nimport math\nimport json\nimport os\nfrom functools import reduce\nfrom numpy import array\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential,model_from_json\nfrom tensorflow.keras.layers import LSTM, Dense, RepeatVector, TimeDistributed\nfrom tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\nfrom tensorflow.keras import optimizers\n\nn_steps = 20 # number of samples to autoencode\nlr = 0.0001 # Adam optimizer parameter\n\n# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps):\n X, y = list(), list()\n for i in range(len(sequences)):\n end_ix = i + n_steps\n if end_ix > len(sequences):\n break\n seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix-1, :]\n X.append(seq_x)\n y.append(seq_y)\n return array(X), array(y)\n\ndef TrainModel(training_data, model_filename, epochs=3):\n print(\"Training on %d sequences\"%(len(training_data)))\n seqs = []\n min_length = reduce(lambda a,b: min(a,len(training_data[b])), training_data, len(training_data[list(training_data)[0]]))\n print(\"length is %d\"%(min_length))\n for k in sorted(training_data):\n t = training_data[k][:min_length] # truncate to the length of the shortest array\n t_vals = [x[1] for x in t] # discard the timestamps (for now)\n t_seq = np.array(t_vals)\n t_seq = t_seq.reshape(len(t_seq), 1)\n seqs.append(t_seq)\n\n dataset = np.hstack(seqs)\n n_features = len(seqs)\n\n X, y = split_sequences(dataset, n_steps)\n print(X.shape, y.shape)\n\n lstm_autoencoder = Sequential()\n # Encoder\n lstm_autoencoder.add(LSTM(32, activation='relu', input_shape=(n_steps, n_features), return_sequences=True))\n lstm_autoencoder.add(LSTM(16, activation='relu', return_sequences=False))\n lstm_autoencoder.add(RepeatVector(n_steps))\n # Decoder\n lstm_autoencoder.add(LSTM(16, activation='relu', return_sequences=True))\n lstm_autoencoder.add(LSTM(32, activation='relu', return_sequences=True))\n lstm_autoencoder.add(TimeDistributed(Dense(n_features)))\n\n \n adam = tf.keras.optimizers.Adam(lr)\n lstm_autoencoder.compile(optimizer=\"adam\", loss='mse')\n \n lstm_autoencoder_history = lstm_autoencoder.fit(X, X, \n epochs=epochs, \n batch_size=64, \n verbose=1).history\n \n \n model_json = lstm_autoencoder.to_json()\n with open(model_filename, \"w\") as json_file:\n json_file.write(model_json)\n lstm_autoencoder.save_weights(model_filename + \".h5\")\n print(\"Saved model to disk\")\n\nclass GravityModel():\n def __init__(self, model_filename):\n print(\"Loading model from disk\")\n json_file = open(model_filename, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.lstm_autoencoder = model_from_json(loaded_model_json)\n # load weights into new model\n self.lstm_autoencoder.load_weights(model_filename + \".h5\")\n print(\"Loaded model from disk\" + str(self.lstm_autoencoder))\n self.n_steps = n_steps\n\n def Flatten(self, X):\n flattened_X = np.empty((X.shape[0], X.shape[2])) # sample x features array.\n for i in range(X.shape[0]):\n flattened_X[i] = X[i, (X.shape[1]-1), :]\n return(flattened_X)\n\n def ComputeMSE(self, data):\n seqs = []\n min_length = reduce(lambda a,b: min(a,len(data[b])), data, len(data[list(data)[0]]))\n if min_length < self.n_steps:\n raise ValueError\n for k in sorted(data):\n t = data[k][:min_length]\n t_vals = [x[1] for x in t] 
# discard the timestamps (for now)\n t_seq = np.array(t_vals)\n t_seq = t_seq.reshape(len(t_seq), 1)\n seqs.append(t_seq)\n dataset = np.hstack(seqs)\n X, y = split_sequences(dataset, self.n_steps)\n predictions = self.lstm_autoencoder.predict(X, verbose=1)\n mse = np.mean(np.power(self.Flatten(X) - self.Flatten(predictions), 2), axis=1)\n return mse\n\n\n# Test the autoencoder separately from Gravity\nif __name__ == \"__main__\":\n fs = 10000\n secs = 5\n good_pct = 0.99\n train_pct = 0.80\n t1 = np.linspace(0, 200*np.pi, num=fs*secs)\n s1 = np.sin(t1)\n t2= t1\n split = int(len(t1) * good_pct)\n rate = np.linspace(1, 1.3, num = len(t1) - split)\n t2[split:] = t2[split:] * rate\n s2 = np.cos(t2)\n data = {}\n data[\"s1\"] = list(zip(t1, s1))\n data[\"s2\"] = list(zip(t1, s2))\n TrainModel(data, \"model.json\")\n gm = GravityModel(\"model.json\")\n for k in list(data):\n data[k] = data[k][:n_steps]\n print(gm.ComputeMSE(data))\n\n","sub_path":"test/examples/14-AnomalyDetection/gravity_autoencoder.py","file_name":"gravity_autoencoder.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"510234849","text":"#RF_Titanic_Submission.py\n#baseline submission at 79.904% - using RandomForestClassifer - V13\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport os\nimport random\n\ndef make_prediction(Score):\n Score = Score * 100\n if Score >= 100:\n return 1\n else:\n arr_rander = np.arange(1000)\n for c in range(1000):\n arr_rander[c] = random.randrange(100)\n rander = np.average(arr_rander)\n \n # print(rander) \n \n if rander < Score:\n return 1 # passenger lives\n else:\n return 0 # passenger dies\n\n\n# clear the screen \nos.system('clear')\ntitanDir = '/Users/johncyclist22/Documents/ML_Competitions/Titanic/Data/Submissions/'\n\n\n# open the training dataset - prepare dataframe\npredict_data = pd.read_csv('My Submission BDT 0127.csv')\npredict_outcome = pd.DataFrame()\n\n\n# print information for review\nprint(predict_data.columns)\nprint(predict_data.describe())\nprint(predict_data.head())\n\nfor counter in range(10):\n\n predict_outcome['PassengerId'] = predict_data['PassengerId']\n predict_outcome['Survived'] = predict_data.apply(lambda x: make_prediction(x['Scored Probabilities']), axis=1)\n print('Run #: '+str(counter+1)+ \"--> Survivors: \" + str(predict_outcome['Survived'].sum()) + \" out of \" + str(predict_outcome['Survived'].count()) + \" Rate--> \" + str(predict_outcome['Survived'].sum() / predict_outcome['Survived'].count() ) ) \n \n\nprint(predict_outcome.head())\nprint(predict_outcome.describe())\n\n\npredict_outcome.to_csv(titanDir+'my_submission_v33.csv', index=False)\nprint(\"v33 submission was successfully saved - Boosted Decision Tree Azure ML\")\n","sub_path":"ML_Stud_BDT_0127/ML Studio BDT 0127.py","file_name":"ML Studio BDT 0127.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"288254556","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport higher\nimport copy\n\nfrom gbml import GBML\nfrom utils.utils import get_accuracy, apply_grad, mix_grad, grad_to_cos, loss_to_ent\nfrom utils.hessianfree import HessianFree\n\nclass iMAML(GBML):\n\n def __init__(self, args):\n super().__init__(args)\n self._init_net()\n self._init_opt()\n self.lamb = 100.0\n self.n_cg = 
args.cg_steps\n self.version = args.version\n\n if self.version == 'HF':\n self.inner_optimizer = HessianFree(cg_max_iter=3,)\n return None\n\n @torch.enable_grad()\n def inner_loop(self, fmodel, diffopt, train_input, train_target):\n \n train_logit = fmodel(train_input)\n inner_loss = F.cross_entropy(train_logit, train_target)\n diffopt.step(inner_loss)\n\n return None\n\n @torch.no_grad()\n def cg(self, in_grad, outer_grad, params):\n x = outer_grad.clone().detach()\n r = outer_grad.clone().detach() - self.hv_prod(in_grad, x, params)\n p = r.clone().detach()\n for i in range(self.n_cg):\n Ap = self.hv_prod(in_grad, p, params)\n alpha = (r @ r)/(p @ Ap)\n x = x + alpha * p\n r_new = r - alpha * Ap\n beta = (r_new @ r_new)/(r @ r)\n p = r_new + beta * p\n r = r_new.clone().detach()\n return self.vec_to_grad(x)\n \n def vec_to_grad(self, vec):\n pointer = 0\n res = []\n for param in self.network.parameters():\n num_param = param.numel()\n res.append(vec[pointer:pointer+num_param].view_as(param).data)\n pointer += num_param\n return res\n\n @torch.enable_grad()\n def hv_prod(self, in_grad, x, params):\n hv = torch.autograd.grad(in_grad, params, retain_graph=True, grad_outputs=x)\n hv = torch.nn.utils.parameters_to_vector(hv).detach()\n # precondition with identity matrix\n return hv/self.lamb + x\n\n def outer_loop(self, batch, is_train):\n \n train_inputs, train_targets, test_inputs, test_targets = self.unpack_batch(batch)\n\n loss_log = 0\n acc_log = 0\n grad_list = []\n loss_list = []\n\n for (train_input, train_target, test_input, test_target) in zip(train_inputs, train_targets, test_inputs, test_targets):\n\n with higher.innerloop_ctx(self.network, self.inner_optimizer, track_higher_grads=False) as (fmodel, diffopt):\n\n for step in range(self.args.n_inner):\n self.inner_loop(fmodel, diffopt, train_input, train_target)\n \n train_logit = fmodel(train_input)\n in_loss = F.cross_entropy(train_logit, train_target)\n\n test_logit = fmodel(test_input)\n outer_loss = F.cross_entropy(test_logit, test_target)\n loss_log += outer_loss.item()/self.batch_size\n\n with torch.no_grad():\n acc_log += get_accuracy(test_logit, test_target).item()/self.batch_size\n \n if is_train:\n params = list(fmodel.parameters(time=-1))\n in_grad = torch.nn.utils.parameters_to_vector(torch.autograd.grad(in_loss, params, create_graph=True))\n outer_grad = torch.nn.utils.parameters_to_vector(torch.autograd.grad(outer_loss, params))\n implicit_grad = self.cg(in_grad, outer_grad, params)\n grad_list.append(implicit_grad)\n loss_list.append(outer_loss.item())\n\n if is_train:\n self.outer_optimizer.zero_grad()\n weight = torch.ones(len(grad_list))\n weight = weight / torch.sum(weight)\n grad = mix_grad(grad_list, weight)\n grad_log = apply_grad(self.network, grad)\n self.outer_optimizer.step()\n \n return loss_log, acc_log, grad_log\n else:\n return loss_log, acc_log\n","sub_path":"metalearners/imaml.py","file_name":"imaml.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"480955315","text":"import asyncio\n\nimport httpcore\nimport httpx\n\nfrom sanic_testing.testing import SanicTestClient\n\nfrom sanic import Sanic\nfrom sanic.response import text\n\n\nclass DelayableHTTPConnection(httpcore._async.connection.AsyncHTTPConnection):\n async def arequest(self, *args, **kwargs):\n await asyncio.sleep(2)\n return await super().arequest(*args, **kwargs)\n\n async def _open_socket(self, *args, **kwargs):\n retval = await 
super()._open_socket(*args, **kwargs)\n if self._request_delay:\n await asyncio.sleep(self._request_delay)\n return retval\n\n\nclass DelayableSanicConnectionPool(httpcore.AsyncConnectionPool):\n def __init__(self, request_delay=None, *args, **kwargs):\n self._request_delay = request_delay\n super().__init__(*args, **kwargs)\n\n async def _add_to_pool(self, connection, timeout):\n connection.__class__ = DelayableHTTPConnection\n connection._request_delay = self._request_delay\n await super()._add_to_pool(connection, timeout)\n\n\nclass DelayableSanicSession(httpx.AsyncClient):\n def __init__(self, request_delay=None, *args, **kwargs) -> None:\n transport = DelayableSanicConnectionPool(request_delay=request_delay)\n super().__init__(transport=transport, *args, **kwargs)\n\n\nclass DelayableSanicTestClient(SanicTestClient):\n def __init__(self, app, request_delay=None):\n super().__init__(app)\n self._request_delay = request_delay\n self._loop = None\n\n def get_new_session(self):\n return DelayableSanicSession(request_delay=self._request_delay)\n\n\nrequest_timeout_default_app = Sanic(\"test_request_timeout_default\")\nrequest_no_timeout_app = Sanic(\"test_request_no_timeout\")\nrequest_timeout_default_app.config.REQUEST_TIMEOUT = 0.6\nrequest_no_timeout_app.config.REQUEST_TIMEOUT = 0.6\n\n\n@request_timeout_default_app.route(\"/1\")\nasync def handler1(request):\n return text(\"OK\")\n\n\n@request_no_timeout_app.route(\"/1\")\nasync def handler2(request):\n return text(\"OK\")\n\n\n@request_timeout_default_app.websocket(\"/ws1\")\nasync def ws_handler1(request, ws):\n await ws.send(\"OK\")\n\n\ndef test_default_server_error_request_timeout():\n client = DelayableSanicTestClient(request_timeout_default_app, 2)\n request, response = client.get(\"/1\")\n assert response.status == 408\n assert \"Request Timeout\" in response.text\n\n\ndef test_default_server_error_request_dont_timeout():\n client = DelayableSanicTestClient(request_no_timeout_app, 0.2)\n request, response = client.get(\"/1\")\n assert response.status == 200\n assert response.text == \"OK\"\n\n\ndef test_default_server_error_websocket_request_timeout():\n\n headers = {\n \"Upgrade\": \"websocket\",\n \"Connection\": \"upgrade\",\n \"Sec-WebSocket-Key\": \"dGhlIHNhbXBsZSBub25jZQ==\",\n \"Sec-WebSocket-Version\": \"13\",\n }\n\n client = DelayableSanicTestClient(request_timeout_default_app, 2)\n request, response = client.get(\"/ws1\", headers=headers)\n\n assert response.status == 408\n assert \"Request Timeout\" in response.text\n","sub_path":"tests/test_request_timeout.py","file_name":"test_request_timeout.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"545054164","text":"from PyQt5.QtWidgets import QWidget, QPushButton, QGridLayout,QLabel,QTableWidget\nfrom PyQt5.QtWidgets import QHBoxLayout\nimport sqlite3\nimport ManageOperation\n\n\n# 管理员我的界面\nclass Controller_myself_win(QWidget): # 增加一个编辑资料的按钮\n def __init__(self):\n super(Controller_myself_win, self).__init__()\n self.returnBtn = QPushButton(\"返回\")\n self.ExditBtn = QPushButton(\"编辑\")\n self.chang_image = QPushButton(\"换头像\")\n self.name = QLabel(\"姓名:\")\n self.sex = QLabel(\"性别:\")\n self.number = QLabel(\"手机号:\")\n self.year = QLabel(\"出生年月:\")\n self.school = QLabel(\"学校:\")\n self.amend = QPushButton(\"修改密码\")\n self.withdraw = QPushButton('退出')\n self.tupian = QLabel()\n self.devise_ui()\n\n def devise_ui(self):\n self.horizontalLayout = QHBoxLayout(self)\n 
self.layout = QGridLayout()\n self.win = QWidget()\n self.win.setLayout(self.layout) # 设置顶级布局管理器\n self.horizontalLayout.addWidget(self.win)\n self.win.setMouseTracking(True) # 设置widget鼠标跟踪\n self.layout.setContentsMargins(100, 0, 0, 0)\n\n sqlpath = './datas/database/Information.db'\n conn = sqlite3.connect(sqlpath)\n c = conn.cursor()\n c.execute(\"select * from Controller_data where number=(?)\",(ManageOperation.number,))\n self.data = c.fetchall()[0]\n c.close()\n conn.close()\n self.name1 = QLabel(self.data[1]) # 读取数据库中的信息,将信息输出label中\n self.sex1 = QLabel(self.data[3])\n self.number1 = QLabel(self.data[0])\n self.year1 = QLabel(self.data[2][0:4] + \"年 \" + self.data[2][5:] + ' 月')\n self.school1 = QLabel(self.data[4])\n self.returnBtn.setMaximumSize(60, 40)\n self.ExditBtn.setMaximumSize(60, 40)\n self.name.setMaximumSize(70, 40)\n self.sex.setMaximumSize(70, 40)\n self.number.setMaximumSize(70, 40)\n self.school.setMaximumSize(70, 40)\n self.year.setMaximumSize(100, 40)\n self.name1.setMaximumSize(350, 40)\n self.sex1.setMaximumSize(350, 40)\n self.number1.setMaximumSize(350, 40)\n self.school1.setMaximumSize(350, 40)\n self.year1.setMaximumSize(350, 40)\n self.amend.setMaximumSize(500, 40)\n self.withdraw.setMaximumSize(500, 40)\n self.chang_image.setMaximumSize(90, 40)\n self.tupian.setMaximumSize(250, 250)\n self.chang_image.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.name.setStyleSheet(\"QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.year.setStyleSheet(\"QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.sex.setStyleSheet(\"QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.number.setStyleSheet(\"QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.school.setStyleSheet(\"QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.amend.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.withdraw.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.returnBtn.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.ExditBtn.setStyleSheet(\"QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\\\n QPushButton{background-color:rgb(170,200, 50)}\\\n QPushButton:hover{background-color:rgb(50, 170, 200)}\")\n self.name1.setStyleSheet(\"QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.sex1.setStyleSheet(\"QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.year1.setStyleSheet(\"QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.number1.setStyleSheet(\"QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.school1.setStyleSheet(\"QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}\")\n self.layout.addWidget(self.tupian, 1, 1, 4, 4)\n 
self.layout.addWidget(self.chang_image, 5, 2, 1, 2)\n self.layout.addWidget(self.returnBtn, 0, 0, 1, 1)\n self.layout.addWidget(self.ExditBtn, 0, 10, 1, 1)\n self.layout.addWidget(self.name, 1, 6, 1, 1)\n self.layout.addWidget(self.name1, 1, 8, 1, 6)\n self.layout.addWidget(self.year, 2, 6, 1, 1)\n self.layout.addWidget(self.year1, 2, 8, 1, 6)\n self.layout.addWidget(self.sex, 3, 6, 1, 1)\n self.layout.addWidget(self.sex1, 3, 8, 1, 6)\n self.layout.addWidget(self.number, 4, 6, 1, 1)\n self.layout.addWidget(self.number1, 4, 8, 1, 6)\n self.layout.addWidget(self.school, 5, 6, 1, 1)\n self.layout.addWidget(self.school1, 5, 8, 1, 6)\n self.layout.addWidget(self.amend, 7, 6, 1, 6)\n self.layout.addWidget(self.withdraw, 8, 6, 1, 6)","sub_path":"low6/ManageInterface/Controller_myself_win.py","file_name":"Controller_myself_win.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"97616809","text":"import sys\nimport requests\nimport logging\nfrom sentry import ravenClient\n\ndef getProxy():\n try:\n proxy = requests.get('http://127.0.0.1:5010/get/').text\n return proxy\n except:\n return getProxy()\n\ndef deleteProxy(proxy):\n requests.get('http://127.0.0.1:5010/delete/?proxy={}'.format(proxy))\n\ndef getProxyStatus():\n return requests.get('http://127.0.0.1:5010/get_status/').content\n\n\ndef request(method, url, Referer, **kwargs):\n proxyCount = 20\n proxy = getProxy()\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Cookie': 'Cookie: session_id=3589639185CF658; _did=web_5612985630A41123; uuid=df556396b4424b85ed2af9a5bc0ac956; supernova=1',\n 'DNT': '1',\n 'Host': 'www.acfun.cn',\n 'Pragma': 'no-cache',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'http://www.acfun.cn/'\n }\n\n if Referer is not None:\n headers['Referer'] = Referer\n\n while proxyCount > 0:\n proxy = getProxy()\n requestCount = 3\n while requestCount > 0:\n try:\n res = requests.request(\n method,\n url,\n timeout = 5,\n headers = headers,\n proxies = {'http': 'http://{}'.format(proxy)},\n **kwargs\n )\n data = res.json().get('data')\n if res.status_code == 200 and data is not None:\n return res\n else:\n logging.error('proxy response not right:')\n logging.error(res.status_code)\n logging.error(res.text)\n requestCount -= 1\n except Exception as e:\n logging.error('proxy request fail:')\n logging.error(e)\n requestCount -= 1\n logging.error('IP {} Not Availble'.format(proxy))\n proxyCount -= 1\n deleteProxy(proxy)\n\n\n logging.error('Proxy Not Availble')\n logging.info('Proxy Not Availble, Using Local Network')\n ravenClient.captureMessage('Proxy Not Availble, Using Local Network')\n return requests.request(method, url, headers = headers, **kwargs)\n\n\ndef get(url, **kwargs):\n return request('get', url, **kwargs)","sub_path":"server/proxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"565866284","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,../tests/notebooks//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 
1.10.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\nimport itertools \nimport math\nimport os\nimport pickle as pkl\nfrom typing import List, Dict, Any\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.metrics import accuracy_score, roc_auc_score\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.datasets import fetch_openml\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rcParams['figure.dpi'] = 250\n\n# %load_ext autoreload\n# %autoreload 2\n\n# change working directory to project root\nif os.getcwd().split('/')[-1] != 'imodels':\n os.chdir('..')\n\nMODEL_COMPARISON_PATH = 'tests/comparison_data/'\n\n# %%\ndatasets = [\n (\"breast-cancer\", 13),\n (\"breast-w\", 15),\n (\"credit-g\", 31),\n (\"haberman\", 43),\n (\"heart\", 1574),\n (\"labor\", 4),\n (\"vote\", 56),\n ]\n\ndef get_comparison_result(path: str, estimator_name: str, test=False) -> Dict[str, Any]:\n if test:\n result_file = path + 'test/' + f'{estimator_name}_test_comparisons.pkl'\n else:\n result_file = path + 'val/' + f'{estimator_name}_comparisons.pkl'\n return pkl.load(open(result_file, 'rb'))\n\ndef get_x_and_y(result_data: pd.Series) -> (pd.Series, pd.Series):\n complexities = result_data[result_data.index.str.contains('complexity')]\n rocs = result_data[result_data.index.str.contains('ROC')]\n complexity_sort_indices = complexities.argsort() \n return complexities[complexity_sort_indices], rocs[complexity_sort_indices]\n\ndef viz_comparison_val_average(result: Dict[str, Any]) -> None:\n '''Plot dataset-averaged ROC AUC vs dataset-averaged complexity for different hyperparameter settings\n of a single model, including zoomed-in plot of overlapping region\n '''\n result_data = result['df']['mean']\n result_estimators = result['estimators']\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n for est in np.unique(result_estimators):\n \n est_result_data = result_data[result_data.index.str.contains(est)]\n x, y = get_x_and_y(est_result_data) \n axes[0].plot(x, y, marker='o', markersize=4, label=est.replace('_', ' '))\n \n if est in result['auc_of_auc'].index:\n area = result['auc_of_auc'][est]\n label = est.split(' - ')[1] + f' AUC: {area:.3f}'\n axes[1].plot(x, y, marker='o', markersize=4, label=label.replace('_', ' '))\n\n for ax in axes:\n ax.set_xlabel('complexity score')\n ax.set_ylabel('ROC AUC')\n ax.legend(frameon=False, handlelength=1) \n axes[0].set_title('average ROC AUC across all comparison datasets')\n axes[1].set_xlim(result['auc_of_auc_lb'], result['auc_of_auc_ub'])\n axes[1].set_title('Overlapping, low (<30) complexity region only')\n \n plt.tight_layout()\n plt.show()\n\ndef viz_comparison_test_average(results: List[Dict[str, Any]]) -> None:\n '''Plot dataset-averaged ROC AUC vs dataset-averaged complexity for different models\n '''\n for result in results:\n mean_result = result['df']['mean']\n est = result['estimators'][0]\n x, y = get_x_and_y(mean_result) \n plt.plot(x, y, marker='o', markersize=2, linewidth=1, label=est.replace('_', ' '))\n plt.xlim(0, 30)\n plt.xlabel('complexity score', size=8)\n plt.ylabel('ROC AUC', size=8)\n plt.title('average ROC AUC across all comparison datasets', size=8)\n plt.legend(frameon=False, handlelength=1, fontsize=8)\n plt.show()\n\ndef viz_comparison_datasets(result: Dict[str, Any], cols=3, figsize=(14, 10), test=False) -> None:\n '''Plot ROC AUC vs complexity for different datasets and models (not averaged)\n '''\n 
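# With test=True, 'result' is a list of per-model result dicts whose 'df' frames are concatenated; otherwise it is a single validation-result dict from get_comparison_result (see the calls at the bottom of this script).\n    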
if test:\n results_df = pd.concat([r['df'] for r in result])\n results_estimators = [r['estimators'][0] for r in result]\n else:\n results_df = result['df']\n results_estimators = np.unique(result['estimators'])\n\n datasets = list(results_df.columns)[:-2]\n n_rows = int(math.ceil(len(datasets) / cols))\n fig, axes = plt.subplots(n_rows, cols, figsize=figsize)\n\n for i, dataset in enumerate(datasets):\n curr_ax = axes[i // cols, i % cols]\n results_data = results_df[dataset]\n for est in np.unique(results_estimators):\n results_data_est = results_data[results_data.index.str.contains(est)]\n x, y = get_x_and_y(results_data_est)\n curr_ax.plot(x, y, marker='o', markersize=4, label=est.replace('_', ' '))\n \n curr_ax.set_xlim(0, 30)\n curr_ax.set_xlabel('complexity score')\n curr_ax.set_ylabel('ROC AUC')\n curr_ax.set_title(f'dataset {dataset}')\n curr_ax.legend(frameon=False, handlelength=1)\n \n plt.tight_layout()\n plt.show()\n\n\n# %% [markdown]\n# # dataset stats\n\n# %%\nmetadata = []\ncolumns = ['name', 'samples', 'features', 'class 0 ct', 'class 1 ct', 'majority class %']\nfor dataset_name, data_id in datasets:\n dataset = fetch_openml(data_id=data_id, as_frame=False)\n shape = dataset.data.shape\n class_counts = np.unique(dataset.target, return_counts=True)[1]\n metadata.append([dataset_name, shape[0], shape[1], class_counts[0], class_counts[1], np.max(class_counts) / np.sum(class_counts)])\npd.DataFrame(metadata, columns=columns).set_index('name') \n\n# %% [markdown]\n# # complexity vs. ROC Area plot for all models\n\n# %%\ntest_models = [\n 'random_forest', \n 'gradient_boosting', \n 'skope_rules', \n 'rulefit', \n 'fplasso', \n 'fpskope',\n 'grl',\n 'oner',\n 'brs']\ntest_results = [get_comparison_result(MODEL_COMPARISON_PATH, mname, test=True) for mname in test_models]\nviz_comparison_test_average(test_results)\nviz_comparison_datasets(test_results, cols=2, figsize=(13, 18), test=True)\n\n# %% [markdown]\n# # hyperparameter tuning plots for each model\n#\n\n# %% [markdown]\n# ## Random Forest\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'random_forest')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## Gradient boosted trees\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'gradient_boosting')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## SkopeRules\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'skope_rules')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## RuleFit\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'rulefit')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## FPLasso\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'fplasso')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## FPSkope\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'fpskope')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# %% [markdown]\n# ## BRL\n\n# %%\ncomparison_result = get_comparison_result(MODEL_COMPARISON_PATH, 'brl')\nviz_comparison_val_average(comparison_result)\nviz_comparison_datasets(comparison_result)\n\n# 
%%\n","sub_path":"tests/notebooks/imodels_comparisons.py","file_name":"imodels_comparisons.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"86870185","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nimport os\n\ndef config_gameini_func(session,param):\n result = {'code':0,'msg':[],'errmsg':[],'log':[]}\n param = eval(param)\n localnetip = param.get('listenportal')\n loginip = param.get('connectip')\n serverdesc = param.get('server')\n servername = param.get('name')\n idxnum = param.get('idx')\n ordernum = param.get('order')\n listpvnum = param.get('listpv')\n chatserverip = param.get('servip')\n \n game_temp = '/tools/endpoint/template/server_user.ini'\n game_ini = '/ss/game/server_user.ini'\n f = file(game_temp,'rb').read()\n f = f.replace('localnetip',localnetip)\n f = f.replace('loginip',loginip)\n f = f.replace('serverdesc',serverdesc)\n f = f.replace('servername',servername)\n f = f.replace('idxnum',idxnum)\n f = f.replace('ordernum',ordernum)\n f = f.replace('listpvnum',listpvnum)\n f = f.replace('chatserverip',chatserverip)\n file(game_ini,'wb').write(f.decode('utf-8').encode('gbk'))\n return str(result)\n\nif __name__ == '__main__':\n config_gameini_func(1,{'listenportal':'111.111.111.111'})\n\n","sub_path":"endpoint/yw/config_gameini.py","file_name":"config_gameini.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"584492656","text":"#Some imports in Java are not useful in Ruby/Python and can be resolved immediately.\nJAVA_KNOWN_IMPORTS = {\n\t'java.util.Map' : 'Dictionary',\n\t'java.util.HashMap' : 'Dictionary',\n\t'java.util.ArrayList' : 'List',\n\t'java.util.List' : 'List',\n\t'java.util.LinkedList' : 'List',\n\t'java.util.Set':\t'Set',\n\t'java.util.HashSet':\t'Set'\n}\n\n\n#######\n#Functions#\n\nBUILTIN_FUNCTIONS = {'System.out.println','System.out.print', 'print','println','length', 'all', 'sum','add','index','remove','get','keySet','values'}\n\nBUILTIN_EQUIVALENT_FUNCTIONS = {'add':{'List':'push','Set':'add'},\n\t\t\t\t'remove':{'List':'remove','Set':'remove'},\n\t\t\t\t'println':'display',\n\t\t\t\t'print':'display',\n\t\t\t\t'size':'length',\n\t\t\t\t'get':'index',\n\t\t\t\t'put':'index',\n\t\t\t\t'containsKey':'contains?',\n\t\t\t\t'keySet':'keys',\n\t\t\t\t'values':'values'}\n\nBUILTIN_ARG_FUNCTIONS = {'add':'same',\n\t\t\t'remove':'Int',\n\t\t\t'print':'String',\n\t\t\t'println':'String',\n\t\t\t'size':None,\n\t\t\t'get':'Int',\n\t\t\t'put':'same',\n\t\t\t'containsKey':'same',\n\t\t\t'keySet':None,\n\t\t\t'values':None}\n\nBUILTIN_TYPE_FUNCTIONS = {'length':'Int',\n\t\t\t'push':'Void',\n\t\t\t'display':'Void',\n\t\t\t'remove':'Void',\n\t\t\t'index':'Void',\n\t\t\t'contains?':'Boolean',\n\t\t\t'add':'Void',\n\t\t\t'keys':['Set','@k'],\n\t\t\t'values':['List','@v']}\n\nPARTICULAR_FUNCTIONS = {'index'}\n\n","sub_path":"api_translator.py","file_name":"api_translator.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"43837482","text":"from operator import attrgetter\n\nclass Person:\n def __init__(self, name, age, spouse=None, children=[]):\n self.name = name\n self.age = age\n self.spouse = spouse\n self.children = children\n\nclass Child(Person):\n def __init__(self, name, age, spouse, children, parents):\n Person.__init__(self, name, age, spouse, children)\n self.parents 
= parents\n\n def get_siblings(self):\n sib_lst = []\n for parent in self.parents:\n sib_lst = list(set(sib_lst) | set(parent.children))\n sib_lst.remove(self)\n sib_lst = sorted(sib_lst, key=attrgetter('age'))\n sib_names = [sib.name for sib in sib_lst]\n return sib_names\n\nJonny = Person(\"Jonny\", 32, None, [])\nBeth = Person(\"Beth\", 28, Jonny, [])\nJonny.spouse = Beth\nMax = Child(\"Max\", 5, None, [], [Jonny])\nAnnie = Child(\"Annie\", 10, None, [], [Beth])\nRon = Child(\"Ron\", 7, None, [], [Beth, Jonny])\nJonny.children.extend([Max, Ron])\nBeth.children.extend([Annie, Ron])\nprint(Ron.get_siblings())","sub_path":"introduction_to_python/classes_new/person_get_sibilings/solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"540092890","text":"class Node():\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\nclass LinkedList():\n def __init__(self, head=None):\n self.head = head\n\n def insert(self, data):\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n\n def size(self):\n current_node = self.head\n count = 0\n while current_node is not None:\n current_node = current_node.next\n count += 1\n return count\n\n def search(self, data):\n current_node = self.head\n while current_node is not None:\n if current_node.data == data:\n return current_node\n current_node = current_node.next\n return None\n\n def delete(self, data):\n if self.head is None:\n print(\"Empty linked list\")\n return 0\n current_node = self.head\n previous_node = None\n while current_node is not None:\n if current_node.data != data: # Traverse until data is found\n previous_node = current_node\n current_node = current_node.next\n # data is found in 3 types as follows\n elif previous_node is None: # data found at first \\\n # node ( previous node still is None)\n self.head = current_node.next\n current_node = current_node.next\n elif current_node.next is None: # data is the last element\n previous_node.next = None\n return\n else: # data found at another position.. 
detach and reattach next\n previous_node.next = current_node.next\n current_node = previous_node.next\n\n def getFirstNode(self):\n return self.head\n\n def reverseUtil(self, current_node, previous_node):\n if current_node.next is None:\n self.head = current_node\n current_node.next = previous_node\n return\n next_node = current_node.next\n current_node.next = previous_node\n self.reverseUtil(next_node, current_node)\n\n def reverse(self):\n if self.head is None:\n print(\"list empty\")\n return\n self.reverseUtil(self.head, None)\n\n def printList(self):\n current_node = self.head\n while current_node is not None:\n print(current_node.data)\n current_node = current_node.next\n return None\n\n def nonRecursiveReversr(self):\n if self.head is None:\n print(\"LinkedList is empty\")\n return\n if self.head.next is None:\n return\n current = self.head\n previous = None\n while current is not None:\n next = current.next\n current.next = previous\n previous = current\n current = next\n self.head = previous\n\n def reverseKtoM(self, k , m):\n count = 0\n current = self.head\n next = None\n while count < k and current is not None:\n count =+ 1\n current = current.next\n if current is None:\n print(\"k greater than length of LinkedList\")\n return\n reverse_head = current\n reverse_tail = current.next\n previous = current\n while count < m:\n count =+ 1\n next = current.next\n current.next = previous\n previous = current\n current = current.next\n reverse_head.next = current\n\nL1 = LinkedList()\nfor i in range(5, 0, -1):\n L1.insert(i)\nL1.printList()\nprint(\"\")\nL1.reverse()\nL1.printList()\nprint(\"\")\nL1.nonRecursiveReversr()\nL1.printList()\n# size() the paranthesis is imp otherwise it will print the size method\n\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"653665810","text":"import csv\nimport numpy as np\nimport os\nfrom scipy.signal import spectrogram\nimport sys\n\n\n# parameter to be varied\neeg_epoch_width_in_s = int(sys.argv[2])\neeg_source = sys.argv[1]\nnum_classes = int(sys.argv[3])\n\n# set up file location paths\nepochs_path = 'data/epochs_{0}c/'.format(str(num_classes))\nsxx_path = 'data/spectrogram_{0}c/'.format(str(num_classes))\n\nepochs_files = [file for file in os.listdir(epochs_path) if\n '_{0}_ew{1}'.format(eeg_source, eeg_epoch_width_in_s) in file]\nfor epochs_file in epochs_files:\n species = epochs_file.split('_' + eeg_source)[0]\n print('Processing ' + species)\n # set up input and output files\n epochs_filename = epochs_path + epochs_file\n sourcefile = open(epochs_filename, mode='r', newline='')\n filereader = csv.reader(sourcefile)\n template = '{0}_{1}_ew{2}.csv'\n common_labels = [eeg_source, str(eeg_epoch_width_in_s)]\n output_filename = sxx_path + template.format(species, *common_labels)\n targetfile = open(output_filename, mode='w', newline='')\n filewriter = csv.writer(targetfile)\n # read epoch and create spectrogram\n for row in filereader:\n data = np.asarray(row, dtype=float)\n f, t, Sxx = spectrogram(data, 256.0)\n Sxx = Sxx[0:41, :].flatten()\n Sxx = Sxx / np.amax(Sxx)\n filewriter.writerow(Sxx)\n # close files\n sourcefile.close()\n targetfile.close()\n","sub_path":"createspectrogram.py","file_name":"createspectrogram.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"517273854","text":"from 
bisect import bisect_left, bisect_right\n\nn = int(input())\nresult = -1\narray = list(map(int,input().split()))\nfor i in range(n-1):\n if bisect_left(array,i) ==i:\n result = i\nif n-1 == array[n-1]:\n result = n-1\n\nprint(result)","sub_path":"binary_search/q28.py","file_name":"q28.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"359013169","text":"#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom src.api.types import Tag, Rectangle, SingleEntity, Entity, Detection\n\n\ndef test_single_entity():\n expected_dict = {\n \"tag\": {\n \"value\": \"car\",\n \"confidence\": 0.97\n },\n \"box\": {\"l\": 1.0, \"t\": 2.0, \"w\": 3.0, \"h\": 4.0}\n }\n\n tag = Tag(\"car\", 0.97)\n box = Rectangle(1.0, 2.0, 3.0, 4.0)\n single_entity = SingleEntity(tag, box)\n assert expected_dict == single_entity.as_dict()\n\n\ndef test_entity():\n detections = []\n entities = []\n expected_dict = {\n 'inferences': [\n {\n 'type': 'entity',\n 'subtype': 'vehicleDetection',\n 'entity': {\n 'tag': {'value': 'car', 'confidence': 0.97},\n 'box': {'l': 1.0, 't': 2.0, 'w': 3.0, 'h': 4.0}\n }\n },\n {\n 'type': 'entity',\n 'subtype': 'vehicleDetection',\n 'entity': {\n 'tag': {'value': 'bike', 'confidence': 0.94},\n 'box': {'l': 0.0, 't': 0.0, 'w': 0.0, 'h': 0.0}\n }\n }\n ]\n }\n detections = [\n SingleEntity(Tag(\"car\", 0.97), Rectangle(1.0, 2.0, 3.0, 4.0)),\n SingleEntity(Tag(\"bike\", 0.94), Rectangle(0.0, 0.0, 0.0, 0.0)),\n ]\n for detection in detections:\n entity = Entity(subtype_name=\"vehicleDetection\", entity=detection)\n entities.append(entity)\n model_detection = Detection(entities=entities)\n print(model_detection.as_dict())\n assert expected_dict == model_detection.as_dict()\n","sub_path":"extras/ams_wrapper/tests/unit/test_entity_type.py","file_name":"test_entity_type.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129201278","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 25 15:43:43 2021\n\n@author: Ali Kalbaliyev\n\"\"\"\n\nimport pandas as pd\nimport streamlit as st\nimport plotly.express as px\nfrom PIL import Image\n\nst.set_page_config(page_title='Machine Learning Application')\nst.header('Welcome to ML Application')\n\n\n#### -- Load DataFrame \ndf=pd.read_csv('Salary.csv')\ndf=df.dropna()\nst.dataframe(df) # show Dataframe on app\n\n\n# Creating beta columns for splitting width n columns\ncolumn1,column2=st.beta_columns(2)\n\n# Piechart \nchart1=px.pie(df,\n title='Pie Chart',\n names='Country',\n values='change_company_a_lot',)\n#Scatter Plot\nchart2 =px.scatter(df,\n \n x='Salary_usd',\n y='Experience_year',\n color='Country',\n title='Scatter Plot')\n\n#Funnel Chart\n# Calculation for Funnel chart\nsalary_mean=df.groupby(\"Country\").agg({'Salary_usd':'mean'})\nsalary_mean=salary_mean.sort_values(by=['Salary_usd'], 
ascending=False)\nsalary_mean=salary_mean['Salary_usd'].reset_index()\n\nchart3=px.funnel(salary_mean,\n title='Funnel chart',\n y='Country',\n x='Salary_usd')\n\n\n# Plotting on Application\ncolumn1.plotly_chart(chart1,use_container_width=True)\ncolumn2.plotly_chart(chart3,use_container_width=True)\nst.plotly_chart(chart2)\n\n\n## Adding Images\n\nst.header('First Image')\nimage1=Image.open('meme2.jpg') # reading image \nst.image(image1,caption='First Image',use_column_width=True) # showing on application \n\n\n#Slider\nunique_country=list(df[\"Country\"].unique())\nunique_ex_years=list(df[\"Salary_usd\"].unique())\n\nst.header('Salary Slider')\nsalary = st.slider(label='',\n min_value=float(min(unique_ex_years)),\n max_value=float(max(unique_ex_years)),\n value=float(min(unique_ex_years)))\n\nst.write(\"Selected\", salary, 'USD') # real-time checking slider value\n\n\n#Multi Selection\ncountry_selection=st.multiselect('Countries Select Menu' ,\n unique_country,unique_country)\n\n\n#Filter DataFrame\ndf_filter=(df[\"Salary_usd\"]>salary) &(df[\"Country\"].isin(country_selection)) # condition\n\nresult_number=df[df_filter].shape[0] # results\nst.markdown('Number of result: {0}'.format(result_number)) # realtime showing result number\nst.dataframe(df[df_filter])\n\n\n\n\n# Barchart\n# Calculation for Bar chart\nsalary_count=df[df_filter].groupby(\"Country\").agg({'Salary_usd':'size'})\nsalary_count=salary_count.sort_values(by=['Salary_usd'], ascending=False)\nsalary_count=salary_count['Salary_usd'].reset_index()\n\nbarchart=px.bar(df[df_filter], \n title='Bar Chart',\n x=salary_count[\"Country\"],\n y=salary_count[\"Salary_usd\"],\n template='plotly_white',\n color_discrete_sequence=['#800080'])\n\nst.plotly_chart(barchart)\n\n\n#Modelling\nst.header('Modelling with Logistic Regression')\nst.subheader('Diabetes Dataset')\n\ndiabetes=pd.read_csv(\"diabetes.csv\")\nst.dataframe(diabetes)\n\n# Splitting \nX=diabetes.drop(\"Outcome\",axis=1)\nY=diabetes.Outcome\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.25,random_state=42)\n\nst.markdown('X_train size = {0}'.format(X_train.shape))\nst.markdown('X_test size = {0}'.format(X_test.shape))\nst.markdown('y_train size = {0}'.format(y_train.shape))\nst.markdown('y_test size = {0}'.format(y_test.shape))\n\nif st.button('Calculate Model'): # add Button to Application\n st.title('Congratulations Your Model is working')\n \n import pickle\n document=\"myModel\"\n loaded_model=pickle.load(open(document,'rb'))\n y_loded_model_pred=loaded_model.predict(X_test)\n \n from sklearn.metrics import accuracy_score,confusion_matrix,classification_report\n st.markdown('Confusion Matrix')\n st.write(confusion_matrix(y_test,y_loded_model_pred))\n \n report = classification_report(y_test, y_loded_model_pred, output_dict=True) # creating dataframe from classifaction report\n df_report = pd.DataFrame(report).transpose()\n \n st.dataframe(df_report)\n \n accuracy=str(round(accuracy_score(y_test,y_loded_model_pred),2))+\"%\"\n st.markdown(\"Accuracy Score = \"+accuracy) \n st.title('Thanks For using')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"602047192","text":"import re\n\nfrom . 
import noteparser\n\nclass SkyABC15(noteparser.NoteParser):\n\n def __init__(self):\n\n super().__init__()\n\n self.position_map = {\n '.': (-1, -1),\n 'A1': (0, 0), 'A2': (0, 1), 'A3': (0, 2), 'A4': (0, 3), 'A5': (0, 4),\n 'B1': (1, 0), 'B2': (1, 1), 'B3': (1, 2), 'B4': (1, 3), 'B5': (1, 4),\n 'C1': (2, 0), 'C2': (2, 1), 'C3': (2, 2), 'C4': (2, 3), 'C5': (2, 4)\n }\n\n self.inverse_position_map = {\n (-1, -1): '.',\n (0, 0): 'A1', (0, 1): 'A2', (0, 2): 'A3', (0, 3): 'A4', (0, 4): 'A5',\n (1, 0): 'B1', (1, 1): 'B2', (1, 2): 'B3', (1, 3): 'B4', (1, 4): 'B5',\n (2, 0): 'C1', (2, 1): 'C2', (2, 2): 'C3', (2, 3): 'C4', (2, 4): 'C5'\n }\n\n self.note_name_with_octave_regex = re.compile(r'([ABCabc][1-5])')\n self.note_name_regex = self.note_name_with_octave_regex\n self.single_note_name_regex = re.compile(r'(\\b[ABCabc][1-5]\\b)')\n self.not_note_name_regex = re.compile(r'[^ABCabc]+')\n self.not_octave_regex = re.compile(r'[^123]+')\n\n def calculate_coordinate_for_note(self, note, song_key='C', note_shift=0, is_finding_key=False):\n \"\"\"\n Returns a tuple containing the row index and the column index of the note's position.\n \"\"\"\n note = note.upper()\n\n if note in self.position_map.keys(): # Note Shift (ie transposition in Sky)\n pos = self.position_map[note] # tuple\n if (pos[0] < 0) and (pos[1] < 0): # Special character\n return pos\n else:\n idx = pos[0] * self.columns + pos[1]\n idx = idx + note_shift\n pos = (int(idx / self.columns), idx - self.columns * int(idx / self.columns))\n if (0, 0) <= pos <= (2, 4):\n return pos\n else:\n raise KeyError('Note ' + str(note) + ' was not in range of the Sky keyboard.')\n else:\n raise KeyError('Note ' + str(note) + ' was not found in the position_map dictionary.')\n\n def get_note_from_coordinate(self, coord):\n\n try:\n note = self.inverse_position_map[coord]\n except KeyError:\n note = 'X'\n\n return note\n\n def sanitize_note_name(self, note_name):\n\n # make sure the first letter of the note is uppercase, for sky note's dictionary keys\n note_name = note_name.capitalize()\n return note_name\n","sub_path":"python/noteparsers/skyabc15.py","file_name":"skyabc15.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"147325239","text":"import time\nimport traceback\nimport os\n\nfrom webrecorder.utils import load_wr_config\n\n\n# ============================================================================\nclass Worker(object):\n def __init__(self, worker_cls):\n self._running = True\n\n self.sleep_secs = int(os.environ.get('TEMP_SLEEP_CHECK', 30))\n print('Running {0} every {1}'.format(worker_cls.__name__, self.sleep_secs))\n\n config = load_wr_config()\n\n self.worker = worker_cls(config)\n\n def stop(self):\n self._running = False\n\n def run(self):\n while self._running:\n try:\n self.worker()\n\n except:\n traceback.print_exc()\n\n finally:\n time.sleep(self.sleep_secs)\n\n","sub_path":"webrecorder/webrecorder/rec/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"178015070","text":"from lxml import html\nimport requests, time\n\nstart_time = time.time() # 初始时间戳\nnow = time.strftime(\"%Y%m%d\", time.localtime()) # 当前日期戳\n# # ========================输入区开始========================\n# input_file_name = '微博UID' # 输入文件的名称\noutput_file_name = '微博信息批量获取' # 输出文件的名称\n#\npath_prefix = '/Users/alicewish/我的坚果云/' # 文件地址前缀\n# 
input_file_path = path_prefix + input_file_name + '.txt' # 输入文件的地址\noutput_file_path = path_prefix + output_file_name + '.txt' # 输出文件的地址\n\n# ================读取剪贴板================\nfrom tkinter import Tk\n\nr = Tk()\nread_text = r.clipboard_get()\ntext_readline = read_text.splitlines()\nprint(text_readline)\n# ========================处理文本========================\nnickname_list = [] # 初始化昵称列表\nfriends_list = [] # 初始化好友数列表\nfans_list = [] # 初始化粉丝数列表\nposts_list = [] # 初始化微博数列表\ninfo_list = [] # 初始化信息列表\n\nfor i in range(len(text_readline)):\n entry_start_time = time.time()\n # ========================获取昵称和粉丝数========================\n nickname = \"\"\n friends = \"\"\n fans = \"\"\n posts = \"\"\n location = \"\"\n description = \"\"\n days = \"\"\n started = \"\"\n try:\n page = requests.get('http://sinacn.weibodangan.com//user/' + text_readline[i])\n tree = html.fromstring(page.text)\n nickname = tree.xpath('//h3[@class=\"username\"]/text()')[0]\n data = tree.xpath('//td/text()')\n friends = data[0]\n fans = data[1].replace(\" \", \"\")\n posts = data[2]\n alldays = tree.xpath(\"//div[@class='hidden-xs hidden-sm']/p[1]/text()\")[0]\n days = alldays[7:]\n started = tree.xpath(\"//span[@id='register_time']/text()\")[0]\n location = tree.xpath(\"//div[@class='info'][1]/text()\")[0]\n description = tree.xpath(\"//div[@class='info'][2]/text()\")[0]\n except:\n pass\n info = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \"\\t\" + nickname + \"\\t\" + friends + \"\\t\" + fans + \"\\t\" + posts + \"\\t\" + location + \"\\t\" + description + \"\\t\" + days + \"\\t\" + started\n info_list.append(info)\n # ================每项时间计时================\n entry_run_time = time.time() - entry_start_time\n entry_print = \"耗时:{:.4f}秒\".format(entry_run_time)\n print(info_list[i], \"\\n\", entry_print)\n\n# ================写入昵称列表================\ntext = '\\r\\n'.join(info_list) # 写入文本\n# ================写入剪贴板================\nimport pyperclip\n\npyperclip.copy(text)\nspam = pyperclip.paste()\n\n# ================写入文本文档================\nf = open(output_file_path, 'w')\ntry:\n f.write(text)\nfinally:\n f.close()\n\n# ================运行时间计时================\nrun_time = time.time() - start_time\nif run_time < 60: # 两位小数的秒\n print(\"耗时:{:.2f}秒\".format(run_time))\nelif run_time < 3600: # 分秒取整\n print(\"耗时:{:.0f}分{:.0f}秒\".format(run_time // 60, run_time % 60))\nelse: # 时分秒取整\n print(\"耗时:{:.0f}时{:.0f}分{:.0f}秒\".format(run_time // 3600, run_time % 3600 // 60, run_time % 60))\n","sub_path":"微博-信息批量获取.py","file_name":"微博-信息批量获取.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"58554208","text":"#!/usr/bin/env python\n\"\"\"\nFunctions for writing tables\n\n\nAuthors:\n - Arno Klein, 2012-2013 (arno@mindboggle.info) http://binarybottle.com\n - Forrest Sheng Bao, 2012 (forrest.bao@gmail.com) http://fsbao.net\n\nCopyright 2013, Mindboggle team (http://mindboggle.info), Apache v2.0 License\n\n\"\"\"\n\ndef read_columns(filename, n_columns=1, trail=False):\n \"\"\"\n Read n-column text file. 
Assumes space(s) as delimiter.\n\n Parameters\n ----------\n filename : name of text file [string]\n n_columns : number of columns to extract [integer]\n trail : combine all remaining columns as a string\n in the final list [Boolean]\n\n Returns\n -------\n columns : a list of lists of strings, one list per column of text.\n\n \"\"\"\n import re\n\n Fp = open(filename, 'r')\n lines = Fp.readlines()\n columns = [[] for x in range(n_columns)]\n for line in lines:\n if line:\n row = re.findall(r'\\S+', line)\n if len(row) >= n_columns:\n for icolumn in range(n_columns):\n if trail and icolumn == n_columns - 1:\n columns[icolumn].append(' '.join(row[icolumn::]))\n else:\n columns[icolumn].append(row[icolumn])\n else:\n import os\n os.error('The number of columns in {0} is less than {1}.'.format(\n filename, n_columns))\n Fp.close()\n\n return columns\n\ndef write_columns(columns, column_names, output_table, delimiter=',',\n quote=True, input_table=''):\n \"\"\"\n Write table with columns and column names. Assumes space(s) as delimiter.\n\n If there is an input table file to append to, assume a 1-line header.\n\n Parameters\n ----------\n columns : list of lists of floats or integers\n values (each list is a column of values)\n column_names : list of strings\n names of columns\n output_table : string\n name of output table file\n delimiter : string\n delimiter between columns, such as ','\n bracket : string\n string bracketing each element, such as '\"'\n input_table : string (default is empty string)\n name of table file to which the columns are to be appended\n\n Returns\n -------\n output_table : string\n name of output table file\n\n Examples\n --------\n >>> from mindboggle.utils.io_table import write_columns\n >>> labels = ['category one', 'category two', 'category three', 'category four']\n >>> values = [0.12, 0.36, 0.75, 0.03]\n >>> values2 = [32, 87, 53, 23]\n >>> columns = [labels, values]\n >>> column_names = ['label', 'value']\n >>> output_table = 'write_columns.csv'\n >>> delimiter = ','\n >>> quote = True\n >>> input_table = ''\n >>> write_columns(columns, column_names, output_table, delimiter, quote, input_table)\n >>> write_columns(values2, 'value 2', output_table, delimiter,\n >>> quote, input_table=output_table)\n\n \"\"\"\n import os\n import sys\n from mindboggle.utils.io_table import read_columns\n\n output_table = os.path.join(os.getcwd(), output_table)\n if quote:\n q = '\"'\n else:\n q = ''\n\n #-----------------------\n # Check format of inputs\n #-----------------------\n # If the list contains integers or floats, put in a list:\n if columns:\n if isinstance(columns[0], int) or isinstance(columns[0], float) or \\\n isinstance(columns[0], str):\n columns = [columns]\n # If the list contains all lists, accept format:\n elif all([isinstance(x, list) for x in columns]):\n pass\n else:\n print(\"Error: columns contains unacceptable elements.\")\n print(\"columns type is: {0}\".format(type(columns)))\n print(\"columns length is: {0}\".format(len(columns)))\n print(\"columns[0] type is: {0}\".format(type(columns[0])))\n sys.exit()\n # If column_names is a string, create a list containing\n # as many of this string as there are columns.\n if isinstance(column_names, str):\n column_names = [column_names for x in columns]\n elif isinstance(column_names, list):\n if len(column_names) < len(columns):\n column_names = [column_names[0] for x in columns]\n else:\n pass\n else:\n print(\"Error: column_names is neither a list nor a string\")\n sys.exit()\n\n 
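# When appending to an existing table, read_columns(..., n_columns=1, trail=True) yields a single column whose first entry is the header line and whose remaining entries are the existing data rows (a 1-line header is assumed, per the docstring).\n    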
#-----------------------------------\n # Read columns from input table file\n #-----------------------------------\n if input_table:\n input_columns = read_columns(input_table, n_columns=1, trail=True)\n input_names = input_columns[0][0]\n input_columns = input_columns[0][1::]\n #else:\n # input_names = ''\n # input_columns = ['' for x in columns[0]]\n\n #--------------\n # Write to file\n #--------------\n Fp = open(output_table, 'wa')\n if column_names:\n column_names = [q+x+q for x in column_names]\n if input_table:\n Fp.write(delimiter.join([input_names,\n delimiter.join(column_names) + \"\\n\"]))\n else:\n Fp.write(delimiter.join(column_names) + \"\\n\")\n #else:\n # Fp.write(input_names + \"\\n\")\n\n for irow in range(len(columns[0])):\n if input_table:\n Fp.write(input_columns[irow] + delimiter)\n for icolumn, column in enumerate(columns):\n if icolumn < len(columns)-1:\n Fp.write('{0}{1}{2}{3}'.format(\n q, column[irow], q, delimiter))\n else:\n Fp.write('{0}{1}{2}'.format(q, column[irow], q))\n Fp.write(\"\\n\")\n\n Fp.close()\n\n else:\n print(\"NOTE: 'columns' is empty. Nothing written.\")\n\n return output_table\n\ndef write_rows(filename, list_of_lines, header=\"\"):\n \"\"\"\n Write a list to a file, one line per list element.\n\n Parameters\n ----------\n filename : string\n name of output file\n list_of_lines : list\n each element is written to file as a line\n header : string (default is empty string)\n header to write at the top of the file\n\n Returns\n -------\n filename : string\n name of output file\n\n \"\"\"\n\n Fp = open(filename, 'w')\n\n if header:\n Fp.write(header + '\\n')\n\n for element in list_of_lines:\n Fp.write(str(element) + '\\n')\n\n Fp.close()\n\n return filename\n\ndef write_shape_stats(labels_or_file, sulci=[], fundi=[],\n affine_transform_file='', transform_format='itk',\n area_file='', mean_curvature_file='', travel_depth_file='',\n geodesic_depth_file='', convexity_file='', thickness_file='',\n labels_spectra=[], labels_spectra_IDs=[],\n sulci_spectra=[], sulci_spectra_IDs=[],\n exclude_labels=[-1], delimiter=','):\n \"\"\"\n Make tables of shape statistics per label, fundus, and/or sulcus.\n\n Parameters\n ----------\n labels_or_file : list or string\n label number for each vertex or name of VTK file with index scalars\n sulci : list of integers\n indices to sulci, one per vertex, with -1 indicating no sulcus\n fundi : list of integers\n indices to fundi, one per vertex, with -1 indicating no fundus\n affine_transform_file : string\n affine transform file to standard space\n transform_format : string\n format for transform file\n Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format\n area_file : string\n name of VTK file with surface area scalar values\n mean_curvature_file : string\n name of VTK file with mean curvature scalar values\n travel_depth_file : string\n name of VTK file with travel depth scalar values\n geodesic_depth_file : string\n name of VTK file with geodesic depth scalar values\n convexity_file : string\n name of VTK file with convexity scalar values\n thickness_file : string\n name of VTK file with thickness scalar values\n labels_spectra : list of lists of floats\n Laplace-Beltrami spectra for labeled regions\n labels_spectra_IDs : list of integers\n unique ID numbers (labels) for labels_spectra\n sulci_spectra : list of lists of floats\n Laplace-Beltrami spectra for sulci\n sulci_spectra_IDs : list of integers\n unique ID numbers (labels) for sulci_spectra\n exclude_labels : list of lists of integers\n indices to 
be excluded (in addition to -1)\n delimiter : string\n delimiter between columns, such as ','\n\n Returns\n -------\n label_table : string\n output table filename for label shapes\n sulcus_table : string\n output table filename for sulcus shapes\n fundus_table : string\n output table filename for fundus shapes\n\n Examples\n --------\n >>> import os\n >>> from mindboggle.utils.io_vtk import read_scalars\n >>> from mindboggle.utils.io_table import write_shape_stats\n >>> path = os.environ['MINDBOGGLE_DATA']\n >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')\n >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')\n >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')\n >>> sulci, name = read_scalars(sulci_file)\n >>> fundi, name = read_scalars(fundi_file)\n >>> affine_transform_file = os.path.join(path, 'arno', 'mri',\n >>> # 'affine_to_template.mat')\n >>> 't1weighted_brain.MNI152Affine.txt')\n >>> #transform_format = 'mat'\n >>> transform_format = 'itk'\n >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')\n >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')\n >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')\n >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')\n >>> convexity_file = ''\n >>> thickness_file = ''\n >>> delimiter = ','\n >>> #\n >>> import numpy as np\n >>> labels, name = read_scalars(labels_or_file)\n >>> labels_spectra = [[1,2,3] for x in labels]\n >>> labels_spectra_IDs = np.unique(labels).tolist()\n >>> sulci_spectra = [[1,2,3] for x in sulci]\n >>> sulci_spectra_IDs = np.unique(sulci).tolist()\n >>> exclude_labels = [-1]\n >>> #\n >>> write_shape_stats(labels_or_file, sulci, fundi,\n >>> affine_transform_file, transform_format, area_file,\n >>> mean_curvature_file, travel_depth_file, geodesic_depth_file,\n >>> convexity_file, thickness_file, labels_spectra,\n >>> labels_spectra_IDs, sulci_spectra,\n >>> sulci_spectra_IDs, exclude_labels, delimiter)\n\n \"\"\"\n import os\n import numpy as np\n from mindboggle.shapes.measure import means_per_label, stats_per_label, \\\n sum_per_label\n from mindboggle.utils.io_vtk import read_scalars, read_vtk, \\\n apply_affine_transform\n from mindboggle.utils.io_table import write_columns\n\n # Make sure inputs are lists:\n if isinstance(labels_or_file, np.ndarray):\n labels = labels_or_file.tolist()\n elif isinstance(labels_or_file, list):\n labels = labels_or_file\n elif isinstance(labels_or_file, str):\n labels, name = read_scalars(labels_or_file)\n if isinstance(sulci, np.ndarray):\n sulci = sulci.tolist()\n if isinstance(fundi, np.ndarray):\n fundi = fundi.tolist()\n\n #-------------------------------------------------------------------------\n # Feature lists, shape names, and shape files:\n #-------------------------------------------------------------------------\n # Feature lists:\n feature_lists = [labels, sulci, fundi]\n feature_names = ['label', 'sulcus', 'fundus']\n spectra_lists = [labels_spectra, sulci_spectra]\n spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]\n spectra_names = ['label spectrum', 'sulcus spectrum']\n table_names = ['label_shapes.csv', 'sulcus_shapes.csv', 'fundus_shapes.csv']\n\n # Shape names corresponding to shape files below:\n shape_names = ['area', 'mean curvature', 'travel depth', 'geodesic depth',\n 'convexity', 'thickness']\n\n # Load shape files as a list of numpy arrays of 
per-vertex shape values:\n shape_files = [area_file, mean_curvature_file, travel_depth_file,\n geodesic_depth_file, convexity_file, thickness_file]\n shape_arrays = []\n column_names = []\n first_pass = True\n area_array = []\n for ishape, shape_file in enumerate(shape_files):\n if os.path.exists(shape_file):\n if first_pass:\n faces, lines, indices, points, npoints, scalars_array, name, \\\n input_vtk = read_vtk(shape_file, True, True)\n points = np.array(points)\n first_pass = False\n if affine_transform_file:\n affine_points, \\\n foo1 = apply_affine_transform(affine_transform_file,\n points, transform_format, save_file=False)\n affine_points = np.array(affine_points)\n else:\n scalars_array, name = read_scalars(shape_file, True, True)\n if scalars_array.size:\n shape_arrays.append(scalars_array)\n\n # Store area array:\n if ishape == 0:\n area_array = scalars_array.copy()\n\n # Initialize table file names:\n sulcus_table = None\n fundus_table = None\n\n # Loop through features / tables:\n for itable, feature_list in enumerate(feature_lists):\n table_column_names = []\n\n #---------------------------------------------------------------------\n # For each feature, construct a table of average shape values:\n #---------------------------------------------------------------------\n table_file = os.path.join(os.getcwd(), table_names[itable])\n if feature_list:\n feature_name = feature_names[itable]\n columns = []\n\n #-----------------------------------------------------------------\n # Mean positions in the original space:\n #-----------------------------------------------------------------\n # Compute mean position per feature:\n positions, sdevs, label_list, foo = means_per_label(points,\n feature_list, exclude_labels, area_array)\n\n # Append mean position per feature to columns:\n table_column_names.append('mean position')\n columns.append(positions)\n\n #-----------------------------------------------------------------\n # Mean positions in standard space:\n #-----------------------------------------------------------------\n if affine_transform_file:\n # Compute standard space mean position per feature:\n standard_positions, sdevs, label_list, foo = means_per_label(affine_points,\n feature_list, exclude_labels, area_array)\n\n # Append standard space mean position per feature to columns:\n table_column_names.append('mean position in standard space')\n columns.append(standard_positions)\n\n #-----------------------------------------------------------------\n # Loop through shape measures:\n #-----------------------------------------------------------------\n table_column_names.extend(column_names[:])\n for ishape, shape_array in enumerate(shape_arrays):\n shape_name = shape_names[ishape]\n print(' Compute statistics on {0} {1}'.\n format(feature_name, shape_name))\n\n # Append shape names and values per feature to columns:\n pr = feature_name + \": \" + shape_name + \": \"\n if np.size(area_array):\n po = \" (weighted)\"\n else:\n po = \"\"\n #-------------------------------------------------------------\n # Append total feature areas to columns:\n #-------------------------------------------------------------\n if ishape == 0 and np.size(area_array):\n sums, label_list = sum_per_label(shape_array,\n feature_list, exclude_labels)\n table_column_names.append(pr + 'total')\n columns.append(sums)\n #-------------------------------------------------------------\n # Append feature shape statistics to columns:\n #-------------------------------------------------------------\n else:\n 
medians, mads, means, sdevs, skews, kurts, \\\n lower_quarts, upper_quarts, \\\n label_list = stats_per_label(shape_array,\n feature_list, exclude_labels, area_array, precision=1)\n\n table_column_names.append(pr + 'median' + po)\n table_column_names.append(pr + 'median absolute deviation' + po)\n table_column_names.append(pr + 'mean' + po)\n table_column_names.append(pr + 'standard deviation' + po)\n table_column_names.append(pr + 'skew' + po)\n table_column_names.append(pr + 'kurtosis' + po)\n table_column_names.append(pr + 'lower quartile' + po)\n table_column_names.append(pr + 'upper quartile' + po)\n columns.append(medians)\n columns.append(mads)\n columns.append(means)\n columns.append(sdevs)\n columns.append(skews)\n columns.append(kurts)\n columns.append(lower_quarts)\n columns.append(upper_quarts)\n\n #-----------------------------------------------------------------\n # Laplace-Beltrami spectra:\n #-----------------------------------------------------------------\n if itable in [0,1]:\n spectra = spectra_lists[itable]\n spectra_name = spectra_names[itable]\n spectra_IDs = spectra_ID_lists[itable]\n\n # Order spectra into a list:\n spectrum_list = []\n for label in label_list:\n if label in spectra_IDs:\n spectrum = spectra[spectra_IDs.index(label)]\n spectrum_list.append(spectrum)\n else:\n spectrum_list.append('')\n\n # Append spectral shape name and values to relevant columns:\n columns.append(spectrum_list)\n table_column_names.append(spectra_name)\n\n #-----------------------------------------------------------------\n # Write labels/IDs and values to table:\n #-----------------------------------------------------------------\n # Write labels/IDs to table:\n write_columns(label_list, feature_name, table_file, delimiter)\n\n # Append columns of shape values to table:\n if columns:\n write_columns(columns, table_column_names, table_file,\n delimiter, quote=True, input_table=table_file)\n else:\n # Write something to table:\n write_columns([], '', table_file, delimiter)\n\n #---------------------------------------------------------------------\n # Return correct table file name:\n #---------------------------------------------------------------------\n if itable == 0:\n label_table = table_file\n elif itable == 1:\n sulcus_table = table_file\n elif itable == 2:\n fundus_table = table_file\n\n return label_table, sulcus_table, fundus_table\n\n\ndef write_vertex_measures(table_file, labels_or_file, sulci=[], fundi=[],\n affine_transform_file='', transform_format='itk',\n area_file='', mean_curvature_file='', travel_depth_file='',\n geodesic_depth_file='', convexity_file='', thickness_file='',\n delimiter=','):\n \"\"\"\n Make a table of shape values per vertex.\n\n Parameters\n ----------\n table_file : output filename (without path)\n labels_or_file : list or string\n label number for each vertex or name of VTK file with index scalars\n sulci : list of integers\n indices to sulci, one per vertex, with -1 indicating no sulcus\n fundi : list of integers\n indices to fundi, one per vertex, with -1 indicating no fundus\n affine_transform_file : string\n affine transform file to standard space\n transform_format : string\n format for transform file\n Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format\n area_file : string\n name of VTK file with surface area scalar values\n mean_curvature_file : string\n name of VTK file with mean curvature scalar values\n travel_depth_file : string\n name of VTK file with travel depth scalar values\n geodesic_depth_file : string\n name of 
VTK file with geodesic depth scalar values\n convexity_file : string\n name of VTK file with convexity scalar values\n thickness_file : string\n name of VTK file with thickness scalar values\n delimiter : string\n delimiter between columns, such as ','\n\n Returns\n -------\n shape_table : table file name for vertex shape values\n\n Examples\n --------\n >>> import os\n >>> from mindboggle.utils.io_vtk import read_scalars\n >>> from mindboggle.tables.all_shapes import write_vertex_measures\n >>> #\n >>> table_file = 'vertex_shapes.csv'\n >>> path = os.environ['MINDBOGGLE_DATA']\n >>> labels_or_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')\n >>> sulci_file = os.path.join(path, 'arno', 'features', 'sulci.vtk')\n >>> fundi_file = os.path.join(path, 'arno', 'features', 'fundi.vtk')\n >>> sulci, name = read_scalars(sulci_file)\n >>> fundi, name = read_scalars(fundi_file)\n >>> affine_transform_file = os.path.join(path, 'arno', 'mri',\n >>> 't1weighted_brain.MNI152Affine.txt')\n >>> transform_format = 'itk'\n >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')\n >>> mean_curvature_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')\n >>> travel_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')\n >>> geodesic_depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.geodesic_depth.vtk')\n >>> convexity_file = ''\n >>> thickness_file = ''\n >>> delimiter = ','\n >>> #\n >>> write_vertex_measures(table_file, labels_or_file, sulci, fundi,\n >>> affine_transform_file, transform_format, area_file,\n >>> mean_curvature_file, travel_depth_file, geodesic_depth_file,\n >>> convexity_file, thickness_file, delimiter)\n\n \"\"\"\n import os\n import numpy as np\n from mindboggle.utils.io_vtk import read_scalars, read_vtk, apply_affine_transform\n from mindboggle.utils.io_table import write_columns\n\n # Make sure inputs are lists:\n if isinstance(labels_or_file, np.ndarray):\n labels = labels_or_file.tolist()\n elif isinstance(labels_or_file, list):\n labels = labels_or_file\n elif isinstance(labels_or_file, str):\n labels, name = read_scalars(labels_or_file)\n if isinstance(sulci, np.ndarray):\n sulci = sulci.tolist()\n if isinstance(fundi, np.ndarray):\n fundi = fundi.tolist()\n\n # Feature names and corresponding feature lists:\n feature_names = ['label', 'sulcus', 'fundus']\n feature_lists = [labels, sulci, fundi]\n\n # Shape names corresponding to shape files below:\n shape_names = ['area', 'mean curvature', 'travel depth', 'geodesic depth',\n 'convexity', 'thickness']\n\n # Load shape files as a list of numpy arrays of per-vertex shape values:\n shape_files = [area_file, mean_curvature_file, travel_depth_file,\n geodesic_depth_file, convexity_file, thickness_file]\n\n # Append columns of per-vertex scalar values:\n columns = []\n column_names = []\n for ifeature, values in enumerate(feature_lists):\n if values:\n columns.append(values)\n column_names.append(feature_names[ifeature])\n\n first_pass = True\n for ishape, shape_file in enumerate(shape_files):\n if os.path.exists(shape_file):\n if first_pass:\n u1, u2, u3, points, u4, scalars, u5, u6 = read_vtk(shape_file)\n columns.append(points)\n column_names.append('coordinates')\n first_pass = False\n if affine_transform_file:\n affine_points, \\\n foo1 = apply_affine_transform(affine_transform_file,\n points, transform_format)\n columns.append(affine_points)\n column_names.append('coordinates in standard space')\n else:\n scalars, name = 
read_scalars(shape_file)\n if len(scalars):\n columns.append(scalars)\n column_names.append(shape_names[ishape])\n\n # Prepend with column of indices and write table\n shapes_table = os.path.join(os.getcwd(), table_file)\n write_columns(range(len(columns[0])), 'index', shapes_table, delimiter)\n write_columns(columns, column_names, shapes_table, delimiter, quote=True,\n input_table=shapes_table)\n\n return shapes_table\n\n\ndef write_face_vertex_averages(input_file, area_file='', delimiter=','):\n \"\"\"\n Make table of average vertex values per face.\n\n Parameters\n ----------\n input_file : string\n name of VTK file with scalars to average\n area_file : string\n name of VTK file with surface area scalar values\n delimiter : string\n delimiter between columns, such as ','\n\n Returns\n -------\n output_table : string\n output table filename\n\n Examples\n --------\n >>> import os\n >>> from mindboggle.utils.io_vtk import read_scalars\n >>> from mindboggle.utils.io_table import write_face_vertex_averages\n >>> path = os.environ['MINDBOGGLE_DATA']\n >>> #input_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')\n >>> #input_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')\n >>> input_file = os.path.join(path, 'arno', 'shapes', 'lh.thickness.vtk')\n >>> area_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.area.vtk')\n >>> delimiter = ','\n >>> #\n >>> write_face_vertex_averages(input_file, area_file, delimiter)\n\n \"\"\"\n import os\n import numpy as np\n\n from mindboggle.utils.io_vtk import read_vtk, read_scalars\n from mindboggle.utils.io_table import write_columns\n\n faces, lines, indices, points, npoints, scalars, name, \\\n input_vtk = read_vtk(input_file, True, True)\n if area_file:\n area_scalars, name = read_scalars(area_file, True, True)\n\n #---------------------------------------------------------------------\n # For each face, average vertex values:\n #---------------------------------------------------------------------\n output_table = os.path.join(os.getcwd(), 'average_face_values.csv')\n columns = []\n for face in faces:\n values = []\n for index in face:\n if area_file:\n values.append(scalars[index] / area_scalars[index])\n else:\n values.append(scalars[index])\n columns.append(np.mean(values))\n\n #-----------------------------------------------------------------\n # Write to table:\n #-----------------------------------------------------------------\n write_columns(columns, '', output_table, delimiter, quote=False)\n\n return output_table\n\n\ndef write_average_face_values_per_label(input_indices_vtk,\n input_values_vtk='', area_file='',\n output_stem='', exclude_values=[-1], background_value=-1):\n \"\"\"\n Write out a separate VTK file for each integer (>-1)\n in (the first) scalar list of an input VTK file.\n Optionally write the values drawn from a second VTK file.\n\n Parameters\n ----------\n input_indices_vtk : string\n path of the input VTK file that contains indices as scalars\n input_values_vtk : string\n path of the input VTK file that contains values as scalars\n output_stem : string\n path and stem of the output VTK file\n exclude_values : list or array\n values to exclude\n background_value : integer or float\n background value in output VTK files\n scalar_name : string\n name of a lookup table of scalars values\n\n Examples\n --------\n >>> import os\n >>> from mindboggle.utils.io_table import write_average_face_values_per_label\n >>> path = os.environ['MINDBOGGLE_DATA']\n >>> input_indices_vtk = 
os.path.join(path, 'allen', 'labels', 'lh.DKTatlas100.gcs.vtk')\n >>> input_values_vtk = os.path.join(path, 'allen', 'shapes', 'lh.thickness.vtk')\n >>> area_file = os.path.join(path, 'allen', 'shapes', 'lh.pial.area.vtk')\n >>> output_stem = 'labels_thickness'\n >>> exclude_values = [-1]\n >>> background_value = -1\n >>> #\n >>> write_average_face_values_per_label(input_indices_vtk,\n >>> input_values_vtk, area_file, output_stem, exclude_values, background_value)\n >>> #\n >>> # View:\n >>> #example_vtk = os.path.join(os.getcwd(), output_stem + '0.vtk')\n >>> #from mindboggle.utils.plots import plot_vtk\n >>> #plot_vtk(example_vtk)\n\n \"\"\"\n import os\n import numpy as np\n from mindboggle.utils.io_vtk import read_scalars, read_vtk, write_vtk\n from mindboggle.utils.io_table import write_columns\n from mindboggle.utils.mesh import remove_faces\n\n # Load VTK file:\n faces, lines, indices, points, npoints, scalars, scalar_names, \\\n foo1 = read_vtk(input_indices_vtk, True, True)\n if area_file:\n area_scalars, name = read_scalars(area_file, True, True)\n print(\"Explode the scalar list in {0}\".\n format(os.path.basename(input_indices_vtk)))\n if input_values_vtk != input_indices_vtk:\n values, name = read_scalars(input_values_vtk, True, True)\n print(\"Explode the scalar list of values in {0} \"\n \"with the scalar list of indices in {1}\".\n format(os.path.basename(input_values_vtk),\n os.path.basename(input_indices_vtk)))\n else:\n values = np.copy(scalars)\n\n # Loop through unique (non-excluded) scalar values:\n unique_scalars = [int(x) for x in np.unique(scalars)\n if x not in exclude_values]\n for scalar in unique_scalars:\n\n keep_indices = [x for sublst in faces for x in sublst]\n new_faces = remove_faces(faces, keep_indices)\n\n # Create array and indices for scalar value:\n select_scalars = np.copy(scalars)\n select_scalars[scalars != scalar] = background_value\n scalar_indices = [i for i,x in enumerate(select_scalars) if x == scalar]\n print(\" Scalar {0}: {1} vertices\".format(scalar, len(scalar_indices)))\n\n #---------------------------------------------------------------------\n # For each face, average vertex values:\n #---------------------------------------------------------------------\n output_table = os.path.join(os.getcwd(),\n output_stem+str(scalar)+'.csv')\n columns = []\n for face in new_faces:\n values = []\n for index in face:\n if area_file:\n values.append(scalars[index] / area_scalars[index])\n else:\n values.append(scalars[index])\n columns.append(np.mean(values))\n\n #-----------------------------------------------------------------\n # Write to table:\n #-----------------------------------------------------------------\n write_columns(columns, '', output_table, delimiter=',', quote=False)\n\n # Write VTK file with scalar value:\n #output_vtk = os.path.join(os.getcwd(), output_stem + str(scalar) + '.vtk')\n #write_vtk(output_vtk, points, indices, lines, new_faces,\n # [select_values.tolist()], [output_scalar_name])\n\n\n","sub_path":"mindboggle/utils/io_table.py","file_name":"io_table.py","file_ext":"py","file_size_in_byte":32689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"208527760","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('last_language_code', models.CharField(max_length=2, choices=[('en', 'English'), ('fr', 'French')])),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"framework2/apps/core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"391547962","text":"from question_model import Question\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nQuestion_Bank = []\n\nfor question in question_data:\n Q_text = question[\"text\"]\n Q_answer = question[\"answer\"]\n new_question = Question(Q_text, Q_answer)\n Question_Bank.append(new_question)\n\nquiz = QuizBrain(Question_Bank)\n\nwhile quiz.still_has_questions():\n quiz.next_question()\n\nif quiz.score == len(Question_Bank):\n print(\"Congratulations!,You got all questions right\")\nelse:\n print(\"You have completed the quiz\")\n print(f\"Your final score is {quiz.score}/{len(Question_Bank)}\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"547237992","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom .._settings import settings\nfrom ..preprocess import reverse_complement_seqs, ohe_seqs\nfrom ..dataload._utils import _seq2Fasta\n\n\nalphabet = np.array([\"A\", \"G\", \"C\", \"T\"])\n\n\ndef random_base():\n \"\"\"\n Generate a random base from the AGCT alpahbet.\n \"\"\"\n return np.random.choice(alphabet)\n\n\ndef random_seq(seq_len):\n \"\"\"\n Generate a random sequence of length seq_len.\n\n Parameters\n ----------\n seq_len : int\n Length of sequence to return.\n\n Returns\n -------\n Random sequence.\n \"\"\"\n return \"\".join([np.random.choice(alphabet) for i in range(seq_len)])\n\n\ndef random_seqs(seq_num, seq_len):\n \"\"\"\n Generate seq_num random sequences of length seq_len\n\n Parameters\n ----------\n seq_num (int):\n number of sequences to return\n seq_len (int):\n length of sequence to return\n\n Returns\n -------\n numpy array of random sequences.\n \"\"\"\n return np.array([random_seq(seq_len) for i in range(seq_num)])\n\n\ndef generate_random_data(\n num_seqs: int, \n seq_len: int, \n vocab: str = \"DNA\", \n num_outputs: int = 1, \n out_dir: str = None, \n dataset_name: str = None\n):\n \"\"\"\n Simple function tp generate commonly used file types for testing EUGENe models\n\n Parameters\n ----------\n num_seqs (int):\n number of sequences to generate\n seq_len (int):\n length of sequences to generate\n vocab (str):\n vocabulary to use for sequence generation. Default is DNA.\n num_outputs (int):\n number of outputs to generate. Default is 1.\n out_dir (str):\n directory to save files to. Default is None.\n dataset_name (str):\n name of dataset. 
Default is None.\n \"\"\"\n out_dir = out_dir if out_dir is not None else settings.dataset_dir\n\n if dataset_name is None:\n dataset_name = f\"random{num_seqs}seqs_{seq_len}bp\"\n\n out_dir = os.path.join(out_dir, dataset_name)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n seqs = random_seqs(num_seqs, seq_len)\n oheseqs = ohe_seqs(seqs, vocab=vocab)\n rev_seqs = reverse_complement_seqs(seqs)\n rev_ohe_seqs = ohe_seqs(rev_seqs, vocab=vocab)\n n_digits = len(str(num_seqs - 1))\n ids = np.array(\n [\"seq{num:0{width}}\".format(num=i, width=n_digits) for i in range(num_seqs)]\n )\n labels = np.random.randint(0, 2, size=(num_seqs, num_outputs))\n activities = np.random.rand(num_seqs, num_outputs)\n label_cols = [\"label_{}\".format(i) for i in range(num_outputs)]\n activity_cols = [\"activity_{}\".format(i) for i in range(num_outputs)]\n d = dict(\n dict(name=ids, seq=seqs),\n **dict(zip(label_cols, labels.T)),\n **dict(zip(activity_cols, activities.T)),\n )\n pd.DataFrame(d).to_csv(\n os.path.join(out_dir, f\"{dataset_name}_seqs.tsv\"), sep=\"\\t\", index=False\n )\n np.save(os.path.join(out_dir, f\"{dataset_name}_seqs\"), seqs)\n np.save(os.path.join(out_dir, f\"{dataset_name}_ohe_seqs\"), oheseqs)\n np.save(os.path.join(out_dir, f\"{dataset_name}_rev_seqs\"), rev_seqs)\n np.save(os.path.join(out_dir, f\"{dataset_name}_ohe_rev_seqs\"), rev_ohe_seqs)\n np.save(os.path.join(out_dir, f\"{dataset_name}_ids\"), ids)\n np.save(os.path.join(out_dir, f\"{dataset_name}_labels\"), labels)\n np.save(os.path.join(out_dir, f\"{dataset_name}_activities\"), activities)\n _seq2Fasta(seqs, ids, name=os.path.join(out_dir, f\"{dataset_name}_seqs\"))\n","sub_path":"eugene/utils/_random_data.py","file_name":"_random_data.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"264975162","text":"\n\"\"\"\nodc-little-cube\n\nuse elatic instead of postgresql -\n\n1. less code\n2. 
more cloud-native\n\n\"\"\"\n\nimport logging\nimport sys\nimport os\nimport json\nimport pprint\n\nfrom collections import OrderedDict\n\n#from dcindexLib.xml_meta_lib import get_metadata_docs_bucket # uses xml based metadata\nfrom dcindexLib.json_meta_lib import get_metadata_docs_json\n\nfrom dcindexLib.projection_stuff import get_projection_info\nfrom dcindexLib.elastic_index import connect_elasticsearch\nfrom dcindexLib.elastic_index import l_create_index\nfrom dcindexLib.elastic_index import store_record\n\n\ndef create_footprint(coord):\n\n # still need to figure out if its lon,lat or lat,lon\n print(\"TONY foot coord:\", coord)\n foot = {\n \"type\": \"Polygon\", \n \"coordinates\": [\n [\n [\n float(coord['ul']['lat']),\n float(coord['ul']['lon'])\n ], \n [\n float(coord['ur']['lat']),\n float(coord['ur']['lon'])\n ], \n [\n float(coord['lr']['lat']),\n float(coord['lr']['lon'])\n ], \n [\n float(coord['ll']['lat']),\n float(coord['ll']['lon'])\n ],\n [\n float(coord['ul']['lat']),\n float(coord['ul']['lon'])\n ] \n ]\n ]\n } \n\n foot = {\n \"type\": \"Polygon\", \n \"coordinates\": [\n [\n [\n float(coord['ul']['lon']),\n float(coord['ul']['lat'])\n ], \n [\n float(coord['ur']['lon']),\n float(coord['ur']['lat'])\n ], \n [\n float(coord['lr']['lon']),\n float(coord['lr']['lat'])\n ], \n [\n float(coord['ll']['lon']),\n float(coord['ll']['lat'])\n ],\n [\n float(coord['ul']['lon']),\n float(coord['ul']['lat'])\n ] \n ]\n ]\n\n } \n\n print (foot)\n\n return foot\n\ndef elastic_flatten_doc(mdoc):\n foot = create_footprint(mdoc['extent']['coord'])\n elastic_doc = {\n 'creation_dt': mdoc['creation_dt'],\n 'processing_level': mdoc['processing_level'],\n 'red': mdoc['image']['bands']['red']['path'],\n 'green': mdoc['image']['bands']['green']['path'],\n 'blue': mdoc['image']['bands']['blue']['path'],\n 'nir': mdoc['image']['bands']['nir']['path'],\n 'pixel_qa': mdoc['image']['bands']['pixel_qa']['path'],\n 'ul': {\n 'lat': mdoc['extent']['coord']['ul']['lat'],\n 'lon': mdoc['extent']['coord']['ul']['lon'],\n },\n 'footprint': foot\n }\n return elastic_doc\n\n\n\ndef elastic_all_metatdata(bucket, top_directory_prefix):\n \"\"\" Main loop function to traverse/crawl the bucket-->prefix or filesystem directory tree and index each dataset\n\n for each .json metadata file:\n\n * extract the metadata and \n * create a doc (dict json blob for the postgresql database)\n\n Args:\n **bucket_name** (str): AWS S3 Bucket Name - example lsaa-staging-cog\n\n config (str): A datacube config file to over-ride the one in your home directory\n\n **prefix** (str): AWS prefix within the bucket to start the recursive search for .json file = example L8\n\n Returns:\n ABSOLUTELY_NOTHING\n\n \"\"\"\n print (\"meta loop\")\n cnt=0;\n\n es_conn = connect_elasticsearch()\n\n\n # delete any old indexes - similar to clearing the postgres db\n es_conn.indices.delete(index='cube', ignore=[400, 404])\n\n # create new elastic search index\n index_name='cube'\n record_type = 'calif'\n l_create_index(es_conn, index_name, record_type)\n\n #for metadata_path, metadata_doc in get_metadata_docs_bucket(bucket, top_directory_prefix):\n for metadata_path, metadata_doc in get_metadata_docs_json(bucket, top_directory_prefix):\n uri = metadata_path\n print(uri)\n cnt=cnt+1\n print(cnt)\n print(\"META:\", metadata_doc)\n elastic_ready_doc = elastic_flatten_doc(metadata_doc)\n # add_dataset(metadata_doc, uri, rules, index)\n logging.info(\"Indexing %s\", metadata_path)\n print(\"creationdate\", metadata_doc['creation_dt'])\n 
print(elastic_ready_doc)\n elastic_json_record = json.dumps(elastic_ready_doc)\n print(\"###\"*30)\n pprint.pprint(elastic_json_record)\n store_record(es_conn, index_name, record_type, elastic_json_record)\n\n\n\n# ################# MAIN ################### #\n\n# get parameters here later hard code for now\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n\nmy_bucket = 'lsaa-staging-cog'\ntop_directory_prefix = \"L08/2014/042/034/\"\nelastic_all_metatdata(my_bucket, top_directory_prefix)\n","sub_path":"app/LEGACY/v3_index_california_to_elastic_main_using_json.py","file_name":"v3_index_california_to_elastic_main_using_json.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"382629832","text":"# ----------\n# User Instructions:\n#\n# Define a function, search() that returns a list\n# in the form of [optimal path length, row, col]. For\n# the grid shown below, your function should output\n# [11, 4, 5].\n#\n# If there is no valid path from the start point\n# to the goal, your function should return the string\n# 'fail'\n# ----------\n\n# Grid format:\n# 0 = Navigable space\n# 1 = Occupied space\nfrom pprint import pprint\ngrid = [[0, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 1, 0]]\ninit = [0, 0]\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1\n\ndelta = [[-1, 0], # go up\n [ 0,-1], # go left\n [ 1, 0], # go down\n [ 0, 1]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\ndef search(grid,init,goal,cost):\n # ----------------------------------------\n # insert code here\n # ----------------------------------------\n # open list elements are of type: [g, x, y]\n # 0: open, 1: closed\n closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]\n closed[init[0]][init[1]] = 1\n\n x = init[0]\n y = init[1]\n g = 0\n\n open = [[g, x, y]]\n\n found = False # flag that is set when search complete\n resign = False # flag set if we can't find expand\n\n # print 'initial open list'\n # for i in range(len(open)):\n # print ' ', open[i]\n # print '-----'\n\n while found is False and resign is False:\n # check if we still have elements in the open list\n if len(open) == 0:\n resign = True\n # print 'fail'\n path = 'fail'\n\n\n else:\n # remove node from list\n open.sort()\n open.reverse()\n next = open.pop()\n\n # print 'take list item'\n # print next\n x = next[1]\n y = next[2]\n g = next[0]\n\n if x == goal[0] and y == goal[1]:\n found = True\n path = next\n # print next\n\n else:\n # expand winning element and add to new open list\n for i in range(len(delta)):\n x2 = x + delta[i][0]\n y2 = y + delta[i][1]\n\n if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):\n # not yet checked, and not an obstacle\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n g2 = g + cost\n open.append([g2, x2, y2])\n\n closed[x2][y2] = 1\n return path\n\ndef search1(grid, init, goal, cost):\n closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]\n expand = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]\n actions = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]\n\n closed[init[0]][init[1]] = 1\n\n x = init[0]\n y = init[1]\n g = 0\n\n open = [[g, x, y]]\n accu = 0\n while open:\n open.sort()\n open.reverse()\n next = open.pop()\n\n g, x, y = next\n expand[x][y] = accu\n accu += 1\n\n if x == goal[0] and y == goal[1]:\n # return next\n 
print(\"found\")\n else:\n for i in range(len(delta)):\n x2 = x + delta[i][0]\n y2 = y + delta[i][1]\n if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n g2 = g + cost\n open.append([g2, x2, y2])\n closed[x2][y2] = 1\n\n actions[x2][y2] = i\n policy = [[\" \" for col in range(len(grid[0]))] for row in range(len(grid))]\n x, y = goal\n policy[x][y] = \"*\"\n\n step = 1\n steps = [[\" \" for col in range(len(grid[0]))] for row in range(len(grid))]\n\n while x != init[0] or y != init[1]:\n action = actions[x][y]\n\n x2 = x - delta[action][0]\n y2 = y - delta[action][1]\n\n policy[x2][y2] = delta_name[action]\n step += 1\n\n x = x2\n y = y2\n\n x, y = goal\n steps[x][y] = step\n while x != init[0] or y != init[1]:\n action = actions[x][y]\n\n x2 = x - delta[action][0]\n y2 = y - delta[action][1]\n\n step -= 1\n steps[x2][y2] = step\n\n x = x2\n y = y2\n # return \"fail\"\n # return expand\n pprint(expand)\n pprint(steps)\n\n return policy\nprint(search(grid,init,goal,cost))\npprint(search1(grid,init,goal,cost))\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"338253929","text":"#!python\n# feeling lucky aka Google proj\n# type a search term on the command line and have my computer automatically open a browser with all\n# the top search results in new tabs.\n\nimport bs4\nimport requests\nimport os\nimport sys\n\n''' TO-DO LIST:\n•\t Read the command line arguments from sys.argv.\n•\t Fetch the search result page with the requests module.\n•\t Find the links to each search result.\n•\t Call the webbrowser.open() function to open the web browser.\n'''\n\ndef usage_lucky():\n print('''\n The lucky project need at least one argument to complete the search\n e.g. 
python feeling_lucky.py \"nasty stuff online\"''')\n\n\ndef agrs2arg():\n find_str = []\n i = 1\n while i < len(sys.argv):\n # print(sys.argv[i])\n find_str.append(sys.argv[i])\n i += 1\n find_str = \" \".join(find_str)\n return(find_str)\n print(find_str)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n str_get = agrs2arg()\n print(\"Googling...\")\n str_search = \"https://www.google.ru/search?q=\"\n req = requests.get(str_search + str_get)\n '''\n another way to solve sys.argv joining stuff <<\n '''\n res = requests.get('https://google.com/search?q=' + ' '.join(sys.argv[1:]))\n res.raise_for_status()\n # print(res.text)\n # SOUP PART\n\n soup = bs4.BeautifulSoup(res.text, \"html.parser\")\n result_div = soup.find_all('div', attrs={'class': 'ZINbbc'})\n result_div2 = soup.select('div')\n # result_div2 = soup.find_all('div', attrs={'class': 'r'})\n print(result_div2[10].attrs)\n\n\n links = []\n titles = []\n descriptions = []\n\n # for r in result_div:\n # # Checks if each element is present, else, raise exception\n # try:\n # link = r.find('a', href=True)\n # title = r.find('div', attrs={'class': 'vvjwJb'}).get_text()\n # description = r.find('div', attrs={'class': 's3v9rd'}).get_text()\n #\n # # Check to make sure everything is present before appending\n # if link != '' and title != '' and description != '':\n # links.append(link['href'])\n # titles.append(title)\n # descriptions.append(description)\n # # Next loop if one element is not present\n # except:\n # continue\n\n\n # for i in result_div:\n # try:\n # link = i.find('a', href=True)\n # title = i.find('div', attrs={'class': 'vvjwJb'})\n # print(title)\n # links.append(link)\n # # print(link.getText())\n # except:\n # continue\n # # print(i)\n # # print(links[10])\n # xx = links[10]\n # print(xx.attrs)\n else:\n usage_lucky()","sub_path":"webscrapping_proj/_feeling_lucky.py","file_name":"_feeling_lucky.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"638710665","text":"import pygame\nimport random\nfrom pygame.constants import *\n\n\npygame.font.init()\nHELV30 = pygame.font.SysFont('Helvetica',30)\nWIDTH, HEIGHT = 500, 540\nFPS = 20\nPIXEL = 20\nMARGIN = 40\n\nclass Engine:\n def __init__(self):\n self.__root = pygame.display.set_mode((WIDTH, HEIGHT))\n self.__snake = Snake([DEFAULT_HEAD, DEFAULT_BP1, DEFAULT_BP2], [(60, 60),(40,60),(20,60)])\n self.__fruit = pygame.Rect(0,0,PIXEL,PIXEL)\n self.__run = True\n self.__end = False\n self.__mouse_pos = (0,0)\n\n def game_loop(self):\n clock = pygame.time.Clock()\n self.generate_fruit()\n while self.__run:\n clock.tick(FPS)\n if self.__end:\n self.end_game_screen()\n else:\n self.__root.fill((50,100,50))\n self.draw_snake()\n self.playing_display()\n self.__snake.move()\n self.get_key()\n self.draw_fruit()\n self.check_collisions()\n self.__snake.check_new_highscore()\n for event in pygame.event.get():\n if event.type == QUIT:\n self.__run = False\n elif event.type == MOUSEBUTTONDOWN:\n self.get_mouse_pos()\n \n pygame.display.update()\n\n def draw_snake(self):\n for segment in self.__snake.body:\n pygame.draw.rect(self.__root, segment.color, segment.hitbox)\n\n def get_key(self):\n keys = pygame.key.get_pressed()\n if keys[K_w]:\n if self.__snake.direction == 'DOWN':\n pass\n else:\n self.__snake.direction = 'UP'\n elif keys[K_s]:\n if self.__snake.direction == 'UP':\n pass\n else:\n self.__snake.direction = 'DOWN'\n elif keys[K_d]:\n if self.__snake.direction == 'LEFT':\n 
pass\n else:\n self.__snake.direction = 'RIGHT'\n elif keys[K_a]:\n if self.__snake.direction == 'RIGHT':\n pass\n else:\n self.__snake.direction = 'LEFT'\n\n def playing_display(self):\n bg = pygame.draw.rect(self.__root, (0,0,0), pygame.Rect(0,0,WIDTH,MARGIN))\n score_label = HELV30.render(f'Score: {self.__snake.score}', False, (255,255,255))\n highscole_label = HELV30.render(f'Highscore: {self.__snake.highscore}', False, (255,255,255))\n self.__root.blit(score_label, (5,5))\n self.__root.blit(highscole_label, (WIDTH-(highscole_label.get_rect().width)-5,5))\n\n def generate_fruit(self):\n test = True\n while test:\n x = random.randrange(0,500-PIXEL,PIXEL)\n y = random.randrange(MARGIN,HEIGHT-PIXEL,PIXEL)\n self.__fruit.update(x,y,PIXEL, PIXEL)\n test = self.fruit_aux()\n\n def fruit_aux(self):\n for segment in self.__snake.body:\n if self.__fruit.colliderect(segment.hitbox):\n repeat = True\n break\n else:\n repeat = False\n return repeat\n\n def draw_fruit(self):\n pygame.draw.rect(self.__root, (255,0,0), self.__fruit)\n\n def check_collisions(self):\n head = self.__snake.body[0]\n if head.hitbox.colliderect(self.__fruit):\n self.__snake.new_body()\n self.generate_fruit()\n self.__snake.score += 1\n for pos,segment in enumerate(self.__snake.body):\n if pos > 1:\n if segment.hitbox.colliderect(head.hitbox):\n self.__end = True\n \n def end_game_screen(self):\n bg = pygame.draw.rect(self.__root, (0,0,0), pygame.Rect(0,0,WIDTH,HEIGHT))\n lose_info = HELV30.render('YOU LOSE!', False, (255,255,255))\n lose_rect = lose_info.get_rect(center=(WIDTH/2, (HEIGHT/2)-50))\n play_again = HELV30.render('Play again?',False, (255,255,255))\n play_again_rect = play_again.get_rect(center=(WIDTH/2, (HEIGHT/2)))\n yes = HELV30.render('YES', False, (255,255,255))\n yes_rect = yes.get_rect(center=((WIDTH/2)-50, (HEIGHT/2)+60))\n no = HELV30.render('NO', False, (255,255,255))\n no_rect = no.get_rect(center=((WIDTH/2)+50, (HEIGHT/2)+60))\n self.__root.blit(lose_info, lose_rect)\n self.__root.blit(play_again, play_again_rect)\n self.__root.blit(yes, yes_rect)\n self.__root.blit(no, no_rect)\n if no_rect.collidepoint(self.__mouse_pos):\n self.__run = False\n elif yes_rect.collidepoint(self.__mouse_pos):\n self.reset()\n\n def get_mouse_pos(self):\n self.__mouse_pos = pygame.mouse.get_pos()\n\n def reset(self):\n self.__snake.score = 0\n self.__snake.snake_positions = [(60, 60),(40,60),(20,60)]\n self.__snake.body = [DEFAULT_HEAD, DEFAULT_BP1, DEFAULT_BP2]\n self.generate_fruit()\n self.__mouse_pos = (0,0)\n self.__end = False\n\n\n\n\nclass Snake:\n def __init__(self, body, positions):\n self.__body = body\n self.__snake_positions = positions\n self.__direction = 'RIGHT'\n self.__score = 0\n self.__highscore = 0\n \n @property\n def body(self):\n return self.__body\n\n @body.setter\n def body(self, new_value):\n self.__body = new_value\n\n @property\n def snake_positions(self):\n return self.__snake_positions\n\n @snake_positions.setter\n def snake_positions(self, new_value):\n self.__snake_positions = new_value\n\n @property\n def direction(self):\n return self.__direction\n\n @direction.setter\n def direction(self, new_dir):\n self.__direction = new_dir\n\n @property\n def score(self):\n return self.__score\n\n @score.setter\n def score(self, new_score):\n self.__score = new_score\n\n @property\n def highscore(self):\n return self.__highscore\n\n def new_body(self):\n new_body_x, new_body_y = self.__snake_positions[-1]\n new_body = Body(new_body_x, new_body_y)\n 
self.__snake_positions.append(self.__snake_positions[-1])\n self.__body.append(new_body)\n \n\n def move(self):\n head_x_position, head_y_position = self.__snake_positions[0]\n if self.__direction == 'UP':\n new_head_positions = (head_x_position, head_y_position-PIXEL)\n if new_head_positions[1] < MARGIN:\n new_head_positions = (head_x_position, HEIGHT-PIXEL)\n elif self.__direction == 'DOWN':\n new_head_positions = (head_x_position, head_y_position+PIXEL)\n if new_head_positions[1] == HEIGHT:\n new_head_positions = (head_x_position, MARGIN)\n elif self.__direction == 'LEFT':\n new_head_positions = (head_x_position-PIXEL, head_y_position)\n if new_head_positions[0] < 0:\n new_head_positions = (WIDTH-PIXEL, head_y_position)\n elif self.__direction == 'RIGHT':\n new_head_positions = (head_x_position+PIXEL, head_y_position)\n if new_head_positions[0] == WIDTH:\n new_head_positions = (0, head_y_position)\n self.__snake_positions = [new_head_positions] + self.__snake_positions[:-1]\n for segment, position in zip(self.__body, self.__snake_positions):\n segment.x = position[0]\n segment.y = position[1]\n segment.update_hitbox()\n\n def check_new_highscore(self):\n if self.__score > self.__highscore:\n self.__highscore = self.__score\n\n\nclass Body:\n def __init__(self, x, y, color=(100,100,200)):\n self.__x = x\n self.__y = y\n self.__color = color\n self.__hitbox = pygame.Rect(self.__x, self.__y, PIXEL, PIXEL)\n\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, new_x):\n self.__x = new_x\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, new_y):\n self.__y = new_y\n\n @property\n def color(self):\n return self.__color\n\n @property\n def hitbox(self):\n return self.__hitbox\n\n def update_hitbox(self):\n self.__hitbox.update(self.__x, self.__y, PIXEL, PIXEL)\n\n\nDEFAULT_HEAD = Body(60,60,(50,50,150))\nDEFAULT_BP1 = Body(40,60)\nDEFAULT_BP2 = Body(20,60)\n\ngame = Engine()\ngame.game_loop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"312605526","text":"# Python implementation using the multiprocessing module\n#\nfrom __future__ import division\nimport os, sys, glob\nimport scipy\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nfrom astropy import units as u\nfrom astropy import constants as const\nfrom astropy.cosmology import LambdaCDM\nimport pandas as pd\nimport h5py\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nsys.path.insert(0, '/cosma5/data/dp004/dc-beck3/lib/')\nimport read_hdf5\nimport cfuncs as cf\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning, append=1)\nos.system(\"taskset -p 0xff %d\" % os.getpid())\n\n# MPI initialisation\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\ncomm_rank = comm.Get_rank()\ncomm_size = comm.Get_size()\n\nfrom mpi_errchk import mpi_errchk\n\n\ndef sigma_crit(zLens, zSource, cosmo):\n Ds = cosmo.angular_diameter_distance(zSource)\n Dl = cosmo.angular_diameter_distance(zLens)\n Dls = cosmo.angular_diameter_distance_z1z2(zLens, zSource)\n sig_crit = (const.c**2/(4*np.pi*const.G))*Ds/(Dl*Dls)\n return sig_crit\n\n\ndef area(vs):\n \"\"\"\n Use Green's theorem to compute the area enclosed by the given contour.\n \"\"\"\n a = 0\n x0, y0 = vs[0]\n for [x1, y1] in vs[1:]:\n dy = y1 - y0\n a += x0*dy\n x0 = x1\n y0 = y1\n return a\n\n\ndef cal_lensing_signals(kap, bzz, ncc):\n dsx_arc = 
bzz/ncc\n # deflection maps\n alpha1, alpha2 = cf.call_cal_alphas(kap, bzz, ncc)\n \n # shear maps\n npad = 5\n al11 = 1 - np.gradient(alpha1, dsx_arc, axis=0)\n al12 = - np.gradient(alpha1, dsx_arc, axis=1)\n al21 = - np.gradient(alpha2, dsx_arc, axis=0)\n al22 = 1 - np.gradient(alpha2, dsx_arc, axis=1)\n detA = al11*al22 - al12*al21\n \n kappa0 = 1 - 0.5*(al11 + al22)\n shear1 = 0.5*(al11 - al22)\n shear2 = 0.5*(al21 + al12)\n shear0 = (shear1**2 + shear2**2)**0.5\n \n # magnification maps\n mu = 1.0/((1.0-kap)**2.0-shear1*shear1-shear2*shear2)\n \n lambda_t = 1 - kappa0 - shear0 # tangential eigenvalue, page 115\n \n # lensing potential\n phi = cf.call_cal_phi(kap, bzz, ncc)\n\n return alpha1, alpha2, mu, phi, detA, lambda_t\n\n\ndef einstein_radii(xs, ys, detA, lambda_t, zl, cosmo, ax, method):\n curve_crit= ax.contour(xs, ys, detA,\n levels=(0,), colors='r',\n linewidths=1.5, zorder=200)\n Ncrit = len(curve_crit.allsegs[0])\n curve_crit = curve_crit.allsegs[0]\n curve_crit_tan= ax.contour(xs, ys,\n lambda_t, levels=(0,), colors='r',\n linewidths=1.5, zorder=200)\n Ncrit_tan = len(curve_crit_tan.allsegs[0])\n if Ncrit_tan > 0:\n len_tan_crit = np.zeros(Ncrit_tan)\n for i in range(Ncrit_tan):\n len_tan_crit[i] = len(curve_crit_tan.allsegs[0][i])\n curve_crit_tan = curve_crit_tan.allsegs[0][len_tan_crit.argmax()]\n if method == 'eqv':\n Rein = np.sqrt(np.abs(area(curve_crit_tan))/np.pi) #[arcsec]\n if method == 'med':\n dist = np.sqrt(curve_crit_tan[:, 0]**2 + curve_crit_tan[:, 1]**2)\n Rein = np.median(dist) #[arcsec]\n else:\n curve_crit_tan= np.array([])\n Rein = 0\n return Ncrit, curve_crit, curve_crit_tan, Rein\n\n\ndef timedelay_magnification(mu_map, phi_map, dsx_arc, Ncells, lp1, lp2,\n alpha1, alpha2, SrcPosSky, zs, zl, cosmo):\n \"\"\"\n Calculate Photon-travel-time and Magnification of strong lensed\n supernovae\n\n Input:\n\n Output:\n len(mu): number of multiple images of supernova\n delta_t: Time it takes for photon to cover distance source-observer\n mu: luminosity magnification of source\n \"\"\"\n # Mapping light rays from image plane to source plan\n [sp1, sp2] = [lp1 - alpha1, lp2 - alpha2] #yi1,yi2[arcsec]\n\n # Source position [arcsec]\n #x = SrcPosSky[0]*u.Mpc\n #y = SrcPosSky[1]*u.Mpc\n #z = SrcPosSky[2]*u.Mpc\n #if (y == 0.) 
and (z == 0.):\n # beta1 = 1e-3\n # beta2 = 1e-3\n #else:\n # beta1 = ((y/x)*u.rad).to_value('arcsec')\n # beta2 = ((z/x)*u.rad).to_value('arcsec')\n beta1 = 1e-3\n beta2 = 1e-3\n print('lp1', lp1, comm_rank)\n print('sp1', sp1, comm_rank)\n #input(\"Wait here mapping_triangles\")\n print(\"Wait here mapping_triangles\", comm_rank)\n theta1, theta2 = cf.call_mapping_triangles([beta1, beta2], \n lp1, lp2, sp1, sp2)\n # calculate magnifications of lensed Supernovae\n #input(\"Wait here inverse_cic_single\")\n print(\"Wait here inverse_cic_single\", comm_rank)\n mu = cf.call_inverse_cic_single(mu_map, 0.0, 0.0, theta1, theta2, dsx_arc)\n # calculate time delays of lensed Supernovae in Days\n prts = cf.call_inverse_cic_single(phi_map, 0.0, 0.0, theta1, theta2, dsx_arc)\n Kc = ((1.0+zl)/const.c.to('Mpc/s') * \\\n (cosmo.angular_diameter_distance(zl) * \\\n cosmo.angular_diameter_distance(zs) / \\\n (cosmo.angular_diameter_distance(zs) - \\\n cosmo.angular_diameter_distance(zl)))).to('sday').value\n delta_t = Kc*(0.5*((theta1 - beta1)**2.0 + (theta2 - beta2)**2.0) - prts)/cf.apr**2\n beta = [beta1, beta2]\n theta = [theta1, theta2]\n return len(mu), delta_t, mu, theta, beta\n\n\ndef lenslistinit():\n global l_HFID, l_deltat, l_mu, l_srctheta, l_srcbeta, l_tancritcurves, l_einsteinradius\n l_HFID=[]; l_deltat=[]; l_mu=[]; l_srctheta=[]; l_srcbeta=[]; l_tancritcurves=[]; l_einsteinradius=[]\n return l_HFID, l_deltat, l_mu, l_srctheta, l_srcbeta,l_tancritcurves, l_einsteinradius\n\n\ndef srclistinit():\n global s_deltat, s_mu, s_zs, s_alpha, s_theta, s_beta, s_tancritcurves, s_einsteinradius\n s_deltat=[]; s_mu=[]; s_zs=[]; s_alpha=[]; s_theta=[]; s_beta=[]; s_tancritcurves=[]; s_einsteinradius=[]\n return s_deltat, s_mu, s_zs, s_alpha, s_theta, s_beta, s_tancritcurves, s_einsteinradius\n\n\n@mpi_errchk\ndef lensing_signal():\n # Get command line arguments\n args = {}\n if comm_rank == 0:\n args[\"simdir\"] = sys.argv[1]\n args[\"dmdir\"] = sys.argv[2]\n args[\"snapnum\"] = int(sys.argv[3])\n args[\"ncells\"] = int(sys.argv[4])\n args[\"outbase\"] = sys.argv[5]\n args = comm.bcast(args)\n \n # Organize devision of Sub-&Halos over Processes on Proc. 0\n if comm_rank == 0:\n s = read_hdf5.snapshot(args[\"snapnum\"], args[\"simdir\"])\n fname = glob.glob(args[\"dmdir\"]+'*.h5')\n ffname = []\n for ff in range(len(fname)):\n if (os.path.getsize(fname[ff])/(1024*1024.0)) < 1:\n fname[ff] = 0\n else:\n ffname.append(fname[ff])\n ffname = np.asarray(ffname)\n fname_split = np.array_split(ffname, comm_size)\n\n # Cosmological Parameters\n cosmo = LambdaCDM(H0=s.header.hubble*100,\n Om0=s.header.omega_m,\n Ode0=s.header.omega_l)\n redshift = s.header.redshift\n else:\n fname_split=None; cosmo=None; redshift=None\n\n cosmo = comm.bcast(cosmo, root=0)\n redshift = comm.bcast(redshift, root=0)\n\n fname = comm.scatter(fname_split, root=0)[0]\n print('Proc. %d has file %s with size %f GB' %\n (comm_rank, fname.split('/')[-1], os.path.getsize(fname)/(1024**3)))\n\n dmf = h5py.File(fname, 'r')\n\n zl = redshift\n zs = 2.\n lenslistinit(); srclistinit()\n # Run through lenses\n for ll in range(len(dmf['subhalo_id'])):\n print('Proc. %d works on lens %d with ID %d' % (comm_rank, ll, dmf['subhalo_id'][ll]))\n #converting box size and pixels size from ang. diam. dist. 
to arcsec\n FOV_arc = (dmf['fov_width'][ll]/cf.Da(zl, cosmo)*u.rad).to_value('arcsec') #[arcsec] box size\n dsx_arc = FOV_arc/args[\"ncells\"] #[arcsec] pixel size\n # initialize the coordinates of grids (light rays on lens plan)\n lp1, lp2 = cf.make_r_coor(FOV_arc, args[\"ncells\"]) #[arcsec]\n\n # Calculate critical surface density\n sigma_cr = sigma_crit(zl, zs, cosmo).to_value('Msun Mpc-2')\n kappa = dmf['density_map'][ll]/sigma_cr\n print('The Kappa place has a max of %f and min of %f' % \n (np.max(kappa), np.min(kappa)))\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n # Calculate Deflection Maps\n alpha1, alpha2, mu_map, phi, detA, lambda_t = cal_lensing_signals(kappa,\n FOV_arc,\n args[\"ncells\"]) \n # Calculate Einstein Radii\n Ncrit, curve_crit, curve_crit_tan, Rein = einstein_radii(lp1, lp2,\n detA,\n lambda_t,\n zl, cosmo,\n ax, 'med')\n # Calculate Time-Delay and Magnification\n snia_pos = np.array([0, 0, 0])\n try:\n n_imgs, delta_t, mu, theta, beta = timedelay_magnification(\n mu_map, phi, dsx_arc, args[\"ncells\"],\n lp1, lp2, alpha1, alpha2, snia_pos, zs, zl, cosmo)\n except:\n print('Function:timedelay_magnification() failed')\n continue\n print('timedelay_magnification', n_imgs, mu, theta, comm_rank)\n if n_imgs > 1:\n # Tree Branch 1\n l_HFID.append(int(dmf['subhalo_id'][ll]))\n # Tree Branch 2\n l_srcbeta.append(beta)\n l_tancritcurves.append(curve_crit_tan)\n l_einsteinradius.append(Rein)\n # Tree Branch 3\n l_srctheta.append(theta)\n l_deltat.append(delta_t)\n l_mu.append(mu)\n\n ########## Save to File ########\n tree = plant_Tree()\n\n # Tree Branches of Node 1 : Lenses\n tree['HF_ID'] = l_HFID\n tree['snapnum'] = args[\"snapnum\"]\n tree['zl'] = redshift\n tree['zs'] = 2.\n # Tree Branches of Node 1 : Sources\n tree['Sources']['beta'] = l_srcbeta\n tree['Sources']['TCC'] = l_tancritcurves\n tree['Sources']['Rein'] = l_einsteinradius\n for imgs in range(len(l_mu)):\n # Tree Branches of Node 2 : Multiple Images\n tree['Sources']['theta'][imgs] = l_srctheta[imgs]\n tree['Sources']['delta_t'][imgs] = l_deltat[imgs]\n tree['Sources']['mu'][imgs] = l_mu[imgs]\n label = args[\"simdir\"].split('/')[-2].split('_')[2]\n filename = args[\"outbase\"]+'LM_%s_%d.pickle' % (label, comm_rank)\n filed = open(filename, 'wb')\n pickle.dump(tree, filed)\n filed.close()\n plt.close(fig)\n\n\nif __name__ == '__main__':\n lensing_signal()\n","sub_path":"LensingMap/LM_main_mpi.py","file_name":"LM_main_mpi.py","file_ext":"py","file_size_in_byte":10810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"44561381","text":"#!/bin/env python2\nclass Solution:\n # @param triangle, a list of lists of integers\n # @return an integer\n def minimumTotal(self, triangle):\n levels = len(triangle)\n if levels == 0:\n return 0\n if levels == 1:\n return triangle[0][0]\n for i in range(levels - 2, -1, -1):\n for j in range(i+1):\n triangle[i][j] += min(triangle[i+1][j], triangle[i+1][j+1])\n return triangle[0][0]\n","sub_path":"python/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"156831544","text":"# Subprocess Simple Exercise:\n# Use the subprocess module to compare the contents of the two\n# given file and show the differences between them. 
List only\n# the changed, added or deleted lines and redirect the output in\n# a file output.txt\n\nimport subprocess\n\noutfile = open(\"haze_output\", \"w\")\np = subprocess.Popen(\n (\"diff -y -W 70 --suppress-common-lines haze_files/file1.txt haze_files/file2.txt\"),\n shell=True,\n universal_newlines=True,\n stdout=outfile,\n)\n\nfrom subprocess import *\ncall('diff -y -W 100 haze_files/file1.txt haze_files/file2.txt --suppress-common-lines | tee output.txt', shell=True)","sub_path":"Modules/subprocess_module/dev_exercises/haze.py","file_name":"haze.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"56368186","text":"from graphql.type.definition import GraphQLArgument, GraphQLField, GraphQLNonNull, GraphQLObjectType\nfrom graphql.type.scalars import GraphQLString, GraphQLInt\nfrom graphql.type.schema import GraphQLSchema\n\n\ndef resolve_raises(*_):\n raise Exception(\"Throws!\")\n\n\nQueryRootType = GraphQLObjectType(\n name='QueryRoot',\n fields={\n 'thrower': GraphQLField(GraphQLNonNull(GraphQLString), resolver=resolve_raises),\n 'context': GraphQLField(\n type=GraphQLNonNull(GraphQLString),\n resolver=lambda self, info, **kwargs: info.context),\n\n 'test': GraphQLField(\n type=GraphQLNonNull(GraphQLString),\n resolver=lambda self, info: 'Hello World'\n ),\n 'test_args': GraphQLField(\n type=GraphQLNonNull(GraphQLString),\n args={'name': GraphQLArgument(GraphQLString)},\n resolver=lambda self, info, **kwargs: 'Hello {}'.format(kwargs.get(\"name\"))\n ),\n 'test_def_args': GraphQLField(\n type=GraphQLString,\n args={'name': GraphQLArgument(GraphQLString),},\n resolver=lambda self, info, name=\"World\": 'Hello {}'.format(name)\n )\n }\n)\n\nMutationRootType = GraphQLObjectType(\n name='MutationRoot',\n fields={\n 'writeTest': GraphQLField(\n type=QueryRootType,\n resolver=lambda *_: QueryRootType\n )\n }\n)\n\nSchema = GraphQLSchema(QueryRootType, MutationRootType)\n","sub_path":"tests/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"193582401","text":"#步骤1:导入unittest模块\nimport unittest\nfrom day04.day0401.calculator import Count\n\n# 步骤2:编写测试类TestCount ,继承TestCase 类\nclass TestCount(unittest.TestCase):\n # 步骤3:重写sutUp函数\n def setUp(self):\n print(\"测试开始\")\n\n def test_add1(self):\n print(\"测试两个整数函数\")\n c1 = Count(10,20)\n res1 = c1.add()\n print(\"计算实际结果:\",res1)\n # 步骤5:使用unittest 提供的断言函数\n self.assertEqual(res1,30)\n\n def test_add2(self):\n print(\"测试两个浮点型数据函数\")\n c2=Count(2167.45,3978.78)\n res2=c2.add()\n print(\"计算实际结果:\",res2)\n if abs(res2-6146.23)<0.001:\n res2 = 6146.23\n self.assertEqual(res2,6146.23)\n\n def test_add3(self):\n print(\"测试两个字符串函数\")\n c3 = Count(\"你好\",\"测试\")\n res3 =c3.add()\n print(\"实际计算结果:\",res3)\n self.assertEqual(res3,\"你好测试\")\n\n # 步骤6:编写tearDown函数\n def tearDown(self):\n print(\"测试结束\")\n\n\n\n# 步骤7:调用unittest提供的main()函数,执行测试用例\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n case1 =TestCount(\"test_add1\")\n suite.addTest(case1)\n suite.addTest(TestCount(\"test_add2\"))\n suite.addTest(TestCount(\"test_add3\"))\n\n\n runner = unittest.TextTestRunner\n runner.run(suite)","sub_path":"selenium/day04/day0401/demo02.py","file_name":"demo02.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"477005484","text":"# 
파이썬이 보기에는 예외가 아니지만 우리가 예외로 정의해 예외 처리할 수 있음\n# -> if 조건:\n# raise Exception(\"메시지\") -> Exception 클래스를 생성해서 예외를 발생시키는 개념\n# -> 비정상종료되기 때문에 try ~ except 처리 필수\n\nfrom random import randint\n\ndef my_random():\n try:\n n = randint(1, 4)\n if n == 2:\n raise Exception(\"2\") # 아무 예외 클래스로 해도 됨 -> 하지만 적합한 클래스를 쓰는게 맞지 -> 직접 예외 클래스를 만들어서 사용\n print(\"랜덤값:\", n)\n except Exception as e:\n print(\"예외\", e)\n\n\nmy_random()\n\n# try:\n# my_random()\n# except Exception as e:\n# print(\"예외\", e)\n","sub_path":"day7/sample22_exception4.py","file_name":"sample22_exception4.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"61003447","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nfrom collections import OrderedDict\n\nimport pytest\n\nimport pyignite.utils as _putils\nfrom pyignite.datatypes import IntObject\n\ntry:\n from pyignite import _cutils\n\n _cutils_hashcode = _cutils.hashcode\n _cutils_schema_id = _cutils.schema_id\nexcept ImportError:\n _cutils_hashcode = lambda x: None # noqa: E731\n _cutils_schema_id = lambda x: None # noqa: E731\n pass\n\n\n@pytest.mark.skip_if_no_cext\ndef test_bytes_hashcode():\n assert _cutils_hashcode(None) == 0\n assert _cutils_hashcode(b'') == 1\n assert _cutils_hashcode(bytearray()) == 1\n assert _cutils_hashcode(memoryview(b'')) == 1\n\n for i in range(1000):\n rnd_bytes = bytearray([random.randint(0, 255) for _ in range(random.randint(1, 1024))])\n\n fallback_val = _putils.__hashcode_fallback(rnd_bytes)\n assert _cutils_hashcode(rnd_bytes) == fallback_val\n assert _cutils_hashcode(bytes(rnd_bytes)) == fallback_val\n assert _cutils_hashcode(memoryview(rnd_bytes)) == fallback_val\n\n\n@pytest.mark.skip_if_no_cext\n@pytest.mark.parametrize(\n 'value',\n [\n '皮膚の色、',\n 'Произвольный символ',\n 'Random string',\n '',\n ]\n)\ndef test_string_hashcode(value):\n assert _cutils_hashcode(value) == _putils.__hashcode_fallback(value), f'failed on {value}'\n\n\n@pytest.mark.skip_if_no_cext\ndef test_random_string_hashcode():\n assert _cutils_hashcode(None) == 0\n assert _cutils_hashcode('') == 0\n\n for i in range(1000):\n rnd_str = get_random_unicode(random.randint(1, 128))\n assert _cutils_hashcode(rnd_str) == _putils.__hashcode_fallback(rnd_str), f'failed on {rnd_str}'\n\n\n@pytest.mark.skip_if_no_cext\ndef test_schema_id():\n rnd_id = random.randint(-100, 100)\n assert _cutils_schema_id(rnd_id) == rnd_id\n assert _cutils_schema_id(None) == 0\n assert _cutils_schema_id({}) == 0\n\n for i in range(1000):\n schema = OrderedDict({get_random_field_name(20): IntObject for _ in range(20)})\n assert _cutils_schema_id(schema) == _putils.__schema_id_fallback(schema), f'failed on 
{schema}'\n\n\n@pytest.mark.skip_if_no_cext\n@pytest.mark.parametrize(\n 'func,args,kwargs,err_cls',\n [\n [_cutils_hashcode, [123], {}, ValueError],\n [_cutils_hashcode, [{'test': 'test'}], {}, ValueError],\n [_cutils_hashcode, [], {}, TypeError],\n [_cutils_hashcode, [123, 123], {}, TypeError],\n [_cutils_hashcode, [], {'input': 'test'}, TypeError],\n [_cutils_schema_id, ['test'], {}, ValueError],\n [_cutils_schema_id, [], {}, TypeError],\n [_cutils_schema_id, [], {}, TypeError],\n [_cutils_schema_id, [123, 123], {}, TypeError],\n [_cutils_schema_id, [], {'input': 'test'}, TypeError],\n ]\n)\ndef test_handling_errors(func, args, kwargs, err_cls):\n with pytest.raises(err_cls):\n func(*args, **kwargs)\n\n\ndef get_random_field_name(length):\n first = get_random_unicode(length // 2, latin=True)\n second = get_random_unicode(length - length // 2, latin=True)\n\n first = first.upper() if random.randint(0, 1) else first.lower()\n second = second.upper() if random.randint(0, 1) else second.lower()\n\n return first + '_' + second\n\n\ndef get_random_unicode(length, latin=False):\n include_ranges = [\n (0x0041, 0x005A), # Latin high\n (0x0061, 0x007A), # Latin lower\n (0x0410, 0x042F), # Russian high\n (0x0430, 0x044F), # Russian lower\n (0x05D0, 0x05EA) # Hebrew\n ]\n\n alphabet = []\n\n if latin:\n include_ranges = include_ranges[0:2]\n\n for current_range in include_ranges:\n for code_point in range(current_range[0], current_range[1] + 1):\n alphabet.append(chr(code_point))\n\n return ''.join(random.choice(alphabet) for _ in range(length))\n","sub_path":"tests/test_cutils.py","file_name":"test_cutils.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"320108253","text":"from django.urls import path\nfrom . 
import views \n\nurlpatterns = [\n path('login/', views.loginPage, name=\"login\"),\n path('logout/', views.logoutUser, name=\"logout\"),\n path('dd_certi//',views.dd_certi,name=\"dd-certi\"),\n path('view_certi/',views.view_certi,name=\"view-certi\"),\n]\n\n","sub_path":"certificate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"88321100","text":"# Variáveis Compostas (Listas) Parte 2\n#dados = []\n#dados.append('Pedro')\n#dados.append(25)\n#pessoas = []\n#pessoas.append(dados[:])\n\n#pessoas = [['Pedro', '25'], ['Maria', '19'], ['João', '32']]\n#print(pessoas[0][0])\n#print(pessoas[1][1])\n#print(pessoas[2][0])\n#print(pessoas[1])\n#for p in pessoas:\n# print(f'{p[0]} tem {p[1]} anos de idade.')\n\npessoas = []\ndado = []\ntotal_maior =totam_menor = 0\n\nfor c in range(0, 3):\n dado.append(str(input('Nome:')))\n dado.append(int(input('Idade: ')))\n pessoas.append(dado[:])# Criar uma copia da lista dado\n dado.clear()# limpa o conteudo da lista dado\n\nfor p in pessoas:\n if p[1] >= 21:\n print(f'{p[0]} é maior de idade.')\n total_maior += 1\n else:\n print(f'{p[0]} é menor de idade.')\n totam_menor += 1\n\nprint(f'temos {total_maior} maiores e {totam_menor} menores de idade.')\n","sub_path":"aula18aparte2listas.py","file_name":"aula18aparte2listas.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"513949284","text":"from datetime import timedelta,datetime\n# The DAG object; we'll need this to instantiate a DAG\nfrom airflow import DAG\n# Operators; we need this to operate!\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.utils.dates import days_ago\nfrom datetime import datetime\nimport requests\nimport json\nimport pandas as pd\nimport pymongo\nimport time\nfrom datetime import date\nfrom airflow.models import Variable\n# from fuzzwuzzy import fuzz\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2020,3,12,2,0,0),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=1),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n # 'wait_for_downstream': False,\n # 'dag': dag,\n # 'sla': timedelta(hours=2),\n 'execution_timeout': timedelta(minutes=300),\n # 'on_failure_callback': some_function,\n # 'on_success_callback': some_other_function,\n # 'on_retry_callback': another_function,\n # 'sla_miss_callback': yet_another_function,\n # 'trigger_rule': 'all_success'\n}\ndag = DAG(\n 'ads_2020_sample',\n default_args=default_args,\n description='sample 2020 facebook ads data',\n schedule_interval='@once',\n)\n\ndbase='POTUS_2020_DEV'\nurl1 = 'mongodb://%s:%s@denver.ischool.syr.edu'\nmongoClient= pymongo.MongoClient(url1 % ('bitslab', '0rang3!'))\ndb = mongoClient[dbase]\ncol=db[\"fb_ads_dev\"]\ndata=[]\n\ndef get_data(ds, **kwargs):\n sample=[]\n samp_text=[]\n start_date=Variable.get('sample_start_date')\n end_date=Variable.get('sample_end_date')\n sample_size=Variable.get('sample_size')\n print(\"hello\")\n for doc in 
col.distinct('ad_creative_body',{\"ad_delivery_start_time\":{\"$gte\":start_date,\"$lte\":end_date}}):\n print(len(sample))\n if len(samp_text)int(sample_size):\n# break\n# try:\n# i=1\n# if doc['ad_creative_body'] not in samp_text:\n# for y in samp_text:\n# if fuzz.ratio(doc['ad_creative_body'],y)>50:\n# i=1\n# break\n# if i==0:\n# samp_text.append(doc['ad_creative_body'])\n# sample.append(doc)\n# except:\n# continue\n# if len(sample)>int(sample_size):\n# break\n# return \"done\"\n\nget_data_step = PythonOperator(\n task_id='get_data',\n provide_context=True,\n python_callable=get_data,\n dag=dag,\n)\n\n\n\nstop_op = DummyOperator(task_id='stop_task', dag=dag)\n\n\nget_data_step >> stop_op\n\n\n","sub_path":"ads_2020_sample.py","file_name":"ads_2020_sample.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"523106907","text":"from boto3.session import Session\nimport click\nfrom datetime import datetime, timedelta\nimport json\nimport logging\nimport yaml\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nLOOKUP_HOURS = 12\n\ndefault_query_string = \"\"\"\nfields @timestamp, @message\n| sort @timestamp desc\n| limit 20\n\"\"\".replace(\"\\n\", \" \").strip()\n\n\ndef read_file(filename):\n with open(filename, \"r\") as query_file:\n content = query_file.read()\n return content.replace(\"\\n\", \" \").strip()\n\n\n@click.command()\n@click.option(\"--profile\", \"-p\", help=\"AWS profile name.\", default=\"default\")\n@click.option(\"--loggroupname\", \"-l\", help=\"Log Group name\", required=True)\n@click.option(\"--starttime\", \"-s\", help=f\"Start time (e.g. 2020-04-01); return last {LOOKUP_HOURS} records if not specified\", default=None)\n@click.option(\"--endtime\", \"-e\", help=\"End time (e.g. 2020-04-01); now if not specified.\", default=None)\n@click.option(\"--queryfile\", \"-f\", help=\"Query file.\", default=None)\n@click.option(\"--region\", \"-r\", help=\"AWS Region. 
Default ap-southeast-2\", default=\"ap-southeast-2\")\ndef main(profile, loggroupname, starttime, endtime, queryfile, region):\n session = Session(profile_name=profile)\n client = session.client(\"logs\", region_name=region)\n\n end_dt = datetime.now() if endtime is None else datetime.strptime(endtime, \"%Y-%m-%d\")\n start_dt = end_dt - timedelta(hours=LOOKUP_HOURS) if starttime is None else datetime.strptime(starttime, \"%Y-%m-%d\")\n \n query_string = read_file(queryfile) if queryfile else default_query_string\n \n query_id = client.start_query(\n logGroupName=loggroupname,\n startTime=int(start_dt.timestamp()),\n endTime=int(end_dt.timestamp()),\n queryString=query_string,\n )[\"queryId\"]\n\n status = \"Running\" # 'Scheduled'|'Running'|'Complete'|'Failed'|'Cancelled'\n while status == \"Running\":\n response = client.get_query_results(queryId=query_id)\n for item in response.get(\"results\", []):\n print(\"----------\")\n print(yaml.dump({\n field[\"field\"]: json.loads(field[\"value\"]) if field[\"value\"][0] == \"{\" else field[\"value\"]\n for field in item if field[\"field\"] != \"@ptr\"\n }))\n status = response.get(\"status\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CloudWatch/CloudWatch-LogsInsights/cloudwatch_logs_insights_run_query.py","file_name":"cloudwatch_logs_insights_run_query.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"481811823","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV\nimport RedefineTestData\nimport RF_TrainingTest\n\ndf = pd.read_csv('RedefinedData.txt', sep=' ')\n\nfeature_names = [\n\t'acce0mean', 'acce0var', 'acce1mean', 'acce1var', 'acce2mean', 'acce2var',\n#\t'gyro0mean', 'gyro0var', 'gyro1mean', 'gyro1var', 'gyro2mean', 'gyro2var'\n]\nscaler = MinMaxScaler()\nscaler.fit(df[feature_names])\nx_train = scaler.transform(df[feature_names])\ny_train = df['class']\n\nrf = RandomForestClassifier(random_state=123456)\nn_estimators_range = [x for x in range(10, 200, 10)]\nmax_depth_range = [x for x in range(1, 15)]\nparam_grid = [{'n_estimators': n_estimators_range, 'max_depth':max_depth_range}]\ngrid = GridSearchCV(rf, param_grid, cv=10, n_jobs=-1)\ngrid.fit(x_train, y_train)\n\nprint(grid.best_params_)\n\npickle.dump(grid, open('rf_model.pkl', 'wb'))\npickle.dump(scaler, open('scaler.pkl', 'wb'))\n\nprint(\"RedefineTestData\")\nRedefineTestData.run()\nprint(\"RF_TrainingTest\")\nRF_TrainingTest.run()\n","sub_path":"Modeling/RandomForest/RF_Train.py","file_name":"RF_Train.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"178815962","text":"import itertools\r\n\r\nusage_banner = \"\"\"\r\nUSAGE\r\npython3 wordlister.py \r\n\r\nExample:\r\npython3 wordlister.py /mnt/e/words.txt /mnt/e/result.txt\r\n\r\nHH\r\n\"\"\"\r\n\r\ndef main():\r\n\timport sys, os\r\n\targv = sys.argv[1:]\r\n\r\n\twordlist = None\r\n\tgenerated = list()\r\n\r\n\ttry:\r\n\t\tfile_read = open(argv[0], 'r')\r\n\t\tfile_write = open(argv[1], 'w')\r\n\t\twordlist = file_read.read().split('\\n')\r\n\r\n\t\twhile '' in wordlist:\r\n\t\t\twordlist.remove('')\r\n\r\n\t\trepeat = len(wordlist)\r\n\r\n\t\ttry:\r\n\t\t\tfor repeat in range(1, repeat):\r\n\t\t\t\tfor generated_string in itertools.product(wordlist, 
repeat=repeat):\r\n\t\t\t\t\tprint(f\"[*] {''.join(generated_string)}\", end='\\r')\r\n\t\t\t\t\tgenerated.append(''.join(generated_string))\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tfile_read.close()\r\n\r\n\t\t\tprint(f\"\\n[i] Writing to file {argv[1]}\")\r\n\t\t\tfile_write.write('\\n'.join(generated))\r\n\t\t\tfile_write.close()\r\n\t\texcept Exception as e:\r\n\t\t\tprint(f\"\\n[!] {e}\")\r\n\t\tfinally:\r\n\t\t\tfile_read.close()\r\n\r\n\t\t\tprint(f\"\\n[i] Writing to file {argv[1]}\")\r\n\t\t\tfile_write.write('\\n'.join(generated))\r\n\t\t\tfile_write.close()\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\tprint(usage_banner)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"wordlister.py","file_name":"wordlister.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"305593","text":"import os\nimport json\nfrom Builder.Coordinates.ClickCoordinate import ClickCoordinate\n\ncounter = 1\n\nclass Relation:\n\n def __init__(self, fields, relation_counter=None, dependency=None, eceld_folder=None):\n global counter\n if relation_counter:\n counter = relation_counter\n self.number = counter\n if not dependency:\n self.name = \"Relationship \" + str(self.number)\n else:\n self.name = \"Dependency \" + str(self.number)\n self.observation_list = []\n counter += 1\n for index, item in enumerate(fields):\n self.observation_list.append(Observation(item, index,eceld_folder))\n\n\nclass Observation:\n \n def __init__(self, node, index,eceld_folder):\n # Data details and contents\n self.index_observation = index\n self.start = node['start']\n self.data = node['data']\n self.data_type = node['data_type']\n self.artifact = node['artifact']\n self.eceld_folder = eceld_folder\n self.imgName = str\n self.is_click = True if \"clicks_id\" in node['data'] else False\n\n # If Click image detected, get coordinates and save them as object data\n if self.is_click:\n self.data = node['data']\n img_Name = self.get_image_path(self.data['content'])\n self.data['content'] = img_Name\n self.coordinateX = 0\n self.coordinateY = 0\n self.button = \"\\'left\\'\"\n self.clicks = 1\n self.data['clicks'] = self.clicks\n self.data['button'] = self.button\n try:\n analyze = ClickCoordinate()\n analyze.analyze_file(img_Name)\n self.coordinateX, self.coordinateY = analyze.click_coord()\n self.data['X_Coordinate'] = int(self.coordinateX)\n self.data['Y_Coordinate'] = int(self.coordinateY)\n except Exception as e:\n # If algorithm cannot determine the coordinates, they will be set to (0,0)\n print(e)\n self.data['X_Coordinate'] = 0\n self.data['Y_Coordinate'] = 0 \n #print(f\"image: {img_Name}\")\n #print(f\"x: {self.coordinateX}, y: {self.coordinateY}\")\n \n\n # # Depicts the time to wait before looking for observation or executing script\n self.delay = 0\n\n # Information to create script\n if(self.data_type == \"Keypresses\" or self.data_type == \"imgPoint\" or self.data_type == \"auditd\"):\n self.user_action = True\n else:\n self.user_action = False\n\n self.script = None\n\n # Selected labels which will be used to filter traffic on the Runner\n try:\n self.select_filters = node[\"select_filters\"]\n if len(self.select_filters) == 0:\n self.select_filters = ['ip.src']\n \n except:\n self.select_filters = []\n\n #change to 1 when ignoring observation in script\n self.ignore = 0\n\n # Get location of image for further analysis\n def get_image_path(self,default_content):\n head_tail = 
os.path.split(default_content)\n pic_name = head_tail[1]\n pic_path = self.eceld_folder+'/Clicks/'+pic_name\n return pic_path\n \n\n def show(self):\n string = str(self.index_observation) + \") \" + \"start: \" + str(self.start) + ', ' + \"data_type: \" + str(self.data_type) + ', ' + \"artifact: \" + str(self.artifact) + ', ' + \"data: \" + str(self.data)\n return string\n \n\nif __name__ == '__main__':\n \n directory = '../Causation_Extractor/relationships/'\n file_list = []\n eceld_folder = \"\"\n for file in os.listdir(directory):\n file_list.append(file)\n file_list.sort()\n for f in file_list:\n with open(directory + f, 'r') as relation:\n temp_relation = Relation(json.load(relation),eceld_folder)\n for item in temp_relation.observation_list:\n print(str(item.data))\n print(\"-------------\")\n","sub_path":"Builder/Relation.py","file_name":"Relation.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"536829920","text":"# Uses python3\nimport sys\nimport itertools\n\npartition_dict = {}\ndef partition(W, A, j):\n W = sorted(W)\n key = str(W[0]) + str(W[1]) + str(W[2]) + str(j)\n if key in partition_dict:\n return partition_dict[key]\n elif W[0] == W[1] == W[2] == 0:\n partition_dict[key] = 1\n return 1\n elif j == 0:\n partition_dict[key] = 0\n return 0\n elif A[j-1] <= W[0]:\n partition_dict[key] = max(partition([W[0] - A[j-1]] + W[1:], A, j-1), partition([W[0], W[1] - A[j-1], W[2]], A, j-1), partition([W[0], W[1], W[2]-A[j-1]], A, j-1))\n return partition_dict[key]\n elif A[j-1] <= W[1]:\n partition_dict[key] = max(partition([W[0], W[1] - A[j-1], W[2]], A, j - 1), partition([W[0], W[1], W[2]-A[j-1]], A, j-1))\n return partition_dict[key]\n elif A[j-1] <= W[2]:\n partition_dict[key] = partition([W[0],W[1],W[2]-A[j-1]],A, j-1)\n return partition_dict[key]\n else:\n partition_dict[key] = 0\n return 0\n\n\ndef partition3(A):\n sum_a = sum(A)\n if sum_a % 3 != 0:\n return 0\n else:\n sum_per = sum_a//3\n W = [sum_per]*3\n return partition(W, A, len(A))\n\n\n# n, *A = list(map(int, input().split()))\n# print(partition3(A))\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *A = list(map(int, input.split()))\n print(partition3(A))\n\n","sub_path":"week6_dynamic_programming2/2_partitioning_souvenirs/partition3.py","file_name":"partition3.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"34328500","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 8 00:54:39 2019\n\n@author: datta\n\"\"\"\n\nfrom Plot import * \nimport pandas as pd \nfrom rotations import * \nimport math\nimport numpy as np \n\n\n# Wraps angle to (-pi,pi] range\ndef wraptopi(x):\n if x > np.pi:\n x = x - (np.floor(x / (2 * np.pi)) + 1) * 2 * np.pi\n elif x < -np.pi:\n x = x + (np.floor(x / (-2 * np.pi)) + 1) * 2 * np.pi\n return x\n\ndef ros_time_To_Seconds(ros_time):\n year = ros_time[0:4]\n month = ros_time[5:7]\n day = ros_time[8:10]\n hours = ros_time[11:13]\n minutes = ros_time[14:16]\n seconds = ros_time[17:]\n\n total_seconds = float(hours) * 3600 + float(minutes) * 60 + float(seconds)\n #print(year, month, day, hours, minutes, seconds, total_seconds)\n return total_seconds\n\ndef ros_time_diff(ros_time1, ros_time2):\n time_diff = ros_time_To_Seconds(ros_time1) - ros_time_To_Seconds(ros_time2)\n #print(abs(time_diff))\n return abs(time_diff)\n\ndef ros_time_df_To_Seconds(ros_time_df):\n\n\n year = 
ros_time_df[0:4]\n month = ros_time_df[5:7]\n day = ros_time_df[8:10]\n hours = ros_time_df[11:13]\n minutes = ros_time_df[14:16]\n seconds = ros_time_df[17:]\n\n total_seconds = hours * 3600 + minutes * 60 + seconds\n #print(year, month, day, hours, minutes, seconds, seconds)\n return total_seconds\n\ndef odometry_msg_to_euler(odom_msg):\n euler = Quaternion(odom_msg['.pose.pose.orientation.w'],\n odom_msg['.pose.pose.orientation.x'],\n odom_msg['.pose.pose.orientation.y'],\n odom_msg['.pose.pose.orientation.z']).to_euler()\n return euler\n \ndef odometry_msg_to_yaw(odom_msg):\n yaw = odometry_msg_to_euler(odom_msg)[2]\n return yaw\n\n\n\ndef Geodetic_to_ECEF(latitude, longitude, height):\n #define a and b\n latitude = math.radians(latitude)\n longitude = math.radians(longitude)\n a = 6378137 #meters\n b = 6356752.31424518 #meters\n N = (a**2)/(np.sqrt(a**2*np.cos(latitude)*np.cos(latitude)+b**2*np.sin(latitude)*np.sin(latitude)))\n x = (N + height) * np.cos(latitude) * np.cos(longitude)\n y = (N + height) * np.cos(latitude) * np.sin(longitude)\n z = ((b**2/a**2)*N + height) * np.sin(latitude)\n\n return x, y, z\n\nclass Converter:\n def __init__(self, column1=None, column2=None):\n self.column1 = column1\n self.column2 = column2\n \n def GPS_to_UTC(self, gps_time):\n #Gps time from Jan 1st 1970 and Unix time is from Jan 5th 1980\n unix_time_diff = 315964782\n utc_time = gps_time + unix_time_diff\n return utc_time\n\n def UTC_to_GPS(self, unix_time):\n #Gps time from Jan 1st 1970 and Unix time is from Jan 5th 1980\n unix_time_diff = 315964782\n gps_time = unix_time - unix_time_diff\n return gps_time\n\n def format_to_GPS(self, read_file):\n #print(read_file[self.column1])\n \n ###### Week and GPSTime conversion from GroundTruth data ##############\n #(Week * days_in_week * hours_in_a_day * minutes * seconds + GPS_time)#\n #######################################################################\n\n gps_time = (read_file[self.column1] * 7 * 24 * 60 * 60 + read_file[self.column2])\n return gps_time\n\n \n\"\"\"\n### Modified csv file used as Ground truth data\n\nGroundTruth = Converter(column1 = 'Week', column2 = 'GPSTime')\nread_file = pd.read_csv(\"CSVFiles/ground_truth.csv\")\ngps_time = GroundTruth.format_to_GPS(read_file)\n#print(gps_time)\nunix_time = GroundTruth.GPS_to_UTC(gps_time)\n#print(unix_time)\ndataframe = pd.DataFrame(read_file)\ndataframe['UTC'] = unix_time\n#print(dataframe)\ndataframe.to_csv(\"CSVFiles/ground_truth.csv\")\n\n\"\"\"\n\n\n### For debugging date time convestion \n\"\"\"\n\nprint(ros_time_To_Seconds(\"2019/07/11/08:54:51.938986\"))\nprint(ros_time_To_Seconds(\"2019/07/11/08:54:52.938986\"))\n\nprint(ros_time_diff(\"2019/07/11/08:54:52.1678494\", \"2019/07/11/08:54:51.938986\"))\n\n\"\"\"\n\"\"\"\n### Debug for geo to ecef\nx1, y1, z1 = Geodetic_to_ECEF(47.0591225026\n, 15.4586022146\n, 405.441\n)\nprint(x1, y1, z1)\n\nx2, y2, z2 = Geodetic_to_ECEF(47.0591749850\n, 15.4909999\n, 405.441\n)\nprint(x2, y2, z2)\n\nprint(x1-x2, y1-y2, z1-z2)\n\n\n#Getting orientation to yaw\n\nread_file = pd.read_csv(\"CSVFiles/zed_odom.csv\")\nyaw = odometry_msg_to_yaw(read_file)\ndataframe = pd.DataFrame(read_file)\ndataframe['yaw'] = yaw\ndataframe.to_csv(\"CSVFiles/zed_odom.csv\")\n\"\"\"\n\n","sub_path":"robot_localization_masters_project/DataConvert.py","file_name":"DataConvert.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"400867034","text":"# uncompyle6 version 3.7.4\n# 
Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/general_conf/path_config.py\n# Compiled at: 2018-09-13 02:55:15\n# Size of source mod 2**32: 410 bytes\nfrom os.path import expanduser, join\nhome = expanduser('~')\nconfig_path = join(home, '.autoxtrabackup')\nconfig_path_file = join(config_path, 'autoxtrabackup.cnf')\nlog_file_path = join(config_path, 'autoxtrabackup.log')","sub_path":"pycfiles/mysql_autoxtrabackup-1.5.5-py3.6/path_config.cpython-36.py","file_name":"path_config.cpython-36.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126724543","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/advanced_corporate_actions/./etc/FProcessCorpActions.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FProcessCorpActions - Perform corporate action elections\n\nDESCRIPTION\n\nNOTE\n\nENDDESCRIPTION\n----------------------------------------------------------------------------\"\"\"\n\nimport acm\nimport FBDPGui\nimport importlib\nimportlib.reload(FBDPGui)\n\nfrom FCorpActionProcessor import CorpActionProcessor\nfrom FBDPRollback import RollbackWrapper\nfrom FTransactionHandler import RollbackHandler\nfrom FBDPCurrentContext import Summary, Logme, CreateLog\n\nScriptName = \"FProcessCorpActions\"\n\nFBDPGui.DefaultVariables.defaults = FBDPGui.Parameters('FBDPParameters',\n 'FProcessCorpActions')\n\nttCorpActions = \"The corporate action records to process.\"\n\n\nael_variables = FBDPGui.TestVariables(\n # [VariableName,\n # DisplayName,\n # Type, CandidateValues, Default,\n # Mandatory, Multiple, Description, InputHook, Enabled]\n ['CorpActions',\n 'Corporate Actions',\n 'FCorporateAction', None, None,\n 1, 1, ttCorpActions, None, 1, None],\n )\n\ndef ael_main(dictionary):\n\n import FBDPCommon\n importlib.reload(FBDPCommon)\n\n CreateLog(ScriptName,\n dictionary['Logmode'],\n dictionary['LogToConsole'],\n dictionary['LogToFile'],\n dictionary['Logfile'],\n dictionary['SendReportByMail'],\n dictionary['MailList'],\n dictionary['ReportMessageType'])\n\n FBDPCommon.execute_script(perform, dictionary)\n\n\ndef perform(dictionary):\n corpActions = dictionary['CorpActions']\n for action in corpActions:\n Logme()('Processing corporate action {0}'.format(action.Name()))\n rollback = RollbackWrapper(action.Name(), \n bool(dictionary['Testmode']))\n processor = CorpActionProcessor(action, RollbackHandler(rollback))\n processor.Process()\n\n Summary().log(dictionary)\n Logme()(None, 'FINISH')\n","sub_path":"Extensions/Advanced Corporate Actions/FPythonCode/FProcessCorpActions.py","file_name":"FProcessCorpActions.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"312266930","text":"\"\"\"\nThree in One:\n Describe how you could use a single array to implement three stacks.\n\"\"\"\n\n\nclass Stack:\n def __init__(self, array, start, end):\n self.array = array\n self.start = start\n self.current = start\n self.end = end\n\n def push(self, value):\n if self.current == self.end:\n raise ValueError(\"Stack is full\")\n self.array[self.current] = value\n self.current += 1\n\n def pop(self):\n if self.current < self.start:\n raise ValueError(\"Stack is empty\")\n value = self.array[self.current - 1]\n self.array[self.current - 
1] = None\n self.current -= 1\n return value\n\n def top(self):\n return self.array[self.current - 1]\n\n\ndef solution(array):\n size = len(array)\n size_stack = size // 3\n\n stack_01 = Stack(array, 0, size_stack)\n stack_02 = Stack(array, size_stack, size_stack + size_stack)\n stack_03 = Stack(array, size_stack + size_stack, size)\n\n for x in range(0, 10):\n stack_01.push(x)\n\n for x in range(10, 20):\n stack_02.push(x)\n\n for x in range(20, 30):\n stack_03.push(x)\n\n for x in range(29, 20, -1):\n value = stack_03.pop()\n assert value == x, f\"{x} != {value}\"\n\n for x in range(19, 10, -1):\n value = stack_02.pop()\n assert value == x, f\"{x} != {value}\"\n\n for x in range(9, 0, -1):\n value = stack_01.pop()\n assert value == x, f\"{x} != {value}\"\n\n print(array)\n\n\nif __name__ == \"__main__\":\n array = [None] * 30\n print(\"Solution: \", solution(array))\n","sub_path":"python/stacks_queues/question_3_1.py","file_name":"question_3_1.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"286455440","text":"# Shows an example of a function that accepts an int argument and returns a string response\n# The contents of the returned string are determined by the value in the int passed into the function\n\nimport random\n\ndef getAnswer(answerNumber):\n # If you don't specify a return, the default for a function is to return 'None'\n # 'None' represents the absense of a value\n if answerNumber == 1:\n return 'It is certain'\n elif answerNumber == 2:\n return 'It is decidedly so'\n elif answerNumber == 3:\n return 'Yes'\n elif answerNumber == 4:\n return 'Reply hazy try again'\n elif answerNumber == 5:\n return 'Ask again later'\n elif answerNumber == 6:\n return 'Concentrate and ask again'\n elif answerNumber == 7:\n return 'My reply is no'\n elif answerNumber == 8:\n return 'Outlook not so good'\n elif answerNumber == 9:\n return 'Very doubtful'\n\nr = random.randint(1, 9) # Inclusive of both the supplied start and end points\nfortune = getAnswer(r)\nprint(fortune)\n","sub_path":"3. Functions/3_magic8Ball.py","file_name":"3_magic8Ball.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"547150542","text":"\"\"\"empty message\n\nRevision ID: 5b5b4712fb30\nRevises: 40d0ef599e82\nCreate Date: 2016-11-04 15:29:37.751000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5b5b4712fb30'\ndown_revision = '40d0ef599e82'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('contact_query',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('body', sa.String(), nullable=True),\n sa.Column('website_url', sa.String(), nullable=True),\n sa.Column('email', sa.String(), nullable=True),\n sa.Column('subscribed', sa.Boolean(), nullable=True),\n sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('contact_query')\n ### end Alembic commands ###\n","sub_path":"store/migrations/versions/5b5b4712fb30_.py","file_name":"5b5b4712fb30_.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"508658842","text":"from tkinter import Canvas, Tk\nfrom helpers import make_circle\n\ngui = Tk()\ngui.title('Circle')\ncanvas = Canvas(gui, width=500, height=500, background='white')\ncanvas.pack()\n########################## YOUR CODE BELOW THIS LINE ##############################\n\n\n\n\n# Make the first spirograph pictured\nmake_circle(canvas, (100, 50), 25)\n\n\n\n########################## YOUR CODE ABOVE THIS LINE ############################## \ncanvas.mainloop()","sub_path":"course-files/lectures/lecture13/while_loops/03_drawing/05_spirograph_1.py","file_name":"05_spirograph_1.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"260643346","text":"import tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow_datasets as tfds\nimport numpy as np\n\nfrom utils import layer_dict, process_numpy, show, ShowCallBack, CustomIoU\n\n\nimg_data = np.load(\"./seg_dataset/img128.npy\")\nseg_data = np.load(\"./seg_dataset/seg128.npy\")\n\n\ndef get_model(img_shape=(128, 128, 3), filter_num=16):\n inputs = keras.layers.Input(shape=(img_shape))\n o = inputs\n o_dict = dict()\n for l, s in reversed(layer_dict.items()):\n if l == img_shape[0]:\n o = keras.layers.Conv2D(filters=filter_num * s, kernel_size=3, strides=1, padding=\"SAME\")(o)\n o = keras.layers.BatchNormalization()(o)\n o = keras.layers.LeakyReLU(0.2)(o)\n o_dict[l] = o\n if l < img_shape[0]:\n o = keras.layers.Conv2D(filters=filter_num * s, kernel_size=3, strides=2, padding=\"SAME\")(o)\n o = keras.layers.BatchNormalization()(o)\n o = keras.layers.LeakyReLU(0.2)(o)\n o_dict[l] = o\n\n for l, s in layer_dict.items():\n if l == img_shape[0]:\n break\n o = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[l * 2], kernel_size=3, strides=2, padding=\"SAME\")(o)\n o = keras.layers.BatchNormalization()(o)\n o = keras.layers.LeakyReLU(0.2)(o)\n\n o_ = keras.layers.Conv2D(filters=filter_num * layer_dict[l * 2], kernel_size=1, strides=1, padding=\"SAME\")(o_dict[l * 2])\n o_ = keras.layers.BatchNormalization()(o_)\n o_ = keras.layers.LeakyReLU(0.2)(o_)\n o = keras.layers.Add()([o, o_])\n o_dict[l * 2] = o\n\n os = []\n for l, o in o_dict.items():\n o_ = keras.layers.Conv2DTranspose(filters=filter_num * layer_dict[img_shape[0]], kernel_size=1, strides=img_shape[0]//l, padding=\"SAME\")(o)\n o_ = keras.layers.BatchNormalization()(o_)\n o_ = keras.layers.LeakyReLU(0.2)(o_)\n os.append(o_)\n o = keras.layers.Concatenate(axis=-1)(os)\n o = keras.layers.Conv2D(filters=256, kernel_size=1, strides=1, padding=\"SAME\")(o)\n return keras.Model(inputs=inputs, outputs=o)\n\n\ndataset = process_numpy(img_data, seg_data, batch_size=8)\nmodel = get_model()\nmodel.summary()\nkeras.utils.plot_model(model, show_shapes=True)\nmodel.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=CustomIoU(num_classes=256))\nmodel.fit(dataset, epochs=20)\nshow(seg_data[:16], model(img_data[:16]))\n","sub_path":"fpn.py","file_name":"fpn.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"341710146","text":"import os\nimport time\n\nfrom common.logger import Logger\nfrom common.globalconfig import GlobalConfig\nfrom http.server import HTTPServer\n\nfrom reportserver.server.RESTRequestHandler import RestRequestHandler\n\n\n#Create and start the HTTP Server\n\n\nclass SimpleHttpServer:\n def __init__(self):\n plugin_cfg_path = os.getenv('RECCE7_PLUGIN_CONFIG') or 'config/plugins.cfg'\n global_cfg_path = os.getenv('RECCE7_GLOBAL_CONFIG') or 'config/global.cfg'\n self.g_config = GlobalConfig(plugin_cfg_path, global_cfg_path)\n self.g_config.read_plugin_config()\n self.g_config.read_global_config()\n self.host = self.g_config.get_report_server_host()\n self.port = self.g_config.get_report_server_port()\n log_path = self.g_config['ReportServer']['reportserver.logName']\n log_level = self.g_config['ReportServer']['reportserver.logLevel']\n self.log = Logger(log_path, log_level).get('reportserver.server.SimpleHTTPServer.SimpleHTTPServer')\n\n def setupAndStart(self):\n\n server_address = (self.host, self.port)\n\n request_handler = RestRequestHandler\n\n # instantiate a server object\n httpd = HTTPServer (server_address, request_handler)\n print(time.asctime(), \"Server Starting - %s:%s\" % (self.host, self.port))\n\n try:\n # start serving pages\n httpd.serve_forever ()\n except KeyboardInterrupt:\n pass\n\n httpd.server_close()\n print(time.asctime(), \"Server Stopped - %s:%s\" % (self.host, self.port))","sub_path":"reportserver/server/SimpleHttpServer.py","file_name":"SimpleHttpServer.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"88483910","text":"ourcards=[7,2,9,4,3,1,6,5,8]\nj = 1\n\n\nwhile j < 9:\n\tcard = ourcards[j]\n\ti = j-1\n\twhile i >= 0 and ourcards[i] > card:\n\t ourcards[1+i] = ourcards[i]\n\t i = i-1\n\tourcards[i+1] = card\n\tj = j+1\nprint(ourcards)\n\n\n\n","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"364841413","text":"# Adapted from https://gist.github.com/batzner/7c24802dd9c5e15870b4b56e22135c96\nimport getopt\nimport sys\n\nimport tensorflow as tf\n\nusage_str = ('python tensorflow_rename_variables.py '\n '--checkpoint_dir=path/to/dir/ --replace_from=substr '\n '--replace_to=substr --add_prefix=abc --dry_run')\nfind_usage_str = ('python tensorflow_rename_variables.py '\n '--checkpoint_dir=path/to/dir/ --find_str=[\\'!\\']substr')\ncomp_usage_str = ('python tensorflow_rename_variables.py '\n '--checkpoint_dir=path/to/dir/ '\n '--checkpoint_dir2=path/to/dir/')\n\n\ndef print_usage_str():\n print('Please specify a checkpoint_dir. 
Usage:')\n print('%s\\nor\\n%s\\nor\\n%s' % (usage_str, find_usage_str, comp_usage_str))\n print('Note: checkpoint_dir should be a *DIR*, not a file')\n\n\ndef compare(checkpoint_dir, checkpoint_dir2):\n import difflib\n with tf.compat.v1.Session():\n list1 = [el1 for (el1, el2) in\n tf.train.list_variables(checkpoint_dir)]\n list2 = [el1 for (el1, el2) in\n tf.train.list_variables(checkpoint_dir2)]\n for k1 in list1:\n if k1 in list2:\n continue\n else:\n print('{} close matches: {}'.format(\n k1, difflib.get_close_matches(k1, list2)))\n\n\ndef find(checkpoint_dir, find_str):\n with tf.compat.v1.Session():\n negate = find_str.startswith('!')\n if negate:\n find_str = find_str[1:]\n for var_name, _ in tf.train.list_variables(checkpoint_dir):\n if negate and find_str not in var_name:\n print('%s missing from %s.' % (find_str, var_name))\n if not negate and find_str in var_name:\n print('Found %s in %s.' % (find_str, var_name))\n\n\ndef rename(checkpoint_dir, replace_from, replace_to, add_prefix, dry_run):\n checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)\n with tf.compat.v1.Session() as sess:\n for var_name, _ in tf.train.list_variables(checkpoint_dir):\n # Load the variable\n var = tf.train.load_variable(checkpoint_dir, var_name)\n\n # Set the new name\n if None not in [replace_from, replace_to]:\n new_name = var_name\n if replace_from in var_name:\n new_name = new_name.replace(replace_from, replace_to)\n if add_prefix:\n new_name = add_prefix + new_name\n if dry_run:\n print('%s would be renamed to %s.' % (var_name,\n new_name))\n else:\n print('Renaming %s to %s.' % (var_name, new_name))\n # Create the variable, potentially renaming it\n var = tf.Variable(var, name=new_name)\n\n if not dry_run:\n # Save the variables\n saver = tf.compat.v1.train.Saver()\n sess.run(tf.compat.v1.global_variables_initializer())\n saver.save(sess, checkpoint.model_checkpoint_path)\n\n\ndef main(argv):\n checkpoint_dir = None\n checkpoint_dir2 = None\n replace_from = None\n replace_to = None\n add_prefix = None\n dry_run = False\n find_str = None\n\n try:\n opts, args = getopt.getopt(argv, 'h', ['help=', 'checkpoint_dir=',\n 'replace_from=', 'replace_to=',\n 'add_prefix=', 'dry_run',\n 'find_str=',\n 'checkpoint_dir2='])\n except getopt.GetoptError as e:\n print(e)\n print_usage_str()\n sys.exit(2)\n for opt, arg in opts:\n if opt in ('-h', '--help'):\n print(usage_str)\n sys.exit()\n elif opt == '--checkpoint_dir':\n checkpoint_dir = arg\n elif opt == '--checkpoint_dir2':\n checkpoint_dir2 = arg\n elif opt == '--replace_from':\n replace_from = arg\n elif opt == '--replace_to':\n replace_to = arg\n elif opt == '--add_prefix':\n add_prefix = arg\n elif opt == '--dry_run':\n dry_run = True\n elif opt == '--find_str':\n find_str = arg\n\n if not checkpoint_dir:\n print_usage_str()\n sys.exit(2)\n\n if checkpoint_dir2:\n compare(checkpoint_dir, checkpoint_dir2)\n elif find_str:\n find(checkpoint_dir, find_str)\n else:\n rename(checkpoint_dir, replace_from, replace_to, add_prefix, dry_run)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"tensorflow_rename_variables.py","file_name":"tensorflow_rename_variables.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"71917071","text":"import gym\nimport gym.spaces as spaces\nimport numpy as np\nfrom collections import deque\nfrom multiprocessing import Process\nfrom copy import deepcopy\nimport time\n\nfrom envs.AtariPreprocessor import 
PreprocessAtariStates\nfrom envs.RewardShaper import RewardShaper\n\nfrom utils.LevelFileExporter import LevelFileExporter\n\n\nclass SingleCoreEnvWrapperHappyElim(Process):\n def __init__(self, env_name, env, seed = 123, extra_info = {}, pipe = None):\n env_name = deepcopy(env_name)\n seed = deepcopy(seed)\n\n super(SingleCoreEnvWrapperHappyElim, self).__init__()\n\n env_name = deepcopy(env_name)\n\n self.extra_info = deepcopy(extra_info)\n\n if len(env_name) >= 16 and env_name[:16] == \"HappyElimination\":\n if len(env_name) > 16:\n extra_info[\"extra_info\"][\"env_idx\"] = int(env_name[17:])\n\n self.env = env\n\n if extra_info[\"train_multiple_levels\"]:\n self.train_multiple_levels = True\n self.multiple_level_range = extra_info[\"multiple_level_range\"]\n else:\n self.train_multiple_levels = False\n\n self.env_type = \"HappyElimination\"\n\n self.level_file_exporter = LevelFileExporter(extra_info[\"level_index\"])\n self.enable_record = False\n else:\n raise NotImplementedError()\n\n if self.env_type == \"HappyElimination\":\n # State space\n self.observation_space = {\n \"Img\": self.env.state_shape,\n \"Vec\": self.env.vecState_shape\n }\n\n # Action space\n self.action_mode = \"Discrete\"\n self.action_n = self.env.action_num\n elif self.env_type == \"gym\":\n # State space\n if not isinstance(self.env.observation_space, spaces.Box):\n raise RuntimeError(\"Unknown state space {}\".foramt(type(self.env.observation_space)))\n\n self.observation_space = self.env.observation_space.shape\n if len(self.observation_space) == 3:\n self.observation_space = (4, 84, 84)\n\n self.last_atari_frames = deque(maxlen = 3)\n for _ in range(3):\n self.last_atari_frames.append(np.zeros([1, 84, 84]))\n\n # Action space\n if isinstance(self.env.action_space, spaces.Discrete):\n self.action_mode = \"Discrete\"\n self.action_n = self.env.action_space.n\n elif isinstance(self.env.action_space, spaces.Box):\n self.action_mode = \"Continuous\"\n self.action_dim = self.env.action_space.shape[0]\n self.action_range = [self.env.action_space.low[0], self.env.action_space.high[0]]\n else:\n raise RuntimeError(\"Unknown action space {}\".format(type(self.env.action_space)))\n else:\n raise NotImplementedError()\n\n self.rewardShaper = RewardShaper(env_name)\n\n # Start from checkpoint countdown\n self.start_from_checkpoint_count = 0\n self.cool_down = 0\n\n self.progress_condition = None\n self.scheduled_count = 0\n self.scheduled_cooldown = 0\n self.start_listening = False\n\n self.pipe = pipe\n\n def run(self):\n while True:\n command, args = self.receive_safe_protocol(self.pipe)\n\n if command == 0:\n # Reset\n if args is None:\n item = deepcopy(self.reset())\n # item = np.zeros([10, 10])\n else:\n item = deepcopy(self.reset(info = args))\n # item = np.zeros([10, 10])\n\n self.send_safe_protocol(self.pipe, 10, item)\n\n elif command == 1:\n # Step\n next_state, reward, done, info = self.step(args)\n # next_state, reward, done, info = np.zeros([10, 10]), 0, True, dict()\n next_state = deepcopy(next_state)\n reward = deepcopy(reward)\n done = deepcopy(done)\n info = deepcopy(info)\n item = (next_state, reward, done, info)\n\n self.send_safe_protocol(self.pipe, 11, item)\n\n elif command == 2:\n # Terminate\n return\n\n elif command == 3:\n continue\n\n else:\n raise NotImplementedError()\n\n def send_safe_protocol(self, pipe, command, args):\n success = False\n\n while not success:\n pipe.send((command, args))\n\n ret = pipe.recv()\n if ret == command:\n success = True\n\n def receive_safe_protocol(self, 
pipe):\n pipe.poll(None)\n\n command, args = pipe.recv()\n # print(\"[slave] received command\", command)\n\n pipe.send(command)\n # print(\"[slave] send command\", command)\n\n return deepcopy(command), deepcopy(args)\n\n def reset(self, info = dict()):\n if self.env_type == \"HappyElimination\":\n if self.start_listening:\n self.start_listening = False\n self.progress_condition = None\n self.scheduled_count = 0\n self.scheduled_cooldown = 0\n\n self.start_from_checkpoint_count = 0\n self.cool_down = 0\n else:\n self.start_listening = True\n\n if self.train_multiple_levels:\n if \"level_idx\" not in info:\n n = np.random.randint(self.multiple_level_range[0], self.multiple_level_range[1] + 2)\n else:\n n = info[\"level_idx\"]\n\n if self.start_from_checkpoint_count > 0:\n state = self.load_check_point()\n if state is None:\n state = self.env.reset(\"envs/tapLogicEnv/levels/\" + str(n) + \".txt\")\n else:\n self.start_from_checkpoint_count -= 1\n else:\n state = self.env.reset(\"envs/tapLogicEnv/levels/\" + str(n) + \".txt\")\n if self.cool_down > 0:\n self.cool_down -= 1\n else:\n if self.start_from_checkpoint_count > 0:\n state = self.load_check_point()\n if state is None:\n state = self.env.reset()\n else:\n self.start_from_checkpoint_count -= 1\n else:\n state = self.env.reset()\n if self.cool_down > 0:\n self.cool_down -= 1\n\n if self.enable_record:\n self.level_file_exporter.reset_record(self.env.viewParser)\n elif self.env_type == \"gym\":\n state = self.env.reset()\n\n if len(self.observation_space) == 3:\n state = PreprocessAtariStates(state)\n\n for _ in range(3):\n self.last_atari_frames.append(np.zeros([1, 84, 84]))\n\n origin_state = state\n state = np.concatenate(\n (state, self.last_atari_frames[0], self.last_atari_frames[1], self.last_atari_frames[2]),\n axis = 0\n )\n\n self.last_atari_frames.append(origin_state)\n else:\n raise NotImplementedError()\n\n return state\n\n def step(self, action):\n if self.env_type == \"HappyElimination\":\n if self.start_listening and self.env.check_progress_condition(self.progress_condition):\n start = time.clock()\n flag = self.check_point()\n\n end = time.clock()\n if end - start > 0.1:\n print(\"Takes too long for checkpoint\")\n raise RuntimeError(\"Takes too long for checkpoint\")\n\n if not flag:\n self.start_from_checkpoint_count = 0\n self.cool_down = 0\n else:\n self.start_from_checkpoint_count = self.scheduled_count\n self.cool_down = self.scheduled_cooldown\n\n self.start_listening = False\n self.progress_condition = None\n self.scheduled_count = 0\n self.scheduled_cooldown = 0\n\n if self.extra_info[\"state_mode\"] == 0:\n if isinstance(action, int):\n action = [action // self.env.boardSize[1], action % self.env.boardSize[1]]\n elif len(action) == 1:\n action = [action[0] // self.env.boardSize[1], action[0] % self.env.boardSize[1]]\n\n start = time.clock()\n next_state, reward, done, info = self.env.step(action)\n end = time.clock()\n if end - start > 0.1:\n print(\"Takes too long for step\")\n raise RuntimeError(\"Takes too long for step\")\n\n if self.enable_record and not info[\"unchanged\"]:\n self.level_file_exporter.record_next(self.env.viewParser, info[\"action_for_viewer\"])\n\n elif self.env_type == \"gym\":\n next_state, reward, done, info = self.env.step(action)\n\n if len(self.observation_space) == 3:\n next_state = PreprocessAtariStates(next_state)\n\n origin_next_state = next_state\n next_state = np.concatenate(\n (next_state, self.last_atari_frames[0], self.last_atari_frames[1], self.last_atari_frames[2]),\n axis 
= 0\n )\n\n self.last_atari_frames.append(origin_next_state)\n else:\n raise NotImplementedError()\n\n return next_state, reward, done, info\n\n def enable_recording(self):\n self.enable_record = True\n\n def save_record(self):\n self.level_file_exporter.store_file()\n\n def render(self, render_double = False, agent_hock = None):\n if self.env_type == \"HappyElimination\":\n self.env.render(render_double = render_double, agent_hock = agent_hock)\n elif self.env_type == \"gym\":\n self.env.render(mode = \"human\")\n else:\n raise NotImplementedError()\n\n def get_action_type(self):\n return self.action_mode\n\n def check_point(self):\n if self.env_type == \"HappyElimination\":\n return self.env.check_point()\n else:\n raise NotImplementedError(\"Env does not support check_point\")\n\n def load_check_point(self):\n if self.env_type == \"HappyElimination\":\n return self.env.load_check_point()\n else:\n raise NotImplementedError(\"Env does not support check_point\")\n\n def enable_concentration_learning(self, progress, count, cool_down):\n if self.cool_down > 0:\n return False\n\n if self.env.need_concentration_learning():\n self.progress_condition = progress\n self.scheduled_count = count\n self.scheduled_cooldown = cool_down\n","sub_path":"envs/SingleCoreEnvWrapperHappyElim.py","file_name":"SingleCoreEnvWrapperHappyElim.py","file_ext":"py","file_size_in_byte":11024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"88596292","text":"from lints import base\nfrom cryptography import x509\nfrom cryptography.x509.oid import NameOID\nfrom util.time import Time\nfrom util import ca,oid\n'''\n 7.1.6.1: If the Certificate asserts the policy identifier of 2.23.140.1.2.2, then it MUST also include organizationName, localityName (to the extent such field is required under Section 7.1.4.2.2), stateOrProvinceName (to the extent such field is required under Section 7.1.4.2.2), and countryName in the Subject field.*/\n 7.1.4.2.2 applies only to subscriber certificates.\n'''\nclass CertPolicyOVRequiresProvinceOrLocal(base.LintInterface):\n def Initialize(self):\n return 0\n\n def CheckApplies(self,c):\n return ca.IsSubscriberCert(c) and oid.SliceContainsOID(c,oid.BROrganizationValidatedOID)\n \n def Execute(self,c):\n try:\n if oid.TypeInName(c.subject,oid.LocalityNameOID) or oid.TypeInName(c.subject,oid.StateOrProvinceNameOID):\n return base.LintResult(base.LintStatus.Pass)\n else:\n return base.LintResult(base.LintStatus.Error)\n except:\n return base.LintResult(base.LintStatus.Error)\n \n\n\ndef init():\n base.RegisterLint(base.Lint(\"e_cert_policy_ov_requires_province_or_locality\",\"If certificate policy 2.23.140.1.2.2 is included, localityName or stateOrProvinceName MUST be included in subject\",\"BRs: 7.1.6.1\",base.LintSource.CABFBaselineRequirements,Time.CABEffectiveDate,CertPolicyOVRequiresProvinceOrLocal()))","sub_path":"lints/lint_cert_policy_ov_requires_province_or_locality.py","file_name":"lint_cert_policy_ov_requires_province_or_locality.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"512626792","text":"#!/usr/bin/python3\n\nimport sys\nimport re\nimport fileinput\n\n#DICTIONARY WITH COVERAGES\n\nconverted_read = True\n\nif len(sys.argv) == 5:\n unmapped = sys.argv[1]\n converted = sys.argv[2]\n genetic_map = sys.argv[3]\n chr = sys.argv[4]\nelse:\n sys.exit(\"The usage should be ./convert_4Ner_to_cM.py \\\n zip_4Ner_chr Ne 
out_cM_file\")\n\nExcluded = []\n\nwith open(unmapped, \"r\") as in_fh:\n for line in in_fh:\n line = line.rstrip()\n if not line.startswith(\"#\"):\n chr_coord = line.split()[2]\n Excluded.append(chr_coord)\n\n\nwith open(genetic_map, \"r\") as in2_fh:\n next(in2_fh)\n with open(converted, \"r\") as in3_fh:\n for line1, line2 in zip(in2_fh, in3_fh):\n line1 = line1.rstrip()\n line2 = line2.rstrip()\n fields1 = line1.split()\n fields2 = line2.split()\n if (fields1[0] not in Excluded) and (fields2[0] == chr):\n print(\"{}\\t{}\".format(fields2[2], fields1[1]))\n previous = fields2[2]\n continue\n else:\n if fields1[0] in Excluded:\n unmapped_read = True\n while unmapped_read:\n line1 = next(in2_fh)\n fields1 = line1.rstrip().split()\n if (fields1[0] not in Excluded) and (fields2[0] == chr):\n print(\"{}\\t{}\".format(fields2[2], fields1[1]))\n unmapped_read = False\n break\n elif (fields1[0] not in Excluded) and (fields2[0] != chr):\n break\n","sub_path":"MAP_RECOMB/new_maps/join_lifted_map.py","file_name":"join_lifted_map.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"637709378","text":"\"\"\"\nmymedia URL Configuration\n\"\"\"\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\n\nschema_view = get_schema_view(\n openapi.Info(\n title='mymedia API',\n default_version='v1',\n description='SSKDs for mymedia project.',\n terms_of_service='https://www.google.com/policies/terms/',\n contact=openapi.Contact(email='jumpyoshim@gmail.com'),\n license=openapi.License(name='MIT License'),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,)\n)\n\nurlpatterns = [\n re_path(r'^swagger(?P\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n\n path('admin/', admin.site.urls),\n path('oauth/', include('oauth2_provider.urls', namespace='oauth2_provider')),\n path('users/', include('users.urls')),\n path('groups/', include('groups.urls')),\n path('articles/', include('articles.urls')),\n]\n","sub_path":"mymedia/mymedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"321895069","text":"import argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"filename\", help=\"The target ip will write into this file\", action=\"store\", type=str)\n parser.add_argument(\n \"--tags\", help=\"The repo name need to be onlined\", action=\"store\", type=str, default=\"\")\n args = parser.parse_args()\n\n with open(args.filename, \"w\") as fobj:\n fobj.write(\n \"10.100.6.1,10.100.6.2,10.100.6.3,10.100.6.4,10.100.6.5,10.100.6.6,10.100.6.7,10.100.6.8\")\n","sub_path":"mock_get_tags_ips.py","file_name":"mock_get_tags_ips.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"36788333","text":"def minDistance(word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n len1 = len(word1)\n len2 = len(word2)\n if len1==0:\n return len2\n if len2==0:\n return len1\n d = [[0 
for _ in range(len2+1)] for _ in range(len1+1)]\n for i in range(0,len2+1):\n d[0][i] = i\n for i in range(0,len1+1):\n d[i][0] = i\n print(d)\n def myMin(x,y,z):\n t = x if x 3000:\r\n GI_nums_mult_COI.add(i)\r\n \r\n\r\n#makes a dictionary of lists for each taxa/gene choice\r\nfor i in GI_nums:\r\n if i.split(\"|\")[0] in dic.keys():\r\n dic_list = dic[i.split(\"|\")[0]]\r\n dic_list.append(i.split(\"|\")[1])\r\n dic[i.split(\"|\")[0]] = dic_list\r\n else:\r\n dic[i.split(\"|\")[0]] = [i.split(\"|\")[1]]\r\n genes.add(re.split('_|\\|', i)[1])\r\n \r\n#same for COI\r\nfor i in GI_nums_mult_COI:\r\n if i.split(\"|\")[0] in dic_COI.keys():\r\n dic_list = dic_COI[i.split(\"|\")[0]]\r\n dic_list.append(i.split(\"|\")[1])\r\n dic_COI[i.split(\"|\")[0]] = dic_list\r\n else:\r\n dic_COI[i.split(\"|\")[0]] = [i.split(\"|\")[1]]\r\n genes.add(re.split('_|\\|', i)[1])\r\n \r\ncount = 0\r\n#deal with lengths of COI and add to dic to try and resolve\r\nfor i in dic_COI:\r\n count += 1\r\n print(float(count)/float(len(dic_COI)))\r\n lengths = [int(m.split('_')[1]) for m in dic_COI[i]]\r\n individual = [dic_COI[i][x] for x, l in enumerate(lengths) if l < 2000]\r\n whole = [dic_COI[i][x] for x, l in enumerate(lengths) if l > 2000 and l < 3000]\r\n mito = [dic_COI[i][x] for x, l in enumerate(lengths) if l > 3000]\r\n if len(mito) > 0:\r\n GIs_to_align = [mito[0].split(\"_\")[0], 'GU365907']\r\n alignment = alignment_reg(GIs_to_align)\r\n iden = identity_calc(alignment)\r\n if iden > 80:\r\n #have to do span to account for random small blocks that dont align\r\n span = 0\r\n #get start\r\n for l in range(len(alignment[0])):\r\n col = alignment[:, l]\r\n if '-' not in col:\r\n span += 1\r\n if span == 10:\r\n break\r\n elif span > 0 and '-' in col:\r\n span = 0\r\n start = l-8\r\n span = 0\r\n #get stop\r\n for l in reversed(range(len(alignment[0]))):\r\n col = alignment[:, l]\r\n if '-' not in col:\r\n span += 1\r\n if span == 10:\r\n break\r\n elif span > 0 and '-' in col:\r\n span = 0\r\n end = l+10\r\n handle = Entrez.efetch(db=\"nucleotide\", rettype=\"fasta\", retmode=\"text\", id=mito[0].split(\"_\")[0], seq_start=start-1, seq_stop=end-2)\r\n record = SeqIO.read(handle, \"fasta\")\r\n records.append(record)\r\n else:\r\n print('Low iden when matching COI to whole mito')\r\n with open(\"COI_hand_check.txt\", \"a\") as a:\r\n a.write(i + '\\n')\r\n elif len(whole) > 0:\r\n #pick whole\r\n if len(whole) == 1:\r\n GI_nums_single.add(i+\"|\"+whole[0])\r\n else:\r\n #to pipeline\r\n dic[i] = whole\r\n if len(individual) > 0:\r\n #pick individual\r\n if len(individual) == 1:\r\n GI_nums_single.add(i+\"|\"+individual[0])\r\n else:\r\n print(individual)\r\n result = []\r\n ranges = {}\r\n current_start = -1\r\n current_stop = -1\r\n whole_length = 0\r\n #do tiling\r\n for m in [x.split('_')[0] for x in individual]:\r\n iden, start_stop = tiling([m], 'COI_trnL_COII')\r\n start, end = start_stop[0]\r\n #uses gi for danaus chrysippus COI_trnL_COII\r\n if iden > 70:\r\n #make dic of lists\r\n if (start, end) in ranges.keys():\r\n ranges_dic_list = ranges[(start, end)]\r\n ranges_dic_list.append(m)\r\n ranges[(start, end)] = ranges_dic_list\r\n else:\r\n ranges[(start, end)] = [m]\r\n else:\r\n print('Alignment below 70')\r\n print(GIs_to_align)\r\n if len(ranges) == 0:\r\n print('All alignments below 70, printing to file')\r\n with open(\"COI_hand_check.txt\", \"a\") as a:\r\n a.write(i + '\\n')\r\n #get merged range\r\n for start, stop in sorted(ranges.keys()):\r\n if start > current_stop:\r\n 
result.append((start, stop))\r\n current_start, current_stop = start, stop\r\n else:\r\n current_stop = max(current_stop, stop)\r\n result[-1] = (current_start, current_stop)\r\n for n in result:\r\n whole_length += n[1] - n[0] + 1\r\n #go through each combination of ranges to get 95% of whole range\r\n for L in range(1, len(ranges)+1):\r\n max_perc = 0\r\n for subset in itertools.combinations(ranges.keys(), L):\r\n comb_length = 0\r\n #for each combination, get merged length\r\n current_start = -1\r\n current_stop = -1\r\n result = []\r\n for start, stop in sorted(subset):\r\n if start > current_stop:\r\n result.append((start, stop))\r\n current_start, current_stop = start, stop\r\n else:\r\n current_stop = max(current_stop, stop)\r\n result[-1] = (current_start, current_stop)\r\n for x in result:\r\n comb_length += x[1] - x[0] + 1\r\n if whole_length >= comb_length:\r\n perc = float(comb_length)/float(whole_length)\r\n if perc > max_perc:\r\n max_perc = perc\r\n max_subset = set()\r\n max_subset.add(subset)\r\n elif perc == max_perc:\r\n max_subset.add(subset)\r\n else:\r\n pass \r\n else:\r\n pass\r\n # goes through all combinations in a level before breaking\r\n if max_perc > .95:\r\n break\r\n final_tiling = [(0, 0)]*L\r\n for combination in max_subset:\r\n for x, comb_frag in enumerate(sorted(combination)):\r\n if comb_frag[1] - comb_frag[0] + 1 > final_tiling[x][1] - final_tiling[x][0] + 1:\r\n final_tiling[x] = comb_frag\r\n print(final_tiling)\r\n possible_GIs = [ranges[x] for x in final_tiling]\r\n print([ranges[x] for x in final_tiling])\r\n for m in possible_GIs:\r\n if len(m) == 1:\r\n GI_nums_single.add(i+\"|\"+m[0] + \"_0\")\r\n else:\r\n dic[i] = m\r\n # merge ranges - if same number of ranges as original, keep all,\r\n # want the least number to overlap 95% of whole range\r\n #try each comb from low to high and if hits 95%, choose those\r\n # if multiple higher than 95%, choose one with best %\r\n #if multiple with same % and same numb of combs- multiple\r\nSeqIO.write(records, \"mito_COI.fa\", \"fasta\")\r\n#pulls out the GIs with first, the longest number of ATCGs and second, the longest length and makes dictionary\r\nfor i in dic:\r\n GIlist = []\r\n count = 0\r\n for n in dic[i]:\r\n GIlist.append(n.split(\"_\")[0])\r\n dic[i] = resolve_seqs(GIlist)\r\n print(str(round((float(count)/float(len(dic)))*100, 2)) + \"%\")\r\n count += 1\r\n#splits the ones that still have multiple (so the longest had multiple choices) and the ones that are resolved\r\nfor i in dic:\r\n if len(dic[i])>1:\r\n dic_mult[i] = dic[i]\r\n else:\r\n dic_single[i] = dic[i]\r\n\r\nfor i in genes:\r\n finalGInums_only1 = set()\r\n finalGInums_longest = set()\r\n for n in dic_single.keys():\r\n if i == re.split('_|\\|', n)[1]:\r\n finalGInums_longest.add(''.join(dic_single[n]))\r\n for n in GI_nums_single:\r\n if i == re.split('_|\\|', n)[1]:\r\n finalGInums_only1.add(re.split('_|\\|', n)[-2])\r\n with open(\"final_GIs.txt\", \"a\") as o:\r\n for m in finalGInums_only1:\r\n o.write(str(m)+\"\\n\")\r\n #this needs to go to blast_sp_parse.py\r\n with open(i + \"_accession_nums_resolved.txt\", \"w\") as o:\r\n for m in finalGInums_longest:\r\n o.write(str(m)+\"\\n\")\r\n#this needs to go to cluster.py \r\nwith open(\"multiple_gene_choices.txt\", \"w\") as w:\r\n for i in dic_mult:\r\n w.write(i + \"\\t\" + str(dic_mult[i]) + 
\"\\n\")\r\nconn.close()\r\n\r\n","sub_path":"multiple.py","file_name":"multiple.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"54634312","text":"import glob\n\nimport h5py\nfrom pylab import plt\n\nfrom epochs import epochs\n\nfiles = sorted(glob.glob('../outs/*'))\nfor no, file in enumerate(files):\n with h5py.File(file, 'r') as fid:\n rms = fid['misfit/rms_inland_at_epoch'][...]\n #if no==0:\n plt.plot(epochs, rms,'x-', label='Nrough=%d'%no)\n #plt.plot(epochs, rms,'x-')\n \nplt.grid('on')\nplt.xlabel('days after mainshock')\nplt.ylabel('RMS(m)')\nplt.legend(prop={'size':7}, bbox_to_anchor=(0.18, 1.01))\nplt.savefig('RMS_misfits_at_epochs.pdf')\nplt.show()\n \n","sub_path":"inversions/inversion6/occam/iter1/run2/analysis_rms/plot_rms.py","file_name":"plot_rms.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"534884436","text":"import argparse\nimport torch\nimport torchvision\nfrom torchvision import transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom utils import make_data_loader, create_save_folder, UnNormalize, multi_class_loss\nfrom diffusion import diffusion\nimport os\nfrom torch.multiprocessing import set_sharing_strategy\nset_sharing_strategy(\"file_system\")\nimport umap\nimport matplotlib\nmatplotlib.use(\"agg\")\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\nfrom PIL import Image\nimport faiss\nimport seaborn as sns\nimport pandas as pd\nimport copy\nimport random\n\n\nclass Trainer(object):\n def __init__(self, args):\n self.args = args\n \n kwargs = {\"num_classes\":self.args.num_class}\n\n if args.net == \"resnet18\":\n from nets.resnet import ResNet18\n model = ResNet18(pretrained=(args.load == 'imagenet'), **kwargs)\n elif args.net == \"resnet50\":\n from nets.resnet import ResNet50\n model = ResNet50(pretrained=(args.load == 'imagenet'), **kwargs)\n elif args.net == \"wideresnet282\":\n from nets.wideresnet import WRN28_2\n model = WRN28_2(**kwargs)\n else:\n raise NotImplementedError\n \n print(\"Number of parameters\", sum(p.numel() for p in model.parameters() if p.requires_grad))\n \n self.model = nn.DataParallel(model).cuda()\n \n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr, momentum=0.9, nesterov=True, weight_decay=5e-4)\n self.criterion = nn.CrossEntropyLoss(ignore_index=-1)\n self.criterion_nored = nn.CrossEntropyLoss(reduction=\"none\")\n \n self.kwargs = {\"num_workers\": 12, \"pin_memory\": False} \n self.train_loader, self.val_loader = make_data_loader(args, **self.kwargs)\n\n\n self.best = 0\n self.best_epoch = 0\n self.acc = []\n self.train_acc = []\n self.med_clean = []\n self.med_noisy = []\n self.perc_clean = []\n self.perc_noisy = []\n\n self.reductor_plot = umap.UMAP(n_components=2)\n\n self.toPIL = torchvision.transforms.ToPILImage()\n\n self.unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n\n \n def train(self, epoch):\n running_loss = 0.0\n self.model.train()\n \n acc = 0\n tbar = tqdm(self.train_loader)\n m_dists = torch.tensor([])\n l = torch.tensor([])\n self.epoch = epoch\n total_sum = 0\n for i, sample in enumerate(tbar):\n image, target, ids = sample[\"image\"], sample[\"target\"], sample[\"index\"]\n \n if self.args.cuda:\n target, image = target.cuda(), 
image.cuda()\n\n self.optimizer.zero_grad()\n \n outputs, feat = self.model(image)\n\n loss = multi_class_loss(outputs, target)\n loss = torch.mean(loss)\n \n preds = torch.argmax(F.log_softmax(outputs, dim=1), dim=1)\n \n acc += torch.sum(preds == torch.argmax(target, dim=1))\n total_sum += preds.size(0)\n loss.backward()\n if i % 10 == 0:\n tbar.set_description(\"Training loss {0:.2f}, LR {1:.6f}\".format(loss.item(), self.optimizer.param_groups[0][\"lr\"]))\n self.optimizer.step()\n print(\"[Epoch: {}, numImages: {}, numClasses: {}]\".format(epoch, total_sum, self.args.num_class))\n print(\"Training Accuracy: {0:.4f}\".format(float(acc)/total_sum))\n self.train_acc.append(float(acc)/total_sum)\n return float(acc)/total_sum\n \n\n def val(self, epoch):\n self.model.eval()\n acc = 0\n vbar = tqdm(self.val_loader)\n total = 0\n with torch.no_grad():\n for i, sample in enumerate(vbar):\n image, target = sample[\"image\"], sample[\"target\"]\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n outputs = self.model(image)[0]\n \n loss = self.criterion(outputs, target)\n preds = torch.argmax(F.log_softmax(outputs, dim=1), dim=1)\n acc += torch.sum(preds == target.data)\n total += preds.size(0)\n \n if i % 10 == 0:\n vbar.set_description(\"Validation loss: {0:.2f}\".format(loss.item()))\n final_acc = float(acc)/total\n print(\"[Epoch: {}, numImages: {}]\".format(epoch, (len(self.val_loader)-1)*self.args.batch_size + image.shape[0]))\n self.acc.append(final_acc)\n if final_acc > self.best:\n self.best = final_acc\n self.best_epoch = epoch\n \n print(\"Validation Accuracy: {0:.4f}, best accuracy {1:.4f} at epoch {2}\".format(final_acc, self.best, self.best_epoch))\n return final_acc\n\n \n def load(self, dir, load_linear=False, load_optimizer=False):\n #This load function accepts different types of checkpoint dictionaries and will remove the last layer of resnets/wideresnets/cnns by default\n dict = torch.load(dir)\n if load_optimizer:\n self.optimizer.load_state_dict(dict[\"optimizer\"])\n self.best = dict[\"best\"]\n self.best_epoch = dict[\"best_epoch\"]\n\n if \"state_dict\" in dict.keys():\n dic = dict[\"state_dict\"]\n elif \"network\" in dict.keys():\n dic = dict[\"network\"]\n elif \"net\" in dict.keys():\n dic = dict[\"net\"]\n else:\n dic = dict\n\n if \"module\" in list(dic.keys())[0]:\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in dic.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n dic = new_state_dict\n\n if not load_linear:\n if self.args.net == \"wideresnet282\":\n del dic[\"output.weight\"]\n del dic[\"output.bias\"]\n elif \"resnet\" in self.args.net:\n del dic[\"linear.weight\"]\n del dic[\"linear.bias\"]\n self.model.module.load_state_dict(dic, strict=False)\n\n if \"state_dict\" in list(dict.keys()):\n print(\"Loaded model with top accuracy {} at epoch {}\".format(self.best, dict[\"best_epoch\"]))\n self.epoch = dict[\"epoch\"]\n return dict[\"epoch\"]\n \n print(\"Loaded model with top accuracy %3d\" % (self.best)) \n\n def track_loss(self, relabel, plot=False, multi=False, feats=False):\n self.model.eval()\n acc = 0\n total_sum = 0\n with torch.no_grad():\n nargs = copy.deepcopy(self.args)\n loader, _ = make_data_loader(nargs, no_aug=True, **self.kwargs) \n loader.dataset.targets = relabel #unshuffled original label guess\n \n tbar = tqdm(loader)\n tbar.set_description(\"Tracking loss\")\n \n features = torch.tensor([]) \n losses = torch.tensor([])\n for i, sample in enumerate(tbar):\n image, 
target, ids = sample[\"image\"], sample[\"target\"], sample[\"index\"]\n if self.args.cuda:\n target, image = target.cuda(), image.cuda()\n outputs, feat = self.model(image)\n features = torch.cat((features, feat.cpu()))\n \n loss = multi_class_loss(outputs, target)\n\n losses = torch.cat((losses, loss.detach().cpu()))\n\n losses = losses.view(-1)\n\n if feats:\n return losses, features\n return losses\n\n\n def label_propagation(self, plot=False, diffuse=False):\n self.model.eval()\n with torch.no_grad():\n transform = None\n if self.args.load == \"imagenet\":\n transform = torchvision.transforms.Resize(224) #Was trained at a different resolution than cifar\n loader, _ = make_data_loader(self.args, no_aug=True, transform=transform, **self.kwargs)\n dim = 2048 if self.args.net == \"resnet50\" else 512\n dim = dim if self.args.net != \"wideresnet282\" else 128\n features_average = torch.zeros((len(loader.dataset), dim))\n\n features = torch.tensor([])\n tbar = tqdm(loader)\n for i, sample in enumerate(tbar):\n image, target, ids = sample[\"image\"], sample[\"target\"], sample[\"index\"]\n if self.args.cuda:\n target, image = target, image.cuda()\n outputs, feat = self.model(image)\n features = torch.cat((features, feat.cpu()))\n features_average += features\n torch.cuda.empty_cache()\n features_average = features_average\n targ = torch.tensor(loader.dataset.targets) \n features = features_average.numpy()\n\n #Normalize the features + PCA whitening\n faiss.normalize_L2(features)\n pca = PCA(whiten=True, n_components=features.shape[1]) \n features = pca.fit_transform(features)\n features = np.ascontiguousarray(features)\n\n labels = - torch.ones(targ.shape[0])\n \n for i,ii in enumerate(self.indicies):\n labels[ii] = targ[ii] #known samples\n \n if diffuse: #Diffusion\n final_labels = torch.zeros(targ.shape[0], self.args.num_class)\n weights = torch.zeros(targ.shape[0]) \n p_labels, p_weights, class_weights = diffusion(features, labels.clone(), self.indicies, k=200, max_iter=50, classes=self.args.num_class)\n p_labels = torch.from_numpy(p_labels).float()\n p_weights = torch.from_numpy(p_weights).float()\n else: #KNN\n index = faiss.IndexFlatIP(features.shape[1])\n index.add(features[self.indicies])\n _, I = index.search(features, 1)\n p_labels = labels[self.indicies[I]]\n p_weights = torch.ones(features.shape[0])\n \n if plot is not None: #Optional UMap plots\n embedding = self.reductor_plot.fit_transform(features)\n emb = embedding[self.indicies]#Centroids, at least one per class\n plt.figure(7)\n plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(n_colors=self.args.num_class)[x] for x in targ], s=0.1)\n plt.scatter(emb[:, 0], emb[:, 1], c=[sns.color_palette(n_colors=self.args.num_class)[x] for x in targ[self.indicies]], marker=\"*\")\n plt.scatter(emb[:, 0], emb[:, 1], c=\"#000000\", marker=\"*\", s=1.1)\n plt.savefig(\"data/embedding{}.png\".format(plot))\n \n df = pd.DataFrame(embedding, columns=[\"x\", \"y\"])\n sns_plot = sns.jointplot(x=\"x\", y=\"y\", data=df, kind=\"kde\");\n sns_plot.savefig(\"data/embedding_density{}.png\".format(plot))\n \n plt.figure(6)\n plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(n_colors=self.args.num_class)[x] for x in torch.argmax(p_labels, dim=1)], s=0.1)\n plt.scatter(emb[:, 0], emb[:, 1], c=[sns.color_palette(n_colors=self.args.num_class)[x] for x in torch.argmax(p_labels[self.indicies], dim=1)], marker=\"*\")\n plt.scatter(emb[:, 0], emb[:, 1], c=\"#000000\", marker=\"*\", s=1.1)\n 
plt.savefig(\"data/embedding_diffusion{}.png\".format(plot))\n plt.close()\n\n if diffuse:\n labels = torch.zeros(features.shape[0], self.args.num_class)\n for i, p in enumerate(torch.argmax(p_labels,1)):\n labels[i][p.item()] = 1\n else:\n labels = torch.zeros(features.shape[0], self.args.num_class)\n for i, p in enumerate(p_labels.long()):\n labels[i][p] = 1\n p_labels = labels\n \n del features\n torch.cuda.empty_cache() \n return p_labels, p_weights\n \ndef main():\n\n\n parser = argparse.ArgumentParser(description=\"Reliable Label Bootstrapping, ReLaB\")\n parser.add_argument(\"--net\", type=str, default=\"wideresnet282\",\n choices=[\"resnet18\", \"wideresnet282\", \"resnet50\"],\n help=\"net name, only used for loading the self-supervised weights for the label propagation (default: wideresnet282)\")\n parser.add_argument(\"--dataset\", type=str, default=\"cifar10\", choices=[\"cifar10\", \"cifar100\", \"miniimagenet\"])\n parser.add_argument(\"--epochs\", type=int, default=60)\n parser.add_argument(\"--lr\", type=float, default=0.1)\n parser.add_argument(\"--gamma\", type=float, default=0.1, help=\"Multiplicative factor for lr decrease, default .1\")\n parser.add_argument(\"--batch-size\", type=int, default=100)\n parser.add_argument(\"--save-dir\", type=str, default=\"bootstrapped_dataset\")\n parser.add_argument(\"--load\", default=None, type=str, required=True, help=\"Pretrained self-supervised weights, type imagenet for imagenet weights\")\n parser.add_argument(\"--diffuse\", action=\"store_true\", default=False, help=\"Use diffusion, default: kNN\")\n parser.add_argument(\"--no-cuda\", action=\"store_true\", default=False, help=\"No cuda\")\n parser.add_argument(\"--spc\", default=1, type=int, help=\"Number of labeled samples per class\")\n parser.add_argument(\"--seed\", default=-1, type=int)\n parser.add_argument(\"--boot-spc\", default=50, type=int, help=\"Number of samples to bootstrap, default: 50 (cifar10)\")\n\n args = parser.parse_args()\n #For reproducibility purposes\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n dict_class = {\"cifar10\":10, \"cifar100\":100, \"miniimagenet\":100}\n \n args.num_class = dict_class[args.dataset]\n \n if args.save_dir is None:\n args.save_dir = \"{}_{}spc\".format(args.net, args.dataset, args.spc)\n\n if not os.path.isdir(args.save_dir):\n os.mkdir(args.save_dir)\n \n args.save_dir = os.path.join(args.save_dir, \"seed{}\".format(args.seed))\n\n if not os.path.isdir(args.save_dir):\n os.mkdir(args.save_dir)\n \n args.cuda = not args.no_cuda\n\n nargs = copy.deepcopy(args)\n _trainer = Trainer(nargs)\n \n torch.manual_seed(args.seed) \n targ = torch.tensor(_trainer.train_loader.dataset.targets) #Original labels\n ind = torch.randperm(targ.shape[0])\n\n classes = [[] for _ in range(dict_class[args.dataset])]\n indicies = [[] for _ in range(dict_class[args.dataset])]\n #Random seed sample selection\n for i in ind:\n if len(classes[targ[i]]) < args.spc:\n classes[targ[i]].append(targ[i])\n indicies[targ[i]].append(i)\n \n #Indicies for the 1 sample per class experiments, ordered from worst to best https://github.com/google-research/fixmatch/issues/16\n one_spc_indicies = [\n [7408, 8148, 9850, 10361, 33949, 36506, 37018, 45044, 46443, 47447],\n [5022, 8193, 8902, 9601, 25226, 26223, 34089, 35186, 40595, 48024],\n [7510, 13186, 14043, 21305, 22805, 31288, 34508, 40470, 41493, 45506],\n [9915, 9978, 16631, 19915, 28008, 35314, 35801, 36149, 39215, 42557],\n [6695, 14891, 19726, 22715, 23999, 34230, 
46511, 47457, 49181, 49397],\n [12830, 20293, 26835, 30517, 30898, 31061, 43693, 46501, 47310, 48517],\n [1156, 11501, 19974, 21963, 32103, 42189, 46789, 47690, 48229, 48675],\n [4255, 6446, 8580, 11759, 12598, 29349, 29433, 33759, 35345, 38639]\n ]\n \n if args.spc == 1 and args.dataset == \"cifar10\": #Select the comparable samples for CIFAR-10\n pos = [7, 4, 0] #first, last and one in the middle\n ind = one_spc_indicies[pos[args.seed-1]]\n indicies = [[i] for i in ind]\n \n indicies = torch.tensor(indicies).view(-1) #Final selected labeled samples\n\n bc = np.bincount(targ[indicies].numpy())\n print(\"Labeled samples per class: \", bc)\n \n ori_labels = torch.tensor(_trainer.train_loader.dataset.targets).clone()\n\n relabel_acc = []\n \n _trainer = Trainer(args)\n _trainer.indicies = indicies\n \n #Loading self-supervised weights\n if args.load is not None and args.load != \"imagenet\":\n _trainer.load(args.load, load_linear=False)\n\n #Get the noisy labels using diffusion in the feature space and the original samples\n final_labels = torch.zeros(targ.shape[0], _trainer.args.num_class)\n\n p_labels, _ = _trainer.label_propagation(plot=None, diffuse=args.diffuse)\n\n relabel = torch.zeros(p_labels.shape[0], _trainer.args.num_class)\n for i, p in enumerate(torch.argmax(p_labels,1)):\n relabel[i][p.item()] = 1\n\n args.relabel = relabel\n sums = torch.zeros(args.num_class)\n accuracies = torch.zeros(args.num_class)\n for i, l in enumerate(relabel):\n l = torch.argmax(l)\n sums[l] += 1\n if l == ori_labels[i]:\n accuracies[l] += 1\n\n accuracies = accuracies / sums\n\n #Just indicative\n print(\"Per class accuracy subset:\", accuracies)\n print(\"Subset mean accuracy:\", accuracies.mean())\n print(\"Weighted mean accuracy:\", (accuracies * sums).sum() / sums.sum())\n print(\"Per class subset:\", sums)\n\n #Initialise a new net to train on the set\n nargs = copy.deepcopy(args)\n nargs.net = \"wideresnet282\"\n _trainer = Trainer(nargs) #Retrain from scratch\n\n _trainer.indicies = torch.tensor(indicies).view(-1) # Known samples\n\n print(\"Noise ratio\",1-(ori_labels == torch.argmax(relabel, dim=1)).sum().float()/relabel.shape[0])\n relabel_acc.append((ori_labels == torch.argmax(relabel, dim=1)).sum().float()/relabel.shape[0])\n\n _trainer.train_loader.dataset.targets = relabel.clone()\n\n losses_t = torch.zeros(30, len(relabel)) #Average over the last 30 epochs\n\n for eps in range(args.epochs):\n t = _trainer.train(eps)\n\n #Track sample loss for each epoch for the last 30 epochs\n if eps >= args.epochs - 30:\n #Validation (Not used for anything)\n #v = _trainer.val(eps)\n\n losses, features =_trainer.track_loss(relabel, plot=True, feats=True)\n losses_t[eps%30] = losses\n\n relabel_a = torch.argmax(relabel, dim=1)\n \n # Loss ranking tracking\n ids = torch.argsort(losses, descending=False)\n ids_tf = ids[:int(1.*ids.shape[0]/4)] #top 25%\n ids_t = ids[:int(1.*ids.shape[0]/10)] #top 10%\n ids_to = ids[:int(1.*ids.shape[0]/100)] #top 1%\n\n print(\"Top 25%:\", (relabel_a[ids_tf] == ori_labels[ids_tf]).sum().float()/ids_tf.shape[0], (losses[ids_tf] * 1000).mean())\n print(\"Top 10%:\", (relabel_a[ids_t] == ori_labels[ids_t]).sum().float()/ids_t.shape[0], (losses[ids_t] * 1000).mean())\n print(\"Top 1%:\", (relabel_a[ids_to] == ori_labels[ids_to]).sum().float()/ids_to.shape[0], (losses[ids_to] * 1000).mean())\n\n #Loss average and normalisation\n losses = torch.mean(losses_t, dim=0)\n \n relabel = relabel_a\n \n #Final ranking\n ids = torch.argsort(losses, descending=False)\n ids = 
torch.cat((_trainer.indicies, ids)).view(-1).numpy() #adding the seed samples as having the lowest possible loss\n\n indexes = np.unique(ids, return_index=True)[1]\n ids = np.array([ids[index] for index in sorted(indexes)]).astype(np.int)\n\n #Number of samples per class\n \n idnx = np.zeros(args.num_class).astype(np.int)\n ids_class = np.zeros((args.num_class,args.boot_spc))\n \n for i, ii in enumerate(ids): #sorted by lowest loss, small loss trick, get bottom args.boot_spc samples per class\n c = relabel[ii]\n if idnx[c] < args.boot_spc:\n ids_class[c][idnx[c]] = ii\n idnx[c] += 1\n \n ids_class = ids_class.flatten().astype(np.int)\n\n #Accuracy of the bootstrapped reliable set\n print(\"Top {} per class:\".format(args.boot_spc), (relabel[ids_class] == ori_labels[ids_class]).sum().float()/ids_class.shape[0])\n\n #one hot\n relab = torch.zeros((len(_trainer.train_loader.dataset), args.num_class))\n for i, r in enumerate(relabel):\n relab[i][r] = 1\n\n per_class_acc = torch.zeros(args.num_class)\n for i in ids_class:\n if relabel[i] == ori_labels[i]:\n per_class_acc[relabel[i]] += 1\n \n print(\"Per class acc {}spc : \".format(args.boot_spc), per_class_acc / args.boot_spc)\n print(\"Mean class acc {}spc : \".format(args.boot_spc), torch.mean(per_class_acc / args.boot_spc))\n print(\"STD class acc {}spc : \".format(args.boot_spc), torch.std(per_class_acc / args.boot_spc))\n \n np.savez(os.path.join(args.save_dir, \"labels_seed{}_{}spc_{}\".format(args.seed, args.spc, args.dataset)), relab.numpy())\n np.savez(os.path.join(args.save_dir, \"subset_seed{}_{}spc_{}c_{}\".format(args.seed, args.spc, args.boot_spc, args.dataset)), ids_class)\n \n print(\"Stats: relabel accuracies\", relabel_acc)\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main_subset.py","file_name":"main_subset.py","file_ext":"py","file_size_in_byte":21015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"629431181","text":"import ast\nfrom http.server import BaseHTTPRequestHandler\nfrom VerilogCodeGenerators import VerilogFPGAAdder\n\n\ndef parse_bytes_to_dict(bytes_to_parse):\n \"\"\"Given a byte string, parse it into a dict.\"\"\"\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))\n\n\ndef generate_verilog_code(\n type_of_hardware_module,\n total_bits,\n inacc_bits,\n):\n \"\"\"Wrapper function to generate verilog code for the ASIC structural adder\n\n Generate relevant verilog code depending on the parameters given.\n\n Args:\n type_of_hardware_module (str): The type of hardware module.\n total_bits (int): The total number of bits.\n inacc_bits (int): The number of inaccurate bits.\n\n Returns:\n str: Generated verilog code.\n \"\"\"\n\n return VerilogFPGAAdder.generate_verilog_code(\n type_of_hardware_module,\n total_bits,\n inacc_bits,\n )\n\n\nclass handler(BaseHTTPRequestHandler):\n def do_POST(self):\n # Gets the size of data\n content_length = int(self.headers[\"Content-Length\"])\n\n # Gets the data itself\n req_body_dict = parse_bytes_to_dict(self.rfile.read(content_length))\n\n # Generate the Verilog code\n verilog_code = generate_verilog_code(\n req_body_dict[\"type_of_hardware_module\"],\n req_body_dict[\"total_bits\"],\n req_body_dict[\"inacc_bits\"],\n )\n\n # Sends a response\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(verilog_code.encode())\n\n 
return\n","sub_path":"api/verilog-generator/fpga-adder.py","file_name":"fpga-adder.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"646940130","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#

Introduction:

\n# This is my first kernel; I have attempted to understand which features contribute to the price of the houses.\n#
A shoutout to SRK and Anisotropic, from whom I've learned a lot about data visualisation
\n\n#

Let's import the libraries we need for now

\n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ncolor = sns.color_palette()\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\npd.options.mode.chained_assignment = None\npd.options.display.max_columns = 999\n\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\nimport plotly.tools as tls\n\n\n# \n#

Now we import the dataset

\n# \n\n# In[ ]:\n\n\ndata = pd.read_csv('../input/kc_house_data.csv')\n\n\n# In[ ]:\n\n\n# Let's check it out \ndata.head()\n\n\n#

Now let's check out how many NaN values there are

\n\n# In[ ]:\n\n\ndata.isnull().sum()\n\n\n# Wow! So we just don't need to bother about using an Imputer and handling the NaN values\n#
Now let's check out how the data actually is
\n\n# In[ ]:\n\n\nprint((data.info()))\nprint((\"**\"*40))\nprint((data.describe()))\n\n\n# Oops, we forgot to convert the date to datetime, let's get that done first\n\n# In[ ]:\n\n\ndata['date'] = pd.to_datetime(data['date'])\n# while I'm at it, let me create a year and month column too\ndata['year'], data['month'] = data['date'].dt.year, data['date'].dt.month\n\n\n# In[ ]:\n\n\n# As we have everything from the date column, let's simply remove it \ndel data['date']\n\n\n#

We have finished the preliminary data cleaning; now let's visualize the data and check for correlations that we can use

\n\n# The dataset includes latitude and longitude for each entry; let's plot them and see if specific areas sold more or fewer houses\n\n# In[ ]:\n\n\nplt.figure(figsize=(12,12))\nsns.jointplot( 'long','lat',data = data, size=9 , kind = \"hex\")\nplt.xlabel('Longitude', fontsize=10)\nplt.ylabel('Latitude', fontsize=10)\nplt.show()\n\n\n# As we guessed, there are some areas where many houses were sold\n#
\n#

Let's try out the Pearson correlation


\n# \n\n# In[ ]:\n\n\ndataa = [\n go.Heatmap(\n z= data.corr().values,\n x= data.columns.values,\n y= data.columns.values,\n colorscale='Viridis',\n text = True ,\n opacity = 1.0\n \n )\n]\n\nlayout = go.Layout(\n title='Pearson Correlation',\n xaxis = dict(ticks='', nticks=30),\n yaxis = dict(ticks='' ),\n width = 800, height = 600,\n \n)\n\nfig = go.Figure(data=dataa, layout=layout)\npy.iplot(fig, filename='Housedatacorr')\n\n\n# In the price col we can see some rows are so very close to zero, but lets not remove them from the data set as of yet, it may be useful for some ensemble process\n#
\n#

Now let's try out some tree and ensemble methods for a better understanding of the feature importances


\n\n# In[ ]:\n\n\n# the models we will run\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\n\n# some metrics to help us out\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import mean_squared_error as mse\n\n\n# lets remove the price col from data as we will need it now \n\n# In[ ]:\n\n\ntarget = data['price']\n# we dont need the price column in data anymore\ndel data['price']\n\n\n# In[ ]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=42)\n\n\n# Now im going to find feature_importances using various ensemble methods \n\n# In[ ]:\n\n\ndr = DecisionTreeRegressor()\ndr.fit(X_train,y_train)\ndrimp = dr.feature_importances_\n\n\n# In[ ]:\n\n\nrfr = RandomForestRegressor(n_estimators=100)\nrfr.fit(X_train,y_train)\nrfrimp = rfr.feature_importances_\n\n\n# In[ ]:\n\n\ngbr = GradientBoostingRegressor(n_estimators=100)\ngbr.fit(X_train,y_train)\ngbrimp = gbr.feature_importances_\n\n\n# In[ ]:\n\n\nabr = AdaBoostRegressor(n_estimators=100)\nabr.fit(X_train,y_train)\nabrimp = abr.feature_importances_\n\n\n# In[ ]:\n\n\netr = ExtraTreesRegressor(n_estimators=100)\netr.fit(X_train,y_train)\netrimp = etr.feature_importances_\n\n\n# lets create a data frame that has all these values \n# \n\n# In[ ]:\n\n\nd = {'Decision Tree':drimp, 'Random Forest':rfrimp, 'Gradient Boost':gbrimp,'Ada boost':abrimp, 'Extra Tree':etrimp}\n\n\n# In[ ]:\n\n\nfeatures = pd.DataFrame(data = d)\n# lets check out features\nfeatures.head()\n\n\n# One good way to check how important a feature is will be to calculate the mean from each method \n\n# In[ ]:\n\n\nfeatures['mean'] = features.mean(axis= 1) \n# we forgot to add the names of the features\nfeatures['names'] = data.columns.values\n\n\n# In[ ]:\n\n\n#lets check it out now \nfeatures.head()\n\n\n#

Now I'll plot a barplot to illustrate how the mean importance of each feature has fared

\n# \n\n# In[ ]:\n\n\ny = features['mean'].values\nx = features['names'].values\ndata = [go.Bar(\n x= x,\n y= y,\n width = 0.5,\n marker=dict(\n color = features['mean'].values,\n colorscale='Portland',\n showscale=True,\n reversescale = False\n ),\n opacity=0.6\n )]\n\nlayout= go.Layout(\n autosize= True,\n title= 'Mean Feature Importance',\n hovermode= 'closest',\n yaxis=dict(\n title= 'Feature Importance for Housing Price',\n ticklen= 5,\n gridwidth= 2\n ),\n showlegend= False\n)\nfig = go.Figure(data=data, layout=layout)\npy.iplot(fig, filename='barplothouse')\n\n\n#

Conclusion

\n#
We can see that there are two very prominent features, i.e. sqft_living and grade, which according to all the models are very useful for predicting the price of the house.
\n#
A close third is the latitude, which one can consider a proxy for the area where the house is located
\n#
We were also expecting the bathrooms feature and the sqft_above feature to rank highly in the barplot, as they were considered important according to the Pearson correlation.
\n#
But maybe the ensembles know better, or they have simply overfitted the data
\n#
I think there are a few more features that could be extracted from the dataset which might give more insight, in accordance with our intuitive and qualitative thinking
\n#
Hope you enjoyed it! Please give me some feedback below and upvote if you liked it!
\n","sub_path":"downloaded_kernels/house_sales/parsed_kernels/kernel_47.py","file_name":"kernel_47.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"70637843","text":"# Ejercicio 4.6: Generala servida\n# Queremos estimar la probabilidad de obtener una generala servida en una tirada de dados. Podemos \n# hacer la cuenta usando un poco de teoría de probabilidades, o podemos simular que tiramos los\n# dados muchas veces y ver cuántas de esas veces obtuvimos cinco dados iguales. En este ejercicio\n# vamos a usar el segundo camino.\n\n# Escribí una función tirar() que devuelva una lista con cinco dados generados aleatoriamente.\n# Escribí otra función llamada es_generala(tirada) que devuelve True si y sólo si\n# los cinco dados de la lista tirada son iguales.\n\n# Luego analizá el siguiente código. Correlo con N = 100000 varias veces y observá\n# los valores que obtenés. Luego correlo algunas veces con N = 1000000\n# (ojo, hace un millón de experimentos, podría tardar un poco):\nimport random\n\ndef tirar():\n results = []\n for _ in range(5):\n results.append(random.randint(1,6))\n return results\n\ndef es_generala(tirada):\n return len(set(tirada)) == 1\n\nN = 1000000\n\nG = sum([es_generala(tirar()) for i in range(N)])\n\nprob = G/N\n\nprint(f'Tiré {N} veces, de las cuales {G} saqué generala servida.')\n\nprint(f'Podemos estimar la probabilidad de sacar generala servida mediante {prob:.6f}.')\n\n\n# ¿Por qué varían más los resultados obtenidos con N = 100000 que con N = 1000000? ¿Cada cuántas tiradas en promedio podrías decir que sale una generala servida? ¿Cómo se puede calcular la probabilidad de forma exacta?\n\n","sub_path":"class4/4.6.py","file_name":"4.6.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"565742914","text":"import asyncio\nimport aiopg, aioredis\nimport datetime\nimport os\nimport psycopg2\nimport socket\nimport time\n\nfrom . 
import constants\n\ndb_pool = None\nredis_pool = None\ndsn = \"dbname=%s user=postgres password=%s host=%s\" % (os.environ[\"DB_NAME\"], os.environ[\"DB_PASSWORD\"], os.environ[\"DB_HOST\"])\n\nasync def connect_to_database():\n global db_pool\n if db_pool is None:\n while True:\n try:\n db_pool = await aiopg.create_pool(dsn)\n except psycopg2.OperationalError:\n continue\n break\n cursor = await db_pool.cursor()\n return cursor\n\nasync def connect_to_redis():\n global redis_pool\n if redis_pool is None:\n redis_pool = await aioredis.create_redis_pool(\"redis://redis\", encoding = \"utf-8\")\n return redis_pool\n\nasync def _initialize_db():\n with (await connect_to_database()) as cur:\n with open(\"wbserver/schema.sql\", \"r\") as f:\n await cur.execute(\"BEGIN;\\n%s\\nCOMMIT;\" % f.read())\n print(\"Created database schema.\", flush = True)\n\n redis = await connect_to_redis()\n await redis.set(\"last-modified\", datetime.datetime.utcnow().strftime(constants.HTTP_TIME_FORMATSTR))\n\ndef initialize_db():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while True:\n try:\n s.connect((os.environ[\"DB_HOST\"], 5432))\n s.close()\n break\n except socket.error:\n time.sleep(1)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(_initialize_db())\n","sub_path":"wbserver/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211535996","text":"import sys\nimport random\n\ndef readFile(fileway):\n with open(fileway) as file:\n words = file.read().split(\"\\n\")\n return words\n\ndef randomWords(selectable_words, repeate_words):\n while True:\n word = random.choice(selectable_words)\n if word in repeate_words:\n continue\n else:\n repeate_words.append(word)\n return word\n\nparticles = [\"a\",\"the\",\"her\",\"his\",\"another\",\"the other\",\"my\",\"our\",\"mine\",\"their\"]\nnouns = [\"cat\",\"dog\",\"man\",\"woman\",\"boy\",\"girl\",\"granny\",\"wife\",\"boss\",\"horse\",\"mate\",\"daddy\",\"friend\",\"squirrel\"]\nverbs = [\"sang\",\"ran\",\"jumped\",\"heard\",\"answered\",\"went\",\"told\",\"hoped\",\"felt\",\"slept\",\"hopped\",\"cried\",\"laughed\",\"walked\",\"dug\",\"came\"]\nadverbs = [\"loudly\",\"quietly\",\"well\",\"badly\",\"slowly\",\"politely\",\"rudely\",\"indeed\",\"instead\",\"rarely\",\"recently\"]\n\nrows = 6\nfile_nouns = []\nfile_verbs = []\nfile_adjectives = []\nrepeate_words = []\n\ntry:\n rows = int(sys.argv[1])\n print(\"Число строк:\", rows)\nexcept (IndexError, ValueError):\n print(\"Неверный параметр строк\")\ntry:\n file_nouns = readFile(sys.argv[2])\nexcept (OSError, IndexError) as error:\n print(\"Список сущ. не загружен\\nError: \", error)\ntry:\n file_verbs = readFile(sys.argv[3])\nexcept (OSError, IndexError) as error:\n print(\"Список гл. не загружен\\nError: \", error)\ntry:\n file_adjectives = readFile(sys.argv[4])\nexcept (OSError, IndexError) as error:\n print(\"Список прил. 
не загружен\\nError: \", error)\n\nnouns += file_nouns\nverbs += file_verbs\n\nfor i in range(rows):\n if i % 6 == 0:\n repeate_words = []\n if i != 0:\n print(\"--------------------------------\")\n particle = randomWords(particles, repeate_words)\n noun = randomWords(nouns, repeate_words)\n verb = randomWords(verbs, repeate_words)\n if random.randint(0,1) == 0:\n if file_adjectives:\n if random.randint(0, 2) < 2:\n adjectives = randomWords(file_adjectives, repeate_words)\n print(particle, adjectives, noun, verb)\n else:\n print(particle, noun, verb)\n else:\n print(particle, noun, verb)\n else:\n adverb = randomWords(adverbs, repeate_words)\n print(particle, noun, verb, adverb)\n","sub_path":"awful_poetry.py","file_name":"awful_poetry.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"341855216","text":"import numpy as np\nfrom theano.compat.python2x import OrderedDict\nfrom kdl_template import *\n\n# random state so script is deterministic\nrandom_state = np.random.RandomState(1999)\n# home of the computational graph\ngraph = OrderedDict()\n\n# minibatch size\nminibatch_size = 20\n# number of input units\nn_in = 5\n# number of hidden units\nn_hid = 10\n# number of output units\nn_out = 5\n\n# Generate sinewaves offset in phase\nn_timesteps = 50\nd1 = 3 * np.arange(n_timesteps) / (2 * np.pi)\nd2 = 3 * np.arange(n_in) / (2 * np.pi)\nall_sines = np.sin(np.array([d1] * n_in).T + d2)\nall_sines = all_sines[:, None, :]\nall_sines = np.concatenate([all_sines] * minibatch_size, axis=1)\n\n\n# Setup dataset and initial hidden vector of zeros\nX = all_sines[:-1].astype(theano.config.floatX)\ny = all_sines[1:].astype(theano.config.floatX)\nX_mask = np.ones_like(X[:, :, 0])\ny_mask = np.ones_like(y[:, :, 0])\n\n# input (where first dimension is time)\ndatasets_list = [X, X_mask, y, y_mask]\nX_sym, X_mask_sym, y_sym, y_mask_sym = add_datasets_to_graph(\n datasets_list, [\"X\", \"X_mask\", \"y\", \"y_mask\"], graph,\n list_of_test_values=datasets_list)\n\n# Setup weights\nproj_X = linear_layer([X_sym], graph, 'l1_proj', n_hid, random_state)\n\nh = easy_gru_recurrent([proj_X], X_mask_sym, n_hid, graph, 'l1_rec',\n random_state)\n\n# linear output activation\ny_hat = linear_layer([h], graph, 'l2_proj', n_out, random_state)\n\n# error between output and target\ncost = squared_error_nll(y_hat, y_sym)\ncost = masked_cost(cost, y_mask_sym).mean()\n# Parameters of the model\nparams, grads = get_params_and_grads(graph, cost)\n\n# Use stochastic gradient descent to optimize\nopt = sgd(params)\nlearning_rate = 0.001\nupdates = opt.updates(params, grads, learning_rate)\n\n# By returning h we can train while preserving hidden state from previous\n# samples. 
This can allow for truncated backprop through time (TBPTT)!\nfit_function = theano.function([X_sym, X_mask_sym, y_sym, y_mask_sym], [cost],\n updates=updates)\n\n\ndef status_func(status_number, epoch_number, epoch_results):\n print_status_func(epoch_results)\n\nepoch_results = iterate_function(fit_function, [X, X_mask, y, y_mask],\n minibatch_size,\n list_of_output_names=[\"cost\"],\n n_epochs=2000,\n status_func=status_func,\n shuffle=True,\n random_state=random_state)\n","sub_path":"tutorial_code/regression_rnn/easy_gru_regression_rnn.py","file_name":"easy_gru_regression_rnn.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"28599076","text":"import django_filters\nimport graphene\nfrom graphene_django import DjangoObjectType\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom jobs.models import Job\n\nclass JobFilter(django_filters.FilterSet):\n class Meta:\n model = Job\n fields = ['name', 'per_meter', 'job_group']\n\n\nclass JobNode(DjangoObjectType):\n class Meta:\n model = Job\n interfaces = (graphene.relay.Node, )\n\n\n###########################################################################\n# ______ __ __ ______ ______ __ ______ ______ # \n# /\\ __ \\ /\\ \\/\\ \\ /\\ ___\\ /\\ == \\ /\\ \\ /\\ ___\\ /\\ ___\\ # \n# \\ \\ \\/\\_\\ \\ \\ \\_\\ \\ \\ \\ __\\ \\ \\ __< \\ \\ \\ \\ \\ __\\ \\ \\___ \\ # \n# \\ \\___\\_\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\_\\ \\ \\_\\ \\ \\_____\\ \\/\\_____\\#\n# \\/___/_/ \\/_____/ \\/_____/ \\/_/ /_/ \\/_/ \\/_____/ \\/_____/#\n###########################################################################\n\nclass Query(graphene.ObjectType):\n job = DjangoFilterConnectionField(\n JobNode,\n filterset_class=JobFilter,\n )\n\n\n################################################################################################\n# __ __ __ __ ______ ______ ______ __ ______ __ __ ______ # \n# /\\ \"-./ \\ /\\ \\/\\ \\ /\\__ _\\ /\\ __ \\ /\\__ _\\ /\\ \\ /\\ __ \\ /\\ \"-.\\ \\ /\\ ___\\ # \n# \\ \\ \\-./\\ \\ \\ \\ \\_\\ \\ \\/_/\\ \\/ \\ \\ __ \\ \\/_/\\ \\/ \\ \\ \\ \\ \\ \\/\\ \\ \\ \\ \\-. 
\\ \\ \\___ \\ # \n# \\ \\_\\ \\ \\_\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\_\\ \\ \\_\\ \\ \\_\\ \\ \\_____\\ \\ \\_\\\\\"\\_\\ \\/\\_____\\# \n# \\/_/ \\/_/ \\/_____/ \\/_/ \\/_/\\/_/ \\/_/ \\/_/ \\/_____/ \\/_/ \\/_/ \\/_____/# \n################################################################################################\n\nclass CreateJob(graphene.relay.ClientIDMutation):\n job = graphene.Field(JobNode)\n\n class Input:\n name = graphene.String(\n description='Name of job!',\n required=True,\n )\n per_meter = graphene.Boolean(\n description='This job can be charged per meter!',\n required=True,\n )\n value_per_meter = graphene.Float(\n description='If can be charged per meter, what is the price of the meter?'\n )\n job_group = graphene.String(\n description='Group of job',\n required=True,\n )\n\n def mutate_and_get_payload(root, info, **_input): # pylint: disable=no-self-argument\n job = Job(\n name=_input.get('name'),\n per_meter=_input.get('per_meter'),\n value_per_meter=_input.get('value_per_meter'),\n job_group=_input.get('job_group'),\n )\n job.save()\n\n return CreateJob(job=job)\n\n\nclass UpdateJob(graphene.relay.ClientIDMutation):\n job = graphene.Field(JobNode)\n\n class Input:\n id = graphene.ID(\n descripition='Job Id',\n required=True,\n )\n name = graphene.String(\n description='Job Title'\n )\n per_meter = graphene.Boolean(\n description='This job can be charged per meter!'\n )\n value_per_meter = graphene.Float(\n description='If can be charged per meter, what is the price of the meter?'\n )\n job_group = graphene.String(\n description='Group of job'\n )\n \n def mutate_and_get_payload(root, info, **_input): # pylint: disable=no-self-argument\n _id = _input.get('id')\n\n if not _id:\n raise Exception('Id is required!')\n \n per_meter = _input.get('per_meter')\n\n if not per_meter:\n raise Exception('Per meter is required!')\n\n jobs = Job.objects.get(pk=_id) # pylint: disable=no-member\n name = _input.get('name')\n \n if not name:\n name = jobs.name\n \n value_per_meter = _input.get('value_per_meter')\n\n if value_per_meter:\n if per_meter == False:\n raise Exception('Not necessary | per_meter:false')\n elif not value_per_meter:\n value_per_meter = jobs.value_per_meter\n\n job_group = _input.get('job_group')\n\n if not job_group:\n job_group = jobs.job_group\n \n job = Job(\n id=_id,\n name=name,\n per_meter=per_meter,\n value_per_meter=value_per_meter,\n job_group=job_group,\n )\n job.save()\n\n return UpdateJob(job=job)\n\n\nclass Mutation(graphene.AbstractType):\n create_job = CreateJob.Field()\n update_job = UpdateJob.Field()\n","sub_path":"jobs/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"118165121","text":"\n\nfrom xai.brain.wordbase.nouns._chowder import _CHOWDER\n\n#calss header\nclass _CHOWDERS(_CHOWDER, ):\n\tdef __init__(self,): \n\t\t_CHOWDER.__init__(self)\n\t\tself.name = \"CHOWDERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"chowder\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_chowders.py","file_name":"_chowders.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"15526491","text":"from ase import Atoms\nfrom gpaw import GPAW, PW, FermiDirac\nfrom gpaw.response.df import DielectricFunction\nfrom gpaw.bztools import find_high_symmetry_monkhorst_pack\nfrom gpaw.mpi import world\nimport numpy as 
np\nfrom ase.parallel import paropen\nfrom ase.io import read\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.use('Agg')\nimport os\n\natoms = read('../relax/POSCAR')\ndef calc_scf(encut=600,k=10,width=0.01):\n calc = GPAW(\n mode=PW(encut),\n xc='LDA',\n kpts={'density':k,'gamma':True},\n random=True,\n occupations=FermiDirac(width),\n txt='gs.out'\n )\n return calc\n\ndef gs_response(calc,k=30,nbands=90,width=0.001):\n# kpts = find_high_symmetry_monkhorst_pack(calc,density=k, pbc=True)\n responseGS = calc.fixed_density(\n# kpts=kpts,\n kpts={'density':k},\n parallel={'band':1},\n nbands=nbands,\n occupations=FermiDirac(width),\n convergence={'bands':60},\n txt='gsresponse.out'\n )\n return responseGS\n\ndef eels_calc(calc, direction='M', eta=25e-3, pbc=True, ecut=100, domega0=0.01, xc='RPA'):\n f = paropen('%s/%s/q_list.dat' % (xc, direction),'w')\n Q={'M':42, 'K':42, 'A':52}\n for i in range(0,3*int(Q[direction])+1,2):\n if os.path.isfile('%s/%s/EELS_%d' % (xc, direction, i)): \n continue\n df = DielectricFunction(calc,eta=eta,pbc=pbc,ecut=ecut,\n domega0=domega0,\n # integrationmode='tetrahedron_integration',\n txt='%s/%s/df_%d.out' % (xc, direction, i)) \n if direction == 'M':\n q_c = np.array([i/42.0, 0.0, 0.0])\n elif direction == 'K':\n q_c = np.array([i/42.0, i/42.0, 0.0])\n elif direction == 'A':\n q_c = np.array([0.0, 0.0, i/52.0])\n df.get_dielectric_function(q_c=q_c,xc=xc, filename='%s/%s/epsilon_%d.csv' % (xc, direction, i))\n df.get_eels_spectrum(q_c=q_c,xc=xc, filename='%s/%s/EELS_%d' % (xc, direction, i))\n\n # Calculate cartesian momentum vector:\n cell_cv = atoms.cell\n bcell_cv = 2*np.pi*np.linalg.inv(cell_cv).T\n q_v = np.dot(q_c, bcell_cv)\n print(np.sqrt(np.inner(q_v, q_v)),file=f)\n f.close()\n\ndef main():\n# calc = calc_scf()\n# atoms.calc = calc\n# atoms.get_potential_energy()\n# responseGS = gs_response(calc)\n# responseGS.write('gsresponse.gpw','all')\n \n for xc in ['RPA', 'ALDA']:\n for direction in ['A', 'M', 'K']:\n if not os.path.isdir('%s/%s' %(xc, direction)):\n os.system('mkdir -p %s/%s' % (xc, direction))\n eels_calc(calc='/opt/gpw_files/C8Li/gsresponse.gpw', xc=xc, direction=direction)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"C8Li/eels/eels_calc.py","file_name":"eels_calc.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"424494488","text":"from sklearn.feature_extraction.text import TfidfTransformer\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn import model_selection\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import VotingClassifier\r\n\r\n#import dataset\r\nurl = (\"C:\\\\Users\\\\sidharth.m\\\\Desktop\\\\Project_sid_35352\\\\Tweets.csv\")\r\ndocuments = pd.read_csv(url)\r\n\r\narray = documents.values\r\n#choose tweet column\r\nx = array[0:, 10]\r\n#print(x)\r\ny= array[0:, 1]\r\n\r\ncount_vect = CountVectorizer()\r\nX_train_counts = count_vect.fit_transform(x)\r\n#print(X_train_counts.shape)\r\n\r\n\r\ntfidf_transformer = TfidfTransformer()\r\nX_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\r\n#print(X_train_tfidf.shape)\r\n\r\nseed = 7\r\nkfold = model_selection.KFold(n_splits=10, random_state=seed)\r\n# create the sub models\r\nestimators = []\r\nmodel1 = LogisticRegression()\r\nestimators.append(('logistic', model1))\r\nmodel2 = 
DecisionTreeClassifier()\r\nestimators.append(('cart', model2))\r\nmodel3 = SVC()\r\nestimators.append(('svm', model3))\r\n# create the ensemble model\r\nensemble = VotingClassifier(estimators).fit(X_train_tfidf, y)\r\nresults = model_selection.cross_val_score(ensemble, X_train_tfidf, y, cv=kfold)\r\nprint(results.mean())\r\n\r\nurl1 = (\"C:\\\\Users\\\\sidharth.m\\\\Desktop\\\\Project_sid_35352\\\\outputkrithika.csv\")\r\ndocuments1 = pd.read_csv(url1)\r\narray1 = documents1.values\r\n#choose tweet column\r\n#x1 = array1[0:, 2]\r\nx2= (documents1['tweet']).astype(str)\r\n\r\nX_test = count_vect.transform(x2)\r\n#print(X_test.shape)\r\n\r\ntest = tfidf_transformer.transform(X_test)\r\n#print(test.shape)\r\n\r\npredicted = ensemble.predict(test)\r\nprint(predicted)\r\n","sub_path":"txtclassfusingvoting.py","file_name":"txtclassfusingvoting.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"485435742","text":"import pickle\nimport numpy as np\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom numpy import asarray\nfrom monsite.settings import *\n#format=1001#format de test\nformat=1001\n\nif format ==0:\n hauteur=201\n largeur=201\nelif format==4:\n hauteur=3508\n largeur=2480\nelif format==3:\n hauteur=4961\n largeur=3508\nelif format==2:\n hauteur=7016\n largeur=4961\nelse :\n hauteur=1001\n largeur=1001\n\nxcentre=round(hauteur/2)\nycentre=round(largeur/2)\n\npourcentmarge=0.98\nunitepixel=round(pourcentmarge*largeur/2)\n\nxmin=int(xcentre-unitepixel)\nxmax=int(xcentre+unitepixel)\nymin=int(ycentre-unitepixel)\nymax=int(ycentre+unitepixel)\n\nFichier = open(STATIC_ROOT+'imago/imago1001_it7bis.txt','rb')\n#Fichier = open('pavage_de_base/programmes_calcul/imago_1001.txt','rb')\n#Fichier = open('pavage_de_base/programmes_calcul/imago_essai1001.txt','rb')\n#Fichier = open('pavage_de_base/programmes_calcul/imago_A3_it7.txt','rb')\nimago = pickle.load(Fichier)\n# désérialisation\nFichier.close()\n\n#resultat=np.zeros((largeur, hauteur, 3), dtype=np.uint8)\n\n#img1 = np.zeros((hauteur, largeur, 3), dtype=np.uint8)\n#imago=np.zeros((hauteur, largeur,2))\n\n# for i in range(xmin,xmax+1):\n# for j in range(ymin,ymax+1):\n# if (i-xcentre)**2+(j-ycentre)**2'*: Searches Allrecipes.com for the best recipes based on your query. :mag:\r\n:black_small_square: *'get recipe'*: Run this after the 'recipe' or 'recipe ' command to fetch your recipes! :stew:\r\n:black_small_square: *'statistics '*: Show the latest COVID19 statistics for each country. :earth_americas:\r\n:black_small_square: *'statistics '*: Show the latest COVID19 statistics for all countries starting with that prefix. 
:globe_with_meridians:\r\n\"\"\", use_aliases=True)\r\n msg.body(response)\r\n responded = True\r\n\r\n import random\r\n # While loop to run the chatbot indefinetely\r\n while (True): \r\n \r\n # Takes the user input and converts all characters to lowercase\r\n #user_input = input().lower()\r\n #i think i would be able to put tokenize and lemmatizer here by user input\r\n \r\n # Defining the Chatbot's exit condition, can a synonym type list be used to add more quit 'words'?\r\n if incoming_msg == 'a':#change back 'a' to 'quit' when finished \r\n def_res = print (\"Thank you for visiting.\")\r\n responded = True\r\n break \r\n \r\n for intent,pattern in keywords_dict.items(): #pattern acts as an object because it was created by re.compile(str)\r\n \r\n #will have toc reate bigrams here and add to keywords_dict.items() or create a new dictionary\r\n \r\n # Using the regular expression search function to look for keywords in user input\r\n if re.search(pattern, incoming_msg): \r\n \r\n # if a keyword matches, select the corresponding intent from the keywords_dict dictionary\r\n matched_intent=intent \r\n \r\n # The fallback intent is selected by default\r\n key='fallback' \r\n if matched_intent in responses:\r\n \r\n # If a keyword matches, the fallback intent is replaced by the matched intent as the key for the responses dictionary\r\n key = matched_intent \r\n \r\n # The chatbot prints the response that matches the selected intent\r\n please_work = print (responses[key]) #random.choice will allow for random responses by the chatbot\r\n #the random.choice function is return random LETTERS from the responses instead of the responses, hence it was removed temporarily \r\n \r\n responded = True\r\n \r\n if not responded:\r\n msg.body(\"Sorry, I don't understand. Send 'hello' for a list of commands.\")\r\n\r\n return HttpResponse(str(resp))\r\n","sub_path":"bot_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"60590437","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef home_view(request):\n name = 'Bob'\n context = {'name':'Dave'}\n html = \"\"\"\n

Hello {}!

\n \"\"\".format(name)\n return render(request, 'home.html', context)\n\n","sub_path":"project1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"560602440","text":"from django.shortcuts import render\nfrom .models import Reservation, Room, Lecture\nfrom django.utils import timezone\nfrom datetime import datetime, timedelta, date\n\n\ndef building_list(request):\n return render(request, \"Building_list/building_list.html\")\n\n\ndef option_check(request):\n return render(request, \"Building_list/option_check.html\")\n\n\ndef room_list(request):\n if request.method == 'POST':\n day_of_the_week = request.POST['day_of_the_week']\n floors = request.POST['floor']\n\n rooms = Room.objects.all()\n lectures = Lecture.objects.all()\n\n room_dict = {}\n\n for i in range(0, rooms.__len__()):\n temp = rooms[i]\n if ((temp.day_of_the_week == day_of_the_week) and (temp.floor == floors)) or (\n (temp.day_of_the_week == day_of_the_week) and ('all' == floors)):\n room_dict[temp] = i\n flag = 'true'\n for j in range(0, lectures.__len__()):\n temp2 = lectures[j]\n if (temp2.room == temp) and temp2.created_string() != 'false':\n if flag == 'true':\n temp.available_time = temp2.start_time\n temp.name = temp2.name\n temp.save()\n flag = 'false'\n else:\n time = datetime.combine(date.min, temp.available_time) - datetime.combine(date.min,\n temp2.start_time)\n if time > timedelta(minutes=1):\n temp.available_time = temp2.start_time\n temp.name = temp2.name\n temp.save()\n\n return render(request, \"Building_list/room_list.html\", {\"rooms\": room_dict})\n\n\ndef room_detail(request, room_id):\n # lectures = Lecture.objects.all()\n # lecture_dict = {}\n # for i in range(0, lectures.__len__()):\n # tmp = lectures[i]\n # if tmp.room == Room.objects.get(id=room_id):\n # lecture_dict[tmp] = i\n room = Room.objects.get(id=room_id)\n return render(request, \"Building_list/room_detail.html\", {\"room\": room})\n","sub_path":"Building_list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"138963667","text":"from django.contrib.auth import login, logout,authenticate\nfrom django.contrib.auth.decorators import permission_required, login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer\nfrom rest_framework import status\nfrom rest_framework import viewsets, permissions\nfrom . 
import serializers\nfrom django.core.exceptions import PermissionDenied\n# from django.views.generic import TemplateView\nfrom django.views.generic.base import TemplateResponseMixin, ContextMixin\nfrom django.views import View\nfrom django.template import RequestContext\n# from django.template.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib import messages\nfrom django.views.generic import CreateView\nfrom .models import User, BursaryApplicant, BursaryCommitteeAdmin, SubmittedApplicationsModel, NewApplicationModel, TrialModel\nfrom .form import ApplicantSignUpForm, BursaryAdminSignUpForm, NewApplicationForm, TrialApplicationForm\nfrom .serializers import SubmittedApplicationsModelSerializer, BursaryApplicantSerializer, NewApplicationSerializer, UserSerializer, TrialModelSerializer\nfrom django.views.decorators.csrf import csrf_exempt\n\n# ########################################## machine learning model ######################################\nimport joblib\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import accuracy_score\n\n\n\n\n\ndef get_predictions( number_of_siblings, total_fee, paid, outstanding_balance, father_gross_income, mother_gross_income, guardian_gross_income):\n cls = joblib.load('deployed_model.sav')\n prediction = cls.predict( number_of_siblings, total_fee, paid, outstanding_balance, father_gross_income, mother_gross_income, guardian_gross_income)\n\n if prediction == 0:\n return \"not awarded\"\n elif prediction == 1:\n return \"AWARDED\"\n else:\n return \"ERROR\"\n\ndef approve_reject(unit):\n # try:\n ml_model = joblib.load('ibursary_fundmodel.sav')\n scaler = joblib.load('scalers.pkl')\n # mydata = request.data\n # unit = np.array(list(mydata.values()))\n\n sc = MinMaxScaler()\n # X_train = sc_x.fit_transform(X_train)\n X_test = scaler.transform(unit)\n\n # df_test = pd.DataFrame(unit)\n # b = sc_x.fit_transform(df_test)\n # X = sc_x.transform(unit)\n\n\n # scalers = joblib.load('scalers.pkl') \n # X = scalers.transform(unit)\n y_pred = ml_model.predict(X_test)\n # y_pred = (y_pred>0.58)\n newdf = pd.DataFrame(y_pred, columns=['Application_Status'])\n newdf=newdf.replace({1:'Approve application', 0:'Reject application'})\n # return ('Application status: {}'.format(newdf))\n return(newdf)\n # except ValueError as e:\n # return HttpResponse(e.args[0], status.HTTP_400_BAD_REQUEST)\n\ndef ohevalue(df):\n\n ohe_col = ['disability_0.0', 'disability_1.0','employed_No', 'employed_Yes','gender_Female', 'gender_Male','outstanding_balance_log','number_of_siblings', 'school_type', 'paid', 'father_gross_income',\n 'outstanding_balance', 'total_fee', \n 'benefitted_No', 'benefitted_Yes',\n \n 'status_both_parents_alive', 'status_partial_orphan',\n 'status_total_orphan']\n cat_columns=['gender','school_type', 'benefitted','disability','status']\n df_processed = pd.get_dummies(df, prefix_sep=\"_\", columns=cat_columns)\n newdict={}\n for i in ohe_col:\n if i in df_processed.columns:\n newdict[i]=df_processed[i].values\n else:\n newdict[i]=0\n newdf=pd.DataFrame(newdict, index=[0])\n return newdf\n\ndef mresult(request):\n\n if request.method == 'POST':\n form=TrialApplicationForm(request.POST)\n if form.is_valid():\n # convert form data into a dictionary\n myDict = (request.POST).dict()\n # read the dict as a dataframe\n df = pd.DataFrame(myDict, index=[0])\n 
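# Added comment: ohevalue() one-hot encodes the categorical form fields (gender, school type,
# benefitted, disability, status) and aligns them with the columns the model was trained on,
# while approve_reject() scales the encoded features and runs the pre-trained classifier to
# produce the "Approve application" / "Reject application" recommendation used below.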
print(approve_reject(ohevalue(df)))\n print(\"Hot Encoded\", ohevalue(df))\n print(\"Raw values\", df)\n \n answer = approve_reject(ohevalue(df))\n \n Xscalers = approve_reject(ohevalue(df))\n messages.success(request, 'Recommendation: {}'.format(answer))\n \n form = TrialApplicationForm()\n return render(request, '../templates/bursary_application_form.html', {'form': form})\n \n \n\n\n\n\n\n\n\n \n\n\n\n# result = get_predictions(number_of_siblings, total_fee, paid, outstanding_balance, father_gross_income, mother_gross_income, guardian_gross_income)\n \n \n\n # result = cls.predict([indv])\n # for i in result:\n # if result == 1:\n # print (\"AWARDED 10000\")\n # elif result == 2:\n # print(\"AWARDED 5000\")\n # elif result == 3:\n # print(\"AWARDED 5000\")\n # elif result == 4:\n # print(\"AWARDED 5000\")\n # else:\n # print(\"You are not eligible for a bursary at this time\")\n\n\n # print(indv)\n # print(\"The predicted class is: \", result)\n\n\n # return JsonResponse({'message': 'result successful'})\n \n\n\n# Create your views here.\n# def apply_for_bursary(request):\n# return render(request, '../templates/bursary_application_form.html')\n\ndef register(request):\n return render(request, '../templates/applicant_registration.html')\n\n# a view that displays a form for creating the object then saves the object\n# @csrf_exempt\nclass applicant_register(CreateView):\n model = User\n # form name from form.py\n form_class = ApplicantSignUpForm\n template_name = '../templates/student_registration.html'\n\n # send an email verification to a new user's email address\n\n # redirect to login page after successfully saving a new user(applicant) to database \n def form_valid(self, form):\n user=form.save()\n login(self.request, user)\n return redirect('/accounts/login/')\n\n\n\ndef landing_page_view(request):\n # template_name = \"../templates/landing_page.html\"\n return render(request, \"../templates/landing_page.html\")\n\n# restrict access to any page on the admin dashboard if not logged in\nclass AdminDashboardView(TemplateResponseMixin, ContextMixin, View):\n template_name = \"../templates/home.html\"\n\n def get(self, request, **kwargs):\n try:\n current_user = request.user\n if current_user.is_bursarycommitteeadmin == True and current_user.is_authenticated and (bool(request.path_info == '/') or bool(request.path_info == '/accounts/login/')):\n return redirect(\"/bc-admin/\")\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n except:\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n#############################################################################\n# restrict access to any page on the student dashboard if not logged in\nclass StudentDashboardView(TemplateResponseMixin, ContextMixin, View):\n # template containing the frontend app\n template_name=\"../templates/home.html\"\n\n def get(self, request, **kwargs):\n # if request.user.is_authenticated == True and bool(request.path_info == '/accounts/login/'):\n # print('user is deddoh')\n # return redirect('/st-dashboard/')\n # context = self.get_context_data(**kwargs)\n # # deny permission if user is not a bursary applicant \n # # raise PermissionDenied\n # print('error retrieving user')\n # return self.render_to_response(context)\n # ################################### not working #################\n try:\n current_user = request.user\n if current_user.is_bursarycommitteeadmin == False and current_user.is_authenticated() and (bool(request.path_info == 
'/') or bool(request.path_info == '/accounts/login/')):\n print('hey!')\n return redirect('/st-dashboard/')\n context = self.get_context_data(**kwargs)\n print('No Heyyy!')\n return self.render_to_response(context)\n\n except:\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\ndef trial_form(request):\n return render(request, '../templates/bursary_application_form.html')\n\ndef TrialVie(request):\n \n if request.method == 'POST':\n form = TrialApplicationForm(request.POST, request.FILES)\n if form.is_valid():\n entry = form.save(commit=False)\n entry.institution = request.POST['institution']\n entry.reg = request.POST['reg']\n # entry.chief = request.POST['chief']\n form.save()\n print(\"saved\")\n else:\n form = TrialApplicationForm()\n print(\"error submitting form\")\n return render(request, 'home.html', locals())\n\nclass TrialView(CreateView):\n model = TrialModel\n form_class = TrialApplicationForm\n template_name = '../templates/bursary_application_form.html'\n\n #Redirect to this page in order to review the form data before submission \n def form_valid(self, form):\n print('redirected to another page')\n return render(self.request, '../templates/applicant_registration.html', self.get_context_data())\n\n def form_invalid(self, form):\n\n return JsonResponse({\"status\": \"false\", \"message\": form.errors})\n \n \n \n # def form_valid(request, self, form):\n # post = form.save(commit=False)\n # post.save()\n # print(\"success\")\n # return redirect('/accounts/registert')\n\n\n\n\n # return redirect('../templates/home.html') \n # if request.method == 'POST':\n # form = TrialApplicationForm(request.POST, request.FILE)\n # # if form.is_valid()://////\n # institution = form.cleaned_data.get('institution')\n # reg = form.cleaned_data.get('reg')\n # chief = form.cleaned_data.get('chief')\n # form.save()\n # else:\n # form = TrialApplicationForm()\n # return render(request, 'home.html', {'form':form})\n\n#save reviewed bursary application form data\ndef bursary_form_application(request):\n if request.method == 'POST':\n form = TrialApplicationForm(request.POST, request.FILES)\n if form.is_valid():\n school = form.cleaned_data.get('institution')\n adm = form.cleaned_data.get('reg')\n\n print(adm)\n \n db_schools = TrialModel.objects.all().filter(institution=school).exists()\n db_adms = TrialModel.objects.all().filter(reg=adm).exists()\n print(\"Reg exists: \", db_adms)\n\n # restrict applications to only one instance per school\n # if db_schools:\n if db_adms:\n messages.warning(request, \"An application with that Registration Number already exists! Only one application instance is allowed!\")\n \n # return HttpResponseRedirect('../../st-dashboard')\n # show this error message\n return JsonResponse({\"status\": \"Application Failed!\", \"message\": \"An application with that Registration Number already exists! 
Only one application instance is allowed!\"})\n else: \n\n post = form.save(commit=False)\n post.user = request.user\n myDict = (request.POST).dict()\n df = pd.DataFrame(myDict, index=[0])\n # ml response\n answer = approve_reject(ohevalue(df))\n print(ohevalue(df))\n # print(df)\n # res = messages.success(request, 'Recommendation: {}'.format(answer))\n\n\n # save the ml response to database, in the Model_Recommendation field\n post.Model_Recommendation = answer\n\n # record exists exception\n # duplicate = TrialModel.objects.filter(user = request)\n\n \n # save the form to the database\n post.save()\n\n # redirect to the \n return HttpResponseRedirect('../../st-dashboard')\n # return JsonResponse({\"status\": \"success\", \"message\": \"Success\"})\n\n \n\n@csrf_exempt\ndef login_request(request):\n\n if request.method=='POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None :\n login(request,user)\n # if credentials match an applicant, redirect to student dashboard\n if user.is_bursaryapplicant == True :\n print(user)\n return redirect('/st-dashboard')\n # if credentials match a bursary staff member, redirect to bursary admin dashboard\n if user.is_bursarycommitteeadmin == True or user.is_superuser == True:\n return redirect('/bc-admin')\n \n else:\n messages.error(request,\"Invalid USERNAME or PASSWORD\")\n else:\n messages.error(request,\"Invalid USERNAME or password\")\n return render(request, '../templates/login.html',\n context={'form':AuthenticationForm()})\n\n# when you log out, be redirected to the login page\n@csrf_exempt\ndef logout_view(request):\n logout(request)\n return redirect('/accounts/login/')\n\n\nclass SubmittedApplicationsViewSet(viewsets.ModelViewSet):\n queryset = NewApplicationModel.objects.all()\n serializer_class = serializers.NewApplicationSerializer\n\n def perform_create(self, serializer):\n serializers.save(user=self.request.user)\n\n\n\n##################################################################################################################################\n# @login_required\n@csrf_exempt\ndef new_application_submission(request):\n if request.method == 'POST':\n form = NewApplicationForm(request.POST or None)\n\n if form.is_valid():\n bursary = form.save(commit=False)\n bursary.user = request.user\n bursary.save()\n # return redirect('/accounts/login/')\n return JsonResponse({\"status\": \"success\", \"message\": \"Success\"})\n \n # return HttpResponseRedirect('/bc-admin/')\n else:\n form = NewApplicationForm()\n c = {'form': form}\n # c.update(csrf(request))\n return render(request, '../templates/home.html', c)\n #######################################################################################################################################\n\n \n # data = NewApplicationModel.objects.all()\n # sr = NewApplicationSerializer(data = request.POST)\n \n # if sr.is_valid():\n # family_type = request.POST.get('family_type')\n # guardian_for_orphan = request.POST.get('guardian_for_orphan')\n # wishers_for_orphan = request.POST.get('wishers_for_orphan')\n # other_for_orphan = request.POST.get('other_for_orphan')\n # other_orphan_text = request.POST.get('other_orphan_text')\n # fund_beneficiary_before = request.POST.get('fund_beneficiary_before')\n # fund_amount = request.POST.get('fund_amount')\n # fund_source = request.POST.get('fund_source')\n # 
p_first_name = request.POST.get('p_first_name')\n # p_middle_name = request.POST.get('p_middle_name')\n # p_last_name = request.POST.get('p_last_name')\n # p_employed = request.POST.get('p_employed')\n # p_education = request.POST.get('p_education')\n # p_occupation = request.POST.get('p_occupation')\n # p_phone = request.POST.get('p_phone')\n # p_nhif = request.POST.get('p_nhif')\n # p_id = request.POST.get('p_id')\n # p_pension_income = request.POST.get('p_pension_income')\n # p_relief_income = request.POST.get('p_relief_income')\n # p_business_income = request.POST.get('p_business_income')\n # p_farming_income = request.POST.get('p_farming_income')\n # p_private_groups_income = request.POST.get('p_private_groups_income')\n # p_well_wishers_income = request.POST.get('p_well_wishers_income')\n # p_casual_labour_income = request.POST.get('p_casual_labour_income')\n # p_other_income = request.POST.get('p_other_income')\n\n # # school details\n # last_name = request.POST.get('last_name')\n # first_name = request.POST.get('first_name')\n # middle_name = request.POST.get('middle_name')\n # value = request.POST.get('value')\n # dob = request.POST.get('dob')\n # name = request.POST.get('name')\n # institution = request.POST.get('institution')\n # course = request.POST.get('course')\n # university_reg_no = request.POST.get('university_reg_no')\n # university_year = request.POST.get('university_year')\n # total_university_fee = request.POST.get('total_university_fee')\n # paid_university_fee = request.POST.get('paid_university_fee')\n # outstanding_university_fee = request.POST.get('outstanding_university_fee')\n # secondary_school_name = request.POST.get('secondary_school_name')\n # school_type = request.POST.get('school_type')\n # total_sec_fee = request.POST.get('total_sec_fee')\n # paid_sec_fee = request.POST.get('paid_sec_fee')\n # outstanding_sec_fee = request.POST.get('outstanding_sec_fee')\n # class_name = request.POST.get('class_name')\n # sec_reg_no = request.POST.get('sec_reg_no')\n # sec_year = request.POST.get('sec_year')\n # school = request.POST.get('school')\n\n # # location \n # sub_county = request.POST.get('sub_county')\n # ward = request.POST.get('ward')\n # village = request.POST.get('village')\n # year = request.POST.get('year')\n\n # sr.save()\n # return JsonResponse(sr.data, status=status.HTTP_201_CREATED)\n # return JsonResponse(sr.errors, status=status.HTTP_400_BAD_REQUEST)\n \n # data = form.cleaned_data\n # new_application_model = NewApplicationModel.objects.create(**data, user=request.user)\n\n\n\n\n\n\n# get all submitted bursary applications\ndef get_applications(request):\n data = SubmittedApplicationsModel.objects.all()\n if request.method == 'GET':\n # first_name = BursaryApplicant.objects.get(first_name='first_name', flat=True)\n \n serializer = SubmittedApplicationsModelSerializer(data, many=True)\n\n return JsonResponse( serializer.data, safe=False)\n\n# get all registered applicants in the Admin view\ndef registered_applicants(request):\n data = BursaryApplicant.objects.all()\n\n if request.method == 'GET':\n serializer = BursaryApplicantSerializer(data, many=True)\n # print(data)\n return JsonResponse(serializer.data, safe=False)\n\n# trials \ndef show_submissions(request):\n # data = TrialModel.objects.filter(first_name__first_name )\n # data = TrialModel.objects.filter(fname__first_name = request.user)\n ffname = TrialModel.objects.all()\n # data = TrialModel.objects.filter(id=29).values('id','fname.first_name', 'fname__first_name', 'fname__last_name', 
'institution',\n # 'reg', 'year_of_study', 'total_fee', 'paid', 'outstanding_balance', 'fund_source', 'fund_source_amount',\n # 'pension_income', 'relief_income', 'business_income', 'farming_income', \n # 'private_groups_income', 'well_wishers_income', 'casual_labour_income', 'other_income', 'guardian_as_financier', \n # 'well_wishers_as_financier', 'other_financier', 'other_financiers', 'p_first_name','p_middle_name','p_last_name', \n # 'p_occupation', 'p_phone', 'p_id_number', 'p_employed', 'p_education_level', 'p_nhif', 'father_gross_income', \n # 'guardian_gross_income', 'mother_gross_income','number_of_siblings', 'guardian_children', 'working_siblings', \n # 'siblings_in_secondary', 'siblings_in_post_secondary', 'school_rep_name', \n # 'school_document', 'chief_name', 'chief_document', 'mca_name', 'mca_document', 'clergy_name', \n # 'clergy_document', 'transcript_document', 'fee_structure_document', 'fee_slip_document' )\n\n # data = TrialModel.objects.filter(user_id=29).values( 'user_id', 'user_id__first_name', 'user_id__last_name')\n\n # data = TrialModel.objects.filter(fname__user=request.user)\n data = TrialModel.objects.all()\n\n if request.method == 'GET':\n serializer = TrialModelSerializer(data, many=True)\n # print(data)\n return JsonResponse(serializer.data, safe=False)\n\ndef loggedUser(request):\n # username = None\n data = BursaryApplicant.objects.filter(user=request.user)\n\n if request.method == 'GET':\n serializer = BursaryApplicantSerializer(data, many=True)\n # if request.user.is_authenticated():\n first_name = request.user.first_name\n email = request.user.email\n print(first_name)\n print(email)\n return JsonResponse( serializer.data, safe=False)\n\ndef awardedApplicant(request):\n # username = None\n data = TrialModel.objects.filter(user=request.user)\n\n if request.method == 'GET':\n serializer = TrialModelSerializer(data, many=True)\n # if request.user.is_authenticated():\n first_name = request.user.first_name\n email = request.user.email\n print(first_name)\n print(email)\n return JsonResponse( serializer.data, safe=False)\n\ndef apply(request):\n data = NewBursaryApplication.objects.all()\n \n\ndef check_login(request):\n if request.user.is_authenticated():\n return HttpResponse(json.dumps({'result': {'logged': True}, 'user': request.user.username}),\n content_type=\"application/json\")\n else: return HttpResponse(json.dumps({'result': {'logged': False}}),\n content_type=\"application/json\")\n\n# CRUD OPERATIONS\n\n# DELETE\n@csrf_exempt\ndef deleteapplicant(request, id):\n if request.method == 'DELETE':\n TrialModel.objects.filter(id=id).delete()\n return JsonResponse({\"status\": \"success\", \"message\": \"Item deleted successfully!\"})\n\n\n# UPDATE\n\n# @csrf_exempt\n# @api_view(['PUT'])\n# def update_application_status(request, id):\n# task = TrialModel.objects.get(id=id)\n# serializer = TrialModelSerializer(instance = task, data=request.POST)\n# if serializer.is_valid():\n# print(\"serializer\", serializer)\n# print(\"serializer.data\", serializer.data)\n# serializer.save()\n\n# return JsonResponse(serializer.data)\n# return JsonResponse(serializer.errors)\n\n# @csrf_exempt\n# def update_application_status(instance, *args, **kwargs):\n# instance.institution = validated_data.get('institution', instance.institution)\n# instance.reg = validated_data.get('reg', instance.reg)\n# print(instance)\n# instance.save()\n# return instance\n\n# @csrf_exempt\n@api_view(['PUT'])\n@renderer_classes((TemplateHTMLRenderer, JSONRenderer))\ndef 
update_application_status(request, id):\n instance = TrialModel.objects.get(id=id)\n\n # use request.data.get instead of request.POST to get data from external api\n instance.institution = request.data.get('institution', instance.institution)\n instance.reg = request.data.get('reg', instance.reg)\n instance.year_of_study = request.data.get('year_of_study', instance.year_of_study)\n instance.total_fee = request.data.get('total_fee', instance.total_fee)\n instance.paid = request.data.get('paid', instance.paid)\n instance.outstanding_balance = request.data.get('outstanding_balance', instance.outstanding_balance)\n instance.Bursary_Application_Status = request.data.get('Bursary_Application_Status', instance.Bursary_Application_Status)\n instance.amount_allocated = request.data.get('amount_allocated', instance.amount_allocated)\n instance.description = request.data.get('description', instance.description)\n\n print(request.POST.get('institution', instance.institution))\n instance.save(update_fields=['institution'])\n \n\n serializer = TrialModelSerializer(instance=instance, data=request.POST)\n if serializer.is_valid():\n \n \n serializer.save()\n \n return Response(serializer.data)\n\n\n# Reports data\n# approved applications\ndef all_approved_applications(request):\n data = TrialModel.objects.all().filter(Bursary_Application_Status=\"APPROVED\")\n if request.method == \"GET\":\n \n serializer = TrialModelSerializer(data, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n#rejected applications\ndef all_rejected_applications(request):\n data = TrialModel.objects.all().filter(Bursary_Application_Status=\"UNSUCCESSFUL\")\n if request.method == \"GET\":\n \n serializer = TrialModelSerializer(data, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n# under review\ndef under_review_applications(request):\n data = TrialModel.objects.all().filter(Bursary_Application_Status=\"REVIEW_IN_PROCESS\")\n if request.method == \"GET\":\n \n serializer = TrialModelSerializer(data, many=True)\n return JsonResponse(serializer.data, safe=False)","sub_path":"Ibursary-backend/ibursary_accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"31828063","text":"import gym.spaces\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom utils import logger\nfrom algos.acer import Acer, AgentEnv\nfrom policies.agent import Agent\nfrom dstruct.buffers import Buffer\nfrom utils.misc import set_global_seeds\nfrom policies.acer_lstm import AcerLstm\nfrom policies.acer_convnet import AcerConvnet\nfrom utils.cmd import make_atari_env, atari_arg_parser\nfrom common.vec_env.environment import AbstractEnvRunner\n\n\nclass Environment(AbstractEnvRunner):\n def __init__(self, env, model, nsteps, nstack):\n super().__init__(env=env, model=model, nsteps=nsteps)\n self.nstack = nstack\n nh, nw, nc = env.observation_space.shape\n self.nc = nc # nc = 1 for atari, but just in case\n self.nenv = nenv = env.num_envs\n self.nact = env.action_space.n\n self.nbatch = nenv * nsteps\n self.batch_ob_shape = (nenv*(nsteps+1), nh, nw, nc*nstack)\n self.obs = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8)\n obs = env.reset()\n self.update_obs(obs)\n\n def update_obs(self, obs, dones=None):\n if dones is not None:\n self.obs *= (1 - dones.astype(np.uint8))[:, None, None, None]\n self.obs = np.roll(self.obs, shift=-self.nc, axis=3)\n self.obs[:, :, :, -self.nc:] = obs[:, :, :, 
:]\n\n def run(self):\n enc_obs = np.split(self.obs, self.nstack, axis=3) # so now list of obs steps\n mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []\n for _ in range(self.nsteps):\n actions, mus, states = self.model.step_model.step(\n self.obs, state=self.states, mask=self.dones\n )\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_mus.append(mus)\n mb_dones.append(self.dones)\n obs, rewards, dones, _ = self.env.step(actions)\n # states information for statefull models like LSTM\n self.states = states\n self.dones = dones\n self.update_obs(obs, dones)\n mb_rewards.append(rewards)\n enc_obs.append(obs)\n mb_obs.append(np.copy(self.obs))\n mb_dones.append(self.dones)\n\n enc_obs = np.asarray(enc_obs, dtype=np.uint8).swapaxes(1, 0)\n mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0)\n mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)\n mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)\n\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)\n\n # Used for statefull models like LSTM's to mask state when done\n mb_masks = mb_dones\n # Used for calculating returns. The dones array is now aligned\n # with rewards\n mb_dones = mb_dones[:, 1:]\n\n # shapes are now [nenv, nsteps, []]\n # When pulling from buffer, arrays will now be reshaped in\n # place, preventing a deep copy.\n\n return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus,\\\n mb_dones, mb_masks\n\n\ndef fit(\n policy,\n env,\n seed,\n nsteps=20,\n nstack=4,\n total_timesteps=int(80e6),\n q_coef=0.5,\n ent_coef=0.01,\n max_grad_norm=10,\n lr=7e-4,\n lrschedule='linear',\n rprop_epsilon=1e-5,\n rprop_alpha=0.99,\n gamma=0.99,\n log_interval=100,\n buffer_size=50000,\n replay_ratio=4,\n replay_start=10000,\n c=10.0,\n trust_region=True,\n alpha=0.99,\n delta=1\n):\n print(\"Running Acer Simple\")\n print(locals())\n tf.reset_default_graph()\n set_global_seeds(seed)\n\n # num_procs = len(env.remotes) # HACK\n model = Acer(\n policy=policy,\n observation_space=env.observation_space,\n action_space=env.action_space,\n nenvs=env.num_envs,\n nsteps=nsteps,\n nstack=nstack,\n ent_coef=ent_coef,\n q_coef=q_coef,\n gamma=gamma,\n max_grad_norm=max_grad_norm,\n lr=lr,\n rprop_alpha=rprop_alpha,\n rprop_epsilon=rprop_epsilon,\n total_timesteps=total_timesteps,\n lrschedule=lrschedule,\n c=c,\n trust_region=trust_region,\n alpha=alpha,\n delta=delta\n )\n\n env_runner = Environment(env=env, model=model, nsteps=nsteps, nstack=nstack)\n if replay_ratio > 0:\n buffer = Buffer(env=env, nsteps=nsteps, nstack=nstack, size=buffer_size)\n else:\n buffer = None\n nbatch = env.num_envs * nsteps\n agent = AgentEnv(env_runner, model, buffer, log_interval)\n agent.tstart = time.time()\n # nbatch samples, 1 on_policy call and multiple off-policy calls\n for agent.steps in range(0, total_timesteps, nbatch):\n agent.call(on_policy=True)\n if replay_ratio > 0 and buffer.has_atleast(replay_start):\n n = np.random.poisson(replay_ratio)\n for _ in range(n):\n agent.call(on_policy=False) # no simulation steps in this\n\n env.close()\n\n\ndef main():\n parser = atari_arg_parser()\n parser.add_argument('--policy', help='Policy architecture',\n choices=['cnn', 'lstm', 'lnlstm'], default='cnn')\n parser.add_argument('--lrschedule', help='Learning rate schedule',\n choices=['constant', 'linear'], default='constant')\n parser.add_argument('--logdir', help='Directory for logging')\n args = parser.parse_args()\n 
logger.configure(args.logdir)\n\n num_cpu = 16\n env = make_atari_env(args.env, num_cpu, args.seed)\n if args.policy == 'cnn':\n policy_fn = AcerConvnet\n elif args.policy == 'lstm':\n policy_fn = AcerLstm\n else:\n print(\"Policy {} not implemented\".format(args.policy))\n return\n fit(\n policy_fn,\n env,\n args.seed,\n total_timesteps=int(args.num_timesteps * 1.1),\n lrschedule=args.lrschedule\n )\n env.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"baselines/examples/run_acer.py","file_name":"run_acer.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"481846116","text":"# 爬取猫眼前100电影\nfrom urllib import request, parse\nfrom urllib.error import HTTPError\nfrom http.cookiejar import CookieJar\nimport re\nimport json\nfrom time import sleep\n\ndef get_page(url, url_opener):\n header = {'User-Agent':('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'\n ' (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36v')}\n req = request.Request(url=url, headers=header)\n try:\n response = url_opener.open(req)\n if(response.getcode()==200):\n return response.read().decode(\"utf8\")\n return None\n except HTTPError as e:\n return None\n\ndef get_opener():\n # 获取http库的cookiejar,再用request处理,然后构建opener\n cookie = CookieJar()\n handler = request.HTTPCookieProcessor(cookie)\n opener = request.build_opener(handler)\n return opener\n\ndef get_parse_html(html):\n '''html source\n
sample <dd> board item: rank 1, poster img[data-src], title \"霸王别姬\", 主演:张国荣,张丰毅,巩俐, 上映时间:1993-01-01(中国香港), score 9.6\n    '''\n    # 图片,片名,主演,上映时间,评分\n    pattern = re.compile(('<dd>.*?data-src=\"(.*?)\".*?data-val=.*?>(.*?)</a>.*?'\n                          'star\">(.*?)</p>.*?releasetime\">(.*?)</p>.*?integer\">(\\d\\.).*?</i>.*?'\n                          'fraction\">(\\d).*?</i>.*?</dd>
'), re.S)\n items = re.findall(pattern, html)\n # 换行也能使用strip()默认去掉\n for item in items:\n yield (item[0],item[1],item[2].strip()[3:], item[3][5:], item[4]+item[5])\n\ndef get_file_json(content):\n with open('/home/yuyuyu/Desktop/movies', 'a', encoding='utf8') as f:\n f.write(json.dumps(content, ensure_ascii=False)+'\\n')\\\n # json.dump(content, f, ensure_ascii=False)\n\ndef get_movie():\n with open('/home/yuyuyu/Desktop/movies', 'r', encoding='utf8') as f:\n for i in f:\n print(i)\n\ndef main(offset, url_opener):\n #\n url = parse.urljoin(\"https://maoyan.com/board/4\", \"?offset=\"+str(offset))\n html = get_page(url, url_opener)\n for content in get_parse_html(html):\n get_file_json(content)\n\nif __name__ == '__main__':\n url_opener = get_opener()\n for i in range(10):\n main(i*10, url_opener)\n sleep(1)\n get_movie()","sub_path":"Python/demo/catEyeMovie.py","file_name":"catEyeMovie.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"7673412","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport kivy\nfrom kivy.app import App\nfrom kivy.app import Widget\nfrom datetime import date\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.stacklayout import StackLayout\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.core.window import Window\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\nfrom kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition\nfrom kivy.properties import StringProperty\nfrom kivy.properties import NumericProperty\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.button import Button\nfrom kivy.uix.dropdown import DropDown\nfrom kivy.base import runTouchApp\nfrom kivy.core.window import Window;\nfrom kivy.uix.bubble import Bubble, BubbleButton\n\n\nhoje = date.today()\namanha = hoje.day\nontem = hoje.month\nsemana = hoje.weekday\ndias = (0, 1, 2, 3, 4, 5, 6)\nfeira = ('Segunda', 'Terça', 'Quarta', 'Quinta', 'Sexta', 'Sábado', 'Domingo')\nx = dias[hoje.weekday()]\nn = NumericProperty(35)\nclass PrimeiroScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'um'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\n\tdef dia(self):\n\t\t\n\t\tif x == 0:\n\t\t\tApp.get_running_app().root.current = 'seg'\n\t\telif x == 1:\n\t\t\tApp.get_running_app().root.current = 'ter'\n\t\telif x == 2:\n\t\t\tApp.get_running_app().root.current = 'qua'\n\t\telif x == 3:\n\t\t\tApp.get_running_app().root.current = 'qui'\n\t\telif x == 4:\n\t\t\tApp.get_running_app().root.current = 'sex'\n\t\telif x == 5:\n\t\t\tApp.get_running_app().root.current = 'sab'\n\t\telif x == 6:\n\t\t\tApp.get_running_app().root.current = 'dom'\n\t\n\nclass SegundoScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'dois'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\n\tdef mucarela(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn1 = BubbleButton(text=\"Muçarela\", font_size='20dp', size_hint=(1,None), background_normal='1.png', background_down='2.png')\n\t\tscreen4.lb1 = Label(text=\"25,00\", font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 25\n\t\tscreen4.ids.grid.add_widget(screen4.btn1)\n\t\tscreen4.ids.grid.add_widget(screen4.lb1)\n\t\tprint(n)\n\t\n\tdef catupiry(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn2 = BubbleButton(text=\"Catupiry\",font_size='20dp', 
size_hint=(1,None), background_normal='2.png', background_down='1.png')\n\t\tscreen4.lb2 = Label(text=\"25,00\",font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 25\n\t\tscreen4.ids.grid.add_widget(screen4.btn2)\n\t\tscreen4.ids.grid.add_widget(screen4.lb2)\n\t\n\tdef peru(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn2 = BubbleButton(text=\"Peito de peru\",font_size='20dp', size_hint=(1,None), background_normal='1.png', background_down='2.png')\n\t\tscreen4.lb2 = Label(text=\"95,00\",font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 35\n\t\tscreen4.ids.grid.add_widget(screen4.btn2)\n\t\tscreen4.ids.grid.add_widget(screen4.lb2)\n\t\n\tdef portuguesa(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn2 = BubbleButton(text=\"Portuguesa\",font_size='20dp', size_hint=(1,None), background_normal='2.png', background_down='1.png')\n\t\tscreen4.lb2 = Label(text=\"17,00\",font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 27\n\t\tscreen4.ids.grid.add_widget(screen4.btn2)\n\t\tscreen4.ids.grid.add_widget(screen4.lb2)\n\t\t\n\tdef toscana(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn2 = BubbleButton(text=\"Toscana\",font_size='20dp', size_hint=(1,None), background_normal='1.png', background_down='2.png')\n\t\tscreen4.lb2 = Label(text=\"5,50\",font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 35\n\t\tscreen4.ids.grid.add_widget(screen4.btn2)\n\t\tscreen4.ids.grid.add_widget(screen4.lb2)\nclass TerceiroScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'carrinho'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass SegundaScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'seg'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass TercaScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'ter'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass QuartaScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'qua'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass QuintaScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'qui'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass SextaScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'sex'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\t\n\tdef promo2(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btnp1 = Button(text=\"Muçarela(+Refri)\", font_size='20dp', size_hint=(1,None), background_normal='1.png', background_down='2.png')\n\t\tscreen4.lbp1 = Label(text=\"20,00\", font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 20\n\t\tscreen4.ids.grid.add_widget(screen4.btnp1)\n\t\tscreen4.ids.grid.add_widget(screen4.lbp1)\n\t\n\tdef promo1(self, *args):\n\t\tscreen4 = self.manager.get_screen('carrinho')\n\t\tscreen4.btn2 = BubbleButton(text=\"Catupiry(+Refri)\",font_size='20dp', size_hint=(1,None), background_normal='2.png', background_down='1.png')\n\t\tscreen4.lb2 = Label(text=\"25,00\",font_size='20dp', size_hint=(1,None))\n\t\tscreen4.ids.lb5.value += 20\n\t\tscreen4.ids.grid.add_widget(screen4.btn2)\n\t\tscreen4.ids.grid.add_widget(screen4.lb2)\n\t\nclass SabadoScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'sab'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\nclass DomingoScreen(Screen):\n\tdef __init__(self, **kwargs):\n\t\tself.name = 'dom'\n\t\tsuper(Screen,self).__init__(**kwargs)\n\t\t\t\t\nclass MyLabel1(Label):\n\tvalue = 
NumericProperty(0)\n\t\n\nclass RootScreen(ScreenManager):\n pass\n \nclass pizzoideApp(App):\n\ttitle = 'Pizzoide!'\n\tdef on_pause(self):\n\t\treturn True\n\t\t\n\tdef on_resume(self):\n\t \n\t pass\n \n\tdef build(self):\n\t\treturn RootScreen()\nif __name__ == '__main__':\n appVar = pizzoideApp()\n pizzoideApp().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"380125426","text":"# import os\nimport bs4\n# import requests\n# import sys\nfrom urllib.request import urlopen\nimport autoDownloader\n\n\ndef image_url_scrapper(url):\n\n connection = urlopen(url)\n raw_html = connection.read()\n\n connection.close()\n\n page_soup = bs4.BeautifulSoup(raw_html, 'html.parser')\n container = page_soup.find_all(\"div\", {\"class\": \"item-container\"})\n\n return container\n\n\nif __name__ == '__main__':\n listout = image_url_scrapper(\"https://www.newegg.com/p/pl?d=video+cards+for+desktop\")\n for elements in listout:\n autoDownloader.download_from_url(\"http:\" + elements.a.img['src'])\n # print(elements.a.img['src'])\n","sub_path":"Auto-Downloader/url-scrap.py","file_name":"url-scrap.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"420777490","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport re, os, time, csv\n\nfirst_dir = 'V3_BigData'\ndir_number = 1\ndir_name = './naver_ranking'\nhtml = urllib.request.urlopen('http://movie.naver.com/movie/sdb/rank/rmovie.nhn')\nsoup = BeautifulSoup(html, 'html.parser')\ntags = str(soup)\np_all = re.compile(r'''\n#\n<.*>\\n<.*>\\n\n#title\n<.*title=\"(.*)\">\n.*\\n\\n\\n.*\\n.*\\n\n#등락\n<.*alt=\"(.{2,4})\".*\\n<.*>\n#등락폭\n(\\d+)<.*>''',re.VERBOSE)\nmovie_all_find = p_all.findall(tags)\nmovie_title=[]\nmovie_rank_updown=[]\nmovie_rank_updown_num=[]\nfinal=[]\nnum_list=[]\n\nfor num in range(len(movie_all_find)):\n movie_title.append(movie_all_find[num][0])\n movie_rank_updown_num.append(movie_all_find[num][2])\n if movie_all_find[num][1] == \"na\":\n movie_rank_updown.append(\"\")\n elif movie_all_find[num][1] == \"up\":\n movie_rank_updown.append(\"+\")\n elif movie_all_find[num][1] == \"down\":\n movie_rank_updown.append(\"-\")\n\ndef search_file_list(dir_name):\n try:\n file_list = os.listdir(dir_name)\n return len(file_list)\n except Exception:\n pass\n\ndef search_dir_list(dir_name):\n global dir_number\n dir_name = './naver_ranking%d' % dir_number\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n return dir_number, dir_name\n else:\n file_len = search_file_list(dir_name)\n if file_len < 3:\n return dir_number, dir_name\n else:\n dir_number += 1\n dir_number, dir_name = search_dir_list(dir_name)\n return dir_num, dir_name\n\nif not os.path.isdir(first_dir):\n os.mkdir(first_dir)\nos.chdir('./'+first_dir)\n\ndir_num, dir_name = search_dir_list(dir_name)\nfile_name = './%s' % (time.strftime('%Y-%m-%d_%H%M%S', time.localtime()))\n\ncsvfile = open(\"%s%s.csv\" %(dir_name, file_name), 'w', newline=\"\")\ncsvwriter = csv.writer(csvfile)\nhead = [\"순위\", \"영화명\", \"변동폭\"]\ncsvwriter.writerow(head)\n\nfor num in range(1,51):\n num_list.append(num)\nfor num, name, updown, updown_num in zip(num_list, movie_title, movie_rank_updown, movie_rank_updown_num):\n final.append((num, name, updown+updown_num))\nfor count in range(len(final)):\n 
csvwriter.writerow(final[count])\ncsvfile.close()","sub_path":"02. Data Science/1. Collection/5_2. Distributed_saving/distribute_saving_date.py","file_name":"distribute_saving_date.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"445077484","text":"from classes.Node import Node\nimport numpy as np\nfrom classes.Packet import Packet\nfrom classes.Message import Message\nfrom scipy.stats import levy\nfrom classes.Utilities import StructuredMessage\n\n\nclass Client(Node):\n def __init__(self, env, conf, net, loggers=None, label=0, id=None, p2p=False, messages=None):\n self.conf = conf\n self.message_workload = messages\n\n # Is the end user sending loop traffic to cover their activities\n self.user_loop_traffic = self.conf['clients']['user_loop_traffic']\n super().__init__(env=env, conf=conf, net=net, loggers=loggers, id=id)\n\n def schedule_retransmits(self):\n pass\n\n def schedule_message(self, message):\n # This function is used in the transcript mode\n ''' schedule_message adds given message into the outgoing client's buffer. Before adding the message\n to the buffer the function records the time at which the message was queued.'''\n\n print(\"> Scheduled message\")\n current_time = self.env.now\n message.time_queued = current_time\n for pkt in message.pkts:\n pkt.time_queued = current_time\n self.add_to_buffer(message.pkts)\n\n def print_msgs(self):\n ''' Method prints all the messages gathered in the buffer of incoming messages.'''\n for msg in self.msg_buffer_in:\n msg.output()\n\n def start(self):\n ''' Main client method; It sends packets out.\n It checks if there are any new packets in the outgoing buffer.\n If it finds any, it sends the first of them.\n If none are found, the client sends out a dummy\n packet (i.e., cover loop packet).\n '''\n\n delays = []\n\n while True:\n if self.alive:\n if delays == []:\n delays = list(np.random.exponential(self.rate_sending, 10000))\n\n delay = delays.pop()\n yield self.env.timeout(float(delay))\n\n if len(self.pkt_buffer_out) > 0: #If there is a packet to be send\n tmp_pkt = self.pkt_buffer_out.pop(0)\n self.send_packet(tmp_pkt)\n self.env.total_messages_sent += 1\n self.env.real_pkts += 1\n\n elif self.user_loop_traffic:\n tmp_pkt = Packet.dummy(conf=self.conf, net=self.net, dest=self, sender=self) # sender_estimates[sender.label] = 1.0\n tmp_pkt.time_queued = self.env.now\n self.send_packet(tmp_pkt)\n self.env.total_messages_sent += 1\n self.env.dummy_pkts += 1\n else:\n break\n\n def simulate_modeled_traffic(self, exclude=None):\n messages = self.net.traffic[self.id]\n\n for idx, message in enumerate(messages):\n if self.alive:\n yield self.env.timeout(message['time_from_last_msg'])\n\n if idx == 0:\n self.env.active_clients += 1\n self.system_logger.info(StructuredMessage(metadata=(self.env.now, self.env.active_clients, self.env.message_ctr, self.env.real_pkts, self.env.dummy_pkts)))\n\n for recipient in message['to']:\n # Prevent the second sender from sending to the tracked recipient\n if exclude and recipient == exclude.id:\n continue\n\n # New Message\n r_client = self.net.clients_dict[recipient]\n msg = Message.random(conf=self.conf, net=self.net, sender=self, dest=r_client, size=message['size'])\n self.simulate_adding_packets_into_buffer(msg)\n\n if idx == len(messages) - 1:\n self.env.active_clients -= 1\n else:\n break\n\n def simulate_message_generation(self, dest, model_traffic):\n ''' This method generates actual 'real' 
messages that can be used to compute the entropy.\n The rate and amount at which we generate this traffic is defined by rate_generating and num_target_packets\n in the config file.'''\n i = 0\n\n generation_rate = self.rate_generating\n self.env.active_clients += 1\n\n if model_traffic:\n # Hardcoded generation distribution based on traffic workload files\n # Should be size=self.conf[\"misc\"][\"num_target_packets\"] but the distribution is shifted into negative values\n # therefore extra is needed for values picked < 0\n delays = [x for x in levy.rvs(*(-60.86760352972247, 230.09494123284878), size=2000) if x > 0]\n # send the first message off the bat\n delays.append(0)\n\n while i < self.conf[\"misc\"][\"num_target_packets\"]:\n if model_traffic:\n generation_rate = delays.pop()\n\n yield self.env.timeout(float(generation_rate))\n\n # New Message\n msg = Message.random(conf=self.conf, net=self.net, sender=self, dest=dest, model_traffic=model_traffic)\n self.simulate_adding_packets_into_buffer(msg)\n for num, pkt in enumerate(msg.pkts):\n if i + num < len(pkt.probability_mass):\n pkt.probability_mass[i + num] = 1.0 # only needed for sender1\n i += len(msg.pkts)\n print(f\" {i} packets sent for entropy measurement\")\n self.env.finished = True\n self.env.active_clients -= 1\n\n def simulate_adding_packets_into_buffer(self, msg):\n # This function is used in the test mode\n current_time = self.env.now\n msg.time_queued = current_time # The time when the message was created and placed into the queue\n for pkt in msg.pkts:\n pkt.time_queued = current_time\n self.add_to_buffer(msg.pkts)\n self.env.message_ctr += 1\n self.system_logger.info(StructuredMessage(metadata=(self.env.now, self.env.active_clients, self.env.message_ctr, self.env.real_pkts, self.env.dummy_pkts)))\n\n def add_to_buffer(self, packets):\n for pkt in packets:\n tmp_now = self.env.now\n pkt.time_queued = tmp_now\n self.pkt_buffer_out.append(pkt)\n","sub_path":"classes/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"235302385","text":"\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic import TemplateView\n\nfrom userapp import views\n\nurlpatterns = [\n\n path('mypage', views.UserView.mypage, name='mypage'),\n path('profile', views.UserView.profile, name='profile'),\n path('userupdateimple', views.UserView.userupdateimple,name='userupdateimple'),\n path('ncheck/',views.UserView.ncheck,name='ncheck'),\n\n path('myrecipe_reg',views.UserView.myrecipereg,name='myrecipe_reg'),\n path('myrecipeaddimpl', views.UserView.myrecipeaddimpl,name='myrecipeaddimpl'),\n path('recipeingrcheck',TemplateView.as_view(template_name='userapp/recipeingrcheck.html'),name='recipeingrcheck'),\n path('recipeingradd',views.UserView.recipeingradd,name='recipeingradd'),\n\n path('popingr.html',views.UserView.popingr, name='popingr'),\n path('popsearch/',views.UserView.popsearch,name='popsearch'),\n\n # path('like',TemplateView.as_view(template_name='like.html'),name='like'),\n path('like', views.UserView.like, name='like'),\n\n path('allergy',views.UserView.allergy,name='allergy'),\n path('allergyrem',views.UserView.allergyrem,name='allergyrem'),\n path('allergyadd',views.UserView.allergyadd,name='allergyadd')\n]\n","sub_path":"userapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"437555359","text":"import os\nimport sys\nimport logging\nfrom logging.config import dictConfig\nimport pika\n\nrouting_key = \"green\"\n\n\ndef create_connection_channel():\n connection_parameters = pika.ConnectionParameters(host=\"172.17.0.2\", port=5672)\n connection = pika.BlockingConnection(connection_parameters)\n channel = connection.channel()\n return channel\n\n\ndef main():\n logging.info(\"main\")\n channel = create_connection_channel()\n\n message = \"Hello World!\"\n channel.basic_publish(exchange=\"try_green\", routing_key=routing_key, body=message)\n logging.info(\" [x] Sent %r:%r\" % (routing_key, message))\n channel.connection.close()\n\n\nif __name__ == \"__main__\":\n logging_config_dict = dict(\n version=1,\n formatters={\n \"simple\": {\n \"format\": \"\"\"%(asctime)s | %(name)-12s | %(levelname)-8s | %(message)s\"\"\"\n }\n },\n handlers={\"console\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"simple\"}},\n root={\"handlers\": [\"console\"], \"level\": logging.DEBUG},\n )\n\n dictConfig(logging_config_dict)\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"452906744","text":"from django.urls import path\n#from .views import HomePageView\nfrom . 
import views\n\nurlpatterns = [\n\tpath('',views.index,name='index'),\n path('register/',views.register,name='register'),\n path('login', views.login, name='login'),\n path('home/', views.home, name='home'),\n path('home/courses',views.courses, name='courses'),\n path('home/edit_profile/', views.edit_profile, name='edit_profile'),\n path('logout/', views.logOut, name='logout'),\n path('contact/', views.contact, name='contact'),\n path('about/', views.about, name='about'),\n]\n\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"272032700","text":"import MapReduce\nimport sys\n\n#########################################################\n# Name: Junzhi Ye\n# Coursera Intro to Data Science Assignment 3, Problem 5\n#\n# Description: Remove the last 10 characters from each\n# string of nucleotides, then remove any duplicates\n# generated \n#\n# Execution: \n# % python unique_trims.py dna.json\n##########################################################\n\n# import MapReduce\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n\n # input is [sequence id, nucleotides] \n # remove the last 10 characters from each string of \n # nucleotides\n mr.emit_intermediate(record[1][:len(record[1]) - 10], 0)\n \ndef reducer(key, value):\n \n # keys are the unique nucleotides, with last 10 \n # characters trimmed \n mr.emit(key)\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"unique_trims.py","file_name":"unique_trims.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"334388475","text":"import tkinter as tk\nfrom tkinter import ttk, filedialog, messagebox\nfrom ctypes import windll\nfrom tkinter.ttk import Style\nfrom executer import SQLExecuter, ExecuteError\nfrom datastore.create import create_xml\nfrom xlrd import open_workbook\nfrom os import listdir\nfrom os.path import abspath, join\n\nLARGE_FONT = ('Courier New', 14)\nNORM_BOLD_FONT = ('Verdana', 12, 'bold')\nSMALL_FONT = ('Verdana', 11)\nFILE_PATH = ''\nFOLDER_PATH = ''\nBOOKS = {}\nFINAL_RESULT = None\n\n\nclass SqlApp(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n windll.shcore.SetProcessDpiAwareness(1)\n\n MainPage(parent).pack(fill=tk.BOTH, expand=True)\n parent.mainloop()\n\n\ndef get_data(data):\n global BOOKS\n selected_cols = []\n selected_rows = []\n final_result = []\n col_names = []\n\n for name, path in data[1]:\n book = open_workbook(path)\n BOOKS[name] = book\n\n if not isinstance(data[0][2], tuple):\n book = BOOKS[data[0][2]]\n sheet = book.sheet_by_name(data[0][2])\n if data[0][0] is not None:\n selected_rows.extend(data[0][0])\n else:\n selected_rows.extend([i for i in range(sheet.nrows)])\n for i in range(sheet.ncols):\n if sheet.cell_value(0, i) in data[0][1]:\n selected_cols.append(i)\n\n col_names.extend([sheet.cell_value(0, i) for i in selected_cols])\n\n for i in selected_rows:\n current_row = []\n for j in selected_cols:\n current_row.append(sheet.cell_value(i, j))\n final_result.append(current_row)\n else:\n book1 = BOOKS[data[0][2][0]]\n book2 = BOOKS[data[0][2][1]]\n sheet1 = book1.sheet_by_name(data[0][2][0])\n sheet2 
= book2.sheet_by_name(data[0][2][1])\n\n selected_rows.extend(data[0][0])\n col1_index = []\n col2_index = []\n\n for i in range(sheet1.ncols):\n if sheet1.cell_value(0, i) in data[0][1][0]:\n col1_index.append(i)\n for i in range(sheet2.ncols):\n if sheet2.cell_value(0, i) in data[0][1][1]:\n col2_index.append(i)\n\n selected_cols.extend([tuple(col1_index), tuple(col2_index)])\n\n first_row = [data[0][2][0] + '.' + sheet1.cell_value(0, i) for i in col1_index] + \\\n [data[0][2][1] + '.' + sheet2.cell_value(0, i) for i in col2_index]\n col_names.extend(first_row)\n final_result.append(first_row)\n\n for i, j in selected_rows:\n current_row = []\n if i is None:\n for _ in selected_cols[0]:\n current_row.append('null')\n else:\n for x in selected_cols[0]:\n current_row.append(sheet1.cell_value(i, x))\n if j is None:\n for _ in selected_cols[1]:\n current_row.append('null')\n else:\n for x in selected_cols[1]:\n current_row.append(sheet2.cell_value(j, x))\n final_result.append(current_row)\n\n if len(final_result) == 0:\n final_result.append(col_names)\n return final_result\n\n\ndef open_file(obj):\n global FILE_PATH, FOLDER_PATH\n\n try:\n FILE_PATH = filedialog.askopenfilenames(filetypes=(('Excel Files (*.xls, *.xlsx)', '*.xls'),\n ('Excel Files (*.xls, *.xlsx)', '*.xlsx')))\n except FileNotFoundError:\n return\n\n if FOLDER_PATH:\n ans = messagebox.askquestion('Select File',\n 'You have already selected a folder. If you want to select file instead of the '\n 'folder than click yes.', icon='warning')\n if ans == 'yes':\n FOLDER_PATH = ''\n else:\n return\n\n if FILE_PATH:\n obj.config(state=tk.NORMAL)\n\n\ndef open_folder(obj):\n global FILE_PATH, FOLDER_PATH\n try:\n FOLDER_PATH = filedialog.askdirectory()\n except FileNotFoundError:\n FOLDER_PATH = ''\n return\n\n if FILE_PATH:\n ans = messagebox.askquestion('Select Folder',\n 'You have already selected a file. 
If you want to select folder instead of the '\n 'file than click yes(The file needs to be in the selected folder '\n 'if you need both).', icon='warning')\n if ans == 'yes':\n FILE_PATH = ''\n else:\n return\n\n if FOLDER_PATH:\n obj.config(state=tk.NORMAL)\n FILE_PATH = []\n for filename in listdir(FOLDER_PATH):\n if filename.endswith('.xls') or filename.endswith('.xlsx'):\n FILE_PATH.append(abspath(join(FOLDER_PATH, filename)))\n FILE_PATH = tuple(FILE_PATH)\n\n\ndef reset_box(obj):\n obj.delete(1.0, tk.END)\n\n\ndef run_query(queries, controller):\n global FILE_PATH, FINAL_RESULT\n final_result = {}\n metadata = {}\n for file in FILE_PATH:\n metadata[file] = create_xml(file)\n for query in queries.split(';'):\n if query.strip() == '':\n break\n try:\n data_obj = SQLExecuter(query, metadata)\n data = data_obj.return_result()\n if isinstance(data[0], tuple):\n final_result[query] = get_data(data)\n else:\n final_result[query] = data[0]\n except ExecuteError as e:\n final_result[query] = str(e)\n\n FINAL_RESULT = final_result\n\n res_app = ShowResult(controller)\n res_app.wm_title('Query Results')\n width = res_app.winfo_screenwidth() - 20\n height = res_app.winfo_screenheight() - 20\n res_app.geometry(f'{width}x{height}+10+10')\n\n\ndef do():\n print('No')\n\n\nclass MainPage(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n\n selection = ttk.Frame(self)\n selection.grid(row=0, column=0, sticky=tk.NSEW)\n\n instructions = ttk.Label(selection,\n text='Select file or folder(containing one or more Excel Files) '\n 'on which you want to perform operations.',\n font=SMALL_FONT)\n instructions.grid(row=0, columnspan=2, padx=10, pady=5)\n\n open_file_btn = ttk.Button(selection, text='Choose File', style='def.TButton',\n command=lambda: open_file(query_box))\n open_file_btn.grid(row=1, sticky=tk.W, padx=10, pady=10)\n\n open_folder_btn = ttk.Button(selection, text='Choose Folder', style='def.TButton',\n command=lambda: open_folder(query_box))\n open_folder_btn.grid(row=1, column=1, sticky=tk.W, padx=10, pady=10)\n\n lbl1 = ttk.Label(self, text='Enter your query below. 
If multiple, then separate with semi-colon(;).',\n font=SMALL_FONT)\n lbl1.grid(row=1, sticky=tk.W, padx=10, pady=5)\n\n query_box = tk.Text(self, width=72, height=12, state=tk.DISABLED, font=LARGE_FONT)\n query_box.grid(row=2, columnspan=2, pady=10, padx=10, sticky=tk.NSEW)\n\n scrollbar_y = ttk.Scrollbar(self, orient=tk.VERTICAL, command=query_box.yview)\n scrollbar_y.grid(row=2, column=2, sticky=tk.NS)\n query_box['yscrollcommand'] = scrollbar_y.set\n\n s = Style()\n s.configure('def.TButton', font=SMALL_FONT)\n\n run_button = ttk.Button(self, text='Run Query', style='def.TButton',\n command=lambda: run_query(query_box.get(1.0, tk.END), parent))\n run_button.grid(row=3, columnspan=2, sticky=tk.W, pady=5, padx=10)\n\n reset_btn = ttk.Button(self, text='Reset', style='def.TButton', command=lambda: reset_box(query_box))\n reset_btn.grid(row=3, column=1, sticky=tk.E, pady=5, padx=10)\n\n\nclass ShowResult(tk.Toplevel):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Toplevel.__init__(self, parent, *args, **kwargs)\n\n canvas = tk.Canvas(self)\n canvas.pack(fill=tk.BOTH, expand=True)\n\n yscroll = ttk.Scrollbar(parent, orient=tk.VERTICAL)\n yscroll.pack(side=tk.RIGHT, fill=tk.Y)\n\n if FINAL_RESULT is not None:\n for query, result in FINAL_RESULT.items():\n label_query = ttk.Label(canvas, text='>>> ' + query, font=SMALL_FONT)\n label_query.pack(pady=10, padx=10, anchor=tk.SW)\n\n if isinstance(result, list):\n table = ttk.Treeview(canvas, columns=tuple(result[0]), show='headings')\n\n yscrolltable = ttk.Scrollbar(table, orient=tk.VERTICAL)\n yscrolltable.pack(side=tk.RIGHT, fill=tk.Y)\n xscrolltable = ttk.Scrollbar(table, orient=tk.HORIZONTAL)\n xscrolltable.pack(side=tk.BOTTOM, fill=tk.X)\n\n for col in result[0]:\n table.heading(col, text=col)\n for row in result[1:]:\n table.insert('', 'end', values=tuple(row))\n table.pack(pady=10, padx=10, fill=tk.BOTH, expand=True)\n\n yscrolltable.config(command=table.yview)\n xscrolltable.config(command=table.xview)\n table.configure(xscrollcommand=xscrolltable.set, yscrollcommand=yscrolltable.set)\n else:\n lbl = ttk.Label(canvas, text=result, font=NORM_BOLD_FONT)\n lbl.pack(pady=10, padx=10, side=tk.TOP, anchor=tk.W)\n lbl.configure(foreground='red')\n\n yscroll.config(command=canvas.yview)\n canvas.configure(yscrollcommand=yscroll.set)\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n root.resizable(False, False)\n root.title('SQL Query Executer for Excel Sheets')\n app = SqlApp(root)\n root.mainloop()\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"568033360","text":"from typing import List\nimport numpy as np\nimport pandas as pd\nfrom ..core.base import FeatureExtractor\nfrom P4J import MultiBandPeriodogram\nimport logging\n\n\nclass PeriodExtractor(FeatureExtractor):\n def __init__(self, bands=None):\n self.periodogram_computer = MultiBandPeriodogram(method='MHAOV')\n if bands is None:\n self.bands = [1, 2]\n else:\n self.bands = bands\n\n def get_features_keys(self) -> List[str]:\n features = ['Multiband_period', 'PPE']\n for band in self.bands:\n features.append(f'Period_band_{band}')\n features.append(f'delta_period_{band}')\n return features\n\n def get_required_keys(self) -> List[str]:\n return [\n 'mjd',\n 'magpsf_ml',\n 'sigmapsf_ml',\n 'fid'\n ]\n\n def _compute_features(self, detections: pd.DataFrame, **kwargs) -> pd.DataFrame:\n oids = detections.index.unique()\n features = []\n\n 
detections = detections.sort_values('mjd')\n\n periodograms = {}\n for oid in oids:\n oid_detections = detections.loc[[oid]]\n\n oid_detections = oid_detections.groupby('fid').filter(\n lambda x: len(x) > 5)\n\n available_bands = oid_detections.fid.unique()\n\n self.periodogram_computer.set_data(\n mjds=oid_detections[['mjd']].values,\n mags=oid_detections[['magpsf_ml']].values,\n errs=oid_detections[['sigmapsf_ml']].values,\n fids=oid_detections[['fid']].values)\n\n try:\n self.periodogram_computer.frequency_grid_evaluation(\n fmin=1e-3, fmax=20.0, fresolution=1e-3)\n self.frequencies = self.periodogram_computer.finetune_best_frequencies(\n n_local_optima=10, fresolution=1e-4)\n except TypeError as e:\n logging.error(f'TypeError exception in PeriodExtractor: '\n f'oid {oid}\\n{e}')\n object_features = pd.DataFrame(\n data=[[np.nan] * len(self.get_features_keys())],\n columns=self.get_features_keys(),\n index=[oid]\n )\n features.append(object_features)\n periodograms[oid] = {\n 'freq': None,\n 'per': None\n }\n continue\n best_freq, best_per = self.periodogram_computer.get_best_frequencies()\n\n freq, per = self.periodogram_computer.get_periodogram()\n period_candidate = 1.0 / best_freq[0]\n\n period_candidates_per_band = []\n for band in self.bands:\n if band not in available_bands:\n period_candidates_per_band.extend([np.nan, np.nan])\n continue\n best_freq_band = self.periodogram_computer.get_best_frequency(band)\n #Getting best period\n best_period_band = 1.0 / best_freq_band\n #Calculating delta period\n delta_period_band = np.abs(period_candidate - best_period_band)\n period_candidates_per_band.extend([best_period_band, delta_period_band])\n\n # Significance estimation\n entropy_best_n = 100\n top_values = np.sort(per)[-entropy_best_n:]\n normalized_top_values = top_values + 1e-2\n normalized_top_values = normalized_top_values / np.sum(normalized_top_values)\n entropy = (-normalized_top_values * np.log(normalized_top_values)).sum()\n significance = 1 - entropy / np.log(entropy_best_n)\n object_features = pd.DataFrame(\n data=[[period_candidate, significance] + period_candidates_per_band],\n columns=self.get_features_keys(),\n index=[oid]\n )\n features.append(object_features)\n periodograms[oid] = {\n 'freq': freq,\n 'per': per\n }\n\n features = pd.concat(features, axis=0, sort=True)\n features.index.name = 'oid'\n if 'shared_data' in kwargs.keys():\n kwargs['shared_data']['period'] = features\n kwargs['shared_data']['periodogram'] = periodograms\n return features\n","sub_path":"late_classifier/features/extractors/period_extractor.py","file_name":"period_extractor.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"518153905","text":"from bandits.bandit import Bandit\nimport numpy as np\n\nclass BayesUCBMean(Bandit):\n\n def select_arm(self, t, influence_limit= False, c=5):\n selected_arm = 0\n max_value = 0\n\n #select arm\n for (index, arm) in enumerate(self.arms):\n val = arm.reward_dist_mean(influence_limit = influence_limit) \n if val > max_value:\n max_value = val\n selected_arm = index\n\n return selected_arm\n \n def update(self, arm, reward):\n self.arms[arm].pulls += 1\n self.arms[arm].rewards += reward\n self.arms[arm].update_reward_dist(reward)\n","sub_path":"bandits/bayesUCBMean.py","file_name":"bayesUCBMean.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"135406468","text":"fullscreen = False\nresources_path = 'resources'\n\nfrom pyglet.window import key\n\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\nFIRE = 4\n\ncontrols_map = {\n 1: {\n UP: key.UP,\n RIGHT: key.RIGHT,\n DOWN: key.DOWN,\n LEFT: key.LEFT,\n FIRE: key.SPACE,\n },\n 2: {\n UP: key.W,\n RIGHT: key.D,\n DOWN: key.S,\n LEFT: key.A,\n FIRE: key.LSHIFT,\n },\n}\n","sub_path":"engine/util/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"393754403","text":"import pygame as pg\nimport settings as s\nimport player as p\nimport groups as g\n#import background_objects as bo\nimport tilemap as tm\nimport keybinds as k\n\npg.init()\n\npg.display.set_caption(\"Explorer\")\npg.display.set_icon(pg.image.load(s.ICON_IMAGE))\n\nclock = pg.time.Clock()\n\npg.time.set_timer(s.CHECK_FPS, s.CHECK_FPS_INTERVAL)\npg.time.set_timer(s.ADVANCE_ANIMATION, s.ANIMATION_LENGTH)\npg.time.set_timer(s.BREATH, s.BREATH_INTERVAL)\n\nplayer = None\n\nsolids = pg.sprite.Group()\nenemies = pg.sprite.Group()\nbackground = pg.sprite.Group()\n\ntrue_scroll = [0,0]\n\nall_sprites = pg.sprite.Group()\n\nloaded_map = \"test2\"\n\ndef small_display(text):\n font = pg.font.SysFont(None, s.SMALL_FONT_SIZE)\n new_text = font.render(text, True, s.BLACK)\n return new_text\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, s.BLACK)\n return textSurface, textSurface.get_rect()\n\ndef set_up():\n global player\n \n game_map = tm.Map(loaded_map)\n game_map.make_map()\n \n #bo.Sky((-s.WIDTH, game_map.height-s.HEIGHT*1.25), loaded_map.lower())\n \n player_x, player_y = game_map.player_spawn\n player = p.Player(player_x, player_y)\n player.kill_Y = 1000\n\ndef render(fps):\n if loaded_map == \"Grass\":\n s.SCREEN.fill(s.GRASS_BACKGROUND_COLOR)\n elif loaded_map == \"Snow\":\n s.SCREEN.fill(s.SNOW_BACKGROUND_COLOR)\n elif loaded_map == \"Sand\":\n s.SCREEN.fill(s.SAND_BACKGROUND_COLOR)\n else:\n s.SCREEN.fill(s.DEFAULT_BACKGROUND_COLOR)\n \n true_scroll[0] += (player.rect.x-true_scroll[0]-s.WIDTH/2+s.PLAYER_SIZE[0])//30\n true_scroll[1] += (player.rect.y-true_scroll[1]-s.HEIGHT/2)//30\n scroll = true_scroll.copy()\n scroll[0] = int(scroll[0])\n scroll[1] = int(scroll[1])\n\n for sprite in g.background:\n s.SCREEN.blit(sprite.surf, (sprite.X-scroll[0]*sprite.speed, sprite.Y-scroll[1]*sprite.speed))\n \n for sprite in g.non_solids:\n s.SCREEN.blit(sprite.surf, (sprite.location[0]-scroll[0], sprite.location[1]-scroll[1]))\n \n for sprite in g.solids:\n s.SCREEN.blit(sprite.surf, (sprite.location[0]-scroll[0], sprite.location[1]-scroll[1]))\n \n for sprite in g.enemies:\n s.SCREEN.blit(sprite.surf, (sprite.location[0]-scroll[0], sprite.location[1]-scroll[1]))\n \n s.SCREEN.blit(player.surf, (player.rect.x-scroll[0], player.rect.y-scroll[1]))\n \n s.SCREEN.blit(small_display(str(fps)),(0, 0))\n \n coins_text = \"Coins: \" + str(player.coins)\n s.SCREEN.blit(small_display(coins_text), (s.WIDTH-(len(coins_text))*10, 0))\n \n clock.tick(s.FPS)\n pg.display.update()\n\ndef main():\n set_up()\n fps = 0\n \n while True:\n player.update()\n g.background.update()\n \n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n quit()\n if event.type == pg.KEYDOWN:\n if event.key == k.JUMP:\n player.jump()\n if event.type == s.CHECK_FPS:\n fps = round(clock.get_fps())\n if event.type == s.ADVANCE_ANIMATION:\n player.advance_animation()\n if event.type == s.BREATH and 
player.stage == player.is_standing:\n player.start_breath()\n render(fps)\n \nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"446852772","text":"\"\"\"make labels and entry boxes, enter values and make response_dict_numerical\"\"\"\n\nimport tkinter as tk\n\nfrom Validate_entries import *\n\n# response_list_numerical = []\n# response_dict_numerical = {}\n\ncriteria_list_numerical_entries = [\n \"Enter Age:\", \"Enter Hemoglobin:\", \"Enter QT interval:\", \"Enter Heart Rate:\"\n]\n\nroot = tk.Tk()\nroot.title(\"Data Entry Screen\")\nroot.geometry('1000x1000')\nroot.resizable(True, True)\n\n\n# Validation function\n# def is_number(string):\n# try:\n# float(string)\n# return True\n# except ValueError:\n# return False\n\n\n# make labels for numerical entry boxes\n\ndef make_labels_numerical(question_list):\n rowcount = 0\n for name in question_list:\n lbl_name = tk.Label(root, text=name, font=(\"Arial\", 16))\n lbl_name.grid(column=0, row=rowcount)\n rowcount += 1\n\n\nmake_labels_numerical(criteria_list_numerical_entries)\n\n\n# make Entry boxes\n\ndef make_entry_boxes():\n global my_entries\n my_entries = [] # list of entrybox objects. Each individual box is called \"entry\"\n\n # Row loop\n for y in range(len(criteria_list_numerical_entries)):\n entry = tk.Entry(root, bd=4, fg=\"red\", bg=\"aqua\")\n entry.grid(row=y, column=1, pady=20, padx=5)\n my_entries.append(entry)\n print(my_entries) # debug\n return my_entries\n\n\nmake_entry_boxes()\n\n\ndef get_responses():\n response_list = [] # list of user entries in order\n print(\"Entry list:\", my_entries)\n\n for entries in my_entries:\n\n if not is_number(entries.get()):\n print(\"Entry is not a number. \")\n lbl_show.config(text=\"Some entries are not numbers. 
Re-enter values and try again\")\n break\n response_list.append(float(entries.get()))\n lbl_show.config(text=response_list)\n\n print(\"Here is the response_list\", response_list)\n\n return response_list\n\n\ndef create_dictionary_numerical_entries():\n question_list = criteria_list_numerical_entries\n response_list = get_responses()\n\n print(\"\\n\\nQuestion list;\", question_list)\n print(\"\\nFinal response list:\", response_list)\n response_dict_numerical = dict(zip(question_list, response_list))\n\n \"\"\" needs code to validate entries and screen for values with empty strings\"\"\"\n\n print(\"\\nResponse dictionary numerical:\", response_dict_numerical)\n # my_tools.result_text = \"Selected responses:\", my_tools.response_dict\n return response_dict_numerical\n\n\nbtn_click = tk.Button(root, text=\"Enter Selections:\", font=(\"Arial\", 16),\n fg=\"magenta\",\n command=create_dictionary_numerical_entries)\nbtn_click.grid(row=6, column=0, pady=20)\n\nlbl_show = tk.Label(root, text=\"\")\nlbl_show.grid(row=7, column=0, pady=20)\n\ntk.mainloop()\n","sub_path":"Entry_boxes.py","file_name":"Entry_boxes.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"22550552","text":"from typing import List\n\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n results = [[]]\n self.recurse(nums, results, set())\n return results\n\n def recurse(self, nums, results, seen):\n numstuple = tuple(nums)\n if numstuple in seen:\n return\n seen.add(numstuple)\n\n if not nums:\n return\n\n results.append(nums)\n for i in range(len(nums)):\n self.recurse(nums[:i] + nums[i + 1:], results, seen)","sub_path":"leetcode/p0078_subsets/my_attempt.py","file_name":"my_attempt.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"205938515","text":"#!/usr/bin/env python3\n\"\"\"Run matrix multiplication with Tensorflow on a GPU and CPU.\n\nThis is a short demo of various matrix sizes to show how a GPU can\nbecome more efficient at calculating matrix multiplication with\nTensorflow and CUDA.\n\"\"\"\n\nimport argparse\nimport sys\nimport time\n\nfrom tabulate import tabulate\nimport tensorflow as tf\n\nMATRIX_SHAPES = [2**x for x in range(14)]\n\n\ndef create_matrix(matrix_shape):\n \"\"\"Create a matrix of specified shape.\n\n Args:\n matrix_shape: tuple of (x, y)\n Returns:\n tensor of random values from a uniform distribution\n\n \"\"\"\n return tf.random_uniform(\n shape=matrix_shape,\n minval=0,\n maxval=1,\n dtype=tf.float16\n )\n\n\ndef time_tf_matrixmul(matrix_shape):\n \"\"\"Run tensorflow with device and matrix size.\n\n Args:\n device: the name of the tensorflow device to use\n matrix_shape: tuple of (x, y)\n Returns:\n time in seconds for matrix multiplication operation\n\n \"\"\"\n with tf.Session() as session:\n matrix_a = create_matrix(matrix_shape)\n matrix_b = create_matrix(matrix_shape)\n\n start = time.time()\n session.run(tf.matmul(matrix_a, matrix_b))\n end = time.time()\n\n return round(end - start, 2)\n\n\ndef bench():\n \"\"\"Run matrix multiplication on various sizes.\n\n Args:\n device: the name of the tensorflow device to use\n \"\"\"\n results = []\n for shape in MATRIX_SHAPES:\n print('%sx%s' % (shape, shape))\n time = time_tf_matrixmul((shape, shape))\n results.append([shape, time])\n\n print(tabulate(results, headers=['Shape', 'Time']))\n\n\nif __name__ == '__main__':\n 
sys.exit(bench())\n","sub_path":"bench/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"584259215","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport multiprocessing as mp\nimport requests, os, re, time\n\n\ndef entrysaverfun(url, fl=None):\n def piclinkfun(element):\n flag = True\n try:\n element.find_element_by_tag_name('img')\n return flag\n except:\n flag = False\n return flag\n\n datetime = '0000-00-00 00:00:00'\n date = '0000-00-00'\n articletitle = 'abc'\n themename = 'abc'\n piclen = 0\n videolistlen = 0\n blogtext = ''\n\n if fl is None:\n fl = []\n piclist = []\n videolist = []\n i = 1\n\n print('Saving blog entry : ' + url)\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n driver = webdriver.Chrome(chrome_options=options)\n driver.get(url)\n\n try:\n datetime = driver.find_element_by_xpath(\"//span[@class='articleTime']\").text[:19]\n date = datetime[:10]\n except:\n try:\n datetime = driver.find_element_by_xpath(\"//time[@class='skin-textQuiet']\").text[-19:]\n date = datetime[:10]\n except:\n print('Failed to Get Entry Time', url)\n fl.append(url)\n\n try:\n themename = driver.find_element_by_xpath(\"//span[@class='articleTheme']\").text[4:]\n except:\n try:\n themename = driver.find_element_by_xpath(\"//dl[@class='_3wmM1dM_ skin-entryThemes']\").text[4:]\n except:\n print('Failed to Get Theme Name', url)\n fl.append(url)\n try:\n articletitle = driver.find_element_by_xpath(\"//a[@class='skinArticleTitle']\").text\n except:\n print('Failed to Get Article Title ', url)\n fl.append(url)\n # entryid = re.findall(r'-.*?(\\d+).html', url)\n\n path = date + ' ' + articletitle\n path = re.sub(r'[\\\\/:*?\"<>|]', '', path)\n # print('Blog entry ID : ' + entryid[0])\n # print('Theme name : ' + themename)\n # print('Path : ' + path)\n\n try: # 创建文件夹 '''时间 标题'''\n if not os.path.exists(themename + '/' + path):\n os.makedirs(themename + '/' + path)\n print('Created folder ' + themename + '/' + path + ' successfully')\n else:\n print('Folder' + ' existed')\n except:\n print('Failed to Creat Folder ')\n print('Failed to Save Blog :', url)\n fl.append(url)\n driver.quit()\n\n try:\n blogtext = driver.find_element_by_id('entryBody').text\n except:\n print('Failed to Get Entry Text:', url)\n fl.append(url)\n\n # '''正文写入txt'''\n # '''\n # 标题 时间\n # 主题\n # 正文\n # '''\n try:\n textname = path + '.txt'\n with open(themename + '/' + path + '/' + textname, 'w', encoding='UTF-8') as f:\n f.write(articletitle + ' ' + datetime + '\\n' + 'テーマ: ' + themename + '\\n' + blogtext)\n print('Saved blog ' + path + ' text')\n except:\n print('Failed to Save Blog ' + path + ' Text')\n fl.append(url)\n\n try:\n links = driver.find_element_by_id('entryBody').find_elements_by_tag_name('a')\n for s in links:\n if piclinkfun(s):\n piclink = s.find_element_by_tag_name('img').get_attribute('src')\n piclink = re.search(r\"(.+)\\?caw=(\\d+)\", piclink).group(1)\n piclist.append(piclink)\n # print(piclist)\n piclen = len(piclist)\n if piclen == 0:\n print('No picture in blog ' + path)\n\n except:\n print('Failed to Get ' + path + ' Pictures')\n fl.append(url)\n\n try:\n if not piclen == 0:\n print('Found ' + str(piclen) + ' picture(s) in blog ' + path)\n for k in piclist:\n # print(k)\n r = requests.get(url=k)\n pictype = requests.head(k).headers.get('content-type') 
# 判断图片格式\n if pictype == 'image/jpeg':\n pictype = 'jpg'\n elif pictype == 'image/gif':\n pictype = 'gif'\n elif pictype == 'image/png':\n pictype = 'png'\n picname = path + str(i) + '.' + pictype\n with open(themename + '/' + path + '/' + picname, 'wb') as f:\n f.write(r.content)\n i += 1\n if i > piclen:\n i = 1\n print('Saved Blog ' + path + ' pictures')\n except:\n print('Failed to Save pictures,URL:', url)\n fl.append(url)\n\n try:\n iframes = driver.find_element_by_id('entryBody').find_elements_by_tag_name('iframe')\n # print(iframes)\n for iframe in iframes:\n driver.switch_to.frame(iframe)\n videolink = driver.find_element_by_tag_name('source').get_attribute('src')\n # print(videolink)\n videolist.append(videolink)\n driver.switch_to.default_content()\n\n videolistlen = len(videolist)\n if videolistlen == 0:\n print('Saved blog ' + path + ' Successfully')\n except:\n print('Failed to Get ' + path + ' Videos URL')\n fl.append(url)\n driver.quit()\n\n try:\n if not videolistlen == 0:\n print('Found ' + str(videolistlen) + ' video(s) in blog ' + path)\n for k in videolist:\n # print(k)\n r = requests.get(k)\n videotype = requests.head(k).headers.get('content-type') # 判断图片格式\n videotype = videotype[6:]\n # print(videotype[6:])\n videoname = path + ' ' + str(i) + '.' + videotype\n with open(themename + '/' + path + '/' + videoname, 'wb') as f:\n f.write(r.content)\n i += 1\n if i > videolistlen:\n print('Saved Blog ' + path + ' videos')\n print('Saved blog ' + path + ' Successfully')\n except:\n print('Failed to Save videos,URL:', url)\n fl.append(url)\n driver.quit()\n\n driver.quit()\n\n\ndef Searchentry(url, name):\n data = requests.get(url).content\n soup = BeautifulSoup(data, 'html.parser')\n links = [s.get('href') for s in soup.find_all('a')]\n for entrylink in [str for str in links if str not in ['', ' ', None]]:\n if entrylink.startswith('/' + name + '/entry-'):\n entrylist.append(entrylink)\n\n\ndef Pagefun(urlflag, Fristthemeurl): # urlflag,0:theme,1:archive,2:entrylist\n global entrylist, themeid\n entrylist = []\n themeurl = Fristthemeurl\n if urlflag == 0:\n Fristthemenum = re.findall(r'theme(.*?)-', Fristthemeurl)\n themeid = re.findall(r'-.*?(\\d+).html', Fristthemeurl)\n print('Blog theme URL :', themeurl)\n themeid = themeid[0]\n print('Blog theme ID :', themeid)\n elif urlflag == 1:\n Fristthemenum = re.findall(r'archive(.*?)-', Fristthemeurl)\n achiveid = re.findall(r'-.*?(\\d+).html', Fristthemeurl)\n print('Blog archive URL :', themeurl)\n themeid = achiveid[0]\n print('Blog archive ID :', themeid)\n else:\n Fristthemenum = re.findall(r'entrylist(.*?)', Fristthemeurl)\n\n if Fristthemenum[0] == '':\n themenum = 1\n Original_theme_num = 1\n print('Searching entries on Page', themenum)\n else:\n themenum = int(Fristthemenum[0])\n Original_theme_num = int(Fristthemenum[0])\n print('Searching entries on Page', themenum)\n\n # '''翻页'''\n # '''1)themeFristPage : themenum=1 后翻(1)None→break[Only one Page] (2)themenum+=1 前翻→themenum==1 →break\n # 2)themeX: themenum=X 后翻 themenum+=1 前翻 themenum-=1\n # 3)lastPage: themenum=lastPage 后翻None→break 前翻→themenum-=1→→→→1\n # '''\n try:\n while True: # 后翻页\n Searchentry(themeurl, blogname)\n data = requests.get(themeurl).content\n soup = BeautifulSoup(data, 'html.parser')\n NextP1 = soup.find('a', class_='skinSimpleBtn pagingNext')\n NextP2 = soup.find('a', class_='skin-paginationNext skin-btnIndex js-paginationNext')\n if NextP1 == None and NextP2 == None: # 无下一页\n if themenum == 1: # 只有一页\n print('This theme only has 1 Page')\n 
break\n elif Original_theme_num == 1: # 本来就是从第一页开始翻的\n break\n else: # 翻回最初的前一页\n if urlflag == 0:\n themeurl = 'https://ameblo.jp/' + blogname + '/theme' + str(\n Original_theme_num - 1) + '-' + themeid + '.html'\n elif urlflag == 1:\n themeurl = 'https://ameblo.jp/' + blogname + '/archive' + str(\n Original_theme_num - 1) + '-' + themeid + '.html'\n else:\n themeurl = 'https://ameblo.jp/' + blogname + '/entrylist-' + str(\n Original_theme_num - 1) + '.html'\n themenum = Original_theme_num - 1\n print('Searching entries on Page ' + str(themenum))\n break\n\n else: # 后翻\n themenum += 1\n print('Searching entries on Page ' + str(themenum))\n if urlflag == 0:\n themeurl = 'https://ameblo.jp/' + blogname + '/theme' + str(themenum) + '-' + themeid + '.html'\n elif urlflag == 1:\n themeurl = 'https://ameblo.jp/' + blogname + '/archive' + str(themenum) + '-' + themeid + '.html'\n else:\n themeurl = 'https://ameblo.jp/' + blogname + '/entrylist-' + str(themenum) + '.html'\n\n while True: # 前翻页\n if Original_theme_num == 1: # 只有一页或从第一页开始翻\n break\n elif themenum == 1: # 翻到第一页\n Searchentry(themeurl, blogname)\n break\n else: # 前翻\n Searchentry(themeurl, blogname)\n themenum -= 1\n print('Searching entries on Page ' + str(themenum))\n if urlflag == 0:\n themeurl = 'https://ameblo.jp/' + blogname + '/theme' + str(themenum) + '-' + themeid + '.html'\n elif urlflag == 1:\n themeurl = 'https://ameblo.jp/' + blogname + '/archive' + str(themenum) + '-' + themeid + '.html'\n else:\n themeurl = 'https://ameblo.jp/' + blogname + '/entrylist-' + str(themenum) + '.html'\n\n except Exception:\n print(' Failed to search entries URL')\n exit()\n\n entrylist = list(set(entrylist)) # 去除重复元素\n # print(entrylist)\n\n if urlflag == 0:\n print('This theme has ' + str(len(entrylist)) + ' entries(entry).')\n elif urlflag == 1:\n print('This archive has ' + str(len(entrylist)) + ' entries(entry).')\n else:\n print('This account has ' + str(len(entrylist)) + ' entries(entry).')\n\n\ndef is_amebaurl(url):\n flag = 0\n if url.startswith('https://ameblo.jp/' + blogname + '/entrylist'):\n flag = 3\n return flag\n elif url.startswith('https://ameblo.jp/' + blogname + '/entry'):\n return flag\n elif url.startswith('https://ameblo.jp/' + blogname + '/theme'):\n flag = 1\n return flag\n elif url.startswith('https://ameblo.jp/' + blogname + '/archive'):\n flag = 2\n return flag\n else:\n flag = 4\n return flag\n\n\ndef savejob1(job1list, l):\n for i in job1list:\n entryurl = 'https://ameblo.jp' + i\n entrysaverfun(entryurl, l)\n\n\ndef savejob2(job2list, l):\n for i in job2list:\n entryurl = 'https://ameblo.jp' + i\n entrysaverfun(entryurl, l)\n\n\ndef failedlistfun(fl):\n t = st1 - st\n h = t / 3600\n m = t % 3600 / 60\n s = t % 60\n fl = list(set(fl))\n llen = len(fl)\n if llen == 0:\n print('Saved All Blog Entries Media in', '%02d:%02d:%02d' % (int(h), int(m), int(s)))\n else:\n print('Saved Part of Blog Entries Media in', '%02d:%02d:%02d' % (int(h), int(m), int(s)))\n try:\n with open('failed_list.txt', 'a') as f:\n for i in fl:\n f.write(i + '\\n')\n print('Saved fault log as failed_list.txt')\n except:\n print('Failed to save fault log failed_list.txt')\n print('----------Failed to save URL list-----------')\n for i in fl:\n print(i)\n\n\ndef multicore(list1, list2, fl):\n list1len = len(list1)\n # print(list1len)\n if list1len == 0:\n print('No entry in this theme')\n elif list1len == 1:\n entryurl = 'https://ameblo.jp' + list1[0]\n entrysaverfun(entryurl)\n else:\n for i in list1: # divide list1 into 2 lists\n 
list2.append(i)\n list1.remove(i)\n # print(list1)\n # print(list2)\n # print(len(list1))\n # print(len(list2))\n p1 = mp.Process(target=savejob1, args=(list1, fl))\n p2 = mp.Process(target=savejob2, args=(list2, fl))\n p1.start()\n p2.start()\n p1.join()\n p2.join()\n\n\ndef is_kw_contain(url, keyword, title_or_text=0):\n flag = True\n data = requests.get(url).content\n soup = BeautifulSoup(data, 'html.parser')\n if title_or_text == 0:\n checktitle = re.findall(r'entry_title\":\"(.*?)\",\"entry_text\"', soup.get_text())\n if keyword in checktitle[0]:\n return flag\n else:\n flag = False\n return flag\n else:\n bodytext = soup.select('#entryBody')\n entrytext = bodytext[0]\n if keyword in entrytext.text:\n return flag\n else:\n flag = False\n return flag\n\n\nif __name__ == '__main__':\n job2list = []\n containkwlist = []\n failedlist = mp.Manager().list()\n print('Ameba Blog Saver v2.2.3')\n print('Author : Nakateru (2019.12.25)')\n firstinput = input(\"Input Ameba Blog URL or 'O' to enter set mode:\")\n if firstinput == 'O' or firstinput == 'o':\n print(\n '[1]Filter keywords from entry title [2]Filter keywords from entries text [3]Read URL from txt file [4]Exit')\n secondinput = input('Select number:')\n if secondinput == '1':\n kw = input(\"Input keywords in entries title:\")\n if not kw == '':\n Fristurl = input(\"Input Ameba Blog URL to the filter:\")\n try:\n blogname = re.findall(r'ameblo.jp/(.*?)/', Fristurl)\n blogname = blogname[0]\n print('Blog username: ' + blogname)\n except:\n print('Error URL')\n exit()\n\n if is_amebaurl(Fristurl) == 0:\n print('This is an Ameba entry URL')\n if is_kw_contain(Fristurl, kw, 0):\n entrysaverfun(Fristurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry title does not contain keywords')\n exit()\n\n elif is_amebaurl(Fristurl) == 1:\n print('This is an Ameba theme URL')\n st = time.time()\n Pagefun(0, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 0):\n print('Entry title contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry title does not contain keywords')\n exit()\n\n else:\n print('Searching keywords in all entries title of this theme...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 0):\n containkwlist.append(i)\n else:\n pass\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries title contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 2:\n print('This is an Ameba theme URL classified by months')\n st = time.time()\n Pagefun(1, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 0):\n print('Entry title contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry title does not contain keywords')\n exit()\n\n else:\n print('Searching keywords in all entries title in this theme...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 0):\n 
containkwlist.append(i)\n else:\n pass\n\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries title contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 3:\n print('This is an Ameba account URL')\n saveall = input('Do you want to save entries which titles contain keywords in this account?[y/n]')\n if saveall == 'y':\n Pagefun(2, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this account')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this account')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 0):\n print('Entry title contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry title does not contain keywords')\n exit()\n\n else:\n print('Searching keywords in all entries title in this account...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 0):\n containkwlist.append(i)\n else:\n pass\n\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries title contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n else:\n print('Error URL')\n exit()\n\n else:\n print('Error keywords')\n exit()\n\n elif secondinput == '2':\n kw = input(\"Input keywords in entries text:\")\n if not kw == '':\n Fristurl = input(\"Input Ameba Blog URL to the filter:\")\n try:\n blogname = re.findall(r'ameblo.jp/(.*?)/', Fristurl)\n blogname = blogname[0]\n print('Blog username: ' + blogname)\n except Exception:\n print('Error URL')\n exit()\n\n if is_amebaurl(Fristurl) == 0:\n print('This is an Ameba entry URL')\n st = time.time()\n if is_kw_contain(Fristurl, kw, 1):\n entrysaverfun(Fristurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry text does not contain keywords')\n exit()\n\n elif is_amebaurl(Fristurl) == 1:\n print('This is an Ameba theme URL')\n st = time.time()\n Pagefun(0, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 1):\n print('Entry title contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry text does not contain keywords')\n exit()\n\n else:\n print('Searching keywords in all entries text in this theme...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 1):\n containkwlist.append(i)\n else:\n pass\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries text contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 2:\n print('This is an Ameba theme URL classified by months')\n Pagefun(1, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 1):\n print('Entry text contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry text does not contain keywords')\n exit()\n\n 
else:\n print('Searching keywords in all entries text of this theme...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 1):\n containkwlist.append(i)\n else:\n pass\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries text contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 3:\n print('This is an Ameba account URL')\n saveall = input('Do you want to save entries which contain keywords in this account?[y/n]')\n if saveall == 'y':\n st = time.time()\n Pagefun(2, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this account')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this account')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n if is_kw_contain(entryurl, kw, 1):\n print('Entry text contains keywords')\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Entry text does not contain keywords')\n exit()\n\n else:\n print('Searching keywords in all entries text in this account...')\n for i in entrylist:\n entryurl = 'https://ameblo.jp' + i\n if is_kw_contain(entryurl, kw, 1):\n containkwlist.append(i)\n else:\n pass\n\n containkwlistlen = len(containkwlist)\n print(containkwlistlen, 'entries text contain keywords')\n multicore(containkwlist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n else:\n print('Error URL')\n exit()\n\n else:\n print('Error keywords')\n exit()\n\n elif secondinput == '3':\n txt_file_name = input('Input txt file name:')\n st = time.time()\n try:\n with open(txt_file_name, encoding='UTF-8') as f:\n txt_file = f.read().splitlines()\n if not len(txt_file) == 0:\n for url in [s for s in txt_file if s not in ['', ' ', None]]:\n url = re.split(r\"[\\s\\n]\", url)\n entrysaverfun(url[0], failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n print('Error txt file')\n exit()\n except:\n print('Error txt file')\n exit()\n\n elif secondinput == '4':\n exit()\n else:\n print('Error input')\n exit()\n else:\n Fristurl = firstinput\n try:\n blogname = re.findall(r'ameblo.jp/(.*?)/', Fristurl)\n blogname = blogname[0]\n print('Blog username: ' + blogname)\n except Exception:\n print('Error URL')\n exit()\n\n st = time.time()\n\n if is_amebaurl(Fristurl) == 0:\n print('This is an Ameba entry URL')\n entrysaverfun(Fristurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 1:\n print('This is an Ameba theme URL')\n Pagefun(0, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n multicore(entrylist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n elif is_amebaurl(Fristurl) == 2:\n print('This is an Ameba theme URL classified by months')\n Pagefun(1, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this theme')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this theme')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n multicore(entrylist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n 
elif is_amebaurl(Fristurl) == 3:\n print('This is an Ameba accout URL')\n saveall = input('Do you want to save all entries in this account?[y/n]')\n if saveall == 'y':\n Pagefun(2, Fristurl)\n entrylistlen = len(entrylist)\n if entrylistlen == 0:\n print('No entry in this account')\n exit()\n elif entrylistlen == 1:\n print('Only 1 entry in this account')\n entryurl = 'https://ameblo.jp' + entrylist[0]\n entrysaverfun(entryurl, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n else:\n multicore(entrylist, job2list, failedlist)\n st1 = time.time()\n failedlistfun(failedlist)\n\n else:\n exit()\n else:\n print('Error URL')\n exit()\n","sub_path":"Ameba_Blog_saverv2.2.3.py","file_name":"Ameba_Blog_saverv2.2.3.py","file_ext":"py","file_size_in_byte":31048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"255284621","text":"#!/usr/bin/python\n'''\nShort script to evaluate the objective function.\n'''\nfrom collections import defaultdict\nimport argparse\nimport itertools\nimport logging\nimport sys\nimport yaml\n\nfrom calculate import run_calculate\nfrom datatypes import Datum, datum_sort_key\nimport constants as cons\n\ndef parse(args):\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n '--calculate', '-c', type=str,\n metavar = '\" commands for calculate.py\"',\n help=('These commands produce the calculated data. Leave one space '\n 'after the 1st quotation mark enclosing the arguments.'))\n parser.add_argument(\n '--reference', '-r', type=str,\n metavar='\" commands for calculate.py\"',\n help=('These commands produce the reference data. Leave one space '\n 'after the 1st quotation mark enclosing the arguments.'))\n parser.add_argument(\n '--output', '-o', type=str, metavar='filename.txt',\n help='Write data to file.')\n parser.add_argument(\n '--print', '-p', action='store_true', dest='doprint',\n help='Print data.')\n opts = parser.parse_args(args)\n return opts\n\ndef convert_energies(data_cal, data_ref):\n energies_ref = [x for x in data_ref if x.dtype in ('energy', 'energy2')]\n energies_cal = [x for x in data_cal if x.dtype in ('energy', 'energy2')]\n groups_ref = defaultdict(list)\n for datum in energies_ref:\n groups_ref[datum.group].append(datum)\n groups_cal = defaultdict(list)\n for datum in energies_cal:\n groups_cal[datum.group].append(datum)\n for gnum_ref, gnum_cal in itertools.izip(sorted(groups_ref), sorted(groups_cal)):\n group_energies_ref = groups_ref[gnum_ref]\n group_energies_cal = groups_cal[gnum_cal]\n value, index = min((datum.value, index) for index, datum in enumerate(group_energies_ref))\n minimum_cal = group_energies_cal[index].value\n for datum in group_energies_cal:\n datum.value -= minimum_cal\n\ndef import_steps(params):\n for param in params:\n if isinstance(cons.steps[param.ptype], basestring):\n param.step = float(cons.steps[param.ptype]) * param.value\n else:\n param.step = cons.steps[param.ptype]\n if param.step == 0.0:\n param.step = 0.1\n# def import_steps(params, yamlfile='steps.yaml', **kwargs):\n# '''\n# Grabs step sizes for parameters from a yaml file. 
Can also take\n# arguments to override the dictionary.\n# '''\n# with open(yamlfile, 'r') as f:\n# steps = yaml.load(f)\n# for key,value in kwargs.iteritems():\n# steps[key] = value\n# for param in params:\n# param.step = steps[param.ptype]\n\ndef import_weights(data, yamlfile='weights.yaml', **kwargs):\n with open(yamlfile, 'r') as f:\n weights = yaml.load(f)\n for key, value in kwargs.iteritems():\n weights[key] = value\n for datum in data:\n if datum.dtype == 'eig' or datum.dtype == 'eigz':\n if datum.i == 0 and datum.j == 0:\n datum.weight = weights['eig_i']\n elif datum.i == datum.j:\n datum.weight = weights['eig_d']\n else:\n datum.weight = weights['eig_o']\n elif datum.dtype == 'hess':\n if datum.i == datum.j:\n datum.weight = weights['hess_11']\n else:\n datum.weight = weights['hess']\n else:\n datum.weight = weights[datum.dtype]\n \ndef calc_x2(data_cal, data_ref, output=None, doprint=False):\n if isinstance(data_cal, list):\n assert isinstance(data_cal[0], Datum), \\\n \"attempted to calculate objective function using an object that isn't Datum\"\n elif isinstance(data_cal, basestring):\n data_cal = run_calculate(data_cal.split())\n else:\n raise Exception('failed to determine ff calculated data')\n if isinstance(data_ref, list):\n assert isinstance(data_ref[0], Datum), \\\n \"attempted to calculate objective function using an object that isn't Datum\"\n elif isinstance(data_ref, basestring):\n data_ref = run_calculate(data_ref.split())\n else:\n raise Exception('failed to determine reference data')\n assert len(data_cal) == len(data_ref), \"number of reference and ff calculated data points don't match\"\n data_cal = sorted(data_cal, key=datum_sort_key)\n data_ref = sorted(data_ref, key=datum_sort_key)\n convert_energies(data_cal, data_ref)\n import_weights(data_ref)\n\n total_x2 = 0.\n if output or doprint:\n separate_x2 = defaultdict(float)\n for datum_ref, datum_cal in itertools.izip(data_ref, data_cal):\n single_x2 = datum_ref.weight**2 * (datum_ref.value - datum_cal.value)**2\n total_x2 += single_x2\n if output or doprint:\n separate_x2[datum_ref.dtype] += single_x2\n if output or doprint:\n lines = []\n header = '{0:<20} {1:>16} {2:<20} {3:>16} {4:>16} {5:>16}'.format(\n 'ref', 'ref value', 'cal', 'cal value', 'weight', 'x2')\n lines.append('total x2: {}'.format(total_x2))\n for dtype, value in separate_x2.iteritems():\n lines.append('{} x2: {}'.format(dtype, value))\n lines.append('')\n lines.append(header)\n lines.append('-' * len(header))\n for datum_ref, datum_cal in itertools.izip(data_ref, data_cal):\n single_x2 = datum_ref.weight**2 * (datum_ref.value - datum_cal.value)**2\n lines.append('{0:<20} {1:>16.6f} {2:<20} {3:>16.6f} {4:>16.6f} {5:>16.6f}'.format(\n datum_ref.name, datum_ref.value, datum_cal.name, datum_cal.value, datum_ref.weight, single_x2))\n if doprint:\n for line in lines:\n print(line)\n elif output:\n lines = [x + '\\n' for x in lines]\n with open(output, 'w') as f:\n f.writelines(lines)\n return total_x2\n\nif __name__ == '__main__':\n import logging.config\n with open('logging.yaml', 'r') as f:\n cfg = yaml.load(f)\n logging.config.dictConfig(cfg)\n \n opts = parse(sys.argv[1:])\n calc_x2(opts.calculate, opts.reference, opts.output, opts.doprint)\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"104129121","text":"\"\"\"\nIn this simple RPG game, the hero fights the goblin. He has the options to:\n\n1. 
fight goblin\n2. do nothing - in which case the goblin will attack him anyway\n3. flee\n\n\"\"\"\nimport random\n\nclass character():\n def __init__(self, name, health, power, power2, heal, back, elem):\n self.name = name\n self.health = health\n self.power = power\n self.power2 = power2\n self.heal = heal\n self.back = back\n self.elem = elem\n\n def attack(self, enemy):\n enemy.health -= self.power\n print('{} does {} to {}.'.format(self.name, self.power, enemy.name))\n\n def attack2(self, enemy):\n enemy.health -= self.power2\n print('critical strike! {} does {} to {}.'.format(self.name, self.power2, enemy.name))\n\n def regenerate(self):\n self.health += self.heal\n print('{} healed for {} health points.'.format(self.name, self.heal))\n \n def thorns(self, enemy):\n enemy.health -= self.back\n print('{} returns damage to {} for {} points'.format(self.name, enemy.name, self.back))\n\n def elemental(self, enemy):\n enemy.health -= self.elem\n print('{} unleashes the power of thoros on {} for {} points.'.format(self.name, enemy.name, self.elem))\n\n def print_status(self):\n print('{} has {} health, {} power and {} elemental power.'.format(self.name, self.health, self.power, self.elem))\n\n def alive(self):\n return self.health > 0\n \ndef main():\n hero1 = character('hero', 20, 5, 10, 0, 0, 0)\n goblin1 = character('goblin', 14, 4, 6, 0, 0, 0)\n medic = character('medic', 12, 2, 4, 2, 0, 0)\n shadow = character('shadow', 1, 7, 14, 0, 0, 0)\n zombie = character('zombie', 0, 5, 5, 0, 0, 0)\n thornmail = character('thornmail', 50, 1, 2, 0 , 2, 0)\n mage = character('mage', 10, 0, 0, 0, 0, 20)\n shadow_attacked = 0\n\n while goblin1.alive() and hero1.alive() and medic.alive() and shadow.alive() and thornmail.alive():\n goblin1.print_status()\n hero1.print_status()\n medic.print_status()\n shadow.print_status()\n zombie.print_status()\n thornmail.print_status()\n mage.print_status()\n print()\n print(\"what do you want to do?\")\n print(\"1. fight goblin\")\n print(\"2. do nothing\")\n print(\"3. flee\")\n print(\"> \",)\n user_input = input()\n if user_input == \"1\":\n # Hero attacks goblin\n if random.random() <= 0.2:\n hero1.attack2(goblin1)\n elif random.random() <= 0.1:\n mage.elem(goblin1)\n else:\n hero1.attack(goblin1)\n if not goblin1.alive():\n print('you have slayed the goblin!')\n elif user_input == \"2\":\n pass\n elif user_input == \"3\":\n print(\"goodbye.\")\n break\n else:\n print(\"invalid input %r\" % user_input)\n\n if goblin1.alive():\n # goblin attacks hero, medic, shadow, zombie, or thornmail\n if random.random() < 0.14:\n goblin1.attack(hero1)\n\n elif random.random() < 0.14:\n goblin1.attack(medic)\n\n if medic.alive():\n # medic heals at 20% chance\n if random.random() < 0.2:\n medic.regenerate()\n\n elif random.random() < .14:\n shadow_attacked += 1\n print('goblin attempts to attack shadow!')\n if shadow_attacked >= 10:\n goblin1.attack(shadow)\n print('goblin finally hit the shadow! 
game over.')\n\n elif random.random() < .14:\n goblin1.attack(zombie)\n print('zombie takes damage but remains in battle!')\n\n elif random.random() < .14:\n goblin1.attack(thornmail)\n thornmail.thorns(goblin1)\n \n elif random.random() < .14:\n goblin1.attack(mage)\n\n\n\n if not hero1.alive() and medic.alive() and shadow.alive() and thornmail.alive() and mage.alive():\n print('game over.')\n\nmain()","sub_path":"rpg-0.py","file_name":"rpg-0.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"171256228","text":"# SPDX-License-Identifier: Apache-2.0\r\n# Licensed to the Ed-Fi Alliance under one or more agreements.\r\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\r\n# See the LICENSE and NOTICES files in the project root for more information.\r\n\r\nimport configargparse\r\nfrom lms_ds_loader.constants import Constants\r\nfrom lms_ds_loader.arguments import Arguments\r\n\r\n\r\ndef parse_arguments(args_in) -> Arguments:\r\n \"\"\"\r\n Configures the command-line interface.\r\n\r\n Parameters\r\n ----------\r\n args_in : list of str\r\n Full argument list from the command line.\r\n\r\n Returns\r\n -------\r\n arguments : Arguments\r\n A populated `Arguments` object.\r\n \"\"\"\r\n\r\n assert isinstance(args_in, list), \"Argument `args_in` must be a list\"\r\n\r\n parser = configargparse.ArgParser()\r\n parser.add(\"-c\", \"--csvpath\", help=\"base path for input files\", required=True)\r\n parser.add(\r\n \"-e\",\r\n \"--engine\",\r\n help=\"database engine\",\r\n choices=[Constants.DbEngine.MSSQL, Constants.DbEngine.POSTGRESQL],\r\n default=Constants.DbEngine.MSSQL,\r\n )\r\n parser.add(\r\n \"-s\", \"--server\", help=\"database server name or IP address\", required=True\r\n )\r\n parser.add(\"--port\", help=\"server port number\", type=int)\r\n parser.add(\"-d\", \"--dbname\", help=\"database name\", required=True)\r\n\r\n USE_INTEGRATED = \"--useintegratedsecurity\"\r\n USE_INTEGRATED_SHORT = \"-i\"\r\n parser.add(\r\n USE_INTEGRATED_SHORT,\r\n USE_INTEGRATED,\r\n help=\"use Integrated Security\",\r\n action=\"store_true\",\r\n )\r\n\r\n user_name_required = (\r\n USE_INTEGRATED not in args_in and USE_INTEGRATED_SHORT not in args_in\r\n )\r\n parser.add(\"-u\", \"--username\", required=user_name_required)\r\n parser.add(\r\n \"-p\", \"--password\", required=user_name_required, env_var=\"MSSQL_PASSWORD\"\r\n )\r\n\r\n parser.add(\"-v\", \"--verbose\", help=\"Enable verbose logging\", action=\"store_true\")\r\n\r\n args_parsed = parser.parse_args(args_in)\r\n\r\n arguments = Arguments(args_parsed.csvpath, args_parsed.engine)\r\n arguments.verbose = args_parsed.verbose\r\n\r\n if args_parsed.useintegratedsecurity:\r\n arguments.set_connection_string_using_integrated_security(\r\n args_parsed.server, args_parsed.port, args_parsed.dbname\r\n )\r\n else:\r\n arguments.set_connection_string(\r\n args_parsed.server,\r\n args_parsed.port,\r\n args_parsed.dbname,\r\n args_parsed.username,\r\n args_parsed.password,\r\n )\r\n\r\n return arguments\r\n","sub_path":"src/lms-ds-loader/lms_ds_loader/argparser.py","file_name":"argparser.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"522846206","text":"#! 
/usr/bin/env python\n# coding: utf-8\n\nimport re\nimport json\nimport base64\nfrom flask import request, jsonify, g\nfrom Tools.RenderTemplate import RenderTemplate\nfrom Class.TopicMessage import BCMessage\nfrom Web import message_url_prefix as url_prefix, create_blue, verify_mns_message, redis, control, login_required\n\n__author__ = 'ZhouHeng'\n\nrt = RenderTemplate(\"Message\", url_prefix=url_prefix)\nmessage_view = create_blue('message_view', url_prefix=url_prefix, auth_required=False)\n\n\ndef cache_message(message_info):\n cache_key = \"dms_cache_message\"\n max_len = 100\n current_len = redis.llen(cache_key)\n while current_len >= max_len:\n redis.rpop(cache_key)\n current_len -= 1\n message_info = json.dumps(message_info)\n redis.lpush(cache_key, message_info)\n\n\ndef get_cache_message(start, stop):\n cache_key = \"dms_cache_message\"\n items = redis.lrange(cache_key, start, stop)\n n_items = map(json.loads, items)\n return n_items\n\n\n@message_view.route(\"/receive\", methods=[\"POST\"])\ndef receive_message_func():\n verify_r = verify_mns_message(request.method, request.headers, request.path)\n if verify_r != 1:\n return jsonify({\"success\": True, \"data\": \"not save\"})\n r_data = request.json\n message_info = dict()\n coverage_keys = {\"TopicOwner\": \"topic_owner\", \"PublishTime\": \"publish_time\", \"TopicName\": \"topic_name\",\n \"SubscriptionName\": \"subscription_name\", \"MessageId\": \"message_id\", \"Message\": \"message_content\",\n \"MessageTag\": \"message_tag\"}\n for key in coverage_keys:\n if key in r_data:\n message_info[coverage_keys[key]] = r_data[key]\n if re.match(\"^[a-z\\\\d/\\+]+=*$\", message_info[\"message_content\"], re.I) is not None:\n message_info[\"message_content\"] = base64.b64decode(message_info[\"message_content\"])\n if message_info[\"topic_name\"] == \"bc\":\n r, h_content = BCMessage.convert_humanable(message_info[\"message_content\"])\n if r is True:\n message_info[\"readable_content\"] = h_content\n message_md5 = r_data[\"MessageMD5\"]\n message_tag = message_info.get(\"message_tag\", \"\")\n for key in message_info:\n if type(message_info[key]) == str:\n message_info[key] = message_info[key].decode(\"utf-8\")\n redis_key = \"message_%s_%s\" % (message_tag, message_md5)\n if redis.get(redis_key) is not None:\n control.new_topic_message(**message_info)\n return jsonify({\"success\": True, \"data\": \"not notification\"})\n redis.setex(redis_key, \"\", 60)\n # cache message\n cache_message(message_info)\n # 通知\n query_url = \"http://\" + request.host + url_prefix + \"/manager/\"\n notify_mode, interval_time = control.notification_topic_message(message_info, query_url)\n message_info[\"notify_mode\"] = notify_mode\n control.new_topic_message(**message_info)\n redis.setex(redis_key, notify_mode, interval_time)\n return jsonify({\"success\": True, \"data\": \"success\"})\n\n\n@message_view.route(\"/manager/\", methods=[\"GET\"])\ndef manager_page():\n tag_url = url_prefix + \"/tag/\"\n query_url = url_prefix + \"/query/\"\n url_cache_messages = url_prefix + \"/cache/\"\n test = '{{test}}'\n return rt.render(\"Index.html\", tag_url=tag_url, query_url=query_url, url_cache_messages=url_cache_messages)\n\n\n@message_view.route(\"/tag/\", methods=[\"GET\"])\n@login_required\ndef my_tag_data():\n tags = control.get_user_topic_tag(g.user_name, g.user_role)\n return jsonify({\"status\": True, \"data\": tags})\n\n\n@message_view.route(\"/tag/\", methods=[\"POST\"])\n@login_required\ndef add_tag_data():\n request_data = request.json\n message_tag 
= request_data[\"message_tag\"]\n notify_mode = request_data[\"notify_mode\"]\n access_ding = request_data.get(\"access_ding\", None)\n ding_mode = int(request_data.get(\"ding_mode\", \"1\"))\n interval_time = request_data[\"interval_time\"]\n l = control.new_user_topic_tag(g.user_name, g.user_role, message_tag, notify_mode, access_ding=access_ding,\n ding_mode=ding_mode, interval_time=interval_time)\n if l == 1:\n return jsonify({\"status\": True, \"data\": message_tag, \"location\": url_prefix + \"/manager/\"})\n else:\n return jsonify({\"status\": False, \"data\": \"标签可能已存在\"})\n\n\n@message_view.route(\"/tag/\", methods=[\"PUT\"])\n@login_required\ndef update_tag_data():\n request_data = request.json\n allow_keys = [\"message_tag\", \"notify_mode\", \"access_ding\", \"ding_mode\", \"interval_time\"]\n for key in request_data:\n if key not in allow_keys:\n return jsonify({\"status\": False, \"data\": \"Not Allow %s\" % key})\n l = control.update_user_topic_tag(g.user_name, g.user_role, **request_data)\n request_data[\"exec_r\"] = l\n request_data[\"op\"] = \"PUT\"\n return jsonify({\"status\": True, \"data\": request_data})\n\n\n@message_view.route(\"/tag/\", methods=[\"DELETE\"])\n@login_required\ndef delete_tag_data():\n request_data = request.json\n message_tag = request_data[\"message_tag\"]\n l = control.delete_user_topic_tag(g.user_name, g.user_role, message_tag)\n request_data[\"exec_r\"] = l\n request_data[\"op\"] = \"DELETE\"\n return jsonify({\"status\": True, \"data\": request_data})\n\n\n@message_view.route(\"/query/\", methods=[\"GET\"])\ndef query_message():\n message_id = request.args[\"message_id\"]\n topic_owner = \"1530531001163833\"\n if \"topic_owner\" in request.args:\n topic_owner = request.args[\"topic_owner\"]\n topic_name = \"JYWaring\"\n if \"topic_name\" in request.args:\n topic_name = request.args[\"topic_name\"]\n db_items = control.query_topic_message(topic_owner=topic_owner, topic_name=topic_name, message_id=message_id)\n return jsonify({\"status\": True, \"data\": db_items})\n\n\n@message_view.route(\"/cache/\", methods=[\"GET\"])\ndef cache_message_action():\n start = 0\n end = 100\n if \"start\" in request.args:\n start = int(request.args[\"start\"])\n if \"end\" in request.args:\n end = int(request.args[\"end\"]) - 1\n # message_id = request.args[\"message_id\"]\n # topic_owner = \"1530531001163833\"\n # if \"topic_owner\" in request.args:\n # topic_owner = request.args[\"topic_owner\"]\n # topic_name = \"JYWaring\"\n # if \"topic_name\" in request.args:\n # topic_name = request.args[\"topic_name\"]\n # db_items = control.query_topic_message(topic_owner=topic_owner, topic_name=topic_name, message_id=message_id)\n items = get_cache_message(start, end)\n return jsonify({\"status\": True, \"data\": items})\n","sub_path":"Web/views/messge_manager_view.py","file_name":"messge_manager_view.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"615933804","text":"from .models import *\nfrom . 
import twitter_api\nfrom datetime import datetime\nfrom backend.main.analyze import get_analysis_result\nfrom backend.main import twitter_api\n\ndef run_update():\n\n batch_job = {} # {hashtag: [users subscribed to this hashtag]}\n\n print(\"SCANNING...\")\n\n # go through all subscriptions and add jobs that need new updates\n subscriptions = Subscription.objects.all()\n for sub in subscriptions:\n # check if ready to scan\n if not sub.last_scanned or ((datetime.today() - sub.last_scanned).days >= sub.frequency):\n if sub.hashtag_id.topic not in batch_job:\n batch_job[sub.hashtag_id.topic] = [] \n batch_job[sub.hashtag_id.topic].append(sub.user_id.username)\n\n print(\"RUNNING ANALYSIS\")\n \n if not batch_job:\n print(\"JOB DONE\")\n return \n\n # make connection\n conn = twitter_api.create_conn()\n\n # run analysis on hashtags in batch_job\n for topic, users in batch_job.items():\n get_analysis_result(topic, conn)\n for user in users:\n sub = Subscription.objects.filter(user_id = user).filter(hashtag_id=topic)\n sub.update(last_scanned=datetime.today())\n\n print(\"JOB DONE\")","sub_path":"backend/backend/main/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"81659845","text":"\"\"\"\nLongest common subsequence implementation starter\n\"\"\"\n\nimport main\n\n\nif __name__ == '__main__':\n with open('lab_2/diff_report_example.txt', 'r', encoding='utf-8') as file:\n report_example = file.read()\n ORIGINAL_TEXT = 'I have a cat. \\nIts body is covered with bushy white fur.'\n SUSPICIOUS_TEXT = 'I have a cat. \\nIts body is covered with shiny black fur.'\n print('\\tOriginal text:\\n' + ORIGINAL_TEXT)\n print('\\n\\tSuspicious text:\\n' + SUSPICIOUS_TEXT)\n print('\\n\\t...tokenizing original text...')\n original_text_tokens = main.tokenize_by_lines(ORIGINAL_TEXT)\n print('\\t...tokenizing suspicious text...')\n suspicious_text_tokens = main.tokenize_by_lines(SUSPICIOUS_TEXT)\n print('\\t...creating a line-by-line diff report...\\n\\n')\n report = (main.create_diff_report(original_text_tokens, suspicious_text_tokens,\n main.accumulate_diff_stats(original_text_tokens, suspicious_text_tokens)))\n print(report)\n\n RESULT = report\n assert RESULT == report_example, 'LCS implementation not working'\n","sub_path":"lab_2/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"578518402","text":"import pickle, json\nimport numpy\nfrom scipy.misc import factorial\n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\nimport matplotlib.cm as cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\nFIG_EXT = 'eps'\n\nn = 255.0\ncm_custom = LinearSegmentedColormap.from_list(\n \"riedel\",\n [\n (0.0, (1.0, 1.0, 1.0)),\n (2.5/15, (68/n, 84/n, 158/n)),\n (5.0/15, (117/n, 192/n, 235/n)),\n (10.0/15, (234/n, 230/n, 76/n)),\n (12.5/15, (206/n, 59/n, 46/n)),\n (1.0, (142/n, 33/n, 39/n))\n ]\n )\ncm_custom.set_under(color='white')\ncolor_lblue = (0.5686274509803921, 0.6823529411764706, 0.8901960784313725)\ncolor_dblue = (0.13725490196078433, 0.28627450980392155, 0.5568627450980392)\ncolor_dred = (1.0, 0.20784313725490197, 0.16470588235294117)\ncolor_dgreen = (0.10588235294117647, 0.6470588235294118, 
0.17254901960784313)\ncolor_dyellow = (0.8352941176470589, 0.592156862745098, 0.13725490196078433)\n\n\nP_GLOBAL = {\n # backend parameters\n 'backend': 'ps',\n 'text.usetex': False,\n\n # main parameters\n 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica'],\n 'font.size': 7,\n 'lines.linewidth': 1,\n 'lines.dash_capstyle': 'round',\n\n # axes\n 'axes.labelsize': 7,\n 'axes.linewidth': 0.5,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'xtick.major.pad': 3,\n 'xtick.minor.pad': 3,\n 'ytick.major.pad': 3,\n 'ytick.minor.pad': 3,\n\n 'xtick.major.size': 2,\n 'ytick.major.size': 2,\n 'xtick.minor.size': 1.5,\n 'ytick.minor.size': 1.5,\n}\n\nP_INSET = {\n 'font.size': 6,\n 'xtick.labelsize': 6,\n 'ytick.labelsize': 6,\n 'axes.labelsize': 6,\n 'lines.linewidth': 0.5,\n 'xtick.major.size': 2,\n 'ytick.major.size': 2,\n 'xtick.minor.size': 1,\n 'ytick.minor.size': 1,\n}\n\n\nclass plot_params:\n\n def __init__(self, **kwds):\n old_vals = {}\n for key in kwds:\n old_val = matplotlib.rcParams[key]\n if old_val != kwds[key]:\n old_vals[key] = old_val\n self.old_vals = old_vals\n self.new_vals = kwds\n\n def __enter__(self):\n matplotlib.rcParams.update(self.new_vals)\n\n def __exit__(self, *args):\n matplotlib.rcParams.update(self.old_vals)\n\nclass inset:\n\n def __init__(self, *args, **kwds):\n self.args = args\n self.kwds = kwds\n self.params = plot_params(**P_INSET)\n\n def __enter__(self):\n self.params.__enter__()\n return makeSubplot(*self.args, **self.kwds)\n\n def __exit__(self, *args):\n self.params.__exit__()\n\n\nclass figsize:\n\n def __init__(self, columns, aspect):\n column_width_inches = 85 / 25.4 # 85 mm\n\n fig_width = column_width_inches * columns\n fig_height = fig_width * aspect # height in inches\n fig_size = [fig_width, fig_height]\n\n self.params = plot_params(**{'figure.figsize': fig_size})\n\n def __enter__(self):\n self.params.__enter__()\n return plt.figure()\n\n def __exit__(self, *args):\n self.params.__exit__()\n\n\ndef dashes(s):\n if s.endswith('--'):\n return (6, 3)\n elif s.endswith('-'):\n return []\n elif s.endswith('-.'):\n return (5,3,1,3)\n elif s.endswith(':'):\n return (0.5,2)\n raise Exception(\"Unknown dash style \" + s)\n\n\ndef buildGHZDistributions():\n\n with open('data/ghz_binning_ardehali_2p_Q.pickle') as f:\n qdata = pickle.load(f)\n\n with open('data/ghz_binning_ardehali_2p_number.pickle') as f:\n pdata = pickle.load(f)\n\n q_s1x_s2x, q_s12x_s12y = qdata[1]\n p_s1x_s2x, p_s12x_s12y = pdata[1]\n\n q1, q1_edges = q_s1x_s2x\n q2, q2_edges = q_s12x_s12y\n p1, p1_edges = p_s1x_s2x\n p2, p2_edges = p_s12x_s12y\n\n X = (q2_edges[0][1:] + q2_edges[0][:-1]) / 2\n Y = (q2_edges[1][1:] + q2_edges[1][:-1]) / 2\n g1, g2 = numpy.meshgrid(X, Y)\n\n #print (q2 * (-g1 + g2)).sum() / q2.sum()\n #exit(0)\n\n # Adjust data to get rid of \"seam\" in the middle of the vertical wall\n for i in xrange(q1.shape[0]):\n for j in xrange(q1.shape[1]):\n if q1[i,j] > 0:\n q1[i,j] = 0\n break\n\n for j in xrange(q1.shape[1]):\n for i in xrange(q1.shape[0]):\n if q1[i,j] > 0:\n q1[i,j] = 0\n break\n\n corrs = [\n dict(data=p1, edges=q1_edges, zmax=5.5, single=True, Q=False),\n dict(data=p2, edges=q2_edges, zmax=3.5, single=False, Q=False),\n dict(data=q1, edges=p1_edges, zmax=5.5, single=True, Q=True),\n dict(data=q2, edges=p2_edges, zmax=3.5, single=False, Q=True)\n ]\n\n with figsize(1, 0.8) as fig:\n for i, corr in enumerate(corrs):\n ax = fig.add_subplot(2, 2, i + 1, projection='3d')\n ax.view_init(elev=40., azim=245.)\n\n data = corr['data']\n edges = 
corr['edges']\n\n X = (edges[0][1:] + edges[0][:-1]) / 2\n Y = (edges[1][1:] + edges[1][:-1]) / 2\n\n # normalize on 1\n data = data.astype(numpy.float64) / data.sum() / (X[1] - X[0]) / (Y[1] - Y[0]) * 100\n\n X, Y = numpy.meshgrid(X, Y)\n\n ax.plot_surface(X, Y, data.T, rstride=2, cstride=2, cmap=cm_custom,\n linewidth=0, antialiased=False)\n\n #ax.contour(X, Y, data.T, cmap=cm_custom,\n # levels=numpy.linspace(0, corr['zmax'], 25))\n\n representation = 'Q' if corr['Q'] else 'pos-P'\n ax.set_zlabel('\\n\\nprobability ($\\\\times 10^{-2}$)')\n fig.text(0.2 + 0.5 * (i % 2), 0.93 - 0.48 * (i / 2),\n \"SU(2)-Q\" if corr['Q'] else 'positive-P', fontsize=P_GLOBAL['font.size'])\n\n if corr['single']:\n ax.set_xlabel('\\n\\n$\\\\mathrm{Re}\\,\\\\sigma_1^x$')\n ax.set_ylabel('\\n\\n$\\\\mathrm{Re}\\,\\\\sigma_2^x$')\n #ax.set_zlabel('\\n\\n$P_{\\\\mathrm{' + representation + '}}$, $\\\\times 10^{-2}$')\n\n ax.set_xlim3d(-3.5, 3.5)\n ax.xaxis.set_ticks(range(-3, 4))\n ax.yaxis.set_ticks(range(-3, 4))\n else:\n ax.set_xlabel('\\n\\n$\\\\mathrm{Re}\\,\\\\sigma_1^x \\\\sigma_2^x$')\n ax.set_ylabel('\\n\\n$\\\\mathrm{Re}\\,\\\\sigma_1^y \\\\sigma_2^y$')\n #ax.set_zlabel('\\n\\n$P_{\\\\mathrm{' + representation + '}}$, $\\\\times 10^{-2}$')\n\n ax.set_xlim3d(-8.5, 6.5)\n ax.set_ylim3d(-6.5, 8.5)\n ax.xaxis.set_ticks(range(-8, 8, 2))\n ax.yaxis.set_ticks(range(-6, 10, 2))\n ax.zaxis.set_ticks([0, 1, 2, 3])\n\n ax.set_zlim3d(0, corr['zmax'])\n\n # clear background panes\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n\n #ax.w_xaxis.set_rotate_label(False)\n #ax.w_yaxis.set_rotate_label(False)\n\n fig.text(0.1 + 0.5 * (i % 2), 0.93 - 0.48 * (i / 2), ('(a)', '(b)', '(c)', '(d)')[i],\n fontsize=P_GLOBAL['font.size'] + 2)\n\n\n fig.tight_layout(pad=1.3)\n fig.savefig('figures/ghz_distributions.' 
+ FIG_EXT)\n\n\ndef buildCooperative():\n\n def G_analytic(gamma, I, J, N):\n if gamma is None:\n s = 0\n for n in xrange(N - J + 1):\n s += factorial(N - n) / factorial(N - J - n)\n return factorial(N) / ((N + 1) * (factorial(N - I))) * s\n else:\n def binom(n, k):\n if k < 0 or k > n: return 0\n return factorial(n) / factorial(k) / factorial(n - k)\n s = 0\n for n in xrange(N - J + 1):\n for i in xrange(I + 1):\n s += factorial(n) * factorial(N - n) ** 2 / factorial(N - I) / factorial(N - J - n) * \\\n binom(I, i) * binom(N - J, n - i) * (1 - gamma) ** i * gamma ** (I - i)\n return s / (N + 1)\n\n def g_analytic(theta, J, N):\n gamma = numpy.cos(theta) ** 2\n return G_analytic(gamma, J, J, N) / G_analytic(None, J, J, N)\n\n def deltas_analytic(thetas, J, N):\n gs = g_analytic(thetas, J, N)\n gs_3theta = g_analytic(thetas * 3, J, N)\n return (3 * gs - gs_3theta - 2)\n\n with open('data/cooperative-N1-J1-21.json') as f:\n n1 = json.load(f)\n with open('data/cooperative-N2-J2-25.json') as f:\n n2 = json.load(f)\n\n with figsize(0.75, 1) as fig:\n for i, n in enumerate([n1, n2]):\n ax = fig.add_subplot(2, 1, i + 1)\n\n thetas_scaled = numpy.array(n['thetas'])\n deltas = numpy.array(n['deltas_mean'])\n err = numpy.array(n['deltas_err'])\n\n #ax.errorbar(thetas, deltas, yerr=err)\n ax.fill_between(thetas_scaled, deltas-err, deltas+err,\n facecolor=color_lblue, interpolate=True,\n color=color_dblue,\n linewidth=0.3)\n\n #ax.plot(thetas_scaled, deltas, 'k-')\n ax.plot(thetas_scaled,\n deltas_analytic(thetas_scaled / numpy.sqrt(i + 1), i + 1, i + 1),\n 'k--', dashes=dashes('--'))\n\n ax.set_xlim((thetas_scaled[0], thetas_scaled[-1]))\n ax.set_ylim((-0.05, 0.45 if i == 0 else 0.55))\n\n ax.set_xlabel(\"$\\\\theta\" + (\"\\\\sqrt{2}\" if i == 1 else \"\") + \"$ (rad)\")\n ax.set_ylabel(\"Violation\")\n\n #fig.text(0.01 if i == 0 else 0.5, 0.9, 'a' if i == 0 else 'b',\n # fontsize=P_GLOBAL['font.size'] + 2, fontweight='bold')\n ax.text(0.02, 0.35 if i == 0 else 0.43, '(a)' if i == 0 else '(b)',\n fontsize=P_GLOBAL['font.size'] + 2)\n\n fig.tight_layout(pad=0.7)\n fig.savefig('figures/cooperative.' + FIG_EXT)\n\n\ndef buildGHZCorrelations():\n\n def getF_analytical(particles, quantity):\n \"\"\"\n Returns 'classical' and 'quantum' predictions for the\n Mermin's/Ardehali's state and operator.\n \"\"\"\n if quantity == 'F_mermin':\n return 2. ** (particles / 2), 2. ** (particles - 1)\n elif quantity == 'F_ardehali':\n return 2. ** ((particles + 1) / 2), 2. 
** (particles - 0.5)\n else:\n raise NotImplementedError(quantity)\n\n def filter_data(data, **kwds):\n result = []\n for d in data:\n for key in kwds:\n if kwds[key] != d[key]:\n break\n else:\n result.append(d)\n\n ns = []\n vals = []\n errs = []\n lhvs = []\n qms = []\n for r in sorted(result, key=lambda x: x['particles']):\n if r['quantity'] in ('F_ardehali', 'F_mermin'):\n cl, qm = getF_analytical(r['particles'], r['quantity'])\n if r['error'] / qm > 0.5:\n continue\n lhvs.append(cl)\n qms.append(qm)\n\n ns.append(r['particles'])\n vals.append(r['mean'])\n errs.append(r['error'])\n\n return dict(ns=numpy.array(ns), mean=numpy.array(vals), error=numpy.array(errs),\n lhvs=numpy.array(lhvs), qms=numpy.array(qms))\n\n\n with open('data/ghz_sampling.json') as f:\n data = json.load(f)\n\n with figsize(0.75, 1) as fig:\n\n G = matplotlib.gridspec.GridSpec(2, 2)\n\n ax1 = fig.add_subplot(G[0,0])\n ax2 = fig.add_subplot(G[0,1])\n\n ax1.set_xlabel('particles', color='white') # need it to make matplotlib create proper spacing\n fig.text(0.51, 0.52, 'particles', fontsize=P_GLOBAL['axes.labelsize'])\n ax1.set_ylabel('$\\\\langle F \\\\rangle / \\\\langle F \\\\rangle_{\\\\mathrm{QM}}$')\n\n representation = 'Q'\n violations = filter_data(data['violations'], representation=representation, size=10**9)\n\n ns = violations['ns']\n qms = violations['qms']\n mean = violations['mean'] / qms\n err = violations['error'] / qms\n\n cl_ns = numpy.arange(1, 61)\n cl_qm = [getF_analytical(n, 'F_ardehali' if n % 2 == 0 else 'F_mermin') for n in cl_ns]\n cl_qm = numpy.array(zip(*cl_qm)[0]) / numpy.array(zip(*cl_qm)[1])\n\n ax1.set_xlim((0, 10.5))\n ax1.set_ylim((-0.05, 1.6))\n ax2.set_xlim((49.5, 61))\n ax2.set_ylim((-0.05, 1.6))\n\n for ax in (ax1, ax2):\n ax.plot(cl_ns, numpy.ones(60), color='grey', linewidth=0.5,\n linestyle='--', dashes=dashes('--'))\n ax.errorbar(ns, mean, yerr=err, color=color_dblue, linestyle='None',\n capsize=1.5)\n ax.plot(cl_ns, cl_qm, color=color_dred, linestyle='-.', dashes=dashes('-.'))\n\n # hide the spines between ax and ax2\n ax1.spines['right'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.yaxis.tick_right()\n ax1.xaxis.set_ticks([1, 5, 10])\n ax2.xaxis.set_ticks([50, 55, 60])\n ax2.tick_params(labelright='off') # don't put tick labels at the right side\n ax1.yaxis.tick_left()\n\n # add cut-out lines\n d = .015 # how big to make the diagonal lines in axes coordinates\n # arguments to pass plot, just so we don't keep repeating them\n kwargs = dict(transform=ax2.transAxes, color='k', clip_on=False,\n linewidth=P_GLOBAL['axes.linewidth'])\n ax2.plot((-d,+d),(-d,+d), **kwargs)\n ax2.plot((-d,+d),(1-d,1+d), **kwargs)\n\n kwargs.update(transform=ax1.transAxes,\n linewidth=P_GLOBAL['axes.linewidth']) # switch to the bottom axes\n ax1.plot((1-d,1+d),(1-d,1+d), **kwargs)\n ax1.plot((1-d,1+d),(-d,+d), **kwargs)\n ax1.text(1, 1.25, '(a)', fontsize=P_GLOBAL['font.size'] + 2)\n\n fig.subplots_adjust(wspace=0.001)\n\n ax = fig.add_subplot(G[1,:])\n ax.set_xlabel('particles')\n ax.set_ylabel('$\\\\log_{2}($rel. 
error$)$')\n\n corr1 = filter_data(data['different_order_correlations'],\n representation='Q', quantity='N_total', size=10**9)\n corrm = filter_data(data['violations'],\n representation='Q', size=10**9)\n\n ax.plot(corr1['ns'], numpy.log2(corr1['error'] / corr1['ns'] * 2.),\n color=color_dgreen, linestyle='--', dashes=dashes('--'))\n ax.plot(corrm['ns'], numpy.log2(corrm['error'] / corrm['qms'])[:50],\n color=color_dblue)\n\n ref_ns = numpy.arange(1, 36)\n ax.plot(ref_ns, ref_ns / 2. - 20, linestyle=':', dashes=dashes(':'), linewidth=0.75, color='grey')\n\n ax.set_xlim((0, 61))\n ax.set_ylim((-24, 0))\n ax.yaxis.set_ticks(range(-20, 1, 5))\n\n #for i in (0, 1):\n # fig.text(0.03 if i == 0 else 0.52, 0.9, 'a' if i == 0 else 'b',\n # fontsize=P_GLOBAL['font.size'] + 2, fontweight='bold')\n ax.text(3, -6, '(b)', fontsize=P_GLOBAL['font.size'] + 2)\n\n fig.tight_layout(pad=0.7)\n\n p1 = ax1.get_position()\n p2 = ax2.get_position()\n\n dwidth = (p2.x0 - p1.x0 - p1.width) / 2. - 0.01\n ax1.set_position([p1.x0, p1.y0, p1.width + dwidth, p1.height])\n ax2.set_position([p2.x0 - dwidth, p2.y0, p2.width + dwidth, p2.height])\n\n fig.savefig('figures/ghz_correlations.' + FIG_EXT)\n\n\ndef buildGHZDecoherence():\n\n with open('data/ghz_decoherence.json') as f:\n data = json.load(f)\n\n def find(**kwds):\n for dataset in data:\n for kwd in kwds:\n if kwds[kwd] != dataset[kwd]:\n break\n else:\n return dataset\n return None\n\n ns = (2, 3, 4, 6)\n colors = {2: color_dblue, 3: color_dred, 4: color_dgreen, 6: color_dyellow}\n ndashes = {2: '-', 3: '--', 4: '-.', 6: ':'}\n\n with figsize(0.75, 1 / 1.6) as fig:\n ax = fig.add_subplot(1, 1, 1)\n\n for i, n in enumerate(ns):\n\n dataset = find(particles=n, quantity='N_total')\n mean = numpy.array(dataset['mean'])\n\n # normalize\n mean /= dataset['particles'] / 2.\n time = numpy.arange(mean.size)\n\n # filter near-zero parts, helps with readability\n indices = (mean > 0.05)\n mean = mean[indices]\n time = time[indices]\n\n ax.plot(time, mean, color=colors[n], linestyle=ndashes[n], dashes=dashes(ndashes[n]))\n\n #xs = numpy.linspace(48, 60)\n #ax.plot(xs, [0.9 - i * 0.12] * xs.size, color=colors[n],\n # linestyle=ndashes[n], dashes=dashes(ndashes[n]))\n #ax.text(63, 0.87 - i * 0.12, str(n) + \" particles\", fontsize=P_GLOBAL['font.size']-1)\n\n\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 1)\n ax.set_xlabel('$\\\\tau$')\n ax.set_ylabel('$F(\\\\tau)/F(0)$')\n\n fig.tight_layout(pad=0.3)\n fig.savefig('figures/ghz_decoherence.' 
+ FIG_EXT)\n\n\n\nif __name__ == '__main__':\n\n with plot_params(**P_GLOBAL):\n distr_params = {'xtick.labelsize': 6, 'ytick.labelsize': 6}\n with plot_params(**distr_params):\n buildGHZDistributions()\n buildCooperative()\n buildGHZCorrelations()\n buildGHZDecoherence()\n","sub_path":"buildfigures.py","file_name":"buildfigures.py","file_ext":"py","file_size_in_byte":17125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"412075305","text":"from .agds_helpers import AGDSHelpers\nfrom .agds import AGDS\n\ndata_file = 'agds/data.txt'\n\nagds = AGDS(data_file)\nh = AGDSHelpers(agds)\n\nprint(h.filter_single_param('leaf-length', 1, 10))\n\nprint(h.filter_multi_param([('leaf-length', 4.6, 5.1), ('leaf-width', 3.1, 3.5)]))\n\n\n","sub_path":"agds/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"560336992","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/12/18 21:01\n# @Author : zhaochencheng\n# @QQ : 907779487\n# @version :Python 3.5.2\n# @File : test_1.py\n# @Software: PyCharm Community Edition\n'''\n题目:有四个数字:1、2、3、4,能组成多少个互不相同且无重复数字的三位数?各是多少?\n程序分析:可填在百位、十位、个位的数字都是1、2、3、4。组成所有的排列后再去 掉不满足条件的排列。\n\n'''\nsum = 0\nL = []\nfor i in range(1,5):\n for j in range(1,5):\n for k in range(1,5):\n if (i != j) and (i != k) and (j != k) :\n sum = i*100+j*10+k\n L.append(sum)\nprint(L)\nprint(\"这样的数共有:\",len(L))\n\n'''\n网上做法\n'''\n#python自带这个函数的\nfrom itertools import permutations\n\nfor i in permutations([1, 2, 3, 4], 3):\n print(i)","sub_path":"python_100_example/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"42452955","text":"# output :['Hari', 'Shyam' ....] -> ['iraH', 'mayhS' .....]\r\n\r\nnames = ['Hari', 'Ram', 'Shyam', 'Sita']\r\nreverse = lambda nam: nam[::-1]\r\nreversed_names = list(map(reverse, names))\r\nprint(reversed_names)\r\n\r\n# output : [2, 3, 5, 9] -> [2, 6, 120, ...] (factorial)\r\n\r\nnumbers = [2, 3, 5, 9]\r\n\r\n\r\ndef facto(f):\r\n temp = 1\r\n for i in range(1, f + 1):\r\n temp = temp * i\r\n return temp\r\n\r\n\r\nprint(list(map(facto, numbers)))\r\n\r\n# ['Hari' , 'Shyam' ....] -> [4, 5 ...] (number of character)\r\n\r\ncharacter = lambda name: len(name)\r\nprint(list(map(character, names)))\r\n\r\n# ['xyz@gmail.com' ......] 
-> ['xyz' ....]\r\nemails = ['abc@gmail.com', 'xyz@hotmail.com', 'jkl@gmail.com', 'rty@yahoo.com', 'wes@outlook.com']\r\n\r\n\r\ndef name_extraction(email):\r\n index_value = email.index('@')\r\n name = email[:index_value]\r\n return name\r\n\r\n\r\nprint(list(map(name_extraction, emails)))\r\n","sub_path":"lambda_map_2.py","file_name":"lambda_map_2.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27805870","text":"# encoding=utf8\n\n\"\"\"\ndescription: \n求表达式A+B或者A-B的结果,以字符串类型返回。A、B为只由数字构成的字符串,分别表达十进制正整数;A的数值严格大于B的数值。\nlimit:\nlen(B) <= len(A) < 100\n\"\"\"\n\n\ndef solve(a0: str, b0: str, add: bool):\n a = '0' + a0\n b = '0' * (len(a) - len(b0)) + b0\n\n carry, res = 0, ''\n for i in range(len(a) - 1, -1, -1):\n total = carry + int(a[i]) + (1 if add else -1) * int(b[i])\n carry = (total + 10) // 10 - 1\n remain = (total + 10) % 10\n res = str(remain) + res\n res = res.lstrip('0')\n print('%s %s %s -> %s' % (a0, '+' if add else '-', b0, res))\n\n\nsolve('1', '0', True)\nsolve('9', '1', True)\nsolve('999', '1', True)\nsolve('999', '998', False)\nsolve('999', '123', False)\nsolve('1000', '999', False)\nsolve('1000', '1', False)\n","sub_path":"apps/interview_codes/two_big_integers_add_sub.py","file_name":"two_big_integers_add_sub.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"388460444","text":"# -*- coding: utf-8 -*-\n\"\"\"\nname:\ntype:\ncityID:\naddress:\nlat:\nlng:\nurl:\n\"\"\"\n\nimport re\nimport os\nimport csv\nimport json\nimport time\n\nimport Geocode as gmap\n \nregionList = ['[01]新北市', '[31]臺北市', '[32]臺北市', '[33]臺北市', '[34]臺北市', '[35]臺北市',\n '[36]臺北市', '[37]臺北市', '[38]臺北市', '[39]臺北市', '[40]臺北市', '[41]臺北市',\n '[42]臺北市']\n\npath = './school/'\nsavepath='./school_data/'\nfilename = 'school_{}.json'\n\nkey = ''\n\nfor schooltype in os.listdir(path):\n filepath = path + schooltype\n with open(filepath, encoding='utf8') as rf:\n data = csv.DictReader(rf)\n for row in data:\n if row.get('縣市名稱') in regionList:\n temp = {}\n #name\n temp['name'] = row.get('學校名稱')\n #cityID & address\n address = row.get('地址')\n temp['cityID'] = re.findall(r'\\[(\\d{3})\\]',address)[0]\n temp['address'] = address.split(']')[-1]\n #lat lng\n temp.update(gmap.geocoding(address, key))\n #type\n temp['type'] = 'school'\n #url\n temp['url'] = row.get('網址')\n\n print(row.get('代碼'))\n #save\n with open(savepath + filename.format(row.get('代碼')),\n 'w', encoding='utf8') as wf:\n json.dump(temp, wf)\n time.sleep(0.05)\n \n \n ","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"280468474","text":"#Imports\r\nfrom urllib.request import urlretrieve\r\nimport re\r\nimport datetime\r\n\r\nURL_PATH = 'https://s3.amazonaws.com/tcmg476/http_access_log'\r\nLOCAL_FILE = 'local_copy.log'\r\ntotal_requests = 0\r\nyear_count = 0\r\n\r\nlocal_file, headers = urlretrieve(URL_PATH, LOCAL_FILE);\r\n\r\nFILE_NAME = 'path/to/file'\r\n\r\n#counts and matches for dates\r\noct94_count = 0\r\nnov94_count = 0\r\ndec94_count = 0\r\njan94_count = 0\r\nfeb94_count = 0\r\nmar94_count = 0\r\napr94_count = 0\r\nmay94_count = 0\r\njun94_count = 0\r\njul94_count = 0\r\naug94_count = 0\r\nsep94_count = 0\r\noct95_count = 0\r\njan_match = 0\r\nfeb_match = 0\r\nmar_match = 0\r\napr_match = 0\r\nmay_match = 
0\r\njun_match = 0\r\njul_match = 0\r\naug_match = 0\r\nsep_match = 0\r\noct_match = 0\r\noct95_match = 0\r\nnov_match = 0\r\ndec_match = 0\r\n\r\nf = open(LOCAL_FILE) #opens local file\r\nfor line in f: #goes through line by line to check for requests per date\r\n if 'Oct/1994' in line:\r\n oct94_count += 1\r\n elif 'Nov/1994' in line:\r\n nov94_count += 1\r\n elif 'Dec/1994' in line:\r\n dec94_count += 1\r\n elif 'Jan/1995' in line:\r\n jan94_count += 1\r\n elif 'Feb/1995' in line:\r\n feb94_count += 1\r\n elif 'Mar/1995' in line:\r\n mar94_count += 1\r\n elif 'Apr/1995' in line:\r\n apr94_count += 1\r\n elif 'May/1995' in line:\r\n may94_count += 1\r\n elif 'Jun/1995' in line:\r\n jun94_count += 1\r\n elif 'Jul/1995' in line:\r\n jul94_count += 1\r\n elif 'Aug/1995' in line:\r\n aug94_count += 1\r\n elif 'Sep/1995' in line:\r\n sep94_count += 1\r\n else:\r\n oct95_count += 1\r\n#Prints request number per month/year\r\nprint(f'Oct/1994 requests:', oct94_count)\r\nprint(f'Nov/1994 requests:', nov94_count)\r\nprint(f'Dec/1994 requests:', dec94_count)\r\nprint(f'Jan/1995 requests:', jan94_count)\r\nprint(f'Feb/1995 requests:', feb94_count)\r\nprint(f'Mar/1995 requests:', mar94_count)\r\nprint(f'Apr/1995 requests:', apr94_count)\r\nprint(f'May/1995 requests:', may94_count)\r\nprint(f'Jun/1995 requests:', jun94_count)\r\nprint(f'Jul/1995 requests:', jul94_count)\r\nprint(f'Aug/1995 requests:', aug94_count)\r\nprint(f'Sep/1995 requests:', sep94_count)\r\nprint(f'Oct/1995 requests:', oct95_count)\r\nprint()\r\nprint()\r\n\r\npages = {}\r\n\r\nf = open(LOCAL_FILE)\r\nmon = 0\r\ntue = 0\r\nwed = 0\r\nthur = 0\r\nfri = 0\r\nsat = 0\r\nsun = 0\r\n\r\nf = open(LOCAL_FILE) #opens file\r\nfor line in f:\r\n pieces = re.split('.+ \\[(.+) .+\\] \"[A-Z]{3,4} (.+) HTTP/1.0\" ([0-9]{3})', line) #splits line\r\n if len(pieces) < 4:\r\n continue\r\n date = pieces[1].split(':')\r\n date = date[0]\r\n days = datetime.datetime.strptime(date, '%d/%b/%Y')\r\n\r\n weekday = datetime.datetime.weekday(days)\r\n\r\n #counts day of week\r\n if weekday == 0:\r\n mon += 1 \r\n elif weekday == 1:\r\n tue += 1\r\n elif weekday == 2:\r\n wed += 1\r\n elif weekday == 3:\r\n thur += 1\r\n elif weekday == 4:\r\n fri += 1\r\n elif weekday == 5:\r\n sat += 1\r\n elif weekday == 6:\r\n sun += 1\r\n #counts months\r\n if 'Jan' in line:\r\n jan_match += 1\r\n if 'Feb' in line:\r\n feb_match += 1\r\n if 'Mar' in line:\r\n mar_match += 1\r\n if 'Apr' in line:\r\n apr_match += 1\r\n if 'May' in line:\r\n may_match += 1\r\n if 'Jun' in line:\r\n jun_match += 1\r\n if 'Jul' in line:\r\n jul_match += 1\r\n if 'Aug' in line:\r\n aug_match += 1\r\n if 'Sep' in line:\r\n sep_match += 1\r\n if 'Oct/1994' in line:\r\n oct_match += 1\r\n if 'Oct/1995' in line:\r\n oct95_match += 1\r\n if 'Nov' in line:\r\n nov_match += 1\r\n if 'Dec' in line:\r\n dec_match += 1\r\n\r\n filename = pieces[2]\r\n\r\n if filename in pages:\r\n pages[filename] += 1\r\n else:\r\n pages[filename] = 1\r\n \r\n#finds average number of requests per day\r\nave_mon = mon / 52\r\nave_tue = tue / 52\r\nave_wed = wed / 52\r\nave_thur = thur / 52\r\nave_fri = fri / 52\r\nave_sat = sat / 52\r\nave_sun = sun / 52\r\n\r\n#formats output for printing\r\nformatted_mon = \"{:.2f}\".format(ave_mon)\r\nformatted_tue = \"{:.2f}\".format(ave_tue)\r\nformatted_wed= \"{:.2f}\".format(ave_wed)\r\nformatted_thur = \"{:.2f}\".format(ave_thur)\r\nformatted_fri = \"{:.2f}\".format(ave_fri)\r\nformatted_sat = \"{:.2f}\".format(ave_sat)\r\nformatted_sun = 
\"{:.2f}\".format(ave_sun)\r\n\r\n#prints avg number of requests per day\r\nprint(f'The number of requests on Monday averaged {formatted_mon}')\r\nprint(f'The number of requests on Tuesday averaged {formatted_tue}')\r\nprint(f'The number of requests on Wednesday averaged {formatted_wed}')\r\nprint(f'The number of requests on Thursday averaged {formatted_thur}')\r\nprint(f'The number of requests on Friday averaged {formatted_fri}')\r\nprint(f'The number of requests on Saturday averaged {formatted_sat}')\r\nprint(f'The number of requests on Sunday averaged {formatted_sun}')\r\n\r\n","sub_path":"PythonProject2.py","file_name":"PythonProject2.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"647313895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport requests\nimport json\nimport resource\nfrom guppy import hpy\n\nfrom confluent_kafka import Consumer\n\nhp = hpy()\n\n# equivalent to: curl endpoint --header \"Content-Type: application/json\" --request POST --data data endpoint_url\ndef post(endpoint_url, payload):\n # uses lib requests\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n request = requests.post(endpoint_url, data=json.dumps(payload), headers=headers)\n\n\ndef report(endpoint_url, current_time, throughput_mb_per_s, timestamps):\n if endpoint_url.startswith(\"http://\"):\n topics = timestamps.keys()\n min_ts = {\n topic: min(timestamps[topic])\n for topic in topics\n }\n # min_ts = min(timestamps)\n offsets = {\n topic: [t - min_ts[topic] for t in timestamps[topic]]\n for topic in topics\n }\n # offsets = [t - min_ts for t in timestamps]\n lateness = {\n topic: [abs(offsets[topic][i] - i) for i in range(len(offsets[topic]))]\n for topic in topics\n }\n # lateness = [abs(offsets[i] - i) for i in range(len(offsets))]\n max_lateness = [\n max(lateness[topic])\n for topic in topics\n ]\n payload = dict(\n timestamp=current_time,\n throughput=throughput_mb_per_s,\n # min_timestamp = min(timestamps),\n max_lateness=max(max_lateness),\n id=consumer_id\n )\n post(endpoint_url, payload)\n else:\n print('Throughput in window: {} MB/s'.format(throughput_mb_per_s))\n\n\n###\n### PLEASE SET THE BELOW CONFIGURATION\n###\n\n# currently posts to endpoint using requests: https://2.python-requests.org/en/v2.9.1/\nendpoint_url = \"http://focussensors.duckdns.org:9000/consumer_reporting_endpoint\"\nconsumer_id = os.environ[\"POD_NAME\"] or \"unknown\"\n\n# Address of the kafka servers and topic name\n# kafka_servers = '192.168.56.101:9092'\nkafka_servers = 'internal-service-0.kafka.svc.cluster.local:32400'\n# topic_name = 'test'\n# topic_name = \"^sensor.*\"\ntopic_name = [\"sensor{}\".format(i) for i in range(50)]\n\n# Whether to only listen for messages that occurred since the consumer started ('latest'),\n# or to pick up all messages that the consumer has missed ('earliest').\n# Using 'latest' means the consumer must be started before the producer.\nread_topic_from = 'latest'\n\n# How often to indicate data rate in seconds\nthroughput_debug_interval_in_sec = 5\n\n###\n### Consumer code\n###\n\nkbs_in_mb = 1000\n\nprint('Connecting to Kafka @ {}'.format(kafka_servers))\nc = Consumer({\n 'bootstrap.servers': kafka_servers,\n 'group.id': consumer_id, # 'mygroup',\n 'auto.offset.reset': read_topic_from,\n # 'metadata.max.age.ms': 5000,\n 'max.partition.fetch.bytes': 7500 * 1024,\n # see 
https://github.com/confluentinc/confluent-kafka-python/issues/759\n # queue a maximum of 100 messages\n 'queued.max.messages.kbytes': 75000\n})\n\nlast_subscribe_time = int(time.time())\nc.subscribe(topic_name)\nnomsg_count = 0\n\nkbs_so_far = 0\n\nwindow_start_time = int(time.time())\n\ntimestamps = dict()\n\nprint(f\"endpoint={endpoint_url}\")\n\nhp.setrelheap()\n\n# Refactored by NH - 15/04\n# The hope is that the msg, when \"function scoped\", will be gc'ed\n# ...but whether the memory is released to the OS is yet to be seen...\ndef poll(consumer):\n msg = consumer.poll(1.0)\n\n meta = {}\n if msg is None:\n return meta\n\n if msg.error:\n meta['error'] = msg.error()\n return meta\n\n # extract the necessary meta (and effectively discard the message payload)\n meta['msg_size'] = sys.getsizeof(msg.value()) / 1000\n meta['topic'] = msg.topic\n meta['timestamp'] = msg.timestamp()[1]\n\n # explicitly delete the msg\n # del msg\n\n print(\"refcount msg={}, msg.value={}\".format(\n sys.getrefcount(msg),\n sys.getrefcount(msg.value)\n ));\n\n return meta\n\n\nwhile True:\n\n # Waits 1 second to receive a message, if it doesn't find one goes round the loop again\n message_meta = poll(c)\n current_time = int(time.time())\n\n # check if message was received\n if not message_meta:\n nomsg_count = nomsg_count + 1\n if 10 < current_time - last_subscribe_time:\n print(\"number of nomsgs: {}\".format(nomsg_count))\n last_subscribe_time = current_time\n continue\n\n # check if error from consumer\n if 'error' in message_meta:\n print(\"Consumer error: {}\".format(message_meta['error']))\n continue\n\n if message_meta['topic'] in timestamps:\n timestamps[message_meta['topic']].append(message_meta['timestamp'])\n else:\n timestamps[message_meta['topic']] = [message_meta['timestamp']]\n\n if 10 < current_time - last_subscribe_time:\n print(\"number of nomsgs: {}\".format(nomsg_count))\n nomsg_count = 0\n last_subscribe_time = current_time\n\n # Maintain figures for throughput reporting\n kbs_so_far += message_meta['msg_size']\n\n # Determine if we should output a throughput figure\n window_length_sec = current_time - window_start_time\n\n if window_length_sec >= throughput_debug_interval_in_sec:\n throughput_mb_per_s = float(kbs_so_far / (throughput_debug_interval_in_sec * kbs_in_mb))\n print('Throughput in window: {} MB/s'.format(throughput_mb_per_s))\n print('Peak memory use: {} Mb'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))\n h = hp.heap()\n by_types = h.bytype\n # by_refs = h.byrcs\n print(\"Heap by types {}\".format(by_types))\n report(endpoint_url, current_time, throughput_mb_per_s, timestamps)\n\n # Reset ready for the next throughput indication\n window_start_time = int(time.time())\n kbs_so_far = 0\n timestamps = dict()\n\nc.close()\n","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"207424698","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import *\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nnp.set_printoptions(threshold=np.nan)\r\n\r\nl1=pd.read_csv(\"D:/Rochester/Data Mining/assignment1/iris.csv\",delimiter=\",\", usecols=(0,1,2,3,4),names=[\"sl\",\"sw\",\"pl\",\"pw\",\"class\"])\r\nl1.describe()\r\nl2=l1.groupby(\"class\")\r\nl2=l2.agg({'sl':np.average, 
'sw':np.average,'pl':np.average,'pw':np.average})\r\nl2\r\n#Basic statistics\r\n\r\nses=l1[l1['class']=='Iris-setosa']\r\na=ses.plot(kind=\"scatter\",x='sl',y='sw',label='seotsa',color='red')\r\nves=l1[l1['class']=='Iris-versicolor']\r\nb=ves.plot(kind=\"scatter\",x='sl',y='sw',label='versicolor',color='blue')\r\nvis=l1[l1['class']=='Iris-virginica']\r\nc=vis.plot(kind=\"scatter\",x='sl',y='sw',label='virginica',color='green')\r\nsep=l1[l1['class']=='Iris-setosa']\r\nd=sep.plot(kind=\"scatter\",x='pl',y='pw',label='seotsa',color='red')\r\nvep=l1[l1['class']=='Iris-versicolor']\r\ne=vep.plot(kind=\"scatter\",x='pl',y='pw',label='versicolor',color='blue')\r\nvip=l1[l1['class']=='Iris-virginica']\r\nf=vip.plot(kind=\"scatter\",x='pl',y='pw',label='virginica',color='green')\r\n#Scatter Graph\r\n\r\nld=cluster.KMeans(n_clusters=3,random_state=1)\r\nlf=l1.iloc[:,0:4]\r\nld.fit(lf)\r\nprint (ld.labels_)\r\nld.labels_\r\ncluster=np.array(ld.labels_)\r\nb=np.zeros(150,dtype=int)\r\nb.shape\r\ntarget=np.array(l1['class'])\r\nfor i in range(150):\r\n if target[i]=='Iris-setosa' :\r\n b[i]=1\r\n if target[i]=='Iris-versicolor':\r\n b[i]=0\r\n if target[i]=='Iris-virginica':\r\n b[i]=2\r\nprint(b)\r\nsum=0\r\nfor i in range(150):\r\n if cluster[i]==b[i]:\r\n sum=sum+1\r\n sum\r\nprint('cluster analysis accuracy=',sum*100/150,'%')\r\n#Cluster analysis\r\n\r\ndata=np.array(l1.iloc[:,0:4])\r\ntarget=np.array(l1['class'])\r\ntrain_data,test_data,train_target,test_target=cross_validation.train_test_split(data,target,test_size=0.3,random_state=0)\r\n#train_data=np.concatenate((data[0:45, :],data[50:95, :],data[100:145, :]),axis = 0)\r\ntrain_data\r\n#train_target=np.concatenate((target[0:45],target[50:95],target[100:145]),axis = 0)\r\ntrain_target\r\n#test_data=np.concatenate((data[45:50, :],data[95:100, :],data[145:150, :]),axis = 0)\r\ntest_data\r\n#test_target=np.concatenate((target[45:50],target[95:100],target[145:150]),axis = 0)\r\ntest_target\r\nclf=DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=6)\r\nclf.fit(train_data, train_target)\r\ntraindata=clf.predict(train_data)\r\ntestdata=clf.predict(test_data)\r\nsum=0\r\nfor i in range(45):\r\n if testdata[i]==test_target[i]:\r\n sum=sum+1\r\nsum\r\nprint('accuracy=',sum*100/45,'%')\r\n#Decision tree\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignment1/assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"52628075","text":"'''\r\nScript that puts all of the different functions together to create a GUI application\r\n'''\r\n\r\nimport tkinter as tk\r\nimport math\r\nimport misc\r\n\r\n\r\nclass App(tk.Frame):\r\n def __init__(self, master=None):\r\n tk.Frame.__init__(self, master)\r\n self.bind_all('', self.solve)\r\n self.bind_all('', self.clear)\r\n self.pack()\r\n self.home()\r\n\r\n # Sets up the layout (buttons, entries, labels, etc.)\r\n def home(self):\r\n self.canvas = tk.Canvas(self, width=300, height=300, bg='white')\r\n self.canvas.pack()\r\n\r\n self.radians = tk.Checkbutton(self)\r\n self.radians['text'] = 'Use radians instead of degrees'\r\n self.radians.pack()\r\n self.ticked = tk.StringVar()\r\n self.ticked.set('0')\r\n self.radians['variable'] = self.ticked\r\n\r\n self.a_label = tk.Label(self)\r\n self.a_label['text'] = 'Side a:'\r\n self.a_label.pack()\r\n self.a = tk.Entry(self)\r\n self.a.pack()\r\n\r\n self.b_label = tk.Label(self)\r\n self.b_label['text'] = 'Side 
b:'\r\n self.b_label.pack()\r\n self.b = tk.Entry(self)\r\n self.b.pack()\r\n\r\n self.c_label = tk.Label(self)\r\n self.c_label['text'] = 'Side c:'\r\n self.c_label.pack()\r\n self.c = tk.Entry(self)\r\n self.c.pack()\r\n\r\n self.A_label = tk.Label(self)\r\n self.A_label['text'] = 'Angle A:'\r\n self.A_label.pack()\r\n self.A = tk.Entry(self)\r\n self.A.pack()\r\n\r\n self.B_label = tk.Label(self)\r\n self.B_label['text'] = 'Angle B:'\r\n self.B_label.pack()\r\n self.B = tk.Entry(self)\r\n self.B.pack()\r\n\r\n self.C_label = tk.Label(self)\r\n self.C_label['text'] = 'Angle C:'\r\n self.C_label.pack()\r\n self.C = tk.Entry(self)\r\n self.C.pack()\r\n\r\n self.area_label = tk.Label(self)\r\n self.area_label['text'] = 'Area:'\r\n self.area_label.pack()\r\n self.area = tk.Entry(self)\r\n self.area.pack()\r\n\r\n self.solve_button = tk.Button(self)\r\n self.solve_button['text'] = 'Solve'\r\n self.solve_button['command'] = self.solve\r\n self.solve_button.pack(side='right')\r\n\r\n self.clear_button = tk.Button(self)\r\n self.clear_button['text'] = 'Clear'\r\n self.clear_button['command'] = self.clear\r\n self.clear_button.pack(side='right')\r\n\r\n # Function that clears all fields\r\n # event argument is only there because Tkinter's button bindings require it\r\n def clear(self, event=None):\r\n for obj in [self.a, self.b, self. c, self.A, self.B, self.C, self.area]:\r\n for _ in obj.get():\r\n obj.delete(0)\r\n self.canvas.delete('all')\r\n self.a.focus_set()\r\n\r\n # Function that gets input, solves angles/sides and outputs results\r\n # event argument is only there because Tkinter's button bindings require it\r\n def solve(self, event=None):\r\n # Get all the numbers from the entries and convert to float\r\n a = float(self.a.get()) if self.a.get() != '' else 0\r\n b = float(self.b.get()) if self.b.get() != '' else 0\r\n c = float(self.c.get()) if self.c.get() != '' else 0\r\n A = float(self.A.get()) if self.A.get() != '' else 0\r\n B = float(self.B.get()) if self.B.get() != '' else 0\r\n C = float(self.C.get()) if self.C.get() != '' else 0\r\n\r\n # Convert to radians if input wants degrees\r\n if self.ticked.get() == '0':\r\n A = math.radians(A)\r\n B = math.radians(B)\r\n C = math.radians(C)\r\n\r\n # Use identify_method to calculate angles and side lengths\r\n results = misc.identify_method(a, b, c, A, B, C)\r\n # Repeat because it only finds one thing at a time, so find all\r\n for _ in range(3):\r\n results = list(misc.identify_method(results))\r\n results.append(misc.identify_area_method(results))\r\n\r\n # Show the user degrees if they didn't want radians\r\n if self.ticked.get() == '0':\r\n results[3] = math.degrees(results[3])\r\n results[4] = math.degrees(results[4])\r\n results[5] = math.degrees(results[5])\r\n\r\n # Clear everything and insert the results\r\n self.clear()\r\n for index, obj in enumerate([self.a, self.b, self. 
c, self.A, self.B, self.C, self.area]):\r\n obj.insert(0, str(round(results[index], 10)))\r\n\r\n self.draw_triangle()\r\n\r\n # Draws triangle on canvas\r\n def draw_triangle(self):\r\n # Get all side lengths\r\n a = float(self.a.get())\r\n b = float(self.b.get())\r\n c = float(self.c.get())\r\n\r\n xy = misc.solve_point(a, b, c)\r\n\r\n # Multiply everything by this to fill up the canvas\r\n scale_factor = 300 / (c + max(abs(min(0, xy[0])),\r\n abs(min(0, xy[1]))\r\n ))\r\n\r\n # Add these values to all x & y points to centre the triangle\r\n center_y = (300 - xy[1] * scale_factor) / 2\r\n center_x = abs(min([0, xy[0] * scale_factor]))\r\n\r\n self.canvas.create_polygon(0 + center_x, 0 + center_y,\r\n c * scale_factor + center_x, 0 + center_y,\r\n xy[0] * scale_factor + center_x, xy[1] * scale_factor + center_y)\r\n\r\n# Script will only run if it is the main script (i.e. not imported)\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n root.resizable(width=False, height=False)\r\n app = App(master=root)\r\n app.master.title('TriSolver')\r\n\r\n app.mainloop()\r\n","sub_path":"TriSolver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"553328451","text":"import torch\nfrom model.FaceBagNet_model_A import Net\nfrom preprocessing.augmentation import color_augumentor\nfrom copy import deepcopy\nimport numpy as np\nimport torch.nn.functional as F\nimport cv2\nfrom mtcnn.mtcnn import MTCNN\n\n\nclass FaceBagNet:\n def __init__(self, model_path, patch_size=48, torch_device=\"cpu\"):\n\n # TODO: bn, id_class?\n self.model_path = model_path\n self.patch_size = patch_size\n\n self.neural_net = Net(num_class=2, id_class=300, is_first_bn=True)\n self.neural_net.load_pretrain(self.model_path)\n\n self.neural_net = torch.nn.DataParallel(self.neural_net)\n\n self.neural_net.to(torch_device)\n\n self.torch_device = torch_device\n\n # TODO: this line\n self.neural_net.eval()\n\n self.augmentor = color_augumentor\n\n # returns probability that the image is genuine (not presented)\n def predict(self, full_size_image):\n\n image = deepcopy(full_size_image) # TODO: remove copying\n\n image = self.augmentor(image, target_shape=(self.patch_size, self.patch_size, 3), is_infer=True)\n\n n = len(image)\n image = np.concatenate(image, axis=0)\n image = np.transpose(image, (0, 3, 1, 2))\n image = image.astype(np.float32)\n image = image.reshape([n, 3, self.patch_size, self.patch_size])\n image = np.array([image])\n image = image / 255.0\n\n input_tensor = torch.FloatTensor(image)\n\n shape = input_tensor.shape\n b, n, c, w, h = shape\n # print(b, n, c, w, h)\n\n input_tensor = input_tensor.view(b * n, c, w, h)\n\n # inpt = inpt.cuda() if torch.cuda.is_available() else inpt.cpu()\n input_tensor = input_tensor.to(self.torch_device)\n\n # print(input_tensor)\n\n with torch.no_grad():\n logit, _, _ = self.neural_net(input_tensor)\n logit = logit.view(b, n, 2)\n logit = torch.mean(logit, dim=1, keepdim=False)\n prob = F.softmax(logit, 1)\n\n is_real = list(prob.data.cpu().numpy()[:, 1])[0]\n\n return is_real\n\nfrom time import time\n\nif __name__ == \"__main__\":\n\n model48 = FaceBagNet(\"model48.pth\", 48, \"cuda\")\n model32 = FaceBagNet(\"model48.pth\", 32, \"cuda\") # TODO: this one is just dummy\n # model32 = FaceBagNet(\"model32.pth\", 32, \"cuda\")\n model16 = FaceBagNet(\"model16.pth\", 16, \"cuda\")\n\n detector = MTCNN()\n\n # capture = 
cv2.VideoCapture(\"images/video.mp4\")\n capture = cv2.VideoCapture(0)\n\n capture.set(3, 1280)\n capture.set(4, 720)\n\n while True:\n ret, frame = capture.read()\n detection = detector.detect_faces(frame)\n\n if detection:\n\n for i, bbox in enumerate(detection):\n bbox = bbox['box']\n pt1 = bbox[0], bbox[1]\n pt2 = bbox[0] + bbox[2], bbox[1] + int(bbox[3])\n\n # TODO: do not hardcode face index\n if i == 0:\n crop_img = frame[bbox[1]:bbox[1] + bbox[3], bbox[0]:bbox[0] + bbox[2]]\n cv2.imshow(\"detected face\", crop_img)\n\n time_ = time()\n print(\"model16: {} model32: {} model48: {}\".format(model16.predict(crop_img),\n model32.predict(crop_img),\n model48.predict(crop_img)))\n duration = time() - time_\n print(duration)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n","sub_path":"FBN.py","file_name":"FBN.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"279113754","text":"\nimport configparser\nimport os\nimport logging\n\nclass ConfigUtil:\n _instance = None\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)\n defaultPath = str(os.getcwd())+\"/config/config.cfg\"\n cp = configparser.ConfigParser()\n isloaded = False\n __instance = None\n \n @staticmethod \n def get_instance():\n \"\"\" Static access method. \"\"\"\n if ConfigUtil.__instance == None:\n ConfigUtil()\n return ConfigUtil.__instance\n\n def __init__(self):\n \"\"\" Virtually private constructor. \"\"\"\n if ConfigUtil.__instance != None:\n logging.info(\"This is a singleton class. Use get_instance method\")\n print(\"This is a singleton class. Use get_instance method\")\n raise Exception(\"This class is a singleton!\")\n else:\n ConfigUtil.__instance = self\n\n # def __init__(self):\n # self.defaultPath = str(os.getcwd())+\"/config/config.cfg\"\n # self.cp= configparser.ConfigParser()\n # self.isloaded = False\n \n \n # Method to fetch the value based on the section and key passed.\n def get_value(self, section, setting):\n try:\n if(not self.isloaded):\n self.load_config() \n ret = self.cp.get(section, setting)\n except configparser.NoOptionError:\n ret = None\n return ret\n\n # Method to load data \n def load_config(self):\n try:\n if (os.path.exists(self.defaultPath) and os.path.isfile(self.defaultPath)): \n self.cp.read(self.defaultPath)\n #print(\"Config File loaded\")\n return True\n else:\n print(\"Config file not loaded\")\n logging.info(\"\\n \\n Config file %s doesn't exist.\",self.defaultPath)\n except:\n print(\"Config file not loaded\")\n logging.info(\"Config file %s doesn't exist.\")\n # print(\"False\")\n return False\n\n #Method to retrieve all sections from config.cfg file\n def get_all_sections(self)->list:\n try:\n if(not self.isloaded):\n self.load_config() \n sections = self.cp.sections()\n except configparser.NoOptionError:\n sections = None\n return sections \n \n\nif __name__ == \"__main__\":\n cu = ConfigUtil()\n print(cu.get_value(\"COVID19\",\"name\"))\n print(cu.get_all_sections())\n","sub_path":"src/main/ConfigUtil.py","file_name":"ConfigUtil.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"493454230","text":"# Darrell Powe III\n# # This is a practice exercise from the book \"Python Crash Course\"\n# # Exercise 8-10\n\n\ndef show_magicians(names):\n \"\"\"\n This function loops through a list of names and returns the names of the people in the 
list.\n\n :param names: Gets passed a modified list on names of magicians\n :return: Individual names\n \"\"\"\n\n while names:\n name = names.pop()\n print(name)\n\n\ndef make_great(new):\n \"\"\"\n This function loops through a list and modifies it, by adding 'The Great ' before their name, popping the item and\n adds it to another list.\n :param new: Gets passed a list on names of magicians\n :return:\n \"\"\"\n while new:\n magician = 'The Great ' + new.pop() + '!'\n list_of_new_magicians.append(magician)\n\n\nlist_of_magicians = [\"Elore\",\n \"Andunorim\",\n \"Trerius\",\n \"Amaex\",\n \"Judarin\",\n \"Idijamar\",\n \"Azahr\",\n \"Oveflyn\",\n \"Jenior\",\n \"Dhubus\"]\nlist_of_new_magicians = []\n\nmake_great(list_of_magicians)\nshow_magicians(list_of_new_magicians)\n","sub_path":"great_magicians.py","file_name":"great_magicians.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"77148553","text":"SIGNS = {\n (4, 20): 'Овен', (5, 20): 'Телец', (6, 20): 'Близнаци',\n (7, 21): 'Рак', (8, 22): 'Лъв', (9, 22): 'Дева',\n (10, 22): 'Везни', (11, 21): 'Скорпион', (12, 21): 'Стрелец',\n (1, 19): 'Козирог', (2, 18): 'Водолей', (3, 20): 'Риби'\n}\n\n\ndef what_is_my_sign(day, month):\n for key, value in SIGNS.items():\n if ((key[0] == month and key[1] >= day) or ((key[0] == month+1 or\n key[0] == month+11) and\n key[1] < day)):\n return value\n","sub_path":"task1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"300133220","text":"import base64\nimport json\nimport sys\nfrom functools import partial\nfrom itertools import count\nfrom random import randint\nfrom time import time, sleep\nfrom traceback import format_tb\nfrom types import FunctionType\nfrom typing import cast, TypeVar\n\nfrom bson import ObjectId\n\nfrom xbasic.core import Object, Error\nfrom xbasic.field import Struct, Field\nfrom xbasic.mongo import Collection, Database, Q\nfrom xbasic.web.app import Request, Response, json_response\nfrom xbasic.web.utils import pretty_flatten, XBuilder\nfrom xutils.code_utils import cached_property, dotted, secure, repr_wrapper, CachedProperty\nfrom xutils.data_utils import flatten\nfrom xutils.exp_utils import Get\n\ntry:\n raise Exception()\nexcept:\n _, _, tb = sys.exc_info()\n TracebackType = type(tb)\n\n_YES_NO = {'yes': True, 'no': False}\n\n\ndef next_path(path):\n assert path.startswith('/'), path\n\n pos = path.find('/', 1)\n if pos > -1:\n return path[1:pos], path[pos:]\n else:\n return path[1:], ''\n\n\ndef access_token():\n return ''.join(base64.encodebytes(bytearray(\n randint(0, 0xFF) for _ in range(128)\n )).decode('utf-8').strip().splitlines())\n\n\ndef get_url_name(name):\n return ''.join(flatten((\"-\", b) if (i > 0) and a != b else b\n for i, (a, b) in enumerate(zip(name, name.lower()))))\n\n\nclass NoRouteError(Exception):\n pass\n\n\nclass Param(property):\n ORDER = count()\n\n def __init__(self, type_=None, link=None, name=None, parent=None, default=None):\n super(Param, self).__init__(self.get_value, self.set_value)\n\n self.name = name\n self.parent = parent\n\n if name is None:\n self.order = next(self.ORDER)\n self.type = type_\n\n if link is not None:\n self.link = link\n\n if default is not None:\n self.default = default\n\n def __getattr__(self, attr):\n try:\n return getattr(self.parent, attr)\n except AttributeError:\n raise 
AttributeError(attr)\n\n def get_value(self, layer):\n try:\n value = layer.params[self.name]\n except KeyError:\n try:\n value = self.default(layer)\n except AttributeError:\n raise AttributeError(self.name)\n layer.params[self.name] = value\n return value\n\n def set_value(self, layer, value):\n layer.params[self.name] = value\n\n def bind(self, name):\n return type(self)(parent=self, name=name)\n\n @repr_wrapper\n def __repr__(self):\n return {\n name: value for name, value in vars(self).items()\n if value is not None\n }\n\n\nclass StaticLayer(CachedProperty):\n pass\n\n\nT = TypeVar('T')\n\n\ndef static_layer(layer: T):\n return cast(T, StaticLayer(layer))\n\n\nclass Layer(Object):\n __metadata__ = [\n 'layers',\n 'route_layers',\n 'events',\n 'sorted_params',\n 'route_handlers'\n ]\n\n layers = []\n parents = []\n\n route_layers = {}\n route_handlers = {}\n\n name = None\n url_name = None\n parent = None\n\n sorted_params = []\n\n events = {}\n\n def __init__(self, parent, **params):\n self.parent = parent\n self.params = {}\n\n for name, value in params.items():\n setattr(self, name, value)\n\n self.ready()\n\n def __getattr__(self, attr):\n value = getattr(self.parent, attr)\n vars(self)[attr] = value\n return value\n\n def ready(self):\n pass\n\n @classmethod\n def __prebuild__(cls):\n super(Layer, cls).__prebuild__()\n\n if cls is not Layer:\n base_layer = cls.base(Layer) # type: Layer\n cls.layers.extend(base_layer.layers)\n cls.route_layers.update(base_layer.route_layers)\n cls.events.update(base_layer.events)\n cls.sorted_params.extend(base_layer.sorted_params)\n cls.route_handlers.update(base_layer.route_handlers)\n\n @classmethod\n def __build__(cls, name, value):\n super(Layer, cls).__build__(name, value)\n\n if isinstance(value, type):\n if issubclass(value, Layer):\n cls.register_layer(value)\n\n elif isinstance(value, StaticLayer):\n cls.register_layer(value.origin)\n\n elif isinstance(value, Param):\n value = value.bind(name)\n setattr(cls, name, value)\n cls.sorted_params.append(value)\n\n elif name.startswith('on_'):\n cls.events[name[3:]] = secure(value)\n\n elif name.startswith('route_'):\n route_name = name[6:]\n route_value = value\n\n if isinstance(value, FunctionType):\n if name != value.__name__:\n route_name = value.__name__\n\n elif callable(value):\n def route_value(context, path):\n return value(context)(path)\n\n cls.route_handlers[route_name] = route_value\n\n @classmethod\n def __postbuild__(cls):\n super(Layer, cls).__postbuild__()\n\n cls.sorted_params.sort(key=Get.order)\n\n cls.name = cls.__name__\n cls.url_name = get_url_name(cls.__name__)\n\n for parent in cls.parents:\n parent.register_layer(cls)\n\n @classmethod\n def register_layer(cls, layer_cls):\n if layer_cls not in cls.layers:\n cls.route_layers[layer_cls.link_name or layer_cls.url_name] = layer_cls\n cls.layers.append(layer_cls)\n\n @cached_property\n def tree_name(self):\n if self.parent is not None:\n return dotted(self.parent.tree_name, type(self).name)\n return type(self).name\n\n def default(self, path):\n return Response(app_iter=('NoDefault: ', self.tree_name, \" \", self.link), status=404)\n\n def index(self):\n return self.default('')\n\n link_name = None\n\n @property\n def link_params(self):\n for param in self.sorted_params:\n if param.type:\n yield str(getattr(self, param.name))\n\n @cached_property\n def link_route(self):\n return self.link_name or self.url_name\n\n @property\n def link_items(self):\n if self.parent is not None:\n yield from self.parent.link_items\n if 
self.link_route:\n yield self.link_route\n else:\n yield ''\n yield from self.link_params\n\n @cached_property\n def link(self):\n return '/'.join(self.link_items) or '/'\n\n @cached_property\n def link_url(self):\n return self.req.host_url + self.link\n\n @classmethod\n def handle(cls, req: Request):\n print((req.method, req.path, req.content_type, list(req.params.keys())))\n return cls(None, req=req).route(req.path)\n\n def route(self, path):\n for event_name, event_handler in self.events.items():\n try:\n event_value = self.req.params[event_name]\n except KeyError:\n continue\n event_response = event_handler(self, event_value)\n if event_response is not None:\n return event_response\n\n if path in '/':\n return self.index()\n\n default_path = path\n url_name, path = next_path(path)\n\n try:\n handler = self.route_handlers[url_name]\n except KeyError:\n pass\n\n else:\n return handler(self, path)\n\n try:\n layer_cls = self.route_layers[url_name]\n except KeyError:\n return self.default(default_path)\n\n params = {}\n\n for param in layer_cls.sorted_params:\n if param.type:\n value, path = next_path(path)\n params[param.name] = param.type(value)\n\n layer = layer_cls(self, **params)\n return layer.route(path)\n\n @cached_property\n def root(self):\n if self.parent is not None:\n return self.parent.root\n return self\n\n\nclass Command(Struct):\n name = Field(str)\n args = Field(list, default=[])\n\n\nclass UnknownCommandError(Error):\n pass\n\n\nclass Service(Layer):\n __metadata__ = ['commands']\n\n commands = {}\n\n command = None # type: Command\n\n encoder = cast(json.JSONEncoder, cached_property(\n lambda self: json.JSONEncoder(default=self.encode)\n ))\n\n def encode(self, o):\n if isinstance(o, Exception):\n return {'type': type(o).__name__, 'content': o.args}\n\n elif isinstance(o, TracebackType):\n return [text.splitlines() for text in format_tb(o)]\n\n elif isinstance(o, XBuilder):\n return ''.join(flatten(o))\n\n return repr(o)\n\n @classmethod\n def __prebuild__(cls):\n super(Service, cls).__prebuild__()\n\n if cls is not Service:\n cls.commands.update(cls.base(Service).commands)\n\n @classmethod\n def __build__(cls, name, value):\n super(Service, cls).__build__(name, value)\n\n if name.startswith('do_'):\n cls.commands[name[3:]] = secure(value)\n\n def on_command(self, command: Command):\n self.command = command\n\n def service_index(self):\n return \"NoCommand\"\n\n def index(self):\n if self.command is None:\n return self.service_index()\n\n try:\n try:\n handler = self.commands[self.command.name]\n except AttributeError:\n raise UnknownCommandError(self.command.raw)\n\n result = {\"result\": handler(self, *self.command.args)}\n\n except Exception:\n _, exc_value, exc_tb = sys.exc_info()\n result = {'error': exc_value, 'traceback': exc_tb}\n\n return json_response(result, self.encoder)\n\n\nclass Lang(Service):\n req = cast(Request, property(Get.parent.req))\n\n def do_translate(self, keys: list):\n return {key: self[key] for key in keys}\n\n def guess_item(self, item):\n while '.' 
in item:\n _, item = item.split('.', 1)\n\n try:\n return self.parent.user_lang[item]\n except KeyError:\n pass\n return item\n\n def __getitem__(self, item):\n try:\n return self.parent.user_lang[item]\n except KeyError:\n item = self.parent.user_lang[item] = self.guess_item(item)\n return item\n\n def __getattr__(self, attr):\n return self[attr]\n\n\nclass Session(Collection):\n token = Field(str)\n timeout = Field(int)\n\n def doc_remove(self, doc):\n for resource in Resource(self.database).find({\"session_id\": doc.id}):\n resource.free()\n return super(Session, self).doc_remove(doc)\n\n def doc_commit(self, doc, **kwargs):\n kwargs['timeout'] = time()\n return super(Session, self).doc_commit(doc, **kwargs)\n\n def find_timeout(self, over_seconds):\n for session in self.find({\n 'timeout': Q < (time() - over_seconds)\n }):\n yield session\n\n\nclass Resource(Collection):\n session_id = Field(ObjectId)\n token = Field(str)\n\n def new(self, session=None, **kwargs):\n if session is not None:\n kwargs['session_id'] = session.id\n return super(Resource, self).new(**kwargs)\n\n def doc_free(self, doc):\n self.doc_remove(doc)\n\n\nclass SiteDatabase(Database):\n collections = [Session, Resource]\n\n\nclass Site(Layer):\n frame = True\n json = False\n pretty = True\n\n user_lang = {}\n\n lang = static_layer(Lang)\n\n site_db = None # type: SiteDatabase\n req = None # type: Request\n\n _session = None\n\n _cookie = cached_property(\n lambda self: type(self).__name__)\n\n site_sessions = cast(Session, cached_property(\n lambda self: Session(self.site_db)))\n\n def resources(self, collection_cls):\n return collection_cls(self.site_db, filter={\n \"session_id\": self.session.id\n }, constants={\n \"session_id\": self.session.id\n })\n\n def site_process(self):\n print(type(self).__name__, \"process started!\")\n while True:\n for session in self.site_sessions.find_timeout(60 * 5):\n print(\"CleanSession\", session.remove().id)\n sleep(10)\n\n @property\n def session(self):\n if self._session is not None:\n return self._session\n try:\n token = self.req.cookies[self._cookie]\n except KeyError:\n pass\n else:\n self._session = self.site_sessions.find_one({\"token\": token})\n if self._session is None:\n self._session = self.site_sessions.new(token=access_token())\n return self._session\n\n def on_site_frame(self, value):\n self.frame = _YES_NO.get(value, self.frame)\n\n def on_site_json(self, value):\n self.json = _YES_NO.get(value, self.json)\n\n def on_site_pretty(self, value):\n self.pretty = _YES_NO.get(value, self.pretty)\n\n def site_response(self, res: Response):\n if self._session is not None:\n if self._session.token != self.req.cookies.get(self._cookie):\n res.set_cookie(self._cookie, self._session.token)\n if self._session.id is None:\n self._session.save() # update timeout\n else:\n self._session.commit()\n\n res.cache_control.max_age = 0\n\n return res\n\n def route(self, path):\n res = super(Site, self).route(path)\n if not isinstance(res, Response):\n if self.frame:\n res = self.site_frame(res)\n if self.json:\n res = {'html': ''.join(flatten(res))}\n if isinstance(res, dict):\n res = json_response(res)\n elif self.pretty:\n res = pretty_flatten(res)\n res = Response(app_iter=res)\n return self.site_response(res)\n\n def site_frame(self, frame):\n raise NotImplementedError()\n\n @cached_property\n def cache(self):\n return 
{}\n","sub_path":"xbasic/web/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"240355421","text":"from PyQt5 import QtGui\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.uic import loadUi\n\nfrom matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)\nimport librosa\nimport numpy as np\nimport pywt\nfrom src import Features, Database\n\n\nclass DiscreteWT(QMainWindow):\n\n def __init__(self):\n QMainWindow.__init__(self)\n loadUi(\"../ui/Discrete.ui\", self)\n self.setWindowTitle(\"Wavelet Transform Data Analyzer Tool\")\n self.setWindowIcon(QtGui.QIcon('../icon/icon.png'))\n self.setFixedSize(1482, 949)\n Features.center(self)\n\n self.MplWidget.canvas.axes.remove()\n self.clear_count = 0 #currently how many canvas we have\n self.load_check = False #işlenecek bir sinyal var mı ?\n self.analyze_check = False #işlenmiş mi\n\n self.signals = []\n self.audio = []\n self.time = []\n self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self)) # embedding matplotlib to widget\n\n self.actionClose.triggered.connect(self.close_window)\n self.actionFolder.triggered.connect(self.load_folder)\n self.actionImage.triggered.connect(self.load_signal) #load signal via menubar\n self.pushButton_1D_Analyze.clicked.connect(self.analyze_signal) #analyze button\n\n #self.pushButton_1D_Analyze.clicked.connect(self.run) #bütün dataya bütün fonksiyonlar\n\n self.pushButton_save_to_db.clicked.connect(self.save_to_db)\n\n\n wave_types = pywt.wavelist(kind='discrete')\n self.comboBox_1D_Type.addItems(wave_types) #initialize comboBox\n for i in range(1,20):\n self.comboBox_1D_Level.addItem(str(i) , i)\n\n self.comboBox_1D_Level.currentIndexChanged.connect(self.on_combobox_changed, self.comboBox_1D_Level.currentIndex())\n\n self.checkBox_1D_Average.setChecked(True)\n self.checkBox_1D_Entropy.setChecked(True)\n self.checkBox_1D_Kurtosis.setChecked(True)\n self.checkBox_1D_Max.setChecked(True) # discrete page statistic functions checks\n self.checkBox_1D_Median.setChecked(True)\n self.checkBox_1D_Min.setChecked(True)\n self.checkBox_1D_Skewness.setChecked(True)\n self.checkBox_1D_StandartDeviation.setChecked(True)\n\n def close_window(self):\n self.close()\n\n def on_combobox_changed(self, value):\n if(value >= 7):\n Features.message(\"Plotting is not supported on levels higher than 7\",QMessageBox.Information)\n\n def load_signal(self):\n self.all_signals, self.filter, self.load_check = Features.load_signal(self)\n if(self.load_check):\n if(len(self.all_signals) > 1): # Multiple file\n self.label_1D_DataName.setText(\"Multiple Signals\") # Data name\n else:\n name = self.all_signals[0].split(sep ='/')\n self.label_1D_DataName.setText(name[-1]) # Setting data name\n else:\n Features.message(\"You have to load at least 1 signal\", QMessageBox.Warning)\n self.label_1D_DataName.setText(\"No signal selected\")\n\n def load_folder(self):\n self.all_signals, self.load_check = Features.load_folder(self)\n if (self.load_check):\n Features.message(str(len(self.all_signals)) + \"signals ready to be processed !\", QMessageBox.Information)\n if (len(self.all_signals) > 1): # Multiple file\n self.label_1D_DataName.setText(\"Multiple Signals\") # Data name\n else:\n name = self.all_signals[0].split(sep='/')\n self.label_1D_DataName.setText(name[-1]) # Setting data name\n else:\n Features.message(\"You have to load at least 1 signal\", QMessageBox.Warning)\n\n def 
analyze_signal(self):\n self.average, self.entropy, self.kurtosis, self.max_v, self.median, self.min_v, self.skewness, self.standart_dev = Features.check_statistics(self)\n self.wavelet_type, self.wavelet_level = Features.discrete_check_wavelet(self)\n if(self.load_check and (self.average or self.entropy or self.kurtosis or self.max_v or self.median or self.min_v or self.skewness or self.standart_dev)): #işlenecek müzik olması durumu\n\n self.analyze_check = True\n col, self.header, self.db_header = Features.init_table(self, len(self.all_signals))\n self.db_matrix = np.zeros((len(self.all_signals), 8 * (self.wavelet_level + 1)))\n\n for iter in range(0 , len(self.all_signals)):\n print(self.all_signals[iter])\n self.signals.clear()\n self.audio, self.sample = librosa.load(self.all_signals[iter])\n self.signals.append(self.audio)\n self.time = np.arange(0, len(self.audio)) / self.sample\n\n coeffs = pywt.wavedec(self.audio, self.wavelet_type, level = self.wavelet_level) #wavelet analyze\n\n for i in range(0 , self.wavelet_level + 1):\n self.signals.append(coeffs[i]) #adding signals array to coeffs\n\n if (iter == 0 and self.wavelet_level < 8): #plotting first signal only\n Features.discrete_plot_signal(self)\n self.db_matrix[iter] = Features.insertTable(self, iter, col) #level = 3 ise signals içinde 5 (4 analiz edilmiş + 1 source)\n else:\n if not self.load_check :\n Features.message(\"You have to load at least 1 signal\", QMessageBox.Warning)\n else:\n Features.message(\"You have to pick at least 1 statistic function\", QMessageBox.Warning)\n\n def save_to_db(self):\n if(self.load_check and self.analyze_check):\n w_name = str(self.wavelet_type)\n table_name = \"Db_GTZAN_function_\" + w_name + \"_Degree_\" + str(self.wavelet_level)\n\n Database.create_table(Database.database_name, table_name, self.db_header, \"\") # creating new table with statistic function\n\n for index in range(0, len(self.all_signals)):\n name = self.all_signals[index].split(sep='/') #name of signal\n Database.delete_row(Database.database_name, table_name, name[-1], \"\")\n Database.add_values_to_table(Database.database_name, table_name, name[-1], self.db_header, self.db_matrix[index], \"\") #adding db to values\n Features.message(\"Your Data Saved Succesfully\", QMessageBox.Information)\n\n else:\n if (self.load_check):\n Features.message(\"You have to analyze the signals first\", QMessageBox.Warning)\n else:\n Features.message(\"You have to load at least 1 signal\", QMessageBox.Warning)\n\n\n def run(self):\n wave_types = pywt.wavelist(kind='discrete')\n levels = [1,2,3]\n signals = []\n self.average, self.entropy, self.kurtosis, self.max_v, self.median, self.min_v, self.skewness, self.standart_dev = Features.check_statistics(self)\n table_names = Database.get_table_names(Database.database_name)\n\n for wave_func in wave_types:\n if wave_func.find(\"sym\") == -1:\n for level in levels:\n self.wavelet_level = level\n self.wavelet_type = wave_func\n col, self.header, self.db_header = Features.init_table(self, len(self.all_signals))\n self.db_matrix = np.zeros((len(self.all_signals), 8 * (self.wavelet_level + 1)))\n\n if not any(\"Db_GTZAN_function_\" + str(wave_func) + \"_Degree_\" + str(level) in s for s in table_names):\n\n for iter in range(0, len(self.all_signals)):\n self.signals.clear()\n self.audio, self.sample = librosa.load(self.all_signals[iter])\n self.signals.append(self.audio)\n self.time = np.arange(0, len(self.audio)) / self.sample\n\n coeffs = pywt.wavedec(self.audio, self.wavelet_type, 
level=self.wavelet_level) # wavelet analyze\n\n for i in range(0, self.wavelet_level + 1):\n self.signals.append(coeffs[i]) # adding signals array to coeffs\n\n self.db_matrix[iter] = Features.insertTable(self, iter,col) # level = 3 ise signals içinde 5 (4 analiz edilmiş + 1 source)\n\n w_name = str(wave_func)\n table_name = \"Db_GTZAN_function_\" + w_name + \"_Degree_\" + str(level)\n Database.create_table(Database.database_name, table_name, self.db_header, \"\") # creating new table with statistic function\n\n for index in range(0, len(self.all_signals)):\n name = self.all_signals[index].split(sep='/') # name of signal\n Database.delete_row(Database.database_name, table_name, name[-1], \"\")\n Database.add_values_to_table(Database.database_name, table_name, name[-1], self.db_header, self.db_matrix[index], \"\") # adding db to values\n print(table_name)\n else:\n print(\"Db_GTZAN_function_\" + str(wave_func) + \"_Degree_\" + str(level) + ' passed')\n else:\n print(wave_func + \" passed\")","sub_path":"src/DiscreteWav.py","file_name":"DiscreteWav.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"82266859","text":"\"\"\"empty message\n\nRevision ID: 4bef3177e17c\nRevises: ebccccebee7c\nCreate Date: 2017-11-27 19:05:38.834115\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4bef3177e17c'\ndown_revision = 'ebccccebee7c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('imgpath', sa.String(length=30), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('item', 'imgpath')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/4bef3177e17c_.py","file_name":"4bef3177e17c_.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"514890511","text":"def find_short(s):\n # This function is a solution to the 7kyu Kata on codewars.com: \n # https://www.codewars.com/kata/shortest-word/train/python \n # Given a string of words return the length of the shortest word(s). 
String\n # will never be empty and you do not need to account for different data\n # types.\n \n sep_words = s.split(\" \") # split up the string into separate words\n l = len(sep_words[1]) #get length of first word\n\n for i in sep_words:\n if len(i) < l: # compare length of each word to first word, if it's smaller it becomes l\n l = len(i)\n \n return l\n","sub_path":"find_short.py","file_name":"find_short.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"526605935","text":"__author__ = 'gaudiouney'\nTAL = TAC = 500\nFACE = 'http://www.skatecuriosidade.com/wp-content/uploads/2013/03/king.jpg'\nCH = CV = 180\nFACEBOTAO = 'http://img.vivaolinux.com.br/imagens/dicas/comunidade/pms-ms.png'\nHH = HV = 50\nTELAA2 = 'http://cerebromasculino.com/wp-content/uploads/2013/09/moldura_quadro03.gif'\nHC = HV = 500\n\nclass Botao:\n def __init__(self, html, deque, tela):\n bt = self.e_botao = html.IMG(src=FACEBOTAO, width=HH, heigth=HV)\n bt.onclick = deque.voa\n bt.style.position = \"relative\"\n bt.style.marginLeft = '10px'\n tela <= bt\n\n#class Retorno:\n# def __init__(self, html, deque, tela):\n# bt = self.e_botao = html.IMG(src=FACEBOTAO, width=HH, heigth=HV)\n# bt.onclick = deque.voltar\n# bt.style.position = \"relative\"\n# bt.style.marginLeft = '30px'\n# tela <= bt\n\nclass Carta:\n def __init__(self, html, xy, deque):\n x, y = self.pos = xy\n self.deque = deque\n ct = self.e_carta = html.IMG(src=FACE, width=CH, heigth=CV)\n ct.style.position = \"absolute\"\n ct.style.left, ct.style.top = xy\n ct.style.marginLeft = '140px'\n x = x / 5\n ct.style.transition = \"left 0.5s linear %fs, top 0.5s linear %fs\" % (x, x)\n deque <= ct\n\n def voa(self, evento):\n self.deque.voa()\n\n def voar(self, delta):\n dx, dy = delta\n x, y = self.pos\n xy = self.pos = x + dx, y + dy\n ct = self.e_carta\n ct.style.left, ct.style.top = xy\n\n\nclass Deque:\n def __init__(self, html, tela):\n self.tela = tela\n self.deque = [Carta(html, (x*4, 128), self) for x in range(10)]\n def voa(self, ev=0):\n [carta.voar((800, 200)) for carta in self.deque]\n def voltar(self, ev):\n [carta.voar((-800, -200)) for carta in self.deque]\n def __le__(self, other):\n self.tela <= other\n\ndef main(html, doc):\n tela2 = doc[\"menu2\"]\n tela = doc[\"main\"]\n splash = html.DIV()\n cartas = html.DIV()\n tela <= splash\n tela <= cartas\n tela<=tela2\n tela2 <= html.IMG(src=TELAA2, width=HC, heigth=HV);\n deque = Deque(html, splash)\n botao = Botao(html, deque, cartas)\n #retorno = Retorno(html, deque, cartas)","sub_path":"src/vista.py","file_name":"vista.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"230001196","text":"import re, urllib\n\ndef get_page(url):\n return urllib.urlopen(url).read()\n\nseeds = ['http://www.lib.kobe-u.ac.jp/directory/sinbun/ymlist/ymlist.html']\nyears = []\nlinks = []\n# example: href=\"191201.html\"\nfor seed in seeds:\n\n #print get_page(seed)\n \n for i in re.findall('[0-9]+\\.\\html+', urllib.urlopen(seed).read()):\n #print i\n years.append(\"http://www.lib.kobe-u.ac.jp/directory/sinbun/ymlist/\"+i)\n\nfor year in years: \n for j in re.findall('[0-9]{8}', urllib.urlopen(year).read()):\n link = 'http://www.lib.kobe-u.ac.jp/das/ContentViewServlet?METAID='+j+'&TYPE=HTML_FILE&POS=1&LANG=JA'\n if link in links:\n continue\n else:\n 
links.append(link)\n","sub_path":"news/news/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"214436674","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport patsy\n\nclass Model(object):\n\n #Model: object that contain a model, either ReachSb or ReachSbPb\n\n def __init__(self, use_ad_type=False):\n\n #initializes a model object\n #inputs:\n # use_ad_type: (optional) Use the ReachSb (use_ad_type=False) or the\n # ReachSbPb model (use_ad_type=True). Default is False.\n\n if use_ad_type == False:\n self.model_type_ = 'without_ad_type'\n\n file = open(\"ReachSb.p\",\"rb\")\n model_info = pickle.load(file)\n file.close()\n\n self.model_= model_info['model']\n self.formula_= model_info['formula']\n self.train_data_ = model_info['train_data']\n\n else:\n self.model_type_ = 'with_ad_type'\n\n file = open(\"ReachSbPb.p\",\"rb\")\n model_info = pickle.load(file)\n file.close()\n\n self.model_ = model_info['model']\n self.formula_ = model_info['formula']\n self.train_data_ = model_info['train_data']\n\n def make_prediction(self, reach=[0], site=['All 4'], ad_type=None):\n\n # Make a prediction. The prediction will be made over a grid that\n # relates each predictor.\n #\n # Inputs:\n # reach: List of floats. Values for reach per day to be used for the\n # prediction. Default is [0]\n # site: List of strings. Name of media to be used for the prediction.\n # Default is 'All 4'.\n # ad_type: (optional) List of strings or None. Name of the ad types\n # to be used for the prediction. Default is None.\n #\n # Returns: Pandas Dataframe with the Reach, Site, Ad Type and predicted\n # total conversions per day. Reach and total conversions are in\n # normal space (not log).\n\n # apply encoding\n _,train_data = patsy.dmatrices(self.formula_,\n self.train_data_,\n return_type='dataframe')\n\n encoding = train_data.design_info\n\n # tranform reach to log10 scale\n reach = np.log10(np.array(reach)+1)\n\n predictions = []\n if self.model_type_ == 'without_ad_type':\n\n #build prediction dataframe\n new_reach = []\n new_site = []\n for r in reach:\n for s in site:\n new_reach.append(r)\n new_site.append(s)\n\n new_df = pd.DataFrame({'Reach':new_reach, 'Sb':new_site})\n new_df = new_df.drop_duplicates()\n self.new_df_ = new_df\n\n elif self.model_type_ == 'with_ad_type':\n\n #build prediction dataframe\n new_reach = []\n new_site = []\n new_ad_type = []\n for r in reach:\n for s in site:\n for ad in ad_type:\n new_reach.append(r)\n new_site.append(s)\n new_ad_type.append(ad)\n\n new_df = pd.DataFrame({'Reach':new_reach, 'Sb':new_site, 'Pb':new_ad_type})\n new_df = new_df.drop_duplicates()\n self.new_df_ = new_df\n\n new_df_enc, = patsy.build_design_matrices([encoding],self.new_df_,\n return_type='dataframe')\n predictions = self.model_.predict(new_df_enc)\n\n # total conversions and reach in normal scale\n predictions = 10**predictions - 1\n self.new_df_['Reach'] = 10**self.new_df_['Reach']-1\n\n predictions_df = pd.DataFrame(predictions, columns=['Conversions/day'])\n\n new_df = pd.concat([self.new_df_, predictions_df], axis=1)\n new_df = new_df.sort_values(by=['Conversions/day'], ascending=False)\n\n if self.model_type_ == 'without_ad_type':\n new_df.rename(columns={'Reach':'Reach/day','Sb':'Site'},\n inplace=True)\n elif self.model_type_ == 'with_ad_type':\n new_df.rename(columns={'Reach':'Reach/day','Sb':'Site',\n 'Pb':'Ad Type'},\n inplace=True)\n\n return 
new_df\n","sub_path":"SMRS/models/apply_model.py","file_name":"apply_model.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"449077207","text":"import os, shutil\n\npath = 'hadoop/mapreduce/input'\nif os.path.exists(path):\n\tshutil.rmtree(path)\nos.makedirs(path)\n\nwith open(\"hadoop/data/map_reduce_input_data\") as f:\n\tdata = f.readlines()\n\tfor line in data:\n\t\tline = line.rstrip('\\n')\n\t\tdoc_id= line[0]\n\t\t##doc_title=line[2]\n\t\tfo=open(\"hadoop/mapreduce/input/input01\", \"ab+\")\n\t\tfo.write(doc_id)\n\t\t##fo.write(doc_title)\n\n","sub_path":"splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"227884203","text":"from functools import partial\nfrom typing import Optional, Tuple, Union\nfrom warnings import warn\n\nimport gym\nimport numpy as np\nfrom stable_baselines.common.base_class import BaseRLModel\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom imitation import summaries\nimport imitation.rewards.discrim_net as discrim_net\nfrom imitation.rewards.reward_net import BasicShapedRewardNet\nimport imitation.util as util\nfrom imitation.util import buffer, reward_wrapper\n\n\nclass AdversarialTrainer:\n \"\"\"Trainer for GAIL and AIRL.\"\"\"\n\n env: gym.Env\n \"\"\"The original environment.\"\"\"\n\n env_train: gym.Env\n \"\"\"Like `self.env`, but wrapped with train reward unless in debug mode.\n\n If `debug_use_ground_truth=True` was passed into the initializer then\n `self.env_train` is the same as `self.env`.\n \"\"\"\n\n env_test: gym.Env\n \"\"\"Like `self.env`, but wrapped with test reward unless in debug mode.\n\n If `debug_use_ground_truth=True` was passed into the initializer then\n `self.env_test` is the same as `self.env`.\n \"\"\"\n\n def __init__(self,\n env: Union[gym.Env, str],\n gen_policy: BaseRLModel,\n discrim: discrim_net.DiscrimNet,\n expert_rollouts: Tuple[np.ndarray, np.ndarray, np.ndarray],\n *,\n disc_opt_cls: tf.train.Optimizer = tf.train.AdamOptimizer,\n disc_opt_kwargs: dict = {},\n n_disc_samples_per_buffer: int = 200,\n gen_replay_buffer_capacity: Optional[int] = None,\n init_tensorboard: bool = False,\n debug_use_ground_truth: bool = False):\n \"\"\"Builds Trainer.\n\n Args:\n env: A Gym environment or ID that the policy is trained on.\n gen_policy: The generator policy that is trained to maximize\n discriminator confusion.\n discrim: The discriminator network.\n For GAIL, use a DiscrimNetGAIL. For AIRL, use a DiscrimNetAIRL.\n expert_rollouts: A tuple of three arrays from expert rollouts,\n `old_obs`, `act`, and `new_obs`.\n disc_opt_cls: The optimizer for discriminator training.\n disc_opt_kwargs: Parameters for discriminator training.\n n_disc_samples_per_buffer: The number of obs-act-obs triples\n sampled from each replay buffer (expert and generator) during each\n step of discriminator training. 
This is also the number of triples\n stored in the replay buffer after each epoch of generator training.\n gen_replay_buffer_capacity: The capacity of the\n generator replay buffer (the number of obs-action-obs samples from\n the generator that can be stored).\n\n By default this is equal to `20 * n_disc_samples_per_buffer`.\n init_tensorboard: If True, makes various discriminator\n TensorBoard summaries.\n debug_use_ground_truth: If True, use the ground truth reward for\n `self.train_env`.\n This disables the reward wrapping that would normally replace\n the environment reward with the learned reward. This is useful for\n sanity checking that the policy training is functional.\n \"\"\"\n self._sess = tf.get_default_session()\n self._global_step = tf.train.create_global_step()\n\n self._n_disc_samples_per_buffer = n_disc_samples_per_buffer\n self.debug_use_ground_truth = debug_use_ground_truth\n\n self.env = util.maybe_load_env(env, vectorize=True)\n self._gen_policy = gen_policy\n\n # Discriminator and reward output\n self._discrim = discrim\n self._disc_opt_cls = disc_opt_cls\n self._disc_opt_kwargs = disc_opt_kwargs\n with tf.variable_scope(\"trainer\"):\n with tf.variable_scope(\"discriminator\"):\n self._build_disc_train()\n self._init_tensorboard = init_tensorboard\n if init_tensorboard:\n with tf.name_scope(\"summaries\"):\n self._build_summarize()\n self._sess.run(tf.global_variables_initializer())\n\n if debug_use_ground_truth:\n self.env_train = self.env_test = self.env\n else:\n reward_train = partial(\n self.discrim.reward_train,\n gen_log_prob_fn=self._gen_policy.action_probability)\n self.env_train = reward_wrapper.RewardVecEnvWrapper(\n self.env, reward_train)\n self.env_test = reward_wrapper.RewardVecEnvWrapper(\n self.env, self.discrim.reward_test)\n\n if gen_replay_buffer_capacity is None:\n gen_replay_buffer_capacity = 20 * self._n_disc_samples_per_buffer\n self._gen_replay_buffer = buffer.ReplayBuffer(gen_replay_buffer_capacity,\n self.env)\n self._populate_gen_replay_buffer()\n self._exp_replay_buffer = buffer.ReplayBuffer.from_data(*expert_rollouts)\n if n_disc_samples_per_buffer > len(self._exp_replay_buffer):\n warn(\"The discriminator batch size is larger than the number of \"\n \"expert samples.\")\n\n @property\n def discrim(self) -> discrim_net.DiscrimNet:\n \"\"\"Discriminator being trained, used to compute reward for policy.\"\"\"\n return self._discrim\n\n @property\n def gen_policy(self) -> BaseRLModel:\n \"\"\"Policy (i.e. the generator) being trained.\"\"\"\n return self._gen_policy\n\n def train_disc(self, n_steps=10, **kwargs):\n \"\"\"Trains the discriminator to minimize classification cross-entropy.\n\n Args:\n n_steps (int): The number of training steps.\n gen_old_obs (np.ndarray): See `_build_disc_feed_dict`.\n gen_act (np.ndarray): See `_build_disc_feed_dict`.\n gen_new_obs (np.ndarray): See `_build_disc_feed_dict`.\n \"\"\"\n for _ in range(n_steps):\n fd = self._build_disc_feed_dict(**kwargs)\n step, _ = self._sess.run([self._global_step, self._disc_train_op],\n feed_dict=fd)\n if self._init_tensorboard and step % 20 == 0:\n self._summarize(fd, step)\n\n def train_gen(self, n_steps=10000):\n self._gen_policy.set_env(self.env_train)\n # TODO(adam): learn was not intended to be called for each training batch\n # It should work, but might incur unnecessary overhead: e.g. in PPO2\n # a new Runner instance is created each time. 
Also a hotspot for errors:\n # algorithms not tested for this use case, may reset state accidentally.\n self._gen_policy.learn(n_steps, reset_num_timesteps=False)\n self._populate_gen_replay_buffer()\n\n def _populate_gen_replay_buffer(self) -> None:\n \"\"\"Generate and store generator samples in the buffer.\n\n More specifically, rolls out generator-policy trajectories in the\n environment until `self._n_disc_samples_per_buffer` obs-act-obs samples are\n produced, and then stores these samples.\n \"\"\"\n gen_rollouts = util.rollout.generate_transitions(\n self._gen_policy, self.env_train,\n n_timesteps=self._n_disc_samples_per_buffer)[:3]\n self._gen_replay_buffer.store(*gen_rollouts)\n\n def train(self, n_epochs=100, *, n_gen_steps_per_epoch=None,\n n_disc_steps_per_epoch=None):\n \"\"\"Trains the discriminator and generator against each other.\n\n Args:\n n_epochs (int): The number of epochs to train. Every epoch consists\n of training the discriminator and then training the generator.\n n_disc_steps_per_epoch (int): The number of steps to train the\n discriminator every epoch. More precisely, the number of full batch\n Adam optimizer steps to perform.\n n_gen_steps_per_epoch (int): The number of generator training steps\n during each epoch. (ie, the timesteps argument in in\n `policy.learn(timesteps)`).\n \"\"\"\n for i in tqdm(range(n_epochs), desc=\"AIRL train\"):\n self.train_disc(**_n_steps_if_not_none(n_disc_steps_per_epoch))\n self.train_gen(**_n_steps_if_not_none(n_gen_steps_per_epoch))\n\n def eval_disc_loss(self, **kwargs):\n \"\"\"Evaluates the discriminator loss.\n\n The generator rollout parameters of the form \"gen_*\" are optional,\n but if one is given, then all such parameters must be filled (otherwise\n this method will error). 
If none of the generator rollout parameters are\n given, then a rollout with the same length as the expert rollout\n is generated on the fly.\n\n Args:\n gen_old_obs (np.ndarray): See `_build_disc_feed_dict`.\n gen_act (np.ndarray): See `_build_disc_feed_dict`.\n gen_new_obs (np.ndarray): See `_build_disc_feed_dict`.\n\n Returns:\n discriminator_loss (float): The total cross-entropy error in the\n discriminator's classification.\n \"\"\"\n fd = self._build_disc_feed_dict(**kwargs)\n return np.mean(self._sess.run(self.discrim.disc_loss, feed_dict=fd))\n\n def _build_summarize(self):\n self._summary_writer = summaries.make_summary_writer(\n graph=self._sess.graph)\n self.discrim.build_summaries()\n self._summary_op = tf.summary.merge_all()\n\n def _summarize(self, fd, step):\n events = self._sess.run(self._summary_op, feed_dict=fd)\n self._summary_writer.add_summary(events, step)\n\n def _build_disc_train(self):\n # Construct Train operation.\n disc_opt = self._disc_opt_cls(**self._disc_opt_kwargs)\n self._disc_train_op = disc_opt.minimize(\n tf.reduce_mean(self.discrim.disc_loss),\n global_step=self._global_step)\n\n def _build_disc_feed_dict(self, *,\n gen_old_obs: Optional[np.ndarray] = None,\n gen_act: Optional[np.ndarray] = None,\n gen_new_obs: Optional[np.ndarray] = None,\n ) -> dict:\n \"\"\"Build a feed dict that holds the next training batch of generator\n and expert obs-act-obs triples.\n\n Args:\n gen_old_obs (np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.observation_space.shape`.\n The ith observation in this array is the observation seen when the\n generator chooses action `gen_act[i]`.\n gen_act (np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.action_space.shape`.\n gen_new_obs (np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.observation_space.shape`.\n The ith observation in this array is from the transition state after\n the generator chooses action `gen_act[i]`.\n \"\"\" # noqa: E501\n\n # Sample generator training batch from replay buffers, unless provided\n # in argument.\n none_count = sum(int(x is None)\n for x in (gen_old_obs, gen_act, gen_new_obs))\n if none_count == 3:\n tf.logging.debug(\"_build_disc_feed_dict: No generator rollout \"\n \"parameters were \"\n \"provided, so we are generating them now.\")\n gen_old_obs, gen_act, gen_new_obs = self._gen_replay_buffer.sample(\n self._n_disc_samples_per_buffer)\n elif none_count != 0:\n raise ValueError(\"Gave some but not all of the generator params.\")\n\n # Sample expert training batch from replay buffer.\n expert_old_obs, expert_act, expert_new_obs = self._exp_replay_buffer.sample(\n self._n_disc_samples_per_buffer)\n\n # Check dimensions.\n n_expert = len(expert_old_obs)\n n_gen = len(gen_old_obs)\n N = n_expert + n_gen\n assert n_expert == len(expert_act)\n assert n_expert == len(expert_new_obs)\n assert n_gen == len(gen_act)\n assert n_gen == len(gen_new_obs)\n\n # Concatenate rollouts, and label each row as expert or generator.\n old_obs = np.concatenate([expert_old_obs, gen_old_obs])\n act = np.concatenate([expert_act, gen_act])\n new_obs = np.concatenate([expert_new_obs, gen_new_obs])\n labels = np.concatenate([np.zeros(n_expert, dtype=int),\n np.ones(n_gen, dtype=int)])\n\n # Calculate generator-policy log probabilities.\n log_act_prob = self._gen_policy.action_probability(old_obs, actions=act,\n logp=True)\n assert len(log_act_prob) == N\n log_act_prob = 
log_act_prob.reshape((N,))\n\n fd = {\n self.discrim.old_obs_ph: old_obs,\n self.discrim.act_ph: act,\n self.discrim.new_obs_ph: new_obs,\n self.discrim.labels_ph: labels,\n self.discrim.log_policy_act_prob_ph: log_act_prob,\n }\n return fd\n\n\ndef _n_steps_if_not_none(n_steps):\n if n_steps is None:\n return {}\n else:\n return dict(n_steps=n_steps)\n\n\ndef init_trainer(env_id: str,\n rollout_glob: str,\n *,\n n_expert_demos: Optional[int] = None,\n seed: int = 0,\n log_dir: str = None,\n use_gail: bool = False,\n num_vec: int = 8,\n parallel: bool = False,\n max_episode_steps: Optional[int] = None,\n max_n_files: int = 1,\n scale: bool = True,\n airl_entropy_weight: float = 1.0,\n discrim_kwargs: bool = {},\n reward_kwargs: bool = {},\n trainer_kwargs: bool = {},\n make_blank_policy_kwargs: bool = {},\n ):\n \"\"\"Builds an AdversarialTrainer, ready to be trained on a vectorized\n environment and expert demonstrations.\n\n Args:\n env_id: The string id of a gym environment.\n rollout_glob: Argument for `imitation.util.rollout.load_trajectories`.\n n_expert_demos: The number of expert trajectories to actually use\n after loading them via `load_trajectories`.\n If None, then use all available trajectories.\n If `n_expert_demos` is an `int`, then use\n exactly `n_expert_demos` trajectories, erroring if there aren't\n enough trajectories. If there are surplus trajectories, then use the\n first `n_expert_demos` trajectories and drop the rest.\n seed: Random seed.\n log_dir: Directory for logging output.\n use_gail: If True, then train using GAIL. If False, then train\n using AIRL.\n num_vec: The number of vectorized environments.\n parallel: If True, then use SubprocVecEnv; otherwise, DummyVecEnv.\n max_episode_steps: If specified, wraps VecEnv in TimeLimit wrapper with\n this episode length before returning.\n max_n_files: If provided, then only load the most recent `max_n_files`\n files, as sorted by modification times.\n policy_dir: The directory containing the pickled experts for\n generating rollouts.\n scale: If True, then scale input Tensors to the interval [0, 1].\n airl_entropy_weight: Only applicable for AIRL. 
The `entropy_weight`\n argument of `DiscrimNetAIRL.__init__`.\n trainer_kwargs: Arguments for the Trainer constructor.\n reward_kwargs: Arguments for the `*RewardNet` constructor.\n discrim_kwargs: Arguments for the `DiscrimNet*` constructor.\n make_blank_policy_kwargs: Keyword arguments passed to `make_blank_policy`,\n used to initialize the trainer.\n \"\"\"\n env = util.make_vec_env(env_id, num_vec, seed=seed, parallel=parallel,\n log_dir=log_dir, max_episode_steps=max_episode_steps)\n gen_policy = util.init_rl(env, verbose=1,\n **make_blank_policy_kwargs)\n\n if use_gail:\n discrim = discrim_net.DiscrimNetGAIL(env.observation_space,\n env.action_space,\n scale=scale,\n **discrim_kwargs)\n else:\n rn = BasicShapedRewardNet(env.observation_space,\n env.action_space,\n scale=scale,\n **reward_kwargs)\n discrim = discrim_net.DiscrimNetAIRL(rn,\n entropy_weight=airl_entropy_weight,\n **discrim_kwargs)\n\n expert_demos = util.rollout.load_trajectories(rollout_glob,\n max_n_files=max_n_files)\n if n_expert_demos is not None:\n assert len(expert_demos) >= n_expert_demos\n expert_demos = expert_demos[:n_expert_demos]\n\n expert_rollouts = util.rollout.flatten_trajectories(expert_demos)[:3]\n trainer = AdversarialTrainer(env, gen_policy, discrim, expert_rollouts,\n **trainer_kwargs)\n return trainer\n","sub_path":"src/imitation/algorithms/adversarial.py","file_name":"adversarial.py","file_ext":"py","file_size_in_byte":16290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"464598558","text":"import unittest\n\nimport numpy as np\n\nfrom pydrake.solvers import mathematicalprogram as mp\n\nfrom pydrake.solvers import augmented_lagrangian as al\nfrom pydrake.autodiffutils import InitializeAutoDiff, AutoDiffXd\n\nfrom pydrake.common.test_utilities.deprecation import catch_drake_warnings\n\n\nclass TestAugmentedLagrangianNonsmooth(unittest.TestCase):\n def setUp(self):\n self.prog = mp.MathematicalProgram()\n x = self.prog.NewContinuousVariables(2)\n self.prog.AddQuadraticCost(x[0] * x[0] + 2 * x[1] * x[1])\n self.prog.AddLinearConstraint(x[0] + x[1] <= 3)\n\n def test_eval_double(self):\n dut = al.AugmentedLagrangianNonsmooth(prog=self.prog,\n include_x_bounds=True)\n x_val = np.array([1., 3])\n lambda_val = np.array([0.5])\n al_val, constraint_residue, cost = dut.Eval(x=x_val,\n lambda_val=lambda_val,\n mu=0.1)\n self.assertIsInstance(al_val, float)\n self.assertIsInstance(constraint_residue, np.ndarray)\n self.assertIsInstance(cost, float)\n\n def test_eval_ad(self):\n dut = al.AugmentedLagrangianNonsmooth(prog=self.prog,\n include_x_bounds=True)\n x_val = InitializeAutoDiff(np.array([1., 3]))\n al_val, constraint_residue, cost = dut.Eval(x=x_val,\n lambda_val=np.array([0.5]),\n mu=0.1)\n self.assertIsInstance(al_val, AutoDiffXd)\n self.assertIsInstance(constraint_residue, np.ndarray)\n self.assertIsInstance(cost, AutoDiffXd)\n\n def test_lagrangian_size(self):\n self.assertEqual(\n al.AugmentedLagrangianNonsmooth(\n prog=self.prog, include_x_bounds=True).lagrangian_size(), 1)\n self.assertEqual(\n al.AugmentedLagrangianNonsmooth(\n prog=self.prog, include_x_bounds=False).lagrangian_size(), 1)\n\n def test_is_equality(self):\n self.assertEqual(\n al.AugmentedLagrangianNonsmooth(\n prog=self.prog, include_x_bounds=True).is_equality(), [False])\n\n def test_nonsmooth_augmented_lagrangian_deprecation(self):\n # Remove after 2022-07-01.\n with catch_drake_warnings(expected_count=1):\n dut = al.NonsmoothAugmentedLagrangian(prog=self.prog,\n 
include_x_bounds=True)\n self.assertEqual(dut.lagrangian_size(), 1)\n","sub_path":"bindings/pydrake/solvers/test/augmented_lagrangian_test.py","file_name":"augmented_lagrangian_test.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"209319218","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"TrainOnestepGen network\"\"\"\n\nfrom mindspore import nn\nfrom mindspore import ops\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import functional as F\nfrom mindspore.ops import operations as P\n\nGRADIENT_CLIP_TYPE = 1\nGRADIENT_CLIP_VALUE = 1.0\n\nclip_grad = C.MultitypeFuncGraph(\"clip_grad\")\n\n\n@clip_grad.register(\"Number\", \"Number\", \"Tensor\")\ndef _clip_grad(clip_type, clip_value, grad):\n \"\"\"\n Clip gradients.\n Inputs:\n clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.\n clip_value (float): Specifies how much to clip.\n grad (tuple[Tensor]): Gradients.\n\n Outputs:\n tuple[Tensor], clipped gradients.\n \"\"\"\n if clip_type not in (0, 1):\n return grad\n dt = F.dtype(grad)\n if clip_type == 0:\n new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),\n F.cast(F.tuple_to_array((clip_value,)), dt))\n else:\n new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))\n return new_grad\n\n\nclass TrainOnestepGen(nn.TrainOneStepCell):\n \"\"\"TrainOnestepGen\n Encapsulation class of DBPN network training.\n Append an optimizer to the training network after that the construct\n function can be called to create the backward graph.\n Args:\n network(Cell): Generator with loss Cell. Note that loss function should have been added\n optimizer(Cell):Optimizer for updating the weights.\n sens (Number): The adjust parameter. 
Default: 1.0.\n Outputs:\n Tensor\n \"\"\"\n\n def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=True):\n super(TrainOnestepGen, self).__init__(network, optimizer, sens)\n self.cast = P.Cast()\n self.hyper_map = C.HyperMap()\n self.enable_clip_grad = enable_clip_grad\n # self.print = P.Print()\n\n def construct(self, HR_img, LR_img):\n \"\"\"Defines the computation performed.\"\"\"\n weights = self.weights\n\n loss = self.network(HR_img, LR_img)\n sens_g = ops.Fill()(ops.DType()(loss), ops.Shape()(loss), self.sens)\n grads = self.grad(self.network, weights)(HR_img, LR_img, sens_g)\n if self.enable_clip_grad:\n grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)\n grads = self.grad_reducer(grads)\n return ops.depend(loss, self.optimizer(grads))\n","sub_path":"research/cv/DBPN/src/trainonestep/trainonestepgenv2.py","file_name":"trainonestepgenv2.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"143607545","text":"import math\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, \".\")\nfrom edit_image import parameter_range\n\npexels_dir = Path(\"/scratch\") / \"stud\" / \"pfister\" / \"NIAA\" / \"pexels\"\nimg_dir = pexels_dir / \"images\"\nout_dir = pexels_dir / \"edited_images\"\n\ndel parameter_range[\"lcontrast\"]\norig_imgs = list(img_dir.iterdir())\norig_imgs: set = {str(img.name) for img in orig_imgs}\n\nmissing_all = set(orig_imgs)\nmissing_sw = set()\nfor parameter in parameter_range:\n for change in parameter_range[parameter][\"range\"]:\n if math.isclose(change, parameter_range[parameter][\"default\"]):\n continue\n edited_imgs = list((out_dir / parameter / str(change)).iterdir())\n edited_imgs: set = {str(img.name) for img in edited_imgs}\n missing = orig_imgs.difference(edited_imgs)\n missing_all.intersection_update(missing)\n missing_sw.update(missing)\n print(parameter, change)\n print(len(missing))\n\nprint(\"missing all:\", len(missing_all))\nprint(missing_all)\n\nprint(\"missing somewhere:\", len(missing_sw))\nprint(missing_sw)\n","sub_path":"dataset_processing/find_missing.py","file_name":"find_missing.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"71100418","text":"def dragon(a):\n # make a copy\n b = list(a)\n # reverse the characters\n b.reverse()\n # replace all 0 with 1 and 1 with 0\n b = ['0' if i == '1' else '1' for i in b]\n # result is a + '0' + b \n return a + '0' + ''.join(b)\n\n\ndef checksum(a):\n cs = ''\n i = 0\n while i < len(a):\n if a[i] == a[i + 1]:\n cs += '1'\n else:\n cs += '0'\n i += 2\n if len(cs) % 2 == 0:\n return checksum(cs)\n\n return cs\n\n\ndef fill_disk(disk, n):\n if len(disk) >= n:\n return checksum(disk[:n])\n\n return fill_disk(dragon(disk), n)\n\n\n# tests\nassert dragon('1') == '100'\nassert dragon('0') == '001'\nassert dragon('11111') == '11111000000'\nassert dragon('111100001010') == '1111000010100101011110000'\n\nassert checksum('110010110100') == '100'\n\nassert fill_disk('10000', 20) == '01100'\n\nprint(fill_disk('01110110101001000', 272))\n","sub_path":"2016/16_01.py","file_name":"16_01.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"88782713","text":"#!/usr/bin/python\n\nimport sys\nfrom nilumod import *\n\ndef main():\n '''\n This is an example of how to use it\n 
'''\n # Read arguments, if no given read stdin\n # The arguments must be defined as DATE, HOUR and can not be a list of dates\n dates = sys.argv[1]\n hour = sys.argv[2]\n if dates==[] or hour==[]:\n while True:\n try:\n if dates==[]: \n dates.append(raw_input())\n if hour==[]:\n hour.append(raw_input())\n except EOFError:\n break\n z = weatherdata_Z(dates,hour)\n\nif __name__=='__main__':\n main()\n","sub_path":"nilu/gen_Z.py","file_name":"gen_Z.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"609867097","text":"import math\nimport xml.etree.ElementTree as ET\nfrom src.gameSprites import *\nfrom src.gameConst import *\n\nclass Solar() :\t\t\n\n def __init__( self, xmlString ) :\n self.sun = Planet( \"Sun\", 0, SUN_R, 0, SUN_COLOR )\n self.planets = []\n try : \n xmlTree = ET.fromstring( xmlString ) \n for planet in xmlTree : \n self.planets.append( self.getPlanet( planet ) )\n except ET.ParseError :\n print( \"Errors in the xml file!!\" )\n self._quadric = gluNewQuadric()\n gluQuadricDrawStyle( self._quadric, GLU_FILL )\n \n def getPlanet( self, planet ) :\n moons = []\n disks = []\n for prop in planet :\n if prop.tag.upper() == \"NAME\" :\n name = prop.text\n elif prop.tag.upper() == \"DIST\" :\n dist = float( prop.text ) * UNIT\n elif prop.tag.upper() == \"R\" :\n r = float( prop.text ) * UNIT\n elif prop.tag.upper() == \"ANGULAR\" :\n angular = ANGULAR_SCALAR * 2 * math.pi / float( prop.text )\n elif prop.tag.upper() == \"COLOR\" :\n color = eval( prop.text )\n elif prop.tag.upper() == \"MOON\" : \n moons.append( self.getPlanet( prop ) ) \n elif prop.tag.upper() == \"DISK\" : \n disks.append( self.getDisk( prop ) ) \n p = Planet( name, dist, r, angular, color )\n p.moons = moons\n p.disks = disks\n return p\n\n def getDisk( self, disk ) :\n xinc = 0\n yinc = 0\n zinc = 0\n for prop in disk :\n if prop.tag.upper() == \"NAME\" :\n name = prop.text\n elif prop.tag.upper() == \"INNER\" :\n inner = float( prop.text )\n elif prop.tag.upper() == \"OUTER\" :\n outer = float( prop.text )\n elif prop.tag.upper() == \"COLOR\" :\n color = eval( prop.text )\n elif prop.tag.upper() == \"XINC\" :\n xinc = float( prop.text )\n elif prop.tag.upper() == \"YINC\" :\n yinc = float( prop.text )\n elif prop.tag.upper() == \"ZINC\" :\n zinc = float( prop.text )\n d = Disk( name, inner, outer, color, xinc, yinc, zinc )\n return d\n\n def update( self ) :\n self.sun.update()\n for p in self.planets : p.update()\n \n def blit( self ) :\n if ORBIT_TRAJECTORY_VISIBLE :\n self.drawOrbits()\n self.sun.blit()\n for p in self.planets : p.blit()\n \n def drawOrbits( self ) :\n glPushMatrix() \n glRotate( -90, 1, 0, 0 )\n for p in self.planets :\n glMaterialfv( GL_FRONT_AND_BACK, GL_DIFFUSE, ORBIT_COLOR )\n c = 2 * math.pi * p.dist\n q = ORBIT_QUALITY_MIN + int( c * ORBIT_QUALITY )\n gluDisk( self._quadric, p.dist-ORBIT_WIDTH, p.dist, q, q )\n glPopMatrix()\n","sub_path":"src/solar.py","file_name":"solar.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"143477464","text":"import numpy as np\nimport cPickle\n\n\nPAD_ID = 0\nSTART_ID = 1\nEND_ID = 2\n\ndef read_data(source_path, target_path):\n \"\"\"\n read and bucket data\n\n Args:\n source_path: path to the pkl file containing source sequences\n target_path: path to pkl file containing target sequences\n\n Returns: bucketed data, buckets (see bucket_data function)\n\n 
\"\"\"\n X_dict = cPickle.load(open(source_path, 'rb'))\n y_dict = cPickle.load(open(target_path, 'rb'))\n\n X, y = [], []\n for k, v in X_dict.iteritems():\n X.append(v)\n y.append(y_dict[k])\n\n return bucket_data(X, y)\n\n\ndef bucket_data(X, y, max_buckets=6):\n \"\"\"\n Constructs buckets for seq2seq model.\n Bucket sizes: (len_x, max(len(y) for elements in y where len(x) == len_x)\n there should be a limited number of lengths in X\n (one for each number of staff lines),\n for each length one bucket is constructed.\n Args:\n X: list of input data.\n y: list of target data, same order as X.\n max_buckets: Max number of buckets (number of unique lengths in X)\n\n Returns: data_bucketed as list of lists where index is\n data in bucket of buckets[index], buckets as list.\n\n \"\"\"\n\n x_lengths = [e.shape[1] for e in X]\n y_lengths = [len(e) for e in y]\n data_bucketed = []\n buckets = []\n\n # buckets in X data are staff_length * n, where is max number of staves\n # in source data (set max_buckets to n)\n x_buckets = np.unique(x_lengths)\n print(x_buckets)\n assert len(x_buckets) <= max_buckets, \\\n \"too many buckets in X: {0}\".format(len(x_buckets))\n\n for bucket_id, xl in enumerate(x_buckets):\n x_xl = [X[i] for i in xrange(len(x_lengths)) if x_lengths[i] == xl]\n y_xl = [y[i] for i in xrange(len(x_lengths)) if x_lengths[i] == xl]\n yl = max([y_lengths[i] for i in xrange(\n len(x_lengths)) if x_lengths[i] == xl])\n buckets.append((xl, yl))\n data_bucketed.append(zip(x_xl, y_xl))\n\n for i, b in enumerate(buckets):\n buckets[i] = (b[0], b[1]+2)\n\n return data_bucketed, buckets\n\n\ndef split_train_test_bucketed(data_set, partition=0.8, seed=1):\n \"\"\"\n Split each bucket in data_set to partition.\n\n Args:\n data_set: bucketed data set\n partition: partition to split data with.\n seed: (optional) seed for np.random to split data with.\n\n Returns:\n \"\"\"\n\n train_data, test_data = [], []\n for b in xrange(len(data_set)):\n rnd = np.random.RandomState(seed)\n data = data_set[b]\n rnd.shuffle(data)\n\n split = int(len(data) * partition)\n train_data.append(data[:split])\n test_data.append(data[split:])\n return train_data, test_data\n\n\ndef get_batch(data, bucket_id, buckets, batch_size, reverse_input=True):\n \"\"\"\n\n Args:\n data:\n bucket_id:\n buckets:\n batch_size:\n reverse_input:\n\n Returns:\n\n \"\"\"\n encoder_size, decoder_size = buckets[bucket_id]\n encoder_inputs, decoder_inputs = [], []\n\n # Get a random batch of encoder and decoder inputs from data,\n # pad them if needed, reverse encoder inputs and add GO to decoder.\n for _ in xrange(batch_size):\n encoder_input, decoder_input = np.random.choice(data[bucket_id])\n if reverse_input:\n encoder_input = np.fliplr(encoder_input)\n encoder_inputs.append(encoder_input)\n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n decoder_input = [START_ID] + list(decoder_input) + \\\n [PAD_ID] * decoder_pad_size\n if len(decoder_input) != decoder_size:\n raise ValueError(\"input too small\", len(decoder_input), decoder_size)\n decoder_inputs.append(decoder_input)\n\n # Now we create batch-major vectors from the data selected above.\n batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []\n\n # Batch encoder inputs are just re-indexed encoder_inputs.\n for length_idx in xrange(encoder_size):\n batch_encoder_inputs.append(np.array([\n encoder_inputs[batch_idx][:, length_idx]\n for batch_idx in xrange(batch_size)\n ], 
dtype=np.float32))\n\n # Batch decoder inputs are re-indexed decoder_inputs, we create weights.\n for length_idx in xrange(decoder_size):\n batch_decoder_inputs.append(\n np.array([decoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(batch_size)],\n dtype=np.int32))\n\n # Create target_weights to be 0 for targets that are padding.\n batch_weight = np.ones(batch_size, dtype=np.float32)\n for batch_idx in xrange(batch_size):\n # We set weight to 0 if the corresponding target is a PAD symbol.\n # The corresponding target is decoder_input shifted by 1 forward.\n target = None\n if length_idx < decoder_size - 1:\n target = decoder_inputs[batch_idx][length_idx + 1]\n if length_idx == decoder_size - 1 or target == PAD_ID:\n batch_weight[batch_idx] = 0.0\n batch_weights.append(batch_weight)\n return batch_encoder_inputs, batch_decoder_inputs, batch_weights\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"81470053","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nimport tensorflow as tf\nimport numpy as np\nfrom dataManipulations import *\nfrom plotUtilities import *\nfrom model import *\nfrom sklearn import metrics\n\nFLAGS = None\n\n#deviceName = '/cpu:0'\n#deviceName = '/:GPU:0'\ndeviceName = None\n##############################################################################\n##############################################################################\ndef makePlots(sess, myDataManipulations):\n #Fetch operations\n x = tf.get_default_graph().get_operation_by_name(\"input/x-input\").outputs[0]\n y = tf.get_default_graph().get_operation_by_name(\"model/output/Identity\").outputs[0] \n yTrue = tf.get_default_graph().get_operation_by_name(\"input/y-input\").outputs[0]\n\n dropout_prob = tf.get_default_graph().get_operation_by_name(\"model/dropout_prob\").outputs[0]\n trainingMode = tf.get_default_graph().get_operation_by_name(\"model/trainingMode\").outputs[0]\n\n features = myDataManipulations.features\n labels = myDataManipulations.labels\n result = sess.run([x, y, yTrue], feed_dict={x: features, yTrue: labels, dropout_prob: 0.0, trainingMode: False})\n\n '''\n prob = tf.nn.softmax(logits=y)\n onehot_labels = tf.one_hot(tf.to_int32(yTrue), depth=10, axis=-1)\n result = sess.run([x, y, yTrue, prob, onehot_labels], feed_dict={x: xs, yTrue: ys, keep_prob: 1.0})\n\n p = result[3]\n onehot = result[4]\n print(\"Target:\",onehot[0:3])\n print(\"Prediction:\",p[0:3])\n #plt.plot(p[0],\"r\",p[1],\"g\",p[2],\"b\")\n plt.plot(onehot[0],\"r\")\n plt.show()\n return\n '''\n \n modelInput = result[0]\n modelResult = result[1]\n model_fastMTT = modelInput[:,1]\n model_fastMTT = np.reshape(model_fastMTT,(-1,1))\n labels = result[2]\n\n mLow_H125 = 110\n mHigh_H125 = 130\n\n mLow_Z90 = 80\n mHigh_Z90 = 100\n\n index = (labels>mLow_H125)*(labelsmLow_Z90)*(labelsmLow_H125)*(labelsmLow_Z90)*(labels 'str':\r\n tab = {1: 'I', 5: 'V', 10: 'X', 50: 'L', 100: 'C', 500: 'D', 1000: 'M'}\r\n n = len(str(num))\r\n res = \"\"\r\n for i in range(n, 0, -1):\r\n a = num // (10 ** (i-1))\r\n num = num % (10 ** (i-1))\r\n if a == 0:\r\n continue\r\n elif 1 <= a <= 3:\r\n res = res + tab[10 ** (i-1)] * a\r\n elif a == 4:\r\n res = res + tab[10 ** (i-1)] + tab[5 * 10 ** (i-1)]\r\n elif a == 5:\r\n res = res + tab[5 * 10 ** (i-1)]\r\n elif 6 <= a <= 8:\r\n res = 
res + tab[5 * 10 ** (i-1)] + tab[10 ** (i-1)] * (a-5)\r\n else:\r\n res = res + tab[10 ** (i-1)] + tab[10 ** i]\r\n return res\r\n\r\n def intToRoman_1(self, num: 'int') -> 'str':\r\n M = [\"\", \"M\", \"MM\", \"MMM\"]\r\n C = [\"\", \"C\", \"CC\", \"CCC\", \"CD\", \"D\", \"DC\", \"DCC\", \"DCCC\", \"CM\"]\r\n X = [\"\", \"X\", \"XX\", \"XXX\", \"XL\", \"L\", \"LX\", \"LXX\", \"LXXX\", \"XC\"]\r\n I = [\"\", \"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\", \"VIII\", \"IX\"]\r\n return M[num // 1000] + C[(num % 1000) // 100] + X[(num % 100) // 10] + I[num % 10]\r\n\r\n def intToRoman_2(self, num: 'int') -> 'str':\r\n M = {0: \"\", 1: \"M\", 2: \"MM\", 3: \"MMM\"}\r\n C = {0: \"\", 1: \"C\", 2: \"CC\", 3: \"CCC\", 4: \"CD\", 5: \"D\",\r\n 6: \"DC\", 7: \"DCC\", 8: \"DCCC\", 9:\"CM\"}\r\n X = {0: \"\", 1: \"X\", 2: \"XX\", 3: \"XXX\", 4: \"XL\", 5: \"L\",\r\n 6: \"LX\", 7: \"LXX\", 8: \"LXXX\", 9: \"XC\"}\r\n I = {0: \"\", 1: \"I\", 2: \"II\", 3: \"III\", 4: \"IV\", 5: \"V\",\r\n 6: \"VI\", 7: \"VII\", 8: \"VIII\", 9: \"IX\"}\r\n return M[num // 1000] + C[(num % 1000) // 100] \\\r\n + X[(num % 100) // 10] + I[num % 10]\r\n\r\nif __name__ == \"__main__\":\r\n a = Solution()\r\n num = 3\r\n print(a.intToRoman(num))\r\n print(a.intToRoman_1(num))\r\n print(a.intToRoman_2(num))\r\n num = 4\r\n print(a.intToRoman(num))\r\n print(a.intToRoman_1(num))\r\n print(a.intToRoman_2(num))\r\n num = 9\r\n print(a.intToRoman(num))\r\n print(a.intToRoman_1(num))\r\n print(a.intToRoman_2(num))\r\n num = 58\r\n print(a.intToRoman(num))\r\n print(a.intToRoman_1(num))\r\n print(a.intToRoman_2(num))\r\n num = 1994\r\n print(a.intToRoman(num))\r\n print(a.intToRoman_1(num))\r\n print(a.intToRoman_2(num))","sub_path":"Solutions/0012_intToRoman.py","file_name":"0012_intToRoman.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"71513372","text":"from re import T\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.sql.schema import Column, ForeignKey, Sequence\nfrom sqlalchemy.sql.sqltypes import DateTime, Integer, String\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm import sessionmaker\nfrom bs4 import BeautifulSoup\nimport requests\nfrom models import Content\nfrom connection import engine\n\n### Creating session to make db queries\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n### ----- session code for that particular session goes here -----\nstatement = 'SELECT contents_content.url FROM contents_content WHERE owner_id = 1 ORDER BY id DESC LIMIT 1'\nresults = session.execute(statement).scalars().all()\ncnet_recent_url_in_db = 'null'\nif len(results)>0:\n cnet_recent_url_in_db = results[0]\nprint('---------Cnet last record : '+cnet_recent_url_in_db)\n\n# --------!!!!!------ Populating techdaily_content table -------!!!!!!!!!--------\nowner_names = ['Cnet','Beebom', 'Android Authority']\nowner_urls = ['https://www.cnet.com', 'https://beebom.com', 'https://www.androidauthority.com']\nowner_ids = [1, 2, 3]\n\ncontent_titles = []\ncontent_urls = []\ncontent_authors = []\ncontent_img_urls = []\ncontent_pub_dates = []\n\ncnet = 0 #choosing cnet\nowner_id = owner_ids[cnet] \nhtml_text = requests.get(owner_urls[cnet]).text\nsoup = BeautifulSoup(html_text, 'lxml')\ntitles = soup.find_all('div',class_ = 'row item')\nfor title in titles:\n aHref = title.find('a')\n image = title.find('img')\n if 
((owner_urls[cnet]+aHref['href'])==cnet_recent_url_in_db):\n print('------------MATCHING RECORD FOUND')\n break\n print(owner_id) #owner_id\n print(aHref.text) #content title\n print(owner_urls[cnet]+aHref['href']) #content url\n print(image['src']+'\\n') #content image url\n content_titles.append(aHref.text)\n content_urls.append(owner_urls[cnet]+aHref['href'])\n content_img_urls.append(image['src'])\n # content = Content()\n # content.owner_id = owner_id\n # content.title = aHref.text\n # # content.author = 'John Doe'\n # content.url = owner_urls[cnet]+aHref['href']\n # content.img_url = image['src']\n # # content.pub_date = 'June 17, 2021'\n # session.add(content)\n\nfor content_title, content_url, content_img_url in zip(\n reversed(content_titles), reversed(content_urls), reversed(content_img_urls)):\n content = Content()\n content.owner_id = owner_id\n content.title = content_title\n # content.author = 'John Doe'\n content.url = content_url\n content.img_url = content_img_url\n # content.pub_date = content_pub_date\n session.add(content) \n\nsession.commit()\nsession.close()\n","sub_path":"bs4/app_cnet.py","file_name":"app_cnet.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"589683833","text":"from appium.webdriver import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\n\n\nclass BasePage:\n def __init__(self, driver: webdriver=None):\n self.driver = driver\n\n def find_swipe(self, text, num=4):\n for i in range(num + 1):\n try:\n ele = self.driver.find_element(MobileBy.XPATH, f\"//*[@text='{text}']\")\n print(\"找到了\")\n return ele\n except:\n if i == num:\n print(f'找了{i + 1}次,未找到')\n else:\n size = self.driver.get_window_size()\n width = size['width']\n height = size['height']\n width_start = width / 2\n width_end = width / 2\n height_start = height * 0.8\n height_end = height * 0.2\n duration = 2000\n self.driver.swipe(width_start, height_start, width_end, height_end, duration)\n\n def find(self, by, value):\n ele = self.driver.find_element(by, value)\n return ele\n\n def find_and_click(self, by, value):\n self.find(by, value).click()\n\n def find_and_sendkeys(self, by, value, text):\n self.find(by, value).send_keys(text)\n\n def get_toast_text(self, by, value):\n toast = self.find(by, value).get_attribute('text')\n return toast\n","sub_path":"HomeWork/Appium_Hw_19/Appnium_Hw_19_test/page/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"22881020","text":"from bs4 import BeautifulSoup\nimport json\n\ndef tuple2json(d=[]):\n j={}\n for (a,b) in d:\n j[a]=b\n return j\n\ndef parse_item1(i):\n return [(x.name.strip(),x.string.strip() if x.string else \"\" ) for x in i.children if not x.name is None]\n\ndef parse(cc=\"\"):\n if len(cc) == 0 : return []\n y=BeautifulSoup(cc,features=\"html.parser\")\n c=y.rss.channel\n d1=[(x.name,x.text) for x in c.children if not x.name is None and x.name != \"item\"]\n j1=tuple2json(d1)\n print(j1)\n d2=[parse_item1(x) for x in c.findAll('item')]\n d3=[tuple2json(x) for x in d2]\n r={ \"info\":j1, \"item\":d3, }\n return r\n\ndef save(dd=[],name='./index.json'):\n z=json.dumps(dd)\n with open(name,'w') as f:\n f.write(z)\n\ndef test():\n with open('./topnews.xml') as f:\n cc=f.read()\n d=parse(cc)\n save(d)\n 
print(d)\n","sub_path":"parser/huanqiu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"325331653","text":"import math\n\n\n# record the black chess information\nclass State:\n def __init__(self, black_dict):\n self.black_dict = black_dict\n\n\n# the search point should be the mid point of two given point\ndef find_search_point(black_list):\n tmp_list = black_list.copy()\n\n target_list = []\n\n for black_piece in tmp_list:\n tmp_list.remove(black_piece)\n for other_piece in tmp_list:\n\n # case1: distance between 2 black pieces is 2\n if distance_cal(black_piece, other_piece) == 2:\n\n # if 2 black pieces are in a vertical relationship\n if black_piece[0] == other_piece[0]:\n\n # compute y coordinates of all possible black pieces in case1 areas\n y_list = [black_piece[1], other_piece[1], int((black_piece[1] + other_piece[1]) / 2)]\n\n # check if any black piece inside these areas\n for y_val in y_list:\n x_val = black_piece[0]\n\n # if there is a black piece on the right side of the area\n if (x_val + 2, y_val) in black_list:\n target_list.append((x_val + 1, int((black_piece[1] + other_piece[1]) / 2)))\n break\n\n # if there is a black piece on the left side of the area\n if (x_val - 2, y_val) in black_list:\n target_list.append((x_val - 1, int((black_piece[1] + other_piece[1]) / 2)))\n break\n\n # if there is no black piece on either left or right side of the area\n else:\n target_list.append((x_val, int((black_piece[1] + other_piece[1]) / 2)))\n\n # if 2 black pieces are in a horizontal relationship\n if black_piece[1] == other_piece[1]:\n\n # compute x coordinates of all possible black pieces in case1 areas\n x_list = [black_piece[0], other_piece[0], int((black_piece[0] + other_piece[0]) / 2)]\n\n # check if any black piece inside these areas\n for x_val in x_list:\n y_val = black_piece[1]\n\n # if there is a black piece on the top of the area\n if (x_val, y_val + 2) in black_list:\n target_list.append((int((black_piece[0] + other_piece[0]) / 2), y_val + 1))\n break\n\n # if there is a black piece on the bottom of the area\n if (x_val, y_val - 2) in black_list:\n target_list.append((int((black_piece[0] + other_piece[0]) / 2), y_val - 1))\n break\n\n # if there is no black piece on either top or bottom of the area\n else:\n target_list.append((int((black_piece[0] + other_piece[0]) / 2), y_val))\n\n # case2: distance between 2 black pieces is square root of 5\n if math.sqrt(5) - distance_cal(black_piece, other_piece) < 0.000001:\n\n if math.fabs(black_piece[0] - other_piece[0]) == 1:\n y_check = black_piece[1] * 2 - other_piece[1]\n x_check = other_piece[0]\n if (x_check, y_check) in black_list:\n target_list.append(black_piece[0], (black_piece[1] + other_piece[1]) / 2)\n\n else:\n target_list.append(black_piece[0], (black_piece[1] + other_piece[1]) / 2)\n target_list.append(other_piece[0], (black_piece[1] + other_piece[1]) / 2)\n\n if math.fabs(black_piece[1] - other_piece[1]) == 1:\n y_check = other_piece[1]\n x_check = black_piece[0] * 2 - other_piece[0]\n if (x_check, y_check) in black_list:\n target_list.append((black_piece[0] + other_piece[0]) / 2, black_piece[1])\n\n else:\n target_list.append((black_piece[0] + other_piece[0]) / 2, black_piece[1])\n target_list.append((other_piece[0] + other_piece[0]) / 2, black_piece[1])\n\n # case3: distance between 2 black pieces is square root of 8\n if math.sqrt(8) - distance_cal(black_piece, other_piece) < 
0.000001:\n target_list.append(((black_piece[0] + other_piece[0]) / 2, (black_piece[1] + other_piece[1]) / 2))\n\n # target list with duplication\n print(\"tg: \", target_list)\n\n\n# calculate distance between two tokens\ndef distance_cal(point1, point2):\n return math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)\n\n\ndef main():\n print(find_search_point([(2, 2), (4, 2), (4, 4)]))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"explosion_simulation.py","file_name":"explosion_simulation.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"123483574","text":"import os\nfrom typing import Any, Optional\n\nimport numpy\nfrom napari_plugin_engine import napari_hook_implementation\n\nfrom PartSegImage import Image, ImageWriter\nfrom PartSegImage.image import DEFAULT_SCALE_FACTOR\n\n\n@napari_hook_implementation\ndef napari_write_labels(path: str, data: Any, meta: dict) -> Optional[str]:\n ext = os.path.splitext(path)[1]\n if not isinstance(data, numpy.ndarray) or ext not in {\".tiff\", \".tif\", \".TIFF\", \".TIF\"}:\n return\n scale_shift = min(data.ndim, 3)\n image = Image(\n data,\n numpy.divide(meta[\"scale\"], DEFAULT_SCALE_FACTOR)[-scale_shift:],\n axes_order=\"TZXY\"[-data.ndim :],\n channel_names=[meta[\"name\"]],\n shift=numpy.divide(meta[\"translate\"], DEFAULT_SCALE_FACTOR)[-scale_shift:],\n name=\"ROI\",\n )\n ImageWriter.save(image, path)\n return path\n\n\n@napari_hook_implementation\ndef napari_write_image(path: str, data: Any, meta: dict) -> Optional[str]:\n ext = os.path.splitext(path)[1]\n if not isinstance(data, numpy.ndarray) or ext not in {\".tiff\", \".tif\", \".TIFF\", \".TIF\"}:\n return\n scale_shift = min(data.ndim, 3)\n axes = \"TZXY\"\n channel_names = [meta[\"name\"]]\n if data.shape[-1] < 6:\n axes += \"C\"\n scale_shift -= 1\n channel_names = [f'{meta[\"name\"]} {i}' for i in range(1, data.shape[-1] + 1)]\n image = Image(\n data,\n numpy.divide(meta[\"scale\"], DEFAULT_SCALE_FACTOR)[-scale_shift:],\n axes_order=axes[-data.ndim :],\n channel_names=channel_names,\n shift=numpy.divide(meta[\"translate\"], DEFAULT_SCALE_FACTOR)[-scale_shift:],\n name=\"Image\",\n )\n ImageWriter.save(image, path)\n return path\n","sub_path":"package/PartSegCore/napari_plugins/save_tiff_layer.py","file_name":"save_tiff_layer.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"112690807","text":"import random\nimport unittest\nfrom nose.tools import eq_\n\nfrom solariat_bottle.tests.journeys.customers_agents.base import CustomerAgentBaseCase\nfrom solariat_bottle.tests.base import setup_customer_schema, get_schema_config\nfrom solariat_bottle.scripts.data_load.gforce.customers import (\n LOCATION_VALUES, INTENT_LABELS, PRODUCTS,\n CUSTOMER_SENIORITY_VALUES, INDUSTRIES, STATUS, CUSTOMER_SEGMENTS,\n customer_first_name_male, customer_first_name_female, customer_last_name\n)\n\n\nclass CustomersBaseTest(CustomerAgentBaseCase):\n def setUp(self):\n super(CustomersBaseTest, self).setUp()\n setup_customer_schema(self.user, get_schema_config(self.generate_customer_data()))\n\n def generate_customer_data(self):\n sex = random.choice(('M', 'F'))\n first_name = random.choice({\n 'M': customer_first_name_male,\n 'F': customer_first_name_female\n }[sex])\n last_name = random.choice(customer_last_name)\n\n data = dict(\n first_name=first_name,\n last_name=last_name,\n 
age=random.randint(16, 100),\n account_id=self.account.id,\n location=random.choice(LOCATION_VALUES),\n assigned_segments=[],\n assigned_labels=[],\n sex=sex,\n status=random.choice(STATUS),\n industry=random.choice(INDUSTRIES),\n products=self.choose_many(PRODUCTS),\n last_call_intent=[random.choice(INTENT_LABELS)],\n num_calls=random.randint(0, 100),\n seniority=random.choice(CUSTOMER_SENIORITY_VALUES),\n phone='01' + ''.join(random.sample('1234567890', 10))\n )\n return data\n\n def _create(self, **kw):\n data = self.generate_customer_data()\n data.update(kw)\n CustomerProfile = self.account.get_customer_profile_class()\n return CustomerProfile.objects.create(**data)\n\n def _fetch(self, **kw):\n data = {\n 'from': \"01/01/2016\",\n 'to': \"12/31/2030\",\n 'age_groups': [],\n 'agent_id': \"\",\n 'call_intents': [],\n 'customer_statuses': [],\n 'genders': None,\n 'industries': [],\n 'locations': None,\n 'segments': [],\n 'limit': 50,\n 'offset': 0,\n }\n data.update(kw)\n return self._post('/customer-profiles/json', data_dict=data)\n\n\nclass CustomersDetailsTest(CustomersBaseTest):\n\n def test_industries(self):\n groups = INDUSTRIES\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(industry=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(industries=facet_groups)\n for g, samples in self.groupby(resp, 'industry'):\n eq_(groups_counts[g], len(list(samples)))\n\n def test_age_groups(self):\n groups = {\n '16 - 25': (16, 25),\n '26 - 35': (26, 35),\n '36 - 45': (36, 45),\n '46 -' : (46, 100),\n }\n groups_counts = {g: random.randint(1, 10) for g in groups.values()}\n for g, count in groups_counts.items():\n [self._create(age=random.randint(*g)) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(age_groups=facet_groups)\n self.categorize_age_groups(resp)\n for g, samples in self.groupby(resp, 'age'):\n eq_(groups_counts[g], len(list(samples)))\n\n def test_customer_status(self):\n groups = STATUS\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(status=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(customer_statuses=facet_groups)\n for g, samples in self.groupby(resp, 'status'):\n eq_(groups_counts[g], len(list(samples)))\n\n # def test_segments(self):\n # segment_objs = [CustomerSegment.objects.create(account_id=self.account.id,\n # **segment) for segment in CUSTOMER_SEGMENTS]\n # groups = {o.display_name: o.id for o in segment_objs}\n # groups_counts = {g: random.randint(1, 10) for g in groups.values()}\n # for g, count in groups_counts.items():\n # [self._create(assigned_segments=[g]) for i in xrange(count)]\n #\n # for facet_groups in self.combinations(groups):\n # resp = self._fetch(segments=facet_groups)\n # for g, samples in self.groupby(resp, 'assigned_segments'):\n # oid = groups[g[0]['display_name']]\n # eq_(groups_counts[oid], len(list(samples)))\n\n\nclass CustomersDistributionTest(CustomersBaseTest):\n\n def test_plot_by_status(self):\n groups = STATUS\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(status=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(customer_statuses=facet_groups, group_by='status', plot_by='distribution')\n expected = {k: groups_counts[k] for k in 
facet_groups} or groups_counts\n eq_(self.count_distribution(resp), expected)\n\n @unittest.skip('CustomerSegment was removed')\n def test_plot_by_segments(self):\n segment_objs = [CustomerSegment.objects.create(account_id=self.account.id,\n **segment) for segment in CUSTOMER_SEGMENTS]\n groups = {o.display_name: o for o in segment_objs}\n groups_counts = {g: random.randint(1, 10) for g in groups.values()}\n for g, count in groups_counts.items():\n [self._create(assigned_segments=[g.id]) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(segments=facet_groups, group_by='segment', plot_by='distribution')\n expected = {k.display_name: v for k, v in groups_counts.items() if not facet_groups or k.display_name in facet_groups}\n eq_(self.count_distribution(resp), expected)\n\n def test_plot_by_locations(self):\n groups = LOCATION_VALUES\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(location=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(locations=facet_groups, group_by='location', plot_by='distribution')\n expected = {k: groups_counts[k] for k in facet_groups} or groups_counts\n eq_(self.count_distribution(resp), expected)\n\n def test_plot_by_genders(self):\n groups = ['M', 'F']\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(sex=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups, 2):\n resp = self._fetch(genders=facet_groups, group_by='gender', plot_by='distribution')\n expected = {k: groups_counts[k] for k in facet_groups} or groups_counts\n eq_(self.count_distribution(resp), expected)\n\n def test_plot_by_industry(self):\n groups = INDUSTRIES\n groups_counts = {g: random.randint(1, 10) for g in groups}\n for g, count in groups_counts.items():\n [self._create(industry=g) for i in xrange(count)]\n\n for facet_groups in self.combinations(groups):\n resp = self._fetch(industries=facet_groups, group_by='industry', plot_by='distribution')\n expected = {k: groups_counts[k] for k in facet_groups} or groups_counts\n eq_(self.count_distribution(resp), expected)\n","sub_path":"tests/journeys/customers_agents/test_customers.py","file_name":"test_customers.py","file_ext":"py","file_size_in_byte":8022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126180356","text":"import gpiozero\nfrom time import sleep\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"steps\", type=int)\nparser.add_argument(\"-f\", \"--forward\", action=\"store_true\")\nargs = parser.parse_args()\n\npulse = gpiozero.DigitalOutputDevice(2)\ndirection = gpiozero.DigitalOutputDevice(3)\nif args.forward:\n direction.on()\nfor i in range(args.steps):\n pulse.on()\n sleep(.001)\n pulse.off()\n\ndirection.close()\npulse.close()\n\n","sub_path":"slide.py","file_name":"slide.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"341506994","text":"# import unittest\n# unittest.main('test_wrap_nicely')\n\n\nimport unittest\n\nimport wrap_nicely\n\n\nclass TestWrapNicely(unittest.TestCase):\n def test_wrap_nicely(self):\n \"\"\"\n test text wraps properly\n \"\"\"\n # Setup\n string = 'A string of text to test to see if it wraps nicely'\n max_chars = 10\n\n # Calls\n the_lines = wrap_nicely.wrap_nicely(string, 
max_chars)\n\n # Asserts\n self.assertEqual(the_lines, [\n 'A string', \n 'of text to', \n 'test to', \n 'see if it', \n 'wraps', \n 'nicely'\n ])\n","sub_path":"test_wrap_nicely.py","file_name":"test_wrap_nicely.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"301716109","text":"import glob\nimport os\n\nfrom solar.core.profile import Profile\nfrom solar.extensions.base import BaseExtension\nfrom solar import utils\n# Import all modules from the directory in order\n# to make subclasses for extensions work\nmodules = glob.glob(\n os.path.join(utils.read_config()['extensions-dir'], 'modules', '*.py')\n)\n[__import__('%s.%s' % ('modules', os.path.basename(f)[:-3]), locals(), globals()) for f in modules]\n\n\ndef get_all_extensions():\n return BaseExtension.__subclasses__()\n\n\ndef find_extension(id_, version):\n extensions = filter(\n lambda e: e.ID == id_ and e.VERSION == version,\n get_all_extensions())\n\n if not extensions:\n return None\n\n return extensions[0]\n\n\ndef find_by_provider_from_profile(profile, provider):\n profile_ = Profile(profile)\n extensions = profile_.extensions\n result = None\n for ext in extensions:\n result = find_extension(ext['id'], ext['version'])\n if result:\n break\n\n return result(profile_)\n","sub_path":"solar/solar/extensions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"567626564","text":"\n#-*- coding:utf-8 _*- \n\"\"\" \n@author:chengzhuo \n@file: goods_list.py \n@time: 2017/10/30 \n\"\"\"\nimport json\nimport path_1 as pt\n\n\n\n\nlist_path=''\n\n\n\ndef path_list(page):\n global list_path\n list_path=''.join((pt.dict_path['conf'],'/',page,'_list.txt'))\n\n\n\n\n\ndef check():\n T = False\n try:\n f = open(list_path, 'r', encoding='utf8')\n data = json.loads(f.read())\n except json.decoder.JSONDecodeError:\n f1 = open(list_path, 'w', encoding='utf8')\n f1.write('{}')\n T = True\n f1.close()\n if T:\n data = json.loads(f.read())\n j=1\n for i in data.keys() :\n\n print(' '.join((str(j)+'.',i,data[i])))\n j+=1\n f.close()\n return data\n\n\n\ndef add_goods():\n print('add goods or change price of goods')\n T = False\n try:\n f = open(list_path, 'r', encoding='utf8')\n data = json.loads(f.read())\n except json.decoder.JSONDecodeError:\n f1 = open(list_path, 'w', encoding='utf8')\n f1.write('{}')\n T = True\n f1.close()\n if T:\n data = json.loads(f.read())\n T=True\n while T:\n goods=input('input name of goods:')\n goods_2=input('confirm name of goods:')\n price=input('input price of goods:')\n price_2=input('confirm price of goods:')\n while goods != goods_2 or price != price_2:\n print('The two inputs is inconsistent,please again')\n goods = input('input name of goods:')\n goods_2 = input('confirm name of goods:')\n price = input('input price of goods:')\n price_2 = input('confirm price of goods:')\n data[goods]=price\n exit=input('continue add goods,please input \\'c\\',exit input \\'e\\':')\n if exit=='e':\n T=False\n f.close()\n f=open(list_path,'w',encoding='utf8')\n f.write(json.dumps(data))\n f.close()\n\n\n\n\n\n\nif __name__==\"__main__\":\n path_list('book')\n add_goods()\n pass","sub_path":"shopping/shore/module/goods_list.py","file_name":"goods_list.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"471798858","text":"# library\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\n\nplt.rcParams[\"font.family\"] = \"Century Gothic\"\nplt.rcParams[\"font.size\"] = \"14\"\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n\nmax = 1000\nmin = 1\nN_points = 400\nZin = np.linspace(1, 1000, 400)\nDelta = (max-min)/N_points\n\nTest_Load = np.array([50, 100, 200, 400, 800])\n\nTest_50 = int(50/(999/400))\nTest_100 = int(100/(999/400))\nTest_200 = int(200/(999/400))\nTest_400 = int(400/(999/400))\nTest_800 = int(800/(999/400))\nRs = 50\nS11 = np.abs((Zin-Rs)/(Zin+Rs))\nS11_db = 20*np.log10(S11)\n\nplt.figure()\nplt.plot(Zin, S11_db, label='S11')\nplt.plot(Test_Load[0], S11_db[Test_50], \"ro\", label = \"R=50\", color = 'green')\nplt.plot(Test_Load[1], S11_db[Test_100], \"ro\", label = \"R=100\", color = 'red')\nplt.plot(Test_Load[2], S11_db[Test_200], \"ro\", label = \"R=200\", color = 'blue')\nplt.plot(Test_Load[3], S11_db[Test_400], \"ro\", label = \"R=400\", color = 'tomato')\nplt.plot(Test_Load[4], S11_db[Test_800], \"ro\", label = \"R=800\", color = 'hotpink')\nprint(S11_db[Test_50])\nplt.xlabel('ZL, Ом')\nplt.xscale(\"log\")\nplt.ylabel('S11, дБ')\nplt.grid()\nplt.legend()\n\n\n\nVtn = 0.3\nCox = 7.6e-3\nCov = 289e-12\nunCox = 187e-6\nL = 100e-9\nVgs = 1.2\n\nVds = 0\nF = 30e9\n\nW = np.linspace(5e-6, 16e-6, 100)\nR_switch = L/(unCox*W*(Vgs-Vtn-Vds))\nCgs = W*L*Cox*0.5+W*Cov\nR_C_tot = 1/(2*3.14*F*Cgs)\n\nplt.figure()\nplt.plot(W, R_switch, label='R')\nplt.xlabel('W, м')\nplt.ylabel('R, Ом')\nplt.grid()\n\nfig, ax1 = plt.subplots()\ncolor = 'tab:red'\nax1.set_xlabel('W, м')\nplt.grid()\nax1.set_ylabel('Сdg, Ф', color=color)\nax1.plot(W, Cgs, color=color)\nax1.tick_params(axis='y', labelcolor=color)\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\ncolor = 'tab:blue'\nax2.set_ylabel('R(30ГГц), Ом', color=color) # we already handled the x-label with ax1\nax2.plot(W, R_C_tot, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.grid()\nplt.show()\n\n\n\n\n\n\n\n","sub_path":"S11_50_plot.py","file_name":"S11_50_plot.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"62475275","text":"from django.contrib.auth.models import AbstractUser, BaseUserManager\nfrom django.db import models\n\n\nclass UserManager(BaseUserManager):\n\n def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, email, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, **extra_fields)\n\n def create_superuser(self, email, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n return self._create_user(email, **extra_fields)\n\n\nclass User(AbstractUser):\n objects = UserManager()\n\n class Roles(models.TextChoices):\n admin = 'admin'\n moderator = 'moderator'\n user = 'user'\n\n class Meta:\n verbose_name = 'user'\n verbose_name_plural = 'users'\n\n username = models.CharField(max_length=150,\n unique=True,\n blank=True,\n verbose_name='username')\n bio = models.TextField(max_length=500,\n 
blank=True,\n null=True,\n verbose_name='biography')\n role = models.CharField(max_length=9,\n choices=Roles.choices,\n default=Roles.user,\n verbose_name='role')\n email = models.EmailField(unique=True,\n verbose_name='email')\n\n def is_admin(self):\n return self.role == self.Roles.admin or self.is_superuser\n\n def is_moderator(self):\n return self.role == self.Roles.moderator or self.is_staff\n","sub_path":"api/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"524733230","text":"import feedparser\nimport bash_org.bash_org_sqlite\nimport time\nimport sqlite3\n\n\n'''\n--------connect to sql-----------\n -*- coding: utf-8 -*-\nc,conn = bash_org_sqlite.connect_to_baze()\n------------disconnect to sql----------\nbash_org_sqlite.disconnect_from_baze()\n\nprint(link.entries[0].id)\nprint(link.entries[0].title)\nprint(link.entries[0].description)\n'''\n\ndef read_bash(i):\n\tlink = feedparser.parse('http://bash.im/rss/')\n\ttext = link.entries[i].description\n\ttext = text.split('
')\n\ttext = ('\\n').join(text)\n\t\n\t#---------qote--------\n\ttext = text.split('"')\n\tdiscription = ('\"').join(text)\n\t\n\tdate = 'Дата публикации: ' + link.entries[i].published[:-5]\n\tnumber = link.entries[i].title\n\n\treturn (date,number,discription)\n\ndef del_of_readen_add(number):\n\t#--------connect to sql-----------\n\tc,conn = bash_org_sqlite.connect_to_baze()\n\t#--------del---------------\n\tbash_org_sqlite.del_from_baze(date,number,discription,do_read)\n\t#------------disconnect to sql----------\n\tbash_org_sqlite.disconnect_from_baze()\n\n'''\n#--------connect to sql-----------\nconn=sqlite3.connect('bash_org.db')\nc = conn.cursor()\n\nc.execute(\"\"\"SELECT numbers,discription FROM data_bash;\"\"\")\nnumber_old = c.fetchall()[0][0]\n#c.execute(\"\"\"DELETT numbers,discription FROM data_bash WHERE numbers =(?);\"\"\",c.fetchall()[0])\n\n#------------disconnect to sql----------\nconn.commit()\nc.close()\n'''\n\n","sub_path":"telegram/bash_org/rss_parse.py","file_name":"rss_parse.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"598109733","text":"from model import Model\nfrom default_net import get_net\nfrom runner import Runner\nimport env_builder\nimport policy_fn\nfrom baselines import logger\nfrom baselines.common import explained_variance\n\n\nimport gym\nimport numpy as np\nimport time\n\ndef get_const(x):\n\tdef fn(_):\n\t\treturn x\n\treturn fn\n\ndef learn(env, total_steps = 1e6, load_path = None, net = 'mlp', n_steps = 512, n_agents = 1, train_loops = 4, train_batchsize = 128, cliprange = 0.2, lr = 1e-3, log_interval = 10):\n\n\t\"\"\"\n\ttrain_loops: The loop nums of every n_batch samples\n\n\t\"\"\"\n\n\tenvrionment = env_builder.build(env, n_agents)\n\tnetwork = get_net(net)\n\t### policy\n\tpolicy = policy_fn.build_policy(envrionment, network)\n\t### model\n\tmodel = Model(envrionment, policy, train_batchsize)\n\tif load_path is not None:\n\t\tmodel.load(load_path)\n\t\treturn model, envrionment\n\t### runner\n\tdims = policy_fn.get_env_dims(env)\n\trunner = Runner(envrionment, model, n_steps, dims)\n\n\tn_batch = n_steps*n_agents\n\tn_updates = total_steps//n_batch\n\n\tl_r = get_const(lr)\n\tcliprange = get_const(cliprange)\n\n\tfor update in np.arange(n_updates):\n\n\t\ttstart = time.time()\n\n\t\tfrac = 1.0 - (update-1.0)/n_updates\n\t\tlr = l_r(frac)\n\t\tcliprangenow = cliprange(frac)\n\n\t\tob, returns, done, action, value, neglogpa, _ = runner.run()\n\t\tloss = []\n\t\tfor _ in np.arange(train_loops):\n\t\t\tindex = np.arange(n_batch)\n\t\t\tnp.random.shuffle(index)\n\t\t\tfor begin in np.arange(0, n_batch, train_batchsize):\n\t\t\t\tend = begin + train_batchsize\n\t\t\t\tmbinds = index[begin:end]\n\t\t\t\tsamples = [array[mbinds] for array in (ob, returns, action, value, neglogpa)]\n\t\t\t\t# print('Begin to trian with samples:', begin)\n\t\t\t\tloss.append(model.train(lr, cliprangenow, *samples))\n\n\t\tloss = np.mean(loss, axis = 0)\n\t\t# print('loss:', loss)\n\n\t\ttnow = time.time()\n\t\tfps = int(n_batch/(tnow - tstart))\n\n\t\tif update % log_interval == 0 or update == 1:\n\t\t\tev = explained_variance(value, returns)\n\t\t\tlogger.logkv('serial_timesteps', update * n_steps)\n\t\t\tlogger.logkv('nupdates', update)\n\t\t\tlogger.logkv('total_time_steps', update * n_batch)\n\t\t\tlogger.logkv('fps', fps)\n\t\t\tlogger.logkv('explained_variance', float(ev))\n\t\t\tfor (lossval, lossname) in zip(loss, 
model.loss_names):\n\t\t\t\tlogger.logkv(lossname, lossval)\n\t\t\tprint(model.loss_names, loss)\n\n\treturn model, envrionment\n","sub_path":"ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"320170776","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#https://www.jianshu.com/p/58b3fe300ecb\n# 目标函数:y=x^2\ndef func(x):\n return np.square(x)\n\n\n# 目标函数一阶导数:dy/dx=2*x\ndef dfunc(x):\n return 2 * x\n\n\ndef GD_momentum(x_start, df, epochs, lr, momentum):\n \"\"\"\n 带有冲量的梯度下降法。\n :param x_start: x的起始点\n :param df: 目标函数的一阶导函数\n :param epochs: 迭代周期\n :param lr: 学习率\n :param momentum: 冲量\n :return: x在每次迭代后的位置(包括起始点),长度为epochs+1\n \"\"\"\n\n xs = np.zeros(epochs +1)\n x = x_start\n xs[0] = x\n v = 0\n for i in range(epochs):\n dx = df(x)\n # v表示x要改变的幅度\n #在学习率较小的时候,适当的momentum能够起到一个加速收敛速度的作用\n #在学习率较大的时候,适当的momentum能够起到一个减小收敛时震荡幅度的作用\n v = -dx * lr + momentum * v\n x +=v\n xs[i+1] = x\n\n return xs\n\ndef demo2_GD_momentum():\n line_x = np.linspace(-5, 5, 100)\n line_y = func(line_x)\n\n plt.figure('Gradient Desent: Learning Rate, Momentum')\n\n x_start = -5\n epochs = 20\n\n lr = [0.01, 0.1, 0.6, 0.9]\n momentum = [0.0, 0.1, 0.5, 0.9]\n\n color = ['k', 'r', 'g', 'y']\n row = len(lr)\n col = len(momentum)\n size = np.ones(epochs + 1) * 10\n size[-1] = 30\n\n for i in range(row):\n for j in range(col):\n x = GD_momentum(x_start, dfunc, epochs, lr=lr[i], momentum=momentum[j])\n plt.subplot(row, col, i * col + j + 1)\n plt.plot(line_x, line_y, c='b')\n plt.plot(x, func(x), c=color[i], label='lr={}, mo={}'.format(lr[i], momentum[j]))\n plt.scatter(x, func(x), c=color[i], s=size)\n plt.legend(loc=1)\n plt.show()\n\n\n\ndemo2_GD_momentum()","sub_path":"ai1/GD_momentum.py","file_name":"GD_momentum.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19690976","text":"##############################################################################\n# basefunctions.py #\n# Author: Nicholas Huang #\n# Functions for generating material properties #\n##############################################################################\nimport numpy as np\n\ndef kt(T, alpha, Beta, gamma, n):\n '''\n From Jones and Runyan 2008 (arxiv: 0806.1921)\n '''\n if (not isinstance(T, np.ndarray)):\n T = np.array(T)\n return alpha * T ** (Beta + gamma * T ** n)\n\ndef NIST_cu(T, a, b, c, d, e, f, g, h, i):\n '''\n From the NIST Cryogenic Technologies Grou\n '''\n if (not isinstance(T, np.ndarray)):\n T = np.array(T)\n num = a + \\\n c * np.sqrt(T) +\\\n e * T + \\\n g * T ** 1.5 + \\\n i * T * T\n denom = 1 + \\\n b * np.sqrt(T) + \\\n d * T + \\\n f * T ** 1.5 + \\\n h * T * T\n\n return 10 ** (num / denom)\n\ndef NIST_10(T, coeff):\n tmp = 0\n logT = np.log10(T)\n logprod = 1\n for c in coeff:\n tmp += c * logprod\n logprod *= logT\n return 10 ** tmp\n","sub_path":"materials/basefunctions.py","file_name":"basefunctions.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292831110","text":"#''-*- coding: UTF-8 -*-\n#############################################\n# Author: Zeeshan Shahid #\n# Last edited: 10-07-2017 #\n#############################################\n\n############### -- Imports -- ##################\nimport os\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import 
ImageDraw\n\ndef main():\n # Get all image files\n image_list = list(os.listdir(\".\"))\n image_list = [i for i in image_list if \".py\" not in i] # ignore self\n f = ImageFont.truetype(\"arial.ttf\", 30)\n\n for im in image_list:\n img = Image.open(im)\n draw = ImageDraw.Draw(img)\n draw.text((0, 0),\"© {copyright-holder-name}\",(0,0,0),font=f)\n draw = ImageDraw.Draw(img)\n img.save(im)\n\n return 0\n\nif __name__ == '__main__':\n main()\n\n\"\"\" ############ References and Miscellanea ##############\nhttps://stackoverflow.com/questions/16373425/add-text-on-image-using-pil\nhttp://python-catalin.blogspot.ca/2010/06/add-text-on-image-with-pil-module.html\n\"\"\"\n","sub_path":"AddTextToImage.py","file_name":"AddTextToImage.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"277160535","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nimport json\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport requests as requests\nimport responses\nfrom source_mailgun.source import SourceMailgun\n\nfrom . import TEST_CONFIG\n\n\n@pytest.fixture\ndef check_connection_url():\n return \"https://api.mailgun.net/v3/domains\"\n\n\n@pytest.fixture\ndef source_mailgun(test_config):\n source = SourceMailgun()\n yield source\n del source\n\n\n@pytest.mark.parametrize(\n \"config\",\n [\n TEST_CONFIG,\n dict(**TEST_CONFIG, **{\"start_date\": \"2021-01-01T00:00:00Z\"}),\n dict(**TEST_CONFIG, **{\"start_date\": \"2021-01-01T00:00:00Z\", \"end_date\": \"2021-12-31T23:59:59Z\"}),\n ],\n)\ndef test_check_connection(mocked_responses, source_mailgun, auth_header, check_connection_url, config):\n bad_config = config.copy()\n bad_config[\"private_key\"] = bad_config[\"private_key\"][-1::-1]\n bad_key_message = \"Bad key message\"\n bad_key_body = json.dumps({\"message\": bad_key_message})\n\n def request_callback(request):\n if request.headers.get(\"Authorization\") == auth_header:\n return 200, {}, \"\"\n else:\n return 401, {}, bad_key_body\n\n mocked_responses.add_callback(responses.GET, check_connection_url, callback=request_callback, content_type=\"application/json\")\n\n logger_mock = MagicMock()\n\n assert source_mailgun.check_connection(logger_mock, config) == (True, None)\n\n check_result = source_mailgun.check_connection(logger_mock, bad_config)\n assert not check_result[0]\n assert bad_key_message in check_result[1]\n\n\ndef test_check_connection_config_region_error(mocked_responses, source_mailgun, check_connection_url, test_config):\n test_config[\"domain_region\"] = \"WRONG_REGION\"\n check_result = source_mailgun.check_connection(MagicMock(), test_config)\n assert not check_result[0]\n assert \"domain_region\" in check_result[1]\n\n\ndef test_check_connection_request_error(mocked_responses, source_mailgun, check_connection_url, test_config):\n custom_exception = requests.RequestException()\n mocked_responses.add(responses.GET, check_connection_url, body=custom_exception)\n assert source_mailgun.check_connection(MagicMock(), test_config) == (False, custom_exception)\n\n\n@pytest.mark.parametrize(\n \"config, error\",\n [\n (TEST_CONFIG, None),\n (dict(**TEST_CONFIG, **{\"start_date\": \"2021-01-01T00:00:00Z\"}), None),\n (dict(**TEST_CONFIG, **{\"start_date\": \"wrong format\"}), \"date format\"),\n ],\n)\ndef test_streams(config, error):\n source = SourceMailgun()\n expected_streams_number = 2\n if error is None:\n streams = source.streams(config)\n assert len(streams) == 
expected_streams_number\n else:\n with pytest.raises(ValueError) as exc_info:\n source.streams(config)\n assert error in str(exc_info.value)\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-mailgun/unit_tests/test_source.py","file_name":"test_source.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"280259868","text":"#coding=utf-8\r\nfrom handlers.base.base_handler import BaseHandler\r\nfrom models.friends.friends_model import User1\r\n\r\nclass ModifyNameHandler(BaseHandler):\r\n\r\n def get(self):\r\n user = User1.by_uuid(self.get_argument('uuid', ''))\r\n self.db.delete(user)\r\n self.db.commit()\r\n self.redirect('/')\r\n\r\n\r\n def post(self):\r\n user = User1.by_uuid(self.get_argument('uuid', ''))\r\n delete = self.get_argument('delete', '')\r\n if delete == 'delete':\r\n self.db.delete(user)\r\n self.db.commit()\r\n self.redirect('/')\r\n elif user:\r\n user.username=self.get_argument('username', '')\r\n self.db.add(user)\r\n self.db.commit()\r\n self.redirect('/')\r\n else:\r\n self.write('error no')","sub_path":"friends_server/handlers/friends/friends_handler.py","file_name":"friends_handler.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"11585476","text":"import firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\n\r\nimport threading\r\nimport time\r\n\r\ncred = credentials.Certificate(\"xxxx-3bcf1-firebase-adminsdk-tudu6-a4eeea4874.json\")\r\nfirebase_admin.initialize_app(cred, {'databaseURL' : 'https://xxxx-3bcf1.firebaseio.com/'})\r\n\r\ndef compare(a, b):\r\n if a == b:\r\n return 0\r\n else:\r\n return 1\r\n \r\nref = firebase_admin.db.reference('order')\r\n\r\nCnt = 0\r\nuserid = []\r\npastData = []\r\npresentData = []\r\n\r\npastData = ref.get()\r\nprint(pastData)\r\n\r\nwhile True: \r\n presentData = ref.get()\r\n\r\n if compare(presentData, pastData) == 1:\r\n NewOrder = ref.get()\r\n try:\r\n NoNewOrder = len(pastData)\r\n count = 0\r\n for info in NewOrder:\r\n if Cnt == 0:\r\n userid.append(NewOrder[info]['userid'])\r\n orderid.append(NewOrder[info]['key'])\r\n else:\r\n if count >= NoNewOrder:\r\n userid.append(NewOrder[info]['userid'])\r\n orderid.append(NewOrder[info]['key'])\r\n count += 1\r\n \r\n print(userid)\r\n except:\r\n for info in NewOrder:\r\n userid.append(NewOrder[info]['userid'])\r\n print(userid)\r\n \r\n Cnt = 1\r\n else:\r\n print(\"NOP\")\r\n\r\n pastData = presentData\r\n","sub_path":"Face Recognition-Ordering Goods/Testing1.py","file_name":"Testing1.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"63902144","text":"#!/usr/bin/python\n\n#################################################\n# module: hw07_parser.py\n# YOUR NAME\n# YOUR A#\n#################################################\n\nfrom maker import maker\n\n\nclass parser(object):\n\n @staticmethod\n def parse_elt(elt):\n # let's make sure that elt is a string.\n assert isinstance(elt, str)\n constant, variable, power = elt, 'x', '^0'\n for i in range(len(elt)):\n if elt[i].isalpha():\n constant, variable, power = elt.partition(elt[i])\n if constant == '':\n constant = '1'\n if power != '':\n power = power.split(\"^\")\n power = power[1]\n\n element = maker.make_prod(maker.make_const(float(constant)), maker.make_pwr(variable, 
float(power)))\n return element\n\n @staticmethod\n def parse_sum(poly_str):\n assert isinstance(poly_str, str)\n elementals = poly_str.split()\n i = 0\n\n while i != len(elementals):\n if elementals[i] == '+':\n elementals.pop(i)\n if elementals[i] == '-':\n elementals.pop(i)\n elementals[i] = '-' + elementals[i]\n i += 1\n\n for e in range(len(elementals)):\n elementals[e] = parser.parse_elt(elementals[e])\n\n if len(elementals) == 1:\n return elementals[0]\n else:\n adder = maker.make_plus(elementals[0], elementals[1])\n for i in range(2, len(elementals)):\n adder = maker.make_plus(adder, elementals[i])\n return adder\n","sub_path":"CS3430-ScientificComputingPython/hw07/hw07_parser.py","file_name":"hw07_parser.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"3263678","text":"\"\"\" TensorMONK :: layers :: attention's \"\"\"\n\n__all__ = [\"SelfAttention\"]\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .convolution import Convolution\nfrom .utils import compute_flops\n\n\nclass SelfAttention(nn.Module):\n r\"\"\" Self-Attention from Self-Attention Generative Adversarial Networks\n\n Args:\n tensor_size: shape of tensor in BCHW\n (None/any integer >0, channels, height, width)\n shrink (int, optional): used to compute output channels of key and\n query, i.e, tensor_size[1] / shrink, default = 8\n scale_factor (float, optional): Used to speedup the module by\n computing the attention at a lower scale (after interpolation).\n \"\"\"\n\n def __init__(self, tensor_size, shrink=8, scale_factor=1.,\n return_attention=False, **kwargs):\n super(SelfAttention, self).__init__()\n\n self.shrink = shrink\n self.scale_factor = scale_factor\n self.oc = int(tensor_size[1] / shrink)\n self.return_attention = return_attention\n\n self.key = Convolution(tensor_size, 1, self.oc, 1, True, None)\n self.query = Convolution(tensor_size, 1, self.oc, 1, True, None)\n self.value = Convolution(tensor_size, 1, tensor_size[1], 1, True, None)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.tensor_size = tensor_size\n\n def forward(self, tensor):\n if self.scale_factor != 1:\n _tensor = tensor.clone()\n tensor = F.interpolate(tensor, scale_factor=self.scale_factor)\n n, c, h, w = tensor.shape\n\n key = self.key(tensor).view(n, -1, h*w)\n query = self.query(tensor).view(n, -1, h*w)\n value = self.value(tensor).view(n, -1, h*w)\n\n attention = F.softmax(torch.bmm(query.permute(0, 2, 1), key), dim=2)\n o = torch.bmm(value, attention.permute(0, 2, 1)).view(n, c, h, w)\n\n if self.scale_factor != 1:\n o = F.interpolate(o, size=_tensor.shape[2:])\n if self.return_attention:\n return _tensor + o*self.gamma, attention\n return _tensor + o*self.gamma, attention\n if self.return_attention:\n return tensor + o*self.gamma, attention\n return tensor + o*self.gamma\n\n def flops(self):\n flops = 0\n c, h, w = self.tensor_size[1:]\n if self.scale_factor != 1:\n # assuming nearest\n nh, nw = int(h*self.scale_factor), int(w*self.scale_factor)\n flops += (c*h*w + c*nh*nw) * 2\n # attention - bmm\n flops += ((2 * self.oc * self.oc) - 1) * ((h * w)**2)\n # attention - softmax\n flops += (h * w) * (h * w * 3)\n # o - bmm\n flops += c * ((2 * h * w) - 1) * h * w\n # tensor + o*gamma\n flops += c * h * w * 2\n return compute_flops(self) + flops\n\n\n# from tensormonk.layers import Convolution\n# from tensormonk.layers.utils import compute_flops\n# tensor_size = (3, 16, 60, 60)\n# x = 
torch.rand(*tensor_size)\n# test = SelfAttention(tensor_size, 8, 1.)\n# test(x)[1].shape\n# %timeit test(x)[1].shape\n# test = SelfAttention(tensor_size, 8, 0.25)\n# test(x)[1].shape\n# %timeit test(x)[1].shape\n","sub_path":"tensormonk/layers/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"427257334","text":"from flask import Blueprint\nfrom flask import request\nfrom flask import render_template\nimport requests\nimport json\n\nunfollowfriend = Blueprint('unfollowfriend', __name__, url_prefix='/api/friend')\n\n\n@unfollowfriend.route('/unfollow', methods=['GET'])\ndef unfollow():\n email = request.cookies.get('email', '')\n session_id = request.cookies.get('session_id', '')\n friend_email = request.args.get('friend_email','')\n redirect_url = \"http://52.221.228.19:8037/api/friend/mine\"\n data = {'email': email, 'session_id': session_id,\"friend_email\": friend_email}\n r = requests.post(\"http://127.0.0.1:9998/friend/unfollow\", data=data)\n result = json.loads(r.content)\n\n if result['success']:\n return render_template(\"success_and_redirect.html\",\n success_message=\"Unfollow success .\",\n redirect_url=redirect_url)\n else:\n return render_template(\"error.html\",\n error_message=result[\"message\"],\n redirect_url=redirect_url)\n","sub_path":"myapp/friend/unfollow_friend.py","file_name":"unfollow_friend.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19188259","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n'''\r\nCreated on 2015-1-15\r\n\r\n@author: JohnDannl\r\n\r\ntransform all wiki_id to matrix id in the bow model,run after wikianc/fitershort.py\r\n\r\n'''\r\nfrom config import t2anc_widrf_file,wid2mid_dic_file,t2anc_mid_file\r\n\r\nwid2mid={}\r\nfor line in open(wid2mid_dic_file):\r\n wid,mid=line.split()\r\n wid2mid[wid]=mid\r\n\r\nt2anc_mid=open(t2anc_mid_file,'w')\r\nfor line in open(t2anc_widrf_file):\r\n mids=[]\r\n for wid in line.split():\r\n if wid2mid.has_key(wid):\r\n mids.append(wid2mid[wid]) \r\n t2anc_mid.write(' '.join(mids)+'\\n') \r\n\r\nt2anc_mid.close() ","sub_path":"wikianc/wid2mid.py","file_name":"wid2mid.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"86847772","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple, defaultdict\nfrom distutils.util import strtobool\nimport itertools as it\nimport operator as op\nimport os\n\nimport numpy as onp\nimport six\nfrom six.moves import xrange\n\nfrom ..config import flags\nfrom .. import core\nfrom .. import ad_util\nfrom .. import tree_util\nfrom .. 
import linear_util as lu\nfrom ..abstract_arrays import (ConcreteArray, ShapedArray, make_shaped_array,\n array_types, scalar_types)\nfrom ..core import AbstractTuple, JaxTuple, pack, valid_jaxtype, Literal\nfrom ..util import partial, partialmethod, memoize, concatenate, safe_map, prod\nfrom ..lib import xla_bridge as xb\nfrom . import partial_eval as pe\nfrom . import ad\n\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('jax_device_values',\n strtobool(os.getenv('JAX_DEVICE_VALUES', \"True\")),\n 'Enable device-persistent values.')\nflags.DEFINE_bool('jax_debug_nans',\n strtobool(os.getenv('JAX_DEBUG_NANS', \"False\")),\n 'Add nan checks to every operation.')\n\ndef apply_primitive(prim, *args, **params):\n abstract_args = map(abstractify, args)\n compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)\n return compiled_fun(*args)\n\n@memoize\ndef xla_primitive_callable(prim, *abstract_args, **params):\n shapes = tuple(map(xla_shape, abstract_args))\n built_c = primitive_computation(prim, *shapes, **params)\n result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())\n handle_result = result_handler(result_shape)\n compiled = built_c.Compile(shapes, xb.get_compile_options(),\n backend=xb.get_backend())\n return partial(execute_compiled_primitive, prim.name, compiled, handle_result)\n\n@memoize\ndef primitive_computation(prim, *shapes, **params):\n c = xb.make_computation_builder(\"primitive_computation\")\n xla_args = map(c.ParameterWithShape, shapes)\n xla_result = translation_rule(prim)(c, *xla_args, **params)\n try:\n return c.Build()\n except RuntimeError as e:\n # try for a better error message by using the abstract_eval checks\n prim.abstract_eval(*map(aval_from_xla_shape, shapes), **params)\n raise e\n\ndef aval_from_xla_shape(shape):\n if shape.is_tuple():\n return AbstractTuple(map(aval_from_xla_shape, shape.tuple_shapes()))\n else:\n return ShapedArray(shape.dimensions(), shape.element_type())\n\ndef execute_compiled_primitive(name, compiled, result_handler, *args):\n input_bufs = [device_put(x) for x in args]\n out_buf = compiled.Execute(input_bufs)\n check_nans(name, out_buf)\n return result_handler(out_buf)\n\ndef check_nans(name, buf):\n FLAGS.jax_debug_nans and _check_nans(name, buf.shape(), buf)\n\ndef _check_nans(name, xla_shape, buf):\n if xla_shape.is_tuple():\n _map(partial(_check_nans, name), xla_shape.tuple_shapes(), buf.destructure())\n else:\n if onp.issubdtype(xla_shape.element_type(), onp.floating):\n pyval = buf.to_py()\n if onp.any(onp.isnan(pyval)):\n msg = \"invalid value (nan) encountered in {}\"\n raise FloatingPointError(msg.format(name))\n\ndef device_put(x, device_num=0):\n \"\"\"Place a Python value `x` on device number `device_num`.\n\n This is a wrapper around jax.lib.xla_bridge.device_put to handle\n additional Python types, namely\n 1. the array-like types DeviceArray (which is already backed by device\n memory, though may be on the wrong device) and its subclass DeviceConstant\n (which represents a lazy value to be instantiated), and\n 2. 
the tuple-like types DeviceTuple (which is already backed by device\n memory, though may be on the wrong device) and JaxTuple (which may have some\n elements that are backed by device memory on the correct device).\n In particular, this function avoids transferring data already placed on the\n correct device, and handles instantiating DeviceConstants.\n\n Args:\n x: a tuplelike-tree with arraylike leaves representing the value to be\n transferred to the device, where tuplelike means a JaxTuple or\n DeviceTuple, and arraylike includes DeviceArray, DeviceConstant, and\n anything that has an '__array__' attr.\n device_num: an int representing the target physical device number.\n\n Returns:\n A buffer representing the input `x` placed on the appropriate device.\n \"\"\"\n x = canonicalize_pyval_dtype(x)\n t = type(x)\n if t is DeviceArray or t is DeviceTuple:\n if x.device_buffer.device() == device_num:\n return x.device_buffer\n else:\n # TODO(phawkins): perform a direct device-to-device copy rather than\n # bouncing via the host.\n return device_put(x.device_buffer.to_py(), device_num)\n elif isinstance(x, DeviceConstant):\n return instantiate_device_constant(x, device_num=device_num)\n elif isinstance(x, (DeviceArray, onp.ndarray)):\n return xb.device_put(x, device_num) # handle arraylikes\n elif isinstance(x, JaxTuple):\n element_bufs = tuple(map(partial(device_put, device_num=device_num), x))\n return xb.make_tuple(element_bufs, device_num)\n else:\n raise TypeError(t)\n\ndef device_put_many(xs_and_devices):\n \"\"\"Place multiple Python values on multiple devices in parallel.\n\n This is a wrapper around jax.lib.xla_bridge.device_put_many to handle\n additional Python types. See the docstring for jax.interpreters.xla.device_put\n for more information.\n\n Args:\n xs_and_devices: a sequence of (pyval, device_num) pairs in which device_num\n is an int representing the target physical device number and pyval is a\n tuple-like tree with arraylike leaves (see the device_put docstring).\n\n Returns:\n A sequence of buffers representing the inputs placed on the corresponding\n device numbers.\n \"\"\"\n transfer_indices = []\n transfers = []\n outputs = [None] * len(xs_and_devices)\n for i, (x, device_num) in enumerate(xs_and_devices):\n x = canonicalize_pyval_dtype(x)\n t = type(x)\n if t is DeviceArray or t is DeviceTuple:\n if x.device_buffer.device() == device_num:\n outputs[i] = x.device_buffer\n else:\n transfer_indices.append(i)\n # TODO(phawkins): perform a direct device-to-device copy rather than\n # bouncing via the host.\n transfers.append((x.device_buffer.to_py(), device_num))\n elif isinstance(x, DeviceConstant):\n outputs[i] = instantiate_device_constant(x, device_num=device_num)\n elif hasattr(t, '__array__'):\n transfer_indices.append(i)\n transfers.append((x, device_num)) # handle arraylikes\n elif t is JaxTuple:\n # TODO(mattjj,phawkins): improve this to avoid device_put call\n element_bufs = tuple(map(partial(device_put, device_num=device_num), x))\n outputs[i] = xb.make_tuple(element_bufs, device_num)\n else:\n raise TypeError(t)\n\n transfer_results = xb.device_put_many(transfers)\n for i, result in zip(transfer_indices, transfer_results):\n outputs[i] = result\n return outputs\n\n\n# When we execute an XLA computation, we get a raw device buffer back and need\n# to package it into a suitable Python object to return to the user. To avoid\n# unnecessary device-to-host transfers, we typically return a DeviceValue that\n# acts just like a familiar Python type (e.g. 
an ndarray or JaxTuple) but is\n# lazy in that it only copies data back to the host as required. Since the same\n# DeviceValue type is formed on every execution of a compiled computation, at\n# compile time we set up result handler functions and thus avoid redoing some of\n# the Python bookkeeping work on every execution. Since XLA shapes are slower to\n# manipulate than simple Python builtins, we store the metadata required for\n# forming the DeviceValue result in special ResultArray / ResultTuple classes.\n\n# Every JaxType needs to map to an XLA type. However this function's design is\n# based on the assumption that XLA types can be mapped uniquely back to a\n# JaxType, i.e. that the mapping is bijective. That assumption could be relaxed,\n# but it would mean we need to do a bit more bookkeping on the Python side to\n# track abstract values of outputs.\ndef xla_shape_to_result_shape(xla_shape):\n if xla_shape.is_tuple():\n aval = aval_from_xla_shape(xla_shape)\n result_shapes = tuple(map(xla_shape_to_result_shape, xla_shape.tuple_shapes()))\n return ResultTuple((aval, result_shapes))\n else:\n shape, dtype = xla_shape.dimensions(), xla_shape.element_type()\n ndim, size = len(shape), prod(shape)\n return ResultArray((shape, dtype, ndim, size))\nclass ResultTuple(tuple): pass\nclass ResultArray(tuple): pass\n\ndef result_handler(result_shape):\n if FLAGS.jax_device_values:\n return device_persistent_result_handler(result_shape)\n else:\n return pyval_result_handler(result_shape)\n\ndef device_persistent_result_handler(result_shape):\n t = type(result_shape)\n if t is ResultArray:\n return partial(DeviceArray, result_shape)\n elif t is ResultTuple:\n return partial(DeviceTuple, result_shape)\n else:\n raise TypeError(t)\n\ndef pyval_result_handler(result_shape):\n del result_shape\n def _tuple_to_jaxtuple(v):\n if isinstance(v, tuple):\n return JaxTuple(_tuple_to_jaxtuple(t) for t in v)\n return v\n def f(buf):\n return _tuple_to_jaxtuple(buf.to_py())\n return f\n\ndef compile_jaxpr(jaxpr, const_vals, *abstract_args):\n arg_shapes = list(map(xla_shape, abstract_args))\n built_c = jaxpr_computation(jaxpr, const_vals, (), *arg_shapes)\n result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())\n return built_c.Compile(arg_shapes, xb.get_compile_options(),\n backend=xb.get_backend()), result_shape\n\ndef build_jaxpr(jaxpr, const_vals, *abstract_args):\n arg_shapes = list(map(xla_shape, abstract_args))\n built_c = jaxpr_computation(jaxpr, const_vals, (), *arg_shapes)\n return built_c\n\n\ndef _prefetch_jaxpr_literals(jaxpr):\n \"\"\"Prefetches any DeviceArray values inside a jaxpr to the host.\"\"\"\n for eqn in jaxpr.eqns:\n for v in eqn.invars:\n if type(v) is core.Literal and isinstance(v.val, DeviceArray):\n v.val.copy_to_host_async()\n if eqn.bound_subjaxprs:\n for subjaxpr, _const_bindings, _freevar_bindings in eqn.bound_subjaxprs:\n _prefetch_jaxpr_literals(subjaxpr)\n\n\ndef jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\n assert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)\n c = xb.make_computation_builder(\"jaxpr_computation\")\n\n def read(v):\n if type(v) is Literal:\n return c.Constant(canonicalize_pyval_dtype(v.val))\n else:\n return env[v]\n\n def write(v, node):\n assert node is not None\n env[v] = node\n\n env = {}\n write(core.unitvar, c.Tuple())\n if const_vals:\n for val in const_vals:\n if isinstance(val, DeviceArray):\n val.copy_to_host_async()\n _map(write, jaxpr.constvars, map(c.Constant, const_vals))\n _map(write, 
jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n else:\n all_freevars = it.chain(jaxpr.constvars, jaxpr.freevars)\n _map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\n _map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\n _prefetch_jaxpr_literals(jaxpr)\n for eqn in jaxpr.eqns:\n if not eqn.restructure:\n in_nodes = list(map(read, eqn.invars))\n else:\n in_nodes = [xla_pack(c, map(read, invars)) if type(invars) is tuple\n else read(invars) for invars in eqn.invars]\n in_shapes = _map(c.GetShape, in_nodes)\n subcs = [\n jaxpr_computation(\n subjaxpr, (),\n _map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n *in_shapes)\n for subjaxpr, const_bindings, freevar_bindings in eqn.bound_subjaxprs]\n subfuns = [(subc, _map(read, const_bindings + freevar_bindings))\n for subc, (_, const_bindings, freevar_bindings)\n in zip(subcs, eqn.bound_subjaxprs)]\n ans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\n c.GetShape(ans) # force xla to do shape error checking\n out_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]\n _map(write, eqn.outvars, out_nodes)\n return c.Build(read(jaxpr.outvar))\n\ndef _map(f, *xs):\n return tuple(map(f, *xs))\n\ndef xla_destructure(c, ans):\n num_elements = len(c.GetShape(ans).tuple_shapes())\n return [c.GetTupleElement(ans, i) for i in range(num_elements)]\n\ndef xla_pack(c, xs):\n return c.Tuple(*xs)\n\ndef tuple_constant(c, val, canonicalize_types=True):\n return c.Tuple(*map(c.Constant, val))\nxb.register_constant_handler(JaxTuple, tuple_constant)\n\ndef translation_rule(p):\n backend = xb.get_backend()\n backend_specific_rule = backend_specific_translations[backend.platform].get(p)\n try:\n return backend_specific_rule or translations[p]\n except KeyError:\n raise NotImplementedError(\n \"XLA translation rule for '{}' not implemented\".format(p))\n\n\ndef lower_fun(fun, c, *xla_args, **params):\n xla_shapes = tuple(map(c.GetShape, xla_args))\n avals = map(aval_from_xla_shape, xla_shapes)\n pvals = [pe.PartialVal((a, core.unit)) for a in avals]\n jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(fun, pvals, **params)\n built_c = jaxpr_computation(jaxpr, consts, (), *xla_shapes)\n return c.Call(built_c, xla_args)\n\n\ntranslations = {}\nbackend_specific_translations = defaultdict(dict)\n\ntranslations[core.pack_p] = lambda c, *xs: c.Tuple(*xs)\ntranslations[core.call_p] = lambda c, subc_a1, *a2: c.Call(subc_a1[0],\n subc_a1[1] + a2)\ntranslations[core.identity_p] = lambda c, x: x\n\ndef zeros_like_translation_rule(c, x):\n def _zeros_like(shape):\n if shape.is_tuple():\n return c.Tuple(*(_zeros_like(x) for x in shape.tuple_shapes()))\n else:\n return c.Broadcast(c.Constant(onp.array(0, shape.element_type())),\n shape.dimensions())\n return _zeros_like(c.GetShape(x))\n\ndef add_jaxvals_translation_rule(c, x, y):\n x_shape, y_shape = map(c.GetShape, (x, y))\n if x_shape.is_tuple() and y_shape.is_tuple():\n xs = xla_destructure(c, x)\n ys = xla_destructure(c, y)\n return c.Tuple(*map(partial(add_jaxvals_translation_rule, c), xs, ys))\n else:\n return c.Add(x, y)\n\ntranslations[ad_util.zeros_like_p] = zeros_like_translation_rule\ntranslations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule\n\n\ndef canonicalize_pyval_dtype(x):\n try:\n return canonicalize_dtype_handlers[type(x)](x)\n except KeyError:\n msg = \"No canonicalize handler registered for type: {}\"\n raise TypeError(msg.format(type(x)))\n\ncanonicalize_dtype_handlers = {}\n\ndef canonicalize_tuple_dtype(tup):\n 
return JaxTuple(map(canonicalize_pyval_dtype, tup))\ncanonicalize_dtype_handlers[JaxTuple] = canonicalize_tuple_dtype\n\ndef canonicalize_ndarray_dtype(x):\n return onp.asarray(x, xb.canonicalize_dtype(onp.result_type(x)))\n\nfor t in array_types:\n canonicalize_dtype_handlers[t] = canonicalize_ndarray_dtype\n\ndef identity(x): return x\n\n\ndef abstractify(x):\n try:\n return pytype_aval_mappings[type(x)](x)\n except KeyError:\n raise TypeError(\"No abstraction handler for type: {}\".format(type(x)))\n\npytype_aval_mappings = {}\n\ndef abstractify_tuple(tup):\n return AbstractTuple(map(abstractify, tup))\npytype_aval_mappings[JaxTuple] = abstractify_tuple\npytype_aval_mappings[AbstractTuple] = abstractify_tuple\n\nfor t in array_types:\n pytype_aval_mappings[t] = make_shaped_array\n\n\nclass DeviceValue(object):\n \"\"\"A DeviceValue represents a value backed by device memory.\"\"\"\n __slots__ = [\"device_buffer\"]\n def __init__(self, device_buffer):\n self.device_buffer = device_buffer\n\n def _check_if_deleted(self):\n if self.device_buffer is None:\n raise ValueError(\"DeviceValue has been deleted.\")\n\n def block_until_ready(self):\n \"\"\"Blocks the caller until the buffer's value has been computed on device.\n\n This method is mostly useful for timing microbenchmarks that wish to\n time how long a computation takes, without transferring the result back\n to the host.\n \"\"\"\n self._check_if_deleted()\n self.device_buffer.block_host_until_ready()\n\n\nclass DeviceTuple(DeviceValue):\n \"\"\"A DeviceTuple is a JaxTuple backed by a single device memory buffer.\"\"\"\n __slots__ = [\"aval\", \"result_shapes\"]\n\n def __init__(self, result_shape, device_buffer):\n self.device_buffer = device_buffer\n self.aval, self.result_shapes = result_shape\n\n def __iter__(self):\n bufs = self.device_buffer.destructure()\n handlers = map(device_persistent_result_handler, self.result_shapes)\n elts = [handler(buf) for handler, buf in zip(handlers, bufs)]\n return iter(elts)\n\n def __len__(self):\n return len(self.aval)\n\n def __repr__(self):\n return 'DeviceTuple(len={length})'.format(length=len(self))\n\n def __eq__(self, other):\n return tuple(self) == tuple(other)\n\n\n# DeviceValues don't need to be dtype-canonicalized because we assume values on\n# the device have already been canonicalized.\ncore.pytype_aval_mappings[DeviceTuple] = core.pytype_aval_mappings[JaxTuple]\npytype_aval_mappings[DeviceTuple] = op.attrgetter('aval')\ncanonicalize_dtype_handlers[DeviceTuple] = identity\n\ndef _device_tuple_constant_handler(c, val, canonicalize_types=True):\n py_val = pack(c.Constant(elt, canonicalize_types=canonicalize_types)\n for elt in val)\n return c.Constant(py_val)\nxb.register_constant_handler(DeviceTuple, _device_tuple_constant_handler)\n\n# TODO(mattjj): could jit-compile a computation here\nad_util.jaxval_adders[DeviceTuple] = ad_util.add_jaxtuples\n\n# TODO(phawkins): after Jaxlib 0.1.17 has been released, bump the minimum\n# jaxlib version and change callers of this function to simply call\n# the copy_to_host_async method directly.\ndef _copy_to_host_async(buffer):\n if hasattr(buffer, \"copy_to_host_async\"):\n buffer.copy_to_host_async()\n\n\ndef forward_method(attrname, self, fun, *args):\n return fun(getattr(self, attrname), *args)\nforward_to_value = partial(forward_method, \"_value\")\n\nclass DeviceArray(DeviceValue):\n \"\"\"A DeviceArray is an ndarray backed by a single device memory buffer.\"\"\"\n # We don't subclass ndarray because that would open up a host of issues,\n # 
but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.\n __slots__ = [\"shape\", \"dtype\", \"ndim\", \"size\", \"_npy_value\"]\n __array_priority__ = 100.\n\n def __init__(self, result_shape, device_buffer):\n self.device_buffer = device_buffer\n self.shape, self.dtype, self.ndim, self.size = result_shape\n self._npy_value = None\n\n # TODO make device_buffer a property, make the _npy_value writeable, invalidate\n @property\n def _value(self):\n self._check_if_deleted()\n if self._npy_value is None:\n self._npy_value = self.device_buffer.to_py()\n self._npy_value.flags.writeable = False\n return self._npy_value\n\n def copy(self):\n \"\"\"Returns an ndarray (backed by host memory, not device memory).\"\"\"\n return onp.asarray(self)\n\n def copy_to_host_async(self):\n \"\"\"Requests a copy of the buffer to the host.\"\"\"\n self._check_if_deleted()\n if self._npy_value is None:\n _copy_to_host_async(self.device_buffer)\n\n def delete(self):\n \"\"\"Deletes the device array and any cached copy on the host.\n\n It is an error to access the contents of a `DeviceArray` after it has\n been deleted.\n\n Use of this method is optional; device buffers will be reclaimed\n automatically by Python when a DeviceArray object is garbage collected.\n However, it is sometimes useful to have more explicit control over the\n time of deletion.\n \"\"\"\n self.device_buffer.delete()\n self.device_buffer = None\n self._npy_value = None\n\n def __repr__(self):\n return onp.array_repr(self)\n\n def item(self):\n if onp.issubdtype(self.dtype, onp.complexfloating):\n return complex(self)\n elif onp.issubdtype(self.dtype, onp.floating):\n return float(self)\n elif onp.issubdtype(self.dtype, onp.integer):\n return int(self)\n elif onp.issubdtype(self.dtype, onp.bool_):\n return bool(self)\n else:\n raise TypeError(self.dtype)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\") # same as numpy error\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in xrange(self.shape[0]))\n\n def __reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n else:\n return (self[i] for i in xrange(self.shape[0] - 1, -1, -1))\n\n def __format__(self, format_spec):\n # Simulates behavior of https://github.com/numpy/numpy/pull/9883\n if self.ndim == 0:\n return format(self._value[()], format_spec)\n else:\n return format(self._value, format_spec)\n\n __array__ = partialmethod(forward_to_value, onp.asarray)\n __str__ = partialmethod(forward_to_value, str)\n __bool__ = __nonzero__ = partialmethod(forward_to_value, bool)\n __float__ = partialmethod(forward_to_value, float)\n __int__ = partialmethod(forward_to_value, int)\n if six.PY2:\n __long__ = partialmethod(forward_to_value, long) # noqa: F821\n __complex__ = partialmethod(forward_to_value, complex)\n __hex__ = partialmethod(forward_to_value, hex)\n __oct__ = partialmethod(forward_to_value, oct)\n\n # pickle saves and loads just like an ndarray\n __reduce__ = partialmethod(forward_to_value, op.methodcaller(\"__reduce__\"))\n\n # clobbered when jax.numpy is imported, but useful in tests\n def __eq__(self, other): return self._value == other\n\n def __hash__(self):\n # TODO(mattjj): this is not semantically correct because it is possible\n # __eq__ is true for values with unequal __hash__ values. 
However, the\n # main use case at the moment is memoization for which false negatives are\n # fine.\n return id(self)\n\nscalar_types.add(DeviceArray)\n\n\n# DeviceValues don't need to be canonicalized because we assume values on the\n# device have already been canonicalized.\ncore.pytype_aval_mappings[DeviceArray] = ConcreteArray\npytype_aval_mappings[DeviceArray] = make_shaped_array\ncanonicalize_dtype_handlers[DeviceArray] = identity\n\ndef _device_array_constant_handler(c, val, canonicalize_types=True):\n return c.Constant(onp.asarray(val), canonicalize_types=canonicalize_types)\nxb.register_constant_handler(DeviceArray, _device_array_constant_handler)\n\npytype_aval_mappings[ConcreteArray] = make_shaped_array\npytype_aval_mappings[ShapedArray] = identity\n\n\nclass DeviceConstant(DeviceArray):\n def copy_to_host_async(self): pass\n\n @staticmethod\n def constant_handler(c, constant_instance, canonicalize_types=True):\n assert False\n\ndef instantiate_device_constant(const, cutoff=1e6, device_num=0):\n # dispatch an XLA Computation to build the constant on the device if it's\n # large, or alternatively build it on the host and transfer it if it's small\n # TODO(mattjj): need a way to instantiate on a specific device\n assert isinstance(const, DeviceConstant)\n if const.size > cutoff and device_num == 0:\n c = xb.make_computation_builder(\"constant_instantiating_computation\")\n xla_const = const.constant_handler(c, const)\n compiled = c.Build(xla_const).Compile((), xb.get_compile_options(),\n backend=xb.get_backend())\n return compiled.Execute(())\n else:\n return xb.device_put(onp.asarray(const), device_num)\n\n\ndef xla_shape(x):\n try:\n return xb.Shape.array_shape(x.dtype, x.shape)\n except AttributeError:\n if type(x) in (core.AbstractTuple, core.JaxTuple):\n return xb.Shape.tuple_shape(tuple(map(xla_shape, x)))\n else:\n raise TypeError(type(x))\n\n\ndef xla_call_impl(fun, *args, **params):\n device_values = FLAGS.jax_device_values and params.pop('device_values')\n compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))\n try:\n return compiled_fun(*args)\n except FloatingPointError:\n print(\"Invalid value encountered in the output of a jit function. 
\"\n \"Calling the de-optimized version.\")\n return fun.call_wrapped(*args) # probably won't return\n\n\n@lu.memoize\ndef xla_callable(fun, device_values, *abstract_args):\n pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]\n with core.new_master(pe.JaxprTrace, True) as master:\n jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)\n assert not env # no subtraces here (though cond might eventually need them)\n compiled, result_shape = compile_jaxpr(jaxpr, consts, *abstract_args)\n del master, consts, jaxpr, env\n if device_values:\n handle_result = device_persistent_result_handler(result_shape)\n else:\n handle_result = pyval_result_handler(result_shape)\n return partial(execute_compiled, compiled, pval, handle_result)\n\ndef execute_compiled(compiled, pval, handle_result, *args):\n input_bufs = [device_put(x) for x in args]\n out_buf = compiled.Execute(input_bufs)\n check_nans(\"jit-compiled computation\", out_buf)\n return pe.merge_pvals(handle_result(out_buf), pval)\n\n\ndef xla_call_translation_rule(c, subc_a1, *a2, **params):\n subc, a1 = subc_a1\n return c.Call(subc, a1 + a2)\n\nxla_call_p = core.Primitive('xla_call')\nxla_call = partial(core.call_bind, xla_call_p)\nxla_call_p.def_custom_bind(xla_call)\nxla_call_p.def_impl(xla_call_impl)\n\ntranslations[xla_call_p] = xla_call_translation_rule\nad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)\n","sub_path":"jax/interpreters/xla.py","file_name":"xla.py","file_ext":"py","file_size_in_byte":26021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"372940047","text":"import sys\nimport socket\n\n\nip = ['192.168.122.232', '192.168.122.229']\n\nfor i in range(2):\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (ip[i], 10000)\n print(f\"connecting to {server_address}\")\n sock.connect(server_address)\n\n\n try:\n # Send data\n gambar = open('gambarkucing.jpg', 'rb')\n hasil = gambar.read()\n print(\"sending gambarkucing\"+str(i+1)+\".jpg\")\n sock.sendall(hasil)\n\n namafile = 'kucing'+str(i+1)\n responfile = open(namafile+'.jpg', 'wb')\n\n # Look for the response\n amount_received = 0\n amount_expected = len(hasil)\n while amount_received < amount_expected:\n data = sock.recv(16)\n amount_received += len(data)\n if data:\n responfile.write(data)\n else:\n break\n finally:\n print(\"closing\")\n sock.close()\n","sub_path":"progjar1/JAWABAN/clientgambar.py","file_name":"clientgambar.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108474264","text":"from pymongo import MongoClient\n\n#创建数据库连接\nconn = MongoClient('localhost',27017)\n#创建数据库对象\ndb = conn.test\n#数据操作\nmyset = db.class4\n# myset.insert_many([\n# {'name':'bbb','num':'12312'},\n# {'name':'ccc','num':'23123'},\n# {'name':'ddd','num':'31213'}\n# ])\n# myset.save({'name':'bbb','num':'66666','_id':1})\n\n# cursor = myset.find({'name':{'$exists':True}},{'_id':0})\n# for i in cursor.limit(3).sort() :\n# print(i)\n\n# dic = {'$or':[{'name':'bbb'},{'name':'aaa'}]}\n# d = myset.find_one(dic)\n# print(d)\n\n\n#关闭连接\nconn.close()","sub_path":"MongoDB/2019.3.4/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"69391376","text":"#!/usr/bin/python\n# coding: utf-8\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport json\nimport argparse\nimport logging\nimport logging.config\nimport textwrap\nimport itertools\nfrom datetime import datetime, timedelta\n\nfrom lxml import etree as ET\n\nimport plumber\nimport pipeline_xml\nfrom articlemeta.client import ThriftClient\n\nfrom SolrAPI import Solr\n\nlogger = logging.getLogger('updatesearch')\n\nALLOWED_COLLECTION = [\n 'scl',\n 'arg',\n 'cub',\n 'esp',\n 'sss',\n 'spa',\n 'chl',\n 'mex',\n 'prt',\n 'ecu',\n 'cri',\n 'sza',\n 'col',\n 'per',\n 'ven',\n 'ury',\n 'bol',\n 'par'\n]\n\n\nclass UpdateSearch(object):\n \"\"\"\n Process to get article in article meta and index in Solr.\n \"\"\"\n\n usage = \"\"\"\\\n Process to index article to SciELO Solr.\n\n This process collects articles in the Article meta using thrift and index\n in SciELO Solr.\n\n With this process it is possible to process all the article or some specific\n by collection, issn from date to until another date and a period like 7 days.\n \"\"\"\n\n parser = argparse.ArgumentParser(textwrap.dedent(usage))\n\n parser.add_argument('-p', '--period',\n type=int,\n help='index articles from specific period, use number of days.')\n\n parser.add_argument('-f', '--from',\n dest='from_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d'),\n nargs='?',\n help='index articles from specific date. YYYY-MM-DD.')\n\n parser.add_argument('-u', '--until',\n dest='until_date',\n type=lambda x: datetime.strptime(x, '%Y-%m-%d'),\n nargs='?',\n help='index articles until this specific date. YYYY-MM-DD (default today).',\n default=datetime.now())\n\n parser.add_argument('-c', '--collection',\n dest='collection',\n default=None,\n help='use the acronym of the collection eg.: spa, scl, col.')\n\n parser.add_argument('-i', '--issn',\n dest='issn',\n default=None,\n help='journal issn.')\n\n parser.add_argument('-d', '--delete',\n dest='delete',\n default=None,\n help='delete query ex.: q=*:* (Lucene Syntax).')\n\n parser.add_argument('-s', '--sanitization',\n dest='sanitization',\n default=False,\n action='store_true',\n help='Remove objects from the index that are no longer present in the database.')\n\n parser.add_argument('-url', '--url',\n dest='solr_url',\n help='Solr RESTFul URL, processing try to get the variable from environment ``SOLR_URL`` otherwise use --url to set the url(preferable).')\n\n parser.add_argument('-v', '--version',\n action='version',\n version='version: 0.2')\n\n def __init__(self):\n\n self.args = self.parser.parse_args()\n\n solr_url = os.environ.get('SOLR_URL')\n\n if not solr_url and not self.args.solr_url:\n raise argparse.ArgumentTypeError('--url or ``SOLR_URL`` enviroment variable must be the set, use --help.')\n\n if not solr_url:\n self.solr = Solr(self.args.solr_url, timeout=10)\n else:\n self.solr = Solr(solr_url, timeout=10)\n\n if self.args.period:\n self.args.from_date = datetime.now() - timedelta(days=self.args.period)\n\n def format_date(self, date):\n \"\"\"\n Convert datetime.datetime to str return: ``2000-05-12``.\n\n :param datetime: bult-in datetime object\n\n :returns: str\n \"\"\"\n if not date:\n return None\n\n return date.strftime('%Y-%m-%d')\n\n def pipeline_to_xml(self, article):\n \"\"\"\n Pipeline to tranform a dictionary to XML format\n\n :param list_dict: List of dictionary content key tronsform in a XML.\n \"\"\"\n\n ppl = plumber.Pipeline(\n pipeline_xml.SetupDocument(),\n pipeline_xml.DocumentID(),\n 
pipeline_xml.DOI(),\n pipeline_xml.Collection(),\n pipeline_xml.DocumentType(),\n pipeline_xml.URL(),\n pipeline_xml.Authors(),\n pipeline_xml.Titles(),\n pipeline_xml.OriginalTitle(),\n pipeline_xml.Pages(),\n pipeline_xml.WOKCI(),\n pipeline_xml.WOKSC(),\n pipeline_xml.JournalAbbrevTitle(),\n pipeline_xml.Languages(),\n pipeline_xml.AvailableLanguages(),\n pipeline_xml.Fulltexts(),\n pipeline_xml.PublicationDate(),\n pipeline_xml.SciELOPublicationDate(),\n pipeline_xml.SciELOProcessingDate(),\n pipeline_xml.Abstract(),\n pipeline_xml.AffiliationCountry(),\n pipeline_xml.AffiliationInstitution(),\n pipeline_xml.Sponsor(),\n pipeline_xml.Volume(),\n pipeline_xml.SupplementVolume(),\n pipeline_xml.Issue(),\n pipeline_xml.SupplementIssue(),\n pipeline_xml.ElocationPage(),\n pipeline_xml.StartPage(),\n pipeline_xml.EndPage(),\n pipeline_xml.JournalTitle(),\n pipeline_xml.IsCitable(),\n pipeline_xml.Permission(),\n pipeline_xml.Keywords(),\n pipeline_xml.JournalISSNs(),\n pipeline_xml.SubjectAreas(),\n pipeline_xml.ReceivedCitations(),\n pipeline_xml.TearDown()\n )\n\n xmls = ppl.run([article])\n\n # Add root document\n add = ET.Element('add')\n\n for xml in xmls:\n add.append(xml)\n\n return ET.tostring(add, encoding=\"utf-8\", method=\"xml\")\n\n def run(self):\n \"\"\"\n Run the process for update article in Solr.\n \"\"\"\n\n art_meta = ThriftClient()\n\n if self.args.delete:\n\n self.solr.delete(self.args.delete, commit=True)\n\n elif self.args.sanitization:\n\n # set of index ids\n ind_ids = set()\n\n # set of articlemeta ids\n art_ids = set()\n\n # all ids in index\n list_ids = json.loads(self.solr.select(\n {'q': '*:*', 'fl': 'id', 'rows': 1000000}))['response']['docs']\n\n for id in list_ids:\n ind_ids.add(id['id'])\n\n # all ids in articlemeta\n for item in art_meta.documents(only_identifiers=True):\n if item.collection not in ALLOWED_COLLECTION:\n continue\n art_ids.add('%s-%s' % (item.code, item.collection))\n\n # Ids to remove\n remove_ids = ind_ids - art_ids\n\n for id in remove_ids:\n self.solr.delete('id:%s' % id, commit=True)\n\n logger.info(\"List of removed ids: %s\" % remove_ids)\n\n else:\n\n # Get article identifiers\n\n logger.info(\"Indexing in {0}\".format(self.solr.url))\n\n for document in art_meta.documents(\n collection=self.args.collection,\n issn=self.args.issn,\n from_date=self.format_date(self.args.from_date),\n until_date=self.format_date(self.args.until_date)\n ):\n\n try:\n xml = self.pipeline_to_xml(document)\n self.solr.update(self.pipeline_to_xml(document), commit=True)\n except ValueError as e:\n logger.error(\"ValueError: {0}\".format(e))\n logger.exception(e)\n continue\n except Exception as e:\n logger.error(\"Error: {0}\".format(e))\n logger.exception(e)\n continue\n\n # optimize the index\n self.solr.commit()\n self.solr.optimize()\n\n\ndef main():\n\n try:\n # set log\n logging.config.fileConfig('logging.conf')\n\n # Start time\n start = time.time()\n\n # run the process\n UpdateSearch().run()\n\n # End Time\n end = time.time()\n\n print(\"Duration {0} seconds.\".format(end-start))\n\n except KeyboardInterrupt:\n logger.critical(\"Interrupt by user\")\n\nif __name__ == \"__main__\":\n\n # command line\n sys.exit(main() or 0)\n","sub_path":"proc/updatesearch/updatesearch.py","file_name":"updatesearch.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184257634","text":"#Q1\n\nli=['a','b','c','d','e']\nli.reverse()\nprint(\"1)The reversed list is = 
\",li)\n\n#Q2--\n\nstr= ['T', 'q', 'B', 'F', 'j', 'O', 't', 'L', 'D']\nfor s in str:\n if s.isupper():\n print(\"2)The uppercase letter is \",s)\n\n#Q3--\n\nli=['1','3','6','9','12','15','18','21']\nli2=[]\nfor n in li:\n li2.append(int(n))\nli=li2\nprint(\"3)The list of ntegers is\",li2)\n\n\n#Q4--\n\nstr='abba'\nstr1='allk'\nif(str==str[::-1]):\n print(\"4.1)The string is palindromic\")\nelse:\n print(\"The string is not palindromic\")\n \nif(str1==str1[::-1]):\n print(\"The string is palindromic\")\nelse:\n print(\"4.2)The string is not palindromic\")\n\n\n#Q5--\n\nimport copy\nl_1=[[1,2,3],[4,5,6]]\nl_2=copy.deepcopy(l_1)\nprint(\"5)The new list is = \",l_2)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"558981238","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ndef main():\n from setuptools import setup, find_packages\n\n with open(\"README.rst\", \"rt\") as inf:\n readme = inf.read()\n\n setup(name=\"eow\",\n version=\"2015.1.5\",\n description=\"Edit local files in CodeMirror\",\n long_description=readme,\n author=u\"Andreas Kloeckner\",\n author_email=\"inform@tiker.net\",\n license=\"MIT\",\n zip_safe=False,\n\n install_requires=[\n \"flask\",\n \"ipaddr\",\n ],\n\n scripts=[\"bin/eow\"],\n packages=find_packages(),\n include_package_data=True,\n package_data={\n 'eow': [\n 'templates/*.html',\n 'static/*.js',\n 'static/*.css',\n 'static/codemirror/*/*.js',\n 'static/codemirror/*/*.css',\n 'static/codemirror/*/*/*.js',\n 'static/codemirror/*/*/*.css',\n ],\n }\n )\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pypi_install_script/eow-2015.1.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"56290161","text":"#!/usr/bin/env python\n\nimport os\nfrom flask.ext.script import Manager, Shell\nfrom sqlalchemy.sql import func\nfrom app import create_app, db\nfrom app.models import QueryStocks, Stock, StockTransaction, Transaction\nfrom app.models.fixtures import populate_test_stocks\n\napp = create_app(os.getenv('CONFIG') or \"default\")\nmanager = Manager(app)\n\n\ndef make_shell_context():\n \"\"\"Return a dictionary with objects we want available in the shell.\"\"\"\n return dict(app=app, db=db, func=func, QueryStocks=QueryStocks, Stock=Stock, StockTransaction=StockTransaction,\n Transaction=Transaction)\n\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\n\n\n@manager.command\ndef bootstrap(test_data=False):\n \"\"\"Bootstrap the database.\"\"\"\n db.drop_all()\n db.create_all()\n\n if test_data:\n print(\"Populating stocks with test data ...\")\n populate_test_stocks(db, app.config['ENDPOINT'])\n\n\n@manager.command\ndef test():\n \"\"\"Run unit tests.\"\"\"\n import unittest\n\n # Bootstrap the test database with test data.\n bootstrap(test_data=True)\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n\nif __name__ == \"__main__\":\n manager.run()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"409769026","text":"\"\"\"\nRao-Teh samples of tolerance MJP trajectories on trees.\n\nThis should use concepts related to inference of 
parameters\nof continuous time Bayesian networks (CTBN),\nbut it will not be so general as to allow any arbitrary network.\n\nIn this module,\nthe disease_data variable is a list, indexed by tolerance class,\nof maps from a node to a set of allowed tolerance states.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport itertools\nimport random\nimport math\nfrom collections import defaultdict\n\nimport numpy as np\nimport networkx as nx\n\nfrom raoteh.sampler import (\n _graph_transform,\n _mjp, _tmjp,\n _sample_mcx, _sample_mcy, _sample_mcz, _sample_mjp,\n )\n\n\n__all__ = []\n\n\n\ndef gen_histories(ctm, T, root, node_to_primary_state,\n disease_data=None, uniformization_factor=2, nhistories=None):\n \"\"\"\n Use the Rao-Teh method to sample histories on trees.\n\n Edges of the yielded trees will be augmented\n with weights and states.\n The weighted size of each yielded tree should be the same\n as the weighted size of the input tree.\n\n Parameters\n ----------\n ctm : instance of CompoundToleranceModel\n Model defining the compound Markov process.\n T : weighted undirected acyclic networkx graph\n Weighted tree.\n root : integer\n Root of the tree.\n node_to_primary_state : dict\n A map from a node to a primary process state observation.\n If a node is missing from this map,\n then the observation is treated as missing.\n disease_data : list, optional\n A list, indexed by tolerance class,\n of maps from a node to a set of allowed tolerance states.\n uniformization_factor : float, optional\n A value greater than 1.\n nhistories : integer, optional\n Sample this many histories.\n If None, then sample an unlimited number of histories.\n\n \"\"\"\n # Get initial jointly feasible trajectories\n # for the components of the compound process.\n primary_trajectory, tolerance_trajectories = get_feasible_history(\n ctm, T, root, node_to_primary_state,\n disease_data=disease_data)\n\n # Summarize the primary process in ways that are useful for Rao-Teh.\n primary_total_rates = _mjp.get_total_rates(ctm.Q_primary)\n primary_max_total_rate = max(primary_total_rates.values())\n primary_omega = uniformization_factor * primary_max_total_rate\n primary_poisson_rates = dict(\n (a, primary_omega - q) for a, q in primary_total_rates.items())\n P_primary = _sample_mjp.get_uniformized_transition_matrix(\n ctm.Q_primary, omega=primary_omega)\n\n # Summarize the compound process.\n Q_tolerance = _tmjp.get_tolerance_rate_matrix(ctm.rate_off, ctm.rate_on)\n\n # Summarize the tolerance process in ways that are useful for Rao-Teh.\n tolerance_total_rates = _mjp.get_total_rates(Q_tolerance)\n tolerance_max_total_rate = max(tolerance_total_rates.values())\n tolerance_omega = uniformization_factor * tolerance_max_total_rate\n tolerance_poisson_rates = dict(\n (a, tolerance_omega - q) for a, q in tolerance_total_rates.items())\n P_tolerance = _sample_mjp.get_uniformized_transition_matrix(\n Q_tolerance, omega=tolerance_omega)\n\n # Generate histories using Rao-Teh sampling.\n for i in itertools.count():\n\n # Remove redundant nodes in the tolerance trajectories.\n new_tolerance_trajectories = []\n for traj in tolerance_trajectories:\n all_rnodes = _graph_transform.get_redundant_degree_two_nodes(traj)\n extra_rnodes = all_rnodes - set(T)\n traj = _graph_transform.remove_redundant_nodes(traj, extra_rnodes)\n new_tolerance_trajectories.append(traj)\n tolerance_trajectories = new_tolerance_trajectories\n\n # Yield the sampled trajectories.\n yield primary_trajectory, 
tolerance_trajectories\n\n # If we have sampled enough histories, then return.\n if nhistories is not None:\n nsampled = i + 1\n if nsampled >= nhistories:\n return\n\n # Resample poisson events on the primary trajectory,\n # then extract the event map from the resulting timing trajectory.\n timing_traj = _sample_mjp.resample_poisson(\n primary_trajectory, primary_poisson_rates)\n event_map = _graph_transform.get_event_map(T, root, timing_traj)\n edge_to_event_times = {}\n for edge, events in event_map.items():\n event_times = set(tm for tm, obj in events)\n edge_to_event_times[edge] = event_times\n\n # Resample primary states given the base tree and tolerances.\n primary_trajectory = resample_primary_states(\n T, root,\n ctm.primary_to_part,\n P_primary, ctm.primary_distn,\n node_to_primary_state,\n tolerance_trajectories, edge_to_event_times)\n\n # Remove redundant nodes in the primary process trajectory\n # so that it can be more efficiently used as a background\n # for sampling the tolerance process trajectories.\n all_rnodes = _graph_transform.get_redundant_degree_two_nodes(\n primary_trajectory)\n expendable_rnodes = all_rnodes - set(T)\n primary_trajectory = _graph_transform.remove_redundant_nodes(\n primary_trajectory, expendable_rnodes)\n\n # Resample tolerance process trajectories.\n new_tolerance_trajectories = []\n for tol, tol_traj in enumerate(tolerance_trajectories):\n\n # Resample poisson events on the tolerance trajectory,\n # then extract the event map from the resulting timing trajectory.\n timing_traj = _sample_mjp.resample_poisson(\n tol_traj, tolerance_poisson_rates)\n event_map = _graph_transform.get_event_map(T, root, timing_traj)\n edge_to_event_times = {}\n for edge, events in event_map.items():\n event_times = set(tm for tm, obj in events)\n edge_to_event_times[edge] = event_times\n\n # Resample the tolerance states.\n traj = resample_tolerance_states(\n T, root,\n ctm.primary_to_part,\n P_tolerance, ctm.tolerance_distn,\n primary_trajectory, edge_to_event_times, tol,\n disease_data=disease_data)\n\n # Add the tolerance trajectory.\n new_tolerance_trajectories.append(traj)\n\n # Update the list of tolerance trajectories.\n # Note that these have redundant nodes which should be removed.\n tolerance_trajectories = new_tolerance_trajectories\n\n\ndef resample_primary_states(\n T, root,\n primary_to_part,\n P_primary, primary_distn,\n node_to_primary_state,\n tolerance_trajectories, edge_to_event_times):\n \"\"\"\n Resample primary states.\n\n This function addresses the dependence among components of the tolerance\n process strictly through conditioning rather than through rate dependence.\n\n Parameters\n ----------\n T : weighted undirected acyclic networkx graph\n This is the original tree.\n root : integer\n Root of the tree.\n\n Returns\n -------\n\n Notes\n -----\n This function is not involved in resampling uniformization times.\n\n \"\"\"\n # Precompute the set of all tolerance classes.\n tolerance_classes = set(primary_to_part.values())\n\n # Build a merged tree corresponding to the tolerance trajectories,\n # with event nodes corresponding to uniformization times\n # for the primary process.\n T_merged, event_nodes = _graph_transform.add_trajectories(T, root,\n tolerance_trajectories,\n edge_to_event_times=edge_to_event_times)\n\n # Construct the 'chunk tree' whose edges\n # are in correspondence with the primary process event nodes.\n info = _graph_transform.get_chunk_tree_type_b(\n T_merged, root, event_nodes)\n chunk_tree, edge_to_chunk_node, 
event_node_to_chunk_edge = info\n\n # Get the map from each chunk node to the set of\n # tolerance classes in the 'off' state\n # that fall within the subtree represented by the chunk node.\n chunk_node_to_forbidden_tols = defaultdict(set)\n for merged_edge in nx.bfs_edges(T_merged, root):\n\n # Unpack the merged edge and get the chunk node that it maps to.\n na, nb = merged_edge\n chunk_node = edge_to_chunk_node[merged_edge]\n\n # For each tolerance class,\n # check if its state along this edge is 'off',\n # and if so, add the tolerance class to the set of forbidden\n # tolerance classes in the chunk node that includes this edge.\n for tol in tolerance_classes:\n tolerance_state = T_merged[na][nb]['states'][tol]\n if not tolerance_state:\n chunk_node_to_forbidden_tols[chunk_node].add(tol)\n\n # Check that no chunk node forbids all tolerance classes.\n for chunk_node, forbidden_tols in chunk_node_to_forbidden_tols.items():\n bad_tols = set(forbidden_tols) - set(tolerance_classes)\n if bad_tols:\n raise Exception('internal error: '\n 'for this chunk node, '\n 'the set of forbidden tolerance classes contains some '\n 'unrecognized entries: ' + str(sorted(bad_tols)))\n if set(forbidden_tols) == set(tolerance_classes):\n raise Exception('internal error: '\n 'for this chunk node, all tolerance classes are forbidden')\n\n # The chunk node may be constrained by primary state data.\n chunk_node_to_obs_state = {}\n for merged_edge in nx.bfs_edges(T_merged, root):\n\n # Unpack the merged edge and get the chunk node that it maps to.\n na, nb = merged_edge\n chunk_node = edge_to_chunk_node[merged_edge]\n\n # If a state has been observed for the given edge node,\n # then set the observation of the chunk node.\n for n in (na, nb):\n if n in node_to_primary_state:\n obs_state = node_to_primary_state[n]\n if chunk_node in chunk_node_to_obs_state:\n if chunk_node_to_obs_state[chunk_node] != obs_state:\n raise Exception('internal error: '\n 'multiple conflicting observations '\n 'within the same chunk node')\n chunk_node_to_obs_state[chunk_node] = obs_state\n\n # For each chunk node, construct the set of allowed tolerance states.\n # This is the set of primary states that do not belong\n # to any of the tolerance classes that are forbidden somewhere in the\n # region of the tree corresponding to the chunk node.\n chunk_node_to_allowed_states = {}\n for chunk_node in chunk_tree:\n\n # Get the set of forbidden tolerance classes for this chunk node.\n forbidden_tols = chunk_node_to_forbidden_tols[chunk_node]\n\n # Initialize the set of allowed states to\n # the set of all primary states not forbidden\n # by the tolerance class trajectory within the chunk node.\n allowed_states = set()\n for prim in set(primary_distn):\n if primary_to_part[prim] not in forbidden_tols:\n allowed_states.add(prim)\n\n # Further restrict the set of allowed state according to\n # observations at points within the chunk node.\n if chunk_node in chunk_node_to_obs_state:\n obs_state = chunk_node_to_obs_state[chunk_node]\n allowed_states.intersection_update({obs_state})\n\n # If no state is allowed then this is a problem.\n if not allowed_states:\n print()\n print('error report...')\n print('T:')\n for na, nb in nx.bfs_edges(T, root):\n print(na, nb, T[na][nb]['weight'])\n print('root:', root)\n for i, t_traj in enumerate(tolerance_trajectories):\n print('tolerance trajectory', i, ':')\n for na, nb in nx.bfs_edges(t_traj, root):\n weight = t_traj[na][nb]['weight']\n state = t_traj[na][nb]['state']\n print(na, nb, weight, state)\n 
print('merged tree:')\n for na, nb in nx.bfs_edges(T_merged, root):\n weight = T_merged[na][nb]['weight']\n states = T_merged[na][nb]['states']\n print(na, nb, weight, states)\n print('node to primary state:')\n for node, primary_state in node_to_primary_state.items():\n print(node, primary_state)\n print('chunk tree:')\n for na, nb in nx.bfs_edges(chunk_tree, root):\n print(na, nb)\n print('edge to chunk node:')\n for edge, cnode in sorted(edge_to_chunk_node.items()):\n na, nb = edge\n print(na, nb, cnode)\n print('chunk node to forbidden tolerance classes:')\n for cnode, forbidden_tols in chunk_node_to_forbidden_tols.items():\n print(cnode, forbidden_tols)\n raise Exception('internal error: '\n 'for this chunk node no primary state is allowed')\n\n # Store the set of allowed states for this chunk node.\n chunk_node_to_allowed_states[chunk_node] = allowed_states\n\n # Use mcy-type conditional sampling to\n # sample primary states at each node of the chunk tree.\n chunk_node_to_sampled_state = _sample_mcy.resample_states(\n chunk_tree, root,\n node_to_allowed_states=chunk_node_to_allowed_states,\n root_distn=primary_distn, P_default=P_primary)\n\n # Map the sampled chunk node primary states back onto\n # the base tree to give the sampled primary process trajectory.\n sampled_traj = nx.Graph()\n for merged_edge in nx.bfs_edges(T_merged, root):\n merged_na, merged_nb = merged_edge\n weight = T_merged[merged_na][merged_nb]['weight']\n chunk_node = edge_to_chunk_node[merged_edge]\n sampled_state = chunk_node_to_sampled_state[chunk_node]\n sampled_traj.add_edge(\n merged_na, merged_nb,\n weight=weight, state=sampled_state)\n\n # Return the resampled primary trajectory.\n return sampled_traj\n\n\ndef resample_tolerance_states(\n T, root,\n primary_to_part,\n P_tolerance, tolerance_distn,\n primary_trajectory, edge_to_event_times, tolerance_class,\n disease_data=None):\n \"\"\"\n Resample tolerance states.\n\n This function addresses the dependence among components of the tolerance\n process strictly through conditioning rather than through rate dependence.\n\n Parameters\n ----------\n T : weighted undirected acyclic networkx graph\n This is the original tree.\n root : integer\n Root of the tree.\n primary_to_part : x\n x\n P_tolerance : x\n x\n tolerance_distn : x\n x\n primary_trajectory : x\n x\n edge_to_event_times : x\n x\n tolerance_class : x\n x\n disease_data : list, optional\n A list, indexed by tolerance class,\n of maps from a node to a set of allowed tolerance states.\n\n Returns\n -------\n\n Notes\n -----\n This function resamples a tolerance trajectory for only a single\n tolerance class, and this function is not involved in\n resampling uniformization times.\n\n \"\"\"\n # This function only uses disease_data through disease_map.\n if disease_data is not None:\n disease_map = disease_data[tolerance_class]\n\n # Build a merged tree corresponding to the primary trajectory,\n # with event nodes corresponding to uniformization times\n # for the tolerance process of the tolerance class of interest.\n T_merged, event_nodes = _graph_transform.add_trajectories(T, root,\n [primary_trajectory],\n edge_to_event_times=edge_to_event_times)\n\n # Construct the 'chunk tree' whose edges\n # are in correspondence with the tolerance event nodes.\n info = _graph_transform.get_chunk_tree_type_b(\n T_merged, root, event_nodes)\n chunk_tree, edge_to_chunk_node, event_node_to_chunk_edge = info\n\n # Get the map from each chunk node to the set of\n # tolerance classes of primary states that fall 
within\n # the trajectory subtree represented by that chunk node.\n chunk_node_to_tol_set = defaultdict(set)\n for merged_edge in nx.bfs_edges(T_merged, root):\n\n # Unpack the merged edge and get the chunk node that it maps to.\n na, nb = merged_edge\n chunk_node = edge_to_chunk_node[merged_edge]\n\n # Get the tolerance class of the primary state of the trajectory\n # on the merged edge, and add its tolerance class to\n # the set of tolerance classes associated with the chunk node.\n primary_state = T_merged[na][nb]['states'][0]\n primary_tol_class = primary_to_part[primary_state]\n chunk_node_to_tol_set[chunk_node].add(primary_tol_class)\n\n # Get the map from each chunk node to the set of tolerance states\n # allowed by the disease data.\n if disease_data is not None:\n chunk_node_to_disease_restriction = dict(\n (n, {0, 1}) for n in chunk_tree)\n for merged_edge in nx.bfs_edges(T_merged, root):\n na, nb = merged_edge\n chunk_node = edge_to_chunk_node[merged_edge]\n for n in (na, nb):\n if n in disease_map:\n restriction = chunk_node_to_disease_restriction[chunk_node]\n restriction.intersection_update(disease_map[n])\n\n # For each chunk node, construct the set of allowed tolerance states.\n # This will be {1} if the primary process belongs to the\n # tolerance class of interest at any point within the subtree\n # corresponding to the chunk node.\n # Otherwise this set will be {0, 1}.\n # Unless the disease data further restricts the tolerance state.\n chunk_node_to_allowed_states = {}\n for chunk_node in chunk_tree:\n allowed_states = {0, 1}\n if disease_data is not None:\n disease_restriction = chunk_node_to_disease_restriction[chunk_node]\n allowed_states.intersection_update(disease_restriction)\n if tolerance_class in chunk_node_to_tol_set[chunk_node]:\n allowed_states.intersection_update({1})\n chunk_node_to_allowed_states[chunk_node] = allowed_states\n\n # Use mcy-type conditional sampling to\n # sample tolerance states at each node of the chunk tree.\n chunk_node_to_tolerance_state = _sample_mcy.resample_states(\n chunk_tree, root,\n node_to_allowed_states=chunk_node_to_allowed_states,\n root_distn=tolerance_distn, P_default=P_tolerance)\n\n # Map the sampled chunk node tolerance states back onto\n # the base tree to give the sampled tolerance process trajectory.\n tolerance_traj = nx.Graph()\n for merged_edge in nx.bfs_edges(T_merged, root):\n merged_na, merged_nb = merged_edge\n weight = T_merged[merged_na][merged_nb]['weight']\n chunk_node = edge_to_chunk_node[merged_edge]\n tolerance_state = chunk_node_to_tolerance_state[chunk_node]\n tolerance_traj.add_edge(\n merged_na, merged_nb,\n weight=weight, state=tolerance_state)\n\n # Return the resampled tolerance trajectory.\n return tolerance_traj\n\n\ndef get_feasible_history(ctm, T, root, node_to_primary_state,\n disease_data=None):\n \"\"\"\n Find an arbitrary feasible history.\n\n Parameters\n ----------\n ctm : instance of CompoundToleranceModel\n Model defining the compound Markov process.\n T : weighted undirected acyclic networkx graph\n This is the original tree.\n root : integer, optional\n Root of the tree.\n node_to_primary_state : dict\n A sparse map from a node to a known primary state.\n disease_data : list, optional\n A list, indexed by tolerance class,\n of maps from a node to a set of allowed tolerance states.\n\n Returns\n -------\n primary_trajectory : weighted undirected acyclic networkx graphs\n Primary process trajectory.\n Redundant nodes have been removed.\n tolerance_trajectories : seq of weighted 
undirected acyclic networkx graphs\n Sequence of tolerance trajectories.\n Redundant nodes have not been removed.\n\n Notes\n -----\n The returned history is not sampled according to any particularly\n meaningful distribution.\n It is up to the caller to remove redundant self-transitions.\n The primary process is assumed to be time-reversible.\n\n \"\"\"\n # Get the tolerance state distribution, rate matrix,\n # and uniformized tolerance transition probability matrix.\n Q_tolerance = _tmjp.get_tolerance_rate_matrix(ctm.rate_off, ctm.rate_on)\n P_tolerance = _sample_mjp.get_uniformized_transition_matrix(Q_tolerance)\n\n # Get a primary process proposal rate matrix\n # that approximates the primary component of the compound process.\n Q_proposal = _tmjp.get_primary_proposal_rate_matrix(\n ctm.Q_primary, ctm.primary_to_part, ctm.tolerance_distn)\n\n # Get the uniformized transition probability matrix\n # corresponding to the primary proposal transition rate matrix.\n P_proposal = _sample_mjp.get_uniformized_transition_matrix(Q_proposal)\n\n # Sample the primary process trajectory using this proposal.\n primary_trajectory = _sample_mcx.get_feasible_history(\n T, node_to_primary_state,\n root=root, root_distn=ctm.primary_distn,\n P_default=P_proposal)\n\n # Remove redundant nodes in the primary process trajectory\n # so that it can be more efficiently used as a background\n # for sampling the tolerance process trajectories.\n all_rnodes = _graph_transform.get_redundant_degree_two_nodes(\n primary_trajectory)\n expendable_rnodes = all_rnodes - set(T)\n primary_trajectory = _graph_transform.remove_redundant_nodes(\n primary_trajectory, expendable_rnodes)\n\n # Get the times of the primary trajectory events\n # along edges of the base tree.\n primary_event_map = _graph_transform.get_event_map(\n T, root, primary_trajectory, predecessors=None)\n\n # Initialize the list of tolerance process trajectories.\n tolerance_trajectories = []\n for tolerance_class in range(ctm.nparts):\n\n # Define tolerance process uniformization event times,\n # so that an assignment of tolerance states will be possible.\n # This can be accomplished by putting a tolerance process\n # uniformization event at a random time on each\n # primary trajectory segment.\n edge_to_event_times = {}\n for base_edge, events in primary_event_map.items():\n\n # Unpack the base edge nodes, ordered away from the root.\n base_na, base_nb = base_edge\n\n # Initialize the set of tolerance event times.\n tolerance_event_times = set()\n\n # Add a tolerance process event\n # before the first primary process transition.\n dead_time = T[base_na][base_nb]['weight']\n if events:\n dead_time = min(tm for tm, obj in events)\n tolerance_event_times.add(random.uniform(0, dead_time))\n\n # Add a tolerance process event\n # into every segment that follows a primary process transition.\n for tm, primary_edge_object in events:\n edge_length = primary_edge_object['weight']\n tolerance_event_times.add(random.uniform(tm, tm + edge_length))\n\n # Define the set of tolerance process event times for this edge.\n edge_to_event_times[base_edge] = tolerance_event_times\n\n # Sample the rest of the tolerance trajectory\n # by sampling the tolerance states given the uniformized timings.\n tolerance_traj = resample_tolerance_states(\n T, root,\n ctm.primary_to_part,\n P_tolerance, ctm.tolerance_distn,\n primary_trajectory, edge_to_event_times, tolerance_class,\n disease_data=disease_data)\n\n # Add the tolerance trajectory to the list.\n 
tolerance_trajectories.append(tolerance_traj)\n\n # Return the feasible trajectories.\n return primary_trajectory, tolerance_trajectories\n\n","sub_path":"raoteh/sampler/_sample_tmjp.py","file_name":"_sample_tmjp.py","file_ext":"py","file_size_in_byte":24441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292567081","text":"# To add a new cell, type '#%%'\n# To add a new markdown cell, type '#%% [markdown]'\n#%%\n# Change directory to VSCode workspace root so that relative path loads work correctly. Turn this addition off with the DataScience.changeDirOnImportExport setting\n# ms-python.python added\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), '../..'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n\n\n#%%\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nrecent_grads = pd.read_csv('/home/ricabenhossi/Development/Trainning/PythonDS_AcademIA/PrepExam/Recent_graduation_data.csv')\nprint('Firts 5 rows\\n', recent_grads.head(5))\nprint('Last 5 rows\\n', recent_grads.tail(5))\nprint('Columns of Data Frame\\n', recent_grads.columns)\nprint('The SHAPE or format of the Data Frame\\n', recent_grads.shape, '173 ROWS per 21 COLUMNS')\nprint('Show the types of the data inside Data Frame\\n', recent_grads.dtypes)\nprint('Show the summary statistics\\n', recent_grads.describe())\nprint('Show the summary statistics EXCLUDING some type(s) of data\\n', recent_grads.describe(exclude=['object']))\n\n# NOTE - Replaceing Missing Values\ncolumns_with_missing_values = ['median', 'p25th', 'p75th']\nprint(recent_grads[columns_with_missing_values].dtypes)\n# Print all occurrences is in the DF. In this case there are all numbers, except for an 'UN' occurrence\nprint(recent_grads['median'].unique())\n# Replace missing values for numpy NaN (Not a Number)\nfor column in columns_with_missing_values:\n recent_grads.loc[recent_grads[column] == 'UN', column] = np.nan\n\nsw_col = recent_grads['sharewomen']\nprint('show only one (few) columns:\\n', sw_col)\nmax_sw = np.amax(sw_col)\nprint('Max value of a columns (sw_col) sing numpy.amax():\\n', max_sw)\nprint('Output the row containing the maximum percentage of women:\\n', recent_grads[(recent_grads['sharewomen'] == max_sw)])\n\n# Converting to a Numpy Array\nrecent_grads_np = recent_grads[['unemployed', 'low_wage_jobs']] # Selecting columns we need\nprint(type(recent_grads_np))\nrecent_grads_np = np.array(recent_grads_np)\nprint(type(recent_grads_np))\n\n# Calculate correlation between recent_grads_np columns\nprint(np.corrcoef(recent_grads_np[:, 0], recent_grads_np[:, 1])) # Correcoef of all elements with column 0 ([:, 0]) and with column 1 ([:, 1])\n\n# Adding a column\nrecent_grads['sharemen'] = recent_grads['men'] / recent_grads['total']\nprint(recent_grads['sharemen'])\n\n# Compare 2 Data frames\nprint(recent_grads[['sharemen']].equals(recent_grads[['sharemen']]))\n\nmax_men = np.amax(recent_grads['sharemen'])\nprint('Max value of a columns (sharemen) sing numpy.amax():\\n', max_men)\nprint('Output the row containing the maximum percentage of women:\\n', recent_grads[recent_grads['sharemen'] == max_men])\n\nrecent_grads['gender_diff'] = recent_grads['sharewomen'] - recent_grads['sharemen']\nrecent_grads['gender_diff'] = recent_grads['gender_diff'].abs()\nprint('New column: Gender Diff with abslute values:\\n', recent_grads['gender_diff'])\nprint('Show the 5 smallest valuee using df.nsmallest():\\n', recent_grads.nsmallest(5, 'gender_diff'))\nprint(recent_grads.nlargest(5, 
'gender_diff')[['major_category', 'sharewomen', 'sharemen', 'total', 'gender_diff']])\n\n# Filtering Rows\ndiff_30 = recent_grads['gender_diff'] > .30 # Result in Boolean\nmore_men = recent_grads['men'] > recent_grads['women'] # Result in Boolean\n# Combining more_men with diff_30\nmore_men_and_diff_30 = np.logical_and(more_men, diff_30) # Combine both boolean TRUE and TRUE = TRUE\nprint('combining more_men and diff_30 using np.logical_and():\\n', more_men_and_diff_30)\n\n# Find rows with more men and and gender rate difference greater than .30\nfewer_women = recent_grads.loc[more_men_and_diff_30 == True]\n\n# Grouping and count\nprint('Group by major category and count: \\n', recent_grads.groupby(['major_category']).major_category.count())\nprint('Group departments that have less women by category and count: \\n', fewer_women.groupby(['major_category']).major_category.count())\nprint('Report average gender difference by major category:\\n', recent_grads.groupby(['major_category']).gender_diff.mean())\n# Find average number of low wage jobs and unemployment rate of each major category\ndept_stats = recent_grads.groupby(['major_category'])['low_wage_jobs', 'unemployment_rate'].mean()\nprint('Find average number of low wage jobs and unemployment rate of each major category:\\n', dept_stats)\n\n'''\n # NOTE From this point bellow we will talk about PLOTATION\n'''\n\nplt.scatter(recent_grads['unemployment_rate'], recent_grads['low_wage_jobs'])\nplt.xlabel('Unemployment rate')\nplt.ylabel('Low pay jobs')\n\nplt.show()\n\n# Changin color and shape of scatter\nplt.scatter(recent_grads['unemployment_rate'], recent_grads['low_wage_jobs'], color='r', marker='^')\nplt.show()\n\n# Hsitogram of sharewomen\nsharewomen = recent_grads['sharewomen']\nplt.hist(sharewomen)\nplt.show()\n\n# NOTE Plotting diectlly using PANDAS\ndept_stats = recent_grads.groupby(['major_category'])['low_wage_jobs', 'unemployment_rate'].mean()\ndept_stats.plot(kind='scatter', x='unemployment_rate', y='low_wage_jobs')\nplt.show()\n\ndf = recent_grads.groupby(['major_category']).non_college_jobs.sum()\ndf.plot(kind='bar')\nplt.show()\n\n# Two bar graph\ndf1 = recent_grads.groupby(['major_category'])['college_jobs', 'non_college_jobs'].sum()\ndf1.plot(kind='bar')\nplt.show()\n\n# Dropping values\nprint(recent_grads.size)\nrecent_grads.dropna(axis=0, inplace=True)\nprint(recent_grads.size)\n\n\n# Converto to number\nrecent_grads['median'] = pd.to_numeric(recent_grads['median']) / 1000\nrecent_grads['p25th'] = pd.to_numeric(recent_grads['p25th']) / 1000\nrecent_grads['p75th'] = pd.to_numeric(recent_grads['p75th']) / 1000\n\ncolumns = ['median', 'p25th', 'p75th']\nsal_quantiles = recent_grads.groupby(['major_category'])[columns].mean()\nsal_quantiles.plot()\n\n# Setting axis TICKS\nplt.xticks(np.arange(len(sal_quantiles.index)), sal_quantiles.index, rotation='vertical')\nplt.show()\n\n# Plot with SUBPLOTS\nsal_quantiles.plot(subplots=True)\n","sub_path":"PrepExam/exam_prep_exercises.py","file_name":"exam_prep_exercises.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"576270100","text":"import BeautifulSoup\nimport urllib\nimport urllib2\nimport os\nimport json\nimport pymysql\n\n\n# Global configurations\nTABLE_NAME = \"university_info\"\nUNIV_COUNT = 200\n\n\ndef crawl_all_universities(overwrite):\n # Crawl each university from #1 to #UNIV_COUNT\n crawl_pages(range(1, UNIV_COUNT + 1), overwrite)\n\n\ndef crawl_pages(univ_ids, 
overwrite):\n print(\"=== START CRAWLING ===\")\n failures = []\n for univ_id in univ_ids:\n if crawl_wiki_page(univ_id, overwrite) is None:\n failures.append(univ_id)\n print(\"=== CRAWLING COMPLETE ===\")\n print(failures)\n\n\ndef crawl_wiki_page(univ_id, overwrite):\n file_name = \"html\" + os.sep + str(univ_id) + \".html\"\n\n # Check if already crawled on non-overwrite mode\n if overwrite is False and os.path.isfile(file_name):\n print(\"#\" + str(univ_id) + \" exists.\")\n return None\n\n univ_name = get_univ_name(univ_id)\n\n # Search the Wikipedia's internal page id for university's name\n query = \"https://en.wikipedia.org/w/api.php?action=query&titles=\" + urllib.quote(univ_name) + \"&prop=revisions&rvprop=content&format=json\"\n try:\n pageid_json = json.loads(urllib2.urlopen(query).read())\n pageids = pageid_json['query']['pages']\n except Exception:\n return None\n if len(pageids) == 0:\n return None\n\n # Fetch the url for the internal page id\n if pageids is None:\n return None\n for i in pageids:\n if pageids[i]['ns'] == 0:\n pageid = i\n query2 = 'https://en.wikipedia.org/w/api.php?action=query&prop=info&inprop=url&format=json&pageids=' + pageid\n try:\n url_json = json.loads(urllib2.urlopen(query2).read())\n page_url = url_json['query']['pages'][pageid]['fullurl']\n except Exception:\n return None\n\n # Crawl the page and parse the page's info box\n s = urllib2.urlopen(page_url).read()\n soup = BeautifulSoup.BeautifulSoup(s)\n info_box = soup.find('table', {'class': 'infobox vcard'}) # soup.select('#mw-content-text > div > table.infobox.vcard')[0].text\n\n # # Process the elements' styles in the info box\n # elems = info_box.findAll(True)\n # for elem in elems:\n # del elem['style']\n\n if info_box is None:\n print(\"#\" + str(univ_id) + \" failed!!!\")\n return None\n\n # Write to file\n f = open(file_name, 'w')\n f.write(str(info_box))\n f.close()\n print(\"#\" + str(univ_id) + \" done.\")\n\n return info_box\n\n\ndef get_univ_name(univ_id):\n # Connect to database\n cnx = pymysql.connect(\n host='127.0.0.1',\n database='coni_db',\n user='coni_admin',\n password='admin',\n )\n\n # Perform query on the database\n cursor = cnx.cursor()\n query = \"SELECT id, name FROM university_abbr WHERE id=\" + str(univ_id)\n cursor.execute(query)\n cnx.commit()\n\n # Fetch the first record's name\n univ_name = None\n for (id, name) in cursor:\n univ_name = name\n break\n\n # Process the name\n left_bracket_idx = univ_name.find(\"(\")\n if left_bracket_idx > 0:\n univ_name = univ_name[:left_bracket_idx].strip()\n dash_idx = univ_name.find(\"-\")\n if dash_idx > 0:\n univ_name = univ_name[:dash_idx].strip()\n\n # Close the connection and return the name\n cnx.close()\n return univ_name\n\n\ndef insert_records(drop_existing):\n # Connect to database\n cnx = pymysql.connect(\n host='127.0.0.1',\n database='coni_db',\n user='coni_admin',\n password='admin',\n )\n cursor = cnx.cursor()\n\n # Drop the existing table\n if drop_existing:\n cursor.execute(\"DROP TABLE IF EXISTS \" + TABLE_NAME)\n cnx.commit()\n print(\"=== DROPPED EXISTING TABLE ===\")\n\n # Create a table called \"university_info\"\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS \" + TABLE_NAME + \" (`id` INT(11) unsigned NOT NULL, info LONGTEXT, PRIMARY KEY (`id`))\")\n cnx.commit()\n print(\"=== CREATED NEW TABLE ===\")\n\n # Connect to database\n cnx = pymysql.connect(\n host='127.0.0.1',\n database='coni_db',\n user='coni_admin',\n password='admin',\n )\n cursor = cnx.cursor()\n\n # Insert each university's html into the 
database\n print(\"=== START CRAWLING ===\")\n for univ_id in range(1, UNIV_COUNT + 1):\n # Read html from the crawled file\n file_name = \"html\" + os.sep + str(univ_id) + \".html\"\n f = open(file_name, 'r')\n html = f.read()\n\n # Escape single quote\n html = html.replace(\"'\", \"''\")\n\n # Perform insertion on the database\n insertion_sql = \"INSERT INTO \" + TABLE_NAME + \" (id, info) VALUES (\" + str(univ_id) + \", '\" + html + \"')\"\n cursor.execute(insertion_sql)\n cnx.commit()\n\n\n# Start crawling\n# crawl_all_universities(False)\n\n# TODO: Change these universities' names to English!\n# ids = [52, 68, 72, 100, 101, 121, 163]\n# crawl_pages(ids, False)\n","sub_path":"wikipedia_crawler.py","file_name":"wikipedia_crawler.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"590148972","text":"# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\nfrom collections import deque\r\n\r\nclass Solution(object):\r\n def levelOrder(self, root):\r\n \"\"\"\r\n :type root: TreeNode\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n if root is None: return []\r\n queue = deque([(root, 0)])\r\n curLevel = 0\r\n result = []\r\n curResult = []\r\n while len(queue) > 0:\r\n cur, level = queue.popleft()\r\n if level > curLevel:\r\n result.append(curResult)\r\n curResult = [cur.val]\r\n curLevel = level\r\n else: \r\n curResult.append(cur.val)\r\n if cur.left is not None:\r\n queue.append((cur.left, level + 1))\r\n if cur.right is not None:\r\n queue.append((cur.right, level + 1))\r\n if len(curResult) > 0: result.append(curResult)\r\n return result","sub_path":"leetcode/102.binary-tree-level-order-traversal/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"384726472","text":"from django.shortcuts import render, render_to_response\nfrom user_info.models import Regist\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.core.context_processors import csrf\nfrom .forms import RegistForm, SearchQuestionnaires\n\n\n\n# def questionnaire(request):\n# \targs = {}\n# \targs.update(csrf(request))\n# \targs = {'regist':Regist.objects.all(),'username': auth.get_user(request).username}\n# \treturn render_to_response('questionnaire.html',args)\n\ndef user_info(request):\n args = {}\n args.update(csrf(request))\n args = {'username': auth.get_user(request).username}\n return render_to_response('user_info.html', args)\n\n\ndef questionnaire(request):\n args = {}\n args.update(csrf(request))\n\n try:\n regist = Regist.objects.get(user=request.user.id)\n except Regist.DoesNotExist:\n regist = Regist()\n\n user_auth = auth.get_user(request).id\n user_user = User.objects.get(id=user_auth)\n args['isSave'] = False\n if request.method == 'POST':\n post = request.POST.copy()\n post['user'] = user_auth\n formset = RegistForm(instance=regist, data=post)\n # formset.user = user_user\n\n if formset.is_valid():\n form = formset.save(commit=False)\n form.user = user_user\n if form.save():\n args['isSave'] = True\n\n\n args['user_auth'] = user_auth\n args['form'] = RegistForm(instance=regist)\n args['username'] = auth.get_user(request).username\n return render(request, 'questionnaire.html', args)\n\n\n\ndef firstPage(request):\n args = {}\n\n user = 
auth.authenticate()\n\n obj = Regist.objects.all()\n\n if request.method == 'POST':\n\n\n if request.POST['regist_date']:\n anket_date = request.POST['regist_date']\n obj = obj.filter(regist_date__gte=anket_date)\n\n if request.POST['regist_city']:\n anket_city = request.POST['regist_city']\n obj = obj.filter(regist_city=anket_city)\n\n if request.POST['regist_dateregist']:\n anket_dateregist = request.POST['regist_dateregist']\n obj = obj.filter(regist_dateregist__gte=anket_dateregist)\n if request.POST['regist_dateregist_do']:\n anket_dateregist_do = request.POST['regist_dateregist_do']\n obj = obj.filter(regist_dateregist__lte=anket_dateregist_do)\n\n args = {'username': auth.get_user(request).username, 'questionnaires': obj[::-1]}\n\n # if request.user.is_authenticated():\n # args['this_user'] = Regist.objects.get(user=request.user.id)\n\n args.update(csrf(request))\n args['form'] = SearchQuestionnaires()\n\n return render_to_response('first_page.html', args)\n\n# Create your views here.\n","sub_path":"znakomstva/user_info/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"630656439","text":"import os\n\ndef main():\n flag = True\n\n while flag:\n print(\"Choose an option - \")\n print(\" 1. Dump mongoDB \")\n print(\" 2. Restore mongoDB from the folder at ./mongo_dump/job-scheduler\")\n try:\n usr_input = int(input())\n except ValueError:\n print(\"Wrong option selected\")\n continue\n \n if usr_input == 1:\n print(\"Running script scripts/db_dump.sh to dump mongoDB data\")\n os.system(\"sh scripts/db_dump.sh\")\n flag = False\n elif usr_input == 2:\n print(\"Running script scripts/db_restore.sh to dump mongoDB data\")\n os.system(\"sh scripts/db_restore.sh\")\n flag = False\n else:\n continue\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts_wrapper.py","file_name":"scripts_wrapper.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"555791420","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 14 23:32:25 2016\n\n@author: Marion\n\"\"\"\n\nimport urllib\nimport xml.etree.ElementTree as ET\nurl='http://python-data.dr-chuck.net/comments_229583.xml '\nlink=urllib.request.urlopen(url)\ndata=link.read()\ntree=ET.fromstring(data)\ncounts=tree.findall('.//count')\ns=0\nfor count in counts:\n s=s+int(count.text)\nprint(s)","sub_path":"ExtractXML.py","file_name":"ExtractXML.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"326209887","text":"from jinja2 import StrictUndefined\nfrom flask import Flask, jsonify, render_template, redirect, request, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom model import db, connect_to_db\nimport os\n\napp = Flask(__name__)\n\n# Raises an error if you use undefined Jinja variable.\napp.jinja_env.undefined = StrictUndefined\n\n\n\n@app.route('/')\ndef index():\n \"\"\"Homepage.\"\"\"\n\n return render_template(\"landingpage.html\")\n\n\nif __name__ == \"__main__\":\n # We have to set debug=True here, since it has to be True at the\n # point that we invoke the DebugToolbarExtension\n # app.debug = True\n # app.config['SQLALCHEMY_ECHO'] = True\n\n PORT = int(os.environ.get(\"PORT\", 5000))\n DEBUG = \"NO_DEBUG\" not in os.environ\n SECRET_KEY = os.environ.get(\"FLASK_SECRET_KEY\", 
\"asdf9k$\")\n\n # Required to use Flask sessions and the debug toolbar\n app.secret_key = SECRET_KEY\n\n\n app.jinja_env.auto_reload = True\n connect_to_db(app, os.environ.get(\"DATABASE_URL\"))\n\n # Use the DebugToolbar\n # DebugToolbarExtension(app)\n\n app.run(port=PORT, debug=DEBUG)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"352515295","text":"#!/usr/bin/env python2\nfrom base.client import ClientManager\nfrom base.server import PartovServer, MockServer\nfrom base.test import TestExecuter\nfrom base.logger import logger\nfrom base.config import config\n\nimport atexit, sys, argparse, os, signal, time, importlib\nimport tests\n\n# print(__import__('base', globals=globals()))\n\nsettings = {}\nparser = argparse.ArgumentParser(description=\"PA2 Tester\")\n\n\ndef main():\n client_manager = ClientManager(config['cf_path'])\n partov_server = PartovServer(client_manager)\n \n mock_server = None\n # mock_server = MockServer(client_manager)\n test_executor = TestExecuter(client_manager, partov_server=partov_server, mock_server=mock_server)\n test_executor.load_tests(tests)\n \n @atexit.register\n def on_exit():\n client_manager.clean_clients()\n # mock_server.stop()\n partov_server.stop()\n logger.flush()\n logger.stop()\n\n def sigint(signal, frame):\n on_exit()\n exit(0)\n signal.signal(signal.SIGINT, sigint)\n\n if not client_manager.check_cwd():\n logger.log(\"Client Framework not found\", break_after=True, color=\"error\")\n parser.print_help()\n time.sleep(1)\n sys.exit(1)\n\n if not client_manager.check_exec():\n logger.log(\"Target file '%s' not found, has the project been compiled?\" % (config['processor_target'] + \" or \" + config['router_target']))\n\n if config['partov_server'].get('renew_server', False):\n client_manager.free_map()\n if not client_manager.new_map():\n logger.log(\"Failed to create new map\", break_after=True, color=\"error\")\n time.sleep(1)\n sys.exit(1)\n\n\n partov_server.start()\n # mock_server.start()\n # raw_input(\"Waiting for input\")\n # network_monitor.start()\n test_executor.execute_tests()\n on_exit()\n\ndef create_loggers():\n logdefs = config.get('loggers', [])\n for logdef in logdefs:\n name = logdef['type']\n module_name, class_name = name[:name.rfind('.')], name[name.rfind('.') + 1:]\n module = __import__(module_name, fromlist=[class_name], globals=globals())\n handlerclass = getattr(module, class_name)\n\n outfile_name = logdef['output']\n if len(outfile_name.strip()) == 0:\n continue\n outfile = {\n 'stdout': sys.stdout,\n 'stdin': sys.stdin,\n 'stderr': sys.stderr\n }.get(outfile_name, None)\n\n if not outfile:\n outfile = open(outfile_name, 'w')\n\n handler = handlerclass(outfile)\n logger.add_handler(handler)\n logger.start()\n\n\ndef option_type(s):\n try:\n k, v = s.split(\"=\")\n return (k, v)\n except:\n raise argparse.ArgumentTypeError(\"Invalid option, has to be in the format: 'key=value'\")\n\ndef parse_args():\n parser.add_argument('-f', nargs='?', default=None, type=str, help='Client Framework location')\n parser.add_argument('-p', nargs='?', default=None, type=int, help='Partov port')\n parser.add_argument('-c', nargs='?', default=None, type=str, help='Config File')\n parser.add_argument('--options', nargs='+', dest=\"options\", type=option_type, help='Extra options')\n args, unknown = parser.parse_known_args()\n\n config['options'] = args.options if args.options else []\n\n if 
args.c:\n config_file = args.c\n else:\n config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.json')\n config.read_from_file(config_file, options=config['options'])\n\n if args.f:\n config['cf_path'] = args.f\n if args.p:\n config['partov_server']['port'] = args.p\n\n info_path = os.path.join(config['cf_path'], \"info.sh\")\n if not config.read_info_file(info_path):\n logger.log(\"Can't read %s, is the client framework path correct?\", break_after=True, color=\"error\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n parse_args()\n create_loggers()\n main()\n\n","sub_path":"PA2_test/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"176347523","text":"#!/usr/bin/env python\n\nimport cherrypy\nimport simplejson\n\n#from Page import Page\nfrom couchdb_layer.mcm_database import database\nimport copy\nfrom tools.locator import locator\n\nclass Search(object):\n\n\tdef search(self, db_name, query, page,query_list=[]):\n\t\tself.db_name = db_name\n\t\tself.db = database(self.db_name)\n\t\tself.query = query\n\t\tself.query_list = query_list\n\t\tself.page = int(page)\n\t\treturn self.run_query() \n\n\t\t\n\tdef run_query(self):\n\t\tif len(self.query_list):\n\t\t\tresults_list= self.db.queries( self.query_list )\n\t\t\tresults = { 'results' : results_list}\n\t\t\t\n\t\t\tfinal = simplejson.dumps(results)\n\t\t\treturn final\n\t\telse:\n\t\t\tresults = {}\n\t\t\tresults['results'] = []\n\t\t\tif not self.query or self.query=='\"\"':\n\t\t\t\tres = self.db.get_all(page_num=self.page)\n\t\t\telse:\n\t\t\t\tres = self.db.query(self.query, page_num=self.page)\n\t\t\t\n\t\t\tquery_result = self.db.unique_res( res )\n\t\t\tresults['results'] = query_result\n\t\t\t#results['results'] = res\n\t\t\tfinal = simplejson.dumps(results)\n\t\t\treturn final\n\n\t#def index(self, db_name='campaigns',query='', page=0):\n\tdef index(self, **args):\n\t\tdb_name='requests'\n\t\tquery=''\n\t\tquery_list=[]\n\t\tpage=0\n\t\tmanual_keys=['db_name','query','page']\n\t\tif 'db_name' in args:\n\t\t\tdb_name=args['db_name']\n\t\t\targs.pop('db_name')\n\t\tif 'query' in args:\n\t\t\tquery=args['query']\n\t\t\targs.pop('query')\n\t\tif 'page' in args:\n\t\t\tpage=args['page']\n\t\t\targs.pop('page')\n\t\t# retrieve the _design/object document\n\t\todb=database(db_name)\n\t\tdesign = odb.get('_design/%s'%(db_name))\n\t\tallowed_key_search = design['views'].keys()\n\n\t\tvetoed_keys = []\n\t\tfor (view,f) in design['views'].items():\n\t\t\tif 'for(' in f['map'] or 'for (' in f['map']:\n\t\t\t\tvetoed_keys.append( view )\n\t\tallowed_key_search.sort()\n\t\tmultiple_view=[]\n\t\t#### \n\t\t## to switch on/off the view creation on the fly\n\t\tsimple_search=(not locator().isDev())\n\t\tsimple_search=False\n\t\t####\n\t\tfor key in filter (lambda s : '-' not in s, allowed_key_search):\n\t\t\tif key in args:\n\t\t\t\tif key in vetoed_keys or simple_search:\n\t\t\t\t\tquery_list.append('%s==%s'%(key,args[key]))\n\t\t\t\telse:\n\t\t\t\t\tif args[key].isdigit():\n\t\t\t\t\t\tmultiple_view.append( (key, args[key]) )\n\t\t\t\t\telse:\n\t\t\t\t\t\tmultiple_view.append( (key, '\"'+args[key]+'\"') )\n\n\t\t\t\targs.pop(key)\n\n\t\tif len(multiple_view)>1:\n\t\t\tmultiple_search = '-'.join( map( lambda p:p[0], multiple_view))\n\t\t\t## faster query with multiple keys\n\t\t\tif not multiple_search in allowed_key_search:\n\t\t\t\t## try harder to find 
it\n\t\t\t\treally_not_there=True\n\t\t\t\tm_set = set( map( lambda p:p[0], multiple_view) )\n\t\t\t\tfor key in filter (lambda s : '-' in s, allowed_key_search):\n\t\t\t\t\t## parse all composite view\n\t\t\t\t\tif set(key.split('-')) == m_set:\n\t\t\t\t\t\t#we found one that has the same search in absolute, just the order is different\n\t\t\t\t\t\t# then re-order multiple_view so as to map to the existing view\n\t\t\t\t\t\tnew_multiple_view = []\n\t\t\t\t\t\tfor sv in key.split('-'):\n\t\t\t\t\t\t\tnew_multiple_view.append( filter( lambda e : e[0]==sv, multiple_view) [0] )\n\t\t\t\t\t\tmultiple_view = new_multiple_view\n\t\t\t\t\t\tmultiple_search = '-'.join( map( lambda p:p[0], multiple_view))\n\t\t\t\t\t\treally_not_there=False\n\t\t\t\t\t\tbreak\n\t\t\t\tif really_not_there:\n\t\t\t\t #tempatively add the view to the design\n\t\t\t\t\tnew_func = \"function(doc){ emit([%s], doc._id);}\"%( ','.join(map( lambda k: \"doc.%s\"%(k), map( lambda p:p[0], multiple_view) )))\n\t\t\t\t\tdesign['views'] [ multiple_search ] = { \"map\" : new_func }\n\t\t\t\t\tsaved = odb.update( design )\n\t\t ##### NOTE ####\n\t\t\t\t ## the query that will follow will be super slow because the view needs to be re-build\n\t\t\t\t\n\t\t\t\t\n\t\t\tm_query = '%s==[%s]'%(multiple_search,\n\t\t\t\t\t ','.join( map( lambda p:p[1], multiple_view))\n\t\t\t\t\t )\n\t\t\tquery_list.append( m_query )\t \n\t\t\t#query_list =[]\n\t\telif len(multiple_view)==1:\n\t\t\tm_query = '%s==%s'%( multiple_view[0][0], multiple_view[0][1])\n\t\t\tquery_list.append( m_query )\n\n\t\t#revert to simple query for one query only\n\t\tif len(query_list)==1:\n\t\t\tquery=query_list[0]\n\t\t\tquery_list=[]\n\n\t\tif len(args):\n\t\t\t## check whether the key is actually a member of the object in db and put back the view in the odb design\n\t\t\treturn simplejson.dumps(args)\n\t\t\t#return simplejson.dumps(design['views'].keys())\n\t\treturn self.search(db_name, query, page, query_list)\n\n\tsearch.exposed = True\t\n\tindex.exposed = True\n","sub_path":"mcm/web_apps/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"267927746","text":"#!/usr/bin/env python3\nimport sys\nfrom hashlib import md5\n\nsecret = sys.stdin.readline().strip()\n\ni = 0\n\nwhile True:\n digest = md5(f\"{secret}{i}\".encode(\"utf-8\")).hexdigest()\n if digest.startswith(\"00000\"):\n print(i)\n break\n\n i += 1\n","sub_path":"2015/d04/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"68224268","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 4 14:05:25 2018\r\n\r\n@author: tgill\r\n\"\"\"\r\n\r\nfrom keras.layers import Input, Dense, Dropout, Activation, Bidirectional, CuDNNGRU, Embedding, Concatenate, CuDNNLSTM, Multiply, Add, Lambda, TimeDistributed, Dot, GlobalAvgPool1D, GlobalMaxPool1D, Permute, BatchNormalization, Conv1D, MaxPooling1D, Flatten\r\nfrom keras.models import Model\r\nfrom keras.activations import softmax\r\nfrom keras import backend as K\r\n\r\ndef nn(input_dim=56, output_dim=2, layers=3, units=32, dropout=0.2):\r\n inputs = Input(shape=(input_dim,))\r\n x=Dense(units, activation='relu')(inputs)\r\n x=Dropout(dropout)(x)\r\n x=Dense(units, activation='relu')(x)\r\n x=Dropout(dropout)(x)\r\n x=Dense(units, activation='relu')(x)\r\n x=Dropout(dropout)(x)\r\n x=Dense(output_dim)(x)\r\n 
x=Activation('softmax')(x)\r\n model = Model(inputs=inputs, outputs=x)\r\n return model\r\n\r\ndef siamois(maxlen, max_features):\r\n inp1 = Input(shape=(maxlen,))\r\n inp2 = Input(shape=(maxlen,))\r\n \r\n# com = Bidirectional(CuDNNGRU(64, return_sequences=True))\r\n# com = Dropout(0.3)(com)\r\n emb = Embedding(max_features, 256)\r\n #com = Bidirectional(CuDNNGRU(64, return_sequences=False))\r\n com = CuDNNLSTM(64, return_sequences=False)\r\n #com2 = CuDNNGRU(64, return_sequences=False)\r\n \r\n x1 = emb(inp1)\r\n x1 = com(x1)\r\n #x1 = Dropout(0.2)(x1)\r\n #x1 = com2(x1)\r\n \r\n x2 = emb(inp2)\r\n x2 = com(x2)\r\n #x2 = Dropout(0.2)(x2)\r\n #x2 = com2(x2)\r\n \r\n #merge=Concatenate()([x1, x2])\r\n merge = submult(x1, x2)\r\n merge = Dropout(0.2)(merge)\r\n merge = Dense(512, activation='relu')(merge)\r\n merge = Dropout(0.2)(merge)\r\n #merge = Dense(256, activation='relu')(merge)\r\n #merge = Dropout(0.2)(merge)\r\n \r\n preds = Dense(2, activation='softmax')(merge)\r\n \r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\ndef siamois_seq(maxlen, max_features):\r\n inp1 = Input(shape=(maxlen,))\r\n inp2 = Input(shape=(maxlen,))\r\n \r\n emb = Embedding(max_features, 256)\r\n com = CuDNNGRU(64, return_sequences=True)\r\n \r\n x1 = emb(inp1)\r\n x1 = com(x1)\r\n \r\n x2 = emb(inp2)\r\n x2 = com(x2)\r\n \r\n pool = GlobalMaxPool1D()\r\n avg = GlobalAvgPool1D()\r\n \r\n x1 = Concatenate()([pool(x1), avg(x1)])\r\n x2 = Concatenate()([pool(x2), avg(x2)])\r\n \r\n merge = submult(x1, x2)\r\n merge = Dropout(0.1)(merge)\r\n merge = Dense(512, activation='relu')(merge)\r\n merge = Dropout(0.2)(merge)\r\n \r\n preds = Dense(2, activation='softmax')(merge)\r\n \r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\ndef decomposable_attention(maxlen, max_features, projection_hidden=0, projection_dropout=0.2, projection_dim=64, compare_dim=128, compare_dropout=0.2, dense_dim=64, dense_dropout=0.2):#maxlen, max_features, projection_hidden=0, projection_dropout=0.2, projection_dim=300, compare_dim=500, compare_dropout=0.2, dense_dim=300, dense_dropout=0.2\r\n inp1 = Input(shape=(maxlen,))\r\n inp2 = Input(shape=(maxlen,))\r\n \r\n emb = Embedding(max_features, 256)\r\n emb1 = emb(inp1)\r\n emb2 = emb(inp2)\r\n \r\n # Projection\r\n projection_layers = []\r\n if projection_hidden > 0:\r\n projection_layers.extend([\r\n Dense(projection_hidden, activation='relu'),\r\n Dropout(rate=projection_dropout),\r\n ])\r\n projection_layers.extend([\r\n Dense(projection_dim, activation=None),\r\n Dropout(rate=projection_dropout),\r\n ])\r\n encoded1 = time_distributed(emb1, projection_layers)\r\n encoded2 = time_distributed(emb2, projection_layers)\r\n \r\n # Attention\r\n att1, att2 = soft_attention_alignment(encoded1, encoded2)\r\n \r\n # Compare\r\n combine1 = Concatenate()([encoded1, att2, submult(encoded1, att2)])\r\n combine2 = Concatenate()([encoded2, att1, submult(encoded2, att1)])\r\n compare_layers = [\r\n Dense(compare_dim, activation='relu'),\r\n Dropout(compare_dropout),\r\n Dense(compare_dim, activation='relu'),\r\n Dropout(compare_dropout),\r\n ]\r\n compare1 = time_distributed(combine1, compare_layers)\r\n compare2 = time_distributed(combine2, compare_layers)\r\n \r\n # Aggregate\r\n agg1 = apply_multiple(compare1, [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n agg2 = apply_multiple(compare2, [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n \r\n # Merge\r\n merge = Concatenate()([agg1, agg2])\r\n #merge 
= BatchNormalization()(merge)\r\n dense = Dense(dense_dim, activation='relu')(merge)\r\n dense = Dropout(dense_dropout)(dense)\r\n #dense = BatchNormalization()(dense)\r\n #dense = Dense(dense_dim, activation='relu')(dense)\r\n #dense = Dropout(dense_dropout)(dense)\r\n \r\n preds = Dense(2, activation='softmax')(dense)\r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n \r\n\r\ndef unchanged_shape(input_shape):\r\n \"Function for Lambda layer\"\r\n return input_shape\r\n\r\ndef substract(input_1, input_2):\r\n \"Substract element-wise\"\r\n neg_input_2 = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)\r\n out_ = Add()([input_1, neg_input_2])\r\n return out_\r\n \r\ndef submult(input_1, input_2):\r\n \"Get multiplication and subtraction then concatenate results\"\r\n mult = Multiply()([input_1, input_2])\r\n sub = substract(input_1, input_2)\r\n out_= Concatenate()([sub, mult])\r\n return out_\r\n\r\ndef time_distributed(input_, layers):\r\n \"Apply a list of layers in TimeDistributed mode\"\r\n out_ = []\r\n node_ = input_\r\n for layer_ in layers:\r\n node_ = TimeDistributed(layer_)(node_)\r\n out_ = node_\r\n return out_\r\n\r\ndef soft_attention_alignment(input_1, input_2):\r\n \"Align text representation with neural soft attention\"\r\n attention = Dot(axes=-1)([input_1, input_2])\r\n w_att_1 = Lambda(lambda x: softmax(x, axis=1),\r\n output_shape=unchanged_shape)(attention)\r\n w_att_2 = Permute((2,1))(Lambda(lambda x: softmax(x, axis=2),\r\n output_shape=unchanged_shape)(attention))\r\n in1_aligned = Dot(axes=1)([w_att_1, input_1])\r\n in2_aligned = Dot(axes=1)([w_att_2, input_2])\r\n return in1_aligned, in2_aligned\r\n \r\ndef apply_multiple(input_, layers):\r\n \"Apply layers to input then concatenate result\"\r\n if not len(layers) > 1:\r\n raise ValueError('Layers list should contain more than 1 layer')\r\n else:\r\n agg_ = []\r\n for layer in layers:\r\n agg_.append(layer(input_))\r\n out_ = Concatenate()(agg_)\r\n return out_\r\n \r\n \r\ndef esim(maxlen, max_features, lstm_dim=32, dense_dim=64, dense_dropout=0.5):\r\n inp1 = Input(shape=(maxlen,))\r\n inp2 = Input(shape=(maxlen,))\r\n \r\n emb = Embedding(max_features, 256)\r\n emb1 = emb(inp1)\r\n emb2 = emb(inp2)\r\n \r\n #Encode\r\n encode = Bidirectional(CuDNNLSTM(lstm_dim, return_sequences=True))\r\n encoded1=encode(emb1)\r\n encoded2=encode(emb2)\r\n \r\n #Attention\r\n att1, att2 = soft_attention_alignment(encoded1, encoded2)\r\n \r\n #Compose\r\n comb1 = Concatenate()([encoded1, att2, submult(encoded1, att2)])\r\n comb2 = Concatenate()([encoded2, att1, submult(encoded2, att1)])\r\n \r\n compose = Bidirectional(CuDNNLSTM(lstm_dim, return_sequences=True))\r\n compare1 = compose(comb1)\r\n compare2 = compose(comb2)\r\n \r\n #Aggregate\r\n agg1 = apply_multiple(compare1, [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n agg2 = apply_multiple(compare2, [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n \r\n #Merge\r\n merge = Concatenate()([agg1, agg2])\r\n dense = Dense(dense_dim, activation='relu')(merge)\r\n dense = Dropout(dense_dropout)(dense)\r\n \r\n preds = Dense(2, activation='softmax')(dense)\r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\ndef multiple_conv(input_, convs, pool):\r\n agg_ = []\r\n for conv in convs:\r\n agg_.append(pool(conv(input_)))\r\n out_ = Concatenate()(agg_)\r\n return out_\r\n\r\ndef apply_serie(input_, layers):\r\n x = input_\r\n for layer in layers:\r\n x = layer(x)\r\n return x\r\n 
\r\n \r\n\r\ndef siamois_cnn(maxlen, max_features, filters=64, sizes=[2, 3, 5, 8], embedding_matrix=None):\r\n inp1 = Input(shape=(maxlen,))\r\n inp2 = Input(shape=(maxlen,))\r\n \r\n emb = Embedding(max_features, 128)\r\n if embedding_matrix is not None:\r\n embed_size = embedding_matrix.shape[1]\r\n emb = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)\r\n \r\n \r\n convs=[]\r\n for size in sizes:\r\n convs.append(Conv1D(filters=filters, kernel_size=size, activation='relu', padding='valid'))\r\n \r\n emb1 = emb(inp1)\r\n# x1a = apply_multiple(conv1(emb1), layers)\r\n# x1b = apply_multiple(conv2(emb1), layers)\r\n# x1c = apply_multiple(conv3(emb1), layers)\r\n# x1d = apply_multiple(conv4(emb1), layers)\r\n# x1a = pool(conv1(emb1))\r\n# x1b = pool(conv2(emb1))\r\n# x1c = pool(conv3(emb1))\r\n# x1d = pool(conv4(emb1))\r\n# x1e = pool(conv5(emb1))\r\n \r\n \r\n emb2 = emb(inp2)\r\n# x2a = apply_multiple(conv1(emb2), layers)\r\n# x2b = apply_multiple(conv2(emb2), layers)\r\n# x2c = apply_multiple(conv3(emb2), layers)\r\n# x2d = apply_multiple(conv4(emb2), layers)\r\n# x2a = pool(conv1(emb2))\r\n# x2b = pool(conv2(emb2))\r\n# x2c = pool(conv3(emb2))\r\n# x2d = pool(conv4(emb2))\r\n# x2e = pool(conv5(emb2))\r\n \r\n# x1 = Concatenate()([x1a, x1b, x1c, x1d, x1e])\r\n# x2 = Concatenate()([x2a, x2b, x2c, x2d, x2e])\r\n \r\n x1 = multiple_conv(emb1, convs, GlobalMaxPool1D())\r\n x2 = multiple_conv(emb2, convs, GlobalMaxPool1D())\r\n \r\n merge = submult(x1, x2)\r\n \r\n #gru = CuDNNGRU(64, return_sequences=True)\r\n #gru1 = apply_multiple(gru(emb1), [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n #gru2 = apply_multiple(gru(emb2), [GlobalAvgPool1D(), GlobalMaxPool1D()])\r\n \r\n #gru_merge = submult(gru1, gru2)\r\n \r\n #merge = Concatenate()([merge, gru_merge])\r\n \r\n merge = Dropout(0.1)(merge)\r\n merge = Dense(512, activation='relu')(merge)\r\n merge = Dropout(0.2)(merge)\r\n \r\n preds = Dense(2, activation='softmax')(merge)\r\n \r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\ndef siamois_char(maxlen, max_features, filters=64, sizes=[2, 3, 5, 8], embedding_matrix=None):\r\n inp1 = Input(shape=(maxlen,), dtype='uint8')\r\n inp2 = Input(shape=(maxlen,), dtype='uint8')\r\n \r\n emb = Embedding(max_features, 16)\r\n #emb = Lambda(K.one_hot, arguments={'num_classes':max_features}, output_shape=(maxlen, max_features))\r\n \r\n emb1 = emb(inp1) \r\n emb2 = emb(inp2)\r\n \r\n convs = []\r\n convs.append(Conv1D(filters=filters, kernel_size=7, padding='same', activation='relu'))\r\n convs.append(MaxPooling1D(pool_size=3))\r\n convs.append(Conv1D(filters=filters, kernel_size=7, padding='same', activation='relu'))\r\n convs.append(MaxPooling1D(pool_size=3))\r\n convs.append(Conv1D(filters=filters, kernel_size=3, padding='same', activation='relu'))\r\n convs.append(Conv1D(filters=filters, kernel_size=3, padding='same', activation='relu'))\r\n convs.append(Conv1D(filters=filters, kernel_size=3, padding='same', activation='relu'))\r\n convs.append(Conv1D(filters=filters, kernel_size=3, padding='same', activation='relu'))\r\n convs.append(MaxPooling1D(pool_size=3))\r\n \r\n# x1 = Concatenate()([x1a, x1b, x1c, x1d, x1e])\r\n# x2 = Concatenate()([x2a, x2b, x2c, x2d, x2e])\r\n \r\n x1 = apply_serie(emb1, convs)\r\n x2 = apply_serie(emb2, convs)\r\n \r\n x1 = GlobalMaxPool1D()(x1)\r\n x2 = GlobalMaxPool1D()(x2)\r\n \r\n# x1 = Flatten()(x1)\r\n# x2 = Flatten()(x2)\r\n \r\n merge = submult(x1, x2)\r\n \r\n merge = 
Dropout(0.1)(merge)\r\n merge = Dense(512, activation='relu')(merge)\r\n merge = Dropout(0.2)(merge)\r\n# merge = Dense(512, activation='relu')(merge)\r\n# merge = Dropout(0.2)(merge)\r\n \r\n preds = Dense(2, activation='softmax')(merge)\r\n \r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\ndef deep_char(maxlen, max_features=64, nblocks=4, ns = [2, 2, 2, 2]):\r\n inp1 = Input(shape=(maxlen,), dtype='uint8')\r\n inp2 = Input(shape=(maxlen,), dtype='uint8')\r\n \r\n emb = Embedding(max_features, 16)\r\n \r\n emb1 = emb(inp1)\r\n emb2 = emb(inp2)\r\n \r\n first_conv = Conv1D(64, kernel_size=3, activation='relu')\r\n x1 = first_conv(emb1)\r\n x2 = first_conv(emb2)\r\n for i in range(nblocks):\r\n conv1 = Conv1D(64*2**(i), kernel_size=3, padding='same')\r\n y1 = conv1(x1)\r\n y2 = conv1(x1)\r\n #y1 = BatchNormalization()(y1)\r\n #y2 = BatchNormalization()(y2)\r\n y1 = Activation('relu')(y1)\r\n y2 = Activation('relu')(y2)\r\n \r\n conv2 = Conv1D(64*2**(i), kernel_size=3, padding='same')\r\n y1 = conv2(y1)\r\n y2 = conv2(y2)\r\n #y1 = BatchNormalization()(y1)\r\n #y2 = BatchNormalization()(y2)\r\n y1 = Activation('relu')(y1)\r\n y2 = Activation('relu')(y2)\r\n \r\n# l = ns[i]\r\n# for j in range(l):\r\n# conv = Conv1D(64*2**(i), kernel_size=3, padding='same')\r\n# y1 = conv(x1)\r\n# y2 = conv(x1)\r\n# y1 = BatchNormalization()(y1)\r\n# y2 = BatchNormalization()(y2)\r\n# y1 = Activation('relu')(y1)\r\n# y2 = Activation('relu')(y2)\r\n \r\n \r\n #conv = Conv1D(filters=64*2**(i), kernel_size=1, padding='same')(x)\r\n #y1 = Add()([x1, y1])\r\n #y2 = Add()([x2, y2])\r\n \r\n if i!=nblocks-1:\r\n x1 = MaxPooling1D(pool_size=3, strides=2)(y1)\r\n x2 = MaxPooling1D(pool_size=3, strides=2)(y2)\r\n# x1 = Flatten()(x1)\r\n# x2 = Flatten()(x2)\r\n x1 = GlobalMaxPool1D()(x1)\r\n x2 = GlobalMaxPool1D()(x2)\r\n merge = submult(x1, x2)\r\n merge = Dropout(0.1)(merge)\r\n x = Dense(512, activation='relu')(merge)\r\n x = Dropout(0.2)(x)\r\n #x = Dense(512, activation='relu')(x)\r\n preds = Dense(2, activation='softmax')(x)\r\n model = Model(inputs=[inp1, inp2], outputs=preds)\r\n print(model.summary())\r\n return model\r\n\r\n","sub_path":"code_gilles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"471572732","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2019 The FATE Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport json\nimport os\nimport time\n\nfrom examples.test import submit\n\n\ndef check_data_count(submitter, fate_home, table_name, namespace, expect_count):\n fate_flow_path = os.path.join(fate_home, \"../fate_flow/fate_flow_client.py\")\n cmd = [\"python\", fate_flow_path, \"-f\", \"table_info\", \"-t\", table_name, \"-n\", namespace]\n stdout = submitter.run_cmd(cmd)\n try:\n stdout = json.loads(stdout)\n count = stdout[\"data\"][\"count\"]\n if count != expect_count:\n raise AssertionError(\"Count of upload file is not as expect, count is: {},\"\n \"expect is: {}\".format(count, expect_count))\n except:\n raise RuntimeError(f\"check data error, stdout: {stdout}\")\n\n print(f\"[{time.strftime('%Y-%m-%d %X')}] check_data_out {stdout} \\n\")\n\n\ndef data_upload(submitter, upload_config, check_interval, fate_home):\n # with open(file_name) as f:\n # upload_config = json.loads(f.read())\n\n task_data = upload_config[\"data\"]\n for data in task_data:\n format_msg = f\"@{data['file']} >> {data['namespace']}.{data['table_name']}\"\n print(f\"[{time.strftime('%Y-%m-%d %X')}]uploading {format_msg}\")\n job_id = submitter.upload(data_path=data[\"file\"],\n namespace=data[\"namespace\"],\n name=data[\"table_name\"],\n partition=data[\"partition\"],\n head=data[\"head\"])\n print(f\"[{time.strftime('%Y-%m-%d %X')}]upload done {format_msg}, job_id={job_id}\\n\")\n\n submitter.await_finish(job_id, check_interval=check_interval)\n check_data_count(submitter, fate_home, data[\"table_name\"], data[\"namespace\"], data[\"count\"])\n\n\ndef read_data(fate_home):\n config_file = os.path.join(fate_home, \"scripts/config.json\")\n with open(config_file, 'r', encoding='utf-8') as f:\n json_info = json.loads(f.read())\n return json_info\n\n\ndef main():\n import examples\n fate_home = os.path.dirname(examples.__file__)\n # fate_home = os.path.abspath(f\"{os.getcwd()}/../\")\n\n arg_parser = argparse.ArgumentParser()\n\n arg_parser.add_argument(\"-m\", \"--mode\", type=int, help=\"work mode\", choices=[0, 1],\n required=True)\n arg_parser.add_argument(\"-f\", \"--force\",\n help=\"table existing strategy, \"\n \"-1 means skip upload, \"\n \"0 means force upload, \"\n \"1 means upload after deleting old table\",\n type=int,\n choices=[-1, 0, 1],\n default=0)\n arg_parser.add_argument(\"-i\", \"--interval\", type=int, help=\"check job status every i seconds, defaults to 1\",\n default=1)\n\n arg_parser.add_argument(\"-b\", \"--backend\", type=int, help=\"backend\", choices=[0, 1], default=0)\n args = arg_parser.parse_args()\n\n work_mode = args.mode\n existing_strategy = args.force\n backend = args.backend\n interval = args.interval\n\n spark_submit_config = {}\n submitter = submit.Submitter(fate_home=fate_home,\n work_mode=work_mode,\n backend=backend,\n existing_strategy=existing_strategy,\n spark_submit_config=spark_submit_config)\n\n upload_data = read_data(fate_home)\n\n data_upload(submitter, upload_data, interval, 
fate_home)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/scripts/upload_default_data.py","file_name":"upload_default_data.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"496210704","text":"# coding: utf-8\n\n\"\"\"\n DocuSign REST API\n\n The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501\n\n OpenAPI spec version: v2.1\n Contact: devcenter@docusign.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom docusign_esign.client.configuration import Configuration\n\n\nclass CustomFieldsEnvelope(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'list_custom_fields': 'list[ListCustomField]',\n 'text_custom_fields': 'list[TextCustomField]'\n }\n\n attribute_map = {\n 'list_custom_fields': 'listCustomFields',\n 'text_custom_fields': 'textCustomFields'\n }\n\n def __init__(self, _configuration=None, **kwargs): # noqa: E501\n \"\"\"CustomFieldsEnvelope - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._list_custom_fields = None\n self._text_custom_fields = None\n self.discriminator = None\n\n setattr(self, \"_{}\".format('list_custom_fields'), kwargs.get('list_custom_fields', None))\n setattr(self, \"_{}\".format('text_custom_fields'), kwargs.get('text_custom_fields', None))\n\n @property\n def list_custom_fields(self):\n \"\"\"Gets the list_custom_fields of this CustomFieldsEnvelope. # noqa: E501\n\n An array of list custom fields. # noqa: E501\n\n :return: The list_custom_fields of this CustomFieldsEnvelope. # noqa: E501\n :rtype: list[ListCustomField]\n \"\"\"\n return self._list_custom_fields\n\n @list_custom_fields.setter\n def list_custom_fields(self, list_custom_fields):\n \"\"\"Sets the list_custom_fields of this CustomFieldsEnvelope.\n\n An array of list custom fields. # noqa: E501\n\n :param list_custom_fields: The list_custom_fields of this CustomFieldsEnvelope. # noqa: E501\n :type: list[ListCustomField]\n \"\"\"\n\n self._list_custom_fields = list_custom_fields\n\n @property\n def text_custom_fields(self):\n \"\"\"Gets the text_custom_fields of this CustomFieldsEnvelope. # noqa: E501\n\n An array of text custom fields. # noqa: E501\n\n :return: The text_custom_fields of this CustomFieldsEnvelope. # noqa: E501\n :rtype: list[TextCustomField]\n \"\"\"\n return self._text_custom_fields\n\n @text_custom_fields.setter\n def text_custom_fields(self, text_custom_fields):\n \"\"\"Sets the text_custom_fields of this CustomFieldsEnvelope.\n\n An array of text custom fields. # noqa: E501\n\n :param text_custom_fields: The text_custom_fields of this CustomFieldsEnvelope. 
# noqa: E501\n :type: list[TextCustomField]\n \"\"\"\n\n self._text_custom_fields = text_custom_fields\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CustomFieldsEnvelope, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CustomFieldsEnvelope):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, CustomFieldsEnvelope):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"docusign_esign/models/custom_fields_envelope.py","file_name":"custom_fields_envelope.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"559824470","text":"import asyncio\nfrom time import sleep\n\nfrom utilities import tor\nfrom utilities.models import *\nfrom utilities.url_util import get_urls_from_content, format_url\nimport utilities.log as log\n\n\n# Add found URLs to the database if they are not being blocked by the content-block feature.\n@db_session\ndef save_url(url):\n blocked_urls = select(b.value for b in Block if b.type == \"Url\" and b.active)[:]\n\n if not any(x for x in blocked_urls if x in url):\n result = select(p for p in Url if p.url == url).count()\n if result == 0:\n \tif \".onion\" in url:\n\t url_object = Url(\n\t url=url,\n\t date_added=datetime.now()\n\t )\n\t else:\n\t \tprint(\"{} is blocked\".format(url) )\n else:\n log.info(\"URL: {} is blocked\".format(url))\n\n commit()\n\n\n# Update the URL which was being scraped\n@db_session\ndef update_url(url):\n \"\"\"The update_url function sets the date of scraped urls to now.\"\"\"\n url_db = select(u for u in Url if u.id == url.id).get()\n url_db.date_scanned = datetime.now()\n\n\n@db_session\ndef get_urls_from_database():\n \"\"\"The get_urls_from_database function fetches 8 urls from the database that need to be scraped by the scout.\n The urls which are set with a priority in the database will be retrieved first.\n \"\"\"\n return select(u for u in Url if u.date_scanned is None).order_by(desc(Url.priority_scan))[:8]\n\n\ndef get_urls_from_results(urls, results):\n urls_in_results = []\n for index, url in enumerate(urls):\n if type(results[index]) is not bytes:\n continue\n\n urls_in_content = get_urls_from_content(results[index])\n for url_in_content in urls_in_content:\n urls_in_results.append(format_url(url.url, url_in_content))\n\n return urls_in_results\n\n\n\n@db_session(optimistic=False)\nasync def main(loop):\n log.debug('scout has been started')\n while True:\n urls = get_urls_from_database()\n # update urls immediately to avoid 
different instances crawling the same urls\n for url in urls:\n update_url(url)\n\n if len(urls) == 0:\n print(\"No URLs to be crawled, waiting for 60 seconds.\")\n log.info('No URLs to be crawled, waiting for 60 seconds.')\n sleep(60)\n commit()\n continue\n\n results = await tor.get_content_from_urls(loop, urls)\n urls_from_content = get_urls_from_results(urls, results)\n\n for u in urls_from_content:\n if u is not None:\n save_url(u)\n print('Found ', len(urls_from_content), ' urls')\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main(loop))\n","sub_path":"Source/crawler/scout.py","file_name":"scout.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"218209067","text":"from typing import Dict, List, NamedTuple\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport urllib.parse\nfrom tqdm import tqdm\nimport json\nimport argparse\nimport os\nimport pathvalidate\nfrom typing import Union\nfrom datetime import datetime\n\nROOT_URL = 'https://tululu.org'\nBOOK_CATEGORY = 'l55'\n\n\ndef fetch_book_data(book_url: str, id: int, dest_folder: str, skip_imgs: bool, skip_txt: bool) -> Union[Dict, None]:\n try:\n response = requests.get(book_url, verify=False)\n check_response(response)\n book_properties = parse_book_page(response.text)\n book_filename = f'{id}.{book_properties[\"autor\"]} {book_properties[\"name\"]}.txt'\n if not skip_txt:\n url_for_txt_download = urllib.parse.urljoin(ROOT_URL, 'txt.php')\n book_path = download_txt_file(url_for_txt_download, {'id': id}, book_filename, dest_folder)\n book_properties['book_path'] = book_path\n if not skip_imgs:\n url_for_image_download = urllib.parse.urljoin(ROOT_URL, book_properties[\"img_url\"])\n download_image(url_for_image_download, dest_folder)\n return book_properties\n except requests.HTTPError as e:\n tqdm.write(f'Book from {book_url} not loaded something was wrong: {str(e)}')\n except requests.ConnectionError:\n tqdm.write(f'Error, cant connected to site!')\n\n\ndef write_to_json_file(data: List, filepath: str) -> None:\n with open(filepath, \"w\", encoding='utf8') as my_file:\n json.dump(data, my_file, ensure_ascii=False, sort_keys=True, indent=4)\n\n\ndef download_txt_file(url: str, params: Dict, filename: str, folder: str) -> str:\n response = requests.get(url, verify=False, params=params)\n check_response(response)\n os.makedirs(folder, exist_ok=True)\n filepath = os.path.join(folder, pathvalidate.sanitize_filename(filename))\n with open(filepath, 'w') as file:\n file.write(response.text)\n return filepath\n\n\ndef check_response(response: requests.Response) -> None:\n response.raise_for_status()\n if response.history:\n raise requests.HTTPError('Error: redirect detected!')\n\n\ndef download_image(url: str, folder: str) -> str:\n response = requests.get(url, verify=False)\n check_response(response)\n os.makedirs(folder, exist_ok=True)\n url_parts = urllib.parse.urlsplit(url, scheme='', allow_fragments=True)\n timestamp = datetime.now().timestamp()\n filename = f'{timestamp}-{os.path.basename(url_parts.path)}'\n filepath = os.path.join(folder, pathvalidate.sanitize_filename(filename))\n with open(filepath, 'wb') as file:\n file.write(response.content)\n return filepath\n\n\ndef parse_book_page(html_text: str) -> Dict:\n soup = BeautifulSoup(html_text, 'html.parser')\n title_tag = soup.select_one('#content h1')\n name, autor = title_tag.text.split('::')\n img_tag = 
soup.select_one('#content .bookimage img')\n comment_tags = soup.select('.texts span.black')\n genre_tags = soup.select('span.d_book a')\n\n book_properties = {\n 'name': name.strip(),\n 'autor': autor.strip(),\n 'img_url': img_tag.attrs['src'],\n 'genres': [item.text for item in genre_tags],\n 'comments': [item.text for item in comment_tags]\n }\n return book_properties\n\n\ndef parse_command_line_arguments() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--start_page', help='Номер начальной страницы', default=1, type=int)\n parser.add_argument('--end_page', help='Номер конечной страницы', default=701, type=int)\n parser.add_argument('--dest_folder', help='Путь каталогу для сохранения данных', default='parse_results', type=str)\n parser.add_argument('--skip_imgs', help='Не скачивать обложки', action='store_true')\n parser.add_argument('--skip_txt', help='Не скачивать текст книги', action='store_true')\n parser.add_argument('--json_path', help='Путь к файлу с результатами парсинга', default='books.json', type=str)\n return parser.parse_args()\n\n\ndef main():\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n args = parse_command_line_arguments()\n books_of_category = []\n for i in tqdm(range(args.start_page, args.end_page), desc='Pages'):\n category_url = urllib.parse.urljoin(ROOT_URL, BOOK_CATEGORY, i)\n response = requests.get(category_url, verify=False)\n soup = BeautifulSoup(response.text, 'html.parser')\n links_to_books = soup.select('.d_book .bookimage a')\n for tag in tqdm(links_to_books, desc=f'Books on page({i})'):\n href = tag.attrs['href']\n book_url = urllib.parse.urljoin(ROOT_URL, href)\n id = int(href.replace('b', '').replace('/', ''))\n book_properties = fetch_book_data(book_url, id, args.dest_folder, args.skip_imgs, args.skip_txt)\n if book_properties:\n books_of_category.append(book_properties)\n\n write_to_json_file(books_of_category, args.json_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"book_loader.py","file_name":"book_loader.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108118296","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom post.forms import PostForm\nfrom utils import strings as s\n# Create your views here.\nfrom post.models import Post, PostSerializer\nfrom utils.decorators import post_owner\n\n\n@login_required\ndef post_create(request):\n if request.method == s.GET:\n form = PostForm()\n context = {\n 'form': form\n }\n return render(request, 'post/post_create.html', context)\n\n elif request.method == s.POST:\n form = PostForm(data=request.POST)\n if form.is_valid():\n post = form.save(author=request.user)\n return redirect('post:post_detail', user_id=request.user.user_id, pk=post.pk)\n else:\n return HttpResponse('포스트 작성에 실패했습니다.')\n\n\n@post_owner\n@login_required\ndef post_delete(request, user_id, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('post:post_list')\n\n\n@post_owner\n@login_required\ndef post_modify(request, user_id, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == s.GET:\n form = PostForm(instance=post)\n context = {\n 'form': form\n }\n return render(request, 'post/post_create.html', context)\n\n elif request.method == s.POST:\n form = PostForm(data=request.POST, 
instance=post)\n if form.is_valid():\n post = form.save(author=request.user, is_updated=True)\n return redirect('post:post_detail', post.author.user_id, post.pk)\n\n\n@login_required\ndef post_list(request):\n context = {\n 'posts': Post.objects.filter(author=request.user)\n }\n return render(request, 'post/post_list.html', context)\n\n\n@login_required\ndef post_search(request, keyword):\n context = {'posts': Post.objects.filter(keyword__contains=keyword)}\n return render(request, \"post/post_list.html\", context)\n\n\ndef post_detail(request, user_id, pk):\n post = get_object_or_404(Post, pk=pk)\n context = {\n 'post': post\n }\n return render(request, 'post/post_detail.html', context)\n\n\ndef api_post(request, user_id, keyword):\n post = get_object_or_404(Post, keyword=keyword)\n serializer = PostSerializer(post)\n return JsonResponse(serializer.data)","sub_path":"django_app/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"602896729","text":"# -*- coding: utf-8 -*-\n\nfrom postprocess.xcVtk.FE_model import quick_graphics as qg\n\nexecfile('../model_gen.py')\nexecfile('../loadStateData.py')\n\n#available components: 'axialComponent', 'transComponent', 'transYComponent',\n# 'transZComponent'\n\nloadCasesToDisplay=[Q3,Q4,Q5,Q6,Q7]\n#loadCasesToDisplay=[LS1,LS2]\n#loadCasesToDisplay=[Q9]\n#End data\n\nfor lc in loadCasesToDisplay:\n for st in lc.setsToDispBeamLoads:\n lcs=qg.QuickGraphics(FEcase)\n capt=lc.loadCaseDescr + ', ' + st.description + ', ' + lc.unitsLoads\n lcs.dispLoadCaseBeamEl(loadCaseName=lc.loadCaseName,setToDisplay=st,fUnitConv=lc.unitsScaleLoads,elLoadComp=lc.compElLoad,elLoadScaleF=lc.vectorScaleLoads,nodLoadScaleF=lc.vectorScalePointLoads,viewName=lc.viewName,hCamFct=lc.hCamFct,caption= capt,fileName=None)\n\n","sub_path":"workingModel/display/display_loads_beamEl.py","file_name":"display_loads_beamEl.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"139371115","text":"import cv2, time\n\ndef countdown(n):\n\twhile n > 0: \n\t\tprint(n)\n\t\tn = n-1\n\t\ttime.sleep(1)\t\n\tif n==0:\n\t\tprint('Smile!')\n\n\n\n \ncam = cv2.VideoCapture(0)\ncountdown(3)\ns, im = cam.read() # captures image\ncv2.imshow(\"Test Picture\", im) # displays captured image\ncv2.waitKey(0)\ncv2.imwrite(\"test.jpg\",im) # writes image test.bmp to disk\n","sub_path":"module2_TakePhotos/TakePicture.py","file_name":"TakePicture.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"576472069","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n#=============================================================================\n# FileName: tasks\n# Desc:\n# Author: ge.jin\n# Email: ge.jin@woqutech.com\n# HomePage: wwww.woqutech.com\n# Version: 0.0.1\n# LastChange: 3/2/16 10:56 PM\n# History:\n#=============================================================================\n'''\nfrom __future__ import absolute_import\nimport redis_lock\n\nfrom rest.snippets.models import TargetInfo, DiskInfo\nfrom rest.snippets.serializers import DiskInfoSerializer\nfrom rest.utils import sshutils, execute\nfrom celery import current_app\nfrom redis import StrictRedis\n\nfrom rest.utils.ip import check_network\n\nconn = StrictRedis()\nredis_lock.reset_all(conn)\n\n\n@current_app.task\ndef 
flush_diskinfo(dataset, target_id):\n DiskInfo.objects.filter(target_id=target_id).delete()\n serializer = DiskInfoSerializer(data=dataset, many=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n\n@current_app.task\ndef poweroff(target_id):\n ti = TargetInfo.objects.get(id=target_id)\n return execute(\"ipmitool -H {0.ipmi_addr} -P {0.ipmi_password} -U {0.ipmi_user} power off\".format(ti))\n\n\n@current_app.task\ndef poweron(target_id):\n ti = TargetInfo.objects.get(id=target_id)\n return execute(\"ipmitool -H {0.ipmi_addr} -P {0.ipmi_password} -U {0.ipmi_user} power on\".format(ti))\n\n\n@current_app.task\ndef reboot(target):\n lock = redis_lock.Lock(conn, \"lock-{0.id}\".format(target))\n if lock.acquire(blocking=False):\n try:\n sshpool = sshutils.SSHPool(ip=target.ipaddress,\n user=\"root\",\n password=\"123456\",\n port=int(22),\n conn_timeout=10)\n connection, channel = sshpool.get_ssh_channel()\n stdin, stdout, stderr = connection.exec_command(\"reboot\", timeout=30)\n error = \"\".join(stderr.readlines())\n if error:\n target.status = \"UNKNOW\"\n else:\n target.status = \"OFFLINE\"\n target.save()\n finally:\n lock.release()\n return \"success\"\n else:\n print(\"Another worker has already hold the job\")\n return \"fail\"\n\n\n@current_app.task\ndef check_alive(target=None, targetid=None):\n if target is not None:\n target = target\n else:\n target = TargetInfo.objects.get(id=targetid)\n\n try:\n check_network(target.ipaddress, 22)\n target.status = \"ONLINE\"\n target.save()\n except Exception:\n target.status = \"OFFLINE\"\n target.save()\n\n\n@current_app.task\ndef install(target):\n lock = redis_lock.Lock(conn, \"lock-{0.id}\".format(target))\n if lock.acquire(blocking=False):\n try:\n return execute(\"\"\"cobbler system edit --name={0.macaddress} --netboot-enabled=true\"\"\".format(target))\n finally:\n lock.release()\n if target.status == 'InMemoryOS':\n reboot.apply_async(args=(target,))\n else:\n print(\"Another worker has already hold the job\")\n\n\n@current_app.task\ndef unregister(target):\n execute(\"\"\"cobbler system remove --name={0.macaddress}\"\"\".format(target))\n\n\n@current_app.task\ndef register(target):\n cmd = \"\"\"cobbler system add --name={0.macaddress} --profile={0.profile} --interface={0.interface} \\\n--mac={0.macaddress} --ip-address={0.ipaddress} --netmask=255.255.255.0 --static=1 \\\n--netboot-enabled=false\"\"\".format(target)\n execute(cmd)\n\n\n@current_app.task\ndef update(target):\n cmd = \"\"\"cobbler system edit --name={0.macaddress} --profile={0.profile} --interface={0.interface} \\\n--mac={0.macaddress} --ip-address={0.ipaddress} --netmask=255.255.255.0 --static=1 \\\n--netboot-enabled=false\"\"\".format(target)\n execute(cmd)\n","sub_path":"rest/snippets/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"494299763","text":"\"\"\"empty message\n\nRevision ID: 60fb267db680\nRevises: 0829b200eb31\nCreate Date: 2019-10-30 15:50:14.806829\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '60fb267db680'\ndown_revision = '0829b200eb31'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('tallySheetVersionRow_PRE_34_CO',\n sa.Column('tallySheetVersionRowId', sa.Integer(), nullable=False),\n sa.Column('tallySheetVersionId', sa.Integer(), nullable=True),\n sa.Column('candidateId', sa.Integer(), nullable=True),\n sa.Column('notCountedBallotPapers', sa.Integer(), nullable=False),\n sa.Column('remainingBallotPapers', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['candidateId'], ['candidate.candidateId'], ),\n sa.ForeignKeyConstraint(['tallySheetVersionId'], ['tallySheetVersion.tallySheetVersionId'], ),\n sa.PrimaryKeyConstraint('tallySheetVersionRowId'),\n sa.UniqueConstraint('tallySheetVersionId', 'candidateId', name='PRE_34_CO_row')\n )\n op.create_table('tallySheetVersionRow_PRE_34_CO_PREFERENCES',\n sa.Column('preferenceRowId', sa.Integer(), nullable=False),\n sa.Column('tallySheetPre34RowId', sa.Integer(), nullable=True),\n sa.Column('candidateId', sa.Integer(), nullable=True),\n sa.Column('no2ndPreferences', sa.Integer(), nullable=False),\n sa.Column('no3rdPreferences', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['candidateId'], ['candidate.candidateId'], ),\n sa.ForeignKeyConstraint(['tallySheetPre34RowId'], ['tallySheetVersionRow_PRE_34_CO.tallySheetVersionRowId'], ),\n sa.PrimaryKeyConstraint('preferenceRowId'),\n sa.UniqueConstraint('preferenceRowId', name='PRE_34_CO_PREFERENCES_row')\n )\n op.drop_index('stamp_barcodeId_fk', table_name='stamp')\n op.create_foreign_key(None, 'stamp', 'barcode', ['barcodeId'], ['barcodeId'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'stamp', type_='foreignkey')\n op.create_index('stamp_barcodeId_fk', 'stamp', ['barcodeId'], unique=False)\n op.drop_table('tallySheetVersionRow_PRE_34_CO_PREFERENCES')\n op.drop_table('tallySheetVersionRow_PRE_34_CO')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/60fb267db680_.py","file_name":"60fb267db680_.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"29076670","text":"import pickle\r\nquestions = [\"\"\"\r\nКто занимается предпринимательством?\r\n1)Частные лица\r\n2)Государство\r\n3)Различные ассоциации\r\n4)Всё выше перечисленое\r\n\"\"\",\r\n\"\"\"\r\nЧто из ниже перечисленного является физическим лицом?\r\n1)фирма\r\n2)Организация\r\n3)Человек занимающийся экономической деятельностью\r\n\"\"\",\r\n\"\"\"\r\nБартер это -\r\n1)Обмен товара на товар\r\n2)Обмен товара на деньги\r\n3)Вид предпринимательства\r\n\"\"\",\r\n\"\"\"\r\nСколько условий имеет обмен?\r\n1)2\r\n2)3\r\n3)4\r\n\"\"\",\r\n\"\"\"\r\nЧто является объектом собственности?\r\n1)Факторы производства\r\n2)Группы\r\n3)Отдельная личность\r\n\"\"\",\r\n\"\"\"\r\nЧто из ниже перечисленного является формой собстенности?\r\n1)Частная собственность\r\n2)Готовая продукция\r\n3)Государство\r\n\"\"\"]\r\n\r\nanswers = [4, 3, 2, 3, 1, 1]\r\ndatafile = open(\"test.dat\", \"wb\")\r\npickle.dump(questions, datafile)\r\npickle.dump(answers, datafile)\r\ndatafile.close()\r\nimport pickle\r\nmark = 0\r\ntry:\r\n datafile = open(\"test.dat\", \"rb\")\r\nexcept: \r\n print(Ошибка)\r\nelse:\r\n questions = pickle.load(datafile)\r\nanswers = pickle.load(datafile)\r\ndatafile.close()\r\nn = len(answers) #К-во вопросов и ответов\r\ni = 5\r\nfor i in range(0, n):\r\n print(questions[i])\r\na = int(input(\"Ваш ответ: \"))\r\nif a == answers[i]:\r\n mark = mark + 1\r\n 
print(\"Правильно1\")\r\nelse:\r\n print (\"Неправильно\")\r\n print(\"Вы правильно ответили на \", mark, \" вопросов из \",n)\r\n\r\n","sub_path":"Предпринимательство обмен частная собственность.py","file_name":"Предпринимательство обмен частная собственность.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"104058611","text":"#!/usr/bin/env python3\n\n#===============================================================================\n# This program is based on the legacy script `bf-legacy/bfcompare.py`, which was\n# written by Pete Schmitt for Blackfynn file server.\n#===============================================================================\n\nimport os\nimport sys\n\nfrom pennsieve.models import BaseCollection\n\nfrom psv_lib import (\n EXTENSIONS,\n psv,\n psv_datasets,\n parse_options,\n)\n\nSYNTAX = \"\"\"\npsv-compare.py -h (help)\n -c \n -p \n --all (compare with all datasets)\n -d (required)\n -i (optional, case-insensitive comparison)\n --data (optional, also compare data)\n\nNote: `-c`, `-p` and `--all` are mutually exclusive.\n\"\"\"\n\n\ndef get_collections(element, collections, data_opt, indent=0):\n \"\"\"Get the contents of a dataset as a tree.\"\"\"\n\n try:\n element._check_exists()\n except Exception:\n print(f\"ERROR: {element} not exist on Pennsieve\")\n return\n\n if indent > 0:\n collections.append(f\"{indent - 1}:{element.name}\")\n print(\".\", end=\"\")\n\n for item in element.items:\n if isinstance(item, BaseCollection):\n get_collections(item, collections, data_opt, indent=indent+1)\n elif data_opt:\n pkg = psv.get(item)\n pkg_name = pkg.name\n try:\n real_name = str(pkg.sources[0].s3_key.split('/')[-1])\n except:\n print(\n f\"\\nERROR: unable to get real name of package: \"\n f\"{elment.name}/{pkg_name}, so it will be ignored\"\n )\n continue\n\n real_ext = None\n for ext in EXTENSIONS:\n if real_name.lower().endswith(ext.lower()):\n real_ext = ext\n break\n\n if real_ext is None:\n real_ext = real_name.rsplit(\".\", 1)[-1]\n\n if pkg_name[-len(real_ext):] == real_ext:\n filename = pkg_name\n else:\n filename = pkg_name.replace(real_ext, \"\") + \".\" + real_ext\n\n collections.append(f\"{indent}:{filename}\")\n\n return collections\n\n\ndef create_paths(the_list):\n \"\"\"Create a list of UNIX-like paths from Pennsieve dataset.\"\"\"\n\n paths = list()\n\n for item in the_list:\n colon = item.find(':')\n indent = int(item[:colon])\n collection = item[colon+1:]\n\n if indent == 0:\n path = [0] * 100\n path[0] = collection\n paths.append(collection)\n else:\n path[indent] = collection\n p0 = path[0]\n\n for i in range(1, indent + 1):\n if path[i] != 0:\n p0 += \"/\" + path[i]\n\n paths.append(p0)\n\n return paths\n\n\ndef find(collection, paths, case_sensitive):\n \"\"\"Search paths for collection.\"\"\"\n\n for p in paths:\n if case_sensitive and p == collection:\n return True\n\n if not case_sensitive and p.upper() == collection.upper():\n return True\n\n return False\n\n\ndef find_first_only(paths_1, paths_2, case_sensitive):\n \"\"\"Find paths that are in `ds1` but not in `ds2`.\"\"\"\n\n for p1 in paths_1:\n if not find(p1, paths_2, case_sensitive):\n print(p1)\n\n\ndef compare_datasets(d_arg, d_paths, c_dataset, case_sensitive, data_opt):\n \"\"\"Compare `d_dataset` and `c_dataset`.\"\"\"\n\n c_paths = get_ds_paths(c_dataset)\n\n print(f\"\\n\\nData in '{d_arg}' but are NOT in '{c_dataset.name}':\")\n print('*' * 80)\n find_first_only(d_paths, c_paths, 
case_sensitive)\n\n print(f\"\\nData in '{c_dataset.name}' but are NOT in '{d_arg}':\")\n print('*' * 80)\n find_first_only(c_paths, d_paths, case_sensitive)\n\n\ndef get_local_paths(p_arg, data_opt):\n os.chdir(p_arg)\n paths = list()\n\n for r, d, f in os.walk('.'):\n if data_opt:\n dir_list = d + f\n else:\n dir_list = d\n\n for item in dir_list:\n paths.append((r + '/' + item)[2:])\n\n paths.sort()\n return paths\n\n\ndef get_ds_paths(dataset):\n \"\"\"Get paths in input dataset.\"\"\"\n\n print(f\"\\nGathering collections from '{dataset.name}'\")\n collections = []\n ds_list = get_collections(dataset, collections, data_opt)\n ds_paths = create_paths(ds_list)\n ds_paths.sort()\n\n return ds_paths\n\n\ndef handle_d_option(d_arg):\n \"\"\"Handle `-d` option.\"\"\"\n\n if not d_arg:\n print(\"ERROR: `-d ` option not found\")\n sys.exit(1)\n\n ds_name = psv_datasets.get(d_arg, None)\n if ds_name is None:\n print(f\"ERROR: dataset '{d_arg}' not found on Pennsieve\")\n sys.exit(1)\n\n dataset = psv.get_dataset(ds_name)\n ds_paths = get_ds_paths(dataset)\n return ds_paths\n\n\ndef get_c_p_all_options(opts_dict):\n \"\"\"Ensure that only one of `-c`, `-d` and `--all` is being used.\"\"\"\n\n c_opt = '-c' in opts_dict\n p_opt = '-p' in opts_dict\n all_opt = '--all' in opts_dict\n\n if c_opt + p_opt + all_opt != 1:\n print(\"ERROR: ONE AND ONLY ONE of `-c`, `-p` and `--all` options is allowed\")\n sys.exit(1)\n\n return c_opt, p_opt, all_opt\n\n\ndef handle_c_option(c_arg, d_arg, d_paths, case_sensitive, data_opt):\n \"\"\"Handle `-c` option.\"\"\"\n\n if not c_arg:\n print(\"ERROR: `-c ` option not found\")\n sys.exit(1)\n\n ds_name = psv_datasets.get(c_arg, None)\n if ds_name is None:\n print(f\"ERROR: dataset '{c_arg}' not found on Pennsieve server\")\n sys.exit(1)\n\n c_dataset = psv.get_dataset(ds_name)\n compare_datasets(d_arg, d_paths, c_dataset, case_sensitive, data_opt)\n\n\ndef handle_p_option(p_arg, d_arg, d_paths, case_sensitive, data_opt):\n \"\"\"Handle `-p` option.\"\"\"\n\n local_paths = get_local_paths(p_arg, data_opt)\n\n print(f\"\\n\\nData in '{d_arg}' but are NOT in '{p_arg}':\")\n print('*' * 80)\n find_first_only(d_paths, local_paths, case_sensitive)\n\n print(f\"\\nData in '{p_arg}' but are NOT in '{d_arg}':\")\n print('*' * 80)\n find_first_only(local_paths, d_paths, case_sensitive)\n\n\ndef handle_all_option(d_arg, d_paths, case_sensitive, data_opt):\n \"\"\"Handle `--all` option.\"\"\"\n\n for k in sorted(psv_datasets):\n if not k.startswith('HPAP-'):\n continue\n\n ds_name = psv_datasets[k]\n c_dataset = psv.get_dataset(ds_name)\n compare_datasets(d_arg, d_paths, c_dataset, case_sensitive, data_opt)\n\n\n#==============================================================================\n# Main program\n#==============================================================================\nif __name__ == '__main__':\n # Parse options\n opts_dict = parse_options(sys.argv, \"hip:d:c:\", ['all', 'data'], SYNTAX)\n\n # `-i` option\n case_sensitive = '-i' not in opts_dict\n if not case_sensitive:\n print(\"INFO: case-insensitive comparison enabled\")\n\n # `--data` option\n data_opt = '--data' in opts_dict\n\n # `-c`, `-p` and `--all` options\n c_opt, p_opt, all_opt = get_c_p_all_options(opts_dict)\n\n # `-d` is required. 
It specifies the input dataset.\n d_arg = opts_dict.get('-d', None)\n d_paths = handle_d_option(d_arg)\n\n # `-c ` option\n if c_opt:\n c_arg = opts_dict.get('-c', None)\n handle_c_option(c_arg, d_arg, d_paths, case_sensitive, data_opt)\n\n # `-p ` option\n if p_opt:\n p_arg = opts_dict.get('-p', None)\n handle_p_option(p_arg, d_arg, d_paths, case_sensitive, data_opt)\n\n # `--all` option\n if all_opt:\n handle_all_option(d_arg, d_paths, case_sensitive, data_opt)\n","sub_path":"psv-compare.py","file_name":"psv-compare.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"157500292","text":"# -*- coding: utf-8 -*-\nimport contextlib\nimport functools\nfrom typing import Callable, Union\n\nfrom hutils.shortcuts import log_error\n\n\ndef obj_cache(key):\n \"\"\" 使用对象的属性来充当方法缓存。use object attribute as cache.\n\n Examples::\n\n class A:\n @obj_cache('_value')\n def get_value(self, *args):\n ...\n\n :type key: str\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(obj, *args, **kwargs):\n if hasattr(obj, key):\n return getattr(obj, key)\n value = func(obj, *args, **kwargs)\n setattr(obj, key, value)\n return value\n\n return wrapper\n\n return decorator\n\n\n@contextlib.contextmanager\ndef ignore_error(*exceptions, logger=None):\n exceptions = exceptions or (Exception,)\n try:\n yield\n except exceptions as ex:\n log_error(logger or __name__, ex)\n\n\n@contextlib.contextmanager\ndef catches(*exceptions, raises: Union[BaseException, Callable[[Exception], BaseException]], log=False, logger=None):\n \"\"\" 封装转换错误类。transfer exceptions to a different type.\n\n Examples::\n\n with self.assertRaises(IOError), catches(ValueError, TypeError, raises=IOError()):\n raise ValueError('should wrap this error')\n\n @catches(raises=get_validation_error, log=True)\n def raise_io_error():\n raise ValueError('should wrap this error')\n \"\"\"\n exceptions = exceptions or (Exception,)\n try:\n yield\n except exceptions as ex:\n if callable(raises):\n raises = raises(ex)\n if log:\n log_error(logger or __name__, raises)\n raise raises from ex\n\n\ndef mutes(*exceptions, returns=None, log=True):\n \"\"\" 出错时保持沉默,返回普通值。mute exception\n\n Examples::\n\n @mutes(returns=42)\n def get_answer(a, b):\n return a + b\n \"\"\"\n\n exceptions = exceptions or (Exception,)\n\n def decorator(func):\n @contextlib.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exceptions as ex:\n if log:\n log_error(__name__, ex)\n return returns\n\n return wrapper\n\n return decorator\n","sub_path":"hutils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"282893785","text":"import os\nimport time\n\nimport caldav\nimport transaction\nfrom caldav.lib.error import AuthorizationError\nfrom collections import OrderedDict\nfrom nose.tools import eq_\nfrom nose.tools import ok_\nfrom nose.tools import raises\nimport requests\nfrom requests.exceptions import ConnectionError\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom tg import config\n\nfrom tracim.config.app_cfg import daemons\nfrom tracim.lib.calendar import CalendarManager\nfrom tracim.lib.workspace import WorkspaceApi\nfrom tracim.model import DBSession\nfrom tracim.tests import TestCalendar as BaseTestCalendar\nfrom tracim.tests import not_raises\nfrom tracim.model.auth import User\nfrom 
tracim.model.data import Content\nfrom tracim.model.data import ContentRevisionRO\nfrom tracim.model.data import Workspace\n\n\nclass TestCalendar(BaseTestCalendar):\n def setUp(self):\n super().setUp()\n time.sleep(3) # TODO - 20160606 - Bastien: sleep to wait ...\n # ... radicale daemon started. We should lock something somewhere !\n\n def test_func__radicale_connectivity__ok__nominal_case(self):\n radicale_base_url = CalendarManager.get_base_url()\n\n try:\n response = requests.get(radicale_base_url)\n eq_(response.status_code, 401, 'Radicale http response should be '\n '401, its {0}'\n .format(response.status_code))\n except ConnectionError as exc:\n ok_(False, 'Unable to contact radicale on HTTP: {0}'.format(exc))\n\n @not_raises(AuthorizationError)\n def test_func__radicale_auth__ok__as_lawrence(self):\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='lawrence-not-real-email@fsf.local',\n password='foobarbaz'\n )\n client.propfind()\n\n @raises(AuthorizationError)\n def test_func__radicale_auth__fail__as_john_doe(self):\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='john.doe@foo.local',\n password='nopasswd'\n )\n client.propfind()\n\n @not_raises(AuthorizationError)\n def test_func__rights_read_user_calendar__ok__as_lawrence(self):\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='lawrence-not-real-email@fsf.local',\n password='foobarbaz'\n )\n user = DBSession.query(User).filter(\n User.email == 'lawrence-not-real-email@fsf.local'\n ).one()\n user_calendar_url = CalendarManager.get_user_calendar_url(user.user_id)\n caldav.Calendar(\n parent=client,\n client=client,\n url=user_calendar_url\n ).events()\n\n @raises(AuthorizationError)\n def test_func__rights_read_user_calendar__fail__as_john_doe(self):\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='john.doe@foo.local',\n password='nopasswd'\n )\n other_user = DBSession.query(User).filter(\n User.email == 'admin@admin.admin'\n ).one()\n user_calendar_url = CalendarManager.get_user_calendar_url(other_user.user_id)\n caldav.Calendar(\n parent=client,\n client=client,\n url=user_calendar_url\n ).events()\n\n @not_raises(AuthorizationError)\n def test_func__rights_read_workspace_calendar__ok__as_owner(self):\n lawrence = DBSession.query(User).filter(\n User.email == 'lawrence-not-real-email@fsf.local'\n ).one()\n workspace = WorkspaceApi(lawrence).create_workspace(\n 'workspace_1',\n save_now=False\n )\n workspace.calendar_enabled = True\n DBSession.flush()\n\n workspace_calendar_url = CalendarManager.get_workspace_calendar_url(\n workspace.workspace_id\n )\n\n transaction.commit()\n\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='lawrence-not-real-email@fsf.local',\n password='foobarbaz'\n )\n caldav.Calendar(\n parent=client,\n client=client,\n url=workspace_calendar_url\n ).events()\n\n @raises(AuthorizationError)\n def test_func__rights_read_workspace_calendar__fail__as_unauthorized(self):\n lawrence = DBSession.query(User).filter(\n User.email == 'lawrence-not-real-email@fsf.local'\n ).one()\n workspace = WorkspaceApi(lawrence).create_workspace(\n 'workspace_1',\n save_now=False\n )\n workspace.calendar_enabled = True\n DBSession.flush()\n\n workspace_calendar_url = 
CalendarManager.get_workspace_calendar_url(\n workspace.workspace_id\n )\n\n transaction.commit()\n\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='bob@fsf.local',\n password='foobarbaz'\n )\n caldav.Calendar(\n parent=client,\n client=client,\n url=workspace_calendar_url\n ).events()\n\n def test_func__event_create__ok__nominal_case(self):\n lawrence = DBSession.query(User).filter(\n User.email == 'lawrence-not-real-email@fsf.local'\n ).one()\n radicale_base_url = CalendarManager.get_base_url()\n client = caldav.DAVClient(\n radicale_base_url,\n username='lawrence-not-real-email@fsf.local',\n password='foobarbaz'\n )\n user_calendar_url = CalendarManager.get_user_calendar_url(\n lawrence.user_id\n )\n user_calendar = caldav.Calendar(\n parent=client,\n client=client,\n url=user_calendar_url\n )\n\n event_ics = \"\"\"BEGIN:VCALENDAR\nVERSION:2.0\nPRODID:-//Example Corp.//CalDAV Client//EN\nBEGIN:VEVENT\nUID:1234567890\nDTSTAMP:20100510T182145Z\nDTSTART:20100512T170000Z\nDTEND:20100512T180000Z\nSUMMARY:This is an event\nLOCATION:Here\nEND:VEVENT\nEND:VCALENDAR\n\"\"\"\n user_calendar.add_event(event_ics)\n user_calendar.save()\n\n daemons.execute_in_thread('radicale', lambda: transaction.commit())\n # TODO - 20160606 - Bastien: lock should be better here ?\n time.sleep(3) # Wait for be sure transaction commited in daemon\n transaction.commit()\n try:\n event = DBSession.query(Content) \\\n .filter(Content.label == 'This is an event') \\\n .filter(Content.owner_id == lawrence.user_id) \\\n .filter(Content.id == ContentRevisionRO.content_id) \\\n .one()\n except NoResultFound:\n ok_(False, 'Content record should exist for '\n '\"This is an event\" label')\n\n eq_(event.properties['location'], 'Here')\n eq_(event.properties['start'], '2010-05-12 18:00:00+0000')\n eq_(event.properties['end'], '2010-05-12 17:00:00+0000')\n\n def test_created_user_radicale_calendar(self):\n self._connect_user(\n 'admin@admin.admin',\n 'admin@admin.admin',\n )\n\n user_count = DBSession.query(User)\\\n .filter(User.email == 'an-other-email@test.local').count()\n eq_(0, user_count, 'User should not exist yet')\n\n radicale_users_folder = '{0}/user'\\\n .format(config.get('radicale.server.filesystem.folder'))\n eq_(\n False,\n os.path.isdir(radicale_users_folder),\n 'Radicale users folder should not exist yet',\n )\n\n # Create a new user, his calendar should be created to\n try_post_user = self.app.post(\n '/admin/users',\n OrderedDict([\n ('name', 'TEST'),\n ('email', 'an-other-email@test.local'),\n ('password', 'an-other-email@test.local'),\n ('is_tracim_manager', 'off'),\n ('is_tracim_admin', 'off'),\n ('send_email', 'off'),\n ])\n )\n\n eq_(try_post_user.status_code, 302,\n \"Code should be 302, but is %d\" % try_post_user.status_code)\n\n users_calendars = len([\n name for name in os.listdir(radicale_users_folder)\n if name.endswith('.ics')\n ])\n\n user = DBSession.query(User) \\\n .filter(User.email == 'an-other-email@test.local').one()\n\n eq_(1, users_calendars, 'Radicale user path should list 1 calendar')\n user_calendar = '{0}/{1}.ics'.format(\n radicale_users_folder,\n user.user_id,\n )\n user_calendar_exist = os.path.isfile(user_calendar)\n eq_(True, user_calendar_exist, 'User calendar should be created')\n\n def test_created_workspace_radicale_calendar(self):\n self._connect_user(\n 'admin@admin.admin',\n 'admin@admin.admin',\n )\n\n workspaces_count = DBSession.query(Workspace)\\\n .filter(Workspace.label == 'WTESTCAL').count()\n 
eq_(0, workspaces_count, 'Workspace should not exist yet !')\n\n radicale_workspaces_folder = '{0}/workspace'\\\n .format(config.get('radicale.server.filesystem.folder'))\n eq_(\n False,\n os.path.isdir(radicale_workspaces_folder),\n 'Radicale workskpaces folder should not exist yet',\n )\n\n # Create a new workspace, his calendar should be created to\n try_post_workspace = self.app.post(\n '/admin/workspaces',\n OrderedDict([\n ('name', 'WTESTCAL'),\n ('description', 'WTESTCALDESCR'),\n ('calendar_enabled', 'on'),\n ])\n )\n\n eq_(try_post_workspace.status_code, 302,\n \"Code should be 302, but is %d\" % try_post_workspace.status_code)\n\n workspaces_calendars = len([\n name for name in os.listdir(radicale_workspaces_folder)\n if name.endswith('.ics')\n ])\n\n workspace = DBSession.query(Workspace) \\\n .filter(Workspace.label == 'WTESTCAL').one()\n\n eq_(\n 1,\n workspaces_calendars,\n 'Radicale workspace path should list 1 calendar',\n )\n workspace_calendar = '{0}/{1}.ics'.format(\n radicale_workspaces_folder,\n workspace.workspace_id,\n )\n workspace_calendar_exist = os.path.isfile(workspace_calendar)\n eq_(\n True,\n workspace_calendar_exist,\n 'Workspace calendar should be created',\n )\n\n def unit_test__disable_workspace_disable_file__ok__nominal_case(self):\n self._connect_user(\n 'admin@admin.admin',\n 'admin@admin.admin',\n )\n radicale_workspaces_folder = '{0}/workspace'.format(\n config.get('radicale.server.filesystem.folder'),\n )\n delete_radicale_workspaces_folder = '{0}/workspace/deleted'.format(\n config.get('radicale.server.filesystem.folder'),\n )\n\n # Core after assume \"test_created_workspace_radicale_calendar\" is ok\n self.app.post(\n '/admin/workspaces',\n OrderedDict([\n ('name', 'WTESTCAL2'),\n ('description', 'WTESTCAL2DESCR'),\n ('calendar_enabled', 'on'),\n ])\n )\n created_workspace = DBSession.query(Workspace)\\\n .filter(Workspace.label == 'WTESTCAL2')\\\n .one()\n disable_response = self.app.put(\n '/admin/workspaces/{}?_method=PUT'.format(\n created_workspace.workspace_id,\n ),\n OrderedDict([\n ('name', 'WTESTCAL2'),\n ('description', 'WTESTCAL2DESCR'),\n ('calendar_enabled', 'off'),\n ])\n )\n eq_(disable_response.status_code, 302,\n \"Code should be 302, but is %d\" % disable_response.status_code)\n workspaces_calendars = [\n name for name in\n os.listdir(radicale_workspaces_folder)\n if name.endswith('.ics')\n ]\n deleted_workspaces_calendars = [\n name for name in\n os.listdir(delete_radicale_workspaces_folder)\n if name.endswith('.ics')\n ]\n\n eq_(\n 0,\n len(workspaces_calendars),\n msg='No workspace ics file should exist, but {} found'.format(\n len(workspaces_calendars),\n ),\n )\n eq_(\n 1,\n len(deleted_workspaces_calendars),\n msg='1 deleted workspace ics file should exist, but {} found'\n .format(\n len(deleted_workspaces_calendars),\n ),\n )\n workspace_ics_file_name = '{}.ics'.format(\n created_workspace.workspace_id\n )\n ok_(\n workspace_ics_file_name in deleted_workspaces_calendars,\n '{} should be in deleted workspace calendar folder'.format(\n workspace_ics_file_name\n ),\n )\n\n def unit_test__re_enable_workspace_re_enable_file__ok__nominal_case(self):\n self._connect_user(\n 'admin@admin.admin',\n 'admin@admin.admin',\n )\n radicale_workspaces_folder = '{0}/workspace'.format(\n config.get('radicale.server.filesystem.folder'),\n )\n delete_radicale_workspaces_folder = '{0}/workspace/deleted'.format(\n config.get('radicale.server.filesystem.folder'),\n )\n\n # Core after assume\n # 
\"unit_test__disable_workspace_disable_file__ok__nominal_case\" is ok\n self.app.post(\n '/admin/workspaces',\n OrderedDict([\n ('name', 'WTESTCAL2'),\n ('description', 'WTESTCAL2DESCR'),\n ('calendar_enabled', 'on'),\n ])\n )\n created_workspace = DBSession.query(Workspace) \\\n .filter(Workspace.label == 'WTESTCAL2') \\\n .one()\n self.app.put(\n '/admin/workspaces/{}?_method=PUT'.format(\n created_workspace.workspace_id,\n ),\n OrderedDict([\n ('name', 'WTESTCAL2'),\n ('description', 'WTESTCAL2DESCR'),\n ('calendar_enabled', 'off'),\n ])\n )\n re_enable_response = self.app.put(\n '/admin/workspaces/{}?_method=PUT'.format(\n created_workspace.workspace_id,\n ),\n OrderedDict([\n ('name', 'WTESTCAL2'),\n ('description', 'WTESTCAL2DESCR'),\n ('calendar_enabled', 'on'),\n ])\n )\n eq_(re_enable_response.status_code, 302,\n \"Code should be 302, but is %d\" % re_enable_response.status_code)\n workspaces_calendars = [\n name for name in\n os.listdir(radicale_workspaces_folder)\n if name.endswith('.ics')\n ]\n deleted_workspaces_calendars = [\n name for name in\n os.listdir(delete_radicale_workspaces_folder)\n if name.endswith('.ics')\n ]\n\n eq_(\n 1,\n len(workspaces_calendars),\n msg='1 workspace ics file should exist, but {} found'.format(\n len(workspaces_calendars),\n ),\n )\n eq_(\n 0,\n len(deleted_workspaces_calendars),\n msg='0 deleted workspace ics file should exist, but {} found'\n .format(\n len(deleted_workspaces_calendars),\n ),\n )\n workspace_ics_file_name = '{}.ics'.format(\n created_workspace.workspace_id\n )\n ok_(\n workspace_ics_file_name in workspaces_calendars,\n '{} should be in workspace calendar folder'.format(\n workspace_ics_file_name\n ),\n )\n","sub_path":"tracim/tracim/tests/functional/test_calendar.py","file_name":"test_calendar.py","file_ext":"py","file_size_in_byte":16137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"464829729","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def recoverTree(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n '''\n InOrder Traversal\n '''\n value = []\n x = y = float('-inf')\n self.inOrder(root, value)\n # self.findSwappedNumber(value, x, y) # 1\n x, y = self.findSwappedNumber(value, x, y)\n self.recover(root, x, y)\n \n def inOrder(self, root, value):\n if not root: return # 3\n self.inOrder(root.left, value)\n value.append(root.val)\n self.inOrder(root.right, value)\n \n def findSwappedNumber(self, value, x, y):\n for i in range(len(value) - 1):\n if value[i] > value[i+1]:\n y = value[i+1]\n if x == float('-inf'): x = value[i] # 2\n else: break\n \n return x, y\n \n def recover(self, root, x, y):\n if not root: return # 3\n if root.val == x:\n root.val = y\n elif root.val == y:\n root.val = x\n \n self.recover(root.left, x, y)\n self.recover(root.right, x, y)\n \n # TC: O(n)\n \n # SC: O(n)\n \n # 1. this line wont alter the value of x and y, they will still be -inf\n # by the time of findSwappedNumber() returns. This differs with c++\n # ref to this link to see why:\n # https://stackoverflow.com/questions/575196/\n # 2. This is to deal with when the swapped number are next to each other \n # or not.\n # 3. 
Remember to check corner cases!!!!!\n \n # ref: https://www.youtube.com/watch?v=H3PPKuyzKro\n","sub_path":"99_RecoverBinarySearchTree/99_RecoverBinarySearchTree_1.py","file_name":"99_RecoverBinarySearchTree_1.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"370236983","text":"# test_idm.py\n\nimport pandas as pd\nfrom intersim.datautils import *\nfrom intersim.policies.idm import IDM\nfrom intersim import RoundaboutSimulator\nimport matplotlib.animation as animation\nfrom intersim.viz.animatedviz import AnimatedViz\nimport matplotlib.pyplot as plt\n\ndef main():\n\n # load a trackfile\n df = pd.read_csv('datasets/trackfiles/DR_USA_Roundabout_FT/vehicle_tracks_000.csv')\n\n stv = df_to_stackedvehicletraj(df)\n\n sim = RoundaboutSimulator(stv)\n\n states = []\n s,_ = sim.reset()\n s = s.reshape(-1,5)\n states.append(s)\n\n idm = IDM(stv.lengths)\n\n for i in range(150):\n v = s[:,2:3]\n nni = ~torch.isnan(v)\n a = idm(s.reshape(-1))\n if torch.any(torch.isnan(a[nni])):\n import ipdb\n ipdb.set_trace()\n s,_ = sim.step(a)\n\n s = s.reshape(-1,5)\n\n states.append(s)\n\n states = torch.stack(states).reshape(151,-1)\n\n fig = plt.figure()\n ax = plt.axes(\n xlim=(900, 1100), ylim=(900, 1100)\n )\n ax.set_aspect('equal', 'box')\n\n osm = 'datasets/maps/DR_USA_Roundabout_FT.osm'\n\n av = AnimatedViz(ax, osm, states, stv.lengths, stv.widths)\n\n ani = animation.FuncAnimation(fig, av.animate, frames=len(states),\n interval=20, blit=True, init_func=av.initfun, \n repeat=True)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()","sub_path":"test_scripts/test_idm.py","file_name":"test_idm.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"169933840","text":"from room import Room\nfrom player import Player\nfrom item import Item\nimport textwrap\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"),\n}\n\n# Link rooms together\n\nroom['outside'].doors['n'] = room['foyer']\nroom['foyer'].doors['s'] = room['outside']\nroom['foyer'].doors['n'] = room['overlook']\nroom['foyer'].doors['e'] = room['narrow']\nroom['overlook'].doors['s'] = room['foyer']\nroom['narrow'].doors['w'] = room['foyer']\nroom['narrow'].doors['n'] = room['treasure']\nroom['treasure'].doors['s'] = room['narrow']\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\n# Is\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n# Every option maps to a function that performs the aciton\n\nplayer1 = Player(name=\"John\", room=room[\"outside\"])\nsword = Item(\"Sting\", \"Pointy\")\nplayer1.room.add_item(sword)\nitemNameHolder = \"\"\nrunProgram = True\nchoices = []\n\n\ndef killProgram():\n global runProgram\n runProgram = False\n print(\"Thanks for playing!\")\n# , \"n\":travel(\"n\"), \"s\":travel(\"s\"), \"e\":travel(\"e\"), \"w\":travel(\"w\"), \"q\": killProgram(), \"sword\":player1.add_item(sword)\n\n\ndef print_commands():\n commands = [\"\\ni: inventory\\nn: Go North\\ns: Go South\\ne: Go East\\nw: Go West\\nq: Quit Game\\nlook: See Items Inside Room\\ninteract: Interact\\ntake [itemName]: Picks up and item\\ndrop [itemName]: Drops the item in the room\"]\n for key in commands:\n print(key)\n\n\ndef travel(dir):\n try:\n if(player1.room.dir_exists(dir)):\n player1.room = player1.room.doors[dir]\n else:\n print(\"You can't go that way\")\n except ValueError:\n print(\"Invalid Response\")\n\n\ndef look():\n # Prints the current description (the textwrap module might be useful here).\n print(\"Room Description: \", textwrap.fill(player1.get_roomDesc()))\n player1.room.print_items()\n\n\ndef interact():\n input(\"Type `get [item]` or drop your item by typing `drop [item]`: \\n\")\n# , \"drop\":drop_item(itemNameHolder)\n# , \"take\":take_item(itemNameHolder)\n#commands={\"i\":player1.print_items(), \"n\":travel(\"n\"), \"s\":travel(\"s\"), \"e\":travel(\"e\"), \"w\":travel(\"w\"), \"q\": killProgram(), \"sword\":player1.add_item(sword) }\n\n\ndef command(command):\n if command == \"i\":\n player1.print_items()\n elif command == \"c\":\n print_commands()\n elif command == \"n\":\n travel(\"n\")\n elif command == \"s\":\n travel(\"s\")\n elif command == \"e\":\n travel(\"e\")\n elif command == \"w\":\n travel(\"w\")\n elif command == \"q\":\n killProgram()\n elif command == \"look\":\n look()\n elif command == \"interact\":\n interact()\n # elif command ==\"l\":\n # player1.room.print_items()\n elif command == \"sword\":\n player1.add_item(sword)\n\n\ndef longCommand(command, itemName):\n if command == \"take\":\n rem_room_item = player1.room.remove_item(itemName)\n player1.add_item(rem_room_item)\n\n elif command == \"drop\":\n rem_player_item = player1.remove_item(itemName)\n player1.room.add_item(rem_player_item)\n print(f\"you have dropped {itemName}\")\n\n\nwhile runProgram:\n ''' REPL '''\n # print current room name\n print(f\"\\n\\033[1m{player1.get_room()}\\033[0m\")\n # Asks user input for which room to move to next\n stringIn = input(\n \"\\x1B[3mPlease enter a command. 
Press c for commands\\x1B[23m\\n\")\n choices = stringIn.split(\" \")\n\n if len(choices) == 1:\n command(stringIn)\n elif len(choices) == 2:\n longCommand(choices[0], choices[1])\n else:\n print(\"You use many big words. Me no understand!\")\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"509578658","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nimport time\nimport re\n\nfrom tiny_test_fw import DUT, App, TinyFW\nfrom ttfw_bl import BL602App, BL602DUT\n\n\n@TinyFW.test_method(app=BL602App.BL602App, dut=BL602DUT.BL602TyMbDUT, test_suite_name='sdk_app_gpio_tc')\ndef sdk_app_gpio_tc(env, extra_data):\n # first, flash dut\n # then, test\n dut = env.get_dut(\"port0\", \"fake app path\")\n print('Flashing app')\n dut.flash_app(env.log_path, env.get_variable('flash'))\n print('Starting app')\n dut.start_app()\n\n try:\n dut.expect(\"Booting BL602 Chip...\", timeout=1)\n print('BL602 booted')\n dut.expect('Init CLI with event Driven', timeout=1)\n print('BL602 CLI init done')\n\n dut.write('gpio-func 8 0 0 0')\n dut.expect(\"GPIO8 is set output with null pullup null pulldown\", timeout=1)\n\n dut.write('gpio-set 8 1')\n dut.expect(\"GPIO8 is set to high\", timeout=1)\n\n dut.write('gpio-set 8 0')\n dut.expect(\"GPIO8 is set to lo\", timeout=1)\n\n dut.write('gpio-get 8')\n dut.expect(\"GPIO8 val is low\", timeout=1)\n dut.halt()\n except DUT.ExpectTimeout:\n print('ENV_TEST_FAILURE: BL602 gpio test failed')\n raise\n\n\nif __name__ == '__main__':\n sdk_app_gpio_tc()\n","sub_path":"customer_app/sdk_app_gpio/tests/sdk_app_gpio_tc_test.py","file_name":"sdk_app_gpio_tc_test.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"339843825","text":"from threading import Thread\r\nimport serial\r\nimport serial.tools.list_ports\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\nimport numpy\r\nimport math\r\nimport numpy.fft\r\nimport scipy.signal\r\nimport os\r\nimport time\r\n\r\n\r\n\r\nliste_concentration =[]\r\ntemps=[]\r\nt_acquisition= 20\r\n\r\nfig=plt.figure()\r\nline0, = plt.plot([],[])\r\nCmax = 5000\r\n\r\nplt.title('Concentration CO2=f(temps)') # titre du graphique\r\nplt.xlim(0, t_acquisition)\r\nplt.ylim(0,Cmax)\r\nplt.xlabel(\"temps en s\")\r\nplt.ylabel(\"Concentration CO2 en ppm\")\r\n\r\n\r\n\r\n\r\n# fonction à définir quand blit=True\r\n# crée l'arrière de l'animation qui sera présent sur chaque image\r\ndef init():\r\n line0.set_data([],[])\r\n\r\n\r\n return line0,\r\n\r\n\r\n\r\ndef animate(i):\r\n\r\n line0.set_data(temps, liste_concentration)\r\n\r\n\r\n\r\nani = animation.FuncAnimation(fig,animate,frames = 10000,interval=20)\r\n\r\nplt.show()\r\n\r\n\r\n######################################### COMMUNICATION AVEC CARTE ARDUINO ET DEFINITION DES BROCHES ET VARIABLES #######################################################\r\n\r\n\r\nne=2000\r\nte=t_acquisition/ne\r\n\r\n\r\n\r\n\r\n\r\nlisteCO2 = numpy.zeros(ne,dtype=float)\r\n\r\n\r\nliste_temps = numpy.zeros(ne,dtype=float)\r\nports = list(serial.tools.list_ports.comports())\r\nfor p in ports:\r\n\r\n print (p)\r\n if 'Arduino' in p.description :\r\n mData = serial.Serial(p.device,9600)\r\n\r\n\r\nprint(mData.is_open) #Print and check if the port is open\r\nprint(mData.name) # Print the name of the 
port\r\nt_reel =0\r\nfor k in range(ne):\r\n\r\n while t_reel < t_acquisition:\r\n t_clock = time.time()\r\n line = mData.readline() # Read the line from the serial port.\r\n print (line)\r\n temps.append(t_reel)\r\n listeDonnees = line.strip()\r\n listeDonnees = line.split() # on sépare l'abcisse et l'ordonnée\r\n\r\n print(listeDonnees) # print the line in the console.\r\n if len(listeDonnees)!=0:\r\n concentration = float(listeDonnees[1].decode())\r\n listeCO2[k]= concentration\r\n liste_concentration.append(listeCO2[k])\r\n\r\n\r\n\r\n else :\r\n liste_concentration.append(liste_concentration[-1])\r\n\r\n t_clock_new = time.time()\r\n t_reel = t_reel + t_clock_new-t_clock # instant réel\r\n\r\n t_clock = t_clock_new\r\n print(\"t = %f\"%(temps[k]))\r\n print(\"m = %f\"%(liste_concentration[k]))\r\n # time.sleep((k+1)*te-t_reel) # calage sur le temps réel à chaque itération\r\n # time.sleep(2) # calage sur le temps réel à chaque itération\r\n\r\n\r\nmData.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n# plt.title('Concentration CO2=f(temps)') # titre du graphique\r\n# plt.scatter(temps,liste_concentration, color ='r', marker = 'o') # On affiche les points de coordonnées (I,U) avec des points rouges\r\n# plt.xlabel('temps en s') # nommer l'axe des abscisses\r\n# plt.ylabel('Concentration CO2 en ppm') # nommer l'axe des ordonnéees\r\n# # plt.xlim (min(liste_temps),max(liste_temps)) #limtes pour les axes avec les valeurs extrêmes\r\n# # plt.ylim(min(liste_concentration),max(liste_concentration))\r\n# plt.xlim (0,50) #limtes pour les axes avec les valeurs extrêmes\r\n# plt.ylim(0,10000)\r\n#\r\n# plt.show() #afficher le graphique (ne rien mettre dans la parenthèse)\r\n\r\n\r\n","sub_path":"Capteur CO2/capteur_CO2_graphe.py","file_name":"capteur_CO2_graphe.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"564848642","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.animation import ArtistAnimation\n\nfig=plt.figure()\nax1=fig.add_subplot(3,1,1)\nax2=fig.add_subplot(3,1,2)\nax3=fig.add_subplot(3,1,3)\n\n##各コマの画像を格納\nimage_list=[]\n##delayの初期\n\n\n##信号の作成\n##遅延がある信号の作成\ntarget_sig=np.random.normal(size=10000)*1.0\n##一つ目の信号作成\nsig1=np.random.normal(size=32768)*0.0\n##二つ目の信号作成\nsig2=np.random.normal(size=32768)*0.0\nsig2[:10000]+=target_sig\ndelay=0\n\nfor i in range(50):\n sig1[delay:delay+10000]+=target_sig \n image1=ax1.plot(sig1)\n image2=ax2.plot(sig2)\n corr=np.correlate(sig1,sig2,\"full\")\n image3=ax3.plot(corr)\n image_list.append(image1+image2+image3)\n sig1[delay:delay+10000]-=target_sig\n delay+=300\n\nani=ArtistAnimation(fig,image_list,interval=200)\nani.save(\"test.gif\")\n\n\n","sub_path":"Python-file/Cross-correlation_simulation_kanta/animation_cross.py","file_name":"animation_cross.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"433483694","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtSql import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtGui import *\n\nimport sys\nimport os\nimport os, re\nimport os.path\nimport math\nimport psycopg2\nimport datetime\nimport platform\nimport socket\nimport processing\nimport qgis.utils\n\nfrom qgis import *\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom Ui_gpx_shp import Ui_gpx_shp\nfrom objet_tools import *\n\n\npath_absolute = 
os.path.dirname(os.path.realpath(__file__))\n\n\nclass gpx_shpDialog(QDialog):\n\tdef __init__(self):\n\t\t# On récupère le dictionnaire des communes\n\t\tQDialog.__init__(self)\n\t\t# Set up the user interface from Designer.\n\t\tself.ui = Ui_gpx_shp()\n\t\tself.ui.setupUi(self)\n\t\t\n\t\t#treeView\n\t\tself.model = QStandardItemModel()\n\t\tself.model.setHorizontalHeaderLabels([self.tr(\"DATA_SERVER\")])\n\t\tself.ui.tV_table.setContextMenuPolicy(Qt.CustomContextMenu)\n\t\tself.ui.tV_table.customContextMenuRequested.connect(self.openMenu)\n\t\tself.ui.tV_table.clicked.connect(self.on_treeView_clicked)\n\n\t\t# SIGNAL SLOT\n\t\tself.connect(self.ui.btn_Cnx, SIGNAL(\"clicked()\"), self.loginuser)\n\t\tself.connect(self.ui.r_spatial, SIGNAL(\"clicked()\"), self.radio1_clicked)\n\t\tself.connect(self.ui.r_nonspatial, SIGNAL(\"clicked()\"), self.radio2_clicked)\n\t\tself.ui.liste_base.currentItemChanged.connect(self.chargertable)\n\t\tself.connect(self.ui.btn_Ajout, SIGNAL(\"clicked()\"), self.ajouter)\n\t\tself.connect(self.ui.btn_Annule_Select, SIGNAL(\"clicked()\"), self.annule_tous)\n\n\t\tself.ui.son_statut.setVisible(False)\n\t\tself.ui.base_encours.setVisible(False)\n\t\tself.ui.btn_Ajout.setDisabled(True)\n\t\tself.ui.btn_Annule_Select.setDisabled(True)\n\t\t\n\t\trep_epidemio=os.path.join(path_absolute, 'tmp_dos/epidemio.png')\n\t\trep_IPM=os.path.join(path_absolute, 'tmp_dos/IPM.png')\n\t\tself.ui.label_epidemio.setPixmap(QtGui.QPixmap(rep_epidemio))\n\t\tself.ui.label_ipm.setPixmap(QtGui.QPixmap(rep_IPM))\n\t\t\n\n\tdef openMenu(self, position):\n\t\tindexes = self.ui.tV_table.selectedIndexes()\n\t\tif len(indexes) > 0:\n\n\t\t\tlevel = 0\n\t\t\tindex = indexes[0]\n\t\t\twhile index.parent().isValid():\n\t\t\t\tindex = index.parent()\n\t\t\t\tlevel += 1\n\t\t\n\t\tmenu = QMenu()\n\t\tif level == 0:\n\t\t\tmenu.addAction(self.tr(\"Edit person\"))\n\t\telif level == 1:\n\t\t\tmenu.addAction(self.tr(\"Edit DATA_SERVER/container\"))\n\t\telif level == 2:\n\t\t\tmenu.addAction(self.tr(\"Edit DATA_SERVER\"))\n\n\t\tmenu.exec_(self.ui.tV_table.viewport().mapToGlobal(position))\n\t\n\t\n\tdef on_treeView_clicked(self):\n\t\t\"\"\"\n\t\tindex = self.ui.tV_table.selectedIndexes()[0]\n\t\tfileName = index.model().itemFromIndex(index).text()\n\t\tisanZaza=0\n\t\tZanany = index.model().itemFromIndex(index).rowCount()\n\t\tfor isa in range (0, Zanany):\n\t\t\tisanZaza +=1\n\t\t\n\t\tif isanZaza == 0:\n\t\t\tQtGui.QMessageBox.information(self, 'Message', fileName)\n\t\telse:\n\t\t\tQtGui.QMessageBox.information(self, 'Message', 'reniny')\t\n\t\t\"\"\"\n\t\tcls_SQL = Class_objet(self.ui)\n\t\tcls_SQL.on_treeView_clicked_fonc()\n\t\n\tdef loginuser(self):\n\t\ttxt_login = self.ui.son_login.toPlainText()\n\t\ttxt_mdp = self.ui.son_mdp.text()\n\t\tif txt_login != '' and txt_mdp != '':\n\t\t\tcls_SQL = Class_objet(self.ui)\n\t\t\tcls_SQL.login_ici()\n\t\telse:\n\t\t\tQtGui.QMessageBox.information(self, 'Message', \"Veuillez identifier votre login et mot de passe\")\n\t\t\t\n\n\tdef radio1_clicked(self):\n\t\tcls_radio1 = Class_objet(self.ui)\n\t\tcls_radio1.click_radio1()\n\t\t#vider\n\t\t\t\t\n\tdef radio2_clicked(self):\n\t\tcls_radio2 = Class_objet(self.ui)\n\t\tcls_radio2.click_radio2()\n\t\t#vider\n\t\t\n\tdef chargertable(self):\n\t\tcls_chargertable = Class_objet(self.ui)\n\t\tcls_chargertable.chargertable_fonc()\n\t\t\n\n\tdef ajouter(self):\n\t\tisanZananySeleky=0\n\t\t#for index in self.ui.tV_table.selectedIndexes():\n\t\tfor index in 
self.ui.tV_table.selectionModel().selectedIndexes():\n\t\t\tisanZaza=0\n\t\t\tZanany = index.model().itemFromIndex(index).rowCount()\n\t\t\tfor isa in range (0, Zanany):\n\t\t\t\tisanZaza +=1\n\t\t\t\t\n\t\t\tif isanZaza == 0:\n\t\t\t\tisanZananySeleky +=1\n\t\t\t\tfileName = index.model().itemFromIndex(index).text()\n\t\t\t\t#QtGui.QMessageBox.information(self, 'isan zaza :', fileName)\n\t\t\t\t#appel function\n\t\t\t\tcls_ajouter = Class_objet(self.ui)\n\t\t\t\tcls_ajouter.ajouter_fonc(fileName)\n\t\t\n\t\t\"\"\"\n\t\t#QtGui.QMessageBox.information(self, 'isan zaza :', str(isanZananySeleky))\n\t\tif isanZananySeleky > 0:\n\t\t\tcls_ajouter = Class_objet(self.ui)\n\t\t\tcls_ajouter.ajouter_fonc()\n\t\t\"\"\"\n\t\n\tdef annule_tous(self):\n\t\t\"\"\"\n\t\tif self.ui.liste_table.count()>0:\n\t\t\tself.ui.liste_table.clearSelection()\n\t\t\"\"\"\n\t\tself.ui.tV_table.clearSelection()\n\t\t\n\t\t\n\t\n\n\t","sub_path":"tmp_dos/postgis_sig_dialog.py","file_name":"postgis_sig_dialog.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"225740717","text":"from scipy.io import wavfile as wav\r\nfrom DTMF2_v3 import DTMF\r\nimport numpy as np\r\nimport os\r\n\r\n\r\ndef save_as_csv(results):\r\n with open(\"Results2.csv\", \"w\") as resFile:\r\n resFile.write(\"Id,Predicted\\n\")\r\n for sample in results:\r\n resFile.write(\"{},'{}'\\n\".format(sample[0], \"\".join(sample[1])))\r\n\r\nresults = []\r\nfor sample in os.listdir('Phase2-Unlabeled'):\r\n rate, data = wav.read(os.path.join('Phase2-Unlabeled', sample))\r\n sample = sample.split('.')[0]\r\n if len(data.T.shape) > 1:\r\n data = data.T[0] # Extracting the main channel of the sound\r\n result = DTMF(data, rate)\r\n results.append((sample, result))\r\n\r\n\r\nsave_as_csv(results)\r\n","sub_path":"phase2/predictor2.py","file_name":"predictor2.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"447674954","text":"T=int(input())\n\nfor t in range(1,T+1):\n\ts=input()\n\tl=list(s)\n\tres=0\n\twhile '-' in l:\n\t\tend=0\n\t\tcurr=l[end]\n\t\twhile end+1\" in line:\n tokens = re.split(r\"[ \\t]*-->[ \\t]*\", line)\n inst = tokens[0]\n op = tokens[1]\n imap[inst] = op\n logging.debug(\"added mapping: {} --> {}\".format(inst, op))\n else:\n logging.warning(\"unknown line: {}\".format(line))\n\n if \"#width\" not in meta_attrib:\n logging.error(\"\\\"#width\\\" was never defined\")\n sys.exit(-1)\n\ndef imap_try(instr):\n op = []\n\n logging.debug(\"trying to map \\\"{}\\\"\".format(instr))\n if instr in imap:\n logging.debug(\"direct mapping\")\n op_string_raw = imap[instr]\n op_string = re.sub(r\"[ \\t]+\", \"\", op_string_raw)\n i = 8\n while True:\n b_str = op_string[i*-1:]\n b = int(b_str, 2)\n op += [b]\n i += 8\n logging.debug(\"direct map \\\"{}\\\" to \\\"{}\\\"\".format(instr, b_str))\n if i >= len(op_string):\n break\n\n return bytes(op)\n\ndef source_process(source_path, output_path):\n logging.debug(\"opening source file: \\\"{}\\\"\".format(source_path))\n source_file = open(source_path, \"r\")\n logging.debug(\"reading source file\")\n source_text = source_file.read()\n\n logging.debug(\"opening output file: \\\"{}\\\"\".format(output_path))\n output_file = open(output_path, \"wb\")\n\n logging.debug(\"processing source text\")\n source_text = re.sub(r\"/\\*.*\\*/\", \"\", source_text, flags=re.DOTALL)\n source_text = re.sub(r\"//.*\", \"\", 
source_text)\n source_text_lines = str.splitlines(source_text)\n for line_raw in source_text_lines:\n line = str.strip(line_raw)\n if line == \"\":\n continue\n op = imap_try(line)\n if len(op) > 0:\n output_file.write(op)\n elif re.match(\"^[a-z-A-Z0-9]:$\", line):\n print(\"label: \\\"{}\\\"\".format(line))\n else:\n logging.warning(\"unknown line: \\\"{}\\\"\".format(line))\n\nlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\n\nimap_path = sys.argv[1]\nlogging.debug(\"imap path is {}\".format(imap_path))\noutput_path = sys.argv[2]\nlogging.debug(\"output path is {}\".format(output_path))\nsource_pathlist = sys.argv[3:]\nlogging.debug(\"source paths are {}\".format(source_pathlist))\n\nimap_process(imap_path)\n\nif len(source_pathlist) < 1:\n logging.warning(\"no source files\")\nfor source_path in source_pathlist:\n source_process(source_path, output_path)\n\nlogging.info(\"done\")\n\n","sub_path":"alas.py","file_name":"alas.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"85944726","text":"#!/usr/bin/env python3\n\nimport os\n\ndirname = os.path.dirname(__file__)\ninputfile = os.path.join(dirname, 'input.txt')\n\ndef parse_rules(data):\n\n # Create a dictionary of bags, with a list of sub bags\n bag_rules = {}\n for rule in data:\n outerBag, contents = rule.split(\" bags contain \")\n\n bag_rules[outerBag] = []\n\n if not 'no' in contents:\n inners = ([b.split(\" \") for b in contents.split(', ')])\n\n for bag in inners:\n if len(bag) == 4:\n bag.pop()\n\n bag_rules[outerBag] += ([\"%s %s\" % (bag[1], bag[2])] * int(bag[0]))\n\n return(bag_rules)\n\n\ndef part1(bag_rules):\n print(\"Part 1:\")\n\n\n colour_list = find_bags(1, bag_rules, bag_colour=\"shiny gold\")\n\n print(\"Total Number of bags that can contain shiny gold = {}\".format(len(colour_list)))\n return\n\n\ndef find_bags(depth, bag_rules, bag_colour):\n # print(\"Depth {} - Find bag {}\".format(depth, bag_colour))\n colour_list = set()\n\n for bag, contents in bag_rules.items():\n if (bag_colour in contents):\n colour_list.add(bag)\n colour_list.update(find_bags(depth + 1, bag_rules, bag))\n\n return(colour_list)\n\ndef count_bags(bag_rules, bag_colour):\n bag_count = 0\n for bag in bag_rules[bag_colour]:\n bag_count += 1;\n bag_count += count_bags(bag_rules, bag)\n\n return(bag_count)\n\ndef part2(bag_rules):\n print(\"Part 2:\")\n\n print(\"Total bags = {}\".format(count_bags(bag_rules, \"shiny gold\")))\n return\n\ndef main():\n with open(inputfile) as f:\n data = f.read().splitlines()\n\n bag_rules = parse_rules(data)\n\n part1(bag_rules)\n part2(bag_rules)\n\n\n return\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"Day_7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"96706372","text":"# Logging facility\n#\n# Copyright (C) 2020 Anichang \n#\n# This file may be distributed under the terms of the GNU GPLv3 license.\n\nimport logging, os, multiprocessing, time\nfrom logging import handlers\n\nfrom text import msg\nfrom error import KError as error\nimport process\nlogger = logging.getLogger(__name__)\n\nclass FilterMeta(logging.Filter):\n \"Adds meta information to logger messages.\"\n def filter(self, record):\n # debug\n if record.levelno <= 10:\n record.meta = record.levelname.ljust(8, \" \") + \"] [\"\n # info\n if record.levelno > 10:\n record.meta = 
\" \".ljust(8, \" \") + \"] [\"\n # warning\n if record.levelno > 20:\n record.meta = record.levelname.ljust(8, \" \") + \"] [\"\n #\n record.meta = record.meta + record.name.ljust(8, \" \")\n # error\n if record.levelno > 30:\n pass\n # critical\n if record.levelno > 40:\n pass\n return True\n\nclass FilterMetaDevel(logging.Filter):\n \"Adds meta debug information. For development purposes.\"\n def filter(self, record):\n if record.levelno > 50:\n record.meta = \"DEVEL:\" + record.processName + \":\" + record.filename + \":\" + str(record.lineno) + \":\" + record.funcName\n return True\n\nclass FilterMetaDebug(logging.Filter):\n \"Adds meta debug information.\"\n justmax = 30\n def filter(self, record):\n extras = record.processName + \":\" + record.filename + \":\" + str(record.lineno) + \":\" + record.funcName\n self.justmax = max(self.justmax, len(extras))\n record.meta = record.levelname.ljust(8, \" \") + \"] [\" + extras.ljust(self.justmax, \" \")\n return True\n\n# to be applied after Meta filters\nclass FilterMultiline(logging.Filter):\n \"Format multiline text messages.\"\n def filter(self, record):\n lines = str(record.msg).split('\\n')\n if len(lines) > 1:\n if hasattr(record,\"meta\"):\n spaces = len(str(record.created))+len(record.meta)+10\n else:\n spaces = len(str(record.created))+10\n message = lines[0]+\"\\n\"\n for l in lines[1:]:\n message = message + \" \"*spaces + l + \"\\n\"\n record.msg = message.strip()\n return True\n\n# log listener for central logging of multiprocessing app\n# once started, reads from the given queue and writes to console/file\nclass Writer(process.Base):\n \"Background logging process. Reads from queue, writes to console and file accordingly.\"\n formatter = logging.Formatter(\"%(asctime)s [%(meta)-8s] %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n def setup(self, log_queue, console_level: str = 'info', file_level: str = 'debug', log_dir: str = None, file_size: int = 10485760):\n self.log_queue = log_queue\n self.rollover_info = {}\n # init root logger\n logger = logging.getLogger()\n logger.handlers = []\n if console_level:\n ch = logging.StreamHandler()\n ch.setLevel(console_level.upper())\n ch.addFilter(FilterMeta())\n ch.addFilter(FilterMetaDevel())\n if console_level == 'debug':\n ch.addFilter(FilterMetaDebug())\n ch.addFilter(FilterMultiline())\n ch.setFormatter(self.formatter)\n logger.addHandler(ch)\n if file_level and log_dir:\n fh = handlers.RotatingFileHandler(os.path.join(log_dir, f\"{self.name}.log\"), 'a', file_size, 10)\n fh.setLevel(file_level.upper())\n fh.addFilter(FilterMeta())\n fh.addFilter(FilterMetaDevel())\n if file_level == 'debug':\n fh.addFilter(FilterMetaDebug())\n fh.addFilter(FilterMultiline())\n fh.setFormatter(self.formatter)\n logger.addHandler(fh)\n logger.setLevel(level=logging.DEBUG)\n process.worker_start(self)\n # runner\n def _runner(self, *args):\n while process.running.is_set():\n while not self.log_queue.empty():\n record = self.log_queue.get()\n logger = logging.getLogger(record.name)\n logger.handle(record)\n if self._subpipe.poll():\n obj = self._subpipe.recv()\n if 'showth' in obj:\n self._subpipe.send({'showth':self._showth(), 'eventtime':0.})\n time.sleep(0.1)\n def rollover_set_info(self, name, info):\n self.rollover_info[name] = info\n def rollover_clear_info(self):\n self.rollover_info.clear()\n def rollover_do(self):\n # TODO\n handlers.RotatingFileHandler.doRollover(self)\n lines = [self.rollover_info[name] for name in sorted(self.rollover_info)]\n lines.append(\"=============== Log rollover at %s 
===============\" % (time.asctime(),))\n #self.emit(logger.makeLogRecord({'msg': \"\\n\".join(lines), 'level': logging.INFO}))\n\n","sub_path":"klippy/processlog.py","file_name":"processlog.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"132470379","text":"import RPi.GPIO as GPIO\nimport os\nfrom pprint import pprint\n\nnumber = 0\n\n# use P1 header pin numbering convention\nGPIO.setmode(GPIO.BCM)\n\n#remove warnings\n#GPIO.setwarnings(False) \n\n# Set up the GPIO channels\nGPIO.setup(2, GPIO.OUT) #1\nGPIO.setup(3, GPIO.OUT) #2\nGPIO.setup(17, GPIO.OUT) #3\nGPIO.setup(27, GPIO.OUT) #4\nGPIO.setup(22, GPIO.OUT) #5\nGPIO.setup(10, GPIO.OUT) #6\nGPIO.setup(9, GPIO.OUT) #7\n\ndef clearNumber():\n\tGPIO.output(2, GPIO.LOW)\n\tGPIO.output(3, GPIO.LOW)\n\tGPIO.output(17, GPIO.LOW)\n\tGPIO.output(27, GPIO.LOW)\n\tGPIO.output(22, GPIO.LOW)\n\tGPIO.output(10, GPIO.LOW)\n\tGPIO.output(9, GPIO.LOW)\n\ndef displayNumber(na):\n#\tprint('displayNumber')\n\tclearNumber()\n\tif na[1] == 1:\n\t\tGPIO.output(2, GPIO.HIGH)\n\tif na[2] == 1:\n\t\tGPIO.output(3, GPIO.HIGH)\n\tif na[3] == 1:\n\t\tGPIO.output(17, GPIO.HIGH)\n\tif na[4] == 1:\n\t\tGPIO.output(27, GPIO.HIGH)\n\tif na[5] == 1:\n\t\tGPIO.output(22, GPIO.HIGH)\n\tif na[6] == 1:\n\t\tGPIO.output(10, GPIO.HIGH)\n\tif na[7] == 1:\n\t\tGPIO.output(9, GPIO.HIGH)\n\n\ndef setNumber(number):\n#\tprint('setNumber')\n\tna = [0,0,0,0,0,0,0,0]\n\tif number == 1:\n#\t\tprint('1')\n\t\tna = [0,0,1,1,0,0,0,0]\t\n\tif number == 2:\n\t\tna = [0,1,1,0,1,1,0,1]\n\tif number == 3:\n\t\tna = [0,1,1,1,1,0,0,1]\n\tif number == 4:\n\t\tna = [0,0,1,1,0,0,1,1]\n\tif number == 5:\n\t\tna = [0,1,0,1,1,0,1,1]\n\tif number == 6:\n\t\tna = [0,1,0,1,1,1,1,1]\n\tif number == 7:\n\t\tna = [0,1,1,1,0,0,0,0]\n\tif number == 8:\n\t\tna = [0,1,1,1,1,1,1,1]\n\tif number == 9:\n\t\tna = [0,1,1,1,0,0,1,1]\n\tif number == 0:\n\t\tna = [0,1,1,1,1,1,1,0]\n\tdisplayNumber(na)\n\n\n\nGPIO.output(9, GPIO.HIGH)\nclearNumber()\n#na = [0,0,0,0,0,0,0,0]\nwhile True:\n\tos.system('clear')\n\tprint('Displaying ' + str(number))\n\tnumber=input('Enter a number 0-9. 
Enter -1 to exit:')\n\tif number == -1:\n\t\tclearNumber()\n\t\tbreak\n\telse:\n\t\tsetNumber(number)\n\t\t\n\nos.system('clear')\n#GPIO.cleanup()\n","sub_path":"gpio/7seg.py","file_name":"7seg.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"327646372","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\n\nfrom mmdet.utils import build_from_cfg\nfrom .dataset_wrappers import ConcatDataset, RepeatDataset\nfrom .registry import DATASETS\n\n\ndef _concat_dataset(cfg, default_args=None):\n ann_files = cfg['ann_file']\n img_prefixes = cfg.get('img_prefix', None)\n seg_prefixes = cfg.get('seg_prefix', None)\n proposal_files = cfg.get('proposal_file', None)\n\n datasets = []\n num_dset = len(ann_files)\n for i in range(num_dset):\n data_cfg = copy.deepcopy(cfg)\n data_cfg['ann_file'] = ann_files[i]\n if isinstance(img_prefixes, (list, tuple)):\n data_cfg['img_prefix'] = img_prefixes[i]\n if isinstance(seg_prefixes, (list, tuple)):\n data_cfg['seg_prefix'] = seg_prefixes[i]\n if isinstance(proposal_files, (list, tuple)):\n data_cfg['proposal_file'] = proposal_files[i]\n datasets.append(build_dataset(data_cfg, default_args))\n\n return ConcatDataset(datasets)\n\n\ndef build_dataset(cfg, default_args=None):\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg['ann_file'], (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset\n","sub_path":"PyTorch/contrib/cv/detection/SOLOv1/mmdet/datasets/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"323426239","text":"import cv2\nimport os\n\nout_dir = \"/home/cabe0006/mb20_scratch/chamath/data/evaluation_27/images_in\"\nos.makedirs(out_dir, exist_ok=True)\nvid_dir = \"/home/cabe0006/mb20_scratch/chamath/data/ant_dataset/untagged\"\n\n\ndef convert_frames(vid_path, out_dir, file_name):\n capture = cv2.VideoCapture(vid_path)\n read_count = 0\n print(\"Converting video file: {}\".format(vid_path))\n frames = []\n while True:\n success, image = capture.read()\n if not success:\n break\n cv2.imwrite(os.path.join(out_dir, f\"{file_name}_{read_count:06d}.jpeg\"), image)\n read_count += 1\n if read_count > 300:\n break\n return frames\n\n\ndef process(file_name):\n# file_name = \"CU10L1B1Out_0\"\n video_path = os.path.join(vid_dir, f'{file_name}.mp4')\n convert_frames(video_path, out_dir, file_name)\n\n\nfile_names = [\n \"CU15L1B1In_1\", \"CU15L1B1Out_1\",\n \"CU15L1B4In_1\", \"CU15L1B4Out_1\",\n \"CU25L1B4In_1\", \"CU25L1B4Out_1\",\n \"CU10L1B5In_1\", \"CU10L1B5Out_1\",\n 
\"OU10B1L1In_1\", \"OU10B1L1Out_1\",\n \"OU10B3L3In_1\", \"OU10B3L3Out_1\",\n \"OU50B1L2In_1\", \"OU50B1L2Out_1\",\n \"OU50B2L1In_1\", \"OU50B2L1Out_1\",\n]\nfor file_name in file_names:\n print(file_name)\n process(file_name)\n\n\n\n","sub_path":"detr_video_predicitons/convert_to_frames.py","file_name":"convert_to_frames.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"159722711","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 16 13:22:18 2021\r\n\r\n@author: boris\r\n\"\"\"\r\n\r\nimport argparse\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nnp.set_printoptions(threshold=sys.maxsize)\r\n\r\n#Visualisation: on importe matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"imageName\", help=\"The relative path of the image. The root is where the program is.\", type=str)\r\n\r\nparser.add_argument(\"--edgeReinforcement\", help=\"Enter the output name of your image color for edgeReinforcement. Ex: edge_lena_couleur.bmp\")\r\n\r\nargs = parser.parse_args()\r\nprint(\"The relative path of your image is\", args.imageName)\r\n\r\n#Name of the image put by the user\r\nfilename = args.imageName \r\n\r\n#def contourDetectionImage(filename):\r\ndef visualize(filename):\r\n #Visualisation d’une image avec Matplotlib \r\n img = mpimg.imread(filename)\r\n imgplot = plt.imshow(img)\r\n plt.show()\r\n\r\n#function which convert little_endian to big_endian\r\ndef to_big(val):\r\n big_hex = bytearray.fromhex(val)\r\n big_hex.reverse()\r\n #print(\"Byte array format:\", big_hex)\r\n\r\n str_big = ''.join(format(x, '02x') for x in big_hex)\r\n\r\n return str_big\r\n\r\n#function to open bmp image\r\ndef ouverture_Fichiers_Image(filename):\r\n f_lecture =open(filename,'rb') #read in binary mode\r\n i=1\r\n octet = bytes([0])\r\n \r\n \r\n octets=[]\r\n octets_size=[] #variable containing the size of the image from the header in binary\r\n octets_app=[] #variable containing the application name from the header in binary\r\n octets_size_header=[] #variable containing the size of the image's header from the header in binary\r\n octets_size_width=[] #variable containing the width size of the image from the header in binary\r\n octets_size_height=[] #variable containing the height size of the image from the header in binary\r\n octets_planes=[] #variable containing the number of planes of the picture\r\n octets_colours=[] #variable containing the number of color of the picture \r\n \r\n #Lecture du MAGIC NUMBER\r\n while (i <=2): #lecture Magic number sur 2 octets\r\n octet=f_lecture.read(1) #Lecture octet par octet\r\n octets.append(ord(octet))\r\n print (octet.decode('utf-8'),\" dec=\",ord(octet))\r\n i=i+1\r\n print(\" =>Magic Number =\", octets, \" BM => BitMap\")\r\n \r\n i=1\r\n \r\n #hex variables\r\n hexDecSiz=[]\r\n hexStrSiz=\"\"\r\n hexStrApp=\"\"\r\n hexStrSizHeader=\"\"\r\n hexStrSizWidth=\"\"\r\n hexStrSizHeight=\"\"\r\n hexStrPlanes=\"\"\r\n hexStrColours=\"\"\r\n \r\n \r\n #BLOC ENTETE 54 octets en standard\r\n while (i<=54):\r\n octet=f_lecture.read(1)\r\n if (i>=1 and i<=4): #size of the file in octets\r\n octets_size.append(octet)\r\n #print (octet.hex()) #print the hex\r\n hexStrSiz = hexStrSiz + \" \" + octet.hex() \r\n if (i>=5 and i<=8): #application image\r\n octets_app.append(octet)\r\n hexStrApp = hexStrApp + \" \" + octet.hex() \r\n if (i>=9 and i<=12): #print size of 
the file's header\r\n octets_size_header.append(octet)\r\n hexStrSizHeader = hexStrSizHeader+ \" \" + octet.hex() \r\n if (i>=17 and i<=20): #print size of the picture's width\r\n octets_size_width.append(octet)\r\n hexStrSizWidth = hexStrSizWidth+ \" \" + octet.hex()\r\n if (i>=21 and i<=24): #print size of the picture's height\r\n octets_size_height.append(octet)\r\n hexStrSizHeight = hexStrSizHeight+ \" \" + octet.hex()\r\n if (i>=25 and i<=26): #print number of planes in the image\r\n octets_planes.append(octet)\r\n hexStrPlanes = hexStrPlanes+ \" \" + octet.hex()\r\n if (i>=27 and i<=28): #print number of colours in the image\r\n octets_colours.append(octet)\r\n hexStrColours = hexStrColours+ \" \" + octet.hex()\r\n \r\n i=i+1\r\n big_endian_siz = to_big(hexStrSiz)\r\n #print(\"Big endian hex:\", big_endian_siz)\r\n #print(\"Hex to int:\", int(big_endian_siz, 16))\r\n print(hexStrSiz, \" =>Taille de fichier =\", int(big_endian_siz, 16), \" octets\")\r\n \r\n hexStrSiz = hexStrSiz.replace(\" \", \"\") # we remove all spaces\r\n for m in range(0, len(hexStrSiz), 2): # we take the characters two by two\r\n code = hexStrSiz[m:m+2]\r\n hexDecSiz.append(int(code, 16)) # we convert in decimal and we put in the list hexDecSiz\r\n print(hexDecSiz, \" =>Taille de fichier =\", int(big_endian_siz, 16), \" octets\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_app = to_big(hexStrApp) #variable containing the conversion in big_endian of the application of image from the header in binary\r\n print(hexStrApp, \" =>application image =\", int(big_endian_app, 16), \" noms\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_size_header = to_big(hexStrSizHeader) #variable containing the conversion in big_endian of the size of the image from the header in binary\r\n print(hexStrSizHeader, \" =>Taille Entete =\", int(big_endian_size_header, 16), \" octets\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_size_width = to_big(hexStrSizWidth) #variable containing the conversion in big_endian of the width size of the image from the header in binary\r\n print(hexStrSizWidth, \" =>Largeur Image =\", int(big_endian_size_width, 16), \" pixels\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_size_height = to_big(hexStrSizHeight) #variable containing the conversion in big_endian of the height size of the image from the header in binary\r\n print(hexStrSizHeight, \" =>Hauteur Image =\", int(big_endian_size_height, 16), \" pixels\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_planes = to_big(hexStrPlanes) #variable containing the conversion in big_endian of the number of planes of the image from the header in binary\r\n print(hexStrPlanes, \" =>NB plan Image =\", int(big_endian_planes, 16), \" plan\")\r\n \r\n #-------------------------------------------------------------------------------------------\r\n big_endian_colours = to_big(hexStrColours) #variable containing the conversion in big_endian of the number of colour image from the header in binary\r\n print(hexStrColours, \" =>NB Couleur Image =\", int(big_endian_colours, 16), \" couleurs\")\r\n \r\n global widthImage \r\n global heightImage\r\n \r\n widthImage = int(big_endian_size_width, 16)\r\n heightImage = 
int(big_endian_size_height, 16)\r\n \r\n f_lecture.close\r\n\r\n#Function for convolution\r\ndef convolve(image, kernel):\r\n \r\n if(image.ndim == 2):\r\n image = image[:, :, None]\r\n \r\n if(kernel.ndim == 2):\r\n kernel = np.repeat(np.expand_dims(kernel, axis=-1), image.shape[-1], axis=-1)\r\n \r\n if(kernel.shape[-1] == 1):\r\n kernel = np.repeat(kernel, image.shape[-1], axis=-1) \r\n \r\n assert image.shape[-1] == kernel.shape[-1]\r\n \r\n xk = kernel.shape[0]\r\n yk = kernel.shape[0]\r\n \r\n width, height = image.shape[:2]\r\n\r\n # Convolution Output: [(W−K+2P)/S]+1\r\n output_array = np.zeros(((width - xk + 2) + 1, (height - yk + 2) + 1, image.shape[-1])) \r\n \r\n padded_image = np.pad(image, [(1, 1), (1, 1), (0, 0)])\r\n \r\n for x in range(padded_image.shape[0] - xk + 1): # -xk + 1 is to keep the window within the bounds of the image\r\n for y in range(padded_image.shape[1] - yk + 1):\r\n\r\n # Creates the window with the same size as the filter\r\n window = padded_image[x:x + xk, y:y + yk]\r\n\r\n # Sums over the product of the filter and the window\r\n output_values = np.sum(kernel * window, axis=(0, 1)) \r\n\r\n # Places the calculated value into the output_array\r\n output_array[x, y] = output_values\r\n \r\n return output_array\r\n\r\n#function for edge reinforcement (filter)\r\ndef edgeReinforcement(filename):\r\n f_lecture =open(filename,'rb') #read in binary mode\r\n f_ecriture =open(args.edgeReinforcement,'wb') #read in binary mode\r\n \r\n i=1\r\n octet = bytes([0])\r\n \r\n trioctets=[]\r\n \r\n #hex variables\r\n octetsHex=\"\"\r\n \r\n #Lecture du MAGIC NUMBER\r\n while (i <=2): #lecture Magic number sur 2 octets\r\n octet=f_lecture.read(1) #Lecture octet par octet\r\n f_ecriture.write(octet)\r\n i=i+1\r\n \r\n i=1\r\n \r\n #Lecture du MAGIC NUMBER\r\n while (i<= 54): #lecture Magic number sur 2 octets\r\n octet=f_lecture.read(1) #Lecture octet par octet\r\n f_ecriture.write(octet)\r\n \r\n i = i+1\r\n \r\n i=1\r\n f_lecture.seek(54)\r\n f_ecriture.seek(54)\r\n pixel_end = widthImage*heightImage*3+1\r\n #Lecture of the pixels of the image\r\n while (i< pixel_end): #lecture Magic number sur 2 octets\r\n octet=f_lecture.read(1) #Lecture octet par octet\r\n octetsHex = octetsHex+ \" \" + octet.hex() \r\n big_endian_octet = to_big(octetsHex)\r\n octetDec = int(big_endian_octet, 16)\r\n trioctets.append(octetDec)\r\n i = i+1\r\n octetsHex=\"\"\r\n \r\n trioctets = np.array(trioctets)\r\n tri = trioctets.reshape((widthImage,heightImage,3))\r\n kernel = [[ 0 , 0 , 0],\r\n [ -1 , 1 , 0],\r\n [ 0 , 0, 0 ]]\r\n kernel = np.array(kernel)\r\n\r\n nono = convolve(tri, kernel)\r\n\r\n for i in nono:\r\n for j in i: \r\n for k in j:\r\n if (k <0): \r\n k = 0\r\n if (k > 255): \r\n k = 255\r\n f_ecriture.write(int(k).to_bytes(1,'little'))\r\n\r\n#-------------ouverture_Fichiers_Image \r\nif (args.imageName[-3:] == \"bmp\"):\r\n sizeImage = ouverture_Fichiers_Image(filename) \r\n\r\n#-------------edgeReinforcement\r\nif (args.edgeReinforcement and args.edgeReinforcement[-3:] == \"bmp\"):\r\n edgeReinforcement(filename) \r\n print(\"The size of your image \", args.imageName, \" is \", widthImage, \" * \", heightImage, \" .\")\r\n visualize(args.edgeReinforcement)\r\n\r\n\r\n","sub_path":"Part 3/tests/edgeReinforcement.py","file_name":"edgeReinforcement.py","file_ext":"py","file_size_in_byte":10262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"391990715","text":"# classe 2\n\n# parte final !!!\n# sem experimentos\n\n# uso 
essa classe quando já tiver o modelo consistente (modelo final)\n\nimport pandas as pd\nimport numpy as np\nfrom xgboost.sklearn import XGBClassifier\nfrom joblib import dump, load\n\nfrom fonte_dados import FonteDados\nfrom preprocessamento import Preprocessamento\nfrom experimentos import Experimentos \n\n\nclass TreinamentoModelo:\n\n # começo aqui chamando dataSource e preProcessamento\n # como preProc não foi definido então \n def __init__(self):\n self.dados = FonteDados()\n self.pre_proc = None\n \n def treinamento_modelo(self):\n '''\n Train the model.\n :return: Dict with trained model, preprocessing used and columns used in training\n '''\n from numpy.random import seed\n \n # chamo o prePocessamento\n self.pre_proc = Preprocessamento()\n\n # leio os dados\n print('Carregamento dos dados', '\\n\\n')\n X_treino, y_treino = self.dados.leitura_dados()\n #df = self.dados.read_data(etapa_treino = True)\n\n # preProcessamento\n print('Treinamento do pré-processamento', '\\n\\n')\n # para treino\n X_treino = self.pre_proc.processo(X_treino)\n #y_treino = self.pre_proc.processo(y_treino, target=True)\n #X_train, y_train = pre.process(df, etapa_treino = True)\n\n print('Balanceamento Oversampling', '\\n\\n')\n #self.y_treino = y_treino\n X_treino, y_treino = self.pre_proc.balanceamento_oversampling(X_treino, y_treino)\n\n print('Treinamento do modelo', '\\n\\n')\n # chamo uma regLinear mas já poderia linkar\n # com a classe Experiment e retorna o experimento com \n # a melhor métrica\n seed(42)\n model_obj = XGBClassifier()\n model_obj.fit(X_treino, y_treino)\n\n # guardando informacoes no dicionario\n model = {'model_obj' : model_obj,\n 'preprocess' : self.pre_proc,\n 'colunas' : self.pre_proc.df_nomes_tipos_treino }\n print(model)\n\n # salvando modelo treinado com informacoes\n dump(model, '../saida/modelo.pkl')\n\n # retorna o dicionario de modelo\n return model\n \n ","sub_path":"projeto_padrao/codigos/treinamento_modelo.py","file_name":"treinamento_modelo.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"122368417","text":"# coding=utf-8\r\nimport subprocess\r\nimport time\r\nimport re\r\nimport os\r\nimport sys\r\nimport threading\r\nimport gc\r\nimport argparse\r\nimport pickle\r\nimport Queue\r\n\r\nimport tool.Tool as Tool\r\n\r\ndef getNumMCSs(data_infor, module_name, MCSs_map):\r\n\tnum_module_MCSs = 0\r\n\tMCSs = MCSs_map[module_name]\r\n\tfor mcs in MCSs:\r\n\t\tnew_MCS_num = 1\r\n\t\tfor l in mcs:\r\n\t\t\tl_index = abs(l)\r\n\t\t\tl_var = data_infor['module_index_var_map'][Tool.name(module_name)][l_index]\r\n\t\t\tif l < 0:\r\n\t\t\t\tl_var = '-' + l_var\r\n\r\n\t\t\tif 'm' in l_var:\r\n\t\t\t\tnew_MCS_num_temp = getNumMCSs(data_infor, l_var, MCSs_map)\r\n\t\t\t\tif new_MCS_num_temp != 0:\r\n\t\t\t\t\tnew_MCS_num = new_MCS_num * getNumMCSs(data_infor, l_var, MCSs_map)\r\n\t\t\t\t\t\r\n\t\tnum_module_MCSs = num_module_MCSs + new_MCS_num\r\n\r\n\treturn num_module_MCSs\r\n\r\ndef extendMCSs(p_MCSs, m_MCSs):\r\n\tnew_p_MCSs = []\r\n\tif len(p_MCSs) == 0:\r\n\t\tfor item in m_MCSs:\r\n\t\t\tnew_p_MCSs.append(item)\r\n\telif len(m_MCSs) == 0:\r\n\t\tfor item in p_MCSs:\r\n\t\t\tnew_p_MCSs.append(item)\r\n\telse:\r\n\t\tfor item_1 in p_MCSs:\r\n\t\t\tfor item_2 in m_MCSs:\r\n\t\t\t\titem_1_temp = item_1 + item_2\r\n\t\t\t\tnew_p_MCSs.append(item_1_temp)\r\n\t\r\n\treturn new_p_MCSs\r\n\r\ndef mergeMCSs(data_infor, module_name, MCSs_map, module_all_MCSs, hesitory):\r\n\tnum_MCS = 
0\r\n\t# print '***************** begin', module_name, '*****************'\r\n\tall_MCSs = []\r\n\tMCSs = MCSs_map[module_name]\r\n\tfor mcs in MCSs:\r\n\t\tMCSs_cluster = []\r\n\t\tfor l in mcs:\r\n\t\t\tl_index = abs(l)\r\n\t\t\tl_var = data_infor['module_index_var_map'][Tool.name(module_name)][l_index]\r\n\t\t\tif l < 0:\r\n\t\t\t\tl_var = '-' + l_var\r\n\r\n\t\t\tif 'm' in l_var:\r\n\t\t\t\tif module_all_MCSs.has_key(l_var):\r\n\t\t\t\t\tmodule_MCSs = module_all_MCSs[l_var]\r\n\t\t\t\t\t# print 'have obtained all MCSs of', l_var\r\n\t\t\t\telse:\r\n\t\t\t\t\tif l_var in hesitory:\r\n\t\t\t\t\t\t# print 'there is a roop'\r\n\t\t\t\t\t\tsys.exit()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\thesitory.append(l_var)\r\n\r\n\t\t\t\t\tmodule_MCSs = mergeMCSs(data_infor, l_var, MCSs_map, module_all_MCSs, hesitory)\r\n\t\t\t\t\t# print '***************** continue', module_name, '*****************'\r\n\t\t\t\t\tmodule_all_MCSs[l_var] = module_MCSs\r\n\t\t\t\t\t# print 'get all MCSs of', l_var\r\n\t\t\t\t\thesitory.remove(l_var)\r\n\t\t\t\t\t# print module_all_MCSs\r\n\t\t\telse:\r\n\t\t\t\tmodule_MCSs = [[l_var]]\r\n\r\n\t\t\tMCSs_cluster = extendMCSs(MCSs_cluster, module_MCSs)\r\n\r\n\t\t# if module_name is 'm0':\r\n\t\t# \tfor item in MCSs_cluster:\r\n\t\t# \t\tnum_MCS = num_MCS + 1\r\n\t\t# \t\tprint num_MCS, item\r\n\r\n\t\tall_MCSs = all_MCSs + MCSs_cluster\r\n\r\n\t# print '***************** end', module_name, '*****************'\r\n\treturn all_MCSs\r\n","sub_path":"Merge.py","file_name":"Merge.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"647929860","text":"\"\"\"\nThere are two lists of different length. The first one consists of keys, the \nsecond one consists of values. Write a function createDict(keys, values) that \nreturns a dictionary created from keys and values. \nIf there are not enough values, the rest of keys should have a None value. \nIf there not enough keys, just ignore the rest of values.\n\n>>> createDict(['a', 'b', 'c', 'd'], [1, 2, 3])\n{'a': 1, 'b': 2, 'c': 3, 'd': None})\n>>> createDict(['a', 'b', 'c'], [1, 2, 3, 4])\n{'a': 1, 'b': 2, 'c': 3})\n\n\n\"\"\"\n\ndef createDict(keys, values):\n\n\tnew_dict = {}\n\n\tfor i in range(0, len(keys)):\n\t\tif i < len(values):\n\t\t\tnew_dict[keys[i]] = values[i]\n\t\telse:\n\t\t\tnew_dict[keys[i]] = None\n\n\treturn new_dict\n\n\n\"\"\"\npasses cw tests.\nfails doctest bc testmod looks for literal match,\ndictionary is unordered therefore not returned as literal match.\n\nneed to learn other methods for local testing.\n\n\"\"\"\n\n# if __name__ == \"__main__\":\n\n# \timport doctest\n# \tdoctest.testmod()\n\n\"\"\"\nalso, most solutions on cw use \"zip()\"\nhave not used zip. 
investigate and revisit.\n\"\"\"","sub_path":"cw/cw_7_dictFrom2lists.py","file_name":"cw_7_dictFrom2lists.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"430517017","text":"class Solution:\n \"\"\"\n @param numbers: Give an array numbers of n integer\n @return: Find all unique triplets in the array which gives the sum of zero.\n \"\"\"\n def threeSum(self, numbers):\n reslts = set()\n sorted_numbers = self.merge_sort(numbers)\n for i in range(0, len(sorted_numbers)):\n two_sums = self.check(sorted_numbers[:i], 0 - sorted_numbers[i])\n if two_sums:\n for two_sum in two_sums:\n reslts.add((two_sum[0], two_sum[1], sorted_numbers[i]))\n return list(reslts)\n\n def check(self, numbers, k):\n two_sums = set()\n nums_set = set()\n for i in range(0, len(numbers)):\n if (k - numbers[i]) in nums_set:\n two_sums.add((k - numbers[i], numbers[i]))\n nums_set.add(numbers[i])\n return two_sums\n\n def merge_sort(self, numbers):\n if len(numbers) > 1:\n mid = int(len(numbers)/2)\n ll = self.merge_sort(numbers[:mid])\n rl = self.merge_sort(numbers[mid:])\n numbers = self.merge(ll, rl)\n return numbers\n\n def merge(self, ll, rl):\n l = []\n a = b = 0\n while a < len(ll) and b < len(rl):\n if ll[a] <= rl[b]:\n l.append(ll[a])\n a += 1\n else:\n l.append(rl[b])\n b += 1\n l += rl[b:] if b < len(rl) else ll[a:]\n return l\n\n\nif __name__ == \"__main__\":\n a = [-2,-3,5,-1,-4,5,-11,7,1,2,3,4,-7,-1,-2,-3,-4,-5]\n print(Solution().threeSum(a))\n","sub_path":"List/3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"632036474","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2013-9-29\n\n@author: huanghu\n'''\nfrom com.utils.common import Common\n\nclass Read(object):\n\n def getCommonValue(self ,key):\n path = Common().getCommonPath()\n value = self.read(path, key);\n return value\n \n\n def read(self ,path ,key):\n files = open(path ,\"r\")\n contents = files.readlines()\n for content in contents:\n index = content.index(\"=\")\n contentKey = content[0:index]\n if contentKey == key :\n contentValue = content[index + 1: len(content)]\n #去掉回车换行\n return ''.join(contentValue.split())\n \n ","sub_path":"com/utils/readFile.py","file_name":"readFile.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"509790525","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, left, right, next):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root: return root\n queue = [[],[]]\n queue[0].append(root)\n \n while queue[0]:\n node = queue[0].pop(0)\n \n if node.left:\n queue[1].append(node.left)\n if node.right:\n queue[1].append(node.right)\n \n if not queue[0]:\n queue[0],queue[1] = queue[1],queue[0]\n else:\n node.next = queue[0][0]\n \n return root","sub_path":"Medium/116. Populating Next Right Pointers in Each Node.py","file_name":"116. Populating Next Right Pointers in Each Node.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"453659746","text":"\"\"\"\n406. 
Minimum Size Subarray Sum\nGiven an array of n positive integers and a positive integer s,\nfind the minimal length of a subarray of which the sum ≥ s.\nIf there isn't one, return -1 instead.\n\nExample\nGiven the array [2,3,1,2,4,3] and s = 7,\nthe subarray [4,3] has the minimal length under the problem constraint.\n\nChallenge\nIf you have figured out the O(n) solution,\ntry coding another solution of which the time complexity is O(n log n).\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param nums: an array of integers\n @param s: An integer\n @return: an integer representing the minimum size of subarray\n \"\"\"\n\n def minimumSize(self, nums, s):\n sum = 0\n res = 1e10\n n = len(nums)\n l = 0\n for r in range(n):\n sum += nums[r]\n while sum >= s:\n res = min(res, r - l + 1)\n sum -= nums[l]\n l += 1\n\n if res == 1e10:\n return -1\n else:\n return res\n\n\narr = [2, 3, 1, 2, 4, 3]\ntarget = 7\n\ns = Solution()\nprint(s.minimumSize(arr, target))\n","sub_path":"TwoPointers/MinimumSizeSubarraySum.py","file_name":"MinimumSizeSubarraySum.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"462902364","text":"from os.path import join, dirname\n\nfrom difficult_dialogs.arguments import Argument\n\narg_folder = join(dirname(__file__), \"i_think_therefore_i_am\")\narg = Argument(arg_folder)\n\n# argument data\nall_statements = arg.all_statements()\nall_support_statements = arg.all_support()\nall_sources = arg.all_sources()\nintro = arg.intro_statement\nconclusion = arg.conclusion_statement\n\n\n# argument / user loop\ndebug = False\n\nif debug:\n print(\"ARGUING IN FAVOR OF:\", arg.name)\n print(\"---ARGUMENT INTRO\")\nprint(\"BOT:\", arg.start())\nwhile not arg.finished:\n if debug:\n print(\"---NEXT ASSERTION\")\n\n print(\"BOT:\", arg.next_statement())\n if debug:\n print(\"__ARGUING IN FAVOR OF: \", arg.current_statement)\n user = input(\"do you agree? USER: \")\n while \"y\" not in user:\n support = arg.support()\n\n if not support:\n if debug:\n print(\"---OUT OF ARGUMENTS\")\n source = arg.source()\n if source:\n if debug:\n print(\"---ASSERTION SOURCES\")\n print(\"BOT:\", \"here is the source of my information\",\n \"\\n\" + arg.source())\n print(\"BOT:\", \"we will need to agree to disagree\")\n else:\n if debug:\n print(\"---NO SOURCES\")\n print(\"BOT:\", \"i guess you're right\")\n user = \"y\"\n else:\n if debug:\n print(\"---ASSERTION SUPPORT ARGUMENT\")\n print(\"BOT:\", support)\n user = input(\"do you agree? 
USER: \")\n","sub_path":"examples/i_think_therefore_i_am.py","file_name":"i_think_therefore_i_am.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"472249448","text":"###########################################\n#\n# Creates the TF-IDF Matrix on the user documents corpus and saves it to a file\n#\n###########################################\nfrom setup_api import setup_api\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.tokenize import TweetTokenizer\nimport pickle\nfrom read_user_tweets_tokenized_pickles import read_user_tweets_tokenized_pickles\nimport os\nfrom user_tweets_class import UserTweets\nfrom urllib.request import urlopen\nimport simplejson\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport natsort\n\ndef filterArrayWithThreshold(bot_array, threshhold):\n for i in range(0, len(bot_array)):\n if (float(bot_array[i]) >= float(threshhold)):\n return bot_array[i:-1]\n\ndef calcBotScoreForUserStance(userstance):\n print('calc for ' + userstance + ' users')\n \n #get all protrump users\n connection = urlopen('http://localhost:8983/solr/tweets_cleaned/select?fl=user_bot_score_english&q=user_stance:'+userstance+'&group=true&group.field=user_screen_name&group.ngroups=true&rows=1000000')\n users_solr = simplejson.load(connection)\n \n usernames_protrump = []\n \n for group in users_solr[\"grouped\"][\"user_screen_name\"][\"groups\"]:\n usernames_protrump.append(group[\"groupValue\"])\n \n user_bot_score_universal = []\n user_bot_score_english = []\n \n for username in usernames_protrump:\n connection = urlopen('http://localhost:8983/solr/tweets_cleaned/select?fl=user_bot_score_universal,user_bot_score_english&q=user_screen_name:'+username+'&rows=1')\n users_solr = simplejson.load(connection)\n \n for element in users_solr['response']['docs']:\n if 'user_bot_score_universal' in element:\n user_bot_score_universal.append(element['user_bot_score_universal'][0])\n \n if 'user_bot_score_english' in element:\n user_bot_score_english.append(element['user_bot_score_english'][0])\n \n \n user_bot_score_english_sorted = natsort.natsorted(user_bot_score_english)\n user_bot_score_universal_sorted = natsort.natsorted(user_bot_score_universal)\n \n print('THe average user_bot_score_english is: ' + str(sum(user_bot_score_english)/len(user_bot_score_english)))\n print('THe average user_bot_score_universal is: ' + str(sum(user_bot_score_universal)/len(user_bot_score_universal)))\n \n user_bot_score_english_sorted_filtered = filterArrayWithThreshold(user_bot_score_english_sorted, \"0.5\")\n user_bot_score_universal_sorted_filtered = filterArrayWithThreshold(user_bot_score_universal_sorted, \"0.5\")\n \n print('Sorted bot_score_english Length: ' + str(len(user_bot_score_english_sorted)))\n print('Sorted bot_score_english Length: ' + str(len(user_bot_score_english_sorted_filtered)))\n print(user_bot_score_english_sorted_filtered)\n \n print('Sorted bot_score_universal Length: ' + str(len(user_bot_score_universal_sorted)))\n print('Sorted bot_score_universal Length: ' + str(len(user_bot_score_universal_sorted_filtered)))\n print(user_bot_score_universal_sorted_filtered)\n \n 
\ncalcBotScoreForUserStance('protrump')\ncalcBotScoreForUserStance('contratrump')","sub_path":"Software/Crawling/Workspace/Crawler/DataManipulator/__9e_calc_bot_score.py","file_name":"__9e_calc_bot_score.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"430299869","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 5 19:03:57 2018\n@author: Ingvar\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom collections import defaultdict\nfrom multiprocessing import Pool\n#from numba_module import main_numba\nfrom sklearn.preprocessing import normalize\nfrom sklearn.utils import shuffle\nfrom functools import partial\n\n\n\n#@jit\ndef main(all_args):\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n print(\"starting....\")\n debugg = False\n\n X_train, y_train, X_test, y_test, total_amount_of_data_in_interval, dimensionality, epsilons = all_args\n \n \n \n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, shuffle = True)\n\n # shuffle the data for randomnes in the smaller values of n\n X_train, y_train = shuffle(X_train, y_train)\n regularization_constant = 5 # this was obtained from the tunning program\n \n \n all_accuracies = defaultdict(list)\n avg_noise_for_each_n = defaultdict(list)\n var_noise_for_each_n = defaultdict(list)\n all_noise = defaultdict(partial(defaultdict, list)) # defaultdict inside defaultdict\n # we use tuple because lambda functions are not pickable- thus dont work with multiprocessing -uses que\n all_weights = defaultdict(tuple)\n for n in total_amount_of_data_in_interval:\n \n clf = LogisticRegression(penalty=\"l2\", C=1 / regularization_constant)\n clf.fit(X_train[:n], y_train[:n])\n if debugg:\n print(clf.score(X_test, y_test))\n print(len(X_train))\n weights = clf.coef_[0]\n\n \n scikit_proba = clf.predict_proba(X_test)\n scikit_predict = clf.predict(X_test)\n num_correct_predictions = 0\n for i in range(len(y_test)):\n arg = np.dot(weights, X_test[i])\n prediction_probability = sigmoid(arg)\n if debugg:\n print('my prediction', prediction_probability )\n print('scikit prediction proba', scikit_proba[i])\n print('scikit prediction', scikit_predict[i])\n print('truth', y_test[i])\n \n # check which class to predict\n if prediction_probability > (1 - prediction_probability):\n predicition = 1\n else:\n predicition = -1\n \n truth = y_test[i]\n if predicition == truth:\n num_correct_predictions += 1\n \n \n ## add the score\n # we put the 9999 to make the wighout dp last when sorted with the epsilons\n all_accuracies[(9999, 'Without DP')].append(1 - clf.score(X_test, y_test)) \n # tak the absolute value of the weights and then store it for later analysis\n all_weights[str(n)] = (abs(weights))\n \n \n ############# add differential privacy #########################\n \n \n sensitivity = 2 / (n * regularization_constant)\n for epsilon in epsilons:\n \n noise = np.array([np.random.laplace(0, sensitivity / epsilon) for i in range(dimensionality)])\n weights_perturb = weights + noise\n \n # evaluate the model\n num_correct_predictions = 0\n for i in range(len(y_test)):\n 
arg = np.dot(weights_perturb, X_test[i])\n prediction_probability = sigmoid(arg)\n \n # check which class to predict\n if prediction_probability > (1 - prediction_probability):\n predicition = 1\n else:\n predicition = -1\n \n truth = y_test[i]\n if predicition == truth:\n num_correct_predictions += 1\n \n \n #total_noise = sum(abs(noise)) # vantar liklega abs gildinn\n accur = num_correct_predictions / len(y_test)\n\n # first index has the lowest n and then it increases\n all_accuracies[(epsilon, '$\\epsilon$ = ' + str(epsilon))].append(1 - accur)\n avg_noise_for_each_n[epsilon].append(np.mean(abs(noise)))#total_noise / num_rounds_to_avg)\n var_noise_for_each_n[epsilon].append(np.var(noise))\n \n all_noise[n][epsilon] = noise.tolist()\n all_noise[n][99999999999999999999] = weights.tolist() # add the weights at the end for plottting\n# =============================================================================\n# if n not in all_noise or epsilon not in all_noise[n] :\n# all_noise[n][epsilon] = [noise]\n# else:\n# all_noise[n][epsilon].append(noise)\n# =============================================================================\n #all_noise_and_weights['weights eps = ' + str(epsilon)].append()\n print(\"leaving!!!\")\n return (all_accuracies, avg_noise_for_each_n, all_weights, var_noise_for_each_n, all_noise)\n \nif __name__ == '__main__':\n print(\"hallo\")\n \n # load the data and select the binary classificatio problem\n num1 = 4\n num2 = 9\n \n y_train = []\n X_train = []\n with open('../mnist_train.csv') as l:\n for i , line in enumerate(l):\n line = line.split(\",\")\n label = int(line[0])\n if label == num1 or label == num2:\n features = [float(i) for i in line[1:]]\n y_train.append(label)\n X_train.append(features) \n \n\n y_train = np.asarray(y_train)\n X_train = np.asarray(X_train)\n \n X_train = normalize(X_train)\n y_train[y_train == num1] = -1\n y_train[y_train == num2] = 1\n \n\n\n y_test = []\n X_test = [] \n with open('../mnist_test.csv') as l:\n for i , line in enumerate(l):\n line = line.split(\",\")\n label = int(line[0])\n if label == num1 or label == num2:\n features = [float(i) for i in line[1:]]\n y_test.append(label)\n X_test.append(features) \n \n \n y_test = np.asarray(y_test)\n X_test = np.asarray(X_test)\n \n X_test = normalize(X_test)\n y_test[y_test == num1] = -1\n y_test[y_test == num2] = 1\n \n print('Data has ben loaded..')\n \n \n # The epsilons we are going to try to differential privacy\n epsilons = [0.000001, 0.000005, 0.00001, 0.00005, 0.0001, 0.0005, 0.002, 0.01, 0.1, 3, 10]\n dimensionality = len(X_train[0])\n number_of_training_samples = len(X_train)\n \n # Select The colorschemme for the plots \n sns.set_style('whitegrid')\n # the color palette dose not have enough colors so we add colors that go well with it\n sns.set_palette(sns.color_palette(\"Set1\", n_colors = 9) + sns.color_palette(\"Set2\", n_colors = 3)[0:3:2] + [(1.0, 191/255, 0.0)])\n \n # in each itreation we use different amount of data to see how the model improvese with increased data\n num_splits = 30 \n total_amount_of_data = [int(number_of_training_samples / num_splits) for i in range(num_splits)] \n total_amount_of_data_in_interval = np.cumsum(total_amount_of_data)\n \n # Lets do multi-threading to speed things up!\n t1 = time.time()\n num_instances = 10\n p = Pool(10)\n args = [(X_train, y_train, X_test, y_test, total_amount_of_data_in_interval, dimensionality, epsilons )] * num_instances\n results_and_weights_perturb = p.map(main, args)\n p.close()\n p.join()\n\n print('Time 
taken for multiprocessing: {}'.format(time.time() - t1))\n\n # get three list out of a list with tuples of three\n results, noise, all_weights, variance_noise, all_noises = zip(*results_and_weights_perturb)\n \n\n ################# START off by analyzig the prediction error #######################\n\n # use lambda to be able to have np array inside defaultdict\n average_results = defaultdict(lambda:np.array([0.0 for i in range(num_splits)]))\n for result in results:\n #print(result)\n for item in result:\n average_results[item] += np.array(result[item])\n \n \n \n fig = plt.figure()\n ax = plt.subplot(111)\n\n \n for result in sorted(average_results):\n average_results[result] /= num_instances\n # result of 1 is the string represantation of the result\n ax.plot(total_amount_of_data_in_interval, average_results[result], '-*', label = result[1]) \n \n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n # Shrink current axis by 25%\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])\n \n #plt.legend(loc='upper center', bbox_to_anchor=(1.22, 1.01), fancybox=True, shadow=True)\n \n plt.ylabel('Error rate')\n plt.xlabel('Amount of training data [N] ')\n plt.title('Differentialy private Logistic Regression')\n plt.show()\n\n \n \n #%%\n ###################### write statistics to excel for generating table in latex ##################\n ###################### Lets make two tables, one for mean and one fore variance ############\n ###################### Also make box plot of the means and the variances #########\n \n \n# =============================================================================\n# \n# =============================================================================\n \n \n # combine all the thread values...\n very_all_noise = defaultdict(lambda: defaultdict(list))\n for i, noise in enumerate(all_noises):\n for n in noise:\n item = noise[n]\n for eps in item:\n very_all_noise[n][eps] = very_all_noise[n][eps] + all_noises[i][n][eps]\n\n\n # plot them..\n x_labels = ['$\\epsilon = {}$'.format(eps) for eps in epsilons]\n x_labels.append('weights')\n for n in very_all_noise:\n item = very_all_noise[n]\n if n == 393 or n == 11004:\n print('BOOOMMM!!!')\n to_plot = []\n to_plot1 = []\n for eps in item:\n to_plot.append(very_all_noise[n][eps])\n to_plot1.append([abs(value) for value in very_all_noise[n][eps]])\n \n plt.title('for n = {}'.format(n))\n ax = sns.boxplot(data=to_plot)\n plt.yscale('log')\n plt.xticks(range(len(to_plot)), x_labels, rotation=45)\n plt.show()\n \n plt.title('for n = {}'.format(n))\n ax = sns.boxplot(data=to_plot)\n plt.xticks(range(len(to_plot)), x_labels, rotation=45)\n plt.show()\n \n plt.title('The strength.. 
n = {} log axis'.format(n))\n ax = sns.barplot(data=to_plot1, estimator = sum)\n plt.yscale('log')\n plt.xticks(range(len(to_plot)), x_labels, rotation=45)\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# intialize list of lists which will form a pandas data structure to plot things\n# pandas_values_mean, pandas_values_var = [], []\n# lenght_of_all_columns_for_pandas = len(epsilons) + 2 # 2 because of n and the weights\n# for i in range(len(total_amount_of_data_in_interval)):\n# pandas_values_mean.append([])\n# pandas_values_var.append([])\n# \n# # add the number of data points as a first attribute\n# for i, n in enumerate(total_amount_of_data_in_interval):\n# pandas_values_mean[i].append(n)\n# pandas_values_var[i].append(n)\n# \n# \n# \n# ####### Lets analyse the Noise ##########\n# \n# # find all the mean values which correspond to the same n and the same epsilon\n# averaged_noise = defaultdict(dict)\n# for item in noise:\n# i = 0\n# for key in sorted(item):\n# values = item[key]\n# # SKODA SORTED HERNA!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# for n, value in enumerate(values):\n# # we use two keys so we can sort after the i key for plotting.. and writing to excel\n# # however, to ensure that we are doing corectly we use the other key as well do see that \n# # everything matches and for readability..\n# new_key = 'n = ' + str(total_amount_of_data_in_interval[n]) + ' ' + str(key)\n# sorting_key = i\n# \n# if sorting_key not in averaged_noise:\n# averaged_noise[sorting_key]['value'] = [value]\n# averaged_noise[sorting_key]['instance'] = new_key\n# else: \n# averaged_noise[sorting_key]['value'].append(value)\n# \n# i += 1\n# \n# \n# \n# # find all the variance values which correspond to the same n and the same epsilon\n# var_noise = defaultdict(dict)\n# for item in variance_noise:\n# i = 0\n# for key in sorted(item):\n# values = item[key]\n# \n# for n, value in enumerate(values):\n# # we use two keys so we can sort after the i key for plotting.. 
and writing to excel\n# # however, to ensure that we are doing corectly we use the other key as well do see that \n# # everything matches and for readability..\n# new_key = 'n = ' + str(total_amount_of_data_in_interval[n]) + ' ' + str(key)\n# sorting_key = i\n# \n# if sorting_key not in var_noise:\n# var_noise[sorting_key]['value'] = [value]\n# var_noise[sorting_key]['instance'] = new_key\n# else: \n# var_noise[sorting_key]['value'].append(value)\n# \n# i += 1\n# \n# \n# \n# \n# \n# \n# # mogulega splitta a kommu\n# # Lets try to do boxplot of the variances of the noise for n = 393\n# everything = []\n# for key in averaged_noise:\n# if averaged_noise[key]['instance'][:7] == 'n = 393':\n# everything.append(averaged_noise[key]['value'])\n# \n# \n# plt.boxplot(everything)\n# plt.yscale('log')\n# plt.show()\n# \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# =============================================================================\n# \n# \n# # calculate the statistics for the noise/epsilons\n# j, n_index = 0, 0\n# # we loop thorugh all epsilons for each n\n# for key in sorted(averaged_noise):\n# print(averaged_noise[key]['instance'])\n# pandas_values_mean[n_index].append(np.mean(averaged_noise[key]['value']))#averaged_noise[key]['value']))\n# pandas_values_var[n_index].append(np.var(var_noise[key]['value']))#averaged_noise[key]['value']))\n# if j == len(epsilons) - 1:\n# j = 0\n# n_index += 1\n# else:\n# j += 1\n# \n# \n# \n# ### analyse the weights ###\n# # Lets average all the instances of the weights, we know that each list inside all_weights\n# # is ordered by n, so that they start at the smallest and then increase towards the biggest\n# averaged_weights = defaultdict(lambda:np.array([0.0 for i in range(dimensionality)]))\n# for weights in all_weights:\n# for key in weights:\n# averaged_weights[key] += weights[key]\n# \n# for key in averaged_weights:\n# averaged_weights[key] /= num_instances\n# \n# # Collect the data for each n in list so we can build pandas data frame\n# i = 0\n# for key in sorted(averaged_weights):\n# value = averaged_weights[key]\n# pandas_values_mean[i].append(np.mean(value))\n# pandas_values_var[i].append(np.var(value))\n# i += 1\n# \n# \n# \n# # FOR PANDAS -- theses are the column names MUNNA AD LAGA\n# names = ['N'] + ['$\\epsilon = {}$'.format(epsilon) for epsilon in epsilons] + ['Weights']\n# \n# df_means = pd.DataFrame(pandas_values_mean, columns = names)\n# df_vars = pd.DataFrame(pandas_values_var, columns = names)\n# \n# # now we change the order in the \n# \n# # make box plot of the means and the variances\n# ax = sns.boxplot(data=df_means[names[1:]], palette = 'Set1') # exclude the N's\n# plt.xticks(rotation=45)\n# plt.yscale('log')\n# plt.title('Mean log axis')\n# plt.show()\n# \n# # make box plot of the means and the variances\n# ax = sns.boxplot(data=df_means[names[1:]], palette = 'Set1') # exclude the N's\n# plt.xticks(rotation=45)\n# plt.title('Mean')\n# plt.show()\n# \n# \n# ax = sns.boxplot(data=df_vars[names[1:]], palette = 'Set1')\n# plt.yscale('log')\n# plt.xticks(rotation=45)#ax.set_xticklabels(rotation=30) # ef failar plt.xticks(rotation=45)\n# plt.title('Variance log')\n# plt.show()\n# \n# ax = sns.boxplot(data=df_vars[names[1:]], palette = 'Set1')\n# plt.xticks(rotation=45)#ax.set_xticklabels(rotation=30) # ef failar plt.xticks(rotation=45)\n# plt.title('Variance')\n# plt.show()\n# \n# \n# # make a bar plot of the sum of the means and variances\n# #!!! 
gaeti gert rauda linu efst med maxinu svi tad sjaist vel hvad tetta er langt fra\n# # TILLA AD SUMMA ALL NOTA estimator=sum tarf ad fa abs sum nei spurning ad hafa mean....\n# ax = sns.barplot(data=df_means[names[1:]].abs(), palette = 'Set1', ci = None) # exclude the N's\n# plt.yscale('log')\n# plt.xticks(rotation=45)#ax.set_xticklabels(rotation=30)\n# plt.title('Mean of the Means')\n# plt.show()\n# \n# ax = sns.barplot(data=df_vars[names[1:]].abs(), palette = 'Set1', ci = None)\n# plt.yscale('log')\n# plt.xticks(rotation=45)#ax.set_xticklabels(rotation=30) # ef failar plt.xticks(rotation=45)\n# plt.title('Mean of the variance')\n# plt.show()\n# \n# # save the two dataframes as a table in excel\n# writer = pd.ExcelWriter('output.xlsx')\n# df_means.to_excel(writer, 'Sheet1')\n# df_vars.to_excel(writer, 'Sheet2')\n# writer.save()\n# =============================================================================\n","sub_path":"differential_privacy_logistic_regression/skoli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"608724992","text":"from flask import session as login_session\nfrom flask import abort, request, redirect, url_for, render_template\nfrom sqlalchemy import asc\nfrom bleach.sanitizer import Cleaner\nfrom . import mod_catalog, login_required\nfrom ..models import Category, Item, db_session\n\n\n@mod_catalog.route(\"/item/new\", methods=[\"GET\", \"POST\"])\n@login_required\ndef new_item():\n \"\"\"Create a new category item\"\"\"\n categories = db_session.query(Category).order_by(asc(Category.title)).all()\n if request.method == \"POST\":\n cleaner = Cleaner(strip=True, tags=[], attributes=[])\n new_item = Item(title=cleaner.clean(request.form[\"title\"]),\n description=cleaner.clean(request.form[\"description\"]),\n category_id=cleaner.clean(request.form[\"category\"]),\n user_id=login_session[\"useremail\"])\n db_session.add(new_item)\n db_session.commit()\n return redirect(url_for(\".show_item\",\n category_title=new_item.category_id,\n item_title=new_item.title))\n else:\n return render_template(\"newitem.html\", categories=categories)\n","sub_path":"vagrant/catalog/app/mod_catalog/new_item.py","file_name":"new_item.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"185347112","text":"# Function to convert celcius to fahrenheit.\ndef fahrenheit(celsius):\n return (9/5)*celsius + 32\n\ntemps = [0, 22.5, 40, 100]\n\n# Using a for loop.\nf_temps = []\nfor temp in temps:\n f_temps.append(fahrenheit(temp))\nprint(f_temps)\n\n# Using a list comprehension.\nf_temps = [fahrenheit(temp) for temp in temps]\nprint(f_temps)\n\n# Using a map.\nf_temps = map(fahrenheit, temps)\nprint(f_temps)\nprint(list(f_temps))\nfor temp in f_temps:\n print(temp)\n\n# Using a map with a lambda function.\nf_temps = map(lambda x: (9 / 5) * x + 32, temps)\nprint(list(f_temps))\n","sub_path":"09-Built-in Functions/code_examples/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"511703215","text":"n,m = map(int, input().split())\ndata = []\nfor i in range(m):\n data.append(list(map(int, input().split())))\n\nnot_bridge = 0\ndef dfs(node, finished):\n finished.add(node)\n global not_bridge\n if len(finished) == n:\n not_bridge += 1\n else:\n for i in 
adjacent_list[node]:\n if i not in finished:\n dfs(i, finished)\n \n#iが省く要素\nfor i in range(m):\n adjacent_list = [[]for i in range(n+1)]\n finished = set()\n for j in range(m):\n if i != j:\n a, b = data[j]\n adjacent_list[a].append(b)\n adjacent_list[b].append(a)\n print(adjacent_list)\n dfs(1, finished)\nprint(m - not_bridge)\n","sub_path":"ABC/70/C-1.py","file_name":"C-1.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"593611964","text":"\nfrom flask import Flask, request, jsonify, render_template\nimport joblib\nimport traceback\nimport pandas as pd\nimport numpy as np\nimport sys\n\n# Your API definition\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n@app.route('/', methods=['GET', 'POST'])\n\n\ndef predict():\n pred = ''\n if request.method == 'POST':\n \n Open = float(request.form.get('Open'))\n high = float(request.form.get('High'))\n low = float(request.form.get('Low'))\n \n query = [[Open, low, high]]\n print(query)\n \n if query:\n pred = lr.predict(query)\n \n #prediction = jsonify({'prediction': str(prediction)})\n \n return render_template('index.html', prediction=pred)\n\n\n\nif __name__ == '__main__':\n\n lr = joblib.load(\"Model/model_EURUSD.pkl\") # Load \"model.pkl\"\n print ('Model loaded')\n\n app.run(debug=True)","sub_path":"Delivery/EURUSD/api_lr_EURUSD.py","file_name":"api_lr_EURUSD.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"572334366","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 5 08:55:08 2018\n\n@author: Richie\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.utils import to_categorical\n\ndata=pd.read_csv('basket_ball_shot_log.csv')\npredictors= data.drop(['shot_result'],axis=1).as_matrix\ntarget=to_categorical(data.shot_result)\n\n# Build the model\nmodel=Sequential()\n# Add the first layer with 100 nodes\nmodel.add(Dense(100,activation = 'relu', input_shape=(n_cols,)))\n# Add the Second layer with 100 nodes\nmodel.add(Dense(100,activation = 'relu'))\n# Add the third layer\nmodel.add(Dense(100, activation = 'relu'))\n\n# Add the output layer\n\nmodel.add(Dense(2, activation = 'softmax'))\n## Compile the model\nmodel.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics=['accuracy'])\n## Fit the model\nmodel(predictors, target)\n\n## Saving, Reloading and making predcitions with the model\n\nfrom keras.models import load_model\nmodel.save('model_file.h5')\nmy_model=load_model('my_model.h5')\npredictions=my_model.predict(data_to_predict_with)\nprobability_true=predictions[:,1]\n\n## Verify the model structure\n\nmy_model.summary()\n\n\n","sub_path":"Deep Learning/keras_Categorical_model.py","file_name":"keras_Categorical_model.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"293345657","text":"from math import sqrt\n\ndef is_prime(n):\n if n < 2: return False\n\n for i in range(2,int(sqrt(n)+1)):\n\n if n%i == 0:\n return False\n break\n return True\n","sub_path":"homework/3/isprime.py","file_name":"isprime.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"605327383","text":"#!/usr/bin/env python\nfrom sseclient import SSEClient, 
requests\n\nPROTOCOL=\"http\"\nAPI_URL=\"192.168.0.69\"\nSSE_ENDPOINT=\"events\"\nTRIGGER_ENDPOINT=\"longreply\"\n\nmessages = SSEClient(f\"{PROTOCOL}://{API_URL}/{SSE_ENDPOINT}\")\nprint(requests.get(f\"{PROTOCOL}://{API_URL}/{TRIGGER_ENDPOINT}\").status_code)\nfor msg in messages:\n print(msg.data, msg.event, msg.id, msg.retry)","sub_path":"LED2/ESP8266_Master/_OLD/basic_file_server/utils/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"35729169","text":"import os\nimport argparse\nfrom time import monotonic\n\nfrom loader.AssetExtractor import Extractor\nfrom loader.Database import DBManager\n\nfrom loader.Master import load_master, load_json\nfrom loader.Actions import load_actions\nfrom loader.Motion import load_character_motion, load_dragon_motion\n\nJP = 'jp'\nEN = 'en'\nCN = 'cn'\n\nMANIFESTS = {\n JP: 'manifest/jpmanifest_with_asset_labels.txt',\n EN: 'manifest/enmanifest_with_asset_labels.txt',\n CN: 'manifest/cnmanifest_with_asset_labels.txt'\n}\n\nMASTER = 'master'\nACTIONS = 'actions'\nCHARACTERS_MOTION = 'characters_motion'\nDRAGON_MOTION = 'dragon_motion'\n\nTEXT_LABEL = 'TextLabel.json'\nLABEL_PATTERNS_EN = {\n r'^master$': 'master'\n}\nLABEL_PATTERNS_CN = {\n r'^master$': 'master'\n}\nLABEL_PATTERNS_JP = {\n r'^master$': 'master',\n r'^actions$': 'actions',\n r'^characters/motion': 'characters_motion',\n r'characters/motion/animationclips$': 'characters_motion',\n r'^dragon/motion': 'dragon_motion',\n}\nIMAGE_PATTERNS = {\n r'^images/icon': 'icon',\n r'^images/outgame': 'outgame',\n}\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Import data to database.')\n parser.add_argument('--do_prep', help='Download and extract db related assets', action='store_true')\n parser.add_argument('-m_hash', help='Use', action='store_true')\n parser.add_argument('-o', type=str, help='output file', default='dl.sqlite')\n args = parser.parse_args()\n # if args.do_images:\n # ex = Extractor(MANIFEST_JP, MANIFEST_EN, ex_dir='images', stdout_log=True)\n # ex.download_and_extract_by_pattern(IMAGE_PATTERNS, region='jp')\n in_dir = '_extract'\n if args.do_prep:\n print('prepare: ', flush=True, end = '')\n start = monotonic()\n ex = Extractor(MANIFESTS, ex_dir=in_dir, stdout_log=False)\n ex.download_and_extract_by_pattern(LABEL_PATTERNS_CN, region='cn')\n ex.download_and_extract_by_pattern(LABEL_PATTERNS_EN, region='en')\n ex.download_and_extract_by_pattern(LABEL_PATTERNS_JP, region='jp')\n print(f'{monotonic()-start:.4f}s', flush=True)\n start = monotonic()\n print('load database: ', flush=True, end = '')\n db = DBManager(args.o)\n load_master(db, os.path.join(in_dir, EN, MASTER))\n load_json(db, os.path.join(in_dir, JP, MASTER, TEXT_LABEL), 'TextLabelJP')\n load_json(db, os.path.join(in_dir, CN, MASTER, TEXT_LABEL), 'TextLabelCN')\n load_actions(db, os.path.join(in_dir, JP, ACTIONS))\n load_character_motion(db, os.path.join(in_dir, JP, CHARACTERS_MOTION))\n load_dragon_motion(db, os.path.join(in_dir, JP, DRAGON_MOTION))\n print(f'{monotonic()-start:.4f}s', flush=True)","sub_path":"Load_Database.py","file_name":"Load_Database.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129723823","text":"import logging\nimport importlib\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef module_import(name):\n if not isinstance(name, str):\n return 
name\n\n if '.' not in name:\n return importlib.import_module(name)\n\n parts = name.split('.')\n for i in reversed(range(1, len(parts))):\n mod_name = '.'.join(parts[:i])\n attrs = parts[i:]\n\n try:\n obj = importlib.import_module(mod_name)\n except ImportError as ex:\n logger.warning(ex)\n continue\n\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except AttributeError as ex:\n logger.warning(ex)\n break\n else:\n return obj\n\n continue\n else:\n raise ImportError(\"Can't resolve: {}\".format(name))\n","sub_path":"utils/moduleimport.py","file_name":"moduleimport.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144000442","text":"\nimport requests\nimport pprint\nimport time\nimport datetime\nimport tinydb\nfrom common import load_db, que, location_db\n\n\ndef _created_range_iter():\n for year in range(2014, 9999):\n for month in [1, 4, 7, 10]:\n today = datetime.date.today()\n if datetime.datetime(year, month, 1) > datetime.datetime(today.year, today.month, today.day):\n raise StopIteration\n end_year = year if month != 10 else year + 1\n end_month = month + 3 if month != 10 else 1\n created_range = f\"created:{year}-{str(month).zfill(2)}-01..{end_year}-{str(end_month).zfill(2)}-01\"\n yield created_range\n\n\ndef _test_created_range_iter():\n for created_range in _created_range_iter():\n print(created_range)\n\n\ndef github_api_get(url):\n headers = {\n 'Accept': 'application/vnd.github.mercy-preview+json', }\n sleep_time = 10\n while True:\n response = requests.get(url, headers=headers)\n json = response.json()\n if 'message' in json and \"API rate limit exceeded for \" in json['message']:\n print(\"API rate limit exceeded\")\n sleep_time *= 2\n print(\"sleeping\", sleep_time)\n time.sleep(sleep_time)\n else:\n return response\n\n\ndef _json_iter(topic=\"portfolio-website\"):\n last_GET_time = 0\n for created_range in _created_range_iter():\n for i in range(1, 9999):\n url = f'https://api.github.com/search/repositories?q=topic:{topic}+{created_range}&page={i}&per_page=100'\n print(\"GET\", url)\n time.sleep(6 - min([time.time() - last_GET_time, 5]))\n last_GET_time = time.time()\n response = github_api_get(url)\n json = response.json()\n # pprint.pprint(json)\n json[\"url\"] = response.url\n try:\n total_count = json['total_count']\n except Exception as e:\n pprint.pprint(json)\n raise\n\n yield json\n if i > total_count // 100:\n break\n\n\ndef _test_json_iter():\n for json in _json_iter():\n print(json['url'])\n print(json)\n\n\ndef iter_repo(topic=\"portfolio-website\"):\n que = tinydb.Query()\n urlset = set()\n for json in _json_iter(topic=topic):\n total_count, repos = json['total_count'], json['items']\n for repo in repos:\n if repo['html_url'] in urlset:\n continue\n urlset.add(repo['html_url'])\n if not repo['homepage'] and repo['full_name'].endswith('.github.io'):\n username, reponame = repo['full_name'].split('/', maxsplit=1)\n if username == reponame.replace(\".github.io\", ''):\n # such as 'umihico/umihic.github.io'\n homepage = \"https://\" + reponame\n print('estimated', homepage)\n repo['homepage'] = homepage\n if repo['homepage']:\n yield repo\n else:\n print(\"no homepage\", repo['html_url'])\n\n\ndef get_repo_info(ownername, reponame):\n url = f'https://api.github.com/repos/{ownername}/{reponame}'\n pprint.pprint(requests.get(url).json())\n\n\ndef _test_iter_repo():\n for repo in iter_repo():\n pprint.pprint(repo)\n # raise\n html_url, description, homepage, 
created_at, score, stargazers_count = repo['html_url'], repo[\n 'description'], repo['homepage'], repo['created_at'][:10], repo['score'], repo['stargazers_count']\n print(created_at, html_url, description, homepage, stargazers_count)\n\n\ndef api2location(username=\"umihico\"):\n url = f'https://api.github.com/users/{username}'\n response = github_api_get(url)\n return response.json()['location']\n\n\ndef test_api2location():\n print(api2location())\n\n\ndef get_users_location():\n content_tinydb = load_db()\n for i, repo in enumerate(iter_repo()):\n username = repo['owner']['login']\n if not location_db.search(que.username == username):\n time.sleep(5)\n location = api2location(username)\n print(i, location)\n location_db.upsert({'username': username, 'location': location, 'tags': geotag(location),\n 'updated_at': int(time.time())}, que.username == username)\n\n\ndef tagble_location():\n for d in location_db.all():\n # if 'tags' in d:\n # continue\n d['tags'] = geotag(d['location'])\n print(d['location'], d['tags'])\n location_db.upsert(d, que.username == d['username'])\n\n\ndef get_users_location_boost():\n rest_data = [\n (\"alecmarcus\", None),\n (\"CheapCyborg\", \"Richmond, VA\"),\n (\"BobDempsey\", \"Florida!\")\n ]\n for username, location in rest_data:\n d = {'username': username, 'location': location,\n 'updated_at': int(time.time())}\n print(username, location)\n location_db.upsert(d, que.username == username)\n raise\n location_db.upsert()\n from proxys import proxys\n import umihico\n import threading\n import queue\n content_tinydb = load_db()\n usernames = [r['full_name'].split('/')[0] for r in content_tinydb.all()\n if not location_db.search(que.username == r['full_name'].split('/')[0])]\n username_queue = queue.Queue()\n print(len(usernames))\n print(usernames)\n raise\n for username in usernames:\n username_queue.put(username)\n lock = threading.Lock()\n\n def get_location_proxy(proxy, username_queue, location_db, lock):\n while True:\n try:\n username = username_queue.get_nowait()\n except Exception as e:\n time.sleep(3)\n continue\n url = f'https://api.github.com/users/{username}'\n try:\n response = umihico.scraping.requests_.get(url, proxy=proxy)\n response.raise_for_status()\n except Exception as e:\n time.sleep(1000)\n if \"API rate limit exceeded\" in response.text:\n username_queue.put(username)\n time.sleep(10000)\n json = response.json()\n if \"login\" not in json:\n username_queue.put(username)\n time.sleep(1000)\n continue\n location = json['location']\n d = {'username': username, 'location': location,\n 'updated_at': int(time.time())}\n print(username, location)\n with lock:\n location_db.upsert(d, que.username == username)\n # proxys = proxys[:10]\n for proxy in proxys:\n thread = threading.Thread(target=get_location_proxy, args=(\n proxy, username_queue, location_db, lock))\n thread.start()\n\n\nif __name__ == '__main__':\n # _test_created_range_iter()\n # _test_json_iter()\n # _test_iter_repo()\n # test_api2location()\n # get_users_location()\n # get_users_location_boost()\n get_users_location()\n # tagble_location()\n # get_repo_info(\"meetcric\", \"myblog\")\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"26951848","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--api_key\", required=True, help=\"HistSync api 
key\")\nparser.add_argument(\"--user\", required=True, help=\"Your GitHub/HistSync username\")\nparser.add_argument(\"--host\", help=\"HistSync host\", default=\"http://histsync.herokuapp.com\")\nparser.add_argument(\"command\", help=\"Command to send\")\nargs = parser.parse_args()\n\nshell_pid = os.getppid()\n\nuser = args.user\napi_key = args.api_key\nhost = args.host\ncommand_text = args.command\n\nif os.fork() != 0:\n sys.exit()\n\nimport requests\nimport logging\n\n\ndef setup_logging():\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s')\n\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n\ndef upload_command(host, username, api_key, command_text):\n try:\n payload = {'api_key': api_key, 'command_text': command_text}\n r = requests.post(\"{}/api/v0/user/{}/add_command\".format(host, username), data=payload)\n\n r.raise_for_status()\n except Exception as e:\n logging.exception(e)\n\n\nsetup_logging()\nupload_command(host, user, api_key, command_text)\n","sub_path":"histsync-client.py","file_name":"histsync-client.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"62999558","text":"\"\"\"\n快排 partition 思想\n1. 需要熟悉快速排序的方法\n2. 每次快排结束,枢纽左侧的数都比枢纽小,右侧的数都比它大\n3. 利用这个特性,进行如下判断:\n 如果枢纽正好在第 k 大元素对应的位置上,就是结果;\n 如果枢纽在第 k 大元素位置的左侧,说明需要在右侧查找;\n 反之,需要在左侧查找\n4. 上述过程不断循环,直到找到目标元素\n\n时间复杂度: O(n)\n空间复杂度:O(1)\n\n注意:\n1. 该方法会改变原数组\n2. 适用于少量数据\n\"\"\"\nclass Solution1:\n def findKthLargest(self, nums, k: int) -> int:\n if not nums or k < 1:\n return\n\n start, end = 0, len(nums) - 1\n l = len(nums)\n # 第一轮快排\n index = self.partition(nums, start, end)\n\n while index != l - k:\n if index < l - k: # 在枢轴的右侧寻找\n start = index + 1\n index = self.partition(nums, start, end)\n elif index > l - k: # 在枢轴的左侧寻找\n end = index - 1\n index = self.partition(nums, start, end)\n return nums[index]\n\n # 快速排序的划分函数,实现枢轴左侧的值都比枢轴小,右侧...大\n def partition(self, nums, start, end):\n pivot = nums[start]\n while start < end:\n while start < end and nums[end] >= pivot:\n end -= 1\n nums[start] = nums[end]\n\n while start < end and nums[start] <= pivot:\n start += 1\n nums[end] = nums[start]\n nums[start] = pivot\n return start\n\n\n\"\"\"\n哈希表、堆排序思想\n\"\"\"","sub_path":"0215.Kth-largest-element-in-an-array.py","file_name":"0215.Kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"555630465","text":"# -*- coding: utf-8 -*-\n\"\"\"\n :copyright: ©2018 by IPIP.net\n\"\"\"\n\n\nclass MetaData(object):\n def __init__(self, **kwargs):\n self.fields = kwargs['fields']\n self.node_count = kwargs['node_count']\n self.total_size = kwargs['total_size']\n self.build = kwargs['build']\n self.languages = kwargs['languages']\n self.ip_version = kwargs['ip_version']","sub_path":"ipdb/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"629979789","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools, _\n\n\nclass Company(models.Model):\n _inherit = 'res.company'\n\n # To do: Update the field type (ZEN)\n day_set_lost = fields.Integer(string='Day(s) to set to LOST', default=7)\n day_set_due = 
fields.Integer(string='Day(s) to set DUE for each activity',\n default=7)\n default_sale_target = fields.Float(string='Sales Target for Company')\n auto_lead = fields.Boolean(string='Auto Create Lead From SO',\n default=True)\n\n @api.onchange('day_set_due')\n def change_activity_date(self):\n # write the day_set_due to all activity type\n activity_types = self.env['mail.activity.type'].search([])\n for activity_type in activity_types:\n activity_type.write({\n 'delay_count': self.day_set_due\n })\n\n\nclass User(models.Model):\n _inherit = 'res.users'\n\n sale_ids = fields.One2many('sale.order', 'user_id', string='Sale')\n\n @api.model\n def systray_get_activities(self):\n res = super(User, self).systray_get_activities()\n # rename 'Lead/Opportunity' to 'Opportunity' for crm systray activity\n for activity in res:\n if activity['model'] == 'crm.lead':\n activity['name'] = 'Opportunity'\n return res\n","sub_path":"pivotino_crm/models/pivotino_res_company.py","file_name":"pivotino_res_company.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"84992320","text":"import re\nimport copy\nfrom collections import defaultdict\n\ndef masked_value(value, mask):\n bits = list('{0:036b}'.format(int(value)))\n\n for m, v in mask:\n bits[m] = v\n\n return int(''.join(bits), 2)\n\ndef masked_permutations(bits, mask, index, perms):\n for i in range(index, len(mask)):\n if mask[i] == 'X':\n for replacement in ['0', '1']:\n changed = copy.deepcopy(bits) \n changed[i] = replacement\n masked_permutations(changed, mask, i + 1, perms)\n break\n \n perms.add(int(''.join(bits), 2))\n\ndef masked_address(address, mask):\n bits = list('{0:036b}'.format(int(address)))\n\n for i in range(0, len(mask)):\n if mask[i] == '1':\n bits[i] = '1'\n\n perms = set()\n masked_permutations(bits, mask, 0, perms)\n \n return perms\n\ndef solve1(data):\n mem = defaultdict(int)\n mask = []\n for instruction in data:\n parts = re.findall(r'\\w+|\\d|\\d+', instruction)\n\n if parts[0] == 'mask':\n mask = [(i, v) for i, v in enumerate(parts[1]) if v != 'X']\n elif parts[0] == 'mem':\n mem[parts[1]] = masked_value(parts[2], mask)\n\n return sum([v for k, v in mem.items()])\n\ndef solve2(data):\n mem = defaultdict(int)\n mask = []\n for instruction in data:\n parts = re.findall(r'\\w+|\\d|\\d+', instruction)\n\n if parts[0] == 'mask':\n mask = list(parts[1])\n elif parts[0] == 'mem':\n addresses = masked_address(parts[1], mask)\n for a in addresses:\n mem[a] = int(parts[2])\n\n return sum([v for k, v in mem.items()])\n\nif __name__ == \"__main__\":\n with open(\"input.txt\", \"r\") as input_file:\n data = input_file.read().splitlines()\n print(\"Part1: \", solve1(data))\n print(\"Part2: \", solve2(data))","sub_path":"day14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"114140471","text":"import hashlib\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\n\nimport synapse.tests.utils as s_t_utils\n\n\nBITS = 2048\nHEXSTR_MODULUS = 'abbd407f417fe8d6632aae1c6d09b271416bef9244e61f7c7c2856ddfde3ecf93cd50b3eaea5c9b8cb9bfb5a317bf50925a' \\\n 'b500a06247ec2f3294891a8e62c317ee648f933ec1bf760a9d7e9a5ea4706b2a2c3f6376079114ddcc7a15d3fecf001458f' \\\n '22f0551802a25ef95cf464aabeb0514ea3849583bc09022730c44a2ff5f893fc6885add69c103d75114dd2f11436f617fbf' \\\n 
'b0af2978802aabf35483bbfcc470d50d6afb4283c1d06d2bf27efe9d7c09f226895633a46c3d77173bf0db8634299462b5f' \\\n '29629ad3b0470c76ddfd331ed0207d4dbd5fd44a2f66ca5f802ac0130e4a4bb2c149b5baa7a373188823ee21fe2950a76c8' \\\n '18586919f7914453d'\nHEXSTR_PUBLIC_EXPONENT = 0x10001\nHEXSTR_PRIVATE_EXPONENT = '9db58a80120f3b2b7d1f998a231b8f916fa985f4456f2a24f0033f5a56a7b35b61e0a695e65dfab3c7ceb2f0ad' \\\n '968e7bdaeac9f29a97730ce5add8a5627c14c3532c7880d88c8f56099f8ed65275a4c9e2cb93b70c3d7c904677' \\\n '639fac7962c537f5bfaf2f12859d0dacb7c403ee59da0922715bba0a6f5202d7c653833e39715f04664c2396c4' \\\n '7bdf3f09f5486d8f6aea767ba011f1a5a10c8b57f079aea58abfd5e50ef20aa5e09b1082f6af98e806c9aeeb89' \\\n '4148a7d82cd6e1443c6115eb567fba0eacf5b7178518b8ba312da6ace22238d1ed19f3e703652576a6152ba60d' \\\n '4d4c6bc75b3ee7c8efeadee0c5ed7c14bf2930a6c4f13137becf38912f49c5'\nHEXSTR_PRIVATE_PRIME_P = 'dee90ee63c12729a3fe7d38c581abf7e1c784ec0bd4bfdd1282286ea9996673942a24c7c98b31c6cd12db8ba96d' \\\n 'a785c4392569d7bfc2be9d9907c3b7fbf40d31891642952a0e5a23dfbe721a746588df9a246ea4936a1958f66fd' \\\n '3a32c08008a0f6ed9b516fa869fb08a57ef31c0ec217f173e489a2f8f111e25c25c961c2b7'\nHEXSTR_PRIVATE_PRIME_Q = 'c53b9c8dfb3dda04d16c7f779a02b3b8c7b44bf876dc88ad562778eafaded9ade882ccfb887761515a251c22476' \\\n '1bef7207fa489e398041787cfbd155f1034a207d517f06bc76a044262484f82f0c6a887f776b1dce837408999d8' \\\n '8dd33a96c7f80e23719e77a11075d337bf9cc47d7dbf98e341b81c23f165dd15ccfd2973ab'\n\nTEST_MD5 = hashlib.md5(b'test', usedforsecurity=False).hexdigest()\nTEST_SHA1 = hashlib.sha1(b'test', usedforsecurity=False).hexdigest()\nTEST_SHA256 = hashlib.sha256(b'test').hexdigest()\nTEST_SHA384 = hashlib.sha384(b'test').hexdigest()\nTEST_SHA512 = hashlib.sha512(b'test').hexdigest()\n\nclass CryptoModelTest(s_t_utils.SynTest):\n\n async def test_model_crypto_currency(self):\n\n async with self.getTestCore() as core:\n\n nodes = await core.nodes('[ crypto:currency:client=(1.2.3.4, (btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)) ]')\n self.len(1, nodes)\n\n nodes = await core.nodes('''\n crypto:currency:address=btc/1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2\n [ :seed={\n [ crypto:key=*\n :algorithm=aes256\n :mode=CBC\n :iv=41414141\n :private=00000000\n :private:md5=$md5\n :private:sha1=$sha1\n :private:sha256=$sha256\n :public=ffffffff\n :public:md5=$md5\n :public:sha1=$sha1\n :public:sha256=$sha256\n :seed:passwd=s3cret\n :seed:algorithm=pbkdf2 ]\n }]\n ''', opts={'vars': {'md5': TEST_MD5, 'sha1': TEST_SHA1, 'sha256': TEST_SHA256}})\n\n self.len(1, await core.nodes('crypto:algorithm=aes256'))\n self.len(1, await core.nodes('''\n crypto:key:algorithm=aes256\n +:private=00000000\n +:public=ffffffff\n +:seed:algorithm=pbkdf2\n +:seed:passwd=s3cret\n +:mode=cbc\n +:iv=41414141\n '''))\n self.len(1, await core.nodes('inet:passwd=s3cret -> crypto:key -> crypto:currency:address'))\n\n self.len(2, await core.nodes('crypto:key -> hash:md5'))\n self.len(2, await core.nodes('crypto:key -> hash:sha1'))\n self.len(2, await core.nodes('crypto:key -> hash:sha256'))\n\n nodes = await core.nodes('inet:client=1.2.3.4 -> crypto:currency:client -> crypto:currency:address')\n self.eq(nodes[0].get('coin'), 'btc')\n self.eq(nodes[0].get('iden'), '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2')\n\n nodes = await core.nodes('''\n [\n econ:acct:payment=\"*\"\n :from:coinaddr=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :to:coinaddr=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n ]\n ''')\n\n # these would explode if the model was wrong\n self.len(1, await core.nodes('crypto:currency:address [ :desc=\"woot 
woot\" :contact=\"*\" ] -> ps:contact'))\n self.len(1, await core.nodes('crypto:currency:address:iden=1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n self.len(1, await core.nodes('crypto:currency:address:coin=btc'))\n self.len(1, await core.nodes('crypto:currency:client:inetaddr=1.2.3.4'))\n\n opts = {'vars': {\n 'input': hashlib.sha256(b'asdf').hexdigest(),\n 'output': hashlib.sha256(b'qwer').hexdigest(),\n }}\n\n payors = await core.nodes('[ crypto:payment:input=* :transaction=(t1,) :address=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2) :value=30 ]')\n self.eq(payors[0].get('value'), '30')\n self.eq(payors[0].get('address'), ('btc', '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n\n payees = await core.nodes('[ crypto:payment:output=* :transaction=(t1,) :address=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2) :value=30 ]')\n self.eq(payees[0].get('value'), '30')\n self.eq(payees[0].get('address'), ('btc', '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n\n payor = payors[0].ndef[1]\n payee = payees[0].ndef[1]\n\n nodes = await core.nodes(f'''\n [\n crypto:currency:transaction=(t1,)\n :hash=0x01020304\n :desc=\"Woot Woot\"\n :block=(BTC, 998877)\n :success=1\n :status:code=10\n :status:message=success\n :to = (btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :from = (btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :fee = 0.0001\n :value = 30\n :time = 20211031\n :eth:gasused = 10\n :eth:gaslimit = 20\n :eth:gasprice = 0.001\n :contract:input = $input\n :contract:output = $output\n ]\n ''', opts=opts)\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('hash'), '01020304')\n self.eq(node.get('desc'), 'Woot Woot')\n self.eq(node.get('block'), ('btc', 998877))\n self.eq(node.get('block:coin'), 'btc')\n self.eq(node.get('block:offset'), 998877)\n self.eq(node.get('success'), True)\n self.eq(node.get('status:code'), 10)\n self.eq(node.get('status:message'), 'success')\n self.eq(node.get('to'), ('btc', '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n self.eq(node.get('from'), ('btc', '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n self.eq(node.get('fee'), '0.0001')\n self.eq(node.get('value'), '30')\n self.eq(node.get('time'), 1635638400000)\n self.eq(node.get('eth:gasused'), 10)\n self.eq(node.get('eth:gaslimit'), 20)\n self.eq(node.get('eth:gasprice'), '0.001')\n self.eq(node.get('contract:input'), 'sha256:f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b')\n self.eq(node.get('contract:output'), 'sha256:f6f2ea8f45d8a057c9566a33f99474da2e5c6a6604d736121650e2730c6fb0a3')\n\n with self.raises(s_exc.IsDeprLocked):\n await node.set('inputs', (payor,))\n with self.raises(s_exc.IsDeprLocked):\n await node.set('outputs', (payee,))\n\n q = 'crypto:currency:transaction=(t1,) | tee { -> crypto:payment:input } { -> crypto:payment:output }'\n nodes = await core.nodes(q)\n self.eq({n.ndef[1] for n in nodes}, {payor, payee})\n\n nodes = await core.nodes('''\n [\n crypto:currency:block=(btc, 12345)\n :hash=0x01020304\n :minedby = (btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :time=20211130\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('coin'), 'btc')\n self.eq(node.get('offset'), 12345)\n self.eq(node.get('hash'), '01020304')\n self.eq(node.get('time'), 1638230400000)\n\n nodes = await core.nodes('''\n [\n crypto:smart:contract=*\n :transaction=*\n :bytecode=$input\n :address = (btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :token:name=Foo\n :token:symbol=Bar\n :token:totalsupply=300\n ]''', opts=opts)\n self.len(1, nodes)\n node = nodes[0]\n self.nn(node.get('transaction'))\n self.eq(node.get('bytecode'), 
'sha256:f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b')\n self.eq(node.get('address'), ('btc', '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))\n self.eq(node.get('token:name'), 'Foo')\n self.eq(node.get('token:symbol'), 'Bar')\n self.eq(node.get('token:totalsupply'), '300')\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:transfertoken=*\n :token=(2bdea834252a220b61aadf592cc0de66, 30)\n :to=eth/bbbb\n :from=eth/aaaa\n :transaction=*\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.nn(node.get('token'))\n self.nn(node.get('transaction'))\n self.eq(node.get('to'), ('eth', 'bbbb'))\n self.eq(node.get('from'), ('eth', 'aaaa'))\n self.len(1, await core.nodes('crypto:smart:effect:transfertoken -> crypto:smart:token'))\n self.len(1, await core.nodes('crypto:smart:effect:transfertoken -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:transfertokens=*\n :to=eth/bbbb\n :from=eth/aaaa\n :amount=20\n :transaction=*\n :contract=*\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.nn(node.get('transaction'))\n self.nn(node.get('contract'))\n self.eq(node.get('to'), ('eth', 'bbbb'))\n self.eq(node.get('from'), ('eth', 'aaaa'))\n self.eq(node.get('amount'), '20')\n self.len(1, await core.nodes('crypto:smart:effect:transfertokens -> crypto:smart:contract'))\n self.len(1, await core.nodes('crypto:smart:effect:transfertokens -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:edittokensupply=*\n :amount=20\n :contract=*\n :transaction=*\n :totalsupply=1020\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.nn(node.get('contract'))\n self.nn(node.get('transaction'))\n self.eq(node.get('amount'), '20')\n self.eq(node.get('totalsupply'), '1020')\n self.len(1, await core.nodes('crypto:smart:effect:edittokensupply -> crypto:smart:contract'))\n self.len(1, await core.nodes('crypto:smart:effect:edittokensupply -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:minttoken=*\n :index=0\n :token=(2bdea834252a220b61aadf592cc0de66, 30)\n :transaction=*\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('index'), 0)\n self.nn(node.get('token'))\n self.nn(node.get('transaction'))\n self.len(1, await core.nodes('crypto:smart:effect:minttoken -> crypto:smart:token'))\n self.len(1, await core.nodes('crypto:smart:effect:minttoken -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:burntoken=*\n :index=0\n :token=(2bdea834252a220b61aadf592cc0de66, 30)\n :transaction=*\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('index'), 0)\n self.nn(node.get('token'))\n self.nn(node.get('transaction'))\n self.len(1, await core.nodes('crypto:smart:effect:burntoken -> crypto:smart:token'))\n self.len(1, await core.nodes('crypto:smart:effect:burntoken -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:proxytoken=*\n :index=0\n :token=(2bdea834252a220b61aadf592cc0de66, 30)\n :transaction=*\n :owner=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :proxy=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('index'), 0)\n self.nn(node.get('token'))\n self.nn(node.get('owner'))\n self.nn(node.get('proxy'))\n self.len(1, await core.nodes('crypto:smart:effect:minttoken -> crypto:smart:token'))\n self.len(1, await core.nodes('crypto:smart:effect:minttoken -> crypto:currency:transaction'))\n\n nodes = await core.nodes('''\n [\n 
crypto:smart:effect:proxytokenall=*\n :index=0\n :transaction=*\n :contract=*\n :owner=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :proxy=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :approval=$lib.true\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('index'), 0)\n self.nn(node.get('owner'))\n self.nn(node.get('proxy'))\n self.nn(node.get('contract'))\n self.true(node.get('approval'))\n self.len(2, await core.nodes('crypto:smart:effect:proxytokenall -> crypto:currency:address'))\n self.len(1, await core.nodes('crypto:smart:effect:proxytokenall -> crypto:currency:transaction'))\n self.len(1, await core.nodes('crypto:smart:effect:proxytokenall -> crypto:smart:contract'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:effect:proxytokens=*\n :index=0\n :transaction=*\n :contract=*\n :owner=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :proxy=(btc, 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2)\n :amount=0xff\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(node.get('index'), 0)\n self.nn(node.get('owner'))\n self.nn(node.get('proxy'))\n self.nn(node.get('contract'))\n self.eq(node.get('amount'), 'ff')\n self.len(2, await core.nodes('crypto:smart:effect:proxytokens -> crypto:currency:address'))\n self.len(1, await core.nodes('crypto:smart:effect:proxytokens -> crypto:currency:transaction'))\n self.len(1, await core.nodes('crypto:smart:effect:proxytokens -> crypto:smart:contract'))\n\n nodes = await core.nodes('''\n [\n crypto:smart:token=(2bdea834252a220b61aadf592cc0de66, 30)\n :owner=eth/aaaa\n :nft:url = https://coin.vertex.link/nfts/30\n :nft:meta = $lib.dict(name=WootWoot)\n :nft:meta:name = WootWoot\n :nft:meta:description = LoLoL\n :nft:meta:image = https://vertex.link/favicon.ico\n ]''')\n self.len(1, nodes)\n node = nodes[0]\n self.eq(('2bdea834252a220b61aadf592cc0de66', '30'), node.ndef[1])\n self.eq('2bdea834252a220b61aadf592cc0de66', node.get('contract'))\n self.eq('30', node.get('tokenid'))\n self.eq(('eth', 'aaaa'), node.get('owner'))\n self.eq('https://coin.vertex.link/nfts/30', node.get('nft:url'))\n self.eq({'name': 'WootWoot'}, node.get('nft:meta'))\n self.eq('WootWoot', node.get('nft:meta:name'))\n self.eq('LoLoL', node.get('nft:meta:description'))\n self.eq('https://vertex.link/favicon.ico', node.get('nft:meta:image'))\n\n nodes = await core.nodes('''\n [ crypto:currency:transaction=*\n :value = '1e-24'\n ]''')\n self.len(1, nodes)\n self.eq(nodes[0].get('value'), '0.000000000000000000000001')\n\n nodes = await core.nodes('''\n [ crypto:currency:transaction=*\n :value = 0.000000000000000000000002\n ]''')\n self.len(1, await core.nodes('crypto:currency:transaction:value=1e-24'))\n self.len(1, await core.nodes('crypto:currency:transaction:value=0.000000000000000000000001'))\n\n huge = '730750818665451459101841.000000000000000000000002'\n huge2 = '730750818665451459101841.0000000000000000000000015'\n huge3 = '730750818665451459101841.000000000000000000000001'\n\n self.len(1, await core.nodes(f'[ crypto:currency:transaction=* :value={huge} ]'))\n self.len(1, await core.nodes(f'[ crypto:currency:transaction=* :value={huge2} ]'))\n self.len(2, await core.nodes(f'crypto:currency:transaction:value={huge}'))\n\n self.len(1, await core.nodes(f'[ crypto:currency:transaction=* :value={huge3} ]'))\n self.len(2, await core.nodes(f'crypto:currency:transaction:value={huge}'))\n self.len(2, await core.nodes(f'crypto:currency:transaction:value={huge2}'))\n self.len(1, await core.nodes(f'crypto:currency:transaction:value={huge3}'))\n\n async def test_norm_lm_ntlm(self):\n async 
with self.getTestCore() as core: # type: s_cortex.Cortex\n lm = core.model.type('hash:lm')\n valu, subs = lm.norm(TEST_MD5.upper())\n self.eq(valu, TEST_MD5)\n self.eq(subs, {})\n self.raises(s_exc.BadTypeValu, lm.norm, TEST_SHA256)\n\n ntlm = core.model.type('hash:ntlm')\n valu, subs = lm.norm(TEST_MD5.upper())\n self.eq(valu, TEST_MD5)\n self.eq(subs, {})\n self.raises(s_exc.BadTypeValu, ntlm.norm, TEST_SHA256)\n\n async def test_forms_crypto_simple(self):\n async with self.getTestCore() as core: # type: s_cortex.Cortex\n async with await core.snap() as snap:\n # md5\n node = await snap.addNode('hash:md5', TEST_MD5.upper())\n self.eq(node.ndef, ('hash:md5', TEST_MD5))\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('hash:md5', TEST_SHA1))\n # sha1\n node = await snap.addNode('hash:sha1', TEST_SHA1.upper())\n self.eq(node.ndef, ('hash:sha1', TEST_SHA1))\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('hash:sha1', TEST_SHA256))\n # sha256\n node = await snap.addNode('hash:sha256', TEST_SHA256.upper())\n self.eq(node.ndef, ('hash:sha256', TEST_SHA256))\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('hash:sha256', TEST_SHA384))\n # sha384\n node = await snap.addNode('hash:sha384', TEST_SHA384.upper())\n self.eq(node.ndef, ('hash:sha384', TEST_SHA384))\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('hash:sha384', TEST_SHA512))\n # sha512\n node = await snap.addNode('hash:sha512', TEST_SHA512.upper())\n self.eq(node.ndef, ('hash:sha512', TEST_SHA512))\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('hash:sha512', TEST_MD5))\n\n async def test_form_rsakey(self):\n prop = 'rsa:key'\n props = {\n 'bits': BITS,\n 'priv:exp': HEXSTR_PRIVATE_EXPONENT,\n 'priv:p': HEXSTR_PRIVATE_PRIME_P,\n 'priv:q': HEXSTR_PRIVATE_PRIME_Q,\n }\n valu = (HEXSTR_MODULUS, HEXSTR_PUBLIC_EXPONENT)\n\n async with self.getTestCore() as core: # type: s_cortex.Cortex\n\n async with await core.snap() as snap:\n\n node = await snap.addNode(prop, valu, props)\n\n self.eq(node.ndef[1], (HEXSTR_MODULUS, HEXSTR_PUBLIC_EXPONENT))\n\n self.eq(node.get('mod'), HEXSTR_MODULUS)\n self.eq(node.get('bits'), BITS)\n self.eq(node.get('pub:exp'), HEXSTR_PUBLIC_EXPONENT)\n self.eq(node.get('priv:exp'), HEXSTR_PRIVATE_EXPONENT)\n self.eq(node.get('priv:p'), HEXSTR_PRIVATE_PRIME_P)\n self.eq(node.get('priv:q'), HEXSTR_PRIVATE_PRIME_Q)\n\n async def test_model_x509(self):\n\n async with self.getTestCore() as core:\n\n crl = s_common.guid()\n cert = s_common.guid()\n icert = s_common.guid()\n fileguid = 'guid:' + s_common.guid()\n\n nodes = await core.nodes('''\n [ crypto:x509:cert=$icert\n :subject=\"CN=issuer.link\"\n :issuer:cert=$icert\n :selfsigned=$lib.true\n ]\n ''', opts={'vars': {'icert': icert}})\n self.eq(nodes[0].ndef, ('crypto:x509:cert', icert))\n self.eq(nodes[0].get('subject'), \"CN=issuer.link\")\n self.eq(nodes[0].get('issuer:cert'), icert)\n self.eq(nodes[0].get('selfsigned'), True)\n\n nodes = await core.nodes('''\n [ crypto:x509:cert=$cert\n\n :subject=\"CN=vertex.link\"\n :issuer=\"DN FOO THING\"\n :issuer:cert=$icert\n\n :serial=0000000000000000000000000000000000003039\n :version=v3\n\n :validity:notafter=2019\n :validity:notbefore=2015\n\n :md5=$md5\n :sha1=$sha1\n :sha256=$sha256\n\n :algo=1.2.840.113549.1.1.11\n :rsa:key=(ff00ff00, 100)\n :signature=ff00ff00\n\n :ext:sans=((dns, vertex.link), (dns, \"*.vertex.link\"))\n :ext:crls = ((dns, http://vertex.link/crls),)\n :crl:urls = (\"http://vertex.link/crls\",)\n\n :identities:urls=(http://woot.com/1, http://woot.com/2)\n 
:identities:fqdns=(vertex.link, woot.com)\n :identities:ipv4s=(1.2.3.4, 5.5.5.5)\n :identities:ipv6s=(ff::11, ff::aa)\n :identities:emails=(visi@vertex.link, v@vtx.lk)\n ]\n ''', opts={'vars': {'icert': icert, 'cert': cert, 'md5': TEST_MD5, 'sha1': TEST_SHA1, 'sha256': TEST_SHA256}})\n\n self.eq(nodes[0].ndef, ('crypto:x509:cert', cert))\n self.eq(nodes[0].get('subject'), \"CN=vertex.link\")\n self.eq(nodes[0].get('issuer'), \"DN FOO THING\")\n self.eq(nodes[0].get('issuer:cert'), icert)\n self.eq(nodes[0].get('serial'), \"0000000000000000000000000000000000003039\")\n self.eq(nodes[0].get('version'), 2)\n\n self.eq(nodes[0].get('validity:notafter'), 1546300800000)\n self.eq(nodes[0].get('validity:notbefore'), 1420070400000)\n\n self.eq(nodes[0].get('md5'), TEST_MD5)\n self.eq(nodes[0].get('sha1'), TEST_SHA1)\n self.eq(nodes[0].get('sha256'), TEST_SHA256)\n\n self.eq(nodes[0].get('algo'), '1.2.840.113549.1.1.11')\n self.eq(nodes[0].get('rsa:key'), ('ff00ff00', 100))\n self.eq(nodes[0].get('signature'), 'ff00ff00')\n self.eq(nodes[0].get('ext:crls'), (('dns', 'http://vertex.link/crls'),))\n self.eq(nodes[0].get('crl:urls'), ('http://vertex.link/crls',))\n self.eq(nodes[0].get('ext:sans'), (('dns', '*.vertex.link'), ('dns', 'vertex.link')))\n self.eq(nodes[0].get('identities:urls'), ('http://woot.com/1', 'http://woot.com/2'))\n self.eq(nodes[0].get('identities:fqdns'), ('vertex.link', 'woot.com'))\n self.eq(nodes[0].get('identities:ipv4s'), (0x01020304, 0x05050505))\n self.eq(nodes[0].get('identities:ipv6s'), ('ff::11', 'ff::aa'))\n\n nodes = await core.nodes('''\n [\n crypto:x509:crl=$crl\n :url=http://vertex.link/crls\n :file=\"*\"\n ]\n ''', opts={'vars': {'crl': crl}})\n\n self.eq(nodes[0].ndef, ('crypto:x509:crl', crl))\n self.nn(nodes[0].get('file'))\n self.eq(nodes[0].get('url'), 'http://vertex.link/crls')\n\n opts = {'vars': {'cert': cert, 'file': fileguid}}\n nodes = await core.nodes('[ crypto:x509:signedfile = ($cert, $file) ]', opts=opts)\n\n self.eq(nodes[0].ndef, ('crypto:x509:signedfile', (cert, fileguid)))\n self.eq(nodes[0].get('cert'), cert)\n self.nn(nodes[0].get('file'), fileguid)\n\n opts = {'vars': {'cert': cert, 'crl': crl}}\n nodes = await core.nodes('[ crypto:x509:revoked = ($crl, $cert) ]', opts=opts)\n\n self.eq(nodes[0].ndef, ('crypto:x509:revoked', (crl, cert)))\n self.eq(nodes[0].get('crl'), crl)\n self.nn(nodes[0].get('cert'), cert)\n","sub_path":"synapse/tests/test_model_crypto.py","file_name":"test_model_crypto.py","file_ext":"py","file_size_in_byte":26981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"3758427","text":"bl_info = {\r\n \"name\": \"3DSupport\",\r\n \"author\": \"mimyllyv\",\r\n \"version\": (0, 0, 1),\r\n \"blender\": (2, 78, 0),\r\n \"location\": \"Toolshelf > 3DSupport\",\r\n \"description\": \"\",\r\n \"warning\": \"First alpha version\",\r\n \"wiki_url\": \"\"\r\n \"\",\r\n \"category\": \"Object\",\r\n}\r\n\r\nimport bpy, bmesh, math\r\n\r\nclass ThreeDSupport(bpy.types.Panel):\r\n bl_space_type = 'VIEW_3D'\r\n bl_region_type = 'TOOLS'\r\n bl_label = '3D Support'\r\n# bl_context = 'EDIT_MESH'\r\n bl_category = '3D Support'\r\n\r\n def draw(self, context):\r\n self.layout.prop(bpy.context.scene.tds_vars, 'go_through')\r\n self.layout.prop(bpy.context.scene.tds_vars, 'width')\r\n self.layout.prop(bpy.context.scene.tds_vars, 'tip_height')\r\n self.layout.operator('tds.add_support', text='Add new Support', icon='MESH_CUBE')\r\n\r\nclass ThreeDSupportVariables(bpy.types.PropertyGroup):\r\n 
go_through = bpy.props.FloatProperty(\r\n name=\"Go through\",\r\n description=\"Go through\",\r\n default=0.5,\r\n )\r\n \r\n width = bpy.props.FloatProperty(\r\n name=\"Width\",\r\n description=\"Width\",\r\n default=0.4\r\n )\r\n\r\n tip_height = bpy.props.FloatProperty(\r\n name=\"Tip height\",\r\n description=\"Tip height\",\r\n default=0.5\r\n )\r\n\r\ndef createSupport(ctx):\r\n obj = bpy.context.object\r\n mesh = obj.data # Assumed that obj.type == 'MESH'\r\n obj.update_from_editmode() # Loads edit-mode data into object data\r\n\r\n selected_vertices = [v for v in mesh.vertices if v.select]\r\n\r\n if(len(selected_vertices) == 2):\r\n createSupportObject(ctx, obj, selected_vertices[0], selected_vertices[1])\r\n \r\ndef createSupportObject(ctx, obj, v1, v2):\r\n if(v1.co[2] > v2.co[2]):\r\n vt = v1\r\n v1 = v2\r\n v2 = vt\r\n\r\n go_through = bpy.data.scenes[0].tds_vars.go_through\r\n width = bpy.data.scenes[0].tds_vars.width\r\n tip_height = bpy.data.scenes[0].tds_vars.tip_height\r\n \r\n bm = bmesh.from_edit_mesh(obj.data)\r\n\r\n f0v0 = bm.verts.new((v1.co[0], v1.co[1], v1.co[2] - go_through))\r\n\r\n f1v1 = bm.verts.new((v1.co[0] - width, v1.co[1] - width, v1.co[2] + tip_height))\r\n f1v2 = bm.verts.new((v1.co[0] - width, v1.co[1] + width, v1.co[2] + tip_height))\r\n f1v3 = bm.verts.new((v1.co[0] + width, v1.co[1] - width, v1.co[2] + tip_height))\r\n f1v4 = bm.verts.new((v1.co[0] + width, v1.co[1] + width, v1.co[2] + tip_height))\r\n\r\n bm.faces.new((f0v0, f1v1, f1v2))\r\n bm.faces.new((f0v0, f1v1, f1v3))\r\n bm.faces.new((f0v0, f1v2, f1v4))\r\n bm.faces.new((f0v0, f1v3, f1v4))\r\n\r\n f3v0 = bm.verts.new((v2.co[0], v2.co[1], v2.co[2] + go_through))\r\n\r\n f2v1 = bm.verts.new((v2.co[0] - width, v2.co[1] - width, v2.co[2] - tip_height))\r\n f2v2 = bm.verts.new((v2.co[0] - width, v2.co[1] + width, v2.co[2] - tip_height))\r\n f2v3 = bm.verts.new((v2.co[0] + width, v2.co[1] - width, v2.co[2] - tip_height))\r\n f2v4 = bm.verts.new((v2.co[0] + width, v2.co[1] + width, v2.co[2] - tip_height))\r\n\r\n bm.faces.new((f3v0, f2v1, f2v2))\r\n bm.faces.new((f3v0, f2v1, f2v3))\r\n bm.faces.new((f3v0, f2v2, f2v4))\r\n bm.faces.new((f3v0, f2v3, f2v4))\r\n\r\n bm.faces.new((f1v1, f1v2, f2v2, f2v1))\r\n bm.faces.new((f1v2, f1v4, f2v4, f2v2))\r\n bm.faces.new((f1v4, f1v3, f2v3, f2v4))\r\n bm.faces.new((f1v3, f1v1, f2v1, f2v3))\r\n\r\n bmesh.update_edit_mesh(obj.data, False, False)\r\n\r\nclass ThreeDSupportOperator(bpy.types.Operator):\r\n bl_idname = 'tds.add_support'\r\n bl_label = 'Add support'\r\n \r\n def execute(self, context):\r\n createSupport(context)\r\n return {'FINISHED'}\r\n \r\n def invoke(self, context, event):\r\n return self.execute(context)\r\n\r\ndef register():\r\n bpy.utils.register_class(ThreeDSupport)\r\n bpy.utils.register_class(ThreeDSupportOperator)\r\n bpy.utils.register_class(ThreeDSupportVariables)\r\n bpy.types.Scene.tds_vars = bpy.props.PointerProperty(type=ThreeDSupportVariables)\r\n \r\ndef unregister():\r\n bpy.utils.unregister_class(ThreeDSupport)\r\n bpy.utils.unregister_class(ThreeDSupportOperator)\r\n bpy.utils.unregister_class(ThreeDSupportVariables)\r\n \r\nif(__name__ == '__main__'):\r\n register()\r\n","sub_path":"3DSupport/src/3DSupport.py","file_name":"3DSupport.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"545285351","text":"\"\"\"Deal with ipv4 and ipv6 IP addresses.\"\"\"\n\nimport ipaddress\nfrom dyndns.exceptions import IpAddressesError\n\n\ndef 
validate(address, ip_version=None):\n try:\n address = ipaddress.ip_address(address)\n if ip_version and ip_version != address.version:\n raise IpAddressesError('IP version \"{}\" does not match.'\n .format(ip_version))\n return str(address), address.version\n except ValueError:\n raise IpAddressesError('Invalid ip address \"{}\"'.format(address))\n\n\ndef format_attr(ip_version):\n return 'ipv{}'.format(ip_version)\n\n\nclass IpAddresses(object):\n\n def __init__(self, ip_1=None, ip_2=None, ipv4=None, ipv6=None,\n request=None):\n\n if request:\n self.request = request\n\n self.ipv4 = None\n \"\"\"The ipv4 address to update DNS record with.\"\"\"\n if ipv4:\n self.ipv4, ipv4_version = validate(ipv4, 4)\n\n self.ipv6 = None\n \"\"\"The ipv6 address to update DNS record with.\"\"\"\n if ipv6:\n self.ipv6, ipv6_version = validate(ipv6, 6)\n\n if ip_1:\n self._set_ip(ip_1)\n\n if ip_2:\n self._set_ip(ip_2)\n\n if not self.ipv4 and not self.ipv6:\n self._get_client_ip()\n\n if not self.ipv4 and not self.ipv6:\n raise IpAddressesError('No ip address set.')\n\n def _get_ip(self, ip_version):\n return getattr(self, format_attr(ip_version))\n\n def _setattr(self, ip_version, value):\n return setattr(self, format_attr(ip_version), value)\n\n def _get_client_ip(self):\n # request.environ['REMOTE_ADDR']\n if hasattr(self, 'request'):\n remote_addr = self.request.remote_addr\n self._set_ip(remote_addr)\n return remote_addr\n\n def _set_ip(self, address):\n ip, ip_version = validate(address)\n old_ip = self._get_ip(ip_version)\n if old_ip:\n msg = 'The attribute \"{}\" is already set and has the value \"{}\".' \\\n .format(\n format_attr(ip_version),\n old_ip,\n )\n raise IpAddressesError(msg)\n\n self._setattr(ip_version, ip)\n","sub_path":"dyndns/ipaddresses.py","file_name":"ipaddresses.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"609678078","text":"import cv2\nimport torch\nimport numpy as np\nfrom torch import nn, optim\nimport tensorflow as tf\n\nfrom dataset import get_dataset, images_to_vectors, vectors_to_images\nfrom models import noise, DiscriminatorNet, GeneratorNetwork\n\n\ndef ones_target(size):\n data = torch.Tensor(torch.ones(size, 1))\n return data\n\n\ndef zeros_target(size):\n data = torch.Tensor(torch.zeros(size, 1))\n return data\n\n\ndef train_discriminator(discriminator, loss, optimizer, real_data, fake_data):\n N = real_data.size(0)\n\n optimizer.zero_grad()\n\n prediction_real = discriminator(real_data)\n\n error_real = loss(prediction_real, ones_target(N))\n error_real.backward()\n\n prediction_fake = discriminator(fake_data)\n\n error_fake = loss(prediction_fake, zeros_target(N))\n error_fake.backward()\n\n optimizer.step()\n\n return error_real + error_fake, prediction_real, prediction_fake\n\n\ndef train_generator(discriminator, loss, optimizer, fake_data):\n N = fake_data.size(0)\n\n optimizer.zero_grad()\n\n prediction = discriminator(fake_data)\n\n error = loss(prediction, ones_target(N))\n error.backward()\n\n optimizer.step()\n\n return error\n\n\ndataloader, num_batches = get_dataset()\n\ndiscriminator = DiscriminatorNet()\ngenerator = GeneratorNetwork()\n\nd_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002)\ng_optimizer = optim.Adam(generator.parameters(), lr=0.0002)\n\nloss = nn.BCELoss()\n\n\nnum_test_samples = 16\ntest_noise = noise(num_test_samples)\n\n\nsummary_writer = tf.summary.FileWriter('./events')\n\ng_error_summary = 
tf.placeholder(tf.float32, shape=())\nd_error_summary = tf.placeholder(tf.float32, shape=())\nimages_summary = tf.placeholder(tf.uint8, shape=(None, 28, 28, 1))\n\ntf.summary.scalar('Generator Error', g_error_summary)\ntf.summary.scalar('Discriminator Error', d_error_summary)\ntf.summary.image('Fake Images', images_summary)\n\nmerged_summaries = tf.summary.merge_all()\n\n\nnum_epochs = 200\n\nwith tf.Session(config=tf.ConfigProto(device_count={'CPU': 1, 'GPU': 0})) as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(num_epochs):\n for n_batch, (real_batch, _) in enumerate(dataloader):\n N = real_batch.size(0)\n\n # Train discriminator\n real_data = torch.Tensor(images_to_vectors(real_batch))\n\n fake_data = generator(noise(N)).detach()\n\n d_error, d_pred_real, d_pred_fake = train_discriminator(\n discriminator, loss, d_optimizer, real_data, fake_data\n )\n\n\n # Train generator\n fake_data = generator(noise(N))\n\n g_error = train_generator(discriminator, loss, g_optimizer, fake_data)\n\n\n if n_batch % 100 == 0:\n\n test_images = vectors_to_images(generator(test_noise))\n test_images = test_images.detach().numpy()\n test_images_normalized = \\\n cv2.normalize(test_images, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).\\\n astype(np.uint8)\n\n print(\"[{}, {}] - Discriminator error: {} - Generator error: {}\".\\\n format(epoch, n_batch, d_error, g_error))\n\n summary, *res = sess.run(\n [\n merged_summaries,\n g_error_summary,\n d_error_summary,\n images_summary\n ],\n feed_dict={\n g_error_summary: g_error.item(),\n d_error_summary: d_error.item(),\n images_summary: test_images_normalized\n }\n )\n\n summary_writer.add_summary(summary, global_step=epoch*num_batches+n_batch)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"105647211","text":"# -*- coding: utf-8 -*-\nfrom weixin import WeixinError\nfrom weixin.pay import WeixinPay\n\nfrom .config import WxConfig\n\npay = WeixinPay(WxConfig.APPID, WxConfig.MCHID, WxConfig.KEY, WxConfig.SSLKEY_PATH, WxConfig.SSLCERT_PATH)\n\n\ndef js_pay(openid, body, out_trade_no, fee, attach):\n try:\n raw = pay.jsapi(openid=openid, body=body, out_trade_no=out_trade_no, total_fee=fee, attach=attach)\n return raw\n except WeixinError as err:\n return str(err)\n\n\npayRoute = [\n\n]\n","sub_path":"wechat/wxPay.py","file_name":"wxPay.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"291945981","text":"import logging, shlex, unicodedata, asyncio\n\nimport hangups\n\nimport re, time\n\nfrom commands import command\n\nfrom hangups.ui.utils import get_conv_name\n\nclass MessageHandler(object):\n \"\"\"Handle Hangups conversation events\"\"\"\n\n def __init__(self, bot, bot_command='/bot'):\n self.bot = bot\n self.bot_command = bot_command\n\n self.last_event_id = 'none' # recorded last event to avoid re-syncing\n self.last_user_id = 'none' # recorded last user to allow message compression\n self.last_chatroom_id = 'none' # recorded last chat room to prevent room crossover\n self.last_time_id = 0 # recorded timestamp of last chat to 'expire' chats\n\n self._extra_handlers = [];\n command.attach_extra_handlers(self) \n\n\n @staticmethod\n def words_in_text(word, text):\n \"\"\"Return True if word is in text\"\"\"\n # Transliterate unicode characters to ASCII and make everything lowercase\n word = 
unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode().lower()\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode().lower()\n\n # Replace delimiters in text with whitespace\n for delim in '.,:;!?':\n text = text.replace(delim, ' ')\n\n return True if word in text else False\n\n @asyncio.coroutine\n def handle(self, event):\n \"\"\"Handle conversation event\"\"\"\n if logging.root.level == logging.DEBUG:\n event.print_debug()\n\n if not event.user.is_self and event.text:\n if event.text.split()[0].lower() == self.bot_command:\n # Run command\n yield from self.handle_command(event)\n else:\n # Forward messages\n yield from self.handle_forward(event)\n\n # Sync messages\n yield from self.handle_syncing(event)\n\n # Send automatic replies\n yield from self.handle_autoreply(event)\n\n for function in self._extra_handlers:\n yield from function(self.bot, event, command)\n\n\n @asyncio.coroutine\n def handle_command(self, event):\n \"\"\"Handle command messages\"\"\"\n # Test if command handling is enabled\n if not self.bot.get_config_suboption(event.conv_id, 'commands_enabled'):\n return\n\n # Parse message\n event.text = event.text.replace(u'\\xa0', u' ') # convert non-breaking space in Latin1 (ISO 8859-1)\n line_args = shlex.split(event.text, posix=False)\n\n # Test if command length is sufficient\n if len(line_args) < 2:\n self.bot.send_message(event.conv,\n '{}: missing parameter(s)'.format(event.user.full_name))\n return\n\n # Test if user has permissions for running command\n commands_admin_list = self.bot.get_config_suboption(event.conv_id, 'commands_admin')\n if commands_admin_list and line_args[1].lower() in commands_admin_list:\n admins_list = self.bot.get_config_suboption(event.conv_id, 'admins')\n if event.user_id.chat_id not in admins_list:\n self.bot.send_message(event.conv,\n '{}: I\\'m sorry. 
I\\'m afraid I can\\'t do that.'.format(event.user.full_name))\n return\n\n # Run command\n yield from command.run(self.bot, event, *line_args[1:])\n\n @asyncio.coroutine\n def handle_syncing(self, event):\n \"\"\"Handle message syncing\"\"\"\n if not self.bot.get_config_option('syncing_enabled'):\n return\n sync_room_list = self.bot.get_config_suboption(event.conv_id, 'sync_rooms')\n\n if not sync_room_list:\n return # Sync room not configured, returning\n\n if self.last_event_id == event.conv_event.id_:\n return # This event has already been synced\n self.last_event_id = event.conv_event.id_\n\n if event.conv_id in sync_room_list:\n print('>> message from synced room');\n link = 'https://plus.google.com/u/0/{}/about'.format(event.user_id.chat_id)\n\n ### Deciding how to relay the name across\n\n # Checking that it hasn't timed out since last message\n timeout_threshold = 30.0 # Number of seconds to allow the timeout\n if time.time() - self.last_time_id > timeout_threshold:\n timeout = True\n else:\n timeout = False\n\n # Checking if the user is the same as the one who sent the previous message\n if self.last_user_id in event.user_id.chat_id:\n sameuser = True\n else:\n sameuser = False\n\n # Checking if the room is the same as the room where the last message was sent\n if self.last_chatroom_id in event.conv_id:\n sameroom = True\n else:\n sameroom = False\n\n if (not sameroom or timeout or not sameuser) and \\\n (self.bot.memory.exists(['user_data', event.user_id.chat_id, \"nickname\"])):\n # Now check if there is a nickname set\n\n try:\n fullname = '{0} ({1})'.format(event.user.full_name.split(' ', 1)[0]\n , self.bot.get_memory_suboption(event.user_id.chat_id, 'nickname'))\n except TypeError:\n fullname = event.user.full_name\n elif sameroom and sameuser and not timeout:\n fullname = '>>'\n else:\n fullname = event.user.full_name\n\n ### Name decided and put into variable 'fullname'\n\n segments = [hangups.ChatMessageSegment('{0}'.format(fullname), hangups.SegmentType.LINK,\n link_target=link, is_bold=True),\n hangups.ChatMessageSegment(': ', is_bold=True)]\n\n # Append links to attachments (G+ photos) to forwarded message\n if event.conv_event.attachments:\n segments.append(hangups.ChatMessageSegment('\\n', hangups.SegmentType.LINE_BREAK))\n segments.extend([hangups.ChatMessageSegment(link, hangups.SegmentType.LINK, link_target=link)\n for link in event.conv_event.attachments])\n\n # Make links hyperlinks and send message\n URL_RE = re.compile(r'https?://\\S+')\n for segment in event.conv_event.segments:\n last = 0\n for match in URL_RE.finditer(segment.text):\n if match.start() > last:\n segments.append(hangups.ChatMessageSegment(segment.text[last:match.start()]))\n segments.append(hangups.ChatMessageSegment(match.group(), link_target=match.group()))\n last = match.end()\n if last != len(segment.text):\n segments.append(hangups.ChatMessageSegment(segment.text[last:]))\n\n for dst in sync_room_list:\n try:\n conv = self.bot._conv_list.get(dst)\n except KeyError:\n continue\n if not dst == event.conv_id:\n self.bot.send_message_segments(conv, segments)\n\n self.last_user_id = event.user_id.chat_id\n self.last_time_id = time.time()\n self.last_chatroom_id = event.conv_id\n\n @asyncio.coroutine\n def handle_forward(self, event):\n \"\"\"Handle message forwarding\"\"\"\n # Test if message forwarding is enabled\n if not self.bot.get_config_suboption(event.conv_id, 'forwarding_enabled'):\n return\n\n forward_to_list = self.bot.get_config_suboption(event.conv_id, 'forward_to')\n if 
forward_to_list:\n for dst in forward_to_list:\n try:\n conv = self.bot._conv_list.get(dst)\n except KeyError:\n continue\n\n # Prepend forwarded message with name of sender\n link = 'https://plus.google.com/u/0/{}/about'.format(event.user_id.chat_id)\n segments = [hangups.ChatMessageSegment(event.user.full_name, hangups.SegmentType.LINK,\n link_target=link, is_bold=True),\n hangups.ChatMessageSegment(': ', is_bold=True)]\n # Copy original message segments\n segments.extend(event.conv_event.segments)\n # Append links to attachments (G+ photos) to forwarded message\n if event.conv_event.attachments:\n segments.append(hangups.ChatMessageSegment('\\n', hangups.SegmentType.LINE_BREAK))\n segments.extend([hangups.ChatMessageSegment(link, hangups.SegmentType.LINK, link_target=link)\n for link in event.conv_event.attachments])\n self.bot.send_message_segments(conv, segments)\n\n @asyncio.coroutine\n def handle_autoreply(self, event):\n \"\"\"Handle autoreplies to keywords in messages\"\"\"\n # Test if autoreplies are enabled\n if not self.bot.get_config_suboption(event.conv_id, 'autoreplies_enabled'):\n return\n\n autoreplies_list = self.bot.get_config_suboption(event.conv_id, 'autoreplies')\n if autoreplies_list:\n for kwds, sentence in autoreplies_list:\n for kw in kwds:\n if self.words_in_text(kw, event.text) or kw == \"*\":\n self.bot.send_message(event.conv, sentence)\n break","sub_path":"hangupsbot/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":9598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"497790831","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 10 15:04:11 2021\n\n@author: user9\n\"\"\"\n# 테이블만들기(0806_4) + DB와 연동하여 Data추출(0810_4)\n\nimport pymysql\nfrom PyQt5.QtWidgets import QWidget, QApplication, QTableWidget, QTableWidgetItem\nimport sys\n\nclass MyApp(QWidget):\n def __init__(self):\n super().__init__()\n \n self.initUI()\n\n def initUI(self):\n conn = pymysql.connect(host='127.0.0.1', user='bigdata', password='12345678',\\\n db='big_data', charset='utf8')\n cursor = conn.cursor() \n sql = 'select * from product'\n cursor.execute(sql)\n result = cursor.fetchall()\n print(result)\n row = len(result)\n col = len(result[0])\n \n self.tbl = QTableWidget(row,col, self)\n self.tbl.setGeometry(30, 30, 600, 400)\n self.col_head = [\"상품코드\", \"상품명\", \"재고량\", \"단가\", \"제조사\"]\n self.tbl.setHorizontalHeaderLabels(self.col_head)\n \n '''\n for i in range(row):\n for j in range(col):\n re=str(result[i][j])\n self.tbl.setItem(i, j, (QTableWidgetItem(re)))\n '''\n row = 0\n for item in result:\n for j in range(col):\n re = str(item[j])\n self.tbl.setItem(row, j, QTableWidgetItem(re))\n row += 1\n \n self.show()\n \n cursor.close()\n conn.close()\n \n \nif __name__=='__main__':\n app = QApplication(sys.argv) \n ex=MyApp() \n sys.exit(app.exec_()) ","sub_path":"spyder_python/04DBconnect/Ex0810_5.py","file_name":"Ex0810_5.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"347849763","text":"from rest_framework import serializers\nfrom .fields import HyperlinkedIdentityField\nfrom zeus.models import Build\nfrom zeus.models import Command\n\n\nbase_build_fields = [\n 'uri',\n 'url',\n 'number',\n 'created_at',\n 'finished_at',\n 'status',\n]\n\nclass BuildSerializer(serializers.ModelSerializer):\n uri = HyperlinkedIdentityField(\n view_name='zeus_api_build_detail',\n lookup_field={\n 'build_no': 
'number',\n 'buildset_no': 'buildset__number',\n 'name': 'buildset__project__name',\n },\n )\n url = serializers.CharField(source='get_absolute_url', read_only=True)\n status = serializers.CharField(read_only=True)\n\n class Meta:\n model = Build\n fields = base_build_fields\n read_only_fields = ['number', 'created_at', 'finished_at']\n\n\nclass CommandSerializer(serializers.ModelSerializer):\n output = serializers.CharField()\n status = serializers.CharField()\n cmd = serializers.CharField(source='get_cmd_string')\n\n class Meta:\n model = Command\n fields = ['number', 'title', 'cmd', 'output', 'started_at',\n 'finished_at', 'status', 'returncode']\n\n\nclass BuildDetailSerializer(BuildSerializer):\n commands = CommandSerializer(source='commands', read_only=True)\n\n class Meta:\n model = Build\n fields = base_build_fields + ['commands', 'status']\n read_only_fields = ['number', 'created_at', 'finished_at']\n\n\nclass BuildsetSerializer(serializers.Serializer):\n uri = HyperlinkedIdentityField(\n view_name='zeus_api_buildset_detail',\n lookup_field={\n 'buildset_no': 'number',\n 'name': 'project__name',\n },\n )\n url = serializers.CharField(source='get_absolute_url')\n number = serializers.IntegerField()\n created_at = serializers.DateTimeField()\n finished_at = serializers.DateTimeField()\n status = serializers.CharField(source='get_status')\n builds = BuildSerializer(source='builds')\n errors = serializers.Field(source='errors')\n\n\nclass ProjectSerializer(serializers.Serializer):\n uri = serializers.HyperlinkedIdentityField(\n view_name='zeus_api_project_detail',\n pk_url_kwarg='name',\n lookup_field='name',\n )\n name = serializers.CharField('name')\n website_url = serializers.CharField(source='url')\n repo_url = serializers.CharField('repo_url')\n url = serializers.CharField(source='get_absolute_url')\n buildsets_uri = serializers.HyperlinkedIdentityField(\n view_name='zeus_api_buildset_list',\n pk_url_kwarg='name',\n lookup_field='name',\n )\n\n\nclass ProjectDetailSerializer(ProjectSerializer):\n buildsets_total_count = serializers.IntegerField(source='get_buildsets_total_count')\n buildsets_recent = BuildsetSerializer(source='get_recent_buildsets')\n\n","sub_path":"zeusci/zeus/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"596607928","text":"import pygame\r\nfrom pygame.locals import *\r\nimport os, sys\r\nfrom PIL import Image\r\n\r\nclass pr3splash:\r\n\r\n\tsplashtime = 4000\r\n\tsplashwidth = 0\r\n\tsplashheight = 0\r\n\r\n\tdef __init__(self):\r\n\t\tos.environ['SDL_VIDEO_CENTERED'] = '1' \r\n\t\tpygame.init() \r\n\t\r\n\tdef loadSplashScreen(self, splashimage='images/pr3-splash.png'):\r\n\t\tself.screen = self.__setSplashDimensions__(splashimage)\r\n\t\tsplashmain = pygame.image.load(splashimage)\r\n\t\tself.screen.blit(splashmain, (0,0))\r\n\t\tself.__welcomeText__()\r\n\t\tpygame.display.flip()\r\n\t\t\r\n\t\t#for event in pygame.event.get(): \r\n\t\t#\tif self.__keyPressed__(K_ESCAPE): \r\n\t\t#\t\tpygame.quit() \r\n\t\t\t\t\r\n\t\tpygame.time.delay(self.splashtime)\r\n\t\t\r\n\t\treturn True\r\n\r\n\tdef __welcomeText__(self):\r\n\t\tfont = pygame.font.Font(None, 36)\r\n\t\tloadingtext = font.render(\"W 3 l c o m 3 t o ... 
P i d g 3 r a c 3 r III\", 1, (44,44,44))\r\n\t\tself.screen.blit(loadingtext, (self.splashwidth-780,self.splashheight-40))\r\n\r\n\tdef __setSplashDimensions__(self, splashimage):\r\n\t\timg = Image.open(splashimage)\r\n\t\tself.splashwidth, self.splashheight = img.size\r\n\t\tscreen = pygame.display.set_mode((self.splashwidth,self.splashheight), NOFRAME | DOUBLEBUF)\r\n\t\treturn screen\r\n\r\n\tdef __keyPressed__(self, inputKey):\r\n\t\tkeysPressed = pygame.key.get_pressed()\r\n\t\tif keysPressed[inputKey]:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False","sub_path":"pr3splash.py","file_name":"pr3splash.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"312595934","text":"from lxml import html\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\n\n\n#Open Excel file\n# dataFile = open('officeQuoteTotals.csv', 'w')\n# wr = csv.writer(dataFile)\n\n#Hardcode in episode counts and characters tracked\nseasonLengths = [6,22,23,14,26,24,24,24,23]\ncharacters = ['Michael', 'Dwight', 'Jim', 'Pam', 'Andy', 'Darryl', 'Stanley', 'Oscar', 'Ryan', 'Kevin', 'Angela', 'Phyllis', 'Toby', 'Jan', 'Total']\njsonData = []\n#Column Titles\n# csvRow = ['Season', 'Episode'] + characters\n# wr.writerow(csvRow)\n\nfor season in range(len(seasonLengths)):\n\tfor episode in range(seasonLengths[season]):\n\n\t\t\n\t\t#Request HTML Page containing the scripts for every episode, from 'https://www.officequotes.net'\n\t\tpage = requests.get('http://www.officequotes.net/no' + str((season+1)) + '-' + str((\"%02d\" % (episode+1,))) +'.php')\n\t\tsoup = BeautifulSoup(page.content, 'html.parser')\n\t\tcharacterDict = dict.fromkeys(characters,0)\n\t\tnumQuotes = 0\n\n\t\tfor scene in soup.find_all('div', class_=\"quote\"):\n\t\t\tfor character in scene.find_all('b'):\n\t\t\t\tcharacter = character.get_text()\n\t\t\t\tcharacter = character[:-1]\n\t\t\t\tif character in set(characters):\n\t\t\t\t\tcharacterDict[str(character)] += 1\n\n\t\t\t\tif character[0:7] != 'Deleted':\n\t\t\t\t\tnumQuotes += 1\n\t\t\n\t\tcharacterDict['Total'] = numQuotes\n\t\t\t\n\t\tjsonData.append(characterDict)\n\nwith open('quote_freq.json', 'w') as outfile: \n json.dump(jsonData, outfile)\n\t\t# #Add new row every episode\n\t\t# csvRow = [season+1, episode+1] + [characterDict[x] for x in characters]\n\t\t# wr.writerow(csvRow)\n\t\t# ","sub_path":"scriptscrape.py","file_name":"scriptscrape.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"416483710","text":"import sys\r\nimport string\r\nimport numpy as np\r\n\r\nfrom WordBag import buildBagFromWords, getBag\r\nfrom ParseFile import parseFile\r\n\r\ndataSizes = [5000, 50000, 150000]\r\nbagSizes = [100, 500, 1000]\r\nnum_to_test = 10000\r\n\r\n# try different configs\r\nfor dS in range(0, 3):\r\n for bS in range(0,3):\r\n\r\n # parse the file\r\n rawWords, labels, labelCounts = parseFile('train.txt', dataSizes[dS])\r\n\r\n # build bag of words\r\n bagDict = buildBagFromWords(rawWords, labels, bagSizes[bS])\r\n\r\n # go through each text and convert to bag representation\r\n # also fills proportion of reviews with a certain word in the bag\r\n bagOfWordsVector = []\r\n wordProbs = np.zeros((2, len(bagDict))) \r\n for i in range(len(rawWords)):\r\n words = rawWords[i]\r\n bagRep = [0] * len(bagDict)\r\n for w in words:\r\n w = w[:len(w) - 1] if w[-1] in string.punctuation else w\r\n if w in 
bagDict:\r\n bagRep[bagDict[w]] = 1\r\n \r\n for j in range(len(bagRep)):\r\n wordProbs[labels[i] - 1][j] += bagRep[j]\r\n bagOfWordsVector.append(bagRep)\r\n\r\n # calculate values needed for naive bayes model\r\n totalReviews = labelCounts[0] + labelCounts[1]\r\n labelProbs = [labelCounts[0] / totalReviews, labelCounts[1] / totalReviews]\r\n wordProbs[0] = [x / labelCounts[0] for x in wordProbs[0]]\r\n wordProbs[1] = [x / labelCounts[1] for x in wordProbs[1]]\r\n\r\n testBagVector, testLabels = getBag('test.txt', bagDict, num_to_test)\r\n \r\n # let the model make predictions on the test set \r\n classifications = []\r\n for i in range(len(testBagVector)):\r\n data = testBagVector[i]\r\n probs = []\r\n denom = 0\r\n for y in range(len(labelProbs)):\r\n num = labelProbs[y]\r\n for i in range(len(data)):\r\n if data[i] == 0:\r\n num *= 1 - wordProbs[y][i]\r\n else:\r\n num *= wordProbs[y][i]\r\n probs.append(num)\r\n denom += num\r\n probs = [x / denom for x in probs]\r\n pred = 1 if probs[0] > probs[1] else 2\r\n classifications.append(pred) \r\n\r\n # see how many are correct\r\n correct = 0\r\n for j in range(len(classifications)):\r\n if classifications[j] == testLabels[j]:\r\n correct += 1\r\n\r\n print(\"Number of reviews considered for training = \" + str(dataSizes[dS]), flush=True)\r\n print(\"Bag size = \" + str(bagSizes[bS]), flush=True)\r\n print(correct / len(testLabels), flush=True)\r\n \r\n","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"272132025","text":"from os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nimport pprint\n\nimport pandas as pd\nimport numpy as np\n\nimport h5py\nimport progressbar\n\nfrom os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom scoring.utils import get_match_indexes\nfrom pack.utils import delete_directory_contents\n\n\n\ndef get_elo_home_and_away(data_h5, row, ix):\n \"\"\"\n This functions takes the data from the pair. 
iterates over the main data\n and finds the coresponding elo ratings based on the side the team is on.\n \"\"\"\n hdhome = data_h5[row.home_team][:].T\n hdaway = data_h5[row.away_team][:].T\n elohome = hdhome[np.where(hdhome==ix)[1],1]\n eloaway = hdaway[np.where(hdaway==ix)[1],1]\n return elohome, eloaway\n\n\nif __name__ == '__main__':\n\n print('[LOADING...]: Main DataFrame.')\n df = pd.read_csv('/home/kasper/Dropbox/Scrapping/soccerway/csv/final_data_soccerway.csv', index_col='Unnamed: 0')\n pred_data = pd.read_csv('../data/predict.csv', index_col='Unnamed: 0')\n df_teams = pred_data[['home_team','away_team']].values\n\n for p, pair in enumerate(df_teams[:]):\n\n print('[INFO]: pairs: ', pair)\n\n h5f = h5py.File('./elo_temp/elo_pairs_' + str(p) + '.h5', 'r')\n pair_num = list(h5f.keys())[0]\n # NOTE: do i need to convert this to a list since i iterate over it?\n teams = list(h5f[pair_num].keys())\n data = h5f[pair_num]\n print('[INFO]: getting indexes')\n uni = get_match_indexes(data_h5=data, teams=teams)\n\n da = df.loc[uni][['home_team','away_team']]\n da['res_index'] = uni\n bar = progressbar.ProgressBar(max_value=da.shape[0],widgets=[\n ' [', progressbar.Timer(), '] ',\n progressbar.Bar(),\n ' (', progressbar.ETA(), ') ',\n ])\n print('[INFO]: finding ratings.')\n elos = np.zeros((da.shape[0], 2))\n for i, (ix, row) in enumerate(da.iloc[:].iterrows()):\n # if i%100==0:\n # print(i,da.shape[0])\n try:\n elohome, eloaway = get_elo_home_and_away(row=row, ix=ix, data_h5=data)\n elos[i,0] = elohome\n elos[i,1] = eloaway\n except Exception as e:\n elos[i,0] = np.nan\n elos[i,1] = np.nan\n bar.update(i)\n\n elos_df = pd.DataFrame(elos, columns=['EH','EA'], index=da.index)\n\n final = pd.concat([da,elos_df],axis=1)\n # NOTE: do not drop duplicates just in case i am able to impute.\n # final = final.dropna()\n\n final.to_csv('./elo_goals_data_for_classifier/data_'+str(p)+'.csv')\n h5f.close()\n","sub_path":"scoring/elo_make_classifier_dataset.py","file_name":"elo_make_classifier_dataset.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"597514342","text":"#!/usr/bin/env python\n\nimport string\n\nclass trienode:\n def __init__(self):\n self.array = dict()\n\n def assign(self, c, p):\n self.array[c] = p\n\n def valueof(self,c):\n return self.array[c]\n\n def getnew(self,c):\n self.array[c] = trienode()\n\n def makenull(self):\n self.array = {}\n\n def insert(self, x):\n i = 0\n t = self\n trie_node = 0\n while x[i] != \"$\": \n new = True\n for key in t.array:\n if key == x[i]:\n new = False\n if new == True:\n t.getnew(x[i])\n trie_node = trie_node + 1\n t = t.valueof(x[i])\n i = i+1\n t.assign(\"$\",None)\n return trie_node\n\n\nroot = trienode()\ntrie_height = 0\ntrie_node = 0\nprint(\"Insert all the words from 'Alice in Wonderland.txt' to the trie, all the special characters are removed, Capital letters are considered a new character\")\nwith open('Alice-in-Wonderland.txt', 'r') as f:\n for line in f:\n for word in line.split():\n word = ''.join(e for e in word if e.isalnum())\n trie_node = root.insert(word + \"$\") + trie_node\n if len(word) > trie_height:\n trie_height = len(word)\nprint(\"Trie height is the longest possible path from the root of trie to one of its leaf, same length with the longest word\")\nprint(\"Trie_height = \", trie_height)\nprint(\"Total node of the trie = \", 
trie_node)\n","sub_path":"A3/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"467655534","text":"\nfrom api import get_stock_stream\n\n# Example usage\n\nif __name__ == \"__main__\":\n\tparams = {\"max\": 56069133} # Set the max ID for the Tweets, this max ID is not included\n\tADBE_json = get_stock_stream('ADBE', params)\n\t#print(type(AAPL_json))\n\t#print(AAPL_json)\n\t\n\twith open('test2.txt', 'w') as f:\n\t\t\n\t\tfor k, v in ADBE_json.items():\n\t\t\tif k == 'messages': # Find the messages with 'messages' as the key\n\t\t\t\tprint('number of messages: ', len(v)) # v is a list\n\t\t\t\tfor message_count in range(0,len(v)): # We will have 30 messages here by default. More details available at http://stocktwits.com/developers/docs/api#streams-symbol-docs\n\t\t\t\t\tf.write(str(v[message_count]))\n\t\t\t\t\tf.write(\"\\n ############################### \\n\")\n\t\t\t\t\n\n","sub_path":"startCrawling.py","file_name":"startCrawling.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"121944923","text":"#!/usr/bin/env python3\n\nimport csv\nimport sys\nimport os.path\n\n# For this function to work, the attendance list needs to include the exact fields as described\n# below. Otherwise this function won't be able to confirm that the ballot came from the correct\n# person (or that the person even registered).\ndef confirm_id(id, name, org, attendance_list):\n #print(\"MATCH ID for \" + name + \", \" + org);\n for entry in iter(attendance_list):\n #print(entry);\n if (id == entry['uuid'] and\n name == entry['name'] and\n org == entry['org']):\n return 1;\n return 0;\n\ndef delete_from_list(org, ooe_list):\n for entry in iter(ooe_list):\n if (org == entry['org_name']):\n ooe_list.remove(entry)\n\ndef main():\n attendance_file=sys.argv[1]; # File with UUIDs for each attendee\n ballot_file=sys.argv[2]; # File with list of ballots on which to be voted\n votes_file=sys.argv[3]; # File with votes captured from Google Forms\n prev_votes_file=\"../_data/meetings/2023/07/votes.csv\"\n prev_ballots_file=\"../_data/meetings/2023/07/ballot.csv\"\n if not os.path.isfile(prev_votes_file):\n prev_votes_file=\"\"\n if not os.path.isfile(prev_ballots_file):\n prev_ballots_file=\"\"\n\n ballots=[];\n ballot_dict = {};\n votes = {};\n orgs_in_this_file = [];\n votes_in_this_file = []\n previous_topics = []\n\n print(\"Opening files...\");\n\n attendance_list = list(csv.DictReader(open(attendance_file)));\n ballot_list = list(csv.DictReader(open(ballot_file)));\n votes_list = list(csv.DictReader(open(votes_file)));\n ooe_list = list(csv.DictReader(open('ooe_orgs.csv')));\n if prev_votes_file != \"\":\n prev_ballot_list = list(csv.DictReader(open(prev_votes_file)));\n for ballot in prev_ballot_list:\n topic = \"#\" + ballot['issue_number'] + \" (PR #\" + ballot['pr_number'] + \"): \" + ballot['topic'] + \" (\" + ballot['type'] + \")\"\n if (ballot['topic'] == \"daybreak\" or\n ((ballot['yes'] == None or int(ballot['yes']) == 0) and\n (ballot['no'] == None or int(ballot['no']) == 0) and\n (ballot['abstain'] == None or int(ballot['abstain']) == 0) and\n (ballot['missed'] == None or int(ballot['missed']) == 0))):\n continue;\n ballot_dict[topic] = ballot;\n previous_topics.append(topic);\n ballots.append(topic);\n if prev_ballots_file != \"\":\n prev_votes_list = 
list(csv.DictReader(open(prev_ballots_file)));\n for vote in prev_votes_list:\n votes[vote['org']] = vote;\n\n print(\"Creating ballot list...\");\n\n # Create dictionary for CSV key line\n for ballot in iter(ballot_list):\n topic = \"#\" + ballot['issue_number'] + \" (PR #\" + ballot['pr_number'] + \"): \" + ballot['topic'] + \" (\" + ballot['type'] + \")\"\n if ballot['topic'] == \"daybreak\" or topic in ballots:\n continue;\n ballots.append(topic);\n ballot_dict[topic] = {\n \"issue_number\": ballot['issue_number'],\n \"pr_number\": ballot['pr_number'],\n \"topic\" : ballot['topic'],\n \"type\" : ballot['type'],\n \"yes\" : 0,\n \"no\" : 0,\n \"abstain\" : 0,\n \"missed\" : 0};\n\n header = ballots;\n header.insert(0, \"org\");\n\n print(\"Reading votes...\");\n\n for vote in iter(votes_list):\n # To count the vote, the field names in the CSV file need to exactly match what is typed\n # below. Otherwise the votes won't be found properly and something weird will happen here\n # (probably a crash).\n id = vote['ID (Pre-filled - Do not edit)'];\n name = vote['Name (Pre-filled - Do not edit)'];\n org = vote['Organization (Pre-filled - Do not edit)'];\n\n # Validate that the entry matches the original ID\n if (not confirm_id(id, name, org, attendance_list)):\n print(\"Incorrect ID for \" + name + \", \" + org);\n return 1;\n\n # Check that the org has not yet voted already\n if org in orgs_in_this_file:\n print(\"\" + org + \" has already votes. Discarding ballot from \" + name);\n continue;\n else:\n orgs_in_this_file.append(org);\n\n # Create ballot entry for org\n if org not in votes:\n votes[org] = {};\n votes[org]['org'] = org;\n\n for ballot in ballots:\n if ballot == \"org\" or ballot in previous_topics:\n continue;\n\n # Add to the list of topics in this file\n if ballot not in votes_in_this_file and ballot in vote and vote[ballot] != \"\":\n votes_in_this_file.append(ballot)\n\n if ballot in vote and vote[ballot]:\n votes[org][ballot] = vote[ballot];\n ballot_dict[ballot][vote[ballot].lower()] = int(ballot_dict[ballot][vote[ballot].lower()]) + 1;\n\n # Delete the org from the list of orgs that haven't voted yet\n delete_from_list(org, ooe_list)\n\n print(\"\\n=====\\n\");\n print(\"Orgs not voted...\")\n for org in ooe_list:\n name = org['org_name'];\n print(name);\n if name not in votes:\n votes[name] = {};\n votes[name]['org'] = name;\n for ballot in ballots:\n if ballot == \"org\" or ballot not in votes_in_this_file:\n continue;\n votes[name][ballot] = 'abstain';\n ballot_dict[ballot]['abstain'] = int(ballot_dict[ballot]['abstain']) + 1;\n print(\"\\n=====\\n\");\n\n print(\"Writing ballot.csv...\");\n\n with open('ballot.csv', 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, header, quoting = csv.QUOTE_ALL, extrasaction='ignore');\n\n writer.writeheader()\n for org in votes.keys():\n writer.writerow(votes[org]);\n\n print(\"Writing votes.csv...\");\n\n with open('votes.csv', 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, ['issue_number', 'pr_number', 'topic', 'type', 'yes', 'no', 'abstain', 'missed'],\n quoting = csv.QUOTE_ALL);\n\n writer.writeheader()\n for key in ballot_dict.keys():\n writer.writerow(ballot_dict[key]);\n\n print(\"\\n=====\\n\");\n\n print(\"Move votes.csv and ballot.csv to the appropriate folders.\");\n print(\"Update registered, ooe, and imove numbers in votes.md.\");\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"utils/tally_votes.py","file_name":"tally_votes.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"244343736","text":"\n\nfrom xai.brain.wordbase.nouns._jowl import _JOWL\n\n#calss header\nclass _JOWLS(_JOWL, ):\n\tdef __init__(self,): \n\t\t_JOWL.__init__(self)\n\t\tself.name = \"JOWLS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"jowl\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_jowls.py","file_name":"_jowls.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"375512067","text":"import os\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"settings\"\n\nfrom tests import CommonTest\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom .formatting import format_wiki\nfrom .parser.generator import Generator\nfrom .parser.parser import ParserError\nfrom .models import page\nfrom .debug import print_tokens\nfrom .parser.lexer import lexer\nfrom pprint import pprint\nimport signal\n\nfrom django.db.backends import BaseDatabaseWrapper\nfrom django.db.backends.util import CursorWrapper\n\nif settings.DEBUG:\n BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)\n\n\nclass TimeoutError(Exception):\n pass\n\n\nclass timeout:\n def __init__(self, seconds=1, error_message='Timeout'):\n self.seconds = seconds\n self.error_message = error_message\n\n def handle_timeout(self, signum, frame):\n raise TimeoutError(self.error_message)\n\n def __enter__(self):\n signal.signal(signal.SIGALRM, self.handle_timeout)\n signal.alarm(self.seconds)\n\n def __exit__(self, type, value, traceback):\n signal.alarm(0)\n\n\nclass Pages(CommonTest):\n def setUp(self):\n pass\n\n def test_open_pages(self):\n pages = page.objects.filter(page_namespace=page.NS_ARTICLE)\n for p in pages:\n #txt = p.render(test=True)\n try:\n with timeout(seconds=2):\n #sleep(4)\n p.render(test=True)\n except TimeoutError:\n print(p)\n print(p.content)\n raise\n except SyntaxError as e:\n token = e.args[0]\n print(p)\n print(p.content)\n print(\"TOKENS:\")\n print_tokens(p.content, token)\n raise\n except Exception:\n print(p)\n raise\n","sub_path":"src/article/t_pages.py","file_name":"t_pages.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"253579095","text":"#基于numpy实现余弦相似度计算,实现batchnormalization层前向计算\n#二维象限 x y 随机256个向量点\ndef cosine_similarity(x, y, dim=256):\n xx = 0.0 #初始值\n yy = 0.0 #初始值\n xy = 0.0 #初始值\n for i in range(dim):\n xx += x[i] * x[i]\n yy += y[i] * y[i]\n xy += x[i] * y[i]\n xx_sqrt = xx ** 0.5 #xx的对位相乘,累加,然后开方\n yy_sqrt = yy ** 0.5 #yy的对位相乘,累加,然后开方\n cos = xy/(xx_sqrt*yy_sqrt)*0.5+0.5 #余弦相似度公式\n print(\"\" + cos)\n return cos\n","sub_path":"46+马盟辉+杭州/week2/cosinesimilarity.py","file_name":"cosinesimilarity.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"455303891","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"avocado.csv\")\ndata = data.query(\"type == 'conventional' and region == 'Albany'\")\ndata[\"Date\"] = pd.to_datetime(data[\"Date\"], format=\"%Y-%m-%d\")\ndata.sort_values(\"Date\", inplace=True)\n\napp = 
dash.Dash(__name__)\n\napp.layout = html.Div(\n children=[\n html.Div(\n children=[\n html.Div(children=\"Region\", className=\"menu-title\"),\n dcc.Dropdown(\n id=\"region-filter\",\n options=[\n {\"label\": region, \"value\": region}\n for region in np.sort(data.region.unique())\n ],\n value=\"Albany\",\n clearable=False,\n className=\"dropdown\",\n ),\n ]\n ),\n html.Div(\n children=[\n html.Div(children=\"Type\", className=\"menu-title\"),\n dcc.Dropdown(\n id=\"type-filter\",\n options=[\n {\"label\": avocado_type, \"value\": avocado_type}\n for avocado_type in data.type.unique()\n ],\n value=\"organic\",\n clearable=False,\n searchable=False,\n className=\"dropdown\",\n ),\n ],\n ),\n html.Div(\n children=[\n html.Div(\n children=\"Date Range\",\n className=\"menu-title\"\n ),\n dcc.DatePickerRange(\n id=\"date-range\",\n min_date_allowed=data.Date.min().date(),\n max_date_allowed=data.Date.max().date(),\n start_date=data.Date.min().date(),\n end_date=data.Date.max().date(),\n ),\n ]\n ),\n ],\n className=\"menu\",\n) \n\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"215748578","text":"from . import loadDefaultParams as dp\nfrom . import timeIntegration as ti\nfrom ..model import Model\n\n\nclass SubDivWCModel(Model):\n \"\"\"\n A three-population Wilson-Cowan model with subtractive and divisive inhibition.\n\n This model is usually used for cortical nodes within a whole-brain network. Each node is modelled as three\n populations of WC neural masses, one excitatory representing cortical pyramidal cells and two inhibitory,\n representing dendrite-targeting, somatostatin-positive (SST+) interneurons and soma-targeting,\n parvalbumin-positive (PV+) interneurons, respectively. While the SST+ interneuron population simply provides\n subtractive inhibition, the PV+ population can provide both subtractive and divisive inhibition.\n The brain network is realised by coupling excitatory to excitatory masses within the network using the structural\n connectivity matrix and supports network delays.\n\n References:\n *Papasavvas et al., Divisive gain modulation enables flexible and rapid entrainment in a\n neocortical microcircuit model, J. 
Neurophysiol., 2020\"\"\"\n\n name = \"sdwc\"\n description = \"Three-population Wilson-Cowan model with both subtractive and divisive inhibition\"\n\n init_vars = [\"exc_init\", \"inh_s_init\",\"inh_d_init\", \"exc_ou\", \"inh_s_ou\", \"inh_d_ou\"]\n state_vars = [\"exc\", \"inh_s\", \"inh_d\", \"exc_ou\", \"inh_s_ou\", \"inh_d_ou\"]\n output_vars = [\"exc\", \"inh_s\", \"inh_d\"]\n default_output = \"exc\"\n input_vars = [\"exc_ext\", \"inh_s_ext\", \"inh_d_ext\"]\n default_input = \"exc_ext\"\n\n # because this is not a rate model, the input\n # to the bold model must be transformed\n boldInputTransform = lambda self, x: x * 50\n\n def __init__(self, params=None, Cmat=None, Dmat=None, seed=None):\n\n self.Cmat = Cmat\n self.Dmat = Dmat\n self.seed = seed\n\n # the integration function must be passed\n integration = ti.timeIntegration\n\n # load default parameters if none were given\n if params is None:\n params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed)\n\n # Initialize base class Model\n super().__init__(integration=integration, params=params)\n","sub_path":"neurolib/models/subdivwc/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"464477580","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'blog'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('', views.detail, name='detail'),\n path('comment/', views.add_comment, name='add_comment'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"605991518","text":"\"\"\"Tests for the CustomResourceHandler.\"\"\"\nfrom unittest.mock import patch, ANY\n\nimport pytest\n\nfrom cf import cfnresponse\nfrom cf.custom_resource import handler\n\n\n@pytest.fixture\ndef echo_event(event):\n \"\"\"Get a sample echo event.\"\"\"\n event[\"ResourceProperties\"] = {\"Operator\": \"echo\", \"Operands\": \"test\"}\n return event\n\n\ndef test_echo():\n \"\"\"Test the default echo operation.\"\"\"\n assert handler.execute(\"echo\", \"test\") == \"test\"\n\n\ndef test_send_not_called(echo_event, context):\n \"\"\"It should not call send if there is no response url in the event.\"\"\"\n event = echo_event\n\n if \"ResponseURL\" in event:\n del event[\"ResponseURL\"]\n\n with patch(\"cf.cfnresponse.send\") as mock_send:\n handler(event, context)\n\n mock_send.assert_not_called()\n\n\ndef test_send_called_with_success(echo_event, context):\n \"\"\"It should call send if there is a response url in the event on success.\"\"\"\n event = echo_event\n\n with patch(\"cf.cfnresponse.send\") as mock_send:\n handler(event, context)\n\n mock_send.assert_called_once_with(event, context, cfnresponse.SUCCESS, ANY)\n\n\ndef test_send_called_with_failed(echo_event, context):\n \"\"\"It should call send if there is a response url in the event on failure.\"\"\"\n event = echo_event\n event[\"ResourceProperties\"] = {\"Operator\": \"unknown\", \"Operands\": \"test\"}\n\n with patch(\"cf.cfnresponse.send\") as mock_send:\n handler(event, context)\n\n mock_send.assert_called_once_with(event, context, cfnresponse.FAILED, 
ANY)\n","sub_path":"tests/unit/test_custom_resource.py","file_name":"test_custom_resource.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"27756675","text":"def merge_and_count(b,c):\n res_arr, inv_count = [], 0\n while len(b) > 0 or len(c) > 0:\n if len(b) > 0 and len(c) > 0:\n if b[0] < c[0]:\n res_arr.append(b[0])\n b = b[1:]\n else:\n res_arr.append(c[0])\n c = c[1:]\n inv_count += len(b)\n elif len(b) > 0:\n res_arr.append(b[0])\n b = b[1:]\n elif len(c) > 0:\n res_arr.append(c[0])\n c = c[1:]\n\n return res_arr, inv_count\n\ndef sort_and_count(a):\n arr_len = len(a)\n if arr_len <= 1:\n return a, 0\n b,x = sort_and_count(a[:(arr_len/2)])\n c,y = sort_and_count(a[(arr_len/2):])\n d,z = merge_and_count(b,c)\n\n return d, x+y+z\n","sub_path":"merge_count.py","file_name":"merge_count.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"470610030","text":"class Date:\n def __init__(self, d =0, m = 0, y = 0):\n self.d = d\n self.m = m\n self.y = y\n\n def is_leap(self):\n return self.y % 4 == 0\n\n def set(self,d,m,y):\n self.m = m\n self.d = d\n self.y = y\n\n def indian_format(self):\n return F\"{self.d}/{self.m}/{self.y}\"\n\n def is_valid(self):\n \"\"\"Date validation\"\"\"\n if self.m in [1,3,5,7,8,10,12]:\n return self.d>=1 and self.d<=31\n elif self.m in [4,6,9,11]:\n return self.d>=1 and self.d<=30\n elif self.m == 2:\n return self.d>=1 and self.d<=29 if self.is_leap() else self.d>=1 and self.d<=28\n return False\n \n def next(self):\n t = Date(self.d, self.m, self.y)\n if t.m in [1,3,5,7,8,10,12]:\n if t.d == 31 and t.m == 12:\n t.d = 1\n t.m = 1\n t.y += 1\n\n return t\n def previous(self):\n pass\n \n def day_of_year(self):\n days = [0,31, 31+28, 31+28+31, 31+28+31+30, 31+28+31+30+31]\n return days[self.m-1] + self.d + 1 if self.is_leap else 0\n\np = Date(30,5,2006)\nq = p.next().next().next()\nq.show()\n\n# Test - 1 \na = Date() # Create an object\na.d = 10\na.m = 2\na.y = 2017\nprint(a.d, a.m, a.y)\nif a.is_leap():\n print(\"It is leap year!\")\nelse:\n print(\"Not leap\")\n\n# Test - 2\nx = Date()\nx.set(30,1,2000)\nprint(x.is_leap())\nprint(x.indian_format())\n\n# Test - 3\nz = Date()\nz.set(29,2,2001)\nprint(z.is_valid())\n\n\n# Test - 4\np = Date()\np.set(20,2,2002)\nprint(p.day_of_year())","sub_path":"oop/date1.py","file_name":"date1.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"142013368","text":"#! /usr/bin/python3\n\n''' The function to search the text in this tag on the html page.\n'''\n\nimport re\n\nHTML_PAGE = (''\n ''\n ' Turtle Soup '\n '
 Adjust the Project Settings 
')\n\ndef parsing_html():\n ''' Find text between tags.\n '''\n pattern = re.compile(r\"(?is)]*>(.+?)\")\n result = pattern.findall(HTML_PAGE)\n print(result[0])\n\nparsing_html()\n","sub_path":"task_min/parsing_html.py","file_name":"parsing_html.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"250274990","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\n#Funciones\r\ndef mat(n):\r\n for i in range (n):\r\n matriz.append([])\r\n for j in range (n):\r\n matriz[i].append(0)\r\n return matriz\r\n\r\ndef llenar(n):\r\n matriz = mat(n)\r\n for x in range (n):\r\n for y in range (n):\r\n matriz[x][y] = float(input('Valor de [' + str(x) + '][' + str(y) + '] = '))\r\n res.append(float(input('Valor del resultado de la matriz [' + str(x) + '] = ')))\r\n \r\ndef gauss(n):\r\n for z in range (n-1):\r\n for x in range(1, n-z):\r\n if (matriz[z][z] != 0 ):\r\n p = matriz[x+z][z] / matriz[z][z]\r\n for y in range (n):\r\n matriz[x+z][y] = matriz[x+z][y] - (matriz[z][y]*p)\r\n res[x+z] = res [x+z] - (res[z]*p)\r\n\r\ndef gjordan(n):\r\n for z in range (n-1, 0, -1):\r\n for x in range (z):\r\n if (matriz[z][z] != 0):\r\n p = matriz[x][z] / matriz[z][z]\r\n matriz[x][z] = matriz [x][z] - (matriz[z][z] * p)\r\n res[x] = res[x] - (res[z] * p)\r\n \r\ndef sol(n):\r\n print(\"\\n\")\r\n for i in range (n):\r\n if (matriz[i][i] != 0):\r\n ms=True\r\n else:\r\n ms=False\r\n break\r\n if (ms == True):\r\n for i in range(n):\r\n print (\"El valor de x\" + str(i) + ' es = ' + str(res[i]/matriz[i][i]))\r\n else:\r\n print ('La matriz no tiene solucion')\r\n\r\ndef det(n):\r\n deter=1\r\n for x in range (n):\r\n deter=matriz[x][x]*deter\r\n print ('\\nEl determinante de la matriz es = ', deter)\r\n \r\ndef im(n):\r\n print(\"\\nMatriz resultante:\")\r\n for i in range (n):\r\n print (matriz[i][:])\r\n\r\ndef gJordanRun(matriz,res,n):\r\n\t#Variables\r\n#\tmatriz = matrix\r\n#\tres = res\r\n\r\n\tgjordan(n)\r\n\tsol(n)\r\n\tim(n)\r\n\r\n\r\n","sub_path":"lab6/Gauss_Jordan.py","file_name":"Gauss_Jordan.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"116959906","text":"import os\nimport sys\nimport string\n\nPRINTABLE_WITHOUT_DIGITS = set(string.printable) ^ set(string.digits)\n\n\nclass LineInFileError(Exception): pass\n\n\nclass LuckyTicket:\n def __init__(self, path):\n self.path = path\n\n def _is_file_exists(self):\n if not os.path.isfile(self.path) and \\\n not os.path.exists(self.path):\n raise FileNotFoundError('Enter correct path to the file')\n\n @staticmethod\n def _is_valid_line(line: str):\n is_not_valid_data = False\n\n for i in line:\n if i in PRINTABLE_WITHOUT_DIGITS:\n is_not_valid_data = True\n\n if is_not_valid_data or len(line) != 6:\n raise LineInFileError(\n 'Invalid data in file: {}'.format(line)\n )\n\n def count_moskow_tickets(self):\n self._is_file_exists()\n file = open(self.path, encoding=\"utf-8\")\n counter = 0\n\n for line in file:\n line = line.strip()\n self._is_valid_line(line)\n\n if sum(map(int, line[:3])) == sum(map(int, line[3:])):\n counter += 1\n\n return counter\n\n def count_piter_tickets(self):\n self._is_file_exists()\n file = open(self.path, encoding=\"utf-8\")\n counter = 0\n\n for line in file:\n line = line.strip()\n self._is_valid_line(line)\n\n if sum(map(int, line[::2])) == sum(map(int, line[1::2])):\n counter += 1\n\n return counter\n\n\nif __name__ == '__main__':\n if 
len(sys.argv) == 3:\n tickets = LuckyTicket(sys.argv[2])\n if sys.argv[1].lower() == '-m':\n print(tickets.count_moskow_tickets())\n if sys.argv[1].lower() == '-p':\n print(tickets.count_piter_tickets())\n\n else:\n print(\n \"\"\"\n You enter wrong parameters.\n - Enter mode (-m) to count in Moskow mode\n (-p) to count in Piter mode\n \n - Enter valid path to your file\n \"\"\"\n )\n","sub_path":"elementary_task/Task6/lucky_tickets.py","file_name":"lucky_tickets.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378312901","text":"from conans import ConanFile\nimport os\nfrom conans import CMake\n\nclass GLogConan(ConanFile):\n name = \"glog\"\n version = \"latest\"\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"gflags\": [True, False], \"multithreaded\": [True, False]}\n default_options = \"shared=True\", \"gflags=True\", \"multithreaded=True\"\n url=\"http://github.com/eliaskousk/conan-glog\"\n license=\"https://www.apache.org/licenses/LICENSE-2.0\"\n exports= \"CMakeLists.txt\", \"FindGlog.cmake\", \"change_dylib_names.sh\"\n # zip_name = \"%s.tar.gz\" % version\n # unzipped_name = \"glog-%s\" % version\n folder_name = \"glog-%s\" % version\n\n def config(self):\n if self.options.gflags == True:\n self.requires.add(\"gflags/2.2.0@eliaskousk/stable\", private=False)\n self.options['gflags'].shared = True\n\n def source(self):\n self.run(\"git clone https://github.com/google/glog.git %s\" % self.folder_name)\n self.run(\"cd %s && git checkout master\" % self.folder_name)\n\n def build(self):\n cmake = CMake(self.settings)\n if self.settings.os == \"Windows\":\n self.run(\"IF not exist _build mkdir _build\")\n else:\n self.run(\"mkdir _build\")\n cd_build = \"cd _build\"\n gflags = \"-DWITH_GFLAGS=1\" if self.options.gflags else \"\"\n multithreaded = \"-DWITH_THREADS=1\" if self.options.multithreaded else \"\"\n shared = \"-DBUILD_SHARED_LIBS=1\" if self.options.shared else \"\"\n self.run('%s && cmake .. %s %s %s %s' % (cd_build, cmake.command_line, shared, gflags, multithreaded))\n self.run(\"%s && cmake --build . 
%s\" % (cd_build, cmake.build_config))\n\n def package(self):\n\n if self.settings.os == \"Macos\" and self.options.shared:\n self.run(\"bash ./change_dylib_names.sh\")\n\n # Copy findglog script into project\n self.copy(\"FindGlog.cmake\", \".\", \".\")\n\n # Copying headers\n self.copy(pattern=\"*.h\", dst=\"include/glog\", src=\"_build/%s/glog\" % self.folder_name, keep_path=True)\n self.copy(pattern=\"*.h\", dst=\"include/glog\", src=\"%s/src/glog\" % self.folder_name, keep_path=True)\n\n # Copying static libs\n libdir = \"_build/lib\"\n self.copy(pattern=\"*.a\", dst=\"lib\", src=libdir, keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", src=libdir, keep_path=False)\n\n # Copying dynamic libs\n libdir = \"_build/%s\" % self.folder_name\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=libdir, keep_path=False)\n self.copy(pattern=\"*.dylib*\", dst=\"lib\", src=libdir, keep_path=False)\n self.copy(pattern=\"*.dll\", dst=\"bin\", src=libdir, keep_path=False)\n\n bindir = \"_build/bin\"\n # Copying binaries\n self.copy(pattern=\"*\", dst=\"bin\", src=bindir, keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = ['glog']\n\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"22853287","text":"from pybicl.algorithm import DepenedRule, DP\nimport sys\n\nclass RuleSeq1Gap(DepenedRule):\n def __init__(self, extend_penalty=-2, gap_penalty=-3):\n super().__init__()\n self.extend_penalty = extend_penalty\n self.gap_penalty = gap_penalty\n\n RULENAME = \"Seq1Gap\"\n @classmethod\n def child_tags(cls, tag):\n indx1, indx2 = tag\n if (indx1 - 1 >= 0):\n return [(indx1 - 1, indx2)]\n else:\n return list()\n\n def solution(self, node, child_tag_nodes, data):\n child_node = child_tag_nodes[0]\n child_indx1, child_indx2 = child_node.tag\n score = child_node.score\n if not child_node[\"init\"]:\n if child_node[\"seq2gap\"]:\n if child_node.optimal_rule_name == self.RULENAME:\n score += self.extend_penalty\n else:\n score += self.gap_penalty\n elif (child_indx2 == 0) or (child_indx2 == (len(data[1]) - 1)):\n pass\n else:\n assert child_node.optimal_rule_name is not None\n if child_node.optimal_rule_name == self.RULENAME:\n score += self.extend_penalty\n else:\n score += self.gap_penalty\n return score, child_tag_nodes\n\nclass RuleSeq2Gap(DepenedRule):\n def __init__(self, extend_penalty=-2, gap_penalty=-3):\n super().__init__()\n self.extend_penalty = extend_penalty\n self.gap_penalty = gap_penalty\n\n RULENAME = \"Seq2Gap\"\n @classmethod\n def child_tags(cls, tag):\n indx1, indx2 = tag\n if (indx2 - 1 >= 0):\n return [(indx1, indx2 - 1)]\n else:\n return list()\n\n def solution(self, node, child_tag_nodes, data):\n child_node = child_tag_nodes[0]\n score = child_node.score\n child_indx1, child_indx2 = child_node.tag\n if not child_node[\"init\"]:\n if child_node[\"seq1gap\"]:\n if child_node.optimal_rule_name == self.RULENAME:\n score += self.extend_penalty\n else:\n score += self.gap_penalty\n elif (child_indx1 == 0) or (child_indx1 == (len(data[0]) - 1)):\n pass\n else:\n assert child_node.optimal_rule_name is not None\n if child_node.optimal_rule_name == self.RULENAME:\n score += self.extend_penalty\n else:\n score += self.gap_penalty\n return score, child_tag_nodes\n\nclass RuleMatchMismatch(DepenedRule):\n def __init__(self, match_score = 1, mismatch_penalty=-2, n_penalty=-1, match_wildcard=\"X\"):\n super().__init__()\n 
self.match_score = match_score\n self.mismatch_penalty = mismatch_penalty\n self.n_penalty = n_penalty\n self.match_wildcard = match_wildcard\n\n RULENAME = \"MatchMismatch\"\n @classmethod\n def child_tags(cls, tag):\n indx1, indx2 = tag\n if (indx1 - 1 >= 0) and (indx2 - 1 >= 0):\n return [(indx1 - 1, indx2 - 1)]\n else:\n return list()\n\n def solution(self, node, child_tag_nodes, data):\n indx1, indx2 = node.tag\n base1 = data[0][indx1]\n base2 = data[1][indx2]\n score = child_tag_nodes[0].score\n if (base1 == self.match_wildcard) or (base2 == self.match_wildcard):\n pass\n elif (base1 == \"N\") or (base2 == \"N\"):\n score += self.n_penalty\n elif (base1 != base2):\n score += self.mismatch_penalty\n else:\n score += self.match_score\n return score, child_tag_nodes\n\nclass AlignmentDP(DP):\n \"\"\"\n DP for alignment\n data = (seq1, seq2)\n \"\"\"\n def __init__(self, data):\n data = (data[0].upper(), data[1].upper())\n super().__init__(data)\n\n def get_root_tag(self):\n return (len(self.data[0]) - 1, len(self.data[1]) - 1)\n\n def update_node_info(self, node, child_nodes):\n optimal_rule_name = node.optimal_rule_name\n child_node = node.optimal_child_nodes[0]\n node[\"seq1gap\"] = (optimal_rule_name == RuleSeq1Gap.RULENAME) and (node.score == child_node.score)\n node[\"seq2gap\"] = (optimal_rule_name == RuleSeq2Gap.RULENAME) and (node.score == child_node.score)\n if child_node[\"init\"]:\n return None\n if child_node[\"seq1gap\"]:\n node[\"seq1gap\"] = True\n if child_node[\"seq2gap\"]:\n node[\"seq2gap\"] = True\n\n def init_node_condition(self, tag):\n indx1, indx2 = tag\n if (indx1 == 0) and (indx2 == 0):\n return 0\n return None\n\ndef alignment_dp(seq1, seq2, match_score = 1, mismatch_penalty=-2, n_penalty=-1, extend_penalty=-2, gap_penalty=-3, match_wildcard=\"X\"):\n data = (seq1, seq2)\n align = AlignmentDP(data)\n align.add_rule(RuleSeq1Gap(extend_penalty=extend_penalty, gap_penalty=gap_penalty))\n align.add_rule(RuleSeq2Gap(extend_penalty=extend_penalty, gap_penalty=gap_penalty))\n align.add_rule(RuleMatchMismatch(match_score = match_score, mismatch_penalty=mismatch_penalty,\n n_penalty=n_penalty, match_wildcard=match_wildcard))\n align.run()\n return align","sub_path":"pybicl/algorithm/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"503076203","text":"import warnings\nimport pandas as pd\nimport numpy as np\nimport time\nimport tensorflow as tf\n\nfrom tensorflow.python.keras.callbacks import EarlyStopping\nfrom tensorflow.python.keras.layers import Dense, LSTM\nfrom tensorflow.python.keras import Sequential\nfrom sklearn.preprocessing import MinMaxScaler\n\nwarnings.filterwarnings(\"ignore\")\npd.options.display.float_format = '{:.5f}'.format\n\nsc = MinMaxScaler()\n\nstart_time = time.time()\n\ntf.random.set_seed(1)\nnp.random.seed(1)\n\n\nclass ConsumptionLSTMModel:\n def __init__(self, n_steps):\n print('******************** CONSUM LSTM Model ********************')\n self.n_steps = n_steps\n self.n_features = 1\n self.n_targets = 1\n\n def split_sequence(self, values_list):\n \"\"\"\n return X: train_set / y: label_set(target_set)\n\n :param values_list: list of drugcode & values dict entered by the user([{drugcode: demand values}]\n :return: train_set X / target_set y\n \"\"\"\n x, y = list(), list()\n\n for i in range(len(values_list)):\n # find the end of this pattern\n end_ix = i + self.n_steps\n # check if we are beyond the 
sequence\n if end_ix > len(values_list) - self.n_targets:\n break\n # gather input and output parts of the pattern\n # seq_x, seq_y = values_list[i:end_ix], values_list[end_ix]\n seq_x, seq_y = values_list[i:end_ix], values_list[end_ix:(end_ix + self.n_targets)]\n\n x.append(seq_x)\n y.append(seq_y)\n\n return np.array(x), np.array(y)\n\n def lstm(self, x):\n \"\"\"\n LSTM model.\n\n Learn with X sliced in 'n_steps'.\n The label of X is y expressed as the value of 'n_features'.\n X is a value from 'x-7 ~ x-1', divided by 'n_steps' value.\n y is 'x', which is equal to the value of 'n_features' value.\n And the model trained in this way receives 'x-7 ~ x' again, predicts 'x+1', and returns.\n\n :param x: train dataset\n :return: prediction of x+1\n \"\"\"\n best_pred_list = []\n best_node_list = []\n best_loss_list = []\n\n _x = np.reshape(x, (-1, 1))\n\n x_sc = sc.fit_transform(_x)\n\n x_train, y_train = self.split_sequence(x_sc)\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], self.n_features))\n y_train = np.reshape(y_train, (y_train.shape[0], y_train.shape[1]))\n\n x_input = x[-self.n_steps:]\n x_input_sc = np.reshape(x_input, (-1, 1))\n x_input_fin = sc.transform(x_input_sc).reshape((1, self.n_steps, self.n_features))\n\n start_nodenum = 2\n end_nodenum = 11\n\n for i in range(start_nodenum, end_nodenum):\n model = Sequential()\n\n # input shape: (train values list length, target value list length)\n model.add(LSTM(i, activation='relu', return_sequences=True, input_shape=(self.n_steps, self.n_features)))\n\n model.add(LSTM(10, activation='relu', return_sequences=False))\n\n model.add(Dense(self.n_targets)) # output\n\n model.compile(loss='mse', optimizer='adam')\n\n early_stop = EarlyStopping(monitor='loss', patience=0, verbose=0,\n mode='auto', baseline=None, restore_best_weights=False)\n\n # hist = model.fit(x_train, y_train, epochs=300, batch_size=self.n_steps, verbose=1, callbacks=[early_stop])\n hist = model.fit(x_train, y_train, epochs=300, batch_size=self.n_steps, verbose=1)\n\n # predict\n globals()['prediction_{}'.format(i)] = model.predict(x_input_fin)\n\n # loss\n globals()['min_loss_{}'.format(i)] = min(hist.history['loss'])\n\n list_loss = []\n for m in range(start_nodenum, end_nodenum):\n list_loss.append(globals()['min_loss_{}'.format(m)])\n\n # index_loss_list = [node, loss, prediction]\n index_loss_list = []\n for i, n in enumerate(list_loss, start=start_nodenum):\n index_loss_list.append([i, n,\n sc.inverse_transform(globals()['prediction_{}'.format(i)])[0]])\n\n index_loss_df = pd.DataFrame(index_loss_list, columns=['node', 'loss', 'pred'])\n\n # lowest loss\n best_loss_pred = index_loss_df[index_loss_df['loss'] == min(index_loss_df['loss'])]\n best_loss_val = best_loss_pred['loss'].values[0]\n\n # save to lowest loss's node\n best_loss_node = best_loss_pred['node'].values[0]\n\n # lowest loss's prediction and return as result\n p_result = globals()['prediction_{}'.format(best_loss_node)]\n\n # result to inverse transform(true value)\n pred_data = sc.inverse_transform(p_result)\n\n # result list\n best_pred_list.append(pred_data[0][0])\n # result's node list\n best_node_list.append(best_loss_node)\n # result's loss list\n best_loss_list.append(best_loss_val)\n\n # Running Time\n end_time = time.time()\n print(\"WorkingTime: {:.2f} sec\".format(end_time - start_time))\n\n return 
best_pred_list[0]\n","sub_path":"consumption_models/consumption_model_lstm.py","file_name":"consumption_model_lstm.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"374771179","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 16:47:48 2019\n\n@author: jidi\n\"\"\"\n\n# encoding=utf-8\nimport matplotlib.pyplot as plt\nfrom pylab import * #支持中文\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.grid() # 生成网格\nplt.grid(axis=\"y\")\naxis='x'\nplt.grid(linestyle='--')\nplt.grid(ls='-.')\nplt.grid(c='gray')\nnames = ['3', '5', '7', '9', '11','13','15']\nx = range(len(names))\ny = [13,15,21,29,31,32,34]\ny1=[9,11,18,26,28,29,31]\ny2= [11,13,20,27,28,30,31]\ny3=[8,10,15,21,23,25,29]\ny4= [9,11,19,25,27,28,29]\ny5=[7,8,15,20,23,25,27]\n#plt.plot(x, y, 'ro-')\n#plt.plot(x, y1, 'bo-')\n#pl.xlim(-1, 11) # 限定横轴的范围\n#pl.ylim(-1, 110) # 限定纵轴的范围\nplt.plot(x, y, color='r',linewidth=1.8,linestyle=':',marker='H',mfc='w',label='CTPP f=25%')\nplt.plot(x, y1, color='darkgreen',linewidth=1.8,marker='D',mfc='w',label=r'$k^τ$ f=25%' )\nplt.plot(x, y2, color='coral',linewidth=1.8,linestyle='-.',marker='x',mfc='r',label='CTPP f=50%')\nplt.plot(x, y3, color='b',linewidth=1.8,linestyle='-.',marker='H',mfc='w',label=r'$k^τ$ f=50%')\nplt.plot(x, y4, color='darkviolet',linewidth=1.8,linestyle=':',marker='o',mfc='w',label='CTPP f=75%')\nplt.plot(x, y5, color='k',linewidth=1.8,linestyle='-.',marker='x',mfc='w',label=r'$k^τ$ f=75%')\nplt.plot(fontsize=15)\nplt.legend() # 让图例生效\nplt.xticks(x, names,fontsize=15)\nplt.yticks(fontsize=15)\nplt.margins(0)\nplt.subplots_adjust(bottom=0.15)\n#plt.xlim(1,15)#设置横坐标范围\nplt.ylim(6,35)#设置横坐标范围\nplt.xlabel(\"h\",fontsize=15) #X轴标签\nplt.ylabel(\"communication cost\",fontsize=15) #Y轴标签\nplt.title(r'$N_m$''$_a$''$_x$=6',fontsize=15) #标题\n\nplt.show()\n","sub_path":"图组合(1).py","file_name":"图组合(1).py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"60195787","text":"import random\r\ndone = {''}\r\nops = ['+', '-', '*', '/', '%']\r\ndef randEquation(arg, res):\r\n equation = str(arg)\r\n for i in range(0, arg-1):\r\n r = random.randint(0, 4)\r\n op = ops[r]\r\n equation += op+str(arg)\r\n if eval(equation) == res and not equation in done:\r\n print(equation)\r\n done.add(equation)\r\nwhile True:\r\n i = input('Choose a starting number (e.g 6 for splendid sixes): ')\r\n j = input('Choose a result number (e.g 3 for splendid sixes): ')\r\n print('Hit Ctrl+C to stop generating expressions.');\r\n try:\r\n while True:\r\n randEquation(int(i), int(j))\r\n except KeyboardInterrupt:\r\n continue\r\n","sub_path":"splendidsixes.py","file_name":"splendidsixes.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"153077392","text":"import cv2\nimport numpy as np\n\nprint(\"OpenCV version:\", cv2.__version__)\n\n\n# callback fct : x current position of the trackbar\ndef nothing(x):\n print(x)\n\n\n# Create a black image, a window\nimg = np.zeros((300, 512, 3), np.uint8)\ncv2.namedWindow('image')\n\n# Create a trackbar\ncv2.createTrackbar('B', 'image', 0, 255, nothing)\ncv2.createTrackbar('G', 'image', 0, 255, nothing)\ncv2.createTrackbar('R', 'image', 0, 255, nothing)\n# args : trackbar name, window name, value count, callback fct\n\n# Add a switch using a trackbar\nswitch = '0 : OFF\\n 1 : 
ON'\ncv2.createTrackbar(switch, 'image', 0, 1, nothing)\n\nwhile(1):\n cv2.imshow('image', img)\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n # Get trackbar position\n b = cv2.getTrackbarPos('B', 'image')\n g = cv2.getTrackbarPos('G', 'image')\n r = cv2.getTrackbarPos('R', 'image')\n s = cv2.getTrackbarPos(switch, 'image')\n\n if s == 0:\n img[:] = 0\n else:\n img[:] = [b, g, r]\n\ncv2.destroyAllWindows()\n","sub_path":"examples/12_python_opencv_trackbar_example1.py","file_name":"12_python_opencv_trackbar_example1.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"166511699","text":"from unittest import TestCase\nfrom terraform_compliance.steps.steps import i_have_name_section_configured, i_have_resource_defined\nfrom tests.mocks import MockedStep, MockedWorld, MockedWorldConfigTerraform\n\n\nclass TestGivenStepCases(TestCase):\n\n def setUp(self):\n self.step = MockedStep()\n self.radish_world = MockedWorld()\n\n def test_i_have_name_section_configured(self):\n i_have_name_section_configured(self.step, 'resource_type', 'resource', self.radish_world)\n self.assertEqual(self.step.context.stash.resource_list[0], MockedWorldConfigTerraform().terraform_config['resource']['resource_type'])\n\n i_have_name_section_configured(self.step, 'aws', 'provider', self.radish_world)\n self.assertEqual(self.step.context.stash, MockedWorldConfigTerraform().terraform_config['provider']['aws'])\n\n i_have_name_section_configured(self.step, 'non_existent', 'something_else', self.radish_world)\n self.assertEqual(self.step.context.stash, MockedWorldConfigTerraform().terraform_config['something_else'])\n\n i_have_name_section_configured(self.step, 'AWS S3 Bucket', 'resource', self.radish_world)\n self.assertEqual(self.step.context.stash.resource_list[0], MockedWorldConfigTerraform().terraform_config['resource']['aws_s3_bucket'])\n\n def test_i_have_resource_defined(self):\n i_have_resource_defined(self.step, 'resource_type', self.radish_world)\n self.assertEqual(self.step.context.stash.resource_list[0], MockedWorldConfigTerraform().terraform_config['resource']['resource_type'])\n\n i_have_resource_defined(self.step, 'AWS S3 Bucket', self.radish_world)\n self.assertEqual(self.step.context.stash.resource_list[0], MockedWorldConfigTerraform().terraform_config['resource']['aws_s3_bucket'])\n\n","sub_path":"tests/terraform_compliance/steps/test_given_steps.py","file_name":"test_given_steps.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"499788739","text":"__author__ = 'nsheridan'\n\nebrdList = {'asb': ['Ashgabat', 'Turkmenistan', 'ASB', '220', '5', '1', '512'],\n 'akm': ['Astana', 'Kazakhstan', 'AKM', '219', '6', '1', '2048'],\n 'bku': ['Baku', 'Azerbaijan', 'BKU', '229', '4', '1', '2048'],\n 'beg': ['Belgrade', 'Serbia & Mont.', 'BEG', '208', '1', '1', '8192'],\n 'bsk': ['Bishkek', 'Kyrgyzstan', 'BSK', '218', '5', '1', '2048'],\n 'brt': ['Bratislava', 'Slovak Republic', 'BRT', '228', '1', '1', '4096'],\n 'buh': ['Bucharest', 'Romania', 'BUH', '234', '2', '1', '8192'],\n 'bud': ['Budapest', 'Hungary', 'BUD', '237', '1', '1', '2048'],\n 'kiv': ['Chisinau', 'Moldova', 'KIV', '217', '2', '1', '10240'],\n 'dyu': ['Dushanbe', 'Tajikistan', 'DYU', '216', '5', '1', '1024'],\n 'eka': ['Ekaterinburg', 'Russia', 'EKA', '215', '5', '1', '1024'],\n 'kev': ['Kiev', 'Ukraine', 'KEV', '233', '2', '1', '2048'],\n 'kja': 
['Krasnoyarsk', 'Russia', 'KJA', '226', '7', '1', '1024'],\n 'ist': ['Istanbul', 'Turkey', 'IST', '227', '2', '1', '4096'],\n 'msk': ['Minsk', 'Belarus', 'MSK', '222', '2', '1', '2048'],\n 'mos': ['Moscow', 'Russia', 'MOS', '238', '3', '2', '4096'],\n 'pdg': ['Podgorica', 'Montenegro', 'PDG', '205', '1', '1', '1024'],\n 'prs': ['Pristina', 'Kosovo', 'PRS', '236', '1', '1', '1024'],\n 'rov': ['Rostov', 'Russia', 'ROV', '204', '3', '1', '1024'],\n 'smr': ['Samara', 'Russia', 'SMR', '206', '4', '1', '1024'],\n 'sjo': ['Sarajevo', 'Bosnia/Herzegovina', 'SJO', '225', '1', '1', '4096'],\n 'skp': ['Skopje', 'Macedonia', 'SKP', '214', '1', '1', '5120'],\n 'sof': ['Sofia', 'Bulgaria', 'SOF', '232', '2', '1', '2048'],\n 'stp': ['St Petersburg', 'Russia', 'STP', '224', '3', '1', '2048'],\n 'tbl': ['Tbilisi', 'Georgia', 'TBL', '213', '3', '1', '5120'],\n 'tir': ['Tirana', 'Albania', 'TIR', '212', '1', '1', '2048'],\n 'uln': ['Ulaanbaatar', 'Mongolia', 'ULN', '209', '8', '1', '2048'],\n 'vil': ['Vilnius', 'Lithuania', 'VIL', '221', '2', '1', '10240'],\n 'vld': ['Vladivostok', 'Russia', 'VLD', '211', '10', '1', '1024'],\n 'waw': ['Warsaw', 'Poland', 'WAW', '239', '1', '1', '10240'],\n 'evn': ['Yerevan', 'Armenia', 'EVN', '210', '4', '1', '2048'],\n 'zgb': ['Zagreb', 'Croatia', 'ZGB', '231', '2', '1', '10240']}\n\n\nclass BranchOffice(object):\n def __init__(self, city, country, loc_code, vpn_num, time_zone, link_num, wan_speed):\n \"\"\"\n Instance of a Office within the network\n :rtype : object\n \"\"\"\n self.city = city\n self.country = country\n self.loc_code = loc_code\n self.vpn_num = vpn_num\n self.time_zone = time_zone\n self.link_num = link_num\n self.wan_speed = wan_speed\n\nasb = BranchOffice('Ashgabat', 'Turkmenistan', 'ASB', '220', '5', '1', '512')\nakm = BranchOffice('Astana', 'Kazakhstan', 'AKM', '219', '6', '1', '2048')\nbku = BranchOffice('Baku', 'Azerbaijan', 'BKU', '229', '4', '1', '2048')\nbeg = BranchOffice('Belgrade', 'Serbia & Mont.', 'BEG', '208', '1', '1', '8192')\nbsk = BranchOffice('Bishkek', 'Kyrgyzstan', 'BSK', '218', '5', '1', '2048')\nbrt = BranchOffice('Bratislava', 'Slovak Republic', 'BRT', '228', '1', '1', '4096')\nbuh = BranchOffice('Bucharest', 'Romania', 'BUH', '234', '2', '1', '8192')\nbud = BranchOffice('Budapest', 'Hungary', 'BUD', '237', '1', '1', '2048')\nkiv = BranchOffice('Chisinau', 'Moldova', 'KIV', '217', '2', '1', '10240')\ndyu = BranchOffice('Dushanbe', 'Tajikistan', 'DYU', '216', '5', '1', '1024')\neka = BranchOffice('Ekaterinburg', 'Russia', 'EKA', '215', '5', '1', '1024')\nkev = BranchOffice('Kiev', 'Ukraine', 'KEV', '233', '2', '1', '2048')\nkja = BranchOffice('Krasnoyarsk', 'Russia', 'KJA', '226', '7', '1', '1024')\nist = BranchOffice('Istanbul', 'Turkey', 'IST', '227', '2', '1', '4096')\nmsk = BranchOffice('Minsk', 'Belarus', 'MSK', '222', '2', '1', '2048')\nmos = BranchOffice('Moscow', 'Russia', 'MOS', '238', '3', '2', '4096')\npdg = BranchOffice('Podgorica', 'Montenegro', 'PDG', '205', '1', '1', '1024')\nprs = BranchOffice('Pristina', 'Kosovo', 'PRS', '236', '1', '1', '1024')\nrov = BranchOffice('Rostov', 'Russia', 'ROV', '204', '3', '1', '1024')\nsmr = BranchOffice('Samara', 'Russia', 'SMR', '206', '4', '1', '1024')\nsjo = BranchOffice('Sarajevo', 'Bosnia/Herzegovina', 'SJO', '225', '1', '1', '4096')\nskp = BranchOffice('Skopje', 'Macedonia', 'SKP', '214', '1', '1', '5120')\nsof = BranchOffice('Sofia', 'Bulgaria', 'SOF', '232', '2', '1', '2048')\nstp = BranchOffice('St Petersburg', 'Russia', 'STP', '224', '3', '1', '2048')\ntbl = 
BranchOffice('Tbilisi', 'Georgia', 'TBL', '213', '3', '1', '5120')\ntir = BranchOffice('Tirana', 'Albania', 'TIR', '212', '1', '1', '2048')\nuln = BranchOffice('Ulaanbaatar', 'Mongolia', 'ULN', '209', '8', '1', '2048')\nvil = BranchOffice('Vilnius', 'Lithuania', 'VIL', '221', '2', '1', '10240')\nvld = BranchOffice('Vladivostok', 'Russia', 'VLD', '211', '10', '1', '1024')\nwaw = BranchOffice('Warsaw', 'Poland', 'WAW', '239', '1', '1', '10240')\nevn = BranchOffice('Yerevan', 'Armenia', 'EVN', '210', '4', '1', '2048')\nzgb = BranchOffice('Zagreb', 'Croatia', 'ZGB', '231', '2', '1', '10240')\n\nebrdBranches = [asb, akm, bku, beg, bsk, brt, buh, bud, kiv, dyu, eka, kev, kja, ist, msk, mos, pdg, prs, rov, smr, sjo,\n skp, sof, stp, tbl, tir, uln, vil, vld, waw, evn, zgb]\n\nfor office in ebrdBranches:\n print('!')\n print('ap system-profile \"EBRD-' + office.loc_code + '-APSYSTEM1\"')\n print(' lms-ip 10.' + office.vpn_num + '.240.101')\n print(' bkup-lms-ip 172.29.234.7')\n print(' lms-preemption')\n print('!')\n print('ap-group \"EBRD-' + office.loc_code + '-GRP01\"')\n print(' ap-system-profile \"EBRD-' + office.vpn_num + '-APSYSTEM1\"')\n print(' ids-profile \"EBRD-WIP-POLICY\"')\n print('!')\n","sub_path":"arubaConfigGeneratorObjects/arubaConfigGeneratorV0.0001.py","file_name":"arubaConfigGeneratorV0.0001.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"587936721","text":"import os\r\nimport sys\r\nimport numpy as np\r\nimport torchvision\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\n\r\nsys.path.append(\"..\")\r\nfrom util import util\r\nfrom util import array_operation as arr\r\n\r\nsave_dir = '../datasets/mnist2m_2d'\r\nutil.makedirs(save_dir)\r\n# mnist\r\nsource_data = torchvision.datasets.MNIST(\r\n root='./mnist/',\r\n train=True, # this is training data\r\n transform=torchvision.transforms.ToTensor(), # Converts a PIL.Image or numpy.ndarray to\r\n # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]\r\n download=True, # download it if you don't have it\r\n)\r\n\r\nmnist_signals = source_data.data.numpy()\r\nmnist_signals = mnist_signals.astype(np.float32)\r\nmnist_signals = ((mnist_signals)/256.0-0.1307)/0.3081\r\nmnist_signals = mnist_signals.reshape(-1,1,28,28)\r\n# mnist_signals = np.concatenate((mnist_signals,mnist_signals,mnist_signals),axis=1)\r\nmnist_labels = source_data.targets.numpy()\r\nmnist_labels = mnist_labels.reshape(-1,1)\r\nprint('mnist:',np.max(mnist_signals),np.min(mnist_signals))\r\nprint('mnist:',mnist_signals.shape)\r\n\r\ntarget_image_root = './mnist_m'\r\ndata_list = os.path.join(target_image_root, 'mnist_m_train_labels.txt')\r\nf = open(data_list, 'r')\r\ndata_list = f.readlines()\r\nf.close()\r\n\r\nmnist_m_labels = []\r\nmnist_m_signals = []\r\nfor data in data_list:\r\n mnist_m_labels.append(data[-2])\r\n mnist_m_signals.append(np.array(Image.open(os.path.join(target_image_root,'mnist_m_train', data[:-3])).convert('L')))\r\n\r\nmnist_m_signals = (np.array(mnist_m_signals)).astype(np.float32).reshape(-1,1,32,32)\r\n\r\nmnist_m_signals = ((mnist_m_signals[:,:,2:30,2:30]-128)/128)\r\nmnist_m_labels = np.array(mnist_m_labels).reshape(-1,1)\r\nprint('mnist_m:',np.max(mnist_m_signals),np.min(mnist_m_signals))\r\nprint('mnist_m:',mnist_m_signals.shape)\r\n\r\nindexs = np.array([len(mnist_labels)])\r\ndomains = 
np.concatenate((np.zeros(len(mnist_labels),dtype=np.int64),np.ones(len(mnist_m_labels),dtype=np.int64)),axis=0)\r\nsignals = np.concatenate((mnist_signals,mnist_m_signals),axis=0)\r\nlabels = np.concatenate((mnist_labels,mnist_m_labels),axis=0)\r\n\r\nnp.save(os.path.join(save_dir,'index'), indexs)\r\nnp.save(os.path.join(save_dir,'domains'), domains)\r\nnp.save(os.path.join(save_dir,'signals'), signals)\r\nnp.save(os.path.join(save_dir,'labels'), labels)","sub_path":"examples/mnist2mnist_m.py","file_name":"mnist2mnist_m.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"563402529","text":"from collections import Counter\nimport json\nimport requests\n\n\nAPI = \"https://codeforces.com/api\"\nMISSING_CONTESTS = [\n 1222, 1224, 1226, 1232, 1233, 1258, 1289,\n 1306, 1317, 1318, 1390,\n 1410, 1412, 1414, 1429, 1448, 1449,\n 1502, 1507, 1518, 1564, 1565, 1568, 1577, 1587, 1590, 1595, 1596, 1597,\n 1636, 1640, 1643, 1653, 1655, 1664, 1683, 1704\n]\n\n\ndef make_query(name: str, params: dict[str, ]) -> dict:\n params_str = '&'.join([f\"{k}={v}\" for k, v in params.items()])\n url = f\"{API}/{name}?{params_str}\"\n resp = requests.get(url).json()\n assert resp[\"status\"] == \"OK\", f\"{url} -> {resp}\"\n return resp[\"result\"]\n\n\ndef get_participants_list(contest_id: int) -> set[str]:\n try:\n with open(f\"cache/{contest_id}.json\", 'r') as f:\n resp = json.load(f)\n except FileNotFoundError:\n resp = make_query(\"contest.standings\", {\"contestId\": contest_id, \"showUnofficial\": \"true\"})\n with open(f\"cache/{contest_id}.json\", 'w') as f:\n json.dump(resp, f)\n\n participants = resp[\"rows\"]\n handles = [member[\"handle\"] for item in participants\n for member in item[\"party\"][\"members\"]\n if item[\"party\"][\"participantType\"] in [\"CONTESTANT\", \"OUT_OF_COMPETITION\"]]\n return set(handles)\n\n\ndef main():\n conflict = Counter()\n separate = Counter()\n\n print(\"Fetching contest data\")\n for contest_id in range(1185, 1710):\n if contest_id % 10 == 0:\n print(\"Fetching contest\", contest_id)\n if contest_id in MISSING_CONTESTS:\n continue\n participants = get_participants_list(contest_id)\n if \"SuperJ6\" in participants:\n conflict.update(participants)\n else:\n separate.update(participants)\n\n for candidate, _ in separate.most_common():\n if separate[candidate] >= 5 and conflict[candidate] <= 2:\n print(candidate, separate[candidate], conflict[candidate])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"LITCTF/2022/misc/RythmsDoubleIdentity/solve_rythms_double_identity.py","file_name":"solve_rythms_double_identity.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"259317675","text":"from .message import CheckMessage\r\nfrom ..config.standards import google_standard, sun_standard\r\nfrom ..operation import execute\r\n\r\n\r\nclass CheckStandard:\r\n \"\"\"\r\n 检查标准类\r\n \"\"\"\r\n\r\n def __init__(self, standard_file, ignore_return_code=False, ignore_stderr=False):\r\n \"\"\"\r\n 构造函数\r\n :param standard_file: 检查规范标准文件\r\n :param ignore_return_code: 忽略非零返回值\r\n :param ignore_stderr: 忽略异常输出\r\n \"\"\"\r\n self.__standard_file = standard_file\r\n self.__ignore_return_code = ignore_return_code\r\n self.__ignore_stderr = ignore_stderr\r\n\r\n @property\r\n def standard_file(self):\r\n \"\"\"\r\n 获取检查规范标准文件\r\n :return: 检查规范标准文件\r\n \"\"\"\r\n return 
self.__standard_file\r\n\r\n def check(self, check_dir, abs_path=False):\r\n \"\"\"\r\n 检查对应的路径(文件)\r\n :param check_dir: 检查的路径(文件)\r\n :param abs_path: 使用绝对路径(默认为False)\r\n :return: 检查结果(list格式,内部包含CheckMessage对象)\r\n \"\"\"\r\n out, err = execute(\r\n \"-c\", self.standard_file, check_dir,\r\n ignore_return_code=self.__ignore_return_code,\r\n ignore_stderr=self.__ignore_stderr\r\n )\r\n messages = []\r\n for line in out.splitlines():\r\n if abs_path:\r\n work_dir = None\r\n else:\r\n work_dir = check_dir\r\n message = CheckMessage.parse(line, work_dir)\r\n if message:\r\n messages += [message]\r\n return messages\r\n\r\n\r\nclass GoogleStandard(CheckStandard):\r\n \"\"\"\r\n 内置的Google规范检查标注\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n 构造函数\r\n \"\"\"\r\n super().__init__(google_standard)\r\n\r\n\r\nclass SunStandard(CheckStandard):\r\n \"\"\"\r\n 内置的Sun规范检查标准\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n 构造函数\r\n \"\"\"\r\n super().__init__(sun_standard, ignore_return_code=True)\r\n","sub_path":"pycheckstyle/models/standards.py","file_name":"standards.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"640580903","text":"import os\nimport time\n\nfrom selenium import webdriver\n\nlink = \" http://suninjuly.github.io/file_input.html\"\n\ntry:\n browser = webdriver.Chrome()\n browser.get(link)\n\n input1 = browser.find_element_by_tag_name('input ')\n input1.send_keys(\"Ivan\")\n # \n input2 = browser.find_element_by_name(\"lastname\")\n input2.send_keys(\"Petrov\")\n # \n input3 = browser.find_element_by_name(\"email\")\n input3.send_keys(\"ase_ws@mail.ru\")\n # \n element = browser.find_element_by_name(\"file\")\n\n current_dir = os.path.abspath(os.path.dirname(__file__)) # получаем путь к директории текущего исполняемого файла\n os.mkdir(r'file.txt')\n file_path = os.path.join(current_dir, 'file.txt') # добавляем к этому пути имя файла\n #print(file_path)\n #print(current_dir)\n element.send_keys(file_path)\n button = browser.find_element_by_css_selector('button')\n button.click()\n alert = browser.switch_to.alert\n alert_text = alert.text\n print(alert_text)\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n os.rmdir(r\"C:\\Users\\home\\PycharmProjects\\Selenium\\file.txt\")\n\n# не забываем оставить пустую строку в конце файла\n\n\n\n\n\n\n","sub_path":"OSFiele.py","file_name":"OSFiele.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"77593438","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Movie',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('title', models.CharField(max_length=260)),\n ('release_date', models.CharField(max_length=11)),\n ],\n ),\n migrations.CreateModel(\n name='Rater',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('age', models.IntegerField()),\n ('gender', models.CharField(max_length=1)),\n ('occupation', models.CharField(max_length=20)),\n ('zipcode', models.CharField(max_length=5)),\n ],\n ),\n migrations.CreateModel(\n name='Rating',\n 
fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('rating', models.IntegerField()),\n ('timestamp', models.IntegerField()),\n ('movie', models.ForeignKey(to='ratings.Movie')),\n ('rater', models.ForeignKey(to='ratings.Rater')),\n ],\n ),\n ]\n","sub_path":"movieratings/ratings/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"38711725","text":"# encoding: utf-8\nimport pytest\n\nfrom wellcomeml.ml.bert_semantic_equivalence import \\\n SemanticEquivalenceClassifier, SemanticEquivalenceMetaClassifier\n\n\n@pytest.mark.transformers\ndef test_semantic_similarity():\n classifier = SemanticEquivalenceClassifier(pretrained=\"scibert\",\n batch_size=2,\n eval_batch_size=1)\n\n X = [('This sentence has context_1', 'This one also has context_1'),\n ('This sentence has context_2', 'This one also has context_2'),\n ('This sentence is about something else', 'God save the queen')]\n\n y = [1, 1, 0]\n\n classifier.fit(X, y, epochs=3)\n\n loss_initial = classifier.history['loss'][0]\n loss_epoch_2 = classifier.history['loss'][2]\n scores = classifier.score(X)\n\n assert loss_epoch_2 < loss_initial\n assert len(classifier.predict(X)) == 3\n assert (scores > 0).sum() == 6\n assert (scores < 1).sum() == 6\n\n # Fits two extra epoch\n\n classifier.fit(X, y, epochs=2)\n\n # Asserts that the classifier model is adding to the history, and still\n # not re-training from scratch\n\n assert len(classifier.history['loss']) == 5\n\n\n@pytest.mark.transformers\ndef test_semantic_meta_fit():\n classifier = SemanticEquivalenceMetaClassifier(n_numerical_features=2,\n pretrained=\"scibert\",\n batch_size=2,\n eval_batch_size=1)\n\n X = [['This sentence has context_1', 'This one also has context_1', 0.1, 0.2],\n ['This sentence has context_2', 'This one also has context_2', 0.2, 0.2],\n ['This sentence is about something else', 'God save the queen', -0.5, -0.5]]\n\n y = [1, 1, 0]\n\n classifier.fit(X, y, epochs=3)\n\n loss_initial = classifier.history['loss'][0]\n loss_epoch_2 = classifier.history['loss'][2]\n scores = classifier.score(X)\n\n assert loss_epoch_2 < loss_initial\n assert len(classifier.predict(X)) == 3\n assert (scores > 0).sum() == 6\n assert (scores < 1).sum() == 6\n\n # Fits two extra epochs\n\n classifier.fit(X, y, epochs=2)\n\n # Asserts that the classifier model is adding to the history, and still\n # not re-training from scratch\n\n assert len(classifier.history['loss']) == 5\n\n\n@pytest.mark.transformers\ndef test_save_and_load_semantic(tmp_path):\n classifier_1 = SemanticEquivalenceClassifier(pretrained=\"scibert\",\n batch_size=2,\n eval_batch_size=1)\n\n X = [('This sentence has context_1', 'This one also has context_1'),\n ('This sentence has context_2', 'This one also has context_2'),\n ('This sentence is about something else', 'God save the queen')]\n\n y = [1, 1, 0]\n\n classifier_1.fit(X, y, epochs=1)\n classifier_1.save(tmp_path)\n scores_1 = classifier_1.score(X)\n\n classifier_2 = SemanticEquivalenceClassifier(pretrained=\"scibert\")\n classifier_2.load(tmp_path)\n scores_2 = classifier_2.score(X)\n\n score_diff = sum([abs(diff) for diff in (scores_1-scores_2).flatten()])\n\n assert pytest.approx(score_diff, 0)\n\n\n@pytest.mark.skip(reason=\"Test requires too much memory\")\n@pytest.mark.transformers\ndef test_save_and_load_meta(tmp_path):\n classifier = 
SemanticEquivalenceMetaClassifier(n_numerical_features=1,\n pretrained=\"bert\",\n batch_size=2,\n eval_batch_size=1)\n\n # Save and load for Meta Models only accepts strings (not PosixPath)\n classifier._initialise_models()\n classifier.save(str(tmp_path.absolute()) + '.h5')\n config_1 = classifier.config\n\n classifier = SemanticEquivalenceMetaClassifier(n_numerical_features=1)\n classifier.load(str(tmp_path.absolute()) + '.h5')\n\n config_2 = classifier.config\n\n assert config_1 == config_2\n","sub_path":"tests/transformers/test_semantic_similarity.py","file_name":"test_semantic_similarity.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"111346345","text":"import os\n\nkitti_det_label_path = '/home/cory/KITTI_Dataset/data_object_image_2/training/label_2'\nout_label_path = '/home/cory/KITTI_Dataset/detection_label'\n\nall_images_file = '/media/cory/c_disk/Project/KITTI_Dataset/kitti_detection_images.txt'\nall_labels_file = '/media/cory/c_disk/Project/KITTI_Dataset/kitti_detection_labels.txt'\n\n\ndef copy_exclude(filename, outfilename, patterns):\n with open(outfilename, 'w') as out:\n with open(filename) as f:\n for line in f.readlines():\n pattern_found = False\n for p in patterns:\n if line.find(p) >= 0:\n pattern_found = True\n if not pattern_found:\n out.write(line)\n print(line.strip())\n\n\ndef copy_include(filename, outfilename, patterns):\n with open(outfilename, 'w') as out:\n with open(filename) as f:\n for line in f.readlines():\n for p in patterns:\n if line.find(p) >= 0:\n print(line.strip())\n out.write(line)\n break\n\n\ndef convert_file(infile_path, outfile_path):\n # 'Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01'\n infile = open(infile_path)\n outfile = open(outfile_path, 'w')\n for line in infile.readlines():\n v = line.strip().split(' ')\n bb = list(map(str, map(int, map(float, v[4:8]))))\n outfile.write(v[0] + ' ' + ' '.join(bb) + '\\n')\n\n\ndef convert_format():\n file_list = os.listdir(kitti_det_label_path)\n file_list.sort()\n for f in file_list:\n infile_path = kitti_det_label_path + '/' + f\n outfile_path = out_label_path + '/' + f\n convert_file(infile_path, outfile_path)\n\n print(infile_path, outfile_path)\n\n print(len(file_list))\n\n\ndef main():\n copy_exclude(all_images_file, 'kitti/kitti_det_train_images.txt', ['/006', '/007'])\n copy_exclude(all_labels_file, 'kitti/kitti_det_train_labels.txt', ['/006', '/007'])\n copy_include(all_images_file, 'kitti/kitti_det_val_images.txt', ['/006', '/007'])\n copy_include(all_labels_file, 'kitti/kitti_det_val_labels.txt', ['/006', '/007'])\n\nif __name__ == '__main__':\n # convert_format()\n main()\n","sub_path":"train_data/gen_kitti_det_train_data.py","file_name":"gen_kitti_det_train_data.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"76562382","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/cgcloud/core/fedora_box.py\n# Compiled at: 2016-11-22 15:21:45\nfrom abc import abstractmethod\nimport re\nfrom operator import attrgetter\nfrom fabric.operations import sudo\nfrom cgcloud.core.box import fabric_task\nfrom cgcloud.core.agent_box import AgentBox\nfrom cgcloud.core.cloud_init_box import CloudInitBox\nfrom 
cgcloud.core.rc_local_box import RcLocalBox\nfrom cgcloud.core.yum_box import YumBox\n\nclass FedoraBox(YumBox, AgentBox, CloudInitBox, RcLocalBox):\n \"\"\"\n A box that boots of an official Fedora cloud AMI\n \"\"\"\n\n @abstractmethod\n def release(self):\n \"\"\"\n :return: the version number of the Fedora release, e.g. 17\n :rtype: int\n \"\"\"\n raise NotImplementedError\n\n def admin_account(self):\n if self.release() >= 19:\n return 'fedora'\n return 'ec2-user'\n\n def _base_image(self, virtualization_type):\n release = self.release()\n name = None\n if release < 21:\n name = 'Fedora-x86_64-%i-*' % release\n elif release == 21:\n name = 'Fedora-Cloud-Base-*-21.x86_64-*'\n else:\n name = 'Fedora-Cloud-Base-%s-*.x86_64-*' % release\n images = self.ctx.ec2.get_all_images(owners=[\n '125523088429'], filters={'name': name, \n 'root-device-type': 'ebs', \n 'virtualization-type': virtualization_type})\n images = [ i for i in images if not re.search('Alpha|Beta', i.name) ]\n if not images:\n raise self.NoSuchImageException(\"Can't find any AMIs for Fedora %i and virtualization type %s\" % (\n release, virtualization_type))\n images.sort(key=attrgetter('name'), reverse=True)\n if False:\n if len(images) > 1:\n raise RuntimeError('Found more than one AMI for Fedora %i and virtualization type %s' % (\n release, virtualization_type))\n return images[0]\n\n def _list_packages_to_install(self):\n return super(FedoraBox, self)._list_packages_to_install() + [\n 'redhat-lsb']\n\n def _get_package_substitutions(self):\n return super(FedoraBox, self)._get_package_substitutions() + [\n (\n 'python', ('python', 'openssl-devel'))]\n\n @fabric_task\n def _get_rc_local_path(self):\n rc_local_path = '/etc/rc.d/rc.local'\n sudo(('test -f {f} || echo \"#!/bin/sh\" > {f} && chmod +x {f}').format(f=rc_local_path))\n return rc_local_path","sub_path":"pycfiles/cgcloud_core-1.6.0-py2.7/fedora_box.py","file_name":"fedora_box.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"601515526","text":"############### Author: Bipul Ranjan @ranjanbipul ###############\r\nimport sys\r\nimport time\r\nimport os\r\nimport math\r\nimport operator\r\nimport random\r\nfrom functools import lru_cache\r\nfrom decimal import Decimal as D\r\nfrom fractions import Fraction as F\r\n#sys.setrecursionlimit(10000)\r\n#@lru_cache(maxsize=None)\r\nMOD = 1000000007\r\n################################################################\r\nQNO = 'a' #SET QUESTION NUMBER\r\nFIN,FOUT = QNO+'.in.txt',QNO+'.out.txt'\r\nFIN = QNO.capitalize()+'-small-attempt0.in'\r\n#FIN = QNO+'.sample.txt'\r\n#FIN = QNO.capitalize()+'-large.in'\r\nfin = open(FIN)\r\nfout = open(FOUT,'w')\r\nsys.stdin = fin\r\n######################## PROGRAM START ##########################\r\n\r\ndef solve(a,n):\r\n return len(a)\r\n\r\nfor nu in range(int(input())):\r\n n = int(input())\r\n a = [int(i) for i in input().strip().split(\" \")]\r\n t = 0\r\n for i in a: t+=i\r\n print(\"Case #{0}:\".format(nu+1),file=fout,end=\" \")\r\n while t>0:\r\n #print(t)\r\n s = []\r\n if t==2:\r\n for i in range(n):\r\n if a[i]==1:\r\n s.append(i)\r\n t-=1\r\n a[i]-=1\r\n if len(s)==2: break\r\n elif t==3:\r\n for i in range(n):\r\n if a[i]==1:\r\n s.append(i)\r\n t-=1\r\n a[i]-=1\r\n break\r\n else:\r\n m = 0\r\n for i in range(1,n):\r\n if a[i]>a[m]: m =i\r\n s.append(m)\r\n t-=1\r\n a[m]-=1\r\n m = 0\r\n for i in range(1,n):\r\n if a[i]>a[m]: m =i\r\n s.append(m)\r\n t-=1\r\n a[m]-=1\r\n s = 
[chr(i+65) for i in s]\r\n print(\"{0}\".format(\"\".join(s)),file=fout,end=\" \")\r\n print(\"\",file=fout)\r\n######################## PROGRAM END #############################\r\nfin.close()\r\nfout.close()\r\nprint(\"Program complete\")\r\n","sub_path":"solutions_5753053697277952_0/Python/BIPUL/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"554743992","text":"from scipy.interpolate import interp1d\r\nfrom numpy import *\r\nfrom math import *\r\nimport random\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef f(x):\r\n return sin(x)\r\n\r\nxs = []\r\ncos_vals = []\r\nx = 0.0\r\n\r\nfor i in range (12):\r\n x = i*(1-0.5*random.random())\r\n cos_vals += [f(x)]\r\n xs += [x]\r\n\r\nxs_real = []\r\ncos_vals_real = []\r\n\r\nfor i in range (120):\r\n x = i/10\r\n cos_vals_real += [f(x)]\r\n xs_real += [x]\r\n\r\n\r\n\r\ndpi = 80\r\nfig = plt.figure(dpi=dpi, figsize=(512 / dpi, 384 / dpi))\r\nmpl.rcParams.update({'font.size': 10})\r\n\r\nplt.axis([0, 12, -1.5, 1.5])\r\n\r\nplt.title('Cos(x)')\r\nplt.xlabel('x')\r\nplt.ylabel('F(x)')\r\n\r\n\r\n\r\n\r\ng = interp1d(xs, cos_vals, kind = 3)\r\n\r\nxs_interpol = []\r\ncos_vals_interpol = []\r\nx = 0.0\r\n\r\nfor i in range (120):\r\n try:\r\n x = i/10\r\n cos_vals_interpol += [g(x)]\r\n xs_interpol += [x]\r\n except:\r\n print('')\r\n\r\nplt.plot(xs, cos_vals, 'go')\r\nplt.plot(xs_interpol, cos_vals_interpol, color='red', linestyle='solid',\r\n label='cos(x)')\r\nplt.plot(xs_real, cos_vals_real, color='blue', linestyle='dashed',\r\n label='cos(x)')\r\n\r\nplt.show()\r\nerror = 0\r\nfor i in range(ceil(max(xs) * 10)):\r\n error += (cos_vals_interpol[i] - cos_vals_real[i]) ** 2\r\nerror = sqrt(error / ceil(max(xs) * 10))\r\nprint(error)\r\n","sub_path":"Interpolation_System.py","file_name":"Interpolation_System.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"449750058","text":"import asyncio\nfrom typing import TYPE_CHECKING, Any, Dict, Optional\n\nfrom hummingbot.connector.exchange.ftx import ftx_constants as CONSTANTS\nfrom hummingbot.connector.exchange.ftx.ftx_auth import FtxAuth\nfrom hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource\nfrom hummingbot.core.web_assistant.connections.data_types import WSJSONRequest\nfrom hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory\nfrom hummingbot.core.web_assistant.ws_assistant import WSAssistant\nfrom hummingbot.logger import HummingbotLogger\n\nif TYPE_CHECKING:\n from hummingbot.connector.exchange.ftx.ftx_exchange import FtxExchange\n\n\nclass FtxAPIUserStreamDataSource(UserStreamTrackerDataSource):\n\n _logger: Optional[HummingbotLogger] = None\n\n def __init__(\n self,\n auth: FtxAuth,\n connector: 'FtxExchange',\n api_factory: WebAssistantsFactory):\n super().__init__()\n self._auth: FtxAuth = auth\n self._connector = connector\n self._api_factory = api_factory\n self._last_ws_message_sent_timestamp = 0\n\n async def _connected_websocket_assistant(self) -> WSAssistant:\n \"\"\"\n Creates an instance of WSAssistant connected to the exchange\n \"\"\"\n\n ws: WSAssistant = await self._get_ws_assistant()\n async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_CONNECTION_LIMIT_ID):\n await ws.connect(ws_url=CONSTANTS.FTX_WS_URL)\n\n payload = {\n \"op\": 
\"login\",\n \"args\": self._auth.websocket_login_parameters()\n }\n\n login_request: WSJSONRequest = WSJSONRequest(payload=payload)\n\n async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_REQUEST_LIMIT_ID):\n await ws.send(login_request)\n\n return ws\n\n async def _subscribe_channels(self, websocket_assistant: WSAssistant):\n try:\n payload = {\n \"op\": \"subscribe\",\n \"channel\": CONSTANTS.WS_PRIVATE_FILLS_CHANNEL,\n }\n subscribe_fills_request: WSJSONRequest = WSJSONRequest(payload=payload)\n\n payload = {\n \"op\": \"subscribe\",\n \"channel\": CONSTANTS.WS_PRIVATE_ORDERS_CHANNEL,\n }\n subscribe_orders_request: WSJSONRequest = WSJSONRequest(payload=payload)\n\n async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_REQUEST_LIMIT_ID):\n await websocket_assistant.send(subscribe_fills_request)\n async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_REQUEST_LIMIT_ID):\n await websocket_assistant.send(subscribe_orders_request)\n\n self._last_ws_message_sent_timestamp = self._time()\n self.logger().info(\"Subscribed to private fills and orders channels...\")\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().exception(\"Unexpected error occurred subscribing to order book trading and delta streams...\")\n raise\n\n async def _process_websocket_messages(self, websocket_assistant: WSAssistant, queue: asyncio.Queue):\n while True:\n try:\n seconds_until_next_ping = (CONSTANTS.WS_PING_INTERVAL\n - (self._time() - self._last_ws_message_sent_timestamp))\n await asyncio.wait_for(super()._process_websocket_messages(\n websocket_assistant=websocket_assistant,\n queue=queue),\n timeout=seconds_until_next_ping)\n except asyncio.TimeoutError:\n payload = {\"op\": \"ping\"}\n ping_request = WSJSONRequest(payload=payload)\n self._last_ws_message_sent_timestamp = self._time()\n await websocket_assistant.send(request=ping_request)\n\n async def _process_event_message(self, event_message: Dict[str, Any], queue: asyncio.Queue):\n event_type = event_message.get(\"type\")\n error_code = event_message.get(\"code\")\n error_message = event_message.get(\"msg\")\n if (event_type == CONSTANTS.WS_EVENT_ERROR_TYPE\n and error_code == CONSTANTS.WS_EVENT_ERROR_CODE\n and error_message in [\n CONSTANTS.WS_EVENT_INVALID_LOGIN_MESSAGE,\n CONSTANTS.WS_EVENT_NOT_LOGGED_IN_MESSAGE]):\n raise IOError(f\"Error authenticating the user stream websocket connection \"\n f\"(code: {error_code}, message: {error_message})\")\n elif (event_type == CONSTANTS.WS_EVENT_UPDATE_TYPE\n and event_message.get(\"channel\") in [\n CONSTANTS.WS_PRIVATE_ORDERS_CHANNEL,\n CONSTANTS.WS_PRIVATE_FILLS_CHANNEL]):\n queue.put_nowait(event_message)\n\n async def _get_ws_assistant(self) -> WSAssistant:\n if self._ws_assistant is None:\n self._ws_assistant = await self._api_factory.get_ws_assistant()\n return self._ws_assistant\n","sub_path":"hummingbot/connector/exchange/ftx/ftx_api_user_stream_data_source.py","file_name":"ftx_api_user_stream_data_source.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"48184101","text":"# Functions for differential phase contrast imaging\n\nimport numpy as np\nfrom ..utils import make_Fourier_coords2D\nfrom ...file.datastructure import DataCube\n\n############################# DPC Functions ################################\n\ndef get_CoM_images(datacube, mask=None, normalize=True):\n \"\"\"\n Calculates two images - center of mass x 
and y - from a 4D-STEM datacube.\n\n The centers of mass are returned in units of pixels and in the Qx/Qy detector coordinate system.\n\n Accepts:\n datacube (DataCube) the 4D-STEM data\n mask (2D array) optionally, calculate the CoM only in the areas where mask==True\n normalize (bool) if true, subtract off the mean of the CoM images\n\n Returns:\n CoMx (2D array) the center of mass x coordinate\n CoMy (2D array) the center of mass y coordinate\n \"\"\"\n assert isinstance(datacube, DataCube)\n assert isinstance(normalize, bool)\n\n # Coordinates\n qy,qx = np.meshgrid(np.arange(datacube.Q_Ny),np.arange(datacube.Q_Nx))\n if mask is not None:\n qx *= mask\n qy *= mask\n\n # Get CoM\n CoMx = np.zeros((datacube.R_Nx,datacube.R_Ny))\n CoMy = np.zeros((datacube.R_Nx,datacube.R_Ny))\n mass = np.zeros((datacube.R_Nx,datacube.R_Ny))\n for Rx in range(datacube.R_Nx):\n for Ry in range(datacube.R_Ny):\n DP = datacube.data[Rx,Ry,:,:]\n mass[Rx,Ry] = np.sum(DP*mask)\n CoMx[Rx,Ry] = np.sum(qx*DP) / mass[Rx,Ry]\n CoMy[Rx,Ry] = np.sum(qy*DP) / mass[Rx,Ry]\n\n if normalize:\n CoMx -= np.mean(CoMx)\n CoMy -= np.mean(CoMy)\n\n return CoMx, CoMy\n\ndef get_rotation_and_flip(CoMx, CoMy, Q_Nx, Q_Ny, n_iter=100, stepsize=1, return_costs=False):\n \"\"\"\n Find the rotation offset between real space and diffraction space, and whether there exists a\n relative axis flip their coordinate systems.\n\n The idea of the algorithm is to find the rotation which best preserves self-consistency in the\n observed CoM changes. By 'self-consistency', we refer to the requirement that the CoM changes -\n because they correspond to a gradient - must be a conservative vector field (i.e. path\n independent). This condition fails to be met when there exists some rotational offset between\n real and diffraction space. Thus this algorithm performs gradient descent to minimize the square\n of the sums of all the 4-pixel closed loop line integrals, while varying the rotation angle of\n the diffraction space (CoMx/CoMy) axes.\n\n Accepts:\n CoMx (2D array) the x coordinates of the diffraction space centers of mass\n CoMy (2D array) the y coordinates of the diffraction space centers of mass\n Q_Nx (int) the x shape of diffraction space\n Q_Ny (int) the y shape of diffraction space\n n_iter (int) the number of gradient descent iterations\n stepsize (float) the gradient descent step size (i.e. change to theta in a single\n step, relative to the gradient)\n return_costs (bool) if True, returns the theta values and costs, both with and without an\n axis flip, for all gradient descent steps, for diagnostic purposes\n\n Returns:\n theta (float) the rotation angle between the real and diffraction space coordinates\n flip (bool) if True, the real and diffraction space coordinates are flipped\n relative to one another. By convention, we take flip=True to correspond to\n the change CoMy --> -CoMy.\n thetas (float) returned iff return_costs is True. The theta values at each gradient\n descent step for flip=False\n costs (float) returned iff return_costs is True. The cost values at each gradient\n descent step for flip=False\n thetas_f (float) returned iff return_costs is True. The theta values for flip=True\n descent step for flip=False\n costs_f (float) returned iff return_costs is True. 
The cost values for flip=False\n \"\"\"\n # Cost function coefficients, with / without flip\n term1 = np.roll(CoMx,(0,-1),axis=(0,1)) - np.roll(CoMx,( 0,+1),axis=(0,1)) + \\\n np.roll(CoMy,(1, 0),axis=(0,1)) - np.roll(CoMy,(-1, 0),axis=(0,1))\n term2 = np.roll(CoMx,(1, 0),axis=(0,1)) - np.roll(CoMx,(-1, 0),axis=(0,1)) + \\\n np.roll(CoMy,(0, 1),axis=(0,1)) - np.roll(CoMy,( 0,-1),axis=(0,1))\n\n term1_f = np.roll( CoMx,(0,-1),axis=(0,1)) - np.roll( CoMx,( 0,+1),axis=(0,1)) + \\\n np.roll(-CoMy,(1, 0),axis=(0,1)) - np.roll(-CoMy,(-1, 0),axis=(0,1))\n term2_f = np.roll( CoMx,(1, 0),axis=(0,1)) - np.roll( CoMx,(-1, 0),axis=(0,1)) + \\\n np.roll(-CoMy,(0, 1),axis=(0,1)) - np.roll(-CoMy,( 0,-1),axis=(0,1))\n\n # Gradient descent\n\n thetas = np.zeros(n_iter)\n costs = np.zeros(n_iter)\n theta = 0\n for i in range(n_iter):\n thetas[i] = theta\n gradAll = stepsize * ( term1*np.cos(theta) + term2*np.sin(theta)) * \\\n (-term1*np.sin(theta) + term2*np.cos(theta))\n grad = np.mean(gradAll)\n theta -= grad*stepsize\n costs[i] = np.mean((term1*np.cos(theta) + term2*np.sin(theta))**2)\n\n thetas_f = np.zeros(n_iter)\n costs_f = np.zeros(n_iter)\n theta = 0\n for i in range(n_iter):\n thetas_f[i] = theta\n gradAll = stepsize * ( term1_f*np.cos(theta) + term2_f*np.sin(theta)) * \\\n (-term1_f*np.sin(theta) + term2_f*np.cos(theta))\n grad = np.mean(gradAll)\n theta -= grad*stepsize\n costs_f[i] = np.mean((term1_f*np.cos(theta) + term2_f*np.sin(theta))**2)\n\n # Get rotation and flip\n if costs_f[-1] < costs[-1]:\n flip = True\n theta = thetas_f[-1]\n else:\n flip = False\n theta = thetas[-1]\n\n if return_costs:\n return theta, flip, thetas, costs, thetas_f, costs_f\n else:\n return theta, flip\n\ndef get_phase_from_CoM(CoMx, CoMy, theta, flip, regLowPass=0.5, regHighPass=100, paddingfactor=2,\n stepsize=1, n_iter=10):\n \"\"\"\n Calculate the phase of the sample transmittance from the diffraction centers of mass.\n A bare bones description of the approach taken here is below - for detailed discussion of the\n relevnt theory, see, e.g.:\n Ishizuka et al, Microscopy (2017) 397-405\n Close et al, Ultramicroscopy 159 (2015) 124-137\n Wadell and Chapman, Optik 54 (1979) No. 2, 83-96\n\n The idea here is that the deflection of the center of mass of the electron beam in the\n diffraction plane scales linearly with the gradient of the phase of the sample transmittance.\n When this correspondence holds, it is therefore possible to invert the differential equation and\n extract the phase itself.* The primary assumption made is that the sample is well\n described as a pure phase object (i.e. the real part of the transmittance is 1). The inversion\n is performed in this algorithm in Fourier space, i.e. using the Fourier transform property\n that derivatives in real space are turned into multiplication in Fourier space.\n\n *Note: because in DPC a differential equation is being inverted - i.e. the fundamental theorem\n of calculus is invoked - one might be tempted to call this \"integrated differential phase\n contrast\". Strictly speaking, this term is redundant - performing an integration is simply how\n DPC works. 
Anyone who tells you otherwise is selling something.\n\n Accepts:\n CoMx (2D array) the diffraction space centers of mass x coordinates\n CoMy (2D array) the diffraction space centers of mass y coordinates\n theta (float) the rotational offset between real and diffraction space coordinates\n flip (bool) whether or not the real and diffraction space coords contain a\n relative flip\n regLowPass (float) low pass regularization term for the Fourier integration operators\n regHighPass (float) high pass regularization term for the Fourier integration operators\n paddingfactor (int) padding to add to the CoM arrays for boundry condition handling.\n 1 corresponds to no padding, 2 to doubling the array size, etc.\n stepsize (float) the stepsize in the iteration step which updates the phase\n n_iter (int) the number of iterations\n\n Returns:\n phase (2D array) the phase of the sample transmittance\n error (1D array) the error - RMSD of the phase gradients compared to the CoM - at\n each iteration step\n \"\"\"\n assert isinstance(flip,bool)\n assert isinstance(paddingfactor,(int,np.integer))\n assert isinstance(n_iter,(int,np.integer))\n\n # Coordinates\n R_Nx,R_Ny = CoMx.shape\n R_Nx_padded,R_Ny_padded = R_Nx*paddingfactor,R_Ny*paddingfactor\n qx,qy = make_Fourier_coords2D(R_Nx_padded,R_Ny_padded,pixelSize=1)\n qr2 = qx**2 + qy**2\n\n # Invese operators\n denominator = qr2 + regHighPass + qr2**2*regLowPass\n _ = np.seterr(divide='ignore')\n denominator = 1./denominator\n denominator[0,0] = 0\n _ = np.seterr(divide='warn')\n f = 1j * 0.25 * stepsize\n qxOperator = f*qx*denominator\n qyOperator = f*qy*denominator\n\n # Perform rotation and flipping\n if not flip:\n CoMx_rot = CoMx*np.cos(theta) - CoMy*np.sin(theta)\n CoMy_rot = CoMx*np.sin(theta) + CoMy*np.cos(theta)\n if flip:\n CoMx_rot = CoMx*np.cos(theta) + CoMy*np.sin(theta)\n CoMy_rot = CoMx*np.sin(theta) - CoMy*np.cos(theta)\n\n # Initializations\n phase = np.zeros((R_Nx_padded,R_Ny_padded))\n update = np.zeros((R_Nx_padded,R_Ny_padded))\n dx = np.zeros((R_Nx_padded,R_Ny_padded))\n dy = np.zeros((R_Nx_padded,R_Ny_padded))\n error = np.zeros(n_iter)\n mask = np.zeros((R_Nx_padded,R_Ny_padded),dtype=bool)\n mask[:R_Nx,:R_Ny] = True\n maskInv = mask==False\n\n # Iterative reconstruction\n for i in range(n_iter):\n\n # Update gradient estimates using measured CoM values\n dx[mask] -= CoMx_rot.ravel()\n dy[mask] -= CoMy_rot.ravel()\n dx[maskInv] = 0\n dy[maskInv] = 0\n\n # Calculate reconstruction update\n update = np.real(np.fft.ifft2( np.fft.fft2(dx)*qxOperator + np.fft.fft2(dy)*qyOperator))\n\n # Apply update\n phase += stepsize*update\n\n # Measure current phase gradients\n dx = (np.roll(phase,(-1,0),axis=(0,1)) - np.roll(phase,(1,0),axis=(0,1))) / 2.\n dy = (np.roll(phase,(0,-1),axis=(0,1)) - np.roll(phase,(0,1),axis=(0,1))) / 2.\n\n # Estimate error from cost function, RMS deviation of gradients\n xDiff = dx[mask] - CoMx_rot.ravel()\n yDiff = dy[mask] - CoMy_rot.ravel()\n error[i] = np.sqrt(np.mean((xDiff-np.mean(xDiff))**2 + (yDiff-np.mean(yDiff))**2))\n\n phase = phase[:R_Nx,:R_Ny]\n\n return phase, error\n\n\n#################### Functions for constructing the e-beam #################\n\ndef construct_illumation(shape, size, keV, aperture, ap_in_mrad=True,\n df=0, cs=0, c5=0, return_qspace=False):\n \"\"\"\n Makes a probe wave function, in the sample plane.\n\n The arguments shape and size together describe a rectangular region in which the\n illumination of the beam is calculated, with the probe placed at the center of this region.\n 
size gives the region size (xsize,ysize), in units of Angstroms.\n shape describes the sampling (Nx,Ny), i.e. the number of pixels spanning the grid, in the x\n and y directions.\n\n Accepts:\n shape (2-tuple of ints) the number of pixels (Nx,Ny) making grid in which\n the illumination is calculated.\n size (2-tuple of floats) the size (xsize,ysize) of the grid, in real space.\n keV (float) the energe of the probe electrons, in keV\n aperture (float) the probe forming aperture size. Units are specified by ap_in_mrad.\n ap_in_mrad (bool) if True, aperture describes the aperture size in mrads, i.e. it\n specifies the convergence semi-angle.\n If False, aperture describes the aperture size in inverse Angstroms\n df (float) probe defocus, in Angstroms, with negative values corresponding to\n overfocus.\n cs (float) the 3rd order spherical aberration coefficient, in mm\n c5 (float) the 5th order spherical aberration coefficient, in mm\n return_qspace (bool) if True, return the probe in the diffraction plane, rather than the\n sample plane.\n \"\"\"\n # Get shapes\n Nx,Ny = shape\n xsize,ysize = size\n\n # Get diffraction space coordinates\n qsize = (float(Nx)/xsize,float(Ny)/ysize)\n qx,qy = make_qspace_coords(shape, qsize)\n qr = np.sqrt(qx**2 + qy**2)\n\n # Get electron wavenumber and aperture size\n k = get_wavenumber(keV*1000)\n if ap_in_mrad is True:\n aperture = np.tan(aperture/1000)*k\n\n # Get the probe\n probe = np.asarray(qr<=aperture, dtype=complex) # Initialize probe\n probe *= np.exp(-1j*sph_aberration(qr, lam=1.0/k, df=df, cs=cs, c5=c5)) # Add aberrations\n if return_qspace is True:\n return probe\n probe = np.fft.ifft2(probe) # Convert to real space\n probe /= np.sqrt(np.sum(np.square(np.abs(probe)))) # Normalize\n return probe\n\ndef sph_aberration(qr, lam, df=0, cs=0, c5=0):\n \"\"\"\n Calculates the aberration function chi as a function of diffraction space radial coordinates qr\n for an electron with wavelength lam.\n\n Note that this function only considers the rotationally symmetric terms of chi (i.e. spherical\n aberration) up to 5th order. Non-rotationally symmetric terms (coma, stig, etc) and higher\n order terms (c7, etc) are not considered.\n\n Accepts:\n qr (float or array) diffraction space radial coordinate(s), in inverse Angstroms\n lam (float) wavelength of electron, in Angstroms\n df (float) probe defocus, in Angstroms\n cs (float) probe 3rd order spherical aberration coefficient, in mm\n c5 (float) probe 5th order spherical aberration coefficient, in mm\n\n Returns:\n chi (float) the aberation function\n \"\"\"\n p = lam*qr\n chi = df*np.square(p)/2.0 + cs*1e7*np.power(p,4)/4.0 + c5*1e7*np.power(p,6)/6.0\n chi = 2*np.pi*chi/lam\n return chi\n\n\n##################### Electron physics functions ########################\n\ndef get_relativistic_mass_correction(E):\n \"\"\"\n Calculates the relativistic mass correction (i.e. the Lorentz factor, gamma) for an electron\n with kinetic energy E, in eV.\n See, e.g., Kirkland, 'Advanced Computing in Electron Microscopy', Eq. 2.2.\n\n Accepts:\n E (float) electron energy, in eV\n\n Returns:\n gamma (float) relativistic mass correction factor\n \"\"\"\n m0c2 = 5.109989461e5 # electron rest mass, in eV\n return (m0c2 + E)/m0c2\n\ndef get_wavenumber(E):\n \"\"\"\n Calculates the relativistically corrected wavenumber k0 (reciprocal of wavelength) for an\n electron with kinetic energy E, in eV.\n See, e.g., Kirkland, 'Advanced Computing in Electron Microscopy', Eq. 
2.5.\n\n Accepts:\n E (float) electron energy, in eV\n\n Returns:\n k0 (float) relativistically corrected wavenumber\n \"\"\"\n hc = 1.23984193e4 # Planck's constant times the speed of light in eV Angstroms\n m0c2 = 5.109989461e5 # electron rest mass, in eV\n return np.sqrt( E*(E + 2*m0c2) ) / hc\n\ndef get_interaction_constant(E):\n \"\"\"\n Calculates the interaction constant, sigma, to convert electrostatic potential (in V Angstroms)\n to radians. Units of this constant are rad/(V Angstrom).\n See, e.g., Kirkland, 'Advanced Computing in Electron Microscopy', Eq. 2.5.\n\n Accepts:\n E (float) electron energy, in eV\n\n Returns:\n m (float) relativistically corrected electron mass\n \"\"\"\n h = 6.62607004e-34 # Planck's constant in Js\n me = 9.10938356e-31 # Electron rest mass in kg\n qe = 1.60217662e-19 # Electron charge in C\n k0 = get_wavenumber(E) # Electron wavenumber in inverse Angstroms\n gamma = get_relativistic_mass_correction(E) # Relativistic mass correction\n return 2*np.pi*gamma*me*qe/(k0*1e-20*h**2)\n\n\n\n####################### Utility functions ##########################\n\n# def make_qspace_coords(shape,qsize):\n# \"\"\"\n# Creates a diffraction space coordinate grid.\n# \n# Number of pixels in the grid (sampling) is given by shape = (Nx,Ny).\n# Extent of the grid is given by qsize = (xsize,ysize), where xsize,ysize are in inverse length\n# units, and are the number of pixels divided by the real space size.\n# \n# Accepts:\n# shape (2-tuple of ints) grid shape\n# qsize (2-tuple of floats) grid size, in reciprocal length units\n# \n# Returns:\n# qx (2D ndarray) the x diffraction space coordinates\n# qy (2D ndarray) the y diffraction space coordinates\n# \"\"\"\n# qx = np.fft.fftfreq(shape[0])*qsize[0]\n# qy = np.fft.fftfreq(shape[1])*qsize[1]\n# return qx,qy\n# \n# def pad_shift(ar, x, y):\n# \"\"\"\n# Similar to np.roll, but designed for special handling of zero padded matrices.\n# \n# In particular, for a zero-padded matrix ar and shift values (x,y) which are equal to\n# or less than the pad width, pad_shift is identical to np.roll.\n# For a zero-padded matrix ar and shift values (x,y) which are greater than the pad\n# width, values of ar which np.roll would 'wrap around' are instead set to zero.\n# \n# For a 1D analog, np.roll and pad_shift are identical in the first case, but differ in the second:\n# \n# Case 1:\n# np.roll(np.array([0,0,1,1,1,0,0],2) = array([0,0,0,0,1,1,1])\n# pad_shift(np.array([0,0,1,1,1,0,0],2) = array([0,0,0,0,1,1,1])\n# \n# Case 2:\n# np.roll(np.array([0,0,1,1,1,0,0],3) = array([1,0,0,0,0,1,1])\n# pad_shift(np.array([0,0,1,1,1,0,0],3) = array([0,0,0,0,0,1,1])\n# \n# Accepts:\n# ar (ndarray) a 2D array\n# x (int) the x shift\n# y (int) the y shift\n# \n# Returns:\n# shifted_ar (ndarray) the shifted array\n# \"\"\"\n# assert isinstance(x,(int,np.integer))\n# assert isinstance(y,(int,np.integer))\n# \n# xend,yend = np.shape(ar)\n# xend,yend = xend-x,yend-y\n# \n# return np.pad(ar, ((x*(x>=0),-x*(x<=0)),(y*(y>=0),-y*(y<=0))),\n# mode='constant')[-x*(x<=0):-x*(x>=0)+xend*(x<=0), \\\n# -y*(y<=0):-y*(y>=0)+yend*(y<=0)]\n# \n# def rotate_point(origin, point, angle):\n# \"\"\"\n# Rotates point counterclockwise by angle about origin.\n# \n# Accepts:\n# origin (2-tuple of floats) the (x,y) coords of the origin\n# point (2-tuple of floats) the (x,y) coords of the point\n# angle (float) the rotation angle, in radians\n# \n# Returns:\n# rotated_point (2-tuple of floats) the (x,y) coords of the rotated point\n# \"\"\"\n# ox,oy = origin\n# px,py = point\n# \n# 
qx = ox + np.cos(angle)*(px-ox) - np.sin(angle)*(py-oy)\n# qy = oy + np.sin(angle)*(px-ox) + np.cos(angle)*(py-oy)\n# \n# return qx,qy\n\n\n\n","sub_path":"process/dpc/dpc.py","file_name":"dpc.py","file_ext":"py","file_size_in_byte":19717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"322519480","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*\n\nimport json\nimport os\n\n\nclass JsonFile:\n\t\"\"\"Represent json files to work on\"\"\"\n\tdef __init__(self, file):\n\t\tself.file = file\n\t\tself.json_name = \"data\" + str(file) + \".json\"\n\t\tself.file_to_list()\n\t\tself.prod_to_sql()\n\n\tJSON_ELTS = []\n\tPROD_SQL = []\n\tLPS_SQL = []\n\tLPC_SQL = []\n\tPROD_COUNT = 0\n\tJSON_COUNT = 0\n\tSQL_FILENB = 0\n\n\t@classmethod\n\tdef get_fnb(cls):\n\t\t\"\"\"Get the number of json files to work on\"\"\"\n\t\tparent = os.path.abspath(os.pardir)\n\t\tnb = len(os.listdir(parent + '\\\\json_update\\\\'))\n\t\treturn nb\n\n\n\n\t@classmethod\n\tdef prod_to_sql(cls):\n\t\t\"\"\"Translate the JSON_ELTS datas\n\t\tfrom JSON to SQL requests\n\n\t\t\"\"\"\n\t\tlast_elt = 0\n\t\tfor prod in JsonFile.JSON_ELTS:\n\t\t\tif prod['OFF_id'] != last_elt:\n\t\t\t\tdata = prod.values()\n\t\t\t\tp_id = prod['OFF_id']\n\t\t\t\tp_name = prod['name']\n\t\t\t\tp_nutrition_grade = prod['nutrition_grade']\n\t\t\t\tp_description = prod['categories'][-1]\n\t\t\t\tp_store = prod['stores']\n\t\t\t\tp_categorie = prod['categories'][0]\n\t\t\t\tprod_str = \"(NULL,\" + str(p_id) + \",\\\"\" + p_name + \"\\\",\\\"\" + p_nutrition_grade + \"\\\",\\\"\" + p_description + \"\\\")\"\n\t\t\t\tlps_str = \"(\" + str(p_id) + \",\\\"\" + p_store + \"\\\")\"\n\t\t\t\tlpc_str = \"(\" + str(p_id) + \",\\\"\" + p_categorie + \"\\\")\"\n\t\t\t\tJsonFile.PROD_COUNT += 1\n\t\t\t\tJsonFile.JSON_COUNT += 1\n\t\t\t\tJsonFile.to_sql_lists(prod_str, lps_str, lpc_str)\n\t\t\t\tlast_elt = prod['OFF_id']\n\t@classmethod\n\tdef to_sql_lists(cls, prod_str, lps_str, lpc_str):\n\t\t\"\"\"Return SQL request in lists for each product\"\"\"\n\t\tif JsonFile.PROD_COUNT == 1000 or JsonFile.JSON_COUNT == len(JsonFile.JSON_ELTS):\n\t\t\tprod_str = prod_str + \";\"\n\t\t\tlps_str = lps_str + \";\"\n\t\t\tlpc_str = lpc_str + \";\"\n\t\t\tJsonFile.PROD_SQL.append(prod_str)\n\t\t\tJsonFile.LPS_SQL.append(lps_str)\n\t\t\tJsonFile.LPC_SQL.append(lpc_str)\n\t\t\tJsonFile.prod_to_fsql()\n\t\t\tJsonFile.lps_to_fsql()\n\t\t\tJsonFile.lpc_to_fsql()\n\t\t\tJsonFile.PROD_COUNT = 0\n\n\t\telif JsonFile.PROD_COUNT % 5 == 0:\n\t\t\tprod_str = prod_str + \",\\n\"\n\t\t\tlps_str = lps_str + \",\\n\"\n\t\t\tlpc_str = lpc_str + \",\\n\"\n\t\t\tJsonFile.PROD_SQL.append(prod_str)\n\t\t\tJsonFile.LPS_SQL.append(lps_str)\n\t\t\tJsonFile.LPC_SQL.append(lpc_str)\n\t\telse:\n\t\t\tprod_str = prod_str + \",\"\n\t\t\tlps_str = lps_str + \",\"\n\t\t\tlpc_str = lpc_str + \",\"\n\t\t\tJsonFile.PROD_SQL.append(prod_str)\n\t\t\tJsonFile.LPS_SQL.append(lps_str)\n\t\t\tJsonFile.LPC_SQL.append(lpc_str)\n\n\t@classmethod\n\tdef prod_to_fsql(cls):\n\t\t\"\"\"Transfer datas from sql data list\n\t\t with the corresponding request to the sql file\n\n\t\t\"\"\"\n\t\tJsonFile.SQL_FILENB += 1\n\t\tparent = os.path.abspath(os.pardir)\n\t\twith open(parent + \"\\\\sql\\\\\" + \"p_requests_\" + str(\n\t\t\tJsonFile.SQL_FILENB) +\".sql\", 'w+', encoding='utf-8') as sql_f:\n\t\t\tinsert_table_p = 'INSERT INTO product_p VALUES \\n'\n\t\t\tsql_f.write(insert_table_p)\n\t\t\tlast_elt = \"\"\n\t\t\tfor sql_elt_p in JsonFile.PROD_SQL:\n\t\t\t\tif 
sql_elt_p != last_elt:\n\t\t\t\t\tsql_f.write(str(sql_elt_p))\n\t\t\t\t\tlast_elt = sql_elt_p\n\t\tJsonFile.PROD_SQL = []\n\n\t@classmethod\n\tdef lps_to_fsql(cls):\n\t\t\"\"\"Transfer datas from sql data list\n\t\t with the corresponding request to the sql file\n\n\t\t\"\"\"\n\t\tparent = os.path.abspath(os.pardir)\n\t\twith open(parent + \"\\\\sql\\\\\" + \"table_lps_requests_\" + str(\n\t\t\tJsonFile.SQL_FILENB) +\".sql\", 'w+', encoding='utf-8') as sql_f:\n\t\t\tinsert_table_lps = 'INSERT INTO link_product_store_ps VALUES \\n'\n\t\t\tsql_f.write(insert_table_lps)\n\t\t\tlast_elt = \"\"\n\t\t\tfor sql_elt_lps in JsonFile.LPS_SQL:\n\t\t\t\tif sql_elt_lps != last_elt:\n\t\t\t\t\tsql_f.write(str(sql_elt_lps))\n\t\t\t\t\tlast_elt = sql_elt_lps\n\t\tJsonFile.LPS_SQL = []\n\n\t@classmethod\n\tdef lpc_to_fsql(cls):\n\t\t\"\"\"Transfer datas from sql data list\n\t\t with the corresponding request to the sql file\n\n\t\t\"\"\"\n\t\tparent = os.path.abspath(os.pardir)\n\t\twith open(parent + \"\\\\sql\\\\\" + \"table_lpc_requests_\" + str(\n\t\t\tJsonFile.SQL_FILENB) +\".sql\", 'w+', encoding='utf-8') as sql_f:\n\t\t\tinsert_table_lpc = 'INSERT INTO link_product_category_pc VALUES \\n'\n\t\t\tsql_f.write(insert_table_lpc)\n\t\t\tlast_elt = \"\"\n\t\t\tfor sql_elt_lpc in JsonFile.LPC_SQL:\n\t\t\t\tif sql_elt_lpc != last_elt:\n\t\t\t\t\tsql_f.write(str(sql_elt_lpc))\n\t\t\t\t\tlast_elt = sql_elt_lpc\n\t\tJsonFile.LPC_SQL = []\n\n\tdef file_to_list(self):\n\t\t\"\"\"get the data from the file, and put it in the JSON_ELTS list\"\"\"\n\t\tparent = os.path.abspath(os.pardir)\n\t\twith open(parent + \"\\\\json_update\\\\\" + self.json_name, 'r') as file:\n\t\t\tf = json.load(file)\n\t\t\tfor elt in f:\n\t\t\t\tJsonFile.JSON_ELTS.append(elt)\n\n\n\ndef browse_jfiles():\n\tjfiles = []\n\tfile_nb = JsonFile.get_fnb()\n\tfor nb in range(1, file_nb + 1):\n\t\tjfiles.append(JsonFile(nb))","sub_path":"script/prod_json_to_sql.py","file_name":"prod_json_to_sql.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"569365473","text":"from Server import app, render_template, flask_login, users\nfrom ServerFunction import Folder\nimport re\n\nTITLE: str = None\nUPLOAD_STATUS: str = False\nUPLOAD_FOLDER: str = \"/media/uploads\"\n\n\n@app.route('/animation', defaults={\"path\": ''})\n@app.route('/animation/')\n@flask_login.login_required\ndef animation_detail(path):\n current_user = str(flask_login.current_user.id)\n split = re.findall(r\"([^\\/^\\n]+)\", path)\n is_view_function = False\n try:\n int(split[-1])\n is_view_function = True\n except ValueError:\n pass\n except IndexError:\n pass\n\n if not is_view_function:\n path = f\"/media/animation/{path}\"\n data_folder = Folder(path)\n print(data_folder.short_name)\n print(users.check_animation(current_user, data_folder.short_name))\n if not data_folder.animation:\n return render_template(\"animation.html\",\n ani_data=data_folder,\n is_upload=UPLOAD_STATUS,\n )\n else:\n return render_template(\"animation_detail.html\",\n ani_dict=data_folder,\n _user_watched=users.check_animation(current_user, data_folder.short_name)\n )\n else:\n path = f\"/media/animation/{''.join([x + '/' for x in split[:-1]])}\".strip()\n data_folder = Folder(path)\n users.update_animation(current_user, data_folder.short_name, int(split[-1]))\n return render_template(\n \"animation_view.html\",\n ani_dict=data_folder,\n _user_watched=users.check_animation(current_user, data_folder.short_name),\n 
current_number=int(split[-1])\n )\n","sub_path":"Server/_animation.py","file_name":"_animation.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"99022273","text":"import sys\r\n\r\nfrom yahoo_fin import stock_info as si\r\nfrom datetime import *\r\nfrom win10toast import ToastNotifier \r\nimport pandas as pd\r\n\r\nfile = pd.read_csv(r'C:\\\\Users\\\\jackf\\\\Desktop\\\\Python\\\\Test Data\\\\Watchlist.csv')\r\ntickers = file.iloc[:,0]\r\nprice = file.iloc[:,1]\r\ntrigger = file.iloc[:,2]\r\n\r\n#for loop to get live price\r\nfor ticker in tickers:\r\n if ticker == 'AAPL':\r\n apple = round(si.get_live_price(ticker),2)\r\n elif ticker == 'NVDA':\r\n nvidia = round(si.get_live_price(ticker),2)\r\n elif ticker == 'DFEN':\r\n dfen = round(si.get_live_price(ticker),2)\r\n\r\n\r\n\r\ncurrent_time = datetime.datetime.now().strftime('%I:%M:%S')\r\ntoast = ToastNotifier()\r\ntoast.show_toast(\"Stock Update Notification\", f\"As of {current_time} stock prices are as follows: \\n AAPL ${apple} \\n NVDA ${nvidia} \\n DFEN ${dfen}\", duration= 20)\r\n","sub_path":"stock price push.py","file_name":"stock price push.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"369391937","text":"import random\n\nk = int(input(\"Введіть k = \"))\n\narr = []\narrFilter = []\n# counterMax = counterMin = 0\n\nfor i in range(10):\n arr.append(int(random.random() * 100))\n\n# for i in arr:\n# if i > k:\n# counterMax += 1\n#\n# for i in arr:\n# if i < k:\n# counterMin += 1\n\n\ndef maxMin (k, type, array):\n\n counter = 0\n\n for i in array:\n if type == 'max':\n if i > k:\n counter += 1\n elif type == 'min':\n if i < k:\n counter += 1\n return counter\n\n\nprint(\"\\n\", arr)\nprint(\"\\nкількість більших = \", maxMin(k, 'max', arr))\nprint(\"кількість менших = \", maxMin(k, 'min', arr))\n\n# print(\"\\nкількість більших = \", counterMax)\n# print(\"кількість менших = \", counterMin)","sub_path":"Lab4(4).py","file_name":"Lab4(4).py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"211418556","text":"'''必要なデータをインポート'''\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport sys\r\nfrom PIL import Image\r\nimport subprocess\r\n\r\n'''データフローグラフの構築'''\r\nwith tf.device('/cpu:0'):\r\n\tx = tf.placeholder(tf.float32, shape=[None,784])\r\n\t\r\n\t# 784ノード -> 625ノード\r\n\tw_h = tf.Variable(tf.random_normal([784,625], mean=0.0, stddev=0.05))\r\n\tb_h = tf.Variable(tf.zeros([625]))\r\n\th = tf.matmul(x,w_h) + b_h\r\n\th = tf.sigmoid(h)\r\n\t\r\n\t# 625ノード -> 10ノード\r\n\tw_o = tf.Variable(tf.random_normal([625,10], mean=0.0, stddev=0.05))\r\n\tb_o = tf.Variable(tf.zeros([10]))\r\n\tout = tf.matmul(h,w_o) + b_o\r\n\t\r\n\tlogit = tf.argmax(out,1)\r\n\t'''データフローグラフ構築終了'''\r\n\r\n\t'''作成したデータフローグラフに値を流す'''\r\n\twith tf.Session() as sess:\r\n\t\tsubprocess.call(['display', '/home/matsumura/GPU_lecture/mnist_data/'+sys.argv[1]+'.png'])\r\n\t\t# 初期化\r\n\t\tsaver = tf.train.Saver()\r\n\t\tsaver.restore(sess,'/home/matsumura/GPU_lecture/model/mnist')\r\n\t\t\r\n\t\t# 画像読み込み\r\n\t\timg = np.array(Image.open('/home/matsumura/GPU_lecture/mnist_data/'+sys.argv[1]+'.png'))\r\n\t\timg = np.reshape(img,[-1,784])\r\n\t\t\r\n\t\tinference = sess.run(logit,feed_dict={x:img})\r\n\t\tprint('\\n result : 
%d\\n'%inference)","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"327177717","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport sys\nfrom reportlab.graphics import renderPDF\nfrom reportlab.graphics.barcode.qr import QrCodeWidget\nfrom reportlab.graphics.shapes import Drawing\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import mm\nfrom reportlab.platypus import (\n Image,\n Paragraph,\n SimpleDocTemplate,\n Spacer,\n Table,\n )\nfrom .letter_design import STYLES\nfrom .letter_processor import ProcessedText\nfrom .models import Letterhead, ContentTemplate, Letter\n\n\nclass NumberedCanvas(canvas.Canvas):\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor\"\"\"\n canvas.Canvas.__init__(self, *args, **kwargs)\n self._saved_page_states = []\n\n def showPage(self):\n self._saved_page_states.append(dict(self.__dict__))\n self._startPage()\n\n def save(self):\n \"\"\"add page info to each page (page x of y)\"\"\"\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n canvas.Canvas.showPage(self)\n canvas.Canvas.save(self)\n\n def draw_page_number(self, page_count):\n # Change the position of this to wherever you want the page number to be\n self.drawRightString(\n 195 * mm,\n 10 * mm,\n \"Page %d of %d\" % (self._pageNumber, page_count)\n )\n\n\nclass LetterCanvas(object):\n def __init__(self, letterhead, content_template, letter, response_FLO):\n \"\"\"Constructor\"\"\"\n self.letterhead = letterhead\n self.content_template = content_template\n self.letter = letter\n self.response_FLO = response_FLO\n self.pagesize = A4\n self.width, self.height = self.pagesize\n\n def run(self):\n \"\"\"\n Run the report\n \"\"\"\n self.doc = SimpleDocTemplate(\n self.response_FLO,\n rightMargin=self.letterhead.right_margin*mm,\n leftMargin=self.letterhead.left_margin*mm,\n topMargin=self.letterhead.top_margin*mm,\n bottomMargin=self.letterhead.bottom_margin*mm,\n pagesize=self.pagesize,\n )\n self.elements = [Spacer(1, 67*mm)]\n self.insert_content()\n self.doc.build(\n self.elements,\n onFirstPage=self.first_page,\n onLaterPages=self.subsequent_pages,\n canvasmaker=NumberedCanvas\n )\n\n def first_page(self, canvas, doc):\n \"\"\"\n Defines layout for the first page of our letter.\n \"\"\"\n # Save the state of our canvas so we can draw on it\n canvas.saveState()\n\n # Logo block\n logo = Image(\n self.letterhead.logo.image,\n width=self.letterhead.logo_width*mm,\n height=self.letterhead.logo_height*mm\n )\n logo.wrapOn(canvas, doc.width/2.0, doc.height)\n logo.drawOn(\n canvas,\n self.letterhead.logo_x*mm,\n (297-self.letterhead.logo_y-self.letterhead.logo_height)*mm\n )\n\n # Return address block\n ptext = \"
\".join([line for line in self.letterhead.return_contacts.split('\\n')])\n p = Paragraph(ptext, STYLES['ReturnAddress'])\n p.wrapOn(\n canvas,\n doc.width/3.0,\n doc.height\n )\n p.drawOn(\n canvas,\n self.letterhead.return_contacts_x*mm,\n (257-self.letterhead.return_contacts_y)*mm\n )\n\n # Reference block\n ptext = \"Your reference: \" + self.letter.your_reference\n p = Paragraph(ptext, STYLES['ReturnAddress'])\n p.wrapOn(\n canvas,\n doc.width/3.0,\n doc.height\n )\n p.drawOn(\n canvas,\n self.letterhead.your_reference_x*mm,\n (257-self.letterhead.your_reference_y)*mm\n )\n ptext = \"Our reference: \" + self.letter.our_reference\n p = Paragraph(ptext, STYLES['ReturnAddress'])\n p.wrapOn(\n canvas,\n doc.width/3.0,\n doc.height\n )\n p.drawOn(\n canvas,\n self.letterhead.our_reference_x*mm,\n (257-self.letterhead.our_reference_y)*mm\n )\n\n # Recipient address block\n ptext = \"\" + \"
\".join(\n [\n (\" \").join([\n self.letter.addressee_title,\n self.letter.addressee_first_name,\n self.letter.addressee_second_name,\n ]),\n self.letter.address_1, \n self.letter.address_2, \n self.letter.address_3, \n self.letter.town, \n self.letter.postcode, \n \"
\",\n ]\n )\n p = Paragraph(ptext, STYLES['Normal'])\n p.wrapOn(canvas, doc.width-300, doc.height)\n p.drawOn(canvas, 15*mm, 197*mm)\n\n # Footer\n # See http://stackoverflow.com/a/13132282\n qr_code = QrCodeWidget(self.letter.barcode)\n drawing = Drawing(45, 45)\n drawing.add(qr_code)\n renderPDF.draw(drawing, canvas, 1, 1)\n footer = Paragraph(self.letter.barcode, STYLES['Normal'])\n w, h = footer.wrap(doc.width, doc.bottomMargin)\n footer.drawOn(canvas, doc.leftMargin+50, h)\n\n # Release the canvas\n canvas.restoreState()\n\n def subsequent_pages(self, canvas, doc):\n \"\"\"\n Defines layout for all pages of our letter but the first.\n \"\"\"\n # Save the state of our canvas so we can draw on it\n canvas.saveState()\n\n # Header\n header = Paragraph(self.content_template.name, STYLES['Normal'])\n w, h = header.wrap(doc.width, doc.topMargin)\n header.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin + doc.bottomMargin - h*mm)\n\n # Footer\n qr_code = QrCodeWidget(self.letter.barcode)\n drawing = Drawing(45, 45)\n drawing.add(qr_code)\n renderPDF.draw(drawing, canvas, 1, 1)\n footer = Paragraph(self.letter.barcode, STYLES['Normal'])\n w, h = footer.wrap(doc.width, doc.bottomMargin)\n footer.drawOn(canvas, doc.leftMargin+50, h)\n\n # Release the canvas\n canvas.restoreState()\n\n def insert_content(self):\n \"\"\"\n Inserts the flowable elements into our letter.\n \"\"\"\n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for full list of functionality.\n self.elements.append(Paragraph(self.letter.date_sent.strftime(\"%d %B %Y\"), STYLES['DateLine']))\n self.elements.append(Paragraph(self.letter.letter_title, STYLES['LetterTitle']))\n if self.letter.addressee_organisation and not self.letter.addressee_title:\n salutation = \"Dear sir or madam,\"\n sign_off = \"Yours faithfully,\"\n else:\n salutation = (\" \").join([\n self.letter.addressee_title,\n self.letter.addressee_second_name,\n ])\n salutation += \",\"\n sign_off = \"Yours sincerely,\"\n self.elements.append(Paragraph(salutation, STYLES['Salutation']))\n flowable_text = ProcessedText(\n self.content_template,\n self.letter\n ).process().decode('utf-8')\n for i, par in enumerate(flowable_text.split('\\n')):\n self.elements.append(Paragraph(par, STYLES['LetterBody']))\n self.elements.append(Paragraph(sign_off, STYLES['Salutation']))\n self.elements.append(Paragraph(self.letter.sender_name, STYLES['Signature']))\n self.elements.append(Paragraph(self.letter.sender_title, STYLES['SignatoryTitle']))\n","sub_path":"api/correspondence/letters/letter_builder.py","file_name":"letter_builder.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"258210775","text":"from django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\n\n\nclass SendMail:\n from_email = settings.DEFAULT_FROM_EMAIL\n\n def __init__(self, template, context, subject, to_email):\n self.template = template\n self.to_email = to_email\n self.context = context\n self.subject = subject\n\n def _compose_mail(self):\n html_body = render_to_string(self.template, self.context)\n subject = self.subject\n to_email = self.to_email\n message = EmailMessage(\n subject=subject, body=html_body, from_email=SendMail.from_email, to=to_email\n )\n message.content_subtype = \"html\"\n return message\n\n def send(self):\n mail = self._compose_mail()\n 
mail.send(fail_silently=True)\n return True\n","sub_path":"sendit/apps/core/helpers/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"383800465","text":"class Vehicle:\n\n def __init__(self, the_license, the_year):\n self.license = the_license\n self.year = the_year\n self.fee = 0.0\n self.weight = 0.0\n\n def __str__(self):\n return \"Vehicle: {} {} Weight={:.2f} Fee=${:.2f}\".format(self.license,\n self.year, self.weight, self.fee)\n\n def get_license(self):\n return self.license\n\n def get_year(self):\n return self.year\n\n def get_weight(self):\n return self.weight\n\n def get_fee(self):\n return self.fee\n\n def set_fee(self, the_fee):\n self.fee = the_fee\n\n def set_weight(self, the_weight):\n self.weight = the_weight\n\n\nC_WEIGHT1 = 3000\nC_WEIGHT2 = 5000\nC_FEE1 = 30\nC_FEE2 = 40\nC_FEE3 = 50\n\nclass Car(Vehicle):\n def __init__(self, the_license, the_year, the_style): \n Vehicle.__init__(self, the_license, the_year)\n self.style = the_style\n\n def __str__(self):\n return \"Car: {} {} {} Weight={:.2f} Fee=${:.2f}\".format(self.get_license(), self.get_year(), self.style, self.get_weight(), self.get_fee())\n \n def set_weight(self, the_weight):\n Vehicle.set_weight(self, the_weight)\n if the_weight < C_WEIGHT1:\n self.set_fee(C_FEE1)\n elif the_weight < C_WEIGHT2:\n self.set_fee(C_FEE2)\n else:\n self.set_fee(C_FEE3)\n\n\nT_WEIGHT1 = 3000\nT_WEIGHT2 = 5000\nT_WEIGHT3 = 10000\n\nT_FEE1 = 40\nT_FEE2 = 50\nT_FEE3 = 60\nT_FEE4 = 70\n\nclass Truck(Vehicle):\n \n def __init__(self, the_license, the_year, the_wheels):\n Vehicle.__init__(self, the_license, the_year) \n self.wheels = the_wheels\n\n\n def set_weight(self, the_weight):\n Vehicle.set_weight(self, the_weight)\n\n if the_weight < T_WEIGHT1:\n self.set_fee(T_FEE1)\n elif the_weight < T_WEIGHT2:\n self.set_fee(T_FEE2)\n elif the_weight < T_WEIGHT3: \n self.set_fee(T_FEE3)\n else:\n self.set_fee(T_FEE4)\n\n def __str__(self):\n return \"Truck: {} {} {} wheels Weight={:.2f} Fee=${:.2f}\".format(self.get_license(),self.get_year(), self.wheels, self.get_weight(), self.get_fee())\n\n\nM_CC1 = 50\nM_CC2 = 200\nM_FEE1 = 10\nM_FEE2 = 20\nM_FEE3 = 35\n\nclass Motorbike(Vehicle):\n def __init__(self, the_license, the_year):\n Vehicle.__init__(self, the_license, the_year)\n self.cc = 0\n\n def __str__(self):\n return \"Motorbike: {} {} {} cc Fee=${:.2f}\".format(self.get_license(), self.get_year(), self.cc, self.get_fee())\n\n def get_CC(self):\n return self.cc\n\n def set_CC(self, the_cc):\n self.cc = the_cc\n if self.cc < M_CC1:\n self.set_fee(M_FEE1)\n elif self.cc < M_CC2: \n self.set_fee(M_FEE2)\n else:\n self.set_fee(M_FEE3)\n\n \n\n\ndef main():\n \n # Create some vehicles\n v1 = Vehicle(\"AB 123\", 2010)\n c1 = Car(\"SF 735\", 2007, \"Station\")\n t1 = Truck(\"TU 765\", 1994, 6)\n b1 = Motorbike(\"XY 666\", 2005)\n\n c1.set_weight(3500)\n t1.set_weight(9000)\n b1.set_CC(250)\n\n # Print info\n print(v1)\n print(c1)\n print(\"The weight of the car is: {:.2f}\".format(c1.get_weight() ))\n print(t1)\n print(\"The fee for the truck is: {:.2f}\".format(t1.get_fee() ))\n print(b1)\n print(\"The CC of the bike is: {:.2f}\".format(b1.get_CC() ))\n print()\n\n #Put the four vehicles into a list. 
\n # Then loop through the list and call the print function for each of the vehicles.\n # You have to implement this part of the main program!\n\n vehicles = [v1, c1, t1, b1]\n for vehicle in vehicles:\n print(vehicle)\n\n v1 = c1\n print(v1)\n print()\n\nmain()\n","sub_path":"Forritun/Heimadæmi/Vehicle/vehicleskennarar.py","file_name":"vehicleskennarar.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"633575374","text":"\"\"\"\nBuild and run any AuTuMN model, storing the outputs\n\"\"\"\nimport os\nimport yaml\nfrom datetime import datetime\n\nfrom autumn.post_processing.processor import post_process, validate_post_process_config\n\nfrom autumn import constants\nfrom autumn.plots import save_flows_sheets, plot_scenarios, validate_plot_config\nfrom autumn.tool_kit.timer import Timer\nfrom autumn.tool_kit.scenarios import Scenario\nfrom autumn.tool_kit.utils import (\n get_git_branch,\n get_git_hash,\n)\nfrom autumn.db.models import store_run_models\n\nfrom summer.model.utils.flowchart import create_flowchart\n\n\ndef build_model_runner(\n model_name: str, build_model, params: dict, post_processing_config={}, plots_config={}\n):\n \"\"\"\n Factory function that returns a 'run_model' function.\n \"\"\"\n assert model_name, \"Value 'model_name' must be set.\"\n assert build_model, \"Value 'build_model' must be set.\"\n assert params, \"Value 'params' must be set.\"\n\n def run_model(run_name=\"model-run\", run_description=\"\"):\n \"\"\"\n Run the model, save the outputs.\n \"\"\"\n print(f\"Running {model_name}...\")\n if post_processing_config:\n validate_post_process_config(post_processing_config)\n\n if plots_config:\n validate_plot_config(plots_config)\n\n # Ensure project folder exists.\n project_dir = os.path.join(constants.DATA_PATH, model_name)\n timestamp = datetime.now().strftime(\"%d-%m-%Y--%H-%M-%S\")\n output_dir = os.path.join(project_dir, f\"{run_name}-{timestamp}\")\n os.makedirs(output_dir, exist_ok=True)\n\n # Determine where to save model outputs\n output_db_path = os.path.join(output_dir, \"outputs.db\")\n\n # Save model parameters to output dir.\n param_path = os.path.join(output_dir, \"params.yml\")\n with open(param_path, \"w\") as f:\n yaml.dump(params, f)\n\n # Save model run metadata to output dir.\n meta_path = os.path.join(output_dir, \"meta.yml\")\n metadata = {\n \"name\": run_name,\n \"description\": run_description,\n \"start_time\": timestamp,\n \"git_branch\": get_git_branch(),\n \"git_commit\": get_git_hash(),\n }\n with open(meta_path, \"w\") as f:\n yaml.dump(metadata, f)\n\n with Timer(\"Running model scenarios\"):\n num_scenarios = 1 + len(params[\"scenarios\"].keys())\n scenarios = []\n for scenario_idx in range(num_scenarios):\n scenario = Scenario(build_model, scenario_idx, params)\n scenarios.append(scenario)\n\n # Run the baseline scenario.\n baseline_scenario = scenarios[0]\n baseline_scenario.run()\n baseline_model = baseline_scenario.model\n\n # Run all the other scenarios\n for scenario in scenarios[1:]:\n scenario.run(base_model=baseline_model)\n\n with Timer(\"Saving model outputs to the database\"):\n models = [s.model for s in scenarios]\n store_run_models(models, output_db_path, powerbi=False)\n\n if post_processing_config:\n with Timer(\"Applying post-processing to model outputs\"):\n # Calculate generated outputs with post-processing.\n for scenario in scenarios:\n scenario.generated_outputs = post_process(\n scenario.model, 
post_processing_config\n )\n\n if plots_config:\n\n with Timer(\"Creating plots\"):\n # Plot all scenario outputs.\n plot_dir = os.path.join(output_dir, \"plots\")\n\n try:\n create_flowchart(models[0])\n except:\n pass\n\n os.makedirs(plot_dir, exist_ok=True)\n plot_scenarios(scenarios, plot_dir, plots_config)\n\n # Save some CSV sheets describing the baseline model.\n save_flows_sheets(baseline_model, output_dir)\n\n return run_model\n","sub_path":"autumn/model_runner.py","file_name":"model_runner.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"320092638","text":"import pymysql\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport selenium\nimport datetime\nimport threading\nimport gc\n\n\nclass StockItemList:\n\n\n def GetStockItems(self, idx, driver, page, topid, categoryid, category, url):\n\n print(idx, category)\n driver.get('about:blank')\n driver.execute_script(\"Object.defineProperty(navigator, 'plugins', {get: function() {return[1, 2, 3, 4, 5];},});\")\n driver.get(url)\n\n html = driver.page_source\n\n # html = urllib.request.urlopen(targeturl).read();\n soup = BeautifulSoup(html, \"html.parser\")\n #driver.get_screenshot_as_file('test2.png')\n\n # 우측 메뉴에서 topname 을 찾은 후, 그 하위에 있는 li 값 가져오도록 함\n items = soup.find_all('a', {'class': 'product-link'})\n\n print('idx : %d - page : %d - total item : %d [%s]' % (idx, page, len(items), datetime.datetime.now()))\n\n conn = pymysql.connect(host='127.0.0.1', user='root', password='im39841!', db='tjmax')\n curs = conn.cursor()\n\n for item in items:\n item_url = 'https://tjmaxx.tjx.com' + item['href']\n img_front = item.find('img')\n if img_front != None:\n item_front = 'https:' + img_front['src']\n\n img_back = item.find('img')\n if img_back != None:\n item_back = 'https:' + img_back['src']\n\n sql = \"insert into category_itemurl (TopID, CategoryID, Category, URL, img_front, img_back) \\\n values(%s, %s, %s ,%s, %s, %s)\"\n sql_val = (topid, categoryid, category, item_url, item_front, item_back)\n\n curs.execute(sql, sql_val)\n conn.commit()\n\n curs.close()\n conn.close()\n\n\n\n isnext = soup.find('li', {'class' : 'next'})\n\n if isnext != None:\n alink = isnext.find('a')\n\n if not ('inactive' in alink['class']):\n self.GetStockItems(idx, driver, page+1, topid, categoryid, category, 'https://tjmaxx.tjx.com' + alink['href'])\n\n\n def Init(self, ary, idx):\n path = \"C:\\Python36-32\\chromedriver\\chromedriver.exe\"\n\n # headless 옵션\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('windows-size=1920*1080')\n options.add_argument(\"lang=eng\")\n options.add_argument('disable-gpu')\n\n # user agent 값 변경.\n options.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\n driver = webdriver.Chrome(path, chrome_options=options)\n\n # https://stackoverflow.com/questions/48941260/what-does-selenium-set-script-timeoutn-do-and-how-is-it-different-from-driver?noredirect=1&lq=1\n driver.set_page_load_timeout(600) # 60 sec\n\n # 3초 지연\n driver.implicitly_wait(3)\n\n conn = pymysql.connect(host='127.0.0.1', user='root', password='im39841!', db='tjmax')\n curs = conn.cursor()\n\n try:\n sql = \"select topid, categoryid, category, url From temp_category where LEAF_NODE=1 and idx in %s\" % str(tuple(ary))\n curs.execute(sql)\n datas = curs.fetchall()\n\n for data in 
datas:\n self.GetStockItems(idx, driver, 1, data[0], data[1], data[2], data[3])\n gc.collect()\n # print('{}, {}, {}'.format(data[0], data[1], data[2]))\n except:\n print(\"SQL Error \")\n\n curs.close()\n conn.close()\n\n\n\n","sub_path":"venv/StockList.py","file_name":"StockList.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"392691322","text":"##\n## data_loader.py\n## Load in brick/ball/cylinder examples for programming challenge. \n##\n\n\nimport numpy as np\nfrom easydict import EasyDict\nimport glob\nimport cv2\nimport pdb\n\ndef data_loader(label_indices, \n channel_means, \n train_test_split = 0.7, \n input_image_size = (227, 227), \n data_path = '../data'):\n\n '''\n Load, resize, subtract mean, and store data in easydicts.\n '''\n\n num_classes = len(label_indices)\n\n #Covert Channel means list to array\n channel_means = np.array(channel_means)\n\n #Pull in image filenames:\n im_paths = glob.glob('CK+/Emotion_labels/Emotion/*/*/*.txt')\n # rs_paths = glob.glob('extended-cohn-kanade-images/cohn-kanade-images/*/*/*.png')\n # im_paths = glob.glob(data_path + '/*/*.jpg')\n\n #Train test split\n num_training_examples = int(np.round(train_test_split*len(im_paths)))\n num_testing_examples = len(im_paths) - num_training_examples\n\n random_indices = np.arange(len(im_paths))\n np.random.shuffle(random_indices)\n\n training_indices = random_indices[:num_training_examples]\n testing_indices = random_indices[num_training_examples:]\n\n #Make easydicts for data\n data = EasyDict()\n data.train = EasyDict()\n data.test = EasyDict()\n\n # Make empty arrays to hold data:\n data.train.X = np.zeros((num_training_examples, input_image_size[0], input_image_size[1], 3), \n dtype = 'float32')\n data.train.y = np.zeros((num_training_examples, num_classes), dtype = 'float32')\n\n data.test.X = np.zeros((num_testing_examples, input_image_size[0], input_image_size[1], 3), \n dtype = 'float32')\n data.test.y = np.zeros((num_testing_examples, num_classes), dtype = 'float32')\n\n for count, index in enumerate(training_indices):\n image_path = \"extended-cohn-kanade-images/cohn-kanade-images/\"+ \"\\\\\".join(im_paths[0].split(\"\\\\\")[1:]).replace('_emotion.txt','.png')\n im = cv2.imread(image_path)\n # im = cv2.imread(im_paths[index])\n # pdb.set_trace()\n im = cv2.resize(im, (input_image_size[1], input_image_size[0]))\n data.train.X[count, :, :, :] = im\n f = open(im_paths[index],\"r\")\n cat = f.read()\n class_name = int(cat.replace('e+','').replace(' ','').replace('0','').replace('.',''))\n data.train.y[count, class_name] = 1\n \n for count, index in enumerate(testing_indices):\n image_path = \"extended-cohn-kanade-images/cohn-kanade-images/\"+ \"\\\\\".join(im_paths[0].split(\"\\\\\")[1:]).replace('_emotion.txt','.png')\n im = cv2.imread(image_path)\n im = cv2.resize(im, (input_image_size[1], input_image_size[0]))\n data.test.X[count, :, :, :] = im\n f = open(im_paths[index],\"r\")\n cat = f.read()\n class_name = int(cat.replace('e+','').replace(' ','').replace('0','').replace('.',''))\n data.test.y[count, class_name] = 1\n\n print('Loaded', str(len(training_indices)), 'training examples and ', \n str(len(testing_indices)), 'testing examples. 
')\n\n return data","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"433682162","text":"user = 'ala ma kota' #input(\"Podaj znak: \")\r\nuser = user.lower()\r\nkey = [3,4,5,6]\r\nspacja = \" \"\r\nszyfr = \"\"\r\ni = 0\r\nfor znak in user:\r\n if znak == spacja:\r\n szyfr += spacja\r\n continue\r\n for klucz in key:\r\n szyfr += chr((ord(znak)-97+klucz[i])%26+97)\r\n i = (i+1)%4\r\nprint(szyfr)","sub_path":"lab4zad1szyfr.py","file_name":"lab4zad1szyfr.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"289315326","text":"'''\r\nProgram: circularQueue.py\r\nAuthor: Anila Hoxha\r\nLast date modified: 05/16/2020\r\n\r\nWrite a circularQueue class, using a circularly linked list as storage. Test your class\r\nin testing.py by adding, removing several elements. Submit, circularQueue.py and testing.py.\r\n'''\r\n\r\nclass CircularQueue:\r\n # Queue implementation using circularly linked list for storage\r\n class _Node:\r\n __slots__ = '_element', '_next'\r\n\r\n def __init__(self, element, next): # Constructor\r\n self._element = element # Value of the node\r\n self._next = next # Reference to the next node\r\n\r\n def __init__(self): # Constructor\r\n # Create an empty queue\r\n self._tail = None # No initial value\r\n self._size = 0\r\n\r\n def __len__(self): # Find the length of the queue\r\n return self._size # Return the number of elements in the queue\r\n\r\n def is_empty(self): # Check if the queue is empty or not\r\n return self._size == 0 # Return True if the queue is empty\r\n\r\n def first(self):\r\n # Return (but do not remove) the element at the front of the queue\r\n if self.is_empty(): # If the queue is empty\r\n return ('Queue is empty') # If so, return this statement\r\n head = self._tail._next # Head of the queue\r\n return head._element # Return the value of head\r\n\r\n def dequeue(self):\r\n # Remove and return the first elements of the queue\r\n if self.is_empty(): # If the queue is empty\r\n return ('Queue is empty') # If so, return this statement\r\n oldhead = self._tail._next\r\n if self._size == 1: # Removing only element\r\n self._tail = None\r\n else:\r\n self._tail._next = oldhead._next # Bypass the old head\r\n self._size -= 1 # Decrement size by 1\r\n return oldhead._element\r\n\r\n def enqueue(self, e):\r\n # Add an element to the back of the queue\r\n newest = self._Node(e, None) # Node will be new tail node\r\n if self.is_empty(): # If the queue is empty\r\n newest._next = newest # Initialize circularly\r\n else: # If not\r\n newest._next = self._tail._next # New node points to head\r\n self._tail._next = newest # Old tail points to new node\r\n self._tail = newest # New node becomes the tail\r\n self._size += 1 # Increment the queue size by one\r\n\r\n def rotate(self):\r\n # Rotate front element to the back of the queue\r\n if self._size > 0: # If there are elements in the queue\r\n self._tail = self._tail._next # Old head becomes new tail\r\n","sub_path":"midterm/Q2/E/circularQueue.py","file_name":"circularQueue.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378057226","text":"import os\nimport sys\nimport cv2 as cv\nimport numpy\n\n\ndef detect(imagefilename, cascadefilename):\n srcimg = 
cv.imread(imagefilename)\n if srcimg is None:\n print('cannot load image')\n sys.exit(-1)\n dstimg = srcimg.copy()\n cascade = cv.CascadeClassifier(cascadefilename)\n if cascade.empty():\n print('cannnot load cascade file')\n sys.exit(-1)\n objects = cascade.detectMultiScale(srcimg, 1.1, 3)\n\n for (x, y, w, h) in objects:\n srcimg = srcimg[y: y + h, x: x + w]\n\n return srcimg\n\n\nif __name__ == '__main__':\n path = r\"C:\\Users\\oreno\\Downloads\\cnn_image\\cat\"\n list = os.listdir(path)\n for i in list:\n # print(path + \"\\\" + x)\n result = detect(os.path.join(\n path, i), r'C:\\Users\\oreno\\github\\atom_project\\pythont\\CNN\\cat-fancier\\cat-fancier-master\\detector\\models\\cat\\cascade.xml')\n cv.imwrite(os.path.join(\n r'C:\\Users\\oreno\\Downloads\\cnn_image\\cat_square2', i), result)\n","sub_path":"pythont/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"405612925","text":"import farat\n\nimport multiprocessing\nimport sys\n\nANDROID_ISO = 'android-x86-2.3.iso'\n\ndef launch_test( args ):\n vm_id, android_iso, apk = args\n \n farat.LaunchTest(vm_id, android_iso, apk)\n\ndef main():\n if len(sys.argv) < 2:\n print('You need to specify APKs to test as arguments.')\n sys.exit()\n apk_list = sys.argv[1:]\n\n arguments = []\n for d, apk in enumerate(apk_list):\n arguments.append( (d, ANDROID_ISO, apk) )\n \n pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1) # Leave one CPU for system jobs\n \n pool.map(launch_test, arguments)\n\nif __name__ == '__main__':\n main()\n","sub_path":"multiprocessing_apk_test.py","file_name":"multiprocessing_apk_test.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"318672982","text":"# Opening file\nname = input('Enter file name: ')\ntry:\n if len(name) < 1: name = 'mbox-short.txt'\n fhand = open(name)\nexcept:\n print('Invalid input.')\n quit()\n\n# Populating dictionary with time stamp hours\ndic = dict()\nlst = list()\nhrs = list()\nfor line in fhand:\n line = line.strip()\n words = line.split()\n if len(words) < 3:\n continue\n if words[0] == 'From':\n lst.append(words[5])\nfor ts in lst:\n ts = ts.split(':')\n hrs.append(ts[0])\nfor hr in hrs:\n dic[hr] = dic.get(hr,0) + 1\n\n# Print sorted counts by hour\nfor k,v in sorted(dic.items()):\n print(k,v)\n","sub_path":"PythonC2/ex_10_02.py","file_name":"ex_10_02.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"471493997","text":"\"\"\"\nModule which contains the web server related function\nFastAPI routes/classes etc.\n\"\"\"\n\n\nimport asyncio\nimport logging\nimport os\nfrom typing import Dict, Any, List, Optional\n\nimport mimetypes\n\nfrom starlette.types import Scope\nfrom uvicorn import Server\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.responses import FileResponse\nfrom fastapi import (\n Response,\n FastAPI,\n APIRouter,\n Depends,\n HTTPException,\n WebSocket,\n WebSocketDisconnect,\n)\nfrom spotdl.download.progress_handler import NAME_TO_LEVEL, ProgressHandler, SongTracker\n\nfrom spotdl.types.song import Song\nfrom spotdl.types.album import Album\nfrom spotdl.download.downloader import Downloader\nfrom spotdl.utils.search import get_search_results\nfrom spotdl.utils.config import get_spotdl_path\nfrom spotdl._version import 
__version__\n\nALLOWED_ORIGINS = [\n \"http://localhost:8800\",\n \"http://127.0.0.1:8800\",\n \"https://localhost:8800\",\n \"https://127.0.0.1:8800\",\n]\n\n\nclass SPAStaticFiles(StaticFiles):\n \"\"\"\n Override the static files to serve the index.html and other assets.\n \"\"\"\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Serve static files from the SPA.\n\n ### Arguments\n - path: The path to the file.\n - scope: The scope of the request.\n\n ### Returns\n - returns the response.\n \"\"\"\n\n response = await super().get_response(path, scope)\n if response.status_code == 404:\n response = await super().get_response(\".\", scope)\n\n return response\n\n\nclass WSProgressHandler:\n \"\"\"\n Handles song updates.\n \"\"\"\n\n def __init__(self, websocket: WebSocket, client_id: str):\n \"\"\"\n Initialize the WebSocket handler.\n ### Arguments\n - websocket: The WebSocket instance.\n - client_id: The client's ID.\n \"\"\"\n\n self.websocket = websocket\n self.client_id = client_id\n\n async def connect(self):\n \"\"\"\n Called when a new client connects to the websocket.\n \"\"\"\n\n await self.websocket.accept()\n\n # Add the connection to the list of connections\n app_state.ws_instances.append(self)\n\n app_state.logger.info(\"Client %s connected\", self.client_id)\n\n async def send_update(self, update: Dict[str, Any]):\n \"\"\"\n Send an update to the client.\n\n ### Arguments\n - update: The update to send.\n \"\"\"\n\n await self.websocket.send_json(update)\n\n def song_update(self, progress_handler: SongTracker, message: str):\n \"\"\"\n Called when a song updates.\n\n ### Arguments\n - progress_handler: The progress handler.\n - message: The message to send.\n \"\"\"\n\n update_message = {\n \"song\": progress_handler.song.json,\n \"progress\": progress_handler.progress,\n \"message\": message,\n }\n\n asyncio.run_coroutine_threadsafe(\n self.send_update(update_message), app_state.loop\n )\n\n @classmethod\n def get_instance(cls, client_id: str) -> Optional[\"WSProgressHandler\"]:\n \"\"\"\n Get the WebSocket instance for a client.\n\n ### Arguments\n - client_id: The client's ID.\n\n ### Returns\n - returns the WebSocket instance.\n \"\"\"\n\n for instance in app_state.ws_instances:\n if instance.client_id == client_id:\n return instance\n\n app_state.logger.error(\"Client %s not found\", client_id)\n\n return None\n\n\nclass ApplicationState:\n \"\"\"\n Class that holds the application state.\n \"\"\"\n\n api: FastAPI\n server: Server\n loop: asyncio.AbstractEventLoop\n downloader: Downloader\n settings: Dict[str, Any]\n ws_instances: List[WSProgressHandler] = []\n logger: logging.Logger\n\n\nrouter = APIRouter()\napp_state: ApplicationState = ApplicationState()\n\n\ndef get_current_state() -> ApplicationState:\n \"\"\"\n Get the current state of the application.\n\n ### Returns\n - returns the application state.\n \"\"\"\n\n return app_state\n\n\n@router.websocket(\"/api/ws\")\nasync def websocket_endpoint(websocket: WebSocket, client_id: str):\n \"\"\"\n Websocket endpoint.\n ### Arguments\n - websocket: The WebSocket instance.\n \"\"\"\n\n await WSProgressHandler(websocket, client_id).connect()\n\n try:\n while True:\n await websocket.receive_json()\n except WebSocketDisconnect:\n instance = WSProgressHandler.get_instance(client_id)\n if instance:\n app_state.ws_instances.remove(instance)\n\n if (\n len(app_state.ws_instances) == 0\n and app_state.settings[\"keep_alive\"] is False\n ):\n app_state.logger.debug(\n \"No active connections, 
waiting 1s before shutting down\"\n )\n\n await asyncio.sleep(1)\n\n # Wait 5 seconds before shutting down\n # This is to prevent the server from shutting down when a client\n # disconnects and reconnects quickly (e.g. when refreshing the page)\n if len(app_state.ws_instances) == 0:\n # Perform a clean exit\n app_state.logger.info(\"Shutting down server, no active connections\")\n app_state.server.force_exit = True\n app_state.server.should_exit = True\n await app_state.server.shutdown()\n\n\n@router.get(\"/api/song/url\", response_model=None)\ndef song_from_url(url: str) -> Song:\n \"\"\"\n Search for a song on spotify using url.\n\n ### Arguments\n - url: The url to search.\n\n ### Returns\n - returns the first result as a Song object.\n \"\"\"\n\n return Song.from_url(url)\n\n\n@router.get(\"/api/songs/search\", response_model=None)\ndef query_search(query: str) -> List[Song]:\n \"\"\"\n Parse search term and return list of Song objects.\n\n ### Arguments\n - query: The query to parse.\n\n ### Returns\n - returns a list of Song objects.\n \"\"\"\n\n return get_search_results(query)\n\n\n@router.get(\"/api/albums/search\", response_model=None)\ndef query_search_albums(query: str) -> List[Album]:\n \"\"\"\n Parse search term and return list of Album objects.\n\n ### Arguments\n - query: The query to parse.\n\n ### Returns\n - returns a list of Album objects.\n \"\"\"\n\n return Album.list_from_search_term(query)\n\n\n@router.post(\"/api/download/url\")\nasync def download_url(\n url: str, client_id: str, state: ApplicationState = Depends(get_current_state)\n) -> Optional[str]:\n \"\"\"\n Download songs using Song url.\n\n ### Arguments\n - url: The url to download.\n\n ### Returns\n - returns the file path if the song was downloaded.\n \"\"\"\n\n state.downloader.output = str(\n (get_spotdl_path() / f\"web/sessions/{client_id}\").absolute()\n )\n\n ws_instance = WSProgressHandler.get_instance(client_id)\n if ws_instance is not None:\n state.downloader.progress_handler = ProgressHandler(\n NAME_TO_LEVEL[state.settings[\"log_level\"]],\n simple_tui=True,\n update_callback=ws_instance.song_update,\n )\n\n try:\n # Fetch song metadata\n song = Song.from_url(url)\n\n # Download Song\n _, path = await state.downloader.pool_download(song)\n\n if path is None:\n state.logger.error(f\"Failure downloading {song.name}\")\n\n raise HTTPException(\n status_code=500, detail=f\"Error downloading: {song.name}\"\n )\n\n # Strip Filename\n filename = os.path.basename(path)\n\n return filename\n\n except Exception as exception:\n state.logger.error(f\"Error downloading! 
{exception}\")\n\n raise HTTPException(\n status_code=500, detail=f\"Error downloading: {exception}\"\n ) from exception\n\n\n@router.get(\"/api/download/file\")\nasync def download_file(file: str, client_id: str) -> FileResponse:\n \"\"\"\n Download file using path.\n\n ### Arguments\n - file: The file path.\n\n ### Returns\n - returns the file response, filename specified to return as attachment.\n \"\"\"\n\n return FileResponse(\n str((get_spotdl_path() / f\"web/sessions/{client_id}/{file}\").absolute()),\n filename=file,\n )\n\n\n@router.get(\"/api/settings\")\ndef get_settings(\n state: ApplicationState = Depends(get_current_state),\n) -> Dict[str, Any]:\n \"\"\"\n Get the settings.\n\n ### Returns\n - returns the settings.\n \"\"\"\n\n return state.settings\n\n\n@router.post(\"/api/settings/update\")\ndef update_settings(\n settings: Dict[str, Any], state: ApplicationState = Depends(get_current_state)\n) -> Dict[str, Any]:\n \"\"\"\n Change downloader settings by re-initializing the downloader.\n\n ### Arguments\n - settings: The settings to change.\n\n ### Returns\n - returns True if the settings were changed.\n \"\"\"\n\n # Create shallow copy of settings\n settings_cpy = state.settings.copy()\n\n # Update settings with new settings that are not None\n settings_cpy.update({k: v for k, v in settings.items() if v is not None})\n\n state.logger.info(f\"Applying settings: {settings_cpy}\")\n\n # Re-initialize downloader\n state.downloader = Downloader(\n audio_providers=settings_cpy[\"audio_providers\"],\n lyrics_providers=settings_cpy[\"lyrics_providers\"],\n ffmpeg=settings_cpy[\"ffmpeg\"],\n bitrate=settings_cpy[\"bitrate\"],\n ffmpeg_args=settings_cpy[\"ffmpeg_args\"],\n output_format=settings_cpy[\"format\"],\n threads=settings_cpy[\"threads\"],\n output=settings_cpy[\"output\"],\n save_file=settings_cpy[\"save_file\"],\n overwrite=settings_cpy[\"overwrite\"],\n cookie_file=settings_cpy[\"cookie_file\"],\n filter_results=settings_cpy[\"filter_results\"],\n search_query=settings_cpy[\"search_query\"],\n log_level=settings_cpy[\"log_level\"],\n simple_tui=True,\n restrict=settings_cpy[\"restrict\"],\n print_errors=settings_cpy[\"print_errors\"],\n sponsor_block=settings_cpy[\"sponsor_block\"],\n loop=state.loop,\n preserve_original_audio=settings_cpy[\"preserve_original_audio\"],\n )\n\n return settings_cpy\n\n\ndef fix_mime_types():\n \"\"\"Fix incorrect entries in the `mimetypes` registry.\n On Windows, the Python standard library's `mimetypes` reads in\n mappings from file extension to MIME type from the Windows\n registry. 
Other applications can and do write incorrect values\n to this registry, which causes `mimetypes.guess_type` to return\n incorrect values, which causes spotDL to fail to render on\n the frontend.\n This method hard-codes the correct mappings for certain MIME\n types that are known to be either used by TensorBoard or\n problematic in general.\n \"\"\"\n\n # Known to be problematic when Visual Studio is installed:\n # \n # https://github.com/spotDL/spotify-downloader/issues/1540\n mimetypes.add_type(\"application/javascript\", \".js\")\n\n # Not known to be problematic, but used by spotDL:\n mimetypes.add_type(\"text/css\", \".css\")\n mimetypes.add_type(\"image/svg+xml\", \".svg\")\n mimetypes.add_type(\"text/html\", \".html\")\n","sub_path":"spotdl/utils/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"613144334","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nimport bookstore_pb2 as bookstore__pb2\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\n\n\nclass BookstoreStub:\n \"\"\"A simple Bookstore API.\n\n The API manages shelves and books resources. 
Shelves contain books.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.ListShelves = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/ListShelves\",\n request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n response_deserializer=bookstore__pb2.ListShelvesResponse.FromString,\n )\n self.CreateShelf = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/CreateShelf\",\n request_serializer=bookstore__pb2.CreateShelfRequest.SerializeToString,\n response_deserializer=bookstore__pb2.Shelf.FromString,\n )\n self.GetShelf = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/GetShelf\",\n request_serializer=bookstore__pb2.GetShelfRequest.SerializeToString,\n response_deserializer=bookstore__pb2.Shelf.FromString,\n )\n self.DeleteShelf = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/DeleteShelf\",\n request_serializer=bookstore__pb2.DeleteShelfRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.ListBooks = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/ListBooks\",\n request_serializer=bookstore__pb2.ListBooksRequest.SerializeToString,\n response_deserializer=bookstore__pb2.ListBooksResponse.FromString,\n )\n self.CreateBook = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/CreateBook\",\n request_serializer=bookstore__pb2.CreateBookRequest.SerializeToString,\n response_deserializer=bookstore__pb2.Book.FromString,\n )\n self.GetBook = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/GetBook\",\n request_serializer=bookstore__pb2.GetBookRequest.SerializeToString,\n response_deserializer=bookstore__pb2.Book.FromString,\n )\n self.DeleteBook = channel.unary_unary(\n \"/endpoints.examples.bookstore.Bookstore/DeleteBook\",\n request_serializer=bookstore__pb2.DeleteBookRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass BookstoreServicer:\n \"\"\"A simple Bookstore API.\n\n The API manages shelves and books resources. 
Shelves contain books.\n \"\"\"\n\n def ListShelves(self, request, context):\n \"\"\"Returns a list of all shelves in the bookstore.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def CreateShelf(self, request, context):\n \"\"\"Creates a new shelf in the bookstore.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def GetShelf(self, request, context):\n \"\"\"Returns a specific bookstore shelf.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def DeleteShelf(self, request, context):\n \"\"\"Deletes a shelf, including all books that are stored on the shelf.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def ListBooks(self, request, context):\n \"\"\"Returns a list of books on a shelf.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def CreateBook(self, request, context):\n \"\"\"Creates a new book.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def GetBook(self, request, context):\n \"\"\"Returns a specific book.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def DeleteBook(self, request, context):\n \"\"\"Deletes a book from a shelf.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n\ndef add_BookstoreServicer_to_server(servicer, server):\n rpc_method_handlers = {\n \"ListShelves\": grpc.unary_unary_rpc_method_handler(\n servicer.ListShelves,\n request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n response_serializer=bookstore__pb2.ListShelvesResponse.SerializeToString,\n ),\n \"CreateShelf\": grpc.unary_unary_rpc_method_handler(\n servicer.CreateShelf,\n request_deserializer=bookstore__pb2.CreateShelfRequest.FromString,\n response_serializer=bookstore__pb2.Shelf.SerializeToString,\n ),\n \"GetShelf\": grpc.unary_unary_rpc_method_handler(\n servicer.GetShelf,\n request_deserializer=bookstore__pb2.GetShelfRequest.FromString,\n response_serializer=bookstore__pb2.Shelf.SerializeToString,\n ),\n \"DeleteShelf\": grpc.unary_unary_rpc_method_handler(\n servicer.DeleteShelf,\n request_deserializer=bookstore__pb2.DeleteShelfRequest.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n \"ListBooks\": grpc.unary_unary_rpc_method_handler(\n servicer.ListBooks,\n request_deserializer=bookstore__pb2.ListBooksRequest.FromString,\n response_serializer=bookstore__pb2.ListBooksResponse.SerializeToString,\n ),\n \"CreateBook\": grpc.unary_unary_rpc_method_handler(\n servicer.CreateBook,\n request_deserializer=bookstore__pb2.CreateBookRequest.FromString,\n response_serializer=bookstore__pb2.Book.SerializeToString,\n ),\n \"GetBook\": grpc.unary_unary_rpc_method_handler(\n servicer.GetBook,\n 
request_deserializer=bookstore__pb2.GetBookRequest.FromString,\n response_serializer=bookstore__pb2.Book.SerializeToString,\n ),\n \"DeleteBook\": grpc.unary_unary_rpc_method_handler(\n servicer.DeleteBook,\n request_deserializer=bookstore__pb2.DeleteBookRequest.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n \"endpoints.examples.bookstore.Bookstore\", rpc_method_handlers\n )\n server.add_generic_rpc_handlers((generic_handler,))\n","sub_path":"endpoints/bookstore-grpc-transcoding/bookstore_pb2_grpc.py","file_name":"bookstore_pb2_grpc.py","file_ext":"py","file_size_in_byte":7927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"10451667","text":"import os\nimport re\nimport shutil\nimport sys\nfrom collections import defaultdict\nfrom tempfile import TemporaryDirectory\nfrom textwrap import dedent\nfrom typing import Dict\nfrom typing import Iterable\nfrom typing import List\n\nimport mypy.api\n\n\ndef _run_mypy(program: str) -> Iterable[str]:\n with TemporaryDirectory() as tempdirname:\n with open('{}/__main__.py'.format(tempdirname), 'w') as f:\n f.write(program)\n config_file = tempdirname + '/mypy.ini'\n shutil.copyfile(os.path.dirname(__file__) + '/mypy.ini', config_file)\n error_pattern = re.compile(r'^{}:(\\d+): error: (.*)$'.format(re.escape(f.name)))\n stdout, stderr, exit_status = mypy.api.run([\n f.name,\n '--show-traceback',\n '--config-file', config_file,\n ])\n if stderr:\n print(stderr, file=sys.stderr) # allow \"printf debugging\" of the plugin\n\n # Group errors by line\n errors_by_line: Dict[int, List[str]] = defaultdict(list)\n for line in stdout.split('\\n'):\n m = error_pattern.match(line)\n if m:\n errors_by_line[int(m.group(1))].append(m.group(2))\n elif line:\n print(line) # allow \"printf debugging\"\n\n # Reconstruct the \"actual\" program with \"error\" comments\n error_comment_pattern = re.compile(r'(\\s+# E: .*)?$')\n for line_no, line in enumerate(program.split('\\n'), start=1):\n line = error_comment_pattern.sub('', line)\n errors = errors_by_line.get(line_no)\n if errors:\n yield '{}{}'.format(line, ''.join(' # E: {}'.format(error) for error in errors))\n else:\n yield line\n\n\ndef assert_mypy_output(program: str) -> None:\n expected = dedent(program).strip()\n actual = '\\n'.join(_run_mypy(expected))\n assert actual == expected\n","sub_path":"tests/mypy_helpers.py","file_name":"mypy_helpers.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"544521549","text":"#Question number 3:\nimport pandas as pd\nimport geocoder\nimport csv\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import pairwise_distances_argmin\nfrom scipy.spatial.distance import cdist, pdist\n\ndef elbow(df, n):\n kMeansVar = [KMeans(n_clusters=k).fit(df) for k in range(1, n)]\n centroids = [X.cluster_centers_ for X in kMeansVar]\n k_euclid = [cdist(df, cent) for cent in centroids]\n dist = [np.min(ke, axis=1) for ke in k_euclid]\n wcss = [sum(d**2) for d in dist]\n tss = sum(pdist(df)**2)/df.shape[0]\n bss = tss - wcss\n plt.figure()\n plt.xlabel('Clusters')\n plt.title('Variance explained vs K')\n plt.ylabel('Percentage of variance(%)')\n plt.plot(bss)\n plt.show()\n\ndef 
CopyTextMatrixValues(X):\n\ti=0\n\tglobal TypeValues\n\tTypeValues=[]\n\tfor index in X:\n\t\t\n\t\tif X[i,1]:\n\t\t\tXnew=(X[i,1])\n\t\t\tTypeValues.append(Xnew)\n\t\t\ti+=1\n\t\telif X[i,4]:\n\t\t\tXnew=X[i,4]\n\t\t\tTypeValues.append(Xnew)\n\t\t\ti+=1\n\tprint(len(TypeValues))\n\n\nQ3=[]\ncsvfile = open('/Users/breyawalker/Desktop/data/auctions.csv', 'r',encoding='iso-8859-1', newline='')\ncsvreader = csv.reader(csvfile, delimiter = ',')\nfor row in csvreader:\n\trow[7] = float(row[7].replace('$','').replace(',',''))\n\t\n\tQ3.append(row)\n\nQ3df=pd.DataFrame(Q3)\nQ3df.columns=['AUCTIONTYPE','TAXAUTHORITYNAME'\t,'ACCOUNTNUMBER',\t'Address',\t'LEGALDESCRIPTION',\t'OWNERNAME',\t'OWNERADDRESS',\t'PRICE'\t,'AUCTIONSTARTDATE',\t'AUCTIONENDDATE'\t,'URL']\n\nQ3new=Q3df.drop(Q3df.index[0])\n\nTaxType=Q3new.AUCTIONTYPE\nQ3new['PRICE'] = Q3new['PRICE'].replace(',','').replace('$','')\nTaxesOwed=Q3new['PRICE'].values\n\nY=TaxesOwed\nvectorizer= TfidfVectorizer(stop_words='english')\nXvec=vectorizer.fit_transform(TaxType) #create vector of text\n#store text vector values into list\nCopyTextMatrixValues(Xvec)\nX=np.matrix(list(zip(TypeValues,Y)))\nReducedData=[TypeValues,Y]\n#print(ReducedData)\nelbow(X,20)\nkmeans=KMeans(n_clusters=4,random_state=0).fit(X) #two cluster b/c of two tax sale types\nkmeans.fit(X)\n\nprint(kmeans.cluster_centers_)\n# Step size of the mesh. Decrease to increase the quality of the VQ.\n\n# Plot the decision boundary. For that, we will assign a color to each\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\nplt.figure(1)\nplt.clf()\n\nplt.plot(ReducedData[0],ReducedData[1],'ro')\ncentroids = kmeans.cluster_centers_\nplt.scatter(centroids[:, 0], centroids[:, 1],\n marker='o', s=169, linewidths=3, zorder=10)\nplt.title('K-means clustering of Tax Sale Type by by Taxes Owed\\n'\n 'Centroids are marked blue circles')\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\nplt.xlabel('Tax Sale Type')\nplt.ylabel('Taxes Owed ($)')\nplt.show()\n\n\n\n\n","sub_path":"Question3 Data incubator.py","file_name":"Question3 Data incubator.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"622869800","text":"import os\n\nfrom flask import render_template\nfrom flask_mail import Message, Mail\n\nfrom dynaconf import settings\nfrom functools import wraps\n\nfrom .celery import celery\nimport os\nfrom .__init__ import create_app\n\napp = create_app()\nmail = Mail(app)\n\n#send async email\ndef send_email(recipient, subject, template, **kwargs):\n if \"DISABLE_SEND_EMAIL\" not in os.environ:\n send_email_async.delay(recipient, subject, template, **kwargs)\n\n@celery.task\ndef send_email_async(recipient, subject, template, **kwargs):\n with app.app_context():\n msg = Message(\n settings['EMAIL_SUBJECT_PREFIX'] + ' ' + subject,\n sender=settings['EMAIL_SENDER'],\n recipients=[recipient])\n msg.body = render_template(template + '.txt', **kwargs)\n msg.html = render_template(template + '.html', **kwargs)\n \"\"\"images is an array where images[i][0] = 'name of image' \n and images[i][1]= 'path of image' \"\"\"\n if 'images' in kwargs:\n for img in kwargs.get('images'):\n with app.open_resource(img[1]) as fp:\n msg.attach(img[0], \"image/jpg\", fp.read())\n 
mail.send(msg)","sub_path":"app/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"373065882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Utility functions\n\n__author__: Conor Heins, Alexander Tschantz, Brennan Klein\n\"\"\"\n\nimport numpy as np\n\nfrom pymdp.distributions import Categorical, Dirichlet\n\n\ndef obj_array(shape):\n return np.empty(shape, dtype=object)\n\n\ndef onehot(value, num_values):\n arr = np.zeros(num_values)\n arr[value] = 1.0\n return arr\n\n\ndef random_A_matrix(num_obs, num_states):\n if type(num_obs) is int:\n num_obs = [num_obs]\n if type(num_states) is int:\n num_states = [num_states]\n num_modalities = len(num_obs)\n\n A = obj_array(num_modalities)\n for modality, modality_obs in enumerate(num_obs):\n modality_shape = [modality_obs] + num_states\n modality_dist = np.random.rand(*modality_shape)\n A[modality] = norm_dist(modality_dist)\n return A\n\n\ndef random_B_matrix(num_states, num_controls):\n if type(num_states) is int:\n num_states = [num_states]\n if type(num_controls) is int:\n num_controls = [num_controls]\n num_factors = len(num_states)\n assert len(num_controls) == len(num_states)\n\n B = obj_array(num_factors)\n for factor in range(num_factors):\n factor_shape = (num_states[factor], num_states[factor], num_controls[factor])\n factor_dist = np.random.rand(*factor_shape)\n B[factor] = norm_dist(factor_dist)\n return B\n\n\ndef get_model_dimensions(A, B):\n num_obs = [a.shape[0] for a in A] if is_arr_of_arr(A) else [A.shape[0]]\n num_states = [b.shape[0] for b in B] if is_arr_of_arr(B) else [B.shape[0]]\n num_modalities = len(num_obs)\n num_factors = len(num_states)\n return num_obs, num_states, num_modalities, num_factors\n\n\ndef norm_dist(dist):\n if len(dist.shape) == 3:\n new_dist = np.zeros_like(dist)\n for c in range(dist.shape[2]):\n new_dist[:, :, c] = np.divide(dist[:, :, c], dist[:, :, c].sum(axis=0))\n return new_dist\n else:\n return np.divide(dist, dist.sum(axis=0))\n\n\ndef to_numpy(dist, flatten=False):\n \"\"\"\n If flatten is True, then the individual entries of the object array will be \n flattened into row vectors(common operation when dealing with array of arrays \n with 1D numpy array entries)\n \"\"\"\n if isinstance(dist, (Categorical, Dirichlet)):\n values = np.copy(dist.values)\n if flatten:\n if dist.IS_AOA:\n for idx, arr in enumerate(values):\n values[idx] = arr.flatten()\n else:\n values = values.flatten()\n else:\n values = dist\n if flatten:\n if is_arr_of_arr(values):\n for idx, arr in enumerate(values):\n values[idx] = arr.flatten()\n else:\n values = values.flatten()\n return values\n\n\ndef is_distribution(obj):\n return isinstance(obj, (Categorical, Dirichlet))\n\n\ndef is_arr_of_arr(arr):\n return arr.dtype == \"object\"\n\n\ndef to_arr_of_arr(arr):\n if is_arr_of_arr(arr):\n return arr\n arr_of_arr = np.empty(1, dtype=object)\n arr_of_arr[0] = arr.squeeze()\n return arr_of_arr\n\n\ndef to_categorical(values):\n return Categorical(values=values)\n\n\ndef to_dirichlet(values):\n return Dirichlet(values=values)\n\n","sub_path":"pymdp/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"562854981","text":"\"\"\"\nThis script deletes last sheet in year and (or) total workbook\n\nArguments:\nyear_nr: two-digits year number\nif_year: 1 if a 
sheet is to be deleted from the yearly workbook\nif_total: 1 if a sheet is to be deleted from the total workbook\n\"\"\"\n\nimport sys\nfrom openpyxl import load_workbook\n\nyear_nr = sys.argv[1]\nif_year = sys.argv[2]\nif_total = sys.argv[3]\n\n\n# 1. Get paths of year and total data\nfolder_path_file = open(\"../path.txt\", \"r\")\nroot_path = folder_path_file.read()\nyear_folder_path = root_path + \"/yearly data/\"\ntotal_folder_path = root_path + \"/total data/\"\nfolder_path_file.close()\n\n# 2. Delete last sheet in year and total\nif if_year == \"1\":\n year_sheet_path = year_folder_path + \"20\" + year_nr + \".xlsx\"\n\n year_workbook = load_workbook(year_sheet_path)\n last_sheet = year_workbook.sheetnames[-1]\n year_workbook.remove(year_workbook[last_sheet])\n\n year_workbook.save(year_sheet_path)\n\nif if_total == \"1\":\n total_sheet_path = total_folder_path + \"total.xlsx\"\n\n total_workbook = load_workbook(total_sheet_path)\n last_sheet = total_workbook.sheetnames[-1]\n total_workbook.remove(total_workbook[last_sheet])\n\n total_workbook.save(total_sheet_path)\n","sub_path":"util_scripts/delete_last_sheet.py","file_name":"delete_last_sheet.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"204102899","text":"import requests, os, json\nfrom .base_scraper import BaseScraper\nfrom agency_dataaccessor import AgencyDataAccessor\nfrom lighthouse import PageInsightsClient\n\n\nclass SecurityScraper(BaseScraper):\n\n def __init__(self, raw_page_content, url):\n self.page = raw_page_content\n self.url = url\n self.apiClient = PageInsightsClient()\n\n def get_security_privacy_info(self):\n return {\n \"https\": self.get_http_acess(),\n \"hsts\": self.get_hsts(),\n \"privacy_policies\": self.get_privacy_policies()\n }\n\n def get_http_acess(self):\n try:\n lighthouse_results = self.apiClient.get_page_insights(self.url, 'pwa').content['lighthouseResult']\n score = lighthouse_results['audits']['is-on-https']['score']\n is_criteria_met = True if score == 1 else False\n return self.get_criteria_object(score, is_criteria_met)\n except:\n print(\"Error in get_http_acess for\", self.url)\n\n def get_hsts(self):\n try:\n lighthouse_results = self.apiClient.get_page_insights(self.url, 'pwa').content['lighthouseResult']\n score = lighthouse_results['audits']['redirects-http']['score']\n is_criteria_met = True if score == 1 else False\n return self.get_criteria_object(score, is_criteria_met)\n except:\n print(\"Error in get_hsts for\", self.url)\n\n def get_privacy_policies(self):\n is_criteria_met = True if \"privacy policy\" in self.page.text.lower() else False\n return self.get_criteria_object(None, is_criteria_met)\n\n","sub_path":"scrapers/scrapers/security_scraper.py","file_name":"security_scraper.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"248327402","text":"from .cryptomethod import CryptoMethod\n\nclass RailFence(CryptoMethod):\n \"\"\"docstring for CryptoMethod\"\"\"\n def __init__(self, rail_number):\n super(RailFence, self).__init__()\n self.rail_number = rail_number\n if rail_number < 1:\n raise ValueError\n def encrypt(self, message):\n if self.rail_number == 1:\n return message\n rails = self._create_rails(len(message))\n filled_rails = [[message[i] for i in rail] for rail in rails]\n joined_rails = [''.join(rail) for rail in filled_rails]\n return ''.join(joined_rails)\n def 
decrypt(self, message):\n if self.rail_number == 1:\n return message\n decrypted_message = [' ']*len(message)\n rails = self._create_rails(len(message))\n counter = 0\n for rail in rails:\n for i in rail:\n decrypted_message[counter] = message[i]\n counter += 1\n return ''.join(decrypted_message)\n def _create_rails(self, length):\n rails = [[] for i in range(self.rail_number)]\n pointer = 0\n direction = 1\n for i in range(length):\n if pointer == self.rail_number - 1:\n direction = -1\n elif pointer == 0:\n direction = 1\n rails[pointer].append(i)\n pointer += direction\n return rails\n","sub_path":"cryptomethods/railfence.py","file_name":"railfence.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"235614896","text":"import os\nimport argparse\nfrom .ServerCommunicator import ServerCommunicator\nfrom utils import *\nfrom communication import MessageParser\nfrom communication import MessageType\nimport logging\nimport threading\nimport time\n\n\nclass Server:\n __MIN_PERFORMANCE_SCORE_THRESHOLD = 1.5\n __MIN_PERFORMANCE_SCORE = 3.0\n __MAX_UTILIZATION_THRESHOLD = 0.9\n __BATCH_SIZE = 2000000\n __CLIENT_POLL_INTERVAL = 3\n __MAX_GUESS_THRESHOLD = 9999999999\n __MAX_NUMBER_OF_RANGES = 20\n \n \n def __init__(self, args):\n parser = argparse.ArgumentParser()\n parser.add_argument('--target', type=str, required=True)\n parser.add_argument('--host', default='127.0.0.1', type=str)\n parser.add_argument('--port', '-p', default=16720, type=int, dest='port')\n args = parser.parse_args(args)\n\n self.__hash_target = args.target\n self.__ranges_available = []\n self.__hash_result = ''\n\n self.__cpu_performances = []\n self.__queued_ranges = {}\n\n self.__server_communicator = ServerCommunicator(args.host, args.port)\n\n\n def init(self):\n logging.info(f'Initiating server:')\n logging.debug(self.__server_communicator)\n logging.info(f'Target - {self.__hash_target}')\n\n threading.Thread(target=self.accept_clients).start()\n \n logging.info('Generating number ranges...')\n self.generate_ranges()\n \n \n def generate_ranges(self):\n for i in range(int(self.__MAX_GUESS_THRESHOLD / self.__BATCH_SIZE) + 1):\n lower = i * self.__BATCH_SIZE\n higher = min(self.__MAX_GUESS_THRESHOLD, (i + 1) * self.__BATCH_SIZE) + 1\n new_range = range(lower, higher)\n self.__ranges_available.append(new_range)\n\n\n def accept_clients(self):\n self.__server_communicator.bind()\n\n while self.__hash_result == '':\n new_id = self.__server_communicator.accept_client()\n thread = threading.Thread(target=self.handle_client, args=(new_id, ))\n thread.setDaemon(True)\n thread.start()\n\n\n def handle_client(self, client_id):\n client_name = self.__server_communicator.get_name(client_id)\n\n if self.__server_communicator.client_exists(client_id):\n self.__server_communicator.send_target(client_id, self.__hash_target)\n\n while self.__hash_result == '' and self.__server_communicator.client_exists(client_id):\n try:\n if self.__server_communicator.client_exists(client_id):\n self.__server_communicator.request_performance(client_id)\n msg = self.__server_communicator.get_client(client_id).accept_message()\n client_performance = MessageParser.cpu_performance_from_message(msg)\n else:\n break\n self.__cpu_performances.append(client_performance)\n \n capacity = self.get_client_capacity(client_performance)\n\n if capacity > 0:\n self.__queued_ranges[client_id] = []\n \n for i in range(capacity):\n hash_range = 
self.__ranges_available.pop()\n \n if self.__server_communicator.client_exists(client_id):\n self.__server_communicator.assign_hash_range(client_id, hash_range)\n else:\n break\n\n logging.info(f'Gave range {hash_range} to {client_name}')\n\n self.__queued_ranges[client_id].append(hash_range)\n\n\n while len(self.__queued_ranges[client_id]) > 0 and self.__hash_result == '':\n # time.sleep(self.__CLIENT_POLL_INTERVAL)\n \n if self.__server_communicator.client_exists(client_id): \n # self.__server_communicator.send_target(client_id, self.__hash_target)\n msg = self.__server_communicator.get_client(client_id).accept_message()\n else:\n break\n \n if msg.get_type() == MessageType.REPORT_RESULT:\n hash_range = range(int(msg.get_args()['range_start']), int(msg.get_args()['range_end']) + 1)\n hash_result = str(msg.get_args()['hash_result'])\n \n if hash_result != '':\n self.__hash_result = hash_result\n self.__server_communicator.kill_all()\n logging.info(f'Found key - {self.__hash_result}')\n break\n else:\n for qrange in self.__queued_ranges[client_id]:\n if min(qrange) == min(hash_range) and max(qrange) == max(hash_range):\n self.__queued_ranges[client_id].remove(qrange)\n logging.info(f'Range {qrange} is now done.')\n else:\n raise Exception('Scrambled sequence')\n except:\n break\n if self.__hash_result == '':\n logging.info(f'{client_name} crashed.')\n\n if client_id in self.__queued_ranges.keys():\n for qrange in self.__queued_ranges[client_id]:\n self.__ranges_available.append(qrange)\n logging.info(f'Adding range back to available ranges - {qrange}')\n \n if client_id in self.__queued_ranges.keys():\n self.__queued_ranges.pop(client_id)\n\n if client_id in range(len(self.__cpu_performances)):\n self.__cpu_performances.pop(client_id)\n\n if self.__server_communicator.client_exists(client_id):\n self.__server_communicator.remove_client(client_id)\n\n\n def get_client_capacity(self, cpu_performance):\n if cpu_performance.get_utilization() > self.__MAX_UTILIZATION_THRESHOLD:\n return 0\n\n if self .__server_communicator.get_number_of_clients() > 1:\n core_counts = []\n frequencies = []\n for performance in self.__cpu_performances:\n core_counts.append(performance.get_cores())\n frequencies.append(performance.get_frequency())\n\n cores_score = Utils.scale(cpu_performance.get_cores(), min(core_counts), max(core_counts))\n frequency_score = Utils.scale(cpu_performance.get_frequency(), min(frequencies), max(frequencies))\n else:\n cores_score = 1.0\n frequency_score = 1.0\n\n performance_score = cores_score + frequency_score + cpu_performance.get_utilization()\n \n if performance_score > self.__MIN_PERFORMANCE_SCORE_THRESHOLD:\n return round(Utils.scale(\n performance_score, \n self.__MIN_PERFORMANCE_SCORE_THRESHOLD, \n self.__MIN_PERFORMANCE_SCORE, \n scale=self.__MAX_NUMBER_OF_RANGES))\n else:\n return 0\n","sub_path":"src/server/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"356429747","text":"import streamlit as st\nimport pandas as pd\nimport geojson\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport numpy as np\n\nfrom datetime import date\n\nimport json\nfrom urllib.request import urlopen\n\n\nst.title('Analisi Focolai Covid 19')\n\n\nurl_p = 'https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-json/dpc-covid19-ita-province.json'\nurl_r = 
'https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv'\nurl_pop = 'https://raw.githubusercontent.com/RomanTomz/Covid-Outbreaks-Dashboard-Italy/master/popdf.csv'\n\n@st.cache(persist=True, allow_output_mutation=True)\ndef load_data_p():\n data_p = pd.read_json(url_p)\n data_p['data'] = pd.to_datetime(data_p['data'])\n return data_p\n\n# Data cleaning and preparation (province)\n\ndata_p = load_data_p()\ndata_p = data_p[~(data_p.denominazione_provincia == 'In fase di definizione/aggiornamento') & ~(data_p.denominazione_provincia == 'Fuori Regione / Provincia Autonoma')]\ndata_p['pct_crescita'] = data_p.groupby('denominazione_provincia')['totale_casi'].transform(lambda x :(x.pct_change(periods=7)*100).round(2))\ndata_p['nuovi_casi_7gg'] = data_p.groupby('denominazione_provincia')['totale_casi'].apply(lambda x:x.diff(periods=7))\n\nprov_latest = data_p.set_index('data').sort_index().groupby('denominazione_provincia').tail(1)\ntop5_nc7 = prov_latest.sort_values('nuovi_casi_7gg', ascending=False).head(5)\ntop5_mm = prov_latest.sort_values('pct_crescita', ascending=False).head(5)\n# Data cleaning and preparation (regioni)\n@st.cache(persist=True, allow_output_mutation=True)\ndef load_data_r():\n data_r = pd.read_csv(url_r, parse_dates=True, error_bad_lines=False)\n return data_r\n\ndata_r = load_data_r()\ndata_r['crescita_ospedalizzati_sett'] = data_r.groupby('denominazione_regione')['totale_ospedalizzati'].apply(lambda x : x.diff(periods=7))\ndata_r['casi_testati_7gg'] = data_r.groupby('denominazione_regione')['casi_testati'].apply(lambda x: x.diff(periods=7))\ndata_r['nuovi_pos_7gg'] = data_r.groupby('denominazione_regione')['nuovi_positivi'].apply(lambda x: x.diff(periods=7))\ndata_r['positivi_mm'] = data_r.sort_values(by='data').groupby('denominazione_regione')['totale_casi'].apply(lambda x : (x.pct_change(periods=7)*100).round(2))\n\nreg_latest = data_r.set_index('data').sort_index().groupby('denominazione_regione').tail(1)\n\n@st.cache(persist=True, allow_output_mutation=True)\ndef load_data_pop():\n data_pop = pd.read_csv(url_pop, index_col='Regione')\n return data_pop\n\npopdf = load_data_pop()\n\nreg_merged = reg_latest.merge(right=popdf, how='left', left_on=reg_latest.denominazione_regione, right_on=popdf.index)\nreg_merged['prevalenza'] = ((reg_merged.totale_positivi/reg_merged.Popolazioneresidenti)*100000).round(2)\nreg_merged['indice_rischio'] = (reg_merged.positivi_mm * reg_merged.prevalenza).round(2)\nreg_merged['rischio'] = pd.qcut(reg_merged.indice_rischio, q=4, labels=[1,2,3,4])\n\n\n\n\ntop5_osp = reg_latest.set_index('denominazione_regione').sort_values(by='crescita_ospedalizzati_sett', ascending=False).head()\n\ntop5_osp.index.rename('Regione', inplace=True)\ntop5_osp.rename(columns={'crescita_ospedalizzati_sett':'Incremento Ricoveri Negli Ultimi 7 gg'}, inplace=True)\n\ntop5_osp = top5_osp[['Incremento Ricoveri Negli Ultimi 7 gg']].style.background_gradient(cmap='Reds', ).format('{0:,.0f}')\n\n\n# Geo Data import and preparation\n #Province\n@st.cache(persist=True, allow_output_mutation=True)\ndef load_geo_p():\n with urlopen('https://gist.githubusercontent.com/datajournalism-it/212e7134625fbee6f9f7/raw/dabd071fe607f5210921f138ad3c7276e3841166/province.geojson') as response:\n province = json.load(response)\n return province\n\nprovince_geo = load_geo_p()\n\nfor feature in province_geo['features']:\n feature['id'] = feature['properties']['NOME_PRO']\n\n #Regioni\n@st.cache(persist=True, allow_output_mutation=True)\ndef 
load_geo_r():\n with urlopen('https://gist.githubusercontent.com/datajournalism-it/48e29e7c87dca7eb1d29/raw/2636aeef92ba0770a073424853f37690064eb0ea/regioni.geojson') as response:\n regjson = json.load(response)\n return regjson\n\nregioni_geo = load_geo_r()\n\nfor feature in regioni_geo['features']:\n feature['id'] = feature['properties']['NOME_REG']\n\n#Data Prep per tabelle\ntop5_mm.rename(columns={'denominazione_provincia':'Provincia', 'pct_crescita':'Crescita % 7gg'}, inplace=True)\ntop5_nc7.rename(columns={'denominazione_provincia':'Provincia', 'nuovi_casi_7gg':'Nuovi Casi in 7gg'}, inplace=True)\ntop5_mm.set_index('Provincia', inplace=True)\ntop5_nc7.set_index('Provincia', inplace=True)\n\n\nformatted_mm = top5_mm[['Crescita % 7gg']].style.background_gradient(cmap='Reds', ).format('{0:,.2f}')\nformatter_nc7 = top5_nc7[['Nuovi Casi in 7gg']].style.background_gradient(cmap='Reds', ).format('{0:,.0f}')\n\n\n# Mappa nuovi casi\nprov_max = (prov_latest.groupby('denominazione_provincia')['nuovi_casi_7gg'].max()).max()\nfig_geo_7 = go.Figure(go.Choroplethmapbox(geojson=province_geo, locations=data_p.denominazione_provincia, z=data_p.nuovi_casi_7gg,\n colorscale=\"Reds\", zmin=0, zmax=prov_max-100,\n marker_opacity=1, marker_line_width=0.6))\nfig_geo_7.update_layout(mapbox_style=\"carto-positron\",\n mapbox_zoom=4, mapbox_center = {\"lat\": 41.8719, \"lon\": 12.5674})\nfig_geo_7.update_layout(margin={\"r\":0,\"t\":20,\"l\":0,\"b\":0})\n\n\n\n#Mappa media Mobile\nmm_max = (prov_latest.groupby('denominazione_provincia')['pct_crescita'].max()).max()\nmm_min = (prov_latest.groupby('denominazione_provincia')['pct_crescita'].min()).min()\n\nfig_geo_mm = go.Figure(go.Choroplethmapbox(geojson=province_geo, locations=data_p.denominazione_provincia, z=data_p.pct_crescita,\n colorscale=\"Reds\", zmin=mm_min, zmax=mm_max,\n marker_opacity=1, marker_line_width=0.6))\nfig_geo_mm.update_layout(mapbox_style=\"carto-positron\",\n mapbox_zoom=4, mapbox_center = {\"lat\": 41.8719, \"lon\": 12.5674})\nfig_geo_mm.update_layout(margin={\"r\":0,\"t\":20,\"l\":0,\"b\":0})\n\n#Mappa Ospedalizzati\nextremum_2 = max(np.max(reg_latest.crescita_ospedalizzati_sett), np.abs(np.min(reg_latest.crescita_ospedalizzati_sett)))\nfig_osp = go.Figure(go.Choroplethmapbox(geojson=regioni_geo, locations=reg_latest.denominazione_regione, z=reg_latest.crescita_ospedalizzati_sett,\n colorscale=\"temps\", zmin=-extremum_2,zmid=0, zmax=extremum_2,\n marker_opacity=0.7, marker_line_width=0.6))\nfig_osp.update_layout(mapbox_style=\"carto-positron\",\n mapbox_zoom=4, mapbox_center = {\"lat\": 41.8719, \"lon\": 12.5674})\nfig_osp.update_layout(margin={\"r\":0,\"t\":20,\"l\":0,\"b\":0})\n\n#Indice Rischio Relativo\nfig_rischio = go.Figure(go.Choroplethmapbox(geojson=regioni_geo, locations=reg_merged.denominazione_regione, z=reg_merged.rischio,\n colorscale=\"Pinkyl\", zmin=1, zmax=4,\n colorbar=dict(dtick=1),\n marker_opacity=0.5, marker_line_width=0.6))\nfig_rischio.update_layout(mapbox_style=\"carto-positron\",\n mapbox_zoom=4, mapbox_center = {\"lat\": 41.8719, \"lon\": 12.5674})\nfig_rischio.update_layout(margin={\"r\":0,\"t\":20,\"l\":0,\"b\":0})\n\n\nst.sidebar.header('Tipo Visualizzazione')\n\nviz = st.sidebar.radio('Seleziona Visualizzazione',('Nuovi Casi 7 gg','Crescita % in 7 gg','Incremento Ospedalizzati 7 gg (Regionale)', 'Indice Rischio Relativo (Regionale)'))\n\n\nif viz == 'Nuovi Casi 7 gg':\n st.markdown('#### Prime 5 Province per Nuovi Contagi Negli Ultimi 7 Giorni')\n st.write(formatter_nc7)\n st.markdown('#### Nuovi Casi 
Negli Ultimi 7 Giorni - Visualizzazione Geografica')\n st.plotly_chart(fig_geo_7)\n\nif viz == 'Crescita % in 7 gg':\n st.markdown('Prime 5 Province per Maggiore Incremento Percentuale Contagi a 7 Giorni')\n st.write(formatted_mm)\n st.markdown('#### Crescita Percentuale Contagi Negli Ultimi 7 Giorni - Visualizzazione Geografica')\n st.plotly_chart(fig_geo_mm)\n\nif viz == 'Incremento Ospedalizzati 7 gg (Regionale)':\n st.markdown('#### Prime 5 Regioni per Crescita Ospedalizzati Negli Ultimi 7 Giorni')\n st.write(top5_osp)\n st.markdown('#### Crescita Ospedalizzati Negli Ultimi 7 Giorni - Visualizzazione Geografica')\n st.plotly_chart(fig_osp)\n\nif viz == 'Indice Rischio Relativo (Regionale)':\n st.markdown('#### Indice Rischio Relativo')\n st.latex(\" indice\\ richio\\ relativo: {(Crescita\\ Media\\ Mobile\\ 7gg)} \\cdot {(Prevalenza\\ per\\ 100k)}\")\n st.markdown('#### IRR divide le regioni in 4 diverse zone di rischio: 1 = Moderato - 4 = Critico.' )\n st.plotly_chart(fig_rischio)\n\n\nst.sidebar.markdown(\n (\"#### La Dashboard utilizza l'incremento a 7 giorni per dare maggiore chiarezza al trend eliminando le oscillazioni quotidiane\")\n\n)\n\n\nst.sidebar.markdown(\"###### Dati da https://github.com/pcm-dpc/COVID-19 \")\nst.sidebar.button('Aggiorna Dati')\n\nst.sidebar.markdown('###### *Code:* https://github.com/RomanTomz/Covid-Outbreaks-Dashboard-Italy/tree/master')\n\n\nst.sidebar.markdown('###### *Sviluppata da: Tommaso Di Marcello - @TomZ_UK*' )\n","sub_path":"main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"354893374","text":"import time\nfrom functools import lru_cache\n\n'''\n lru_cache decorator caches the computed result\n\n maxsize represents no. 
of results you can store\n\n LRU - least recently used\n'''\n\n\n@lru_cache(maxsize=3)\ndef fibonacci_cache(n) -> int:\n if n == 1 or n == 2:\n return 1\n else:\n return fibonacci_cache(n - 1) + fibonacci_cache(n - 2)\n\ndef fibonacci(n) -> int:\n if n == 1 or n == 2:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\nprint(\"**** Using Cache ****\")\nstart_time = time.time()\nans = fibonacci_cache(37)\nprint(ans)\nprint(time.time() - start_time)\n\nprint(\"**** Without Cache ****\")\nstart_time = time.time()\nans = fibonacci(37)\nprint(ans)\nprint(time.time() - start_time)","sub_path":"py_caching.py","file_name":"py_caching.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"247622037","text":"import os\nimport unittest\nfrom argparse import Namespace\nfrom cslang.cslang import main as cslang_main\nfrom cslang.cslang_error import CSlangError\n\n\ndef get_test_data_path(filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(dir_path, filename)\n\n\nclass TestJSON(unittest.TestCase):\n def test_json(self):\n test_file = get_test_data_path(\"update.cslang\")\n automaton, containerbuilder = cslang_main(\n Namespace(mode=\"build\", cslang_path=get_test_data_path(\"update.cslang\"))\n )\n\n automaton, datawords, _ = cslang_main(\n Namespace(\n mode=\"run\",\n format=\"jsonrpc\",\n json_path=get_test_data_path(\"update.json\"),\n automaton_path=get_test_data_path(\"update.auto\"),\n )\n )\n assert \"update\" in containerbuilder.builders\n assert \"test\" in containerbuilder.builders\n\n container = datawords[0].container\n\n assert container[\"type\"] == \"update\"\n assert container[\"members\"][0][\"type\"] == \"Numeric\"\n assert container[\"members\"][0][\"members\"] == [1]\n assert container[\"members\"][1][\"type\"] == \"Numeric\"\n assert container[\"members\"][1][\"members\"][0] == 2\n\n container2 = datawords[2].container\n\n assert container2[\"type\"] == \"update\"\n assert container2[\"members\"][0][\"members\"][0] == 999\n assert container2[\"members\"][1][\"members\"][0] == 888\n\n assert automaton.is_accepting\n assert automaton.current_state == 3\n","sub_path":"test/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"144907802","text":"from torch.utils import data\nimport torchvision.transforms as transforms\nimport os\nimport torchvision\n\nimport glob\n\nfrom data.AugMixDataset import AugMixDataset\n\n\ndef get_train_loader(args, dataset_class, use_sobel=False, use_color=False):\n # Data loading code\n img_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip()\n # transforms.RandomRotation(15),\n ])\n\n preprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5] * 3, [0.5] * 3)\n ])\n\n\n dataset = dataset_class(f'{args.img_dir}/train', transform=img_transform, use_sobel=use_sobel, use_color=use_color)\n dataset = AugMixDataset(dataset, preprocess, args, args.no_jsd)\n\n train_dataloader = data.DataLoader(dataset, num_workers=args.n_workers, batch_size=args.batch_size, shuffle=True,\n drop_last=True, pin_memory=True)\n\n return train_dataloader\n\ndef get_val_loader(args, dataset_class):\n # Data loading code\n img_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5] * 3, [0.5] * 3)\n 
])\n\n dataset = dataset_class(f'{args.img_dir}/test', transform=img_transform)\n\n train_dataloader = data.DataLoader(dataset, num_workers=args.n_workers, batch_size=args.batch_size, shuffle=True,\n drop_last=True, pin_memory=True)\n\n return train_dataloader\n\n","sub_path":"CIFAR100/data/augmix_data_manager.py","file_name":"augmix_data_manager.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"436374027","text":"\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport glob\nimport pickle as pkl\n\nfrom skimage.filters import gaussian\nimport skimage.feature as skfeat\nfrom math import pi\n\nfrom scipy.optimize import curve_fit\n\n# Define the image channels\nCHANNELS = ['R','G','B']\n\n\ndef save_obj(obj, name, folder ):\n \"\"\"\n To save a .pkl object in a desired folder\n\n Parameters\n ----------\n obj : python object\n python object to be saved.\n (e.g. dictionay, list, etc)\n\n name : string\n name with which save the object\n \n folder: string\n folder name where to save the object\n \n Returns\n -------\n ImageCount : int\n number of defined filetype files on the path folder\n\n \"\"\"\n with open(folder+'/'+ name + '.pkl', 'wb') as f:\n pkl.dump(obj, f, pkl.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name, folder ):\n \n \"\"\"\n To load a .pkl object from a desired folder\n\n Parameters\n ----------\n name : string\n name of the object to be loaded\n \n folder: string\n name of the folder where the object is.\n \n Returns\n -------\n \n returns the .pkl object to be loaded\n\n \"\"\"\n with open(folder+'/' + name + '.pkl', 'rb') as f:\n return pkl.load(f)\n\n\ndef count_files(path,file_type):\n \"\"\"\n To count the number of files of a defined extension (filetype on a certain folder (path)\n\n Parameters\n ----------\n path : string\n folder name where the images are stored\n\n file_type : string\n extension of the files to count (e.g. tif, png, jpg)\n\n Returns\n -------\n ImageCount : int\n number of defined filetype files on the path folder\n\n \"\"\"\n\n ImageCount = len(glob.glob1(path,\"*.\"+file_type))\n print(path.split('\\\\')[-1]+' = '+str(ImageCount) + ' files')\n return(ImageCount)\n\ndef get_im_data(x_frames,image_count,f_name, init = 0):\n \"\"\"\n Load image data from a sequence of files\n\n Parameters\n ----------\n x_frames : int\n step frames (e.g 10 to use only ten to ten images)\n\n image_count : int\n total number of files on the folder (can be obtained with count_files function)\n\n f_name : string\n file name pattern including full path where images are stored, e.g. \"/folder/image-%04d\"\n \n init: int\n first image number name to be used in the analysis. \n e.g. 
init = 33 means to use /folder/image-%33\n\n Returns\n -------\n ImsR,ImsG,ImsB: array_like\n data per channel of each image (ImsR -> matrix size = (W,H,image_count/x_frames))\n\n \"\"\"\n \n W,H,_ = plt.imread(f_name%init).shape # Measure the image size based on the first image on the folder\n NT = int(image_count/x_frames)\n ImsR = np.zeros((W,H,NT))\n ImsG = np.zeros((W,H,NT))\n ImsB = np.zeros((W,H,NT))\n init = int(init)\n \n for i in range(0,NT):\n im = plt.imread(f_name%(init + i*x_frames))\n ImsR[:,:,i] = im[:,:,0] # Last number code the channel: 0=red, 1=green, 2=blue\n ImsG[:,:,i] = im[:,:,1]\n ImsB[:,:,i] = im[:,:,2]\n return(ImsR,ImsG,ImsB)\n\n# at call you can take only the channels you are interested in (e.g.):\n# red,_,blue=get_im_data(xframes,imagecount) ---> this only takes the red and blue channels\n\n\ndef time_vector(data, x_frames, dt):\n \"\"\"\n Get the vector of times for the image sequence loaded\n\n Parameters\n ----------\n data : dictionary\n dictionary with the R G B data of all images\n\n xframes : int\n step frames used on the analysis (e.g 10 means you are using one every ten to ten images)\n \n dt : double\n time step of the frames in hour units. It can be obtained from the file used to perform the timelapse.\n \n Returns\n -------\n T: array_like\n Time vector for the used data (hour units)\n \"\"\"\n\n _,_,LT = data[CHANNELS[0]].shape # Length of time vector\n T = np.zeros((LT))\n for i in range(0,LT):\n T[i] = (i)*x_frames*dt\n \n return(T)\n\n\ndef bg_value(x1, x2, y1, y2, data, im_count):\n \"\"\"\n compute the background mean value for each channel and frame based on a rectagle\n defined by the user. Plot the rectangle over the image and makes plots of each channel\n mean background value over time\n\n Parameters\n ----------\n x1,x2,x1,x2: int values\n rectangle area limits: (x1,y1) = left-up corner. 
(x2,y2) = rigth-bottom corner\n \n data : dictionary\n R G B images data to get the background, and his names on data['Im']\n \n im_count : int\n total number of files on the folder (can be obtained with count_files function)\n\n Returns\n -------\n bg: dictionary\n Background mean value of each channel for every time frame\n\n \"\"\"\n\n X2R = x2-x1 #convert on steps because the rectangle patch definition\n Y2R = y2-y1\n\n #plot the defined area\n plt.figure(figsize=(8,8))\n fig = plt.gcf()\n ax = fig.gca()\n Im = plt.imread(data['Im']%(im_count-1))\n ax.imshow(Im)\n rect = matplotlib.patches.Rectangle((y1,x1), Y2R, X2R, linewidth=1, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n\n\n #get the mean background value at each time for each channel and plot it\n BG = {}\n LColors = ['r','g','b'] # each color will be for each line in the plot\n count = 0\n \n plt.figure()\n for chan in CHANNELS:\n BG[chan] = data[chan][x1:x2,y1:y2,:].mean(axis=(0,1))\n plt.plot(BG[chan][:],LColors[count])\n count += 1\n\n plt.xlabel('Time step')\n plt.ylabel('Fluorescence intensity')\n\n return(BG)\n\ndef bg_subst(data, bg):\n \"\"\"\n Substract the mean background value for each channel and frame obtained with BG_Val function.\n \n Parameters\n ----------\n data: dictionary\n R G B images data\n bg : array\n ackground mean value of each channel for every time frame (can be obtained with BG_Val function)\n\n\n Returns\n -------\n Data: dictionary\n R G B images data with the background substracted\n\n \"\"\"\n\n L = bg[CHANNELS[0]].shape[0]\n S1,S2,_ = data[CHANNELS[0]].shape\n\n for c in CHANNELS:\n for i in range(0,L):\n BGM = np.ones((S1,S2))\n BGM = BGM*bg[c][i] #create a matrix with bg to substract it to the frame\n\n Data = data[c][:,:,i]\n\n Data = Data-BGM #perform the substraction\n\n Data[Data<0] = 0 # values < 0 are not allowed --> transform it to 0\n\n data[c][:,:,i] = Data #actualize Data\n\n\n return(data)\n\ndef data_sum_time(data):\n \"\"\"\n Sum the data for each pixel over time\n\n Parameters\n ----------\n Data: dictionary\n R G B images data\n\n Returns\n -------\n SData: array like\n Sum data over time and over channels for each pixel of the Data\n\n \"\"\"\n SData = data[CHANNELS[0]][:,:,:].sum(axis=(2))+data[CHANNELS[1]][:,:,:].sum(axis=(2))+data[CHANNELS[2]][:,:,:].sum(axis=(2))\n plt.imshow(SData)\n plt.colorbar()\n plt.title('All channels')\n\n return(SData)\n \ndef smooth_data(data,sigma):\n\n \"\"\"\n Apply gaussian filter to smooth each frame data\n\n Parameters\n ----------\n data: dictionary\n 4 dimensional (R,G,B, and Time) matrix with the data \n sigma: double\n Filter parameter (standard deviation)\n\n Returns\n -------\n NSIms: dictionary\n Sum over time of Smoothed data per channel (call it nsims[channel][r,c])\n\n NSImsAll: array_like\n Matrix with sum of nsims over the channels (call it nsimsAll[r,c])\n \n SImsT: dictionary\n Smoothed data per channel per frame (call it as simsT[channel][r,c,f])\n\n \"\"\"\n\n NSIms = {}\n NSIms_All = np.zeros((data[CHANNELS[0]].shape[0],\n data[CHANNELS[0]].shape[1]))\n SImsT = {}\n \n plt.figure(figsize=(17,3))\n POS_VECT = [131,132,133] # figure position vector\n count = 0\n\n for c in CHANNELS:\n # apply filter\n Data_Sum = data[c].sum(axis=2)\n SIms = gaussian(Data_Sum, sigma)\n NSIms [c] = (SIms-SIms.min())/(SIms.max()-SIms.min())\n\n NSIms_All += NSIms[c]\n \n Maux = np.zeros((data[CHANNELS[0]].shape))\n for fr in range(data[c].shape[-1]):\n Maux[:,:,fr] = gaussian(data[c][:,:,fr], sigma) \n \n SImsT[c] = Maux\n # make plot 
of the sum over time of smoothed data per channel\n \n plt.subplot(POS_VECT[count])\n plt.imshow(NSIms[c])\n plt.colorbar()\n plt.title(c+' channel')\n \n count += 1\n \n return(NSIms,NSIms_All,SImsT)\n\n\ndef colony_blobs_id(data, im_name, thresh, sigma_lims =[1,10], max_over=0.8, filename='null'):\n \"\"\"\n Use skimage to identify the position of each colony and define the circular region\n used by each of them\n\n Parameters\n ----------\n data: array of single channel image data\n\n thresh:\n Pixel values > thresh are included in the analysis, range (0,1)\n \n im_name:\n Name of an image on which to overlay colony positions and sizes\n \n sigma_lims: list [min,max]\n Indicates the minimum and maximum sigma to search for colonies.\n The actual radio of a colony is: sqrt(2)*sigma\n \n max_over: int or float\n Indicates the maximum overlap allowed between two colonies.\n If the area of two colonies overlaps by a fraction greater than threshold,\n the smaller colony is con taked in account.\n \n filename: string\n filename with whom save the output image+blobs+ID\n\n Returns\n -------\n A: array (Nx3)\n Contains the (x,y) position and size of each blob for each of N colonies detected\n \"\"\"\n\n A = skfeat.blob_log(data, min_sigma=sigma_lims[0], max_sigma=sigma_lims[1], num_sigma=100, \n threshold=thresh, overlap=max_over)\n\n plt.figure(figsize=(8,8))\n plt.imshow(data, cmap='gray')\n #plt.hold(True)\n plt.title('Sumarized Image')\n for i in range(len(A)):\n circle = plt.Circle((A[i,1], A[i,0]), (2**0.5)*A[i,2], color='r', fill=False , \n lw=0.5)\n fig = plt.gcf()\n ax = fig.gca()\n ax.add_artist(circle)\n\n plt.figure(figsize=(8,8))\n plt.imshow(plt.imread(im_name))\n #plt.hold(True)\n plt.title('Over '+ im_name)\n for i in range(len(A)):\n # plot the circle area identified for each colony\n circle = plt.Circle((A[i,1], A[i,0]), (2**0.5)*A[i,2], color='w', fill=False , lw=0.5)\n fig = plt.gcf()\n ax = fig.gca()\n ax.add_artist(circle)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n \n # attach the ID label to each colony\n plt.annotate(i, xy=(A[i,1], A[i,0]), xytext=(-2, 2),\n textcoords='offset points', ha='right', va='bottom',\n color='white')\n if filename != 'null':\n plt.savefig(str(filename) + \".pdf\", transparent=True)\n\n return(A)\n\n\ndef obtain_rois(data,blobs):\n \"\"\"\n Based on the information of each identified colony, create arrays to contain\n the regions of interest (ROI) around each one.\n \n \n Parameters\n ----------\n data: dictionary\n R G B image data per frame\n\n blobs: array like\n Array of colony positions and sizes given by skimage in colonyBlob()\n\n Returns\n -------\n all_rois:\n The ROI array image data for square region around colony position of side 2*(colony size)\n to call it: all_rois['channel_name'][blob_number][y,x,timepoint]\n \n all_rois_circle:\n The ROI array image data only within circle (radius = width/2), with the data outside the circle equal to zero.\n The size of the array is equal to square ROIS (all_rois) size.\n to call it: all_rois_circle['channel_name'][blob_number][y,x,timepoint]\n\n nc:\n Number of colonies analysed (length of returned arrays)\n \"\"\"\n\n all_rois = {}\n all_rois_circle = {}\n nc = len(blobs)\n\n for char in CHANNELS:\n rois = {}\n rois_circle = {}\n\n for i in range(nc):\n x = blobs[i,0]\n y = blobs[i,1]\n r = 2*blobs[i,2] # blobs[i,2] is the std deviation of the radius \n # --> r=2*std implies 95% confidence\n\n####### this lines are to eliminate the out of image bounds 
error\n x1 = int(round(x-r))\n x2 = int(round(x+r+1)) #plus 1 because slice working\n y1 = int(round(y-r))\n y2 = int(round(y+r+1)) #plus 1 because slice working\n\n if x1 < 0:\n x1 = 0\n if x2 >= data[char].shape[0]:\n x2 = data[char].shape[0]-1\n if y1 < 0:\n y1 = 0\n if y2 >= data[char].shape[1]:\n y2 = data[char].shape[1]-1\n \n if x2>x1 and y2>y1:\n print('ROI','x1','x2','y1','y2')\n print(i,x1,x2,y1,y2)\n rois[i] = data[char][x1:x2,y1:y2,:]\n else:\n rois[i] = []\n#######\n xr = int((rois[i].shape[0]+1)/2)\n yr = int((rois[i].shape[1]+1)/2)\n rois_circle[i] = np.zeros((rois[i].shape))\n for n in range(rois[i].shape[0]):\n for m in range(rois[i].shape[1]):\n if ((n-xr)**2+(m-yr)**2) <= (r**2):\n rois_circle[i][n,m,:] = rois[i][n,m,:]\n all_rois[char] = rois\n all_rois_circle[char] = rois_circle\n\n return(all_rois,all_rois_circle,nc)\n\n# rois contains a square arund the colony\n# rois_circle makes the values outside the colony boundaries equals to zero\n\n\ndef channels_sum(rois_data, cv):\n \"\"\"\n Compute the sum over the RGB channels for each image\n\n Parameters\n ----------\n rois_data: dictionary\n RGB time-lapse image data of each ROIS, from obtain_rois()\n \n cv: vector\n contain the ID of the of colonies analysed\n\n Returns\n -------\n sum_chan_rois: dictionary\n Sum of channels for each time step and ROI\n \"\"\"\n sum_chan_rois = {}\n for i in cv:\n sum_chan_rois[i] = np.zeros((rois_data[CHANNELS[0]][i].shape))\n\n for c in CHANNELS:\n for i in cv:\n sum_chan_rois[i] += rois_data[c][i][:,:,:]\n\n return(sum_chan_rois)\n\n\ndef frame_colony_radius(rois, cv, thr, min_sig=0.5, max_sig=10, num_sig=200):\n \"\"\"\n Get the colony radius at each time step\n \n Parameters\n ----------\n rois: dictionary\n ROI image data from obtain_rois()\n\n cv: vector\n contain the ID of the of colonies analysed\n\n thr: double\n Threshold for skfeat.blob_log \n \n min_sig: double\n minimum value of sigma used on skfeat.blob_log\n \n max_sig: double\n maximum value of sigma used on skfeat.blob_log\n \n num_sig: int\n number of sigma values used between min_sig and max_sig on skfeat.blob_log\n \n\n Returns\n -------\n R: dictionary\n The time series of colony radius size, indexed by colony id number.\n\n \"\"\"\n R = {}\n nt = rois[cv[0]].shape[2]\n for k in cv:\n R[k] = np.zeros((nt,))\n for i in range(nt):\n troi = rois[k][:,:,i].astype(np.float32)\n if len(troi):\n nt_roi = (troi-troi.min())/(troi.max()-troi.min()) # Normalization\n AA = skfeat.blob_log(nt_roi, min_sigma=min_sig, \n max_sigma=max_sig, num_sigma=num_sig, \n threshold=thr, overlap=0.8)\n #AA = skfeat.blob_log(nt_roi, min_sigma=0.1, max_sigma=6.0, num_sigma=150, threshold=thr, overlap=0.8)\n if len(AA)>0:\n R[k][i] = AA[0,2]*(2)\n #R[k][i] = AA[0,2]*(2**0.5)\n return(R)\n\n\ndef area(r, cv, T, filename='null'):\n \"\"\"\n Compute and plot the colonies area over time as a perfect circle (using \n the input radius value) around the colony position value \n \n Parameters\n ----------\n r: dictionary\n colony radius at each time step of the selected colony (obtained with frame_colony_radius() function) \n \n cv: vector\n colonies ID vector to plot\n\n \n T: vector\n the vector of real time values\n \n filename: string\n filename to save the plot generated\n \n Returns\n -------\n A: dictionary\n colony area at each time step of the selected colony. 
Call it as: A[colonyID][time step]\n \"\"\"\n plt.figure()\n A = {}\n for i in cv:\n R = r[i]\n A[i] = pi*R*R\n plt.plot(T,A[i],'.',label='colony '+str(i)) \n\n if filename != 'null': \n #plt.savefig(\"KymoGraph.pdf\", transparent=True) \n plt.savefig(str(filename)+\".pdf\", transparent=True)\n \n return(A)\n\ndef f_sigma(t, a, b, c):\n \"\"\"\n Compute the sigmoide function value using the given input values\n \n Parameters\n ----------\n t: vector\n independent variable ( \"x axis\", suposed to be time) \n \n a: double\n maximum value parameter\n \n b: double\n function parameter\n \n c: double\n delay parameter\n \n Returns\n -------\n function evaluation\n \n \"\"\"\n return((a /(1+np.exp(-(t+b)*c))))\n #return((a /(1+np.exp(-(t+b)*c)))+d) \n\n\ndef function_fit(xdata, ydata, init, end, cv, func=f_sigma, \n param_bounds=([1,-np.inf,0.1],[np.inf,-1,1])):\n \"\"\"\n Fit a given function to given data\n \n Parameters\n ----------\n xdata: vector\n independent variable ( \"x axis\", suposed to be time vector) \n \n ydict: array like\n array of dependent variable vectors \n \n init: double\n point on the time vector to init the fitting\n \n end: double\n point on the time vector to end the fitting\n \n cv: vector\n contain the ID of the colonies to analyse\n \n func: function\n function to be fitted\n \n param_bounds: array of vectors\n lower and upper bounds of each parameters\n para_bounds=([lower bounds],[upper bounds])\n \n Returns\n -------\n Y_fit: dictionay\n contain the fitting result for each colony in the dictionary. \n It is:\n \n Y_fit[col ID][evalF z]:\n \n evalF: vector\n result vector of the fitted function: \n evalF=func(xdata, optimal_parameters)\n \n z: vector\n fitted parameters\n \n \"\"\"\n \n Y_fit = {}\n for i in cv:\n z,_ = curve_fit(func, xdata[init:end], ydata[i][init:end], \n bounds=param_bounds)\n print(z)\n evalF = func(xdata,z[0],z[1],z[2])\n plt.plot(xdata, ydata[i], '.',xdata, evalF, '-')\n plt.title('Colony '+str(i))\n plt.show()\n Y_fit[i] = evalF,z\n return(Y_fit)\n\n\ndef croi_mean_int_frames(data, blobs, radii, cv):\n \"\"\"\n compute the mean intensity values for each time and channels for each CROI \n (circular ROI), redefining the ROIS based on radii values \n It takes the fit radius value at each time (radii), with it defines a \n circular ROI, sum all the pixel values inside them and divide this value \n for the number of pixel considered. 
--> obtain the intensity mean value \n inside the colony limits on each time.\n \n Parameters\n ----------\n data: dictionary\n RGB dictionary with the images data\n \n blobs: array like\n contains the information of identified blobs\n \n radii: dictionary\n contains the radius for each colony on each time step\n \n cv: vector\n contain the ID of the colonies to analyse\n \n Returns\n -------\n all_chan_crois_mean_val: dictionary\n contain the mean pixel value of each channel for each time step of each colony.\n call it as: all_chan_crois_mean_val['channel_name'][blob_number][timepoint]\n\n \n \"\"\"\n all_chan_crois_mean_val = {}\n \n for char in CHANNELS:\n crois_mean_val = {}\n \n for i in cv:\n #x and y are the colony center pixel stored on blobs\n x = blobs[i,0]\n y = blobs[i,1]\n CRoi_int = 0\n count = 0\n meanInt = np.zeros((len(radii[i])))\n \n for j in range(len(radii[i])): \n####### this lines is to eliminate the out of image bounds error\n r = radii[i][j]\n \n x1 = int(round(x-r))\n x2 = int(round(x+r+1))\n y1 = int(round(y-r))\n y2 = int(round(y+r+1))\n\n if x1 < 0:\n x1 = 0\n if x2 >= data[char].shape[0]:\n x2 = data[char].shape[0]-1\n if y1 < 0:\n y1 = 0\n if y2 >= data[char].shape[1]:\n y2 = data[char].shape[1]-1\n\n SRoi = data[char][x1:x2,y1:y2,j]\n\n####### \n xr = int((SRoi.shape[0]+1)/2)\n yr = int((SRoi.shape[1]+1)/2)\n \n for n in range(SRoi.shape[0]):\n for m in range(SRoi.shape[1]):\n if ((n-xr)**2+(m-yr)**2) <= (r**2):\n CRoi_int += SRoi[n,m]\n count += 1\n if count != 0:\n meanInt[j] = CRoi_int/count\n crois_mean_val[i] = meanInt\n all_chan_crois_mean_val[char] = crois_mean_val\n \n return(all_chan_crois_mean_val)\n\ndef f_mu (t, b, d):\n \"\"\"\n compute the grwoth rate (mu) function value\n \n Parameters\n ----------\n t: int or vector\n independent variable values (suposed to be time vector)\n \n b: double\n functon parameter\n \n c: double\n function parameter\n \n \n Returns\n -------\n evaluated \"mu\" fucntion with the given parameters\n\n \n \"\"\"\n return((d /(np.exp(d*(t+b))+1)))\n\ndef f_linear(x, a, b):\n \"\"\"\n compute the linear function value with given parameters\n \n Parameters\n ----------\n x: int or vector\n independent variable values\n \n a: double\n slope parameter\n \n b: double\n y-intercept parameter\n \n \n Returns\n -------\n evaluated linear fucntion with the given parameters for the given x\n \"\"\"\n \n return(a * x + b)\n\ndef linear_fit(data1, data2, filename='null'):\n \"\"\"\n Fit linear function (f_linear) to given data, display the fited function\n and make a plot of the result. 
You are able to save the resulting plot by\n    giving the \"filename\" to save it as input.\n    \n    Parameters\n    ----------\n    data1: vector\n        independent variable ( \"x axis\") to be used as input of f_linear \n    \n    data2: vector\n        \"y-data values\" used as reference to perform the fitting\n    \n    filename: string\n        name of the image file if it is desired to save it.\n\n    \n    Returns\n    ------- \n    z: vector\n        fitted parameters\n    \n    \"\"\"\n    \n    z,_ = curve_fit(f_linear, data1, data2, bounds=([0,-np.inf], np.inf))\n    #print(z) #first component is the slope\n    p = np.poly1d(z)\n    print(np.poly1d(p))\n    xp = np.linspace(data1.min(), data1.max(), 2)\n    #plt.plot(timeC[init:end], ratio[init:end,i], '.', xp, p(xp), '-')\n    plt.figure()\n    axisMax = np.max([np.max(data1), np.max(data2)])\n    axisMin = np.min([np.min(data1), np.min(data2)])\n    plt.axis([axisMin, axisMax, axisMin, axisMax])\n    plt.plot(data1, data2, '.', xp, p(xp), '-')\n    \n    if filename != 'null':\n        #plt.savefig(\"FluorIntRGB.pdf\", transparent=True)\n        plt.savefig(str(filename) + \".pdf\", transparent=True)\n    return(z)\n\ndef colony_classifier(fit, classes, chanx_dat, chany_dat): \n    \"\"\"\n    Classify chanx_dat and chany_dat (the data series being\n    classified) into the class names given as inputs. Each point is assigned\n    to the class whose fitted line (stored in \"fit\"), evaluated at chanx_dat,\n    is closest to chany_dat. In other words, the classification is done\n    according to the minimal y-coordinate distance between each dot and the fitted lines.\n    \n    Parameters\n    ----------\n    fit: array like\n        each position of the array contains the parameters of the linear\n        fit of each category. fit = [z1, z2, z3] where z is the return of \n        linear_fit.\n    \n    classes: string array\n        contains the names of the defined categories (its length must be\n        the same as that of fit)\n    \n    chanx_dat: vector\n        data of the channel on the x axis for the data to be classified\n    \n    chany_dat: vector\n        data of the channel on the y axis for the data to be classified\n\n    \n    Returns\n    ---------- \n    clas: list\n        contains the category of each classified colony in order. \n        e.g. clas = ['cat3', 'cat1', 'cat1', 'cat1', 'cat2', etc ...]\n    \n    clas_dict: dictionary\n        contains the channel values of the colonies of each category in the\n        corresponding dictionary class. clas_dict = ['class'][chan_xdat,\n        chany_dat, boolean]. 
The boolean vector have the length of the \n total colony analyzed, and indicate (with True) which colonies\n correspond to that category.\n \n \"\"\"\n CAT_NUM = len(fit) # number of categories\n y = np.zeros(CAT_NUM)\n d = np.zeros(CAT_NUM)\n clas = np.zeros(len(chanx_dat))\n clas_dict = {}\n \n # evaluate if have same number of classes as linear fits\n if CAT_NUM == len(classes):\n \n # compute the difference between the straight lines categories and the \n # colonies being classified\n for i in range(len(chanx_dat)):\n for j in range(CAT_NUM):\n y[j] = fit[j][0]*chanx_dat[i]+fit[j][1]\n d[j] = (y[j]-chany_dat[i])*(y[j]-chany_dat[i])\n \n # find the minimal difference value\n mindif = np.min(d)\n \n # perform the classification\n TOKEN = 0\n count = 0\n while TOKEN == 0:\n if mindif == d[count]:\n clas[i] = count\n TOKEN = 1\n count += 1\n \n \n # store the data in a dictionary of categories\n for n in range(len(classes)):\n clas_dict[classes[n]]=[chanx_dat[clas==n],chany_dat[clas==n],clas[:]==n]\n \n # save a list with the corresponding string category name of each element in clas \n roi_clas = []\n for i in range(len(clas)):\n roi_clas.append(classes[int(clas[i])])\n \n return(roi_clas, clas_dict)\n \n else:\n print('\\nERROR: classes have to be same length as fits\\n')\n\n# End\n\n","sub_path":"fluopi/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":27402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"193700388","text":"import urllib\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nfrom unicodedata import normalize\nimport diccionario\nimport re\nimport lista\n\n\ndef getSoup(url):\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, \"lxml\")\n return soup\n\n\ndef getProvincia(soup):\n provincia = soup.select('.pagination ol li a')[2]\n print(provincia.text)\n return provincia.text\n\n\ndef getListaPaginas(soup):\n pagination = soup.select('.adlist-paginator-pages a')\n print (pagination)\n # n_item = len(pagination)\n # print (n_item)\n # del(pagination[n_item-1]) # borro el ultimo porque se repite enlace\n # del(pagination[0])\n # startURL = \"https://www.milanuncios.com\"\n # # \"\"\"\"\n # listaPaginas = []\n # for link in pagination:\n # url = startURL+link.get('href')\n # listaPaginas.append(url)\n # # print(url)\n # # \"\"\"\n # return listaPaginas\n return 0\n\n\ndef getURLEmpresas(url):\n\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, \"lxml\")\n cont = 0\n listaURL = []\n listaURL2 = []\n fProvincia = soup.select('div', {'id': 'cuerpo'})\n # print(fProvincia)\n\n for tag in fProvincia:\n aux = tag.find('a', {'class': 'aditem-detail-title'})\n cont += 1\n # print(str(cont) + \" \"+str(aux))\n listaURL.append(aux)\n\n # quitar el none de una lista de 700 item\n contador = 0\n for tag in listaURL:\n if str(tag) != 'None':\n contador += 1\n aux = tag.get('href')\n listaURL2.append(aux)\n # print(str(contador) + \" \"+aux)\n\n \n \n\n lista_nueva = lista.quitarRepetido(listaURL2) \n \n lista.imprime(lista_nueva)\n\n return 0\n\n\ndef muestraListaURL(listaURL):\n cont = 0\n for enlace in listaURL:\n cont += 1\n print(str(cont) + \" \" + enlace)\n\n\ndef getDatos(url):\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, \"lxml\")\n\n fProvincia = soup.find('span', {'itemprop': 'addressRegion'}).text\n # print(fProvincia)\n trans_tab = dict.fromkeys(map(ord, u'\\u0301\\u0308'), None) # quitar tildes\n 
provincia = normalize('NFKC', normalize(\n 'NFKD', fProvincia).translate(trans_tab))\n provincia = provincia.upper()\n # print(provincia)\n\n fNombre = soup.find('span', {'itemprop': 'name'}).text\n nombreEmpresa = fNombre\n # print(fNombre)\n\n nombreTecnico = ''\n # print(nombreTecnico)\n\n especialidad = 'Electrodomésticos'\n # print(especialidad)\n\n direccion = soup.find(\n 'li', {'itemtype': 'https://schema.org/PostalAddress'})\n direccion1 = direccion.find('span', {'itemprop': 'streetAddress'}).text\n direccion2 = direccion.find('span', {'itemprop': 'postalCode'}).text\n direccion3 = direccion.find('span', {'itemprop': 'addressLocality'}).text\n direccion4 = direccion.find('span', {'itemprop': 'addressRegion'}).text\n direccion = direccion1 + \" \" + direccion2 + \" \" + \\\n direccion3 + \" \" + \"(\" + direccion4 + \")\"\n # print (direccion)\n\n email = ''\n # print (email)\n\n fweb = ''\n\n if not soup.find('a', {'itemprop': 'url'}):\n fweb = ''\n else:\n fweb = soup.find('a', {'itemprop': 'url'})\n fweb = fweb.get('href')\n\n web = fweb\n print(web)\n\n horario = ''\n print(horario)\n\n especificacion = soup.find('a', {'itemprop': 'description'}).text\n # print (especificacion)\n\n contratado = 'no'\n # print (contratado)\n\n repetido = 'no'\n # print (repetido)\n\n webFound = url\n # print (webFound)\n\n interesado = ''\n # print (interesado)\n\n comentario = ''\n # print (comentario)\n\n ocultar = 'no'\n # print (ocultar)\n\n localidad = soup.find('span', {'itemprop': 'addressLocality'}).text\n trans_tab = dict.fromkeys(map(ord, u'\\u0301\\u0308'), None) # quitar tildes\n localidad = normalize('NFKC', normalize(\n 'NFKD', localidad).translate(trans_tab)).upper().replace(\",\", \"\")\n # print (localidad)\n\n telefono = soup.find('span', {'itemprop': 'telephone'}).text\n # print (telefono)\n\n data = {\n 'provincia': provincia,\n 'nombreEmpresa': nombreEmpresa,\n 'nombreTecnico': nombreTecnico,\n 'especialidad': especialidad,\n 'direccion': direccion,\n 'email': email,\n 'web': web,\n 'horario': horario,\n 'especificacion': especificacion,\n 'contratado': contratado,\n 'repetido': repetido,\n 'webFound': webFound,\n 'interesado': interesado,\n 'comentario': comentario,\n 'ocultar': ocultar,\n 'localidad': localidad,\n 'telefono': telefono\n }\n return data\n\n\ndef mostrarDatos(data, cont):\n print(str(cont) + '**********************')\n print('provincia:', data['provincia'])\n print('nombreEmpresa:', data['nombreEmpresa'])\n print('nombreTecnico:', data['nombreTecnico'])\n print('especialidad:', data['especialidad'])\n print('direccion:', data['direccion'])\n print('email:', data['email'])\n print('web:', data['web'])\n print('horario:', data['horario'])\n print('especificacion:', data['especificacion'])\n print('contratado:', data['contratado'])\n print('repetido:', data['repetido'])\n print('webFound:', data['webFound'])\n print('interesado:', data['interesado'])\n print('comentario:', data['comentario'])\n print('ocultar:', data['ocultar'])\n print('localidad:', data['localidad'])\n print('telefono:', data['telefono'])\n print('')\n\n\ndef mostrarArray(ArrayEmpresas):\n cont = 0\n for empresa in ArrayEmpresas:\n cont += 1\n print(str(cont) + '~~**********************')\n print('nombreEmpresa:', empresa['nombreEmpresa'])\n # print('nombreTecnico:', empresa['nombreTecnico'])\n print('especialidad:', empresa['especialidad'])\n print('direccion:', empresa['direccion'])\n print('email:', empresa['email'])\n print('web:', empresa['web'])\n # print('horario:', 
empresa['horario'])\n print('especificacion:', empresa['especificacion'])\n # print('contratado:', empresa['contratado'])\n # print('repetido:', empresa['repetido'])\n print('webFound:', empresa['webFound'])\n # print('interesado:', empresa['interesado'])\n # print('comentario:', empresa['comentario'])\n # print('ocultar:', empresa['ocultar'])\n print('provincia:', empresa['provincia'])\n print('localidad:', empresa['localidad'])\n print('telefono:', empresa['telefono'])\n print('')\n\n\ndef addData(empresa):\n uri = \"http://localhost/tecnicos/api/index.php/empresa\"\n response = requests.post(uri, empresa)\n response = response.json()\n\n return response\n\n\ndef addListaEmpresa(listaEmpresa):\n cont = 0\n cont2 = 0\n cont3 = 0\n for empresa in listaEmpresa:\n response = addData(empresa)\n if response['data'] == 'Se ha añadido la empresa correctamente':\n cont += 1\n if response['data'] == 'Se ha añadido correctamente la localidad o el telefono':\n cont2 += 1\n if response['status'] == 'Error':\n cont3 += 1\n\n print(\"--------------Resultados----------------\")\n print(\"El número total de empresas recopiladas son: \" + str(len(listaEmpresa)))\n print(\"Empresas Nuevas que se han añadido son: \" + str(cont))\n print(\"Empresas que se han repetido, pero solo se añaden pueblos y telefonos son: \" + str(cont2))\n print(\"No se han añadido a la base de datos: \" + str(cont3))\n\n\ndef compruebaExisteDiccionario(cadena):\n comprueba = False\n cadena = cadena.lower()\n trans_tab = dict.fromkeys(map(ord, u'\\u0301\\u0308'), None) # quitar tildes\n cadena = normalize('NFKC', normalize('NFKD', cadena).translate(trans_tab))\n # print(cadena)\n\n listaDicc = diccionario.getDiccionario()\n\n for palabra in listaDicc:\n aux = cadena.find(palabra)\n if aux < 0:\n comprueba = False\n # print(comprueba)\n else:\n comprueba = True\n # print(comprueba)\n break\n\n return comprueba\n\n\ndef main():\n\n url = \"https://www.milanuncios.com/anuncios-en-almeria/reparar-electrodomesticos.htm?fromSearch=1\"\n soup = getSoup(url)\n\n # PASO 1 Obtiene url hojas\n listaPaginas = getListaPaginas(soup)\n # listaPaginas.insert(0, url)\n # muestraListaURL(listaPaginas)\n\n # PASO 2 Obtienes url empresas\n # listaURLEmpresas = getURLEmpresas(listaPaginas[0])\n # muestraListaURL(listaURLEmpresas)\n\n # PASO 2.1 comprobar datos de las empresas\n # hay que comentar\n # url = \"https://www.citiservi.es/almeria/servicio-tecnico-balay-air-almeria-664836045-almeria__10149220_24.html\"\n # getDatos(url)\n # mostrarDatos(datosEmpresa,cont)\n\n \"\"\"\n cont = 0\n contadorEmpresasQuieres = 2\n listaFinalDatos = []\n salirBucle = False\n\n # PASO 3 Recorre las hojas y las empresas\n for urlPagina in listaPaginas:\n listaEmpresas = getURLEmpresas(urlPagina)\n for urlEmpresa in listaEmpresas:\n cont += 1\n # print(str(cont)+\" \"+ urlEmpresa)\n datosEmpresa = getDatos(urlEmpresa)\n # mostrarDatos(datosEmpresa,cont)\n\n comprueba = compruebaExisteDiccionario(\n datosEmpresa['especificacion'])\n\n if comprueba == False: # No esta en el diccionario, asi que añade\n listaFinalDatos.append(datosEmpresa)\n\n if cont == contadorEmpresasQuieres:\n salirBucle = True\n break\n if salirBucle:\n break\n\n # Paso 4 añadir base de datos\n mostrarArray(listaFinalDatos)\n print(\"¿Desea guardar los datos? 
(s o n)\")\n respuesta = input()\n if respuesta != 's':\n print(\"El codigo se ha parado\")\n else:\n print('Los datos se estan guardado correctamente en la base de datos')\n addListaEmpresa(listaFinalDatos)\n # \"\"\"\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scrapy/4_milanuncios.py","file_name":"4_milanuncios.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"540505088","text":"\nfrom openprocurement.auctions.geb.tests.helpers import (\n get_next_status,\n)\n\n\ndef phase_commit(self):\n next_status = get_next_status(self.auction['status'])\n field = 'status'\n\n request_data = {\"data\": {field: next_status}}\n response = self.app.patch_json(self.entrypoint, request_data)\n self.assertEqual(next_status, response.json['data'][field])\n\n\ndef change_forbidden_field_in_draft(self):\n new_title = 'Test Title'\n field = 'title'\n\n request_data = {\"data\": {field: new_title}}\n response = self.app.patch_json(self.entrypoint, request_data)\n\n entrypoint = '/auctions/{}'.format(self.auction['id'])\n response = self.app.get(entrypoint)\n\n self.assertNotEqual(new_title, response.json['data'][field])\n","sub_path":"openprocurement/auctions/geb/tests/blanks/draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"383909733","text":"\"\"\"\nWrites results\n\"\"\"\nimport datetime\nimport logging\nimport os\nimport uuid\n\n\nclass ResultWriter:\n\n @property\n def logger(self):\n return logging.getLogger(__name__)\n\n def __call__(self, x, y_actual, y_pred, pos_label, output_dir, x_meta=None, filename_prefix=\"results\"):\n from sklearn.metrics import confusion_matrix\n cnf_matrix = confusion_matrix(y_actual, y_pred)\n\n filename = os.path.join(output_dir,\n \"predictedvsactual_{}_{}.csv\".format(str(uuid.uuid4()),\n datetime.datetime.strftime(datetime.datetime.now(),\n format=\"%Y%m%d_%H%M%S\")))\n self.save_data(y_pred, y_actual, filename)\n self.logger.info(\"Confusion matrix, full output in {}: \\n{}\".format(filename, cnf_matrix))\n\n def save_data(self, pred, actual, outfile):\n # Write to output\n\n with open(outfile, \"w\") as out:\n out.write(\"{},{}\\n\".format(\"actual\", \"pred\"))\n for a, p in zip(actual, pred):\n out.write(\"{},{}\\n\".format(a, p))\n","sub_path":"source/algorithms/result_writer.py","file_name":"result_writer.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"145494435","text":"from datetime import datetime\nfrom decimal import Decimal, ROUND_DOWN\nfrom six import string_types\nfrom .base import CandleStickProvider, CandleStickWidth\nfrom pandas.io.parsers import TextFileReader\nfrom pandas import read_csv\n\n\ndef reader(file_, chunksize=100):\n return read_csv(file_, header=None, names=('sec', 'date', 'min', 'max'), usecols=('date', 'min', 'max'),\n parse_dates=['date'], iterator=True, chunksize=chunksize, compression=None, engine=\"c\",\n date_parser=lambda dt: datetime.strptime(dt, '%Y%m%d %H:%M:%S.%f'),\n converters={'min': (lambda u: Decimal(u)), 'max': (lambda u: Decimal(u))})\n\n\nclass TrueFXCandleStickProvider(CandleStickProvider):\n\n def __init__(self, source, candlestick_width=CandleStickWidth.SECOND, decimal_round=ROUND_DOWN, chunk_size=100):\n \"\"\"\n This provider accepts the source as a filename, iterable, or result of 
csv.reader()\n :param source: A filename, or result of csv.reader(), or any iterable.\n :param candlestick_width: The width of the candlestick to generate (by default: seconds)\n :param decimal_round: The round method for the Decimal class, after the price mean was calculated\n :param args: Additional positional arguments to csv.reader()\n :param kwargs: Additional keyword arguments to csv.reader()\n :return:\n \"\"\"\n\n if isinstance(source, string_types):\n source = open(source, 'r')\n if not isinstance(source, TextFileReader):\n source = iter(reader(source))\n super(TrueFXCandleStickProvider, self).__init__(source, candlestick_width)\n self._decimal_round = decimal_round\n self._chunk_iterator = iter([])\n\n def _next_value(self):\n \"\"\"\n This method iterates over the TrueFX historical CSV file and returns a pair like this:\n 1. Parsing the datetime (2nd column) with format: '%Y%m%d %H:%M:%S.%f'\n 2. Parsing the buy and sell price (as decimals, calculating the mean)\n :return:\n \"\"\"\n\n while True:\n # Try obtaining the next row. If fails, try obtaining the next chunk and run the next iteration, skipping\n # the current one.\n try:\n row = next(self._chunk_iterator)\n except StopIteration:\n self._chunk_iterator = next(self._source).itertuples()\n continue\n\n price = ((Decimal(row.min) + Decimal(row.max)) / 2).quantize(Decimal('0.00001'), rounding=self._decimal_round)\n try:\n return row.date, price\n except:\n # Error parsing datetime! So we discard this row and continue\n pass\n","sub_path":"financialfantasy/historical/digest/providers/truefx.py","file_name":"truefx.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"41857725","text":"# ------------------------------------------------------------------------------\r\n# Name: attenuation_correction_example.py\r\n# Purpose: Examplify different attenuation correction procedures\r\n#\r\n# Author: Maik Heistermann\r\n#\r\n# Created: 10.01.2015\r\n# Copyright: (c) Maik Heistermann 2015\r\n# Licence: The MIT License\r\n# ------------------------------------------------------------------------------\r\n\r\nimport wradlib\r\nimport matplotlib.pyplot as plt\r\n#plt.interactive(True)\r\nimport os\r\n\r\n\r\ndef ex_attenuation():\r\n filename = os.path.dirname(__file__) + '/' + 'data/raa00-dx_10908-0806021655-fbg---bin'\r\n\r\n # Show PPI\r\n data, attrs = wradlib.io.readDX(filename)\r\n test = data.copy()\r\n ax, cf = wradlib.vis.plot_ppi(data, cmap=\"spectral\")\r\n plt.xlabel(\"Easting from radar (km)\")\r\n plt.ylabel(\"Northing from radar (km)\")\r\n plt.title(\"Radar Feldberg, 2008-06-02 17:45 UTC\")\r\n cb = plt.colorbar(cf, shrink=0.8)\r\n cb.set_label(\"dBZ\")\r\n plt.plot([0, 105.6], [0, 73.4], \"-\", color=\"white\", lw=2)\r\n plt.xlim(-128, 128)\r\n plt.ylim(-128, 128)\r\n plt.grid(color=\"grey\")\r\n\r\n plt.show()\r\n plt.close()\r\n\r\n # Hitschfeld and Bordan\r\n pia_hibo = wradlib.atten.correctAttenuationHB(data, coefficients=dict(a=8.e-5, b=0.731, l=1.0), mode=\"warn\",\r\n thrs=59.)\r\n\r\n # Harrison\r\n pia_harrison = wradlib.atten.correctAttenuationHB(data, coefficients=dict(a=4.57e-5, b=0.731, l=1.0), mode=\"warn\",\r\n thrs=59.)\r\n pia_harrison[pia_harrison > 4.8] = 4.8\r\n\r\n # Kraemer\r\n pia_kraemer = wradlib.atten.correctAttenuationConstrained2(\r\n data,\r\n a_max=1.67e-4,\r\n a_min=2.33e-5,\r\n n_a=100,\r\n b_max=0.7,\r\n b_min=0.65,\r\n n_b=100,\r\n l=1.,\r\n constraints=[wradlib.atten.constraint_dBZ],\r\n 
constraint_args=[[59.0]])\r\n\r\n # Modified Kraemer\r\n pia_modKraemer = wradlib.atten.correctAttenuationConstrained2(\r\n data,\r\n a_max=1.67e-4,\r\n a_min=2.33e-5,\r\n n_a=100,\r\n b_max=0.7,\r\n b_min=0.65,\r\n n_b=17,\r\n l=1.,\r\n constraints=[wradlib.atten.constraint_dBZ,\r\n wradlib.atten.constraint_pia],\r\n constraint_args=[[59.0],\r\n [20.0]])\r\n\r\n # Plot all results into one figure as profiles along the beams\r\n fig = plt.figure(figsize=(9, 15))\r\n\r\n mybeams = slice(53, 56)\r\n labelsize = 13\r\n\r\n ax = fig.add_subplot(511)\r\n plt.plot(data[53], label=\"53 deg\")\r\n plt.plot(data[54], label=\"54 deg\")\r\n plt.plot(data[55], label=\"55 deg\")\r\n plt.grid()\r\n plt.text(0.99, 0.88, \"Reflectivity along beams\", horizontalalignment='right', transform=ax.transAxes,\r\n fontsize=\"large\")\r\n plt.ylabel(\"Reflectivity (dBZ)\", fontsize=\"large\")\r\n plt.legend(loc=\"upper left\")\r\n ax.tick_params(axis='x', labelsize=labelsize)\r\n ax.tick_params(axis='y', labelsize=labelsize)\r\n plt.xlim(0, 128)\r\n\r\n ax = fig.add_subplot(512)\r\n plt.plot(pia_hibo[mybeams].T)\r\n plt.grid()\r\n plt.ylim(0, 30)\r\n plt.ylabel(\"PIA (dB)\", fontsize=\"large\")\r\n plt.text(0.01, 0.88, \"PIA according to Hitschfeld and Bordan\", transform=ax.transAxes, fontsize=\"large\")\r\n ax.tick_params(axis='x', labelsize=labelsize)\r\n ax.tick_params(axis='y', labelsize=labelsize)\r\n plt.xlim(0, 128)\r\n\r\n ax = fig.add_subplot(513)\r\n plt.plot(pia_harrison[mybeams].T)\r\n plt.grid()\r\n plt.ylim(0, 30)\r\n plt.ylabel(\"PIA (dB)\", fontsize=\"large\")\r\n plt.text(0.01, 0.88, \"PIA according to Harrison\", transform=ax.transAxes, fontsize=\"large\")\r\n ax.tick_params(axis='x', labelsize=labelsize)\r\n ax.tick_params(axis='y', labelsize=labelsize)\r\n plt.xlim(0, 128)\r\n\r\n ax = fig.add_subplot(514)\r\n plt.plot(pia_kraemer[mybeams].T)\r\n plt.grid()\r\n plt.ylim(0, 30)\r\n plt.ylabel(\"PIA (dB)\", fontsize=\"large\")\r\n plt.text(0.01, 0.88, \"PIA according to Kraemer\", transform=ax.transAxes, fontsize=\"large\")\r\n ax.tick_params(axis='x', labelsize=labelsize)\r\n ax.tick_params(axis='y', labelsize=labelsize)\r\n plt.xlim(0, 128)\r\n\r\n ax = fig.add_subplot(515)\r\n plt.plot(pia_modKraemer[mybeams].T)\r\n plt.grid()\r\n plt.ylim(0, 30)\r\n plt.xlabel(\"range (km)\", fontsize=\"large\")\r\n plt.ylabel(\"PIA (dB)\", fontsize=\"large\")\r\n plt.text(0.01, 0.88, \"PIA according to modified Kraemer\", transform=ax.transAxes, fontsize=\"large\")\r\n ax.tick_params(axis='x', labelsize=labelsize)\r\n ax.tick_params(axis='y', labelsize=labelsize)\r\n plt.xlim(0, 128)\r\n\r\n plt.show()\r\n plt.close()\r\n\r\n\r\n# =======================================================\r\nif __name__ == '__main__':\r\n ex_attenuation()\r\n","sub_path":"examples/attenuation_correction_example.py","file_name":"attenuation_correction_example.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"297444091","text":"from django.shortcuts import render, redirect\nfrom django.contrib import admin\nfrom .models import Companies, Kurse\nimport django.forms as forms\nfrom django.urls import path\nimport csv\nimport pandas as pd\nimport os\nfrom pandas.io.common import urlencode\nimport requests\nimport json\nimport time\n#from django.db import transaction\n\n# Register your models here.\n\n#\n\n#@transaction.commit_manually\n#def savedataframe(df):\n #for item in df.to_dict('records'):\n #entry = Entry(**item)\n 
#entry.save()\n #transaction.commit()\n\nclass KurseAdmin(admin.ModelAdmin):\n list_display = ['symbol', 'date']\n actions = [ ]\n change_list_template = os.path.join('admin','kurse_changelist.html')\n\n def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n path('update/', self.set_update),\n ]\n return my_urls + urls\n\n def set_update(self, request):\n #compact oder full\n type = request.POST.get('inctype')\n queryset = Companies.objects.all()\n symbolliste = [p.symbol for p in queryset]\n\n for sym in symbolliste[:]:\n params = {\n \"function\": \"TIME_SERIES_DAILY_ADJUSTED\",\n \"symbol\": sym,\n \"outputsize\": type,\n \"apikey\": \"NC25JEGQ688IWO7X\"\n }\n\n url = \"https://www.alphavantage.co/query?\" + urlencode(params)\n time.sleep(15)\n response = requests.get(url)\n if response.status_code != 200:\n self.message_user(request, \"Failed to get data\")\n else:\n aktien = response.json()\n self.message_user(request, 'Update ' + sym)\n df = pd.DataFrame(aktien[\"Time Series (Daily)\"]).transpose()\n #df['symbol'] = aktien[\"Meta Data\"][\"2. Symbol\"]\n\n df = df.drop(columns=['7. dividend amount', '8. split coefficient'])\n df.columns = ['open', 'high', 'low', 'close','adjusted_close' ,'volume']\n\n df[['open', 'high', 'low', 'close','adjusted_close','volume']] = df[['open', 'high', 'low', 'close','adjusted_close','volume']].apply(pd.to_numeric)\n df[\"change\"] = df[\"adjusted_close\"].diff(periods=-1)\n df = df[:-1]\n df[\"change\"] = df[\"change\"].round(3)\n df['date'] = pd.to_datetime(df.index)\n #df['date'] = df['date'].dt.date\n df = df[df[\"date\"] > '2015-01-01']\n df[\"symbol\"] = Companies.objects.get(symbol=sym)\n\n for item in df.to_dict('records'):\n Kurse.objects.update_or_create(**item)\n\n self.message_user(request, \"Update Stocks\")\n return redirect(\"..\")\n\n\n\nclass CsvImportForm(forms.Form):\n csv_file = forms.FileField()\n\nclass CompaniesAdmin(admin.ModelAdmin):\n list_display = ['symbol', 'name']\n change_list_template = os.path.join('admin','aktien_changelist.html')\n\n def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n path('import-csv/', self.import_csv),\n ]\n return my_urls + urls\n\n def import_csv(self, request):\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n #reader = csv.reader(csv_file)\n\n try:\n df = pd.read_csv(csv_file, sep=';',encoding='utf-8')\n Companies.objects.bulk_create(\n Companies(**vals) for vals in df.to_dict('records')\n )\n self.message_user(request, \"Your csv file has been imported\")\n return redirect(\"..\")\n except:\n self.message_user(request, \"Your csv file counld not be imported\")\n return redirect(\"..\")\n\n form = CsvImportForm()\n payload = {\"form\": form}\n return render(\n request, os.path.join('admin', 'csv_form.html'), payload\n )\n\nadmin.site.register(Companies, CompaniesAdmin)\nadmin.site.register(Kurse, KurseAdmin)\n","sub_path":"aktien/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}